Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Jakub Kicinski:
"One more set of fixes from the networking tree:

- add missing input validation in nl80211_del_key(), preventing
out-of-bounds access

- last-minute fix / improvement of an MRP netlink (uAPI) interface
introduced in 5.9 (current) release

- fix "unresolved symbol" build error under CONFIG_NET w/o
CONFIG_INET due to missing tcp_timewait_sock and inet_timewait_sock
BTF.

- fix 32 bit sub-register bounds tracking in the bpf verifier for OR
case

- tcp: fix receive window update in tcp_add_backlog()

- openvswitch: handle DNAT tuple collision in conntrack-related code

- r8169: wait for potential PHY reset to finish after applying a FW
file, avoiding unexpected PHY behaviour and failures later on

- mscc: fix tail dropping watermarks for Ocelot switches

- avoid use-after-free in macsec code after a call to the GRO layer

- avoid use-after-free in sctp error paths

- add a device id for Cellient MPL200 WWAN card

- rxrpc fixes:
- fix the xdr encoding of the contents read from an rxrpc key
- fix a BUG() for an unsupported encoding type.
- fix missing _bh lock annotations.
- fix acceptance handling for an incoming call where the incoming
call is encrypted.
- the server token keyring isn't network namespaced - it belongs
to the server, so there's no need. Namespacing it means that
request_key() fails to find it.
- fix a leak of the server keyring"

* git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (21 commits)
net: usb: qmi_wwan: add Cellient MPL200 card
macsec: avoid use-after-free in macsec_handle_frame()
r8169: consider that PHY reset may still be in progress after applying firmware
openvswitch: handle DNAT tuple collision
sctp: fix sctp_auth_init_hmacs() error path
bridge: Netlink interface fix.
net: wireless: nl80211: fix out-of-bounds access in nl80211_del_key()
bpf: Fix scalar32_min_max_or bounds tracking
tcp: fix receive window update in tcp_add_backlog()
net: usb: rtl8150: set random MAC address when set_ethernet_addr() fails
mptcp: more DATA FIN fixes
net: mscc: ocelot: warn when encoding an out-of-bounds watermark value
net: mscc: ocelot: divide watermark value by 60 when writing to SYS_ATOP
net: qrtr: ns: Fix the incorrect usage of rcu_read_lock()
rxrpc: Fix server keyring leak
rxrpc: The server keyring isn't network-namespaced
rxrpc: Fix accept on a connection that need securing
rxrpc: Fix some missing _bh annotations on locking conn->state_lock
rxrpc: Downgrade the BUG() for unsupported token type in rxrpc_read()
rxrpc: Fix rxkad token xdr encoding
...

+211 -349
+2
drivers/net/dsa/ocelot/felix_vsc9959.c
··· 1171 1171 */ 1172 1172 static u16 vsc9959_wm_enc(u16 value) 1173 1173 { 1174 + WARN_ON(value >= 16 * BIT(8)); 1175 + 1174 1176 if (value >= BIT(8)) 1175 1177 return BIT(8) | (value / 16); 1176 1178
+2
drivers/net/dsa/ocelot/seville_vsc9953.c
··· 911 911 */ 912 912 static u16 vsc9953_wm_enc(u16 value) 913 913 { 914 + WARN_ON(value >= 16 * BIT(9)); 915 + 914 916 if (value >= BIT(9)) 915 917 return BIT(9) | (value / 16); 916 918
+6 -6
drivers/net/ethernet/mscc/ocelot.c
··· 1253 1253 struct ocelot_port *ocelot_port = ocelot->ports[port]; 1254 1254 int maxlen = sdu + ETH_HLEN + ETH_FCS_LEN; 1255 1255 int pause_start, pause_stop; 1256 - int atop_wm; 1256 + int atop, atop_tot; 1257 1257 1258 1258 if (port == ocelot->npi) { 1259 1259 maxlen += OCELOT_TAG_LEN; ··· 1274 1274 ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_STOP, 1275 1275 pause_stop); 1276 1276 1277 - /* Tail dropping watermark */ 1278 - atop_wm = (ocelot->shared_queue_sz - 9 * maxlen) / 1277 + /* Tail dropping watermarks */ 1278 + atop_tot = (ocelot->shared_queue_sz - 9 * maxlen) / 1279 1279 OCELOT_BUFFER_CELL_SZ; 1280 - ocelot_write_rix(ocelot, ocelot->ops->wm_enc(9 * maxlen), 1281 - SYS_ATOP, port); 1282 - ocelot_write(ocelot, ocelot->ops->wm_enc(atop_wm), SYS_ATOP_TOT_CFG); 1280 + atop = (9 * maxlen) / OCELOT_BUFFER_CELL_SZ; 1281 + ocelot_write_rix(ocelot, ocelot->ops->wm_enc(atop), SYS_ATOP, port); 1282 + ocelot_write(ocelot, ocelot->ops->wm_enc(atop_tot), SYS_ATOP_TOT_CFG); 1283 1283 } 1284 1284 EXPORT_SYMBOL(ocelot_port_set_maxlen); 1285 1285
+2
drivers/net/ethernet/mscc/ocelot_vsc7514.c
··· 745 745 */ 746 746 static u16 ocelot_wm_enc(u16 value) 747 747 { 748 + WARN_ON(value >= 16 * BIT(8)); 749 + 748 750 if (value >= BIT(8)) 749 751 return BIT(8) | (value / 16); 750 752
+7
drivers/net/ethernet/realtek/r8169_main.c
··· 2058 2058 2059 2059 void r8169_apply_firmware(struct rtl8169_private *tp) 2060 2060 { 2061 + int val; 2062 + 2061 2063 /* TODO: release firmware if rtl_fw_write_firmware signals failure. */ 2062 2064 if (tp->rtl_fw) { 2063 2065 rtl_fw_write_firmware(tp, tp->rtl_fw); 2064 2066 /* At least one firmware doesn't reset tp->ocp_base. */ 2065 2067 tp->ocp_base = OCP_STD_PHY_BASE; 2068 + 2069 + /* PHY soft reset may still be in progress */ 2070 + phy_read_poll_timeout(tp->phydev, MII_BMCR, val, 2071 + !(val & BMCR_RESET), 2072 + 50000, 600000, true); 2066 2073 } 2067 2074 } 2068 2075
+3 -1
drivers/net/macsec.c
··· 1077 1077 struct macsec_rx_sa *rx_sa; 1078 1078 struct macsec_rxh_data *rxd; 1079 1079 struct macsec_dev *macsec; 1080 + unsigned int len; 1080 1081 sci_t sci; 1081 1082 u32 hdr_pn; 1082 1083 bool cbit; ··· 1233 1232 macsec_rxsc_put(rx_sc); 1234 1233 1235 1234 skb_orphan(skb); 1235 + len = skb->len; 1236 1236 ret = gro_cells_receive(&macsec->gro_cells, skb); 1237 1237 if (ret == NET_RX_SUCCESS) 1238 - count_rx(dev, skb->len); 1238 + count_rx(dev, len); 1239 1239 else 1240 1240 macsec->secy.netdev->stats.rx_dropped++; 1241 1241
+1
drivers/net/usb/qmi_wwan.c
··· 1375 1375 {QMI_QUIRK_SET_DTR(0x2cb7, 0x0104, 4)}, /* Fibocom NL678 series */ 1376 1376 {QMI_FIXED_INTF(0x0489, 0xe0b4, 0)}, /* Foxconn T77W968 LTE */ 1377 1377 {QMI_FIXED_INTF(0x0489, 0xe0b5, 0)}, /* Foxconn T77W968 LTE with eSIM support*/ 1378 + {QMI_FIXED_INTF(0x2692, 0x9025, 4)}, /* Cellient MPL200 (rebranded Qualcomm 05c6:9025) */ 1378 1379 1379 1380 /* 4. Gobi 1000 devices */ 1380 1381 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
+12 -4
drivers/net/usb/rtl8150.c
··· 274 274 return 1; 275 275 } 276 276 277 - static inline void set_ethernet_addr(rtl8150_t * dev) 277 + static void set_ethernet_addr(rtl8150_t *dev) 278 278 { 279 - u8 node_id[6]; 279 + u8 node_id[ETH_ALEN]; 280 + int ret; 280 281 281 - get_registers(dev, IDR, sizeof(node_id), node_id); 282 - memcpy(dev->netdev->dev_addr, node_id, sizeof(node_id)); 282 + ret = get_registers(dev, IDR, sizeof(node_id), node_id); 283 + 284 + if (ret == sizeof(node_id)) { 285 + ether_addr_copy(dev->netdev->dev_addr, node_id); 286 + } else { 287 + eth_hw_addr_random(dev->netdev); 288 + netdev_notice(dev->netdev, "Assigned a random MAC address: %pM\n", 289 + dev->netdev->dev_addr); 290 + } 283 291 } 284 292 285 293 static int rtl8150_set_mac_address(struct net_device *netdev, void *p)
+1 -1
include/uapi/linux/rxrpc.h
··· 51 51 RXRPC_BUSY = 6, /* -r: server busy received [terminal] */ 52 52 RXRPC_LOCAL_ERROR = 7, /* -r: local error generated [terminal] */ 53 53 RXRPC_NEW_CALL = 8, /* -r: [Service] new incoming call notification */ 54 - RXRPC_ACCEPT = 9, /* s-: [Service] accept request */ 55 54 RXRPC_EXCLUSIVE_CALL = 10, /* s-: Call should be on exclusive connection */ 56 55 RXRPC_UPGRADE_SERVICE = 11, /* s-: Request service upgrade for client call */ 57 56 RXRPC_TX_LENGTH = 12, /* s-: Total length of Tx data */ 58 57 RXRPC_SET_CALL_TIMEOUT = 13, /* s-: Set one or more call timeouts */ 58 + RXRPC_CHARGE_ACCEPT = 14, /* s-: Charge the accept pool with a user call ID */ 59 59 RXRPC__SUPPORTED 60 60 }; 61 61
+4 -4
kernel/bpf/verifier.c
··· 5667 5667 bool src_known = tnum_subreg_is_const(src_reg->var_off); 5668 5668 bool dst_known = tnum_subreg_is_const(dst_reg->var_off); 5669 5669 struct tnum var32_off = tnum_subreg(dst_reg->var_off); 5670 - s32 smin_val = src_reg->smin_value; 5671 - u32 umin_val = src_reg->umin_value; 5670 + s32 smin_val = src_reg->s32_min_value; 5671 + u32 umin_val = src_reg->u32_min_value; 5672 5672 5673 5673 /* Assuming scalar64_min_max_or will be called so it is safe 5674 5674 * to skip updating register for known case. ··· 5691 5691 /* ORing two positives gives a positive, so safe to 5692 5692 * cast result into s64. 5693 5693 */ 5694 - dst_reg->s32_min_value = dst_reg->umin_value; 5695 - dst_reg->s32_max_value = dst_reg->umax_value; 5694 + dst_reg->s32_min_value = dst_reg->u32_min_value; 5695 + dst_reg->s32_max_value = dst_reg->u32_max_value; 5696 5696 } 5697 5697 } 5698 5698
+11 -15
net/bridge/br_netlink.c
··· 380 380 u32 filter_mask, const struct net_device *dev) 381 381 { 382 382 u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN; 383 + struct nlattr *af = NULL; 383 384 struct net_bridge *br; 384 385 struct ifinfomsg *hdr; 385 386 struct nlmsghdr *nlh; ··· 424 423 nla_nest_end(skb, nest); 425 424 } 426 425 426 + if (filter_mask & (RTEXT_FILTER_BRVLAN | 427 + RTEXT_FILTER_BRVLAN_COMPRESSED | 428 + RTEXT_FILTER_MRP)) { 429 + af = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 430 + if (!af) 431 + goto nla_put_failure; 432 + } 433 + 427 434 /* Check if the VID information is requested */ 428 435 if ((filter_mask & RTEXT_FILTER_BRVLAN) || 429 436 (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED)) { 430 437 struct net_bridge_vlan_group *vg; 431 - struct nlattr *af; 432 438 int err; 433 439 434 440 /* RCU needed because of the VLAN locking rules (rcu || rtnl) */ ··· 449 441 rcu_read_unlock(); 450 442 goto done; 451 443 } 452 - af = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 453 - if (!af) { 454 - rcu_read_unlock(); 455 - goto nla_put_failure; 456 - } 457 444 if (filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED) 458 445 err = br_fill_ifvlaninfo_compressed(skb, vg); 459 446 else ··· 459 456 rcu_read_unlock(); 460 457 if (err) 461 458 goto nla_put_failure; 462 - 463 - nla_nest_end(skb, af); 464 459 } 465 460 466 461 if (filter_mask & RTEXT_FILTER_MRP) { 467 - struct nlattr *af; 468 462 int err; 469 463 470 464 if (!br_mrp_enabled(br) || port) 471 465 goto done; 472 - 473 - af = nla_nest_start_noflag(skb, IFLA_AF_SPEC); 474 - if (!af) 475 - goto nla_put_failure; 476 466 477 467 rcu_read_lock(); 478 468 err = br_mrp_fill_info(skb, br); ··· 473 477 474 478 if (err) 475 479 goto nla_put_failure; 476 - 477 - nla_nest_end(skb, af); 478 480 } 479 481 480 482 done: 483 + if (af) 484 + nla_nest_end(skb, af); 481 485 nlmsg_end(skb, nlh); 482 486 return 0; 483 487
+6
net/core/filter.c
··· 9558 9558 9559 9559 BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk) 9560 9560 { 9561 + /* BTF types for tcp_timewait_sock and inet_timewait_sock are not 9562 + * generated if CONFIG_INET=n. Trigger an explicit generation here. 9563 + */ 9564 + BTF_TYPE_EMIT(struct inet_timewait_sock); 9565 + BTF_TYPE_EMIT(struct tcp_timewait_sock); 9566 + 9561 9567 #ifdef CONFIG_INET 9562 9568 if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT) 9563 9569 return (unsigned long)sk;
+3 -3
net/ipv4/tcp_ipv4.c
··· 1788 1788 1789 1789 __skb_pull(skb, hdrlen); 1790 1790 if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) { 1791 - thtail->window = th->window; 1792 - 1793 1791 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq; 1794 1792 1795 - if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq)) 1793 + if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) { 1796 1794 TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq; 1795 + thtail->window = th->window; 1796 + } 1797 1797 1798 1798 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and 1799 1799 * thtail->fin, so that the fast path in tcp_rcv_established()
+5 -5
net/mptcp/options.c
··· 451 451 static void mptcp_write_data_fin(struct mptcp_subflow_context *subflow, 452 452 struct sk_buff *skb, struct mptcp_ext *ext) 453 453 { 454 - u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq); 454 + /* The write_seq value has already been incremented, so the actual 455 + * sequence number for the DATA_FIN is one less. 456 + */ 457 + u64 data_fin_tx_seq = READ_ONCE(mptcp_sk(subflow->conn)->write_seq) - 1; 455 458 456 459 if (!ext->use_map || !skb->len) { 457 460 /* RFC6824 requires a DSS mapping with specific values ··· 463 460 ext->data_fin = 1; 464 461 ext->use_map = 1; 465 462 ext->dsn64 = 1; 466 - /* The write_seq value has already been incremented, so 467 - * the actual sequence number for the DATA_FIN is one less. 468 - */ 469 - ext->data_seq = data_fin_tx_seq - 1; 463 + ext->data_seq = data_fin_tx_seq; 470 464 ext->subflow_seq = 0; 471 465 ext->data_len = 1; 472 466 } else if (ext->data_seq + ext->data_len == data_fin_tx_seq) {
+1 -1
net/mptcp/subflow.c
··· 749 749 return MAPPING_DATA_FIN; 750 750 } 751 751 } else { 752 - u64 data_fin_seq = mpext->data_seq + data_len; 752 + u64 data_fin_seq = mpext->data_seq + data_len - 1; 753 753 754 754 /* If mpext->data_seq is a 32-bit value, data_fin_seq 755 755 * must also be limited to 32 bits.
+12 -8
net/openvswitch/conntrack.c
··· 905 905 } 906 906 err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, maniptype); 907 907 908 - if (err == NF_ACCEPT && 909 - ct->status & IPS_SRC_NAT && ct->status & IPS_DST_NAT) { 910 - if (maniptype == NF_NAT_MANIP_SRC) 911 - maniptype = NF_NAT_MANIP_DST; 912 - else 913 - maniptype = NF_NAT_MANIP_SRC; 908 + if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) { 909 + if (ct->status & IPS_SRC_NAT) { 910 + if (maniptype == NF_NAT_MANIP_SRC) 911 + maniptype = NF_NAT_MANIP_DST; 912 + else 913 + maniptype = NF_NAT_MANIP_SRC; 914 914 915 - err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, 916 - maniptype); 915 + err = ovs_ct_nat_execute(skb, ct, ctinfo, &info->range, 916 + maniptype); 917 + } else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) { 918 + err = ovs_ct_nat_execute(skb, ct, ctinfo, NULL, 919 + NF_NAT_MANIP_SRC); 920 + } 917 921 } 918 922 919 923 /* Mark NAT done if successful and update the flow key. */
+64 -12
net/qrtr/ns.c
··· 193 193 struct qrtr_server *srv; 194 194 struct qrtr_node *node; 195 195 void __rcu **slot; 196 - int ret = 0; 196 + int ret; 197 197 198 198 node = node_get(qrtr_ns.local_node); 199 199 if (!node) ··· 203 203 /* Announce the list of servers registered in this node */ 204 204 radix_tree_for_each_slot(slot, &node->servers, &iter, 0) { 205 205 srv = radix_tree_deref_slot(slot); 206 + if (!srv) 207 + continue; 208 + if (radix_tree_deref_retry(srv)) { 209 + slot = radix_tree_iter_retry(&iter); 210 + continue; 211 + } 212 + slot = radix_tree_iter_resume(slot, &iter); 213 + rcu_read_unlock(); 206 214 207 215 ret = service_announce_new(sq, srv); 208 216 if (ret < 0) { 209 217 pr_err("failed to announce new service\n"); 210 - goto err_out; 218 + return ret; 211 219 } 220 + 221 + rcu_read_lock(); 212 222 } 213 223 214 - err_out: 215 224 rcu_read_unlock(); 216 225 217 - return ret; 226 + return 0; 218 227 } 219 228 220 229 static struct qrtr_server *server_add(unsigned int service, ··· 348 339 struct qrtr_node *node; 349 340 void __rcu **slot; 350 341 struct kvec iv; 351 - int ret = 0; 342 + int ret; 352 343 353 344 iv.iov_base = &pkt; 354 345 iv.iov_len = sizeof(pkt); ··· 361 352 /* Advertise removal of this client to all servers of remote node */ 362 353 radix_tree_for_each_slot(slot, &node->servers, &iter, 0) { 363 354 srv = radix_tree_deref_slot(slot); 355 + if (!srv) 356 + continue; 357 + if (radix_tree_deref_retry(srv)) { 358 + slot = radix_tree_iter_retry(&iter); 359 + continue; 360 + } 361 + slot = radix_tree_iter_resume(slot, &iter); 362 + rcu_read_unlock(); 364 363 server_del(node, srv->port); 364 + rcu_read_lock(); 365 365 } 366 366 rcu_read_unlock(); 367 367 ··· 386 368 rcu_read_lock(); 387 369 radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) { 388 370 srv = radix_tree_deref_slot(slot); 371 + if (!srv) 372 + continue; 373 + if (radix_tree_deref_retry(srv)) { 374 + slot = radix_tree_iter_retry(&iter); 375 + continue; 376 + } 377 + slot = 
radix_tree_iter_resume(slot, &iter); 378 + rcu_read_unlock(); 389 379 390 380 sq.sq_family = AF_QIPCRTR; 391 381 sq.sq_node = srv->node; ··· 405 379 ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); 406 380 if (ret < 0) { 407 381 pr_err("failed to send bye cmd\n"); 408 - goto err_out; 382 + return ret; 409 383 } 384 + rcu_read_lock(); 410 385 } 411 386 412 - err_out: 413 387 rcu_read_unlock(); 414 388 415 - return ret; 389 + return 0; 416 390 } 417 391 418 392 static int ctrl_cmd_del_client(struct sockaddr_qrtr *from, ··· 430 404 struct list_head *li; 431 405 void __rcu **slot; 432 406 struct kvec iv; 433 - int ret = 0; 407 + int ret; 434 408 435 409 iv.iov_base = &pkt; 436 410 iv.iov_len = sizeof(pkt); ··· 473 447 rcu_read_lock(); 474 448 radix_tree_for_each_slot(slot, &local_node->servers, &iter, 0) { 475 449 srv = radix_tree_deref_slot(slot); 450 + if (!srv) 451 + continue; 452 + if (radix_tree_deref_retry(srv)) { 453 + slot = radix_tree_iter_retry(&iter); 454 + continue; 455 + } 456 + slot = radix_tree_iter_resume(slot, &iter); 457 + rcu_read_unlock(); 476 458 477 459 sq.sq_family = AF_QIPCRTR; 478 460 sq.sq_node = srv->node; ··· 492 458 ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); 493 459 if (ret < 0) { 494 460 pr_err("failed to send del client cmd\n"); 495 - goto err_out; 461 + return ret; 496 462 } 463 + rcu_read_lock(); 497 464 } 498 465 499 - err_out: 500 466 rcu_read_unlock(); 501 467 502 - return ret; 468 + return 0; 503 469 } 504 470 505 471 static int ctrl_cmd_new_server(struct sockaddr_qrtr *from, ··· 605 571 rcu_read_lock(); 606 572 radix_tree_for_each_slot(node_slot, &nodes, &node_iter, 0) { 607 573 node = radix_tree_deref_slot(node_slot); 574 + if (!node) 575 + continue; 576 + if (radix_tree_deref_retry(node)) { 577 + node_slot = radix_tree_iter_retry(&node_iter); 578 + continue; 579 + } 580 + node_slot = radix_tree_iter_resume(node_slot, &node_iter); 608 581 609 582 radix_tree_for_each_slot(srv_slot, &node->servers, 
610 583 &srv_iter, 0) { 611 584 struct qrtr_server *srv; 612 585 613 586 srv = radix_tree_deref_slot(srv_slot); 587 + if (!srv) 588 + continue; 589 + if (radix_tree_deref_retry(srv)) { 590 + srv_slot = radix_tree_iter_retry(&srv_iter); 591 + continue; 592 + } 593 + 614 594 if (!server_match(srv, &filter)) 615 595 continue; 616 596 597 + srv_slot = radix_tree_iter_resume(srv_slot, &srv_iter); 598 + 599 + rcu_read_unlock(); 617 600 lookup_notify(from, srv, true); 601 + rcu_read_lock(); 618 602 } 619 603 } 620 604 rcu_read_unlock();
+2 -5
net/rxrpc/ar-internal.h
··· 518 518 RXRPC_CALL_CLIENT_RECV_REPLY, /* - client receiving reply phase */ 519 519 RXRPC_CALL_SERVER_PREALLOC, /* - service preallocation */ 520 520 RXRPC_CALL_SERVER_SECURING, /* - server securing request connection */ 521 - RXRPC_CALL_SERVER_ACCEPTING, /* - server accepting request */ 522 521 RXRPC_CALL_SERVER_RECV_REQUEST, /* - server receiving request */ 523 522 RXRPC_CALL_SERVER_ACK_REQUEST, /* - server pending ACK of request */ 524 523 RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */ ··· 713 714 enum rxrpc_command { 714 715 RXRPC_CMD_SEND_DATA, /* send data message */ 715 716 RXRPC_CMD_SEND_ABORT, /* request abort generation */ 716 - RXRPC_CMD_ACCEPT, /* [server] accept incoming call */ 717 717 RXRPC_CMD_REJECT_BUSY, /* [server] reject a call as busy */ 718 + RXRPC_CMD_CHARGE_ACCEPT, /* [server] charge accept preallocation */ 718 719 }; 719 720 720 721 struct rxrpc_call_params { ··· 754 755 struct rxrpc_sock *, 755 756 struct sk_buff *); 756 757 void rxrpc_accept_incoming_calls(struct rxrpc_local *); 757 - struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *, unsigned long, 758 - rxrpc_notify_rx_t); 759 - int rxrpc_reject_call(struct rxrpc_sock *); 758 + int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long); 760 759 761 760 /* 762 761 * call_event.c
+38 -225
net/rxrpc/call_accept.c
··· 39 39 unsigned int debug_id) 40 40 { 41 41 const void *here = __builtin_return_address(0); 42 - struct rxrpc_call *call; 42 + struct rxrpc_call *call, *xcall; 43 43 struct rxrpc_net *rxnet = rxrpc_net(sock_net(&rx->sk)); 44 + struct rb_node *parent, **pp; 44 45 int max, tmp; 45 46 unsigned int size = RXRPC_BACKLOG_MAX; 46 47 unsigned int head, tail, call_head, call_tail; ··· 95 94 } 96 95 97 96 /* Now it gets complicated, because calls get registered with the 98 - * socket here, particularly if a user ID is preassigned by the user. 97 + * socket here, with a user ID preassigned by the user. 99 98 */ 100 99 call = rxrpc_alloc_call(rx, gfp, debug_id); 101 100 if (!call) ··· 108 107 here, (const void *)user_call_ID); 109 108 110 109 write_lock(&rx->call_lock); 110 + 111 + /* Check the user ID isn't already in use */ 112 + pp = &rx->calls.rb_node; 113 + parent = NULL; 114 + while (*pp) { 115 + parent = *pp; 116 + xcall = rb_entry(parent, struct rxrpc_call, sock_node); 117 + if (user_call_ID < xcall->user_call_ID) 118 + pp = &(*pp)->rb_left; 119 + else if (user_call_ID > xcall->user_call_ID) 120 + pp = &(*pp)->rb_right; 121 + else 122 + goto id_in_use; 123 + } 124 + 125 + call->user_call_ID = user_call_ID; 126 + call->notify_rx = notify_rx; 111 127 if (user_attach_call) { 112 - struct rxrpc_call *xcall; 113 - struct rb_node *parent, **pp; 114 - 115 - /* Check the user ID isn't already in use */ 116 - pp = &rx->calls.rb_node; 117 - parent = NULL; 118 - while (*pp) { 119 - parent = *pp; 120 - xcall = rb_entry(parent, struct rxrpc_call, sock_node); 121 - if (user_call_ID < xcall->user_call_ID) 122 - pp = &(*pp)->rb_left; 123 - else if (user_call_ID > xcall->user_call_ID) 124 - pp = &(*pp)->rb_right; 125 - else 126 - goto id_in_use; 127 - } 128 - 129 - call->user_call_ID = user_call_ID; 130 - call->notify_rx = notify_rx; 131 128 rxrpc_get_call(call, rxrpc_call_got_kernel); 132 129 user_attach_call(call, user_call_ID); 133 - rxrpc_get_call(call, rxrpc_call_got_userid); 
134 - rb_link_node(&call->sock_node, parent, pp); 135 - rb_insert_color(&call->sock_node, &rx->calls); 136 - set_bit(RXRPC_CALL_HAS_USERID, &call->flags); 137 130 } 131 + 132 + rxrpc_get_call(call, rxrpc_call_got_userid); 133 + rb_link_node(&call->sock_node, parent, pp); 134 + rb_insert_color(&call->sock_node, &rx->calls); 135 + set_bit(RXRPC_CALL_HAS_USERID, &call->flags); 138 136 139 137 list_add(&call->sock_link, &rx->sock_calls); 140 138 ··· 157 157 } 158 158 159 159 /* 160 - * Preallocate sufficient service connections, calls and peers to cover the 161 - * entire backlog of a socket. When a new call comes in, if we don't have 162 - * sufficient of each available, the call gets rejected as busy or ignored. 163 - * 164 - * The backlog is replenished when a connection is accepted or rejected. 160 + * Allocate the preallocation buffers for incoming service calls. These must 161 + * be charged manually. 165 162 */ 166 163 int rxrpc_service_prealloc(struct rxrpc_sock *rx, gfp_t gfp) 167 164 { ··· 170 173 return -ENOMEM; 171 174 rx->backlog = b; 172 175 } 173 - 174 - if (rx->discard_new_call) 175 - return 0; 176 - 177 - while (rxrpc_service_prealloc_one(rx, b, NULL, NULL, 0, gfp, 178 - atomic_inc_return(&rxrpc_debug_id)) == 0) 179 - ; 180 176 181 177 return 0; 182 178 } ··· 323 333 rxrpc_see_call(call); 324 334 call->conn = conn; 325 335 call->security = conn->security; 336 + call->security_ix = conn->security_ix; 326 337 call->peer = rxrpc_get_peer(conn->params.peer); 327 338 call->cong_cwnd = call->peer->cong_cwnd; 328 339 return call; ··· 393 402 394 403 if (rx->notify_new_call) 395 404 rx->notify_new_call(&rx->sk, call, call->user_call_ID); 396 - else 397 - sk_acceptq_added(&rx->sk); 398 405 399 406 spin_lock(&conn->state_lock); 400 407 switch (conn->state) { ··· 404 415 405 416 case RXRPC_CONN_SERVICE: 406 417 write_lock(&call->state_lock); 407 - if (call->state < RXRPC_CALL_COMPLETE) { 408 - if (rx->discard_new_call) 409 - call->state = 
RXRPC_CALL_SERVER_RECV_REQUEST; 410 - else 411 - call->state = RXRPC_CALL_SERVER_ACCEPTING; 412 - } 418 + if (call->state < RXRPC_CALL_COMPLETE) 419 + call->state = RXRPC_CALL_SERVER_RECV_REQUEST; 413 420 write_unlock(&call->state_lock); 414 421 break; 415 422 ··· 425 440 426 441 rxrpc_send_ping(call, skb); 427 442 428 - if (call->state == RXRPC_CALL_SERVER_ACCEPTING) 429 - rxrpc_notify_socket(call); 430 - 431 443 /* We have to discard the prealloc queue's ref here and rely on a 432 444 * combination of the RCU read lock and refs held either by the socket 433 445 * (recvmsg queue, to-be-accepted queue or user ID tree) or the kernel ··· 442 460 } 443 461 444 462 /* 445 - * handle acceptance of a call by userspace 446 - * - assign the user call ID to the call at the front of the queue 447 - * - called with the socket locked. 463 + * Charge up socket with preallocated calls, attaching user call IDs. 448 464 */ 449 - struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx, 450 - unsigned long user_call_ID, 451 - rxrpc_notify_rx_t notify_rx) 452 - __releases(&rx->sk.sk_lock.slock) 453 - __acquires(call->user_mutex) 465 + int rxrpc_user_charge_accept(struct rxrpc_sock *rx, unsigned long user_call_ID) 454 466 { 455 - struct rxrpc_call *call; 456 - struct rb_node *parent, **pp; 457 - int ret; 467 + struct rxrpc_backlog *b = rx->backlog; 458 468 459 - _enter(",%lx", user_call_ID); 469 + if (rx->sk.sk_state == RXRPC_CLOSE) 470 + return -ESHUTDOWN; 460 471 461 - ASSERT(!irqs_disabled()); 462 - 463 - write_lock(&rx->call_lock); 464 - 465 - if (list_empty(&rx->to_be_accepted)) { 466 - write_unlock(&rx->call_lock); 467 - release_sock(&rx->sk); 468 - kleave(" = -ENODATA [empty]"); 469 - return ERR_PTR(-ENODATA); 470 - } 471 - 472 - /* check the user ID isn't already in use */ 473 - pp = &rx->calls.rb_node; 474 - parent = NULL; 475 - while (*pp) { 476 - parent = *pp; 477 - call = rb_entry(parent, struct rxrpc_call, sock_node); 478 - 479 - if (user_call_ID < call->user_call_ID) 
480 - pp = &(*pp)->rb_left; 481 - else if (user_call_ID > call->user_call_ID) 482 - pp = &(*pp)->rb_right; 483 - else 484 - goto id_in_use; 485 - } 486 - 487 - /* Dequeue the first call and check it's still valid. We gain 488 - * responsibility for the queue's reference. 489 - */ 490 - call = list_entry(rx->to_be_accepted.next, 491 - struct rxrpc_call, accept_link); 492 - write_unlock(&rx->call_lock); 493 - 494 - /* We need to gain the mutex from the interrupt handler without 495 - * upsetting lockdep, so we have to release it there and take it here. 496 - * We are, however, still holding the socket lock, so other accepts 497 - * must wait for us and no one can add the user ID behind our backs. 498 - */ 499 - if (mutex_lock_interruptible(&call->user_mutex) < 0) { 500 - release_sock(&rx->sk); 501 - kleave(" = -ERESTARTSYS"); 502 - return ERR_PTR(-ERESTARTSYS); 503 - } 504 - 505 - write_lock(&rx->call_lock); 506 - list_del_init(&call->accept_link); 507 - sk_acceptq_removed(&rx->sk); 508 - rxrpc_see_call(call); 509 - 510 - /* Find the user ID insertion point. 
*/ 511 - pp = &rx->calls.rb_node; 512 - parent = NULL; 513 - while (*pp) { 514 - parent = *pp; 515 - call = rb_entry(parent, struct rxrpc_call, sock_node); 516 - 517 - if (user_call_ID < call->user_call_ID) 518 - pp = &(*pp)->rb_left; 519 - else if (user_call_ID > call->user_call_ID) 520 - pp = &(*pp)->rb_right; 521 - else 522 - BUG(); 523 - } 524 - 525 - write_lock_bh(&call->state_lock); 526 - switch (call->state) { 527 - case RXRPC_CALL_SERVER_ACCEPTING: 528 - call->state = RXRPC_CALL_SERVER_RECV_REQUEST; 529 - break; 530 - case RXRPC_CALL_COMPLETE: 531 - ret = call->error; 532 - goto out_release; 533 - default: 534 - BUG(); 535 - } 536 - 537 - /* formalise the acceptance */ 538 - call->notify_rx = notify_rx; 539 - call->user_call_ID = user_call_ID; 540 - rxrpc_get_call(call, rxrpc_call_got_userid); 541 - rb_link_node(&call->sock_node, parent, pp); 542 - rb_insert_color(&call->sock_node, &rx->calls); 543 - if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags)) 544 - BUG(); 545 - 546 - write_unlock_bh(&call->state_lock); 547 - write_unlock(&rx->call_lock); 548 - rxrpc_notify_socket(call); 549 - rxrpc_service_prealloc(rx, GFP_KERNEL); 550 - release_sock(&rx->sk); 551 - _leave(" = %p{%d}", call, call->debug_id); 552 - return call; 553 - 554 - out_release: 555 - _debug("release %p", call); 556 - write_unlock_bh(&call->state_lock); 557 - write_unlock(&rx->call_lock); 558 - rxrpc_release_call(rx, call); 559 - rxrpc_put_call(call, rxrpc_call_put); 560 - goto out; 561 - 562 - id_in_use: 563 - ret = -EBADSLT; 564 - write_unlock(&rx->call_lock); 565 - out: 566 - rxrpc_service_prealloc(rx, GFP_KERNEL); 567 - release_sock(&rx->sk); 568 - _leave(" = %d", ret); 569 - return ERR_PTR(ret); 570 - } 571 - 572 - /* 573 - * Handle rejection of a call by userspace 574 - * - reject the call at the front of the queue 575 - */ 576 - int rxrpc_reject_call(struct rxrpc_sock *rx) 577 - { 578 - struct rxrpc_call *call; 579 - bool abort = false; 580 - int ret; 581 - 582 - _enter(""); 583 
- 584 - ASSERT(!irqs_disabled()); 585 - 586 - write_lock(&rx->call_lock); 587 - 588 - if (list_empty(&rx->to_be_accepted)) { 589 - write_unlock(&rx->call_lock); 590 - return -ENODATA; 591 - } 592 - 593 - /* Dequeue the first call and check it's still valid. We gain 594 - * responsibility for the queue's reference. 595 - */ 596 - call = list_entry(rx->to_be_accepted.next, 597 - struct rxrpc_call, accept_link); 598 - list_del_init(&call->accept_link); 599 - sk_acceptq_removed(&rx->sk); 600 - rxrpc_see_call(call); 601 - 602 - write_lock_bh(&call->state_lock); 603 - switch (call->state) { 604 - case RXRPC_CALL_SERVER_ACCEPTING: 605 - __rxrpc_abort_call("REJ", call, 1, RX_USER_ABORT, -ECONNABORTED); 606 - abort = true; 607 - fallthrough; 608 - case RXRPC_CALL_COMPLETE: 609 - ret = call->error; 610 - goto out_discard; 611 - default: 612 - BUG(); 613 - } 614 - 615 - out_discard: 616 - write_unlock_bh(&call->state_lock); 617 - write_unlock(&rx->call_lock); 618 - if (abort) { 619 - rxrpc_send_abort_packet(call); 620 - rxrpc_release_call(rx, call); 621 - rxrpc_put_call(call, rxrpc_call_put); 622 - } 623 - rxrpc_service_prealloc(rx, GFP_KERNEL); 624 - _leave(" = %d", ret); 625 - return ret; 472 + return rxrpc_service_prealloc_one(rx, b, NULL, NULL, user_call_ID, 473 + GFP_KERNEL, 474 + atomic_inc_return(&rxrpc_debug_id)); 626 475 } 627 476 628 477 /*
+1 -4
net/rxrpc/call_object.c
··· 23 23 [RXRPC_CALL_CLIENT_RECV_REPLY] = "ClRcvRpl", 24 24 [RXRPC_CALL_SERVER_PREALLOC] = "SvPrealc", 25 25 [RXRPC_CALL_SERVER_SECURING] = "SvSecure", 26 - [RXRPC_CALL_SERVER_ACCEPTING] = "SvAccept", 27 26 [RXRPC_CALL_SERVER_RECV_REQUEST] = "SvRcvReq", 28 27 [RXRPC_CALL_SERVER_ACK_REQUEST] = "SvAckReq", 29 28 [RXRPC_CALL_SERVER_SEND_REPLY] = "SvSndRpl", ··· 351 352 call->call_id = sp->hdr.callNumber; 352 353 call->service_id = sp->hdr.serviceId; 353 354 call->cid = sp->hdr.cid; 354 - call->state = RXRPC_CALL_SERVER_ACCEPTING; 355 - if (sp->hdr.securityIndex > 0) 356 - call->state = RXRPC_CALL_SERVER_SECURING; 355 + call->state = RXRPC_CALL_SERVER_SECURING; 357 356 call->cong_tstamp = skb->tstamp; 358 357 359 358 /* Set the channel for this call. We don't get channel_lock as we're
+4 -4
net/rxrpc/conn_event.c
··· 269 269 if (call) { 270 270 write_lock_bh(&call->state_lock); 271 271 if (call->state == RXRPC_CALL_SERVER_SECURING) { 272 - call->state = RXRPC_CALL_SERVER_ACCEPTING; 272 + call->state = RXRPC_CALL_SERVER_RECV_REQUEST; 273 273 rxrpc_notify_socket(call); 274 274 } 275 275 write_unlock_bh(&call->state_lock); ··· 340 340 return ret; 341 341 342 342 spin_lock(&conn->channel_lock); 343 - spin_lock(&conn->state_lock); 343 + spin_lock_bh(&conn->state_lock); 344 344 345 345 if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { 346 346 conn->state = RXRPC_CONN_SERVICE; 347 - spin_unlock(&conn->state_lock); 347 + spin_unlock_bh(&conn->state_lock); 348 348 for (loop = 0; loop < RXRPC_MAXCALLS; loop++) 349 349 rxrpc_call_is_secure( 350 350 rcu_dereference_protected( 351 351 conn->channels[loop].call, 352 352 lockdep_is_held(&conn->channel_lock))); 353 353 } else { 354 - spin_unlock(&conn->state_lock); 354 + spin_unlock_bh(&conn->state_lock); 355 355 } 356 356 357 357 spin_unlock(&conn->channel_lock);
+14 -6
net/rxrpc/key.c
··· 903 903 904 904 _enter(""); 905 905 906 - if (optlen <= 0 || optlen > PAGE_SIZE - 1) 906 + if (optlen <= 0 || optlen > PAGE_SIZE - 1 || rx->securities) 907 907 return -EINVAL; 908 908 909 909 description = memdup_sockptr_nul(optval, optlen); ··· 940 940 if (IS_ERR(description)) 941 941 return PTR_ERR(description); 942 942 943 - key = request_key_net(&key_type_keyring, description, sock_net(&rx->sk), NULL); 943 + key = request_key(&key_type_keyring, description, NULL); 944 944 if (IS_ERR(key)) { 945 945 kfree(description); 946 946 _leave(" = %ld", PTR_ERR(key)); ··· 1072 1072 1073 1073 switch (token->security_index) { 1074 1074 case RXRPC_SECURITY_RXKAD: 1075 - toksize += 9 * 4; /* viceid, kvno, key*2 + len, begin, 1075 + toksize += 8 * 4; /* viceid, kvno, key*2, begin, 1076 1076 * end, primary, tktlen */ 1077 1077 toksize += RND(token->kad->ticket_len); 1078 1078 break; ··· 1107 1107 break; 1108 1108 1109 1109 default: /* we have a ticket we can't encode */ 1110 - BUG(); 1110 + pr_err("Unsupported key token type (%u)\n", 1111 + token->security_index); 1111 1112 continue; 1112 1113 } 1113 1114 ··· 1134 1133 do { \ 1135 1134 u32 _l = (l); \ 1136 1135 ENCODE(l); \ 1136 + memcpy(xdr, (s), _l); \ 1137 + if (_l & 3) \ 1138 + memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ 1139 + xdr += (_l + 3) >> 2; \ 1140 + } while(0) 1141 + #define ENCODE_BYTES(l, s) \ 1142 + do { \ 1143 + u32 _l = (l); \ 1137 1144 memcpy(xdr, (s), _l); \ 1138 1145 if (_l & 3) \ 1139 1146 memcpy((u8 *)xdr + _l, &zero, 4 - (_l & 3)); \ ··· 1174 1165 case RXRPC_SECURITY_RXKAD: 1175 1166 ENCODE(token->kad->vice_id); 1176 1167 ENCODE(token->kad->kvno); 1177 - ENCODE_DATA(8, token->kad->session_key); 1168 + ENCODE_BYTES(8, token->kad->session_key); 1178 1169 ENCODE(token->kad->start); 1179 1170 ENCODE(token->kad->expiry); 1180 1171 ENCODE(token->kad->primary_flag); ··· 1224 1215 break; 1225 1216 1226 1217 default: 1227 - BUG(); 1228 1218 break; 1229 1219 } 1230 1220
+1 -35
net/rxrpc/recvmsg.c
··· 179 179 } 180 180 181 181 /* 182 - * Pass back notification of a new call. The call is added to the 183 - * to-be-accepted list. This means that the next call to be accepted might not 184 - * be the last call seen awaiting acceptance, but unless we leave this on the 185 - * front of the queue and block all other messages until someone gives us a 186 - * user_ID for it, there's not a lot we can do. 187 - */ 188 - static int rxrpc_recvmsg_new_call(struct rxrpc_sock *rx, 189 - struct rxrpc_call *call, 190 - struct msghdr *msg, int flags) 191 - { 192 - int tmp = 0, ret; 193 - 194 - ret = put_cmsg(msg, SOL_RXRPC, RXRPC_NEW_CALL, 0, &tmp); 195 - 196 - if (ret == 0 && !(flags & MSG_PEEK)) { 197 - _debug("to be accepted"); 198 - write_lock_bh(&rx->recvmsg_lock); 199 - list_del_init(&call->recvmsg_link); 200 - write_unlock_bh(&rx->recvmsg_lock); 201 - 202 - rxrpc_get_call(call, rxrpc_call_got); 203 - write_lock(&rx->call_lock); 204 - list_add_tail(&call->accept_link, &rx->to_be_accepted); 205 - write_unlock(&rx->call_lock); 206 - } 207 - 208 - trace_rxrpc_recvmsg(call, rxrpc_recvmsg_to_be_accepted, 1, 0, 0, ret); 209 - return ret; 210 - } 211 - 212 - /* 213 182 * End the packet reception phase. 214 183 */ 215 184 static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) ··· 599 630 } 600 631 601 632 switch (READ_ONCE(call->state)) { 602 - case RXRPC_CALL_SERVER_ACCEPTING: 603 - ret = rxrpc_recvmsg_new_call(rx, call, msg, flags); 604 - break; 605 633 case RXRPC_CALL_CLIENT_RECV_REPLY: 606 634 case RXRPC_CALL_SERVER_RECV_REQUEST: 607 635 case RXRPC_CALL_SERVER_ACK_REQUEST: ··· 694 728 call->debug_id, rxrpc_call_states[call->state], 695 729 iov_iter_count(iter), want_more); 696 730 697 - ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_ACCEPTING); 731 + ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING); 698 732 699 733 mutex_lock(&call->user_mutex); 700 734
+5 -10
net/rxrpc/sendmsg.c
··· 530 530 return -EINVAL; 531 531 break; 532 532 533 - case RXRPC_ACCEPT: 533 + case RXRPC_CHARGE_ACCEPT: 534 534 if (p->command != RXRPC_CMD_SEND_DATA) 535 535 return -EINVAL; 536 - p->command = RXRPC_CMD_ACCEPT; 536 + p->command = RXRPC_CMD_CHARGE_ACCEPT; 537 537 if (len != 0) 538 538 return -EINVAL; 539 539 break; ··· 659 659 if (ret < 0) 660 660 goto error_release_sock; 661 661 662 - if (p.command == RXRPC_CMD_ACCEPT) { 662 + if (p.command == RXRPC_CMD_CHARGE_ACCEPT) { 663 663 ret = -EINVAL; 664 664 if (rx->sk.sk_state != RXRPC_SERVER_LISTENING) 665 665 goto error_release_sock; 666 - call = rxrpc_accept_call(rx, p.call.user_call_ID, NULL); 667 - /* The socket is now unlocked. */ 668 - if (IS_ERR(call)) 669 - return PTR_ERR(call); 670 - ret = 0; 671 - goto out_put_unlock; 666 + ret = rxrpc_user_charge_accept(rx, p.call.user_call_ID); 667 + goto error_release_sock; 672 668 } 673 669 674 670 call = rxrpc_find_call_by_user_ID(rx, p.call.user_call_ID); ··· 686 690 case RXRPC_CALL_CLIENT_AWAIT_CONN: 687 691 case RXRPC_CALL_SERVER_PREALLOC: 688 692 case RXRPC_CALL_SERVER_SECURING: 689 - case RXRPC_CALL_SERVER_ACCEPTING: 690 693 rxrpc_put_call(call, rxrpc_call_put); 691 694 ret = -EBUSY; 692 695 goto error_release_sock;
+1
net/sctp/auth.c
··· 494 494 out_err: 495 495 /* Clean up any successful allocations */ 496 496 sctp_auth_destroy_hmacs(ep->auth_hmacs); 497 + ep->auth_hmacs = NULL; 497 498 return -ENOMEM; 498 499 } 499 500
+3
net/wireless/nl80211.c
··· 4172 4172 if (err) 4173 4173 return err; 4174 4174 4175 + if (key.idx < 0) 4176 + return -EINVAL; 4177 + 4175 4178 if (info->attrs[NL80211_ATTR_MAC]) 4176 4179 mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); 4177 4180