Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'net-6.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Pull networking fixes from Paolo Abeni:
"Including fixes from rxrpc.

The rxrpc changes are noticeably large: to address a recent regression
it has been necessary to complete the threaded refactor.

Current release - regressions:

- rxrpc:
- only disconnect calls in the I/O thread
- move client call connection to the I/O thread
- fix incoming call setup race

- eth: mlx5:
- restore pkt rate policing support
- fix memory leak on updating vport counters

Previous releases - regressions:

- gro: take care of DODGY packets

- ipv6: deduct extension header length in rawv6_push_pending_frames

- tipc: fix unexpected link reset due to discovery messages

Previous releases - always broken:

- sched: disallow noqueue for qdisc classes

- eth: ice: fix potential memory leak in ice_gnss_tty_write()

- eth: ixgbe: fix pci device refcount leak

- eth: mlx5:
- fix command stats access after free
- fix macsec possible null dereference when updating MAC security
entity (SecY)"

* tag 'net-6.2-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (64 commits)
r8152: add vendor/device ID pair for Microsoft Devkit
net: stmmac: add aux timestamps fifo clearance wait
bnxt: make sure we return pages to the pool
net: hns3: fix wrong use of rss size during VF rss config
ipv6: raw: Deduct extension header length in rawv6_push_pending_frames
net: lan966x: check for ptp to be enabled in lan966x_ptp_deinit()
net: sched: disallow noqueue for qdisc classes
iavf/iavf_main: actually log ->src mask when talking about it
igc: Fix PPS delta between two synchronized end-points
ixgbe: fix pci device refcount leak
octeontx2-pf: Fix resource leakage in VF driver unbind
selftests/net: l2_tos_ttl_inherit.sh: Ensure environment cleanup on failure.
selftests/net: l2_tos_ttl_inherit.sh: Run tests in their own netns.
selftests/net: l2_tos_ttl_inherit.sh: Set IPv6 addresses with "nodad".
net/mlx5e: Fix macsec possible null dereference when updating MAC security entity (SecY)
net/mlx5e: Fix macsec ssci attribute handling in offload path
net/mlx5: E-switch, Coverity: overlapping copy
net/mlx5e: Don't support encap rules with gbp option
net/mlx5: Fix ptp max frequency adjustment range
net/mlx5e: Fix memory leak on updating vport counters
...

+1952 -1954
+2 -2
Documentation/networking/rxrpc.rst
··· 880 880 881 881 notify_end_rx can be NULL or it can be used to specify a function to be 882 882 called when the call changes state to end the Tx phase. This function is 883 - called with the call-state spinlock held to prevent any reply or final ACK 884 - from being delivered first. 883 + called with a spinlock held to prevent the last DATA packet from being 884 + transmitted until the function returns. 885 885 886 886 (#) Receive data from a call:: 887 887
+2 -2
drivers/net/ethernet/broadcom/bnxt/bnxt.c
··· 993 993 DMA_ATTR_WEAK_ORDERING); 994 994 skb = build_skb(page_address(page), PAGE_SIZE); 995 995 if (!skb) { 996 - __free_page(page); 996 + page_pool_recycle_direct(rxr->page_pool, page); 997 997 return NULL; 998 998 } 999 999 skb_mark_for_recycle(skb); ··· 1031 1031 1032 1032 skb = napi_alloc_skb(&rxr->bnapi->napi, payload); 1033 1033 if (!skb) { 1034 - __free_page(page); 1034 + page_pool_recycle_direct(rxr->page_pool, page); 1035 1035 return NULL; 1036 1036 } 1037 1037
+1 -1
drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c
··· 3130 3130 3131 3131 hclgevf_update_rss_size(handle, new_tqps_num); 3132 3132 3133 - hclge_comm_get_rss_tc_info(cur_rss_size, hdev->hw_tc_map, 3133 + hclge_comm_get_rss_tc_info(kinfo->rss_size, hdev->hw_tc_map, 3134 3134 tc_offset, tc_valid, tc_size); 3135 3135 ret = hclge_comm_set_rss_tc_mode(&hdev->hw.hw, tc_offset, 3136 3136 tc_valid, tc_size);
+1 -1
drivers/net/ethernet/intel/iavf/iavf_main.c
··· 3850 3850 field_flags |= IAVF_CLOUD_FIELD_IIP; 3851 3851 } else { 3852 3852 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n", 3853 - be32_to_cpu(match.mask->dst)); 3853 + be32_to_cpu(match.mask->src)); 3854 3854 return -EINVAL; 3855 3855 } 3856 3856 }
+15 -9
drivers/net/ethernet/intel/ice/ice_gnss.c
··· 363 363 /* Send the data out to a hardware port */ 364 364 write_buf = kzalloc(sizeof(*write_buf), GFP_KERNEL); 365 365 if (!write_buf) { 366 + kfree(cmd_buf); 366 367 err = -ENOMEM; 367 368 goto exit; 368 369 } ··· 461 460 for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) { 462 461 pf->gnss_tty_port[i] = kzalloc(sizeof(*pf->gnss_tty_port[i]), 463 462 GFP_KERNEL); 463 + if (!pf->gnss_tty_port[i]) 464 + goto err_out; 465 + 464 466 pf->gnss_serial[i] = NULL; 465 467 466 468 tty_port_init(pf->gnss_tty_port[i]); ··· 473 469 err = tty_register_driver(tty_driver); 474 470 if (err) { 475 471 dev_err(dev, "Failed to register TTY driver err=%d\n", err); 476 - 477 - for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) { 478 - tty_port_destroy(pf->gnss_tty_port[i]); 479 - kfree(pf->gnss_tty_port[i]); 480 - } 481 - kfree(ttydrv_name); 482 - tty_driver_kref_put(pf->ice_gnss_tty_driver); 483 - 484 - return NULL; 472 + goto err_out; 485 473 } 486 474 487 475 for (i = 0; i < ICE_GNSS_TTY_MINOR_DEVICES; i++) 488 476 dev_info(dev, "%s%d registered\n", ttydrv_name, i); 489 477 490 478 return tty_driver; 479 + 480 + err_out: 481 + while (i--) { 482 + tty_port_destroy(pf->gnss_tty_port[i]); 483 + kfree(pf->gnss_tty_port[i]); 484 + } 485 + kfree(ttydrv_name); 486 + tty_driver_kref_put(pf->ice_gnss_tty_driver); 487 + 488 + return NULL; 491 489 } 492 490 493 491 /**
+2
drivers/net/ethernet/intel/igc/igc_defines.h
··· 475 475 #define IGC_TSAUXC_EN_TT0 BIT(0) /* Enable target time 0. */ 476 476 #define IGC_TSAUXC_EN_TT1 BIT(1) /* Enable target time 1. */ 477 477 #define IGC_TSAUXC_EN_CLK0 BIT(2) /* Enable Configurable Frequency Clock 0. */ 478 + #define IGC_TSAUXC_ST0 BIT(4) /* Start Clock 0 Toggle on Target Time 0. */ 478 479 #define IGC_TSAUXC_EN_CLK1 BIT(5) /* Enable Configurable Frequency Clock 1. */ 480 + #define IGC_TSAUXC_ST1 BIT(7) /* Start Clock 1 Toggle on Target Time 1. */ 479 481 #define IGC_TSAUXC_EN_TS0 BIT(8) /* Enable hardware timestamp 0. */ 480 482 #define IGC_TSAUXC_AUTT0 BIT(9) /* Auxiliary Timestamp Taken. */ 481 483 #define IGC_TSAUXC_EN_TS1 BIT(10) /* Enable hardware timestamp 0. */
+6 -4
drivers/net/ethernet/intel/igc/igc_ptp.c
··· 322 322 ts = ns_to_timespec64(ns); 323 323 if (rq->perout.index == 1) { 324 324 if (use_freq) { 325 - tsauxc_mask = IGC_TSAUXC_EN_CLK1; 325 + tsauxc_mask = IGC_TSAUXC_EN_CLK1 | IGC_TSAUXC_ST1; 326 326 tsim_mask = 0; 327 327 } else { 328 328 tsauxc_mask = IGC_TSAUXC_EN_TT1; ··· 333 333 freqout = IGC_FREQOUT1; 334 334 } else { 335 335 if (use_freq) { 336 - tsauxc_mask = IGC_TSAUXC_EN_CLK0; 336 + tsauxc_mask = IGC_TSAUXC_EN_CLK0 | IGC_TSAUXC_ST0; 337 337 tsim_mask = 0; 338 338 } else { 339 339 tsauxc_mask = IGC_TSAUXC_EN_TT0; ··· 347 347 tsauxc = rd32(IGC_TSAUXC); 348 348 tsim = rd32(IGC_TSIM); 349 349 if (rq->perout.index == 1) { 350 - tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1); 350 + tsauxc &= ~(IGC_TSAUXC_EN_TT1 | IGC_TSAUXC_EN_CLK1 | 351 + IGC_TSAUXC_ST1); 351 352 tsim &= ~IGC_TSICR_TT1; 352 353 } else { 353 - tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0); 354 + tsauxc &= ~(IGC_TSAUXC_EN_TT0 | IGC_TSAUXC_EN_CLK0 | 355 + IGC_TSAUXC_ST0); 354 356 tsim &= ~IGC_TSICR_TT0; 355 357 } 356 358 if (on) {
+9 -5
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
··· 855 855 rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn); 856 856 if (rp_pdev && rp_pdev->subordinate) { 857 857 bus = rp_pdev->subordinate->number; 858 + pci_dev_put(rp_pdev); 858 859 return pci_get_domain_bus_and_slot(0, bus, 0); 859 860 } 860 861 862 + pci_dev_put(rp_pdev); 861 863 return NULL; 862 864 } 863 865 ··· 876 874 struct ixgbe_adapter *adapter = hw->back; 877 875 struct pci_dev *pdev = adapter->pdev; 878 876 struct pci_dev *func0_pdev; 877 + bool has_mii = false; 879 878 880 879 /* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices 881 880 * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0 ··· 887 884 func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0)); 888 885 if (func0_pdev) { 889 886 if (func0_pdev == pdev) 890 - return true; 891 - else 892 - return false; 887 + has_mii = true; 888 + goto out; 893 889 } 894 890 func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0)); 895 891 if (func0_pdev == pdev) 896 - return true; 892 + has_mii = true; 897 893 898 - return false; 894 + out: 895 + pci_dev_put(func0_pdev); 896 + return has_mii; 899 897 } 900 898 901 899 /**
+2 -2
drivers/net/ethernet/marvell/octeontx2/af/cgx.c
··· 774 774 775 775 cfg = cgx_read(cgx, lmac_id, CGXX_CMRX_CFG); 776 776 if (enable) 777 - cfg |= CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN; 777 + cfg |= DATA_PKT_RX_EN | DATA_PKT_TX_EN; 778 778 else 779 - cfg &= ~(CMR_EN | DATA_PKT_RX_EN | DATA_PKT_TX_EN); 779 + cfg &= ~(DATA_PKT_RX_EN | DATA_PKT_TX_EN); 780 780 cgx_write(cgx, lmac_id, CGXX_CMRX_CFG, cfg); 781 781 return 0; 782 782 }
-1
drivers/net/ethernet/marvell/octeontx2/af/cgx.h
··· 26 26 #define CMR_P2X_SEL_SHIFT 59ULL 27 27 #define CMR_P2X_SEL_NIX0 1ULL 28 28 #define CMR_P2X_SEL_NIX1 2ULL 29 - #define CMR_EN BIT_ULL(55) 30 29 #define DATA_PKT_TX_EN BIT_ULL(53) 31 30 #define DATA_PKT_RX_EN BIT_ULL(54) 32 31 #define CGX_LMAC_TYPE_SHIFT 40
+2
drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c
··· 758 758 if (vf->otx2_wq) 759 759 destroy_workqueue(vf->otx2_wq); 760 760 otx2_ptp_destroy(vf); 761 + otx2_mcam_flow_del(vf); 762 + otx2_shutdown_tc(vf); 761 763 otx2vf_disable_mbox_intr(vf); 762 764 otx2_detach_resources(&vf->mbox); 763 765 if (test_bit(CN10K_LMTST, &vf->hw.cap_flag))
+2 -11
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
··· 2176 2176 return -EINVAL; 2177 2177 } 2178 2178 2179 - cmd->stats = kvcalloc(MLX5_CMD_OP_MAX, sizeof(*cmd->stats), GFP_KERNEL); 2180 - if (!cmd->stats) 2181 - return -ENOMEM; 2182 - 2183 2179 cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0); 2184 - if (!cmd->pool) { 2185 - err = -ENOMEM; 2186 - goto dma_pool_err; 2187 - } 2180 + if (!cmd->pool) 2181 + return -ENOMEM; 2188 2182 2189 2183 err = alloc_cmd_page(dev, cmd); 2190 2184 if (err) ··· 2262 2268 2263 2269 err_free_pool: 2264 2270 dma_pool_destroy(cmd->pool); 2265 - dma_pool_err: 2266 - kvfree(cmd->stats); 2267 2271 return err; 2268 2272 } 2269 2273 ··· 2274 2282 destroy_msg_cache(dev); 2275 2283 free_cmd_page(dev, cmd); 2276 2284 dma_pool_destroy(cmd->pool); 2277 - kvfree(cmd->stats); 2278 2285 } 2279 2286 2280 2287 void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
-6
drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c
··· 34 34 return -EOPNOTSUPP; 35 35 } 36 36 37 - if (act->police.rate_pkt_ps) { 38 - NL_SET_ERR_MSG_MOD(extack, 39 - "QoS offload not support packets per second"); 40 - return -EOPNOTSUPP; 41 - } 42 - 43 37 return 0; 44 38 } 45 39
+1
drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c
··· 127 127 attr->counter = act_counter; 128 128 129 129 attr->flags |= MLX5_ATTR_FLAG_NO_IN_PORT; 130 + attr->inner_match_level = MLX5_MATCH_NONE; 130 131 attr->outer_match_level = MLX5_MATCH_NONE; 131 132 attr->chain = 0; 132 133 attr->prio = 0;
+2
drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_vxlan.c
··· 88 88 struct udphdr *udp = (struct udphdr *)(buf); 89 89 struct vxlanhdr *vxh; 90 90 91 + if (tun_key->tun_flags & TUNNEL_VXLAN_OPT) 92 + return -EOPNOTSUPP; 91 93 vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); 92 94 *ip_proto = IPPROTO_UDP; 93 95
+9 -10
drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c
··· 62 62 u32 enc_key_id; 63 63 u32 next_pn; 64 64 sci_t sci; 65 + ssci_t ssci; 65 66 salt_t salt; 66 67 67 68 struct rhash_head hash; ··· 359 358 struct mlx5_core_dev *mdev = priv->mdev; 360 359 struct mlx5_macsec_obj_attrs obj_attrs; 361 360 union mlx5e_macsec_rule *macsec_rule; 362 - struct macsec_key *key; 363 361 int err; 364 362 365 363 obj_attrs.next_pn = sa->next_pn; ··· 368 368 obj_attrs.aso_pdn = macsec->aso.pdn; 369 369 obj_attrs.epn_state = sa->epn_state; 370 370 371 - key = (is_tx) ? &ctx->sa.tx_sa->key : &ctx->sa.rx_sa->key; 372 - 373 371 if (sa->epn_state.epn_enabled) { 374 - obj_attrs.ssci = (is_tx) ? cpu_to_be32((__force u32)ctx->sa.tx_sa->ssci) : 375 - cpu_to_be32((__force u32)ctx->sa.rx_sa->ssci); 376 - 377 - memcpy(&obj_attrs.salt, &key->salt, sizeof(key->salt)); 372 + obj_attrs.ssci = cpu_to_be32((__force u32)sa->ssci); 373 + memcpy(&obj_attrs.salt, &sa->salt, sizeof(sa->salt)); 378 374 } 379 375 380 376 obj_attrs.replay_window = ctx->secy->replay_window; ··· 495 499 } 496 500 497 501 static void update_macsec_epn(struct mlx5e_macsec_sa *sa, const struct macsec_key *key, 498 - const pn_t *next_pn_halves) 502 + const pn_t *next_pn_halves, ssci_t ssci) 499 503 { 500 504 struct mlx5e_macsec_epn_state *epn_state = &sa->epn_state; 501 505 506 + sa->ssci = ssci; 502 507 sa->salt = key->salt; 503 508 epn_state->epn_enabled = 1; 504 509 epn_state->epn_msb = next_pn_halves->upper; ··· 547 550 tx_sa->assoc_num = assoc_num; 548 551 549 552 if (secy->xpn) 550 - update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves); 553 + update_macsec_epn(tx_sa, &ctx_tx_sa->key, &ctx_tx_sa->next_pn_halves, 554 + ctx_tx_sa->ssci); 551 555 552 556 err = mlx5_create_encryption_key(mdev, ctx->sa.key, secy->key_len, 553 557 MLX5_ACCEL_OBJ_MACSEC_KEY, ··· 943 945 rx_sa->fs_id = rx_sc->sc_xarray_element->fs_id; 944 946 945 947 if (ctx->secy->xpn) 946 - update_macsec_epn(rx_sa, &ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves); 948 + update_macsec_epn(rx_sa, 
&ctx_rx_sa->key, &ctx_rx_sa->next_pn_halves, 949 + ctx_rx_sa->ssci); 947 950 948 951 err = mlx5_create_encryption_key(mdev, ctx->sa.key, ctx->secy->key_len, 949 952 MLX5_ACCEL_OBJ_MACSEC_KEY,
+3
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
··· 4084 4084 struct mlx5e_vlan_table *vlan; 4085 4085 struct mlx5e_params *params; 4086 4086 4087 + if (!netif_device_present(netdev)) 4088 + return features; 4089 + 4087 4090 vlan = mlx5e_fs_get_vlan(priv->fs); 4088 4091 mutex_lock(&priv->state_lock); 4089 4092 params = &priv->channels.params;
+2 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
··· 191 191 if (err) { 192 192 netdev_warn(priv->netdev, "vport %d error %d reading stats\n", 193 193 rep->vport, err); 194 - return; 194 + goto out; 195 195 } 196 196 197 197 #define MLX5_GET_CTR(p, x) \ ··· 241 241 rep_stats->tx_vport_rdma_multicast_bytes = 242 242 MLX5_GET_CTR(out, received_ib_multicast.octets); 243 243 244 + out: 244 245 kvfree(out); 245 246 } 246 247
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
··· 2419 2419 2420 2420 priv = mlx5i_epriv(netdev); 2421 2421 tstamp = &priv->tstamp; 2422 - stats = rq->stats; 2422 + stats = &priv->channel_stats[rq->ix]->rq; 2423 2423 2424 2424 flags_rqpn = be32_to_cpu(cqe->flags_rqpn); 2425 2425 g = (flags_rqpn >> 28) & 3;
+3 -2
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
··· 1301 1301 1302 1302 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { 1303 1303 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr); 1304 - mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts); 1305 1304 if (err) 1306 1305 return err; 1307 1306 } ··· 1358 1359 } 1359 1360 mutex_unlock(&tc->t_lock); 1360 1361 1361 - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) 1362 + if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) { 1363 + mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts); 1362 1364 mlx5e_detach_mod_hdr(priv, flow); 1365 + } 1363 1366 1364 1367 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) 1365 1368 mlx5_fc_destroy(priv->mdev, attr->counter);
+1 -5
drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
··· 143 143 if (mlx5_esw_indir_table_decap_vport(attr)) 144 144 vport = mlx5_esw_indir_table_decap_vport(attr); 145 145 146 - if (attr && !attr->chain && esw_attr->int_port) 146 + if (!attr->chain && esw_attr && esw_attr->int_port) 147 147 metadata = 148 148 mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port); 149 149 else ··· 4143 4143 } 4144 4144 4145 4145 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 4146 - memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability), 4147 - MLX5_UN_SZ_BYTES(hca_cap_union)); 4148 4146 MLX5_SET(cmd_hca_cap_2, hca_caps, migratable, 1); 4149 4147 4150 4148 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport->vport, ··· 4234 4236 } 4235 4237 4236 4238 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); 4237 - memcpy(hca_caps, MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability), 4238 - MLX5_UN_SZ_BYTES(hca_cap_union)); 4239 4239 MLX5_SET(cmd_hca_cap, hca_caps, roce, enable); 4240 4240 4241 4241 err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num,
+14 -2
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
··· 90 90 static int mlx5i_set_channels(struct net_device *dev, 91 91 struct ethtool_channels *ch) 92 92 { 93 - struct mlx5e_priv *priv = mlx5i_epriv(dev); 93 + struct mlx5i_priv *ipriv = netdev_priv(dev); 94 + struct mlx5e_priv *epriv = mlx5i_epriv(dev); 94 95 95 - return mlx5e_ethtool_set_channels(priv, ch); 96 + /* rtnl lock protects from race between this ethtool op and sub 97 + * interface ndo_init/uninit. 98 + */ 99 + ASSERT_RTNL(); 100 + if (ipriv->num_sub_interfaces > 0) { 101 + mlx5_core_warn(epriv->mdev, 102 + "can't change number of channels for interfaces with sub interfaces (%u)\n", 103 + ipriv->num_sub_interfaces); 104 + return -EINVAL; 105 + } 106 + 107 + return mlx5e_ethtool_set_channels(epriv, ch); 96 108 } 97 109 98 110 static void mlx5i_get_channels(struct net_device *dev,
+38
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
··· 160 160 stats->tx_dropped = sstats->tx_queue_dropped; 161 161 } 162 162 163 + struct net_device *mlx5i_parent_get(struct net_device *netdev) 164 + { 165 + struct mlx5e_priv *priv = mlx5i_epriv(netdev); 166 + struct mlx5i_priv *ipriv, *parent_ipriv; 167 + struct net_device *parent_dev; 168 + int parent_ifindex; 169 + 170 + ipriv = priv->ppriv; 171 + 172 + parent_ifindex = netdev->netdev_ops->ndo_get_iflink(netdev); 173 + parent_dev = dev_get_by_index(dev_net(netdev), parent_ifindex); 174 + if (!parent_dev) 175 + return NULL; 176 + 177 + parent_ipriv = netdev_priv(parent_dev); 178 + 179 + ASSERT_RTNL(); 180 + parent_ipriv->num_sub_interfaces++; 181 + 182 + ipriv->parent_dev = parent_dev; 183 + 184 + return parent_dev; 185 + } 186 + 187 + void mlx5i_parent_put(struct net_device *netdev) 188 + { 189 + struct mlx5e_priv *priv = mlx5i_epriv(netdev); 190 + struct mlx5i_priv *ipriv, *parent_ipriv; 191 + 192 + ipriv = priv->ppriv; 193 + parent_ipriv = netdev_priv(ipriv->parent_dev); 194 + 195 + ASSERT_RTNL(); 196 + parent_ipriv->num_sub_interfaces--; 197 + 198 + dev_put(ipriv->parent_dev); 199 + } 200 + 163 201 int mlx5i_init_underlay_qp(struct mlx5e_priv *priv) 164 202 { 165 203 struct mlx5_core_dev *mdev = priv->mdev;
+6
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
··· 54 54 struct rdma_netdev rn; /* keep this first */ 55 55 u32 qpn; 56 56 bool sub_interface; 57 + u32 num_sub_interfaces; 57 58 u32 qkey; 58 59 u16 pkey_index; 59 60 struct mlx5i_pkey_qpn_ht *qpn_htbl; 61 + struct net_device *parent_dev; 60 62 char *mlx5e_priv[]; 61 63 }; 62 64 ··· 118 116 void mlx5i_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb, 119 117 struct mlx5_av *av, u32 dqpn, u32 dqkey, bool xmit_more); 120 118 void mlx5i_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); 119 + 120 + /* Reference management for child to parent interfaces. */ 121 + struct net_device *mlx5i_parent_get(struct net_device *netdev); 122 + void mlx5i_parent_put(struct net_device *netdev); 121 123 122 124 #endif /* CONFIG_MLX5_CORE_IPOIB */ 123 125 #endif /* __MLX5E_IPOB_H__ */
+13 -5
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
··· 158 158 struct mlx5e_priv *priv = mlx5i_epriv(dev); 159 159 struct mlx5i_priv *ipriv, *parent_ipriv; 160 160 struct net_device *parent_dev; 161 - int parent_ifindex; 162 161 163 162 ipriv = priv->ppriv; 164 163 165 - /* Get QPN to netdevice hash table from parent */ 166 - parent_ifindex = dev->netdev_ops->ndo_get_iflink(dev); 167 - parent_dev = dev_get_by_index(dev_net(dev), parent_ifindex); 164 + /* Link to parent */ 165 + parent_dev = mlx5i_parent_get(dev); 168 166 if (!parent_dev) { 169 167 mlx5_core_warn(priv->mdev, "failed to get parent device\n"); 170 168 return -EINVAL; 171 169 } 172 170 171 + if (dev->num_rx_queues < parent_dev->real_num_rx_queues) { 172 + mlx5_core_warn(priv->mdev, 173 + "failed to create child device with rx queues [%d] less than parent's [%d]\n", 174 + dev->num_rx_queues, 175 + parent_dev->real_num_rx_queues); 176 + mlx5i_parent_put(dev); 177 + return -EINVAL; 178 + } 179 + 180 + /* Get QPN to netdevice hash table from parent */ 173 181 parent_ipriv = netdev_priv(parent_dev); 174 182 ipriv->qpn_htbl = parent_ipriv->qpn_htbl; 175 - dev_put(parent_dev); 176 183 177 184 return mlx5i_dev_init(dev); 178 185 } ··· 191 184 192 185 static void mlx5i_pkey_dev_cleanup(struct net_device *netdev) 193 186 { 187 + mlx5i_parent_put(netdev); 194 188 return mlx5i_dev_cleanup(netdev); 195 189 } 196 190
+1 -1
drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
··· 681 681 static const struct ptp_clock_info mlx5_ptp_clock_info = { 682 682 .owner = THIS_MODULE, 683 683 .name = "mlx5_ptp", 684 - .max_adj = 100000000, 684 + .max_adj = 50000000, 685 685 .n_alarm = 0, 686 686 .n_ext_ts = 0, 687 687 .n_per_out = 0,
+7 -4
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c
··· 3 3 4 4 #include "dr_types.h" 5 5 6 + #if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN < 2048) 7 + /* don't try to optimize STE allocation if the stack is too constaraining */ 8 + #define DR_RULE_MAX_STES_OPTIMIZED 0 9 + #else 6 10 #define DR_RULE_MAX_STES_OPTIMIZED 5 11 + #endif 7 12 #define DR_RULE_MAX_STE_CHAIN_OPTIMIZED (DR_RULE_MAX_STES_OPTIMIZED + DR_ACTION_MAX_STES) 8 13 9 14 static int dr_rule_append_to_miss_list(struct mlx5dr_domain *dmn, ··· 1223 1218 1224 1219 mlx5dr_domain_nic_unlock(nic_dmn); 1225 1220 1226 - if (unlikely(!hw_ste_arr_is_opt)) 1227 - kfree(hw_ste_arr); 1228 - 1229 - return 0; 1221 + goto out; 1230 1222 1231 1223 free_rule: 1232 1224 dr_rule_clean_rule_members(rule, nic_rule); ··· 1240 1238 free_hw_ste: 1241 1239 mlx5dr_domain_nic_unlock(nic_dmn); 1242 1240 1241 + out: 1243 1242 if (unlikely(!hw_ste_arr_is_opt)) 1244 1243 kfree(hw_ste_arr); 1245 1244
+1 -1
drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
··· 2951 2951 gateway:1, /* routes using the group use a gateway */ 2952 2952 is_resilient:1; 2953 2953 struct list_head list; /* member in nh_res_grp_list */ 2954 - struct mlxsw_sp_nexthop nexthops[0]; 2954 + struct mlxsw_sp_nexthop nexthops[]; 2955 2955 #define nh_rif nexthops[0].rif 2956 2956 }; 2957 2957
+3
drivers/net/ethernet/microchip/lan966x/lan966x_ptp.c
··· 1073 1073 struct lan966x_port *port; 1074 1074 int i; 1075 1075 1076 + if (!lan966x->ptp) 1077 + return; 1078 + 1076 1079 for (i = 0; i < lan966x->num_phys_ports; i++) { 1077 1080 port = lan966x->ports[i]; 1078 1081 if (!port)
-3
drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c
··· 95 95 bool found = false; 96 96 u32 val; 97 97 98 - /* Check if the port keyset selection is enabled */ 99 98 val = lan_rd(lan966x, ANA_VCAP_S2_CFG(port->chip_port)); 100 - if (!ANA_VCAP_S2_CFG_ENA_GET(val)) 101 - return -ENOENT; 102 99 103 100 /* Collect all keysets for the port in a list */ 104 101 if (l3_proto == ETH_P_ALL)
+1 -4
drivers/net/ethernet/realtek/r8169_main.c
··· 1996 1996 1997 1997 /* 8168F family. */ 1998 1998 { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 }, 1999 - /* It seems this chip version never made it to 2000 - * the wild. Let's disable detection. 2001 - * { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, 2002 - */ 1999 + { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, 2003 2000 { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 }, 2004 2001 2005 2002 /* 8168E family. */
-26
drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c
··· 90 90 struct mediatek_dwmac_variant { 91 91 int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat); 92 92 int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat); 93 - void (*dwmac_fix_mac_speed)(void *priv, unsigned int speed); 94 93 95 94 /* clock ids to be requested */ 96 95 const char * const *clk_list; ··· 442 443 return 0; 443 444 } 444 445 445 - static void mt8195_fix_mac_speed(void *priv, unsigned int speed) 446 - { 447 - struct mediatek_dwmac_plat_data *priv_plat = priv; 448 - 449 - if ((phy_interface_mode_is_rgmii(priv_plat->phy_mode))) { 450 - /* prefer 2ns fixed delay which is controlled by TXC_PHASE_CTRL, 451 - * when link speed is 1Gbps with RGMII interface, 452 - * Fall back to delay macro circuit for 10/100Mbps link speed. 453 - */ 454 - if (speed == SPEED_1000) 455 - regmap_update_bits(priv_plat->peri_regmap, 456 - MT8195_PERI_ETH_CTRL0, 457 - MT8195_RGMII_TXC_PHASE_CTRL | 458 - MT8195_DLY_GTXC_ENABLE | 459 - MT8195_DLY_GTXC_INV | 460 - MT8195_DLY_GTXC_STAGES, 461 - MT8195_RGMII_TXC_PHASE_CTRL); 462 - else 463 - mt8195_set_delay(priv_plat); 464 - } 465 - } 466 - 467 446 static const struct mediatek_dwmac_variant mt8195_gmac_variant = { 468 447 .dwmac_set_phy_interface = mt8195_set_interface, 469 448 .dwmac_set_delay = mt8195_set_delay, 470 - .dwmac_fix_mac_speed = mt8195_fix_mac_speed, 471 449 .clk_list = mt8195_dwmac_clk_l, 472 450 .num_clks = ARRAY_SIZE(mt8195_dwmac_clk_l), 473 451 .dma_bit_mask = 35, ··· 595 619 plat->bsp_priv = priv_plat; 596 620 plat->init = mediatek_dwmac_init; 597 621 plat->clks_config = mediatek_dwmac_clks_config; 598 - if (priv_plat->variant->dwmac_fix_mac_speed) 599 - plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed; 600 622 601 623 plat->safety_feat_cfg = devm_kzalloc(&pdev->dev, 602 624 sizeof(*plat->safety_feat_cfg),
+4 -1
drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
··· 210 210 } 211 211 writel(acr_value, ptpaddr + PTP_ACR); 212 212 mutex_unlock(&priv->aux_ts_lock); 213 - ret = 0; 213 + /* wait for auxts fifo clear to finish */ 214 + ret = readl_poll_timeout(ptpaddr + PTP_ACR, acr_value, 215 + !(acr_value & PTP_ACR_ATSFC), 216 + 10, 10000); 214 217 break; 215 218 216 219 default:
+1 -1
drivers/net/ipa/data/ipa_data-v4.7.c
··· 357 357 static const struct ipa_mem_data ipa_mem_data = { 358 358 .local_count = ARRAY_SIZE(ipa_mem_local_data), 359 359 .local = ipa_mem_local_data, 360 - .imem_addr = 0x146a9000, 360 + .imem_addr = 0x146a8000, 361 361 .imem_size = 0x00002000, 362 362 .smem_id = 497, 363 363 .smem_size = 0x00009000,
+6
drivers/net/usb/cdc_ether.c
··· 1008 1008 USB_CDC_PROTO_NONE), 1009 1009 .driver_info = (unsigned long)&wwan_info, 1010 1010 }, { 1011 + /* Cinterion PLS62-W modem by GEMALTO/THALES */ 1012 + USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x005b, USB_CLASS_COMM, 1013 + USB_CDC_SUBCLASS_ETHERNET, 1014 + USB_CDC_PROTO_NONE), 1015 + .driver_info = (unsigned long)&wwan_info, 1016 + }, { 1011 1017 /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */ 1012 1018 USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM, 1013 1019 USB_CDC_SUBCLASS_ETHERNET,
+1
drivers/net/usb/r8152.c
··· 9836 9836 REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07ab), 9837 9837 REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x07c6), 9838 9838 REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0927), 9839 + REALTEK_USB_DEVICE(VENDOR_ID_MICROSOFT, 0x0c5e), 9839 9840 REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101), 9840 9841 REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x304f), 9841 9842 REALTEK_USB_DEVICE(VENDOR_ID_LENOVO, 0x3054),
+3 -2
drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c
··· 79 79 /* Apple ARM64 platforms have their own idea of board type, passed in 80 80 * via the device tree. They also have an antenna SKU parameter 81 81 */ 82 - if (!of_property_read_string(np, "brcm,board-type", &prop)) 82 + err = of_property_read_string(np, "brcm,board-type", &prop); 83 + if (!err) 83 84 settings->board_type = prop; 84 85 85 86 if (!of_property_read_string(np, "apple,antenna-sku", &prop)) ··· 88 87 89 88 /* Set board-type to the first string of the machine compatible prop */ 90 89 root = of_find_node_by_path("/"); 91 - if (root && !settings->board_type) { 90 + if (root && err) { 92 91 char *board_type; 93 92 const char *tmp; 94 93
+41 -3
drivers/nfc/pn533/usb.c
··· 153 153 return usb_submit_urb(phy->ack_urb, flags); 154 154 } 155 155 156 + struct pn533_out_arg { 157 + struct pn533_usb_phy *phy; 158 + struct completion done; 159 + }; 160 + 156 161 static int pn533_usb_send_frame(struct pn533 *dev, 157 162 struct sk_buff *out) 158 163 { 159 164 struct pn533_usb_phy *phy = dev->phy; 165 + struct pn533_out_arg arg; 166 + void *cntx; 160 167 int rc; 161 168 162 169 if (phy->priv == NULL) ··· 175 168 print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1, 176 169 out->data, out->len, false); 177 170 171 + init_completion(&arg.done); 172 + cntx = phy->out_urb->context; 173 + phy->out_urb->context = &arg; 174 + 178 175 rc = usb_submit_urb(phy->out_urb, GFP_KERNEL); 179 176 if (rc) 180 177 return rc; 178 + 179 + wait_for_completion(&arg.done); 180 + phy->out_urb->context = cntx; 181 181 182 182 if (dev->protocol_type == PN533_PROTO_REQ_RESP) { 183 183 /* request for response for sent packet directly */ ··· 422 408 return arg.rc; 423 409 } 424 410 425 - static void pn533_send_complete(struct urb *urb) 411 + static void pn533_out_complete(struct urb *urb) 412 + { 413 + struct pn533_out_arg *arg = urb->context; 414 + struct pn533_usb_phy *phy = arg->phy; 415 + 416 + switch (urb->status) { 417 + case 0: 418 + break; /* success */ 419 + case -ECONNRESET: 420 + case -ENOENT: 421 + dev_dbg(&phy->udev->dev, 422 + "The urb has been stopped (status %d)\n", 423 + urb->status); 424 + break; 425 + case -ESHUTDOWN: 426 + default: 427 + nfc_err(&phy->udev->dev, 428 + "Urb failure (status %d)\n", 429 + urb->status); 430 + } 431 + 432 + complete(&arg->done); 433 + } 434 + 435 + static void pn533_ack_complete(struct urb *urb) 426 436 { 427 437 struct pn533_usb_phy *phy = urb->context; 428 438 ··· 534 496 535 497 usb_fill_bulk_urb(phy->out_urb, phy->udev, 536 498 usb_sndbulkpipe(phy->udev, out_endpoint), 537 - NULL, 0, pn533_send_complete, phy); 499 + NULL, 0, pn533_out_complete, phy); 538 500 usb_fill_bulk_urb(phy->ack_urb, phy->udev, 539 501 
usb_sndbulkpipe(phy->udev, out_endpoint), 540 - NULL, 0, pn533_send_complete, phy); 502 + NULL, 0, pn533_ack_complete, phy); 541 503 542 504 switch (id->driver_info) { 543 505 case PN533_DEVICE_STD:
+4 -2
fs/afs/cmservice.c
··· 13 13 #include "internal.h" 14 14 #include "afs_cm.h" 15 15 #include "protocol_yfs.h" 16 + #define RXRPC_TRACE_ONLY_DEFINE_ENUMS 17 + #include <trace/events/rxrpc.h> 16 18 17 19 static int afs_deliver_cb_init_call_back_state(struct afs_call *); 18 20 static int afs_deliver_cb_init_call_back_state3(struct afs_call *); ··· 193 191 * Abort a service call from within an action function. 194 192 */ 195 193 static void afs_abort_service_call(struct afs_call *call, u32 abort_code, int error, 196 - const char *why) 194 + enum rxrpc_abort_reason why) 197 195 { 198 196 rxrpc_kernel_abort_call(call->net->socket, call->rxcall, 199 197 abort_code, error, why); ··· 471 469 if (memcmp(r, &call->net->uuid, sizeof(call->net->uuid)) == 0) 472 470 afs_send_empty_reply(call); 473 471 else 474 - afs_abort_service_call(call, 1, 1, "K-1"); 472 + afs_abort_service_call(call, 1, 1, afs_abort_probeuuid_negative); 475 473 476 474 afs_put_call(call); 477 475 _leave("");
+17 -7
fs/afs/rxrpc.c
··· 13 13 #include "internal.h" 14 14 #include "afs_cm.h" 15 15 #include "protocol_yfs.h" 16 + #define RXRPC_TRACE_ONLY_DEFINE_ENUMS 17 + #include <trace/events/rxrpc.h> 16 18 17 19 struct workqueue_struct *afs_async_calls; 18 20 ··· 399 397 error_do_abort: 400 398 if (ret != -ECONNABORTED) { 401 399 rxrpc_kernel_abort_call(call->net->socket, rxcall, 402 - RX_USER_ABORT, ret, "KSD"); 400 + RX_USER_ABORT, ret, 401 + afs_abort_send_data_error); 403 402 } else { 404 403 len = 0; 405 404 iov_iter_kvec(&msg.msg_iter, ITER_DEST, NULL, 0, 0); ··· 530 527 case -ENOTSUPP: 531 528 abort_code = RXGEN_OPCODE; 532 529 rxrpc_kernel_abort_call(call->net->socket, call->rxcall, 533 - abort_code, ret, "KIV"); 530 + abort_code, ret, 531 + afs_abort_op_not_supported); 534 532 goto local_abort; 535 533 case -EIO: 536 534 pr_err("kAFS: Call %u in bad state %u\n", ··· 546 542 if (state != AFS_CALL_CL_AWAIT_REPLY) 547 543 abort_code = RXGEN_SS_UNMARSHAL; 548 544 rxrpc_kernel_abort_call(call->net->socket, call->rxcall, 549 - abort_code, ret, "KUM"); 545 + abort_code, ret, 546 + afs_abort_unmarshal_error); 550 547 goto local_abort; 551 548 default: 552 549 abort_code = RX_CALL_DEAD; 553 550 rxrpc_kernel_abort_call(call->net->socket, call->rxcall, 554 - abort_code, ret, "KER"); 551 + abort_code, ret, 552 + afs_abort_general_error); 555 553 goto local_abort; 556 554 } 557 555 } ··· 625 619 /* Kill off the call if it's still live. 
*/ 626 620 _debug("call interrupted"); 627 621 if (rxrpc_kernel_abort_call(call->net->socket, call->rxcall, 628 - RX_USER_ABORT, -EINTR, "KWI")) 622 + RX_USER_ABORT, -EINTR, 623 + afs_abort_interrupted)) 629 624 afs_set_call_complete(call, -EINTR, 0); 630 625 } 631 626 } ··· 843 836 case -ENOMEM: 844 837 _debug("oom"); 845 838 rxrpc_kernel_abort_call(net->socket, call->rxcall, 846 - RXGEN_SS_MARSHAL, -ENOMEM, "KOO"); 839 + RXGEN_SS_MARSHAL, -ENOMEM, 840 + afs_abort_oom); 847 841 fallthrough; 848 842 default: 849 843 _leave(" [error]"); ··· 886 878 if (n == -ENOMEM) { 887 879 _debug("oom"); 888 880 rxrpc_kernel_abort_call(net->socket, call->rxcall, 889 - RXGEN_SS_MARSHAL, -ENOMEM, "KOO"); 881 + RXGEN_SS_MARSHAL, -ENOMEM, 882 + afs_abort_oom); 890 883 } 891 884 _leave(" [error]"); 892 885 } ··· 909 900 ret = rxrpc_kernel_recv_data(net->socket, call->rxcall, iter, 910 901 &call->iov_len, want_more, &remote_abort, 911 902 &call->service_id); 903 + trace_afs_receive_data(call, call->iter, want_more, ret); 912 904 if (ret == 0 || ret == -EAGAIN) 913 905 return ret; 914 906
+1 -1
include/linux/mlx5/driver.h
··· 315 315 struct mlx5_cmd_debug dbg; 316 316 struct cmd_msg_cache cache[MLX5_NUM_COMMAND_CACHES]; 317 317 int checksum_disabled; 318 - struct mlx5_cmd_stats *stats; 318 + struct mlx5_cmd_stats stats[MLX5_CMD_OP_MAX]; 319 319 }; 320 320 321 321 struct mlx5_cmd_mailbox {
+2 -1
include/net/af_rxrpc.h
··· 15 15 struct sock; 16 16 struct socket; 17 17 struct rxrpc_call; 18 + enum rxrpc_abort_reason; 18 19 19 20 enum rxrpc_interruptibility { 20 21 RXRPC_INTERRUPTIBLE, /* Call is interruptible */ ··· 56 55 int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *, 57 56 struct iov_iter *, size_t *, bool, u32 *, u16 *); 58 57 bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *, 59 - u32, int, const char *); 58 + u32, int, enum rxrpc_abort_reason); 60 59 void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *); 61 60 void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *, 62 61 struct sockaddr_rxrpc *);
+124 -36
include/trace/events/rxrpc.h
··· 16 16 /* 17 17 * Declare tracing information enums and their string mappings for display. 18 18 */ 19 + #define rxrpc_abort_reasons \ 20 + /* AFS errors */ \ 21 + EM(afs_abort_general_error, "afs-error") \ 22 + EM(afs_abort_interrupted, "afs-intr") \ 23 + EM(afs_abort_oom, "afs-oom") \ 24 + EM(afs_abort_op_not_supported, "afs-op-notsupp") \ 25 + EM(afs_abort_probeuuid_negative, "afs-probeuuid-neg") \ 26 + EM(afs_abort_send_data_error, "afs-send-data") \ 27 + EM(afs_abort_unmarshal_error, "afs-unmarshal") \ 28 + /* rxperf errors */ \ 29 + EM(rxperf_abort_general_error, "rxperf-error") \ 30 + EM(rxperf_abort_oom, "rxperf-oom") \ 31 + EM(rxperf_abort_op_not_supported, "rxperf-op-notsupp") \ 32 + EM(rxperf_abort_unmarshal_error, "rxperf-unmarshal") \ 33 + /* RxKAD security errors */ \ 34 + EM(rxkad_abort_1_short_check, "rxkad1-short-check") \ 35 + EM(rxkad_abort_1_short_data, "rxkad1-short-data") \ 36 + EM(rxkad_abort_1_short_encdata, "rxkad1-short-encdata") \ 37 + EM(rxkad_abort_1_short_header, "rxkad1-short-hdr") \ 38 + EM(rxkad_abort_2_short_check, "rxkad2-short-check") \ 39 + EM(rxkad_abort_2_short_data, "rxkad2-short-data") \ 40 + EM(rxkad_abort_2_short_header, "rxkad2-short-hdr") \ 41 + EM(rxkad_abort_2_short_len, "rxkad2-short-len") \ 42 + EM(rxkad_abort_bad_checksum, "rxkad2-bad-cksum") \ 43 + EM(rxkad_abort_chall_key_expired, "rxkad-chall-key-exp") \ 44 + EM(rxkad_abort_chall_level, "rxkad-chall-level") \ 45 + EM(rxkad_abort_chall_no_key, "rxkad-chall-nokey") \ 46 + EM(rxkad_abort_chall_short, "rxkad-chall-short") \ 47 + EM(rxkad_abort_chall_version, "rxkad-chall-version") \ 48 + EM(rxkad_abort_resp_bad_callid, "rxkad-resp-bad-callid") \ 49 + EM(rxkad_abort_resp_bad_checksum, "rxkad-resp-bad-cksum") \ 50 + EM(rxkad_abort_resp_bad_param, "rxkad-resp-bad-param") \ 51 + EM(rxkad_abort_resp_call_ctr, "rxkad-resp-call-ctr") \ 52 + EM(rxkad_abort_resp_call_state, "rxkad-resp-call-state") \ 53 + EM(rxkad_abort_resp_key_expired, "rxkad-resp-key-exp") \ 54 + 
EM(rxkad_abort_resp_key_rejected, "rxkad-resp-key-rej") \ 55 + EM(rxkad_abort_resp_level, "rxkad-resp-level") \ 56 + EM(rxkad_abort_resp_nokey, "rxkad-resp-nokey") \ 57 + EM(rxkad_abort_resp_ooseq, "rxkad-resp-ooseq") \ 58 + EM(rxkad_abort_resp_short, "rxkad-resp-short") \ 59 + EM(rxkad_abort_resp_short_tkt, "rxkad-resp-short-tkt") \ 60 + EM(rxkad_abort_resp_tkt_aname, "rxkad-resp-tk-aname") \ 61 + EM(rxkad_abort_resp_tkt_expired, "rxkad-resp-tk-exp") \ 62 + EM(rxkad_abort_resp_tkt_future, "rxkad-resp-tk-future") \ 63 + EM(rxkad_abort_resp_tkt_inst, "rxkad-resp-tk-inst") \ 64 + EM(rxkad_abort_resp_tkt_len, "rxkad-resp-tk-len") \ 65 + EM(rxkad_abort_resp_tkt_realm, "rxkad-resp-tk-realm") \ 66 + EM(rxkad_abort_resp_tkt_short, "rxkad-resp-tk-short") \ 67 + EM(rxkad_abort_resp_tkt_sinst, "rxkad-resp-tk-sinst") \ 68 + EM(rxkad_abort_resp_tkt_sname, "rxkad-resp-tk-sname") \ 69 + EM(rxkad_abort_resp_unknown_tkt, "rxkad-resp-unknown-tkt") \ 70 + EM(rxkad_abort_resp_version, "rxkad-resp-version") \ 71 + /* rxrpc errors */ \ 72 + EM(rxrpc_abort_call_improper_term, "call-improper-term") \ 73 + EM(rxrpc_abort_call_reset, "call-reset") \ 74 + EM(rxrpc_abort_call_sendmsg, "call-sendmsg") \ 75 + EM(rxrpc_abort_call_sock_release, "call-sock-rel") \ 76 + EM(rxrpc_abort_call_sock_release_tba, "call-sock-rel-tba") \ 77 + EM(rxrpc_abort_call_timeout, "call-timeout") \ 78 + EM(rxrpc_abort_no_service_key, "no-serv-key") \ 79 + EM(rxrpc_abort_nomem, "nomem") \ 80 + EM(rxrpc_abort_service_not_offered, "serv-not-offered") \ 81 + EM(rxrpc_abort_shut_down, "shut-down") \ 82 + EM(rxrpc_abort_unsupported_security, "unsup-sec") \ 83 + EM(rxrpc_badmsg_bad_abort, "bad-abort") \ 84 + EM(rxrpc_badmsg_bad_jumbo, "bad-jumbo") \ 85 + EM(rxrpc_badmsg_short_ack, "short-ack") \ 86 + EM(rxrpc_badmsg_short_ack_info, "short-ack-info") \ 87 + EM(rxrpc_badmsg_short_hdr, "short-hdr") \ 88 + EM(rxrpc_badmsg_unsupported_packet, "unsup-pkt") \ 89 + EM(rxrpc_badmsg_zero_call, "zero-call") \ 90 + 
EM(rxrpc_badmsg_zero_seq, "zero-seq") \ 91 + EM(rxrpc_badmsg_zero_service, "zero-service") \ 92 + EM(rxrpc_eproto_ackr_outside_window, "ackr-out-win") \ 93 + EM(rxrpc_eproto_ackr_sack_overflow, "ackr-sack-over") \ 94 + EM(rxrpc_eproto_ackr_short_sack, "ackr-short-sack") \ 95 + EM(rxrpc_eproto_ackr_zero, "ackr-zero") \ 96 + EM(rxrpc_eproto_bad_upgrade, "bad-upgrade") \ 97 + EM(rxrpc_eproto_data_after_last, "data-after-last") \ 98 + EM(rxrpc_eproto_different_last, "diff-last") \ 99 + EM(rxrpc_eproto_early_reply, "early-reply") \ 100 + EM(rxrpc_eproto_improper_term, "improper-term") \ 101 + EM(rxrpc_eproto_no_client_call, "no-cl-call") \ 102 + EM(rxrpc_eproto_no_client_conn, "no-cl-conn") \ 103 + EM(rxrpc_eproto_no_service_call, "no-sv-call") \ 104 + EM(rxrpc_eproto_reupgrade, "re-upgrade") \ 105 + EM(rxrpc_eproto_rxnull_challenge, "rxnull-chall") \ 106 + EM(rxrpc_eproto_rxnull_response, "rxnull-resp") \ 107 + EM(rxrpc_eproto_tx_rot_last, "tx-rot-last") \ 108 + EM(rxrpc_eproto_unexpected_ack, "unex-ack") \ 109 + EM(rxrpc_eproto_unexpected_ackall, "unex-ackall") \ 110 + EM(rxrpc_eproto_unexpected_implicit_end, "unex-impl-end") \ 111 + EM(rxrpc_eproto_unexpected_reply, "unex-reply") \ 112 + EM(rxrpc_eproto_wrong_security, "wrong-sec") \ 113 + EM(rxrpc_recvmsg_excess_data, "recvmsg-excess") \ 114 + EM(rxrpc_recvmsg_short_data, "recvmsg-short") \ 115 + E_(rxrpc_sendmsg_late_send, "sendmsg-late") 116 + 19 117 #define rxrpc_call_poke_traces \ 118 + EM(rxrpc_call_poke_abort, "Abort") \ 119 + EM(rxrpc_call_poke_complete, "Compl") \ 20 120 EM(rxrpc_call_poke_error, "Error") \ 21 121 EM(rxrpc_call_poke_idle, "Idle") \ 22 122 EM(rxrpc_call_poke_start, "Start") \ ··· 126 26 #define rxrpc_skb_traces \ 127 27 EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \ 128 28 EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \ 29 + EM(rxrpc_skb_get_conn_secured, "GET conn-secd") \ 129 30 EM(rxrpc_skb_get_conn_work, "GET conn-work") \ 130 31 EM(rxrpc_skb_get_local_work, "GET locl-work") \ 
131 32 EM(rxrpc_skb_get_reject_work, "GET rej-work ") \ ··· 136 35 EM(rxrpc_skb_new_error_report, "NEW error-rpt") \ 137 36 EM(rxrpc_skb_new_jumbo_subpacket, "NEW jumbo-sub") \ 138 37 EM(rxrpc_skb_new_unshared, "NEW unshared ") \ 38 + EM(rxrpc_skb_put_conn_secured, "PUT conn-secd") \ 139 39 EM(rxrpc_skb_put_conn_work, "PUT conn-work") \ 140 40 EM(rxrpc_skb_put_error_report, "PUT error-rep") \ 141 41 EM(rxrpc_skb_put_input, "PUT input ") \ ··· 178 76 #define rxrpc_peer_traces \ 179 77 EM(rxrpc_peer_free, "FREE ") \ 180 78 EM(rxrpc_peer_get_accept, "GET accept ") \ 181 - EM(rxrpc_peer_get_activate_call, "GET act-call") \ 182 79 EM(rxrpc_peer_get_bundle, "GET bundle ") \ 183 80 EM(rxrpc_peer_get_client_conn, "GET cln-conn") \ 184 81 EM(rxrpc_peer_get_input, "GET input ") \ ··· 190 89 EM(rxrpc_peer_put_bundle, "PUT bundle ") \ 191 90 EM(rxrpc_peer_put_call, "PUT call ") \ 192 91 EM(rxrpc_peer_put_conn, "PUT conn ") \ 193 - EM(rxrpc_peer_put_discard_tmp, "PUT disc-tmp") \ 194 92 EM(rxrpc_peer_put_input, "PUT input ") \ 195 93 EM(rxrpc_peer_put_input_error, "PUT inpt-err") \ 196 94 E_(rxrpc_peer_put_keepalive, "PUT keepaliv") ··· 199 99 EM(rxrpc_bundle_get_client_call, "GET clt-call") \ 200 100 EM(rxrpc_bundle_get_client_conn, "GET clt-conn") \ 201 101 EM(rxrpc_bundle_get_service_conn, "GET svc-conn") \ 102 + EM(rxrpc_bundle_put_call, "PUT call ") \ 202 103 EM(rxrpc_bundle_put_conn, "PUT conn ") \ 203 104 EM(rxrpc_bundle_put_discard, "PUT discard ") \ 204 105 E_(rxrpc_bundle_new, "NEW ") ··· 210 109 EM(rxrpc_conn_get_call_input, "GET inp-call") \ 211 110 EM(rxrpc_conn_get_conn_input, "GET inp-conn") \ 212 111 EM(rxrpc_conn_get_idle, "GET idle ") \ 213 - EM(rxrpc_conn_get_poke, "GET poke ") \ 112 + EM(rxrpc_conn_get_poke_abort, "GET pk-abort") \ 113 + EM(rxrpc_conn_get_poke_timer, "GET poke ") \ 214 114 EM(rxrpc_conn_get_service_conn, "GET svc-conn") \ 215 115 EM(rxrpc_conn_new_client, "NEW client ") \ 216 116 EM(rxrpc_conn_new_service, "NEW service ") \ 217 117 
EM(rxrpc_conn_put_call, "PUT call ") \ 218 118 EM(rxrpc_conn_put_call_input, "PUT inp-call") \ 219 119 EM(rxrpc_conn_put_conn_input, "PUT inp-conn") \ 220 - EM(rxrpc_conn_put_discard, "PUT discard ") \ 221 120 EM(rxrpc_conn_put_discard_idle, "PUT disc-idl") \ 222 121 EM(rxrpc_conn_put_local_dead, "PUT loc-dead") \ 223 122 EM(rxrpc_conn_put_noreuse, "PUT noreuse ") \ ··· 225 124 EM(rxrpc_conn_put_service_reaped, "PUT svc-reap") \ 226 125 EM(rxrpc_conn_put_unbundle, "PUT unbundle") \ 227 126 EM(rxrpc_conn_put_unidle, "PUT unidle ") \ 127 + EM(rxrpc_conn_put_work, "PUT work ") \ 228 128 EM(rxrpc_conn_queue_challenge, "QUE chall ") \ 229 129 EM(rxrpc_conn_queue_retry_work, "QUE retry-wk") \ 230 130 EM(rxrpc_conn_queue_rx_work, "QUE rx-work ") \ 231 - EM(rxrpc_conn_queue_timer, "QUE timer ") \ 232 131 EM(rxrpc_conn_see_new_service_conn, "SEE new-svc ") \ 233 132 EM(rxrpc_conn_see_reap_service, "SEE reap-svc") \ 234 133 E_(rxrpc_conn_see_work, "SEE work ") ··· 239 138 EM(rxrpc_client_chan_activate, "ChActv") \ 240 139 EM(rxrpc_client_chan_disconnect, "ChDisc") \ 241 140 EM(rxrpc_client_chan_pass, "ChPass") \ 242 - EM(rxrpc_client_chan_wait_failed, "ChWtFl") \ 243 141 EM(rxrpc_client_cleanup, "Clean ") \ 244 142 EM(rxrpc_client_discard, "Discar") \ 245 - EM(rxrpc_client_duplicate, "Duplic") \ 246 143 EM(rxrpc_client_exposed, "Expose") \ 247 144 EM(rxrpc_client_replace, "Replac") \ 145 + EM(rxrpc_client_queue_new_call, "Q-Call") \ 248 146 EM(rxrpc_client_to_active, "->Actv") \ 249 147 E_(rxrpc_client_to_idle, "->Idle") 250 148 251 149 #define rxrpc_call_traces \ 150 + EM(rxrpc_call_get_io_thread, "GET iothread") \ 252 151 EM(rxrpc_call_get_input, "GET input ") \ 253 152 EM(rxrpc_call_get_kernel_service, "GET krnl-srv") \ 254 153 EM(rxrpc_call_get_notify_socket, "GET notify ") \ ··· 261 160 EM(rxrpc_call_new_prealloc_service, "NEW prealloc") \ 262 161 EM(rxrpc_call_put_discard_prealloc, "PUT disc-pre") \ 263 162 EM(rxrpc_call_put_discard_error, "PUT disc-err") \ 163 + 
EM(rxrpc_call_put_io_thread, "PUT iothread") \ 264 164 EM(rxrpc_call_put_input, "PUT input ") \ 265 165 EM(rxrpc_call_put_kernel, "PUT kernel ") \ 266 166 EM(rxrpc_call_put_poke, "PUT poke ") \ ··· 271 169 EM(rxrpc_call_put_sendmsg, "PUT sendmsg ") \ 272 170 EM(rxrpc_call_put_unnotify, "PUT unnotify") \ 273 171 EM(rxrpc_call_put_userid_exists, "PUT u-exists") \ 172 + EM(rxrpc_call_put_userid, "PUT user-id ") \ 274 173 EM(rxrpc_call_see_accept, "SEE accept ") \ 275 174 EM(rxrpc_call_see_activate_client, "SEE act-clnt") \ 276 175 EM(rxrpc_call_see_connect_failed, "SEE con-fail") \ 277 176 EM(rxrpc_call_see_connected, "SEE connect ") \ 177 + EM(rxrpc_call_see_disconnected, "SEE disconn ") \ 278 178 EM(rxrpc_call_see_distribute_error, "SEE dist-err") \ 279 179 EM(rxrpc_call_see_input, "SEE input ") \ 280 180 EM(rxrpc_call_see_release, "SEE release ") \ ··· 480 376 #define EM(a, b) a, 481 377 #define E_(a, b) a 482 378 379 + enum rxrpc_abort_reason { rxrpc_abort_reasons } __mode(byte); 483 380 enum rxrpc_bundle_trace { rxrpc_bundle_traces } __mode(byte); 484 381 enum rxrpc_call_poke_trace { rxrpc_call_poke_traces } __mode(byte); 485 382 enum rxrpc_call_trace { rxrpc_call_traces } __mode(byte); ··· 509 404 */ 510 405 #undef EM 511 406 #undef E_ 407 + 408 + #ifndef RXRPC_TRACE_ONLY_DEFINE_ENUMS 409 + 512 410 #define EM(a, b) TRACE_DEFINE_ENUM(a); 513 411 #define E_(a, b) TRACE_DEFINE_ENUM(a); 514 412 413 + rxrpc_abort_reasons; 515 414 rxrpc_bundle_traces; 516 415 rxrpc_call_poke_traces; 517 416 rxrpc_call_traces; ··· 766 657 ); 767 658 768 659 TRACE_EVENT(rxrpc_abort, 769 - TP_PROTO(unsigned int call_nr, const char *why, u32 cid, u32 call_id, 770 - rxrpc_seq_t seq, int abort_code, int error), 660 + TP_PROTO(unsigned int call_nr, enum rxrpc_abort_reason why, 661 + u32 cid, u32 call_id, rxrpc_seq_t seq, int abort_code, int error), 771 662 772 663 TP_ARGS(call_nr, why, cid, call_id, seq, abort_code, error), 773 664 774 665 TP_STRUCT__entry( 775 666 __field(unsigned int, 
call_nr ) 776 - __array(char, why, 4 ) 667 + __field(enum rxrpc_abort_reason, why ) 777 668 __field(u32, cid ) 778 669 __field(u32, call_id ) 779 670 __field(rxrpc_seq_t, seq ) ··· 782 673 ), 783 674 784 675 TP_fast_assign( 785 - memcpy(__entry->why, why, 4); 786 676 __entry->call_nr = call_nr; 677 + __entry->why = why; 787 678 __entry->cid = cid; 788 679 __entry->call_id = call_id; 789 680 __entry->abort_code = abort_code; ··· 794 685 TP_printk("c=%08x %08x:%08x s=%u a=%d e=%d %s", 795 686 __entry->call_nr, 796 687 __entry->cid, __entry->call_id, __entry->seq, 797 - __entry->abort_code, __entry->error, __entry->why) 688 + __entry->abort_code, __entry->error, 689 + __print_symbolic(__entry->why, rxrpc_abort_reasons)) 798 690 ); 799 691 800 692 TRACE_EVENT(rxrpc_call_complete, ··· 1631 1521 __entry->abort_code) 1632 1522 ); 1633 1523 1634 - TRACE_EVENT(rxrpc_rx_eproto, 1635 - TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial, 1636 - const char *why), 1637 - 1638 - TP_ARGS(call, serial, why), 1639 - 1640 - TP_STRUCT__entry( 1641 - __field(unsigned int, call ) 1642 - __field(rxrpc_serial_t, serial ) 1643 - __field(const char *, why ) 1644 - ), 1645 - 1646 - TP_fast_assign( 1647 - __entry->call = call ? call->debug_id : 0; 1648 - __entry->serial = serial; 1649 - __entry->why = why; 1650 - ), 1651 - 1652 - TP_printk("c=%08x EPROTO %08x %s", 1653 - __entry->call, 1654 - __entry->serial, 1655 - __entry->why) 1656 - ); 1657 - 1658 1524 TRACE_EVENT(rxrpc_connect_call, 1659 1525 TP_PROTO(struct rxrpc_call *call), 1660 1526 ··· 1928 1842 1929 1843 #undef EM 1930 1844 #undef E_ 1845 + 1846 + #endif /* RXRPC_TRACE_ONLY_DEFINE_ENUMS */ 1931 1847 #endif /* _TRACE_RXRPC_H */ 1932 1848 1933 1849 /* This part must be outside protection */
+3 -2
net/core/gro.c
··· 505 505 NAPI_GRO_CB(skb)->count = 1; 506 506 if (unlikely(skb_is_gso(skb))) { 507 507 NAPI_GRO_CB(skb)->count = skb_shinfo(skb)->gso_segs; 508 - /* Only support TCP at the moment. */ 509 - if (!skb_is_gso_tcp(skb)) 508 + /* Only support TCP and non DODGY users. */ 509 + if (!skb_is_gso_tcp(skb) || 510 + (skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)) 510 511 NAPI_GRO_CB(skb)->flush = 1; 511 512 } 512 513
+4
net/ipv6/raw.c
··· 505 505 static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6, 506 506 struct raw6_sock *rp) 507 507 { 508 + struct ipv6_txoptions *opt; 508 509 struct sk_buff *skb; 509 510 int err = 0; 510 511 int offset; ··· 523 522 524 523 offset = rp->offset; 525 524 total_len = inet_sk(sk)->cork.base.length; 525 + opt = inet6_sk(sk)->cork.opt; 526 + total_len -= opt ? opt->opt_flen : 0; 527 + 526 528 if (offset >= total_len - 1) { 527 529 err = -EINVAL; 528 530 ip6_flush_pending_frames(sk);
+1
net/rxrpc/Makefile
··· 10 10 call_accept.o \ 11 11 call_event.o \ 12 12 call_object.o \ 13 + call_state.o \ 13 14 conn_client.o \ 14 15 conn_event.o \ 15 16 conn_object.o \
+11 -16
net/rxrpc/af_rxrpc.c
··· 155 155 156 156 if (service_id) { 157 157 write_lock(&local->services_lock); 158 - if (rcu_access_pointer(local->service)) 158 + if (local->service) 159 159 goto service_in_use; 160 160 rx->local = local; 161 - rcu_assign_pointer(local->service, rx); 161 + local->service = rx; 162 162 write_unlock(&local->services_lock); 163 163 164 164 rx->sk.sk_state = RXRPC_SERVER_BOUND; ··· 328 328 mutex_unlock(&call->user_mutex); 329 329 } 330 330 331 - rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp); 332 331 _leave(" = %p", call); 333 332 return call; 334 333 } ··· 373 374 * @sock: The socket the call is on 374 375 * @call: The call to check 375 376 * 376 - * Allow a kernel service to find out whether a call is still alive - 377 - * ie. whether it has completed. 377 + * Allow a kernel service to find out whether a call is still alive - whether 378 + * it has completed successfully and all received data has been consumed. 378 379 */ 379 380 bool rxrpc_kernel_check_life(const struct socket *sock, 380 381 const struct rxrpc_call *call) 381 382 { 382 - return call->state != RXRPC_CALL_COMPLETE; 383 + if (!rxrpc_call_is_complete(call)) 384 + return true; 385 + if (call->completion != RXRPC_CALL_SUCCEEDED) 386 + return false; 387 + return !skb_queue_empty(&call->recvmsg_queue); 383 388 } 384 389 EXPORT_SYMBOL(rxrpc_kernel_check_life); 385 390 ··· 875 872 876 873 sk->sk_state = RXRPC_CLOSE; 877 874 878 - if (rx->local && rcu_access_pointer(rx->local->service) == rx) { 875 + if (rx->local && rx->local->service == rx) { 879 876 write_lock(&rx->local->services_lock); 880 - rcu_assign_pointer(rx->local->service, NULL); 877 + rx->local->service = NULL; 881 878 write_unlock(&rx->local->services_lock); 882 879 } 883 880 ··· 960 957 static int __init af_rxrpc_init(void) 961 958 { 962 959 int ret = -1; 963 - unsigned int tmp; 964 960 965 961 BUILD_BUG_ON(sizeof(struct rxrpc_skb_priv) > sizeof_field(struct sk_buff, cb)); 966 - 967 - get_random_bytes(&tmp, sizeof(tmp)); 968 - tmp &= 
0x3fffffff; 969 - if (tmp == 0) 970 - tmp = 1; 971 - idr_set_cursor(&rxrpc_client_conn_ids, tmp); 972 962 973 963 ret = -ENOMEM; 974 964 rxrpc_call_jar = kmem_cache_create( ··· 1058 1062 * are released. 1059 1063 */ 1060 1064 rcu_barrier(); 1061 - rxrpc_destroy_client_conn_ids(); 1062 1065 1063 1066 destroy_workqueue(rxrpc_workqueue); 1064 1067 rxrpc_exit_security();
+141 -71
net/rxrpc/ar-internal.h
··· 38 38 enum rxrpc_skb_mark { 39 39 RXRPC_SKB_MARK_PACKET, /* Received packet */ 40 40 RXRPC_SKB_MARK_ERROR, /* Error notification */ 41 + RXRPC_SKB_MARK_SERVICE_CONN_SECURED, /* Service connection response has been verified */ 41 42 RXRPC_SKB_MARK_REJECT_BUSY, /* Reject with BUSY */ 42 43 RXRPC_SKB_MARK_REJECT_ABORT, /* Reject with ABORT (code in skb->priority) */ 43 44 }; ··· 76 75 77 76 bool live; 78 77 79 - bool kill_all_client_conns; 80 78 atomic_t nr_client_conns; 81 - spinlock_t client_conn_cache_lock; /* Lock for ->*_client_conns */ 82 - struct mutex client_conn_discard_lock; /* Prevent multiple discarders */ 83 - struct list_head idle_client_conns; 84 - struct work_struct client_conn_reaper; 85 - struct timer_list client_conn_reap_timer; 86 79 87 80 struct hlist_head local_endpoints; 88 81 struct mutex local_mutex; /* Lock for ->local_endpoints */ ··· 197 202 * - max 48 bytes (struct sk_buff::cb) 198 203 */ 199 204 struct rxrpc_skb_priv { 205 + struct rxrpc_connection *conn; /* Connection referred to (poke packet) */ 200 206 u16 offset; /* Offset of data */ 201 207 u16 len; /* Length of data */ 202 208 u8 flags; ··· 258 262 259 263 /* respond to a challenge */ 260 264 int (*respond_to_challenge)(struct rxrpc_connection *, 261 - struct sk_buff *, 262 - u32 *); 265 + struct sk_buff *); 263 266 264 267 /* verify a response */ 265 268 int (*verify_response)(struct rxrpc_connection *, 266 - struct sk_buff *, 267 - u32 *); 269 + struct sk_buff *); 268 270 269 271 /* clear connection security */ 270 272 void (*clear)(struct rxrpc_connection *); ··· 277 283 struct rcu_head rcu; 278 284 atomic_t active_users; /* Number of users of the local endpoint */ 279 285 refcount_t ref; /* Number of references to the structure */ 280 - struct rxrpc_net *rxnet; /* The network ns in which this resides */ 286 + struct net *net; /* The network namespace */ 287 + struct rxrpc_net *rxnet; /* Our bits in the network namespace */ 281 288 struct hlist_node link; 282 289 struct 
socket *socket; /* my UDP socket */ 283 290 struct task_struct *io_thread; 284 291 struct completion io_thread_ready; /* Indication that the I/O thread started */ 285 - struct rxrpc_sock __rcu *service; /* Service(s) listening on this endpoint */ 292 + struct rxrpc_sock *service; /* Service(s) listening on this endpoint */ 286 293 struct rw_semaphore defrag_sem; /* control re-enablement of IP DF bit */ 287 294 struct sk_buff_head rx_queue; /* Received packets */ 295 + struct list_head conn_attend_q; /* Conns requiring immediate attention */ 288 296 struct list_head call_attend_q; /* Calls requiring immediate attention */ 297 + 289 298 struct rb_root client_bundles; /* Client connection bundles by socket params */ 290 299 spinlock_t client_bundles_lock; /* Lock for client_bundles */ 300 + bool kill_all_client_conns; 301 + struct list_head idle_client_conns; 302 + struct timer_list client_conn_reap_timer; 303 + unsigned long client_conn_flags; 304 + #define RXRPC_CLIENT_CONN_REAP_TIMER 0 /* The client conn reap timer expired */ 305 + 291 306 spinlock_t lock; /* access lock */ 292 307 rwlock_t services_lock; /* lock for services list */ 293 308 int debug_id; /* debug ID for printks */ 294 309 bool dead; 295 310 bool service_closed; /* Service socket closed */ 311 + struct idr conn_ids; /* List of connection IDs */ 312 + struct list_head new_client_calls; /* Newly created client calls need connection */ 313 + spinlock_t client_call_lock; /* Lock for ->new_client_calls */ 296 314 struct sockaddr_rxrpc srx; /* local address */ 297 315 }; 298 316 ··· 362 356 363 357 struct rxrpc_conn_parameters { 364 358 struct rxrpc_local *local; /* Representation of local endpoint */ 365 - struct rxrpc_peer *peer; /* Remote endpoint */ 366 359 struct key *key; /* Security details */ 367 360 bool exclusive; /* T if conn is exclusive */ 368 361 bool upgrade; /* T if service ID can be upgraded */ ··· 370 365 }; 371 366 372 367 /* 368 + * Call completion condition (state == 
RXRPC_CALL_COMPLETE). 369 + */ 370 + enum rxrpc_call_completion { 371 + RXRPC_CALL_SUCCEEDED, /* - Normal termination */ 372 + RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ 373 + RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ 374 + RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */ 375 + RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ 376 + NR__RXRPC_CALL_COMPLETIONS 377 + }; 378 + 379 + /* 373 380 * Bits in the connection flags. 374 381 */ 375 382 enum rxrpc_conn_flag { 376 - RXRPC_CONN_HAS_IDR, /* Has a client conn ID assigned */ 377 383 RXRPC_CONN_IN_SERVICE_CONNS, /* Conn is in peer->service_conns */ 378 384 RXRPC_CONN_DONT_REUSE, /* Don't reuse this connection */ 379 385 RXRPC_CONN_PROBING_FOR_UPGRADE, /* Probing for service upgrade */ ··· 404 388 */ 405 389 enum rxrpc_conn_event { 406 390 RXRPC_CONN_EV_CHALLENGE, /* Send challenge packet */ 391 + RXRPC_CONN_EV_ABORT_CALLS, /* Abort attached calls */ 407 392 }; 408 393 409 394 /* ··· 412 395 */ 413 396 enum rxrpc_conn_proto_state { 414 397 RXRPC_CONN_UNUSED, /* Connection not yet attempted */ 398 + RXRPC_CONN_CLIENT_UNSECURED, /* Client connection needs security init */ 415 399 RXRPC_CONN_CLIENT, /* Client connection */ 416 400 RXRPC_CONN_SERVICE_PREALLOC, /* Service connection preallocation */ 417 401 RXRPC_CONN_SERVICE_UNSECURED, /* Service unsecured connection */ 418 402 RXRPC_CONN_SERVICE_CHALLENGING, /* Service challenging for security */ 419 403 RXRPC_CONN_SERVICE, /* Service secured connection */ 420 - RXRPC_CONN_REMOTELY_ABORTED, /* Conn aborted by peer */ 421 - RXRPC_CONN_LOCALLY_ABORTED, /* Conn aborted locally */ 404 + RXRPC_CONN_ABORTED, /* Conn aborted */ 422 405 RXRPC_CONN__NR_STATES 423 406 }; 424 407 ··· 429 412 struct rxrpc_local *local; /* Representation of local endpoint */ 430 413 struct rxrpc_peer *peer; /* Remote endpoint */ 431 414 struct key *key; /* Security details */ 415 + const struct rxrpc_security *security; /* 
applied security module */ 432 416 refcount_t ref; 433 417 atomic_t active; /* Number of active users */ 434 418 unsigned int debug_id; 435 419 u32 security_level; /* Security level selected */ 436 420 u16 service_id; /* Service ID for this connection */ 437 421 bool try_upgrade; /* True if the bundle is attempting upgrade */ 438 - bool alloc_conn; /* True if someone's getting a conn */ 439 422 bool exclusive; /* T if conn is exclusive */ 440 423 bool upgrade; /* T if service ID can be upgraded */ 441 - short alloc_error; /* Error from last conn allocation */ 442 - spinlock_t channel_lock; 424 + unsigned short alloc_error; /* Error from last conn allocation */ 443 425 struct rb_node local_node; /* Node in local->client_conns */ 444 426 struct list_head waiting_calls; /* Calls waiting for channels */ 445 427 unsigned long avail_chans; /* Mask of available channels */ ··· 456 440 struct rxrpc_peer *peer; /* Remote endpoint */ 457 441 struct rxrpc_net *rxnet; /* Network namespace to which call belongs */ 458 442 struct key *key; /* Security details */ 443 + struct list_head attend_link; /* Link in local->conn_attend_q */ 459 444 460 445 refcount_t ref; 461 446 atomic_t active; /* Active count for service conns */ ··· 466 449 unsigned char act_chans; /* Mask of active channels */ 467 450 struct rxrpc_channel { 468 451 unsigned long final_ack_at; /* Time at which to issue final ACK */ 469 - struct rxrpc_call __rcu *call; /* Active call */ 452 + struct rxrpc_call *call; /* Active call */ 470 453 unsigned int call_debug_id; /* call->debug_id */ 471 454 u32 call_id; /* ID of current call */ 472 455 u32 call_counter; /* Call ID counter */ ··· 487 470 struct list_head link; /* link in master connection list */ 488 471 struct sk_buff_head rx_queue; /* received conn-level packets */ 489 472 473 + struct mutex security_lock; /* Lock for security management */ 490 474 const struct rxrpc_security *security; /* applied security module */ 491 475 union { 492 476 struct { ··· 501 
483 unsigned long idle_timestamp; /* Time at which last became idle */ 502 484 spinlock_t state_lock; /* state-change lock */ 503 485 enum rxrpc_conn_proto_state state; /* current state of connection */ 504 - u32 abort_code; /* Abort code of connection abort */ 486 + enum rxrpc_call_completion completion; /* Completion condition */ 487 + s32 abort_code; /* Abort code of connection abort */ 505 488 int debug_id; /* debug ID for printks */ 506 489 atomic_t serial; /* packet serial number counter */ 507 490 unsigned int hi_serial; /* highest serial number received */ ··· 546 527 RXRPC_CALL_KERNEL, /* The call was made by the kernel */ 547 528 RXRPC_CALL_UPGRADE, /* Service upgrade was requested for the call */ 548 529 RXRPC_CALL_EXCLUSIVE, /* The call uses a once-only connection */ 549 - RXRPC_CALL_RX_IS_IDLE, /* Reception is idle - send an ACK */ 530 + RXRPC_CALL_RX_IS_IDLE, /* recvmsg() is idle - send an ACK */ 531 + RXRPC_CALL_RECVMSG_READ_ALL, /* recvmsg() read all of the received data */ 550 532 }; 551 533 552 534 /* ··· 578 558 }; 579 559 580 560 /* 581 - * Call completion condition (state == RXRPC_CALL_COMPLETE). 582 - */ 583 - enum rxrpc_call_completion { 584 - RXRPC_CALL_SUCCEEDED, /* - Normal termination */ 585 - RXRPC_CALL_REMOTELY_ABORTED, /* - call aborted by peer */ 586 - RXRPC_CALL_LOCALLY_ABORTED, /* - call aborted locally on error or close */ 587 - RXRPC_CALL_LOCAL_ERROR, /* - call failed due to local error */ 588 - RXRPC_CALL_NETWORK_ERROR, /* - call terminated by network error */ 589 - NR__RXRPC_CALL_COMPLETIONS 590 - }; 591 - 592 - /* 593 561 * Call Tx congestion management modes. 
594 562 */ 595 563 enum rxrpc_congest_mode { ··· 595 587 struct rxrpc_call { 596 588 struct rcu_head rcu; 597 589 struct rxrpc_connection *conn; /* connection carrying call */ 590 + struct rxrpc_bundle *bundle; /* Connection bundle to use */ 598 591 struct rxrpc_peer *peer; /* Peer record for remote address */ 599 592 struct rxrpc_local *local; /* Representation of local endpoint */ 600 593 struct rxrpc_sock __rcu *socket; /* socket responsible */ ··· 618 609 struct work_struct destroyer; /* In-process-context destroyer */ 619 610 rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */ 620 611 struct list_head link; /* link in master call list */ 621 - struct list_head chan_wait_link; /* Link in conn->bundle->waiting_calls */ 612 + struct list_head wait_link; /* Link in local->new_client_calls */ 622 613 struct hlist_node error_link; /* link in error distribution list */ 623 614 struct list_head accept_link; /* Link in rx->acceptq */ 624 615 struct list_head recvmsg_link; /* Link in rx->recvmsg_q */ ··· 632 623 unsigned long flags; 633 624 unsigned long events; 634 625 spinlock_t notify_lock; /* Kernel notification lock */ 635 - rwlock_t state_lock; /* lock for state transition */ 636 - u32 abort_code; /* Local/remote abort code */ 626 + unsigned int send_abort_why; /* Why the abort [enum rxrpc_abort_reason] */ 627 + s32 send_abort; /* Abort code to be sent */ 628 + short send_abort_err; /* Error to be associated with the abort */ 629 + rxrpc_seq_t send_abort_seq; /* DATA packet that incurred the abort (or 0) */ 630 + s32 abort_code; /* Local/remote abort code */ 637 631 int error; /* Local error incurred */ 638 - enum rxrpc_call_state state; /* current state of call */ 632 + enum rxrpc_call_state _state; /* Current state of call (needs barrier) */ 639 633 enum rxrpc_call_completion completion; /* Call completion condition */ 640 634 refcount_t ref; 641 635 u8 security_ix; /* Security type */ ··· 824 812 */ 825 813 int 
rxrpc_service_prealloc(struct rxrpc_sock *, gfp_t); 826 814 void rxrpc_discard_prealloc(struct rxrpc_sock *); 827 - int rxrpc_new_incoming_call(struct rxrpc_local *, struct rxrpc_peer *, 828 - struct rxrpc_connection *, struct sockaddr_rxrpc *, 829 - struct sk_buff *); 815 + bool rxrpc_new_incoming_call(struct rxrpc_local *local, 816 + struct rxrpc_peer *peer, 817 + struct rxrpc_connection *conn, 818 + struct sockaddr_rxrpc *peer_srx, 819 + struct sk_buff *skb); 830 820 void rxrpc_accept_incoming_calls(struct rxrpc_local *); 831 821 int rxrpc_user_charge_accept(struct rxrpc_sock *, unsigned long); 832 822 ··· 848 834 unsigned long now, 849 835 enum rxrpc_timer_trace why); 850 836 851 - void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb); 837 + bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb); 852 838 853 839 /* 854 840 * call_object.c ··· 865 851 struct sockaddr_rxrpc *, 866 852 struct rxrpc_call_params *, gfp_t, 867 853 unsigned int); 854 + void rxrpc_start_call_timer(struct rxrpc_call *call); 868 855 void rxrpc_incoming_call(struct rxrpc_sock *, struct rxrpc_call *, 869 856 struct sk_buff *); 870 857 void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *); ··· 888 873 } 889 874 890 875 /* 876 + * call_state.c 877 + */ 878 + bool rxrpc_set_call_completion(struct rxrpc_call *call, 879 + enum rxrpc_call_completion compl, 880 + u32 abort_code, 881 + int error); 882 + bool rxrpc_call_completed(struct rxrpc_call *call); 883 + bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq, 884 + u32 abort_code, int error, enum rxrpc_abort_reason why); 885 + void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl, 886 + int error); 887 + 888 + static inline void rxrpc_set_call_state(struct rxrpc_call *call, 889 + enum rxrpc_call_state state) 890 + { 891 + /* Order write of completion info before write of ->state. 
*/ 892 + smp_store_release(&call->_state, state); 893 + wake_up(&call->waitq); 894 + } 895 + 896 + static inline enum rxrpc_call_state __rxrpc_call_state(const struct rxrpc_call *call) 897 + { 898 + return call->_state; /* Only inside I/O thread */ 899 + } 900 + 901 + static inline bool __rxrpc_call_is_complete(const struct rxrpc_call *call) 902 + { 903 + return __rxrpc_call_state(call) == RXRPC_CALL_COMPLETE; 904 + } 905 + 906 + static inline enum rxrpc_call_state rxrpc_call_state(const struct rxrpc_call *call) 907 + { 908 + /* Order read ->state before read of completion info. */ 909 + return smp_load_acquire(&call->_state); 910 + } 911 + 912 + static inline bool rxrpc_call_is_complete(const struct rxrpc_call *call) 913 + { 914 + return rxrpc_call_state(call) == RXRPC_CALL_COMPLETE; 915 + } 916 + 917 + static inline bool rxrpc_call_has_failed(const struct rxrpc_call *call) 918 + { 919 + return rxrpc_call_is_complete(call) && call->completion != RXRPC_CALL_SUCCEEDED; 920 + } 921 + 922 + /* 891 923 * conn_client.c 892 924 */ 893 925 extern unsigned int rxrpc_reap_client_connections; 894 926 extern unsigned long rxrpc_conn_idle_client_expiry; 895 927 extern unsigned long rxrpc_conn_idle_client_fast_expiry; 896 - extern struct idr rxrpc_client_conn_ids; 897 928 898 - void rxrpc_destroy_client_conn_ids(void); 929 + void rxrpc_purge_client_connections(struct rxrpc_local *local); 899 930 struct rxrpc_bundle *rxrpc_get_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace); 900 931 void rxrpc_put_bundle(struct rxrpc_bundle *, enum rxrpc_bundle_trace); 901 - int rxrpc_connect_call(struct rxrpc_sock *, struct rxrpc_call *, 902 - struct rxrpc_conn_parameters *, struct sockaddr_rxrpc *, 903 - gfp_t); 932 + int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp); 933 + void rxrpc_connect_client_calls(struct rxrpc_local *local); 904 934 void rxrpc_expose_client_call(struct rxrpc_call *); 905 935 void rxrpc_disconnect_client_call(struct rxrpc_bundle *, struct rxrpc_call 
*); 936 + void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle); 906 937 void rxrpc_put_client_conn(struct rxrpc_connection *, enum rxrpc_conn_trace); 907 - void rxrpc_discard_expired_client_conns(struct work_struct *); 908 - void rxrpc_destroy_all_client_connections(struct rxrpc_net *); 938 + void rxrpc_discard_expired_client_conns(struct rxrpc_local *local); 909 939 void rxrpc_clean_up_local_conns(struct rxrpc_local *); 910 940 911 941 /* 912 942 * conn_event.c 913 943 */ 944 + void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, struct sk_buff *skb, 945 + unsigned int channel); 946 + int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb, 947 + s32 abort_code, int err, enum rxrpc_abort_reason why); 914 948 void rxrpc_process_connection(struct work_struct *); 915 949 void rxrpc_process_delayed_final_acks(struct rxrpc_connection *, bool); 916 - int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb); 950 + bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb); 951 + void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb); 952 + 953 + static inline bool rxrpc_is_conn_aborted(const struct rxrpc_connection *conn) 954 + { 955 + /* Order reading the abort info after the state check. 
*/ 956 + return smp_load_acquire(&conn->state) == RXRPC_CONN_ABORTED; 957 + } 917 958 918 959 /* 919 960 * conn_object.c ··· 977 906 extern unsigned int rxrpc_connection_expiry; 978 907 extern unsigned int rxrpc_closed_conn_expiry; 979 908 909 + void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why); 980 910 struct rxrpc_connection *rxrpc_alloc_connection(struct rxrpc_net *, gfp_t); 981 911 struct rxrpc_connection *rxrpc_find_client_connection_rcu(struct rxrpc_local *, 982 912 struct sockaddr_rxrpc *, ··· 1033 961 */ 1034 962 int rxrpc_encap_rcv(struct sock *, struct sk_buff *); 1035 963 void rxrpc_error_report(struct sock *); 964 + bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why, 965 + s32 abort_code, int err); 1036 966 int rxrpc_io_thread(void *data); 1037 967 static inline void rxrpc_wake_up_io_thread(struct rxrpc_local *local) 1038 968 { 1039 969 wake_up_process(local->io_thread); 970 + } 971 + 972 + static inline bool rxrpc_protocol_error(struct sk_buff *skb, enum rxrpc_abort_reason why) 973 + { 974 + return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EPROTO); 1040 975 } 1041 976 1042 977 /* ··· 1127 1048 int rxrpc_send_ack_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb); 1128 1049 int rxrpc_send_abort_packet(struct rxrpc_call *); 1129 1050 int rxrpc_send_data_packet(struct rxrpc_call *, struct rxrpc_txbuf *); 1051 + void rxrpc_send_conn_abort(struct rxrpc_connection *conn); 1130 1052 void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb); 1131 1053 void rxrpc_send_keepalive(struct rxrpc_peer *); 1132 1054 void rxrpc_transmit_one(struct rxrpc_call *call, struct rxrpc_txbuf *txb); ··· 1143 1063 */ 1144 1064 struct rxrpc_peer *rxrpc_lookup_peer_rcu(struct rxrpc_local *, 1145 1065 const struct sockaddr_rxrpc *); 1146 - struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *, struct rxrpc_local *, 1147 - struct sockaddr_rxrpc *, gfp_t); 1066 + struct rxrpc_peer 
*rxrpc_lookup_peer(struct rxrpc_local *local, 1067 + struct sockaddr_rxrpc *srx, gfp_t gfp); 1148 1068 struct rxrpc_peer *rxrpc_alloc_peer(struct rxrpc_local *, gfp_t, 1149 1069 enum rxrpc_peer_trace); 1150 - void rxrpc_new_incoming_peer(struct rxrpc_sock *, struct rxrpc_local *, 1151 - struct rxrpc_peer *); 1070 + void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer); 1152 1071 void rxrpc_destroy_all_peers(struct rxrpc_net *); 1153 1072 struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *, enum rxrpc_peer_trace); 1154 1073 struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *, enum rxrpc_peer_trace); ··· 1165 1086 * recvmsg.c 1166 1087 */ 1167 1088 void rxrpc_notify_socket(struct rxrpc_call *); 1168 - bool __rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int); 1169 - bool rxrpc_set_call_completion(struct rxrpc_call *, enum rxrpc_call_completion, u32, int); 1170 - bool __rxrpc_call_completed(struct rxrpc_call *); 1171 - bool rxrpc_call_completed(struct rxrpc_call *); 1172 - bool __rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int); 1173 - bool rxrpc_abort_call(const char *, struct rxrpc_call *, rxrpc_seq_t, u32, int); 1174 1089 int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int); 1175 1090 1176 1091 /* 1177 1092 * Abort a call due to a protocol error. 
1178 1093 */ 1179 - static inline bool __rxrpc_abort_eproto(struct rxrpc_call *call, 1180 - struct sk_buff *skb, 1181 - const char *eproto_why, 1182 - const char *why, 1183 - u32 abort_code) 1094 + static inline int rxrpc_abort_eproto(struct rxrpc_call *call, 1095 + struct sk_buff *skb, 1096 + s32 abort_code, 1097 + enum rxrpc_abort_reason why) 1184 1098 { 1185 1099 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 1186 1100 1187 - trace_rxrpc_rx_eproto(call, sp->hdr.serial, eproto_why); 1188 - return rxrpc_abort_call(why, call, sp->hdr.seq, abort_code, -EPROTO); 1101 + rxrpc_abort_call(call, sp->hdr.seq, abort_code, -EPROTO, why); 1102 + return -EPROTO; 1189 1103 } 1190 - 1191 - #define rxrpc_abort_eproto(call, skb, eproto_why, abort_why, abort_code) \ 1192 - __rxrpc_abort_eproto((call), (skb), tracepoint_string(eproto_why), \ 1193 - (abort_why), (abort_code)) 1194 1104 1195 1105 /* 1196 1106 * rtt.c ··· 1212 1144 /* 1213 1145 * sendmsg.c 1214 1146 */ 1147 + bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error, 1148 + enum rxrpc_abort_reason why); 1215 1149 int rxrpc_do_sendmsg(struct rxrpc_sock *, struct msghdr *, size_t); 1216 1150 1217 1151 /*
+28 -29
net/rxrpc/call_accept.c
··· 99 99 if (!call) 100 100 return -ENOMEM; 101 101 call->flags |= (1 << RXRPC_CALL_IS_SERVICE); 102 - call->state = RXRPC_CALL_SERVER_PREALLOC; 102 + rxrpc_set_call_state(call, RXRPC_CALL_SERVER_PREALLOC); 103 103 __set_bit(RXRPC_CALL_EV_INITIAL_PING, &call->events); 104 104 105 105 trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ··· 280 280 (peer_tail + 1) & 281 281 (RXRPC_BACKLOG_MAX - 1)); 282 282 283 - rxrpc_new_incoming_peer(rx, local, peer); 283 + rxrpc_new_incoming_peer(local, peer); 284 284 } 285 285 286 286 /* Now allocate and set up the connection */ ··· 326 326 * If we want to report an error, we mark the skb with the packet type and 327 327 * abort code and return false. 328 328 */ 329 - int rxrpc_new_incoming_call(struct rxrpc_local *local, 330 - struct rxrpc_peer *peer, 331 - struct rxrpc_connection *conn, 332 - struct sockaddr_rxrpc *peer_srx, 333 - struct sk_buff *skb) 329 + bool rxrpc_new_incoming_call(struct rxrpc_local *local, 330 + struct rxrpc_peer *peer, 331 + struct rxrpc_connection *conn, 332 + struct sockaddr_rxrpc *peer_srx, 333 + struct sk_buff *skb) 334 334 { 335 335 const struct rxrpc_security *sec = NULL; 336 336 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); ··· 339 339 340 340 _enter(""); 341 341 342 - /* Don't set up a call for anything other than the first DATA packet. */ 343 - if (sp->hdr.seq != 1 || 344 - sp->hdr.type != RXRPC_PACKET_TYPE_DATA) 345 - return 0; /* Just discard */ 342 + /* Don't set up a call for anything other than a DATA packet. */ 343 + if (sp->hdr.type != RXRPC_PACKET_TYPE_DATA) 344 + return rxrpc_protocol_error(skb, rxrpc_eproto_no_service_call); 346 345 347 - rcu_read_lock(); 346 + read_lock(&local->services_lock); 348 347 349 348 /* Weed out packets to services we're not offering. Packets that would 350 349 * begin a call are explicitly rejected and the rest are just 351 350 * discarded. 
352 351 */ 353 - rx = rcu_dereference(local->service); 352 + rx = local->service; 354 353 if (!rx || (sp->hdr.serviceId != rx->srx.srx_service && 355 354 sp->hdr.serviceId != rx->second_service) 356 355 ) { ··· 362 363 if (!conn) { 363 364 sec = rxrpc_get_incoming_security(rx, skb); 364 365 if (!sec) 365 - goto reject; 366 + goto unsupported_security; 366 367 } 367 368 368 369 spin_lock(&rx->incoming_lock); 369 370 if (rx->sk.sk_state == RXRPC_SERVER_LISTEN_DISABLED || 370 371 rx->sk.sk_state == RXRPC_CLOSE) { 371 - trace_rxrpc_abort(0, "CLS", sp->hdr.cid, sp->hdr.callNumber, 372 - sp->hdr.seq, RX_INVALID_OPERATION, ESHUTDOWN); 373 - skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; 374 - skb->priority = RX_INVALID_OPERATION; 372 + rxrpc_direct_abort(skb, rxrpc_abort_shut_down, 373 + RX_INVALID_OPERATION, -ESHUTDOWN); 375 374 goto no_call; 376 375 } 377 376 ··· 399 402 spin_unlock(&conn->state_lock); 400 403 401 404 spin_unlock(&rx->incoming_lock); 402 - rcu_read_unlock(); 405 + read_unlock(&local->services_lock); 403 406 404 407 if (hlist_unhashed(&call->error_link)) { 405 408 spin_lock(&call->peer->lock); ··· 410 413 _leave(" = %p{%d}", call, call->debug_id); 411 414 rxrpc_input_call_event(call, skb); 412 415 rxrpc_put_call(call, rxrpc_call_put_input); 413 - return 0; 416 + return true; 414 417 415 418 unsupported_service: 416 - trace_rxrpc_abort(0, "INV", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 417 - RX_INVALID_OPERATION, EOPNOTSUPP); 418 - skb->priority = RX_INVALID_OPERATION; 419 - goto reject; 419 + read_unlock(&local->services_lock); 420 + return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered, 421 + RX_INVALID_OPERATION, -EOPNOTSUPP); 422 + unsupported_security: 423 + read_unlock(&local->services_lock); 424 + return rxrpc_direct_abort(skb, rxrpc_abort_service_not_offered, 425 + RX_INVALID_OPERATION, -EKEYREJECTED); 420 426 no_call: 421 427 spin_unlock(&rx->incoming_lock); 422 - reject: 423 - rcu_read_unlock(); 428 + read_unlock(&local->services_lock); 
424 429 _leave(" = f [%u]", skb->mark); 425 - return -EPROTO; 430 + return false; 426 431 discard: 427 - rcu_read_unlock(); 428 - return 0; 432 + read_unlock(&local->services_lock); 433 + return true; 429 434 } 430 435 431 436 /*
+71 -15
net/rxrpc/call_event.c
··· 251 251 _leave(""); 252 252 } 253 253 254 + /* 255 + * Start transmitting the reply to a service. This cancels the need to ACK the 256 + * request if we haven't yet done so. 257 + */ 258 + static void rxrpc_begin_service_reply(struct rxrpc_call *call) 259 + { 260 + unsigned long now = jiffies; 261 + 262 + rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SEND_REPLY); 263 + WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET); 264 + if (call->ackr_reason == RXRPC_ACK_DELAY) 265 + call->ackr_reason = 0; 266 + trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now); 267 + } 268 + 269 + /* 270 + * Close the transmission phase. After this point there is no more data to be 271 + * transmitted in the call. 272 + */ 273 + static void rxrpc_close_tx_phase(struct rxrpc_call *call) 274 + { 275 + _debug("________awaiting reply/ACK__________"); 276 + 277 + switch (__rxrpc_call_state(call)) { 278 + case RXRPC_CALL_CLIENT_SEND_REQUEST: 279 + rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY); 280 + break; 281 + case RXRPC_CALL_SERVER_SEND_REPLY: 282 + rxrpc_set_call_state(call, RXRPC_CALL_SERVER_AWAIT_ACK); 283 + break; 284 + default: 285 + break; 286 + } 287 + } 288 + 254 289 static bool rxrpc_tx_window_has_space(struct rxrpc_call *call) 255 290 { 256 291 unsigned int winsize = min_t(unsigned int, call->tx_winsize, ··· 305 270 { 306 271 struct rxrpc_txbuf *txb; 307 272 308 - if (rxrpc_is_client_call(call) && 309 - !test_bit(RXRPC_CALL_EXPOSED, &call->flags)) 273 + if (!test_bit(RXRPC_CALL_EXPOSED, &call->flags)) { 274 + if (list_empty(&call->tx_sendmsg)) 275 + return; 310 276 rxrpc_expose_client_call(call); 277 + } 311 278 312 279 while ((txb = list_first_entry_or_null(&call->tx_sendmsg, 313 280 struct rxrpc_txbuf, call_link))) { ··· 320 283 call->tx_top = txb->seq; 321 284 list_add_tail(&txb->call_link, &call->tx_buffer); 322 285 286 + if (txb->wire.flags & RXRPC_LAST_PACKET) 287 + rxrpc_close_tx_phase(call); 288 + 323 289 rxrpc_transmit_one(call, txb); 324 290 
325 291 if (!rxrpc_tx_window_has_space(call)) ··· 332 292 333 293 static void rxrpc_transmit_some_data(struct rxrpc_call *call) 334 294 { 335 - switch (call->state) { 295 + switch (__rxrpc_call_state(call)) { 336 296 case RXRPC_CALL_SERVER_ACK_REQUEST: 337 297 if (list_empty(&call->tx_sendmsg)) 338 298 return; 299 + rxrpc_begin_service_reply(call); 339 300 fallthrough; 340 301 341 302 case RXRPC_CALL_SERVER_SEND_REPLY: 342 - case RXRPC_CALL_SERVER_AWAIT_ACK: 343 303 case RXRPC_CALL_CLIENT_SEND_REQUEST: 344 - case RXRPC_CALL_CLIENT_AWAIT_REPLY: 345 304 if (!rxrpc_tx_window_has_space(call)) 346 305 return; 347 306 if (list_empty(&call->tx_sendmsg)) { ··· 370 331 /* 371 332 * Handle retransmission and deferred ACK/abort generation. 372 333 */ 373 - void rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb) 334 + bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb) 374 335 { 375 336 unsigned long now, next, t; 376 337 rxrpc_serial_t ackr_serial; 377 338 bool resend = false, expired = false; 339 + s32 abort_code; 378 340 379 341 rxrpc_see_call(call, rxrpc_call_see_input); 380 342 381 343 //printk("\n--------------------\n"); 382 344 _enter("{%d,%s,%lx}", 383 - call->debug_id, rxrpc_call_states[call->state], call->events); 345 + call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)], 346 + call->events); 384 347 385 - if (call->state == RXRPC_CALL_COMPLETE) 348 + if (__rxrpc_call_is_complete(call)) 386 349 goto out; 350 + 351 + /* Handle abort request locklessly, vs rxrpc_propose_abort(). 
*/ 352 + abort_code = smp_load_acquire(&call->send_abort); 353 + if (abort_code) { 354 + rxrpc_abort_call(call, 0, call->send_abort, call->send_abort_err, 355 + call->send_abort_why); 356 + goto out; 357 + } 387 358 388 359 if (skb && skb->mark == RXRPC_SKB_MARK_ERROR) 389 360 goto out; ··· 407 358 } 408 359 409 360 t = READ_ONCE(call->expect_req_by); 410 - if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST && 361 + if (__rxrpc_call_state(call) == RXRPC_CALL_SERVER_RECV_REQUEST && 411 362 time_after_eq(now, t)) { 412 363 trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now); 413 364 expired = true; ··· 478 429 if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) && 479 430 (int)call->conn->hi_serial - (int)call->rx_serial > 0) { 480 431 trace_rxrpc_call_reset(call); 481 - rxrpc_abort_call("EXP", call, 0, RX_CALL_DEAD, -ECONNRESET); 432 + rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ECONNRESET, 433 + rxrpc_abort_call_reset); 482 434 } else { 483 - rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, -ETIME); 435 + rxrpc_abort_call(call, 0, RX_CALL_TIMEOUT, -ETIME, 436 + rxrpc_abort_call_timeout); 484 437 } 485 - rxrpc_send_abort_packet(call); 486 438 goto out; 487 439 } 488 440 ··· 491 441 rxrpc_send_ACK(call, RXRPC_ACK_PING, 0, 492 442 rxrpc_propose_ack_ping_for_lost_ack); 493 443 494 - if (resend && call->state != RXRPC_CALL_CLIENT_RECV_REPLY) 444 + if (resend && __rxrpc_call_state(call) != RXRPC_CALL_CLIENT_RECV_REPLY) 495 445 rxrpc_resend(call, NULL); 496 446 497 447 if (test_and_clear_bit(RXRPC_CALL_RX_IS_IDLE, &call->flags)) ··· 503 453 rxrpc_propose_ack_input_data); 504 454 505 455 /* Make sure the timer is restarted */ 506 - if (call->state != RXRPC_CALL_COMPLETE) { 456 + if (!__rxrpc_call_is_complete(call)) { 507 457 next = call->expect_rx_by; 508 458 509 459 #define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; } ··· 524 474 } 525 475 526 476 out: 527 - if (call->state == RXRPC_CALL_COMPLETE) 477 + if (__rxrpc_call_is_complete(call)) { 528 478 
del_timer_sync(&call->timer); 479 + if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) 480 + rxrpc_disconnect_call(call); 481 + if (call->security) 482 + call->security->free_call_crypto(call); 483 + } 529 484 if (call->acks_hard_ack != call->tx_bottom) 530 485 rxrpc_shrink_call_tx_buffer(call); 531 486 _leave(""); 487 + return true; 532 488 }
+70 -46
net/rxrpc/call_object.c
··· 50 50 struct rxrpc_local *local = call->local; 51 51 bool busy; 52 52 53 - if (call->state < RXRPC_CALL_COMPLETE) { 53 + if (!test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) { 54 54 spin_lock_bh(&local->lock); 55 55 busy = !list_empty(&call->attend_link); 56 56 trace_rxrpc_poke_call(call, busy, what); ··· 69 69 70 70 _enter("%d", call->debug_id); 71 71 72 - if (call->state < RXRPC_CALL_COMPLETE) { 72 + if (!__rxrpc_call_is_complete(call)) { 73 73 trace_rxrpc_timer_expired(call, jiffies); 74 74 rxrpc_poke_call(call, rxrpc_call_poke_timer); 75 75 } ··· 150 150 timer_setup(&call->timer, rxrpc_call_timer_expired, 0); 151 151 INIT_WORK(&call->destroyer, rxrpc_destroy_call); 152 152 INIT_LIST_HEAD(&call->link); 153 - INIT_LIST_HEAD(&call->chan_wait_link); 153 + INIT_LIST_HEAD(&call->wait_link); 154 154 INIT_LIST_HEAD(&call->accept_link); 155 155 INIT_LIST_HEAD(&call->recvmsg_link); 156 156 INIT_LIST_HEAD(&call->sock_link); ··· 162 162 init_waitqueue_head(&call->waitq); 163 163 spin_lock_init(&call->notify_lock); 164 164 spin_lock_init(&call->tx_lock); 165 - rwlock_init(&call->state_lock); 166 165 refcount_set(&call->ref, 1); 167 166 call->debug_id = debug_id; 168 167 call->tx_total_len = -1; ··· 210 211 now = ktime_get_real(); 211 212 call->acks_latest_ts = now; 212 213 call->cong_tstamp = now; 213 - call->state = RXRPC_CALL_CLIENT_AWAIT_CONN; 214 214 call->dest_srx = *srx; 215 215 call->interruptibility = p->interruptibility; 216 216 call->tx_total_len = p->tx_total_len; ··· 225 227 226 228 ret = rxrpc_init_client_call_security(call); 227 229 if (ret < 0) { 228 - __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret); 230 + rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret); 229 231 rxrpc_put_call(call, rxrpc_call_put_discard_error); 230 232 return ERR_PTR(ret); 231 233 } 234 + 235 + rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_CONN); 232 236 233 237 trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 234 238 p->user_call_ID, 
rxrpc_call_new_client); ··· 242 242 /* 243 243 * Initiate the call ack/resend/expiry timer. 244 244 */ 245 - static void rxrpc_start_call_timer(struct rxrpc_call *call) 245 + void rxrpc_start_call_timer(struct rxrpc_call *call) 246 246 { 247 247 unsigned long now = jiffies; 248 248 unsigned long j = now + MAX_JIFFY_OFFSET; ··· 284 284 if (test_bit(RXRPC_CALL_KERNEL, &call->flags)) 285 285 limiter = &rxrpc_kernel_call_limiter; 286 286 up(limiter); 287 + } 288 + 289 + /* 290 + * Start the process of connecting a call. We obtain a peer and a connection 291 + * bundle, but the actual association of a call with a connection is offloaded 292 + * to the I/O thread to simplify locking. 293 + */ 294 + static int rxrpc_connect_call(struct rxrpc_call *call, gfp_t gfp) 295 + { 296 + struct rxrpc_local *local = call->local; 297 + int ret = 0; 298 + 299 + _enter("{%d,%lx},", call->debug_id, call->user_call_ID); 300 + 301 + call->peer = rxrpc_lookup_peer(local, &call->dest_srx, gfp); 302 + if (!call->peer) 303 + goto error; 304 + 305 + ret = rxrpc_look_up_bundle(call, gfp); 306 + if (ret < 0) 307 + goto error; 308 + 309 + trace_rxrpc_client(NULL, -1, rxrpc_client_queue_new_call); 310 + rxrpc_get_call(call, rxrpc_call_get_io_thread); 311 + spin_lock(&local->client_call_lock); 312 + list_add_tail(&call->wait_link, &local->new_client_calls); 313 + spin_unlock(&local->client_call_lock); 314 + rxrpc_wake_up_io_thread(local); 315 + return 0; 316 + 317 + error: 318 + __set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); 319 + return ret; 287 320 } 288 321 289 322 /* ··· 398 365 /* Set up or get a connection record and set the protocol parameters, 399 366 * including channel number and call ID. 
400 367 */ 401 - ret = rxrpc_connect_call(rx, call, cp, srx, gfp); 368 + ret = rxrpc_connect_call(call, gfp); 402 369 if (ret < 0) 403 370 goto error_attached_to_socket; 404 - 405 - rxrpc_see_call(call, rxrpc_call_see_connected); 406 - 407 - rxrpc_start_call_timer(call); 408 371 409 372 _leave(" = %p [new]", call); 410 373 return call; ··· 413 384 error_dup_user_ID: 414 385 write_unlock(&rx->call_lock); 415 386 release_sock(&rx->sk); 416 - __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 417 - RX_CALL_DEAD, -EEXIST); 387 + rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, -EEXIST); 418 388 trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 0, 419 389 rxrpc_call_see_userid_exists); 420 - rxrpc_release_call(rx, call); 421 390 mutex_unlock(&call->user_mutex); 422 391 rxrpc_put_call(call, rxrpc_call_put_userid_exists); 423 392 _leave(" = -EEXIST"); 424 393 return ERR_PTR(-EEXIST); 425 394 426 395 /* We got an error, but the call is attached to the socket and is in 427 - * need of release. However, we might now race with recvmsg() when 428 - * completing the call queues it. Return 0 from sys_sendmsg() and 396 + * need of release. However, we might now race with recvmsg() when it 397 + * completion notifies the socket. Return 0 from sys_sendmsg() and 429 398 * leave the error to recvmsg() to deal with. 
430 399 */ 431 400 error_attached_to_socket: 432 401 trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), ret, 433 402 rxrpc_call_see_connect_failed); 434 - set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); 435 - __rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 436 - RX_CALL_DEAD, ret); 403 + rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret); 437 404 _leave(" = c=%08x [err]", call->debug_id); 438 405 return call; 439 406 } ··· 452 427 call->call_id = sp->hdr.callNumber; 453 428 call->dest_srx.srx_service = sp->hdr.serviceId; 454 429 call->cid = sp->hdr.cid; 455 - call->state = RXRPC_CALL_SERVER_SECURING; 456 430 call->cong_tstamp = skb->tstamp; 431 + 432 + __set_bit(RXRPC_CALL_EXPOSED, &call->flags); 433 + rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING); 457 434 458 435 spin_lock(&conn->state_lock); 459 436 460 437 switch (conn->state) { 461 438 case RXRPC_CONN_SERVICE_UNSECURED: 462 439 case RXRPC_CONN_SERVICE_CHALLENGING: 463 - call->state = RXRPC_CALL_SERVER_SECURING; 440 + rxrpc_set_call_state(call, RXRPC_CALL_SERVER_SECURING); 464 441 break; 465 442 case RXRPC_CONN_SERVICE: 466 - call->state = RXRPC_CALL_SERVER_RECV_REQUEST; 443 + rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST); 467 444 break; 468 445 469 - case RXRPC_CONN_REMOTELY_ABORTED: 470 - __rxrpc_set_call_completion(call, RXRPC_CALL_REMOTELY_ABORTED, 471 - conn->abort_code, conn->error); 472 - break; 473 - case RXRPC_CONN_LOCALLY_ABORTED: 474 - __rxrpc_abort_call("CON", call, 1, 475 - conn->abort_code, conn->error); 446 + case RXRPC_CONN_ABORTED: 447 + rxrpc_set_call_completion(call, conn->completion, 448 + conn->abort_code, conn->error); 476 449 break; 477 450 default: 478 451 BUG(); 479 452 } 453 + 454 + rxrpc_get_call(call, rxrpc_call_get_io_thread); 480 455 481 456 /* Set the channel for this call. 
We don't get channel_lock as we're 482 457 * only defending against the data_ready handler (which we're called ··· 487 462 chan = sp->hdr.cid & RXRPC_CHANNELMASK; 488 463 conn->channels[chan].call_counter = call->call_id; 489 464 conn->channels[chan].call_id = call->call_id; 490 - rcu_assign_pointer(conn->channels[chan].call, call); 465 + conn->channels[chan].call = call; 491 466 spin_unlock(&conn->state_lock); 492 467 493 468 spin_lock(&conn->peer->lock); ··· 547 522 void rxrpc_release_call(struct rxrpc_sock *rx, struct rxrpc_call *call) 548 523 { 549 524 struct rxrpc_connection *conn = call->conn; 550 - bool put = false; 525 + bool put = false, putu = false; 551 526 552 527 _enter("{%d,%d}", call->debug_id, refcount_read(&call->ref)); 553 528 554 529 trace_rxrpc_call(call->debug_id, refcount_read(&call->ref), 555 530 call->flags, rxrpc_call_see_release); 556 531 557 - ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); 558 - 559 532 if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags)) 560 533 BUG(); 561 534 562 535 rxrpc_put_call_slot(call); 563 - del_timer_sync(&call->timer); 564 536 565 537 /* Make sure we don't get any more notifications */ 566 538 write_lock(&rx->recvmsg_lock); ··· 582 560 if (test_and_clear_bit(RXRPC_CALL_HAS_USERID, &call->flags)) { 583 561 rb_erase(&call->sock_node, &rx->calls); 584 562 memset(&call->sock_node, 0xdd, sizeof(call->sock_node)); 585 - rxrpc_put_call(call, rxrpc_call_put_userid_exists); 563 + putu = true; 586 564 } 587 565 588 566 list_del(&call->sock_link); ··· 590 568 591 569 _debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn); 592 570 593 - if (conn && !test_bit(RXRPC_CALL_DISCONNECTED, &call->flags)) 594 - rxrpc_disconnect_call(call); 595 - if (call->security) 596 - call->security->free_call_crypto(call); 571 + if (putu) 572 + rxrpc_put_call(call, rxrpc_call_put_userid); 573 + 597 574 _leave(""); 598 575 } 599 576 ··· 609 588 call = list_entry(rx->to_be_accepted.next, 610 589 struct rxrpc_call, 
accept_link); 611 590 list_del(&call->accept_link); 612 - rxrpc_abort_call("SKR", call, 0, RX_CALL_DEAD, -ECONNRESET); 591 + rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET, 592 + rxrpc_abort_call_sock_release_tba); 613 593 rxrpc_put_call(call, rxrpc_call_put_release_sock_tba); 614 594 } 615 595 ··· 618 596 call = list_entry(rx->sock_calls.next, 619 597 struct rxrpc_call, sock_link); 620 598 rxrpc_get_call(call, rxrpc_call_get_release_sock); 621 - rxrpc_abort_call("SKT", call, 0, RX_CALL_DEAD, -ECONNRESET); 622 - rxrpc_send_abort_packet(call); 599 + rxrpc_propose_abort(call, RX_CALL_DEAD, -ECONNRESET, 600 + rxrpc_abort_call_sock_release); 623 601 rxrpc_release_call(rx, call); 624 602 rxrpc_put_call(call, rxrpc_call_put_release_sock); 625 603 } ··· 642 620 dead = __refcount_dec_and_test(&call->ref, &r); 643 621 trace_rxrpc_call(debug_id, r - 1, 0, why); 644 622 if (dead) { 645 - ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); 623 + ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE); 646 624 647 625 if (!list_empty(&call->link)) { 648 626 spin_lock(&rxnet->call_lock); ··· 691 669 692 670 rxrpc_put_txbuf(call->tx_pending, rxrpc_txbuf_put_cleaned); 693 671 rxrpc_put_connection(call->conn, rxrpc_conn_put_call); 672 + rxrpc_deactivate_bundle(call->bundle); 673 + rxrpc_put_bundle(call->bundle, rxrpc_bundle_put_call); 694 674 rxrpc_put_peer(call->peer, rxrpc_peer_put_call); 695 675 rxrpc_put_local(call->local, rxrpc_local_put_call); 696 676 call_rcu(&call->rcu, rxrpc_rcu_free_call); ··· 705 681 { 706 682 memset(&call->sock_node, 0xcd, sizeof(call->sock_node)); 707 683 708 - ASSERTCMP(call->state, ==, RXRPC_CALL_COMPLETE); 684 + ASSERTCMP(__rxrpc_call_state(call), ==, RXRPC_CALL_COMPLETE); 709 685 ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags)); 710 686 711 687 del_timer(&call->timer); ··· 743 719 744 720 pr_err("Call %p still in use (%d,%s,%lx,%lx)!\n", 745 721 call, refcount_read(&call->ref), 746 - rxrpc_call_states[call->state], 722 + 
rxrpc_call_states[__rxrpc_call_state(call)], 747 723 call->flags, call->events); 748 724 749 725 spin_unlock(&rxnet->call_lock);
+69
net/rxrpc/call_state.c
··· 1 + // SPDX-License-Identifier: GPL-2.0-or-later 2 + /* Call state changing functions. 3 + * 4 + * Copyright (C) 2022 Red Hat, Inc. All Rights Reserved. 5 + * Written by David Howells (dhowells@redhat.com) 6 + */ 7 + 8 + #include "ar-internal.h" 9 + 10 + /* 11 + * Transition a call to the complete state. 12 + */ 13 + bool rxrpc_set_call_completion(struct rxrpc_call *call, 14 + enum rxrpc_call_completion compl, 15 + u32 abort_code, 16 + int error) 17 + { 18 + if (__rxrpc_call_state(call) == RXRPC_CALL_COMPLETE) 19 + return false; 20 + 21 + call->abort_code = abort_code; 22 + call->error = error; 23 + call->completion = compl; 24 + /* Allow reader of completion state to operate locklessly */ 25 + rxrpc_set_call_state(call, RXRPC_CALL_COMPLETE); 26 + trace_rxrpc_call_complete(call); 27 + wake_up(&call->waitq); 28 + rxrpc_notify_socket(call); 29 + return true; 30 + } 31 + 32 + /* 33 + * Record that a call successfully completed. 34 + */ 35 + bool rxrpc_call_completed(struct rxrpc_call *call) 36 + { 37 + return rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0); 38 + } 39 + 40 + /* 41 + * Record that a call is locally aborted. 42 + */ 43 + bool rxrpc_abort_call(struct rxrpc_call *call, rxrpc_seq_t seq, 44 + u32 abort_code, int error, enum rxrpc_abort_reason why) 45 + { 46 + trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq, 47 + abort_code, error); 48 + if (!rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED, 49 + abort_code, error)) 50 + return false; 51 + if (test_bit(RXRPC_CALL_EXPOSED, &call->flags)) 52 + rxrpc_send_abort_packet(call); 53 + return true; 54 + } 55 + 56 + /* 57 + * Record that a call errored out before even getting off the ground, thereby 58 + * setting the state to allow it to be destroyed. 
59 + */ 60 + void rxrpc_prefail_call(struct rxrpc_call *call, enum rxrpc_call_completion compl, 61 + int error) 62 + { 63 + call->abort_code = RX_CALL_DEAD; 64 + call->error = error; 65 + call->completion = compl; 66 + call->_state = RXRPC_CALL_COMPLETE; 67 + trace_rxrpc_call_complete(call); 68 + WARN_ON_ONCE(__test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags)); 69 + }
+196 -513
net/rxrpc/conn_client.c
··· 34 34 __read_mostly unsigned long rxrpc_conn_idle_client_expiry = 2 * 60 * HZ; 35 35 __read_mostly unsigned long rxrpc_conn_idle_client_fast_expiry = 2 * HZ; 36 36 37 - /* 38 - * We use machine-unique IDs for our client connections. 39 - */ 40 - DEFINE_IDR(rxrpc_client_conn_ids); 41 - static DEFINE_SPINLOCK(rxrpc_conn_id_lock); 42 - 43 - static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle); 44 - 45 - /* 46 - * Get a connection ID and epoch for a client connection from the global pool. 47 - * The connection struct pointer is then recorded in the idr radix tree. The 48 - * epoch doesn't change until the client is rebooted (or, at least, unless the 49 - * module is unloaded). 50 - */ 51 - static int rxrpc_get_client_connection_id(struct rxrpc_connection *conn, 52 - gfp_t gfp) 37 + static void rxrpc_activate_bundle(struct rxrpc_bundle *bundle) 53 38 { 54 - struct rxrpc_net *rxnet = conn->rxnet; 55 - int id; 56 - 57 - _enter(""); 58 - 59 - idr_preload(gfp); 60 - spin_lock(&rxrpc_conn_id_lock); 61 - 62 - id = idr_alloc_cyclic(&rxrpc_client_conn_ids, conn, 63 - 1, 0x40000000, GFP_NOWAIT); 64 - if (id < 0) 65 - goto error; 66 - 67 - spin_unlock(&rxrpc_conn_id_lock); 68 - idr_preload_end(); 69 - 70 - conn->proto.epoch = rxnet->epoch; 71 - conn->proto.cid = id << RXRPC_CIDSHIFT; 72 - set_bit(RXRPC_CONN_HAS_IDR, &conn->flags); 73 - _leave(" [CID %x]", conn->proto.cid); 74 - return 0; 75 - 76 - error: 77 - spin_unlock(&rxrpc_conn_id_lock); 78 - idr_preload_end(); 79 - _leave(" = %d", id); 80 - return id; 39 + atomic_inc(&bundle->active); 81 40 } 82 41 83 42 /* 84 - * Release a connection ID for a client connection from the global pool. 43 + * Release a connection ID for a client connection. 
85 44 */ 86 - static void rxrpc_put_client_connection_id(struct rxrpc_connection *conn) 45 + static void rxrpc_put_client_connection_id(struct rxrpc_local *local, 46 + struct rxrpc_connection *conn) 87 47 { 88 - if (test_bit(RXRPC_CONN_HAS_IDR, &conn->flags)) { 89 - spin_lock(&rxrpc_conn_id_lock); 90 - idr_remove(&rxrpc_client_conn_ids, 91 - conn->proto.cid >> RXRPC_CIDSHIFT); 92 - spin_unlock(&rxrpc_conn_id_lock); 93 - } 48 + idr_remove(&local->conn_ids, conn->proto.cid >> RXRPC_CIDSHIFT); 94 49 } 95 50 96 51 /* 97 52 * Destroy the client connection ID tree. 98 53 */ 99 - void rxrpc_destroy_client_conn_ids(void) 54 + static void rxrpc_destroy_client_conn_ids(struct rxrpc_local *local) 100 55 { 101 56 struct rxrpc_connection *conn; 102 57 int id; 103 58 104 - if (!idr_is_empty(&rxrpc_client_conn_ids)) { 105 - idr_for_each_entry(&rxrpc_client_conn_ids, conn, id) { 59 + if (!idr_is_empty(&local->conn_ids)) { 60 + idr_for_each_entry(&local->conn_ids, conn, id) { 106 61 pr_err("AF_RXRPC: Leaked client conn %p {%d}\n", 107 62 conn, refcount_read(&conn->ref)); 108 63 } 109 64 BUG(); 110 65 } 111 66 112 - idr_destroy(&rxrpc_client_conn_ids); 67 + idr_destroy(&local->conn_ids); 113 68 } 114 69 115 70 /* 116 71 * Allocate a connection bundle. 
117 72 */ 118 - static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_conn_parameters *cp, 73 + static struct rxrpc_bundle *rxrpc_alloc_bundle(struct rxrpc_call *call, 119 74 gfp_t gfp) 120 75 { 121 76 struct rxrpc_bundle *bundle; 122 77 123 78 bundle = kzalloc(sizeof(*bundle), gfp); 124 79 if (bundle) { 125 - bundle->local = cp->local; 126 - bundle->peer = rxrpc_get_peer(cp->peer, rxrpc_peer_get_bundle); 127 - bundle->key = cp->key; 128 - bundle->exclusive = cp->exclusive; 129 - bundle->upgrade = cp->upgrade; 130 - bundle->service_id = cp->service_id; 131 - bundle->security_level = cp->security_level; 80 + bundle->local = call->local; 81 + bundle->peer = rxrpc_get_peer(call->peer, rxrpc_peer_get_bundle); 82 + bundle->key = key_get(call->key); 83 + bundle->security = call->security; 84 + bundle->exclusive = test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags); 85 + bundle->upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags); 86 + bundle->service_id = call->dest_srx.srx_service; 87 + bundle->security_level = call->security_level; 132 88 refcount_set(&bundle->ref, 1); 133 89 atomic_set(&bundle->active, 1); 134 - spin_lock_init(&bundle->channel_lock); 135 90 INIT_LIST_HEAD(&bundle->waiting_calls); 136 91 trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_new); 137 92 } ··· 107 152 { 108 153 trace_rxrpc_bundle(bundle->debug_id, 1, rxrpc_bundle_free); 109 154 rxrpc_put_peer(bundle->peer, rxrpc_peer_put_bundle); 155 + key_put(bundle->key); 110 156 kfree(bundle); 111 157 } 112 158 113 159 void rxrpc_put_bundle(struct rxrpc_bundle *bundle, enum rxrpc_bundle_trace why) 114 160 { 115 - unsigned int id = bundle->debug_id; 161 + unsigned int id; 116 162 bool dead; 117 163 int r; 118 164 119 - dead = __refcount_dec_and_test(&bundle->ref, &r); 120 - trace_rxrpc_bundle(id, r - 1, why); 121 - if (dead) 122 - rxrpc_free_bundle(bundle); 165 + if (bundle) { 166 + id = bundle->debug_id; 167 + dead = __refcount_dec_and_test(&bundle->ref, &r); 168 + trace_rxrpc_bundle(id, r - 1, why); 
169 + if (dead) 170 + rxrpc_free_bundle(bundle); 171 + } 172 + } 173 + 174 + /* 175 + * Get rid of outstanding client connection preallocations when a local 176 + * endpoint is destroyed. 177 + */ 178 + void rxrpc_purge_client_connections(struct rxrpc_local *local) 179 + { 180 + rxrpc_destroy_client_conn_ids(local); 123 181 } 124 182 125 183 /* 126 184 * Allocate a client connection. 127 185 */ 128 186 static struct rxrpc_connection * 129 - rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle, gfp_t gfp) 187 + rxrpc_alloc_client_connection(struct rxrpc_bundle *bundle) 130 188 { 131 189 struct rxrpc_connection *conn; 132 - struct rxrpc_net *rxnet = bundle->local->rxnet; 133 - int ret; 190 + struct rxrpc_local *local = bundle->local; 191 + struct rxrpc_net *rxnet = local->rxnet; 192 + int id; 134 193 135 194 _enter(""); 136 195 137 - conn = rxrpc_alloc_connection(rxnet, gfp); 138 - if (!conn) { 139 - _leave(" = -ENOMEM"); 196 + conn = rxrpc_alloc_connection(rxnet, GFP_ATOMIC | __GFP_NOWARN); 197 + if (!conn) 140 198 return ERR_PTR(-ENOMEM); 199 + 200 + id = idr_alloc_cyclic(&local->conn_ids, conn, 1, 0x40000000, 201 + GFP_ATOMIC | __GFP_NOWARN); 202 + if (id < 0) { 203 + kfree(conn); 204 + return ERR_PTR(id); 141 205 } 142 206 143 207 refcount_set(&conn->ref, 1); 144 - conn->bundle = bundle; 145 - conn->local = bundle->local; 146 - conn->peer = bundle->peer; 147 - conn->key = bundle->key; 208 + conn->proto.cid = id << RXRPC_CIDSHIFT; 209 + conn->proto.epoch = local->rxnet->epoch; 210 + conn->out_clientflag = RXRPC_CLIENT_INITIATED; 211 + conn->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn); 212 + conn->local = rxrpc_get_local(bundle->local, rxrpc_local_get_client_conn); 213 + conn->peer = rxrpc_get_peer(bundle->peer, rxrpc_peer_get_client_conn); 214 + conn->key = key_get(bundle->key); 215 + conn->security = bundle->security; 148 216 conn->exclusive = bundle->exclusive; 149 217 conn->upgrade = bundle->upgrade; 150 218 conn->orig_service_id = 
bundle->service_id; 151 219 conn->security_level = bundle->security_level; 152 - conn->out_clientflag = RXRPC_CLIENT_INITIATED; 153 - conn->state = RXRPC_CONN_CLIENT; 220 + conn->state = RXRPC_CONN_CLIENT_UNSECURED; 154 221 conn->service_id = conn->orig_service_id; 155 222 156 - ret = rxrpc_get_client_connection_id(conn, gfp); 157 - if (ret < 0) 158 - goto error_0; 159 - 160 - ret = rxrpc_init_client_conn_security(conn); 161 - if (ret < 0) 162 - goto error_1; 223 + if (conn->security == &rxrpc_no_security) 224 + conn->state = RXRPC_CONN_CLIENT; 163 225 164 226 atomic_inc(&rxnet->nr_conns); 165 227 write_lock(&rxnet->conn_lock); 166 228 list_add_tail(&conn->proc_link, &rxnet->conn_proc_list); 167 229 write_unlock(&rxnet->conn_lock); 168 230 169 - rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_conn); 170 - rxrpc_get_peer(conn->peer, rxrpc_peer_get_client_conn); 171 - rxrpc_get_local(conn->local, rxrpc_local_get_client_conn); 172 - key_get(conn->key); 173 - 174 - trace_rxrpc_conn(conn->debug_id, refcount_read(&conn->ref), 175 - rxrpc_conn_new_client); 231 + rxrpc_see_connection(conn, rxrpc_conn_new_client); 176 232 177 233 atomic_inc(&rxnet->nr_client_conns); 178 234 trace_rxrpc_client(conn, -1, rxrpc_client_alloc); 179 - _leave(" = %p", conn); 180 235 return conn; 181 - 182 - error_1: 183 - rxrpc_put_client_connection_id(conn); 184 - error_0: 185 - kfree(conn); 186 - _leave(" = %d", ret); 187 - return ERR_PTR(ret); 188 236 } 189 237 190 238 /* ··· 205 247 if (test_bit(RXRPC_CONN_DONT_REUSE, &conn->flags)) 206 248 goto dont_reuse; 207 249 208 - if (conn->state != RXRPC_CONN_CLIENT || 250 + if ((conn->state != RXRPC_CONN_CLIENT_UNSECURED && 251 + conn->state != RXRPC_CONN_CLIENT) || 209 252 conn->proto.epoch != rxnet->epoch) 210 253 goto mark_dont_reuse; 211 254 ··· 216 257 * times the maximum number of client conns away from the current 217 258 * allocation point to try and keep the IDs concentrated. 
218 259 */ 219 - id_cursor = idr_get_cursor(&rxrpc_client_conn_ids); 260 + id_cursor = idr_get_cursor(&conn->local->conn_ids); 220 261 id = conn->proto.cid >> RXRPC_CIDSHIFT; 221 262 distance = id - id_cursor; 222 263 if (distance < 0) ··· 237 278 * Look up the conn bundle that matches the connection parameters, adding it if 238 279 * it doesn't yet exist. 239 280 */ 240 - static struct rxrpc_bundle *rxrpc_look_up_bundle(struct rxrpc_conn_parameters *cp, 241 - gfp_t gfp) 281 + int rxrpc_look_up_bundle(struct rxrpc_call *call, gfp_t gfp) 242 282 { 243 283 static atomic_t rxrpc_bundle_id; 244 284 struct rxrpc_bundle *bundle, *candidate; 245 - struct rxrpc_local *local = cp->local; 285 + struct rxrpc_local *local = call->local; 246 286 struct rb_node *p, **pp, *parent; 247 287 long diff; 288 + bool upgrade = test_bit(RXRPC_CALL_UPGRADE, &call->flags); 248 289 249 290 _enter("{%px,%x,%u,%u}", 250 - cp->peer, key_serial(cp->key), cp->security_level, cp->upgrade); 291 + call->peer, key_serial(call->key), call->security_level, 292 + upgrade); 251 293 252 - if (cp->exclusive) 253 - return rxrpc_alloc_bundle(cp, gfp); 294 + if (test_bit(RXRPC_CALL_EXCLUSIVE, &call->flags)) { 295 + call->bundle = rxrpc_alloc_bundle(call, gfp); 296 + return call->bundle ? 0 : -ENOMEM; 297 + } 254 298 255 299 /* First, see if the bundle is already there. */ 256 300 _debug("search 1"); ··· 262 300 while (p) { 263 301 bundle = rb_entry(p, struct rxrpc_bundle, local_node); 264 302 265 - #define cmp(X) ((long)bundle->X - (long)cp->X) 266 - diff = (cmp(peer) ?: 267 - cmp(key) ?: 268 - cmp(security_level) ?: 269 - cmp(upgrade)); 303 + #define cmp(X, Y) ((long)(X) - (long)(Y)) 304 + diff = (cmp(bundle->peer, call->peer) ?: 305 + cmp(bundle->key, call->key) ?: 306 + cmp(bundle->security_level, call->security_level) ?: 307 + cmp(bundle->upgrade, upgrade)); 270 308 #undef cmp 271 309 if (diff < 0) 272 310 p = p->rb_left; ··· 279 317 _debug("not found"); 280 318 281 319 /* It wasn't. We need to add one. 
*/ 282 - candidate = rxrpc_alloc_bundle(cp, gfp); 320 + candidate = rxrpc_alloc_bundle(call, gfp); 283 321 if (!candidate) 284 - return NULL; 322 + return -ENOMEM; 285 323 286 324 _debug("search 2"); 287 325 spin_lock(&local->client_bundles_lock); ··· 291 329 parent = *pp; 292 330 bundle = rb_entry(parent, struct rxrpc_bundle, local_node); 293 331 294 - #define cmp(X) ((long)bundle->X - (long)cp->X) 295 - diff = (cmp(peer) ?: 296 - cmp(key) ?: 297 - cmp(security_level) ?: 298 - cmp(upgrade)); 332 + #define cmp(X, Y) ((long)(X) - (long)(Y)) 333 + diff = (cmp(bundle->peer, call->peer) ?: 334 + cmp(bundle->key, call->key) ?: 335 + cmp(bundle->security_level, call->security_level) ?: 336 + cmp(bundle->upgrade, upgrade)); 299 337 #undef cmp 300 338 if (diff < 0) 301 339 pp = &(*pp)->rb_left; ··· 309 347 candidate->debug_id = atomic_inc_return(&rxrpc_bundle_id); 310 348 rb_link_node(&candidate->local_node, parent, pp); 311 349 rb_insert_color(&candidate->local_node, &local->client_bundles); 312 - rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call); 350 + call->bundle = rxrpc_get_bundle(candidate, rxrpc_bundle_get_client_call); 313 351 spin_unlock(&local->client_bundles_lock); 314 - _leave(" = %u [new]", candidate->debug_id); 315 - return candidate; 352 + _leave(" = B=%u [new]", call->bundle->debug_id); 353 + return 0; 316 354 317 355 found_bundle_free: 318 356 rxrpc_free_bundle(candidate); 319 357 found_bundle: 320 - rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call); 321 - atomic_inc(&bundle->active); 358 + call->bundle = rxrpc_get_bundle(bundle, rxrpc_bundle_get_client_call); 359 + rxrpc_activate_bundle(bundle); 322 360 spin_unlock(&local->client_bundles_lock); 323 - _leave(" = %u [found]", bundle->debug_id); 324 - return bundle; 325 - } 326 - 327 - /* 328 - * Create or find a client bundle to use for a call. 329 - * 330 - * If we return with a connection, the call will be on its waiting list. 
It's 331 - * left to the caller to assign a channel and wake up the call. 332 - */ 333 - static struct rxrpc_bundle *rxrpc_prep_call(struct rxrpc_sock *rx, 334 - struct rxrpc_call *call, 335 - struct rxrpc_conn_parameters *cp, 336 - struct sockaddr_rxrpc *srx, 337 - gfp_t gfp) 338 - { 339 - struct rxrpc_bundle *bundle; 340 - 341 - _enter("{%d,%lx},", call->debug_id, call->user_call_ID); 342 - 343 - cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp); 344 - if (!cp->peer) 345 - goto error; 346 - 347 - call->tx_last_sent = ktime_get_real(); 348 - call->cong_ssthresh = cp->peer->cong_ssthresh; 349 - if (call->cong_cwnd >= call->cong_ssthresh) 350 - call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE; 351 - else 352 - call->cong_mode = RXRPC_CALL_SLOW_START; 353 - if (cp->upgrade) 354 - __set_bit(RXRPC_CALL_UPGRADE, &call->flags); 355 - 356 - /* Find the client connection bundle. */ 357 - bundle = rxrpc_look_up_bundle(cp, gfp); 358 - if (!bundle) 359 - goto error; 360 - 361 - /* Get this call queued. Someone else may activate it whilst we're 362 - * lining up a new connection, but that's fine. 363 - */ 364 - spin_lock(&bundle->channel_lock); 365 - list_add_tail(&call->chan_wait_link, &bundle->waiting_calls); 366 - spin_unlock(&bundle->channel_lock); 367 - 368 - _leave(" = [B=%x]", bundle->debug_id); 369 - return bundle; 370 - 371 - error: 372 - _leave(" = -ENOMEM"); 373 - return ERR_PTR(-ENOMEM); 361 + _leave(" = B=%u [found]", call->bundle->debug_id); 362 + return 0; 374 363 } 375 364 376 365 /* 377 366 * Allocate a new connection and add it into a bundle. 
378 367 */ 379 - static void rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, gfp_t gfp) 380 - __releases(bundle->channel_lock) 368 + static bool rxrpc_add_conn_to_bundle(struct rxrpc_bundle *bundle, 369 + unsigned int slot) 381 370 { 382 - struct rxrpc_connection *candidate = NULL, *old = NULL; 383 - bool conflict; 384 - int i; 371 + struct rxrpc_connection *conn, *old; 372 + unsigned int shift = slot * RXRPC_MAXCALLS; 373 + unsigned int i; 385 374 386 - _enter(""); 387 - 388 - conflict = bundle->alloc_conn; 389 - if (!conflict) 390 - bundle->alloc_conn = true; 391 - spin_unlock(&bundle->channel_lock); 392 - if (conflict) { 393 - _leave(" [conf]"); 394 - return; 375 + old = bundle->conns[slot]; 376 + if (old) { 377 + bundle->conns[slot] = NULL; 378 + trace_rxrpc_client(old, -1, rxrpc_client_replace); 379 + rxrpc_put_connection(old, rxrpc_conn_put_noreuse); 395 380 } 396 381 397 - candidate = rxrpc_alloc_client_connection(bundle, gfp); 398 - 399 - spin_lock(&bundle->channel_lock); 400 - bundle->alloc_conn = false; 401 - 402 - if (IS_ERR(candidate)) { 403 - bundle->alloc_error = PTR_ERR(candidate); 404 - spin_unlock(&bundle->channel_lock); 405 - _leave(" [err %ld]", PTR_ERR(candidate)); 406 - return; 382 + conn = rxrpc_alloc_client_connection(bundle); 383 + if (IS_ERR(conn)) { 384 + bundle->alloc_error = PTR_ERR(conn); 385 + return false; 407 386 } 408 387 409 - bundle->alloc_error = 0; 410 - 411 - for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) { 412 - unsigned int shift = i * RXRPC_MAXCALLS; 413 - int j; 414 - 415 - old = bundle->conns[i]; 416 - if (!rxrpc_may_reuse_conn(old)) { 417 - if (old) 418 - trace_rxrpc_client(old, -1, rxrpc_client_replace); 419 - candidate->bundle_shift = shift; 420 - atomic_inc(&bundle->active); 421 - bundle->conns[i] = candidate; 422 - for (j = 0; j < RXRPC_MAXCALLS; j++) 423 - set_bit(shift + j, &bundle->avail_chans); 424 - candidate = NULL; 425 - break; 426 - } 427 - 428 - old = NULL; 429 - } 430 - 431 - 
spin_unlock(&bundle->channel_lock); 432 - 433 - if (candidate) { 434 - _debug("discard C=%x", candidate->debug_id); 435 - trace_rxrpc_client(candidate, -1, rxrpc_client_duplicate); 436 - rxrpc_put_connection(candidate, rxrpc_conn_put_discard); 437 - } 438 - 439 - rxrpc_put_connection(old, rxrpc_conn_put_noreuse); 440 - _leave(""); 388 + rxrpc_activate_bundle(bundle); 389 + conn->bundle_shift = shift; 390 + bundle->conns[slot] = conn; 391 + for (i = 0; i < RXRPC_MAXCALLS; i++) 392 + set_bit(shift + i, &bundle->avail_chans); 393 + return true; 441 394 } 442 395 443 396 /* 444 397 * Add a connection to a bundle if there are no usable connections or we have 445 398 * connections waiting for extra capacity. 446 399 */ 447 - static void rxrpc_maybe_add_conn(struct rxrpc_bundle *bundle, gfp_t gfp) 400 + static bool rxrpc_bundle_has_space(struct rxrpc_bundle *bundle) 448 401 { 449 - struct rxrpc_call *call; 450 - int i, usable; 402 + int slot = -1, i, usable; 451 403 452 404 _enter(""); 453 405 454 - spin_lock(&bundle->channel_lock); 406 + bundle->alloc_error = 0; 455 407 456 408 /* See if there are any usable connections. 
*/ 457 409 usable = 0; 458 - for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) 410 + for (i = 0; i < ARRAY_SIZE(bundle->conns); i++) { 459 411 if (rxrpc_may_reuse_conn(bundle->conns[i])) 460 412 usable++; 461 - 462 - if (!usable && !list_empty(&bundle->waiting_calls)) { 463 - call = list_first_entry(&bundle->waiting_calls, 464 - struct rxrpc_call, chan_wait_link); 465 - if (test_bit(RXRPC_CALL_UPGRADE, &call->flags)) 466 - bundle->try_upgrade = true; 413 + else if (slot == -1) 414 + slot = i; 467 415 } 416 + 417 + if (!usable && bundle->upgrade) 418 + bundle->try_upgrade = true; 468 419 469 420 if (!usable) 470 421 goto alloc_conn; 471 422 472 423 if (!bundle->avail_chans && 473 424 !bundle->try_upgrade && 474 - !list_empty(&bundle->waiting_calls) && 475 425 usable < ARRAY_SIZE(bundle->conns)) 476 426 goto alloc_conn; 477 427 478 - spin_unlock(&bundle->channel_lock); 479 428 _leave(""); 480 - return; 429 + return usable; 481 430 482 431 alloc_conn: 483 - return rxrpc_add_conn_to_bundle(bundle, gfp); 432 + return slot >= 0 ? 
rxrpc_add_conn_to_bundle(bundle, slot) : false; 484 433 } 485 434 486 435 /* ··· 405 532 struct rxrpc_channel *chan = &conn->channels[channel]; 406 533 struct rxrpc_bundle *bundle = conn->bundle; 407 534 struct rxrpc_call *call = list_entry(bundle->waiting_calls.next, 408 - struct rxrpc_call, chan_wait_link); 535 + struct rxrpc_call, wait_link); 409 536 u32 call_id = chan->call_counter + 1; 410 537 411 538 _enter("C=%x,%u", conn->debug_id, channel); 539 + 540 + list_del_init(&call->wait_link); 412 541 413 542 trace_rxrpc_client(conn, channel, rxrpc_client_chan_activate); 414 543 ··· 421 546 clear_bit(conn->bundle_shift + channel, &bundle->avail_chans); 422 547 423 548 rxrpc_see_call(call, rxrpc_call_see_activate_client); 424 - list_del_init(&call->chan_wait_link); 425 - call->peer = rxrpc_get_peer(conn->peer, rxrpc_peer_get_activate_call); 426 549 call->conn = rxrpc_get_connection(conn, rxrpc_conn_get_activate_call); 427 550 call->cid = conn->proto.cid | channel; 428 551 call->call_id = call_id; 429 552 call->dest_srx.srx_service = conn->service_id; 430 - 431 - trace_rxrpc_connect_call(call); 432 - 433 - write_lock(&call->state_lock); 434 - call->state = RXRPC_CALL_CLIENT_SEND_REQUEST; 435 - write_unlock(&call->state_lock); 436 - 437 - /* Paired with the read barrier in rxrpc_connect_call(). This orders 438 - * cid and epoch in the connection wrt to call_id without the need to 439 - * take the channel_lock. 440 - * 441 - * We provisionally assign a callNumber at this point, but we don't 442 - * confirm it until the call is about to be exposed. 443 - * 444 - * TODO: Pair with a barrier in the data_ready handler when that looks 445 - * at the call ID through a connection channel. 
446 - */ 447 - smp_wmb(); 553 + call->cong_ssthresh = call->peer->cong_ssthresh; 554 + if (call->cong_cwnd >= call->cong_ssthresh) 555 + call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE; 556 + else 557 + call->cong_mode = RXRPC_CALL_SLOW_START; 448 558 449 559 chan->call_id = call_id; 450 560 chan->call_debug_id = call->debug_id; 451 - rcu_assign_pointer(chan->call, call); 561 + chan->call = call; 562 + 563 + rxrpc_see_call(call, rxrpc_call_see_connected); 564 + trace_rxrpc_connect_call(call); 565 + call->tx_last_sent = ktime_get_real(); 566 + rxrpc_start_call_timer(call); 567 + rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_SEND_REQUEST); 452 568 wake_up(&call->waitq); 453 569 } 454 570 455 571 /* 456 572 * Remove a connection from the idle list if it's on it. 457 573 */ 458 - static void rxrpc_unidle_conn(struct rxrpc_bundle *bundle, struct rxrpc_connection *conn) 574 + static void rxrpc_unidle_conn(struct rxrpc_connection *conn) 459 575 { 460 - struct rxrpc_net *rxnet = bundle->local->rxnet; 461 - bool drop_ref; 462 - 463 576 if (!list_empty(&conn->cache_link)) { 464 - drop_ref = false; 465 - spin_lock(&rxnet->client_conn_cache_lock); 466 - if (!list_empty(&conn->cache_link)) { 467 - list_del_init(&conn->cache_link); 468 - drop_ref = true; 469 - } 470 - spin_unlock(&rxnet->client_conn_cache_lock); 471 - if (drop_ref) 472 - rxrpc_put_connection(conn, rxrpc_conn_put_unidle); 577 + list_del_init(&conn->cache_link); 578 + rxrpc_put_connection(conn, rxrpc_conn_put_unidle); 473 579 } 474 580 } 475 581 476 582 /* 477 - * Assign channels and callNumbers to waiting calls with channel_lock 478 - * held by caller. 583 + * Assign channels and callNumbers to waiting calls. 
479 584 */ 480 - static void rxrpc_activate_channels_locked(struct rxrpc_bundle *bundle) 585 + static void rxrpc_activate_channels(struct rxrpc_bundle *bundle) 481 586 { 482 587 struct rxrpc_connection *conn; 483 588 unsigned long avail, mask; 484 589 unsigned int channel, slot; 590 + 591 + trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans); 485 592 486 593 if (bundle->try_upgrade) 487 594 mask = 1; ··· 484 627 485 628 if (bundle->try_upgrade) 486 629 set_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags); 487 - rxrpc_unidle_conn(bundle, conn); 630 + rxrpc_unidle_conn(conn); 488 631 489 632 channel &= (RXRPC_MAXCALLS - 1); 490 633 conn->act_chans |= 1 << channel; ··· 493 636 } 494 637 495 638 /* 496 - * Assign channels and callNumbers to waiting calls. 639 + * Connect waiting channels (called from the I/O thread). 497 640 */ 498 - static void rxrpc_activate_channels(struct rxrpc_bundle *bundle) 641 + void rxrpc_connect_client_calls(struct rxrpc_local *local) 499 642 { 500 - _enter("B=%x", bundle->debug_id); 643 + struct rxrpc_call *call; 501 644 502 - trace_rxrpc_client(NULL, -1, rxrpc_client_activate_chans); 645 + while ((call = list_first_entry_or_null(&local->new_client_calls, 646 + struct rxrpc_call, wait_link)) 647 + ) { 648 + struct rxrpc_bundle *bundle = call->bundle; 503 649 504 - if (!bundle->avail_chans) 505 - return; 650 + spin_lock(&local->client_call_lock); 651 + list_move_tail(&call->wait_link, &bundle->waiting_calls); 652 + spin_unlock(&local->client_call_lock); 506 653 507 - spin_lock(&bundle->channel_lock); 508 - rxrpc_activate_channels_locked(bundle); 509 - spin_unlock(&bundle->channel_lock); 510 - _leave(""); 511 - } 512 - 513 - /* 514 - * Wait for a callNumber and a channel to be granted to a call. 
515 - */ 516 - static int rxrpc_wait_for_channel(struct rxrpc_bundle *bundle, 517 - struct rxrpc_call *call, gfp_t gfp) 518 - { 519 - DECLARE_WAITQUEUE(myself, current); 520 - int ret = 0; 521 - 522 - _enter("%d", call->debug_id); 523 - 524 - if (!gfpflags_allow_blocking(gfp)) { 525 - rxrpc_maybe_add_conn(bundle, gfp); 526 - rxrpc_activate_channels(bundle); 527 - ret = bundle->alloc_error ?: -EAGAIN; 528 - goto out; 654 + if (rxrpc_bundle_has_space(bundle)) 655 + rxrpc_activate_channels(bundle); 529 656 } 530 - 531 - add_wait_queue_exclusive(&call->waitq, &myself); 532 - for (;;) { 533 - rxrpc_maybe_add_conn(bundle, gfp); 534 - rxrpc_activate_channels(bundle); 535 - ret = bundle->alloc_error; 536 - if (ret < 0) 537 - break; 538 - 539 - switch (call->interruptibility) { 540 - case RXRPC_INTERRUPTIBLE: 541 - case RXRPC_PREINTERRUPTIBLE: 542 - set_current_state(TASK_INTERRUPTIBLE); 543 - break; 544 - case RXRPC_UNINTERRUPTIBLE: 545 - default: 546 - set_current_state(TASK_UNINTERRUPTIBLE); 547 - break; 548 - } 549 - if (READ_ONCE(call->state) != RXRPC_CALL_CLIENT_AWAIT_CONN) 550 - break; 551 - if ((call->interruptibility == RXRPC_INTERRUPTIBLE || 552 - call->interruptibility == RXRPC_PREINTERRUPTIBLE) && 553 - signal_pending(current)) { 554 - ret = -ERESTARTSYS; 555 - break; 556 - } 557 - schedule(); 558 - } 559 - remove_wait_queue(&call->waitq, &myself); 560 - __set_current_state(TASK_RUNNING); 561 - 562 - out: 563 - _leave(" = %d", ret); 564 - return ret; 565 - } 566 - 567 - /* 568 - * find a connection for a call 569 - * - called in process context with IRQs enabled 570 - */ 571 - int rxrpc_connect_call(struct rxrpc_sock *rx, 572 - struct rxrpc_call *call, 573 - struct rxrpc_conn_parameters *cp, 574 - struct sockaddr_rxrpc *srx, 575 - gfp_t gfp) 576 - { 577 - struct rxrpc_bundle *bundle; 578 - struct rxrpc_net *rxnet = cp->local->rxnet; 579 - int ret = 0; 580 - 581 - _enter("{%d,%lx},", call->debug_id, call->user_call_ID); 582 - 583 - 
rxrpc_discard_expired_client_conns(&rxnet->client_conn_reaper); 584 - 585 - bundle = rxrpc_prep_call(rx, call, cp, srx, gfp); 586 - if (IS_ERR(bundle)) { 587 - ret = PTR_ERR(bundle); 588 - goto out; 589 - } 590 - 591 - if (call->state == RXRPC_CALL_CLIENT_AWAIT_CONN) { 592 - ret = rxrpc_wait_for_channel(bundle, call, gfp); 593 - if (ret < 0) 594 - goto wait_failed; 595 - } 596 - 597 - granted_channel: 598 - /* Paired with the write barrier in rxrpc_activate_one_channel(). */ 599 - smp_rmb(); 600 - 601 - out_put_bundle: 602 - rxrpc_deactivate_bundle(bundle); 603 - rxrpc_put_bundle(bundle, rxrpc_bundle_get_client_call); 604 - out: 605 - _leave(" = %d", ret); 606 - return ret; 607 - 608 - wait_failed: 609 - spin_lock(&bundle->channel_lock); 610 - list_del_init(&call->chan_wait_link); 611 - spin_unlock(&bundle->channel_lock); 612 - 613 - if (call->state != RXRPC_CALL_CLIENT_AWAIT_CONN) { 614 - ret = 0; 615 - goto granted_channel; 616 - } 617 - 618 - trace_rxrpc_client(call->conn, ret, rxrpc_client_chan_wait_failed); 619 - rxrpc_set_call_completion(call, RXRPC_CALL_LOCAL_ERROR, 0, ret); 620 - rxrpc_disconnect_client_call(bundle, call); 621 - goto out_put_bundle; 622 657 } 623 658 624 659 /* ··· 543 794 /* 544 795 * Set the reap timer. 
545 796 */ 546 - static void rxrpc_set_client_reap_timer(struct rxrpc_net *rxnet) 797 + static void rxrpc_set_client_reap_timer(struct rxrpc_local *local) 547 798 { 548 - if (!rxnet->kill_all_client_conns) { 799 + if (!local->kill_all_client_conns) { 549 800 unsigned long now = jiffies; 550 801 unsigned long reap_at = now + rxrpc_conn_idle_client_expiry; 551 802 552 - if (rxnet->live) 553 - timer_reduce(&rxnet->client_conn_reap_timer, reap_at); 803 + if (local->rxnet->live) 804 + timer_reduce(&local->client_conn_reap_timer, reap_at); 554 805 } 555 806 } 556 807 ··· 561 812 { 562 813 struct rxrpc_connection *conn; 563 814 struct rxrpc_channel *chan = NULL; 564 - struct rxrpc_net *rxnet = bundle->local->rxnet; 815 + struct rxrpc_local *local = bundle->local; 565 816 unsigned int channel; 566 817 bool may_reuse; 567 818 u32 cid; 568 819 569 820 _enter("c=%x", call->debug_id); 570 - 571 - spin_lock(&bundle->channel_lock); 572 - set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); 573 821 574 822 /* Calls that have never actually been assigned a channel can simply be 575 823 * discarded. 
··· 576 830 _debug("call is waiting"); 577 831 ASSERTCMP(call->call_id, ==, 0); 578 832 ASSERT(!test_bit(RXRPC_CALL_EXPOSED, &call->flags)); 579 - list_del_init(&call->chan_wait_link); 580 - goto out; 833 + list_del_init(&call->wait_link); 834 + return; 581 835 } 582 836 583 837 cid = call->cid; ··· 585 839 chan = &conn->channels[channel]; 586 840 trace_rxrpc_client(conn, channel, rxrpc_client_chan_disconnect); 587 841 588 - if (rcu_access_pointer(chan->call) != call) { 589 - spin_unlock(&bundle->channel_lock); 590 - BUG(); 591 - } 842 + if (WARN_ON(chan->call != call)) 843 + return; 592 844 593 845 may_reuse = rxrpc_may_reuse_conn(conn); 594 846 ··· 607 863 trace_rxrpc_client(conn, channel, rxrpc_client_to_active); 608 864 bundle->try_upgrade = false; 609 865 if (may_reuse) 610 - rxrpc_activate_channels_locked(bundle); 866 + rxrpc_activate_channels(bundle); 611 867 } 612 - 613 868 } 614 869 615 870 /* See if we can pass the channel directly to another call. */ 616 871 if (may_reuse && !list_empty(&bundle->waiting_calls)) { 617 872 trace_rxrpc_client(conn, channel, rxrpc_client_chan_pass); 618 873 rxrpc_activate_one_channel(conn, channel); 619 - goto out; 874 + return; 620 875 } 621 876 622 877 /* Schedule the final ACK to be transmitted in a short while so that it ··· 633 890 } 634 891 635 892 /* Deactivate the channel. 
*/ 636 - rcu_assign_pointer(chan->call, NULL); 893 + chan->call = NULL; 637 894 set_bit(conn->bundle_shift + channel, &conn->bundle->avail_chans); 638 895 conn->act_chans &= ~(1 << channel); 639 896 ··· 646 903 conn->idle_timestamp = jiffies; 647 904 648 905 rxrpc_get_connection(conn, rxrpc_conn_get_idle); 649 - spin_lock(&rxnet->client_conn_cache_lock); 650 - list_move_tail(&conn->cache_link, &rxnet->idle_client_conns); 651 - spin_unlock(&rxnet->client_conn_cache_lock); 906 + list_move_tail(&conn->cache_link, &local->idle_client_conns); 652 907 653 - rxrpc_set_client_reap_timer(rxnet); 908 + rxrpc_set_client_reap_timer(local); 654 909 } 655 - 656 - out: 657 - spin_unlock(&bundle->channel_lock); 658 - _leave(""); 659 - return; 660 910 } 661 911 662 912 /* ··· 659 923 { 660 924 struct rxrpc_bundle *bundle = conn->bundle; 661 925 unsigned int bindex; 662 - bool need_drop = false; 663 926 int i; 664 927 665 928 _enter("C=%x", conn->debug_id); ··· 666 931 if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) 667 932 rxrpc_process_delayed_final_acks(conn, true); 668 933 669 - spin_lock(&bundle->channel_lock); 670 934 bindex = conn->bundle_shift / RXRPC_MAXCALLS; 671 935 if (bundle->conns[bindex] == conn) { 672 936 _debug("clear slot %u", bindex); 673 937 bundle->conns[bindex] = NULL; 674 938 for (i = 0; i < RXRPC_MAXCALLS; i++) 675 939 clear_bit(conn->bundle_shift + i, &bundle->avail_chans); 676 - need_drop = true; 677 - } 678 - spin_unlock(&bundle->channel_lock); 679 - 680 - if (need_drop) { 940 + rxrpc_put_client_connection_id(bundle->local, conn); 681 941 rxrpc_deactivate_bundle(bundle); 682 942 rxrpc_put_connection(conn, rxrpc_conn_put_unbundle); 683 943 } ··· 681 951 /* 682 952 * Drop the active count on a bundle. 
683 953 */ 684 - static void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle) 954 + void rxrpc_deactivate_bundle(struct rxrpc_bundle *bundle) 685 955 { 686 - struct rxrpc_local *local = bundle->local; 956 + struct rxrpc_local *local; 687 957 bool need_put = false; 688 958 959 + if (!bundle) 960 + return; 961 + 962 + local = bundle->local; 689 963 if (atomic_dec_and_lock(&bundle->active, &local->client_bundles_lock)) { 690 964 if (!bundle->exclusive) { 691 965 _debug("erase bundle"); ··· 716 982 trace_rxrpc_client(conn, -1, rxrpc_client_cleanup); 717 983 atomic_dec(&rxnet->nr_client_conns); 718 984 719 - rxrpc_put_client_connection_id(conn); 985 + rxrpc_put_client_connection_id(local, conn); 720 986 } 721 987 722 988 /* ··· 726 992 * This may be called from conn setup or from a work item so cannot be 727 993 * considered non-reentrant. 728 994 */ 729 - void rxrpc_discard_expired_client_conns(struct work_struct *work) 995 + void rxrpc_discard_expired_client_conns(struct rxrpc_local *local) 730 996 { 731 997 struct rxrpc_connection *conn; 732 - struct rxrpc_net *rxnet = 733 - container_of(work, struct rxrpc_net, client_conn_reaper); 734 998 unsigned long expiry, conn_expires_at, now; 735 999 unsigned int nr_conns; 736 1000 737 1001 _enter(""); 738 1002 739 - if (list_empty(&rxnet->idle_client_conns)) { 740 - _leave(" [empty]"); 741 - return; 742 - } 743 - 744 - /* Don't double up on the discarding */ 745 - if (!mutex_trylock(&rxnet->client_conn_discard_lock)) { 746 - _leave(" [already]"); 747 - return; 748 - } 749 - 750 1003 /* We keep an estimate of what the number of conns ought to be after 751 1004 * we've discarded some so that we don't overdo the discarding. 
752 1005 */ 753 - nr_conns = atomic_read(&rxnet->nr_client_conns); 1006 + nr_conns = atomic_read(&local->rxnet->nr_client_conns); 754 1007 755 1008 next: 756 - spin_lock(&rxnet->client_conn_cache_lock); 1009 + conn = list_first_entry_or_null(&local->idle_client_conns, 1010 + struct rxrpc_connection, cache_link); 1011 + if (!conn) 1012 + return; 757 1013 758 - if (list_empty(&rxnet->idle_client_conns)) 759 - goto out; 760 - 761 - conn = list_entry(rxnet->idle_client_conns.next, 762 - struct rxrpc_connection, cache_link); 763 - 764 - if (!rxnet->kill_all_client_conns) { 1014 + if (!local->kill_all_client_conns) { 765 1015 /* If the number of connections is over the reap limit, we 766 1016 * expedite discard by reducing the expiry timeout. We must, 767 1017 * however, have at least a short grace period to be able to do ··· 768 1050 trace_rxrpc_client(conn, -1, rxrpc_client_discard); 769 1051 list_del_init(&conn->cache_link); 770 1052 771 - spin_unlock(&rxnet->client_conn_cache_lock); 772 - 773 1053 rxrpc_unbundle_conn(conn); 774 1054 /* Drop the ->cache_link ref */ 775 1055 rxrpc_put_connection(conn, rxrpc_conn_put_discard_idle); ··· 784 1068 * then things get messier. 
785 1069 */ 786 1070 _debug("not yet"); 787 - if (!rxnet->kill_all_client_conns) 788 - timer_reduce(&rxnet->client_conn_reap_timer, conn_expires_at); 789 - 790 - out: 791 - spin_unlock(&rxnet->client_conn_cache_lock); 792 - mutex_unlock(&rxnet->client_conn_discard_lock); 793 - _leave(""); 794 - } 795 - 796 - /* 797 - * Preemptively destroy all the client connection records rather than waiting 798 - * for them to time out 799 - */ 800 - void rxrpc_destroy_all_client_connections(struct rxrpc_net *rxnet) 801 - { 802 - _enter(""); 803 - 804 - spin_lock(&rxnet->client_conn_cache_lock); 805 - rxnet->kill_all_client_conns = true; 806 - spin_unlock(&rxnet->client_conn_cache_lock); 807 - 808 - del_timer_sync(&rxnet->client_conn_reap_timer); 809 - 810 - if (!rxrpc_queue_work(&rxnet->client_conn_reaper)) 811 - _debug("destroy: queue failed"); 1071 + if (!local->kill_all_client_conns) 1072 + timer_reduce(&local->client_conn_reap_timer, conn_expires_at); 812 1073 813 1074 _leave(""); 814 1075 } ··· 795 1102 */ 796 1103 void rxrpc_clean_up_local_conns(struct rxrpc_local *local) 797 1104 { 798 - struct rxrpc_connection *conn, *tmp; 799 - struct rxrpc_net *rxnet = local->rxnet; 800 - LIST_HEAD(graveyard); 1105 + struct rxrpc_connection *conn; 801 1106 802 1107 _enter(""); 803 1108 804 - spin_lock(&rxnet->client_conn_cache_lock); 1109 + local->kill_all_client_conns = true; 805 1110 806 - list_for_each_entry_safe(conn, tmp, &rxnet->idle_client_conns, 807 - cache_link) { 808 - if (conn->local == local) { 809 - atomic_dec(&conn->active); 810 - trace_rxrpc_client(conn, -1, rxrpc_client_discard); 811 - list_move(&conn->cache_link, &graveyard); 812 - } 813 - } 1111 + del_timer_sync(&local->client_conn_reap_timer); 814 1112 815 - spin_unlock(&rxnet->client_conn_cache_lock); 816 - 817 - while (!list_empty(&graveyard)) { 818 - conn = list_entry(graveyard.next, 819 - struct rxrpc_connection, cache_link); 1113 + while ((conn = list_first_entry_or_null(&local->idle_client_conns, 1114 + struct 
rxrpc_connection, cache_link))) { 820 1115 list_del_init(&conn->cache_link); 1116 + atomic_dec(&conn->active); 1117 + trace_rxrpc_client(conn, -1, rxrpc_client_discard); 821 1118 rxrpc_unbundle_conn(conn); 822 1119 rxrpc_put_connection(conn, rxrpc_conn_put_local_dead); 823 1120 }
+138 -244
net/rxrpc/conn_event.c
··· 17 17 #include "ar-internal.h" 18 18 19 19 /* 20 + * Set the completion state on an aborted connection. 21 + */ 22 + static bool rxrpc_set_conn_aborted(struct rxrpc_connection *conn, struct sk_buff *skb, 23 + s32 abort_code, int err, 24 + enum rxrpc_call_completion compl) 25 + { 26 + bool aborted = false; 27 + 28 + if (conn->state != RXRPC_CONN_ABORTED) { 29 + spin_lock(&conn->state_lock); 30 + if (conn->state != RXRPC_CONN_ABORTED) { 31 + conn->abort_code = abort_code; 32 + conn->error = err; 33 + conn->completion = compl; 34 + /* Order the abort info before the state change. */ 35 + smp_store_release(&conn->state, RXRPC_CONN_ABORTED); 36 + set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); 37 + set_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events); 38 + aborted = true; 39 + } 40 + spin_unlock(&conn->state_lock); 41 + } 42 + 43 + return aborted; 44 + } 45 + 46 + /* 47 + * Mark a socket buffer to indicate that the connection it's on should be aborted. 48 + */ 49 + int rxrpc_abort_conn(struct rxrpc_connection *conn, struct sk_buff *skb, 50 + s32 abort_code, int err, enum rxrpc_abort_reason why) 51 + { 52 + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 53 + 54 + if (rxrpc_set_conn_aborted(conn, skb, abort_code, err, 55 + RXRPC_CALL_LOCALLY_ABORTED)) { 56 + trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber, 57 + sp->hdr.seq, abort_code, err); 58 + rxrpc_poke_conn(conn, rxrpc_conn_get_poke_abort); 59 + } 60 + return -EPROTO; 61 + } 62 + 63 + /* 64 + * Mark a connection as being remotely aborted. 65 + */ 66 + static bool rxrpc_input_conn_abort(struct rxrpc_connection *conn, 67 + struct sk_buff *skb) 68 + { 69 + return rxrpc_set_conn_aborted(conn, skb, skb->priority, -ECONNABORTED, 70 + RXRPC_CALL_REMOTELY_ABORTED); 71 + } 72 + 73 + /* 20 74 * Retransmit terminal ACK or ABORT of the previous call. 
21 75 */ 22 - static void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, 23 - struct sk_buff *skb, 24 - unsigned int channel) 76 + void rxrpc_conn_retransmit_call(struct rxrpc_connection *conn, 77 + struct sk_buff *skb, 78 + unsigned int channel) 25 79 { 26 80 struct rxrpc_skb_priv *sp = skb ? rxrpc_skb(skb) : NULL; 27 81 struct rxrpc_channel *chan; ··· 100 46 /* If the last call got moved on whilst we were waiting to run, just 101 47 * ignore this packet. 102 48 */ 103 - call_id = READ_ONCE(chan->last_call); 104 - /* Sync with __rxrpc_disconnect_call() */ 105 - smp_rmb(); 49 + call_id = chan->last_call; 106 50 if (skb && call_id != sp->hdr.callNumber) 107 51 return; 108 52 ··· 117 65 iov[2].iov_base = &ack_info; 118 66 iov[2].iov_len = sizeof(ack_info); 119 67 68 + serial = atomic_inc_return(&conn->serial); 69 + 120 70 pkt.whdr.epoch = htonl(conn->proto.epoch); 121 71 pkt.whdr.cid = htonl(conn->proto.cid | channel); 122 72 pkt.whdr.callNumber = htonl(call_id); 73 + pkt.whdr.serial = htonl(serial); 123 74 pkt.whdr.seq = 0; 124 75 pkt.whdr.type = chan->last_type; 125 76 pkt.whdr.flags = conn->out_clientflag; ··· 159 104 iov[0].iov_len += sizeof(pkt.ack); 160 105 len += sizeof(pkt.ack) + 3 + sizeof(ack_info); 161 106 ioc = 3; 162 - break; 163 107 164 - default: 165 - return; 166 - } 167 - 168 - /* Resync with __rxrpc_disconnect_call() and check that the last call 169 - * didn't get advanced whilst we were filling out the packets. 
170 - */ 171 - smp_rmb(); 172 - if (READ_ONCE(chan->last_call) != call_id) 173 - return; 174 - 175 - serial = atomic_inc_return(&conn->serial); 176 - pkt.whdr.serial = htonl(serial); 177 - 178 - switch (chan->last_type) { 179 - case RXRPC_PACKET_TYPE_ABORT: 180 - break; 181 - case RXRPC_PACKET_TYPE_ACK: 182 108 trace_rxrpc_tx_ack(chan->call_debug_id, serial, 183 109 ntohl(pkt.ack.firstPacket), 184 110 ntohl(pkt.ack.serial), 185 111 pkt.ack.reason, 0); 186 112 break; 113 + 114 + default: 115 + return; 187 116 } 188 117 189 118 ret = kernel_sendmsg(conn->local->socket, &msg, iov, ioc, len); ··· 185 146 /* 186 147 * pass a connection-level abort onto all calls on that connection 187 148 */ 188 - static void rxrpc_abort_calls(struct rxrpc_connection *conn, 189 - enum rxrpc_call_completion compl, 190 - rxrpc_serial_t serial) 149 + static void rxrpc_abort_calls(struct rxrpc_connection *conn) 191 150 { 192 151 struct rxrpc_call *call; 193 152 int i; 194 153 195 154 _enter("{%d},%x", conn->debug_id, conn->abort_code); 196 155 197 - spin_lock(&conn->bundle->channel_lock); 198 - 199 156 for (i = 0; i < RXRPC_MAXCALLS; i++) { 200 - call = rcu_dereference_protected( 201 - conn->channels[i].call, 202 - lockdep_is_held(&conn->bundle->channel_lock)); 203 - if (call) { 204 - if (compl == RXRPC_CALL_LOCALLY_ABORTED) 205 - trace_rxrpc_abort(call->debug_id, 206 - "CON", call->cid, 207 - call->call_id, 0, 157 + call = conn->channels[i].call; 158 + if (call) 159 + rxrpc_set_call_completion(call, 160 + conn->completion, 208 161 conn->abort_code, 209 162 conn->error); 210 - else 211 - trace_rxrpc_rx_abort(call, serial, 212 - conn->abort_code); 213 - rxrpc_set_call_completion(call, compl, 214 - conn->abort_code, 215 - conn->error); 216 - } 217 163 } 218 164 219 - spin_unlock(&conn->bundle->channel_lock); 220 165 _leave(""); 221 - } 222 - 223 - /* 224 - * generate a connection-level abort 225 - */ 226 - static int rxrpc_abort_connection(struct rxrpc_connection *conn, 227 - int error, u32 
abort_code) 228 - { 229 - struct rxrpc_wire_header whdr; 230 - struct msghdr msg; 231 - struct kvec iov[2]; 232 - __be32 word; 233 - size_t len; 234 - u32 serial; 235 - int ret; 236 - 237 - _enter("%d,,%u,%u", conn->debug_id, error, abort_code); 238 - 239 - /* generate a connection-level abort */ 240 - spin_lock(&conn->state_lock); 241 - if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) { 242 - spin_unlock(&conn->state_lock); 243 - _leave(" = 0 [already dead]"); 244 - return 0; 245 - } 246 - 247 - conn->error = error; 248 - conn->abort_code = abort_code; 249 - conn->state = RXRPC_CONN_LOCALLY_ABORTED; 250 - set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); 251 - spin_unlock(&conn->state_lock); 252 - 253 - msg.msg_name = &conn->peer->srx.transport; 254 - msg.msg_namelen = conn->peer->srx.transport_len; 255 - msg.msg_control = NULL; 256 - msg.msg_controllen = 0; 257 - msg.msg_flags = 0; 258 - 259 - whdr.epoch = htonl(conn->proto.epoch); 260 - whdr.cid = htonl(conn->proto.cid); 261 - whdr.callNumber = 0; 262 - whdr.seq = 0; 263 - whdr.type = RXRPC_PACKET_TYPE_ABORT; 264 - whdr.flags = conn->out_clientflag; 265 - whdr.userStatus = 0; 266 - whdr.securityIndex = conn->security_ix; 267 - whdr._rsvd = 0; 268 - whdr.serviceId = htons(conn->service_id); 269 - 270 - word = htonl(conn->abort_code); 271 - 272 - iov[0].iov_base = &whdr; 273 - iov[0].iov_len = sizeof(whdr); 274 - iov[1].iov_base = &word; 275 - iov[1].iov_len = sizeof(word); 276 - 277 - len = iov[0].iov_len + iov[1].iov_len; 278 - 279 - serial = atomic_inc_return(&conn->serial); 280 - rxrpc_abort_calls(conn, RXRPC_CALL_LOCALLY_ABORTED, serial); 281 - whdr.serial = htonl(serial); 282 - 283 - ret = kernel_sendmsg(conn->local->socket, &msg, iov, 2, len); 284 - if (ret < 0) { 285 - trace_rxrpc_tx_fail(conn->debug_id, serial, ret, 286 - rxrpc_tx_point_conn_abort); 287 - _debug("sendmsg failed: %d", ret); 288 - return -EAGAIN; 289 - } 290 - 291 - trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort); 292 - 
293 - conn->peer->last_tx_at = ktime_get_seconds(); 294 - 295 - _leave(" = 0"); 296 - return 0; 297 166 } 298 167 299 168 /* ··· 210 263 */ 211 264 static void rxrpc_call_is_secure(struct rxrpc_call *call) 212 265 { 213 - _enter("%p", call); 214 - if (call) { 215 - write_lock(&call->state_lock); 216 - if (call->state == RXRPC_CALL_SERVER_SECURING) { 217 - call->state = RXRPC_CALL_SERVER_RECV_REQUEST; 218 - rxrpc_notify_socket(call); 219 - } 220 - write_unlock(&call->state_lock); 266 + if (call && __rxrpc_call_state(call) == RXRPC_CALL_SERVER_SECURING) { 267 + rxrpc_set_call_state(call, RXRPC_CALL_SERVER_RECV_REQUEST); 268 + rxrpc_notify_socket(call); 221 269 } 222 270 } 223 271 ··· 220 278 * connection-level Rx packet processor 221 279 */ 222 280 static int rxrpc_process_event(struct rxrpc_connection *conn, 223 - struct sk_buff *skb, 224 - u32 *_abort_code) 281 + struct sk_buff *skb) 225 282 { 226 283 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 227 - int loop, ret; 284 + int ret; 228 285 229 - if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) { 230 - _leave(" = -ECONNABORTED [%u]", conn->state); 286 + if (conn->state == RXRPC_CONN_ABORTED) 231 287 return -ECONNABORTED; 232 - } 233 288 234 289 _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial); 235 290 236 291 switch (sp->hdr.type) { 237 - case RXRPC_PACKET_TYPE_DATA: 238 - case RXRPC_PACKET_TYPE_ACK: 239 - rxrpc_conn_retransmit_call(conn, skb, 240 - sp->hdr.cid & RXRPC_CHANNELMASK); 241 - return 0; 242 - 243 - case RXRPC_PACKET_TYPE_BUSY: 244 - /* Just ignore BUSY packets for now. 
*/ 245 - return 0; 246 - 247 - case RXRPC_PACKET_TYPE_ABORT: 248 - conn->error = -ECONNABORTED; 249 - conn->abort_code = skb->priority; 250 - conn->state = RXRPC_CONN_REMOTELY_ABORTED; 251 - set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); 252 - rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial); 253 - return -ECONNABORTED; 254 - 255 292 case RXRPC_PACKET_TYPE_CHALLENGE: 256 - return conn->security->respond_to_challenge(conn, skb, 257 - _abort_code); 293 + return conn->security->respond_to_challenge(conn, skb); 258 294 259 295 case RXRPC_PACKET_TYPE_RESPONSE: 260 - ret = conn->security->verify_response(conn, skb, _abort_code); 296 + ret = conn->security->verify_response(conn, skb); 261 297 if (ret < 0) 262 298 return ret; 263 299 ··· 244 324 if (ret < 0) 245 325 return ret; 246 326 247 - spin_lock(&conn->bundle->channel_lock); 248 327 spin_lock(&conn->state_lock); 249 - 250 - if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) { 328 + if (conn->state == RXRPC_CONN_SERVICE_CHALLENGING) 251 329 conn->state = RXRPC_CONN_SERVICE; 252 - spin_unlock(&conn->state_lock); 253 - for (loop = 0; loop < RXRPC_MAXCALLS; loop++) 254 - rxrpc_call_is_secure( 255 - rcu_dereference_protected( 256 - conn->channels[loop].call, 257 - lockdep_is_held(&conn->bundle->channel_lock))); 258 - } else { 259 - spin_unlock(&conn->state_lock); 260 - } 330 + spin_unlock(&conn->state_lock); 261 331 262 - spin_unlock(&conn->bundle->channel_lock); 332 + if (conn->state == RXRPC_CONN_SERVICE) { 333 + /* Offload call state flipping to the I/O thread. As 334 + * we've already received the packet, put it on the 335 + * front of the queue. 
336 + */ 337 + skb->mark = RXRPC_SKB_MARK_SERVICE_CONN_SECURED; 338 + rxrpc_get_skb(skb, rxrpc_skb_get_conn_secured); 339 + skb_queue_head(&conn->local->rx_queue, skb); 340 + rxrpc_wake_up_io_thread(conn->local); 341 + } 263 342 return 0; 264 343 265 344 default: 266 - trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, 267 - tracepoint_string("bad_conn_pkt")); 345 + WARN_ON_ONCE(1); 268 346 return -EPROTO; 269 347 } 270 348 } ··· 272 354 */ 273 355 static void rxrpc_secure_connection(struct rxrpc_connection *conn) 274 356 { 275 - u32 abort_code; 276 - int ret; 277 - 278 - _enter("{%d}", conn->debug_id); 279 - 280 - ASSERT(conn->security_ix != 0); 281 - 282 - if (conn->security->issue_challenge(conn) < 0) { 283 - abort_code = RX_CALL_DEAD; 284 - ret = -ENOMEM; 285 - goto abort; 286 - } 287 - 288 - _leave(""); 289 - return; 290 - 291 - abort: 292 - _debug("abort %d, %d", ret, abort_code); 293 - rxrpc_abort_connection(conn, ret, abort_code); 294 - _leave(" [aborted]"); 357 + if (conn->security->issue_challenge(conn) < 0) 358 + rxrpc_abort_conn(conn, NULL, RX_CALL_DEAD, -ENOMEM, 359 + rxrpc_abort_nomem); 295 360 } 296 361 297 362 /* ··· 296 395 if (!test_bit(RXRPC_CONN_FINAL_ACK_0 + channel, &conn->flags)) 297 396 continue; 298 397 299 - smp_rmb(); /* vs rxrpc_disconnect_client_call */ 300 - ack_at = READ_ONCE(chan->final_ack_at); 301 - 398 + ack_at = chan->final_ack_at; 302 399 if (time_before(j, ack_at) && !force) { 303 400 if (time_before(ack_at, next_j)) { 304 401 next_j = ack_at; ··· 323 424 static void rxrpc_do_process_connection(struct rxrpc_connection *conn) 324 425 { 325 426 struct sk_buff *skb; 326 - u32 abort_code = RX_PROTOCOL_ERROR; 327 427 int ret; 328 428 329 429 if (test_and_clear_bit(RXRPC_CONN_EV_CHALLENGE, &conn->events)) 330 430 rxrpc_secure_connection(conn); 331 431 332 - /* Process delayed ACKs whose time has come. 
*/ 333 - if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) 334 - rxrpc_process_delayed_final_acks(conn, false); 335 - 336 432 /* go through the conn-level event packets, releasing the ref on this 337 433 * connection that each one has when we've finished with it */ 338 434 while ((skb = skb_dequeue(&conn->rx_queue))) { 339 435 rxrpc_see_skb(skb, rxrpc_skb_see_conn_work); 340 - ret = rxrpc_process_event(conn, skb, &abort_code); 436 + ret = rxrpc_process_event(conn, skb); 341 437 switch (ret) { 342 - case -EPROTO: 343 - case -EKEYEXPIRED: 344 - case -EKEYREJECTED: 345 - goto protocol_error; 346 438 case -ENOMEM: 347 439 case -EAGAIN: 348 - goto requeue_and_leave; 349 - case -ECONNABORTED: 440 + skb_queue_head(&conn->rx_queue, skb); 441 + rxrpc_queue_conn(conn, rxrpc_conn_queue_retry_work); 442 + break; 350 443 default: 351 444 rxrpc_free_skb(skb, rxrpc_skb_put_conn_work); 352 445 break; 353 446 } 354 447 } 355 - 356 - return; 357 - 358 - requeue_and_leave: 359 - skb_queue_head(&conn->rx_queue, skb); 360 - return; 361 - 362 - protocol_error: 363 - if (rxrpc_abort_connection(conn, ret, abort_code) < 0) 364 - goto requeue_and_leave; 365 - rxrpc_free_skb(skb, rxrpc_skb_put_conn_work); 366 - return; 367 448 } 368 449 369 450 void rxrpc_process_connection(struct work_struct *work) ··· 377 498 /* 378 499 * Input a connection-level packet. 
379 500 */ 380 - int rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb) 501 + bool rxrpc_input_conn_packet(struct rxrpc_connection *conn, struct sk_buff *skb) 381 502 { 382 503 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 383 504 384 - if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) { 385 - _leave(" = -ECONNABORTED [%u]", conn->state); 386 - return -ECONNABORTED; 387 - } 388 - 389 - _enter("{%d},{%u,%%%u},", conn->debug_id, sp->hdr.type, sp->hdr.serial); 390 - 391 505 switch (sp->hdr.type) { 392 - case RXRPC_PACKET_TYPE_DATA: 393 - case RXRPC_PACKET_TYPE_ACK: 394 - rxrpc_conn_retransmit_call(conn, skb, 395 - sp->hdr.cid & RXRPC_CHANNELMASK); 396 - return 0; 397 - 398 506 case RXRPC_PACKET_TYPE_BUSY: 399 507 /* Just ignore BUSY packets for now. */ 400 - return 0; 508 + return true; 401 509 402 510 case RXRPC_PACKET_TYPE_ABORT: 403 - conn->error = -ECONNABORTED; 404 - conn->abort_code = skb->priority; 405 - conn->state = RXRPC_CONN_REMOTELY_ABORTED; 406 - set_bit(RXRPC_CONN_DONT_REUSE, &conn->flags); 407 - rxrpc_abort_calls(conn, RXRPC_CALL_REMOTELY_ABORTED, sp->hdr.serial); 408 - return -ECONNABORTED; 511 + if (rxrpc_is_conn_aborted(conn)) 512 + return true; 513 + rxrpc_input_conn_abort(conn, skb); 514 + rxrpc_abort_calls(conn); 515 + return true; 409 516 410 517 case RXRPC_PACKET_TYPE_CHALLENGE: 411 518 case RXRPC_PACKET_TYPE_RESPONSE: 519 + if (rxrpc_is_conn_aborted(conn)) { 520 + if (conn->completion == RXRPC_CALL_LOCALLY_ABORTED) 521 + rxrpc_send_conn_abort(conn); 522 + return true; 523 + } 412 524 rxrpc_post_packet_to_conn(conn, skb); 413 - return 0; 525 + return true; 414 526 415 527 default: 416 - trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, 417 - tracepoint_string("bad_conn_pkt")); 418 - return -EPROTO; 528 + WARN_ON_ONCE(1); 529 + return true; 419 530 } 531 + } 532 + 533 + /* 534 + * Input a connection event. 
535 + */ 536 + void rxrpc_input_conn_event(struct rxrpc_connection *conn, struct sk_buff *skb) 537 + { 538 + unsigned int loop; 539 + 540 + if (test_and_clear_bit(RXRPC_CONN_EV_ABORT_CALLS, &conn->events)) 541 + rxrpc_abort_calls(conn); 542 + 543 + switch (skb->mark) { 544 + case RXRPC_SKB_MARK_SERVICE_CONN_SECURED: 545 + if (conn->state != RXRPC_CONN_SERVICE) 546 + break; 547 + 548 + for (loop = 0; loop < RXRPC_MAXCALLS; loop++) 549 + rxrpc_call_is_secure(conn->channels[loop].call); 550 + break; 551 + } 552 + 553 + /* Process delayed ACKs whose time has come. */ 554 + if (conn->flags & RXRPC_CONN_FINAL_ACK_MASK) 555 + rxrpc_process_delayed_final_acks(conn, false); 420 556 }
+42 -25
net/rxrpc/conn_object.c
··· 23 23 static void rxrpc_set_service_reap_timer(struct rxrpc_net *rxnet, 24 24 unsigned long reap_at); 25 25 26 + void rxrpc_poke_conn(struct rxrpc_connection *conn, enum rxrpc_conn_trace why) 27 + { 28 + struct rxrpc_local *local = conn->local; 29 + bool busy; 30 + 31 + if (WARN_ON_ONCE(!local)) 32 + return; 33 + 34 + spin_lock_bh(&local->lock); 35 + busy = !list_empty(&conn->attend_link); 36 + if (!busy) { 37 + rxrpc_get_connection(conn, why); 38 + list_add_tail(&conn->attend_link, &local->conn_attend_q); 39 + } 40 + spin_unlock_bh(&local->lock); 41 + rxrpc_wake_up_io_thread(local); 42 + } 43 + 26 44 static void rxrpc_connection_timer(struct timer_list *timer) 27 45 { 28 46 struct rxrpc_connection *conn = 29 47 container_of(timer, struct rxrpc_connection, timer); 30 48 31 - rxrpc_queue_conn(conn, rxrpc_conn_queue_timer); 49 + rxrpc_poke_conn(conn, rxrpc_conn_get_poke_timer); 32 50 } 33 51 34 52 /* ··· 67 49 INIT_WORK(&conn->destructor, rxrpc_clean_up_connection); 68 50 INIT_LIST_HEAD(&conn->proc_link); 69 51 INIT_LIST_HEAD(&conn->link); 52 + mutex_init(&conn->security_lock); 70 53 skb_queue_head_init(&conn->rx_queue); 71 54 conn->rxnet = rxnet; 72 55 conn->security = &rxrpc_no_security; ··· 101 82 102 83 _enter(",%x", sp->hdr.cid & RXRPC_CIDMASK); 103 84 104 - /* Look up client connections by connection ID alone as their IDs are 105 - * unique for this machine. 85 + /* Look up client connections by connection ID alone as their 86 + * IDs are unique for this machine. 
106 87 */ 107 - conn = idr_find(&rxrpc_client_conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT); 88 + conn = idr_find(&local->conn_ids, sp->hdr.cid >> RXRPC_CIDSHIFT); 108 89 if (!conn || refcount_read(&conn->ref) == 0) { 109 90 _debug("no conn"); 110 91 goto not_found; ··· 158 139 159 140 _enter("%d,%x", conn->debug_id, call->cid); 160 141 161 - if (rcu_access_pointer(chan->call) == call) { 142 + if (chan->call == call) { 162 143 /* Save the result of the call so that we can repeat it if necessary 163 144 * through the channel, whilst disposing of the actual call record. 164 145 */ ··· 178 159 break; 179 160 } 180 161 181 - /* Sync with rxrpc_conn_retransmit(). */ 182 - smp_wmb(); 183 162 chan->last_call = chan->call_id; 184 163 chan->call_id = chan->call_counter; 185 - 186 - rcu_assign_pointer(chan->call, NULL); 164 + chan->call = NULL; 187 165 } 188 166 189 167 _leave(""); ··· 194 178 { 195 179 struct rxrpc_connection *conn = call->conn; 196 180 181 + set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); 182 + rxrpc_see_call(call, rxrpc_call_see_disconnected); 183 + 197 184 call->peer->cong_ssthresh = call->cong_ssthresh; 198 185 199 186 if (!hlist_unhashed(&call->error_link)) { ··· 205 186 spin_unlock(&call->peer->lock); 206 187 } 207 188 208 - if (rxrpc_is_client_call(call)) 209 - return rxrpc_disconnect_client_call(conn->bundle, call); 189 + if (rxrpc_is_client_call(call)) { 190 + rxrpc_disconnect_client_call(call->bundle, call); 191 + } else { 192 + __rxrpc_disconnect_call(conn, call); 193 + conn->idle_timestamp = jiffies; 194 + if (atomic_dec_and_test(&conn->active)) 195 + rxrpc_set_service_reap_timer(conn->rxnet, 196 + jiffies + rxrpc_connection_expiry); 197 + } 210 198 211 - spin_lock(&conn->bundle->channel_lock); 212 - __rxrpc_disconnect_call(conn, call); 213 - spin_unlock(&conn->bundle->channel_lock); 214 - 215 - set_bit(RXRPC_CALL_DISCONNECTED, &call->flags); 216 - conn->idle_timestamp = jiffies; 217 - if (atomic_dec_and_test(&conn->active)) 218 - 
rxrpc_set_service_reap_timer(conn->rxnet, 219 - jiffies + rxrpc_connection_expiry); 199 + rxrpc_put_call(call, rxrpc_call_put_io_thread); 220 200 } 221 201 222 202 /* ··· 311 293 container_of(work, struct rxrpc_connection, destructor); 312 294 struct rxrpc_net *rxnet = conn->rxnet; 313 295 314 - ASSERT(!rcu_access_pointer(conn->channels[0].call) && 315 - !rcu_access_pointer(conn->channels[1].call) && 316 - !rcu_access_pointer(conn->channels[2].call) && 317 - !rcu_access_pointer(conn->channels[3].call)); 296 + ASSERT(!conn->channels[0].call && 297 + !conn->channels[1].call && 298 + !conn->channels[2].call && 299 + !conn->channels[3].call); 318 300 ASSERT(list_empty(&conn->cache_link)); 319 301 320 302 del_timer_sync(&conn->timer); ··· 465 447 _enter(""); 466 448 467 449 atomic_dec(&rxnet->nr_conns); 468 - rxrpc_destroy_all_client_connections(rxnet); 469 450 470 451 del_timer_sync(&rxnet->service_conn_reap_timer); 471 452 rxrpc_queue_work(&rxnet->service_conn_reaper);
-1
net/rxrpc/conn_service.c
··· 11 11 static struct rxrpc_bundle rxrpc_service_dummy_bundle = { 12 12 .ref = REFCOUNT_INIT(1), 13 13 .debug_id = UINT_MAX, 14 - .channel_lock = __SPIN_LOCK_UNLOCKED(&rxrpc_service_dummy_bundle.channel_lock), 15 14 }; 16 15 17 16 /*
+92 -83
net/rxrpc/input.c
··· 9 9 10 10 #include "ar-internal.h" 11 11 12 - static void rxrpc_proto_abort(const char *why, 13 - struct rxrpc_call *call, rxrpc_seq_t seq) 12 + static void rxrpc_proto_abort(struct rxrpc_call *call, rxrpc_seq_t seq, 13 + enum rxrpc_abort_reason why) 14 14 { 15 - if (rxrpc_abort_call(why, call, seq, RX_PROTOCOL_ERROR, -EBADMSG)) 16 - rxrpc_send_abort_packet(call); 15 + rxrpc_abort_call(call, seq, RX_PROTOCOL_ERROR, -EBADMSG, why); 17 16 } 18 17 19 18 /* ··· 184 185 if (call->cong_mode != RXRPC_CALL_SLOW_START && 185 186 call->cong_mode != RXRPC_CALL_CONGEST_AVOIDANCE) 186 187 return; 187 - if (call->state == RXRPC_CALL_CLIENT_AWAIT_REPLY) 188 + if (__rxrpc_call_state(call) == RXRPC_CALL_CLIENT_AWAIT_REPLY) 188 189 return; 189 190 190 191 rtt = ns_to_ktime(call->peer->srtt_us * (1000 / 8)); ··· 249 250 * This occurs when we get an ACKALL packet, the first DATA packet of a reply, 250 251 * or a final ACK packet. 251 252 */ 252 - static bool rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, 253 - const char *abort_why) 253 + static void rxrpc_end_tx_phase(struct rxrpc_call *call, bool reply_begun, 254 + enum rxrpc_abort_reason abort_why) 254 255 { 255 - unsigned int state; 256 - 257 256 ASSERT(test_bit(RXRPC_CALL_TX_LAST, &call->flags)); 258 257 259 - write_lock(&call->state_lock); 260 - 261 - state = call->state; 262 - switch (state) { 258 + switch (__rxrpc_call_state(call)) { 263 259 case RXRPC_CALL_CLIENT_SEND_REQUEST: 264 260 case RXRPC_CALL_CLIENT_AWAIT_REPLY: 265 - if (reply_begun) 266 - call->state = state = RXRPC_CALL_CLIENT_RECV_REPLY; 267 - else 268 - call->state = state = RXRPC_CALL_CLIENT_AWAIT_REPLY; 261 + if (reply_begun) { 262 + rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_RECV_REPLY); 263 + trace_rxrpc_txqueue(call, rxrpc_txqueue_end); 264 + break; 265 + } 266 + 267 + rxrpc_set_call_state(call, RXRPC_CALL_CLIENT_AWAIT_REPLY); 268 + trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply); 269 269 break; 270 270 271 271 case 
RXRPC_CALL_SERVER_AWAIT_ACK: 272 - __rxrpc_call_completed(call); 273 - state = call->state; 272 + rxrpc_call_completed(call); 273 + trace_rxrpc_txqueue(call, rxrpc_txqueue_end); 274 274 break; 275 275 276 276 default: 277 - goto bad_state; 277 + kdebug("end_tx %s", rxrpc_call_states[__rxrpc_call_state(call)]); 278 + rxrpc_proto_abort(call, call->tx_top, abort_why); 279 + break; 278 280 } 279 - 280 - write_unlock(&call->state_lock); 281 - if (state == RXRPC_CALL_CLIENT_AWAIT_REPLY) 282 - trace_rxrpc_txqueue(call, rxrpc_txqueue_await_reply); 283 - else 284 - trace_rxrpc_txqueue(call, rxrpc_txqueue_end); 285 - _leave(" = ok"); 286 - return true; 287 - 288 - bad_state: 289 - write_unlock(&call->state_lock); 290 - kdebug("end_tx %s", rxrpc_call_states[call->state]); 291 - rxrpc_proto_abort(abort_why, call, call->tx_top); 292 - return false; 293 281 } 294 282 295 283 /* ··· 291 305 if (call->ackr_reason) { 292 306 now = jiffies; 293 307 timo = now + MAX_JIFFY_OFFSET; 294 - WRITE_ONCE(call->resend_at, timo); 308 + 295 309 WRITE_ONCE(call->delay_ack_at, timo); 296 310 trace_rxrpc_timer(call, rxrpc_timer_init_for_reply, now); 297 311 } 298 312 299 313 if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags)) { 300 314 if (!rxrpc_rotate_tx_window(call, top, &summary)) { 301 - rxrpc_proto_abort("TXL", call, top); 315 + rxrpc_proto_abort(call, top, rxrpc_eproto_early_reply); 302 316 return false; 303 317 } 304 318 } 305 - return rxrpc_end_tx_phase(call, true, "ETD"); 319 + 320 + rxrpc_end_tx_phase(call, true, rxrpc_eproto_unexpected_reply); 321 + return true; 322 + } 323 + 324 + /* 325 + * End the packet reception phase. 
326 + */ 327 + static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) 328 + { 329 + rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq); 330 + 331 + _enter("%d,%s", call->debug_id, rxrpc_call_states[__rxrpc_call_state(call)]); 332 + 333 + trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh); 334 + 335 + switch (__rxrpc_call_state(call)) { 336 + case RXRPC_CALL_CLIENT_RECV_REPLY: 337 + rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack); 338 + rxrpc_call_completed(call); 339 + break; 340 + 341 + case RXRPC_CALL_SERVER_RECV_REQUEST: 342 + rxrpc_set_call_state(call, RXRPC_CALL_SERVER_ACK_REQUEST); 343 + call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; 344 + rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_processing_op); 345 + break; 346 + 347 + default: 348 + break; 349 + } 306 350 } 307 351 308 352 static void rxrpc_input_update_ack_window(struct rxrpc_call *call, ··· 353 337 354 338 __skb_queue_tail(&call->recvmsg_queue, skb); 355 339 rxrpc_input_update_ack_window(call, window, wtop); 356 - 357 340 trace_rxrpc_receive(call, last ? 
why + 1 : why, sp->hdr.serial, sp->hdr.seq); 341 + if (last) 342 + rxrpc_end_rx_phase(call, sp->hdr.serial); 358 343 } 359 344 360 345 /* ··· 383 366 384 367 if (last) { 385 368 if (test_and_set_bit(RXRPC_CALL_RX_LAST, &call->flags) && 386 - seq + 1 != wtop) { 387 - rxrpc_proto_abort("LSN", call, seq); 388 - return; 389 - } 369 + seq + 1 != wtop) 370 + return rxrpc_proto_abort(call, seq, rxrpc_eproto_different_last); 390 371 } else { 391 372 if (test_bit(RXRPC_CALL_RX_LAST, &call->flags) && 392 373 after_eq(seq, wtop)) { 393 374 pr_warn("Packet beyond last: c=%x q=%x window=%x-%x wlimit=%x\n", 394 375 call->debug_id, seq, window, wtop, wlimit); 395 - rxrpc_proto_abort("LSA", call, seq); 396 - return; 376 + return rxrpc_proto_abort(call, seq, rxrpc_eproto_data_after_last); 397 377 } 398 378 } 399 379 ··· 564 550 static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb) 565 551 { 566 552 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 567 - enum rxrpc_call_state state; 568 553 rxrpc_serial_t serial = sp->hdr.serial; 569 554 rxrpc_seq_t seq0 = sp->hdr.seq; 570 555 ··· 571 558 atomic64_read(&call->ackr_window), call->rx_highest_seq, 572 559 skb->len, seq0); 573 560 574 - state = READ_ONCE(call->state); 575 - if (state >= RXRPC_CALL_COMPLETE) 561 + if (__rxrpc_call_is_complete(call)) 576 562 return; 577 563 578 - if (state == RXRPC_CALL_SERVER_RECV_REQUEST) { 564 + switch (__rxrpc_call_state(call)) { 565 + case RXRPC_CALL_CLIENT_SEND_REQUEST: 566 + case RXRPC_CALL_CLIENT_AWAIT_REPLY: 567 + /* Received data implicitly ACKs all of the request 568 + * packets we sent when we're acting as a client. 
569 + */ 570 + if (!rxrpc_receiving_reply(call)) 571 + goto out_notify; 572 + break; 573 + 574 + case RXRPC_CALL_SERVER_RECV_REQUEST: { 579 575 unsigned long timo = READ_ONCE(call->next_req_timo); 580 576 unsigned long now, expect_req_by; 581 577 ··· 595 573 rxrpc_reduce_call_timer(call, expect_req_by, now, 596 574 rxrpc_timer_set_for_idle); 597 575 } 576 + break; 598 577 } 599 578 600 - /* Received data implicitly ACKs all of the request packets we sent 601 - * when we're acting as a client. 602 - */ 603 - if ((state == RXRPC_CALL_CLIENT_SEND_REQUEST || 604 - state == RXRPC_CALL_CLIENT_AWAIT_REPLY) && 605 - !rxrpc_receiving_reply(call)) 606 - goto out_notify; 579 + default: 580 + break; 581 + } 607 582 608 583 if (!rxrpc_input_split_jumbo(call, skb)) { 609 - rxrpc_proto_abort("VLD", call, sp->hdr.seq); 584 + rxrpc_proto_abort(call, sp->hdr.seq, rxrpc_badmsg_bad_jumbo); 610 585 goto out_notify; 611 586 } 612 587 skb = NULL; ··· 784 765 785 766 offset = sizeof(struct rxrpc_wire_header); 786 767 if (skb_copy_bits(skb, offset, &ack, sizeof(ack)) < 0) 787 - return rxrpc_proto_abort("XAK", call, 0); 768 + return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack); 788 769 offset += sizeof(ack); 789 770 790 771 ack_serial = sp->hdr.serial; ··· 864 845 ioffset = offset + nr_acks + 3; 865 846 if (skb->len >= ioffset + sizeof(info) && 866 847 skb_copy_bits(skb, ioffset, &info, sizeof(info)) < 0) 867 - return rxrpc_proto_abort("XAI", call, 0); 848 + return rxrpc_proto_abort(call, 0, rxrpc_badmsg_short_ack_info); 868 849 869 850 if (nr_acks > 0) 870 851 skb_condense(skb); ··· 887 868 rxrpc_input_ackinfo(call, skb, &info); 888 869 889 870 if (first_soft_ack == 0) 890 - return rxrpc_proto_abort("AK0", call, 0); 871 + return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_zero); 891 872 892 873 /* Ignore ACKs unless we are or have just been transmitting. 
*/ 893 - switch (READ_ONCE(call->state)) { 874 + switch (__rxrpc_call_state(call)) { 894 875 case RXRPC_CALL_CLIENT_SEND_REQUEST: 895 876 case RXRPC_CALL_CLIENT_AWAIT_REPLY: 896 877 case RXRPC_CALL_SERVER_SEND_REPLY: ··· 902 883 903 884 if (before(hard_ack, call->acks_hard_ack) || 904 885 after(hard_ack, call->tx_top)) 905 - return rxrpc_proto_abort("AKW", call, 0); 886 + return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_outside_window); 906 887 if (nr_acks > call->tx_top - hard_ack) 907 - return rxrpc_proto_abort("AKN", call, 0); 888 + return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_sack_overflow); 908 889 909 890 if (after(hard_ack, call->acks_hard_ack)) { 910 891 if (rxrpc_rotate_tx_window(call, hard_ack, &summary)) { 911 - rxrpc_end_tx_phase(call, false, "ETA"); 892 + rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ack); 912 893 return; 913 894 } 914 895 } 915 896 916 897 if (nr_acks > 0) { 917 898 if (offset > (int)skb->len - nr_acks) 918 - return rxrpc_proto_abort("XSA", call, 0); 899 + return rxrpc_proto_abort(call, 0, rxrpc_eproto_ackr_short_sack); 919 900 rxrpc_input_soft_acks(call, skb->data + offset, first_soft_ack, 920 901 nr_acks, &summary); 921 902 } ··· 937 918 struct rxrpc_ack_summary summary = { 0 }; 938 919 939 920 if (rxrpc_rotate_tx_window(call, call->tx_top, &summary)) 940 - rxrpc_end_tx_phase(call, false, "ETL"); 921 + rxrpc_end_tx_phase(call, false, rxrpc_eproto_unexpected_ackall); 941 922 } 942 923 943 924 /* ··· 982 963 983 964 switch (sp->hdr.type) { 984 965 case RXRPC_PACKET_TYPE_DATA: 985 - rxrpc_input_data(call, skb); 986 - break; 966 + return rxrpc_input_data(call, skb); 987 967 988 968 case RXRPC_PACKET_TYPE_ACK: 989 - rxrpc_input_ack(call, skb); 990 - break; 969 + return rxrpc_input_ack(call, skb); 991 970 992 971 case RXRPC_PACKET_TYPE_BUSY: 993 972 /* Just ignore BUSY packets from the server; the retry and 994 973 * lifespan timers will take care of business. BUSY packets 995 974 * from the client don't make sense. 
996 975 */ 997 - break; 976 + return; 998 977 999 978 case RXRPC_PACKET_TYPE_ABORT: 1000 - rxrpc_input_abort(call, skb); 1001 - break; 979 + return rxrpc_input_abort(call, skb); 1002 980 1003 981 case RXRPC_PACKET_TYPE_ACKALL: 1004 - rxrpc_input_ackall(call, skb); 1005 - break; 982 + return rxrpc_input_ackall(call, skb); 1006 983 1007 984 default: 1008 985 break; ··· 1013 998 */ 1014 999 void rxrpc_implicit_end_call(struct rxrpc_call *call, struct sk_buff *skb) 1015 1000 { 1016 - struct rxrpc_connection *conn = call->conn; 1017 - 1018 - switch (READ_ONCE(call->state)) { 1001 + switch (__rxrpc_call_state(call)) { 1019 1002 case RXRPC_CALL_SERVER_AWAIT_ACK: 1020 1003 rxrpc_call_completed(call); 1021 1004 fallthrough; 1022 1005 case RXRPC_CALL_COMPLETE: 1023 1006 break; 1024 1007 default: 1025 - if (rxrpc_abort_call("IMP", call, 0, RX_CALL_DEAD, -ESHUTDOWN)) 1026 - rxrpc_send_abort_packet(call); 1008 + rxrpc_abort_call(call, 0, RX_CALL_DEAD, -ESHUTDOWN, 1009 + rxrpc_eproto_improper_term); 1027 1010 trace_rxrpc_improper_term(call); 1028 1011 break; 1029 1012 } 1030 1013 1031 1014 rxrpc_input_call_event(call, skb); 1032 - 1033 - spin_lock(&conn->bundle->channel_lock); 1034 - __rxrpc_disconnect_call(conn, call); 1035 - spin_unlock(&conn->bundle->channel_lock); 1036 1015 }
+6 -14
net/rxrpc/insecure.c
··· 43 43 } 44 44 45 45 static int none_respond_to_challenge(struct rxrpc_connection *conn, 46 - struct sk_buff *skb, 47 - u32 *_abort_code) 46 + struct sk_buff *skb) 48 47 { 49 - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 50 - 51 - trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, 52 - tracepoint_string("chall_none")); 53 - return -EPROTO; 48 + return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO, 49 + rxrpc_eproto_rxnull_challenge); 54 50 } 55 51 56 52 static int none_verify_response(struct rxrpc_connection *conn, 57 - struct sk_buff *skb, 58 - u32 *_abort_code) 53 + struct sk_buff *skb) 59 54 { 60 - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 61 - 62 - trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, 63 - tracepoint_string("resp_none")); 64 - return -EPROTO; 55 + return rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO, 56 + rxrpc_eproto_rxnull_response); 65 57 } 66 58 67 59 static void none_clear(struct rxrpc_connection *conn)
+109 -95
net/rxrpc/io_thread.c
··· 67 67 } 68 68 69 69 /* 70 + * Directly produce an abort from a packet. 71 + */ 72 + bool rxrpc_direct_abort(struct sk_buff *skb, enum rxrpc_abort_reason why, 73 + s32 abort_code, int err) 74 + { 75 + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 76 + 77 + trace_rxrpc_abort(0, why, sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 78 + abort_code, err); 79 + skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; 80 + skb->priority = abort_code; 81 + return false; 82 + } 83 + 84 + static bool rxrpc_bad_message(struct sk_buff *skb, enum rxrpc_abort_reason why) 85 + { 86 + return rxrpc_direct_abort(skb, why, RX_PROTOCOL_ERROR, -EBADMSG); 87 + } 88 + 89 + #define just_discard true 90 + 91 + /* 70 92 * Process event packets targeted at a local endpoint. 71 93 */ 72 - static void rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb) 94 + static bool rxrpc_input_version(struct rxrpc_local *local, struct sk_buff *skb) 73 95 { 74 96 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 75 97 char v; ··· 103 81 if (v == 0) 104 82 rxrpc_send_version_request(local, &sp->hdr, skb); 105 83 } 84 + 85 + return true; 106 86 } 107 87 108 88 /* 109 89 * Extract the wire header from a packet and translate the byte order. 
110 90 */ 111 - static noinline 112 - int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb) 91 + static bool rxrpc_extract_header(struct rxrpc_skb_priv *sp, 92 + struct sk_buff *skb) 113 93 { 114 94 struct rxrpc_wire_header whdr; 115 95 116 96 /* dig out the RxRPC connection details */ 117 - if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) { 118 - trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, 119 - tracepoint_string("bad_hdr")); 120 - return -EBADMSG; 121 - } 97 + if (skb_copy_bits(skb, 0, &whdr, sizeof(whdr)) < 0) 98 + return rxrpc_bad_message(skb, rxrpc_badmsg_short_hdr); 122 99 123 100 memset(sp, 0, sizeof(*sp)); 124 101 sp->hdr.epoch = ntohl(whdr.epoch); ··· 131 110 sp->hdr.securityIndex = whdr.securityIndex; 132 111 sp->hdr._rsvd = ntohs(whdr._rsvd); 133 112 sp->hdr.serviceId = ntohs(whdr.serviceId); 134 - return 0; 113 + return true; 135 114 } 136 115 137 116 /* ··· 151 130 /* 152 131 * Process packets received on the local endpoint 153 132 */ 154 - static int rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb) 133 + static bool rxrpc_input_packet(struct rxrpc_local *local, struct sk_buff **_skb) 155 134 { 156 135 struct rxrpc_connection *conn; 157 136 struct sockaddr_rxrpc peer_srx; 158 137 struct rxrpc_skb_priv *sp; 159 138 struct rxrpc_peer *peer = NULL; 160 139 struct sk_buff *skb = *_skb; 161 - int ret = 0; 140 + bool ret = false; 162 141 163 142 skb_pull(skb, sizeof(struct udphdr)); 164 143 165 144 sp = rxrpc_skb(skb); 166 145 167 146 /* dig out the RxRPC connection details */ 168 - if (rxrpc_extract_header(sp, skb) < 0) 169 - goto bad_message; 147 + if (!rxrpc_extract_header(sp, skb)) 148 + return just_discard; 170 149 171 150 if (IS_ENABLED(CONFIG_AF_RXRPC_INJECT_LOSS)) { 172 151 static int lose; 173 152 if ((lose++ & 7) == 7) { 174 153 trace_rxrpc_rx_lose(sp); 175 - return 0; 154 + return just_discard; 176 155 } 177 156 } 178 157 ··· 181 160 switch (sp->hdr.type) { 182 161 case RXRPC_PACKET_TYPE_VERSION: 183 162 
if (rxrpc_to_client(sp)) 184 - return 0; 185 - rxrpc_input_version(local, skb); 186 - return 0; 163 + return just_discard; 164 + return rxrpc_input_version(local, skb); 187 165 188 166 case RXRPC_PACKET_TYPE_BUSY: 189 167 if (rxrpc_to_server(sp)) 190 - return 0; 168 + return just_discard; 191 169 fallthrough; 192 170 case RXRPC_PACKET_TYPE_ACK: 193 171 case RXRPC_PACKET_TYPE_ACKALL: 194 172 if (sp->hdr.callNumber == 0) 195 - goto bad_message; 173 + return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call); 196 174 break; 197 175 case RXRPC_PACKET_TYPE_ABORT: 198 176 if (!rxrpc_extract_abort(skb)) 199 - return 0; /* Just discard if malformed */ 177 + return just_discard; /* Just discard if malformed */ 200 178 break; 201 179 202 180 case RXRPC_PACKET_TYPE_DATA: 203 - if (sp->hdr.callNumber == 0 || 204 - sp->hdr.seq == 0) 205 - goto bad_message; 181 + if (sp->hdr.callNumber == 0) 182 + return rxrpc_bad_message(skb, rxrpc_badmsg_zero_call); 183 + if (sp->hdr.seq == 0) 184 + return rxrpc_bad_message(skb, rxrpc_badmsg_zero_seq); 206 185 207 186 /* Unshare the packet so that it can be modified for in-place 208 187 * decryption. ··· 212 191 if (!skb) { 213 192 rxrpc_eaten_skb(*_skb, rxrpc_skb_eaten_by_unshare_nomem); 214 193 *_skb = NULL; 215 - return 0; 194 + return just_discard; 216 195 } 217 196 218 197 if (skb != *_skb) { ··· 226 205 227 206 case RXRPC_PACKET_TYPE_CHALLENGE: 228 207 if (rxrpc_to_server(sp)) 229 - return 0; 208 + return just_discard; 230 209 break; 231 210 case RXRPC_PACKET_TYPE_RESPONSE: 232 211 if (rxrpc_to_client(sp)) 233 - return 0; 212 + return just_discard; 234 213 break; 235 214 236 215 /* Packet types 9-11 should just be ignored. 
*/ 237 216 case RXRPC_PACKET_TYPE_PARAMS: 238 217 case RXRPC_PACKET_TYPE_10: 239 218 case RXRPC_PACKET_TYPE_11: 240 - return 0; 219 + return just_discard; 241 220 242 221 default: 243 - goto bad_message; 222 + return rxrpc_bad_message(skb, rxrpc_badmsg_unsupported_packet); 244 223 } 245 224 246 225 if (sp->hdr.serviceId == 0) 247 - goto bad_message; 226 + return rxrpc_bad_message(skb, rxrpc_badmsg_zero_service); 248 227 249 228 if (WARN_ON_ONCE(rxrpc_extract_addr_from_skb(&peer_srx, skb) < 0)) 250 - return true; /* Unsupported address type - discard. */ 229 + return just_discard; /* Unsupported address type. */ 251 230 252 231 if (peer_srx.transport.family != local->srx.transport.family && 253 232 (peer_srx.transport.family == AF_INET && ··· 255 234 pr_warn_ratelimited("AF_RXRPC: Protocol mismatch %u not %u\n", 256 235 peer_srx.transport.family, 257 236 local->srx.transport.family); 258 - return true; /* Wrong address type - discard. */ 237 + return just_discard; /* Wrong address type. */ 259 238 } 260 239 261 240 if (rxrpc_to_client(sp)) { ··· 263 242 conn = rxrpc_find_client_connection_rcu(local, &peer_srx, skb); 264 243 conn = rxrpc_get_connection_maybe(conn, rxrpc_conn_get_call_input); 265 244 rcu_read_unlock(); 266 - if (!conn) { 267 - trace_rxrpc_abort(0, "NCC", sp->hdr.cid, 268 - sp->hdr.callNumber, sp->hdr.seq, 269 - RXKADINCONSISTENCY, EBADMSG); 270 - goto protocol_error; 271 - } 245 + if (!conn) 246 + return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_conn); 272 247 273 248 ret = rxrpc_input_packet_on_conn(conn, &peer_srx, skb); 274 249 rxrpc_put_connection(conn, rxrpc_conn_put_call_input); ··· 297 280 298 281 ret = rxrpc_new_incoming_call(local, peer, NULL, &peer_srx, skb); 299 282 rxrpc_put_peer(peer, rxrpc_peer_put_input); 300 - if (ret < 0) 301 - goto reject_packet; 302 - return 0; 303 - 304 - bad_message: 305 - trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 306 - RX_PROTOCOL_ERROR, EBADMSG); 307 - protocol_error: 308 - 
skb->priority = RX_PROTOCOL_ERROR; 309 - skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; 310 - reject_packet: 311 - rxrpc_reject_packet(local, skb); 312 - return 0; 283 + return ret; 313 284 } 314 285 315 286 /* ··· 311 306 struct rxrpc_channel *chan; 312 307 struct rxrpc_call *call = NULL; 313 308 unsigned int channel; 309 + bool ret; 314 310 315 311 if (sp->hdr.securityIndex != conn->security_ix) 316 - goto wrong_security; 312 + return rxrpc_direct_abort(skb, rxrpc_eproto_wrong_security, 313 + RXKADINCONSISTENCY, -EBADMSG); 317 314 318 315 if (sp->hdr.serviceId != conn->service_id) { 319 316 int old_id; 320 317 321 318 if (!test_bit(RXRPC_CONN_PROBING_FOR_UPGRADE, &conn->flags)) 322 - goto reupgrade; 319 + return rxrpc_protocol_error(skb, rxrpc_eproto_reupgrade); 320 + 323 321 old_id = cmpxchg(&conn->service_id, conn->orig_service_id, 324 322 sp->hdr.serviceId); 325 - 326 323 if (old_id != conn->orig_service_id && 327 324 old_id != sp->hdr.serviceId) 328 - goto reupgrade; 325 + return rxrpc_protocol_error(skb, rxrpc_eproto_bad_upgrade); 329 326 } 330 327 331 328 if (after(sp->hdr.serial, conn->hi_serial)) ··· 343 336 344 337 /* Ignore really old calls */ 345 338 if (sp->hdr.callNumber < chan->last_call) 346 - return 0; 339 + return just_discard; 347 340 348 341 if (sp->hdr.callNumber == chan->last_call) { 349 342 if (chan->call || 350 343 sp->hdr.type == RXRPC_PACKET_TYPE_ABORT) 351 - return 0; 344 + return just_discard; 352 345 353 346 /* For the previous service call, if completed successfully, we 354 347 * discard all further packets. 355 348 */ 356 349 if (rxrpc_conn_is_service(conn) && 357 350 chan->last_type == RXRPC_PACKET_TYPE_ACK) 358 - return 0; 351 + return just_discard; 359 352 360 353 /* But otherwise we need to retransmit the final packet from 361 354 * data cached in the connection record. 
··· 365 358 sp->hdr.seq, 366 359 sp->hdr.serial, 367 360 sp->hdr.flags); 368 - rxrpc_input_conn_packet(conn, skb); 369 - return 0; 361 + rxrpc_conn_retransmit_call(conn, skb, channel); 362 + return just_discard; 370 363 } 371 364 372 - rcu_read_lock(); 373 - call = rxrpc_try_get_call(rcu_dereference(chan->call), 374 - rxrpc_call_get_input); 375 - rcu_read_unlock(); 365 + call = rxrpc_try_get_call(chan->call, rxrpc_call_get_input); 376 366 377 367 if (sp->hdr.callNumber > chan->call_id) { 378 368 if (rxrpc_to_client(sp)) { 379 369 rxrpc_put_call(call, rxrpc_call_put_input); 380 - goto reject_packet; 370 + return rxrpc_protocol_error(skb, 371 + rxrpc_eproto_unexpected_implicit_end); 381 372 } 382 373 383 374 if (call) { ··· 387 382 388 383 if (!call) { 389 384 if (rxrpc_to_client(sp)) 390 - goto bad_message; 391 - if (rxrpc_new_incoming_call(conn->local, conn->peer, conn, 392 - peer_srx, skb) == 0) 393 - return 0; 394 - goto reject_packet; 385 + return rxrpc_protocol_error(skb, rxrpc_eproto_no_client_call); 386 + return rxrpc_new_incoming_call(conn->local, conn->peer, conn, 387 + peer_srx, skb); 395 388 } 396 389 397 - rxrpc_input_call_event(call, skb); 390 + ret = rxrpc_input_call_event(call, skb); 398 391 rxrpc_put_call(call, rxrpc_call_put_input); 399 - return 0; 400 - 401 - wrong_security: 402 - trace_rxrpc_abort(0, "SEC", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 403 - RXKADINCONSISTENCY, EBADMSG); 404 - skb->priority = RXKADINCONSISTENCY; 405 - goto post_abort; 406 - 407 - reupgrade: 408 - trace_rxrpc_abort(0, "UPG", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 409 - RX_PROTOCOL_ERROR, EBADMSG); 410 - goto protocol_error; 411 - 412 - bad_message: 413 - trace_rxrpc_abort(0, "BAD", sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 414 - RX_PROTOCOL_ERROR, EBADMSG); 415 - protocol_error: 416 - skb->priority = RX_PROTOCOL_ERROR; 417 - post_abort: 418 - skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; 419 - reject_packet: 420 - rxrpc_reject_packet(conn->local, skb); 421 - 
return 0; 392 + return ret; 422 393 } 423 394 424 395 /* ··· 402 421 */ 403 422 int rxrpc_io_thread(void *data) 404 423 { 424 + struct rxrpc_connection *conn; 405 425 struct sk_buff_head rx_queue; 406 426 struct rxrpc_local *local = data; 407 427 struct rxrpc_call *call; ··· 418 436 for (;;) { 419 437 rxrpc_inc_stat(local->rxnet, stat_io_loop); 420 438 439 + /* Deal with connections that want immediate attention. */ 440 + conn = list_first_entry_or_null(&local->conn_attend_q, 441 + struct rxrpc_connection, 442 + attend_link); 443 + if (conn) { 444 + spin_lock_bh(&local->lock); 445 + list_del_init(&conn->attend_link); 446 + spin_unlock_bh(&local->lock); 447 + 448 + rxrpc_input_conn_event(conn, NULL); 449 + rxrpc_put_connection(conn, rxrpc_conn_put_poke); 450 + continue; 451 + } 452 + 453 + if (test_and_clear_bit(RXRPC_CLIENT_CONN_REAP_TIMER, 454 + &local->client_conn_flags)) 455 + rxrpc_discard_expired_client_conns(local); 456 + 421 457 /* Deal with calls that want immediate attention. */ 422 458 if ((call = list_first_entry_or_null(&local->call_attend_q, 423 459 struct rxrpc_call, ··· 450 450 continue; 451 451 } 452 452 453 + if (!list_empty(&local->new_client_calls)) 454 + rxrpc_connect_client_calls(local); 455 + 453 456 /* Process received packets and errors. 
*/ 454 457 if ((skb = __skb_dequeue(&rx_queue))) { 458 + struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 455 459 switch (skb->mark) { 456 460 case RXRPC_SKB_MARK_PACKET: 457 461 skb->priority = 0; 458 - rxrpc_input_packet(local, &skb); 462 + if (!rxrpc_input_packet(local, &skb)) 463 + rxrpc_reject_packet(local, skb); 459 464 trace_rxrpc_rx_done(skb->mark, skb->priority); 460 465 rxrpc_free_skb(skb, rxrpc_skb_put_input); 461 466 break; 462 467 case RXRPC_SKB_MARK_ERROR: 463 468 rxrpc_input_error(local, skb); 464 469 rxrpc_free_skb(skb, rxrpc_skb_put_error_report); 470 + break; 471 + case RXRPC_SKB_MARK_SERVICE_CONN_SECURED: 472 + rxrpc_input_conn_event(sp->conn, skb); 473 + rxrpc_put_connection(sp->conn, rxrpc_conn_put_poke); 474 + rxrpc_free_skb(skb, rxrpc_skb_put_conn_secured); 465 475 break; 466 476 default: 467 477 WARN_ON_ONCE(1); ··· 491 481 set_current_state(TASK_INTERRUPTIBLE); 492 482 should_stop = kthread_should_stop(); 493 483 if (!skb_queue_empty(&local->rx_queue) || 494 - !list_empty(&local->call_attend_q)) { 484 + !list_empty(&local->call_attend_q) || 485 + !list_empty(&local->conn_attend_q) || 486 + !list_empty(&local->new_client_calls) || 487 + test_bit(RXRPC_CLIENT_CONN_REAP_TIMER, 488 + &local->client_conn_flags)) { 495 489 __set_current_state(TASK_RUNNING); 496 490 continue; 497 491 }
+32 -3
net/rxrpc/local_object.c
··· 82 82 } 83 83 } 84 84 85 + static void rxrpc_client_conn_reap_timeout(struct timer_list *timer) 86 + { 87 + struct rxrpc_local *local = 88 + container_of(timer, struct rxrpc_local, client_conn_reap_timer); 89 + 90 + if (local->kill_all_client_conns && 91 + test_and_set_bit(RXRPC_CLIENT_CONN_REAP_TIMER, &local->client_conn_flags)) 92 + rxrpc_wake_up_io_thread(local); 93 + } 94 + 85 95 /* 86 96 * Allocate a new local endpoint. 87 97 */ 88 - static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet, 98 + static struct rxrpc_local *rxrpc_alloc_local(struct net *net, 89 99 const struct sockaddr_rxrpc *srx) 90 100 { 91 101 struct rxrpc_local *local; 102 + u32 tmp; 92 103 93 104 local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL); 94 105 if (local) { 95 106 refcount_set(&local->ref, 1); 96 107 atomic_set(&local->active_users, 1); 97 - local->rxnet = rxnet; 108 + local->net = net; 109 + local->rxnet = rxrpc_net(net); 98 110 INIT_HLIST_NODE(&local->link); 99 111 init_rwsem(&local->defrag_sem); 100 112 init_completion(&local->io_thread_ready); 101 113 skb_queue_head_init(&local->rx_queue); 114 + INIT_LIST_HEAD(&local->conn_attend_q); 102 115 INIT_LIST_HEAD(&local->call_attend_q); 116 + 103 117 local->client_bundles = RB_ROOT; 104 118 spin_lock_init(&local->client_bundles_lock); 119 + local->kill_all_client_conns = false; 120 + INIT_LIST_HEAD(&local->idle_client_conns); 121 + timer_setup(&local->client_conn_reap_timer, 122 + rxrpc_client_conn_reap_timeout, 0); 123 + 105 124 spin_lock_init(&local->lock); 106 125 rwlock_init(&local->services_lock); 107 126 local->debug_id = atomic_inc_return(&rxrpc_debug_id); 108 127 memcpy(&local->srx, srx, sizeof(*srx)); 109 128 local->srx.srx_service = 0; 129 + idr_init(&local->conn_ids); 130 + get_random_bytes(&tmp, sizeof(tmp)); 131 + tmp &= 0x3fffffff; 132 + if (tmp == 0) 133 + tmp = 1; 134 + idr_set_cursor(&local->conn_ids, tmp); 135 + INIT_LIST_HEAD(&local->new_client_calls); 136 + 
spin_lock_init(&local->client_call_lock); 137 + 110 138 trace_rxrpc_local(local->debug_id, rxrpc_local_new, 1, 1); 111 139 } 112 140 ··· 276 248 goto found; 277 249 } 278 250 279 - local = rxrpc_alloc_local(rxnet, srx); 251 + local = rxrpc_alloc_local(net, srx); 280 252 if (!local) 281 253 goto nomem; 282 254 ··· 435 407 * local endpoint. 436 408 */ 437 409 rxrpc_purge_queue(&local->rx_queue); 410 + rxrpc_purge_client_connections(local); 438 411 } 439 412 440 413 /*
-17
net/rxrpc/net_ns.c
··· 10 10 11 11 unsigned int rxrpc_net_id; 12 12 13 - static void rxrpc_client_conn_reap_timeout(struct timer_list *timer) 14 - { 15 - struct rxrpc_net *rxnet = 16 - container_of(timer, struct rxrpc_net, client_conn_reap_timer); 17 - 18 - if (rxnet->live) 19 - rxrpc_queue_work(&rxnet->client_conn_reaper); 20 - } 21 - 22 13 static void rxrpc_service_conn_reap_timeout(struct timer_list *timer) 23 14 { 24 15 struct rxrpc_net *rxnet = ··· 54 63 rxrpc_service_conn_reap_timeout, 0); 55 64 56 65 atomic_set(&rxnet->nr_client_conns, 0); 57 - rxnet->kill_all_client_conns = false; 58 - spin_lock_init(&rxnet->client_conn_cache_lock); 59 - mutex_init(&rxnet->client_conn_discard_lock); 60 - INIT_LIST_HEAD(&rxnet->idle_client_conns); 61 - INIT_WORK(&rxnet->client_conn_reaper, 62 - rxrpc_discard_expired_client_conns); 63 - timer_setup(&rxnet->client_conn_reap_timer, 64 - rxrpc_client_conn_reap_timeout, 0); 65 66 66 67 INIT_HLIST_HEAD(&rxnet->local_endpoints); 67 68 mutex_init(&rxnet->local_mutex);
+58 -2
net/rxrpc/output.c
··· 261 261 rxrpc_tx_point_call_ack); 262 262 rxrpc_tx_backoff(call, ret); 263 263 264 - if (call->state < RXRPC_CALL_COMPLETE) { 264 + if (!__rxrpc_call_is_complete(call)) { 265 265 if (ret < 0) 266 266 rxrpc_cancel_rtt_probe(call, serial, rtt_slot); 267 267 rxrpc_set_keepalive(call); ··· 545 545 } 546 546 547 547 /* 548 + * Transmit a connection-level abort. 549 + */ 550 + void rxrpc_send_conn_abort(struct rxrpc_connection *conn) 551 + { 552 + struct rxrpc_wire_header whdr; 553 + struct msghdr msg; 554 + struct kvec iov[2]; 555 + __be32 word; 556 + size_t len; 557 + u32 serial; 558 + int ret; 559 + 560 + msg.msg_name = &conn->peer->srx.transport; 561 + msg.msg_namelen = conn->peer->srx.transport_len; 562 + msg.msg_control = NULL; 563 + msg.msg_controllen = 0; 564 + msg.msg_flags = 0; 565 + 566 + whdr.epoch = htonl(conn->proto.epoch); 567 + whdr.cid = htonl(conn->proto.cid); 568 + whdr.callNumber = 0; 569 + whdr.seq = 0; 570 + whdr.type = RXRPC_PACKET_TYPE_ABORT; 571 + whdr.flags = conn->out_clientflag; 572 + whdr.userStatus = 0; 573 + whdr.securityIndex = conn->security_ix; 574 + whdr._rsvd = 0; 575 + whdr.serviceId = htons(conn->service_id); 576 + 577 + word = htonl(conn->abort_code); 578 + 579 + iov[0].iov_base = &whdr; 580 + iov[0].iov_len = sizeof(whdr); 581 + iov[1].iov_base = &word; 582 + iov[1].iov_len = sizeof(word); 583 + 584 + len = iov[0].iov_len + iov[1].iov_len; 585 + 586 + serial = atomic_inc_return(&conn->serial); 587 + whdr.serial = htonl(serial); 588 + 589 + iov_iter_kvec(&msg.msg_iter, WRITE, iov, 2, len); 590 + ret = do_udp_sendmsg(conn->local->socket, &msg, len); 591 + if (ret < 0) { 592 + trace_rxrpc_tx_fail(conn->debug_id, serial, ret, 593 + rxrpc_tx_point_conn_abort); 594 + _debug("sendmsg failed: %d", ret); 595 + return; 596 + } 597 + 598 + trace_rxrpc_tx_packet(conn->debug_id, &whdr, rxrpc_tx_point_conn_abort); 599 + 600 + conn->peer->last_tx_at = ktime_get_seconds(); 601 + } 602 + 603 + /* 548 604 * Reject a packet through the local 
endpoint. 549 605 */ 550 606 void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb) ··· 723 667 static inline void rxrpc_instant_resend(struct rxrpc_call *call, 724 668 struct rxrpc_txbuf *txb) 725 669 { 726 - if (call->state < RXRPC_CALL_COMPLETE) 670 + if (!__rxrpc_call_is_complete(call)) 727 671 kdebug("resend"); 728 672 } 729 673
+10 -13
net/rxrpc/peer_object.c
··· 147 147 * assess the MTU size for the network interface through which this peer is 148 148 * reached 149 149 */ 150 - static void rxrpc_assess_MTU_size(struct rxrpc_sock *rx, 150 + static void rxrpc_assess_MTU_size(struct rxrpc_local *local, 151 151 struct rxrpc_peer *peer) 152 152 { 153 - struct net *net = sock_net(&rx->sk); 153 + struct net *net = local->net; 154 154 struct dst_entry *dst; 155 155 struct rtable *rt; 156 156 struct flowi fl; ··· 236 236 /* 237 237 * Initialise peer record. 238 238 */ 239 - static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer, 239 + static void rxrpc_init_peer(struct rxrpc_local *local, struct rxrpc_peer *peer, 240 240 unsigned long hash_key) 241 241 { 242 242 peer->hash_key = hash_key; 243 - rxrpc_assess_MTU_size(rx, peer); 243 + rxrpc_assess_MTU_size(local, peer); 244 244 peer->mtu = peer->if_mtu; 245 245 peer->rtt_last_req = ktime_get_real(); 246 246 ··· 272 272 /* 273 273 * Set up a new peer. 274 274 */ 275 - static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_sock *rx, 276 - struct rxrpc_local *local, 275 + static struct rxrpc_peer *rxrpc_create_peer(struct rxrpc_local *local, 277 276 struct sockaddr_rxrpc *srx, 278 277 unsigned long hash_key, 279 278 gfp_t gfp) ··· 284 285 peer = rxrpc_alloc_peer(local, gfp, rxrpc_peer_new_client); 285 286 if (peer) { 286 287 memcpy(&peer->srx, srx, sizeof(*srx)); 287 - rxrpc_init_peer(rx, peer, hash_key); 288 + rxrpc_init_peer(local, peer, hash_key); 288 289 } 289 290 290 291 _leave(" = %p", peer); ··· 303 304 * since we've already done a search in the list from the non-reentrant context 304 305 * (the data_ready handler) that is the only place we can add new peers. 
305 306 */ 306 - void rxrpc_new_incoming_peer(struct rxrpc_sock *rx, struct rxrpc_local *local, 307 - struct rxrpc_peer *peer) 307 + void rxrpc_new_incoming_peer(struct rxrpc_local *local, struct rxrpc_peer *peer) 308 308 { 309 309 struct rxrpc_net *rxnet = local->rxnet; 310 310 unsigned long hash_key; 311 311 312 312 hash_key = rxrpc_peer_hash_key(local, &peer->srx); 313 - rxrpc_init_peer(rx, peer, hash_key); 313 + rxrpc_init_peer(local, peer, hash_key); 314 314 315 315 spin_lock(&rxnet->peer_hash_lock); 316 316 hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key); ··· 320 322 /* 321 323 * obtain a remote transport endpoint for the specified address 322 324 */ 323 - struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_sock *rx, 324 - struct rxrpc_local *local, 325 + struct rxrpc_peer *rxrpc_lookup_peer(struct rxrpc_local *local, 325 326 struct sockaddr_rxrpc *srx, gfp_t gfp) 326 327 { 327 328 struct rxrpc_peer *peer, *candidate; ··· 340 343 /* The peer is not yet present in hash - create a candidate 341 344 * for a new record and then redo the search. 342 345 */ 343 - candidate = rxrpc_create_peer(rx, local, srx, hash_key, gfp); 346 + candidate = rxrpc_create_peer(local, srx, hash_key, gfp); 344 347 if (!candidate) { 345 348 _leave(" = NULL [nomem]"); 346 349 return NULL;
+11 -6
net/rxrpc/proc.c
··· 12 12 13 13 static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = { 14 14 [RXRPC_CONN_UNUSED] = "Unused ", 15 + [RXRPC_CONN_CLIENT_UNSECURED] = "ClUnsec ", 15 16 [RXRPC_CONN_CLIENT] = "Client ", 16 17 [RXRPC_CONN_SERVICE_PREALLOC] = "SvPrealc", 17 18 [RXRPC_CONN_SERVICE_UNSECURED] = "SvUnsec ", 18 19 [RXRPC_CONN_SERVICE_CHALLENGING] = "SvChall ", 19 20 [RXRPC_CONN_SERVICE] = "SvSecure", 20 - [RXRPC_CONN_REMOTELY_ABORTED] = "RmtAbort", 21 - [RXRPC_CONN_LOCALLY_ABORTED] = "LocAbort", 21 + [RXRPC_CONN_ABORTED] = "Aborted ", 22 22 }; 23 23 24 24 /* ··· 51 51 struct rxrpc_local *local; 52 52 struct rxrpc_call *call; 53 53 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 54 + enum rxrpc_call_state state; 54 55 unsigned long timeout = 0; 55 56 rxrpc_seq_t acks_hard_ack; 56 57 char lbuff[50], rbuff[50]; ··· 76 75 77 76 sprintf(rbuff, "%pISpc", &call->dest_srx.transport); 78 77 79 - if (call->state != RXRPC_CALL_SERVER_PREALLOC) { 78 + state = rxrpc_call_state(call); 79 + if (state != RXRPC_CALL_SERVER_PREALLOC) { 80 80 timeout = READ_ONCE(call->expect_rx_by); 81 81 timeout -= jiffies; 82 82 } ··· 94 92 call->call_id, 95 93 rxrpc_is_service_call(call) ? "Svc" : "Clt", 96 94 refcount_read(&call->ref), 97 - rxrpc_call_states[call->state], 95 + rxrpc_call_states[state], 98 96 call->abort_code, 99 97 call->debug_id, 100 98 acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack, ··· 145 143 { 146 144 struct rxrpc_connection *conn; 147 145 struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq)); 146 + const char *state; 148 147 char lbuff[50], rbuff[50]; 149 148 150 149 if (v == &rxnet->conn_proc_list) { ··· 166 163 } 167 164 168 165 sprintf(lbuff, "%pISpc", &conn->local->srx.transport); 169 - 170 166 sprintf(rbuff, "%pISpc", &conn->peer->srx.transport); 171 167 print: 168 + state = rxrpc_is_conn_aborted(conn) ? 
169 + rxrpc_call_completions[conn->completion] : 170 + rxrpc_conn_states[conn->state]; 172 171 seq_printf(seq, 173 172 "UDP %-47.47s %-47.47s %4x %08x %s %3u %3d" 174 173 " %s %08x %08x %08x %08x %08x %08x %08x\n", ··· 181 176 rxrpc_conn_is_service(conn) ? "Svc" : "Clt", 182 177 refcount_read(&conn->ref), 183 178 atomic_read(&conn->active), 184 - rxrpc_conn_states[conn->state], 179 + state, 185 180 key_serial(conn->key), 186 181 atomic_read(&conn->serial), 187 182 conn->hi_serial,
+69 -187
net/rxrpc/recvmsg.c
··· 59 59 } 60 60 61 61 /* 62 - * Transition a call to the complete state. 63 - */ 64 - bool __rxrpc_set_call_completion(struct rxrpc_call *call, 65 - enum rxrpc_call_completion compl, 66 - u32 abort_code, 67 - int error) 68 - { 69 - if (call->state < RXRPC_CALL_COMPLETE) { 70 - call->abort_code = abort_code; 71 - call->error = error; 72 - call->completion = compl; 73 - call->state = RXRPC_CALL_COMPLETE; 74 - trace_rxrpc_call_complete(call); 75 - wake_up(&call->waitq); 76 - rxrpc_notify_socket(call); 77 - return true; 78 - } 79 - return false; 80 - } 81 - 82 - bool rxrpc_set_call_completion(struct rxrpc_call *call, 83 - enum rxrpc_call_completion compl, 84 - u32 abort_code, 85 - int error) 86 - { 87 - bool ret = false; 88 - 89 - if (call->state < RXRPC_CALL_COMPLETE) { 90 - write_lock(&call->state_lock); 91 - ret = __rxrpc_set_call_completion(call, compl, abort_code, error); 92 - write_unlock(&call->state_lock); 93 - } 94 - return ret; 95 - } 96 - 97 - /* 98 - * Record that a call successfully completed. 99 - */ 100 - bool __rxrpc_call_completed(struct rxrpc_call *call) 101 - { 102 - return __rxrpc_set_call_completion(call, RXRPC_CALL_SUCCEEDED, 0, 0); 103 - } 104 - 105 - bool rxrpc_call_completed(struct rxrpc_call *call) 106 - { 107 - bool ret = false; 108 - 109 - if (call->state < RXRPC_CALL_COMPLETE) { 110 - write_lock(&call->state_lock); 111 - ret = __rxrpc_call_completed(call); 112 - write_unlock(&call->state_lock); 113 - } 114 - return ret; 115 - } 116 - 117 - /* 118 - * Record that a call is locally aborted. 
119 - */ 120 - bool __rxrpc_abort_call(const char *why, struct rxrpc_call *call, 121 - rxrpc_seq_t seq, u32 abort_code, int error) 122 - { 123 - trace_rxrpc_abort(call->debug_id, why, call->cid, call->call_id, seq, 124 - abort_code, error); 125 - return __rxrpc_set_call_completion(call, RXRPC_CALL_LOCALLY_ABORTED, 126 - abort_code, error); 127 - } 128 - 129 - bool rxrpc_abort_call(const char *why, struct rxrpc_call *call, 130 - rxrpc_seq_t seq, u32 abort_code, int error) 131 - { 132 - bool ret; 133 - 134 - write_lock(&call->state_lock); 135 - ret = __rxrpc_abort_call(why, call, seq, abort_code, error); 136 - write_unlock(&call->state_lock); 137 - return ret; 138 - } 139 - 140 - /* 141 62 * Pass a call terminating message to userspace. 142 63 */ 143 64 static int rxrpc_recvmsg_term(struct rxrpc_call *call, struct msghdr *msg) ··· 89 168 ret = put_cmsg(msg, SOL_RXRPC, RXRPC_LOCAL_ERROR, 4, &tmp); 90 169 break; 91 170 default: 92 - pr_err("Invalid terminal call state %u\n", call->state); 171 + pr_err("Invalid terminal call state %u\n", call->completion); 93 172 BUG(); 94 173 break; 95 174 } ··· 98 177 lower_32_bits(atomic64_read(&call->ackr_window)) - 1, 99 178 call->rx_pkt_offset, call->rx_pkt_len, ret); 100 179 return ret; 101 - } 102 - 103 - /* 104 - * End the packet reception phase. 
105 - */ 106 - static void rxrpc_end_rx_phase(struct rxrpc_call *call, rxrpc_serial_t serial) 107 - { 108 - rxrpc_seq_t whigh = READ_ONCE(call->rx_highest_seq); 109 - 110 - _enter("%d,%s", call->debug_id, rxrpc_call_states[call->state]); 111 - 112 - trace_rxrpc_receive(call, rxrpc_receive_end, 0, whigh); 113 - 114 - if (call->state == RXRPC_CALL_CLIENT_RECV_REPLY) 115 - rxrpc_propose_delay_ACK(call, serial, rxrpc_propose_ack_terminal_ack); 116 - 117 - write_lock(&call->state_lock); 118 - 119 - switch (call->state) { 120 - case RXRPC_CALL_CLIENT_RECV_REPLY: 121 - __rxrpc_call_completed(call); 122 - write_unlock(&call->state_lock); 123 - break; 124 - 125 - case RXRPC_CALL_SERVER_RECV_REQUEST: 126 - call->state = RXRPC_CALL_SERVER_ACK_REQUEST; 127 - call->expect_req_by = jiffies + MAX_JIFFY_OFFSET; 128 - write_unlock(&call->state_lock); 129 - rxrpc_propose_delay_ACK(call, serial, 130 - rxrpc_propose_ack_processing_op); 131 - break; 132 - default: 133 - write_unlock(&call->state_lock); 134 - break; 135 - } 136 180 } 137 181 138 182 /* ··· 130 244 131 245 trace_rxrpc_receive(call, last ? rxrpc_receive_rotate_last : rxrpc_receive_rotate, 132 246 serial, call->rx_consumed); 133 - if (last) { 134 - rxrpc_end_rx_phase(call, serial); 135 - return; 136 - } 247 + 248 + if (last) 249 + set_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags); 137 250 138 251 /* Check to see if there's an ACK that needs sending. */ 139 252 acked = atomic_add_return(call->rx_consumed - old_consumed, ··· 157 272 /* 158 273 * Deliver messages to a call. This keeps processing packets until the buffer 159 274 * is filled and we find either more DATA (returns 0) or the end of the DATA 160 - * (returns 1). If more packets are required, it returns -EAGAIN. 275 + * (returns 1). If more packets are required, it returns -EAGAIN and if the 276 + * call has failed it returns -EIO. 
161 277 */ 162 278 static int rxrpc_recvmsg_data(struct socket *sock, struct rxrpc_call *call, 163 279 struct msghdr *msg, struct iov_iter *iter, ··· 174 288 rx_pkt_offset = call->rx_pkt_offset; 175 289 rx_pkt_len = call->rx_pkt_len; 176 290 177 - if (call->state >= RXRPC_CALL_SERVER_ACK_REQUEST) { 291 + if (rxrpc_call_has_failed(call)) { 292 + seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1; 293 + ret = -EIO; 294 + goto done; 295 + } 296 + 297 + if (test_bit(RXRPC_CALL_RECVMSG_READ_ALL, &call->flags)) { 178 298 seq = lower_32_bits(atomic64_read(&call->ackr_window)) - 1; 179 299 ret = 1; 180 300 goto done; ··· 204 312 205 313 if (rx_pkt_offset == 0) { 206 314 ret2 = rxrpc_verify_data(call, skb); 207 - rx_pkt_offset = sp->offset; 208 - rx_pkt_len = sp->len; 209 315 trace_rxrpc_recvdata(call, rxrpc_recvmsg_next, seq, 210 - rx_pkt_offset, rx_pkt_len, ret2); 316 + sp->offset, sp->len, ret2); 211 317 if (ret2 < 0) { 318 + kdebug("verify = %d", ret2); 212 319 ret = ret2; 213 320 goto out; 214 321 } 322 + rx_pkt_offset = sp->offset; 323 + rx_pkt_len = sp->len; 215 324 } else { 216 325 trace_rxrpc_recvdata(call, rxrpc_recvmsg_cont, seq, 217 326 rx_pkt_offset, rx_pkt_len, 0); ··· 387 494 msg->msg_namelen = len; 388 495 } 389 496 390 - switch (READ_ONCE(call->state)) { 391 - case RXRPC_CALL_CLIENT_RECV_REPLY: 392 - case RXRPC_CALL_SERVER_RECV_REQUEST: 393 - case RXRPC_CALL_SERVER_ACK_REQUEST: 394 - ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len, 395 - flags, &copied); 396 - if (ret == -EAGAIN) 397 - ret = 0; 398 - 399 - if (!skb_queue_empty(&call->recvmsg_queue)) 400 - rxrpc_notify_socket(call); 401 - break; 402 - default: 497 + ret = rxrpc_recvmsg_data(sock, call, msg, &msg->msg_iter, len, 498 + flags, &copied); 499 + if (ret == -EAGAIN) 403 500 ret = 0; 404 - break; 405 - } 406 - 501 + if (ret == -EIO) 502 + goto call_failed; 407 503 if (ret < 0) 408 504 goto error_unlock_call; 409 505 410 - if (call->state == RXRPC_CALL_COMPLETE) { 411 - ret = 
rxrpc_recvmsg_term(call, msg); 412 - if (ret < 0) 413 - goto error_unlock_call; 414 - if (!(flags & MSG_PEEK)) 415 - rxrpc_release_call(rx, call); 416 - msg->msg_flags |= MSG_EOR; 417 - ret = 1; 418 - } 506 + if (rxrpc_call_is_complete(call) && 507 + skb_queue_empty(&call->recvmsg_queue)) 508 + goto call_complete; 509 + if (rxrpc_call_has_failed(call)) 510 + goto call_failed; 419 511 512 + rxrpc_notify_socket(call); 513 + goto not_yet_complete; 514 + 515 + call_failed: 516 + rxrpc_purge_queue(&call->recvmsg_queue); 517 + call_complete: 518 + ret = rxrpc_recvmsg_term(call, msg); 519 + if (ret < 0) 520 + goto error_unlock_call; 521 + if (!(flags & MSG_PEEK)) 522 + rxrpc_release_call(rx, call); 523 + msg->msg_flags |= MSG_EOR; 524 + ret = 1; 525 + 526 + not_yet_complete: 420 527 if (ret == 0) 421 528 msg->msg_flags |= MSG_MORE; 422 529 else ··· 479 586 size_t offset = 0; 480 587 int ret; 481 588 482 - _enter("{%d,%s},%zu,%d", 483 - call->debug_id, rxrpc_call_states[call->state], 484 - *_len, want_more); 485 - 486 - ASSERTCMP(call->state, !=, RXRPC_CALL_SERVER_SECURING); 589 + _enter("{%d},%zu,%d", call->debug_id, *_len, want_more); 487 590 488 591 mutex_lock(&call->user_mutex); 489 592 490 - switch (READ_ONCE(call->state)) { 491 - case RXRPC_CALL_CLIENT_RECV_REPLY: 492 - case RXRPC_CALL_SERVER_RECV_REQUEST: 493 - case RXRPC_CALL_SERVER_ACK_REQUEST: 494 - ret = rxrpc_recvmsg_data(sock, call, NULL, iter, 495 - *_len, 0, &offset); 496 - *_len -= offset; 497 - if (ret < 0) 498 - goto out; 499 - 500 - /* We can only reach here with a partially full buffer if we 501 - * have reached the end of the data. We must otherwise have a 502 - * full buffer or have been given -EAGAIN. 
503 - */ 504 - if (ret == 1) { 505 - if (iov_iter_count(iter) > 0) 506 - goto short_data; 507 - if (!want_more) 508 - goto read_phase_complete; 509 - ret = 0; 510 - goto out; 511 - } 512 - 513 - if (!want_more) 514 - goto excess_data; 593 + ret = rxrpc_recvmsg_data(sock, call, NULL, iter, *_len, 0, &offset); 594 + *_len -= offset; 595 + if (ret == -EIO) 596 + goto call_failed; 597 + if (ret < 0) 515 598 goto out; 516 599 517 - case RXRPC_CALL_COMPLETE: 518 - goto call_complete; 519 - 520 - default: 521 - ret = -EINPROGRESS; 600 + /* We can only reach here with a partially full buffer if we have 601 + * reached the end of the data. We must otherwise have a full buffer 602 + * or have been given -EAGAIN. 603 + */ 604 + if (ret == 1) { 605 + if (iov_iter_count(iter) > 0) 606 + goto short_data; 607 + if (!want_more) 608 + goto read_phase_complete; 609 + ret = 0; 522 610 goto out; 523 611 } 612 + 613 + if (!want_more) 614 + goto excess_data; 615 + goto out; 524 616 525 617 read_phase_complete: 526 618 ret = 1; ··· 517 639 return ret; 518 640 519 641 short_data: 520 - trace_rxrpc_rx_eproto(call, 0, tracepoint_string("short_data")); 642 + trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_short_data, 643 + call->cid, call->call_id, call->rx_consumed, 644 + 0, -EBADMSG); 521 645 ret = -EBADMSG; 522 646 goto out; 523 647 excess_data: 524 - trace_rxrpc_rx_eproto(call, 0, tracepoint_string("excess_data")); 648 + trace_rxrpc_abort(call->debug_id, rxrpc_recvmsg_excess_data, 649 + call->cid, call->call_id, call->rx_consumed, 650 + 0, -EMSGSIZE); 525 651 ret = -EMSGSIZE; 526 652 goto out; 527 - call_complete: 653 + call_failed: 528 654 *_abort = call->abort_code; 529 655 ret = call->error; 530 656 if (call->completion == RXRPC_CALL_SUCCEEDED) {
+141 -211
net/rxrpc/rxkad.c
··· 411 411 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 412 412 struct rxrpc_crypt iv; 413 413 struct scatterlist sg[16]; 414 - bool aborted; 415 414 u32 data_size, buf; 416 415 u16 check; 417 416 int ret; 418 417 419 418 _enter(""); 420 419 421 - if (sp->len < 8) { 422 - aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_hdr", "V1H", 423 - RXKADSEALEDINCON); 424 - goto protocol_error; 425 - } 420 + if (sp->len < 8) 421 + return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON, 422 + rxkad_abort_1_short_header); 426 423 427 424 /* Decrypt the skbuff in-place. TODO: We really want to decrypt 428 425 * directly into the target buffer. ··· 439 442 skcipher_request_zero(req); 440 443 441 444 /* Extract the decrypted packet length */ 442 - if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) { 443 - aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_len", "XV1", 444 - RXKADDATALEN); 445 - goto protocol_error; 446 - } 445 + if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) 446 + return rxrpc_abort_eproto(call, skb, RXKADDATALEN, 447 + rxkad_abort_1_short_encdata); 447 448 sp->offset += sizeof(sechdr); 448 449 sp->len -= sizeof(sechdr); 449 450 ··· 451 456 check = buf >> 16; 452 457 check ^= seq ^ call->call_id; 453 458 check &= 0xffff; 454 - if (check != 0) { 455 - aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_check", "V1C", 456 - RXKADSEALEDINCON); 457 - goto protocol_error; 458 - } 459 - 460 - if (data_size > sp->len) { 461 - aborted = rxrpc_abort_eproto(call, skb, "rxkad_1_datalen", "V1L", 462 - RXKADDATALEN); 463 - goto protocol_error; 464 - } 459 + if (check != 0) 460 + return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON, 461 + rxkad_abort_1_short_check); 462 + if (data_size > sp->len) 463 + return rxrpc_abort_eproto(call, skb, RXKADDATALEN, 464 + rxkad_abort_1_short_data); 465 465 sp->len = data_size; 466 466 467 467 _leave(" = 0 [dlen=%x]", data_size); 468 468 return 0; 469 - 470 - protocol_error: 471 - if (aborted) 472 - 
rxrpc_send_abort_packet(call); 473 - return -EPROTO; 474 469 } 475 470 476 471 /* ··· 475 490 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 476 491 struct rxrpc_crypt iv; 477 492 struct scatterlist _sg[4], *sg; 478 - bool aborted; 479 493 u32 data_size, buf; 480 494 u16 check; 481 495 int nsg, ret; 482 496 483 497 _enter(",{%d}", sp->len); 484 498 485 - if (sp->len < 8) { 486 - aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_hdr", "V2H", 487 - RXKADSEALEDINCON); 488 - goto protocol_error; 489 - } 499 + if (sp->len < 8) 500 + return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON, 501 + rxkad_abort_2_short_header); 490 502 491 503 /* Decrypt the skbuff in-place. TODO: We really want to decrypt 492 504 * directly into the target buffer. ··· 495 513 } else { 496 514 sg = kmalloc_array(nsg, sizeof(*sg), GFP_NOIO); 497 515 if (!sg) 498 - goto nomem; 516 + return -ENOMEM; 499 517 } 500 518 501 519 sg_init_table(sg, nsg); ··· 519 537 kfree(sg); 520 538 521 539 /* Extract the decrypted packet length */ 522 - if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) { 523 - aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_len", "XV2", 524 - RXKADDATALEN); 525 - goto protocol_error; 526 - } 540 + if (skb_copy_bits(skb, sp->offset, &sechdr, sizeof(sechdr)) < 0) 541 + return rxrpc_abort_eproto(call, skb, RXKADDATALEN, 542 + rxkad_abort_2_short_len); 527 543 sp->offset += sizeof(sechdr); 528 544 sp->len -= sizeof(sechdr); 529 545 ··· 531 551 check = buf >> 16; 532 552 check ^= seq ^ call->call_id; 533 553 check &= 0xffff; 534 - if (check != 0) { 535 - aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_check", "V2C", 536 - RXKADSEALEDINCON); 537 - goto protocol_error; 538 - } 554 + if (check != 0) 555 + return rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON, 556 + rxkad_abort_2_short_check); 539 557 540 - if (data_size > sp->len) { 541 - aborted = rxrpc_abort_eproto(call, skb, "rxkad_2_datalen", "V2L", 542 - RXKADDATALEN); 543 - goto protocol_error; 544 - } 558 + if (data_size 
> sp->len) 559 + return rxrpc_abort_eproto(call, skb, RXKADDATALEN, 560 + rxkad_abort_2_short_data); 545 561 546 562 sp->len = data_size; 547 563 _leave(" = 0 [dlen=%x]", data_size); 548 564 return 0; 549 - 550 - protocol_error: 551 - if (aborted) 552 - rxrpc_send_abort_packet(call); 553 - return -EPROTO; 554 - 555 - nomem: 556 - _leave(" = -ENOMEM"); 557 - return -ENOMEM; 558 565 } 559 566 560 567 /* ··· 557 590 __be32 buf[2]; 558 591 } crypto __aligned(8); 559 592 rxrpc_seq_t seq = sp->hdr.seq; 560 - bool aborted; 561 593 int ret; 562 594 u16 cksum; 563 595 u32 x, y; ··· 593 627 cksum = 1; /* zero checksums are not permitted */ 594 628 595 629 if (cksum != sp->hdr.cksum) { 596 - aborted = rxrpc_abort_eproto(call, skb, "rxkad_csum", "VCK", 597 - RXKADSEALEDINCON); 598 - goto protocol_error; 630 + ret = rxrpc_abort_eproto(call, skb, RXKADSEALEDINCON, 631 + rxkad_abort_bad_checksum); 632 + goto out; 599 633 } 600 634 601 635 switch (call->conn->security_level) { ··· 613 647 break; 614 648 } 615 649 650 + out: 616 651 skcipher_request_free(req); 617 652 return ret; 618 - 619 - protocol_error: 620 - if (aborted) 621 - rxrpc_send_abort_packet(call); 622 - return -EPROTO; 623 653 } 624 654 625 655 /* ··· 783 821 * respond to a challenge packet 784 822 */ 785 823 static int rxkad_respond_to_challenge(struct rxrpc_connection *conn, 786 - struct sk_buff *skb, 787 - u32 *_abort_code) 824 + struct sk_buff *skb) 788 825 { 789 826 const struct rxrpc_key_token *token; 790 827 struct rxkad_challenge challenge; 791 828 struct rxkad_response *resp; 792 829 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 793 - const char *eproto; 794 - u32 version, nonce, min_level, abort_code; 795 - int ret; 830 + u32 version, nonce, min_level; 831 + int ret = -EPROTO; 796 832 797 833 _enter("{%d,%x}", conn->debug_id, key_serial(conn->key)); 798 834 799 - eproto = tracepoint_string("chall_no_key"); 800 - abort_code = RX_PROTOCOL_ERROR; 801 835 if (!conn->key) 802 - goto protocol_error; 836 + return 
rxrpc_abort_conn(conn, skb, RX_PROTOCOL_ERROR, -EPROTO, 837 + rxkad_abort_chall_no_key); 803 838 804 - abort_code = RXKADEXPIRED; 805 839 ret = key_validate(conn->key); 806 840 if (ret < 0) 807 - goto other_error; 841 + return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret, 842 + rxkad_abort_chall_key_expired); 808 843 809 - eproto = tracepoint_string("chall_short"); 810 - abort_code = RXKADPACKETSHORT; 811 844 if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), 812 845 &challenge, sizeof(challenge)) < 0) 813 - goto protocol_error; 846 + return rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO, 847 + rxkad_abort_chall_short); 814 848 815 849 version = ntohl(challenge.version); 816 850 nonce = ntohl(challenge.nonce); ··· 814 856 815 857 trace_rxrpc_rx_challenge(conn, sp->hdr.serial, version, nonce, min_level); 816 858 817 - eproto = tracepoint_string("chall_ver"); 818 - abort_code = RXKADINCONSISTENCY; 819 859 if (version != RXKAD_VERSION) 820 - goto protocol_error; 860 + return rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO, 861 + rxkad_abort_chall_version); 821 862 822 - abort_code = RXKADLEVELFAIL; 823 - ret = -EACCES; 824 863 if (conn->security_level < min_level) 825 - goto other_error; 864 + return rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EACCES, 865 + rxkad_abort_chall_level); 826 866 827 867 token = conn->key->payload.data[0]; 828 868 ··· 849 893 ret = rxkad_send_response(conn, &sp->hdr, resp, token->kad); 850 894 kfree(resp); 851 895 return ret; 852 - 853 - protocol_error: 854 - trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto); 855 - ret = -EPROTO; 856 - other_error: 857 - *_abort_code = abort_code; 858 - return ret; 859 896 } 860 897 861 898 /* ··· 859 910 struct sk_buff *skb, 860 911 void *ticket, size_t ticket_len, 861 912 struct rxrpc_crypt *_session_key, 862 - time64_t *_expiry, 863 - u32 *_abort_code) 913 + time64_t *_expiry) 864 914 { 865 915 struct skcipher_request *req; 866 - struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 867 
916 struct rxrpc_crypt iv, key; 868 917 struct scatterlist sg[1]; 869 918 struct in_addr addr; 870 919 unsigned int life; 871 - const char *eproto; 872 920 time64_t issue, now; 873 921 bool little_endian; 874 - int ret; 875 - u32 abort_code; 876 922 u8 *p, *q, *name, *end; 877 923 878 924 _enter("{%d},{%x}", conn->debug_id, key_serial(server_key)); ··· 879 935 880 936 memcpy(&iv, &server_key->payload.data[2], sizeof(iv)); 881 937 882 - ret = -ENOMEM; 883 938 req = skcipher_request_alloc(server_key->payload.data[0], GFP_NOFS); 884 939 if (!req) 885 - goto temporary_error; 940 + return -ENOMEM; 886 941 887 942 sg_init_one(&sg[0], ticket, ticket_len); 888 943 skcipher_request_set_callback(req, 0, NULL, NULL); ··· 892 949 p = ticket; 893 950 end = p + ticket_len; 894 951 895 - #define Z(field) \ 896 - ({ \ 897 - u8 *__str = p; \ 898 - eproto = tracepoint_string("rxkad_bad_"#field); \ 899 - q = memchr(p, 0, end - p); \ 900 - if (!q || q - p > (field##_SZ)) \ 901 - goto bad_ticket; \ 902 - for (; p < q; p++) \ 903 - if (!isprint(*p)) \ 904 - goto bad_ticket; \ 905 - p++; \ 906 - __str; \ 952 + #define Z(field, fieldl) \ 953 + ({ \ 954 + u8 *__str = p; \ 955 + q = memchr(p, 0, end - p); \ 956 + if (!q || q - p > field##_SZ) \ 957 + return rxrpc_abort_conn( \ 958 + conn, skb, RXKADBADTICKET, -EPROTO, \ 959 + rxkad_abort_resp_tkt_##fieldl); \ 960 + for (; p < q; p++) \ 961 + if (!isprint(*p)) \ 962 + return rxrpc_abort_conn( \ 963 + conn, skb, RXKADBADTICKET, -EPROTO, \ 964 + rxkad_abort_resp_tkt_##fieldl); \ 965 + p++; \ 966 + __str; \ 907 967 }) 908 968 909 969 /* extract the ticket flags */ ··· 915 969 p++; 916 970 917 971 /* extract the authentication name */ 918 - name = Z(ANAME); 972 + name = Z(ANAME, aname); 919 973 _debug("KIV ANAME: %s", name); 920 974 921 975 /* extract the principal's instance */ 922 - name = Z(INST); 976 + name = Z(INST, inst); 923 977 _debug("KIV INST : %s", name); 924 978 925 979 /* extract the principal's authentication domain */ 926 - name = 
Z(REALM); 980 + name = Z(REALM, realm); 927 981 _debug("KIV REALM: %s", name); 928 982 929 - eproto = tracepoint_string("rxkad_bad_len"); 930 983 if (end - p < 4 + 8 + 4 + 2) 931 - goto bad_ticket; 984 + return rxrpc_abort_conn(conn, skb, RXKADBADTICKET, -EPROTO, 985 + rxkad_abort_resp_tkt_short); 932 986 933 987 /* get the IPv4 address of the entity that requested the ticket */ 934 988 memcpy(&addr, p, sizeof(addr)); ··· 960 1014 _debug("KIV ISSUE: %llx [%llx]", issue, now); 961 1015 962 1016 /* check the ticket is in date */ 963 - if (issue > now) { 964 - abort_code = RXKADNOAUTH; 965 - ret = -EKEYREJECTED; 966 - goto other_error; 967 - } 968 - 969 - if (issue < now - life) { 970 - abort_code = RXKADEXPIRED; 971 - ret = -EKEYEXPIRED; 972 - goto other_error; 973 - } 1017 + if (issue > now) 1018 + return rxrpc_abort_conn(conn, skb, RXKADNOAUTH, -EKEYREJECTED, 1019 + rxkad_abort_resp_tkt_future); 1020 + if (issue < now - life) 1021 + return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, -EKEYEXPIRED, 1022 + rxkad_abort_resp_tkt_expired); 974 1023 975 1024 *_expiry = issue + life; 976 1025 977 1026 /* get the service name */ 978 - name = Z(SNAME); 1027 + name = Z(SNAME, sname); 979 1028 _debug("KIV SNAME: %s", name); 980 1029 981 1030 /* get the service instance name */ 982 - name = Z(INST); 1031 + name = Z(INST, sinst); 983 1032 _debug("KIV SINST: %s", name); 984 1033 return 0; 985 - 986 - bad_ticket: 987 - trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto); 988 - abort_code = RXKADBADTICKET; 989 - ret = -EPROTO; 990 - other_error: 991 - *_abort_code = abort_code; 992 - return ret; 993 - temporary_error: 994 - return ret; 995 1034 } 996 1035 997 1036 /* ··· 1017 1086 * verify a response 1018 1087 */ 1019 1088 static int rxkad_verify_response(struct rxrpc_connection *conn, 1020 - struct sk_buff *skb, 1021 - u32 *_abort_code) 1089 + struct sk_buff *skb) 1022 1090 { 1023 1091 struct rxkad_response *response; 1024 1092 struct rxrpc_skb_priv *sp = rxrpc_skb(skb); 1025 1093 
struct rxrpc_crypt session_key; 1026 1094 struct key *server_key; 1027 - const char *eproto; 1028 1095 time64_t expiry; 1029 1096 void *ticket; 1030 - u32 abort_code, version, kvno, ticket_len, level; 1097 + u32 version, kvno, ticket_len, level; 1031 1098 __be32 csum; 1032 1099 int ret, i; 1033 1100 ··· 1033 1104 1034 1105 server_key = rxrpc_look_up_server_security(conn, skb, 0, 0); 1035 1106 if (IS_ERR(server_key)) { 1036 - switch (PTR_ERR(server_key)) { 1107 + ret = PTR_ERR(server_key); 1108 + switch (ret) { 1037 1109 case -ENOKEY: 1038 - abort_code = RXKADUNKNOWNKEY; 1039 - break; 1110 + return rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, ret, 1111 + rxkad_abort_resp_nokey); 1040 1112 case -EKEYEXPIRED: 1041 - abort_code = RXKADEXPIRED; 1042 - break; 1113 + return rxrpc_abort_conn(conn, skb, RXKADEXPIRED, ret, 1114 + rxkad_abort_resp_key_expired); 1043 1115 default: 1044 - abort_code = RXKADNOAUTH; 1045 - break; 1116 + return rxrpc_abort_conn(conn, skb, RXKADNOAUTH, ret, 1117 + rxkad_abort_resp_key_rejected); 1046 1118 } 1047 - trace_rxrpc_abort(0, "SVK", 1048 - sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 1049 - abort_code, PTR_ERR(server_key)); 1050 - *_abort_code = abort_code; 1051 - return -EPROTO; 1052 1119 } 1053 1120 1054 1121 ret = -ENOMEM; ··· 1052 1127 if (!response) 1053 1128 goto temporary_error; 1054 1129 1055 - eproto = tracepoint_string("rxkad_rsp_short"); 1056 - abort_code = RXKADPACKETSHORT; 1057 1130 if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header), 1058 - response, sizeof(*response)) < 0) 1131 + response, sizeof(*response)) < 0) { 1132 + rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO, 1133 + rxkad_abort_resp_short); 1059 1134 goto protocol_error; 1135 + } 1060 1136 1061 1137 version = ntohl(response->version); 1062 1138 ticket_len = ntohl(response->ticket_len); ··· 1065 1139 1066 1140 trace_rxrpc_rx_response(conn, sp->hdr.serial, version, kvno, ticket_len); 1067 1141 1068 - eproto = tracepoint_string("rxkad_rsp_ver"); 1069 - 
abort_code = RXKADINCONSISTENCY; 1070 - if (version != RXKAD_VERSION) 1142 + if (version != RXKAD_VERSION) { 1143 + rxrpc_abort_conn(conn, skb, RXKADINCONSISTENCY, -EPROTO, 1144 + rxkad_abort_resp_version); 1071 1145 goto protocol_error; 1146 + } 1072 1147 1073 - eproto = tracepoint_string("rxkad_rsp_tktlen"); 1074 - abort_code = RXKADTICKETLEN; 1075 - if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN) 1148 + if (ticket_len < 4 || ticket_len > MAXKRB5TICKETLEN) { 1149 + rxrpc_abort_conn(conn, skb, RXKADTICKETLEN, -EPROTO, 1150 + rxkad_abort_resp_tkt_len); 1076 1151 goto protocol_error; 1152 + } 1077 1153 1078 - eproto = tracepoint_string("rxkad_rsp_unkkey"); 1079 - abort_code = RXKADUNKNOWNKEY; 1080 - if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) 1154 + if (kvno >= RXKAD_TKT_TYPE_KERBEROS_V5) { 1155 + rxrpc_abort_conn(conn, skb, RXKADUNKNOWNKEY, -EPROTO, 1156 + rxkad_abort_resp_unknown_tkt); 1081 1157 goto protocol_error; 1158 + } 1082 1159 1083 1160 /* extract the kerberos ticket and decrypt and decode it */ 1084 1161 ret = -ENOMEM; ··· 1089 1160 if (!ticket) 1090 1161 goto temporary_error_free_resp; 1091 1162 1092 - eproto = tracepoint_string("rxkad_tkt_short"); 1093 - abort_code = RXKADPACKETSHORT; 1094 - ret = skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response), 1095 - ticket, ticket_len); 1096 - if (ret < 0) 1097 - goto temporary_error_free_ticket; 1163 + if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header) + sizeof(*response), 1164 + ticket, ticket_len) < 0) { 1165 + rxrpc_abort_conn(conn, skb, RXKADPACKETSHORT, -EPROTO, 1166 + rxkad_abort_resp_short_tkt); 1167 + goto protocol_error; 1168 + } 1098 1169 1099 1170 ret = rxkad_decrypt_ticket(conn, server_key, skb, ticket, ticket_len, 1100 - &session_key, &expiry, _abort_code); 1171 + &session_key, &expiry); 1101 1172 if (ret < 0) 1102 1173 goto temporary_error_free_ticket; 1103 1174 ··· 1105 1176 * response */ 1106 1177 rxkad_decrypt_response(conn, response, &session_key); 1107 1178 1108 - 
eproto = tracepoint_string("rxkad_rsp_param"); 1109 - abort_code = RXKADSEALEDINCON; 1110 - if (ntohl(response->encrypted.epoch) != conn->proto.epoch) 1179 + if (ntohl(response->encrypted.epoch) != conn->proto.epoch || 1180 + ntohl(response->encrypted.cid) != conn->proto.cid || 1181 + ntohl(response->encrypted.securityIndex) != conn->security_ix) { 1182 + rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, 1183 + rxkad_abort_resp_bad_param); 1111 1184 goto protocol_error_free; 1112 - if (ntohl(response->encrypted.cid) != conn->proto.cid) 1113 - goto protocol_error_free; 1114 - if (ntohl(response->encrypted.securityIndex) != conn->security_ix) 1115 - goto protocol_error_free; 1185 + } 1186 + 1116 1187 csum = response->encrypted.checksum; 1117 1188 response->encrypted.checksum = 0; 1118 1189 rxkad_calc_response_checksum(response); 1119 - eproto = tracepoint_string("rxkad_rsp_csum"); 1120 - if (response->encrypted.checksum != csum) 1190 + if (response->encrypted.checksum != csum) { 1191 + rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, 1192 + rxkad_abort_resp_bad_checksum); 1121 1193 goto protocol_error_free; 1194 + } 1122 1195 1123 - spin_lock(&conn->bundle->channel_lock); 1124 1196 for (i = 0; i < RXRPC_MAXCALLS; i++) { 1125 - struct rxrpc_call *call; 1126 1197 u32 call_id = ntohl(response->encrypted.call_id[i]); 1198 + u32 counter = READ_ONCE(conn->channels[i].call_counter); 1127 1199 1128 - eproto = tracepoint_string("rxkad_rsp_callid"); 1129 - if (call_id > INT_MAX) 1130 - goto protocol_error_unlock; 1200 + if (call_id > INT_MAX) { 1201 + rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, 1202 + rxkad_abort_resp_bad_callid); 1203 + goto protocol_error_free; 1204 + } 1131 1205 1132 - eproto = tracepoint_string("rxkad_rsp_callctr"); 1133 - if (call_id < conn->channels[i].call_counter) 1134 - goto protocol_error_unlock; 1206 + if (call_id < counter) { 1207 + rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, 1208 + rxkad_abort_resp_call_ctr); 1209 
+ goto protocol_error_free; 1210 + } 1135 1211 1136 - eproto = tracepoint_string("rxkad_rsp_callst"); 1137 - if (call_id > conn->channels[i].call_counter) { 1138 - call = rcu_dereference_protected( 1139 - conn->channels[i].call, 1140 - lockdep_is_held(&conn->bundle->channel_lock)); 1141 - if (call && call->state < RXRPC_CALL_COMPLETE) 1142 - goto protocol_error_unlock; 1212 + if (call_id > counter) { 1213 + if (conn->channels[i].call) { 1214 + rxrpc_abort_conn(conn, skb, RXKADSEALEDINCON, -EPROTO, 1215 + rxkad_abort_resp_call_state); 1216 + goto protocol_error_free; 1217 + } 1143 1218 conn->channels[i].call_counter = call_id; 1144 1219 } 1145 1220 } 1146 - spin_unlock(&conn->bundle->channel_lock); 1147 1221 1148 - eproto = tracepoint_string("rxkad_rsp_seq"); 1149 - abort_code = RXKADOUTOFSEQUENCE; 1150 - if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1) 1222 + if (ntohl(response->encrypted.inc_nonce) != conn->rxkad.nonce + 1) { 1223 + rxrpc_abort_conn(conn, skb, RXKADOUTOFSEQUENCE, -EPROTO, 1224 + rxkad_abort_resp_ooseq); 1151 1225 goto protocol_error_free; 1226 + } 1152 1227 1153 - eproto = tracepoint_string("rxkad_rsp_level"); 1154 - abort_code = RXKADLEVELFAIL; 1155 1228 level = ntohl(response->encrypted.level); 1156 - if (level > RXRPC_SECURITY_ENCRYPT) 1229 + if (level > RXRPC_SECURITY_ENCRYPT) { 1230 + rxrpc_abort_conn(conn, skb, RXKADLEVELFAIL, -EPROTO, 1231 + rxkad_abort_resp_level); 1157 1232 goto protocol_error_free; 1233 + } 1158 1234 conn->security_level = level; 1159 1235 1160 1236 /* create a key to hold the security data and expiration time - after ··· 1174 1240 _leave(" = 0"); 1175 1241 return 0; 1176 1242 1177 - protocol_error_unlock: 1178 - spin_unlock(&conn->bundle->channel_lock); 1179 1243 protocol_error_free: 1180 1244 kfree(ticket); 1181 1245 protocol_error: 1182 1246 kfree(response); 1183 - trace_rxrpc_rx_eproto(NULL, sp->hdr.serial, eproto); 1184 1247 key_put(server_key); 1185 - *_abort_code = abort_code; 1186 1248 return 
-EPROTO; 1187 1249 1188 1250 temporary_error_free_ticket:
+12 -5
net/rxrpc/rxperf.c
··· 10 10 #include <linux/slab.h> 11 11 #include <net/sock.h> 12 12 #include <net/af_rxrpc.h> 13 + #define RXRPC_TRACE_ONLY_DEFINE_ENUMS 14 + #include <trace/events/rxrpc.h> 13 15 14 16 MODULE_DESCRIPTION("rxperf test server (afs)"); 15 17 MODULE_AUTHOR("Red Hat, Inc."); ··· 309 307 case -EOPNOTSUPP: 310 308 abort_code = RXGEN_OPCODE; 311 309 rxrpc_kernel_abort_call(rxperf_socket, call->rxcall, 312 - abort_code, ret, "GOP"); 310 + abort_code, ret, 311 + rxperf_abort_op_not_supported); 313 312 goto call_complete; 314 313 case -ENOTSUPP: 315 314 abort_code = RX_USER_ABORT; 316 315 rxrpc_kernel_abort_call(rxperf_socket, call->rxcall, 317 - abort_code, ret, "GUA"); 316 + abort_code, ret, 317 + rxperf_abort_op_not_supported); 318 318 goto call_complete; 319 319 case -EIO: 320 320 pr_err("Call %u in bad state %u\n", ··· 328 324 case -ENOMEM: 329 325 case -EFAULT: 330 326 rxrpc_kernel_abort_call(rxperf_socket, call->rxcall, 331 - RXGEN_SS_UNMARSHAL, ret, "GUM"); 327 + RXGEN_SS_UNMARSHAL, ret, 328 + rxperf_abort_unmarshal_error); 332 329 goto call_complete; 333 330 default: 334 331 rxrpc_kernel_abort_call(rxperf_socket, call->rxcall, 335 - RX_CALL_DEAD, ret, "GER"); 332 + RX_CALL_DEAD, ret, 333 + rxperf_abort_general_error); 336 334 goto call_complete; 337 335 } 338 336 } ··· 529 523 530 524 if (n == -ENOMEM) 531 525 rxrpc_kernel_abort_call(rxperf_socket, call->rxcall, 532 - RXGEN_SS_MARSHAL, -ENOMEM, "GOM"); 526 + RXGEN_SS_MARSHAL, -ENOMEM, 527 + rxperf_abort_oom); 533 528 return n; 534 529 } 535 530
+20 -33
net/rxrpc/security.c
··· 97 97 */ 98 98 int rxrpc_init_client_conn_security(struct rxrpc_connection *conn) 99 99 { 100 - const struct rxrpc_security *sec; 101 100 struct rxrpc_key_token *token; 102 101 struct key *key = conn->key; 103 - int ret; 102 + int ret = 0; 104 103 105 104 _enter("{%d},{%x}", conn->debug_id, key_serial(key)); 106 105 107 - if (!key) 108 - return 0; 109 - 110 - ret = key_validate(key); 111 - if (ret < 0) 112 - return ret; 113 - 114 106 for (token = key->payload.data[0]; token; token = token->next) { 115 - sec = rxrpc_security_lookup(token->security_index); 116 - if (sec) 107 + if (token->security_index == conn->security->security_index) 117 108 goto found; 118 109 } 119 110 return -EKEYREJECTED; 120 111 121 112 found: 122 - conn->security = sec; 123 - 124 - ret = conn->security->init_connection_security(conn, token); 125 - if (ret < 0) { 126 - conn->security = &rxrpc_no_security; 127 - return ret; 113 + mutex_lock(&conn->security_lock); 114 + if (conn->state == RXRPC_CONN_CLIENT_UNSECURED) { 115 + ret = conn->security->init_connection_security(conn, token); 116 + if (ret == 0) { 117 + spin_lock(&conn->state_lock); 118 + if (conn->state == RXRPC_CONN_CLIENT_UNSECURED) 119 + conn->state = RXRPC_CONN_CLIENT; 120 + spin_unlock(&conn->state_lock); 121 + } 128 122 } 129 - 130 - _leave(" = 0"); 131 - return 0; 123 + mutex_unlock(&conn->security_lock); 124 + return ret; 132 125 } 133 126 134 127 /* ··· 137 144 138 145 sec = rxrpc_security_lookup(sp->hdr.securityIndex); 139 146 if (!sec) { 140 - trace_rxrpc_abort(0, "SVS", 141 - sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 142 - RX_INVALID_OPERATION, EKEYREJECTED); 143 - skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; 144 - skb->priority = RX_INVALID_OPERATION; 147 + rxrpc_direct_abort(skb, rxrpc_abort_unsupported_security, 148 + RX_INVALID_OPERATION, -EKEYREJECTED); 145 149 return NULL; 146 150 } 147 151 148 152 if (sp->hdr.securityIndex != RXRPC_SECURITY_NONE && 149 153 !rx->securities) { 150 - trace_rxrpc_abort(0, "SVR", 151 
- sp->hdr.cid, sp->hdr.callNumber, sp->hdr.seq, 152 - RX_INVALID_OPERATION, EKEYREJECTED); 153 - skb->mark = RXRPC_SKB_MARK_REJECT_ABORT; 154 - skb->priority = sec->no_key_abort; 154 + rxrpc_direct_abort(skb, rxrpc_abort_no_service_key, 155 + sec->no_key_abort, -EKEYREJECTED); 155 156 return NULL; 156 157 } 157 158 ··· 178 191 sprintf(kdesc, "%u:%u", 179 192 sp->hdr.serviceId, sp->hdr.securityIndex); 180 193 181 - rcu_read_lock(); 194 + read_lock(&conn->local->services_lock); 182 195 183 - rx = rcu_dereference(conn->local->service); 196 + rx = conn->local->service; 184 197 if (!rx) 185 198 goto out; 186 199 ··· 202 215 } 203 216 204 217 out: 205 - rcu_read_unlock(); 218 + read_unlock(&conn->local->services_lock); 206 219 return key; 207 220 }
+114 -81
net/rxrpc/sendmsg.c
··· 18 18 #include "ar-internal.h" 19 19 20 20 /* 21 + * Propose an abort to be made in the I/O thread. 22 + */ 23 + bool rxrpc_propose_abort(struct rxrpc_call *call, s32 abort_code, int error, 24 + enum rxrpc_abort_reason why) 25 + { 26 + _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why); 27 + 28 + if (!call->send_abort && !rxrpc_call_is_complete(call)) { 29 + call->send_abort_why = why; 30 + call->send_abort_err = error; 31 + call->send_abort_seq = 0; 32 + /* Request abort locklessly vs rxrpc_input_call_event(). */ 33 + smp_store_release(&call->send_abort, abort_code); 34 + rxrpc_poke_call(call, rxrpc_call_poke_abort); 35 + return true; 36 + } 37 + 38 + return false; 39 + } 40 + 41 + /* 42 + * Wait for a call to become connected. Interruption here doesn't cause the 43 + * call to be aborted. 44 + */ 45 + static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo) 46 + { 47 + DECLARE_WAITQUEUE(myself, current); 48 + int ret = 0; 49 + 50 + _enter("%d", call->debug_id); 51 + 52 + if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) 53 + return call->error; 54 + 55 + add_wait_queue_exclusive(&call->waitq, &myself); 56 + 57 + for (;;) { 58 + ret = call->error; 59 + if (ret < 0) 60 + break; 61 + 62 + switch (call->interruptibility) { 63 + case RXRPC_INTERRUPTIBLE: 64 + case RXRPC_PREINTERRUPTIBLE: 65 + set_current_state(TASK_INTERRUPTIBLE); 66 + break; 67 + case RXRPC_UNINTERRUPTIBLE: 68 + default: 69 + set_current_state(TASK_UNINTERRUPTIBLE); 70 + break; 71 + } 72 + if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) { 73 + ret = call->error; 74 + break; 75 + } 76 + if ((call->interruptibility == RXRPC_INTERRUPTIBLE || 77 + call->interruptibility == RXRPC_PREINTERRUPTIBLE) && 78 + signal_pending(current)) { 79 + ret = sock_intr_errno(*timeo); 80 + break; 81 + } 82 + *timeo = schedule_timeout(*timeo); 83 + } 84 + 85 + remove_wait_queue(&call->waitq, &myself); 86 + __set_current_state(TASK_RUNNING); 87 + 88 + if (ret == 0 && 
rxrpc_call_is_complete(call)) 89 + ret = call->error; 90 + 91 + _leave(" = %d", ret); 92 + return ret; 93 + } 94 + 95 + /* 21 96 * Return true if there's sufficient Tx queue space. 22 97 */ 23 98 static bool rxrpc_check_tx_space(struct rxrpc_call *call, rxrpc_seq_t *_tx_win) ··· 114 39 if (rxrpc_check_tx_space(call, NULL)) 115 40 return 0; 116 41 117 - if (call->state >= RXRPC_CALL_COMPLETE) 42 + if (rxrpc_call_is_complete(call)) 118 43 return call->error; 119 44 120 45 if (signal_pending(current)) ··· 149 74 if (rxrpc_check_tx_space(call, &tx_win)) 150 75 return 0; 151 76 152 - if (call->state >= RXRPC_CALL_COMPLETE) 77 + if (rxrpc_call_is_complete(call)) 153 78 return call->error; 154 79 155 80 if (timeout == 0 && ··· 178 103 if (rxrpc_check_tx_space(call, NULL)) 179 104 return 0; 180 105 181 - if (call->state >= RXRPC_CALL_COMPLETE) 106 + if (rxrpc_call_is_complete(call)) 182 107 return call->error; 183 108 184 109 trace_rxrpc_txqueue(call, rxrpc_txqueue_wait); ··· 243 168 struct rxrpc_txbuf *txb, 244 169 rxrpc_notify_end_tx_t notify_end_tx) 245 170 { 246 - unsigned long now; 247 171 rxrpc_seq_t seq = txb->seq; 248 172 bool last = test_bit(RXRPC_TXBUF_LAST, &txb->flags), poke; 249 173 ··· 265 191 poke = list_empty(&call->tx_sendmsg); 266 192 list_add_tail(&txb->call_link, &call->tx_sendmsg); 267 193 call->tx_prepared = seq; 194 + if (last) 195 + rxrpc_notify_end_tx(rx, call, notify_end_tx); 268 196 spin_unlock(&call->tx_lock); 269 - 270 - if (last || call->state == RXRPC_CALL_SERVER_ACK_REQUEST) { 271 - _debug("________awaiting reply/ACK__________"); 272 - write_lock(&call->state_lock); 273 - switch (call->state) { 274 - case RXRPC_CALL_CLIENT_SEND_REQUEST: 275 - call->state = RXRPC_CALL_CLIENT_AWAIT_REPLY; 276 - rxrpc_notify_end_tx(rx, call, notify_end_tx); 277 - break; 278 - case RXRPC_CALL_SERVER_ACK_REQUEST: 279 - call->state = RXRPC_CALL_SERVER_SEND_REPLY; 280 - now = jiffies; 281 - WRITE_ONCE(call->delay_ack_at, now + MAX_JIFFY_OFFSET); 282 - if 
(call->ackr_reason == RXRPC_ACK_DELAY) 283 - call->ackr_reason = 0; 284 - trace_rxrpc_timer(call, rxrpc_timer_init_for_send_reply, now); 285 - if (!last) 286 - break; 287 - fallthrough; 288 - case RXRPC_CALL_SERVER_SEND_REPLY: 289 - call->state = RXRPC_CALL_SERVER_AWAIT_ACK; 290 - rxrpc_notify_end_tx(rx, call, notify_end_tx); 291 - break; 292 - default: 293 - break; 294 - } 295 - write_unlock(&call->state_lock); 296 - } 297 197 298 198 if (poke) 299 199 rxrpc_poke_call(call, rxrpc_call_poke_start); ··· 293 245 294 246 timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); 295 247 248 + ret = rxrpc_wait_to_be_connected(call, &timeo); 249 + if (ret < 0) 250 + return ret; 251 + 252 + if (call->conn->state == RXRPC_CONN_CLIENT_UNSECURED) { 253 + ret = rxrpc_init_client_conn_security(call->conn); 254 + if (ret < 0) 255 + return ret; 256 + } 257 + 296 258 /* this should be in poll */ 297 259 sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); 298 260 ··· 310 252 ret = -EPIPE; 311 253 if (sk->sk_shutdown & SEND_SHUTDOWN) 312 254 goto maybe_error; 313 - state = READ_ONCE(call->state); 255 + state = rxrpc_call_state(call); 314 256 ret = -ESHUTDOWN; 315 257 if (state >= RXRPC_CALL_COMPLETE) 316 258 goto maybe_error; 317 259 ret = -EPROTO; 318 260 if (state != RXRPC_CALL_CLIENT_SEND_REQUEST && 319 261 state != RXRPC_CALL_SERVER_ACK_REQUEST && 320 - state != RXRPC_CALL_SERVER_SEND_REPLY) 262 + state != RXRPC_CALL_SERVER_SEND_REPLY) { 263 + /* Request phase complete for this client call */ 264 + trace_rxrpc_abort(call->debug_id, rxrpc_sendmsg_late_send, 265 + call->cid, call->call_id, call->rx_consumed, 266 + 0, -EPROTO); 321 267 goto maybe_error; 268 + } 322 269 323 270 ret = -EMSGSIZE; 324 271 if (call->tx_total_len != -1) { ··· 392 329 393 330 /* check for the far side aborting the call or a network error 394 331 * occurring */ 395 - if (call->state == RXRPC_CALL_COMPLETE) 332 + if (rxrpc_call_is_complete(call)) 396 333 goto call_terminated; 397 334 398 335 /* add the packet to the 
send queue if it's now full */ ··· 417 354 418 355 success: 419 356 ret = copied; 420 - if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) { 421 - read_lock(&call->state_lock); 422 - if (call->error < 0) 423 - ret = call->error; 424 - read_unlock(&call->state_lock); 425 - } 357 + if (rxrpc_call_is_complete(call) && 358 + call->error < 0) 359 + ret = call->error; 426 360 out: 427 361 call->tx_pending = txb; 428 362 _leave(" = %d", ret); ··· 603 543 atomic_inc_return(&rxrpc_debug_id)); 604 544 /* The socket is now unlocked */ 605 545 606 - rxrpc_put_peer(cp.peer, rxrpc_peer_put_discard_tmp); 607 546 _leave(" = %p\n", call); 608 547 return call; 609 548 } ··· 615 556 int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len) 616 557 __releases(&rx->sk.sk_lock.slock) 617 558 { 618 - enum rxrpc_call_state state; 619 559 struct rxrpc_call *call; 620 560 unsigned long now, j; 621 561 bool dropped_lock = false; ··· 656 598 return PTR_ERR(call); 657 599 /* ... and we have the call lock. 
*/ 658 600 ret = 0; 659 - if (READ_ONCE(call->state) == RXRPC_CALL_COMPLETE) 601 + if (rxrpc_call_is_complete(call)) 660 602 goto out_put_unlock; 661 603 } else { 662 - switch (READ_ONCE(call->state)) { 604 + switch (rxrpc_call_state(call)) { 663 605 case RXRPC_CALL_UNINITIALISED: 664 606 case RXRPC_CALL_CLIENT_AWAIT_CONN: 665 607 case RXRPC_CALL_SERVER_PREALLOC: ··· 713 655 break; 714 656 } 715 657 716 - state = READ_ONCE(call->state); 717 - _debug("CALL %d USR %lx ST %d on CONN %p", 718 - call->debug_id, call->user_call_ID, state, call->conn); 719 - 720 - if (state >= RXRPC_CALL_COMPLETE) { 658 + if (rxrpc_call_is_complete(call)) { 721 659 /* it's too late for this call */ 722 660 ret = -ESHUTDOWN; 723 661 } else if (p.command == RXRPC_CMD_SEND_ABORT) { 662 + rxrpc_propose_abort(call, p.abort_code, -ECONNABORTED, 663 + rxrpc_abort_call_sendmsg); 724 664 ret = 0; 725 - if (rxrpc_abort_call("CMD", call, 0, p.abort_code, -ECONNABORTED)) 726 - ret = rxrpc_send_abort_packet(call); 727 665 } else if (p.command != RXRPC_CMD_SEND_DATA) { 728 666 ret = -EINVAL; 729 667 } else { ··· 759 705 bool dropped_lock = false; 760 706 int ret; 761 707 762 - _enter("{%d,%s},", call->debug_id, rxrpc_call_states[call->state]); 708 + _enter("{%d},", call->debug_id); 763 709 764 710 ASSERTCMP(msg->msg_name, ==, NULL); 765 711 ASSERTCMP(msg->msg_control, ==, NULL); 766 712 767 713 mutex_lock(&call->user_mutex); 768 714 769 - _debug("CALL %d USR %lx ST %d on CONN %p", 770 - call->debug_id, call->user_call_ID, call->state, call->conn); 771 - 772 - switch (READ_ONCE(call->state)) { 773 - case RXRPC_CALL_CLIENT_SEND_REQUEST: 774 - case RXRPC_CALL_SERVER_ACK_REQUEST: 775 - case RXRPC_CALL_SERVER_SEND_REPLY: 776 - ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len, 777 - notify_end_tx, &dropped_lock); 778 - break; 779 - case RXRPC_CALL_COMPLETE: 780 - read_lock(&call->state_lock); 715 + ret = rxrpc_send_data(rxrpc_sk(sock->sk), call, msg, len, 716 + notify_end_tx, &dropped_lock); 717 + if 
(ret == -ESHUTDOWN) 781 718 ret = call->error; 782 - read_unlock(&call->state_lock); 783 - break; 784 - default: 785 - /* Request phase complete for this client call */ 786 - trace_rxrpc_rx_eproto(call, 0, tracepoint_string("late_send")); 787 - ret = -EPROTO; 788 - break; 789 - } 790 719 791 720 if (!dropped_lock) 792 721 mutex_unlock(&call->user_mutex); ··· 784 747 * @call: The call to be aborted 785 748 * @abort_code: The abort code to stick into the ABORT packet 786 749 * @error: Local error value 787 - * @why: 3-char string indicating why. 750 + * @why: Indication as to why. 788 751 * 789 752 * Allow a kernel service to abort a call, if it's still in an abortable state 790 753 * and return true if the call was aborted, false if it was already complete. 791 754 */ 792 755 bool rxrpc_kernel_abort_call(struct socket *sock, struct rxrpc_call *call, 793 - u32 abort_code, int error, const char *why) 756 + u32 abort_code, int error, enum rxrpc_abort_reason why) 794 757 { 795 758 bool aborted; 796 759 797 - _enter("{%d},%d,%d,%s", call->debug_id, abort_code, error, why); 760 + _enter("{%d},%d,%d,%u", call->debug_id, abort_code, error, why); 798 761 799 762 mutex_lock(&call->user_mutex); 800 - 801 - aborted = rxrpc_abort_call(why, call, 0, abort_code, error); 802 - if (aborted) 803 - rxrpc_send_abort_packet(call); 804 - 763 + aborted = rxrpc_propose_abort(call, abort_code, error, why); 805 764 mutex_unlock(&call->user_mutex); 806 765 return aborted; 807 766 }
+7 -1
net/sched/act_mpls.c
··· 134 134 { 135 135 const u32 *label = nla_data(attr); 136 136 137 + if (nla_len(attr) != sizeof(*label)) { 138 + NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length"); 139 + return -EINVAL; 140 + } 141 + 137 142 if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) { 138 143 NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range"); 139 144 return -EINVAL; ··· 150 145 static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = { 151 146 [TCA_MPLS_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_mpls)), 152 147 [TCA_MPLS_PROTO] = { .type = NLA_U16 }, 153 - [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_U32, valid_label), 148 + [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, 149 + valid_label), 154 150 [TCA_MPLS_TC] = NLA_POLICY_RANGE(NLA_U8, 0, 7), 155 151 [TCA_MPLS_TTL] = NLA_POLICY_MIN(NLA_U8, 1), 156 152 [TCA_MPLS_BOS] = NLA_POLICY_RANGE(NLA_U8, 0, 1),
+5
net/sched/sch_api.c
··· 1133 1133 return -ENOENT; 1134 1134 } 1135 1135 1136 + if (new && new->ops == &noqueue_qdisc_ops) { 1137 + NL_SET_ERR_MSG(extack, "Cannot assign noqueue to a class"); 1138 + return -EINVAL; 1139 + } 1140 + 1136 1141 err = cops->graft(parent, cl, new, &old, extack); 1137 1142 if (err) 1138 1143 return err;
+8 -4
net/tipc/node.c
··· 1179 1179 bool addr_match = false; 1180 1180 bool sign_match = false; 1181 1181 bool link_up = false; 1182 + bool link_is_reset = false; 1182 1183 bool accept_addr = false; 1183 - bool reset = true; 1184 + bool reset = false; 1184 1185 char *if_name; 1185 1186 unsigned long intv; 1186 1187 u16 session; ··· 1201 1200 /* Prepare to validate requesting node's signature and media address */ 1202 1201 l = le->link; 1203 1202 link_up = l && tipc_link_is_up(l); 1203 + link_is_reset = l && tipc_link_is_reset(l); 1204 1204 addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr)); 1205 1205 sign_match = (signature == n->signature); 1206 1206 1207 1207 /* These three flags give us eight permutations: */ 1208 1208 1209 1209 if (sign_match && addr_match && link_up) { 1210 - /* All is fine. Do nothing. */ 1211 - reset = false; 1210 + /* All is fine. Ignore requests. */ 1212 1211 /* Peer node is not a container/local namespace */ 1213 1212 if (!n->peer_hash_mix) 1214 1213 n->peer_hash_mix = hash_mixes; ··· 1233 1232 */ 1234 1233 accept_addr = true; 1235 1234 *respond = true; 1235 + reset = true; 1236 1236 } else if (!sign_match && addr_match && link_up) { 1237 1237 /* Peer node rebooted. 
Two possibilities: 1238 1238 * - Delayed re-discovery; this link endpoint has already ··· 1265 1263 n->signature = signature; 1266 1264 accept_addr = true; 1267 1265 *respond = true; 1266 + reset = true; 1268 1267 } 1269 1268 1270 1269 if (!accept_addr) ··· 1294 1291 tipc_link_fsm_evt(l, LINK_RESET_EVT); 1295 1292 if (n->state == NODE_FAILINGOVER) 1296 1293 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 1294 + link_is_reset = tipc_link_is_reset(l); 1297 1295 le->link = l; 1298 1296 n->link_cnt++; 1299 1297 tipc_node_calculate_timer(n, l); ··· 1307 1303 memcpy(&le->maddr, maddr, sizeof(*maddr)); 1308 1304 exit: 1309 1305 tipc_node_write_unlock(n); 1310 - if (reset && l && !tipc_link_is_reset(l)) 1306 + if (reset && !link_is_reset) 1311 1307 tipc_node_link_down(n, b->identity, false); 1312 1308 tipc_node_put(n); 1313 1309 }
+1 -1
tools/testing/selftests/net/af_unix/test_unix_oob.c
··· 124 124 125 125 wait_for_signal(pipefd[0]); 126 126 if (connect(cfd, (struct sockaddr *)consumer_addr, 127 - sizeof(struct sockaddr)) != 0) { 127 + sizeof(*consumer_addr)) != 0) { 128 128 perror("Connect failed"); 129 129 kill(0, SIGTERM); 130 130 exit(1);
+129 -73
tools/testing/selftests/net/l2_tos_ttl_inherit.sh
··· 12 12 # In addition this script also checks if forcing a specific field in the 13 13 # outer header is working. 14 14 15 + # Return 4 by default (Kselftest SKIP code) 16 + ERR=4 17 + 15 18 if [ "$(id -u)" != "0" ]; then 16 19 echo "Please run as root." 17 - exit 0 20 + exit $ERR 18 21 fi 19 22 if ! which tcpdump > /dev/null 2>&1; then 20 23 echo "No tcpdump found. Required for this test." 21 - exit 0 24 + exit $ERR 22 25 fi 23 26 24 27 expected_tos="0x00" 25 28 expected_ttl="0" 26 29 failed=false 30 + 31 + readonly NS0=$(mktemp -u ns0-XXXXXXXX) 32 + readonly NS1=$(mktemp -u ns1-XXXXXXXX) 33 + 34 + RUN_NS0="ip netns exec ${NS0}" 27 35 28 36 get_random_tos() { 29 37 # Get a random hex tos value between 0x00 and 0xfc, a multiple of 4 ··· 69 61 local vlan="$5" 70 62 local test_tos="0x00" 71 63 local test_ttl="0" 72 - local ns="ip netns exec testing" 73 64 74 65 # We don't want a test-tos of 0x00, 75 66 # because this is the value that we get when no tos is set. ··· 101 94 printf "│%7s │%6s │%6s │%13s │%13s │%6s │" \ 102 95 "$type" "$outer" "$inner" "$tos" "$ttl" "$vlan" 103 96 104 - # Create 'testing' netns, veth pair and connect main ns with testing ns 105 - ip netns add testing 106 - ip link add type veth 107 - ip link set veth1 netns testing 108 - ip link set veth0 up 109 - $ns ip link set veth1 up 110 - ip addr flush dev veth0 111 - $ns ip addr flush dev veth1 97 + # Create netns NS0 and NS1 and connect them with a veth pair 98 + ip netns add "${NS0}" 99 + ip netns add "${NS1}" 100 + ip link add name veth0 netns "${NS0}" type veth \ 101 + peer name veth1 netns "${NS1}" 102 + ip -netns "${NS0}" link set dev veth0 up 103 + ip -netns "${NS1}" link set dev veth1 up 104 + ip -netns "${NS0}" address flush dev veth0 105 + ip -netns "${NS1}" address flush dev veth1 112 106 113 107 local local_addr1="" 114 108 local local_addr2="" ··· 135 127 if [ "$type" = "gre" ]; then 136 128 type="gretap" 137 129 fi 138 - ip addr add 198.18.0.1/24 dev veth0 139 - $ns ip addr add 
198.18.0.2/24 dev veth1 140 - ip link add name tep0 type $type $local_addr1 remote \ 141 - 198.18.0.2 tos $test_tos ttl $test_ttl $vxlan $geneve 142 - $ns ip link add name tep1 type $type $local_addr2 remote \ 143 - 198.18.0.1 tos $test_tos ttl $test_ttl $vxlan $geneve 130 + ip -netns "${NS0}" address add 198.18.0.1/24 dev veth0 131 + ip -netns "${NS1}" address add 198.18.0.2/24 dev veth1 132 + ip -netns "${NS0}" link add name tep0 type $type $local_addr1 \ 133 + remote 198.18.0.2 tos $test_tos ttl $test_ttl \ 134 + $vxlan $geneve 135 + ip -netns "${NS1}" link add name tep1 type $type $local_addr2 \ 136 + remote 198.18.0.1 tos $test_tos ttl $test_ttl \ 137 + $vxlan $geneve 144 138 elif [ "$outer" = "6" ]; then 145 139 if [ "$type" = "gre" ]; then 146 140 type="ip6gretap" 147 141 fi 148 - ip addr add fdd1:ced0:5d88:3fce::1/64 dev veth0 149 - $ns ip addr add fdd1:ced0:5d88:3fce::2/64 dev veth1 150 - ip link add name tep0 type $type $local_addr1 \ 151 - remote fdd1:ced0:5d88:3fce::2 tos $test_tos ttl $test_ttl \ 152 - $vxlan $geneve 153 - $ns ip link add name tep1 type $type $local_addr2 \ 154 - remote fdd1:ced0:5d88:3fce::1 tos $test_tos ttl $test_ttl \ 155 - $vxlan $geneve 142 + ip -netns "${NS0}" address add fdd1:ced0:5d88:3fce::1/64 \ 143 + dev veth0 nodad 144 + ip -netns "${NS1}" address add fdd1:ced0:5d88:3fce::2/64 \ 145 + dev veth1 nodad 146 + ip -netns "${NS0}" link add name tep0 type $type $local_addr1 \ 147 + remote fdd1:ced0:5d88:3fce::2 tos $test_tos \ 148 + ttl $test_ttl $vxlan $geneve 149 + ip -netns "${NS1}" link add name tep1 type $type $local_addr2 \ 150 + remote fdd1:ced0:5d88:3fce::1 tos $test_tos \ 151 + ttl $test_ttl $vxlan $geneve 156 152 fi 157 153 158 154 # Bring L2-tunnel link up and create VLAN on top 159 - ip link set tep0 up 160 - $ns ip link set tep1 up 161 - ip addr flush dev tep0 162 - $ns ip addr flush dev tep1 155 + ip -netns "${NS0}" link set tep0 up 156 + ip -netns "${NS1}" link set tep1 up 157 + ip -netns "${NS0}" address flush dev 
tep0 158 + ip -netns "${NS1}" address flush dev tep1 163 159 local parent 164 160 if $vlan; then 165 161 parent="vlan99-" 166 - ip link add link tep0 name ${parent}0 type vlan id 99 167 - $ns ip link add link tep1 name ${parent}1 type vlan id 99 168 - ip link set ${parent}0 up 169 - $ns ip link set ${parent}1 up 170 - ip addr flush dev ${parent}0 171 - $ns ip addr flush dev ${parent}1 162 + ip -netns "${NS0}" link add link tep0 name ${parent}0 \ 163 + type vlan id 99 164 + ip -netns "${NS1}" link add link tep1 name ${parent}1 \ 165 + type vlan id 99 166 + ip -netns "${NS0}" link set dev ${parent}0 up 167 + ip -netns "${NS1}" link set dev ${parent}1 up 168 + ip -netns "${NS0}" address flush dev ${parent}0 169 + ip -netns "${NS1}" address flush dev ${parent}1 172 170 else 173 171 parent="tep" 174 172 fi 175 173 176 174 # Assign inner IPv4/IPv6 addresses 177 175 if [ "$inner" = "4" ] || [ "$inner" = "other" ]; then 178 - ip addr add 198.19.0.1/24 brd + dev ${parent}0 179 - $ns ip addr add 198.19.0.2/24 brd + dev ${parent}1 176 + ip -netns "${NS0}" address add 198.19.0.1/24 brd + dev ${parent}0 177 + ip -netns "${NS1}" address add 198.19.0.2/24 brd + dev ${parent}1 180 178 elif [ "$inner" = "6" ]; then 181 - ip addr add fdd4:96cf:4eae:443b::1/64 dev ${parent}0 182 - $ns ip addr add fdd4:96cf:4eae:443b::2/64 dev ${parent}1 179 + ip -netns "${NS0}" address add fdd4:96cf:4eae:443b::1/64 \ 180 + dev ${parent}0 nodad 181 + ip -netns "${NS1}" address add fdd4:96cf:4eae:443b::2/64 \ 182 + dev ${parent}1 nodad 183 183 fi 184 184 } 185 185 ··· 208 192 ping_dst="198.19.0.3" # Generates ARPs which are not IPv4/IPv6 209 193 fi 210 194 if [ "$tos_ttl" = "inherit" ]; then 211 - ping -i 0.1 $ping_dst -Q "$expected_tos" -t "$expected_ttl" \ 212 - 2>/dev/null 1>&2 & ping_pid="$!" 195 + ${RUN_NS0} ping -i 0.1 $ping_dst -Q "$expected_tos" \ 196 + -t "$expected_ttl" 2>/dev/null 1>&2 & ping_pid="$!" 213 197 else 214 - ping -i 0.1 $ping_dst 2>/dev/null 1>&2 & ping_pid="$!" 
198 + ${RUN_NS0} ping -i 0.1 $ping_dst 2>/dev/null 1>&2 & ping_pid="$!" 215 199 fi 216 200 local tunnel_type_offset tunnel_type_proto req_proto_offset req_offset 217 201 if [ "$type" = "gre" ]; then ··· 232 216 req_proto_offset="$((req_proto_offset + 4))" 233 217 req_offset="$((req_offset + 4))" 234 218 fi 235 - out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \ 236 - ip[$tunnel_type_offset] = $tunnel_type_proto and \ 237 - ip[$req_proto_offset] = 0x01 and \ 238 - ip[$req_offset] = 0x08 2>/dev/null | head -n 1)" 219 + out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \ 220 + -i veth0 -n \ 221 + ip[$tunnel_type_offset] = $tunnel_type_proto and \ 222 + ip[$req_proto_offset] = 0x01 and \ 223 + ip[$req_offset] = 0x08 2>/dev/null \ 224 + | head -n 1)" 239 225 elif [ "$inner" = "6" ]; then 240 226 req_proto_offset="44" 241 227 req_offset="78" ··· 249 231 req_proto_offset="$((req_proto_offset + 4))" 250 232 req_offset="$((req_offset + 4))" 251 233 fi 252 - out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \ 253 - ip[$tunnel_type_offset] = $tunnel_type_proto and \ 254 - ip[$req_proto_offset] = 0x3a and \ 255 - ip[$req_offset] = 0x80 2>/dev/null | head -n 1)" 234 + out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \ 235 + -i veth0 -n \ 236 + ip[$tunnel_type_offset] = $tunnel_type_proto and \ 237 + ip[$req_proto_offset] = 0x3a and \ 238 + ip[$req_offset] = 0x80 2>/dev/null \ 239 + | head -n 1)" 256 240 elif [ "$inner" = "other" ]; then 257 241 req_proto_offset="36" 258 242 req_offset="45" ··· 270 250 expected_tos="0x00" 271 251 expected_ttl="64" 272 252 fi 273 - out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \ 274 - ip[$tunnel_type_offset] = $tunnel_type_proto and \ 275 - ip[$req_proto_offset] = 0x08 and \ 276 - ip[$((req_proto_offset + 1))] = 0x06 and \ 277 - ip[$req_offset] = 0x01 2>/dev/null | head -n 1)" 253 + out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \ 254 + -i veth0 -n \ 255 + ip[$tunnel_type_offset] = $tunnel_type_proto and \ 256 
+ ip[$req_proto_offset] = 0x08 and \ 257 + ip[$((req_proto_offset + 1))] = 0x06 and \ 258 + ip[$req_offset] = 0x01 2>/dev/null \ 259 + | head -n 1)" 278 260 fi 279 261 elif [ "$outer" = "6" ]; then 280 262 if [ "$type" = "gre" ]; then ··· 295 273 req_proto_offset="$((req_proto_offset + 4))" 296 274 req_offset="$((req_offset + 4))" 297 275 fi 298 - out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \ 299 - ip6[$tunnel_type_offset] = $tunnel_type_proto and \ 300 - ip6[$req_proto_offset] = 0x01 and \ 301 - ip6[$req_offset] = 0x08 2>/dev/null | head -n 1)" 276 + out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \ 277 + -i veth0 -n \ 278 + ip6[$tunnel_type_offset] = $tunnel_type_proto and \ 279 + ip6[$req_proto_offset] = 0x01 and \ 280 + ip6[$req_offset] = 0x08 2>/dev/null \ 281 + | head -n 1)" 302 282 elif [ "$inner" = "6" ]; then 303 283 local req_proto_offset="72" 304 284 local req_offset="106" ··· 312 288 req_proto_offset="$((req_proto_offset + 4))" 313 289 req_offset="$((req_offset + 4))" 314 290 fi 315 - out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \ 316 - ip6[$tunnel_type_offset] = $tunnel_type_proto and \ 317 - ip6[$req_proto_offset] = 0x3a and \ 318 - ip6[$req_offset] = 0x80 2>/dev/null | head -n 1)" 291 + out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \ 292 + -i veth0 -n \ 293 + ip6[$tunnel_type_offset] = $tunnel_type_proto and \ 294 + ip6[$req_proto_offset] = 0x3a and \ 295 + ip6[$req_offset] = 0x80 2>/dev/null \ 296 + | head -n 1)" 319 297 elif [ "$inner" = "other" ]; then 320 298 local req_proto_offset="64" 321 299 local req_offset="73" ··· 333 307 expected_tos="0x00" 334 308 expected_ttl="64" 335 309 fi 336 - out="$(tcpdump --immediate-mode -p -c 1 -v -i veth0 -n \ 337 - ip6[$tunnel_type_offset] = $tunnel_type_proto and \ 338 - ip6[$req_proto_offset] = 0x08 and \ 339 - ip6[$((req_proto_offset + 1))] = 0x06 and \ 340 - ip6[$req_offset] = 0x01 2>/dev/null | head -n 1)" 310 + out="$(${RUN_NS0} tcpdump --immediate-mode -p -c 1 -v \ 311 
+ -i veth0 -n \ 312 + ip6[$tunnel_type_offset] = $tunnel_type_proto and \ 313 + ip6[$req_proto_offset] = 0x08 and \ 314 + ip6[$((req_proto_offset + 1))] = 0x06 and \ 315 + ip6[$req_offset] = 0x01 2>/dev/null \ 316 + | head -n 1)" 341 317 fi 342 318 fi 343 319 kill -9 $ping_pid 344 - wait $ping_pid 2>/dev/null 320 + wait $ping_pid 2>/dev/null || true 345 321 result="FAIL" 346 322 if [ "$outer" = "4" ]; then 347 323 captured_ttl="$(get_field "ttl" "$out")" ··· 379 351 } 380 352 381 353 cleanup() { 382 - ip link del veth0 2>/dev/null 383 - ip netns del testing 2>/dev/null 384 - ip link del tep0 2>/dev/null 354 + ip netns del "${NS0}" 2>/dev/null 355 + ip netns del "${NS1}" 2>/dev/null 385 356 } 357 + 358 + exit_handler() { 359 + # Don't exit immediately if one of the intermediate commands fails. 360 + # We might be called at the end of the script, when the network 361 + # namespaces have already been deleted. So cleanup() may fail, but we 362 + # still need to run until 'exit $ERR' or the script won't return the 363 + # correct error code. 364 + set +e 365 + 366 + cleanup 367 + 368 + exit $ERR 369 + } 370 + 371 + # Restore the default SIGINT handler (just in case) and exit. 372 + # The exit handler will take care of cleaning everything up. 373 + interrupted() { 374 + trap - INT 375 + 376 + exit $ERR 377 + } 378 + 379 + set -e 380 + trap exit_handler EXIT 381 + trap interrupted INT 386 382 387 383 printf "┌────────┬───────┬───────┬──────────────┬" 388 384 printf "──────────────┬───────┬────────┐\n" ··· 437 385 printf "└────────┴───────┴───────┴──────────────┴" 438 386 printf "──────────────┴───────┴────────┘\n" 439 387 388 + # All tests done. 389 + # Set ERR appropriately: it will be returned by the exit handler. 440 390 if $failed; then 441 - exit 1 391 + ERR=1 392 + else 393 + ERR=0 442 394 fi