Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

rxrpc: Kill off the rxrpc_transport struct

Now that the rxrpc_peer struct is per peer port rather than per peer host,
the rxrpc_transport struct is redundant, so get rid of it.

Service connection lists are transferred to the rxrpc_peer struct, as is
the conn_lock. Previous patches moved the client connection handling out
of the rxrpc_transport struct and discarded the connection bundling code.

Signed-off-by: David Howells <dhowells@redhat.com>

+65 -451
-1
net/rxrpc/Makefile
··· 22 22 recvmsg.o \ 23 23 security.o \ 24 24 skbuff.o \ 25 - transport.o \ 26 25 utils.o 27 26 28 27 af-rxrpc-$(CONFIG_PROC_FS) += proc.o
+1 -45
net/rxrpc/af_rxrpc.c
··· 224 224 return ret; 225 225 } 226 226 227 - /* 228 - * find a transport by address 229 - */ 230 - struct rxrpc_transport * 231 - rxrpc_name_to_transport(struct rxrpc_conn_parameters *cp, 232 - struct sockaddr *addr, 233 - int addr_len, 234 - gfp_t gfp) 235 - { 236 - struct sockaddr_rxrpc *srx = (struct sockaddr_rxrpc *) addr; 237 - struct rxrpc_transport *trans; 238 - 239 - _enter("%p,%d", addr, addr_len); 240 - 241 - if (cp->local->srx.transport_type != srx->transport_type) 242 - return ERR_PTR(-ESOCKTNOSUPPORT); 243 - if (cp->local->srx.transport.family != srx->transport.family) 244 - return ERR_PTR(-EAFNOSUPPORT); 245 - 246 - /* find a remote transport endpoint from the local one */ 247 - cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp); 248 - if (!cp->peer) 249 - return ERR_PTR(-ENOMEM); 250 - 251 - /* find a transport */ 252 - trans = rxrpc_get_transport(cp->local, cp->peer, gfp); 253 - rxrpc_put_peer(cp->peer); 254 - _leave(" = %p", trans); 255 - return trans; 256 - } 257 - 258 227 /** 259 228 * rxrpc_kernel_begin_call - Allow a kernel service to begin a call 260 229 * @sock: The socket on which to make the call ··· 245 276 gfp_t gfp) 246 277 { 247 278 struct rxrpc_conn_parameters cp; 248 - struct rxrpc_transport *trans; 249 279 struct rxrpc_call *call; 250 280 struct rxrpc_sock *rx = rxrpc_sk(sock->sk); 251 281 int ret; ··· 268 300 cp.security_level = 0; 269 301 cp.exclusive = false; 270 302 cp.service_id = srx->srx_service; 303 + call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, gfp); 271 304 272 - trans = rxrpc_name_to_transport(&cp, (struct sockaddr *)srx, 273 - sizeof(*srx), gfp); 274 - if (IS_ERR(trans)) { 275 - call = ERR_CAST(trans); 276 - trans = NULL; 277 - goto out_notrans; 278 - } 279 - cp.peer = trans->peer; 280 - 281 - call = rxrpc_new_client_call(rx, &cp, trans, srx, user_call_ID, gfp); 282 - rxrpc_put_transport(trans); 283 - out_notrans: 284 305 release_sock(&rx->sk); 285 306 _leave(" = %p", call); 286 307 return call; ··· 788 831 
proto_unregister(&rxrpc_proto); 789 832 rxrpc_destroy_all_calls(); 790 833 rxrpc_destroy_all_connections(); 791 - rxrpc_destroy_all_transports(); 792 834 793 835 ASSERTCMP(atomic_read(&rxrpc_n_skbs), ==, 0); 794 836
+10 -47
net/rxrpc/ar-internal.h
··· 207 207 struct rxrpc_local *local; 208 208 struct hlist_head error_targets; /* targets for net error distribution */ 209 209 struct work_struct error_distributor; 210 + struct rb_root service_conns; /* Service connections */ 211 + rwlock_t conn_lock; 210 212 spinlock_t lock; /* access lock */ 211 213 unsigned int if_mtu; /* interface MTU for this peer */ 212 214 unsigned int mtu; /* network MTU for this peer */ ··· 225 223 unsigned int rtt_point; /* next entry at which to insert */ 226 224 unsigned int rtt_usage; /* amount of cache actually used */ 227 225 suseconds_t rtt_cache[RXRPC_RTT_CACHE_SIZE]; /* calculated RTT cache */ 228 - }; 229 - 230 - /* 231 - * RxRPC point-to-point transport / connection manager definition 232 - * - handles a bundle of connections between two endpoints 233 - * - matched by { local, peer } 234 - */ 235 - struct rxrpc_transport { 236 - struct rxrpc_local *local; /* local transport endpoint */ 237 - struct rxrpc_peer *peer; /* remote transport endpoint */ 238 - struct rb_root server_conns; /* server connections on this transport */ 239 - struct list_head link; /* link in master session list */ 240 - unsigned long put_time; /* time at which to reap */ 241 - rwlock_t conn_lock; /* lock for active/dead connections */ 242 - atomic_t usage; 243 - int debug_id; /* debug ID for printks */ 244 226 }; 245 227 246 228 /* ··· 257 271 258 272 /* 259 273 * RxRPC connection definition 260 - * - matched by { transport, service_id, conn_id, direction, key } 274 + * - matched by { local, peer, epoch, conn_id, direction } 261 275 * - each connection can only handle four simultaneous calls 262 276 */ 263 277 struct rxrpc_connection { 264 - struct rxrpc_transport *trans; /* transport session */ 265 278 struct rxrpc_conn_proto proto; 266 279 struct rxrpc_conn_parameters params; 267 280 ··· 271 286 struct work_struct processor; /* connection event processor */ 272 287 union { 273 288 struct rb_node client_node; /* Node in local->client_conns */ 274 - 
struct rb_node service_node; /* Node in trans->server_conns */ 289 + struct rb_node service_node; /* Node in peer->service_conns */ 275 290 }; 276 291 struct list_head link; /* link in master connection list */ 277 292 struct rb_root calls; /* calls on this connection */ ··· 479 494 extern atomic_t rxrpc_debug_id; 480 495 extern struct workqueue_struct *rxrpc_workqueue; 481 496 482 - extern struct rxrpc_transport *rxrpc_name_to_transport(struct rxrpc_conn_parameters *, 483 - struct sockaddr *, 484 - int, gfp_t); 485 - 486 497 /* 487 498 * call_accept.c 488 499 */ ··· 507 526 struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *, unsigned long); 508 527 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *, 509 528 struct rxrpc_conn_parameters *, 510 - struct rxrpc_transport *, 511 529 struct sockaddr_rxrpc *, 512 530 unsigned long, gfp_t); 513 531 struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *, ··· 540 560 extern rwlock_t rxrpc_connection_lock; 541 561 542 562 int rxrpc_connect_call(struct rxrpc_call *, struct rxrpc_conn_parameters *, 543 - struct rxrpc_transport *, 544 563 struct sockaddr_rxrpc *, gfp_t); 564 + struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *, 565 + struct rxrpc_peer *, 566 + struct sk_buff *); 545 567 void rxrpc_disconnect_call(struct rxrpc_call *); 546 568 void rxrpc_put_connection(struct rxrpc_connection *); 547 569 void __exit rxrpc_destroy_all_connections(void); 548 - struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *, 549 - struct sk_buff *); 550 - extern struct rxrpc_connection * 551 - rxrpc_incoming_connection(struct rxrpc_transport *, struct sk_buff *); 570 + struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *, 571 + struct rxrpc_peer *, 572 + struct sk_buff *); 552 573 553 574 static inline bool rxrpc_conn_is_client(const struct rxrpc_connection *conn) 554 575 { ··· 564 583 static inline void rxrpc_get_connection(struct rxrpc_connection *conn) 565 
584 { 566 585 atomic_inc(&conn->usage); 567 - } 568 - 569 - static inline 570 - struct rxrpc_connection *rxrpc_get_connection_maybe(struct rxrpc_connection *conn) 571 - { 572 - return atomic_inc_not_zero(&conn->usage) ? conn : NULL; 573 586 } 574 587 575 588 /* ··· 718 743 static inline int __init rxrpc_sysctl_init(void) { return 0; } 719 744 static inline void rxrpc_sysctl_exit(void) {} 720 745 #endif 721 - 722 - /* 723 - * transport.c 724 - */ 725 - extern unsigned int rxrpc_transport_expiry; 726 - 727 - struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *, 728 - struct rxrpc_peer *, gfp_t); 729 - void rxrpc_put_transport(struct rxrpc_transport *); 730 - void __exit rxrpc_destroy_all_transports(void); 731 - struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *, 732 - struct rxrpc_peer *); 733 746 734 747 /* 735 748 * utils.c
+1 -10
net/rxrpc/call_accept.c
··· 74 74 struct sockaddr_rxrpc *srx) 75 75 { 76 76 struct rxrpc_connection *conn; 77 - struct rxrpc_transport *trans; 78 77 struct rxrpc_skb_priv *sp, *nsp; 79 78 struct rxrpc_peer *peer; 80 79 struct rxrpc_call *call; ··· 101 102 goto error; 102 103 } 103 104 104 - trans = rxrpc_get_transport(local, peer, GFP_NOIO); 105 + conn = rxrpc_incoming_connection(local, peer, skb); 105 106 rxrpc_put_peer(peer); 106 - if (IS_ERR(trans)) { 107 - _debug("no trans"); 108 - ret = -EBUSY; 109 - goto error; 110 - } 111 - 112 - conn = rxrpc_incoming_connection(trans, skb); 113 - rxrpc_put_transport(trans); 114 107 if (IS_ERR(conn)) { 115 108 _debug("no conn"); 116 109 ret = PTR_ERR(conn);
+6 -10
net/rxrpc/call_object.c
··· 286 286 /* 287 287 * Allocate a new client call. 288 288 */ 289 - static struct rxrpc_call *rxrpc_alloc_client_call( 290 - struct rxrpc_sock *rx, 291 - struct rxrpc_conn_parameters *cp, 292 - struct sockaddr_rxrpc *srx, 293 - gfp_t gfp) 289 + static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx, 290 + struct sockaddr_rxrpc *srx, 291 + gfp_t gfp) 294 292 { 295 293 struct rxrpc_call *call; 296 294 ··· 331 333 */ 332 334 static int rxrpc_begin_client_call(struct rxrpc_call *call, 333 335 struct rxrpc_conn_parameters *cp, 334 - struct rxrpc_transport *trans, 335 336 struct sockaddr_rxrpc *srx, 336 337 gfp_t gfp) 337 338 { ··· 339 342 /* Set up or get a connection record and set the protocol parameters, 340 343 * including channel number and call ID. 341 344 */ 342 - ret = rxrpc_connect_call(call, cp, trans, srx, gfp); 345 + ret = rxrpc_connect_call(call, cp, srx, gfp); 343 346 if (ret < 0) 344 347 return ret; 345 348 ··· 363 366 */ 364 367 struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx, 365 368 struct rxrpc_conn_parameters *cp, 366 - struct rxrpc_transport *trans, 367 369 struct sockaddr_rxrpc *srx, 368 370 unsigned long user_call_ID, 369 371 gfp_t gfp) ··· 373 377 374 378 _enter("%p,%lx", rx, user_call_ID); 375 379 376 - call = rxrpc_alloc_client_call(rx, cp, srx, gfp); 380 + call = rxrpc_alloc_client_call(rx, srx, gfp); 377 381 if (IS_ERR(call)) { 378 382 _leave(" = %ld", PTR_ERR(call)); 379 383 return call; ··· 409 413 list_add_tail(&call->link, &rxrpc_calls); 410 414 write_unlock_bh(&rxrpc_call_lock); 411 415 412 - ret = rxrpc_begin_client_call(call, cp, trans, srx, gfp); 416 + ret = rxrpc_begin_client_call(call, cp, srx, gfp); 413 417 if (ret < 0) 414 418 goto error; 415 419
+43 -35
net/rxrpc/conn_object.c
··· 100 100 * padding bytes in *cp. 101 101 */ 102 102 static struct rxrpc_connection * 103 - rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, 104 - struct rxrpc_transport *trans, 105 - gfp_t gfp) 103 + rxrpc_alloc_client_connection(struct rxrpc_conn_parameters *cp, gfp_t gfp) 106 104 { 107 105 struct rxrpc_connection *conn; 108 106 int ret; ··· 144 146 list_add_tail(&conn->link, &rxrpc_connections); 145 147 write_unlock(&rxrpc_connection_lock); 146 148 149 + /* We steal the caller's peer ref. */ 150 + cp->peer = NULL; 151 + rxrpc_get_local(conn->params.local); 147 152 key_get(conn->params.key); 148 - conn->trans = trans; 149 - atomic_inc(&trans->usage); 150 153 151 154 _leave(" = %p", conn); 152 155 return conn; ··· 166 167 */ 167 168 int rxrpc_connect_call(struct rxrpc_call *call, 168 169 struct rxrpc_conn_parameters *cp, 169 - struct rxrpc_transport *trans, 170 170 struct sockaddr_rxrpc *srx, 171 171 gfp_t gfp) 172 172 { ··· 179 181 180 182 _enter("{%d,%lx},", call->debug_id, call->user_call_ID); 181 183 182 - cp->peer = trans->peer; 183 - rxrpc_get_peer(cp->peer); 184 + cp->peer = rxrpc_lookup_peer(cp->local, srx, gfp); 185 + if (!cp->peer) 186 + return -ENOMEM; 184 187 185 188 if (!cp->exclusive) { 186 189 /* Search for a existing client connection unless this is going ··· 209 210 210 211 /* We didn't find a connection or we want an exclusive one. 
*/ 211 212 _debug("get new conn"); 212 - candidate = rxrpc_alloc_client_connection(cp, trans, gfp); 213 + candidate = rxrpc_alloc_client_connection(cp, gfp); 213 214 if (!candidate) { 214 215 _leave(" = -ENOMEM"); 215 216 return -ENOMEM; ··· 280 281 281 282 rxrpc_add_call_ID_to_conn(conn, call); 282 283 spin_unlock(&conn->channel_lock); 284 + rxrpc_put_peer(cp->peer); 285 + cp->peer = NULL; 283 286 _leave(" = %p {u=%d}", conn, atomic_read(&conn->usage)); 284 287 return 0; 285 288 ··· 330 329 remove_wait_queue(&conn->channel_wq, &myself); 331 330 __set_current_state(TASK_RUNNING); 332 331 rxrpc_put_connection(conn); 332 + rxrpc_put_peer(cp->peer); 333 + cp->peer = NULL; 333 334 _leave(" = -ERESTARTSYS"); 334 335 return -ERESTARTSYS; 335 336 } ··· 339 336 /* 340 337 * get a record of an incoming connection 341 338 */ 342 - struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_transport *trans, 339 + struct rxrpc_connection *rxrpc_incoming_connection(struct rxrpc_local *local, 340 + struct rxrpc_peer *peer, 343 341 struct sk_buff *skb) 344 342 { 345 343 struct rxrpc_connection *conn, *candidate = NULL; ··· 358 354 cid = sp->hdr.cid & RXRPC_CIDMASK; 359 355 360 356 /* search the connection list first */ 361 - read_lock_bh(&trans->conn_lock); 357 + read_lock_bh(&peer->conn_lock); 362 358 363 - p = trans->server_conns.rb_node; 359 + p = peer->service_conns.rb_node; 364 360 while (p) { 365 361 conn = rb_entry(p, struct rxrpc_connection, service_node); 366 362 ··· 377 373 else 378 374 goto found_extant_connection; 379 375 } 380 - read_unlock_bh(&trans->conn_lock); 376 + read_unlock_bh(&peer->conn_lock); 381 377 382 378 /* not yet present - create a candidate for a new record and then 383 379 * redo the search */ ··· 387 383 return ERR_PTR(-ENOMEM); 388 384 } 389 385 390 - candidate->trans = trans; 391 - candidate->proto.local = trans->local; 386 + candidate->proto.local = local; 392 387 candidate->proto.epoch = sp->hdr.epoch; 393 388 candidate->proto.cid = 
sp->hdr.cid & RXRPC_CIDMASK; 394 389 candidate->proto.in_clientflag = RXRPC_CLIENT_INITIATED; 395 - candidate->params.local = trans->local; 396 - candidate->params.peer = trans->peer; 390 + candidate->params.local = local; 391 + candidate->params.peer = peer; 397 392 candidate->params.service_id = sp->hdr.serviceId; 398 393 candidate->security_ix = sp->hdr.securityIndex; 399 394 candidate->out_clientflag = 0; ··· 400 397 if (candidate->params.service_id) 401 398 candidate->state = RXRPC_CONN_SERVER_UNSECURED; 402 399 403 - write_lock_bh(&trans->conn_lock); 400 + write_lock_bh(&peer->conn_lock); 404 401 405 - pp = &trans->server_conns.rb_node; 402 + pp = &peer->service_conns.rb_node; 406 403 p = NULL; 407 404 while (*pp) { 408 405 p = *pp; ··· 424 421 conn = candidate; 425 422 candidate = NULL; 426 423 rb_link_node(&conn->service_node, p, pp); 427 - rb_insert_color(&conn->service_node, &trans->server_conns); 428 - atomic_inc(&conn->trans->usage); 424 + rb_insert_color(&conn->service_node, &peer->service_conns); 425 + rxrpc_get_peer(peer); 426 + rxrpc_get_local(local); 429 427 430 - write_unlock_bh(&trans->conn_lock); 428 + write_unlock_bh(&peer->conn_lock); 431 429 432 430 write_lock(&rxrpc_connection_lock); 433 431 list_add_tail(&conn->link, &rxrpc_connections); ··· 445 441 /* we found the connection in the list immediately */ 446 442 found_extant_connection: 447 443 if (sp->hdr.securityIndex != conn->security_ix) { 448 - read_unlock_bh(&trans->conn_lock); 444 + read_unlock_bh(&peer->conn_lock); 449 445 goto security_mismatch; 450 446 } 451 447 rxrpc_get_connection(conn); 452 - read_unlock_bh(&trans->conn_lock); 448 + read_unlock_bh(&peer->conn_lock); 453 449 goto success; 454 450 455 451 /* we found the connection on the second time through the list */ 456 452 found_extant_second: 457 453 if (sp->hdr.securityIndex != conn->security_ix) { 458 - write_unlock_bh(&trans->conn_lock); 454 + write_unlock_bh(&peer->conn_lock); 459 455 goto security_mismatch; 460 456 } 461 
457 rxrpc_get_connection(conn); 462 - write_unlock_bh(&trans->conn_lock); 458 + write_unlock_bh(&peer->conn_lock); 463 459 kfree(candidate); 464 460 goto success; 465 461 ··· 473 469 * find a connection based on transport and RxRPC connection ID for an incoming 474 470 * packet 475 471 */ 476 - struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans, 472 + struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_local *local, 473 + struct rxrpc_peer *peer, 477 474 struct sk_buff *skb) 478 475 { 479 476 struct rxrpc_connection *conn; ··· 484 479 485 480 _enter(",{%x,%x}", sp->hdr.cid, sp->hdr.flags); 486 481 487 - read_lock_bh(&trans->conn_lock); 482 + read_lock_bh(&peer->conn_lock); 488 483 489 484 cid = sp->hdr.cid & RXRPC_CIDMASK; 490 485 epoch = sp->hdr.epoch; 491 486 492 487 if (sp->hdr.flags & RXRPC_CLIENT_INITIATED) { 493 - p = trans->server_conns.rb_node; 488 + p = peer->service_conns.rb_node; 494 489 while (p) { 495 490 conn = rb_entry(p, struct rxrpc_connection, service_node); 496 491 ··· 513 508 goto found; 514 509 } 515 510 516 - read_unlock_bh(&trans->conn_lock); 511 + read_unlock_bh(&peer->conn_lock); 517 512 _leave(" = NULL"); 518 513 return NULL; 519 514 520 515 found: 521 516 rxrpc_get_connection(conn); 522 - read_unlock_bh(&trans->conn_lock); 517 + read_unlock_bh(&peer->conn_lock); 523 518 _leave(" = %p", conn); 524 519 return conn; 525 520 } ··· 581 576 conn->security->clear(conn); 582 577 key_put(conn->params.key); 583 578 key_put(conn->server_key); 579 + rxrpc_put_peer(conn->params.peer); 580 + rxrpc_put_local(conn->params.local); 584 581 585 - rxrpc_put_transport(conn->trans); 586 582 kfree(conn); 587 583 _leave(""); 588 584 } ··· 594 588 static void rxrpc_connection_reaper(struct work_struct *work) 595 589 { 596 590 struct rxrpc_connection *conn, *_p; 591 + struct rxrpc_peer *peer; 597 592 unsigned long now, earliest, reap_time; 598 593 599 594 LIST_HEAD(graveyard); ··· 631 624 632 625 
spin_unlock(&local->client_conns_lock); 633 626 } else { 634 - write_lock_bh(&conn->trans->conn_lock); 627 + peer = conn->params.peer; 628 + write_lock_bh(&peer->conn_lock); 635 629 reap_time = conn->put_time + rxrpc_connection_expiry; 636 630 637 631 if (atomic_read(&conn->usage) > 0) { ··· 640 632 } else if (reap_time <= now) { 641 633 list_move_tail(&conn->link, &graveyard); 642 634 rb_erase(&conn->service_node, 643 - &conn->trans->server_conns); 635 + &peer->service_conns); 644 636 } else if (reap_time < earliest) { 645 637 earliest = reap_time; 646 638 } 647 639 648 - write_unlock_bh(&conn->trans->conn_lock); 640 + write_unlock_bh(&peer->conn_lock); 649 641 } 650 642 } 651 643 write_unlock(&rxrpc_connection_lock);
+1 -7
net/rxrpc/input.c
··· 631 631 struct sk_buff *skb) 632 632 { 633 633 struct rxrpc_peer *peer; 634 - struct rxrpc_transport *trans; 635 634 struct rxrpc_connection *conn; 636 635 struct sockaddr_rxrpc srx; 637 636 ··· 640 641 if (!peer) 641 642 goto cant_find_peer; 642 643 643 - trans = rxrpc_find_transport(local, peer); 644 + conn = rxrpc_find_connection(local, peer, skb); 644 645 rcu_read_unlock(); 645 - if (!trans) 646 - goto cant_find_conn; 647 - 648 - conn = rxrpc_find_connection(trans, skb); 649 - rxrpc_put_transport(trans); 650 646 if (!conn) 651 647 goto cant_find_conn; 652 648
+1 -23
net/rxrpc/output.c
··· 140 140 unsigned long user_call_ID, bool exclusive) 141 141 { 142 142 struct rxrpc_conn_parameters cp; 143 - struct rxrpc_transport *trans; 144 143 struct rxrpc_call *call; 145 144 struct key *key; 146 - long ret; 147 145 148 146 DECLARE_SOCKADDR(struct sockaddr_rxrpc *, srx, msg->msg_name); 149 147 ··· 160 162 cp.security_level = rx->min_sec_level; 161 163 cp.exclusive = rx->exclusive | exclusive; 162 164 cp.service_id = srx->srx_service; 163 - trans = rxrpc_name_to_transport(&cp, msg->msg_name, msg->msg_namelen, 164 - GFP_KERNEL); 165 - if (IS_ERR(trans)) { 166 - ret = PTR_ERR(trans); 167 - goto out; 168 - } 169 - cp.peer = trans->peer; 170 - 171 - call = rxrpc_new_client_call(rx, &cp, trans, srx, user_call_ID, 172 - GFP_KERNEL); 173 - rxrpc_put_transport(trans); 174 - if (IS_ERR(call)) { 175 - ret = PTR_ERR(call); 176 - goto out_trans; 177 - } 165 + call = rxrpc_new_client_call(rx, &cp, srx, user_call_ID, GFP_KERNEL); 178 166 179 167 _leave(" = %p\n", call); 180 168 return call; 181 - 182 - out_trans: 183 - rxrpc_put_transport(trans); 184 - out: 185 - _leave(" = %ld", ret); 186 - return ERR_PTR(ret); 187 169 } 188 170 189 171 /*
+2
net/rxrpc/peer_object.c
··· 188 188 INIT_HLIST_HEAD(&peer->error_targets); 189 189 INIT_WORK(&peer->error_distributor, 190 190 &rxrpc_peer_error_distributor); 191 + peer->service_conns = RB_ROOT; 192 + rwlock_init(&peer->conn_lock); 191 193 spin_lock_init(&peer->lock); 192 194 peer->debug_id = atomic_inc_return(&rxrpc_debug_id); 193 195 }
-8
net/rxrpc/sysctl.c
··· 90 90 .proc_handler = proc_dointvec_minmax, 91 91 .extra1 = (void *)&one, 92 92 }, 93 - { 94 - .procname = "transport_expiry", 95 - .data = &rxrpc_transport_expiry, 96 - .maxlen = sizeof(unsigned int), 97 - .mode = 0644, 98 - .proc_handler = proc_dointvec_minmax, 99 - .extra1 = (void *)&one, 100 - }, 101 93 102 94 /* Non-time values */ 103 95 {
-265
net/rxrpc/transport.c
··· 1 - /* RxRPC point-to-point transport session management 2 - * 3 - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 4 - * Written by David Howells (dhowells@redhat.com) 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public License 8 - * as published by the Free Software Foundation; either version 9 - * 2 of the License, or (at your option) any later version. 10 - */ 11 - 12 - #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 13 - 14 - #include <linux/module.h> 15 - #include <linux/net.h> 16 - #include <linux/skbuff.h> 17 - #include <linux/slab.h> 18 - #include <net/sock.h> 19 - #include <net/af_rxrpc.h> 20 - #include "ar-internal.h" 21 - 22 - /* 23 - * Time after last use at which transport record is cleaned up. 24 - */ 25 - unsigned int rxrpc_transport_expiry = 3600 * 24; 26 - 27 - static void rxrpc_transport_reaper(struct work_struct *work); 28 - 29 - static LIST_HEAD(rxrpc_transports); 30 - static DEFINE_RWLOCK(rxrpc_transport_lock); 31 - static DECLARE_DELAYED_WORK(rxrpc_transport_reap, rxrpc_transport_reaper); 32 - 33 - /* 34 - * allocate a new transport session manager 35 - */ 36 - static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local, 37 - struct rxrpc_peer *peer, 38 - gfp_t gfp) 39 - { 40 - struct rxrpc_transport *trans; 41 - 42 - _enter(""); 43 - 44 - trans = kzalloc(sizeof(struct rxrpc_transport), gfp); 45 - if (trans) { 46 - trans->local = local; 47 - trans->peer = peer; 48 - INIT_LIST_HEAD(&trans->link); 49 - trans->server_conns = RB_ROOT; 50 - rwlock_init(&trans->conn_lock); 51 - atomic_set(&trans->usage, 1); 52 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id); 53 - } 54 - 55 - _leave(" = %p", trans); 56 - return trans; 57 - } 58 - 59 - /* 60 - * obtain a transport session for the nominated endpoints 61 - */ 62 - struct rxrpc_transport *rxrpc_get_transport(struct rxrpc_local *local, 63 - struct rxrpc_peer *peer, 64 - gfp_t gfp) 65 - { 66 - 
struct rxrpc_transport *trans, *candidate; 67 - const char *new = "old"; 68 - int usage; 69 - 70 - _enter("{%pI4+%hu},{%pI4+%hu},", 71 - &local->srx.transport.sin.sin_addr, 72 - ntohs(local->srx.transport.sin.sin_port), 73 - &peer->srx.transport.sin.sin_addr, 74 - ntohs(peer->srx.transport.sin.sin_port)); 75 - 76 - /* search the transport list first */ 77 - read_lock_bh(&rxrpc_transport_lock); 78 - list_for_each_entry(trans, &rxrpc_transports, link) { 79 - if (trans->local == local && trans->peer == peer) 80 - goto found_extant_transport; 81 - } 82 - read_unlock_bh(&rxrpc_transport_lock); 83 - 84 - /* not yet present - create a candidate for a new record and then 85 - * redo the search */ 86 - candidate = rxrpc_alloc_transport(local, peer, gfp); 87 - if (!candidate) { 88 - _leave(" = -ENOMEM"); 89 - return ERR_PTR(-ENOMEM); 90 - } 91 - 92 - write_lock_bh(&rxrpc_transport_lock); 93 - 94 - list_for_each_entry(trans, &rxrpc_transports, link) { 95 - if (trans->local == local && trans->peer == peer) 96 - goto found_extant_second; 97 - } 98 - 99 - /* we can now add the new candidate to the list */ 100 - trans = candidate; 101 - candidate = NULL; 102 - usage = atomic_read(&trans->usage); 103 - 104 - rxrpc_get_local(trans->local); 105 - rxrpc_get_peer(trans->peer); 106 - list_add_tail(&trans->link, &rxrpc_transports); 107 - write_unlock_bh(&rxrpc_transport_lock); 108 - new = "new"; 109 - 110 - success: 111 - _net("TRANSPORT %s %d local %d -> peer %d", 112 - new, 113 - trans->debug_id, 114 - trans->local->debug_id, 115 - trans->peer->debug_id); 116 - 117 - _leave(" = %p {u=%d}", trans, usage); 118 - return trans; 119 - 120 - /* we found the transport in the list immediately */ 121 - found_extant_transport: 122 - usage = atomic_inc_return(&trans->usage); 123 - read_unlock_bh(&rxrpc_transport_lock); 124 - goto success; 125 - 126 - /* we found the transport on the second time through the list */ 127 - found_extant_second: 128 - usage = atomic_inc_return(&trans->usage); 129 - 
write_unlock_bh(&rxrpc_transport_lock); 130 - kfree(candidate); 131 - goto success; 132 - } 133 - 134 - /* 135 - * find the transport connecting two endpoints 136 - */ 137 - struct rxrpc_transport *rxrpc_find_transport(struct rxrpc_local *local, 138 - struct rxrpc_peer *peer) 139 - { 140 - struct rxrpc_transport *trans; 141 - 142 - _enter("{%pI4+%hu},{%pI4+%hu},", 143 - &local->srx.transport.sin.sin_addr, 144 - ntohs(local->srx.transport.sin.sin_port), 145 - &peer->srx.transport.sin.sin_addr, 146 - ntohs(peer->srx.transport.sin.sin_port)); 147 - 148 - /* search the transport list */ 149 - read_lock_bh(&rxrpc_transport_lock); 150 - 151 - list_for_each_entry(trans, &rxrpc_transports, link) { 152 - if (trans->local == local && trans->peer == peer) 153 - goto found_extant_transport; 154 - } 155 - 156 - read_unlock_bh(&rxrpc_transport_lock); 157 - _leave(" = NULL"); 158 - return NULL; 159 - 160 - found_extant_transport: 161 - atomic_inc(&trans->usage); 162 - read_unlock_bh(&rxrpc_transport_lock); 163 - _leave(" = %p", trans); 164 - return trans; 165 - } 166 - 167 - /* 168 - * release a transport session 169 - */ 170 - void rxrpc_put_transport(struct rxrpc_transport *trans) 171 - { 172 - _enter("%p{u=%d}", trans, atomic_read(&trans->usage)); 173 - 174 - ASSERTCMP(atomic_read(&trans->usage), >, 0); 175 - 176 - trans->put_time = ktime_get_seconds(); 177 - if (unlikely(atomic_dec_and_test(&trans->usage))) { 178 - _debug("zombie"); 179 - /* let the reaper determine the timeout to avoid a race with 180 - * overextending the timeout if the reaper is running at the 181 - * same time */ 182 - rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0); 183 - } 184 - _leave(""); 185 - } 186 - 187 - /* 188 - * clean up a transport session 189 - */ 190 - static void rxrpc_cleanup_transport(struct rxrpc_transport *trans) 191 - { 192 - _net("DESTROY TRANS %d", trans->debug_id); 193 - 194 - rxrpc_put_local(trans->local); 195 - rxrpc_put_peer(trans->peer); 196 - kfree(trans); 197 - } 198 - 199 
- /* 200 - * reap dead transports that have passed their expiry date 201 - */ 202 - static void rxrpc_transport_reaper(struct work_struct *work) 203 - { 204 - struct rxrpc_transport *trans, *_p; 205 - unsigned long now, earliest, reap_time; 206 - 207 - LIST_HEAD(graveyard); 208 - 209 - _enter(""); 210 - 211 - now = ktime_get_seconds(); 212 - earliest = ULONG_MAX; 213 - 214 - /* extract all the transports that have been dead too long */ 215 - write_lock_bh(&rxrpc_transport_lock); 216 - list_for_each_entry_safe(trans, _p, &rxrpc_transports, link) { 217 - _debug("reap TRANS %d { u=%d t=%ld }", 218 - trans->debug_id, atomic_read(&trans->usage), 219 - (long) now - (long) trans->put_time); 220 - 221 - if (likely(atomic_read(&trans->usage) > 0)) 222 - continue; 223 - 224 - reap_time = trans->put_time + rxrpc_transport_expiry; 225 - if (reap_time <= now) 226 - list_move_tail(&trans->link, &graveyard); 227 - else if (reap_time < earliest) 228 - earliest = reap_time; 229 - } 230 - write_unlock_bh(&rxrpc_transport_lock); 231 - 232 - if (earliest != ULONG_MAX) { 233 - _debug("reschedule reaper %ld", (long) earliest - now); 234 - ASSERTCMP(earliest, >, now); 235 - rxrpc_queue_delayed_work(&rxrpc_transport_reap, 236 - (earliest - now) * HZ); 237 - } 238 - 239 - /* then destroy all those pulled out */ 240 - while (!list_empty(&graveyard)) { 241 - trans = list_entry(graveyard.next, struct rxrpc_transport, 242 - link); 243 - list_del_init(&trans->link); 244 - 245 - ASSERTCMP(atomic_read(&trans->usage), ==, 0); 246 - rxrpc_cleanup_transport(trans); 247 - } 248 - 249 - _leave(""); 250 - } 251 - 252 - /* 253 - * preemptively destroy all the transport session records rather than waiting 254 - * for them to time out 255 - */ 256 - void __exit rxrpc_destroy_all_transports(void) 257 - { 258 - _enter(""); 259 - 260 - rxrpc_transport_expiry = 0; 261 - cancel_delayed_work(&rxrpc_transport_reap); 262 - rxrpc_queue_delayed_work(&rxrpc_transport_reap, 0); 263 - 264 - _leave(""); 265 - }