Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

sctp: get netns from asoc and ep base

Commit 312434617cb1 ("sctp: cache netns in sctp_ep_common") set netns
in asoc and ep base when they're created, and it will never change.
It's a better way to get netns from asoc and ep base, compared to
calling sock_net().

This patch is to replace them.

v1->v2:
- no change.

Suggested-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: Xin Long <lucien.xin@gmail.com>
Acked-by: Neil Horman <nhorman@tuxdriver.com>
Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Xin Long and committed by David S. Miller
4e7696d9 26c97a2d

+49 -62
+5 -5
net/sctp/associola.c
··· 584 584 const gfp_t gfp, 585 585 const int peer_state) 586 586 { 587 - struct net *net = sock_net(asoc->base.sk); 588 587 struct sctp_transport *peer; 589 588 struct sctp_sock *sp; 590 589 unsigned short port; ··· 613 614 return peer; 614 615 } 615 616 616 - peer = sctp_transport_new(net, addr, gfp); 617 + peer = sctp_transport_new(asoc->base.net, addr, gfp); 617 618 if (!peer) 618 619 return NULL; 619 620 ··· 973 974 struct sctp_association *asoc = 974 975 container_of(work, struct sctp_association, 975 976 base.inqueue.immediate); 976 - struct net *net = sock_net(asoc->base.sk); 977 + struct net *net = asoc->base.net; 977 978 union sctp_subtype subtype; 978 979 struct sctp_endpoint *ep; 979 980 struct sctp_chunk *chunk; ··· 1441 1442 /* Should we send a SACK to update our peer? */ 1442 1443 static inline bool sctp_peer_needs_update(struct sctp_association *asoc) 1443 1444 { 1444 - struct net *net = sock_net(asoc->base.sk); 1445 + struct net *net = asoc->base.net; 1446 + 1445 1447 switch (asoc->state) { 1446 1448 case SCTP_STATE_ESTABLISHED: 1447 1449 case SCTP_STATE_SHUTDOWN_PENDING: ··· 1576 1576 if (asoc->peer.ipv6_address) 1577 1577 flags |= SCTP_ADDR6_PEERSUPP; 1578 1578 1579 - return sctp_bind_addr_copy(sock_net(asoc->base.sk), 1579 + return sctp_bind_addr_copy(asoc->base.net, 1580 1580 &asoc->base.bind_addr, 1581 1581 &asoc->ep->base.bind_addr, 1582 1582 scope, gfp, flags);
+1 -1
net/sctp/chunk.c
··· 225 225 if (msg_len >= first_len) { 226 226 msg->can_delay = 0; 227 227 if (msg_len > first_len) 228 - SCTP_INC_STATS(sock_net(asoc->base.sk), 228 + SCTP_INC_STATS(asoc->base.net, 229 229 SCTP_MIB_FRAGUSRMSGS); 230 230 } else { 231 231 /* Which may be the only one... */
+3 -3
net/sctp/endpointola.c
··· 244 244 struct sctp_endpoint *retval = NULL; 245 245 246 246 if ((htons(ep->base.bind_addr.port) == laddr->v4.sin_port) && 247 - net_eq(sock_net(ep->base.sk), net)) { 247 + net_eq(ep->base.net, net)) { 248 248 if (sctp_bind_addr_match(&ep->base.bind_addr, laddr, 249 249 sctp_sk(ep->base.sk))) 250 250 retval = ep; ··· 292 292 const union sctp_addr *paddr) 293 293 { 294 294 struct sctp_sockaddr_entry *addr; 295 + struct net *net = ep->base.net; 295 296 struct sctp_bind_addr *bp; 296 - struct net *net = sock_net(ep->base.sk); 297 297 298 298 bp = &ep->base.bind_addr; 299 299 /* This function is called with the socket lock held, ··· 384 384 if (asoc && sctp_chunk_is_data(chunk)) 385 385 asoc->peer.last_data_from = chunk->transport; 386 386 else { 387 - SCTP_INC_STATS(sock_net(ep->base.sk), SCTP_MIB_INCTRLCHUNKS); 387 + SCTP_INC_STATS(ep->base.net, SCTP_MIB_INCTRLCHUNKS); 388 388 if (asoc) 389 389 asoc->stats.ictrlchunks++; 390 390 }
+2 -3
net/sctp/input.c
··· 937 937 if (t->asoc->temp) 938 938 return 0; 939 939 940 - arg.net = sock_net(t->asoc->base.sk); 940 + arg.net = t->asoc->base.net; 941 941 arg.paddr = &t->ipaddr; 942 942 arg.lport = htons(t->asoc->base.bind_addr.port); 943 943 ··· 1004 1004 const struct sctp_endpoint *ep, 1005 1005 const union sctp_addr *paddr) 1006 1006 { 1007 - struct net *net = sock_net(ep->base.sk); 1008 1007 struct rhlist_head *tmp, *list; 1009 1008 struct sctp_transport *t; 1010 1009 struct sctp_hash_cmp_arg arg = { 1011 1010 .paddr = paddr, 1012 - .net = net, 1011 + .net = ep->base.net, 1013 1012 .lport = htons(ep->base.bind_addr.port), 1014 1013 }; 1015 1014
+1 -1
net/sctp/output.c
··· 282 282 sctp_chunk_free(sack); 283 283 goto out; 284 284 } 285 - SCTP_INC_STATS(sock_net(asoc->base.sk), 285 + SCTP_INC_STATS(asoc->base.net, 286 286 SCTP_MIB_OUTCTRLCHUNKS); 287 287 asoc->stats.octrlchunks++; 288 288 asoc->peer.sack_needed = 0;
+3 -3
net/sctp/outqueue.c
··· 279 279 /* Put a new chunk in an sctp_outq. */ 280 280 void sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk, gfp_t gfp) 281 281 { 282 - struct net *net = sock_net(q->asoc->base.sk); 282 + struct net *net = q->asoc->base.net; 283 283 284 284 pr_debug("%s: outq:%p, chunk:%p[%s]\n", __func__, q, chunk, 285 285 chunk && chunk->chunk_hdr ? ··· 533 533 void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, 534 534 enum sctp_retransmit_reason reason) 535 535 { 536 - struct net *net = sock_net(q->asoc->base.sk); 536 + struct net *net = q->asoc->base.net; 537 537 538 538 switch (reason) { 539 539 case SCTP_RTXR_T3_RTX: ··· 1884 1884 1885 1885 if (ftsn_chunk) { 1886 1886 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); 1887 - SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS); 1887 + SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS); 1888 1888 } 1889 1889 }
+3 -4
net/sctp/sm_make_chunk.c
··· 2307 2307 const union sctp_addr *peer_addr, 2308 2308 struct sctp_init_chunk *peer_init, gfp_t gfp) 2309 2309 { 2310 - struct net *net = sock_net(asoc->base.sk); 2311 2310 struct sctp_transport *transport; 2312 2311 struct list_head *pos, *temp; 2313 2312 union sctp_params param; ··· 2362 2363 * also give us an option to silently ignore the packet, which 2363 2364 * is what we'll do here. 2364 2365 */ 2365 - if (!net->sctp.addip_noauth && 2366 - (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) { 2366 + if (!asoc->base.net->sctp.addip_noauth && 2367 + (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) { 2367 2368 asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP | 2368 2369 SCTP_PARAM_DEL_IP | 2369 2370 SCTP_PARAM_SET_PRIMARY); ··· 2490 2491 const union sctp_addr *peer_addr, 2491 2492 gfp_t gfp) 2492 2493 { 2493 - struct net *net = sock_net(asoc->base.sk); 2494 2494 struct sctp_endpoint *ep = asoc->ep; 2495 2495 union sctp_addr_param *addr_param; 2496 + struct net *net = asoc->base.net; 2496 2497 struct sctp_transport *t; 2497 2498 enum sctp_scope scope; 2498 2499 union sctp_addr addr;
+6 -10
net/sctp/sm_sideeffect.c
··· 516 516 struct sctp_transport *transport, 517 517 int is_hb) 518 518 { 519 - struct net *net = sock_net(asoc->base.sk); 520 - 521 519 /* The check for association's overall error counter exceeding the 522 520 * threshold is done in the state function. 523 521 */ ··· 542 544 * is SCTP_ACTIVE, then mark this transport as Partially Failed, 543 545 * see SCTP Quick Failover Draft, section 5.1 544 546 */ 545 - if (net->sctp.pf_enable && 546 - (transport->state == SCTP_ACTIVE) && 547 - (transport->error_count < transport->pathmaxrxt) && 548 - (transport->error_count > transport->pf_retrans)) { 547 + if (asoc->base.net->sctp.pf_enable && 548 + transport->state == SCTP_ACTIVE && 549 + transport->error_count < transport->pathmaxrxt && 550 + transport->error_count > transport->pf_retrans) { 549 551 550 552 sctp_assoc_control_transport(asoc, transport, 551 553 SCTP_TRANSPORT_PF, ··· 796 798 int err = 0; 797 799 798 800 if (sctp_outq_sack(&asoc->outqueue, chunk)) { 799 - struct net *net = sock_net(asoc->base.sk); 800 - 801 801 /* There are no more TSNs awaiting SACK. */ 802 - err = sctp_do_sm(net, SCTP_EVENT_T_OTHER, 802 + err = sctp_do_sm(asoc->base.net, SCTP_EVENT_T_OTHER, 803 803 SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), 804 804 asoc->state, asoc->ep, asoc, NULL, 805 805 GFP_ATOMIC); ··· 830 834 struct sctp_association *asoc, 831 835 struct sctp_association *new) 832 836 { 833 - struct net *net = sock_net(asoc->base.sk); 837 + struct net *net = asoc->base.net; 834 838 struct sctp_chunk *abort; 835 839 836 840 if (!sctp_assoc_update(asoc, new))
+1 -1
net/sctp/sm_statefuns.c
··· 1320 1320 struct sctp_chunk *init, 1321 1321 struct sctp_cmd_seq *commands) 1322 1322 { 1323 - struct net *net = sock_net(new_asoc->base.sk); 1323 + struct net *net = new_asoc->base.net; 1324 1324 struct sctp_transport *new_addr; 1325 1325 int ret = 1; 1326 1326
+5 -7
net/sctp/socket.c
··· 436 436 static int sctp_send_asconf(struct sctp_association *asoc, 437 437 struct sctp_chunk *chunk) 438 438 { 439 - struct net *net = sock_net(asoc->base.sk); 440 - int retval = 0; 439 + int retval = 0; 441 440 442 441 /* If there is an outstanding ASCONF chunk, queue it for later 443 442 * transmission. ··· 448 449 449 450 /* Hold the chunk until an ASCONF_ACK is received. */ 450 451 sctp_chunk_hold(chunk); 451 - retval = sctp_primitive_ASCONF(net, asoc, chunk); 452 + retval = sctp_primitive_ASCONF(asoc->base.net, asoc, chunk); 452 453 if (retval) 453 454 sctp_chunk_free(chunk); 454 455 else ··· 2427 2428 int error; 2428 2429 2429 2430 if (params->spp_flags & SPP_HB_DEMAND && trans) { 2430 - struct net *net = sock_net(trans->asoc->base.sk); 2431 - 2432 - error = sctp_primitive_REQUESTHEARTBEAT(net, trans->asoc, trans); 2431 + error = sctp_primitive_REQUESTHEARTBEAT(trans->asoc->base.net, 2432 + trans->asoc, trans); 2433 2433 if (error) 2434 2434 return error; 2435 2435 } ··· 5362 5364 if (!sctp_transport_hold(t)) 5363 5365 continue; 5364 5366 5365 - if (net_eq(sock_net(t->asoc->base.sk), net) && 5367 + if (net_eq(t->asoc->base.net, net) && 5366 5368 t->asoc->peer.primary_path == t) 5367 5369 break; 5368 5370
+1 -2
net/sctp/stream.c
··· 218 218 static int sctp_send_reconf(struct sctp_association *asoc, 219 219 struct sctp_chunk *chunk) 220 220 { 221 - struct net *net = sock_net(asoc->base.sk); 222 221 int retval = 0; 223 222 224 - retval = sctp_primitive_RECONF(net, asoc, chunk); 223 + retval = sctp_primitive_RECONF(asoc->base.net, asoc, chunk); 225 224 if (retval) 226 225 sctp_chunk_free(chunk); 227 226
+10 -13
net/sctp/stream_interleave.c
··· 241 241 if (!first_frag) 242 242 return NULL; 243 243 244 - retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), 245 - &ulpq->reasm, first_frag, 246 - last_frag); 244 + retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm, 245 + first_frag, last_frag); 247 246 if (retval) { 248 247 sin->fsn = next_fsn; 249 248 if (is_last) { ··· 325 326 326 327 pd_point = sctp_sk(asoc->base.sk)->pd_point; 327 328 if (pd_point && pd_point <= pd_len) { 328 - retval = sctp_make_reassembled_event(sock_net(asoc->base.sk), 329 + retval = sctp_make_reassembled_event(asoc->base.net, 329 330 &ulpq->reasm, 330 331 pd_first, pd_last); 331 332 if (retval) { ··· 336 337 goto out; 337 338 338 339 found: 339 - retval = sctp_make_reassembled_event(sock_net(asoc->base.sk), 340 - &ulpq->reasm, 340 + retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm, 341 341 first_frag, pos); 342 342 if (retval) 343 343 retval->msg_flags |= MSG_EOR; ··· 628 630 if (!first_frag) 629 631 return NULL; 630 632 631 - retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), 633 + retval = sctp_make_reassembled_event(ulpq->asoc->base.net, 632 634 &ulpq->reasm_uo, first_frag, 633 635 last_frag); 634 636 if (retval) { ··· 714 716 715 717 pd_point = sctp_sk(asoc->base.sk)->pd_point; 716 718 if (pd_point && pd_point <= pd_len) { 717 - retval = sctp_make_reassembled_event(sock_net(asoc->base.sk), 719 + retval = sctp_make_reassembled_event(asoc->base.net, 718 720 &ulpq->reasm_uo, 719 721 pd_first, pd_last); 720 722 if (retval) { ··· 725 727 goto out; 726 728 727 729 found: 728 - retval = sctp_make_reassembled_event(sock_net(asoc->base.sk), 729 - &ulpq->reasm_uo, 730 + retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo, 730 731 first_frag, pos); 731 732 if (retval) 732 733 retval->msg_flags |= MSG_EOR; ··· 811 814 return NULL; 812 815 813 816 out: 814 - retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), 817 + retval = sctp_make_reassembled_event(ulpq->asoc->base.net, 815 818 &ulpq->reasm_uo, first_frag, 816 819 last_frag); 817 820 if (retval) { ··· 918 921 return NULL; 919 922 920 923 out: 921 - retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), 924 + retval = sctp_make_reassembled_event(ulpq->asoc->base.net, 922 925 &ulpq->reasm, first_frag, 923 926 last_frag); 924 927 if (retval) { ··· 1156 1159 1157 1160 if (ftsn_chunk) { 1158 1161 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list); 1159 - SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS); 1162 + SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS); 1160 1163 } 1161 1164 } 1162 1165
+1 -1
net/sctp/transport.c
··· 334 334 pr_debug("%s: rto_pending not set on transport %p!\n", __func__, tp); 335 335 336 336 if (tp->rttvar || tp->srtt) { 337 - struct net *net = sock_net(tp->asoc->base.sk); 337 + struct net *net = tp->asoc->base.net; 338 338 /* 6.3.1 C3) When a new RTT measurement R' is made, set 339 339 * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| 340 340 * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R'
+7 -8
net/sctp/ulpqueue.c
··· 486 486 cevent = sctp_skb2event(pd_first); 487 487 pd_point = sctp_sk(asoc->base.sk)->pd_point; 488 488 if (pd_point && pd_point <= pd_len) { 489 - retval = sctp_make_reassembled_event(sock_net(asoc->base.sk), 489 + retval = sctp_make_reassembled_event(asoc->base.net, 490 490 &ulpq->reasm, 491 - pd_first, 492 - pd_last); 491 + pd_first, pd_last); 493 492 if (retval) 494 493 sctp_ulpq_set_pd(ulpq); 495 494 } ··· 496 497 done: 497 498 return retval; 498 499 found: 499 - retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), 500 + retval = sctp_make_reassembled_event(ulpq->asoc->base.net, 500 501 &ulpq->reasm, first_frag, pos); 501 502 if (retval) 502 503 retval->msg_flags |= MSG_EOR; ··· 562 563 * further. 563 564 */ 564 565 done: 565 - retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), 566 - &ulpq->reasm, first_frag, last_frag); 566 + retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm, 567 + first_frag, last_frag); 567 568 if (retval && is_last) 568 569 retval->msg_flags |= MSG_EOR; 569 570 ··· 663 664 * further. 664 665 */ 665 666 done: 666 - retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk), 667 - &ulpq->reasm, first_frag, last_frag); 667 + retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm, 668 + first_frag, last_frag); 668 669 return retval; 669 670 } 670 671