Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

tipc: make tipc node table aware of net namespace

Global variables associated with node table are below:
- node hash table (node_htable)
- node list (tipc_node_list)
- node table lock (node_list_lock)
- node number counter (tipc_num_nodes)
- node link number counter (tipc_num_links)

To make the node table support net namespaces, the global variables
above must be moved into the tipc_net structure so that each namespace
keeps its own private copies. As a consequence, these variables are
allocated and initialized when a namespace is created, and deallocated
when it is destroyed. After the change, the functions associated with
these variables have to use a namespace pointer to access them, so
adding a namespace pointer as a parameter to these functions is the
major change made in this commit.

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Tested-by: Tero Aho <Tero.Aho@coriant.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Ying Xue and committed by
David S. Miller
f2f9800d c93d3baa

+329 -244
+2
net/tipc/addr.h
··· 40 40 #define TIPC_ZONE_MASK 0xff000000u 41 41 #define TIPC_CLUSTER_MASK 0xfffff000u 42 42 43 + extern u32 tipc_own_addr __read_mostly; 44 + 43 45 static inline u32 tipc_zone_mask(u32 addr) 44 46 { 45 47 return addr & TIPC_ZONE_MASK;
+12 -12
net/tipc/bcast.c
··· 232 232 * 233 233 * Called with no locks taken 234 234 */ 235 - void tipc_bclink_wakeup_users(void) 235 + void tipc_bclink_wakeup_users(struct net *net) 236 236 { 237 237 struct sk_buff *skb; 238 238 239 239 while ((skb = skb_dequeue(&bclink->link.waiting_sks))) 240 - tipc_sk_rcv(skb); 241 - 240 + tipc_sk_rcv(net, skb); 242 241 } 243 242 244 243 /** ··· 384 385 * Delay any upcoming NACK by this node if another node has already 385 386 * requested the first message this node is going to ask for. 386 387 */ 387 - static void bclink_peek_nack(struct tipc_msg *msg) 388 + static void bclink_peek_nack(struct net *net, struct tipc_msg *msg) 388 389 { 389 - struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg)); 390 + struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg)); 390 391 391 392 if (unlikely(!n_ptr)) 392 393 return; ··· 403 404 404 405 /* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster 405 406 * and to identified node local sockets 407 + * @net: the applicable net namespace 406 408 * @list: chain of buffers containing message 407 409 * Consumes the buffer chain, except when returning -ELINKCONG 408 410 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE 409 411 */ 410 - int tipc_bclink_xmit(struct sk_buff_head *list) 412 + int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list) 411 413 { 412 414 int rc = 0; 413 415 int bc = 0; ··· 443 443 444 444 /* Deliver message clone */ 445 445 if (likely(!rc)) 446 - tipc_sk_mcast_rcv(skb); 446 + tipc_sk_mcast_rcv(net, skb); 447 447 else 448 448 kfree_skb(skb); 449 449 ··· 491 491 if (msg_mc_netid(msg) != tn->net_id) 492 492 goto exit; 493 493 494 - node = tipc_node_find(msg_prevnode(msg)); 494 + node = tipc_node_find(net, msg_prevnode(msg)); 495 495 if (unlikely(!node)) 496 496 goto exit; 497 497 ··· 514 514 tipc_bclink_unlock(); 515 515 } else { 516 516 tipc_node_unlock(node); 517 - bclink_peek_nack(msg); 517 + bclink_peek_nack(net, msg); 518 518 } 519 
519 goto exit; 520 520 } ··· 532 532 tipc_bclink_unlock(); 533 533 tipc_node_unlock(node); 534 534 if (likely(msg_mcast(msg))) 535 - tipc_sk_mcast_rcv(buf); 535 + tipc_sk_mcast_rcv(net, buf); 536 536 else 537 537 kfree_skb(buf); 538 538 } else if (msg_user(msg) == MSG_BUNDLER) { ··· 542 542 bcl->stats.recv_bundled += msg_msgcnt(msg); 543 543 tipc_bclink_unlock(); 544 544 tipc_node_unlock(node); 545 - tipc_link_bundle_rcv(buf); 545 + tipc_link_bundle_rcv(net, buf); 546 546 } else if (msg_user(msg) == MSG_FRAGMENTER) { 547 547 tipc_buf_append(&node->bclink.reasm_buf, &buf); 548 548 if (unlikely(!buf && !node->bclink.reasm_buf)) ··· 563 563 bclink_accept_pkt(node, seqno); 564 564 tipc_bclink_unlock(); 565 565 tipc_node_unlock(node); 566 - tipc_named_rcv(buf); 566 + tipc_named_rcv(net, buf); 567 567 } else { 568 568 tipc_bclink_lock(); 569 569 bclink_accept_pkt(node, seqno);
+2 -2
net/tipc/bcast.h
··· 101 101 int tipc_bclink_set_queue_limits(u32 limit); 102 102 void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action); 103 103 uint tipc_bclink_get_mtu(void); 104 - int tipc_bclink_xmit(struct sk_buff_head *list); 105 - void tipc_bclink_wakeup_users(void); 104 + int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list); 105 + void tipc_bclink_wakeup_users(struct net *net); 106 106 int tipc_nl_add_bc_link(struct tipc_nl_msg *msg); 107 107 108 108 #endif
+14 -11
net/tipc/bearer.c
··· 69 69 70 70 struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1]; 71 71 72 - static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down); 72 + static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr, 73 + bool shutting_down); 73 74 74 75 /** 75 76 * tipc_media_find - locates specified media object by name ··· 365 364 366 365 res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr); 367 366 if (res) { 368 - bearer_disable(b_ptr, false); 367 + bearer_disable(net, b_ptr, false); 369 368 pr_warn("Bearer <%s> rejected, discovery object creation failed\n", 370 369 name); 371 370 return -EINVAL; ··· 385 384 static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr) 386 385 { 387 386 pr_info("Resetting bearer <%s>\n", b_ptr->name); 388 - tipc_link_reset_list(b_ptr->identity); 387 + tipc_link_reset_list(net, b_ptr->identity); 389 388 tipc_disc_reset(net, b_ptr); 390 389 return 0; 391 390 } ··· 395 394 * 396 395 * Note: This routine assumes caller holds RTNL lock. 
397 396 */ 398 - static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down) 397 + static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr, 398 + bool shutting_down) 399 399 { 400 400 u32 i; 401 401 402 402 pr_info("Disabling bearer <%s>\n", b_ptr->name); 403 403 b_ptr->media->disable_media(b_ptr); 404 404 405 - tipc_link_delete_list(b_ptr->identity, shutting_down); 405 + tipc_link_delete_list(net, b_ptr->identity, shutting_down); 406 406 if (b_ptr->link_req) 407 407 tipc_disc_delete(b_ptr->link_req); 408 408 ··· 416 414 kfree_rcu(b_ptr, rcu); 417 415 } 418 416 419 - int tipc_disable_bearer(const char *name) 417 + int tipc_disable_bearer(struct net *net, const char *name) 420 418 { 421 419 struct tipc_bearer *b_ptr; 422 420 int res; ··· 426 424 pr_warn("Attempt to disable unknown bearer <%s>\n", name); 427 425 res = -EINVAL; 428 426 } else { 429 - bearer_disable(b_ptr, false); 427 + bearer_disable(net, b_ptr, false); 430 428 res = 0; 431 429 } 432 430 return res; ··· 595 593 break; 596 594 case NETDEV_UNREGISTER: 597 595 case NETDEV_CHANGENAME: 598 - bearer_disable(b_ptr, false); 596 + bearer_disable(dev_net(dev), b_ptr, false); 599 597 break; 600 598 } 601 599 return NOTIFY_OK; ··· 628 626 dev_remove_pack(&tipc_packet_type); 629 627 } 630 628 631 - void tipc_bearer_stop(void) 629 + void tipc_bearer_stop(struct net *net) 632 630 { 633 631 struct tipc_bearer *b_ptr; 634 632 u32 i; ··· 636 634 for (i = 0; i < MAX_BEARERS; i++) { 637 635 b_ptr = rtnl_dereference(bearer_list[i]); 638 636 if (b_ptr) { 639 - bearer_disable(b_ptr, true); 637 + bearer_disable(net, b_ptr, true); 640 638 bearer_list[i] = NULL; 641 639 } 642 640 } ··· 774 772 char *name; 775 773 struct tipc_bearer *bearer; 776 774 struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; 775 + struct net *net = genl_info_net(info); 777 776 778 777 if (!info->attrs[TIPC_NLA_BEARER]) 779 778 return -EINVAL; ··· 797 794 return -EINVAL; 798 795 } 799 796 800 - bearer_disable(bearer, false); 797 + 
bearer_disable(net, bearer, false); 801 798 rtnl_unlock(); 802 799 803 800 return 0;
+2 -2
net/tipc/bearer.h
··· 168 168 void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr); 169 169 int tipc_enable_bearer(struct net *net, const char *bearer_name, 170 170 u32 disc_domain, u32 priority); 171 - int tipc_disable_bearer(const char *name); 171 + int tipc_disable_bearer(struct net *net, const char *name); 172 172 173 173 /* 174 174 * Routines made available to TIPC by supported media types ··· 205 205 struct tipc_media *tipc_media_find(const char *name); 206 206 int tipc_bearer_setup(void); 207 207 void tipc_bearer_cleanup(void); 208 - void tipc_bearer_stop(void); 208 + void tipc_bearer_stop(struct net *net); 209 209 void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf, 210 210 struct tipc_media_addr *dest); 211 211
+13 -8
net/tipc/config.c
··· 150 150 return tipc_cfg_reply_none(); 151 151 } 152 152 153 - static struct sk_buff *cfg_disable_bearer(void) 153 + static struct sk_buff *cfg_disable_bearer(struct net *net) 154 154 { 155 155 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME)) 156 156 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 157 157 158 - if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area))) 158 + if (tipc_disable_bearer(net, (char *)TLV_DATA(req_tlv_area))) 159 159 return tipc_cfg_reply_error_string("unable to disable bearer"); 160 160 161 161 return tipc_cfg_reply_none(); ··· 232 232 rep_tlv_buf = tipc_cfg_reply_none(); 233 233 break; 234 234 case TIPC_CMD_GET_NODES: 235 - rep_tlv_buf = tipc_node_get_nodes(req_tlv_area, req_tlv_space); 235 + rep_tlv_buf = tipc_node_get_nodes(net, req_tlv_area, 236 + req_tlv_space); 236 237 break; 237 238 case TIPC_CMD_GET_LINKS: 238 - rep_tlv_buf = tipc_node_get_links(req_tlv_area, req_tlv_space); 239 + rep_tlv_buf = tipc_node_get_links(net, req_tlv_area, 240 + req_tlv_space); 239 241 break; 240 242 case TIPC_CMD_SHOW_LINK_STATS: 241 - rep_tlv_buf = tipc_link_cmd_show_stats(req_tlv_area, req_tlv_space); 243 + rep_tlv_buf = tipc_link_cmd_show_stats(net, req_tlv_area, 244 + req_tlv_space); 242 245 break; 243 246 case TIPC_CMD_RESET_LINK_STATS: 244 - rep_tlv_buf = tipc_link_cmd_reset_stats(req_tlv_area, req_tlv_space); 247 + rep_tlv_buf = tipc_link_cmd_reset_stats(net, req_tlv_area, 248 + req_tlv_space); 245 249 break; 246 250 case TIPC_CMD_SHOW_NAME_TABLE: 247 251 rep_tlv_buf = tipc_nametbl_get(req_tlv_area, req_tlv_space); ··· 265 261 case TIPC_CMD_SET_LINK_TOL: 266 262 case TIPC_CMD_SET_LINK_PRI: 267 263 case TIPC_CMD_SET_LINK_WINDOW: 268 - rep_tlv_buf = tipc_link_cmd_config(req_tlv_area, req_tlv_space, cmd); 264 + rep_tlv_buf = tipc_link_cmd_config(net, req_tlv_area, 265 + req_tlv_space, cmd); 269 266 break; 270 267 case TIPC_CMD_ENABLE_BEARER: 271 268 rep_tlv_buf = cfg_enable_bearer(net); 272 269 break; 273 270 case 
TIPC_CMD_DISABLE_BEARER: 274 - rep_tlv_buf = cfg_disable_bearer(); 271 + rep_tlv_buf = cfg_disable_bearer(net); 275 272 break; 276 273 case TIPC_CMD_SET_NODE_ADDR: 277 274 rep_tlv_buf = cfg_set_own_addr(net);
+3 -1
net/tipc/core.c
··· 57 57 struct tipc_net *tn = net_generic(net, tipc_net_id); 58 58 59 59 tn->net_id = 4711; 60 + INIT_LIST_HEAD(&tn->node_list); 61 + spin_lock_init(&tn->node_list_lock); 60 62 61 63 return 0; 62 64 } 63 65 64 66 static void __net_exit tipc_exit_net(struct net *net) 65 67 { 68 + tipc_net_stop(net); 66 69 } 67 70 68 71 static struct pernet_operations tipc_net_ops = { ··· 147 144 static void __exit tipc_exit(void) 148 145 { 149 146 unregister_pernet_subsys(&tipc_net_ops); 150 - tipc_net_stop(); 151 147 tipc_bearer_cleanup(); 152 148 tipc_netlink_stop(); 153 149 tipc_subscr_stop();
+9
net/tipc/core.h
··· 59 59 #include <linux/etherdevice.h> 60 60 #include <net/netns/generic.h> 61 61 62 + #include "node.h" 63 + 62 64 #define TIPC_MOD_VER "2.0.0" 63 65 64 66 int tipc_snprintf(char *buf, int len, const char *fmt, ...); ··· 80 78 81 79 struct tipc_net { 82 80 int net_id; 81 + 82 + /* Node table and node list */ 83 + spinlock_t node_list_lock; 84 + struct hlist_head node_htable[NODE_HTABLE_SIZE]; 85 + struct list_head node_list; 86 + u32 num_nodes; 87 + u32 num_links; 83 88 }; 84 89 85 90 #ifdef CONFIG_SYSCTL
+2 -2
net/tipc/discover.c
··· 162 162 return; 163 163 164 164 /* Locate, or if necessary, create, node: */ 165 - node = tipc_node_find(onode); 165 + node = tipc_node_find(net, onode); 166 166 if (!node) 167 - node = tipc_node_create(onode); 167 + node = tipc_node_create(net, onode); 168 168 if (!node) 169 169 return; 170 170
+66 -41
net/tipc/link.c
··· 114 114 static void link_print(struct tipc_link *l_ptr, const char *str); 115 115 static void tipc_link_sync_xmit(struct tipc_link *l); 116 116 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); 117 - static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf); 117 + static int tipc_link_input(struct net *net, struct tipc_link *l, 118 + struct sk_buff *buf); 118 119 static int tipc_link_prepare_input(struct net *net, struct tipc_link *l, 119 120 struct sk_buff **buf); 120 121 ··· 311 310 return l_ptr; 312 311 } 313 312 314 - void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down) 313 + void tipc_link_delete_list(struct net *net, unsigned int bearer_id, 314 + bool shutting_down) 315 315 { 316 + struct tipc_net *tn = net_generic(net, tipc_net_id); 316 317 struct tipc_link *l_ptr; 317 318 struct tipc_node *n_ptr; 318 319 319 320 rcu_read_lock(); 320 - list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 321 + list_for_each_entry_rcu(n_ptr, &tn->node_list, list) { 321 322 tipc_node_lock(n_ptr); 322 323 l_ptr = n_ptr->links[bearer_id]; 323 324 if (l_ptr) { ··· 454 451 link_reset_statistics(l_ptr); 455 452 } 456 453 457 - void tipc_link_reset_list(unsigned int bearer_id) 454 + void tipc_link_reset_list(struct net *net, unsigned int bearer_id) 458 455 { 456 + struct tipc_net *tn = net_generic(net, tipc_net_id); 459 457 struct tipc_link *l_ptr; 460 458 struct tipc_node *n_ptr; 461 459 462 460 rcu_read_lock(); 463 - list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 461 + list_for_each_entry_rcu(n_ptr, &tn->node_list, list) { 464 462 tipc_node_lock(n_ptr); 465 463 l_ptr = n_ptr->links[bearer_id]; 466 464 if (l_ptr) ··· 777 773 return __tipc_link_xmit(link, &head); 778 774 } 779 775 780 - int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector) 776 + int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode, 777 + u32 selector) 781 778 { 782 779 struct sk_buff_head head; 783 780 784 781 
skb2list(skb, &head); 785 - return tipc_link_xmit(&head, dnode, selector); 782 + return tipc_link_xmit(net, &head, dnode, selector); 786 783 } 787 784 788 785 /** 789 786 * tipc_link_xmit() is the general link level function for message sending 787 + * @net: the applicable net namespace 790 788 * @list: chain of buffers containing message 791 789 * @dsz: amount of user data to be sent 792 790 * @dnode: address of destination node ··· 796 790 * Consumes the buffer chain, except when returning -ELINKCONG 797 791 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE 798 792 */ 799 - int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector) 793 + int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode, 794 + u32 selector) 800 795 { 801 796 struct tipc_link *link = NULL; 802 797 struct tipc_node *node; 803 798 int rc = -EHOSTUNREACH; 804 799 805 - node = tipc_node_find(dnode); 800 + node = tipc_node_find(net, dnode); 806 801 if (node) { 807 802 tipc_node_lock(node); 808 803 link = node->active_links[selector & 1]; ··· 820 813 * buffer, we just need to dequeue one SKB buffer from the 821 814 * head list. 
822 815 */ 823 - return tipc_sk_rcv(__skb_dequeue(list)); 816 + return tipc_sk_rcv(net, __skb_dequeue(list)); 824 817 } 825 818 __skb_queue_purge(list); 826 819 ··· 1073 1066 1074 1067 /** 1075 1068 * tipc_rcv - process TIPC packets/messages arriving from off-node 1076 - * @net: net namespace handler 1069 + * @net: the applicable net namespace 1077 1070 * @skb: TIPC packet 1078 1071 * @b_ptr: pointer to bearer message arrived on 1079 1072 * ··· 1119 1112 goto discard; 1120 1113 1121 1114 /* Locate neighboring node that sent message */ 1122 - n_ptr = tipc_node_find(msg_prevnode(msg)); 1115 + n_ptr = tipc_node_find(net, msg_prevnode(msg)); 1123 1116 if (unlikely(!n_ptr)) 1124 1117 goto discard; 1125 1118 tipc_node_lock(n_ptr); ··· 1210 1203 } 1211 1204 tipc_node_unlock(n_ptr); 1212 1205 1213 - if (tipc_link_input(l_ptr, skb) != 0) 1206 + if (tipc_link_input(net, l_ptr, skb) != 0) 1214 1207 goto discard; 1215 1208 continue; 1216 1209 unlock_discard: ··· 1270 1263 /** 1271 1264 * tipc_link_input - Deliver message too higher layers 1272 1265 */ 1273 - static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf) 1266 + static int tipc_link_input(struct net *net, struct tipc_link *l, 1267 + struct sk_buff *buf) 1274 1268 { 1275 1269 struct tipc_msg *msg = buf_msg(buf); 1276 1270 int res = 0; ··· 1282 1274 case TIPC_HIGH_IMPORTANCE: 1283 1275 case TIPC_CRITICAL_IMPORTANCE: 1284 1276 case CONN_MANAGER: 1285 - tipc_sk_rcv(buf); 1277 + tipc_sk_rcv(net, buf); 1286 1278 break; 1287 1279 case NAME_DISTRIBUTOR: 1288 - tipc_named_rcv(buf); 1280 + tipc_named_rcv(net, buf); 1289 1281 break; 1290 1282 case MSG_BUNDLER: 1291 - tipc_link_bundle_rcv(buf); 1283 + tipc_link_bundle_rcv(net, buf); 1292 1284 break; 1293 1285 default: 1294 1286 res = -EINVAL; ··· 1863 1855 /* 1864 1856 * Bundler functionality: 1865 1857 */ 1866 - void tipc_link_bundle_rcv(struct sk_buff *buf) 1858 + void tipc_link_bundle_rcv(struct net *net, struct sk_buff *buf) 1867 1859 { 1868 1860 u32 msgcount = 
msg_msgcnt(buf_msg(buf)); 1869 1861 u32 pos = INT_H_SIZE; ··· 1880 1872 pos += align(msg_size(omsg)); 1881 1873 if (msg_isdata(omsg)) { 1882 1874 if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG)) 1883 - tipc_sk_mcast_rcv(obuf); 1875 + tipc_sk_mcast_rcv(net, obuf); 1884 1876 else 1885 - tipc_sk_rcv(obuf); 1877 + tipc_sk_rcv(net, obuf); 1886 1878 } else if (msg_user(omsg) == CONN_MANAGER) { 1887 - tipc_sk_rcv(obuf); 1879 + tipc_sk_rcv(net, obuf); 1888 1880 } else if (msg_user(omsg) == NAME_DISTRIBUTOR) { 1889 - tipc_named_rcv(obuf); 1881 + tipc_named_rcv(net, obuf); 1890 1882 } else { 1891 1883 pr_warn("Illegal bundled msg: %u\n", msg_user(omsg)); 1892 1884 kfree_skb(obuf); ··· 1927 1919 } 1928 1920 1929 1921 /* tipc_link_find_owner - locate owner node of link by link's name 1922 + * @net: the applicable net namespace 1930 1923 * @name: pointer to link name string 1931 1924 * @bearer_id: pointer to index in 'node->links' array where the link was found. 1932 1925 * 1933 1926 * Returns pointer to node owning the link, or 0 if no matching link is found. 
1934 1927 */ 1935 - static struct tipc_node *tipc_link_find_owner(const char *link_name, 1928 + static struct tipc_node *tipc_link_find_owner(struct net *net, 1929 + const char *link_name, 1936 1930 unsigned int *bearer_id) 1937 1931 { 1932 + struct tipc_net *tn = net_generic(net, tipc_net_id); 1938 1933 struct tipc_link *l_ptr; 1939 1934 struct tipc_node *n_ptr; 1940 1935 struct tipc_node *found_node = NULL; ··· 1945 1934 1946 1935 *bearer_id = 0; 1947 1936 rcu_read_lock(); 1948 - list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 1937 + list_for_each_entry_rcu(n_ptr, &tn->node_list, list) { 1949 1938 tipc_node_lock(n_ptr); 1950 1939 for (i = 0; i < MAX_BEARERS; i++) { 1951 1940 l_ptr = n_ptr->links[i]; ··· 1989 1978 1990 1979 /** 1991 1980 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media 1981 + * @net: the applicable net namespace 1992 1982 * @name: ptr to link, bearer, or media name 1993 1983 * @new_value: new value of link, bearer, or media setting 1994 1984 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) ··· 1998 1986 * 1999 1987 * Returns 0 if value updated and negative value on error. 
2000 1988 */ 2001 - static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd) 1989 + static int link_cmd_set_value(struct net *net, const char *name, u32 new_value, 1990 + u16 cmd) 2002 1991 { 2003 1992 struct tipc_node *node; 2004 1993 struct tipc_link *l_ptr; ··· 2008 1995 int bearer_id; 2009 1996 int res = 0; 2010 1997 2011 - node = tipc_link_find_owner(name, &bearer_id); 1998 + node = tipc_link_find_owner(net, name, &bearer_id); 2012 1999 if (node) { 2013 2000 tipc_node_lock(node); 2014 2001 l_ptr = node->links[bearer_id]; ··· 2076 2063 return res; 2077 2064 } 2078 2065 2079 - struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, 2080 - u16 cmd) 2066 + struct sk_buff *tipc_link_cmd_config(struct net *net, const void *req_tlv_area, 2067 + int req_tlv_space, u16 cmd) 2081 2068 { 2082 2069 struct tipc_link_config *args; 2083 2070 u32 new_value; ··· 2101 2088 " (cannot change setting on broadcast link)"); 2102 2089 } 2103 2090 2104 - res = link_cmd_set_value(args->name, new_value, cmd); 2091 + res = link_cmd_set_value(net, args->name, new_value, cmd); 2105 2092 if (res) 2106 2093 return tipc_cfg_reply_error_string("cannot change link setting"); 2107 2094 ··· 2119 2106 l_ptr->stats.recv_info = l_ptr->next_in_no; 2120 2107 } 2121 2108 2122 - struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) 2109 + struct sk_buff *tipc_link_cmd_reset_stats(struct net *net, 2110 + const void *req_tlv_area, 2111 + int req_tlv_space) 2123 2112 { 2124 2113 char *link_name; 2125 2114 struct tipc_link *l_ptr; ··· 2137 2122 return tipc_cfg_reply_error_string("link not found"); 2138 2123 return tipc_cfg_reply_none(); 2139 2124 } 2140 - node = tipc_link_find_owner(link_name, &bearer_id); 2125 + node = tipc_link_find_owner(net, link_name, &bearer_id); 2141 2126 if (!node) 2142 2127 return tipc_cfg_reply_error_string("link not found"); 2143 2128 ··· 2162 2147 2163 2148 /** 2164 2149 * tipc_link_stats - print link 
statistics 2150 + * @net: the applicable net namespace 2165 2151 * @name: link name 2166 2152 * @buf: print buffer area 2167 2153 * @buf_size: size of print buffer area 2168 2154 * 2169 2155 * Returns length of print buffer data string (or 0 if error) 2170 2156 */ 2171 - static int tipc_link_stats(const char *name, char *buf, const u32 buf_size) 2157 + static int tipc_link_stats(struct net *net, const char *name, char *buf, 2158 + const u32 buf_size) 2172 2159 { 2173 2160 struct tipc_link *l; 2174 2161 struct tipc_stats *s; ··· 2183 2166 if (!strcmp(name, tipc_bclink_name)) 2184 2167 return tipc_bclink_stats(buf, buf_size); 2185 2168 2186 - node = tipc_link_find_owner(name, &bearer_id); 2169 + node = tipc_link_find_owner(net, name, &bearer_id); 2187 2170 if (!node) 2188 2171 return 0; 2189 2172 ··· 2260 2243 return ret; 2261 2244 } 2262 2245 2263 - struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space) 2246 + struct sk_buff *tipc_link_cmd_show_stats(struct net *net, 2247 + const void *req_tlv_area, 2248 + int req_tlv_space) 2264 2249 { 2265 2250 struct sk_buff *buf; 2266 2251 struct tlv_desc *rep_tlv; ··· 2280 2261 rep_tlv = (struct tlv_desc *)buf->data; 2281 2262 pb = TLV_DATA(rep_tlv); 2282 2263 pb_len = ULTRA_STRING_MAX_LEN; 2283 - str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area), 2264 + str_len = tipc_link_stats(net, (char *)TLV_DATA(req_tlv_area), 2284 2265 pb, pb_len); 2285 2266 if (!str_len) { 2286 2267 kfree_skb(buf); ··· 2362 2343 struct tipc_link *link; 2363 2344 struct tipc_node *node; 2364 2345 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 2346 + struct net *net = genl_info_net(info); 2365 2347 2366 2348 if (!info->attrs[TIPC_NLA_LINK]) 2367 2349 return -EINVAL; ··· 2378 2358 2379 2359 name = nla_data(attrs[TIPC_NLA_LINK_NAME]); 2380 2360 2381 - node = tipc_link_find_owner(name, &bearer_id); 2361 + node = tipc_link_find_owner(net, name, &bearer_id); 2382 2362 if (!node) 2383 2363 return -EINVAL; 2384 2364 ··· 2587 
2567 2588 2568 int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) 2589 2569 { 2570 + struct net *net = sock_net(skb->sk); 2571 + struct tipc_net *tn = net_generic(net, tipc_net_id); 2590 2572 struct tipc_node *node; 2591 2573 struct tipc_nl_msg msg; 2592 2574 u32 prev_node = cb->args[0]; ··· 2606 2584 rcu_read_lock(); 2607 2585 2608 2586 if (prev_node) { 2609 - node = tipc_node_find(prev_node); 2587 + node = tipc_node_find(net, prev_node); 2610 2588 if (!node) { 2611 2589 /* We never set seq or call nl_dump_check_consistent() 2612 2590 * this means that setting prev_seq here will cause the ··· 2618 2596 goto out; 2619 2597 } 2620 2598 2621 - list_for_each_entry_continue_rcu(node, &tipc_node_list, list) { 2599 + list_for_each_entry_continue_rcu(node, &tn->node_list, 2600 + list) { 2622 2601 tipc_node_lock(node); 2623 2602 err = __tipc_nl_add_node_links(&msg, node, &prev_link); 2624 2603 tipc_node_unlock(node); ··· 2633 2610 if (err) 2634 2611 goto out; 2635 2612 2636 - list_for_each_entry_rcu(node, &tipc_node_list, list) { 2613 + list_for_each_entry_rcu(node, &tn->node_list, list) { 2637 2614 tipc_node_lock(node); 2638 2615 err = __tipc_nl_add_node_links(&msg, node, &prev_link); 2639 2616 tipc_node_unlock(node); ··· 2656 2633 2657 2634 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) 2658 2635 { 2636 + struct net *net = genl_info_net(info); 2659 2637 struct sk_buff *ans_skb; 2660 2638 struct tipc_nl_msg msg; 2661 2639 struct tipc_link *link; ··· 2669 2645 return -EINVAL; 2670 2646 2671 2647 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); 2672 - node = tipc_link_find_owner(name, &bearer_id); 2648 + node = tipc_link_find_owner(net, name, &bearer_id); 2673 2649 if (!node) 2674 2650 return -EINVAL; 2675 2651 ··· 2711 2687 struct tipc_link *link; 2712 2688 struct tipc_node *node; 2713 2689 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 2690 + struct net *net = genl_info_net(info); 2714 2691 2715 2692 if (!info->attrs[TIPC_NLA_LINK]) 
2716 2693 return -EINVAL; ··· 2734 2709 return 0; 2735 2710 } 2736 2711 2737 - node = tipc_link_find_owner(link_name, &bearer_id); 2712 + node = tipc_link_find_owner(net, link_name, &bearer_id); 2738 2713 if (!node) 2739 2714 return -EINVAL; 2740 2715
+14 -10
net/tipc/link.h
··· 200 200 struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, 201 201 struct tipc_bearer *b_ptr, 202 202 const struct tipc_media_addr *media_addr); 203 - void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down); 203 + void tipc_link_delete_list(struct net *net, unsigned int bearer_id, 204 + bool shutting_down); 204 205 void tipc_link_failover_send_queue(struct tipc_link *l_ptr); 205 206 void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest); 206 207 void tipc_link_reset_fragments(struct tipc_link *l_ptr); 207 208 int tipc_link_is_up(struct tipc_link *l_ptr); 208 209 int tipc_link_is_active(struct tipc_link *l_ptr); 209 210 void tipc_link_purge_queues(struct tipc_link *l_ptr); 210 - struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, 211 - int req_tlv_space, 212 - u16 cmd); 213 - struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, 211 + struct sk_buff *tipc_link_cmd_config(struct net *net, const void *req_tlv_area, 212 + int req_tlv_space, u16 cmd); 213 + struct sk_buff *tipc_link_cmd_show_stats(struct net *net, 214 + const void *req_tlv_area, 214 215 int req_tlv_space); 215 - struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, 216 + struct sk_buff *tipc_link_cmd_reset_stats(struct net *net, 217 + const void *req_tlv_area, 216 218 int req_tlv_space); 217 219 void tipc_link_reset_all(struct tipc_node *node); 218 220 void tipc_link_reset(struct tipc_link *l_ptr); 219 - void tipc_link_reset_list(unsigned int bearer_id); 220 - int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector); 221 - int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector); 221 + void tipc_link_reset_list(struct net *net, unsigned int bearer_id); 222 + int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest, 223 + u32 selector); 224 + int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest, 225 + u32 selector); 222 226 int __tipc_link_xmit(struct tipc_link 
*link, struct sk_buff_head *list); 223 - void tipc_link_bundle_rcv(struct sk_buff *buf); 227 + void tipc_link_bundle_rcv(struct net *net, struct sk_buff *buf); 224 228 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, 225 229 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); 226 230 void tipc_link_push_packets(struct tipc_link *l_ptr);
+31 -26
net/tipc/name_distr.c
··· 81 81 return buf; 82 82 } 83 83 84 - void named_cluster_distribute(struct sk_buff *skb) 84 + void named_cluster_distribute(struct net *net, struct sk_buff *skb) 85 85 { 86 + struct tipc_net *tn = net_generic(net, tipc_net_id); 86 87 struct sk_buff *oskb; 87 88 struct tipc_node *node; 88 89 u32 dnode; 89 90 90 91 rcu_read_lock(); 91 - list_for_each_entry_rcu(node, &tipc_node_list, list) { 92 + list_for_each_entry_rcu(node, &tn->node_list, list) { 92 93 dnode = node->addr; 93 94 if (in_own_node(dnode)) 94 95 continue; ··· 99 98 if (!oskb) 100 99 break; 101 100 msg_set_destnode(buf_msg(oskb), dnode); 102 - tipc_link_xmit_skb(oskb, dnode, dnode); 101 + tipc_link_xmit_skb(net, oskb, dnode, dnode); 103 102 } 104 103 rcu_read_unlock(); 105 104 ··· 161 160 * @dnode: node to be updated 162 161 * @pls: linked list of publication items to be packed into buffer chain 163 162 */ 164 - static void named_distribute(struct sk_buff_head *list, u32 dnode, 165 - struct list_head *pls) 163 + static void named_distribute(struct net *net, struct sk_buff_head *list, 164 + u32 dnode, struct list_head *pls) 166 165 { 167 166 struct publication *publ; 168 167 struct sk_buff *skb = NULL; 169 168 struct distr_item *item = NULL; 170 - uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE; 169 + uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) * 170 + ITEM_SIZE; 171 171 uint msg_rem = msg_dsz; 172 172 173 173 list_for_each_entry(publ, pls, local_list) { ··· 204 202 /** 205 203 * tipc_named_node_up - tell specified node about all publications by this node 206 204 */ 207 - void tipc_named_node_up(u32 dnode) 205 + void tipc_named_node_up(struct net *net, u32 dnode) 208 206 { 209 207 struct sk_buff_head head; 210 208 211 209 __skb_queue_head_init(&head); 212 210 213 211 rcu_read_lock(); 214 - named_distribute(&head, dnode, 212 + named_distribute(net, &head, dnode, 215 213 &tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]); 216 - named_distribute(&head, dnode, 214 + 
named_distribute(net, &head, dnode, 217 215 &tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]); 218 216 rcu_read_unlock(); 219 217 220 - tipc_link_xmit(&head, dnode, dnode); 218 + tipc_link_xmit(net, &head, dnode, dnode); 221 219 } 222 220 223 - static void tipc_publ_subscribe(struct publication *publ, u32 addr) 221 + static void tipc_publ_subscribe(struct net *net, struct publication *publ, 222 + u32 addr) 224 223 { 225 224 struct tipc_node *node; 226 225 227 226 if (in_own_node(addr)) 228 227 return; 229 228 230 - node = tipc_node_find(addr); 229 + node = tipc_node_find(net, addr); 231 230 if (!node) { 232 231 pr_warn("Node subscription rejected, unknown node 0x%x\n", 233 232 addr); ··· 240 237 tipc_node_unlock(node); 241 238 } 242 239 243 - static void tipc_publ_unsubscribe(struct publication *publ, u32 addr) 240 + static void tipc_publ_unsubscribe(struct net *net, struct publication *publ, 241 + u32 addr) 244 242 { 245 243 struct tipc_node *node; 246 244 247 - node = tipc_node_find(addr); 245 + node = tipc_node_find(net, addr); 248 246 if (!node) 249 247 return; 250 248 ··· 260 256 * Invoked for each publication issued by a newly failed node. 261 257 * Removes publication structure from name table & deletes it. 
262 258 */ 263 - static void tipc_publ_purge(struct publication *publ, u32 addr) 259 + static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr) 264 260 { 265 261 struct publication *p; 266 262 ··· 268 264 p = tipc_nametbl_remove_publ(publ->type, publ->lower, 269 265 publ->node, publ->ref, publ->key); 270 266 if (p) 271 - tipc_publ_unsubscribe(p, addr); 267 + tipc_publ_unsubscribe(net, p, addr); 272 268 spin_unlock_bh(&tipc_nametbl_lock); 273 269 274 270 if (p != publ) { ··· 281 277 kfree_rcu(p, rcu); 282 278 } 283 279 284 - void tipc_publ_notify(struct list_head *nsub_list, u32 addr) 280 + void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr) 285 281 { 286 282 struct publication *publ, *tmp; 287 283 288 284 list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) 289 - tipc_publ_purge(publ, addr); 285 + tipc_publ_purge(net, publ, addr); 290 286 } 291 287 292 288 /** ··· 296 292 * tipc_nametbl_lock must be held. 297 293 * Returns the publication item if successful, otherwise NULL. 298 294 */ 299 - static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype) 295 + static bool tipc_update_nametbl(struct net *net, struct distr_item *i, 296 + u32 node, u32 dtype) 300 297 { 301 298 struct publication *publ = NULL; 302 299 ··· 307 302 TIPC_CLUSTER_SCOPE, node, 308 303 ntohl(i->ref), ntohl(i->key)); 309 304 if (publ) { 310 - tipc_publ_subscribe(publ, node); 305 + tipc_publ_subscribe(net, publ, node); 311 306 return true; 312 307 } 313 308 } else if (dtype == WITHDRAWAL) { ··· 315 310 node, ntohl(i->ref), 316 311 ntohl(i->key)); 317 312 if (publ) { 318 - tipc_publ_unsubscribe(publ, node); 313 + tipc_publ_unsubscribe(net, publ, node); 319 314 kfree_rcu(publ, rcu); 320 315 return true; 321 316 } ··· 348 343 * tipc_named_process_backlog - try to process any pending name table updates 349 344 * from the network. 
350 345 */ 351 - void tipc_named_process_backlog(void) 346 + void tipc_named_process_backlog(struct net *net) 352 347 { 353 348 struct distr_queue_item *e, *tmp; 354 349 char addr[16]; ··· 356 351 357 352 list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { 358 353 if (time_after(e->expires, now)) { 359 - if (!tipc_update_nametbl(&e->i, e->node, e->dtype)) 354 + if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype)) 360 355 continue; 361 356 } else { 362 357 tipc_addr_string_fill(addr, e->node); ··· 374 369 /** 375 370 * tipc_named_rcv - process name table update message sent by another node 376 371 */ 377 - void tipc_named_rcv(struct sk_buff *buf) 372 + void tipc_named_rcv(struct net *net, struct sk_buff *buf) 378 373 { 379 374 struct tipc_msg *msg = buf_msg(buf); 380 375 struct distr_item *item = (struct distr_item *)msg_data(msg); ··· 383 378 384 379 spin_lock_bh(&tipc_nametbl_lock); 385 380 while (count--) { 386 - if (!tipc_update_nametbl(item, node, msg_type(msg))) 381 + if (!tipc_update_nametbl(net, item, node, msg_type(msg))) 387 382 tipc_named_add_backlog(item, msg_type(msg), node); 388 383 item++; 389 384 } 390 - tipc_named_process_backlog(); 385 + tipc_named_process_backlog(net); 391 386 spin_unlock_bh(&tipc_nametbl_lock); 392 387 kfree_skb(buf); 393 388 }
+5 -5
net/tipc/name_distr.h
··· 69 69 70 70 struct sk_buff *tipc_named_publish(struct publication *publ); 71 71 struct sk_buff *tipc_named_withdraw(struct publication *publ); 72 - void named_cluster_distribute(struct sk_buff *buf); 73 - void tipc_named_node_up(u32 dnode); 74 - void tipc_named_rcv(struct sk_buff *buf); 72 + void named_cluster_distribute(struct net *net, struct sk_buff *buf); 73 + void tipc_named_node_up(struct net *net, u32 dnode); 74 + void tipc_named_rcv(struct net *net, struct sk_buff *buf); 75 75 void tipc_named_reinit(void); 76 - void tipc_named_process_backlog(void); 77 - void tipc_publ_notify(struct list_head *nsub_list, u32 addr); 76 + void tipc_named_process_backlog(struct net *net); 77 + void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr); 78 78 79 79 #endif
+9 -7
net/tipc/name_table.c
··· 650 650 /* 651 651 * tipc_nametbl_publish - add name publication to network name tables 652 652 */ 653 - struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, 654 - u32 scope, u32 port_ref, u32 key) 653 + struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, 654 + u32 upper, u32 scope, u32 port_ref, 655 + u32 key) 655 656 { 656 657 struct publication *publ; 657 658 struct sk_buff *buf = NULL; ··· 671 670 tipc_nametbl->local_publ_count++; 672 671 buf = tipc_named_publish(publ); 673 672 /* Any pending external events? */ 674 - tipc_named_process_backlog(); 673 + tipc_named_process_backlog(net); 675 674 } 676 675 spin_unlock_bh(&tipc_nametbl_lock); 677 676 678 677 if (buf) 679 - named_cluster_distribute(buf); 678 + named_cluster_distribute(net, buf); 680 679 return publ; 681 680 } 682 681 683 682 /** 684 683 * tipc_nametbl_withdraw - withdraw name publication from network name tables 685 684 */ 686 - int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) 685 + int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref, 686 + u32 key) 687 687 { 688 688 struct publication *publ; 689 689 struct sk_buff *skb = NULL; ··· 695 693 tipc_nametbl->local_publ_count--; 696 694 skb = tipc_named_withdraw(publ); 697 695 /* Any pending external events? */ 698 - tipc_named_process_backlog(); 696 + tipc_named_process_backlog(net); 699 697 list_del_init(&publ->pport_list); 700 698 kfree_rcu(publ, rcu); 701 699 } else { ··· 706 704 spin_unlock_bh(&tipc_nametbl_lock); 707 705 708 706 if (skb) { 709 - named_cluster_distribute(skb); 707 + named_cluster_distribute(net, skb); 710 708 return 1; 711 709 } 712 710 return 0;
+5 -3
net/tipc/name_table.h
··· 104 104 u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node); 105 105 int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 106 106 struct tipc_port_list *dports); 107 - struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, 108 - u32 scope, u32 port_ref, u32 key); 109 - int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key); 107 + struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower, 108 + u32 upper, u32 scope, u32 port_ref, 109 + u32 key); 110 + int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref, 111 + u32 key); 110 112 struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, 111 113 u32 scope, u32 node, u32 ref, 112 114 u32 key);
+6 -5
net/tipc/net.c
··· 121 121 if (res) 122 122 return res; 123 123 124 - tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr, 124 + tipc_nametbl_publish(net, TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr, 125 125 TIPC_ZONE_SCOPE, 0, tipc_own_addr); 126 126 127 127 pr_info("Started in network mode\n"); ··· 131 131 return 0; 132 132 } 133 133 134 - void tipc_net_stop(void) 134 + void tipc_net_stop(struct net *net) 135 135 { 136 136 if (!tipc_own_addr) 137 137 return; 138 138 139 - tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr); 139 + tipc_nametbl_withdraw(net, TIPC_CFG_SRV, tipc_own_addr, 0, 140 + tipc_own_addr); 140 141 rtnl_lock(); 141 - tipc_bearer_stop(); 142 + tipc_bearer_stop(net); 142 143 tipc_bclink_stop(); 143 - tipc_node_stop(); 144 + tipc_node_stop(net); 144 145 rtnl_unlock(); 145 146 146 147 pr_info("Left network mode\n");
+1 -1
net/tipc/net.h
··· 41 41 42 42 int tipc_net_start(struct net *net, u32 addr); 43 43 44 - void tipc_net_stop(void); 44 + void tipc_net_stop(struct net *net); 45 45 46 46 int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); 47 47 int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
+69 -61
net/tipc/node.c
··· 40 40 #include "name_distr.h" 41 41 #include "socket.h" 42 42 43 - #define NODE_HTABLE_SIZE 512 44 - 45 43 static void node_lost_contact(struct tipc_node *n_ptr); 46 44 static void node_established_contact(struct tipc_node *n_ptr); 47 - 48 - static struct hlist_head node_htable[NODE_HTABLE_SIZE]; 49 - LIST_HEAD(tipc_node_list); 50 - static u32 tipc_num_nodes; 51 - static u32 tipc_num_links; 52 - static DEFINE_SPINLOCK(node_list_lock); 53 45 54 46 struct tipc_sock_conn { 55 47 u32 port; ··· 70 78 /* 71 79 * tipc_node_find - locate specified node object, if it exists 72 80 */ 73 - struct tipc_node *tipc_node_find(u32 addr) 81 + struct tipc_node *tipc_node_find(struct net *net, u32 addr) 74 82 { 83 + struct tipc_net *tn = net_generic(net, tipc_net_id); 75 84 struct tipc_node *node; 76 85 77 86 if (unlikely(!in_own_cluster_exact(addr))) 78 87 return NULL; 79 88 80 89 rcu_read_lock(); 81 - hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) { 90 + hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)], 91 + hash) { 82 92 if (node->addr == addr) { 83 93 rcu_read_unlock(); 84 94 return node; ··· 90 96 return NULL; 91 97 } 92 98 93 - struct tipc_node *tipc_node_create(u32 addr) 99 + struct tipc_node *tipc_node_create(struct net *net, u32 addr) 94 100 { 101 + struct tipc_net *tn = net_generic(net, tipc_net_id); 95 102 struct tipc_node *n_ptr, *temp_node; 96 103 97 - spin_lock_bh(&node_list_lock); 104 + spin_lock_bh(&tn->node_list_lock); 98 105 99 106 n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC); 100 107 if (!n_ptr) { 101 - spin_unlock_bh(&node_list_lock); 108 + spin_unlock_bh(&tn->node_list_lock); 102 109 pr_warn("Node creation failed, no memory\n"); 103 110 return NULL; 104 111 } 105 112 106 113 n_ptr->addr = addr; 114 + n_ptr->net = net; 107 115 spin_lock_init(&n_ptr->lock); 108 116 INIT_HLIST_NODE(&n_ptr->hash); 109 117 INIT_LIST_HEAD(&n_ptr->list); ··· 114 118 skb_queue_head_init(&n_ptr->waiting_sks); 115 119 
__skb_queue_head_init(&n_ptr->bclink.deferred_queue); 116 120 117 - hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]); 121 + hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]); 118 122 119 - list_for_each_entry_rcu(temp_node, &tipc_node_list, list) { 123 + list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 120 124 if (n_ptr->addr < temp_node->addr) 121 125 break; 122 126 } ··· 124 128 n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN; 125 129 n_ptr->signature = INVALID_NODE_SIG; 126 130 127 - tipc_num_nodes++; 131 + tn->num_nodes++; 128 132 129 - spin_unlock_bh(&node_list_lock); 133 + spin_unlock_bh(&tn->node_list_lock); 130 134 return n_ptr; 131 135 } 132 136 133 - static void tipc_node_delete(struct tipc_node *n_ptr) 137 + static void tipc_node_delete(struct tipc_net *tn, struct tipc_node *n_ptr) 134 138 { 135 139 list_del_rcu(&n_ptr->list); 136 140 hlist_del_rcu(&n_ptr->hash); 137 141 kfree_rcu(n_ptr, rcu); 138 142 139 - tipc_num_nodes--; 143 + tn->num_nodes--; 140 144 } 141 145 142 - void tipc_node_stop(void) 146 + void tipc_node_stop(struct net *net) 143 147 { 148 + struct tipc_net *tn = net_generic(net, tipc_net_id); 144 149 struct tipc_node *node, *t_node; 145 150 146 - spin_lock_bh(&node_list_lock); 147 - list_for_each_entry_safe(node, t_node, &tipc_node_list, list) 148 - tipc_node_delete(node); 149 - spin_unlock_bh(&node_list_lock); 151 + spin_lock_bh(&tn->node_list_lock); 152 + list_for_each_entry_safe(node, t_node, &tn->node_list, list) 153 + tipc_node_delete(tn, node); 154 + spin_unlock_bh(&tn->node_list_lock); 150 155 } 151 156 152 - int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port) 157 + int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port) 153 158 { 154 159 struct tipc_node *node; 155 160 struct tipc_sock_conn *conn; ··· 158 161 if (in_own_node(dnode)) 159 162 return 0; 160 163 161 - node = tipc_node_find(dnode); 164 + node = tipc_node_find(net, dnode); 162 165 if (!node) { 163 
166 pr_warn("Connecting sock to node 0x%x failed\n", dnode); 164 167 return -EHOSTUNREACH; ··· 176 179 return 0; 177 180 } 178 181 179 - void tipc_node_remove_conn(u32 dnode, u32 port) 182 + void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port) 180 183 { 181 184 struct tipc_node *node; 182 185 struct tipc_sock_conn *conn, *safe; ··· 184 187 if (in_own_node(dnode)) 185 188 return; 186 189 187 - node = tipc_node_find(dnode); 190 + node = tipc_node_find(net, dnode); 188 191 if (!node) 189 192 return; 190 193 ··· 198 201 tipc_node_unlock(node); 199 202 } 200 203 201 - void tipc_node_abort_sock_conns(struct list_head *conns) 204 + void tipc_node_abort_sock_conns(struct net *net, struct list_head *conns) 202 205 { 203 206 struct tipc_sock_conn *conn, *safe; 204 207 struct sk_buff *buf; ··· 209 212 conn->peer_node, conn->port, 210 213 conn->peer_port, TIPC_ERR_NO_NODE); 211 214 if (likely(buf)) 212 - tipc_sk_rcv(buf); 215 + tipc_sk_rcv(net, buf); 213 216 list_del(&conn->list); 214 217 kfree(conn); 215 218 } ··· 339 342 340 343 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 341 344 { 345 + struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id); 346 + 342 347 n_ptr->links[l_ptr->bearer_id] = l_ptr; 343 - spin_lock_bh(&node_list_lock); 344 - tipc_num_links++; 345 - spin_unlock_bh(&node_list_lock); 348 + spin_lock_bh(&tn->node_list_lock); 349 + tn->num_links++; 350 + spin_unlock_bh(&tn->node_list_lock); 346 351 n_ptr->link_cnt++; 347 352 } 348 353 349 354 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 350 355 { 356 + struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id); 351 357 int i; 352 358 353 359 for (i = 0; i < MAX_BEARERS; i++) { 354 360 if (l_ptr != n_ptr->links[i]) 355 361 continue; 356 362 n_ptr->links[i] = NULL; 357 - spin_lock_bh(&node_list_lock); 358 - tipc_num_links--; 359 - spin_unlock_bh(&node_list_lock); 363 + spin_lock_bh(&tn->node_list_lock); 364 + tn->num_links--; 365 + 
spin_unlock_bh(&tn->node_list_lock); 360 366 n_ptr->link_cnt--; 361 367 } 362 368 } ··· 414 414 TIPC_NOTIFY_NODE_DOWN; 415 415 } 416 416 417 - struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) 417 + struct sk_buff *tipc_node_get_nodes(struct net *net, const void *req_tlv_area, 418 + int req_tlv_space) 418 419 { 420 + struct tipc_net *tn = net_generic(net, tipc_net_id); 419 421 u32 domain; 420 422 struct sk_buff *buf; 421 423 struct tipc_node *n_ptr; ··· 432 430 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 433 431 " (network address)"); 434 432 435 - spin_lock_bh(&node_list_lock); 436 - if (!tipc_num_nodes) { 437 - spin_unlock_bh(&node_list_lock); 433 + spin_lock_bh(&tn->node_list_lock); 434 + if (!tn->num_nodes) { 435 + spin_unlock_bh(&tn->node_list_lock); 438 436 return tipc_cfg_reply_none(); 439 437 } 440 438 441 439 /* For now, get space for all other nodes */ 442 - payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes; 440 + payload_size = TLV_SPACE(sizeof(node_info)) * tn->num_nodes; 443 441 if (payload_size > 32768u) { 444 - spin_unlock_bh(&node_list_lock); 442 + spin_unlock_bh(&tn->node_list_lock); 445 443 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 446 444 " (too many nodes)"); 447 445 } 448 - spin_unlock_bh(&node_list_lock); 446 + spin_unlock_bh(&tn->node_list_lock); 449 447 450 448 buf = tipc_cfg_reply_alloc(payload_size); 451 449 if (!buf) ··· 453 451 454 452 /* Add TLVs for all nodes in scope */ 455 453 rcu_read_lock(); 456 - list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 454 + list_for_each_entry_rcu(n_ptr, &tn->node_list, list) { 457 455 if (!tipc_in_scope(domain, n_ptr->addr)) 458 456 continue; 459 457 node_info.addr = htonl(n_ptr->addr); ··· 465 463 return buf; 466 464 } 467 465 468 - struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) 466 + struct sk_buff *tipc_node_get_links(struct net *net, const void *req_tlv_area, 467 + int req_tlv_space) 469 
468 { 469 + struct tipc_net *tn = net_generic(net, tipc_net_id); 470 470 u32 domain; 471 471 struct sk_buff *buf; 472 472 struct tipc_node *n_ptr; ··· 486 482 if (!tipc_own_addr) 487 483 return tipc_cfg_reply_none(); 488 484 489 - spin_lock_bh(&node_list_lock); 485 + spin_lock_bh(&tn->node_list_lock); 490 486 /* Get space for all unicast links + broadcast link */ 491 - payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1)); 487 + payload_size = TLV_SPACE((sizeof(link_info)) * (tn->num_links + 1)); 492 488 if (payload_size > 32768u) { 493 - spin_unlock_bh(&node_list_lock); 489 + spin_unlock_bh(&tn->node_list_lock); 494 490 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 495 491 " (too many links)"); 496 492 } 497 - spin_unlock_bh(&node_list_lock); 493 + spin_unlock_bh(&tn->node_list_lock); 498 494 499 495 buf = tipc_cfg_reply_alloc(payload_size); 500 496 if (!buf) ··· 508 504 509 505 /* Add TLVs for any other links in scope */ 510 506 rcu_read_lock(); 511 - list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 507 + list_for_each_entry_rcu(n_ptr, &tn->node_list, list) { 512 508 u32 i; 513 509 514 510 if (!tipc_in_scope(domain, n_ptr->addr)) ··· 538 534 * 539 535 * Returns 0 on success 540 536 */ 541 - int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len) 537 + int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr, 538 + char *linkname, size_t len) 542 539 { 543 540 struct tipc_link *link; 544 - struct tipc_node *node = tipc_node_find(addr); 541 + struct tipc_node *node = tipc_node_find(net, addr); 545 542 546 543 if ((bearer_id >= MAX_BEARERS) || !node) 547 544 return -EINVAL; ··· 559 554 560 555 void tipc_node_unlock(struct tipc_node *node) 561 556 { 557 + struct net *net = node->net; 562 558 LIST_HEAD(nsub_list); 563 559 LIST_HEAD(conn_sks); 564 560 struct sk_buff_head waiting_sks; ··· 591 585 spin_unlock_bh(&node->lock); 592 586 593 587 while (!skb_queue_empty(&waiting_sks)) 594 - 
tipc_sk_rcv(__skb_dequeue(&waiting_sks)); 588 + tipc_sk_rcv(net, __skb_dequeue(&waiting_sks)); 595 589 596 590 if (!list_empty(&conn_sks)) 597 - tipc_node_abort_sock_conns(&conn_sks); 591 + tipc_node_abort_sock_conns(net, &conn_sks); 598 592 599 593 if (!list_empty(&nsub_list)) 600 - tipc_publ_notify(&nsub_list, addr); 594 + tipc_publ_notify(net, &nsub_list, addr); 601 595 602 596 if (flags & TIPC_WAKEUP_BCAST_USERS) 603 - tipc_bclink_wakeup_users(); 597 + tipc_bclink_wakeup_users(net); 604 598 605 599 if (flags & TIPC_NOTIFY_NODE_UP) 606 - tipc_named_node_up(addr); 600 + tipc_named_node_up(net, addr); 607 601 608 602 if (flags & TIPC_NOTIFY_LINK_UP) 609 - tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, 603 + tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr, 610 604 TIPC_NODE_SCOPE, link_id, addr); 611 605 612 606 if (flags & TIPC_NOTIFY_LINK_DOWN) 613 - tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, 607 + tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr, 614 608 link_id, addr); 615 609 } 616 610 ··· 651 645 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) 652 646 { 653 647 int err; 648 + struct net *net = sock_net(skb->sk); 649 + struct tipc_net *tn = net_generic(net, tipc_net_id); 654 650 int done = cb->args[0]; 655 651 int last_addr = cb->args[1]; 656 652 struct tipc_node *node; ··· 667 659 668 660 rcu_read_lock(); 669 661 670 - if (last_addr && !tipc_node_find(last_addr)) { 662 + if (last_addr && !tipc_node_find(net, last_addr)) { 671 663 rcu_read_unlock(); 672 664 /* We never set seq or call nl_dump_check_consistent() this 673 665 * means that setting prev_seq here will cause the consistence ··· 679 671 return -EPIPE; 680 672 } 681 673 682 - list_for_each_entry_rcu(node, &tipc_node_list, list) { 674 + list_for_each_entry_rcu(node, &tn->node_list, list) { 683 675 if (last_addr) { 684 676 if (node->addr == last_addr) 685 677 last_addr = 0;
+19 -16
net/tipc/node.h
··· 42 42 #include "bearer.h" 43 43 #include "msg.h" 44 44 45 - /* 46 - * Out-of-range value for node signature 47 - */ 48 - #define INVALID_NODE_SIG 0x10000 45 + /* Out-of-range value for node signature */ 46 + #define INVALID_NODE_SIG 0x10000 47 + 48 + #define NODE_HTABLE_SIZE 512 49 49 50 50 /* Flags used to take different actions according to flag type 51 51 * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down ··· 90 90 * struct tipc_node - TIPC node structure 91 91 * @addr: network address of node 92 92 * @lock: spinlock governing access to structure 93 + * @net: the applicable net namespace 93 94 * @hash: links to adjacent nodes in unsorted hash chain 94 95 * @active_links: pointers to active links to node 95 96 * @links: pointers to all links to node ··· 107 106 struct tipc_node { 108 107 u32 addr; 109 108 spinlock_t lock; 109 + struct net *net; 110 110 struct hlist_node hash; 111 111 struct tipc_link *active_links[2]; 112 112 u32 act_mtus[2]; ··· 125 123 struct rcu_head rcu; 126 124 }; 127 125 128 - extern struct list_head tipc_node_list; 129 - 130 - struct tipc_node *tipc_node_find(u32 addr); 131 - struct tipc_node *tipc_node_create(u32 addr); 132 - void tipc_node_stop(void); 126 + struct tipc_node *tipc_node_find(struct net *net, u32 addr); 127 + struct tipc_node *tipc_node_create(struct net *net, u32 addr); 128 + void tipc_node_stop(struct net *net); 133 129 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); 134 130 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); 135 131 void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr); 136 132 void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr); 137 133 int tipc_node_active_links(struct tipc_node *n_ptr); 138 134 int tipc_node_is_up(struct tipc_node *n_ptr); 139 - struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space); 140 - struct sk_buff *tipc_node_get_nodes(const void 
*req_tlv_area, int req_tlv_space); 141 - int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len); 135 + struct sk_buff *tipc_node_get_links(struct net *net, const void *req_tlv_area, 136 + int req_tlv_space); 137 + struct sk_buff *tipc_node_get_nodes(struct net *net, const void *req_tlv_area, 138 + int req_tlv_space); 139 + int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node, 140 + char *linkname, size_t len); 142 141 void tipc_node_unlock(struct tipc_node *node); 143 - int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port); 144 - void tipc_node_remove_conn(u32 dnode, u32 port); 142 + int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port); 143 + void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port); 145 144 146 145 int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb); 147 146 ··· 157 154 TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN)); 158 155 } 159 156 160 - static inline uint tipc_node_get_mtu(u32 addr, u32 selector) 157 + static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector) 161 158 { 162 159 struct tipc_node *node; 163 160 u32 mtu; 164 161 165 - node = tipc_node_find(addr); 162 + node = tipc_node_find(net, addr); 166 163 167 164 if (likely(node)) 168 165 mtu = node->act_mtus[selector & 1];
+43 -29
net/tipc/socket.c
··· 257 257 258 258 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) { 259 259 if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) 260 - tipc_link_xmit_skb(skb, dnode, 0); 260 + tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0); 261 261 } 262 262 } 263 263 ··· 473 473 static int tipc_release(struct socket *sock) 474 474 { 475 475 struct sock *sk = sock->sk; 476 + struct net *net = sock_net(sk); 476 477 struct tipc_sock *tsk; 477 478 struct sk_buff *skb; 478 479 u32 dnode, probing_state; ··· 504 503 (sock->state == SS_CONNECTED)) { 505 504 sock->state = SS_DISCONNECTING; 506 505 tsk->connected = 0; 507 - tipc_node_remove_conn(dnode, tsk->portid); 506 + tipc_node_remove_conn(net, dnode, tsk->portid); 508 507 } 509 508 if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) 510 - tipc_link_xmit_skb(skb, dnode, 0); 509 + tipc_link_xmit_skb(net, skb, dnode, 0); 511 510 } 512 511 } 513 512 ··· 522 521 tsk_peer_port(tsk), 523 522 tsk->portid, TIPC_ERR_NO_PORT); 524 523 if (skb) 525 - tipc_link_xmit_skb(skb, dnode, tsk->portid); 526 - tipc_node_remove_conn(dnode, tsk->portid); 524 + tipc_link_xmit_skb(net, skb, dnode, tsk->portid); 525 + tipc_node_remove_conn(net, dnode, tsk->portid); 527 526 } 528 527 529 528 /* Discard any remaining (connection-based) messages in receive queue */ ··· 726 725 struct msghdr *msg, size_t dsz, long timeo) 727 726 { 728 727 struct sock *sk = sock->sk; 728 + struct net *net = sock_net(sk); 729 729 struct tipc_msg *mhdr = &tipc_sk(sk)->phdr; 730 730 struct sk_buff_head head; 731 731 uint mtu; ··· 749 747 return rc; 750 748 751 749 do { 752 - rc = tipc_bclink_xmit(&head); 750 + rc = tipc_bclink_xmit(net, &head); 753 751 if (likely(rc >= 0)) { 754 752 rc = dsz; 755 753 break; ··· 768 766 769 767 /* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets 770 768 */ 771 - void tipc_sk_mcast_rcv(struct sk_buff *buf) 769 + void tipc_sk_mcast_rcv(struct net *net, struct sk_buff *buf) 772 770 { 773 771 struct tipc_msg *msg = 
buf_msg(buf); 774 772 struct tipc_port_list dports = {0, NULL, }; ··· 800 798 continue; 801 799 } 802 800 msg_set_destport(msg, item->ports[i]); 803 - tipc_sk_rcv(b); 801 + tipc_sk_rcv(net, b); 804 802 } 805 803 } 806 804 tipc_port_list_free(&dports); ··· 888 886 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 889 887 struct sock *sk = sock->sk; 890 888 struct tipc_sock *tsk = tipc_sk(sk); 889 + struct net *net = sock_net(sk); 891 890 struct tipc_msg *mhdr = &tsk->phdr; 892 891 u32 dnode, dport; 893 892 struct sk_buff_head head; ··· 963 960 } 964 961 965 962 new_mtu: 966 - mtu = tipc_node_get_mtu(dnode, tsk->portid); 963 + mtu = tipc_node_get_mtu(net, dnode, tsk->portid); 967 964 __skb_queue_head_init(&head); 968 965 rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head); 969 966 if (rc < 0) ··· 972 969 do { 973 970 skb = skb_peek(&head); 974 971 TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; 975 - rc = tipc_link_xmit(&head, dnode, tsk->portid); 972 + rc = tipc_link_xmit(net, &head, dnode, tsk->portid); 976 973 if (likely(rc >= 0)) { 977 974 if (sock->state != SS_READY) 978 975 sock->state = SS_CONNECTING; ··· 1041 1038 struct msghdr *m, size_t dsz) 1042 1039 { 1043 1040 struct sock *sk = sock->sk; 1041 + struct net *net = sock_net(sk); 1044 1042 struct tipc_sock *tsk = tipc_sk(sk); 1045 1043 struct tipc_msg *mhdr = &tsk->phdr; 1046 1044 struct sk_buff_head head; ··· 1085 1081 goto exit; 1086 1082 do { 1087 1083 if (likely(!tsk_conn_cong(tsk))) { 1088 - rc = tipc_link_xmit(&head, dnode, portid); 1084 + rc = tipc_link_xmit(net, &head, dnode, portid); 1089 1085 if (likely(!rc)) { 1090 1086 tsk->sent_unacked++; 1091 1087 sent += send; ··· 1094 1090 goto next; 1095 1091 } 1096 1092 if (rc == -EMSGSIZE) { 1097 - tsk->max_pkt = tipc_node_get_mtu(dnode, portid); 1093 + tsk->max_pkt = tipc_node_get_mtu(net, dnode, 1094 + portid); 1098 1095 goto next; 1099 1096 } 1100 1097 if (rc != -ELINKCONG) ··· 1137 1132 static void tipc_sk_finish_conn(struct tipc_sock *tsk, 
u32 peer_port, 1138 1133 u32 peer_node) 1139 1134 { 1135 + struct net *net = sock_net(&tsk->sk); 1140 1136 struct tipc_msg *msg = &tsk->phdr; 1141 1137 1142 1138 msg_set_destnode(msg, peer_node); ··· 1151 1145 tsk->connected = 1; 1152 1146 if (!mod_timer(&tsk->timer, jiffies + tsk->probing_intv)) 1153 1147 sock_hold(&tsk->sk); 1154 - tipc_node_add_conn(peer_node, tsk->portid, peer_port); 1155 - tsk->max_pkt = tipc_node_get_mtu(peer_node, tsk->portid); 1148 + tipc_node_add_conn(net, peer_node, tsk->portid, peer_port); 1149 + tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid); 1156 1150 } 1157 1151 1158 1152 /** ··· 1251 1245 1252 1246 static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) 1253 1247 { 1248 + struct net *net = sock_net(&tsk->sk); 1254 1249 struct sk_buff *skb = NULL; 1255 1250 struct tipc_msg *msg; 1256 1251 u32 peer_port = tsk_peer_port(tsk); ··· 1265 1258 return; 1266 1259 msg = buf_msg(skb); 1267 1260 msg_set_msgcnt(msg, ack); 1268 - tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg)); 1261 + tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg)); 1269 1262 } 1270 1263 1271 1264 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) ··· 1558 1551 static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) 1559 1552 { 1560 1553 struct sock *sk = &tsk->sk; 1554 + struct net *net = sock_net(sk); 1561 1555 struct socket *sock = sk->sk_socket; 1562 1556 struct tipc_msg *msg = buf_msg(*buf); 1563 1557 int retval = -TIPC_ERR_NO_PORT; ··· 1574 1566 sock->state = SS_DISCONNECTING; 1575 1567 tsk->connected = 0; 1576 1568 /* let timer expire on it's own */ 1577 - tipc_node_remove_conn(tsk_peer_node(tsk), 1569 + tipc_node_remove_conn(net, tsk_peer_node(tsk), 1578 1570 tsk->portid); 1579 1571 } 1580 1572 retval = TIPC_OK; ··· 1745 1737 if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc)) 1746 1738 return 0; 1747 1739 1748 - tipc_link_xmit_skb(skb, onode, 0); 1740 + tipc_link_xmit_skb(sock_net(sk), skb, onode, 0); 
1749 1741 1750 1742 return 0; 1751 1743 } ··· 1756 1748 * Consumes buffer 1757 1749 * Returns 0 if success, or errno: -EHOSTUNREACH 1758 1750 */ 1759 - int tipc_sk_rcv(struct sk_buff *skb) 1751 + int tipc_sk_rcv(struct net *net, struct sk_buff *skb) 1760 1752 { 1761 1753 struct tipc_sock *tsk; 1762 1754 struct sock *sk; ··· 1793 1785 if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc)) 1794 1786 return -EHOSTUNREACH; 1795 1787 1796 - tipc_link_xmit_skb(skb, dnode, 0); 1788 + tipc_link_xmit_skb(net, skb, dnode, 0); 1797 1789 return (rc < 0) ? -EHOSTUNREACH : 0; 1798 1790 } 1799 1791 ··· 2050 2042 static int tipc_shutdown(struct socket *sock, int how) 2051 2043 { 2052 2044 struct sock *sk = sock->sk; 2045 + struct net *net = sock_net(sk); 2053 2046 struct tipc_sock *tsk = tipc_sk(sk); 2054 2047 struct sk_buff *skb; 2055 2048 u32 dnode; ··· 2074 2065 goto restart; 2075 2066 } 2076 2067 if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN)) 2077 - tipc_link_xmit_skb(skb, dnode, tsk->portid); 2078 - tipc_node_remove_conn(dnode, tsk->portid); 2068 + tipc_link_xmit_skb(net, skb, dnode, 2069 + tsk->portid); 2070 + tipc_node_remove_conn(net, dnode, tsk->portid); 2079 2071 } else { 2080 2072 dnode = tsk_peer_node(tsk); 2081 2073 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, ··· 2084 2074 0, dnode, tipc_own_addr, 2085 2075 tsk_peer_port(tsk), 2086 2076 tsk->portid, TIPC_CONN_SHUTDOWN); 2087 - tipc_link_xmit_skb(skb, dnode, tsk->portid); 2077 + tipc_link_xmit_skb(net, skb, dnode, tsk->portid); 2088 2078 } 2089 2079 tsk->connected = 0; 2090 2080 sock->state = SS_DISCONNECTING; 2091 - tipc_node_remove_conn(dnode, tsk->portid); 2081 + tipc_node_remove_conn(net, dnode, tsk->portid); 2092 2082 /* fall through */ 2093 2083 2094 2084 case SS_DISCONNECTING: ··· 2140 2130 } 2141 2131 bh_unlock_sock(sk); 2142 2132 if (skb) 2143 - tipc_link_xmit_skb(skb, peer_node, tsk->portid); 2133 + tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid); 2144 2134 exit: 2145 2135 
sock_put(sk); 2146 2136 } ··· 2148 2138 static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 2149 2139 struct tipc_name_seq const *seq) 2150 2140 { 2141 + struct net *net = sock_net(&tsk->sk); 2151 2142 struct publication *publ; 2152 2143 u32 key; 2153 2144 ··· 2158 2147 if (key == tsk->portid) 2159 2148 return -EADDRINUSE; 2160 2149 2161 - publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, 2150 + publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper, 2162 2151 scope, tsk->portid, key); 2163 2152 if (unlikely(!publ)) 2164 2153 return -EINVAL; ··· 2172 2161 static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 2173 2162 struct tipc_name_seq const *seq) 2174 2163 { 2164 + struct net *net = sock_net(&tsk->sk); 2175 2165 struct publication *publ; 2176 2166 struct publication *safe; 2177 2167 int rc = -EINVAL; ··· 2187 2175 continue; 2188 2176 if (publ->upper != seq->upper) 2189 2177 break; 2190 - tipc_nametbl_withdraw(publ->type, publ->lower, 2178 + tipc_nametbl_withdraw(net, publ->type, publ->lower, 2191 2179 publ->ref, publ->key); 2192 2180 rc = 0; 2193 2181 break; 2194 2182 } 2195 - tipc_nametbl_withdraw(publ->type, publ->lower, 2183 + tipc_nametbl_withdraw(net, publ->type, publ->lower, 2196 2184 publ->ref, publ->key); 2197 2185 rc = 0; 2198 2186 } ··· 2504 2492 return put_user(sizeof(value), ol); 2505 2493 } 2506 2494 2507 - static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg) 2495 + static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) 2508 2496 { 2497 + struct sock *sk = sock->sk; 2509 2498 struct tipc_sioc_ln_req lnr; 2510 2499 void __user *argp = (void __user *)arg; 2511 2500 ··· 2514 2501 case SIOCGETLINKNAME: 2515 2502 if (copy_from_user(&lnr, argp, sizeof(lnr))) 2516 2503 return -EFAULT; 2517 - if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer, 2504 + if (!tipc_node_get_linkname(sock_net(sk), 2505 + lnr.bearer_id & 0xffff, lnr.peer, 2518 2506 
lnr.linkname, TIPC_MAX_LINK_NAME)) { 2519 2507 if (copy_to_user(argp, &lnr, sizeof(lnr))) 2520 2508 return -EFAULT;
+2 -2
net/tipc/socket.h
··· 49 49 void tipc_sock_release_local(struct socket *sock); 50 50 int tipc_sock_accept_local(struct socket *sock, struct socket **newsock, 51 51 int flags); 52 - int tipc_sk_rcv(struct sk_buff *buf); 52 + int tipc_sk_rcv(struct net *net, struct sk_buff *buf); 53 53 struct sk_buff *tipc_sk_socks_show(void); 54 - void tipc_sk_mcast_rcv(struct sk_buff *buf); 54 + void tipc_sk_mcast_rcv(struct net *net, struct sk_buff *buf); 55 55 void tipc_sk_reinit(void); 56 56 int tipc_sk_rht_init(void); 57 57 void tipc_sk_rht_destroy(void);