/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"
#include "trace.h"
#include "crypto.h"

#define INVALID_NODE_SIG	0x10000		/* no valid peer signature learned yet */
#define NODE_CLEANUP_AFTER	300000		/* 5 min (in ms) before purging a down node */

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN	= (1 << 3),
	TIPC_NOTIFY_NODE_UP	= (1 << 4),
	TIPC_NOTIFY_LINK_UP	= (1 << 6),
	TIPC_NOTIFY_LINK_DOWN	= (1 << 7)
};

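/**
 * struct tipc_link_entry - one link instance towards the peer node
 * @link: pointer to the link itself
 * @lock: spinlock protecting the link (one lock per link)
 * @mtu: current maximum packet size for this link
 * @inputq: queue with messages to be delivered upwards from this link
 * @maddr: destination media address to use for this link
 */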
struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock; /* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

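/**
 * struct tipc_bclink_entry - broadcast receive state for the peer node
 * @link: broadcast receive link towards the node
 * @inputq1: private input queue of the broadcast link
 * @arrvq: intermediate queue for arriving multicast messages
 * @inputq2: input queue for delivered multicast messages
 * @namedq: input queue for name table ("named") messages
 */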
struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq2;
	struct sk_buff_head namedq;
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @state: connectivity state vs peer node
 * @preliminary: a preliminary node or not
 * @failover_sent: failover sent or not
 * @sync_point: sequence number where synch/failover is finished
 * @link_cnt: number of links to node
 * @working_links: number of working links to node (both active and standby)
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @peer_id: 128-bit ID of peer
 * @peer_id_string: ID string of peer
 * @publ_list: list of publications
 * @conn_sks: list of connections (tipc_sock_conn) to be notified on node loss
 * @keepalive_intv: keepalive interval in milliseconds
 * @timer: node's keepalive timer
 * @rcu: rcu struct for tipc_node
 * @delete_at: indicates the time for deleting a down node
 * @peer_net: namespace of the peer, if it is a node on the same host
 * @peer_hash_mix: namespace hash of the peer
 * @crypto_rx: RX crypto handler
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	bool preliminary;
	bool failover_sent;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	u8 peer_id[16];
	char peer_id_string[NODE_ID_STR_LEN];
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
	unsigned long delete_at;
	struct net *peer_net;
	u32 peer_hash_mix;
#ifdef CONFIG_TIPC_CRYPTO
	struct tipc_crypto *crypto_rx;
#endif
};

/* Node FSM states and events. The two hex digits of each state value encode
 * the (self, peer) endpoint states: a = up, d = down, c = coming, 1 = leaving.
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};

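/* Typical contact establishment, as implemented by tipc_node_fsm_evt() below:
 *
 *   SELF_DOWN_PEER_DOWN --SELF_ESTABL_CONTACT_EVT--> SELF_UP_PEER_COMING
 *   SELF_UP_PEER_COMING --PEER_ESTABL_CONTACT_EVT--> SELF_UP_PEER_UP
 *
 * or symmetrically via SELF_COMING_PEER_UP when the peer is heard from first.
 */
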
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id);
static bool node_is_up(struct tipc_node *n);
static void tipc_node_delete_from_list(struct tipc_node *node);

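/* struct tipc_sock_conn - record of one connected socket pair; used by
 * node_lost_contact() to send a TIPC_ERR_NO_NODE notification to the local
 * socket if contact with the peer node is lost.
 */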
struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel, bool connected)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	/* Allow MAX_MSG_SIZE when building connection-oriented messages
	 * if the nodes are in the same core network
	 */
	if (n->peer_net && connected) {
		tipc_node_put(n);
		return mtu;
	}

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

bool tipc_node_get_id(struct net *net, u32 addr, u8 *id)
{
	u8 *own_id = tipc_own_id(net);
	struct tipc_node *n;

	if (!own_id)
		return true;

	if (addr == tipc_own_addr(net)) {
		memcpy(id, own_id, TIPC_NODEID_LEN);
		return true;
	}
	n = tipc_node_find(net, addr);
	if (!n)
		return false;

	memcpy(id, &n->peer_id, TIPC_NODEID_LEN);
	tipc_node_put(n);
	return true;
}

u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

u32 tipc_node_get_addr(struct tipc_node *node)
{
	return (node) ? node->addr : 0;
}

char *tipc_node_get_id_str(struct tipc_node *node)
{
	return node->peer_id_string;
}

#ifdef CONFIG_TIPC_CRYPTO
/**
 * tipc_node_crypto_rx - Retrieve crypto RX handle from node
 * @__n: node to retrieve the handle from
 *
 * Note: node ref counter must be held first!
 */
struct tipc_crypto *tipc_node_crypto_rx(struct tipc_node *__n)
{
	return (__n) ? __n->crypto_rx : NULL;
}

struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
{
	return container_of(pos, struct tipc_node, list)->crypto_rx;
}
#endif

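/* Node teardown happens in two steps: tipc_node_kref_release() frees the
 * broadcast link immediately, while the node structure itself is freed via
 * call_rcu() once all RCU readers are guaranteed to be done with it.
 */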
static void tipc_node_free(struct rcu_head *rp)
{
	struct tipc_node *n = container_of(rp, struct tipc_node, rcu);

#ifdef CONFIG_TIPC_CRYPTO
	tipc_crypto_stop(&n->crypto_rx);
#endif
	kfree(n);
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	call_rcu(&n->rcu, tipc_node_free);
}

void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object and take a reference, if it
 * exists. kref_get_unless_zero() makes a node whose last reference is
 * concurrently being dropped appear as not found.
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr || node->preliminary)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}

/* tipc_node_find_by_id - locate specified node object by its 128-bit id
 * Note: this function is called only when a discovery request failed
 * to find the node by its 32-bit id, and is not time critical
 */
static struct tipc_node *tipc_node_find_by_id(struct net *net, u8 *id)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		read_lock_bh(&n->lock);
		if (!memcmp(id, n->peer_id, 16) &&
		    kref_get_unless_zero(&n->kref))
			found = true;
		read_unlock_bh(&n->lock);
		if (found)
			break;
	}
	rcu_read_unlock();
	return found ? n : NULL;
}

static void tipc_node_read_lock(struct tipc_node *n)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
	write_lock_bh(&n->lock);
}

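/* The "fast" unlock variant skips the deferred notification work done by
 * tipc_node_write_unlock(); only use it when no action flags can have been
 * set while the write lock was held.
 */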
static void tipc_node_write_unlock_fast(struct tipc_node *n)
{
	write_unlock_bh(&n->lock);
}

static void tipc_node_write_unlock(struct tipc_node *n)
{
	struct net *net = n->net;
	u32 addr = 0;
	u32 flags = n->action_flags;
	u32 link_id = 0;
	u32 bearer_id;
	struct list_head *publ_list;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	addr = n->addr;
	link_id = n->link_id;
	bearer_id = link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, addr, bearer_id);
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, link_id);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, addr, bearer_id);
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      addr, link_id);
	}
}

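/* tipc_node_assign_peer_net - check whether the peer is really another
 * namespace ("container") on this host; if so, remember its net pointer so
 * that traffic can later be short-circuited via tipc_lxc_xmit().
 */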
static void tipc_node_assign_peer_net(struct tipc_node *n, u32 hash_mixes)
{
	int net_id = tipc_netid(n->net);
	struct tipc_net *tn_peer;
	struct net *tmp;
	u32 hash_chk;

	if (n->peer_net)
		return;

	for_each_net_rcu(tmp) {
		tn_peer = tipc_net(tmp);
		if (!tn_peer)
			continue;
		/* Integrity check: does the node really exist in this namespace? */
		if (tn_peer->net_id != net_id)
			continue;
		if (memcmp(n->peer_id, tn_peer->node_id, NODE_ID_LEN))
			continue;
		hash_chk = tipc_net_hash_mixes(tmp, tn_peer->random);
		if (hash_mixes ^ hash_chk)
			continue;
		n->peer_net = tmp;
		n->peer_hash_mix = hash_mixes;
		break;
	}
}

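/**
 * tipc_node_create - locate or create a node object
 * @net: the applicable net namespace
 * @addr: 32-bit network address of the node
 * @peer_id: 128-bit node identity
 * @capabilities: capability bitmap announced by the node
 * @hash_mixes: namespace hash, used to detect local "container" peers
 * @preliminary: true if the node address is not yet confirmed
 *
 * Returns a referenced node object, or NULL on allocation failure; the
 * caller is responsible for the matching tipc_node_put().
 */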
struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id,
				   u16 capabilities, u32 hash_mixes,
				   bool preliminary)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n, *temp_node;
	struct tipc_link *l;
	unsigned long intv;
	int bearer_id;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr) ?:
		tipc_node_find_by_id(net, peer_id);
	if (n) {
		if (!n->preliminary)
			goto update;
		if (preliminary)
			goto exit;
		/* A preliminary node becomes "real" now, refresh its data */
		tipc_node_write_lock(n);
		n->preliminary = false;
		n->addr = addr;
		hlist_del_rcu(&n->hash);
		hlist_add_head_rcu(&n->hash,
				   &tn->node_htable[tipc_hashfn(addr)]);
		list_del_rcu(&n->list);
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			if (n->addr < temp_node->addr)
				break;
		}
		list_add_tail_rcu(&n->list, &temp_node->list);
		tipc_node_write_unlock_fast(n);

update:
		if (n->peer_hash_mix ^ hash_mixes)
			tipc_node_assign_peer_net(n, hash_mixes);
		if (n->capabilities == capabilities)
			goto exit;
		/* Same node may come back with new capabilities */
		tipc_node_write_lock(n);
		n->capabilities = capabilities;
		for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
			l = n->links[bearer_id].link;
			if (l)
				tipc_link_update_caps(l, capabilities);
		}
		tipc_node_write_unlock_fast(n);

		/* Calculate cluster capabilities */
		tn->capabilities = TIPC_NODE_CAPABILITIES;
		list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
			tn->capabilities &= temp_node->capabilities;
		}

		tipc_bcast_toggle_rcast(net,
					(tn->capabilities & TIPC_BCAST_RCAST));

		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	tipc_nodeid2string(n->peer_id_string, peer_id);
#ifdef CONFIG_TIPC_CRYPTO
	if (unlikely(tipc_crypto_start(&n->crypto_rx, net, n))) {
		pr_warn("Failed to start crypto RX(%s)!\n", n->peer_id_string);
		kfree(n);
		n = NULL;
		goto exit;
	}
#endif
	n->addr = addr;
	n->preliminary = preliminary;
	memcpy(&n->peer_id, peer_id, 16);
	n->net = net;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Assign kernel-local namespace, if one exists */
	tipc_node_assign_peer_net(n, hash_mixes);
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	n->bc_entry.link = NULL;
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	/* Start a slow timer anyway, crypto needs it */
	n->keepalive_intv = 10000;
	intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
	if (!mod_timer(&n->timer, intv))
		tipc_node_get(n);
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
	trace_tipc_node_create(n, true, " ");
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}

static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}

static void tipc_node_delete_from_list(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);
}

static void tipc_node_delete(struct tipc_node *node)
{
	trace_tipc_node_delete(node, true, " ");
	tipc_node_delete_from_list(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}

static void tipc_node_clear_links(struct tipc_node *node)
{
	int i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_link_entry *le = &node->links[i];

		if (le->link) {
			kfree(le->link);
			le->link = NULL;
			node->link_cnt--;
		}
	}
}

/* tipc_node_cleanup - delete nodes that do not have
 * active links for NODE_CLEANUP_AFTER time
 */
static bool tipc_node_cleanup(struct tipc_node *peer)
{
	struct tipc_node *temp_node;
	struct tipc_net *tn = tipc_net(peer->net);
	bool deleted = false;

	/* If lock held by tipc_node_stop() the node will be deleted anyway */
	if (!spin_trylock_bh(&tn->node_list_lock))
		return false;

	tipc_node_write_lock(peer);

	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
		tipc_node_clear_links(peer);
		tipc_node_delete_from_list(peer);
		deleted = true;
	}
	tipc_node_write_unlock(peer);

	if (!deleted) {
		spin_unlock_bh(&tn->node_list_lock);
		return deleted;
	}

	/* Calculate cluster capabilities */
	tn->capabilities = TIPC_NODE_CAPABILITIES;
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		tn->capabilities &= temp_node->capabilities;
	}
	tipc_bcast_toggle_rcast(peer->net,
				(tn->capabilities & TIPC_BCAST_RCAST));
	spin_unlock_bh(&tn->node_list_lock);
	return deleted;
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int remains = n->link_cnt;
	int bearer_id;
	int rc = 0;

	trace_tipc_node_timeout(n, false, " ");
	if (!node_is_up(n) && tipc_node_cleanup(n)) {
		/* Drop the reference held by the node timer */
		tipc_node_put(n);
		return;
	}

#ifdef CONFIG_TIPC_CRYPTO
	/* Take any crypto key related actions first */
	tipc_crypto_timeout(n->crypto_rx);
#endif
	__skb_queue_head_init(&xmitq);

	/* Reset the node interval to a large value (10 seconds); it is
	 * recalculated below from the lowest link tolerance
	 */
	tipc_node_read_lock(n);
	n->keepalive_intv = 10000;
	tipc_node_read_unlock(n);
	for (bearer_id = 0; remains && (bearer_id < MAX_BEARERS); bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		if (le->link) {
			spin_lock_bh(&le->lock);
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
			spin_unlock_bh(&le->lock);
			remains--;
		}
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr, n);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}

/**
 * __tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer the new link is attached to
 * @xmitq: queue for messages to be transmitted by the caller
 *
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mss(nl);

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));
	trace_tipc_node_link_up(n, true, " ");

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}

/**
 * tipc_node_link_up - handle addition of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer the new link is attached to
 * @xmitq: queue for messages to be transmitted by the caller
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr, n);
	tipc_node_write_unlock(n);
}

/**
 * tipc_node_link_failover() - start failover in case of "half-failover"
 *
 * This function is only called in a very special situation where link
 * failover can already have been started on the peer node, but not on this
 * node. This can happen when e.g.:
 * 1. Both links <1A-2A>, <1B-2B> are down
 * 2. Link endpoint 2A comes up, but 1A is still down (e.g. due to network
 *    disturbance, wrong session, etc.)
 * 3. Link <1B-2B> comes up
 * 4. Link endpoint 2A goes down again (e.g. due to link tolerance timeout)
 * 5. Node 2 starts failover onto link <1B-2B>
 *
 * ==> Node 1 never starts link/node failover on its own!
 *
 * @n: tipc node structure
 * @l: link peer endpoint that is failing over (can be NULL)
 * @tnl: tunnel link
 * @xmitq: queue for messages to be transmitted on tnl link later
 */
static void tipc_node_link_failover(struct tipc_node *n, struct tipc_link *l,
				    struct tipc_link *tnl,
				    struct sk_buff_head *xmitq)
{
	/* Avoid a "self-failover" that can never end */
	if (!tipc_link_is_up(tnl))
		return;

	/* Don't rush, failure link may be in the process of resetting */
	if (l && !tipc_link_is_reset(l))
		return;

	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);

	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_failover_prepare(l, tnl, xmitq);

	if (l)
		tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
}

/**
 * __tipc_node_link_down - handle loss of link
 * @n: target tipc_node
 * @bearer_id: id of the bearer the lost link was attached to
 * @xmitq: queue for messages to be transmitted by the caller
 * @maddr: output media address of the bearer
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down!");
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link down -> failover!");
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}

static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_media_addr *maddr = NULL;
	struct tipc_link *l = le->link;
	int old_bearer_id = bearer_id;
	struct sk_buff_head xmitq;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	if (delete) {
		kfree(l);
		le->link = NULL;
		n->link_cnt--;
	}
	trace_tipc_node_link_down(n, true, "node link down or deleted!");
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr, n);
	tipc_sk_rcv(n->net, &le->inputq);
}

static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}

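/* tipc_node_suggest_addr - suggest a free 32-bit address: start from 'addr'
 * mixed with this cluster's random value, then probe linearly upwards until
 * an unused address is found.
 */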
static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
{
	struct tipc_node *n;

	addr ^= tipc_net(net)->random;
	while ((n = tipc_node_find(net, addr))) {
		tipc_node_put(n);
		addr++;
	}
	return addr;
}

/* tipc_node_try_addr(): Check if addr can be used by peer, suggest another
 * one if not.
 * Returns suggested address if any, otherwise 0
 */
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *n;
	bool preliminary;
	u32 sugg_addr;

	/* Suggest new address if some other peer is using this one */
	n = tipc_node_find(net, addr);
	if (n) {
		if (!memcmp(n->peer_id, id, NODE_ID_LEN))
			addr = 0;
		tipc_node_put(n);
		if (!addr)
			return 0;
		return tipc_node_suggest_addr(net, addr);
	}

	/* Suggest previously used address if peer is known */
	n = tipc_node_find_by_id(net, id);
	if (n) {
		sugg_addr = n->addr;
		preliminary = n->preliminary;
		tipc_node_put(n);
		if (!preliminary)
			return sugg_addr;
	}

	/* Even this node may be in conflict */
	if (tn->trial_addr == addr)
		return tipc_node_suggest_addr(net, addr);

	return 0;
}

void tipc_node_check_dest(struct net *net, u32 addr,
			  u8 *peer_id, struct tipc_bearer *b,
			  u16 capabilities, u32 signature, u32 hash_mixes,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l, *snd_l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool accept_addr = false;
	bool reset = true;
	char *if_name;
	unsigned long intv;
	u16 session;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, addr, peer_id, capabilities, hash_mixes,
			     false);
	if (!n)
		return;

	tipc_node_write_lock(n);
	if (unlikely(!n->bc_entry.link)) {
		snd_l = tipc_bc_sndlink(net);
		if (!tipc_link_bc_create(net, tipc_own_addr(net),
					 addr, peer_id, U16_MAX,
					 tipc_link_min_win(snd_l),
					 tipc_link_max_win(snd_l),
					 n->capabilities,
					 &n->bc_entry.inputq1,
					 &n->bc_entry.namedq, snd_l,
					 &n->bc_entry.link)) {
			pr_warn("Broadcast rcv link creation failed, no mem\n");
			tipc_node_write_unlock_fast(n);
			tipc_node_put(n);
			return;
		}
	}

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Do nothing. */
		reset = false;
		/* Peer is not yet known to be a container/local namespace */
		if (!n->peer_hash_mix)
			n->peer_hash_mix = hash_mixes;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 * - Delayed re-discovery; this link endpoint has already
		 *   reset and re-established contact with the peer, before
		 *   receiving a discovery message from that node.
		 *   (The peer happened to receive one from this node first).
		 * - The peer came back so fast that our side has not
		 *   discovered it yet. Probing from this side will soon
		 *   reset the link, since there can be no working link
		 *   endpoint at the peer end, and the link will re-establish.
		 * Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		get_random_bytes(&session, sizeof(u16));
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->min_win, b->max_win, session,
				      tipc_own_addr(net), addr, peer_id,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		trace_tipc_link_reset(l, TIPC_DUMP_ALL, "link created!");
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	int i;

	pr_warn("Resetting all links to %x\n", n->addr);

	trace_tipc_node_reset_links(n, true, " ");
	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
	trace_tipc_node_fsm(n->peer_id, n->state, state, evt);
}

static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %x\n", n->addr);
	n->delete_at = jiffies + msecs_to_jiffies(NODE_CLEANUP_AFTER);
	trace_tipc_node_lost_contact(n, true, " ");

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;
	n->peer_net = NULL;
	n->peer_hash_mix = 0;
	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static void tipc_lxc_xmit(struct net *peer_net, struct sk_buff_head *list)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head inputq;

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (msg_connected(hdr) || msg_named(hdr) ||
		    msg_direct(hdr)) {
			tipc_loopback_trace(peer_net, list);
			spin_lock_init(&list->lock);
			tipc_sk_rcv(peer_net, list);
			return;
		}
		if (msg_mcast(hdr)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
			return;
		}
		return;
	case MSG_FRAGMENTER:
		if (tipc_msg_assemble(list)) {
			tipc_loopback_trace(peer_net, list);
			skb_queue_head_init(&inputq);
			tipc_sk_mcast_rcv(peer_net, list, &inputq);
			__skb_queue_purge(list);
			skb_queue_purge(&inputq);
		}
		return;
	case GROUP_PROTOCOL:
	case CONN_MANAGER:
		tipc_loopback_trace(peer_net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(peer_net, list);
		return;
	case LINK_PROTOCOL:
	case NAME_DISTRIBUTOR:
	case TUNNEL_PROTOCOL:
	case BCAST_PROTOCOL:
		return;
	default:
		return;
	}
}

/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE or
 * -ENOBUFS
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	bool node_up = false;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_loopback_trace(net, list);
		spin_lock_init(&list->lock);
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	node_up = node_is_up(n);
	if (node_up && n->peer_net && check_net(n->peer_net)) {
		/* xmit inner linux container */
		tipc_lxc_xmit(n->peer_net, list);
		if (likely(skb_queue_empty(list))) {
			tipc_node_read_unlock(n);
			tipc_node_put(n);
			return 0;
		}
	}

	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	tipc_node_put(n);

	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	__skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}

/* tipc_node_distr_xmit(): send single buffer msgs to individual destinations
 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
 */
int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	u32 selector, dnode;

	while ((skb = __skb_dequeue(xmitq))) {
		selector = msg_origport(buf_msg(skb));
		dnode = msg_destnode(buf_msg(skb));
		tipc_node_xmit_skb(net, skb, dnode, selector);
	}
	return 0;
}

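/* tipc_node_broadcast - send @skb to all cluster nodes that are up, by
 * replicating it onto each node's unicast links rather than using the
 * broadcast link.
 */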
void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
{
	struct sk_buff *txskb;
	struct tipc_node *n;
	u32 dst;

	rcu_read_lock();
	list_for_each_entry_rcu(n, tipc_nodes(net), list) {
		dst = n->addr;
		if (in_own_node(net, dst))
			continue;
		if (!node_is_up(n))
			continue;
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dst);
		tipc_node_xmit_skb(net, txskb, dst, 0);
	}
	rcu_read_unlock();

	kfree_skb(skb);
}

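/* tipc_node_mcast_rcv - move buffered multicast messages from the broadcast
 * link's private queue (inputq1) to the arrival queue and deliver them to
 * local sockets; taking inputq2's lock first keeps 'arrvq' consistent for
 * concurrent callers.
 */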
static void tipc_node_mcast_rcv(struct tipc_node *n)
{
	struct tipc_bclink_entry *be = &n->bc_entry;

	/* 'arrvq' is under inputq2's lock protection */
	spin_lock_bh(&be->inputq2.lock);
	spin_lock_bh(&be->inputq1.lock);
	skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
	spin_unlock_bh(&be->inputq1.lock);
	spin_unlock_bh(&be->inputq2.lock);
	tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
}

static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
				  int bearer_id, struct sk_buff_head *xmitq)
{
	struct tipc_link *ucl;
	int rc;

	rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr, xmitq);

	if (rc & TIPC_LINK_DOWN_EVT) {
		tipc_node_reset_links(n);
		return;
	}

	if (!(rc & TIPC_LINK_SND_STATE))
		return;

	/* If probe message, a STATE response will be sent anyway */
	if (msg_probe(hdr))
		return;

	/* Produce a STATE message carrying broadcast NACK */
	tipc_node_read_lock(n);
	ucl = n->links[bearer_id].link;
	if (ucl)
		tipc_link_build_state_msg(ucl, xmitq);
	tipc_node_read_unlock(n);
}

/**
 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
 * @net: the applicable net namespace
 * @skb: TIPC packet
 * @bearer_id: id of bearer message arrived on
 *
 * Invoked with no locks held.
 */
static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
{
	int rc;
	struct sk_buff_head xmitq;
	struct tipc_bclink_entry *be;
	struct tipc_link_entry *le;
	struct tipc_msg *hdr = buf_msg(skb);
	int usr = msg_user(hdr);
	u32 dnode = msg_destnode(hdr);
	struct tipc_node *n;

	__skb_queue_head_init(&xmitq);

	/* If NACK for other node, let rcv link for that node peek into it */
	if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
		n = tipc_node_find(net, dnode);
	else
		n = tipc_node_find(net, msg_prevnode(hdr));
	if (!n) {
		kfree_skb(skb);
		return;
	}
	be = &n->bc_entry;
	le = &n->links[bearer_id];

	rc = tipc_bcast_rcv(net, be->link, skb);

	/* Broadcast ACKs are sent on a unicast link */
	if (rc & TIPC_LINK_SND_STATE) {
		tipc_node_read_lock(n);
		tipc_link_build_state_msg(le->link, &xmitq);
		tipc_node_read_unlock(n);
	}

	if (!skb_queue_empty(&xmitq))
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);

	if (!skb_queue_empty(&be->inputq1))
		tipc_node_mcast_rcv(n);

	/* Handle NAME_DISTRIBUTOR messages sent from 1.7 nodes */
	if (!skb_queue_empty(&n->bc_entry.namedq))
		tipc_named_rcv(net, &n->bc_entry.namedq);

	/* If reassembly or retransmission failure => reset all links to peer */
	if (rc & TIPC_LINK_DOWN_EVT)
		tipc_node_reset_links(n);

	tipc_node_put(n);
}

1856/**
1857 * tipc_node_check_state - check and if necessary update node state
1858 * @skb: TIPC packet
1859 * @bearer_id: identity of bearer delivering the packet
1860 * Returns true if state and msg are ok, otherwise false
1861 */
1862static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1863 int bearer_id, struct sk_buff_head *xmitq)
1864{
1865 struct tipc_msg *hdr = buf_msg(skb);
1866 int usr = msg_user(hdr);
1867 int mtyp = msg_type(hdr);
1868 u16 oseqno = msg_seqno(hdr);
1869 u16 exp_pkts = msg_msgcnt(hdr);
1870 u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
1871 int state = n->state;
1872 struct tipc_link *l, *tnl, *pl = NULL;
1873 struct tipc_media_addr *maddr;
1874 int pb_id;
1875
1876 if (trace_tipc_node_check_state_enabled()) {
1877 trace_tipc_skb_dump(skb, false, "skb for node state check");
1878 trace_tipc_node_check_state(n, true, " ");
1879 }
1880 l = n->links[bearer_id].link;
1881 if (!l)
1882 return false;
1883 rcv_nxt = tipc_link_rcv_nxt(l);
1884
1885
1886 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1887 return true;
1888
1889 /* Find parallel link, if any */
1890 for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1891 if ((pb_id != bearer_id) && n->links[pb_id].link) {
1892 pl = n->links[pb_id].link;
1893 break;
1894 }
1895 }
1896
1897 if (!tipc_link_validate_msg(l, hdr)) {
1898 trace_tipc_skb_dump(skb, false, "PROTO invalid (2)!");
1899 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (2)!");
1900 return false;
1901 }
1902
1903 /* Check and update node accesibility if applicable */
1904 if (state == SELF_UP_PEER_COMING) {
1905 if (!tipc_link_is_up(l))
1906 return true;
1907 if (!msg_peer_link_is_up(hdr))
1908 return true;
1909 tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
1910 }
1911
1912 if (state == SELF_DOWN_PEER_LEAVING) {
1913 if (msg_peer_node_is_up(hdr))
1914 return false;
1915 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1916 return true;
1917 }
1918
1919 if (state == SELF_LEAVING_PEER_DOWN)
1920 return false;
1921
1922 /* Ignore duplicate packets */
1923 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1924 return true;
1925
1926 /* Initiate or update failover mode if applicable */
1927 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1928 syncpt = oseqno + exp_pkts - 1;
1929 if (pl && !tipc_link_is_reset(pl)) {
1930 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1931 trace_tipc_node_link_down(n, true,
1932 "node link down <- failover!");
1933 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1934 tipc_link_inputq(l));
1935 }
1936
1937 /* If parallel link was already down, and this happened before
1938 * the tunnel link came up, node failover was never started.
1939		 * Ensure that a FAILOVER_MSG is sent to get the peer out of
1940		 * NODE_FAILINGOVER state, and that this node accepts
1941		 * TUNNEL_MSGs from the peer.
1942 */
1943 if (n->state != NODE_FAILINGOVER)
1944 tipc_node_link_failover(n, pl, l, xmitq);
1945
1946 /* If pkts arrive out of order, use lowest calculated syncpt */
1947 if (less(syncpt, n->sync_point))
1948 n->sync_point = syncpt;
1949 }
1950
1951 /* Open parallel link when tunnel link reaches synch point */
1952 if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
1953 if (!more(rcv_nxt, n->sync_point))
1954 return true;
1955 tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
1956 if (pl)
1957 tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
1958 return true;
1959 }
1960
1961 /* No synching needed if only one link */
1962 if (!pl || !tipc_link_is_up(pl))
1963 return true;
1964
1965 /* Initiate synch mode if applicable */
1966 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
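		/* Peers with TIPC_TUNNEL_ENHANCED carry the synch point in a
		 * dedicated field; otherwise derive it from the tunneled
		 * (inner) header
		 */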
1967 if (n->capabilities & TIPC_TUNNEL_ENHANCED)
1968 syncpt = msg_syncpt(hdr);
1969 else
1970 syncpt = msg_seqno(msg_inner_hdr(hdr)) + exp_pkts - 1;
1971 if (!tipc_link_is_up(l))
1972 __tipc_node_link_up(n, bearer_id, xmitq);
1973 if (n->state == SELF_UP_PEER_UP) {
1974 n->sync_point = syncpt;
1975 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
1976 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
1977 }
1978 }
1979
1980 /* Open tunnel link when parallel link reaches synch point */
1981 if (n->state == NODE_SYNCHING) {
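		/* Let tnl refer to the synching tunnel link, pl to its peer */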
1982 if (tipc_link_is_synching(l)) {
1983 tnl = l;
1984 } else {
1985 tnl = pl;
1986 pl = l;
1987 }
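		/* Seqno of the oldest packet not yet delivered on the peer
		 * link
		 */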
1988 inputq_len = skb_queue_len(tipc_link_inputq(pl));
1989 dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
1990 if (more(dlv_nxt, n->sync_point)) {
1991 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
1992 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
1993 return true;
1994 }
1995 if (l == pl)
1996 return true;
1997 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
1998 return true;
1999 if (usr == LINK_PROTOCOL)
2000 return true;
2001 return false;
2002 }
2003 return true;
2004}
2005
2006/**
2007 * tipc_rcv - process TIPC packets/messages arriving from off-node
2008 * @net: the applicable net namespace
2009 * @skb: TIPC packet
2010 * @b: pointer to the bearer the message arrived on
2011 *
2012 * Invoked with no locks held. Bearer pointer must point to a valid bearer
2013 * structure (i.e. cannot be NULL), but bearer can be inactive.
2014 */
2015void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
2016{
2017 struct sk_buff_head xmitq;
2018 struct tipc_link_entry *le;
2019 struct tipc_msg *hdr;
2020 struct tipc_node *n;
2021 int bearer_id = b->identity;
2022 u32 self = tipc_own_addr(net);
2023 int usr, rc = 0;
2024 u16 bc_ack;
2025#ifdef CONFIG_TIPC_CRYPTO
2026 struct tipc_ehdr *ehdr;
2027
2028 /* Check if message must be decrypted first */
2029 if (TIPC_SKB_CB(skb)->decrypted || !tipc_ehdr_validate(skb))
2030 goto rcv;
2031
2032 ehdr = (struct tipc_ehdr *)skb->data;
2033 if (likely(ehdr->user != LINK_CONFIG)) {
2034 n = tipc_node_find(net, ntohl(ehdr->addr));
2035 if (unlikely(!n))
2036 goto discard;
2037 } else {
2038 n = tipc_node_find_by_id(net, ehdr->id);
2039 }
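	/* Decrypt; tipc_crypto_rcv() may consume the buffer and NULL it out */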
2040 tipc_crypto_rcv(net, (n) ? n->crypto_rx : NULL, &skb, b);
2041 if (!skb)
2042 return;
2043
2044rcv:
2045#endif
2046 /* Ensure message is well-formed before touching the header */
2047 if (unlikely(!tipc_msg_validate(&skb)))
2048 goto discard;
2049 __skb_queue_head_init(&xmitq);
2050 hdr = buf_msg(skb);
2051 usr = msg_user(hdr);
2052 bc_ack = msg_bcast_ack(hdr);
2053
2054 /* Handle arrival of discovery or broadcast packet */
2055 if (unlikely(msg_non_seq(hdr))) {
2056 if (unlikely(usr == LINK_CONFIG))
2057 return tipc_disc_rcv(net, skb, b);
2058 else
2059 return tipc_node_bc_rcv(net, skb, bearer_id);
2060 }
2061
2062 /* Discard unicast link messages destined for another node */
2063 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
2064 goto discard;
2065
2066 /* Locate neighboring node that sent packet */
2067 n = tipc_node_find(net, msg_prevnode(hdr));
2068 if (unlikely(!n))
2069 goto discard;
2070 le = &n->links[bearer_id];
2071
2072 /* Ensure broadcast reception is in synch with peer's send state */
2073 if (unlikely(usr == LINK_PROTOCOL)) {
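		/* Protocol messages are parsed beyond the header;
		 * linearize first
		 */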
2074 if (unlikely(skb_linearize(skb))) {
2075 tipc_node_put(n);
2076 goto discard;
2077 }
2078 hdr = buf_msg(skb);
2079 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
2080 } else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack)) {
2081 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
2082 }
2083
2084 /* Receive packet directly if conditions permit */
2085 tipc_node_read_lock(n);
2086 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
2087 spin_lock_bh(&le->lock);
2088 if (le->link) {
2089 rc = tipc_link_rcv(le->link, skb, &xmitq);
2090 skb = NULL;
2091 }
2092 spin_unlock_bh(&le->lock);
2093 }
2094 tipc_node_read_unlock(n);
2095
2096 /* Check/update node state before receiving */
2097 if (unlikely(skb)) {
2098 if (unlikely(skb_linearize(skb)))
2099 goto out_node_put;
2100 tipc_node_write_lock(n);
2101 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
2102 if (le->link) {
2103 rc = tipc_link_rcv(le->link, skb, &xmitq);
2104 skb = NULL;
2105 }
2106 }
2107 tipc_node_write_unlock(n);
2108 }
2109
2110 if (unlikely(rc & TIPC_LINK_UP_EVT))
2111 tipc_node_link_up(n, bearer_id, &xmitq);
2112
2113 if (unlikely(rc & TIPC_LINK_DOWN_EVT))
2114 tipc_node_link_down(n, bearer_id, false);
2115
2116 if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
2117 tipc_named_rcv(net, &n->bc_entry.namedq);
2118
2119 if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
2120 tipc_node_mcast_rcv(n);
2121
2122 if (!skb_queue_empty(&le->inputq))
2123 tipc_sk_rcv(net, &le->inputq);
2124
2125 if (!skb_queue_empty(&xmitq))
2126 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr, n);
2127
2128out_node_put:
2129 tipc_node_put(n);
2130discard:
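	/* skb is NULL here if it was consumed above; kfree_skb() tolerates that */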
2131 kfree_skb(skb);
2132}
2133
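/* tipc_node_apply_property - propagate a changed bearer property (currently
 * tolerance or MTU) to the matching link on every known peer node
 */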
2134void tipc_node_apply_property(struct net *net, struct tipc_bearer *b,
2135 int prop)
2136{
2137 struct tipc_net *tn = tipc_net(net);
2138 int bearer_id = b->identity;
2139 struct sk_buff_head xmitq;
2140 struct tipc_link_entry *e;
2141 struct tipc_node *n;
2142
2143 __skb_queue_head_init(&xmitq);
2144
2145 rcu_read_lock();
2146
2147 list_for_each_entry_rcu(n, &tn->node_list, list) {
2148 tipc_node_write_lock(n);
2149 e = &n->links[bearer_id];
2150 if (e->link) {
2151 if (prop == TIPC_NLA_PROP_TOL)
2152 tipc_link_set_tolerance(e->link, b->tolerance,
2153 &xmitq);
2154 else if (prop == TIPC_NLA_PROP_MTU)
2155 tipc_link_set_mtu(e->link, b->mtu);
2156 }
2157 tipc_node_write_unlock(n);
2158 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr, NULL);
2159 }
2160
2161 rcu_read_unlock();
2162}
2163
2164int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
2165{
2166 struct net *net = sock_net(skb->sk);
2167 struct tipc_net *tn = net_generic(net, tipc_net_id);
2168 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
2169 struct tipc_node *peer, *temp_node;
2170 u32 addr;
2171 int err;
2172
2173	/* We identify the peer by the address carried in the net attribute */
2174 if (!info->attrs[TIPC_NLA_NET])
2175 return -EINVAL;
2176
2177 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_NET_MAX,
2178 info->attrs[TIPC_NLA_NET],
2179 tipc_nl_net_policy, info->extack);
2180 if (err)
2181 return err;
2182
2183 if (!attrs[TIPC_NLA_NET_ADDR])
2184 return -EINVAL;
2185
2186 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
2187
2188 if (in_own_node(net, addr))
2189 return -ENOTSUPP;
2190
2191 spin_lock_bh(&tn->node_list_lock);
2192 peer = tipc_node_find(net, addr);
2193 if (!peer) {
2194 spin_unlock_bh(&tn->node_list_lock);
2195 return -ENXIO;
2196 }
2197
2198 tipc_node_write_lock(peer);
2199 if (peer->state != SELF_DOWN_PEER_DOWN &&
2200 peer->state != SELF_DOWN_PEER_LEAVING) {
2201 tipc_node_write_unlock(peer);
2202 err = -EBUSY;
2203 goto err_out;
2204 }
2205
2206 tipc_node_clear_links(peer);
2207 tipc_node_write_unlock(peer);
2208 tipc_node_delete(peer);
2209
2210 /* Calculate cluster capabilities */
2211 tn->capabilities = TIPC_NODE_CAPABILITIES;
2212 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
2213 tn->capabilities &= temp_node->capabilities;
2214 }
2215 tipc_bcast_toggle_rcast(net, (tn->capabilities & TIPC_BCAST_RCAST));
2216 err = 0;
2217err_out:
2218 tipc_node_put(peer);
2219 spin_unlock_bh(&tn->node_list_lock);
2220
2221 return err;
2222}
2223
2224int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
2225{
2226 int err;
2227 struct net *net = sock_net(skb->sk);
2228 struct tipc_net *tn = net_generic(net, tipc_net_id);
2229 int done = cb->args[0];
2230 int last_addr = cb->args[1];
2231 struct tipc_node *node;
2232 struct tipc_nl_msg msg;
2233
2234 if (done)
2235 return 0;
2236
2237 msg.skb = skb;
2238 msg.portid = NETLINK_CB(cb->skb).portid;
2239 msg.seq = cb->nlh->nlmsg_seq;
2240
2241 rcu_read_lock();
2242 if (last_addr) {
2243 node = tipc_node_find(net, last_addr);
2244 if (!node) {
2245 rcu_read_unlock();
2246			/* We never set seq or call nl_dump_check_consistent(),
2247			 * which means that setting prev_seq here will cause the
2248			 * consistency check to fail in the netlink callback
2249			 * handler. This results in the NLMSG_DONE message having
2250			 * the NLM_F_DUMP_INTR flag set if the node state
2251			 * changed while we released the lock.
2252 */
2253 cb->prev_seq = 1;
2254 return -EPIPE;
2255 }
2256 tipc_node_put(node);
2257 }
2258
2259 list_for_each_entry_rcu(node, &tn->node_list, list) {
2260 if (node->preliminary)
2261 continue;
2262 if (last_addr) {
2263 if (node->addr == last_addr)
2264 last_addr = 0;
2265 else
2266 continue;
2267 }
2268
2269 tipc_node_read_lock(node);
2270 err = __tipc_nl_add_node(&msg, node);
2271 if (err) {
2272 last_addr = node->addr;
2273 tipc_node_read_unlock(node);
2274 goto out;
2275 }
2276
2277 tipc_node_read_unlock(node);
2278 }
2279 done = 1;
2280out:
2281 cb->args[0] = done;
2282 cb->args[1] = last_addr;
2283 rcu_read_unlock();
2284
2285 return skb->len;
2286}
2287
2288/* tipc_node_find_by_name - locate owner node of link by link's name
2289 * @net: the applicable net namespace
2290 * @link_name: pointer to link name string
2291 * @bearer_id: pointer to index in 'node->links' array where the link was found.
2292 *
2293 * Returns pointer to node owning the link, or NULL if no matching link is found.
2294 */
2295static struct tipc_node *tipc_node_find_by_name(struct net *net,
2296 const char *link_name,
2297 unsigned int *bearer_id)
2298{
2299 struct tipc_net *tn = net_generic(net, tipc_net_id);
2300 struct tipc_link *l;
2301 struct tipc_node *n;
2302 struct tipc_node *found_node = NULL;
2303 int i;
2304
2305 *bearer_id = 0;
2306 rcu_read_lock();
2307 list_for_each_entry_rcu(n, &tn->node_list, list) {
2308 tipc_node_read_lock(n);
2309 for (i = 0; i < MAX_BEARERS; i++) {
2310 l = n->links[i].link;
2311 if (l && !strcmp(tipc_link_name(l), link_name)) {
2312 *bearer_id = i;
2313 found_node = n;
2314 break;
2315 }
2316 }
2317 tipc_node_read_unlock(n);
2318 if (found_node)
2319 break;
2320 }
2321 rcu_read_unlock();
2322
2323 return found_node;
2324}
2325
2326int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
2327{
2328 int err;
2329 int res = 0;
2330 int bearer_id;
2331 char *name;
2332 struct tipc_link *link;
2333 struct tipc_node *node;
2334 struct sk_buff_head xmitq;
2335 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2336 struct net *net = sock_net(skb->sk);
2337
2338 __skb_queue_head_init(&xmitq);
2339
2340 if (!info->attrs[TIPC_NLA_LINK])
2341 return -EINVAL;
2342
2343 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2344 info->attrs[TIPC_NLA_LINK],
2345 tipc_nl_link_policy, info->extack);
2346 if (err)
2347 return err;
2348
2349 if (!attrs[TIPC_NLA_LINK_NAME])
2350 return -EINVAL;
2351
2352 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2353
2354 if (strcmp(name, tipc_bclink_name) == 0)
2355 return tipc_nl_bc_link_set(net, attrs);
2356
2357 node = tipc_node_find_by_name(net, name, &bearer_id);
2358 if (!node)
2359 return -EINVAL;
2360
2361 tipc_node_read_lock(node);
2362
2363 link = node->links[bearer_id].link;
2364 if (!link) {
2365 res = -EINVAL;
2366 goto out;
2367 }
2368
2369 if (attrs[TIPC_NLA_LINK_PROP]) {
2370 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
2371
2372 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
2373 if (err) {
2374 res = err;
2375 goto out;
2376 }
2377
2378 if (props[TIPC_NLA_PROP_TOL]) {
2379 u32 tol;
2380
2381 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2382 tipc_link_set_tolerance(link, tol, &xmitq);
2383 }
2384 if (props[TIPC_NLA_PROP_PRIO]) {
2385 u32 prio;
2386
2387 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2388 tipc_link_set_prio(link, prio, &xmitq);
2389 }
2390 if (props[TIPC_NLA_PROP_WIN]) {
2391 u32 max_win;
2392
2393 max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2394 tipc_link_set_queue_limits(link,
2395 tipc_link_min_win(link),
2396 max_win);
2397 }
2398 }
2399
2400out:
2401 tipc_node_read_unlock(node);
2402 tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr,
2403 NULL);
2404 return res;
2405}
2406
2407int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
2408{
2409 struct net *net = genl_info_net(info);
2410 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2411 struct tipc_nl_msg msg;
2412 char *name;
2413 int err;
2414
2415 msg.portid = info->snd_portid;
2416 msg.seq = info->snd_seq;
2417
2418 if (!info->attrs[TIPC_NLA_LINK])
2419 return -EINVAL;
2420
2421 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2422 info->attrs[TIPC_NLA_LINK],
2423 tipc_nl_link_policy, info->extack);
2424 if (err)
2425 return err;
2426
2427 if (!attrs[TIPC_NLA_LINK_NAME])
2428 return -EINVAL;
2429
2430 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2431
2432 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2433 if (!msg.skb)
2434 return -ENOMEM;
2435
2436 if (strcmp(name, tipc_bclink_name) == 0) {
2437 err = tipc_nl_add_bc_link(net, &msg, tipc_net(net)->bcl);
2438 if (err)
2439 goto err_free;
2440 } else {
2441 int bearer_id;
2442 struct tipc_node *node;
2443 struct tipc_link *link;
2444
2445 node = tipc_node_find_by_name(net, name, &bearer_id);
2446 if (!node) {
2447 err = -EINVAL;
2448 goto err_free;
2449 }
2450
2451 tipc_node_read_lock(node);
2452 link = node->links[bearer_id].link;
2453 if (!link) {
2454 tipc_node_read_unlock(node);
2455 err = -EINVAL;
2456 goto err_free;
2457 }
2458
2459 err = __tipc_nl_add_link(net, &msg, link, 0);
2460 tipc_node_read_unlock(node);
2461 if (err)
2462 goto err_free;
2463 }
2464
2465 return genlmsg_reply(msg.skb, info);
2466
2467err_free:
2468 nlmsg_free(msg.skb);
2469 return err;
2470}
2471
2472int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
2473{
2474 int err;
2475 char *link_name;
2476 unsigned int bearer_id;
2477 struct tipc_link *link;
2478 struct tipc_node *node;
2479 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2480 struct net *net = sock_net(skb->sk);
2481 struct tipc_net *tn = tipc_net(net);
2482 struct tipc_link_entry *le;
2483
2484 if (!info->attrs[TIPC_NLA_LINK])
2485 return -EINVAL;
2486
2487 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_LINK_MAX,
2488 info->attrs[TIPC_NLA_LINK],
2489 tipc_nl_link_policy, info->extack);
2490 if (err)
2491 return err;
2492
2493 if (!attrs[TIPC_NLA_LINK_NAME])
2494 return -EINVAL;
2495
2496 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2497
2498 err = -EINVAL;
2499 if (!strcmp(link_name, tipc_bclink_name)) {
2500 err = tipc_bclink_reset_stats(net, tipc_bc_sndlink(net));
2501 if (err)
2502 return err;
2503 return 0;
2504 } else if (strstr(link_name, tipc_bclink_name)) {
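		/* Name embeds the broadcast link name: reset the matching
		 * per-peer broadcast receive link
		 */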
2505 rcu_read_lock();
2506 list_for_each_entry_rcu(node, &tn->node_list, list) {
2507 tipc_node_read_lock(node);
2508 link = node->bc_entry.link;
2509 if (link && !strcmp(link_name, tipc_link_name(link))) {
2510 err = tipc_bclink_reset_stats(net, link);
2511 tipc_node_read_unlock(node);
2512 break;
2513 }
2514 tipc_node_read_unlock(node);
2515 }
2516 rcu_read_unlock();
2517 return err;
2518 }
2519
2520 node = tipc_node_find_by_name(net, link_name, &bearer_id);
2521 if (!node)
2522 return -EINVAL;
2523
2524 le = &node->links[bearer_id];
2525 tipc_node_read_lock(node);
2526 spin_lock_bh(&le->lock);
2527 link = node->links[bearer_id].link;
2528 if (!link) {
2529 spin_unlock_bh(&le->lock);
2530 tipc_node_read_unlock(node);
2531 return -EINVAL;
2532 }
2533 tipc_link_reset_stats(link);
2534 spin_unlock_bh(&le->lock);
2535 tipc_node_read_unlock(node);
2536 return 0;
2537}
2538
2539/* Caller should hold node lock */
2540static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2541 struct tipc_node *node, u32 *prev_link,
2542 bool bc_link)
2543{
2544 u32 i;
2545 int err;
2546
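	/* Resume from the link index where a previous dump pass stopped */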
2547 for (i = *prev_link; i < MAX_BEARERS; i++) {
2548 *prev_link = i;
2549
2550 if (!node->links[i].link)
2551 continue;
2552
2553 err = __tipc_nl_add_link(net, msg,
2554 node->links[i].link, NLM_F_MULTI);
2555 if (err)
2556 return err;
2557 }
2558
2559 if (bc_link) {
2560 *prev_link = i;
2561 err = tipc_nl_add_bc_link(net, msg, node->bc_entry.link);
2562 if (err)
2563 return err;
2564 }
2565
2566 *prev_link = 0;
2567
2568 return 0;
2569}
2570
2571int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
2572{
2573 struct net *net = sock_net(skb->sk);
2574 struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
2575 struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
2576 struct tipc_net *tn = net_generic(net, tipc_net_id);
2577 struct tipc_node *node;
2578 struct tipc_nl_msg msg;
2579 u32 prev_node = cb->args[0];
2580 u32 prev_link = cb->args[1];
2581 int done = cb->args[2];
2582 bool bc_link = cb->args[3];
2583 int err;
2584
2585 if (done)
2586 return 0;
2587
2588 if (!prev_node) {
2589		/* Check if dumping of broadcast-receiver links is needed */
2590 if (attrs && attrs[TIPC_NLA_LINK]) {
2591 err = nla_parse_nested_deprecated(link,
2592 TIPC_NLA_LINK_MAX,
2593 attrs[TIPC_NLA_LINK],
2594 tipc_nl_link_policy,
2595 NULL);
2596 if (unlikely(err))
2597 return err;
2598 if (unlikely(!link[TIPC_NLA_LINK_BROADCAST]))
2599 return -EINVAL;
2600 bc_link = true;
2601 }
2602 }
2603
2604 msg.skb = skb;
2605 msg.portid = NETLINK_CB(cb->skb).portid;
2606 msg.seq = cb->nlh->nlmsg_seq;
2607
2608 rcu_read_lock();
2609 if (prev_node) {
2610 node = tipc_node_find(net, prev_node);
2611 if (!node) {
2612			/* We never set seq or call nl_dump_check_consistent(),
2613			 * which means that setting prev_seq here will cause the
2614			 * consistency check to fail in the netlink callback
2615			 * handler. This results in the last NLMSG_DONE message
2616			 * having the NLM_F_DUMP_INTR flag set.
2617 */
2618 cb->prev_seq = 1;
2619 goto out;
2620 }
2621 tipc_node_put(node);
2622
2623 list_for_each_entry_continue_rcu(node, &tn->node_list,
2624 list) {
2625 tipc_node_read_lock(node);
2626 err = __tipc_nl_add_node_links(net, &msg, node,
2627 &prev_link, bc_link);
2628 tipc_node_read_unlock(node);
2629 if (err)
2630 goto out;
2631
2632 prev_node = node->addr;
2633 }
2634 } else {
2635 err = tipc_nl_add_bc_link(net, &msg, tn->bcl);
2636 if (err)
2637 goto out;
2638
2639 list_for_each_entry_rcu(node, &tn->node_list, list) {
2640 tipc_node_read_lock(node);
2641 err = __tipc_nl_add_node_links(net, &msg, node,
2642 &prev_link, bc_link);
2643 tipc_node_read_unlock(node);
2644 if (err)
2645 goto out;
2646
2647 prev_node = node->addr;
2648 }
2649 }
2650 done = 1;
2651out:
2652 rcu_read_unlock();
2653
2654 cb->args[0] = prev_node;
2655 cb->args[1] = prev_link;
2656 cb->args[2] = done;
2657 cb->args[3] = bc_link;
2658
2659 return skb->len;
2660}
2661
2662int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
2663{
2664 struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
2665 struct net *net = sock_net(skb->sk);
2666 int err;
2667
2668 if (!info->attrs[TIPC_NLA_MON])
2669 return -EINVAL;
2670
2671 err = nla_parse_nested_deprecated(attrs, TIPC_NLA_MON_MAX,
2672 info->attrs[TIPC_NLA_MON],
2673 tipc_nl_monitor_policy,
2674 info->extack);
2675 if (err)
2676 return err;
2677
2678 if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
2679 u32 val;
2680
2681 val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
2682 err = tipc_nl_monitor_set_threshold(net, val);
2683 if (err)
2684 return err;
2685 }
2686
2687 return 0;
2688}
2689
2690static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
2691{
2692 struct nlattr *attrs;
2693 void *hdr;
2694 u32 val;
2695
2696 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2697 0, TIPC_NL_MON_GET);
2698 if (!hdr)
2699 return -EMSGSIZE;
2700
2701 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_MON);
2702 if (!attrs)
2703 goto msg_full;
2704
2705 val = tipc_nl_monitor_get_threshold(net);
2706
2707 if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
2708 goto attr_msg_full;
2709
2710 nla_nest_end(msg->skb, attrs);
2711 genlmsg_end(msg->skb, hdr);
2712
2713 return 0;
2714
2715attr_msg_full:
2716 nla_nest_cancel(msg->skb, attrs);
2717msg_full:
2718 genlmsg_cancel(msg->skb, hdr);
2719
2720 return -EMSGSIZE;
2721}
2722
2723int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
2724{
2725 struct net *net = sock_net(skb->sk);
2726 struct tipc_nl_msg msg;
2727 int err;
2728
2729 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2730 if (!msg.skb)
2731 return -ENOMEM;
2732 msg.portid = info->snd_portid;
2733 msg.seq = info->snd_seq;
2734
2735 err = __tipc_nl_add_monitor_prop(net, &msg);
2736 if (err) {
2737 nlmsg_free(msg.skb);
2738 return err;
2739 }
2740
2741 return genlmsg_reply(msg.skb, info);
2742}
2743
2744int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2745{
2746 struct net *net = sock_net(skb->sk);
2747 u32 prev_bearer = cb->args[0];
2748 struct tipc_nl_msg msg;
2749 int bearer_id;
2750 int err;
2751
2752 if (prev_bearer == MAX_BEARERS)
2753 return 0;
2754
2755 msg.skb = skb;
2756 msg.portid = NETLINK_CB(cb->skb).portid;
2757 msg.seq = cb->nlh->nlmsg_seq;
2758
2759 rtnl_lock();
2760 for (bearer_id = prev_bearer; bearer_id < MAX_BEARERS; bearer_id++) {
2761 err = __tipc_nl_add_monitor(net, &msg, bearer_id);
2762 if (err)
2763 break;
2764 }
2765 rtnl_unlock();
2766 cb->args[0] = bearer_id;
2767
2768 return skb->len;
2769}
2770
2771int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
2772 struct netlink_callback *cb)
2773{
2774 struct net *net = sock_net(skb->sk);
2775 u32 prev_node = cb->args[1];
2776 u32 bearer_id = cb->args[2];
2777 int done = cb->args[0];
2778 struct tipc_nl_msg msg;
2779 int err;
2780
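	/* First pass: extract the bearer to be monitored from the request */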
2781 if (!prev_node) {
2782 struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
2783 struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
2784
2785 if (!attrs[TIPC_NLA_MON])
2786 return -EINVAL;
2787
2788 err = nla_parse_nested_deprecated(mon, TIPC_NLA_MON_MAX,
2789 attrs[TIPC_NLA_MON],
2790 tipc_nl_monitor_policy,
2791 NULL);
2792 if (err)
2793 return err;
2794
2795 if (!mon[TIPC_NLA_MON_REF])
2796 return -EINVAL;
2797
2798 bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
2799
2800 if (bearer_id >= MAX_BEARERS)
2801 return -EINVAL;
2802 }
2803
2804 if (done)
2805 return 0;
2806
2807 msg.skb = skb;
2808 msg.portid = NETLINK_CB(cb->skb).portid;
2809 msg.seq = cb->nlh->nlmsg_seq;
2810
2811 rtnl_lock();
2812 err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
2813 if (!err)
2814 done = 1;
2815
2816 rtnl_unlock();
2817 cb->args[0] = done;
2818 cb->args[1] = prev_node;
2819 cb->args[2] = bearer_id;
2820
2821 return skb->len;
2822}
2823
2824#ifdef CONFIG_TIPC_CRYPTO
2825static int tipc_nl_retrieve_key(struct nlattr **attrs,
2826 struct tipc_aead_key **key)
2827{
2828 struct nlattr *attr = attrs[TIPC_NLA_NODE_KEY];
2829
2830 if (!attr)
2831 return -ENODATA;
2832
2833 *key = (struct tipc_aead_key *)nla_data(attr);
2834 if (nla_len(attr) < tipc_aead_key_size(*key))
2835 return -EINVAL;
2836
2837 return 0;
2838}
2839
2840static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
2841{
2842 struct nlattr *attr = attrs[TIPC_NLA_NODE_ID];
2843
2844 if (!attr)
2845 return -ENODATA;
2846
2847 if (nla_len(attr) < TIPC_NODEID_LEN)
2848 return -EINVAL;
2849
2850 *node_id = (u8 *)nla_data(attr);
2851 return 0;
2852}
2853
2854static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
2855{
2856 struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
2857 struct net *net = sock_net(skb->sk);
2858 struct tipc_net *tn = tipc_net(net);
2859 struct tipc_node *n = NULL;
2860 struct tipc_aead_key *ukey;
2861 struct tipc_crypto *c;
2862 u8 *id, *own_id;
2863 int rc = 0;
2864
2865 if (!info->attrs[TIPC_NLA_NODE])
2866 return -EINVAL;
2867
2868 rc = nla_parse_nested(attrs, TIPC_NLA_NODE_MAX,
2869 info->attrs[TIPC_NLA_NODE],
2870 tipc_nl_node_policy, info->extack);
2871 if (rc)
2872 goto exit;
2873
2874 own_id = tipc_own_id(net);
2875 if (!own_id) {
2876 rc = -EPERM;
2877 goto exit;
2878 }
2879
2880 rc = tipc_nl_retrieve_key(attrs, &ukey);
2881 if (rc)
2882 goto exit;
2883
2884 rc = tipc_aead_key_validate(ukey);
2885 if (rc)
2886 goto exit;
2887
2888 rc = tipc_nl_retrieve_nodeid(attrs, &id);
2889 switch (rc) {
2890 case -ENODATA:
2891 /* Cluster key mode */
2892 rc = tipc_crypto_key_init(tn->crypto_tx, ukey, CLUSTER_KEY);
2893 break;
2894 case 0:
2895 /* Per-node key mode */
2896 if (!memcmp(id, own_id, NODE_ID_LEN)) {
2897 c = tn->crypto_tx;
2898 } else {
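			/* Peer not yet known: create a preliminary node
			 * entry to hold its RX key
			 */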
2899 n = tipc_node_find_by_id(net, id) ?:
2900 tipc_node_create(net, 0, id, 0xffffu, 0, true);
2901 if (unlikely(!n)) {
2902 rc = -ENOMEM;
2903 break;
2904 }
2905 c = n->crypto_rx;
2906 }
2907
2908 rc = tipc_crypto_key_init(c, ukey, PER_NODE_KEY);
2909 if (n)
2910 tipc_node_put(n);
2911 break;
2912 default:
2913 break;
2914 }
2915
2916exit:
2917 return (rc < 0) ? rc : 0;
2918}
2919
2920int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
2921{
2922 int err;
2923
2924 rtnl_lock();
2925 err = __tipc_nl_node_set_key(skb, info);
2926 rtnl_unlock();
2927
2928 return err;
2929}
2930
2931static int __tipc_nl_node_flush_key(struct sk_buff *skb,
2932 struct genl_info *info)
2933{
2934 struct net *net = sock_net(skb->sk);
2935 struct tipc_net *tn = tipc_net(net);
2936 struct tipc_node *n;
2937
2938 tipc_crypto_key_flush(tn->crypto_tx);
2939 rcu_read_lock();
2940 list_for_each_entry_rcu(n, &tn->node_list, list)
2941 tipc_crypto_key_flush(n->crypto_rx);
2942 rcu_read_unlock();
2943
2944 pr_info("All keys are flushed!\n");
2945 return 0;
2946}
2947
2948int tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
2949{
2950 int err;
2951
2952 rtnl_lock();
2953 err = __tipc_nl_node_flush_key(skb, info);
2954 rtnl_unlock();
2955
2956 return err;
2957}
2958#endif
2959
2960/**
2961 * tipc_node_dump - dump TIPC node data
2962 * @n: tipc node to be dumped
2963 * @more: dump more?
2964 * - false: dump only tipc node data
2965 * - true: dump node link data as well
2966 * @buf: buffer in which the formatted dump data is returned
2967 */
2968int tipc_node_dump(struct tipc_node *n, bool more, char *buf)
2969{
2970 int i = 0;
2971 size_t sz = (more) ? NODE_LMAX : NODE_LMIN;
2972
2973 if (!n) {
2974 i += scnprintf(buf, sz, "node data: (null)\n");
2975 return i;
2976 }
2977
2978 i += scnprintf(buf, sz, "node data: %x", n->addr);
2979 i += scnprintf(buf + i, sz - i, " %x", n->state);
2980 i += scnprintf(buf + i, sz - i, " %d", n->active_links[0]);
2981 i += scnprintf(buf + i, sz - i, " %d", n->active_links[1]);
2982 i += scnprintf(buf + i, sz - i, " %x", n->action_flags);
2983 i += scnprintf(buf + i, sz - i, " %u", n->failover_sent);
2984 i += scnprintf(buf + i, sz - i, " %u", n->sync_point);
2985 i += scnprintf(buf + i, sz - i, " %d", n->link_cnt);
2986 i += scnprintf(buf + i, sz - i, " %u", n->working_links);
2987 i += scnprintf(buf + i, sz - i, " %x", n->capabilities);
2988 i += scnprintf(buf + i, sz - i, " %lu\n", n->keepalive_intv);
2989
2990 if (!more)
2991 return i;
2992
2993 i += scnprintf(buf + i, sz - i, "link_entry[0]:\n");
2994 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[0].mtu);
2995 i += scnprintf(buf + i, sz - i, " media: ");
2996 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[0].maddr);
2997 i += scnprintf(buf + i, sz - i, "\n");
2998 i += tipc_link_dump(n->links[0].link, TIPC_DUMP_NONE, buf + i);
2999 i += scnprintf(buf + i, sz - i, " inputq: ");
3000 i += tipc_list_dump(&n->links[0].inputq, false, buf + i);
3001
3002 i += scnprintf(buf + i, sz - i, "link_entry[1]:\n");
3003 i += scnprintf(buf + i, sz - i, " mtu: %u\n", n->links[1].mtu);
3004 i += scnprintf(buf + i, sz - i, " media: ");
3005 i += tipc_media_addr_printf(buf + i, sz - i, &n->links[1].maddr);
3006 i += scnprintf(buf + i, sz - i, "\n");
3007 i += tipc_link_dump(n->links[1].link, TIPC_DUMP_NONE, buf + i);
3008 i += scnprintf(buf + i, sz - i, " inputq: ");
3009 i += tipc_list_dump(&n->links[1].inputq, false, buf + i);
3010
3011 i += scnprintf(buf + i, sz - i, "bclink:\n ");
3012 i += tipc_link_dump(n->bc_entry.link, TIPC_DUMP_NONE, buf + i);
3013
3014 return i;
3015}
3016
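/* tipc_node_pre_cleanup_net - detach nodes in other namespaces whose peer_net
 * points at the exiting namespace, so it can be torn down safely
 */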
3017void tipc_node_pre_cleanup_net(struct net *exit_net)
3018{
3019 struct tipc_node *n;
3020 struct tipc_net *tn;
3021 struct net *tmp;
3022
3023 rcu_read_lock();
3024 for_each_net_rcu(tmp) {
3025 if (tmp == exit_net)
3026 continue;
3027 tn = tipc_net(tmp);
3028 if (!tn)
3029 continue;
3030 spin_lock_bh(&tn->node_list_lock);
3031 list_for_each_entry_rcu(n, &tn->node_list, list) {
3032 if (!n->peer_net)
3033 continue;
3034 if (n->peer_net != exit_net)
3035 continue;
3036 tipc_node_write_lock(n);
3037 n->peer_net = NULL;
3038 n->peer_hash_mix = 0;
3039 tipc_node_write_unlock_fast(n);
3040 break;
3041 }
3042 spin_unlock_bh(&tn->node_list_lock);
3043 }
3044 rcu_read_unlock();
3045}