Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v5.9-rc1, 1923 lines, 48 kB
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Functions (Endnode and Router)
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *              Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                                 "return-to-sender" bits on outgoing
 *                                 packets.
 *              Steve Whitehouse : Timeouts for cached routes.
 *              Steve Whitehouse : Use dst cache for input routes too.
 *              Steve Whitehouse : Fixed error values in dn_send_skb.
 *              Steve Whitehouse : Rework routing functions to better fit
 *                                 DECnet routing design
 *              Alexey Kuznetsov : New SMP locking
 *              Steve Whitehouse : More SMP locking changes & dn_cache_dump()
 *              Steve Whitehouse : Prerouting NF hook, now really is prerouting.
 *                                 Fixed possible skb leak in rtnetlink funcs.
 *              Steve Whitehouse : Dave Miller's dynamic hash table sizing and
 *                                 Alexey Kuznetsov's finer grained locking
 *                                 from ipv4/route.c.
 *              Steve Whitehouse : Routing is now starting to look like a
 *                                 sensible set of code now, mainly due to
 *                                 my copying the IPv4 routing code. The
 *                                 hooks here are modified and will continue
 *                                 to evolve for a while.
 *              Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                                 stuff. Look out raw sockets your days
 *                                 are numbered!
 *              Steve Whitehouse : Added return-to-sender functions. Added
 *                                 backlog congestion level return codes.
 *              Steve Whitehouse : Fixed bug where routes were set up with
 *                                 no ref count on net devices.
 *              Steve Whitehouse : RCU for the route cache
 *              Steve Whitehouse : Preparations for the flow cache
 *              Steve Whitehouse : Prepare for nonlinear skbs
 */

/******************************************************************************
    (c) 1995-1998 E.M. Serrat	emserrat@geocities.com

*******************************************************************************/

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/in_route.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/export.h>
#include <asm/errno.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>

struct dn_rt_hash_bucket
{
	struct dn_route __rcu *chain;
	spinlock_t lock;
};

extern struct neigh_table dn_neigh_table;


static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};

static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;

static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb , u32 mtu,
			       bool confirm_neigh);
static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(struct timer_list *unused);

static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned int dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush);
int decnet_dst_gc_interval = 2;

static struct dst_ops dn_dst_ops = {
	.family = PF_DECnet,
	.gc_thresh = 128,
	.gc = dn_dst_gc,
	.check = dn_dst_check,
	.default_advmss = dn_dst_default_advmss,
	.mtu = dn_dst_mtu,
	.cow_metrics = dst_cow_metrics_generic,
	.destroy = dn_dst_destroy,
	.ifdown = dn_dst_ifdown,
	.negative_advice = dn_dst_negative_advice,
	.link_failure = dn_dst_link_failure,
	.update_pmtu = dn_dst_update_pmtu,
	.redirect = dn_dst_redirect,
	.neigh_lookup = dn_dst_neigh_lookup,
};

static void dn_dst_destroy(struct dst_entry *dst)
{
	struct dn_route *rt = (struct dn_route *) dst;

	if (rt->n)
		neigh_release(rt->n);
	dst_destroy_metrics_generic(dst);
}

static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how)
{
if (how) { 159 struct dn_route *rt = (struct dn_route *) dst; 160 struct neighbour *n = rt->n; 161 162 if (n && n->dev == dev) { 163 n->dev = dev_net(dev)->loopback_dev; 164 dev_hold(n->dev); 165 dev_put(dev); 166 } 167 } 168} 169 170static __inline__ unsigned int dn_hash(__le16 src, __le16 dst) 171{ 172 __u16 tmp = (__u16 __force)(src ^ dst); 173 tmp ^= (tmp >> 3); 174 tmp ^= (tmp >> 5); 175 tmp ^= (tmp >> 10); 176 return dn_rt_hash_mask & (unsigned int)tmp; 177} 178 179static void dn_dst_check_expire(struct timer_list *unused) 180{ 181 int i; 182 struct dn_route *rt; 183 struct dn_route __rcu **rtp; 184 unsigned long now = jiffies; 185 unsigned long expire = 120 * HZ; 186 187 for (i = 0; i <= dn_rt_hash_mask; i++) { 188 rtp = &dn_rt_hash_table[i].chain; 189 190 spin_lock(&dn_rt_hash_table[i].lock); 191 while ((rt = rcu_dereference_protected(*rtp, 192 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) { 193 if (atomic_read(&rt->dst.__refcnt) > 1 || 194 (now - rt->dst.lastuse) < expire) { 195 rtp = &rt->dn_next; 196 continue; 197 } 198 *rtp = rt->dn_next; 199 rt->dn_next = NULL; 200 dst_dev_put(&rt->dst); 201 dst_release(&rt->dst); 202 } 203 spin_unlock(&dn_rt_hash_table[i].lock); 204 205 if ((jiffies - now) > 0) 206 break; 207 } 208 209 mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ); 210} 211 212static int dn_dst_gc(struct dst_ops *ops) 213{ 214 struct dn_route *rt; 215 struct dn_route __rcu **rtp; 216 int i; 217 unsigned long now = jiffies; 218 unsigned long expire = 10 * HZ; 219 220 for (i = 0; i <= dn_rt_hash_mask; i++) { 221 222 spin_lock_bh(&dn_rt_hash_table[i].lock); 223 rtp = &dn_rt_hash_table[i].chain; 224 225 while ((rt = rcu_dereference_protected(*rtp, 226 lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) { 227 if (atomic_read(&rt->dst.__refcnt) > 1 || 228 (now - rt->dst.lastuse) < expire) { 229 rtp = &rt->dn_next; 230 continue; 231 } 232 *rtp = rt->dn_next; 233 rt->dn_next = NULL; 234 dst_dev_put(&rt->dst); 235 dst_release(&rt->dst); 236 break; 237 } 238 spin_unlock_bh(&dn_rt_hash_table[i].lock); 239 } 240 241 return 0; 242} 243 244/* 245 * The decnet standards don't impose a particular minimum mtu, what they 246 * do insist on is that the routing layer accepts a datagram of at least 247 * 230 bytes long. Here we have to subtract the routing header length from 248 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we 249 * assume the worst and use a long header size. 250 * 251 * We update both the mtu and the advertised mss (i.e. the segment size we 252 * advertise to the other end). 253 */ 254static void dn_dst_update_pmtu(struct dst_entry *dst, struct sock *sk, 255 struct sk_buff *skb, u32 mtu, 256 bool confirm_neigh) 257{ 258 struct dn_route *rt = (struct dn_route *) dst; 259 struct neighbour *n = rt->n; 260 u32 min_mtu = 230; 261 struct dn_dev *dn; 262 263 dn = n ? 
rcu_dereference_raw(n->dev->dn_ptr) : NULL; 264 265 if (dn && dn->use_long == 0) 266 min_mtu -= 6; 267 else 268 min_mtu -= 21; 269 270 if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) { 271 if (!(dst_metric_locked(dst, RTAX_MTU))) { 272 dst_metric_set(dst, RTAX_MTU, mtu); 273 dst_set_expires(dst, dn_rt_mtu_expires); 274 } 275 if (!(dst_metric_locked(dst, RTAX_ADVMSS))) { 276 u32 mss = mtu - DN_MAX_NSP_DATA_HEADER; 277 u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS); 278 if (!existing_mss || existing_mss > mss) 279 dst_metric_set(dst, RTAX_ADVMSS, mss); 280 } 281 } 282} 283 284static void dn_dst_redirect(struct dst_entry *dst, struct sock *sk, 285 struct sk_buff *skb) 286{ 287} 288 289/* 290 * When a route has been marked obsolete. (e.g. routing cache flush) 291 */ 292static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie) 293{ 294 return NULL; 295} 296 297static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst) 298{ 299 dst_release(dst); 300 return NULL; 301} 302 303static void dn_dst_link_failure(struct sk_buff *skb) 304{ 305} 306 307static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2) 308{ 309 return ((fl1->daddr ^ fl2->daddr) | 310 (fl1->saddr ^ fl2->saddr) | 311 (fl1->flowidn_mark ^ fl2->flowidn_mark) | 312 (fl1->flowidn_scope ^ fl2->flowidn_scope) | 313 (fl1->flowidn_oif ^ fl2->flowidn_oif) | 314 (fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0; 315} 316 317static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp) 318{ 319 struct dn_route *rth; 320 struct dn_route __rcu **rthp; 321 unsigned long now = jiffies; 322 323 rthp = &dn_rt_hash_table[hash].chain; 324 325 spin_lock_bh(&dn_rt_hash_table[hash].lock); 326 while ((rth = rcu_dereference_protected(*rthp, 327 lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) { 328 if (compare_keys(&rth->fld, &rt->fld)) { 329 /* Put it first */ 330 *rthp = rth->dn_next; 331 rcu_assign_pointer(rth->dn_next, 332 dn_rt_hash_table[hash].chain); 333 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth); 334 335 dst_hold_and_use(&rth->dst, now); 336 spin_unlock_bh(&dn_rt_hash_table[hash].lock); 337 338 dst_release_immediate(&rt->dst); 339 *rp = rth; 340 return 0; 341 } 342 rthp = &rth->dn_next; 343 } 344 345 rcu_assign_pointer(rt->dn_next, dn_rt_hash_table[hash].chain); 346 rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt); 347 348 dst_hold_and_use(&rt->dst, now); 349 spin_unlock_bh(&dn_rt_hash_table[hash].lock); 350 *rp = rt; 351 return 0; 352} 353 354static void dn_run_flush(struct timer_list *unused) 355{ 356 int i; 357 struct dn_route *rt, *next; 358 359 for (i = 0; i < dn_rt_hash_mask; i++) { 360 spin_lock_bh(&dn_rt_hash_table[i].lock); 361 362 if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL) 363 goto nothing_to_declare; 364 365 for(; rt; rt = next) { 366 next = rcu_dereference_raw(rt->dn_next); 367 RCU_INIT_POINTER(rt->dn_next, NULL); 368 dst_dev_put(&rt->dst); 369 dst_release(&rt->dst); 370 } 371 372nothing_to_declare: 373 spin_unlock_bh(&dn_rt_hash_table[i].lock); 374 } 375} 376 377static DEFINE_SPINLOCK(dn_rt_flush_lock); 378 379void dn_rt_cache_flush(int delay) 380{ 381 unsigned long now = jiffies; 382 int user_mode = !in_interrupt(); 383 384 if (delay < 0) 385 delay = dn_rt_min_delay; 386 387 spin_lock_bh(&dn_rt_flush_lock); 388 389 if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) { 390 long tmo = (long)(dn_rt_deadline - now); 391 392 if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay) 393 tmo = 
0; 394 395 if (delay > tmo) 396 delay = tmo; 397 } 398 399 if (delay <= 0) { 400 spin_unlock_bh(&dn_rt_flush_lock); 401 dn_run_flush(NULL); 402 return; 403 } 404 405 if (dn_rt_deadline == 0) 406 dn_rt_deadline = now + dn_rt_max_delay; 407 408 dn_rt_flush_timer.expires = now + delay; 409 add_timer(&dn_rt_flush_timer); 410 spin_unlock_bh(&dn_rt_flush_lock); 411} 412 413/** 414 * dn_return_short - Return a short packet to its sender 415 * @skb: The packet to return 416 * 417 */ 418static int dn_return_short(struct sk_buff *skb) 419{ 420 struct dn_skb_cb *cb; 421 unsigned char *ptr; 422 __le16 *src; 423 __le16 *dst; 424 425 /* Add back headers */ 426 skb_push(skb, skb->data - skb_network_header(skb)); 427 428 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL) 429 return NET_RX_DROP; 430 431 cb = DN_SKB_CB(skb); 432 /* Skip packet length and point to flags */ 433 ptr = skb->data + 2; 434 *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS; 435 436 dst = (__le16 *)ptr; 437 ptr += 2; 438 src = (__le16 *)ptr; 439 ptr += 2; 440 *ptr = 0; /* Zero hop count */ 441 442 swap(*src, *dst); 443 444 skb->pkt_type = PACKET_OUTGOING; 445 dn_rt_finish_output(skb, NULL, NULL); 446 return NET_RX_SUCCESS; 447} 448 449/** 450 * dn_return_long - Return a long packet to its sender 451 * @skb: The long format packet to return 452 * 453 */ 454static int dn_return_long(struct sk_buff *skb) 455{ 456 struct dn_skb_cb *cb; 457 unsigned char *ptr; 458 unsigned char *src_addr, *dst_addr; 459 unsigned char tmp[ETH_ALEN]; 460 461 /* Add back all headers */ 462 skb_push(skb, skb->data - skb_network_header(skb)); 463 464 if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL) 465 return NET_RX_DROP; 466 467 cb = DN_SKB_CB(skb); 468 /* Ignore packet length and point to flags */ 469 ptr = skb->data + 2; 470 471 /* Skip padding */ 472 if (*ptr & DN_RT_F_PF) { 473 char padlen = (*ptr & ~DN_RT_F_PF); 474 ptr += padlen; 475 } 476 477 *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS; 478 ptr += 2; 479 dst_addr = ptr; 480 ptr += 8; 481 src_addr = ptr; 482 ptr += 6; 483 *ptr = 0; /* Zero hop count */ 484 485 /* Swap source and destination */ 486 memcpy(tmp, src_addr, ETH_ALEN); 487 memcpy(src_addr, dst_addr, ETH_ALEN); 488 memcpy(dst_addr, tmp, ETH_ALEN); 489 490 skb->pkt_type = PACKET_OUTGOING; 491 dn_rt_finish_output(skb, dst_addr, src_addr); 492 return NET_RX_SUCCESS; 493} 494 495/** 496 * dn_route_rx_packet - Try and find a route for an incoming packet 497 * @net: The applicable net namespace 498 * @sk: Socket packet transmitted on 499 * @skb: The packet to find a route for 500 * 501 * Returns: result of input function if route is found, error code otherwise 502 */ 503static int dn_route_rx_packet(struct net *net, struct sock *sk, struct sk_buff *skb) 504{ 505 struct dn_skb_cb *cb; 506 int err; 507 508 if ((err = dn_route_input(skb)) == 0) 509 return dst_input(skb); 510 511 cb = DN_SKB_CB(skb); 512 if (decnet_debug_level & 4) { 513 char *devname = skb->dev ? 
skb->dev->name : "???"; 514 515 printk(KERN_DEBUG 516 "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n", 517 (int)cb->rt_flags, devname, skb->len, 518 le16_to_cpu(cb->src), le16_to_cpu(cb->dst), 519 err, skb->pkt_type); 520 } 521 522 if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) { 523 switch (cb->rt_flags & DN_RT_PKT_MSK) { 524 case DN_RT_PKT_SHORT: 525 return dn_return_short(skb); 526 case DN_RT_PKT_LONG: 527 return dn_return_long(skb); 528 } 529 } 530 531 kfree_skb(skb); 532 return NET_RX_DROP; 533} 534 535static int dn_route_rx_long(struct sk_buff *skb) 536{ 537 struct dn_skb_cb *cb = DN_SKB_CB(skb); 538 unsigned char *ptr = skb->data; 539 540 if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */ 541 goto drop_it; 542 543 skb_pull(skb, 20); 544 skb_reset_transport_header(skb); 545 546 /* Destination info */ 547 ptr += 2; 548 cb->dst = dn_eth2dn(ptr); 549 if (memcmp(ptr, dn_hiord_addr, 4) != 0) 550 goto drop_it; 551 ptr += 6; 552 553 554 /* Source info */ 555 ptr += 2; 556 cb->src = dn_eth2dn(ptr); 557 if (memcmp(ptr, dn_hiord_addr, 4) != 0) 558 goto drop_it; 559 ptr += 6; 560 /* Other junk */ 561 ptr++; 562 cb->hops = *ptr++; /* Visit Count */ 563 564 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, 565 &init_net, NULL, skb, skb->dev, NULL, 566 dn_route_rx_packet); 567 568drop_it: 569 kfree_skb(skb); 570 return NET_RX_DROP; 571} 572 573 574 575static int dn_route_rx_short(struct sk_buff *skb) 576{ 577 struct dn_skb_cb *cb = DN_SKB_CB(skb); 578 unsigned char *ptr = skb->data; 579 580 if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */ 581 goto drop_it; 582 583 skb_pull(skb, 5); 584 skb_reset_transport_header(skb); 585 586 cb->dst = *(__le16 *)ptr; 587 ptr += 2; 588 cb->src = *(__le16 *)ptr; 589 ptr += 2; 590 cb->hops = *ptr & 0x3f; 591 592 return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, 593 &init_net, NULL, skb, skb->dev, NULL, 594 dn_route_rx_packet); 595 596drop_it: 597 kfree_skb(skb); 598 return NET_RX_DROP; 599} 600 601static int dn_route_discard(struct net *net, struct sock *sk, struct sk_buff *skb) 602{ 603 /* 604 * I know we drop the packet here, but thats considered success in 605 * this case 606 */ 607 kfree_skb(skb); 608 return NET_RX_SUCCESS; 609} 610 611static int dn_route_ptp_hello(struct net *net, struct sock *sk, struct sk_buff *skb) 612{ 613 dn_dev_hello(skb); 614 dn_neigh_pointopoint_hello(skb); 615 return NET_RX_SUCCESS; 616} 617 618int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) 619{ 620 struct dn_skb_cb *cb; 621 unsigned char flags = 0; 622 __u16 len = le16_to_cpu(*(__le16 *)skb->data); 623 struct dn_dev *dn = rcu_dereference(dev->dn_ptr); 624 unsigned char padlen = 0; 625 626 if (!net_eq(dev_net(dev), &init_net)) 627 goto dump_it; 628 629 if (dn == NULL) 630 goto dump_it; 631 632 if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) 633 goto out; 634 635 if (!pskb_may_pull(skb, 3)) 636 goto dump_it; 637 638 skb_pull(skb, 2); 639 640 if (len > skb->len) 641 goto dump_it; 642 643 skb_trim(skb, len); 644 645 flags = *skb->data; 646 647 cb = DN_SKB_CB(skb); 648 cb->stamp = jiffies; 649 cb->iif = dev->ifindex; 650 651 /* 652 * If we have padding, remove it. 
653 */ 654 if (flags & DN_RT_F_PF) { 655 padlen = flags & ~DN_RT_F_PF; 656 if (!pskb_may_pull(skb, padlen + 1)) 657 goto dump_it; 658 skb_pull(skb, padlen); 659 flags = *skb->data; 660 } 661 662 skb_reset_network_header(skb); 663 664 /* 665 * Weed out future version DECnet 666 */ 667 if (flags & DN_RT_F_VER) 668 goto dump_it; 669 670 cb->rt_flags = flags; 671 672 if (decnet_debug_level & 1) 673 printk(KERN_DEBUG 674 "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n", 675 (int)flags, dev->name, len, skb->len, 676 padlen); 677 678 if (flags & DN_RT_PKT_CNTL) { 679 if (unlikely(skb_linearize(skb))) 680 goto dump_it; 681 682 switch (flags & DN_RT_CNTL_MSK) { 683 case DN_RT_PKT_INIT: 684 dn_dev_init_pkt(skb); 685 break; 686 case DN_RT_PKT_VERI: 687 dn_dev_veri_pkt(skb); 688 break; 689 } 690 691 if (dn->parms.state != DN_DEV_S_RU) 692 goto dump_it; 693 694 switch (flags & DN_RT_CNTL_MSK) { 695 case DN_RT_PKT_HELO: 696 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, 697 &init_net, NULL, skb, skb->dev, NULL, 698 dn_route_ptp_hello); 699 700 case DN_RT_PKT_L1RT: 701 case DN_RT_PKT_L2RT: 702 return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE, 703 &init_net, NULL, skb, skb->dev, NULL, 704 dn_route_discard); 705 case DN_RT_PKT_ERTH: 706 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, 707 &init_net, NULL, skb, skb->dev, NULL, 708 dn_neigh_router_hello); 709 710 case DN_RT_PKT_EEDH: 711 return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO, 712 &init_net, NULL, skb, skb->dev, NULL, 713 dn_neigh_endnode_hello); 714 } 715 } else { 716 if (dn->parms.state != DN_DEV_S_RU) 717 goto dump_it; 718 719 skb_pull(skb, 1); /* Pull flags */ 720 721 switch (flags & DN_RT_PKT_MSK) { 722 case DN_RT_PKT_LONG: 723 return dn_route_rx_long(skb); 724 case DN_RT_PKT_SHORT: 725 return dn_route_rx_short(skb); 726 } 727 } 728 729dump_it: 730 kfree_skb(skb); 731out: 732 return NET_RX_DROP; 733} 734 735static int dn_output(struct net *net, struct sock *sk, struct sk_buff *skb) 736{ 737 struct dst_entry *dst = skb_dst(skb); 738 struct dn_route *rt = (struct dn_route *)dst; 739 struct net_device *dev = dst->dev; 740 struct dn_skb_cb *cb = DN_SKB_CB(skb); 741 742 int err = -EINVAL; 743 744 if (rt->n == NULL) 745 goto error; 746 747 skb->dev = dev; 748 749 cb->src = rt->rt_saddr; 750 cb->dst = rt->rt_daddr; 751 752 /* 753 * Always set the Intra-Ethernet bit on all outgoing packets 754 * originated on this node. Only valid flag from upper layers 755 * is return-to-sender-requested. Set hop count to 0 too. 756 */ 757 cb->rt_flags &= ~DN_RT_F_RQR; 758 cb->rt_flags |= DN_RT_F_IE; 759 cb->hops = 0; 760 761 return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, 762 &init_net, sk, skb, NULL, dev, 763 dn_to_neigh_output); 764 765error: 766 net_dbg_ratelimited("dn_output: This should not happen\n"); 767 768 kfree_skb(skb); 769 770 return err; 771} 772 773static int dn_forward(struct sk_buff *skb) 774{ 775 struct dn_skb_cb *cb = DN_SKB_CB(skb); 776 struct dst_entry *dst = skb_dst(skb); 777 struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr); 778 struct dn_route *rt; 779 int header_len; 780 struct net_device *dev = skb->dev; 781 782 if (skb->pkt_type != PACKET_HOST) 783 goto drop; 784 785 /* Ensure that we have enough space for headers */ 786 rt = (struct dn_route *)skb_dst(skb); 787 header_len = dn_db->use_long ? 21 : 6; 788 if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len)) 789 goto drop; 790 791 /* 792 * Hop count exceeded. 
793 */ 794 if (++cb->hops > 30) 795 goto drop; 796 797 skb->dev = rt->dst.dev; 798 799 /* 800 * If packet goes out same interface it came in on, then set 801 * the Intra-Ethernet bit. This has no effect for short 802 * packets, so we don't need to test for them here. 803 */ 804 cb->rt_flags &= ~DN_RT_F_IE; 805 if (rt->rt_flags & RTCF_DOREDIRECT) 806 cb->rt_flags |= DN_RT_F_IE; 807 808 return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, 809 &init_net, NULL, skb, dev, skb->dev, 810 dn_to_neigh_output); 811 812drop: 813 kfree_skb(skb); 814 return NET_RX_DROP; 815} 816 817/* 818 * Used to catch bugs. This should never normally get 819 * called. 820 */ 821static int dn_rt_bug_out(struct net *net, struct sock *sk, struct sk_buff *skb) 822{ 823 struct dn_skb_cb *cb = DN_SKB_CB(skb); 824 825 net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n", 826 le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); 827 828 kfree_skb(skb); 829 830 return NET_RX_DROP; 831} 832 833static int dn_rt_bug(struct sk_buff *skb) 834{ 835 struct dn_skb_cb *cb = DN_SKB_CB(skb); 836 837 net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n", 838 le16_to_cpu(cb->src), le16_to_cpu(cb->dst)); 839 840 kfree_skb(skb); 841 842 return NET_RX_DROP; 843} 844 845static unsigned int dn_dst_default_advmss(const struct dst_entry *dst) 846{ 847 return dn_mss_from_pmtu(dst->dev, dst_mtu(dst)); 848} 849 850static unsigned int dn_dst_mtu(const struct dst_entry *dst) 851{ 852 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); 853 854 return mtu ? : dst->dev->mtu; 855} 856 857static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst, 858 struct sk_buff *skb, 859 const void *daddr) 860{ 861 return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev); 862} 863 864static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res) 865{ 866 struct dn_fib_info *fi = res->fi; 867 struct net_device *dev = rt->dst.dev; 868 unsigned int mss_metric; 869 struct neighbour *n; 870 871 if (fi) { 872 if (DN_FIB_RES_GW(*res) && 873 DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK) 874 rt->rt_gateway = DN_FIB_RES_GW(*res); 875 dst_init_metrics(&rt->dst, fi->fib_metrics, true); 876 } 877 rt->rt_type = res->type; 878 879 if (dev != NULL && rt->n == NULL) { 880 n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev); 881 if (IS_ERR(n)) 882 return PTR_ERR(n); 883 rt->n = n; 884 } 885 886 if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu) 887 dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu); 888 mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS); 889 if (mss_metric) { 890 unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst)); 891 if (mss_metric > mss) 892 dst_metric_set(&rt->dst, RTAX_ADVMSS, mss); 893 } 894 return 0; 895} 896 897static inline int dn_match_addr(__le16 addr1, __le16 addr2) 898{ 899 __u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2); 900 int match = 16; 901 while(tmp) { 902 tmp >>= 1; 903 match--; 904 } 905 return match; 906} 907 908static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope) 909{ 910 __le16 saddr = 0; 911 struct dn_dev *dn_db; 912 struct dn_ifaddr *ifa; 913 int best_match = 0; 914 int ret; 915 916 rcu_read_lock(); 917 dn_db = rcu_dereference(dev->dn_ptr); 918 for (ifa = rcu_dereference(dn_db->ifa_list); 919 ifa != NULL; 920 ifa = rcu_dereference(ifa->ifa_next)) { 921 if (ifa->ifa_scope > scope) 922 continue; 923 if (!daddr) { 924 saddr = ifa->ifa_local; 925 break; 926 } 927 ret = dn_match_addr(daddr, ifa->ifa_local); 928 if (ret > best_match) 929 saddr = ifa->ifa_local; 
930 if (best_match == 0) 931 saddr = ifa->ifa_local; 932 } 933 rcu_read_unlock(); 934 935 return saddr; 936} 937 938static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res) 939{ 940 return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope); 941} 942 943static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res) 944{ 945 __le16 mask = dnet_make_mask(res->prefixlen); 946 return (daddr&~mask)|res->fi->fib_nh->nh_gw; 947} 948 949static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard) 950{ 951 struct flowidn fld = { 952 .daddr = oldflp->daddr, 953 .saddr = oldflp->saddr, 954 .flowidn_scope = RT_SCOPE_UNIVERSE, 955 .flowidn_mark = oldflp->flowidn_mark, 956 .flowidn_iif = LOOPBACK_IFINDEX, 957 .flowidn_oif = oldflp->flowidn_oif, 958 }; 959 struct dn_route *rt = NULL; 960 struct net_device *dev_out = NULL, *dev; 961 struct neighbour *neigh = NULL; 962 unsigned int hash; 963 unsigned int flags = 0; 964 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST }; 965 int err; 966 int free_res = 0; 967 __le16 gateway = 0; 968 969 if (decnet_debug_level & 16) 970 printk(KERN_DEBUG 971 "dn_route_output_slow: dst=%04x src=%04x mark=%d" 972 " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr), 973 le16_to_cpu(oldflp->saddr), 974 oldflp->flowidn_mark, LOOPBACK_IFINDEX, 975 oldflp->flowidn_oif); 976 977 /* If we have an output interface, verify its a DECnet device */ 978 if (oldflp->flowidn_oif) { 979 dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif); 980 err = -ENODEV; 981 if (dev_out && dev_out->dn_ptr == NULL) { 982 dev_put(dev_out); 983 dev_out = NULL; 984 } 985 if (dev_out == NULL) 986 goto out; 987 } 988 989 /* If we have a source address, verify that its a local address */ 990 if (oldflp->saddr) { 991 err = -EADDRNOTAVAIL; 992 993 if (dev_out) { 994 if (dn_dev_islocal(dev_out, oldflp->saddr)) 995 goto source_ok; 996 dev_put(dev_out); 997 goto out; 998 } 999 rcu_read_lock(); 1000 for_each_netdev_rcu(&init_net, dev) { 1001 if (!dev->dn_ptr) 1002 continue; 1003 if (!dn_dev_islocal(dev, oldflp->saddr)) 1004 continue; 1005 if ((dev->flags & IFF_LOOPBACK) && 1006 oldflp->daddr && 1007 !dn_dev_islocal(dev, oldflp->daddr)) 1008 continue; 1009 1010 dev_out = dev; 1011 break; 1012 } 1013 rcu_read_unlock(); 1014 if (dev_out == NULL) 1015 goto out; 1016 dev_hold(dev_out); 1017source_ok: 1018 ; 1019 } 1020 1021 /* No destination? Assume its local */ 1022 if (!fld.daddr) { 1023 fld.daddr = fld.saddr; 1024 1025 if (dev_out) 1026 dev_put(dev_out); 1027 err = -EINVAL; 1028 dev_out = init_net.loopback_dev; 1029 if (!dev_out->dn_ptr) 1030 goto out; 1031 err = -EADDRNOTAVAIL; 1032 dev_hold(dev_out); 1033 if (!fld.daddr) { 1034 fld.daddr = 1035 fld.saddr = dnet_select_source(dev_out, 0, 1036 RT_SCOPE_HOST); 1037 if (!fld.daddr) 1038 goto out; 1039 } 1040 fld.flowidn_oif = LOOPBACK_IFINDEX; 1041 res.type = RTN_LOCAL; 1042 goto make_route; 1043 } 1044 1045 if (decnet_debug_level & 16) 1046 printk(KERN_DEBUG 1047 "dn_route_output_slow: initial checks complete." 1048 " dst=%04x src=%04x oif=%d try_hard=%d\n", 1049 le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr), 1050 fld.flowidn_oif, try_hard); 1051 1052 /* 1053 * N.B. If the kernel is compiled without router support then 1054 * dn_fib_lookup() will evaluate to non-zero so this if () block 1055 * will always be executed. 
1056 */ 1057 err = -ESRCH; 1058 if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) { 1059 struct dn_dev *dn_db; 1060 if (err != -ESRCH) 1061 goto out; 1062 /* 1063 * Here the fallback is basically the standard algorithm for 1064 * routing in endnodes which is described in the DECnet routing 1065 * docs 1066 * 1067 * If we are not trying hard, look in neighbour cache. 1068 * The result is tested to ensure that if a specific output 1069 * device/source address was requested, then we honour that 1070 * here 1071 */ 1072 if (!try_hard) { 1073 neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr); 1074 if (neigh) { 1075 if ((oldflp->flowidn_oif && 1076 (neigh->dev->ifindex != oldflp->flowidn_oif)) || 1077 (oldflp->saddr && 1078 (!dn_dev_islocal(neigh->dev, 1079 oldflp->saddr)))) { 1080 neigh_release(neigh); 1081 neigh = NULL; 1082 } else { 1083 if (dev_out) 1084 dev_put(dev_out); 1085 if (dn_dev_islocal(neigh->dev, fld.daddr)) { 1086 dev_out = init_net.loopback_dev; 1087 res.type = RTN_LOCAL; 1088 } else { 1089 dev_out = neigh->dev; 1090 } 1091 dev_hold(dev_out); 1092 goto select_source; 1093 } 1094 } 1095 } 1096 1097 /* Not there? Perhaps its a local address */ 1098 if (dev_out == NULL) 1099 dev_out = dn_dev_get_default(); 1100 err = -ENODEV; 1101 if (dev_out == NULL) 1102 goto out; 1103 dn_db = rcu_dereference_raw(dev_out->dn_ptr); 1104 if (!dn_db) 1105 goto e_inval; 1106 /* Possible improvement - check all devices for local addr */ 1107 if (dn_dev_islocal(dev_out, fld.daddr)) { 1108 dev_put(dev_out); 1109 dev_out = init_net.loopback_dev; 1110 dev_hold(dev_out); 1111 res.type = RTN_LOCAL; 1112 goto select_source; 1113 } 1114 /* Not local either.... try sending it to the default router */ 1115 neigh = neigh_clone(dn_db->router); 1116 BUG_ON(neigh && neigh->dev != dev_out); 1117 1118 /* Ok then, we assume its directly connected and move on */ 1119select_source: 1120 if (neigh) 1121 gateway = ((struct dn_neigh *)neigh)->addr; 1122 if (gateway == 0) 1123 gateway = fld.daddr; 1124 if (fld.saddr == 0) { 1125 fld.saddr = dnet_select_source(dev_out, gateway, 1126 res.type == RTN_LOCAL ? 1127 RT_SCOPE_HOST : 1128 RT_SCOPE_LINK); 1129 if (fld.saddr == 0 && res.type != RTN_LOCAL) 1130 goto e_addr; 1131 } 1132 fld.flowidn_oif = dev_out->ifindex; 1133 goto make_route; 1134 } 1135 free_res = 1; 1136 1137 if (res.type == RTN_NAT) 1138 goto e_inval; 1139 1140 if (res.type == RTN_LOCAL) { 1141 if (!fld.saddr) 1142 fld.saddr = fld.daddr; 1143 if (dev_out) 1144 dev_put(dev_out); 1145 dev_out = init_net.loopback_dev; 1146 dev_hold(dev_out); 1147 if (!dev_out->dn_ptr) 1148 goto e_inval; 1149 fld.flowidn_oif = dev_out->ifindex; 1150 if (res.fi) 1151 dn_fib_info_put(res.fi); 1152 res.fi = NULL; 1153 goto make_route; 1154 } 1155 1156 if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0) 1157 dn_fib_select_multipath(&fld, &res); 1158 1159 /* 1160 * We could add some logic to deal with default routes here and 1161 * get rid of some of the special casing above. 
1162 */ 1163 1164 if (!fld.saddr) 1165 fld.saddr = DN_FIB_RES_PREFSRC(res); 1166 1167 if (dev_out) 1168 dev_put(dev_out); 1169 dev_out = DN_FIB_RES_DEV(res); 1170 dev_hold(dev_out); 1171 fld.flowidn_oif = dev_out->ifindex; 1172 gateway = DN_FIB_RES_GW(res); 1173 1174make_route: 1175 if (dev_out->flags & IFF_LOOPBACK) 1176 flags |= RTCF_LOCAL; 1177 1178 rt = dst_alloc(&dn_dst_ops, dev_out, 0, DST_OBSOLETE_NONE, 0); 1179 if (rt == NULL) 1180 goto e_nobufs; 1181 1182 rt->dn_next = NULL; 1183 memset(&rt->fld, 0, sizeof(rt->fld)); 1184 rt->fld.saddr = oldflp->saddr; 1185 rt->fld.daddr = oldflp->daddr; 1186 rt->fld.flowidn_oif = oldflp->flowidn_oif; 1187 rt->fld.flowidn_iif = 0; 1188 rt->fld.flowidn_mark = oldflp->flowidn_mark; 1189 1190 rt->rt_saddr = fld.saddr; 1191 rt->rt_daddr = fld.daddr; 1192 rt->rt_gateway = gateway ? gateway : fld.daddr; 1193 rt->rt_local_src = fld.saddr; 1194 1195 rt->rt_dst_map = fld.daddr; 1196 rt->rt_src_map = fld.saddr; 1197 1198 rt->n = neigh; 1199 neigh = NULL; 1200 1201 rt->dst.lastuse = jiffies; 1202 rt->dst.output = dn_output; 1203 rt->dst.input = dn_rt_bug; 1204 rt->rt_flags = flags; 1205 if (flags & RTCF_LOCAL) 1206 rt->dst.input = dn_nsp_rx; 1207 1208 err = dn_rt_set_next_hop(rt, &res); 1209 if (err) 1210 goto e_neighbour; 1211 1212 hash = dn_hash(rt->fld.saddr, rt->fld.daddr); 1213 /* dn_insert_route() increments dst->__refcnt */ 1214 dn_insert_route(rt, hash, (struct dn_route **)pprt); 1215 1216done: 1217 if (neigh) 1218 neigh_release(neigh); 1219 if (free_res) 1220 dn_fib_res_put(&res); 1221 if (dev_out) 1222 dev_put(dev_out); 1223out: 1224 return err; 1225 1226e_addr: 1227 err = -EADDRNOTAVAIL; 1228 goto done; 1229e_inval: 1230 err = -EINVAL; 1231 goto done; 1232e_nobufs: 1233 err = -ENOBUFS; 1234 goto done; 1235e_neighbour: 1236 dst_release_immediate(&rt->dst); 1237 goto e_nobufs; 1238} 1239 1240 1241/* 1242 * N.B. The flags may be moved into the flowi at some future stage. 
1243 */ 1244static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags) 1245{ 1246 unsigned int hash = dn_hash(flp->saddr, flp->daddr); 1247 struct dn_route *rt = NULL; 1248 1249 if (!(flags & MSG_TRYHARD)) { 1250 rcu_read_lock_bh(); 1251 for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt; 1252 rt = rcu_dereference_bh(rt->dn_next)) { 1253 if ((flp->daddr == rt->fld.daddr) && 1254 (flp->saddr == rt->fld.saddr) && 1255 (flp->flowidn_mark == rt->fld.flowidn_mark) && 1256 dn_is_output_route(rt) && 1257 (rt->fld.flowidn_oif == flp->flowidn_oif)) { 1258 dst_hold_and_use(&rt->dst, jiffies); 1259 rcu_read_unlock_bh(); 1260 *pprt = &rt->dst; 1261 return 0; 1262 } 1263 } 1264 rcu_read_unlock_bh(); 1265 } 1266 1267 return dn_route_output_slow(pprt, flp, flags); 1268} 1269 1270static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags) 1271{ 1272 int err; 1273 1274 err = __dn_route_output_key(pprt, flp, flags); 1275 if (err == 0 && flp->flowidn_proto) { 1276 *pprt = xfrm_lookup(&init_net, *pprt, 1277 flowidn_to_flowi(flp), NULL, 0); 1278 if (IS_ERR(*pprt)) { 1279 err = PTR_ERR(*pprt); 1280 *pprt = NULL; 1281 } 1282 } 1283 return err; 1284} 1285 1286int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *fl, struct sock *sk, int flags) 1287{ 1288 int err; 1289 1290 err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD); 1291 if (err == 0 && fl->flowidn_proto) { 1292 *pprt = xfrm_lookup(&init_net, *pprt, 1293 flowidn_to_flowi(fl), sk, 0); 1294 if (IS_ERR(*pprt)) { 1295 err = PTR_ERR(*pprt); 1296 *pprt = NULL; 1297 } 1298 } 1299 return err; 1300} 1301 1302static int dn_route_input_slow(struct sk_buff *skb) 1303{ 1304 struct dn_route *rt = NULL; 1305 struct dn_skb_cb *cb = DN_SKB_CB(skb); 1306 struct net_device *in_dev = skb->dev; 1307 struct net_device *out_dev = NULL; 1308 struct dn_dev *dn_db; 1309 struct neighbour *neigh = NULL; 1310 unsigned int hash; 1311 int flags = 0; 1312 __le16 gateway = 0; 1313 __le16 local_src = 0; 1314 struct flowidn fld = { 1315 .daddr = cb->dst, 1316 .saddr = cb->src, 1317 .flowidn_scope = RT_SCOPE_UNIVERSE, 1318 .flowidn_mark = skb->mark, 1319 .flowidn_iif = skb->dev->ifindex, 1320 }; 1321 struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE }; 1322 int err = -EINVAL; 1323 int free_res = 0; 1324 1325 dev_hold(in_dev); 1326 1327 if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL) 1328 goto out; 1329 1330 /* Zero source addresses are not allowed */ 1331 if (fld.saddr == 0) 1332 goto out; 1333 1334 /* 1335 * In this case we've just received a packet from a source 1336 * outside ourselves pretending to come from us. We don't 1337 * allow it any further to prevent routing loops, spoofing and 1338 * other nasties. Loopback packets already have the dst attached 1339 * so this only affects packets which have originated elsewhere. 1340 */ 1341 err = -ENOTUNIQ; 1342 if (dn_dev_islocal(in_dev, cb->src)) 1343 goto out; 1344 1345 err = dn_fib_lookup(&fld, &res); 1346 if (err) { 1347 if (err != -ESRCH) 1348 goto out; 1349 /* 1350 * Is the destination us ? 
1351 */ 1352 if (!dn_dev_islocal(in_dev, cb->dst)) 1353 goto e_inval; 1354 1355 res.type = RTN_LOCAL; 1356 } else { 1357 __le16 src_map = fld.saddr; 1358 free_res = 1; 1359 1360 out_dev = DN_FIB_RES_DEV(res); 1361 if (out_dev == NULL) { 1362 net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n"); 1363 goto e_inval; 1364 } 1365 dev_hold(out_dev); 1366 1367 if (res.r) 1368 src_map = fld.saddr; /* no NAT support for now */ 1369 1370 gateway = DN_FIB_RES_GW(res); 1371 if (res.type == RTN_NAT) { 1372 fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res); 1373 dn_fib_res_put(&res); 1374 free_res = 0; 1375 if (dn_fib_lookup(&fld, &res)) 1376 goto e_inval; 1377 free_res = 1; 1378 if (res.type != RTN_UNICAST) 1379 goto e_inval; 1380 flags |= RTCF_DNAT; 1381 gateway = fld.daddr; 1382 } 1383 fld.saddr = src_map; 1384 } 1385 1386 switch(res.type) { 1387 case RTN_UNICAST: 1388 /* 1389 * Forwarding check here, we only check for forwarding 1390 * being turned off, if you want to only forward intra 1391 * area, its up to you to set the routing tables up 1392 * correctly. 1393 */ 1394 if (dn_db->parms.forwarding == 0) 1395 goto e_inval; 1396 1397 if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0) 1398 dn_fib_select_multipath(&fld, &res); 1399 1400 /* 1401 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT 1402 * flag as a hint to set the intra-ethernet bit when 1403 * forwarding. If we've got NAT in operation, we don't do 1404 * this optimisation. 1405 */ 1406 if (out_dev == in_dev && !(flags & RTCF_NAT)) 1407 flags |= RTCF_DOREDIRECT; 1408 1409 local_src = DN_FIB_RES_PREFSRC(res); 1410 1411 case RTN_BLACKHOLE: 1412 case RTN_UNREACHABLE: 1413 break; 1414 case RTN_LOCAL: 1415 flags |= RTCF_LOCAL; 1416 fld.saddr = cb->dst; 1417 fld.daddr = cb->src; 1418 1419 /* Routing tables gave us a gateway */ 1420 if (gateway) 1421 goto make_route; 1422 1423 /* Packet was intra-ethernet, so we know its on-link */ 1424 if (cb->rt_flags & DN_RT_F_IE) { 1425 gateway = cb->src; 1426 goto make_route; 1427 } 1428 1429 /* Use the default router if there is one */ 1430 neigh = neigh_clone(dn_db->router); 1431 if (neigh) { 1432 gateway = ((struct dn_neigh *)neigh)->addr; 1433 goto make_route; 1434 } 1435 1436 /* Close eyes and pray */ 1437 gateway = cb->src; 1438 goto make_route; 1439 default: 1440 goto e_inval; 1441 } 1442 1443make_route: 1444 rt = dst_alloc(&dn_dst_ops, out_dev, 1, DST_OBSOLETE_NONE, 0); 1445 if (rt == NULL) 1446 goto e_nobufs; 1447 1448 rt->dn_next = NULL; 1449 memset(&rt->fld, 0, sizeof(rt->fld)); 1450 rt->rt_saddr = fld.saddr; 1451 rt->rt_daddr = fld.daddr; 1452 rt->rt_gateway = fld.daddr; 1453 if (gateway) 1454 rt->rt_gateway = gateway; 1455 rt->rt_local_src = local_src ? 
local_src : rt->rt_saddr; 1456 1457 rt->rt_dst_map = fld.daddr; 1458 rt->rt_src_map = fld.saddr; 1459 1460 rt->fld.saddr = cb->src; 1461 rt->fld.daddr = cb->dst; 1462 rt->fld.flowidn_oif = 0; 1463 rt->fld.flowidn_iif = in_dev->ifindex; 1464 rt->fld.flowidn_mark = fld.flowidn_mark; 1465 1466 rt->n = neigh; 1467 rt->dst.lastuse = jiffies; 1468 rt->dst.output = dn_rt_bug_out; 1469 switch (res.type) { 1470 case RTN_UNICAST: 1471 rt->dst.input = dn_forward; 1472 break; 1473 case RTN_LOCAL: 1474 rt->dst.output = dn_output; 1475 rt->dst.input = dn_nsp_rx; 1476 rt->dst.dev = in_dev; 1477 flags |= RTCF_LOCAL; 1478 break; 1479 default: 1480 case RTN_UNREACHABLE: 1481 case RTN_BLACKHOLE: 1482 rt->dst.input = dst_discard; 1483 } 1484 rt->rt_flags = flags; 1485 1486 err = dn_rt_set_next_hop(rt, &res); 1487 if (err) 1488 goto e_neighbour; 1489 1490 hash = dn_hash(rt->fld.saddr, rt->fld.daddr); 1491 /* dn_insert_route() increments dst->__refcnt */ 1492 dn_insert_route(rt, hash, &rt); 1493 skb_dst_set(skb, &rt->dst); 1494 1495done: 1496 if (neigh) 1497 neigh_release(neigh); 1498 if (free_res) 1499 dn_fib_res_put(&res); 1500 dev_put(in_dev); 1501 if (out_dev) 1502 dev_put(out_dev); 1503out: 1504 return err; 1505 1506e_inval: 1507 err = -EINVAL; 1508 goto done; 1509 1510e_nobufs: 1511 err = -ENOBUFS; 1512 goto done; 1513 1514e_neighbour: 1515 dst_release_immediate(&rt->dst); 1516 goto done; 1517} 1518 1519static int dn_route_input(struct sk_buff *skb) 1520{ 1521 struct dn_route *rt; 1522 struct dn_skb_cb *cb = DN_SKB_CB(skb); 1523 unsigned int hash = dn_hash(cb->src, cb->dst); 1524 1525 if (skb_dst(skb)) 1526 return 0; 1527 1528 rcu_read_lock(); 1529 for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL; 1530 rt = rcu_dereference(rt->dn_next)) { 1531 if ((rt->fld.saddr == cb->src) && 1532 (rt->fld.daddr == cb->dst) && 1533 (rt->fld.flowidn_oif == 0) && 1534 (rt->fld.flowidn_mark == skb->mark) && 1535 (rt->fld.flowidn_iif == cb->iif)) { 1536 dst_hold_and_use(&rt->dst, jiffies); 1537 rcu_read_unlock(); 1538 skb_dst_set(skb, (struct dst_entry *)rt); 1539 return 0; 1540 } 1541 } 1542 rcu_read_unlock(); 1543 1544 return dn_route_input_slow(skb); 1545} 1546 1547static int dn_rt_fill_info(struct sk_buff *skb, u32 portid, u32 seq, 1548 int event, int nowait, unsigned int flags) 1549{ 1550 struct dn_route *rt = (struct dn_route *)skb_dst(skb); 1551 struct rtmsg *r; 1552 struct nlmsghdr *nlh; 1553 long expires; 1554 1555 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags); 1556 if (!nlh) 1557 return -EMSGSIZE; 1558 1559 r = nlmsg_data(nlh); 1560 r->rtm_family = AF_DECnet; 1561 r->rtm_dst_len = 16; 1562 r->rtm_src_len = 0; 1563 r->rtm_tos = 0; 1564 r->rtm_table = RT_TABLE_MAIN; 1565 r->rtm_type = rt->rt_type; 1566 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED; 1567 r->rtm_scope = RT_SCOPE_UNIVERSE; 1568 r->rtm_protocol = RTPROT_UNSPEC; 1569 1570 if (rt->rt_flags & RTCF_NOTIFY) 1571 r->rtm_flags |= RTM_F_NOTIFY; 1572 1573 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 || 1574 nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0) 1575 goto errout; 1576 1577 if (rt->fld.saddr) { 1578 r->rtm_src_len = 16; 1579 if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0) 1580 goto errout; 1581 } 1582 if (rt->dst.dev && 1583 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0) 1584 goto errout; 1585 1586 /* 1587 * Note to self - change this if input routes reverse direction when 1588 * they deal only with inputs and not with replies like they do 1589 * currently. 
1590 */ 1591 if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0) 1592 goto errout; 1593 1594 if (rt->rt_daddr != rt->rt_gateway && 1595 nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0) 1596 goto errout; 1597 1598 if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0) 1599 goto errout; 1600 1601 expires = rt->dst.expires ? rt->dst.expires - jiffies : 0; 1602 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, 1603 rt->dst.error) < 0) 1604 goto errout; 1605 1606 if (dn_is_input_route(rt) && 1607 nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0) 1608 goto errout; 1609 1610 nlmsg_end(skb, nlh); 1611 return 0; 1612 1613errout: 1614 nlmsg_cancel(skb, nlh); 1615 return -EMSGSIZE; 1616} 1617 1618const struct nla_policy rtm_dn_policy[RTA_MAX + 1] = { 1619 [RTA_DST] = { .type = NLA_U16 }, 1620 [RTA_SRC] = { .type = NLA_U16 }, 1621 [RTA_IIF] = { .type = NLA_U32 }, 1622 [RTA_OIF] = { .type = NLA_U32 }, 1623 [RTA_GATEWAY] = { .type = NLA_U16 }, 1624 [RTA_PRIORITY] = { .type = NLA_U32 }, 1625 [RTA_PREFSRC] = { .type = NLA_U16 }, 1626 [RTA_METRICS] = { .type = NLA_NESTED }, 1627 [RTA_MULTIPATH] = { .type = NLA_NESTED }, 1628 [RTA_TABLE] = { .type = NLA_U32 }, 1629 [RTA_MARK] = { .type = NLA_U32 }, 1630}; 1631 1632/* 1633 * This is called by both endnodes and routers now. 1634 */ 1635static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, 1636 struct netlink_ext_ack *extack) 1637{ 1638 struct net *net = sock_net(in_skb->sk); 1639 struct rtmsg *rtm = nlmsg_data(nlh); 1640 struct dn_route *rt = NULL; 1641 struct dn_skb_cb *cb; 1642 int err; 1643 struct sk_buff *skb; 1644 struct flowidn fld; 1645 struct nlattr *tb[RTA_MAX+1]; 1646 1647 if (!net_eq(net, &init_net)) 1648 return -EINVAL; 1649 1650 err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX, 1651 rtm_dn_policy, extack); 1652 if (err < 0) 1653 return err; 1654 1655 memset(&fld, 0, sizeof(fld)); 1656 fld.flowidn_proto = DNPROTO_NSP; 1657 1658 skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); 1659 if (skb == NULL) 1660 return -ENOBUFS; 1661 skb_reset_mac_header(skb); 1662 cb = DN_SKB_CB(skb); 1663 1664 if (tb[RTA_SRC]) 1665 fld.saddr = nla_get_le16(tb[RTA_SRC]); 1666 1667 if (tb[RTA_DST]) 1668 fld.daddr = nla_get_le16(tb[RTA_DST]); 1669 1670 if (tb[RTA_IIF]) 1671 fld.flowidn_iif = nla_get_u32(tb[RTA_IIF]); 1672 1673 if (fld.flowidn_iif) { 1674 struct net_device *dev; 1675 dev = __dev_get_by_index(&init_net, fld.flowidn_iif); 1676 if (!dev || !dev->dn_ptr) { 1677 kfree_skb(skb); 1678 return -ENODEV; 1679 } 1680 skb->protocol = htons(ETH_P_DNA_RT); 1681 skb->dev = dev; 1682 cb->src = fld.saddr; 1683 cb->dst = fld.daddr; 1684 local_bh_disable(); 1685 err = dn_route_input(skb); 1686 local_bh_enable(); 1687 memset(cb, 0, sizeof(struct dn_skb_cb)); 1688 rt = (struct dn_route *)skb_dst(skb); 1689 if (!err && -rt->dst.error) 1690 err = rt->dst.error; 1691 } else { 1692 if (tb[RTA_OIF]) 1693 fld.flowidn_oif = nla_get_u32(tb[RTA_OIF]); 1694 1695 err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0); 1696 } 1697 1698 skb->dev = NULL; 1699 if (err) 1700 goto out_free; 1701 skb_dst_set(skb, &rt->dst); 1702 if (rtm->rtm_flags & RTM_F_NOTIFY) 1703 rt->rt_flags |= RTCF_NOTIFY; 1704 1705 err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0); 1706 if (err < 0) { 1707 err = -EMSGSIZE; 1708 goto out_free; 1709 } 1710 1711 return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).portid); 1712 1713out_free: 1714 kfree_skb(skb); 1715 return err; 1716} 1717 1718/* 1719 * For routers, this is 
called from dn_fib_dump, but for endnodes its 1720 * called directly from the rtnetlink dispatch table. 1721 */ 1722int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb) 1723{ 1724 struct net *net = sock_net(skb->sk); 1725 struct dn_route *rt; 1726 int h, s_h; 1727 int idx, s_idx; 1728 struct rtmsg *rtm; 1729 1730 if (!net_eq(net, &init_net)) 1731 return 0; 1732 1733 if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg)) 1734 return -EINVAL; 1735 1736 rtm = nlmsg_data(cb->nlh); 1737 if (!(rtm->rtm_flags & RTM_F_CLONED)) 1738 return 0; 1739 1740 s_h = cb->args[0]; 1741 s_idx = idx = cb->args[1]; 1742 for(h = 0; h <= dn_rt_hash_mask; h++) { 1743 if (h < s_h) 1744 continue; 1745 if (h > s_h) 1746 s_idx = 0; 1747 rcu_read_lock_bh(); 1748 for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0; 1749 rt; 1750 rt = rcu_dereference_bh(rt->dn_next), idx++) { 1751 if (idx < s_idx) 1752 continue; 1753 skb_dst_set(skb, dst_clone(&rt->dst)); 1754 if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).portid, 1755 cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1756 1, NLM_F_MULTI) < 0) { 1757 skb_dst_drop(skb); 1758 rcu_read_unlock_bh(); 1759 goto done; 1760 } 1761 skb_dst_drop(skb); 1762 } 1763 rcu_read_unlock_bh(); 1764 } 1765 1766done: 1767 cb->args[0] = h; 1768 cb->args[1] = idx; 1769 return skb->len; 1770} 1771 1772#ifdef CONFIG_PROC_FS 1773struct dn_rt_cache_iter_state { 1774 int bucket; 1775}; 1776 1777static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq) 1778{ 1779 struct dn_route *rt = NULL; 1780 struct dn_rt_cache_iter_state *s = seq->private; 1781 1782 for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) { 1783 rcu_read_lock_bh(); 1784 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain); 1785 if (rt) 1786 break; 1787 rcu_read_unlock_bh(); 1788 } 1789 return rt; 1790} 1791 1792static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt) 1793{ 1794 struct dn_rt_cache_iter_state *s = seq->private; 1795 1796 rt = rcu_dereference_bh(rt->dn_next); 1797 while (!rt) { 1798 rcu_read_unlock_bh(); 1799 if (--s->bucket < 0) 1800 break; 1801 rcu_read_lock_bh(); 1802 rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain); 1803 } 1804 return rt; 1805} 1806 1807static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos) 1808{ 1809 struct dn_route *rt = dn_rt_cache_get_first(seq); 1810 1811 if (rt) { 1812 while(*pos && (rt = dn_rt_cache_get_next(seq, rt))) 1813 --*pos; 1814 } 1815 return *pos ? NULL : rt; 1816} 1817 1818static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos) 1819{ 1820 struct dn_route *rt = dn_rt_cache_get_next(seq, v); 1821 ++*pos; 1822 return rt; 1823} 1824 1825static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v) 1826{ 1827 if (v) 1828 rcu_read_unlock_bh(); 1829} 1830 1831static int dn_rt_cache_seq_show(struct seq_file *seq, void *v) 1832{ 1833 struct dn_route *rt = v; 1834 char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN]; 1835 1836 seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n", 1837 rt->dst.dev ? 
rt->dst.dev->name : "*", 1838 dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1), 1839 dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2), 1840 atomic_read(&rt->dst.__refcnt), 1841 rt->dst.__use, 0); 1842 return 0; 1843} 1844 1845static const struct seq_operations dn_rt_cache_seq_ops = { 1846 .start = dn_rt_cache_seq_start, 1847 .next = dn_rt_cache_seq_next, 1848 .stop = dn_rt_cache_seq_stop, 1849 .show = dn_rt_cache_seq_show, 1850}; 1851#endif /* CONFIG_PROC_FS */ 1852 1853void __init dn_route_init(void) 1854{ 1855 int i, goal, order; 1856 1857 dn_dst_ops.kmem_cachep = 1858 kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0, 1859 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 1860 dst_entries_init(&dn_dst_ops); 1861 timer_setup(&dn_route_timer, dn_dst_check_expire, 0); 1862 dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ; 1863 add_timer(&dn_route_timer); 1864 1865 goal = totalram_pages() >> (26 - PAGE_SHIFT); 1866 1867 for(order = 0; (1UL << order) < goal; order++) 1868 /* NOTHING */; 1869 1870 /* 1871 * Only want 1024 entries max, since the table is very, very unlikely 1872 * to be larger than that. 1873 */ 1874 while(order && ((((1UL << order) * PAGE_SIZE) / 1875 sizeof(struct dn_rt_hash_bucket)) >= 2048)) 1876 order--; 1877 1878 do { 1879 dn_rt_hash_mask = (1UL << order) * PAGE_SIZE / 1880 sizeof(struct dn_rt_hash_bucket); 1881 while(dn_rt_hash_mask & (dn_rt_hash_mask - 1)) 1882 dn_rt_hash_mask--; 1883 dn_rt_hash_table = (struct dn_rt_hash_bucket *) 1884 __get_free_pages(GFP_ATOMIC, order); 1885 } while (dn_rt_hash_table == NULL && --order > 0); 1886 1887 if (!dn_rt_hash_table) 1888 panic("Failed to allocate DECnet route cache hash table\n"); 1889 1890 printk(KERN_INFO 1891 "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n", 1892 dn_rt_hash_mask, 1893 (long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024); 1894 1895 dn_rt_hash_mask--; 1896 for(i = 0; i <= dn_rt_hash_mask; i++) { 1897 spin_lock_init(&dn_rt_hash_table[i].lock); 1898 dn_rt_hash_table[i].chain = NULL; 1899 } 1900 1901 dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1); 1902 1903 proc_create_seq_private("decnet_cache", 0444, init_net.proc_net, 1904 &dn_rt_cache_seq_ops, 1905 sizeof(struct dn_rt_cache_iter_state), NULL); 1906 1907#ifdef CONFIG_DECNET_ROUTER 1908 rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE, 1909 dn_cache_getroute, dn_fib_dump, 0); 1910#else 1911 rtnl_register_module(THIS_MODULE, PF_DECnet, RTM_GETROUTE, 1912 dn_cache_getroute, dn_cache_dump, 0); 1913#endif 1914} 1915 1916void __exit dn_route_cleanup(void) 1917{ 1918 del_timer(&dn_route_timer); 1919 dn_run_flush(NULL); 1920 1921 remove_proc_entry("decnet_cache", init_net.proc_net); 1922 dst_entries_destroy(&dn_dst_ops); 1923}