Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
net/decnet/dn_route.c at v2.6.18-rc6 — 1845 lines, 46 kB
/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Functions (Endnode and Router)
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *              Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                                 "return-to-sender" bits on outgoing
 *                                 packets.
 *              Steve Whitehouse : Timeouts for cached routes.
 *              Steve Whitehouse : Use dst cache for input routes too.
 *              Steve Whitehouse : Fixed error values in dn_send_skb.
 *              Steve Whitehouse : Rework routing functions to better fit
 *                                 DECnet routing design
 *              Alexey Kuznetsov : New SMP locking
 *              Steve Whitehouse : More SMP locking changes & dn_cache_dump()
 *              Steve Whitehouse : Prerouting NF hook, now really is prerouting.
 *                                 Fixed possible skb leak in rtnetlink funcs.
 *              Steve Whitehouse : Dave Miller's dynamic hash table sizing and
 *                                 Alexey Kuznetsov's finer grained locking
 *                                 from ipv4/route.c.
 *              Steve Whitehouse : Routing is now starting to look like a
 *                                 sensible set of code now, mainly due to
 *                                 my copying the IPv4 routing code. The
 *                                 hooks here are modified and will continue
 *                                 to evolve for a while.
 *              Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                                 stuff. Look out raw sockets your days
 *                                 are numbered!
 *              Steve Whitehouse : Added return-to-sender functions. Added
 *                                 backlog congestion level return codes.
 *              Steve Whitehouse : Fixed bug where routes were set up with
 *                                 no ref count on net devices.
 *              Steve Whitehouse : RCU for the route cache
 *              Steve Whitehouse : Preparations for the flow cache
 *              Steve Whitehouse : Prepare for nonlinear skbs
 */

/******************************************************************************
    (c) 1995-1998 E.M. Serrat           emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*******************************************************************************/

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/in_route.h>
#include <net/sock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <asm/errno.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>

struct dn_rt_hash_bucket
{
	struct dn_route *chain;
	spinlock_t lock;
} __attribute__((__aligned__(8)));

extern struct neigh_table dn_neigh_table;


static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};

static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;

static int dn_dst_gc(void);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);

static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
int decnet_dst_gc_interval = 2;

static struct dst_ops dn_dst_ops = {
	.family =		PF_DECnet,
	.protocol =		__constant_htons(ETH_P_DNA_RT),
	.gc_thresh =		128,
	.gc =			dn_dst_gc,
	.check =		dn_dst_check,
	.negative_advice =	dn_dst_negative_advice,
	.link_failure =		dn_dst_link_failure,
	.update_pmtu =		dn_dst_update_pmtu,
	.entry_size =		sizeof(struct dn_route),
	.entries =		ATOMIC_INIT(0),
};

static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
{
	__u16 tmp = (__u16 __force)(src ^ dst);
	tmp ^= (tmp >> 3);
	tmp ^= (tmp >> 5);
	tmp ^= (tmp >> 10);
	return dn_rt_hash_mask & (unsigned)tmp;
}
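/*
 * A worked example of the fold above, assuming src ^ dst == 0x0100 and
 * dn_rt_hash_mask == 0x3ff:
 *
 *	tmp  = 0x0100
 *	tmp ^= tmp >> 3;	// tmp == 0x0120
 *	tmp ^= tmp >> 5;	// tmp == 0x0129
 *	tmp ^= tmp >> 10;	// tmp == 0x0129
 *	index = 0x3ff & tmp;	// index == 0x129
 *
 * Because the hash starts from src ^ dst it is symmetric in the two
 * addresses, so a given (src, dst) pair always selects the same bucket
 * of dn_rt_hash_table.
 */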
static inline void dnrt_free(struct dn_route *rt)
{
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static inline void dnrt_drop(struct dn_route *rt)
{
	dst_release(&rt->u.dst);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static void dn_dst_check_expire(unsigned long dummy)
{
	int i;
	struct dn_route *rt, **rtp;
	unsigned long now = jiffies;
	unsigned long expire = 120 * HZ;

	for(i = 0; i <= dn_rt_hash_mask; i++) {
		rtp = &dn_rt_hash_table[i].chain;

		spin_lock(&dn_rt_hash_table[i].lock);
		while((rt=*rtp) != NULL) {
			if (atomic_read(&rt->u.dst.__refcnt) ||
					(now - rt->u.dst.lastuse) < expire) {
				rtp = &rt->u.rt_next;
				continue;
			}
			*rtp = rt->u.rt_next;
			rt->u.rt_next = NULL;
			dnrt_free(rt);
		}
		spin_unlock(&dn_rt_hash_table[i].lock);

		if ((jiffies - now) > 0)
			break;
	}

	mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
}

static int dn_dst_gc(void)
{
	struct dn_route *rt, **rtp;
	int i;
	unsigned long now = jiffies;
	unsigned long expire = 10 * HZ;

	for(i = 0; i <= dn_rt_hash_mask; i++) {

		spin_lock_bh(&dn_rt_hash_table[i].lock);
		rtp = &dn_rt_hash_table[i].chain;

		while((rt=*rtp) != NULL) {
			if (atomic_read(&rt->u.dst.__refcnt) ||
					(now - rt->u.dst.lastuse) < expire) {
				rtp = &rt->u.rt_next;
				continue;
			}
			*rtp = rt->u.rt_next;
			rt->u.rt_next = NULL;
			dnrt_drop(rt);
			break;
		}
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}

	return 0;
}

/*
 * The decnet standards don't impose a particular minimum mtu; what they
 * do insist on is that the routing layer accepts a datagram at least
 * 230 bytes long. Here we have to subtract the routing header length from
 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we
 * assume the worst and use a long header size.
 *
 * We update both the mtu and the advertised mss (i.e. the segment size we
 * advertise to the other end).
 */
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	u32 min_mtu = 230;
	struct dn_dev *dn = dst->neighbour ?
			    (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL;

	if (dn && dn->use_long == 0)
		min_mtu -= 6;
	else
		min_mtu -= 21;

	if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= min_mtu) {
		if (!(dst_metric_locked(dst, RTAX_MTU))) {
			dst->metrics[RTAX_MTU-1] = mtu;
			dst_set_expires(dst, dn_rt_mtu_expires);
		}
		if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
			u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
			if (dst->metrics[RTAX_ADVMSS-1] > mss)
				dst->metrics[RTAX_ADVMSS-1] = mss;
		}
	}
}
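/*
 * A worked example of the figures above, based only on the values used in
 * dn_dst_update_pmtu(): with a short (6 byte) routing header the minimum
 * acceptable mtu is 230 - 6 = 224, while with a long (21 byte) header, or
 * when no neighbour is known, it is 230 - 21 = 209.  Whenever the mtu is
 * reduced, the advertised mss follows it, capped at the new mtu minus
 * DN_MAX_NSP_DATA_HEADER.
 */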
/*
 * Called when a route has been marked obsolete (e.g. by a routing cache flush).
 */
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
{
	return NULL;
}

static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
{
	dst_release(dst);
	return NULL;
}

static void dn_dst_link_failure(struct sk_buff *skb)
{
	return;
}

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return memcmp(&fl1->nl_u.dn_u, &fl2->nl_u.dn_u, sizeof(fl1->nl_u.dn_u)) == 0 &&
		fl1->oif == fl2->oif &&
		fl1->iif == fl2->iif;
}

static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
{
	struct dn_route *rth, **rthp;
	unsigned long now = jiffies;

	rthp = &dn_rt_hash_table[hash].chain;

	spin_lock_bh(&dn_rt_hash_table[hash].lock);
	while((rth = *rthp) != NULL) {
		if (compare_keys(&rth->fl, &rt->fl)) {
			/* Put it first */
			*rthp = rth->u.rt_next;
			rcu_assign_pointer(rth->u.rt_next,
					   dn_rt_hash_table[hash].chain);
			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

			rth->u.dst.__use++;
			dst_hold(&rth->u.dst);
			rth->u.dst.lastuse = now;
			spin_unlock_bh(&dn_rt_hash_table[hash].lock);

			dnrt_drop(rt);
			*rp = rth;
			return 0;
		}
		rthp = &rth->u.rt_next;
	}

	rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain);
	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

	dst_hold(&rt->u.dst);
	rt->u.dst.__use++;
	rt->u.dst.lastuse = now;
	spin_unlock_bh(&dn_rt_hash_table[hash].lock);
	*rp = rt;
	return 0;
}
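/*
 * Note on dn_insert_route() above: if an entry with the same flow key is
 * already cached, that entry is moved to the head of its chain, handed back
 * in *rp with a reference held, and the newly built route is dropped;
 * otherwise the new route is linked at the head of the chain and returned.
 * Either way the caller receives a held route that it must later release.
 */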
void dn_run_flush(unsigned long dummy)
{
	int i;
	struct dn_route *rt, *next;

	for(i = 0; i < dn_rt_hash_mask; i++) {
		spin_lock_bh(&dn_rt_hash_table[i].lock);

		if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
			goto nothing_to_declare;

		for(; rt; rt=next) {
			next = rt->u.rt_next;
			rt->u.rt_next = NULL;
			dst_free((struct dst_entry *)rt);
		}

nothing_to_declare:
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}
}

static DEFINE_SPINLOCK(dn_rt_flush_lock);

void dn_rt_cache_flush(int delay)
{
	unsigned long now = jiffies;
	int user_mode = !in_interrupt();

	if (delay < 0)
		delay = dn_rt_min_delay;

	spin_lock_bh(&dn_rt_flush_lock);

	if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
		long tmo = (long)(dn_rt_deadline - now);

		if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
			tmo = 0;

		if (delay > tmo)
			delay = tmo;
	}

	if (delay <= 0) {
		spin_unlock_bh(&dn_rt_flush_lock);
		dn_run_flush(0);
		return;
	}

	if (dn_rt_deadline == 0)
		dn_rt_deadline = now + dn_rt_max_delay;

	dn_rt_flush_timer.expires = now + delay;
	add_timer(&dn_rt_flush_timer);
	spin_unlock_bh(&dn_rt_flush_lock);
}

/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 *
 */
static int dn_return_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	__le16 *src;
	__le16 *dst;
	__le16 tmp;

	/* Add back headers */
	skb_push(skb, skb->data - skb->nh.raw);

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Skip packet length and point to flags */
	ptr = skb->data + 2;
	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;

	dst = (__le16 *)ptr;
	ptr += 2;
	src = (__le16 *)ptr;
	ptr += 2;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	tmp = *src;
	*src = *dst;
	*dst = tmp;

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, NULL, NULL);
	return NET_RX_SUCCESS;
}

/**
 * dn_return_long - Return a long packet to its sender
 * @skb: The long format packet to return
 *
 */
static int dn_return_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	unsigned char *src_addr, *dst_addr;
	unsigned char tmp[ETH_ALEN];

	/* Add back all headers */
	skb_push(skb, skb->data - skb->nh.raw);

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Ignore packet length and point to flags */
	ptr = skb->data + 2;

	/* Skip padding */
	if (*ptr & DN_RT_F_PF) {
		char padlen = (*ptr & ~DN_RT_F_PF);
		ptr += padlen;
	}

	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
	ptr += 2;
	dst_addr = ptr;
	ptr += 8;
	src_addr = ptr;
	ptr += 6;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	memcpy(tmp, src_addr, ETH_ALEN);
	memcpy(src_addr, dst_addr, ETH_ALEN);
	memcpy(dst_addr, tmp, ETH_ALEN);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, dst_addr, src_addr);
	return NET_RX_SUCCESS;
}

/**
 * dn_route_rx_packet - Try and find a route for an incoming packet
 * @skb: The packet to find a route for
 *
 * Returns: result of input function if route is found, error code otherwise
 */
static int dn_route_rx_packet(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	int err;

	if ((err = dn_route_input(skb)) == 0)
		return dst_input(skb);

	if (decnet_debug_level & 4) {
		char *devname = skb->dev ? skb->dev->name : "???";
		struct dn_skb_cb *cb = DN_SKB_CB(skb);
		printk(KERN_DEBUG
			"DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
			(int)cb->rt_flags, devname, skb->len,
			dn_ntohs(cb->src), dn_ntohs(cb->dst),
			err, skb->pkt_type);
	}

	if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
		switch(cb->rt_flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_SHORT:
			return dn_return_short(skb);
		case DN_RT_PKT_LONG:
			return dn_return_long(skb);
		}
	}

	kfree_skb(skb);
	return NET_RX_DROP;
}

static int dn_route_rx_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 20);
	skb->h.raw = skb->data;

	/* Destination info */
	ptr += 2;
	cb->dst = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;


	/* Source info */
	ptr += 2;
	cb->src = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;
	/* Other junk */
	ptr++;
	cb->hops = *ptr++; /* Visit Count */

	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}



static int dn_route_rx_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 5);
	skb->h.raw = skb->data;

	cb->dst = *(__le16 *)ptr;
	ptr += 2;
	cb->src = *(__le16 *)ptr;
	ptr += 2;
	cb->hops = *ptr & 0x3f;

	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}
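/*
 * A summary of the two receive paths above, based on the parsing they do
 * rather than on the protocol specification text: the short header carries
 * the destination and source as raw 2 byte little-endian node addresses,
 * while the long header carries them as 6 byte Ethernet-style addresses
 * which must begin with the HIORD prefix AA-00-04-00 (dn_hiord_addr);
 * dn_eth2dn() is then used to recover the 2 byte node address held in the
 * remaining octets.  Both paths hand the packet to the NF_DN_PRE_ROUTING
 * hook with dn_route_rx_packet() as the final handler.
 */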
static int dn_route_discard(struct sk_buff *skb)
{
	/*
	 * I know we drop the packet here, but that's considered success in
	 * this case
	 */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int dn_route_ptp_hello(struct sk_buff *skb)
{
	dn_dev_hello(skb);
	dn_neigh_pointopoint_hello(skb);
	return NET_RX_SUCCESS;
}

int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct dn_skb_cb *cb;
	unsigned char flags = 0;
	__u16 len = dn_ntohs(*(__le16 *)skb->data);
	struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
	unsigned char padlen = 0;

	if (dn == NULL)
		goto dump_it;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out;

	if (!pskb_may_pull(skb, 3))
		goto dump_it;

	skb_pull(skb, 2);

	if (len > skb->len)
		goto dump_it;

	skb_trim(skb, len);

	flags = *skb->data;

	cb = DN_SKB_CB(skb);
	cb->stamp = jiffies;
	cb->iif = dev->ifindex;

	/*
	 * If we have padding, remove it.
	 */
	if (flags & DN_RT_F_PF) {
		padlen = flags & ~DN_RT_F_PF;
		if (!pskb_may_pull(skb, padlen + 1))
			goto dump_it;
		skb_pull(skb, padlen);
		flags = *skb->data;
	}

	skb->nh.raw = skb->data;

	/*
	 * Weed out future version DECnet
	 */
	if (flags & DN_RT_F_VER)
		goto dump_it;

	cb->rt_flags = flags;

	if (decnet_debug_level & 1)
		printk(KERN_DEBUG
			"dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
			(int)flags, (dev) ? dev->name : "???", len, skb->len,
			padlen);

	if (flags & DN_RT_PKT_CNTL) {
		if (unlikely(skb_linearize(skb)))
			goto dump_it;

		switch(flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_INIT:
			dn_dev_init_pkt(skb);
			break;
		case DN_RT_PKT_VERI:
			dn_dev_veri_pkt(skb);
			break;
		}

		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		switch(flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_HELO:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);

		case DN_RT_PKT_L1RT:
		case DN_RT_PKT_L2RT:
			return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
		case DN_RT_PKT_ERTH:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);

		case DN_RT_PKT_EEDH:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
		}
	} else {
		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		skb_pull(skb, 1); /* Pull flags */

		switch(flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_LONG:
			return dn_route_rx_long(skb);
		case DN_RT_PKT_SHORT:
			return dn_route_rx_short(skb);
		}
	}

dump_it:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}

static int dn_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct dn_route *rt = (struct dn_route *)dst;
	struct net_device *dev = dst->dev;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct neighbour *neigh;

	int err = -EINVAL;

	if ((neigh = dst->neighbour) == NULL)
		goto error;

	skb->dev = dev;

	cb->src = rt->rt_saddr;
	cb->dst = rt->rt_daddr;

	/*
	 * Always set the Intra-Ethernet bit on all outgoing packets
	 * originated on this node. The only valid flag from upper layers
	 * is return-to-sender-requested. Set hop count to 0 too.
	 */
	cb->rt_flags &= ~DN_RT_F_RQR;
	cb->rt_flags |= DN_RT_F_IE;
	cb->hops = 0;

	return NF_HOOK(PF_DECnet, NF_DN_LOCAL_OUT, skb, NULL, dev, neigh->output);

error:
	if (net_ratelimit())
		printk(KERN_DEBUG "dn_output: This should not happen\n");

	kfree_skb(skb);

	return err;
}

static int dn_forward(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dst_entry *dst = skb->dst;
	struct dn_dev *dn_db = dst->dev->dn_ptr;
	struct dn_route *rt;
	struct neighbour *neigh = dst->neighbour;
	int header_len;
#ifdef CONFIG_NETFILTER
	struct net_device *dev = skb->dev;
#endif

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	/* Ensure that we have enough space for headers */
	rt = (struct dn_route *)skb->dst;
	header_len = dn_db->use_long ? 21 : 6;
	if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len))
		goto drop;

	/*
	 * Hop count exceeded.
	 */
	if (++cb->hops > 30)
		goto drop;

	skb->dev = rt->u.dst.dev;

	/*
	 * If packet goes out same interface it came in on, then set
	 * the Intra-Ethernet bit. This has no effect for short
	 * packets, so we don't need to test for them here.
	 */
	cb->rt_flags &= ~DN_RT_F_IE;
	if (rt->rt_flags & RTCF_DOREDIRECT)
		cb->rt_flags |= DN_RT_F_IE;

	return NF_HOOK(PF_DECnet, NF_DN_FORWARD, skb, dev, skb->dev, neigh->output);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * Drop packet. This is used for endnodes and for
 * when we should not be forwarding packets from
 * this dest.
 */
static int dn_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * Used to catch bugs. This should never normally get
 * called.
 */
static int dn_rt_bug(struct sk_buff *skb)
{
	if (net_ratelimit()) {
		struct dn_skb_cb *cb = DN_SKB_CB(skb);

		printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
				dn_ntohs(cb->src), dn_ntohs(cb->dst));
	}

	kfree_skb(skb);

	return NET_RX_BAD;
}

static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
	struct dn_fib_info *fi = res->fi;
	struct net_device *dev = rt->u.dst.dev;
	struct neighbour *n;
	unsigned mss;

	if (fi) {
		if (DN_FIB_RES_GW(*res) &&
		    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = DN_FIB_RES_GW(*res);
		memcpy(rt->u.dst.metrics, fi->fib_metrics,
		       sizeof(rt->u.dst.metrics));
	}
	rt->rt_type = res->type;

	if (dev != NULL && rt->u.dst.neighbour == NULL) {
		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		rt->u.dst.neighbour = n;
	}

	if (rt->u.dst.metrics[RTAX_MTU-1] == 0 ||
	    rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
		rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
	mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
	if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 ||
	    rt->u.dst.metrics[RTAX_ADVMSS-1] > mss)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = mss;
	return 0;
}

static inline int dn_match_addr(__le16 addr1, __le16 addr2)
{
	__u16 tmp = dn_ntohs(addr1) ^ dn_ntohs(addr2);
	int match = 16;
	while(tmp) {
		tmp >>= 1;
		match--;
	}
	return match;
}
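/*
 * dn_match_addr() above returns the number of leading bits the two addresses
 * share, in host byte order.  A worked example with assumed values: for
 * addr1 == 1.1 (0x0401) and addr2 == 1.3 (0x0403) the XOR is 0x0002, which
 * takes two shifts to clear, so the match length is 16 - 2 = 14 bits.
 * Identical addresses score the full 16.
 */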
static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
{
	__le16 saddr = 0;
	struct dn_dev *dn_db = dev->dn_ptr;
	struct dn_ifaddr *ifa;
	int best_match = 0;
	int ret;

	read_lock(&dev_base_lock);
	for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
		if (ifa->ifa_scope > scope)
			continue;
		if (!daddr) {
			saddr = ifa->ifa_local;
			break;
		}
		ret = dn_match_addr(daddr, ifa->ifa_local);
		if (ret > best_match) {
			best_match = ret;
			saddr = ifa->ifa_local;
		}
		if (best_match == 0)
			saddr = ifa->ifa_local;
	}
	read_unlock(&dev_base_lock);

	return saddr;
}

static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
{
	return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
}

static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
{
	__le16 mask = dnet_make_mask(res->prefixlen);
	return (daddr&~mask)|res->fi->fib_nh->nh_gw;
}

static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
{
	struct flowi fl = { .nl_u = { .dn_u =
				      { .daddr = oldflp->fld_dst,
					.saddr = oldflp->fld_src,
					.scope = RT_SCOPE_UNIVERSE,
#ifdef CONFIG_DECNET_ROUTE_FWMARK
					.fwmark = oldflp->fld_fwmark
#endif
				      } },
			    .iif = loopback_dev.ifindex,
			    .oif = oldflp->oif };
	struct dn_route *rt = NULL;
	struct net_device *dev_out = NULL;
	struct neighbour *neigh = NULL;
	unsigned hash;
	unsigned flags = 0;
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
	int err;
	int free_res = 0;
	__le16 gateway = 0;

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
		       " iif=%d oif=%d\n", dn_ntohs(oldflp->fld_dst),
		       dn_ntohs(oldflp->fld_src),
		       oldflp->fld_fwmark, loopback_dev.ifindex, oldflp->oif);

	/* If we have an output interface, verify it's a DECnet device */
	if (oldflp->oif) {
		dev_out = dev_get_by_index(oldflp->oif);
		err = -ENODEV;
		if (dev_out && dev_out->dn_ptr == NULL) {
			dev_put(dev_out);
			dev_out = NULL;
		}
		if (dev_out == NULL)
			goto out;
	}

	/* If we have a source address, verify that it's a local address */
	if (oldflp->fld_src) {
		err = -EADDRNOTAVAIL;

		if (dev_out) {
			if (dn_dev_islocal(dev_out, oldflp->fld_src))
				goto source_ok;
			dev_put(dev_out);
			goto out;
		}
		read_lock(&dev_base_lock);
		for(dev_out = dev_base; dev_out; dev_out = dev_out->next) {
			if (!dev_out->dn_ptr)
				continue;
			if (!dn_dev_islocal(dev_out, oldflp->fld_src))
				continue;
			if ((dev_out->flags & IFF_LOOPBACK) &&
			    oldflp->fld_dst &&
			    !dn_dev_islocal(dev_out, oldflp->fld_dst))
				continue;
			break;
		}
		read_unlock(&dev_base_lock);
		if (dev_out == NULL)
			goto out;
		dev_hold(dev_out);
source_ok:
		;
	}

	/* No destination? Assume it's local */
	if (!fl.fld_dst) {
		fl.fld_dst = fl.fld_src;

		err = -EADDRNOTAVAIL;
		if (dev_out)
			dev_put(dev_out);
		dev_out = &loopback_dev;
		dev_hold(dev_out);
		if (!fl.fld_dst) {
			fl.fld_dst =
			fl.fld_src = dnet_select_source(dev_out, 0,
							RT_SCOPE_HOST);
			if (!fl.fld_dst)
				goto out;
		}
		fl.oif = loopback_dev.ifindex;
		res.type = RTN_LOCAL;
		goto make_route;
	}

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: initial checks complete."
		       " dst=%04x src=%04x oif=%d try_hard=%d\n",
		       dn_ntohs(fl.fld_dst), dn_ntohs(fl.fld_src),
		       fl.oif, try_hard);

	/*
	 * N.B. If the kernel is compiled without router support then
	 * dn_fib_lookup() will evaluate to non-zero so this if () block
	 * will always be executed.
	 */
	err = -ESRCH;
	if (try_hard || (err = dn_fib_lookup(&fl, &res)) != 0) {
		struct dn_dev *dn_db;
		if (err != -ESRCH)
			goto out;
		/*
		 * Here the fallback is basically the standard algorithm for
		 * routing in endnodes which is described in the DECnet routing
		 * docs
		 *
		 * If we are not trying hard, look in neighbour cache.
		 * The result is tested to ensure that if a specific output
		 * device/source address was requested, then we honour that
		 * here
		 */
		if (!try_hard) {
			neigh = neigh_lookup_nodev(&dn_neigh_table, &fl.fld_dst);
			if (neigh) {
				if ((oldflp->oif &&
				    (neigh->dev->ifindex != oldflp->oif)) ||
				    (oldflp->fld_src &&
				    (!dn_dev_islocal(neigh->dev,
						     oldflp->fld_src)))) {
					neigh_release(neigh);
					neigh = NULL;
				} else {
					if (dev_out)
						dev_put(dev_out);
					if (dn_dev_islocal(neigh->dev, fl.fld_dst)) {
						dev_out = &loopback_dev;
						res.type = RTN_LOCAL;
					} else {
						dev_out = neigh->dev;
					}
					dev_hold(dev_out);
					goto select_source;
				}
			}
		}

		/* Not there? Perhaps it's a local address */
		if (dev_out == NULL)
			dev_out = dn_dev_get_default();
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;
		dn_db = dev_out->dn_ptr;
		/* Possible improvement - check all devices for local addr */
		if (dn_dev_islocal(dev_out, fl.fld_dst)) {
			dev_put(dev_out);
			dev_out = &loopback_dev;
			dev_hold(dev_out);
			res.type = RTN_LOCAL;
			goto select_source;
		}
		/* Not local either.... try sending it to the default router */
		neigh = neigh_clone(dn_db->router);
		BUG_ON(neigh && neigh->dev != dev_out);

		/* Ok then, we assume it's directly connected and move on */
select_source:
		if (neigh)
			gateway = ((struct dn_neigh *)neigh)->addr;
		if (gateway == 0)
			gateway = fl.fld_dst;
		if (fl.fld_src == 0) {
			fl.fld_src = dnet_select_source(dev_out, gateway,
							res.type == RTN_LOCAL ?
							RT_SCOPE_HOST :
							RT_SCOPE_LINK);
			if (fl.fld_src == 0 && res.type != RTN_LOCAL)
				goto e_addr;
		}
		fl.oif = dev_out->ifindex;
		goto make_route;
	}
	free_res = 1;

	if (res.type == RTN_NAT)
		goto e_inval;

	if (res.type == RTN_LOCAL) {
		if (!fl.fld_src)
			fl.fld_src = fl.fld_dst;
		if (dev_out)
			dev_put(dev_out);
		dev_out = &loopback_dev;
		dev_hold(dev_out);
		fl.oif = dev_out->ifindex;
		if (res.fi)
			dn_fib_info_put(res.fi);
		res.fi = NULL;
		goto make_route;
	}

	if (res.fi->fib_nhs > 1 && fl.oif == 0)
		dn_fib_select_multipath(&fl, &res);

	/*
	 * We could add some logic to deal with default routes here and
	 * get rid of some of the special casing above.
	 */

	if (!fl.fld_src)
		fl.fld_src = DN_FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = DN_FIB_RES_DEV(res);
	dev_hold(dev_out);
	fl.oif = dev_out->ifindex;
	gateway = DN_FIB_RES_GW(res);

make_route:
	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	rt = dst_alloc(&dn_dst_ops);
	if (rt == NULL)
		goto e_nobufs;

	atomic_set(&rt->u.dst.__refcnt, 1);
	rt->u.dst.flags = DST_HOST;

	rt->fl.fld_src    = oldflp->fld_src;
	rt->fl.fld_dst    = oldflp->fld_dst;
	rt->fl.oif        = oldflp->oif;
	rt->fl.iif        = 0;
#ifdef CONFIG_DECNET_ROUTE_FWMARK
	rt->fl.fld_fwmark = oldflp->fld_fwmark;
#endif

	rt->rt_saddr      = fl.fld_src;
	rt->rt_daddr      = fl.fld_dst;
	rt->rt_gateway    = gateway ? gateway : fl.fld_dst;
	rt->rt_local_src  = fl.fld_src;

	rt->rt_dst_map    = fl.fld_dst;
	rt->rt_src_map    = fl.fld_src;

	rt->u.dst.dev = dev_out;
	dev_hold(dev_out);
	rt->u.dst.neighbour = neigh;
	neigh = NULL;

	rt->u.dst.lastuse = jiffies;
	rt->u.dst.output  = dn_output;
	rt->u.dst.input   = dn_rt_bug;
	rt->rt_flags      = flags;
	if (flags & RTCF_LOCAL)
		rt->u.dst.input = dn_nsp_rx;

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
	dn_insert_route(rt, hash, (struct dn_route **)pprt);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:
	return err;

e_addr:
	err = -EADDRNOTAVAIL;
	goto done;
e_inval:
	err = -EINVAL;
	goto done;
e_nobufs:
	err = -ENOBUFS;
	goto done;
e_neighbour:
	dst_free(&rt->u.dst);
	goto e_nobufs;
}
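/*
 * Roughly, dn_route_output_slow() above proceeds as follows: validate any
 * requested output device and source address, treat a missing destination
 * as a route to ourselves, then try dn_fib_lookup().  If the lookup fails
 * with -ESRCH (or try_hard is set) it falls back to the endnode algorithm:
 * consult the neighbour cache (unless try_hard is set), then check for a
 * local address, then fall back to the default router, and finally assume
 * the destination is directly connected.  Whichever path is taken, the
 * result is turned into a dn_route at make_route: and stored in the cache
 * via dn_insert_route().
 */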
/*
 * N.B. The flags may be moved into the flowi at some future stage.
 */
static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *flp, int flags)
{
	unsigned hash = dn_hash(flp->fld_src, flp->fld_dst);
	struct dn_route *rt = NULL;

	if (!(flags & MSG_TRYHARD)) {
		rcu_read_lock_bh();
		for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
		    rt = rcu_dereference(rt->u.rt_next)) {
			if ((flp->fld_dst == rt->fl.fld_dst) &&
			    (flp->fld_src == rt->fl.fld_src) &&
#ifdef CONFIG_DECNET_ROUTE_FWMARK
			    (flp->fld_fwmark == rt->fl.fld_fwmark) &&
#endif
			    (rt->fl.iif == 0) &&
			    (rt->fl.oif == flp->oif)) {
				rt->u.dst.lastuse = jiffies;
				dst_hold(&rt->u.dst);
				rt->u.dst.__use++;
				rcu_read_unlock_bh();
				*pprt = &rt->u.dst;
				return 0;
			}
		}
		rcu_read_unlock_bh();
	}

	return dn_route_output_slow(pprt, flp, flags);
}

static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, flp, flags);
	if (err == 0 && flp->proto) {
		err = xfrm_lookup(pprt, flp, NULL, 0);
	}
	return err;
}

int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock *sk, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
	if (err == 0 && fl->proto) {
		err = xfrm_lookup(pprt, fl, sk, !(flags & MSG_DONTWAIT));
	}
	return err;
}

static int dn_route_input_slow(struct sk_buff *skb)
{
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct net_device *in_dev = skb->dev;
	struct net_device *out_dev = NULL;
	struct dn_dev *dn_db;
	struct neighbour *neigh = NULL;
	unsigned hash;
	int flags = 0;
	__le16 gateway = 0;
	__le16 local_src = 0;
	struct flowi fl = { .nl_u = { .dn_u =
				     { .daddr = cb->dst,
				       .saddr = cb->src,
				       .scope = RT_SCOPE_UNIVERSE,
#ifdef CONFIG_DECNET_ROUTE_FWMARK
				       .fwmark = skb->nfmark
#endif
				     } },
			    .iif = skb->dev->ifindex };
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
	int err = -EINVAL;
	int free_res = 0;

	dev_hold(in_dev);

	if ((dn_db = in_dev->dn_ptr) == NULL)
		goto out;

	/* Zero source addresses are not allowed */
	if (fl.fld_src == 0)
		goto out;

	/*
	 * In this case we've just received a packet from a source
	 * outside ourselves pretending to come from us. We don't
	 * allow it any further to prevent routing loops, spoofing and
	 * other nasties. Loopback packets already have the dst attached
	 * so this only affects packets which have originated elsewhere.
	 */
	err  = -ENOTUNIQ;
	if (dn_dev_islocal(in_dev, cb->src))
		goto out;

	err = dn_fib_lookup(&fl, &res);
	if (err) {
		if (err != -ESRCH)
			goto out;
		/*
		 * Is the destination us ?
		 */
		if (!dn_dev_islocal(in_dev, cb->dst))
			goto e_inval;

		res.type = RTN_LOCAL;
		flags |= RTCF_DIRECTSRC;
	} else {
		__le16 src_map = fl.fld_src;
		free_res = 1;

		out_dev = DN_FIB_RES_DEV(res);
		if (out_dev == NULL) {
			if (net_ratelimit())
				printk(KERN_CRIT "Bug in dn_route_input_slow() "
						 "No output device\n");
			goto e_inval;
		}
		dev_hold(out_dev);

		if (res.r)
			src_map = dn_fib_rules_policy(fl.fld_src, &res, &flags);

		gateway = DN_FIB_RES_GW(res);
		if (res.type == RTN_NAT) {
			fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res);
			dn_fib_res_put(&res);
			free_res = 0;
			if (dn_fib_lookup(&fl, &res))
				goto e_inval;
			free_res = 1;
			if (res.type != RTN_UNICAST)
				goto e_inval;
			flags |= RTCF_DNAT;
			gateway = fl.fld_dst;
		}
		fl.fld_src = src_map;
	}

	switch(res.type) {
	case RTN_UNICAST:
		/*
		 * Forwarding check here, we only check for forwarding
		 * being turned off, if you want to only forward intra
		 * area, it's up to you to set the routing tables up
		 * correctly.
		 */
		if (dn_db->parms.forwarding == 0)
			goto e_inval;

		if (res.fi->fib_nhs > 1 && fl.oif == 0)
			dn_fib_select_multipath(&fl, &res);

		/*
		 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
		 * flag as a hint to set the intra-ethernet bit when
		 * forwarding. If we've got NAT in operation, we don't do
		 * this optimisation.
		 */
		if (out_dev == in_dev && !(flags & RTCF_NAT))
			flags |= RTCF_DOREDIRECT;

		local_src = DN_FIB_RES_PREFSRC(res);

	case RTN_BLACKHOLE:
	case RTN_UNREACHABLE:
		break;
	case RTN_LOCAL:
		flags |= RTCF_LOCAL;
		fl.fld_src = cb->dst;
		fl.fld_dst = cb->src;

		/* Routing tables gave us a gateway */
		if (gateway)
			goto make_route;

		/* Packet was intra-ethernet, so we know it's on-link */
		if (cb->rt_flags & DN_RT_F_IE) {
			gateway = cb->src;
			flags |= RTCF_DIRECTSRC;
			goto make_route;
		}

		/* Use the default router if there is one */
		neigh = neigh_clone(dn_db->router);
		if (neigh) {
			gateway = ((struct dn_neigh *)neigh)->addr;
			goto make_route;
		}

		/* Close eyes and pray */
		gateway = cb->src;
		flags |= RTCF_DIRECTSRC;
		goto make_route;
	default:
		goto e_inval;
	}

make_route:
	rt = dst_alloc(&dn_dst_ops);
	if (rt == NULL)
		goto e_nobufs;

	rt->rt_saddr      = fl.fld_src;
	rt->rt_daddr      = fl.fld_dst;
	rt->rt_gateway    = fl.fld_dst;
	if (gateway)
		rt->rt_gateway = gateway;
	rt->rt_local_src  = local_src ? local_src : rt->rt_saddr;
	rt->rt_dst_map    = fl.fld_dst;
	rt->rt_src_map    = fl.fld_src;

	rt->fl.fld_src    = cb->src;
	rt->fl.fld_dst    = cb->dst;
	rt->fl.oif        = 0;
	rt->fl.iif        = in_dev->ifindex;
	rt->fl.fld_fwmark = fl.fld_fwmark;

	rt->u.dst.flags = DST_HOST;
	rt->u.dst.neighbour = neigh;
	rt->u.dst.dev = out_dev;
	rt->u.dst.lastuse = jiffies;
	rt->u.dst.output = dn_rt_bug;
	switch(res.type) {
	case RTN_UNICAST:
		rt->u.dst.input = dn_forward;
		break;
	case RTN_LOCAL:
		rt->u.dst.output = dn_output;
		rt->u.dst.input = dn_nsp_rx;
		rt->u.dst.dev = in_dev;
		flags |= RTCF_LOCAL;
		break;
	default:
	case RTN_UNREACHABLE:
	case RTN_BLACKHOLE:
		rt->u.dst.input = dn_blackhole;
	}
	rt->rt_flags = flags;
	if (rt->u.dst.dev)
		dev_hold(rt->u.dst.dev);

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
	dn_insert_route(rt, hash, (struct dn_route **)&skb->dst);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	dev_put(in_dev);
	if (out_dev)
		dev_put(out_dev);
out:
	return err;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

e_neighbour:
	dst_free(&rt->u.dst);
	goto done;
}

int dn_route_input(struct sk_buff *skb)
{
	struct dn_route *rt;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned hash = dn_hash(cb->src, cb->dst);

	if (skb->dst)
		return 0;

	rcu_read_lock();
	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
	    rt = rcu_dereference(rt->u.rt_next)) {
		if ((rt->fl.fld_src == cb->src) &&
		    (rt->fl.fld_dst == cb->dst) &&
		    (rt->fl.oif == 0) &&
#ifdef CONFIG_DECNET_ROUTE_FWMARK
		    (rt->fl.fld_fwmark == skb->nfmark) &&
#endif
		    (rt->fl.iif == cb->iif)) {
			rt->u.dst.lastuse = jiffies;
			dst_hold(&rt->u.dst);
			rt->u.dst.__use++;
			rcu_read_unlock();
			skb->dst = (struct dst_entry *)rt;
			return 0;
		}
	}
	rcu_read_unlock();

	return dn_route_input_slow(skb);
}

static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			   int event, int nowait, unsigned int flags)
{
	struct dn_route *rt = (struct dn_route *)skb->dst;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;
	struct rta_cacheinfo ci;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
	r = NLMSG_DATA(nlh);
	r->rtm_family = AF_DECnet;
	r->rtm_dst_len = 16;
	r->rtm_src_len = 0;
	r->rtm_tos = 0;
	r->rtm_table = RT_TABLE_MAIN;
	r->rtm_type = rt->rt_type;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
	if (rt->fl.fld_src) {
		r->rtm_src_len = 16;
		RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
	}
	if (rt->u.dst.dev)
		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
	/*
	 * Note to self - change this if input routes reverse direction when
	 * they deal only with inputs and not with replies like they do
	 * currently.
	 */
	RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
	if (rt->rt_daddr != rt->rt_gateway)
		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto rtattr_failure;
	ci.rta_lastuse  = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
	ci.rta_used     = rt->u.dst.__use;
	ci.rta_clntref  = atomic_read(&rt->u.dst.__refcnt);
	if (rt->u.dst.expires)
		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
	else
		ci.rta_expires = 0;
	ci.rta_error    = rt->u.dst.error;
	ci.rta_id       = ci.rta_ts = ci.rta_tsage = 0;
	RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
	if (rt->fl.iif)
		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

/*
 * This is called by both endnodes and routers now.
 */
int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb;
	int err;
	struct sk_buff *skb;
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	fl.proto = DNPROTO_NSP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		return -ENOBUFS;
	skb->mac.raw = skb->data;
	cb = DN_SKB_CB(skb);

	if (rta[RTA_SRC-1])
		memcpy(&fl.fld_src, RTA_DATA(rta[RTA_SRC-1]), 2);
	if (rta[RTA_DST-1])
		memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
	if (rta[RTA_IIF-1])
		memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));

	if (fl.iif) {
		struct net_device *dev;
		if ((dev = dev_get_by_index(fl.iif)) == NULL) {
			kfree_skb(skb);
			return -ENODEV;
		}
		if (!dev->dn_ptr) {
			dev_put(dev);
			kfree_skb(skb);
			return -ENODEV;
		}
		skb->protocol = __constant_htons(ETH_P_DNA_RT);
		skb->dev = dev;
		cb->src = fl.fld_src;
		cb->dst = fl.fld_dst;
		local_bh_disable();
		err = dn_route_input(skb);
		local_bh_enable();
		memset(cb, 0, sizeof(struct dn_skb_cb));
		rt = (struct dn_route *)skb->dst;
		if (!err && -rt->u.dst.error)
			err = rt->u.dst.error;
	} else {
		int oif = 0;
		if (rta[RTA_OIF - 1])
			memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
		fl.oif = oif;
		err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
	}

	if (skb->dev)
		dev_put(skb->dev);
	skb->dev = NULL;
	if (err)
		goto out_free;
	skb->dst = &rt->u.dst;
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;

	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);

	if (err == 0)
		goto out_free;
	if (err < 0) {
		err = -EMSGSIZE;
		goto out_free;
	}

	err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);

	return err;

out_free:
	kfree_skb(skb);
	return err;
}
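/*
 * In outline, dn_cache_getroute() above answers an RTM_GETROUTE request by
 * building a scratch skb, resolving it either through dn_route_input() (when
 * an input interface is supplied) or dn_route_output_key(), and then reusing
 * the same skb to carry the RTM_NEWROUTE reply built by dn_rt_fill_info()
 * back to the requester via netlink_unicast().
 */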
/*
 * For routers, this is called from dn_fib_dump, but for endnodes it's
 * called directly from the rtnetlink dispatch table.
 */
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct dn_route *rt;
	int h, s_h;
	int idx, s_idx;

	if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
		return -EINVAL;
	if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED))
		return 0;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for(h = 0; h <= dn_rt_hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		rcu_read_lock_bh();
		for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
		    rt;
		    rt = rcu_dereference(rt->u.rt_next), idx++) {
			if (idx < s_idx)
				continue;
			skb->dst = dst_clone(&rt->u.dst);
			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					    1, NLM_F_MULTI) <= 0) {
				dst_release(xchg(&skb->dst, NULL));
				rcu_read_unlock_bh();
				goto done;
			}
			dst_release(xchg(&skb->dst, NULL));
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}

#ifdef CONFIG_PROC_FS
struct dn_rt_cache_iter_state {
	int bucket;
};

static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
{
	struct dn_route *rt = NULL;
	struct dn_rt_cache_iter_state *s = seq->private;

	for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
		rcu_read_lock_bh();
		rt = dn_rt_hash_table[s->bucket].chain;
		if (rt)
			break;
		rcu_read_unlock_bh();
	}
	return rt;
}

static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
{
	struct dn_rt_cache_iter_state *s = rcu_dereference(seq->private);

	rt = rt->u.rt_next;
	while(!rt) {
		rcu_read_unlock_bh();
		if (--s->bucket < 0)
			break;
		rcu_read_lock_bh();
		rt = dn_rt_hash_table[s->bucket].chain;
	}
	return rt;
}

static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_first(seq);

	if (rt) {
		while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
			--*pos;
	}
	return *pos ? NULL : rt;
}

static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_next(seq, v);
	++*pos;
	return rt;
}

static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		rcu_read_unlock_bh();
}

static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
{
	struct dn_route *rt = v;
	char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];

	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
			rt->u.dst.dev ? rt->u.dst.dev->name : "*",
			dn_addr2asc(dn_ntohs(rt->rt_daddr), buf1),
			dn_addr2asc(dn_ntohs(rt->rt_saddr), buf2),
			atomic_read(&rt->u.dst.__refcnt),
			rt->u.dst.__use,
			(int) dst_metric(&rt->u.dst, RTAX_RTT));
	return 0;
}

static struct seq_operations dn_rt_cache_seq_ops = {
	.start = dn_rt_cache_seq_start,
	.next  = dn_rt_cache_seq_next,
	.stop  = dn_rt_cache_seq_stop,
	.show  = dn_rt_cache_seq_show,
};

static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct dn_rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	rc = seq_open(file, &dn_rt_cache_seq_ops);
	if (rc)
		goto out_kfree;
	seq		= file->private_data;
	seq->private	= s;
	memset(s, 0, sizeof(*s));
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

static struct file_operations dn_rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dn_rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* CONFIG_PROC_FS */

void __init dn_route_init(void)
{
	int i, goal, order;

	dn_dst_ops.kmem_cachep = kmem_cache_create("dn_dst_cache",
						   sizeof(struct dn_route),
						   0, SLAB_HWCACHE_ALIGN,
						   NULL, NULL);

	if (!dn_dst_ops.kmem_cachep)
		panic("DECnet: Failed to allocate dn_dst_cache\n");

	init_timer(&dn_route_timer);
	dn_route_timer.function = dn_dst_check_expire;
	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
	add_timer(&dn_route_timer);

	goal = num_physpages >> (26 - PAGE_SHIFT);

	for(order = 0; (1UL << order) < goal; order++)
		/* NOTHING */;

	/*
	 * Only want 1024 entries max, since the table is very, very unlikely
	 * to be larger than that.
	 */
	while(order && ((((1UL << order) * PAGE_SIZE) /
				sizeof(struct dn_rt_hash_bucket)) >= 2048))
		order--;

	do {
		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
			sizeof(struct dn_rt_hash_bucket);
		while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
			dn_rt_hash_mask--;
		dn_rt_hash_table = (struct dn_rt_hash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dn_rt_hash_table == NULL && --order > 0);

	if (!dn_rt_hash_table)
		panic("Failed to allocate DECnet route cache hash table\n");

	printk(KERN_INFO
		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
		dn_rt_hash_mask,
		(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);

	dn_rt_hash_mask--;
	for(i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_init(&dn_rt_hash_table[i].lock);
		dn_rt_hash_table[i].chain = NULL;
	}

	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

	proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
}

void __exit dn_route_cleanup(void)
{
	del_timer(&dn_route_timer);
	dn_run_flush(0);

	proc_net_remove("decnet_cache");
}