net/decnet/dn_route.c (Linux kernel v2.6.12-rc5, 1840 lines, 46 kB) from git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Functions (Endnode and Router)
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *              Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                                 "return-to-sender" bits on outgoing
 *                                 packets.
 *              Steve Whitehouse : Timeouts for cached routes.
 *              Steve Whitehouse : Use dst cache for input routes too.
 *              Steve Whitehouse : Fixed error values in dn_send_skb.
 *              Steve Whitehouse : Rework routing functions to better fit
 *                                 DECnet routing design
 *              Alexey Kuznetsov : New SMP locking
 *              Steve Whitehouse : More SMP locking changes & dn_cache_dump()
 *              Steve Whitehouse : Prerouting NF hook, now really is prerouting.
 *                                 Fixed possible skb leak in rtnetlink funcs.
 *              Steve Whitehouse : Dave Miller's dynamic hash table sizing and
 *                                 Alexey Kuznetsov's finer grained locking
 *                                 from ipv4/route.c.
 *              Steve Whitehouse : Routing is now starting to look like a
 *                                 sensible set of code now, mainly due to
 *                                 my copying the IPv4 routing code. The
 *                                 hooks here are modified and will continue
 *                                 to evolve for a while.
 *              Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                                 stuff. Look out raw sockets your days
 *                                 are numbered!
 *              Steve Whitehouse : Added return-to-sender functions. Added
 *                                 backlog congestion level return codes.
 *              Steve Whitehouse : Fixed bug where routes were set up with
 *                                 no ref count on net devices.
 *              Steve Whitehouse : RCU for the route cache
 *              Steve Whitehouse : Preparations for the flow cache
 *              Steve Whitehouse : Prepare for nonlinear skbs
 */

/******************************************************************************
    (c) 1995-1998 E.M. Serrat           emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*******************************************************************************/
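
/*
 * Overview: this file implements the DECnet routing cache. Cached
 * routes live in dn_rt_hash_table, a table of chains hashed on the
 * (src,dst) address pair. Writers take the per-bucket spinlock;
 * readers walk the chains under rcu_read_lock_bh(). Entries are aged
 * out by dn_route_timer (dn_dst_check_expire) and, under memory
 * pressure, by the dst core calling dn_dst_gc().
 */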

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/in_route.h>
#include <net/sock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <asm/errno.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>

struct dn_rt_hash_bucket
{
	struct dn_route *chain;
	spinlock_t lock;
} __attribute__((__aligned__(8)));

extern struct neigh_table dn_neigh_table;


static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};

static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;

static int dn_dst_gc(void);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);

static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static struct timer_list dn_rt_flush_timer =
		TIMER_INITIALIZER(dn_run_flush, 0, 0);
int decnet_dst_gc_interval = 2;

static struct dst_ops dn_dst_ops = {
	.family =		PF_DECnet,
	.protocol =		__constant_htons(ETH_P_DNA_RT),
	.gc_thresh =		128,
	.gc =			dn_dst_gc,
	.check =		dn_dst_check,
	.negative_advice =	dn_dst_negative_advice,
	.link_failure =		dn_dst_link_failure,
	.update_pmtu =		dn_dst_update_pmtu,
	.entry_size =		sizeof(struct dn_route),
	.entries =		ATOMIC_INIT(0),
};

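/*
 * The hash key is the XOR of the source and destination addresses,
 * folded by a few shifted XORs and then masked with dn_rt_hash_mask
 * (table size minus one, always a power of two). For example, with
 * src = 0x0401 and dst = 0x0404 the initial tmp is 0x0005; the three
 * folds leave it unchanged here, giving bucket 5 for any mask >= 7.
 */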
static __inline__ unsigned dn_hash(unsigned short src, unsigned short dst)
{
	unsigned short tmp = src ^ dst;
	tmp ^= (tmp >> 3);
	tmp ^= (tmp >> 5);
	tmp ^= (tmp >> 10);
	return dn_rt_hash_mask & (unsigned)tmp;
}

static inline void dnrt_free(struct dn_route *rt)
{
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static inline void dnrt_drop(struct dn_route *rt)
{
	if (rt)
		dst_release(&rt->u.dst);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

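/*
 * Two cleanup paths follow. dn_dst_check_expire() runs from
 * dn_route_timer every decnet_dst_gc_interval seconds and frees
 * entries that are unreferenced and idle for more than 120 seconds,
 * using dnrt_free() (the actual free is deferred via RCU).
 * dn_dst_gc() is called by the dst core once the cache grows past
 * gc_thresh; it uses a much shorter idle limit (10 seconds) but drops
 * at most one entry per bucket per call, via dnrt_drop(), which also
 * releases the reference.
 */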
static void dn_dst_check_expire(unsigned long dummy)
{
	int i;
	struct dn_route *rt, **rtp;
	unsigned long now = jiffies;
	unsigned long expire = 120 * HZ;

	for(i = 0; i <= dn_rt_hash_mask; i++) {
		rtp = &dn_rt_hash_table[i].chain;

		spin_lock(&dn_rt_hash_table[i].lock);
		while((rt=*rtp) != NULL) {
			if (atomic_read(&rt->u.dst.__refcnt) ||
					(now - rt->u.dst.lastuse) < expire) {
				rtp = &rt->u.rt_next;
				continue;
			}
			*rtp = rt->u.rt_next;
			rt->u.rt_next = NULL;
			dnrt_free(rt);
		}
		spin_unlock(&dn_rt_hash_table[i].lock);

		if ((jiffies - now) > 0)
			break;
	}

	mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
}

static int dn_dst_gc(void)
{
	struct dn_route *rt, **rtp;
	int i;
	unsigned long now = jiffies;
	unsigned long expire = 10 * HZ;

	for(i = 0; i <= dn_rt_hash_mask; i++) {

		spin_lock_bh(&dn_rt_hash_table[i].lock);
		rtp = &dn_rt_hash_table[i].chain;

		while((rt=*rtp) != NULL) {
			if (atomic_read(&rt->u.dst.__refcnt) ||
					(now - rt->u.dst.lastuse) < expire) {
				rtp = &rt->u.rt_next;
				continue;
			}
			*rtp = rt->u.rt_next;
			rt->u.rt_next = NULL;
			dnrt_drop(rt);
			break;
		}
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}

	return 0;
}

/*
 * The decnet standards don't impose a particular minimum mtu, what they
 * do insist on is that the routing layer accepts a datagram of at least
 * 230 bytes. Here we have to subtract the routing header length from
 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we
 * assume the worst and use a long header size.
 *
 * We update both the mtu and the advertised mss (i.e. the segment size we
 * advertise to the other end).
 */
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	u32 min_mtu = 230;
	struct dn_dev *dn = dst->neighbour ?
			    (struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL;

	if (dn && dn->use_long == 0)
		min_mtu -= 6;
	else
		min_mtu -= 21;

	if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= min_mtu) {
		if (!(dst_metric_locked(dst, RTAX_MTU))) {
			dst->metrics[RTAX_MTU-1] = mtu;
			dst_set_expires(dst, dn_rt_mtu_expires);
		}
		if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
			u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
			if (dst->metrics[RTAX_ADVMSS-1] > mss)
				dst->metrics[RTAX_ADVMSS-1] = mss;
		}
	}
}

/*
 * Called when a route has been marked obsolete (e.g. by a routing cache flush).
 */
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
{
	return NULL;
}

static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
{
	dst_release(dst);
	return NULL;
}

static void dn_dst_link_failure(struct sk_buff *skb)
{
	return;
}

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return memcmp(&fl1->nl_u.dn_u, &fl2->nl_u.dn_u, sizeof(fl1->nl_u.dn_u)) == 0 &&
		fl1->oif == fl2->oif &&
		fl1->iif == fl2->iif;
}

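/*
 * Insert a freshly allocated route into the hash table. If an entry
 * with an identical flow key (see compare_keys() above) is already
 * chained, that entry is moved to the front of its chain and returned
 * in *rp instead, and the new route is dropped. Otherwise the new
 * route is linked in at the head; rcu_assign_pointer() orders the
 * stores so that lockless readers never see a half-linked entry.
 */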
static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
{
	struct dn_route *rth, **rthp;
	unsigned long now = jiffies;

	rthp = &dn_rt_hash_table[hash].chain;

	spin_lock_bh(&dn_rt_hash_table[hash].lock);
	while((rth = *rthp) != NULL) {
		if (compare_keys(&rth->fl, &rt->fl)) {
			/* Put it first */
			*rthp = rth->u.rt_next;
			rcu_assign_pointer(rth->u.rt_next,
					   dn_rt_hash_table[hash].chain);
			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

			rth->u.dst.__use++;
			dst_hold(&rth->u.dst);
			rth->u.dst.lastuse = now;
			spin_unlock_bh(&dn_rt_hash_table[hash].lock);

			dnrt_drop(rt);
			*rp = rth;
			return 0;
		}
		rthp = &rth->u.rt_next;
	}

	rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain);
	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

	dst_hold(&rt->u.dst);
	rt->u.dst.__use++;
	rt->u.dst.lastuse = now;
	spin_unlock_bh(&dn_rt_hash_table[hash].lock);
	*rp = rt;
	return 0;
}

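/*
 * Full cache flush. dn_run_flush() empties the bucket chains
 * synchronously (note it walks buckets 0..dn_rt_hash_mask-1, unlike
 * the expiry walks above, which use i <= dn_rt_hash_mask).
 * dn_rt_cache_flush() batches requests: a negative delay means "use
 * dn_rt_min_delay", a non-positive result flushes immediately, and
 * otherwise the flush timer is armed so that the flush happens no
 * later than dn_rt_max_delay after the first pending request.
 */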
void dn_run_flush(unsigned long dummy)
{
	int i;
	struct dn_route *rt, *next;

	for(i = 0; i < dn_rt_hash_mask; i++) {
		spin_lock_bh(&dn_rt_hash_table[i].lock);

		if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
			goto nothing_to_declare;

		for(; rt; rt=next) {
			next = rt->u.rt_next;
			rt->u.rt_next = NULL;
			dst_free((struct dst_entry *)rt);
		}

nothing_to_declare:
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}
}

static DEFINE_SPINLOCK(dn_rt_flush_lock);

void dn_rt_cache_flush(int delay)
{
	unsigned long now = jiffies;
	int user_mode = !in_interrupt();

	if (delay < 0)
		delay = dn_rt_min_delay;

	spin_lock_bh(&dn_rt_flush_lock);

	if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
		long tmo = (long)(dn_rt_deadline - now);

		if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
			tmo = 0;

		if (delay > tmo)
			delay = tmo;
	}

	if (delay <= 0) {
		spin_unlock_bh(&dn_rt_flush_lock);
		dn_run_flush(0);
		return;
	}

	if (dn_rt_deadline == 0)
		dn_rt_deadline = now + dn_rt_max_delay;

	dn_rt_flush_timer.expires = now + delay;
	add_timer(&dn_rt_flush_timer);
	spin_unlock_bh(&dn_rt_flush_lock);
}

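/*
 * Return-to-sender support. When a packet that asked for it (the
 * DN_RT_F_RQR flag) cannot be delivered, the two helpers below turn
 * it around in place: the routing header is re-exposed, RQR is
 * cleared and DN_RT_F_RTS ("returning to sender") is set, the source
 * and destination addresses are swapped, the hop count is zeroed and
 * the skb is pushed back out via dn_rt_finish_output().
 */
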
/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 *
 */
static int dn_return_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	dn_address *src;
	dn_address *dst;
	dn_address tmp;

	/* Add back headers */
	skb_push(skb, skb->data - skb->nh.raw);

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Skip packet length and point to flags */
	ptr = skb->data + 2;
	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;

	dst = (dn_address *)ptr;
	ptr += 2;
	src = (dn_address *)ptr;
	ptr += 2;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	tmp  = *src;
	*src = *dst;
	*dst = tmp;

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, NULL, NULL);
	return NET_RX_SUCCESS;
}

/**
 * dn_return_long - Return a long packet to its sender
 * @skb: The long format packet to return
 *
 */
static int dn_return_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	unsigned char *src_addr, *dst_addr;
	unsigned char tmp[ETH_ALEN];

	/* Add back all headers */
	skb_push(skb, skb->data - skb->nh.raw);

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Ignore packet length and point to flags */
	ptr = skb->data + 2;

	/* Skip padding */
	if (*ptr & DN_RT_F_PF) {
		char padlen = (*ptr & ~DN_RT_F_PF);
		ptr += padlen;
	}

	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
	ptr += 2;
	dst_addr = ptr;
	ptr += 8;
	src_addr = ptr;
	ptr += 6;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	memcpy(tmp, src_addr, ETH_ALEN);
	memcpy(src_addr, dst_addr, ETH_ALEN);
	memcpy(dst_addr, tmp, ETH_ALEN);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, dst_addr, src_addr);
	return NET_RX_SUCCESS;
}

/**
 * dn_route_rx_packet - Try and find a route for an incoming packet
 * @skb: The packet to find a route for
 *
 * Returns: result of input function if route is found, error code otherwise
 */
static int dn_route_rx_packet(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	int err;

	if ((err = dn_route_input(skb)) == 0)
		return dst_input(skb);

	if (decnet_debug_level & 4) {
		char *devname = skb->dev ? skb->dev->name : "???";
		struct dn_skb_cb *cb = DN_SKB_CB(skb);
		printk(KERN_DEBUG
			"DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
			(int)cb->rt_flags, devname, skb->len, cb->src, cb->dst,
			err, skb->pkt_type);
	}

	if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
		switch(cb->rt_flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_SHORT:
			return dn_return_short(skb);
		case DN_RT_PKT_LONG:
			return dn_return_long(skb);
		}
	}

	kfree_skb(skb);
	return NET_RX_DROP;
}

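/*
 * Data packet parsing. The long header carries the destination and
 * source as 6-byte Ethernet-style identifiers (preceded by area
 * bytes): the first four bytes must match the DECnet prefix
 * aa:00:04:00 (dn_hiord_addr) and the last two hold the node address,
 * which dn_eth2dn() extracts. The short header carries bare 2-byte
 * destination and source addresses followed by a forwarding byte
 * whose low six bits are the visit count.
 */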
static int dn_route_rx_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 20);
	skb->h.raw = skb->data;

	/* Destination info */
	ptr += 2;
	cb->dst = dn_htons(dn_eth2dn(ptr));
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;


	/* Source info */
	ptr += 2;
	cb->src = dn_htons(dn_eth2dn(ptr));
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;
	/* Other junk */
	ptr++;
	cb->hops = *ptr++; /* Visit Count */

	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}



static int dn_route_rx_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 5);
	skb->h.raw = skb->data;

	cb->dst = *(dn_address *)ptr;
	ptr += 2;
	cb->src = *(dn_address *)ptr;
	ptr += 2;
	cb->hops = *ptr & 0x3f;

	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int dn_route_discard(struct sk_buff *skb)
{
	/*
	 * I know we drop the packet here, but that's considered success in
	 * this case
	 */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int dn_route_ptp_hello(struct sk_buff *skb)
{
	dn_dev_hello(skb);
	dn_neigh_pointopoint_hello(skb);
	return NET_RX_SUCCESS;
}

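/*
 * dn_route_rcv() is the packet_type entry point for ETH_P_DNA_RT
 * frames. It trims the skb to the length given in the 2-byte header,
 * strips any padding and drops packets with the version flag set.
 * Control packets are fed through the NF_DN_HELLO/NF_DN_ROUTE
 * netfilter hooks to the hello and routing handlers; data packets go
 * to dn_route_rx_long()/dn_route_rx_short() above, which in turn
 * enter NF_DN_PRE_ROUTING.
 */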
int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt)
{
	struct dn_skb_cb *cb;
	unsigned char flags = 0;
	__u16 len = dn_ntohs(*(__u16 *)skb->data);
	struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
	unsigned char padlen = 0;

	if (dn == NULL)
		goto dump_it;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out;

	if (!pskb_may_pull(skb, 3))
		goto dump_it;

	skb_pull(skb, 2);

	if (len > skb->len)
		goto dump_it;

	skb_trim(skb, len);

	flags = *skb->data;

	cb = DN_SKB_CB(skb);
	cb->stamp = jiffies;
	cb->iif = dev->ifindex;

	/*
	 * If we have padding, remove it.
	 */
	if (flags & DN_RT_F_PF) {
		padlen = flags & ~DN_RT_F_PF;
		if (!pskb_may_pull(skb, padlen + 1))
			goto dump_it;
		skb_pull(skb, padlen);
		flags = *skb->data;
	}

	skb->nh.raw = skb->data;

	/*
	 * Weed out future version DECnet
	 */
	if (flags & DN_RT_F_VER)
		goto dump_it;

	cb->rt_flags = flags;

	if (decnet_debug_level & 1)
		printk(KERN_DEBUG
			"dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
			(int)flags, (dev) ? dev->name : "???", len, skb->len,
			padlen);

	if (flags & DN_RT_PKT_CNTL) {
		if (unlikely(skb_is_nonlinear(skb)) &&
		    skb_linearize(skb, GFP_ATOMIC) != 0)
			goto dump_it;

		switch(flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_INIT:
			dn_dev_init_pkt(skb);
			break;
		case DN_RT_PKT_VERI:
			dn_dev_veri_pkt(skb);
			break;
		}

		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		switch(flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_HELO:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);

		case DN_RT_PKT_L1RT:
		case DN_RT_PKT_L2RT:
			return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
		case DN_RT_PKT_ERTH:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);

		case DN_RT_PKT_EEDH:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
		}
	} else {
		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		skb_pull(skb, 1); /* Pull flags */

		switch(flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_LONG:
			return dn_route_rx_long(skb);
		case DN_RT_PKT_SHORT:
			return dn_route_rx_short(skb);
		}
	}

dump_it:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}

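/*
 * Output and forwarding. dn_output() handles locally originated
 * packets: the intra-ethernet bit is always set and the hop count
 * zeroed before the packet passes NF_DN_LOCAL_OUT on its way to
 * neigh->output. dn_forward() below enforces the hop limit of 30 and
 * sets the intra-ethernet bit only when the packet leaves through the
 * interface it arrived on (signalled by RTCF_DOREDIRECT).
 */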
static int dn_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct dn_route *rt = (struct dn_route *)dst;
	struct net_device *dev = dst->dev;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct neighbour *neigh;

	int err = -EINVAL;

	if ((neigh = dst->neighbour) == NULL)
		goto error;

	skb->dev = dev;

	cb->src = rt->rt_saddr;
	cb->dst = rt->rt_daddr;

	/*
	 * Always set the Intra-Ethernet bit on all outgoing packets
	 * originated on this node. Only valid flag from upper layers
	 * is return-to-sender-requested. Set hop count to 0 too.
	 */
	cb->rt_flags &= ~DN_RT_F_RQR;
	cb->rt_flags |= DN_RT_F_IE;
	cb->hops = 0;

	return NF_HOOK(PF_DECnet, NF_DN_LOCAL_OUT, skb, NULL, dev, neigh->output);

error:
	if (net_ratelimit())
		printk(KERN_DEBUG "dn_output: This should not happen\n");

	kfree_skb(skb);

	return err;
}

static int dn_forward(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dst_entry *dst = skb->dst;
	struct dn_dev *dn_db = dst->dev->dn_ptr;
	struct dn_route *rt;
	struct neighbour *neigh = dst->neighbour;
	int header_len;
#ifdef CONFIG_NETFILTER
	struct net_device *dev = skb->dev;
#endif

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	/* Ensure that we have enough space for headers */
	rt = (struct dn_route *)skb->dst;
	header_len = dn_db->use_long ? 21 : 6;
	if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len))
		goto drop;

	/*
	 * Hop count exceeded.
	 */
	if (++cb->hops > 30)
		goto drop;

	skb->dev = rt->u.dst.dev;

	/*
	 * If packet goes out same interface it came in on, then set
	 * the Intra-Ethernet bit. This has no effect for short
	 * packets, so we don't need to test for them here.
	 */
	cb->rt_flags &= ~DN_RT_F_IE;
	if (rt->rt_flags & RTCF_DOREDIRECT)
		cb->rt_flags |= DN_RT_F_IE;

	return NF_HOOK(PF_DECnet, NF_DN_FORWARD, skb, dev, skb->dev, neigh->output);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * Drop packet. This is used for endnodes and for
 * when we should not be forwarding packets from
 * this dest.
 */
static int dn_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * Used to catch bugs. This should never normally get
 * called.
 */
static int dn_rt_bug(struct sk_buff *skb)
{
	if (net_ratelimit()) {
		struct dn_skb_cb *cb = DN_SKB_CB(skb);

		printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
				cb->src, cb->dst);
	}

	kfree_skb(skb);

	return NET_RX_BAD;
}

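/*
 * Bind a route to its next hop: take the gateway and metrics from the
 * fib result, attach a neighbour entry for the gateway, then clamp
 * the route MTU to the device MTU and the advertised MSS to what the
 * path MTU allows (dn_mss_from_pmtu()).
 */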
static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
	struct dn_fib_info *fi = res->fi;
	struct net_device *dev = rt->u.dst.dev;
	struct neighbour *n;
	unsigned mss;

	if (fi) {
		if (DN_FIB_RES_GW(*res) &&
		    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = DN_FIB_RES_GW(*res);
		memcpy(rt->u.dst.metrics, fi->fib_metrics,
		       sizeof(rt->u.dst.metrics));
	}
	rt->rt_type = res->type;

	if (dev != NULL && rt->u.dst.neighbour == NULL) {
		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		rt->u.dst.neighbour = n;
	}

	if (rt->u.dst.metrics[RTAX_MTU-1] == 0 ||
	    rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
		rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
	mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
	if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 ||
	    rt->u.dst.metrics[RTAX_ADVMSS-1] > mss)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = mss;
	return 0;
}

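/*
 * Source address selection. dn_match_addr() returns how many leading
 * bits two addresses have in common: for an XOR of 0x0005 the loop
 * shifts three times, so the result is 16 - 3 = 13. Note that
 * dnet_select_source() never advances best_match, so the
 * "ret > best_match" test succeeds for every in-scope address and the
 * last one examined effectively wins.
 */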
static inline int dn_match_addr(__u16 addr1, __u16 addr2)
{
	__u16 tmp = dn_ntohs(addr1) ^ dn_ntohs(addr2);
	int match = 16;
	while(tmp) {
		tmp >>= 1;
		match--;
	}
	return match;
}

static __u16 dnet_select_source(const struct net_device *dev, __u16 daddr, int scope)
{
	__u16 saddr = 0;
	struct dn_dev *dn_db = dev->dn_ptr;
	struct dn_ifaddr *ifa;
	int best_match = 0;
	int ret;

	read_lock(&dev_base_lock);
	for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
		if (ifa->ifa_scope > scope)
			continue;
		if (!daddr) {
			saddr = ifa->ifa_local;
			break;
		}
		ret = dn_match_addr(daddr, ifa->ifa_local);
		if (ret > best_match)
			saddr = ifa->ifa_local;
		if (best_match == 0)
			saddr = ifa->ifa_local;
	}
	read_unlock(&dev_base_lock);

	return saddr;
}

static inline __u16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
{
	return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
}

static inline __u16 dn_fib_rules_map_destination(__u16 daddr, struct dn_fib_res *res)
{
	__u16 mask = dnet_make_mask(res->prefixlen);
	return (daddr&~mask)|res->fi->fib_nh->nh_gw;
}

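/*
 * Slow path for output routes. Any requested output device must be a
 * DECnet device and any requested source address must be local; then
 * a fib lookup is tried. If it fails (always the case on an endnode,
 * where dn_fib_lookup() is a stub) the endnode algorithm applies:
 * consult the neighbour cache, then check for a local destination,
 * then fall back to the default router, and finally assume the
 * destination is directly connected. The finished route is added to
 * the cache with dn_insert_route().
 */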
static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
{
	struct flowi fl = { .nl_u = { .dn_u =
				      { .daddr = oldflp->fld_dst,
					.saddr = oldflp->fld_src,
					.scope = RT_SCOPE_UNIVERSE,
#ifdef CONFIG_DECNET_ROUTE_FWMARK
					.fwmark = oldflp->fld_fwmark
#endif
				     } },
			    .iif = loopback_dev.ifindex,
			    .oif = oldflp->oif };
	struct dn_route *rt = NULL;
	struct net_device *dev_out = NULL;
	struct neighbour *neigh = NULL;
	unsigned hash;
	unsigned flags = 0;
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
	int err;
	int free_res = 0;
	__u16 gateway = 0;

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
		       " iif=%d oif=%d\n", oldflp->fld_dst, oldflp->fld_src,
		       oldflp->fld_fwmark, loopback_dev.ifindex, oldflp->oif);

	/* If we have an output interface, verify it's a DECnet device */
	if (oldflp->oif) {
		dev_out = dev_get_by_index(oldflp->oif);
		err = -ENODEV;
		if (dev_out && dev_out->dn_ptr == NULL) {
			dev_put(dev_out);
			dev_out = NULL;
		}
		if (dev_out == NULL)
			goto out;
	}

	/* If we have a source address, verify that it's a local address */
	if (oldflp->fld_src) {
		err = -EADDRNOTAVAIL;

		if (dev_out) {
			if (dn_dev_islocal(dev_out, oldflp->fld_src))
				goto source_ok;
			dev_put(dev_out);
			goto out;
		}
		read_lock(&dev_base_lock);
		for(dev_out = dev_base; dev_out; dev_out = dev_out->next) {
			if (!dev_out->dn_ptr)
				continue;
			if (dn_dev_islocal(dev_out, oldflp->fld_src))
				break;
		}
		read_unlock(&dev_base_lock);
		if (dev_out == NULL)
			goto out;
		dev_hold(dev_out);
source_ok:
		;
	}

	/* No destination? Assume it's local */
	if (!fl.fld_dst) {
		fl.fld_dst = fl.fld_src;

		err = -EADDRNOTAVAIL;
		if (dev_out)
			dev_put(dev_out);
		dev_out = &loopback_dev;
		dev_hold(dev_out);
		if (!fl.fld_dst) {
			fl.fld_dst =
			fl.fld_src = dnet_select_source(dev_out, 0,
							RT_SCOPE_HOST);
			if (!fl.fld_dst)
				goto out;
		}
		fl.oif = loopback_dev.ifindex;
		res.type = RTN_LOCAL;
		goto make_route;
	}

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: initial checks complete."
		       " dst=%04x src=%04x oif=%d try_hard=%d\n", fl.fld_dst,
		       fl.fld_src, fl.oif, try_hard);

	/*
	 * N.B. If the kernel is compiled without router support then
	 * dn_fib_lookup() will evaluate to non-zero so this if () block
	 * will always be executed.
	 */
	err = -ESRCH;
	if (try_hard || (err = dn_fib_lookup(&fl, &res)) != 0) {
		struct dn_dev *dn_db;
		if (err != -ESRCH)
			goto out;
		/*
		 * Here the fallback is basically the standard algorithm for
		 * routing in endnodes which is described in the DECnet routing
		 * docs
		 *
		 * If we are not trying hard, look in neighbour cache.
		 * The result is tested to ensure that if a specific output
		 * device/source address was requested, then we honour that
		 * here
		 */
		if (!try_hard) {
			neigh = neigh_lookup_nodev(&dn_neigh_table, &fl.fld_dst);
			if (neigh) {
				if ((oldflp->oif &&
				    (neigh->dev->ifindex != oldflp->oif)) ||
				    (oldflp->fld_src &&
				    (!dn_dev_islocal(neigh->dev,
						     oldflp->fld_src)))) {
					neigh_release(neigh);
					neigh = NULL;
				} else {
					if (dev_out)
						dev_put(dev_out);
					if (dn_dev_islocal(neigh->dev, fl.fld_dst)) {
						dev_out = &loopback_dev;
						res.type = RTN_LOCAL;
					} else {
						dev_out = neigh->dev;
					}
					dev_hold(dev_out);
					goto select_source;
				}
			}
		}

		/* Not there? Perhaps it's a local address */
		if (dev_out == NULL)
			dev_out = dn_dev_get_default();
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;
		dn_db = dev_out->dn_ptr;
		/* Possible improvement - check all devices for local addr */
		if (dn_dev_islocal(dev_out, fl.fld_dst)) {
			dev_put(dev_out);
			dev_out = &loopback_dev;
			dev_hold(dev_out);
			res.type = RTN_LOCAL;
			goto select_source;
		}
		/* Not local either.... try sending it to the default router */
		neigh = neigh_clone(dn_db->router);
		BUG_ON(neigh && neigh->dev != dev_out);

		/* Ok then, we assume it's directly connected and move on */
select_source:
		if (neigh)
			gateway = ((struct dn_neigh *)neigh)->addr;
		if (gateway == 0)
			gateway = fl.fld_dst;
		if (fl.fld_src == 0) {
			fl.fld_src = dnet_select_source(dev_out, gateway,
							res.type == RTN_LOCAL ?
							RT_SCOPE_HOST :
							RT_SCOPE_LINK);
			if (fl.fld_src == 0 && res.type != RTN_LOCAL)
				goto e_addr;
		}
		fl.oif = dev_out->ifindex;
		goto make_route;
	}
	free_res = 1;

	if (res.type == RTN_NAT)
		goto e_inval;

	if (res.type == RTN_LOCAL) {
		if (!fl.fld_src)
			fl.fld_src = fl.fld_dst;
		if (dev_out)
			dev_put(dev_out);
		dev_out = &loopback_dev;
		dev_hold(dev_out);
		fl.oif = dev_out->ifindex;
		if (res.fi)
			dn_fib_info_put(res.fi);
		res.fi = NULL;
		goto make_route;
	}

	if (res.fi->fib_nhs > 1 && fl.oif == 0)
		dn_fib_select_multipath(&fl, &res);

	/*
	 * We could add some logic to deal with default routes here and
	 * get rid of some of the special casing above.
	 */

	if (!fl.fld_src)
		fl.fld_src = DN_FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = DN_FIB_RES_DEV(res);
	dev_hold(dev_out);
	fl.oif = dev_out->ifindex;
	gateway = DN_FIB_RES_GW(res);

make_route:
	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	rt = dst_alloc(&dn_dst_ops);
	if (rt == NULL)
		goto e_nobufs;

	atomic_set(&rt->u.dst.__refcnt, 1);
	rt->u.dst.flags = DST_HOST;

	rt->fl.fld_src = oldflp->fld_src;
	rt->fl.fld_dst = oldflp->fld_dst;
	rt->fl.oif = oldflp->oif;
	rt->fl.iif = 0;
#ifdef CONFIG_DECNET_ROUTE_FWMARK
	rt->fl.fld_fwmark = oldflp->fld_fwmark;
#endif

	rt->rt_saddr = fl.fld_src;
	rt->rt_daddr = fl.fld_dst;
	rt->rt_gateway = gateway ? gateway : fl.fld_dst;
	rt->rt_local_src = fl.fld_src;

	rt->rt_dst_map = fl.fld_dst;
	rt->rt_src_map = fl.fld_src;

	rt->u.dst.dev = dev_out;
	dev_hold(dev_out);
	rt->u.dst.neighbour = neigh;
	neigh = NULL;

	rt->u.dst.lastuse = jiffies;
	rt->u.dst.output = dn_output;
	rt->u.dst.input = dn_rt_bug;
	rt->rt_flags = flags;
	if (flags & RTCF_LOCAL)
		rt->u.dst.input = dn_nsp_rx;

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
	dn_insert_route(rt, hash, (struct dn_route **)pprt);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:
	return err;

e_addr:
	err = -EADDRNOTAVAIL;
	goto done;
e_inval:
	err = -EINVAL;
	goto done;
e_nobufs:
	err = -ENOBUFS;
	goto done;
e_neighbour:
	dst_free(&rt->u.dst);
	goto e_nobufs;
}

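/*
 * Fast path for output routes: a lockless walk of the hash chain
 * picked by dn_hash(), matching on destination, source, fwmark and
 * oif (iif must be zero for an output route). A hit takes a reference
 * and updates lastuse; a miss, or MSG_TRYHARD, falls through to
 * dn_route_output_slow().
 */
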
/*
 * N.B. The flags may be moved into the flowi at some future stage.
 */
static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *flp, int flags)
{
	unsigned hash = dn_hash(flp->fld_src, flp->fld_dst);
	struct dn_route *rt = NULL;

	if (!(flags & MSG_TRYHARD)) {
		rcu_read_lock_bh();
		for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
			rt = rcu_dereference(rt->u.rt_next)) {
			if ((flp->fld_dst == rt->fl.fld_dst) &&
			    (flp->fld_src == rt->fl.fld_src) &&
#ifdef CONFIG_DECNET_ROUTE_FWMARK
			    (flp->fld_fwmark == rt->fl.fld_fwmark) &&
#endif
			    (rt->fl.iif == 0) &&
			    (rt->fl.oif == flp->oif)) {
				rt->u.dst.lastuse = jiffies;
				dst_hold(&rt->u.dst);
				rt->u.dst.__use++;
				rcu_read_unlock_bh();
				*pprt = &rt->u.dst;
				return 0;
			}
		}
		rcu_read_unlock_bh();
	}

	return dn_route_output_slow(pprt, flp, flags);
}

static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, flp, flags);
	if (err == 0 && flp->proto) {
		err = xfrm_lookup(pprt, flp, NULL, 0);
	}
	return err;
}

int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock *sk, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
	if (err == 0 && fl->proto) {
		err = xfrm_lookup(pprt, fl, sk, !(flags & MSG_DONTWAIT));
	}
	return err;
}

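/*
 * Slow path for input routes, the mirror image of the output slow
 * path above: refuse packets whose source is zero or claims to be one
 * of our own addresses, look the flow up in the fib (falling back to
 * RTN_LOCAL when the destination is ours), apply any NAT remapping,
 * and wire u.dst.input up to dn_forward(), dn_nsp_rx() or
 * dn_blackhole() according to the route type before caching the
 * result.
 */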
static int dn_route_input_slow(struct sk_buff *skb)
{
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct net_device *in_dev = skb->dev;
	struct net_device *out_dev = NULL;
	struct dn_dev *dn_db;
	struct neighbour *neigh = NULL;
	unsigned hash;
	int flags = 0;
	__u16 gateway = 0;
	__u16 local_src = 0;
	struct flowi fl = { .nl_u = { .dn_u =
				     { .daddr = cb->dst,
				       .saddr = cb->src,
				       .scope = RT_SCOPE_UNIVERSE,
#ifdef CONFIG_DECNET_ROUTE_FWMARK
				       .fwmark = skb->nfmark
#endif
				    } },
			    .iif = skb->dev->ifindex };
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
	int err = -EINVAL;
	int free_res = 0;

	dev_hold(in_dev);

	if ((dn_db = in_dev->dn_ptr) == NULL)
		goto out;

	/* Zero source addresses are not allowed */
	if (fl.fld_src == 0)
		goto out;

	/*
	 * In this case we've just received a packet from a source
	 * outside ourselves pretending to come from us. We don't
	 * allow it any further to prevent routing loops, spoofing and
	 * other nasties. Loopback packets already have the dst attached
	 * so this only affects packets which have originated elsewhere.
	 */
	err = -ENOTUNIQ;
	if (dn_dev_islocal(in_dev, cb->src))
		goto out;

	err = dn_fib_lookup(&fl, &res);
	if (err) {
		if (err != -ESRCH)
			goto out;
		/*
		 * Is the destination us ?
		 */
		if (!dn_dev_islocal(in_dev, cb->dst))
			goto e_inval;

		res.type = RTN_LOCAL;
		flags |= RTCF_DIRECTSRC;
	} else {
		__u16 src_map = fl.fld_src;
		free_res = 1;

		out_dev = DN_FIB_RES_DEV(res);
		if (out_dev == NULL) {
			if (net_ratelimit())
				printk(KERN_CRIT "Bug in dn_route_input_slow() "
						 "No output device\n");
			goto e_inval;
		}
		dev_hold(out_dev);

		if (res.r)
			src_map = dn_fib_rules_policy(fl.fld_src, &res, &flags);

		gateway = DN_FIB_RES_GW(res);
		if (res.type == RTN_NAT) {
			fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res);
			dn_fib_res_put(&res);
			free_res = 0;
			if (dn_fib_lookup(&fl, &res))
				goto e_inval;
			free_res = 1;
			if (res.type != RTN_UNICAST)
				goto e_inval;
			flags |= RTCF_DNAT;
			gateway = fl.fld_dst;
		}
		fl.fld_src = src_map;
	}

	switch(res.type) {
	case RTN_UNICAST:
		/*
		 * Forwarding check here, we only check for forwarding
		 * being turned off, if you want to only forward intra
		 * area, it's up to you to set the routing tables up
		 * correctly.
		 */
		if (dn_db->parms.forwarding == 0)
			goto e_inval;

		if (res.fi->fib_nhs > 1 && fl.oif == 0)
			dn_fib_select_multipath(&fl, &res);

		/*
		 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
		 * flag as a hint to set the intra-ethernet bit when
		 * forwarding. If we've got NAT in operation, we don't do
		 * this optimisation.
		 */
		if (out_dev == in_dev && !(flags & RTCF_NAT))
			flags |= RTCF_DOREDIRECT;

		local_src = DN_FIB_RES_PREFSRC(res);

	case RTN_BLACKHOLE:
	case RTN_UNREACHABLE:
		break;
	case RTN_LOCAL:
		flags |= RTCF_LOCAL;
		fl.fld_src = cb->dst;
		fl.fld_dst = cb->src;

		/* Routing tables gave us a gateway */
		if (gateway)
			goto make_route;

		/* Packet was intra-ethernet, so we know it's on-link */
		if (cb->rt_flags & DN_RT_F_IE) {
			gateway = cb->src;
			flags |= RTCF_DIRECTSRC;
			goto make_route;
		}

		/* Use the default router if there is one */
		neigh = neigh_clone(dn_db->router);
		if (neigh) {
			gateway = ((struct dn_neigh *)neigh)->addr;
			goto make_route;
		}

		/* Close eyes and pray */
		gateway = cb->src;
		flags |= RTCF_DIRECTSRC;
		goto make_route;
	default:
		goto e_inval;
	}

make_route:
	rt = dst_alloc(&dn_dst_ops);
	if (rt == NULL)
		goto e_nobufs;

	rt->rt_saddr = fl.fld_src;
	rt->rt_daddr = fl.fld_dst;
	rt->rt_gateway = fl.fld_dst;
	if (gateway)
		rt->rt_gateway = gateway;
	rt->rt_local_src = local_src ? local_src : rt->rt_saddr;

	rt->rt_dst_map = fl.fld_dst;
	rt->rt_src_map = fl.fld_src;

	rt->fl.fld_src = cb->src;
	rt->fl.fld_dst = cb->dst;
	rt->fl.oif = 0;
	rt->fl.iif = in_dev->ifindex;
	rt->fl.fld_fwmark = fl.fld_fwmark;

	rt->u.dst.flags = DST_HOST;
	rt->u.dst.neighbour = neigh;
	rt->u.dst.dev = out_dev;
	rt->u.dst.lastuse = jiffies;
	rt->u.dst.output = dn_rt_bug;
	switch(res.type) {
	case RTN_UNICAST:
		rt->u.dst.input = dn_forward;
		break;
	case RTN_LOCAL:
		rt->u.dst.output = dn_output;
		rt->u.dst.input = dn_nsp_rx;
		rt->u.dst.dev = in_dev;
		flags |= RTCF_LOCAL;
		break;
	default:
	case RTN_UNREACHABLE:
	case RTN_BLACKHOLE:
		rt->u.dst.input = dn_blackhole;
	}
	rt->rt_flags = flags;
	if (rt->u.dst.dev)
		dev_hold(rt->u.dst.dev);

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
	dn_insert_route(rt, hash, (struct dn_route **)&skb->dst);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	dev_put(in_dev);
	if (out_dev)
		dev_put(out_dev);
out:
	return err;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

e_neighbour:
	dst_free(&rt->u.dst);
	goto done;
}

int dn_route_input(struct sk_buff *skb)
{
	struct dn_route *rt;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned hash = dn_hash(cb->src, cb->dst);

	if (skb->dst)
		return 0;

	rcu_read_lock();
	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
	    rt = rcu_dereference(rt->u.rt_next)) {
		if ((rt->fl.fld_src == cb->src) &&
		    (rt->fl.fld_dst == cb->dst) &&
		    (rt->fl.oif == 0) &&
#ifdef CONFIG_DECNET_ROUTE_FWMARK
		    (rt->fl.fld_fwmark == skb->nfmark) &&
#endif
		    (rt->fl.iif == cb->iif)) {
			rt->u.dst.lastuse = jiffies;
			dst_hold(&rt->u.dst);
			rt->u.dst.__use++;
			rcu_read_unlock();
			skb->dst = (struct dst_entry *)rt;
			return 0;
		}
	}
	rcu_read_unlock();

	return dn_route_input_slow(skb);
}

static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq, int event, int nowait)
{
	struct dn_route *rt = (struct dn_route *)skb->dst;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;
	struct rta_cacheinfo ci;

	nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*r));
	r = NLMSG_DATA(nlh);
	nlh->nlmsg_flags = (nowait && pid) ? NLM_F_MULTI : 0;
	r->rtm_family = AF_DECnet;
	r->rtm_dst_len = 16;
	r->rtm_src_len = 0;
	r->rtm_tos = 0;
	r->rtm_table = RT_TABLE_MAIN;
	r->rtm_type = rt->rt_type;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
	if (rt->fl.fld_src) {
		r->rtm_src_len = 16;
		RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
	}
	if (rt->u.dst.dev)
		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
	/*
	 * Note to self - change this if input routes reverse direction when
	 * they deal only with inputs and not with replies like they do
	 * currently.
	 */
	RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
	if (rt->rt_daddr != rt->rt_gateway)
		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto rtattr_failure;
	ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
	ci.rta_used = rt->u.dst.__use;
	ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
	if (rt->u.dst.expires)
		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
	else
		ci.rta_expires = 0;
	ci.rta_error = rt->u.dst.error;
	ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
	RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
	if (rt->fl.iif)
		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

/*
 * This is called by both endnodes and routers now.
 */
int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb;
	int err;
	struct sk_buff *skb;
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	fl.proto = DNPROTO_NSP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		return -ENOBUFS;
	skb->mac.raw = skb->data;
	cb = DN_SKB_CB(skb);

	if (rta[RTA_SRC-1])
		memcpy(&fl.fld_src, RTA_DATA(rta[RTA_SRC-1]), 2);
	if (rta[RTA_DST-1])
		memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
	if (rta[RTA_IIF-1])
		memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));

	if (fl.iif) {
		struct net_device *dev;
		if ((dev = dev_get_by_index(fl.iif)) == NULL) {
			kfree_skb(skb);
			return -ENODEV;
		}
		if (!dev->dn_ptr) {
			dev_put(dev);
			kfree_skb(skb);
			return -ENODEV;
		}
		skb->protocol = __constant_htons(ETH_P_DNA_RT);
		skb->dev = dev;
		cb->src = fl.fld_src;
		cb->dst = fl.fld_dst;
		local_bh_disable();
		err = dn_route_input(skb);
		local_bh_enable();
		memset(cb, 0, sizeof(struct dn_skb_cb));
		rt = (struct dn_route *)skb->dst;
		if (!err && -rt->u.dst.error)
			err = rt->u.dst.error;
	} else {
		int oif = 0;
		if (rta[RTA_OIF - 1])
			memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
		fl.oif = oif;
		err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
	}

	if (skb->dev)
		dev_put(skb->dev);
	skb->dev = NULL;
	if (err)
		goto out_free;
	skb->dst = &rt->u.dst;
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;

	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0);

	if (err == 0)
		goto out_free;
	if (err < 0) {
		err = -EMSGSIZE;
		goto out_free;
	}

	err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);

	return err;

out_free:
	kfree_skb(skb);
	return err;
}

/*
 * For routers, this is called from dn_fib_dump, but for endnodes it's
 * called directly from the rtnetlink dispatch table.
 */
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct dn_route *rt;
	int h, s_h;
	int idx, s_idx;

	if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
		return -EINVAL;
	if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED))
		return 0;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for(h = 0; h <= dn_rt_hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		rcu_read_lock_bh();
		for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
			rt;
			rt = rcu_dereference(rt->u.rt_next), idx++) {
			if (idx < s_idx)
				continue;
			skb->dst = dst_clone(&rt->u.dst);
			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					cb->nlh->nlmsg_seq, RTM_NEWROUTE, 1) <= 0) {
				dst_release(xchg(&skb->dst, NULL));
				rcu_read_unlock_bh();
				goto done;
			}
			dst_release(xchg(&skb->dst, NULL));
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}

#ifdef CONFIG_PROC_FS
struct dn_rt_cache_iter_state {
	int bucket;
};

static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
{
	struct dn_route *rt = NULL;
	struct dn_rt_cache_iter_state *s = seq->private;

	for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
		rcu_read_lock_bh();
		rt = dn_rt_hash_table[s->bucket].chain;
		if (rt)
			break;
		rcu_read_unlock_bh();
	}
	return rt;
}

static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
{
	struct dn_rt_cache_iter_state *s = rcu_dereference(seq->private);

	rt = rt->u.rt_next;
	while(!rt) {
		rcu_read_unlock_bh();
		if (--s->bucket < 0)
			break;
		rcu_read_lock_bh();
		rt = dn_rt_hash_table[s->bucket].chain;
	}
	return rt;
}

static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_first(seq);

	if (rt) {
		while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
			--*pos;
	}
	return *pos ? NULL : rt;
}

static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_next(seq, v);
	++*pos;
	return rt;
}

static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		rcu_read_unlock_bh();
}

static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
{
	struct dn_route *rt = v;
	char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];

	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
			rt->u.dst.dev ? rt->u.dst.dev->name : "*",
			dn_addr2asc(dn_ntohs(rt->rt_daddr), buf1),
			dn_addr2asc(dn_ntohs(rt->rt_saddr), buf2),
			atomic_read(&rt->u.dst.__refcnt),
			rt->u.dst.__use,
			(int) dst_metric(&rt->u.dst, RTAX_RTT));
	return 0;
}

static struct seq_operations dn_rt_cache_seq_ops = {
	.start	= dn_rt_cache_seq_start,
	.next	= dn_rt_cache_seq_next,
	.stop	= dn_rt_cache_seq_stop,
	.show	= dn_rt_cache_seq_show,
};

static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct dn_rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	rc = seq_open(file, &dn_rt_cache_seq_ops);
	if (rc)
		goto out_kfree;
	seq = file->private_data;
	seq->private = s;
	memset(s, 0, sizeof(*s));
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

static struct file_operations dn_rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dn_rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* CONFIG_PROC_FS */

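/*
 * Table sizing at init time follows ipv4/route.c: aim for one hash
 * bucket per 64MB of memory (goal = num_physpages >> (26 - PAGE_SHIFT)),
 * round up to a power-of-two number of pages, then cap the table
 * below 2048 buckets (the comment in the loop says 1024, the test
 * uses 2048). As a worked example, assuming 4K pages and 8-byte
 * buckets, 256MB of memory gives goal = 4, hence order 2, i.e. 16KB
 * or 2048 buckets; the cap knocks that down to order 1, 1024 buckets,
 * and a final dn_rt_hash_mask of 1023.
 */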
void __init dn_route_init(void)
{
	int i, goal, order;

	dn_dst_ops.kmem_cachep = kmem_cache_create("dn_dst_cache",
						   sizeof(struct dn_route),
						   0, SLAB_HWCACHE_ALIGN,
						   NULL, NULL);

	if (!dn_dst_ops.kmem_cachep)
		panic("DECnet: Failed to allocate dn_dst_cache\n");

	init_timer(&dn_route_timer);
	dn_route_timer.function = dn_dst_check_expire;
	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
	add_timer(&dn_route_timer);

	goal = num_physpages >> (26 - PAGE_SHIFT);

	for(order = 0; (1UL << order) < goal; order++)
		/* NOTHING */;

	/*
	 * Only want 1024 entries max, since the table is very, very unlikely
	 * to be larger than that.
	 */
	while(order && ((((1UL << order) * PAGE_SIZE) /
				sizeof(struct dn_rt_hash_bucket)) >= 2048))
		order--;

	do {
		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
			sizeof(struct dn_rt_hash_bucket);
		while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
			dn_rt_hash_mask--;
		dn_rt_hash_table = (struct dn_rt_hash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dn_rt_hash_table == NULL && --order > 0);

	if (!dn_rt_hash_table)
		panic("Failed to allocate DECnet route cache hash table\n");

	printk(KERN_INFO
		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
		dn_rt_hash_mask,
		(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);

	dn_rt_hash_mask--;
	for(i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_init(&dn_rt_hash_table[i].lock);
		dn_rt_hash_table[i].chain = NULL;
	}

	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

	proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
}

void __exit dn_route_cleanup(void)
{
	del_timer(&dn_route_timer);
	dn_run_flush(0);

	proc_net_remove("decnet_cache");
}