Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at commit be662a18b7763496a052d489206af9ca2c2e1ac2 (1840 lines, 46 kB)
/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Functions (Endnode and Router)
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *              Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                                 "return-to-sender" bits on outgoing
 *                                 packets.
 *              Steve Whitehouse : Timeouts for cached routes.
 *              Steve Whitehouse : Use dst cache for input routes too.
 *              Steve Whitehouse : Fixed error values in dn_send_skb.
 *              Steve Whitehouse : Rework routing functions to better fit
 *                                 DECnet routing design
 *              Alexey Kuznetsov : New SMP locking
 *              Steve Whitehouse : More SMP locking changes & dn_cache_dump()
 *              Steve Whitehouse : Prerouting NF hook, now really is prerouting.
 *                                 Fixed possible skb leak in rtnetlink funcs.
 *              Steve Whitehouse : Dave Miller's dynamic hash table sizing and
 *                                 Alexey Kuznetsov's finer grained locking
 *                                 from ipv4/route.c.
 *              Steve Whitehouse : Routing is now starting to look like a
 *                                 sensible set of code now, mainly due to
 *                                 my copying the IPv4 routing code. The
 *                                 hooks here are modified and will continue
 *                                 to evolve for a while.
 *              Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                                 stuff. Look out raw sockets your days
 *                                 are numbered!
 *              Steve Whitehouse : Added return-to-sender functions. Added
 *                                 backlog congestion level return codes.
 *              Steve Whitehouse : Fixed bug where routes were set up with
 *                                 no ref count on net devices.
 *              Steve Whitehouse : RCU for the route cache
 *              Steve Whitehouse : Preparations for the flow cache
 *              Steve Whitehouse : Prepare for nonlinear skbs
 */

/******************************************************************************
    (c) 1995-1998 E.M. Serrat           emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*******************************************************************************/

#include <linux/config.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/in_route.h>
#include <net/sock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <asm/errno.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>

struct dn_rt_hash_bucket
{
	struct dn_route *chain;
	spinlock_t lock;
} __attribute__((__aligned__(8)));

extern struct neigh_table dn_neigh_table;


static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};

static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;

static int dn_dst_gc(void);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);

static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
int decnet_dst_gc_interval = 2;

static struct dst_ops dn_dst_ops = {
	.family = PF_DECnet,
	.protocol = __constant_htons(ETH_P_DNA_RT),
	.gc_thresh = 128,
	.gc = dn_dst_gc,
	.check = dn_dst_check,
	.negative_advice = dn_dst_negative_advice,
	.link_failure = dn_dst_link_failure,
	.update_pmtu = dn_dst_update_pmtu,
	.entry_size = sizeof(struct dn_route),
	.entries = ATOMIC_INIT(0),
};

static __inline__ unsigned dn_hash(unsigned short src, unsigned short dst)
{
	unsigned short tmp = src ^ dst;
	tmp ^= (tmp >> 3);
	tmp ^= (tmp >> 5);
	tmp ^= (tmp >> 10);
	return dn_rt_hash_mask & (unsigned)tmp;
}

static inline void dnrt_free(struct dn_route *rt)
{
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static inline void dnrt_drop(struct dn_route *rt)
{
	if (rt)
		dst_release(&rt->u.dst);
	call_rcu_bh(&rt->u.dst.rcu_head, dst_rcu_free);
}

static void dn_dst_check_expire(unsigned long dummy)
{
	int i;
	struct dn_route *rt, **rtp;
	unsigned long now = jiffies;
	unsigned long expire = 120 * HZ;

	for(i = 0; i <= dn_rt_hash_mask; i++) {
		rtp = &dn_rt_hash_table[i].chain;

		spin_lock(&dn_rt_hash_table[i].lock);
		while((rt=*rtp) != NULL) {
			if (atomic_read(&rt->u.dst.__refcnt) ||
					(now - rt->u.dst.lastuse) < expire) {
				rtp = &rt->u.rt_next;
				continue;
			}
			*rtp = rt->u.rt_next;
			rt->u.rt_next = NULL;
			dnrt_free(rt);
		}
		spin_unlock(&dn_rt_hash_table[i].lock);

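		/*
		 * Bail out of the scan if it has already taken more than
		 * one jiffy; the timer is rearmed below and will run the
		 * scan again shortly.
		 */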
		if ((jiffies - now) > 0)
			break;
	}

	mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
}

static int dn_dst_gc(void)
{
	struct dn_route *rt, **rtp;
	int i;
	unsigned long now = jiffies;
	unsigned long expire = 10 * HZ;

	for(i = 0; i <= dn_rt_hash_mask; i++) {

		spin_lock_bh(&dn_rt_hash_table[i].lock);
		rtp = &dn_rt_hash_table[i].chain;

		while((rt=*rtp) != NULL) {
			if (atomic_read(&rt->u.dst.__refcnt) ||
					(now - rt->u.dst.lastuse) < expire) {
				rtp = &rt->u.rt_next;
				continue;
			}
			*rtp = rt->u.rt_next;
			rt->u.rt_next = NULL;
			dnrt_drop(rt);
			break;
		}
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}

	return 0;
}

/*
 * The decnet standards don't impose a particular minimum mtu, what they
 * do insist on is that the routing layer accepts a datagram of at least
 * 230 bytes long. Here we have to subtract the routing header length from
 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we
 * assume the worst and use a long header size.
 *
 * We update both the mtu and the advertised mss (i.e. the segment size we
 * advertise to the other end).
 */
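/*
 * For example, on a link using short headers (6 bytes) the floor works
 * out as 230 - 6 = 224 bytes, and with long headers (21 bytes) as
 * 230 - 21 = 209 bytes.
 */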
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	u32 min_mtu = 230;
	struct dn_dev *dn = dst->neighbour ?
			(struct dn_dev *)dst->neighbour->dev->dn_ptr : NULL;

	if (dn && dn->use_long == 0)
		min_mtu -= 6;
	else
		min_mtu -= 21;

	if (dst->metrics[RTAX_MTU-1] > mtu && mtu >= min_mtu) {
		if (!(dst_metric_locked(dst, RTAX_MTU))) {
			dst->metrics[RTAX_MTU-1] = mtu;
			dst_set_expires(dst, dn_rt_mtu_expires);
		}
		if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
			u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
			if (dst->metrics[RTAX_ADVMSS-1] > mss)
				dst->metrics[RTAX_ADVMSS-1] = mss;
		}
	}
}

/*
 * When a route has been marked obsolete. (e.g. routing cache flush)
 */
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
{
	return NULL;
}

static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
{
	dst_release(dst);
	return NULL;
}

static void dn_dst_link_failure(struct sk_buff *skb)
{
	return;
}

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
	return memcmp(&fl1->nl_u.dn_u, &fl2->nl_u.dn_u, sizeof(fl1->nl_u.dn_u)) == 0 &&
		fl1->oif == fl2->oif &&
		fl1->iif == fl2->iif;
}

static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
{
	struct dn_route *rth, **rthp;
	unsigned long now = jiffies;

	rthp = &dn_rt_hash_table[hash].chain;

	spin_lock_bh(&dn_rt_hash_table[hash].lock);
	while((rth = *rthp) != NULL) {
		if (compare_keys(&rth->fl, &rt->fl)) {
			/* Put it first */
			*rthp = rth->u.rt_next;
			rcu_assign_pointer(rth->u.rt_next,
					   dn_rt_hash_table[hash].chain);
			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

			rth->u.dst.__use++;
			dst_hold(&rth->u.dst);
			rth->u.dst.lastuse = now;
			spin_unlock_bh(&dn_rt_hash_table[hash].lock);

			dnrt_drop(rt);
			*rp = rth;
			return 0;
		}
		rthp = &rth->u.rt_next;
	}

	rcu_assign_pointer(rt->u.rt_next, dn_rt_hash_table[hash].chain);
	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

	dst_hold(&rt->u.dst);
	rt->u.dst.__use++;
	rt->u.dst.lastuse = now;
	spin_unlock_bh(&dn_rt_hash_table[hash].lock);
	*rp = rt;
	return 0;
}

void dn_run_flush(unsigned long dummy)
{
	int i;
	struct dn_route *rt, *next;

	for(i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_bh(&dn_rt_hash_table[i].lock);

		if ((rt = xchg(&dn_rt_hash_table[i].chain, NULL)) == NULL)
			goto nothing_to_declare;

		for(; rt; rt=next) {
			next = rt->u.rt_next;
			rt->u.rt_next = NULL;
			dst_free((struct dst_entry *)rt);
		}

nothing_to_declare:
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}
}

static DEFINE_SPINLOCK(dn_rt_flush_lock);

void dn_rt_cache_flush(int delay)
{
	unsigned long now = jiffies;
	int user_mode = !in_interrupt();

	if (delay < 0)
		delay = dn_rt_min_delay;

	spin_lock_bh(&dn_rt_flush_lock);

	if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
		long tmo = (long)(dn_rt_deadline - now);

		if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
			tmo = 0;

		if (delay > tmo)
			delay = tmo;
	}

	if (delay <= 0) {
		spin_unlock_bh(&dn_rt_flush_lock);
		dn_run_flush(0);
		return;
	}

	if (dn_rt_deadline == 0)
		dn_rt_deadline = now + dn_rt_max_delay;

	dn_rt_flush_timer.expires = now + delay;
	add_timer(&dn_rt_flush_timer);
	spin_unlock_bh(&dn_rt_flush_lock);
}

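/*
 * Both return-to-sender helpers below rewrite the packet in place: the
 * routing header is restored with skb_push(), the RQR flag is replaced
 * by RTS, source and destination are swapped and the hop count is
 * cleared before the frame is handed back to the output path.  For the
 * short header the layout after the 2 byte length field is simply
 * flags, 2 byte destination, 2 byte source, forward/hop-count byte.
 */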
/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 *
 */
static int dn_return_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	dn_address *src;
	dn_address *dst;
	dn_address tmp;

	/* Add back headers */
	skb_push(skb, skb->data - skb->nh.raw);

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Skip packet length and point to flags */
	ptr = skb->data + 2;
	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;

	dst = (dn_address *)ptr;
	ptr += 2;
	src = (dn_address *)ptr;
	ptr += 2;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	tmp = *src;
	*src = *dst;
	*dst = tmp;

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, NULL, NULL);
	return NET_RX_SUCCESS;
}

/**
 * dn_return_long - Return a long packet to its sender
 * @skb: The long format packet to return
 *
 */
static int dn_return_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	unsigned char *src_addr, *dst_addr;
	unsigned char tmp[ETH_ALEN];

	/* Add back all headers */
	skb_push(skb, skb->data - skb->nh.raw);

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Ignore packet length and point to flags */
	ptr = skb->data + 2;

	/* Skip padding */
	if (*ptr & DN_RT_F_PF) {
		char padlen = (*ptr & ~DN_RT_F_PF);
		ptr += padlen;
	}

	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
	ptr += 2;
	dst_addr = ptr;
	ptr += 8;
	src_addr = ptr;
	ptr += 6;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	memcpy(tmp, src_addr, ETH_ALEN);
	memcpy(src_addr, dst_addr, ETH_ALEN);
	memcpy(dst_addr, tmp, ETH_ALEN);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, dst_addr, src_addr);
	return NET_RX_SUCCESS;
}

/**
 * dn_route_rx_packet - Try and find a route for an incoming packet
 * @skb: The packet to find a route for
 *
 * Returns: result of input function if route is found, error code otherwise
 */
static int dn_route_rx_packet(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	int err;

	if ((err = dn_route_input(skb)) == 0)
		return dst_input(skb);

	if (decnet_debug_level & 4) {
		char *devname = skb->dev ? skb->dev->name : "???";
		struct dn_skb_cb *cb = DN_SKB_CB(skb);
		printk(KERN_DEBUG
			"DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
			(int)cb->rt_flags, devname, skb->len, cb->src, cb->dst,
			err, skb->pkt_type);
	}

	if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
		switch(cb->rt_flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_SHORT:
			return dn_return_short(skb);
		case DN_RT_PKT_LONG:
			return dn_return_long(skb);
		}
	}

	kfree_skb(skb);
	return NET_RX_DROP;
}

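/*
 * Long format data packets carry 6 byte Ethernet-style addresses.  The
 * code below verifies that both carry the DECnet HIORD prefix
 * (aa:00:04:00) before extracting the 16 bit node addresses and the
 * visit (hop) count.
 */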
static int dn_route_rx_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 20);
	skb->h.raw = skb->data;

	/* Destination info */
	ptr += 2;
	cb->dst = dn_htons(dn_eth2dn(ptr));
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;


	/* Source info */
	ptr += 2;
	cb->src = dn_htons(dn_eth2dn(ptr));
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;
	/* Other junk */
	ptr++;
	cb->hops = *ptr++; /* Visit Count */

	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}



static int dn_route_rx_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 5);
	skb->h.raw = skb->data;

	cb->dst = *(dn_address *)ptr;
	ptr += 2;
	cb->src = *(dn_address *)ptr;
	ptr += 2;
	cb->hops = *ptr & 0x3f;

	return NF_HOOK(PF_DECnet, NF_DN_PRE_ROUTING, skb, skb->dev, NULL, dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int dn_route_discard(struct sk_buff *skb)
{
	/*
	 * I know we drop the packet here, but that's considered success in
	 * this case
	 */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int dn_route_ptp_hello(struct sk_buff *skb)
{
	dn_dev_hello(skb);
	dn_neigh_pointopoint_hello(skb);
	return NET_RX_SUCCESS;
}

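/*
 * dn_route_rcv() is the receive handler for DECnet frames.  The first
 * two bytes on the wire are the little-endian payload length, followed
 * by the flags byte (possibly preceded by padding flagged with
 * DN_RT_F_PF).  Control packets (hellos, routing messages) are passed
 * to the appropriate netfilter hooks; data packets are dispatched to
 * the long or short header receive routines above.
 */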
int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct dn_skb_cb *cb;
	unsigned char flags = 0;
	__u16 len = dn_ntohs(*(__u16 *)skb->data);
	struct dn_dev *dn = (struct dn_dev *)dev->dn_ptr;
	unsigned char padlen = 0;

	if (dn == NULL)
		goto dump_it;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out;

	if (!pskb_may_pull(skb, 3))
		goto dump_it;

	skb_pull(skb, 2);

	if (len > skb->len)
		goto dump_it;

	skb_trim(skb, len);

	flags = *skb->data;

	cb = DN_SKB_CB(skb);
	cb->stamp = jiffies;
	cb->iif = dev->ifindex;

	/*
	 * If we have padding, remove it.
	 */
	if (flags & DN_RT_F_PF) {
		padlen = flags & ~DN_RT_F_PF;
		if (!pskb_may_pull(skb, padlen + 1))
			goto dump_it;
		skb_pull(skb, padlen);
		flags = *skb->data;
	}

	skb->nh.raw = skb->data;

	/*
	 * Weed out future version DECnet
	 */
	if (flags & DN_RT_F_VER)
		goto dump_it;

	cb->rt_flags = flags;

	if (decnet_debug_level & 1)
		printk(KERN_DEBUG
			"dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
			(int)flags, (dev) ? dev->name : "???", len, skb->len,
			padlen);

	if (flags & DN_RT_PKT_CNTL) {
		if (unlikely(skb_is_nonlinear(skb)) &&
		    skb_linearize(skb, GFP_ATOMIC) != 0)
			goto dump_it;

		switch(flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_INIT:
			dn_dev_init_pkt(skb);
			break;
		case DN_RT_PKT_VERI:
			dn_dev_veri_pkt(skb);
			break;
		}

		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		switch(flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_HELO:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_route_ptp_hello);

		case DN_RT_PKT_L1RT:
		case DN_RT_PKT_L2RT:
			return NF_HOOK(PF_DECnet, NF_DN_ROUTE, skb, skb->dev, NULL, dn_route_discard);
		case DN_RT_PKT_ERTH:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_router_hello);

		case DN_RT_PKT_EEDH:
			return NF_HOOK(PF_DECnet, NF_DN_HELLO, skb, skb->dev, NULL, dn_neigh_endnode_hello);
		}
	} else {
		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		skb_pull(skb, 1); /* Pull flags */

		switch(flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_LONG:
			return dn_route_rx_long(skb);
		case DN_RT_PKT_SHORT:
			return dn_route_rx_short(skb);
		}
	}

dump_it:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}

static int dn_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct dn_route *rt = (struct dn_route *)dst;
	struct net_device *dev = dst->dev;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct neighbour *neigh;

	int err = -EINVAL;

	if ((neigh = dst->neighbour) == NULL)
		goto error;

	skb->dev = dev;

	cb->src = rt->rt_saddr;
	cb->dst = rt->rt_daddr;

	/*
	 * Always set the Intra-Ethernet bit on all outgoing packets
	 * originated on this node. Only valid flag from upper layers
	 * is return-to-sender-requested. Set hop count to 0 too.
	 */
	cb->rt_flags &= ~DN_RT_F_RQR;
	cb->rt_flags |= DN_RT_F_IE;
	cb->hops = 0;

	return NF_HOOK(PF_DECnet, NF_DN_LOCAL_OUT, skb, NULL, dev, neigh->output);

error:
	if (net_ratelimit())
		printk(KERN_DEBUG "dn_output: This should not happen\n");

	kfree_skb(skb);

	return err;
}

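/*
 * Forward a packet that is not addressed to this node: make sure there
 * is enough headroom for the outgoing device's routing header, bump the
 * visit count (packets are dropped once it exceeds 30), and set the
 * intra-ethernet bit when the route indicates the packet is leaving on
 * the interface it arrived on.
 */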
static int dn_forward(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dst_entry *dst = skb->dst;
	struct dn_dev *dn_db = dst->dev->dn_ptr;
	struct dn_route *rt;
	struct neighbour *neigh = dst->neighbour;
	int header_len;
#ifdef CONFIG_NETFILTER
	struct net_device *dev = skb->dev;
#endif

	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	/* Ensure that we have enough space for headers */
	rt = (struct dn_route *)skb->dst;
	header_len = dn_db->use_long ? 21 : 6;
	if (skb_cow(skb, LL_RESERVED_SPACE(rt->u.dst.dev)+header_len))
		goto drop;

	/*
	 * Hop count exceeded.
	 */
	if (++cb->hops > 30)
		goto drop;

	skb->dev = rt->u.dst.dev;

	/*
	 * If packet goes out same interface it came in on, then set
	 * the Intra-Ethernet bit. This has no effect for short
	 * packets, so we don't need to test for them here.
	 */
	cb->rt_flags &= ~DN_RT_F_IE;
	if (rt->rt_flags & RTCF_DOREDIRECT)
		cb->rt_flags |= DN_RT_F_IE;

	return NF_HOOK(PF_DECnet, NF_DN_FORWARD, skb, dev, skb->dev, neigh->output);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * Drop packet. This is used for endnodes and for
 * when we should not be forwarding packets from
 * this dest.
 */
static int dn_blackhole(struct sk_buff *skb)
{
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * Used to catch bugs. This should never normally get
 * called.
 */
static int dn_rt_bug(struct sk_buff *skb)
{
	if (net_ratelimit()) {
		struct dn_skb_cb *cb = DN_SKB_CB(skb);

		printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
				cb->src, cb->dst);
	}

	kfree_skb(skb);

	return NET_RX_BAD;
}

static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
	struct dn_fib_info *fi = res->fi;
	struct net_device *dev = rt->u.dst.dev;
	struct neighbour *n;
	unsigned mss;

	if (fi) {
		if (DN_FIB_RES_GW(*res) &&
		    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = DN_FIB_RES_GW(*res);
		memcpy(rt->u.dst.metrics, fi->fib_metrics,
		       sizeof(rt->u.dst.metrics));
	}
	rt->rt_type = res->type;

	if (dev != NULL && rt->u.dst.neighbour == NULL) {
		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		rt->u.dst.neighbour = n;
	}

	if (rt->u.dst.metrics[RTAX_MTU-1] == 0 ||
	    rt->u.dst.metrics[RTAX_MTU-1] > rt->u.dst.dev->mtu)
		rt->u.dst.metrics[RTAX_MTU-1] = rt->u.dst.dev->mtu;
	mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->u.dst));
	if (rt->u.dst.metrics[RTAX_ADVMSS-1] == 0 ||
	    rt->u.dst.metrics[RTAX_ADVMSS-1] > mss)
		rt->u.dst.metrics[RTAX_ADVMSS-1] = mss;
	return 0;
}

static inline int dn_match_addr(__u16 addr1, __u16 addr2)
{
	__u16 tmp = dn_ntohs(addr1) ^ dn_ntohs(addr2);
	int match = 16;
	while(tmp) {
		tmp >>= 1;
		match--;
	}
	return match;
}

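/*
 * dn_match_addr() scores how similar two 16 bit addresses are: with the
 * highest differing bit at position n (counting from 0 at the least
 * significant end) it returns 15 - n, and identical addresses return
 * 16.  Addresses that differ in the top bit therefore score 0.
 */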
static __u16 dnet_select_source(const struct net_device *dev, __u16 daddr, int scope)
{
	__u16 saddr = 0;
	struct dn_dev *dn_db = dev->dn_ptr;
	struct dn_ifaddr *ifa;
	int best_match = 0;
	int ret;

	read_lock(&dev_base_lock);
	for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
		if (ifa->ifa_scope > scope)
			continue;
		if (!daddr) {
			saddr = ifa->ifa_local;
			break;
		}
		ret = dn_match_addr(daddr, ifa->ifa_local);
		if (ret > best_match) {
			best_match = ret;
			saddr = ifa->ifa_local;
		}
		if (best_match == 0)
			saddr = ifa->ifa_local;
	}
	read_unlock(&dev_base_lock);

	return saddr;
}

static inline __u16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
{
	return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
}

static inline __u16 dn_fib_rules_map_destination(__u16 daddr, struct dn_fib_res *res)
{
	__u16 mask = dnet_make_mask(res->prefixlen);
	return (daddr&~mask)|res->fi->fib_nh->nh_gw;
}

static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
{
	struct flowi fl = { .nl_u = { .dn_u =
				      { .daddr = oldflp->fld_dst,
					.saddr = oldflp->fld_src,
					.scope = RT_SCOPE_UNIVERSE,
#ifdef CONFIG_DECNET_ROUTE_FWMARK
					.fwmark = oldflp->fld_fwmark
#endif
				     } },
			    .iif = loopback_dev.ifindex,
			    .oif = oldflp->oif };
	struct dn_route *rt = NULL;
	struct net_device *dev_out = NULL;
	struct neighbour *neigh = NULL;
	unsigned hash;
	unsigned flags = 0;
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
	int err;
	int free_res = 0;
	__u16 gateway = 0;

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
		       " iif=%d oif=%d\n", oldflp->fld_dst, oldflp->fld_src,
		       oldflp->fld_fwmark, loopback_dev.ifindex, oldflp->oif);

	/* If we have an output interface, verify it's a DECnet device */
	if (oldflp->oif) {
		dev_out = dev_get_by_index(oldflp->oif);
		err = -ENODEV;
		if (dev_out && dev_out->dn_ptr == NULL) {
			dev_put(dev_out);
			dev_out = NULL;
		}
		if (dev_out == NULL)
			goto out;
	}

	/* If we have a source address, verify that it's a local address */
	if (oldflp->fld_src) {
		err = -EADDRNOTAVAIL;

		if (dev_out) {
			if (dn_dev_islocal(dev_out, oldflp->fld_src))
				goto source_ok;
			dev_put(dev_out);
			goto out;
		}
		read_lock(&dev_base_lock);
		for(dev_out = dev_base; dev_out; dev_out = dev_out->next) {
			if (!dev_out->dn_ptr)
				continue;
			if (dn_dev_islocal(dev_out, oldflp->fld_src))
				break;
		}
		read_unlock(&dev_base_lock);
		if (dev_out == NULL)
			goto out;
		dev_hold(dev_out);
source_ok:
		;
	}

	/* No destination? Assume it's local */
	if (!fl.fld_dst) {
		fl.fld_dst = fl.fld_src;

		err = -EADDRNOTAVAIL;
		if (dev_out)
			dev_put(dev_out);
		dev_out = &loopback_dev;
		dev_hold(dev_out);
		if (!fl.fld_dst) {
			fl.fld_dst =
			fl.fld_src = dnet_select_source(dev_out, 0,
							RT_SCOPE_HOST);
			if (!fl.fld_dst)
				goto out;
		}
		fl.oif = loopback_dev.ifindex;
		res.type = RTN_LOCAL;
		goto make_route;
	}

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: initial checks complete."
		       " dst=%04x src=%04x oif=%d try_hard=%d\n", fl.fld_dst,
		       fl.fld_src, fl.oif, try_hard);

	/*
	 * N.B. If the kernel is compiled without router support then
	 * dn_fib_lookup() will evaluate to non-zero so this if () block
	 * will always be executed.
	 */
	err = -ESRCH;
	if (try_hard || (err = dn_fib_lookup(&fl, &res)) != 0) {
		struct dn_dev *dn_db;
		if (err != -ESRCH)
			goto out;
		/*
		 * Here the fallback is basically the standard algorithm for
		 * routing in endnodes which is described in the DECnet routing
		 * docs
		 *
		 * If we are not trying hard, look in neighbour cache.
		 * The result is tested to ensure that if a specific output
		 * device/source address was requested, then we honour that
		 * here
		 */
		if (!try_hard) {
			neigh = neigh_lookup_nodev(&dn_neigh_table, &fl.fld_dst);
			if (neigh) {
				if ((oldflp->oif &&
				    (neigh->dev->ifindex != oldflp->oif)) ||
				    (oldflp->fld_src &&
				    (!dn_dev_islocal(neigh->dev,
						     oldflp->fld_src)))) {
					neigh_release(neigh);
					neigh = NULL;
				} else {
					if (dev_out)
						dev_put(dev_out);
					if (dn_dev_islocal(neigh->dev, fl.fld_dst)) {
						dev_out = &loopback_dev;
						res.type = RTN_LOCAL;
					} else {
						dev_out = neigh->dev;
					}
					dev_hold(dev_out);
					goto select_source;
				}
			}
		}

		/* Not there? Perhaps it's a local address */
		if (dev_out == NULL)
			dev_out = dn_dev_get_default();
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;
		dn_db = dev_out->dn_ptr;
		/* Possible improvement - check all devices for local addr */
		if (dn_dev_islocal(dev_out, fl.fld_dst)) {
			dev_put(dev_out);
			dev_out = &loopback_dev;
			dev_hold(dev_out);
			res.type = RTN_LOCAL;
			goto select_source;
		}
		/* Not local either.... try sending it to the default router */
		neigh = neigh_clone(dn_db->router);
		BUG_ON(neigh && neigh->dev != dev_out);

		/* Ok then, we assume it's directly connected and move on */
select_source:
		if (neigh)
			gateway = ((struct dn_neigh *)neigh)->addr;
		if (gateway == 0)
			gateway = fl.fld_dst;
		if (fl.fld_src == 0) {
			fl.fld_src = dnet_select_source(dev_out, gateway,
							res.type == RTN_LOCAL ?
							RT_SCOPE_HOST :
							RT_SCOPE_LINK);
			if (fl.fld_src == 0 && res.type != RTN_LOCAL)
				goto e_addr;
		}
		fl.oif = dev_out->ifindex;
		goto make_route;
	}
	free_res = 1;

	if (res.type == RTN_NAT)
		goto e_inval;

	if (res.type == RTN_LOCAL) {
		if (!fl.fld_src)
			fl.fld_src = fl.fld_dst;
		if (dev_out)
			dev_put(dev_out);
		dev_out = &loopback_dev;
		dev_hold(dev_out);
		fl.oif = dev_out->ifindex;
		if (res.fi)
			dn_fib_info_put(res.fi);
		res.fi = NULL;
		goto make_route;
	}

	if (res.fi->fib_nhs > 1 && fl.oif == 0)
		dn_fib_select_multipath(&fl, &res);

	/*
	 * We could add some logic to deal with default routes here and
	 * get rid of some of the special casing above.
	 */

	if (!fl.fld_src)
		fl.fld_src = DN_FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = DN_FIB_RES_DEV(res);
	dev_hold(dev_out);
	fl.oif = dev_out->ifindex;
	gateway = DN_FIB_RES_GW(res);

make_route:
	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	rt = dst_alloc(&dn_dst_ops);
	if (rt == NULL)
		goto e_nobufs;

	atomic_set(&rt->u.dst.__refcnt, 1);
	rt->u.dst.flags = DST_HOST;

	rt->fl.fld_src = oldflp->fld_src;
	rt->fl.fld_dst = oldflp->fld_dst;
	rt->fl.oif = oldflp->oif;
	rt->fl.iif = 0;
#ifdef CONFIG_DECNET_ROUTE_FWMARK
	rt->fl.fld_fwmark = oldflp->fld_fwmark;
#endif

	rt->rt_saddr = fl.fld_src;
	rt->rt_daddr = fl.fld_dst;
	rt->rt_gateway = gateway ? gateway : fl.fld_dst;
	rt->rt_local_src = fl.fld_src;

	rt->rt_dst_map = fl.fld_dst;
	rt->rt_src_map = fl.fld_src;

	rt->u.dst.dev = dev_out;
	dev_hold(dev_out);
	rt->u.dst.neighbour = neigh;
	neigh = NULL;

	rt->u.dst.lastuse = jiffies;
	rt->u.dst.output = dn_output;
	rt->u.dst.input = dn_rt_bug;
	rt->rt_flags = flags;
	if (flags & RTCF_LOCAL)
		rt->u.dst.input = dn_nsp_rx;

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
	dn_insert_route(rt, hash, (struct dn_route **)pprt);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:
	return err;

e_addr:
	err = -EADDRNOTAVAIL;
	goto done;
e_inval:
	err = -EINVAL;
	goto done;
e_nobufs:
	err = -ENOBUFS;
	goto done;
e_neighbour:
	dst_free(&rt->u.dst);
	goto e_nobufs;
}


/*
 * N.B. The flags may be moved into the flowi at some future stage.
 */
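/*
 * Fast path for output route resolution: the cache is probed under
 * rcu_read_lock_bh() using a hash of (saddr, daddr); entries match on
 * source, destination, oif, fwmark and iif == 0.  Only if nothing is
 * found (or MSG_TRYHARD is set) do we fall back to the full lookup in
 * dn_route_output_slow().
 */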
static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *flp, int flags)
{
	unsigned hash = dn_hash(flp->fld_src, flp->fld_dst);
	struct dn_route *rt = NULL;

	if (!(flags & MSG_TRYHARD)) {
		rcu_read_lock_bh();
		for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt;
		    rt = rcu_dereference(rt->u.rt_next)) {
			if ((flp->fld_dst == rt->fl.fld_dst) &&
			    (flp->fld_src == rt->fl.fld_src) &&
#ifdef CONFIG_DECNET_ROUTE_FWMARK
			    (flp->fld_fwmark == rt->fl.fld_fwmark) &&
#endif
			    (rt->fl.iif == 0) &&
			    (rt->fl.oif == flp->oif)) {
				rt->u.dst.lastuse = jiffies;
				dst_hold(&rt->u.dst);
				rt->u.dst.__use++;
				rcu_read_unlock_bh();
				*pprt = &rt->u.dst;
				return 0;
			}
		}
		rcu_read_unlock_bh();
	}

	return dn_route_output_slow(pprt, flp, flags);
}

static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, flp, flags);
	if (err == 0 && flp->proto) {
		err = xfrm_lookup(pprt, flp, NULL, 0);
	}
	return err;
}

int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock *sk, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
	if (err == 0 && fl->proto) {
		err = xfrm_lookup(pprt, fl, sk, !(flags & MSG_DONTWAIT));
	}
	return err;
}

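/*
 * Slow path for input route resolution: build a flow key from the
 * received packet, reject sources that claim to be one of our own
 * addresses, consult the FIB (falling back to endnode behaviour when
 * that fails) and then allocate and hash a new cache entry whose input
 * handler is dn_forward(), dn_nsp_rx() or dn_blackhole() depending on
 * the route type.
 */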
static int dn_route_input_slow(struct sk_buff *skb)
{
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct net_device *in_dev = skb->dev;
	struct net_device *out_dev = NULL;
	struct dn_dev *dn_db;
	struct neighbour *neigh = NULL;
	unsigned hash;
	int flags = 0;
	__u16 gateway = 0;
	__u16 local_src = 0;
	struct flowi fl = { .nl_u = { .dn_u =
				     { .daddr = cb->dst,
				       .saddr = cb->src,
				       .scope = RT_SCOPE_UNIVERSE,
#ifdef CONFIG_DECNET_ROUTE_FWMARK
				       .fwmark = skb->nfmark
#endif
				    } },
			    .iif = skb->dev->ifindex };
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
	int err = -EINVAL;
	int free_res = 0;

	dev_hold(in_dev);

	if ((dn_db = in_dev->dn_ptr) == NULL)
		goto out;

	/* Zero source addresses are not allowed */
	if (fl.fld_src == 0)
		goto out;

	/*
	 * In this case we've just received a packet from a source
	 * outside ourselves pretending to come from us. We don't
	 * allow it any further to prevent routing loops, spoofing and
	 * other nasties. Loopback packets already have the dst attached
	 * so this only affects packets which have originated elsewhere.
	 */
	err = -ENOTUNIQ;
	if (dn_dev_islocal(in_dev, cb->src))
		goto out;

	err = dn_fib_lookup(&fl, &res);
	if (err) {
		if (err != -ESRCH)
			goto out;
		/*
		 * Is the destination us ?
		 */
		if (!dn_dev_islocal(in_dev, cb->dst))
			goto e_inval;

		res.type = RTN_LOCAL;
		flags |= RTCF_DIRECTSRC;
	} else {
		__u16 src_map = fl.fld_src;
		free_res = 1;

		out_dev = DN_FIB_RES_DEV(res);
		if (out_dev == NULL) {
			if (net_ratelimit())
				printk(KERN_CRIT "Bug in dn_route_input_slow() "
						 "No output device\n");
			goto e_inval;
		}
		dev_hold(out_dev);

		if (res.r)
			src_map = dn_fib_rules_policy(fl.fld_src, &res, &flags);

		gateway = DN_FIB_RES_GW(res);
		if (res.type == RTN_NAT) {
			fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res);
			dn_fib_res_put(&res);
			free_res = 0;
			if (dn_fib_lookup(&fl, &res))
				goto e_inval;
			free_res = 1;
			if (res.type != RTN_UNICAST)
				goto e_inval;
			flags |= RTCF_DNAT;
			gateway = fl.fld_dst;
		}
		fl.fld_src = src_map;
	}

	switch(res.type) {
	case RTN_UNICAST:
		/*
		 * Forwarding check here, we only check for forwarding
		 * being turned off, if you want to only forward intra
		 * area, it's up to you to set the routing tables up
		 * correctly.
		 */
		if (dn_db->parms.forwarding == 0)
			goto e_inval;

		if (res.fi->fib_nhs > 1 && fl.oif == 0)
			dn_fib_select_multipath(&fl, &res);

		/*
		 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
		 * flag as a hint to set the intra-ethernet bit when
		 * forwarding. If we've got NAT in operation, we don't do
		 * this optimisation.
		 */
		if (out_dev == in_dev && !(flags & RTCF_NAT))
			flags |= RTCF_DOREDIRECT;

		local_src = DN_FIB_RES_PREFSRC(res);

	case RTN_BLACKHOLE:
	case RTN_UNREACHABLE:
		break;
	case RTN_LOCAL:
		flags |= RTCF_LOCAL;
		fl.fld_src = cb->dst;
		fl.fld_dst = cb->src;

		/* Routing tables gave us a gateway */
		if (gateway)
			goto make_route;

		/* Packet was intra-ethernet, so we know it's on-link */
		if (cb->rt_flags & DN_RT_F_IE) {
			gateway = cb->src;
			flags |= RTCF_DIRECTSRC;
			goto make_route;
		}

		/* Use the default router if there is one */
		neigh = neigh_clone(dn_db->router);
		if (neigh) {
			gateway = ((struct dn_neigh *)neigh)->addr;
			goto make_route;
		}

		/* Close eyes and pray */
		gateway = cb->src;
		flags |= RTCF_DIRECTSRC;
		goto make_route;
	default:
		goto e_inval;
	}

make_route:
	rt = dst_alloc(&dn_dst_ops);
	if (rt == NULL)
		goto e_nobufs;

	rt->rt_saddr = fl.fld_src;
	rt->rt_daddr = fl.fld_dst;
	rt->rt_gateway = fl.fld_dst;
	if (gateway)
		rt->rt_gateway = gateway;
	rt->rt_local_src = local_src ? local_src : rt->rt_saddr;

	rt->rt_dst_map = fl.fld_dst;
	rt->rt_src_map = fl.fld_src;

	rt->fl.fld_src = cb->src;
	rt->fl.fld_dst = cb->dst;
	rt->fl.oif = 0;
	rt->fl.iif = in_dev->ifindex;
	rt->fl.fld_fwmark = fl.fld_fwmark;

	rt->u.dst.flags = DST_HOST;
	rt->u.dst.neighbour = neigh;
	rt->u.dst.dev = out_dev;
	rt->u.dst.lastuse = jiffies;
	rt->u.dst.output = dn_rt_bug;
	switch(res.type) {
	case RTN_UNICAST:
		rt->u.dst.input = dn_forward;
		break;
	case RTN_LOCAL:
		rt->u.dst.output = dn_output;
		rt->u.dst.input = dn_nsp_rx;
		rt->u.dst.dev = in_dev;
		flags |= RTCF_LOCAL;
		break;
	default:
	case RTN_UNREACHABLE:
	case RTN_BLACKHOLE:
		rt->u.dst.input = dn_blackhole;
	}
	rt->rt_flags = flags;
	if (rt->u.dst.dev)
		dev_hold(rt->u.dst.dev);

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
	dn_insert_route(rt, hash, (struct dn_route **)&skb->dst);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	dev_put(in_dev);
	if (out_dev)
		dev_put(out_dev);
out:
	return err;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

e_neighbour:
	dst_free(&rt->u.dst);
	goto done;
}

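/*
 * dn_route_input() is the input-side counterpart of
 * __dn_route_output_key(): it probes the cache under rcu_read_lock()
 * for an entry matching the packet's source, destination, incoming
 * interface and fwmark, and only calls dn_route_input_slow() on a miss.
 */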
int dn_route_input(struct sk_buff *skb)
{
	struct dn_route *rt;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned hash = dn_hash(cb->src, cb->dst);

	if (skb->dst)
		return 0;

	rcu_read_lock();
	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
	    rt = rcu_dereference(rt->u.rt_next)) {
		if ((rt->fl.fld_src == cb->src) &&
		    (rt->fl.fld_dst == cb->dst) &&
		    (rt->fl.oif == 0) &&
#ifdef CONFIG_DECNET_ROUTE_FWMARK
		    (rt->fl.fld_fwmark == skb->nfmark) &&
#endif
		    (rt->fl.iif == cb->iif)) {
			rt->u.dst.lastuse = jiffies;
			dst_hold(&rt->u.dst);
			rt->u.dst.__use++;
			rcu_read_unlock();
			skb->dst = (struct dst_entry *)rt;
			return 0;
		}
	}
	rcu_read_unlock();

	return dn_route_input_slow(skb);
}

static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			   int event, int nowait, unsigned int flags)
{
	struct dn_route *rt = (struct dn_route *)skb->dst;
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;
	struct rta_cacheinfo ci;

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
	r = NLMSG_DATA(nlh);
	r->rtm_family = AF_DECnet;
	r->rtm_dst_len = 16;
	r->rtm_src_len = 0;
	r->rtm_tos = 0;
	r->rtm_table = RT_TABLE_MAIN;
	r->rtm_type = rt->rt_type;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
	if (rt->fl.fld_src) {
		r->rtm_src_len = 16;
		RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
	}
	if (rt->u.dst.dev)
		RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->u.dst.dev->ifindex);
	/*
	 * Note to self - change this if input routes reverse direction when
	 * they deal only with inputs and not with replies like they do
	 * currently.
	 */
	RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
	if (rt->rt_daddr != rt->rt_gateway)
		RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
	if (rtnetlink_put_metrics(skb, rt->u.dst.metrics) < 0)
		goto rtattr_failure;
	ci.rta_lastuse = jiffies_to_clock_t(jiffies - rt->u.dst.lastuse);
	ci.rta_used = rt->u.dst.__use;
	ci.rta_clntref = atomic_read(&rt->u.dst.__refcnt);
	if (rt->u.dst.expires)
		ci.rta_expires = jiffies_to_clock_t(rt->u.dst.expires - jiffies);
	else
		ci.rta_expires = 0;
	ci.rta_error = rt->u.dst.error;
	ci.rta_id = ci.rta_ts = ci.rta_tsage = 0;
	RTA_PUT(skb, RTA_CACHEINFO, sizeof(ci), &ci);
	if (rt->fl.iif)
		RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);

	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

/*
 * This is called by both endnodes and routers now.
 */
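/*
 * dn_cache_getroute() answers an RTM_GETROUTE request: it builds a
 * scratch skb, resolves the requested route through either the input
 * path (when an incoming interface is given) or the output path, fills
 * in an RTM_NEWROUTE message with dn_rt_fill_info() and unicasts the
 * reply back to the requester.
 */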
int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct rtattr **rta = arg;
	struct rtmsg *rtm = NLMSG_DATA(nlh);
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb;
	int err;
	struct sk_buff *skb;
	struct flowi fl;

	memset(&fl, 0, sizeof(fl));
	fl.proto = DNPROTO_NSP;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL)
		return -ENOBUFS;
	skb->mac.raw = skb->data;
	cb = DN_SKB_CB(skb);

	if (rta[RTA_SRC-1])
		memcpy(&fl.fld_src, RTA_DATA(rta[RTA_SRC-1]), 2);
	if (rta[RTA_DST-1])
		memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
	if (rta[RTA_IIF-1])
		memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));

	if (fl.iif) {
		struct net_device *dev;
		if ((dev = dev_get_by_index(fl.iif)) == NULL) {
			kfree_skb(skb);
			return -ENODEV;
		}
		if (!dev->dn_ptr) {
			dev_put(dev);
			kfree_skb(skb);
			return -ENODEV;
		}
		skb->protocol = __constant_htons(ETH_P_DNA_RT);
		skb->dev = dev;
		cb->src = fl.fld_src;
		cb->dst = fl.fld_dst;
		local_bh_disable();
		err = dn_route_input(skb);
		local_bh_enable();
		memset(cb, 0, sizeof(struct dn_skb_cb));
		rt = (struct dn_route *)skb->dst;
		if (!err && -rt->u.dst.error)
			err = rt->u.dst.error;
	} else {
		int oif = 0;
		if (rta[RTA_OIF - 1])
			memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
		fl.oif = oif;
		err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
	}

	if (skb->dev)
		dev_put(skb->dev);
	skb->dev = NULL;
	if (err)
		goto out_free;
	skb->dst = &rt->u.dst;
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	NETLINK_CB(skb).dst_pid = NETLINK_CB(in_skb).pid;

	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);

	if (err == 0)
		goto out_free;
	if (err < 0) {
		err = -EMSGSIZE;
		goto out_free;
	}

	err = netlink_unicast(rtnl, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);

	return err;

out_free:
	kfree_skb(skb);
	return err;
}

/*
 * For routers, this is called from dn_fib_dump, but for endnodes it's
 * called directly from the rtnetlink dispatch table.
 */
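/*
 * dn_cache_dump() walks every hash bucket under rcu_read_lock_bh() and
 * emits one RTM_NEWROUTE message per cached entry.  The current bucket
 * and index are kept in cb->args[] so that a dump interrupted by a full
 * skb can be resumed where it left off.
 */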
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct dn_route *rt;
	int h, s_h;
	int idx, s_idx;

	if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
		return -EINVAL;
	if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags&RTM_F_CLONED))
		return 0;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for(h = 0; h <= dn_rt_hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		rcu_read_lock_bh();
		for(rt = rcu_dereference(dn_rt_hash_table[h].chain), idx = 0;
		    rt;
		    rt = rcu_dereference(rt->u.rt_next), idx++) {
			if (idx < s_idx)
				continue;
			skb->dst = dst_clone(&rt->u.dst);
			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					    cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					    1, NLM_F_MULTI) <= 0) {
				dst_release(xchg(&skb->dst, NULL));
				rcu_read_unlock_bh();
				goto done;
			}
			dst_release(xchg(&skb->dst, NULL));
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}

#ifdef CONFIG_PROC_FS
struct dn_rt_cache_iter_state {
	int bucket;
};

static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
{
	struct dn_route *rt = NULL;
	struct dn_rt_cache_iter_state *s = seq->private;

	for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
		rcu_read_lock_bh();
		rt = dn_rt_hash_table[s->bucket].chain;
		if (rt)
			break;
		rcu_read_unlock_bh();
	}
	return rt;
}

static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
{
	struct dn_rt_cache_iter_state *s = rcu_dereference(seq->private);

	rt = rt->u.rt_next;
	while(!rt) {
		rcu_read_unlock_bh();
		if (--s->bucket < 0)
			break;
		rcu_read_lock_bh();
		rt = dn_rt_hash_table[s->bucket].chain;
	}
	return rt;
}

static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_first(seq);

	if (rt) {
		while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
			--*pos;
	}
	return *pos ? NULL : rt;
}

static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_next(seq, v);
	++*pos;
	return rt;
}

static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		rcu_read_unlock_bh();
}

static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
{
	struct dn_route *rt = v;
	char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];

	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
		   rt->u.dst.dev ? rt->u.dst.dev->name : "*",
		   dn_addr2asc(dn_ntohs(rt->rt_daddr), buf1),
		   dn_addr2asc(dn_ntohs(rt->rt_saddr), buf2),
		   atomic_read(&rt->u.dst.__refcnt),
		   rt->u.dst.__use,
		   (int) dst_metric(&rt->u.dst, RTAX_RTT));
	return 0;
}

static struct seq_operations dn_rt_cache_seq_ops = {
	.start = dn_rt_cache_seq_start,
	.next = dn_rt_cache_seq_next,
	.stop = dn_rt_cache_seq_stop,
	.show = dn_rt_cache_seq_show,
};

static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
{
	struct seq_file *seq;
	int rc = -ENOMEM;
	struct dn_rt_cache_iter_state *s = kmalloc(sizeof(*s), GFP_KERNEL);

	if (!s)
		goto out;
	rc = seq_open(file, &dn_rt_cache_seq_ops);
	if (rc)
		goto out_kfree;
	seq = file->private_data;
	seq->private = s;
	memset(s, 0, sizeof(*s));
out:
	return rc;
out_kfree:
	kfree(s);
	goto out;
}

static struct file_operations dn_rt_cache_seq_fops = {
	.owner = THIS_MODULE,
	.open = dn_rt_cache_seq_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release_private,
};

#endif /* CONFIG_PROC_FS */

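/*
 * The route cache hash table is sized from the amount of physical
 * memory (roughly one page of buckets per 64 MB of RAM via
 * num_physpages >> (26 - PAGE_SHIFT)), capped at roughly 2048 buckets
 * and rounded down to a power of two.  As an example, assuming 4 KB
 * pages and 16 byte buckets, a single page holds 256 buckets.
 * dn_rt_hash_mask is then decremented so that it can be used directly
 * as a mask in dn_hash().
 */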
void __init dn_route_init(void)
{
	int i, goal, order;

	dn_dst_ops.kmem_cachep = kmem_cache_create("dn_dst_cache",
						   sizeof(struct dn_route),
						   0, SLAB_HWCACHE_ALIGN,
						   NULL, NULL);

	if (!dn_dst_ops.kmem_cachep)
		panic("DECnet: Failed to allocate dn_dst_cache\n");

	init_timer(&dn_route_timer);
	dn_route_timer.function = dn_dst_check_expire;
	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
	add_timer(&dn_route_timer);

	goal = num_physpages >> (26 - PAGE_SHIFT);

	for(order = 0; (1UL << order) < goal; order++)
		/* NOTHING */;

	/*
	 * Only want 2048 entries max, since the table is very, very unlikely
	 * to be larger than that.
	 */
	while(order && ((((1UL << order) * PAGE_SIZE) /
				sizeof(struct dn_rt_hash_bucket)) >= 2048))
		order--;

	do {
		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
			sizeof(struct dn_rt_hash_bucket);
		while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
			dn_rt_hash_mask--;
		dn_rt_hash_table = (struct dn_rt_hash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dn_rt_hash_table == NULL && --order > 0);

	if (!dn_rt_hash_table)
		panic("Failed to allocate DECnet route cache hash table\n");

	printk(KERN_INFO
		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
		dn_rt_hash_mask,
		(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);

	dn_rt_hash_mask--;
	for(i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_init(&dn_rt_hash_table[i].lock);
		dn_rt_hash_table[i].chain = NULL;
	}

	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

	proc_net_fops_create("decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);
}

void __exit dn_route_cleanup(void)
{
	del_timer(&dn_route_timer);
	dn_run_flush(0);

	proc_net_remove("decnet_cache");
}