/* net/decnet/dn_table.c — Linux v2.6.18-rc6 (reconstructed from web dump) */
1/* 2 * DECnet An implementation of the DECnet protocol suite for the LINUX 3 * operating system. DECnet is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 5 * 6 * DECnet Routing Forwarding Information Base (Routing Tables) 7 * 8 * Author: Steve Whitehouse <SteveW@ACM.org> 9 * Mostly copied from the IPv4 routing code 10 * 11 * 12 * Changes: 13 * 14 */ 15#include <linux/string.h> 16#include <linux/net.h> 17#include <linux/socket.h> 18#include <linux/sockios.h> 19#include <linux/init.h> 20#include <linux/skbuff.h> 21#include <linux/netlink.h> 22#include <linux/rtnetlink.h> 23#include <linux/proc_fs.h> 24#include <linux/netdevice.h> 25#include <linux/timer.h> 26#include <linux/spinlock.h> 27#include <asm/atomic.h> 28#include <asm/uaccess.h> 29#include <linux/route.h> /* RTF_xxx */ 30#include <net/neighbour.h> 31#include <net/dst.h> 32#include <net/flow.h> 33#include <net/dn.h> 34#include <net/dn_route.h> 35#include <net/dn_fib.h> 36#include <net/dn_neigh.h> 37#include <net/dn_dev.h> 38 39struct dn_zone 40{ 41 struct dn_zone *dz_next; 42 struct dn_fib_node **dz_hash; 43 int dz_nent; 44 int dz_divisor; 45 u32 dz_hashmask; 46#define DZ_HASHMASK(dz) ((dz)->dz_hashmask) 47 int dz_order; 48 __le16 dz_mask; 49#define DZ_MASK(dz) ((dz)->dz_mask) 50}; 51 52struct dn_hash 53{ 54 struct dn_zone *dh_zones[17]; 55 struct dn_zone *dh_zone_list; 56}; 57 58#define dz_key_0(key) ((key).datum = 0) 59#define dz_prefix(key,dz) ((key).datum) 60 61#define for_nexthops(fi) { int nhsel; const struct dn_fib_nh *nh;\ 62 for(nhsel = 0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++) 63 64#define endfor_nexthops(fi) } 65 66#define DN_MAX_DIVISOR 1024 67#define DN_S_ZOMBIE 1 68#define DN_S_ACCESSED 2 69 70#define DN_FIB_SCAN(f, fp) \ 71for( ; ((f) = *(fp)) != NULL; (fp) = &(f)->fn_next) 72 73#define DN_FIB_SCAN_KEY(f, fp, key) \ 74for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_next) 75 76#define 
RT_TABLE_MIN 1 77 78static DEFINE_RWLOCK(dn_fib_tables_lock); 79struct dn_fib_table *dn_fib_tables[RT_TABLE_MAX + 1]; 80 81static kmem_cache_t *dn_hash_kmem __read_mostly; 82static int dn_fib_hash_zombies; 83 84static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz) 85{ 86 u16 h = dn_ntohs(key.datum)>>(16 - dz->dz_order); 87 h ^= (h >> 10); 88 h ^= (h >> 6); 89 h &= DZ_HASHMASK(dz); 90 return *(dn_fib_idx_t *)&h; 91} 92 93static inline dn_fib_key_t dz_key(__le16 dst, struct dn_zone *dz) 94{ 95 dn_fib_key_t k; 96 k.datum = dst & DZ_MASK(dz); 97 return k; 98} 99 100static inline struct dn_fib_node **dn_chain_p(dn_fib_key_t key, struct dn_zone *dz) 101{ 102 return &dz->dz_hash[dn_hash(key, dz).datum]; 103} 104 105static inline struct dn_fib_node *dz_chain(dn_fib_key_t key, struct dn_zone *dz) 106{ 107 return dz->dz_hash[dn_hash(key, dz).datum]; 108} 109 110static inline int dn_key_eq(dn_fib_key_t a, dn_fib_key_t b) 111{ 112 return a.datum == b.datum; 113} 114 115static inline int dn_key_leq(dn_fib_key_t a, dn_fib_key_t b) 116{ 117 return a.datum <= b.datum; 118} 119 120static inline void dn_rebuild_zone(struct dn_zone *dz, 121 struct dn_fib_node **old_ht, 122 int old_divisor) 123{ 124 int i; 125 struct dn_fib_node *f, **fp, *next; 126 127 for(i = 0; i < old_divisor; i++) { 128 for(f = old_ht[i]; f; f = f->fn_next) { 129 next = f->fn_next; 130 for(fp = dn_chain_p(f->fn_key, dz); 131 *fp && dn_key_leq((*fp)->fn_key, f->fn_key); 132 fp = &(*fp)->fn_next) 133 /* NOTHING */; 134 f->fn_next = *fp; 135 *fp = f; 136 } 137 } 138} 139 140static void dn_rehash_zone(struct dn_zone *dz) 141{ 142 struct dn_fib_node **ht, **old_ht; 143 int old_divisor, new_divisor; 144 u32 new_hashmask; 145 146 old_divisor = dz->dz_divisor; 147 148 switch(old_divisor) { 149 case 16: 150 new_divisor = 256; 151 new_hashmask = 0xFF; 152 break; 153 default: 154 printk(KERN_DEBUG "DECnet: dn_rehash_zone: BUG! 
%d\n", old_divisor); 155 case 256: 156 new_divisor = 1024; 157 new_hashmask = 0x3FF; 158 break; 159 } 160 161 ht = kcalloc(new_divisor, sizeof(struct dn_fib_node*), GFP_KERNEL); 162 if (ht == NULL) 163 return; 164 165 write_lock_bh(&dn_fib_tables_lock); 166 old_ht = dz->dz_hash; 167 dz->dz_hash = ht; 168 dz->dz_hashmask = new_hashmask; 169 dz->dz_divisor = new_divisor; 170 dn_rebuild_zone(dz, old_ht, old_divisor); 171 write_unlock_bh(&dn_fib_tables_lock); 172 kfree(old_ht); 173} 174 175static void dn_free_node(struct dn_fib_node *f) 176{ 177 dn_fib_release_info(DN_FIB_INFO(f)); 178 kmem_cache_free(dn_hash_kmem, f); 179} 180 181 182static struct dn_zone *dn_new_zone(struct dn_hash *table, int z) 183{ 184 int i; 185 struct dn_zone *dz = kzalloc(sizeof(struct dn_zone), GFP_KERNEL); 186 if (!dz) 187 return NULL; 188 189 if (z) { 190 dz->dz_divisor = 16; 191 dz->dz_hashmask = 0x0F; 192 } else { 193 dz->dz_divisor = 1; 194 dz->dz_hashmask = 0; 195 } 196 197 dz->dz_hash = kcalloc(dz->dz_divisor, sizeof(struct dn_fib_node *), GFP_KERNEL); 198 if (!dz->dz_hash) { 199 kfree(dz); 200 return NULL; 201 } 202 203 dz->dz_order = z; 204 dz->dz_mask = dnet_make_mask(z); 205 206 for(i = z + 1; i <= 16; i++) 207 if (table->dh_zones[i]) 208 break; 209 210 write_lock_bh(&dn_fib_tables_lock); 211 if (i>16) { 212 dz->dz_next = table->dh_zone_list; 213 table->dh_zone_list = dz; 214 } else { 215 dz->dz_next = table->dh_zones[i]->dz_next; 216 table->dh_zones[i]->dz_next = dz; 217 } 218 table->dh_zones[z] = dz; 219 write_unlock_bh(&dn_fib_tables_lock); 220 return dz; 221} 222 223 224static int dn_fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct dn_kern_rta *rta, struct dn_fib_info *fi) 225{ 226 struct rtnexthop *nhp; 227 int nhlen; 228 229 if (rta->rta_priority && *rta->rta_priority != fi->fib_priority) 230 return 1; 231 232 if (rta->rta_oif || rta->rta_gw) { 233 if ((!rta->rta_oif || *rta->rta_oif == fi->fib_nh->nh_oif) && 234 (!rta->rta_gw || memcmp(rta->rta_gw, 
&fi->fib_nh->nh_gw, 2) == 0)) 235 return 0; 236 return 1; 237 } 238 239 if (rta->rta_mp == NULL) 240 return 0; 241 242 nhp = RTA_DATA(rta->rta_mp); 243 nhlen = RTA_PAYLOAD(rta->rta_mp); 244 245 for_nexthops(fi) { 246 int attrlen = nhlen - sizeof(struct rtnexthop); 247 __le16 gw; 248 249 if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0) 250 return -EINVAL; 251 if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif) 252 return 1; 253 if (attrlen) { 254 gw = dn_fib_get_attr16(RTNH_DATA(nhp), attrlen, RTA_GATEWAY); 255 256 if (gw && gw != nh->nh_gw) 257 return 1; 258 } 259 nhp = RTNH_NEXT(nhp); 260 } endfor_nexthops(fi); 261 262 return 0; 263} 264 265static int dn_fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event, 266 u8 tb_id, u8 type, u8 scope, void *dst, int dst_len, 267 struct dn_fib_info *fi, unsigned int flags) 268{ 269 struct rtmsg *rtm; 270 struct nlmsghdr *nlh; 271 unsigned char *b = skb->tail; 272 273 nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags); 274 rtm = NLMSG_DATA(nlh); 275 rtm->rtm_family = AF_DECnet; 276 rtm->rtm_dst_len = dst_len; 277 rtm->rtm_src_len = 0; 278 rtm->rtm_tos = 0; 279 rtm->rtm_table = tb_id; 280 rtm->rtm_flags = fi->fib_flags; 281 rtm->rtm_scope = scope; 282 rtm->rtm_type = type; 283 if (rtm->rtm_dst_len) 284 RTA_PUT(skb, RTA_DST, 2, dst); 285 rtm->rtm_protocol = fi->fib_protocol; 286 if (fi->fib_priority) 287 RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority); 288 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0) 289 goto rtattr_failure; 290 if (fi->fib_nhs == 1) { 291 if (fi->fib_nh->nh_gw) 292 RTA_PUT(skb, RTA_GATEWAY, 2, &fi->fib_nh->nh_gw); 293 if (fi->fib_nh->nh_oif) 294 RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif); 295 } 296 if (fi->fib_nhs > 1) { 297 struct rtnexthop *nhp; 298 struct rtattr *mp_head; 299 if (skb_tailroom(skb) <= RTA_SPACE(0)) 300 goto rtattr_failure; 301 mp_head = (struct rtattr *)skb_put(skb, RTA_SPACE(0)); 302 303 for_nexthops(fi) { 304 if (skb_tailroom(skb) < 
RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4)) 305 goto rtattr_failure; 306 nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp))); 307 nhp->rtnh_flags = nh->nh_flags & 0xFF; 308 nhp->rtnh_hops = nh->nh_weight - 1; 309 nhp->rtnh_ifindex = nh->nh_oif; 310 if (nh->nh_gw) 311 RTA_PUT(skb, RTA_GATEWAY, 2, &nh->nh_gw); 312 nhp->rtnh_len = skb->tail - (unsigned char *)nhp; 313 } endfor_nexthops(fi); 314 mp_head->rta_type = RTA_MULTIPATH; 315 mp_head->rta_len = skb->tail - (u8*)mp_head; 316 } 317 318 nlh->nlmsg_len = skb->tail - b; 319 return skb->len; 320 321 322nlmsg_failure: 323rtattr_failure: 324 skb_trim(skb, b - skb->data); 325 return -1; 326} 327 328 329static void dn_rtmsg_fib(int event, struct dn_fib_node *f, int z, int tb_id, 330 struct nlmsghdr *nlh, struct netlink_skb_parms *req) 331{ 332 struct sk_buff *skb; 333 u32 pid = req ? req->pid : 0; 334 int size = NLMSG_SPACE(sizeof(struct rtmsg) + 256); 335 336 skb = alloc_skb(size, GFP_KERNEL); 337 if (!skb) 338 return; 339 340 if (dn_fib_dump_info(skb, pid, nlh->nlmsg_seq, event, tb_id, 341 f->fn_type, f->fn_scope, &f->fn_key, z, 342 DN_FIB_INFO(f), 0) < 0) { 343 kfree_skb(skb); 344 return; 345 } 346 NETLINK_CB(skb).dst_group = RTNLGRP_DECnet_ROUTE; 347 if (nlh->nlmsg_flags & NLM_F_ECHO) 348 atomic_inc(&skb->users); 349 netlink_broadcast(rtnl, skb, pid, RTNLGRP_DECnet_ROUTE, GFP_KERNEL); 350 if (nlh->nlmsg_flags & NLM_F_ECHO) 351 netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT); 352} 353 354static __inline__ int dn_hash_dump_bucket(struct sk_buff *skb, 355 struct netlink_callback *cb, 356 struct dn_fib_table *tb, 357 struct dn_zone *dz, 358 struct dn_fib_node *f) 359{ 360 int i, s_i; 361 362 s_i = cb->args[3]; 363 for(i = 0; f; i++, f = f->fn_next) { 364 if (i < s_i) 365 continue; 366 if (f->fn_state & DN_S_ZOMBIE) 367 continue; 368 if (dn_fib_dump_info(skb, NETLINK_CB(cb->skb).pid, 369 cb->nlh->nlmsg_seq, 370 RTM_NEWROUTE, 371 tb->n, 372 (f->fn_state & DN_S_ZOMBIE) ? 
0 : f->fn_type, 373 f->fn_scope, &f->fn_key, dz->dz_order, 374 f->fn_info, NLM_F_MULTI) < 0) { 375 cb->args[3] = i; 376 return -1; 377 } 378 } 379 cb->args[3] = i; 380 return skb->len; 381} 382 383static __inline__ int dn_hash_dump_zone(struct sk_buff *skb, 384 struct netlink_callback *cb, 385 struct dn_fib_table *tb, 386 struct dn_zone *dz) 387{ 388 int h, s_h; 389 390 s_h = cb->args[2]; 391 for(h = 0; h < dz->dz_divisor; h++) { 392 if (h < s_h) 393 continue; 394 if (h > s_h) 395 memset(&cb->args[3], 0, sizeof(cb->args) - 3*sizeof(cb->args[0])); 396 if (dz->dz_hash == NULL || dz->dz_hash[h] == NULL) 397 continue; 398 if (dn_hash_dump_bucket(skb, cb, tb, dz, dz->dz_hash[h]) < 0) { 399 cb->args[2] = h; 400 return -1; 401 } 402 } 403 cb->args[2] = h; 404 return skb->len; 405} 406 407static int dn_fib_table_dump(struct dn_fib_table *tb, struct sk_buff *skb, 408 struct netlink_callback *cb) 409{ 410 int m, s_m; 411 struct dn_zone *dz; 412 struct dn_hash *table = (struct dn_hash *)tb->data; 413 414 s_m = cb->args[1]; 415 read_lock(&dn_fib_tables_lock); 416 for(dz = table->dh_zone_list, m = 0; dz; dz = dz->dz_next, m++) { 417 if (m < s_m) 418 continue; 419 if (m > s_m) 420 memset(&cb->args[2], 0, sizeof(cb->args) - 2*sizeof(cb->args[0])); 421 422 if (dn_hash_dump_zone(skb, cb, tb, dz) < 0) { 423 cb->args[1] = m; 424 read_unlock(&dn_fib_tables_lock); 425 return -1; 426 } 427 } 428 read_unlock(&dn_fib_tables_lock); 429 cb->args[1] = m; 430 431 return skb->len; 432} 433 434static int dn_fib_table_insert(struct dn_fib_table *tb, struct rtmsg *r, struct dn_kern_rta *rta, struct nlmsghdr *n, struct netlink_skb_parms *req) 435{ 436 struct dn_hash *table = (struct dn_hash *)tb->data; 437 struct dn_fib_node *new_f, *f, **fp, **del_fp; 438 struct dn_zone *dz; 439 struct dn_fib_info *fi; 440 int z = r->rtm_dst_len; 441 int type = r->rtm_type; 442 dn_fib_key_t key; 443 int err; 444 445 if (z > 16) 446 return -EINVAL; 447 448 dz = table->dh_zones[z]; 449 if (!dz && !(dz = 
dn_new_zone(table, z))) 450 return -ENOBUFS; 451 452 dz_key_0(key); 453 if (rta->rta_dst) { 454 __le16 dst; 455 memcpy(&dst, rta->rta_dst, 2); 456 if (dst & ~DZ_MASK(dz)) 457 return -EINVAL; 458 key = dz_key(dst, dz); 459 } 460 461 if ((fi = dn_fib_create_info(r, rta, n, &err)) == NULL) 462 return err; 463 464 if (dz->dz_nent > (dz->dz_divisor << 2) && 465 dz->dz_divisor > DN_MAX_DIVISOR && 466 (z==16 || (1<<z) > dz->dz_divisor)) 467 dn_rehash_zone(dz); 468 469 fp = dn_chain_p(key, dz); 470 471 DN_FIB_SCAN(f, fp) { 472 if (dn_key_leq(key, f->fn_key)) 473 break; 474 } 475 476 del_fp = NULL; 477 478 if (f && (f->fn_state & DN_S_ZOMBIE) && 479 dn_key_eq(f->fn_key, key)) { 480 del_fp = fp; 481 fp = &f->fn_next; 482 f = *fp; 483 goto create; 484 } 485 486 DN_FIB_SCAN_KEY(f, fp, key) { 487 if (fi->fib_priority <= DN_FIB_INFO(f)->fib_priority) 488 break; 489 } 490 491 if (f && dn_key_eq(f->fn_key, key) && 492 fi->fib_priority == DN_FIB_INFO(f)->fib_priority) { 493 struct dn_fib_node **ins_fp; 494 495 err = -EEXIST; 496 if (n->nlmsg_flags & NLM_F_EXCL) 497 goto out; 498 499 if (n->nlmsg_flags & NLM_F_REPLACE) { 500 del_fp = fp; 501 fp = &f->fn_next; 502 f = *fp; 503 goto replace; 504 } 505 506 ins_fp = fp; 507 err = -EEXIST; 508 509 DN_FIB_SCAN_KEY(f, fp, key) { 510 if (fi->fib_priority != DN_FIB_INFO(f)->fib_priority) 511 break; 512 if (f->fn_type == type && f->fn_scope == r->rtm_scope 513 && DN_FIB_INFO(f) == fi) 514 goto out; 515 } 516 517 if (!(n->nlmsg_flags & NLM_F_APPEND)) { 518 fp = ins_fp; 519 f = *fp; 520 } 521 } 522 523create: 524 err = -ENOENT; 525 if (!(n->nlmsg_flags & NLM_F_CREATE)) 526 goto out; 527 528replace: 529 err = -ENOBUFS; 530 new_f = kmem_cache_alloc(dn_hash_kmem, SLAB_KERNEL); 531 if (new_f == NULL) 532 goto out; 533 534 memset(new_f, 0, sizeof(struct dn_fib_node)); 535 536 new_f->fn_key = key; 537 new_f->fn_type = type; 538 new_f->fn_scope = r->rtm_scope; 539 DN_FIB_INFO(new_f) = fi; 540 541 new_f->fn_next = f; 542 
write_lock_bh(&dn_fib_tables_lock); 543 *fp = new_f; 544 write_unlock_bh(&dn_fib_tables_lock); 545 dz->dz_nent++; 546 547 if (del_fp) { 548 f = *del_fp; 549 write_lock_bh(&dn_fib_tables_lock); 550 *del_fp = f->fn_next; 551 write_unlock_bh(&dn_fib_tables_lock); 552 553 if (!(f->fn_state & DN_S_ZOMBIE)) 554 dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req); 555 if (f->fn_state & DN_S_ACCESSED) 556 dn_rt_cache_flush(-1); 557 dn_free_node(f); 558 dz->dz_nent--; 559 } else { 560 dn_rt_cache_flush(-1); 561 } 562 563 dn_rtmsg_fib(RTM_NEWROUTE, new_f, z, tb->n, n, req); 564 565 return 0; 566out: 567 dn_fib_release_info(fi); 568 return err; 569} 570 571 572static int dn_fib_table_delete(struct dn_fib_table *tb, struct rtmsg *r, struct dn_kern_rta *rta, struct nlmsghdr *n, struct netlink_skb_parms *req) 573{ 574 struct dn_hash *table = (struct dn_hash*)tb->data; 575 struct dn_fib_node **fp, **del_fp, *f; 576 int z = r->rtm_dst_len; 577 struct dn_zone *dz; 578 dn_fib_key_t key; 579 int matched; 580 581 582 if (z > 16) 583 return -EINVAL; 584 585 if ((dz = table->dh_zones[z]) == NULL) 586 return -ESRCH; 587 588 dz_key_0(key); 589 if (rta->rta_dst) { 590 __le16 dst; 591 memcpy(&dst, rta->rta_dst, 2); 592 if (dst & ~DZ_MASK(dz)) 593 return -EINVAL; 594 key = dz_key(dst, dz); 595 } 596 597 fp = dn_chain_p(key, dz); 598 599 DN_FIB_SCAN(f, fp) { 600 if (dn_key_eq(f->fn_key, key)) 601 break; 602 if (dn_key_leq(key, f->fn_key)) 603 return -ESRCH; 604 } 605 606 matched = 0; 607 del_fp = NULL; 608 DN_FIB_SCAN_KEY(f, fp, key) { 609 struct dn_fib_info *fi = DN_FIB_INFO(f); 610 611 if (f->fn_state & DN_S_ZOMBIE) 612 return -ESRCH; 613 614 matched++; 615 616 if (del_fp == NULL && 617 (!r->rtm_type || f->fn_type == r->rtm_type) && 618 (r->rtm_scope == RT_SCOPE_NOWHERE || f->fn_scope == r->rtm_scope) && 619 (!r->rtm_protocol || 620 fi->fib_protocol == r->rtm_protocol) && 621 dn_fib_nh_match(r, n, rta, fi) == 0) 622 del_fp = fp; 623 } 624 625 if (del_fp) { 626 f = *del_fp; 627 
dn_rtmsg_fib(RTM_DELROUTE, f, z, tb->n, n, req); 628 629 if (matched != 1) { 630 write_lock_bh(&dn_fib_tables_lock); 631 *del_fp = f->fn_next; 632 write_unlock_bh(&dn_fib_tables_lock); 633 634 if (f->fn_state & DN_S_ACCESSED) 635 dn_rt_cache_flush(-1); 636 dn_free_node(f); 637 dz->dz_nent--; 638 } else { 639 f->fn_state |= DN_S_ZOMBIE; 640 if (f->fn_state & DN_S_ACCESSED) { 641 f->fn_state &= ~DN_S_ACCESSED; 642 dn_rt_cache_flush(-1); 643 } 644 if (++dn_fib_hash_zombies > 128) 645 dn_fib_flush(); 646 } 647 648 return 0; 649 } 650 651 return -ESRCH; 652} 653 654static inline int dn_flush_list(struct dn_fib_node **fp, int z, struct dn_hash *table) 655{ 656 int found = 0; 657 struct dn_fib_node *f; 658 659 while((f = *fp) != NULL) { 660 struct dn_fib_info *fi = DN_FIB_INFO(f); 661 662 if (fi && ((f->fn_state & DN_S_ZOMBIE) || (fi->fib_flags & RTNH_F_DEAD))) { 663 write_lock_bh(&dn_fib_tables_lock); 664 *fp = f->fn_next; 665 write_unlock_bh(&dn_fib_tables_lock); 666 667 dn_free_node(f); 668 found++; 669 continue; 670 } 671 fp = &f->fn_next; 672 } 673 674 return found; 675} 676 677static int dn_fib_table_flush(struct dn_fib_table *tb) 678{ 679 struct dn_hash *table = (struct dn_hash *)tb->data; 680 struct dn_zone *dz; 681 int found = 0; 682 683 dn_fib_hash_zombies = 0; 684 for(dz = table->dh_zone_list; dz; dz = dz->dz_next) { 685 int i; 686 int tmp = 0; 687 for(i = dz->dz_divisor-1; i >= 0; i--) 688 tmp += dn_flush_list(&dz->dz_hash[i], dz->dz_order, table); 689 dz->dz_nent -= tmp; 690 found += tmp; 691 } 692 693 return found; 694} 695 696static int dn_fib_table_lookup(struct dn_fib_table *tb, const struct flowi *flp, struct dn_fib_res *res) 697{ 698 int err; 699 struct dn_zone *dz; 700 struct dn_hash *t = (struct dn_hash *)tb->data; 701 702 read_lock(&dn_fib_tables_lock); 703 for(dz = t->dh_zone_list; dz; dz = dz->dz_next) { 704 struct dn_fib_node *f; 705 dn_fib_key_t k = dz_key(flp->fld_dst, dz); 706 707 for(f = dz_chain(k, dz); f; f = f->fn_next) { 708 if 
(!dn_key_eq(k, f->fn_key)) { 709 if (dn_key_leq(k, f->fn_key)) 710 break; 711 else 712 continue; 713 } 714 715 f->fn_state |= DN_S_ACCESSED; 716 717 if (f->fn_state&DN_S_ZOMBIE) 718 continue; 719 720 if (f->fn_scope < flp->fld_scope) 721 continue; 722 723 err = dn_fib_semantic_match(f->fn_type, DN_FIB_INFO(f), flp, res); 724 725 if (err == 0) { 726 res->type = f->fn_type; 727 res->scope = f->fn_scope; 728 res->prefixlen = dz->dz_order; 729 goto out; 730 } 731 if (err < 0) 732 goto out; 733 } 734 } 735 err = 1; 736out: 737 read_unlock(&dn_fib_tables_lock); 738 return err; 739} 740 741 742struct dn_fib_table *dn_fib_get_table(int n, int create) 743{ 744 struct dn_fib_table *t; 745 746 if (n < RT_TABLE_MIN) 747 return NULL; 748 749 if (n > RT_TABLE_MAX) 750 return NULL; 751 752 if (dn_fib_tables[n]) 753 return dn_fib_tables[n]; 754 755 if (!create) 756 return NULL; 757 758 if (in_interrupt() && net_ratelimit()) { 759 printk(KERN_DEBUG "DECnet: BUG! Attempt to create routing table from interrupt\n"); 760 return NULL; 761 } 762 if ((t = kmalloc(sizeof(struct dn_fib_table) + sizeof(struct dn_hash), GFP_KERNEL)) == NULL) 763 return NULL; 764 765 memset(t, 0, sizeof(struct dn_fib_table)); 766 767 t->n = n; 768 t->insert = dn_fib_table_insert; 769 t->delete = dn_fib_table_delete; 770 t->lookup = dn_fib_table_lookup; 771 t->flush = dn_fib_table_flush; 772 t->dump = dn_fib_table_dump; 773 memset(t->data, 0, sizeof(struct dn_hash)); 774 dn_fib_tables[n] = t; 775 776 return t; 777} 778 779static void dn_fib_del_tree(int n) 780{ 781 struct dn_fib_table *t; 782 783 write_lock(&dn_fib_tables_lock); 784 t = dn_fib_tables[n]; 785 dn_fib_tables[n] = NULL; 786 write_unlock(&dn_fib_tables_lock); 787 788 kfree(t); 789} 790 791struct dn_fib_table *dn_fib_empty_table(void) 792{ 793 int id; 794 795 for(id = RT_TABLE_MIN; id <= RT_TABLE_MAX; id++) 796 if (dn_fib_tables[id] == NULL) 797 return dn_fib_get_table(id, 1); 798 return NULL; 799} 800 801void __init dn_fib_table_init(void) 802{ 803 
dn_hash_kmem = kmem_cache_create("dn_fib_info_cache", 804 sizeof(struct dn_fib_info), 805 0, SLAB_HWCACHE_ALIGN, 806 NULL, NULL); 807} 808 809void __exit dn_fib_table_cleanup(void) 810{ 811 int i; 812 813 for (i = RT_TABLE_MIN; i <= RT_TABLE_MAX; ++i) 814 dn_fib_del_tree(i); 815 816 return; 817}