Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
xfrm_policy.c at v2.6.39-rc6 (2962 lines, 72 kB)
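The selector-matching helpers near the top of the listing (__xfrm4_selector_match and __xfrm6_selector_match) compare addresses by prefix length and ports with an XOR-and-mask test, so a zero mask acts as a wildcard. A minimal user-space sketch of that port test follows; the struct, field names, and host-byte-order values are illustrative only (the kernel keeps selector ports in network byte order) and are not taken from this file:

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the port fields of struct xfrm_selector. */
struct sel_port {
        uint16_t dport;      /* expected destination port */
        uint16_t dport_mask; /* 0xffff = exact match, 0x0000 = match any */
};

/* Same idiom as the kernel check: !((flow_port ^ sel->dport) & sel->dport_mask) */
static int port_matches(uint16_t flow_port, const struct sel_port *sel)
{
        return !((flow_port ^ sel->dport) & sel->dport_mask);
}

int main(void)
{
        struct sel_port exact = { .dport = 500, .dport_mask = 0xffff };
        struct sel_port any   = { .dport = 0,   .dport_mask = 0x0000 };

        printf("%d %d %d\n",
               port_matches(500,  &exact),  /* 1: exact port matches      */
               port_matches(4500, &exact),  /* 0: different port rejected */
               port_matches(4500, &any));   /* 1: zero mask matches any   */
        return 0;
}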
/*
 * xfrm_policy.c
 *
 * Changes:
 *      Mitsuru KANDA @USAGI
 *      Kazunori MIYAZAWA @USAGI
 *      Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *              IPv6 support
 *      Kazunori MIYAZAWA @USAGI
 *      YOSHIFUJI Hideaki
 *              Split up af-specific portion
 *      Derek Atkins <derek@ihtfp.com>          Add the post_input processor
 *
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/cache.h>
#include <linux/audit.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
#ifdef CONFIG_XFRM_STATISTICS
#include <net/snmp.h>
#endif

#include "xfrm_hash.h"

DEFINE_MUTEX(xfrm_cfg_mutex);
EXPORT_SYMBOL(xfrm_cfg_mutex);

static DEFINE_SPINLOCK(xfrm_policy_sk_bundle_lock);
static struct dst_entry *xfrm_policy_sk_bundles;
static DEFINE_RWLOCK(xfrm_policy_lock);

static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];

static struct kmem_cache *xfrm_dst_cache __read_mostly;

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family);
static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo);
static void xfrm_init_pmtu(struct dst_entry *dst);
static int stale_bundle(struct dst_entry *dst);
static int xfrm_bundle_ok(struct xfrm_dst *xdst, int family);


static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
                                                int dir);

static inline int
__xfrm4_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
        const struct flowi4 *fl4 = &fl->u.ip4;

        return  addr_match(&fl4->daddr, &sel->daddr, sel->prefixlen_d) &&
                addr_match(&fl4->saddr, &sel->saddr, sel->prefixlen_s) &&
                !((xfrm_flowi_dport(fl, &fl4->uli) ^ sel->dport) & sel->dport_mask) &&
                !((xfrm_flowi_sport(fl, &fl4->uli) ^ sel->sport) & sel->sport_mask) &&
                (fl4->flowi4_proto == sel->proto || !sel->proto) &&
                (fl4->flowi4_oif == sel->ifindex || !sel->ifindex);
}

static inline int
__xfrm6_selector_match(const struct xfrm_selector *sel, const struct flowi *fl)
{
        const struct flowi6 *fl6 = &fl->u.ip6;

        return  addr_match(&fl6->daddr, &sel->daddr, sel->prefixlen_d) &&
                addr_match(&fl6->saddr, &sel->saddr, sel->prefixlen_s) &&
                !((xfrm_flowi_dport(fl, &fl6->uli) ^ sel->dport) & sel->dport_mask) &&
                !((xfrm_flowi_sport(fl, &fl6->uli) ^ sel->sport) & sel->sport_mask) &&
                (fl6->flowi6_proto == sel->proto || !sel->proto) &&
                (fl6->flowi6_oif == sel->ifindex || !sel->ifindex);
}

int xfrm_selector_match(const struct xfrm_selector *sel, const struct flowi *fl,
                        unsigned short family)
{
        switch (family) {
        case AF_INET:
                return __xfrm4_selector_match(sel, fl);
        case AF_INET6:
                return __xfrm6_selector_match(sel, fl);
        }
        return 0;
}

static inline struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos,
                                                  const xfrm_address_t *saddr,
                                                  const xfrm_address_t *daddr,
                                                  int family)
{
        struct xfrm_policy_afinfo *afinfo;
        struct dst_entry *dst;

        afinfo = xfrm_policy_get_afinfo(family);
        if (unlikely(afinfo == NULL))
                return ERR_PTR(-EAFNOSUPPORT);

        dst = afinfo->dst_lookup(net, tos, saddr, daddr);

        xfrm_policy_put_afinfo(afinfo);

        return dst;
}

static inline struct dst_entry *xfrm_dst_lookup(struct xfrm_state
*x, int tos, 117 xfrm_address_t *prev_saddr, 118 xfrm_address_t *prev_daddr, 119 int family) 120{ 121 struct net *net = xs_net(x); 122 xfrm_address_t *saddr = &x->props.saddr; 123 xfrm_address_t *daddr = &x->id.daddr; 124 struct dst_entry *dst; 125 126 if (x->type->flags & XFRM_TYPE_LOCAL_COADDR) { 127 saddr = x->coaddr; 128 daddr = prev_daddr; 129 } 130 if (x->type->flags & XFRM_TYPE_REMOTE_COADDR) { 131 saddr = prev_saddr; 132 daddr = x->coaddr; 133 } 134 135 dst = __xfrm_dst_lookup(net, tos, saddr, daddr, family); 136 137 if (!IS_ERR(dst)) { 138 if (prev_saddr != saddr) 139 memcpy(prev_saddr, saddr, sizeof(*prev_saddr)); 140 if (prev_daddr != daddr) 141 memcpy(prev_daddr, daddr, sizeof(*prev_daddr)); 142 } 143 144 return dst; 145} 146 147static inline unsigned long make_jiffies(long secs) 148{ 149 if (secs >= (MAX_SCHEDULE_TIMEOUT-1)/HZ) 150 return MAX_SCHEDULE_TIMEOUT-1; 151 else 152 return secs*HZ; 153} 154 155static void xfrm_policy_timer(unsigned long data) 156{ 157 struct xfrm_policy *xp = (struct xfrm_policy*)data; 158 unsigned long now = get_seconds(); 159 long next = LONG_MAX; 160 int warn = 0; 161 int dir; 162 163 read_lock(&xp->lock); 164 165 if (unlikely(xp->walk.dead)) 166 goto out; 167 168 dir = xfrm_policy_id2dir(xp->index); 169 170 if (xp->lft.hard_add_expires_seconds) { 171 long tmo = xp->lft.hard_add_expires_seconds + 172 xp->curlft.add_time - now; 173 if (tmo <= 0) 174 goto expired; 175 if (tmo < next) 176 next = tmo; 177 } 178 if (xp->lft.hard_use_expires_seconds) { 179 long tmo = xp->lft.hard_use_expires_seconds + 180 (xp->curlft.use_time ? : xp->curlft.add_time) - now; 181 if (tmo <= 0) 182 goto expired; 183 if (tmo < next) 184 next = tmo; 185 } 186 if (xp->lft.soft_add_expires_seconds) { 187 long tmo = xp->lft.soft_add_expires_seconds + 188 xp->curlft.add_time - now; 189 if (tmo <= 0) { 190 warn = 1; 191 tmo = XFRM_KM_TIMEOUT; 192 } 193 if (tmo < next) 194 next = tmo; 195 } 196 if (xp->lft.soft_use_expires_seconds) { 197 long tmo = xp->lft.soft_use_expires_seconds + 198 (xp->curlft.use_time ? : xp->curlft.add_time) - now; 199 if (tmo <= 0) { 200 warn = 1; 201 tmo = XFRM_KM_TIMEOUT; 202 } 203 if (tmo < next) 204 next = tmo; 205 } 206 207 if (warn) 208 km_policy_expired(xp, dir, 0, 0); 209 if (next != LONG_MAX && 210 !mod_timer(&xp->timer, jiffies + make_jiffies(next))) 211 xfrm_pol_hold(xp); 212 213out: 214 read_unlock(&xp->lock); 215 xfrm_pol_put(xp); 216 return; 217 218expired: 219 read_unlock(&xp->lock); 220 if (!xfrm_policy_delete(xp, dir)) 221 km_policy_expired(xp, dir, 1, 0); 222 xfrm_pol_put(xp); 223} 224 225static struct flow_cache_object *xfrm_policy_flo_get(struct flow_cache_object *flo) 226{ 227 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); 228 229 if (unlikely(pol->walk.dead)) 230 flo = NULL; 231 else 232 xfrm_pol_hold(pol); 233 234 return flo; 235} 236 237static int xfrm_policy_flo_check(struct flow_cache_object *flo) 238{ 239 struct xfrm_policy *pol = container_of(flo, struct xfrm_policy, flo); 240 241 return !pol->walk.dead; 242} 243 244static void xfrm_policy_flo_delete(struct flow_cache_object *flo) 245{ 246 xfrm_pol_put(container_of(flo, struct xfrm_policy, flo)); 247} 248 249static const struct flow_cache_ops xfrm_policy_fc_ops = { 250 .get = xfrm_policy_flo_get, 251 .check = xfrm_policy_flo_check, 252 .delete = xfrm_policy_flo_delete, 253}; 254 255/* Allocate xfrm_policy. Not used here, it is supposed to be used by pfkeyv2 256 * SPD calls. 
257 */ 258 259struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp) 260{ 261 struct xfrm_policy *policy; 262 263 policy = kzalloc(sizeof(struct xfrm_policy), gfp); 264 265 if (policy) { 266 write_pnet(&policy->xp_net, net); 267 INIT_LIST_HEAD(&policy->walk.all); 268 INIT_HLIST_NODE(&policy->bydst); 269 INIT_HLIST_NODE(&policy->byidx); 270 rwlock_init(&policy->lock); 271 atomic_set(&policy->refcnt, 1); 272 setup_timer(&policy->timer, xfrm_policy_timer, 273 (unsigned long)policy); 274 policy->flo.ops = &xfrm_policy_fc_ops; 275 } 276 return policy; 277} 278EXPORT_SYMBOL(xfrm_policy_alloc); 279 280/* Destroy xfrm_policy: descendant resources must be released to this moment. */ 281 282void xfrm_policy_destroy(struct xfrm_policy *policy) 283{ 284 BUG_ON(!policy->walk.dead); 285 286 if (del_timer(&policy->timer)) 287 BUG(); 288 289 security_xfrm_policy_free(policy->security); 290 kfree(policy); 291} 292EXPORT_SYMBOL(xfrm_policy_destroy); 293 294/* Rule must be locked. Release descentant resources, announce 295 * entry dead. The rule must be unlinked from lists to the moment. 296 */ 297 298static void xfrm_policy_kill(struct xfrm_policy *policy) 299{ 300 policy->walk.dead = 1; 301 302 atomic_inc(&policy->genid); 303 304 if (del_timer(&policy->timer)) 305 xfrm_pol_put(policy); 306 307 xfrm_pol_put(policy); 308} 309 310static unsigned int xfrm_policy_hashmax __read_mostly = 1 * 1024 * 1024; 311 312static inline unsigned int idx_hash(struct net *net, u32 index) 313{ 314 return __idx_hash(index, net->xfrm.policy_idx_hmask); 315} 316 317static struct hlist_head *policy_hash_bysel(struct net *net, 318 const struct xfrm_selector *sel, 319 unsigned short family, int dir) 320{ 321 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 322 unsigned int hash = __sel_hash(sel, family, hmask); 323 324 return (hash == hmask + 1 ? 
325 &net->xfrm.policy_inexact[dir] : 326 net->xfrm.policy_bydst[dir].table + hash); 327} 328 329static struct hlist_head *policy_hash_direct(struct net *net, 330 const xfrm_address_t *daddr, 331 const xfrm_address_t *saddr, 332 unsigned short family, int dir) 333{ 334 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 335 unsigned int hash = __addr_hash(daddr, saddr, family, hmask); 336 337 return net->xfrm.policy_bydst[dir].table + hash; 338} 339 340static void xfrm_dst_hash_transfer(struct hlist_head *list, 341 struct hlist_head *ndsttable, 342 unsigned int nhashmask) 343{ 344 struct hlist_node *entry, *tmp, *entry0 = NULL; 345 struct xfrm_policy *pol; 346 unsigned int h0 = 0; 347 348redo: 349 hlist_for_each_entry_safe(pol, entry, tmp, list, bydst) { 350 unsigned int h; 351 352 h = __addr_hash(&pol->selector.daddr, &pol->selector.saddr, 353 pol->family, nhashmask); 354 if (!entry0) { 355 hlist_del(entry); 356 hlist_add_head(&pol->bydst, ndsttable+h); 357 h0 = h; 358 } else { 359 if (h != h0) 360 continue; 361 hlist_del(entry); 362 hlist_add_after(entry0, &pol->bydst); 363 } 364 entry0 = entry; 365 } 366 if (!hlist_empty(list)) { 367 entry0 = NULL; 368 goto redo; 369 } 370} 371 372static void xfrm_idx_hash_transfer(struct hlist_head *list, 373 struct hlist_head *nidxtable, 374 unsigned int nhashmask) 375{ 376 struct hlist_node *entry, *tmp; 377 struct xfrm_policy *pol; 378 379 hlist_for_each_entry_safe(pol, entry, tmp, list, byidx) { 380 unsigned int h; 381 382 h = __idx_hash(pol->index, nhashmask); 383 hlist_add_head(&pol->byidx, nidxtable+h); 384 } 385} 386 387static unsigned long xfrm_new_hash_mask(unsigned int old_hmask) 388{ 389 return ((old_hmask + 1) << 1) - 1; 390} 391 392static void xfrm_bydst_resize(struct net *net, int dir) 393{ 394 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 395 unsigned int nhashmask = xfrm_new_hash_mask(hmask); 396 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); 397 struct hlist_head *odst = net->xfrm.policy_bydst[dir].table; 398 struct hlist_head *ndst = xfrm_hash_alloc(nsize); 399 int i; 400 401 if (!ndst) 402 return; 403 404 write_lock_bh(&xfrm_policy_lock); 405 406 for (i = hmask; i >= 0; i--) 407 xfrm_dst_hash_transfer(odst + i, ndst, nhashmask); 408 409 net->xfrm.policy_bydst[dir].table = ndst; 410 net->xfrm.policy_bydst[dir].hmask = nhashmask; 411 412 write_unlock_bh(&xfrm_policy_lock); 413 414 xfrm_hash_free(odst, (hmask + 1) * sizeof(struct hlist_head)); 415} 416 417static void xfrm_byidx_resize(struct net *net, int total) 418{ 419 unsigned int hmask = net->xfrm.policy_idx_hmask; 420 unsigned int nhashmask = xfrm_new_hash_mask(hmask); 421 unsigned int nsize = (nhashmask + 1) * sizeof(struct hlist_head); 422 struct hlist_head *oidx = net->xfrm.policy_byidx; 423 struct hlist_head *nidx = xfrm_hash_alloc(nsize); 424 int i; 425 426 if (!nidx) 427 return; 428 429 write_lock_bh(&xfrm_policy_lock); 430 431 for (i = hmask; i >= 0; i--) 432 xfrm_idx_hash_transfer(oidx + i, nidx, nhashmask); 433 434 net->xfrm.policy_byidx = nidx; 435 net->xfrm.policy_idx_hmask = nhashmask; 436 437 write_unlock_bh(&xfrm_policy_lock); 438 439 xfrm_hash_free(oidx, (hmask + 1) * sizeof(struct hlist_head)); 440} 441 442static inline int xfrm_bydst_should_resize(struct net *net, int dir, int *total) 443{ 444 unsigned int cnt = net->xfrm.policy_count[dir]; 445 unsigned int hmask = net->xfrm.policy_bydst[dir].hmask; 446 447 if (total) 448 *total += cnt; 449 450 if ((hmask + 1) < xfrm_policy_hashmax && 451 cnt > hmask) 452 return 1; 453 454 return 0; 
455} 456 457static inline int xfrm_byidx_should_resize(struct net *net, int total) 458{ 459 unsigned int hmask = net->xfrm.policy_idx_hmask; 460 461 if ((hmask + 1) < xfrm_policy_hashmax && 462 total > hmask) 463 return 1; 464 465 return 0; 466} 467 468void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si) 469{ 470 read_lock_bh(&xfrm_policy_lock); 471 si->incnt = net->xfrm.policy_count[XFRM_POLICY_IN]; 472 si->outcnt = net->xfrm.policy_count[XFRM_POLICY_OUT]; 473 si->fwdcnt = net->xfrm.policy_count[XFRM_POLICY_FWD]; 474 si->inscnt = net->xfrm.policy_count[XFRM_POLICY_IN+XFRM_POLICY_MAX]; 475 si->outscnt = net->xfrm.policy_count[XFRM_POLICY_OUT+XFRM_POLICY_MAX]; 476 si->fwdscnt = net->xfrm.policy_count[XFRM_POLICY_FWD+XFRM_POLICY_MAX]; 477 si->spdhcnt = net->xfrm.policy_idx_hmask; 478 si->spdhmcnt = xfrm_policy_hashmax; 479 read_unlock_bh(&xfrm_policy_lock); 480} 481EXPORT_SYMBOL(xfrm_spd_getinfo); 482 483static DEFINE_MUTEX(hash_resize_mutex); 484static void xfrm_hash_resize(struct work_struct *work) 485{ 486 struct net *net = container_of(work, struct net, xfrm.policy_hash_work); 487 int dir, total; 488 489 mutex_lock(&hash_resize_mutex); 490 491 total = 0; 492 for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) { 493 if (xfrm_bydst_should_resize(net, dir, &total)) 494 xfrm_bydst_resize(net, dir); 495 } 496 if (xfrm_byidx_should_resize(net, total)) 497 xfrm_byidx_resize(net, total); 498 499 mutex_unlock(&hash_resize_mutex); 500} 501 502/* Generate new index... KAME seems to generate them ordered by cost 503 * of an absolute inpredictability of ordering of rules. This will not pass. */ 504static u32 xfrm_gen_index(struct net *net, int dir) 505{ 506 static u32 idx_generator; 507 508 for (;;) { 509 struct hlist_node *entry; 510 struct hlist_head *list; 511 struct xfrm_policy *p; 512 u32 idx; 513 int found; 514 515 idx = (idx_generator | dir); 516 idx_generator += 8; 517 if (idx == 0) 518 idx = 8; 519 list = net->xfrm.policy_byidx + idx_hash(net, idx); 520 found = 0; 521 hlist_for_each_entry(p, entry, list, byidx) { 522 if (p->index == idx) { 523 found = 1; 524 break; 525 } 526 } 527 if (!found) 528 return idx; 529 } 530} 531 532static inline int selector_cmp(struct xfrm_selector *s1, struct xfrm_selector *s2) 533{ 534 u32 *p1 = (u32 *) s1; 535 u32 *p2 = (u32 *) s2; 536 int len = sizeof(struct xfrm_selector) / sizeof(u32); 537 int i; 538 539 for (i = 0; i < len; i++) { 540 if (p1[i] != p2[i]) 541 return 1; 542 } 543 544 return 0; 545} 546 547int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) 548{ 549 struct net *net = xp_net(policy); 550 struct xfrm_policy *pol; 551 struct xfrm_policy *delpol; 552 struct hlist_head *chain; 553 struct hlist_node *entry, *newpos; 554 u32 mark = policy->mark.v & policy->mark.m; 555 556 write_lock_bh(&xfrm_policy_lock); 557 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); 558 delpol = NULL; 559 newpos = NULL; 560 hlist_for_each_entry(pol, entry, chain, bydst) { 561 if (pol->type == policy->type && 562 !selector_cmp(&pol->selector, &policy->selector) && 563 (mark & pol->mark.m) == pol->mark.v && 564 xfrm_sec_ctx_match(pol->security, policy->security) && 565 !WARN_ON(delpol)) { 566 if (excl) { 567 write_unlock_bh(&xfrm_policy_lock); 568 return -EEXIST; 569 } 570 delpol = pol; 571 if (policy->priority > pol->priority) 572 continue; 573 } else if (policy->priority >= pol->priority) { 574 newpos = &pol->bydst; 575 continue; 576 } 577 if (delpol) 578 break; 579 } 580 if (newpos) 581 hlist_add_after(newpos, 
&policy->bydst); 582 else 583 hlist_add_head(&policy->bydst, chain); 584 xfrm_pol_hold(policy); 585 net->xfrm.policy_count[dir]++; 586 atomic_inc(&flow_cache_genid); 587 if (delpol) 588 __xfrm_policy_unlink(delpol, dir); 589 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir); 590 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index)); 591 policy->curlft.add_time = get_seconds(); 592 policy->curlft.use_time = 0; 593 if (!mod_timer(&policy->timer, jiffies + HZ)) 594 xfrm_pol_hold(policy); 595 list_add(&policy->walk.all, &net->xfrm.policy_all); 596 write_unlock_bh(&xfrm_policy_lock); 597 598 if (delpol) 599 xfrm_policy_kill(delpol); 600 else if (xfrm_bydst_should_resize(net, dir, NULL)) 601 schedule_work(&net->xfrm.policy_hash_work); 602 603 return 0; 604} 605EXPORT_SYMBOL(xfrm_policy_insert); 606 607struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u8 type, 608 int dir, struct xfrm_selector *sel, 609 struct xfrm_sec_ctx *ctx, int delete, 610 int *err) 611{ 612 struct xfrm_policy *pol, *ret; 613 struct hlist_head *chain; 614 struct hlist_node *entry; 615 616 *err = 0; 617 write_lock_bh(&xfrm_policy_lock); 618 chain = policy_hash_bysel(net, sel, sel->family, dir); 619 ret = NULL; 620 hlist_for_each_entry(pol, entry, chain, bydst) { 621 if (pol->type == type && 622 (mark & pol->mark.m) == pol->mark.v && 623 !selector_cmp(sel, &pol->selector) && 624 xfrm_sec_ctx_match(ctx, pol->security)) { 625 xfrm_pol_hold(pol); 626 if (delete) { 627 *err = security_xfrm_policy_delete( 628 pol->security); 629 if (*err) { 630 write_unlock_bh(&xfrm_policy_lock); 631 return pol; 632 } 633 __xfrm_policy_unlink(pol, dir); 634 } 635 ret = pol; 636 break; 637 } 638 } 639 write_unlock_bh(&xfrm_policy_lock); 640 641 if (ret && delete) 642 xfrm_policy_kill(ret); 643 return ret; 644} 645EXPORT_SYMBOL(xfrm_policy_bysel_ctx); 646 647struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8 type, 648 int dir, u32 id, int delete, int *err) 649{ 650 struct xfrm_policy *pol, *ret; 651 struct hlist_head *chain; 652 struct hlist_node *entry; 653 654 *err = -ENOENT; 655 if (xfrm_policy_id2dir(id) != dir) 656 return NULL; 657 658 *err = 0; 659 write_lock_bh(&xfrm_policy_lock); 660 chain = net->xfrm.policy_byidx + idx_hash(net, id); 661 ret = NULL; 662 hlist_for_each_entry(pol, entry, chain, byidx) { 663 if (pol->type == type && pol->index == id && 664 (mark & pol->mark.m) == pol->mark.v) { 665 xfrm_pol_hold(pol); 666 if (delete) { 667 *err = security_xfrm_policy_delete( 668 pol->security); 669 if (*err) { 670 write_unlock_bh(&xfrm_policy_lock); 671 return pol; 672 } 673 __xfrm_policy_unlink(pol, dir); 674 } 675 ret = pol; 676 break; 677 } 678 } 679 write_unlock_bh(&xfrm_policy_lock); 680 681 if (ret && delete) 682 xfrm_policy_kill(ret); 683 return ret; 684} 685EXPORT_SYMBOL(xfrm_policy_byid); 686 687#ifdef CONFIG_SECURITY_NETWORK_XFRM 688static inline int 689xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 690{ 691 int dir, err = 0; 692 693 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 694 struct xfrm_policy *pol; 695 struct hlist_node *entry; 696 int i; 697 698 hlist_for_each_entry(pol, entry, 699 &net->xfrm.policy_inexact[dir], bydst) { 700 if (pol->type != type) 701 continue; 702 err = security_xfrm_policy_delete(pol->security); 703 if (err) { 704 xfrm_audit_policy_delete(pol, 0, 705 audit_info->loginuid, 706 audit_info->sessionid, 707 audit_info->secid); 708 return err; 709 } 710 } 711 for (i = 
net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 712 hlist_for_each_entry(pol, entry, 713 net->xfrm.policy_bydst[dir].table + i, 714 bydst) { 715 if (pol->type != type) 716 continue; 717 err = security_xfrm_policy_delete( 718 pol->security); 719 if (err) { 720 xfrm_audit_policy_delete(pol, 0, 721 audit_info->loginuid, 722 audit_info->sessionid, 723 audit_info->secid); 724 return err; 725 } 726 } 727 } 728 } 729 return err; 730} 731#else 732static inline int 733xfrm_policy_flush_secctx_check(struct net *net, u8 type, struct xfrm_audit *audit_info) 734{ 735 return 0; 736} 737#endif 738 739int xfrm_policy_flush(struct net *net, u8 type, struct xfrm_audit *audit_info) 740{ 741 int dir, err = 0, cnt = 0; 742 743 write_lock_bh(&xfrm_policy_lock); 744 745 err = xfrm_policy_flush_secctx_check(net, type, audit_info); 746 if (err) 747 goto out; 748 749 for (dir = 0; dir < XFRM_POLICY_MAX; dir++) { 750 struct xfrm_policy *pol; 751 struct hlist_node *entry; 752 int i; 753 754 again1: 755 hlist_for_each_entry(pol, entry, 756 &net->xfrm.policy_inexact[dir], bydst) { 757 if (pol->type != type) 758 continue; 759 __xfrm_policy_unlink(pol, dir); 760 write_unlock_bh(&xfrm_policy_lock); 761 cnt++; 762 763 xfrm_audit_policy_delete(pol, 1, audit_info->loginuid, 764 audit_info->sessionid, 765 audit_info->secid); 766 767 xfrm_policy_kill(pol); 768 769 write_lock_bh(&xfrm_policy_lock); 770 goto again1; 771 } 772 773 for (i = net->xfrm.policy_bydst[dir].hmask; i >= 0; i--) { 774 again2: 775 hlist_for_each_entry(pol, entry, 776 net->xfrm.policy_bydst[dir].table + i, 777 bydst) { 778 if (pol->type != type) 779 continue; 780 __xfrm_policy_unlink(pol, dir); 781 write_unlock_bh(&xfrm_policy_lock); 782 cnt++; 783 784 xfrm_audit_policy_delete(pol, 1, 785 audit_info->loginuid, 786 audit_info->sessionid, 787 audit_info->secid); 788 xfrm_policy_kill(pol); 789 790 write_lock_bh(&xfrm_policy_lock); 791 goto again2; 792 } 793 } 794 795 } 796 if (!cnt) 797 err = -ESRCH; 798out: 799 write_unlock_bh(&xfrm_policy_lock); 800 return err; 801} 802EXPORT_SYMBOL(xfrm_policy_flush); 803 804int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk, 805 int (*func)(struct xfrm_policy *, int, int, void*), 806 void *data) 807{ 808 struct xfrm_policy *pol; 809 struct xfrm_policy_walk_entry *x; 810 int error = 0; 811 812 if (walk->type >= XFRM_POLICY_TYPE_MAX && 813 walk->type != XFRM_POLICY_TYPE_ANY) 814 return -EINVAL; 815 816 if (list_empty(&walk->walk.all) && walk->seq != 0) 817 return 0; 818 819 write_lock_bh(&xfrm_policy_lock); 820 if (list_empty(&walk->walk.all)) 821 x = list_first_entry(&net->xfrm.policy_all, struct xfrm_policy_walk_entry, all); 822 else 823 x = list_entry(&walk->walk.all, struct xfrm_policy_walk_entry, all); 824 list_for_each_entry_from(x, &net->xfrm.policy_all, all) { 825 if (x->dead) 826 continue; 827 pol = container_of(x, struct xfrm_policy, walk); 828 if (walk->type != XFRM_POLICY_TYPE_ANY && 829 walk->type != pol->type) 830 continue; 831 error = func(pol, xfrm_policy_id2dir(pol->index), 832 walk->seq, data); 833 if (error) { 834 list_move_tail(&walk->walk.all, &x->all); 835 goto out; 836 } 837 walk->seq++; 838 } 839 if (walk->seq == 0) { 840 error = -ENOENT; 841 goto out; 842 } 843 list_del_init(&walk->walk.all); 844out: 845 write_unlock_bh(&xfrm_policy_lock); 846 return error; 847} 848EXPORT_SYMBOL(xfrm_policy_walk); 849 850void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type) 851{ 852 INIT_LIST_HEAD(&walk->walk.all); 853 walk->walk.dead = 1; 854 walk->type = type; 855 walk->seq = 0; 856} 
857EXPORT_SYMBOL(xfrm_policy_walk_init); 858 859void xfrm_policy_walk_done(struct xfrm_policy_walk *walk) 860{ 861 if (list_empty(&walk->walk.all)) 862 return; 863 864 write_lock_bh(&xfrm_policy_lock); 865 list_del(&walk->walk.all); 866 write_unlock_bh(&xfrm_policy_lock); 867} 868EXPORT_SYMBOL(xfrm_policy_walk_done); 869 870/* 871 * Find policy to apply to this flow. 872 * 873 * Returns 0 if policy found, else an -errno. 874 */ 875static int xfrm_policy_match(const struct xfrm_policy *pol, 876 const struct flowi *fl, 877 u8 type, u16 family, int dir) 878{ 879 const struct xfrm_selector *sel = &pol->selector; 880 int match, ret = -ESRCH; 881 882 if (pol->family != family || 883 (fl->flowi_mark & pol->mark.m) != pol->mark.v || 884 pol->type != type) 885 return ret; 886 887 match = xfrm_selector_match(sel, fl, family); 888 if (match) 889 ret = security_xfrm_policy_lookup(pol->security, fl->flowi_secid, 890 dir); 891 892 return ret; 893} 894 895static struct xfrm_policy *xfrm_policy_lookup_bytype(struct net *net, u8 type, 896 const struct flowi *fl, 897 u16 family, u8 dir) 898{ 899 int err; 900 struct xfrm_policy *pol, *ret; 901 const xfrm_address_t *daddr, *saddr; 902 struct hlist_node *entry; 903 struct hlist_head *chain; 904 u32 priority = ~0U; 905 906 daddr = xfrm_flowi_daddr(fl, family); 907 saddr = xfrm_flowi_saddr(fl, family); 908 if (unlikely(!daddr || !saddr)) 909 return NULL; 910 911 read_lock_bh(&xfrm_policy_lock); 912 chain = policy_hash_direct(net, daddr, saddr, family, dir); 913 ret = NULL; 914 hlist_for_each_entry(pol, entry, chain, bydst) { 915 err = xfrm_policy_match(pol, fl, type, family, dir); 916 if (err) { 917 if (err == -ESRCH) 918 continue; 919 else { 920 ret = ERR_PTR(err); 921 goto fail; 922 } 923 } else { 924 ret = pol; 925 priority = ret->priority; 926 break; 927 } 928 } 929 chain = &net->xfrm.policy_inexact[dir]; 930 hlist_for_each_entry(pol, entry, chain, bydst) { 931 err = xfrm_policy_match(pol, fl, type, family, dir); 932 if (err) { 933 if (err == -ESRCH) 934 continue; 935 else { 936 ret = ERR_PTR(err); 937 goto fail; 938 } 939 } else if (pol->priority < priority) { 940 ret = pol; 941 break; 942 } 943 } 944 if (ret) 945 xfrm_pol_hold(ret); 946fail: 947 read_unlock_bh(&xfrm_policy_lock); 948 949 return ret; 950} 951 952static struct xfrm_policy * 953__xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir) 954{ 955#ifdef CONFIG_XFRM_SUB_POLICY 956 struct xfrm_policy *pol; 957 958 pol = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_SUB, fl, family, dir); 959 if (pol != NULL) 960 return pol; 961#endif 962 return xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, fl, family, dir); 963} 964 965static struct flow_cache_object * 966xfrm_policy_lookup(struct net *net, const struct flowi *fl, u16 family, 967 u8 dir, struct flow_cache_object *old_obj, void *ctx) 968{ 969 struct xfrm_policy *pol; 970 971 if (old_obj) 972 xfrm_pol_put(container_of(old_obj, struct xfrm_policy, flo)); 973 974 pol = __xfrm_policy_lookup(net, fl, family, dir); 975 if (IS_ERR_OR_NULL(pol)) 976 return ERR_CAST(pol); 977 978 /* Resolver returns two references: 979 * one for cache and one for caller of flow_cache_lookup() */ 980 xfrm_pol_hold(pol); 981 982 return &pol->flo; 983} 984 985static inline int policy_to_flow_dir(int dir) 986{ 987 if (XFRM_POLICY_IN == FLOW_DIR_IN && 988 XFRM_POLICY_OUT == FLOW_DIR_OUT && 989 XFRM_POLICY_FWD == FLOW_DIR_FWD) 990 return dir; 991 switch (dir) { 992 default: 993 case XFRM_POLICY_IN: 994 return FLOW_DIR_IN; 995 case XFRM_POLICY_OUT: 
996 return FLOW_DIR_OUT; 997 case XFRM_POLICY_FWD: 998 return FLOW_DIR_FWD; 999 } 1000} 1001 1002static struct xfrm_policy *xfrm_sk_policy_lookup(struct sock *sk, int dir, 1003 const struct flowi *fl) 1004{ 1005 struct xfrm_policy *pol; 1006 1007 read_lock_bh(&xfrm_policy_lock); 1008 if ((pol = sk->sk_policy[dir]) != NULL) { 1009 int match = xfrm_selector_match(&pol->selector, fl, 1010 sk->sk_family); 1011 int err = 0; 1012 1013 if (match) { 1014 if ((sk->sk_mark & pol->mark.m) != pol->mark.v) { 1015 pol = NULL; 1016 goto out; 1017 } 1018 err = security_xfrm_policy_lookup(pol->security, 1019 fl->flowi_secid, 1020 policy_to_flow_dir(dir)); 1021 if (!err) 1022 xfrm_pol_hold(pol); 1023 else if (err == -ESRCH) 1024 pol = NULL; 1025 else 1026 pol = ERR_PTR(err); 1027 } else 1028 pol = NULL; 1029 } 1030out: 1031 read_unlock_bh(&xfrm_policy_lock); 1032 return pol; 1033} 1034 1035static void __xfrm_policy_link(struct xfrm_policy *pol, int dir) 1036{ 1037 struct net *net = xp_net(pol); 1038 struct hlist_head *chain = policy_hash_bysel(net, &pol->selector, 1039 pol->family, dir); 1040 1041 list_add(&pol->walk.all, &net->xfrm.policy_all); 1042 hlist_add_head(&pol->bydst, chain); 1043 hlist_add_head(&pol->byidx, net->xfrm.policy_byidx+idx_hash(net, pol->index)); 1044 net->xfrm.policy_count[dir]++; 1045 xfrm_pol_hold(pol); 1046 1047 if (xfrm_bydst_should_resize(net, dir, NULL)) 1048 schedule_work(&net->xfrm.policy_hash_work); 1049} 1050 1051static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol, 1052 int dir) 1053{ 1054 struct net *net = xp_net(pol); 1055 1056 if (hlist_unhashed(&pol->bydst)) 1057 return NULL; 1058 1059 hlist_del(&pol->bydst); 1060 hlist_del(&pol->byidx); 1061 list_del(&pol->walk.all); 1062 net->xfrm.policy_count[dir]--; 1063 1064 return pol; 1065} 1066 1067int xfrm_policy_delete(struct xfrm_policy *pol, int dir) 1068{ 1069 write_lock_bh(&xfrm_policy_lock); 1070 pol = __xfrm_policy_unlink(pol, dir); 1071 write_unlock_bh(&xfrm_policy_lock); 1072 if (pol) { 1073 xfrm_policy_kill(pol); 1074 return 0; 1075 } 1076 return -ENOENT; 1077} 1078EXPORT_SYMBOL(xfrm_policy_delete); 1079 1080int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol) 1081{ 1082 struct net *net = xp_net(pol); 1083 struct xfrm_policy *old_pol; 1084 1085#ifdef CONFIG_XFRM_SUB_POLICY 1086 if (pol && pol->type != XFRM_POLICY_TYPE_MAIN) 1087 return -EINVAL; 1088#endif 1089 1090 write_lock_bh(&xfrm_policy_lock); 1091 old_pol = sk->sk_policy[dir]; 1092 sk->sk_policy[dir] = pol; 1093 if (pol) { 1094 pol->curlft.add_time = get_seconds(); 1095 pol->index = xfrm_gen_index(net, XFRM_POLICY_MAX+dir); 1096 __xfrm_policy_link(pol, XFRM_POLICY_MAX+dir); 1097 } 1098 if (old_pol) 1099 /* Unlinking succeeds always. This is the only function 1100 * allowed to delete or replace socket policy. 
1101 */ 1102 __xfrm_policy_unlink(old_pol, XFRM_POLICY_MAX+dir); 1103 write_unlock_bh(&xfrm_policy_lock); 1104 1105 if (old_pol) { 1106 xfrm_policy_kill(old_pol); 1107 } 1108 return 0; 1109} 1110 1111static struct xfrm_policy *clone_policy(const struct xfrm_policy *old, int dir) 1112{ 1113 struct xfrm_policy *newp = xfrm_policy_alloc(xp_net(old), GFP_ATOMIC); 1114 1115 if (newp) { 1116 newp->selector = old->selector; 1117 if (security_xfrm_policy_clone(old->security, 1118 &newp->security)) { 1119 kfree(newp); 1120 return NULL; /* ENOMEM */ 1121 } 1122 newp->lft = old->lft; 1123 newp->curlft = old->curlft; 1124 newp->mark = old->mark; 1125 newp->action = old->action; 1126 newp->flags = old->flags; 1127 newp->xfrm_nr = old->xfrm_nr; 1128 newp->index = old->index; 1129 newp->type = old->type; 1130 memcpy(newp->xfrm_vec, old->xfrm_vec, 1131 newp->xfrm_nr*sizeof(struct xfrm_tmpl)); 1132 write_lock_bh(&xfrm_policy_lock); 1133 __xfrm_policy_link(newp, XFRM_POLICY_MAX+dir); 1134 write_unlock_bh(&xfrm_policy_lock); 1135 xfrm_pol_put(newp); 1136 } 1137 return newp; 1138} 1139 1140int __xfrm_sk_clone_policy(struct sock *sk) 1141{ 1142 struct xfrm_policy *p0 = sk->sk_policy[0], 1143 *p1 = sk->sk_policy[1]; 1144 1145 sk->sk_policy[0] = sk->sk_policy[1] = NULL; 1146 if (p0 && (sk->sk_policy[0] = clone_policy(p0, 0)) == NULL) 1147 return -ENOMEM; 1148 if (p1 && (sk->sk_policy[1] = clone_policy(p1, 1)) == NULL) 1149 return -ENOMEM; 1150 return 0; 1151} 1152 1153static int 1154xfrm_get_saddr(struct net *net, xfrm_address_t *local, xfrm_address_t *remote, 1155 unsigned short family) 1156{ 1157 int err; 1158 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1159 1160 if (unlikely(afinfo == NULL)) 1161 return -EINVAL; 1162 err = afinfo->get_saddr(net, local, remote); 1163 xfrm_policy_put_afinfo(afinfo); 1164 return err; 1165} 1166 1167/* Resolve list of templates for the flow, given policy. */ 1168 1169static int 1170xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, 1171 struct xfrm_state **xfrm, unsigned short family) 1172{ 1173 struct net *net = xp_net(policy); 1174 int nx; 1175 int i, error; 1176 xfrm_address_t *daddr = xfrm_flowi_daddr(fl, family); 1177 xfrm_address_t *saddr = xfrm_flowi_saddr(fl, family); 1178 xfrm_address_t tmp; 1179 1180 for (nx=0, i = 0; i < policy->xfrm_nr; i++) { 1181 struct xfrm_state *x; 1182 xfrm_address_t *remote = daddr; 1183 xfrm_address_t *local = saddr; 1184 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; 1185 1186 if (tmpl->mode == XFRM_MODE_TUNNEL || 1187 tmpl->mode == XFRM_MODE_BEET) { 1188 remote = &tmpl->id.daddr; 1189 local = &tmpl->saddr; 1190 if (xfrm_addr_any(local, tmpl->encap_family)) { 1191 error = xfrm_get_saddr(net, &tmp, remote, tmpl->encap_family); 1192 if (error) 1193 goto fail; 1194 local = &tmp; 1195 } 1196 } 1197 1198 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family); 1199 1200 if (x && x->km.state == XFRM_STATE_VALID) { 1201 xfrm[nx++] = x; 1202 daddr = remote; 1203 saddr = local; 1204 continue; 1205 } 1206 if (x) { 1207 error = (x->km.state == XFRM_STATE_ERROR ? 
1208 -EINVAL : -EAGAIN); 1209 xfrm_state_put(x); 1210 } 1211 else if (error == -ESRCH) 1212 error = -EAGAIN; 1213 1214 if (!tmpl->optional) 1215 goto fail; 1216 } 1217 return nx; 1218 1219fail: 1220 for (nx--; nx>=0; nx--) 1221 xfrm_state_put(xfrm[nx]); 1222 return error; 1223} 1224 1225static int 1226xfrm_tmpl_resolve(struct xfrm_policy **pols, int npols, const struct flowi *fl, 1227 struct xfrm_state **xfrm, unsigned short family) 1228{ 1229 struct xfrm_state *tp[XFRM_MAX_DEPTH]; 1230 struct xfrm_state **tpp = (npols > 1) ? tp : xfrm; 1231 int cnx = 0; 1232 int error; 1233 int ret; 1234 int i; 1235 1236 for (i = 0; i < npols; i++) { 1237 if (cnx + pols[i]->xfrm_nr >= XFRM_MAX_DEPTH) { 1238 error = -ENOBUFS; 1239 goto fail; 1240 } 1241 1242 ret = xfrm_tmpl_resolve_one(pols[i], fl, &tpp[cnx], family); 1243 if (ret < 0) { 1244 error = ret; 1245 goto fail; 1246 } else 1247 cnx += ret; 1248 } 1249 1250 /* found states are sorted for outbound processing */ 1251 if (npols > 1) 1252 xfrm_state_sort(xfrm, tpp, cnx, family); 1253 1254 return cnx; 1255 1256 fail: 1257 for (cnx--; cnx>=0; cnx--) 1258 xfrm_state_put(tpp[cnx]); 1259 return error; 1260 1261} 1262 1263/* Check that the bundle accepts the flow and its components are 1264 * still valid. 1265 */ 1266 1267static inline int xfrm_get_tos(const struct flowi *fl, int family) 1268{ 1269 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1270 int tos; 1271 1272 if (!afinfo) 1273 return -EINVAL; 1274 1275 tos = afinfo->get_tos(fl); 1276 1277 xfrm_policy_put_afinfo(afinfo); 1278 1279 return tos; 1280} 1281 1282static struct flow_cache_object *xfrm_bundle_flo_get(struct flow_cache_object *flo) 1283{ 1284 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1285 struct dst_entry *dst = &xdst->u.dst; 1286 1287 if (xdst->route == NULL) { 1288 /* Dummy bundle - if it has xfrms we were not 1289 * able to build bundle as template resolution failed. 1290 * It means we need to try again resolving. 
*/ 1291 if (xdst->num_xfrms > 0) 1292 return NULL; 1293 } else { 1294 /* Real bundle */ 1295 if (stale_bundle(dst)) 1296 return NULL; 1297 } 1298 1299 dst_hold(dst); 1300 return flo; 1301} 1302 1303static int xfrm_bundle_flo_check(struct flow_cache_object *flo) 1304{ 1305 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1306 struct dst_entry *dst = &xdst->u.dst; 1307 1308 if (!xdst->route) 1309 return 0; 1310 if (stale_bundle(dst)) 1311 return 0; 1312 1313 return 1; 1314} 1315 1316static void xfrm_bundle_flo_delete(struct flow_cache_object *flo) 1317{ 1318 struct xfrm_dst *xdst = container_of(flo, struct xfrm_dst, flo); 1319 struct dst_entry *dst = &xdst->u.dst; 1320 1321 dst_free(dst); 1322} 1323 1324static const struct flow_cache_ops xfrm_bundle_fc_ops = { 1325 .get = xfrm_bundle_flo_get, 1326 .check = xfrm_bundle_flo_check, 1327 .delete = xfrm_bundle_flo_delete, 1328}; 1329 1330static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) 1331{ 1332 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1333 struct dst_ops *dst_ops; 1334 struct xfrm_dst *xdst; 1335 1336 if (!afinfo) 1337 return ERR_PTR(-EINVAL); 1338 1339 switch (family) { 1340 case AF_INET: 1341 dst_ops = &net->xfrm.xfrm4_dst_ops; 1342 break; 1343#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 1344 case AF_INET6: 1345 dst_ops = &net->xfrm.xfrm6_dst_ops; 1346 break; 1347#endif 1348 default: 1349 BUG(); 1350 } 1351 xdst = dst_alloc(dst_ops, 0); 1352 xfrm_policy_put_afinfo(afinfo); 1353 1354 if (likely(xdst)) 1355 xdst->flo.ops = &xfrm_bundle_fc_ops; 1356 else 1357 xdst = ERR_PTR(-ENOBUFS); 1358 1359 return xdst; 1360} 1361 1362static inline int xfrm_init_path(struct xfrm_dst *path, struct dst_entry *dst, 1363 int nfheader_len) 1364{ 1365 struct xfrm_policy_afinfo *afinfo = 1366 xfrm_policy_get_afinfo(dst->ops->family); 1367 int err; 1368 1369 if (!afinfo) 1370 return -EINVAL; 1371 1372 err = afinfo->init_path(path, dst, nfheader_len); 1373 1374 xfrm_policy_put_afinfo(afinfo); 1375 1376 return err; 1377} 1378 1379static inline int xfrm_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, 1380 const struct flowi *fl) 1381{ 1382 struct xfrm_policy_afinfo *afinfo = 1383 xfrm_policy_get_afinfo(xdst->u.dst.ops->family); 1384 int err; 1385 1386 if (!afinfo) 1387 return -EINVAL; 1388 1389 err = afinfo->fill_dst(xdst, dev, fl); 1390 1391 xfrm_policy_put_afinfo(afinfo); 1392 1393 return err; 1394} 1395 1396 1397/* Allocate chain of dst_entry's, attach known xfrm's, calculate 1398 * all the metrics... Shortly, bundle a bundle. 
1399 */ 1400 1401static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, 1402 struct xfrm_state **xfrm, int nx, 1403 const struct flowi *fl, 1404 struct dst_entry *dst) 1405{ 1406 struct net *net = xp_net(policy); 1407 unsigned long now = jiffies; 1408 struct net_device *dev; 1409 struct dst_entry *dst_prev = NULL; 1410 struct dst_entry *dst0 = NULL; 1411 int i = 0; 1412 int err; 1413 int header_len = 0; 1414 int nfheader_len = 0; 1415 int trailer_len = 0; 1416 int tos; 1417 int family = policy->selector.family; 1418 xfrm_address_t saddr, daddr; 1419 1420 xfrm_flowi_addr_get(fl, &saddr, &daddr, family); 1421 1422 tos = xfrm_get_tos(fl, family); 1423 err = tos; 1424 if (tos < 0) 1425 goto put_states; 1426 1427 dst_hold(dst); 1428 1429 for (; i < nx; i++) { 1430 struct xfrm_dst *xdst = xfrm_alloc_dst(net, family); 1431 struct dst_entry *dst1 = &xdst->u.dst; 1432 1433 err = PTR_ERR(xdst); 1434 if (IS_ERR(xdst)) { 1435 dst_release(dst); 1436 goto put_states; 1437 } 1438 1439 if (!dst_prev) 1440 dst0 = dst1; 1441 else { 1442 dst_prev->child = dst_clone(dst1); 1443 dst1->flags |= DST_NOHASH; 1444 } 1445 1446 xdst->route = dst; 1447 dst_copy_metrics(dst1, dst); 1448 1449 if (xfrm[i]->props.mode != XFRM_MODE_TRANSPORT) { 1450 family = xfrm[i]->props.family; 1451 dst = xfrm_dst_lookup(xfrm[i], tos, &saddr, &daddr, 1452 family); 1453 err = PTR_ERR(dst); 1454 if (IS_ERR(dst)) 1455 goto put_states; 1456 } else 1457 dst_hold(dst); 1458 1459 dst1->xfrm = xfrm[i]; 1460 xdst->xfrm_genid = xfrm[i]->genid; 1461 1462 dst1->obsolete = -1; 1463 dst1->flags |= DST_HOST; 1464 dst1->lastuse = now; 1465 1466 dst1->input = dst_discard; 1467 dst1->output = xfrm[i]->outer_mode->afinfo->output; 1468 1469 dst1->next = dst_prev; 1470 dst_prev = dst1; 1471 1472 header_len += xfrm[i]->props.header_len; 1473 if (xfrm[i]->type->flags & XFRM_TYPE_NON_FRAGMENT) 1474 nfheader_len += xfrm[i]->props.header_len; 1475 trailer_len += xfrm[i]->props.trailer_len; 1476 } 1477 1478 dst_prev->child = dst; 1479 dst0->path = dst; 1480 1481 err = -ENODEV; 1482 dev = dst->dev; 1483 if (!dev) 1484 goto free_dst; 1485 1486 /* Copy neighbour for reachability confirmation */ 1487 dst0->neighbour = neigh_clone(dst->neighbour); 1488 1489 xfrm_init_path((struct xfrm_dst *)dst0, dst, nfheader_len); 1490 xfrm_init_pmtu(dst_prev); 1491 1492 for (dst_prev = dst0; dst_prev != dst; dst_prev = dst_prev->child) { 1493 struct xfrm_dst *xdst = (struct xfrm_dst *)dst_prev; 1494 1495 err = xfrm_fill_dst(xdst, dev, fl); 1496 if (err) 1497 goto free_dst; 1498 1499 dst_prev->header_len = header_len; 1500 dst_prev->trailer_len = trailer_len; 1501 header_len -= xdst->u.dst.xfrm->props.header_len; 1502 trailer_len -= xdst->u.dst.xfrm->props.trailer_len; 1503 } 1504 1505out: 1506 return dst0; 1507 1508put_states: 1509 for (; i < nx; i++) 1510 xfrm_state_put(xfrm[i]); 1511free_dst: 1512 if (dst0) 1513 dst_free(dst0); 1514 dst0 = ERR_PTR(err); 1515 goto out; 1516} 1517 1518static int inline 1519xfrm_dst_alloc_copy(void **target, const void *src, int size) 1520{ 1521 if (!*target) { 1522 *target = kmalloc(size, GFP_ATOMIC); 1523 if (!*target) 1524 return -ENOMEM; 1525 } 1526 memcpy(*target, src, size); 1527 return 0; 1528} 1529 1530static int inline 1531xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel) 1532{ 1533#ifdef CONFIG_XFRM_SUB_POLICY 1534 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 1535 return xfrm_dst_alloc_copy((void **)&(xdst->partner), 1536 sel, sizeof(*sel)); 1537#else 1538 return 0; 1539#endif 1540} 1541 
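/* Editorial note, not in the upstream file: with CONFIG_XFRM_SUB_POLICY enabled,
 * a bundle also keeps a copy of the sub-policy selector (xdst->partner) and of
 * the originating flow (xdst->origin). xfrm_dst_update_parent() above and
 * xfrm_dst_update_origin() below store those copies; without that config option
 * both helpers reduce to a bare "return 0". */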
1542static int inline 1543xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl) 1544{ 1545#ifdef CONFIG_XFRM_SUB_POLICY 1546 struct xfrm_dst *xdst = (struct xfrm_dst *)dst; 1547 return xfrm_dst_alloc_copy((void **)&(xdst->origin), fl, sizeof(*fl)); 1548#else 1549 return 0; 1550#endif 1551} 1552 1553static int xfrm_expand_policies(const struct flowi *fl, u16 family, 1554 struct xfrm_policy **pols, 1555 int *num_pols, int *num_xfrms) 1556{ 1557 int i; 1558 1559 if (*num_pols == 0 || !pols[0]) { 1560 *num_pols = 0; 1561 *num_xfrms = 0; 1562 return 0; 1563 } 1564 if (IS_ERR(pols[0])) 1565 return PTR_ERR(pols[0]); 1566 1567 *num_xfrms = pols[0]->xfrm_nr; 1568 1569#ifdef CONFIG_XFRM_SUB_POLICY 1570 if (pols[0] && pols[0]->action == XFRM_POLICY_ALLOW && 1571 pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 1572 pols[1] = xfrm_policy_lookup_bytype(xp_net(pols[0]), 1573 XFRM_POLICY_TYPE_MAIN, 1574 fl, family, 1575 XFRM_POLICY_OUT); 1576 if (pols[1]) { 1577 if (IS_ERR(pols[1])) { 1578 xfrm_pols_put(pols, *num_pols); 1579 return PTR_ERR(pols[1]); 1580 } 1581 (*num_pols) ++; 1582 (*num_xfrms) += pols[1]->xfrm_nr; 1583 } 1584 } 1585#endif 1586 for (i = 0; i < *num_pols; i++) { 1587 if (pols[i]->action != XFRM_POLICY_ALLOW) { 1588 *num_xfrms = -1; 1589 break; 1590 } 1591 } 1592 1593 return 0; 1594 1595} 1596 1597static struct xfrm_dst * 1598xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols, 1599 const struct flowi *fl, u16 family, 1600 struct dst_entry *dst_orig) 1601{ 1602 struct net *net = xp_net(pols[0]); 1603 struct xfrm_state *xfrm[XFRM_MAX_DEPTH]; 1604 struct dst_entry *dst; 1605 struct xfrm_dst *xdst; 1606 int err; 1607 1608 /* Try to instantiate a bundle */ 1609 err = xfrm_tmpl_resolve(pols, num_pols, fl, xfrm, family); 1610 if (err <= 0) { 1611 if (err != 0 && err != -EAGAIN) 1612 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1613 return ERR_PTR(err); 1614 } 1615 1616 dst = xfrm_bundle_create(pols[0], xfrm, err, fl, dst_orig); 1617 if (IS_ERR(dst)) { 1618 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLEGENERROR); 1619 return ERR_CAST(dst); 1620 } 1621 1622 xdst = (struct xfrm_dst *)dst; 1623 xdst->num_xfrms = err; 1624 if (num_pols > 1) 1625 err = xfrm_dst_update_parent(dst, &pols[1]->selector); 1626 else 1627 err = xfrm_dst_update_origin(dst, fl); 1628 if (unlikely(err)) { 1629 dst_free(dst); 1630 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTBUNDLECHECKERROR); 1631 return ERR_PTR(err); 1632 } 1633 1634 xdst->num_pols = num_pols; 1635 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); 1636 xdst->policy_genid = atomic_read(&pols[0]->genid); 1637 1638 return xdst; 1639} 1640 1641static struct flow_cache_object * 1642xfrm_bundle_lookup(struct net *net, const struct flowi *fl, u16 family, u8 dir, 1643 struct flow_cache_object *oldflo, void *ctx) 1644{ 1645 struct dst_entry *dst_orig = (struct dst_entry *)ctx; 1646 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1647 struct xfrm_dst *xdst, *new_xdst; 1648 int num_pols = 0, num_xfrms = 0, i, err, pol_dead; 1649 1650 /* Check if the policies from old bundle are usable */ 1651 xdst = NULL; 1652 if (oldflo) { 1653 xdst = container_of(oldflo, struct xfrm_dst, flo); 1654 num_pols = xdst->num_pols; 1655 num_xfrms = xdst->num_xfrms; 1656 pol_dead = 0; 1657 for (i = 0; i < num_pols; i++) { 1658 pols[i] = xdst->pols[i]; 1659 pol_dead |= pols[i]->walk.dead; 1660 } 1661 if (pol_dead) { 1662 dst_free(&xdst->u.dst); 1663 xdst = NULL; 1664 num_pols = 0; 1665 num_xfrms = 0; 1666 oldflo = NULL; 1667 } 1668 } 1669 1670 /* Resolve 
policies to use if we couldn't get them from 1671 * previous cache entry */ 1672 if (xdst == NULL) { 1673 num_pols = 1; 1674 pols[0] = __xfrm_policy_lookup(net, fl, family, dir); 1675 err = xfrm_expand_policies(fl, family, pols, 1676 &num_pols, &num_xfrms); 1677 if (err < 0) 1678 goto inc_error; 1679 if (num_pols == 0) 1680 return NULL; 1681 if (num_xfrms <= 0) 1682 goto make_dummy_bundle; 1683 } 1684 1685 new_xdst = xfrm_resolve_and_create_bundle(pols, num_pols, fl, family, dst_orig); 1686 if (IS_ERR(new_xdst)) { 1687 err = PTR_ERR(new_xdst); 1688 if (err != -EAGAIN) 1689 goto error; 1690 if (oldflo == NULL) 1691 goto make_dummy_bundle; 1692 dst_hold(&xdst->u.dst); 1693 return oldflo; 1694 } else if (new_xdst == NULL) { 1695 num_xfrms = 0; 1696 if (oldflo == NULL) 1697 goto make_dummy_bundle; 1698 xdst->num_xfrms = 0; 1699 dst_hold(&xdst->u.dst); 1700 return oldflo; 1701 } 1702 1703 /* Kill the previous bundle */ 1704 if (xdst) { 1705 /* The policies were stolen for newly generated bundle */ 1706 xdst->num_pols = 0; 1707 dst_free(&xdst->u.dst); 1708 } 1709 1710 /* Flow cache does not have reference, it dst_free()'s, 1711 * but we do need to return one reference for original caller */ 1712 dst_hold(&new_xdst->u.dst); 1713 return &new_xdst->flo; 1714 1715make_dummy_bundle: 1716 /* We found policies, but there's no bundles to instantiate: 1717 * either because the policy blocks, has no transformations or 1718 * we could not build template (no xfrm_states).*/ 1719 xdst = xfrm_alloc_dst(net, family); 1720 if (IS_ERR(xdst)) { 1721 xfrm_pols_put(pols, num_pols); 1722 return ERR_CAST(xdst); 1723 } 1724 xdst->num_pols = num_pols; 1725 xdst->num_xfrms = num_xfrms; 1726 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols); 1727 1728 dst_hold(&xdst->u.dst); 1729 return &xdst->flo; 1730 1731inc_error: 1732 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLERROR); 1733error: 1734 if (xdst != NULL) 1735 dst_free(&xdst->u.dst); 1736 else 1737 xfrm_pols_put(pols, num_pols); 1738 return ERR_PTR(err); 1739} 1740 1741static struct dst_entry *make_blackhole(struct net *net, u16 family, 1742 struct dst_entry *dst_orig) 1743{ 1744 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1745 struct dst_entry *ret; 1746 1747 if (!afinfo) { 1748 dst_release(dst_orig); 1749 ret = ERR_PTR(-EINVAL); 1750 } else { 1751 ret = afinfo->blackhole_route(net, dst_orig); 1752 } 1753 xfrm_policy_put_afinfo(afinfo); 1754 1755 return ret; 1756} 1757 1758/* Main function: finds/creates a bundle for given flow. 1759 * 1760 * At the moment we eat a raw IP route. Mostly to speed up lookups 1761 * on interfaces with disabled IPsec. 
1762 */ 1763struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig, 1764 const struct flowi *fl, 1765 struct sock *sk, int flags) 1766{ 1767 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 1768 struct flow_cache_object *flo; 1769 struct xfrm_dst *xdst; 1770 struct dst_entry *dst, *route; 1771 u16 family = dst_orig->ops->family; 1772 u8 dir = policy_to_flow_dir(XFRM_POLICY_OUT); 1773 int i, err, num_pols, num_xfrms = 0, drop_pols = 0; 1774 1775restart: 1776 dst = NULL; 1777 xdst = NULL; 1778 route = NULL; 1779 1780 if (sk && sk->sk_policy[XFRM_POLICY_OUT]) { 1781 num_pols = 1; 1782 pols[0] = xfrm_sk_policy_lookup(sk, XFRM_POLICY_OUT, fl); 1783 err = xfrm_expand_policies(fl, family, pols, 1784 &num_pols, &num_xfrms); 1785 if (err < 0) 1786 goto dropdst; 1787 1788 if (num_pols) { 1789 if (num_xfrms <= 0) { 1790 drop_pols = num_pols; 1791 goto no_transform; 1792 } 1793 1794 xdst = xfrm_resolve_and_create_bundle( 1795 pols, num_pols, fl, 1796 family, dst_orig); 1797 if (IS_ERR(xdst)) { 1798 xfrm_pols_put(pols, num_pols); 1799 err = PTR_ERR(xdst); 1800 goto dropdst; 1801 } else if (xdst == NULL) { 1802 num_xfrms = 0; 1803 drop_pols = num_pols; 1804 goto no_transform; 1805 } 1806 1807 dst_hold(&xdst->u.dst); 1808 1809 spin_lock_bh(&xfrm_policy_sk_bundle_lock); 1810 xdst->u.dst.next = xfrm_policy_sk_bundles; 1811 xfrm_policy_sk_bundles = &xdst->u.dst; 1812 spin_unlock_bh(&xfrm_policy_sk_bundle_lock); 1813 1814 route = xdst->route; 1815 } 1816 } 1817 1818 if (xdst == NULL) { 1819 /* To accelerate a bit... */ 1820 if ((dst_orig->flags & DST_NOXFRM) || 1821 !net->xfrm.policy_count[XFRM_POLICY_OUT]) 1822 goto nopol; 1823 1824 flo = flow_cache_lookup(net, fl, family, dir, 1825 xfrm_bundle_lookup, dst_orig); 1826 if (flo == NULL) 1827 goto nopol; 1828 if (IS_ERR(flo)) { 1829 err = PTR_ERR(flo); 1830 goto dropdst; 1831 } 1832 xdst = container_of(flo, struct xfrm_dst, flo); 1833 1834 num_pols = xdst->num_pols; 1835 num_xfrms = xdst->num_xfrms; 1836 memcpy(pols, xdst->pols, sizeof(struct xfrm_policy*) * num_pols); 1837 route = xdst->route; 1838 } 1839 1840 dst = &xdst->u.dst; 1841 if (route == NULL && num_xfrms > 0) { 1842 /* The only case when xfrm_bundle_lookup() returns a 1843 * bundle with null route, is when the template could 1844 * not be resolved. It means policies are there, but 1845 * bundle could not be created, since we don't yet 1846 * have the xfrm_state's. We need to wait for KM to 1847 * negotiate new SA's or bail out with error.*/ 1848 if (net->xfrm.sysctl_larval_drop) { 1849 /* EREMOTE tells the caller to generate 1850 * a one-shot blackhole route. 
*/ 1851 dst_release(dst); 1852 xfrm_pols_put(pols, drop_pols); 1853 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 1854 1855 return make_blackhole(net, family, dst_orig); 1856 } 1857 if (fl->flowi_flags & FLOWI_FLAG_CAN_SLEEP) { 1858 DECLARE_WAITQUEUE(wait, current); 1859 1860 add_wait_queue(&net->xfrm.km_waitq, &wait); 1861 set_current_state(TASK_INTERRUPTIBLE); 1862 schedule(); 1863 set_current_state(TASK_RUNNING); 1864 remove_wait_queue(&net->xfrm.km_waitq, &wait); 1865 1866 if (!signal_pending(current)) { 1867 dst_release(dst); 1868 goto restart; 1869 } 1870 1871 err = -ERESTART; 1872 } else 1873 err = -EAGAIN; 1874 1875 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES); 1876 goto error; 1877 } 1878 1879no_transform: 1880 if (num_pols == 0) 1881 goto nopol; 1882 1883 if ((flags & XFRM_LOOKUP_ICMP) && 1884 !(pols[0]->flags & XFRM_POLICY_ICMP)) { 1885 err = -ENOENT; 1886 goto error; 1887 } 1888 1889 for (i = 0; i < num_pols; i++) 1890 pols[i]->curlft.use_time = get_seconds(); 1891 1892 if (num_xfrms < 0) { 1893 /* Prohibit the flow */ 1894 XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTPOLBLOCK); 1895 err = -EPERM; 1896 goto error; 1897 } else if (num_xfrms > 0) { 1898 /* Flow transformed */ 1899 dst_release(dst_orig); 1900 } else { 1901 /* Flow passes untransformed */ 1902 dst_release(dst); 1903 dst = dst_orig; 1904 } 1905ok: 1906 xfrm_pols_put(pols, drop_pols); 1907 return dst; 1908 1909nopol: 1910 if (!(flags & XFRM_LOOKUP_ICMP)) { 1911 dst = dst_orig; 1912 goto ok; 1913 } 1914 err = -ENOENT; 1915error: 1916 dst_release(dst); 1917dropdst: 1918 dst_release(dst_orig); 1919 xfrm_pols_put(pols, drop_pols); 1920 return ERR_PTR(err); 1921} 1922EXPORT_SYMBOL(xfrm_lookup); 1923 1924static inline int 1925xfrm_secpath_reject(int idx, struct sk_buff *skb, const struct flowi *fl) 1926{ 1927 struct xfrm_state *x; 1928 1929 if (!skb->sp || idx < 0 || idx >= skb->sp->len) 1930 return 0; 1931 x = skb->sp->xvec[idx]; 1932 if (!x->type->reject) 1933 return 0; 1934 return x->type->reject(x, skb, fl); 1935} 1936 1937/* When skb is transformed back to its "native" form, we have to 1938 * check policy restrictions. At the moment we make this in maximally 1939 * stupid way. Shame on me. :-) Of course, connected sockets must 1940 * have policy cached at them. 1941 */ 1942 1943static inline int 1944xfrm_state_ok(const struct xfrm_tmpl *tmpl, const struct xfrm_state *x, 1945 unsigned short family) 1946{ 1947 if (xfrm_state_kern(x)) 1948 return tmpl->optional && !xfrm_state_addr_cmp(tmpl, x, tmpl->encap_family); 1949 return x->id.proto == tmpl->id.proto && 1950 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) && 1951 (x->props.reqid == tmpl->reqid || !tmpl->reqid) && 1952 x->props.mode == tmpl->mode && 1953 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) || 1954 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) && 1955 !(x->props.mode != XFRM_MODE_TRANSPORT && 1956 xfrm_state_addr_cmp(tmpl, x, family)); 1957} 1958 1959/* 1960 * 0 or more than 0 is returned when validation is succeeded (either bypass 1961 * because of optional transport mode, or next index of the mathced secpath 1962 * state with the template. 1963 * -1 is returned when no matching template is found. 1964 * Otherwise "-2 - errored_index" is returned. 
1965 */ 1966static inline int 1967xfrm_policy_ok(const struct xfrm_tmpl *tmpl, const struct sec_path *sp, int start, 1968 unsigned short family) 1969{ 1970 int idx = start; 1971 1972 if (tmpl->optional) { 1973 if (tmpl->mode == XFRM_MODE_TRANSPORT) 1974 return start; 1975 } else 1976 start = -1; 1977 for (; idx < sp->len; idx++) { 1978 if (xfrm_state_ok(tmpl, sp->xvec[idx], family)) 1979 return ++idx; 1980 if (sp->xvec[idx]->props.mode != XFRM_MODE_TRANSPORT) { 1981 if (start == -1) 1982 start = -2-idx; 1983 break; 1984 } 1985 } 1986 return start; 1987} 1988 1989int __xfrm_decode_session(struct sk_buff *skb, struct flowi *fl, 1990 unsigned int family, int reverse) 1991{ 1992 struct xfrm_policy_afinfo *afinfo = xfrm_policy_get_afinfo(family); 1993 int err; 1994 1995 if (unlikely(afinfo == NULL)) 1996 return -EAFNOSUPPORT; 1997 1998 afinfo->decode_session(skb, fl, reverse); 1999 err = security_xfrm_decode_session(skb, &fl->flowi_secid); 2000 xfrm_policy_put_afinfo(afinfo); 2001 return err; 2002} 2003EXPORT_SYMBOL(__xfrm_decode_session); 2004 2005static inline int secpath_has_nontransport(const struct sec_path *sp, int k, int *idxp) 2006{ 2007 for (; k < sp->len; k++) { 2008 if (sp->xvec[k]->props.mode != XFRM_MODE_TRANSPORT) { 2009 *idxp = k; 2010 return 1; 2011 } 2012 } 2013 2014 return 0; 2015} 2016 2017int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, 2018 unsigned short family) 2019{ 2020 struct net *net = dev_net(skb->dev); 2021 struct xfrm_policy *pol; 2022 struct xfrm_policy *pols[XFRM_POLICY_TYPE_MAX]; 2023 int npols = 0; 2024 int xfrm_nr; 2025 int pi; 2026 int reverse; 2027 struct flowi fl; 2028 u8 fl_dir; 2029 int xerr_idx = -1; 2030 2031 reverse = dir & ~XFRM_POLICY_MASK; 2032 dir &= XFRM_POLICY_MASK; 2033 fl_dir = policy_to_flow_dir(dir); 2034 2035 if (__xfrm_decode_session(skb, &fl, family, reverse) < 0) { 2036 XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR); 2037 return 0; 2038 } 2039 2040 nf_nat_decode_session(skb, &fl, family); 2041 2042 /* First, check used SA against their selectors. 
*/ 2043 if (skb->sp) { 2044 int i; 2045 2046 for (i=skb->sp->len-1; i>=0; i--) { 2047 struct xfrm_state *x = skb->sp->xvec[i]; 2048 if (!xfrm_selector_match(&x->sel, &fl, family)) { 2049 XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMISMATCH); 2050 return 0; 2051 } 2052 } 2053 } 2054 2055 pol = NULL; 2056 if (sk && sk->sk_policy[dir]) { 2057 pol = xfrm_sk_policy_lookup(sk, dir, &fl); 2058 if (IS_ERR(pol)) { 2059 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2060 return 0; 2061 } 2062 } 2063 2064 if (!pol) { 2065 struct flow_cache_object *flo; 2066 2067 flo = flow_cache_lookup(net, &fl, family, fl_dir, 2068 xfrm_policy_lookup, NULL); 2069 if (IS_ERR_OR_NULL(flo)) 2070 pol = ERR_CAST(flo); 2071 else 2072 pol = container_of(flo, struct xfrm_policy, flo); 2073 } 2074 2075 if (IS_ERR(pol)) { 2076 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2077 return 0; 2078 } 2079 2080 if (!pol) { 2081 if (skb->sp && secpath_has_nontransport(skb->sp, 0, &xerr_idx)) { 2082 xfrm_secpath_reject(xerr_idx, skb, &fl); 2083 XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); 2084 return 0; 2085 } 2086 return 1; 2087 } 2088 2089 pol->curlft.use_time = get_seconds(); 2090 2091 pols[0] = pol; 2092 npols ++; 2093#ifdef CONFIG_XFRM_SUB_POLICY 2094 if (pols[0]->type != XFRM_POLICY_TYPE_MAIN) { 2095 pols[1] = xfrm_policy_lookup_bytype(net, XFRM_POLICY_TYPE_MAIN, 2096 &fl, family, 2097 XFRM_POLICY_IN); 2098 if (pols[1]) { 2099 if (IS_ERR(pols[1])) { 2100 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLERROR); 2101 return 0; 2102 } 2103 pols[1]->curlft.use_time = get_seconds(); 2104 npols ++; 2105 } 2106 } 2107#endif 2108 2109 if (pol->action == XFRM_POLICY_ALLOW) { 2110 struct sec_path *sp; 2111 static struct sec_path dummy; 2112 struct xfrm_tmpl *tp[XFRM_MAX_DEPTH]; 2113 struct xfrm_tmpl *stp[XFRM_MAX_DEPTH]; 2114 struct xfrm_tmpl **tpp = tp; 2115 int ti = 0; 2116 int i, k; 2117 2118 if ((sp = skb->sp) == NULL) 2119 sp = &dummy; 2120 2121 for (pi = 0; pi < npols; pi++) { 2122 if (pols[pi] != pol && 2123 pols[pi]->action != XFRM_POLICY_ALLOW) { 2124 XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK); 2125 goto reject; 2126 } 2127 if (ti + pols[pi]->xfrm_nr >= XFRM_MAX_DEPTH) { 2128 XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR); 2129 goto reject_error; 2130 } 2131 for (i = 0; i < pols[pi]->xfrm_nr; i++) 2132 tpp[ti++] = &pols[pi]->xfrm_vec[i]; 2133 } 2134 xfrm_nr = ti; 2135 if (npols > 1) { 2136 xfrm_tmpl_sort(stp, tpp, xfrm_nr, family); 2137 tpp = stp; 2138 } 2139 2140 /* For each tunnel xfrm, find the first matching tmpl. 2141 * For each tmpl before that, find corresponding xfrm. 2142 * Order is _important_. Later we will implement 2143 * some barriers, but at the moment barriers 2144 * are implied between each two transformations. 
		 */
		for (i = xfrm_nr-1, k = 0; i >= 0; i--) {
			k = xfrm_policy_ok(tpp[i], sp, k, family);
			if (k < 0) {
				if (k < -1)
					/* "-2 - errored_index" returned */
					xerr_idx = -(2+k);
				XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
				goto reject;
			}
		}

		if (secpath_has_nontransport(sp, k, &xerr_idx)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMINTMPLMISMATCH);
			goto reject;
		}

		xfrm_pols_put(pols, npols);
		return 1;
	}
	XFRM_INC_STATS(net, LINUX_MIB_XFRMINPOLBLOCK);

reject:
	xfrm_secpath_reject(xerr_idx, skb, &fl);
reject_error:
	xfrm_pols_put(pols, npols);
	return 0;
}
EXPORT_SYMBOL(__xfrm_policy_check);

int __xfrm_route_forward(struct sk_buff *skb, unsigned short family)
{
	struct net *net = dev_net(skb->dev);
	struct flowi fl;
	struct dst_entry *dst;
	int res = 1;

	if (xfrm_decode_session(skb, &fl, family) < 0) {
		XFRM_INC_STATS(net, LINUX_MIB_XFRMFWDHDRERROR);
		return 0;
	}

	skb_dst_force(skb);

	dst = xfrm_lookup(net, skb_dst(skb), &fl, NULL, 0);
	if (IS_ERR(dst)) {
		res = 0;
		dst = NULL;
	}
	skb_dst_set(skb, dst);
	return res;
}
EXPORT_SYMBOL(__xfrm_route_forward);

/* Optimize later using cookies and generation ids. */

static struct dst_entry *xfrm_dst_check(struct dst_entry *dst, u32 cookie)
{
	/* Code (such as __xfrm4_bundle_create()) sets dst->obsolete
	 * to "-1" to force all XFRM destinations to get validated by
	 * dst_ops->check on every use. We do this because when a
	 * normal route referenced by an XFRM dst is obsoleted we do
	 * not go looking around for all parent referencing XFRM dsts
	 * so that we can invalidate them. It is just too much work.
	 * Instead we make the checks here on every use. For example:
	 *
	 *	XFRM dst A --> IPv4 dst X
	 *
	 * X is the "xdst->route" of A (X is also the "dst->path" of A
	 * in this example). If X is marked obsolete, "A" will not
	 * notice. That's what we are validating here via the
	 * stale_bundle() check.
	 *
	 * When a policy's bundle is pruned, we dst_free() the XFRM
	 * dst which causes it's ->obsolete field to be set to a
	 * positive non-zero integer. If an XFRM dst has been pruned
	 * like this, we want to force a new route lookup.
	 */
	if (dst->obsolete < 0 && !stale_bundle(dst))
		return dst;

	return NULL;
}

static int stale_bundle(struct dst_entry *dst)
{
	return !xfrm_bundle_ok((struct xfrm_dst *)dst, AF_UNSPEC);
}

void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev)
{
	while ((dst = dst->child) && dst->xfrm && dst->dev == dev) {
		dst->dev = dev_net(dev)->loopback_dev;
		dev_hold(dst->dev);
		dev_put(dev);
	}
}
EXPORT_SYMBOL(xfrm_dst_ifdown);

static void xfrm_link_failure(struct sk_buff *skb)
{
	/* Impossible. Such dst must be popped before reaches point of failure.
	 */
}

static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
{
	if (dst) {
		if (dst->obsolete) {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void __xfrm_garbage_collect(struct net *net)
{
	struct dst_entry *head, *next;

	flow_cache_flush();

	spin_lock_bh(&xfrm_policy_sk_bundle_lock);
	head = xfrm_policy_sk_bundles;
	xfrm_policy_sk_bundles = NULL;
	spin_unlock_bh(&xfrm_policy_sk_bundle_lock);

	while (head) {
		next = head->next;
		dst_free(head);
		head = next;
	}
}

static void xfrm_init_pmtu(struct dst_entry *dst)
{
	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
		u32 pmtu, route_mtu_cached;

		pmtu = dst_mtu(dst->child);
		xdst->child_mtu_cached = pmtu;

		pmtu = xfrm_state_mtu(dst->xfrm, pmtu);

		route_mtu_cached = dst_mtu(xdst->route);
		xdst->route_mtu_cached = route_mtu_cached;

		if (pmtu > route_mtu_cached)
			pmtu = route_mtu_cached;

		dst_metric_set(dst, RTAX_MTU, pmtu);
	} while ((dst = dst->next));
}

/* Check that the bundle accepts the flow and its components are
 * still valid.
 */

static int xfrm_bundle_ok(struct xfrm_dst *first, int family)
{
	struct dst_entry *dst = &first->u.dst;
	struct xfrm_dst *last;
	u32 mtu;

	if (!dst_check(dst->path, ((struct xfrm_dst *)dst)->path_cookie) ||
	    (dst->dev && !netif_running(dst->dev)))
		return 0;

	last = NULL;

	do {
		struct xfrm_dst *xdst = (struct xfrm_dst *)dst;

		if (dst->xfrm->km.state != XFRM_STATE_VALID)
			return 0;
		if (xdst->xfrm_genid != dst->xfrm->genid)
			return 0;
		if (xdst->num_pols > 0 &&
		    xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
			return 0;

		mtu = dst_mtu(dst->child);
		if (xdst->child_mtu_cached != mtu) {
			last = xdst;
			xdst->child_mtu_cached = mtu;
		}

		if (!dst_check(xdst->route, xdst->route_cookie))
			return 0;
		mtu = dst_mtu(xdst->route);
		if (xdst->route_mtu_cached != mtu) {
			last = xdst;
			xdst->route_mtu_cached = mtu;
		}

		dst = dst->child;
	} while (dst->xfrm);

	if (likely(!last))
		return 1;

	mtu = last->child_mtu_cached;
	for (;;) {
		dst = &last->u.dst;

		mtu = xfrm_state_mtu(dst->xfrm, mtu);
		if (mtu > last->route_mtu_cached)
			mtu = last->route_mtu_cached;
		dst_metric_set(dst, RTAX_MTU, mtu);

		if (last == first)
			break;

		last = (struct xfrm_dst *)last->u.dst.next;
		last->child_mtu_cached = mtu;
	}

	return 1;
}

static unsigned int xfrm_default_advmss(const struct dst_entry *dst)
{
	return dst_metric_advmss(dst->path);
}

static unsigned int xfrm_default_mtu(const struct dst_entry *dst)
{
	return dst_mtu(dst->path);
}

int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	struct net *net;
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (unlikely(xfrm_policy_afinfo[afinfo->family] != NULL))
		err = -ENOBUFS;
	else {
		struct dst_ops *dst_ops = afinfo->dst_ops;
		if (likely(dst_ops->kmem_cachep == NULL))
			dst_ops->kmem_cachep = xfrm_dst_cache;
		if (likely(dst_ops->check == NULL))
			dst_ops->check = xfrm_dst_check;
		if (likely(dst_ops->default_advmss == NULL))
			dst_ops->default_advmss = xfrm_default_advmss;
		if (likely(dst_ops->default_mtu == NULL))
			dst_ops->default_mtu = xfrm_default_mtu;
		if (likely(dst_ops->negative_advice == NULL))
			dst_ops->negative_advice = xfrm_negative_advice;
		if (likely(dst_ops->link_failure == NULL))
			dst_ops->link_failure = xfrm_link_failure;
		if (likely(afinfo->garbage_collect == NULL))
			afinfo->garbage_collect = __xfrm_garbage_collect;
		xfrm_policy_afinfo[afinfo->family] = afinfo;
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);

	rtnl_lock();
	for_each_net(net) {
		struct dst_ops *xfrm_dst_ops;

		switch (afinfo->family) {
		case AF_INET:
			xfrm_dst_ops = &net->xfrm.xfrm4_dst_ops;
			break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		case AF_INET6:
			xfrm_dst_ops = &net->xfrm.xfrm6_dst_ops;
			break;
#endif
		default:
			BUG();
		}
		*xfrm_dst_ops = *afinfo->dst_ops;
	}
	rtnl_unlock();

	return err;
}
EXPORT_SYMBOL(xfrm_policy_register_afinfo);

int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	int err = 0;
	if (unlikely(afinfo == NULL))
		return -EINVAL;
	if (unlikely(afinfo->family >= NPROTO))
		return -EAFNOSUPPORT;
	write_lock_bh(&xfrm_policy_afinfo_lock);
	if (likely(xfrm_policy_afinfo[afinfo->family] != NULL)) {
		if (unlikely(xfrm_policy_afinfo[afinfo->family] != afinfo))
			err = -EINVAL;
		else {
			struct dst_ops *dst_ops = afinfo->dst_ops;
			xfrm_policy_afinfo[afinfo->family] = NULL;
			dst_ops->kmem_cachep = NULL;
			dst_ops->check = NULL;
			dst_ops->negative_advice = NULL;
			dst_ops->link_failure = NULL;
			afinfo->garbage_collect = NULL;
		}
	}
	write_unlock_bh(&xfrm_policy_afinfo_lock);
	return err;
}
EXPORT_SYMBOL(xfrm_policy_unregister_afinfo);

static void __net_init xfrm_dst_ops_init(struct net *net)
{
	struct xfrm_policy_afinfo *afinfo;

	read_lock_bh(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[AF_INET];
	if (afinfo)
		net->xfrm.xfrm4_dst_ops = *afinfo->dst_ops;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	afinfo = xfrm_policy_afinfo[AF_INET6];
	if (afinfo)
		net->xfrm.xfrm6_dst_ops = *afinfo->dst_ops;
#endif
	read_unlock_bh(&xfrm_policy_afinfo_lock);
}

static struct xfrm_policy_afinfo *xfrm_policy_get_afinfo(unsigned short family)
{
	struct xfrm_policy_afinfo *afinfo;
	if (unlikely(family >= NPROTO))
		return NULL;
	read_lock(&xfrm_policy_afinfo_lock);
	afinfo = xfrm_policy_afinfo[family];
	if (unlikely(!afinfo))
		read_unlock(&xfrm_policy_afinfo_lock);
	return afinfo;
}

static void xfrm_policy_put_afinfo(struct xfrm_policy_afinfo *afinfo)
{
	read_unlock(&xfrm_policy_afinfo_lock);
}

static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;

	switch (event) {
	case NETDEV_DOWN:
		__xfrm_garbage_collect(dev_net(dev));
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

#ifdef CONFIG_XFRM_STATISTICS
static int __net_init xfrm_statistics_init(struct net *net)
{
	int rv;

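	/* Added note: set up the per-cpu XFRM MIB counters, then register
	 * the /proc statistics interface; free the counters again if the
	 * latter fails.
	 */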
	if (snmp_mib_init((void __percpu **)net->mib.xfrm_statistics,
			  sizeof(struct linux_xfrm_mib),
			  __alignof__(struct linux_xfrm_mib)) < 0)
		return -ENOMEM;
	rv = xfrm_proc_init(net);
	if (rv < 0)
		snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
	return rv;
}

static void xfrm_statistics_fini(struct net *net)
{
	xfrm_proc_fini(net);
	snmp_mib_free((void __percpu **)net->mib.xfrm_statistics);
}
#else
static int __net_init xfrm_statistics_init(struct net *net)
{
	return 0;
}

static void xfrm_statistics_fini(struct net *net)
{
}
#endif

static int __net_init xfrm_policy_init(struct net *net)
{
	unsigned int hmask, sz;
	int dir;

	if (net_eq(net, &init_net))
		xfrm_dst_cache = kmem_cache_create("xfrm_dst_cache",
					   sizeof(struct xfrm_dst),
					   0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
					   NULL);

	hmask = 8 - 1;
	sz = (hmask+1) * sizeof(struct hlist_head);

	net->xfrm.policy_byidx = xfrm_hash_alloc(sz);
	if (!net->xfrm.policy_byidx)
		goto out_byidx;
	net->xfrm.policy_idx_hmask = hmask;

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		net->xfrm.policy_count[dir] = 0;
		INIT_HLIST_HEAD(&net->xfrm.policy_inexact[dir]);

		htab = &net->xfrm.policy_bydst[dir];
		htab->table = xfrm_hash_alloc(sz);
		if (!htab->table)
			goto out_bydst;
		htab->hmask = hmask;
	}

	INIT_LIST_HEAD(&net->xfrm.policy_all);
	INIT_WORK(&net->xfrm.policy_hash_work, xfrm_hash_resize);
	if (net_eq(net, &init_net))
		register_netdevice_notifier(&xfrm_dev_notifier);
	return 0;

out_bydst:
	for (dir--; dir >= 0; dir--) {
		struct xfrm_policy_hash *htab;

		htab = &net->xfrm.policy_bydst[dir];
		xfrm_hash_free(htab->table, sz);
	}
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
out_byidx:
	return -ENOMEM;
}

static void xfrm_policy_fini(struct net *net)
{
	struct xfrm_audit audit_info;
	unsigned int sz;
	int dir;

	flush_work(&net->xfrm.policy_hash_work);
#ifdef CONFIG_XFRM_SUB_POLICY
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_SUB, &audit_info);
#endif
	audit_info.loginuid = -1;
	audit_info.sessionid = -1;
	audit_info.secid = 0;
	xfrm_policy_flush(net, XFRM_POLICY_TYPE_MAIN, &audit_info);

	WARN_ON(!list_empty(&net->xfrm.policy_all));

	for (dir = 0; dir < XFRM_POLICY_MAX * 2; dir++) {
		struct xfrm_policy_hash *htab;

		WARN_ON(!hlist_empty(&net->xfrm.policy_inexact[dir]));

		htab = &net->xfrm.policy_bydst[dir];
		sz = (htab->hmask + 1);
		WARN_ON(!hlist_empty(htab->table));
		xfrm_hash_free(htab->table, sz);
	}

	sz = (net->xfrm.policy_idx_hmask + 1) * sizeof(struct hlist_head);
	WARN_ON(!hlist_empty(net->xfrm.policy_byidx));
	xfrm_hash_free(net->xfrm.policy_byidx, sz);
}

static int __net_init xfrm_net_init(struct net *net)
{
	int rv;

	rv = xfrm_statistics_init(net);
	if (rv < 0)
		goto out_statistics;
	rv = xfrm_state_init(net);
	if (rv < 0)
		goto out_state;
	rv = xfrm_policy_init(net);
	if (rv < 0)
		goto out_policy;
	xfrm_dst_ops_init(net);
	rv = xfrm_sysctl_init(net);
	if (rv < 0)
		goto out_sysctl;
	return 0;

out_sysctl:
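	/* Added note: error unwind, undoing the completed init stages in
	 * reverse order.
	 */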
	xfrm_policy_fini(net);
out_policy:
	xfrm_state_fini(net);
out_state:
	xfrm_statistics_fini(net);
out_statistics:
	return rv;
}

static void __net_exit xfrm_net_exit(struct net *net)
{
	xfrm_sysctl_fini(net);
	xfrm_policy_fini(net);
	xfrm_state_fini(net);
	xfrm_statistics_fini(net);
}

static struct pernet_operations __net_initdata xfrm_net_ops = {
	.init = xfrm_net_init,
	.exit = xfrm_net_exit,
};

void __init xfrm_init(void)
{
	register_pernet_subsys(&xfrm_net_ops);
	xfrm_input_init();
}

#ifdef CONFIG_AUDITSYSCALL
static void xfrm_audit_common_policyinfo(struct xfrm_policy *xp,
					 struct audit_buffer *audit_buf)
{
	struct xfrm_sec_ctx *ctx = xp->security;
	struct xfrm_selector *sel = &xp->selector;

	if (ctx)
		audit_log_format(audit_buf, " sec_alg=%u sec_doi=%u sec_obj=%s",
				 ctx->ctx_alg, ctx->ctx_doi, ctx->ctx_str);

	switch(sel->family) {
	case AF_INET:
		audit_log_format(audit_buf, " src=%pI4", &sel->saddr.a4);
		if (sel->prefixlen_s != 32)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI4", &sel->daddr.a4);
		if (sel->prefixlen_d != 32)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	case AF_INET6:
		audit_log_format(audit_buf, " src=%pI6", sel->saddr.a6);
		if (sel->prefixlen_s != 128)
			audit_log_format(audit_buf, " src_prefixlen=%d",
					 sel->prefixlen_s);
		audit_log_format(audit_buf, " dst=%pI6", sel->daddr.a6);
		if (sel->prefixlen_d != 128)
			audit_log_format(audit_buf, " dst_prefixlen=%d",
					 sel->prefixlen_d);
		break;
	}
}

void xfrm_audit_policy_add(struct xfrm_policy *xp, int result,
			   uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-add");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_add);

void xfrm_audit_policy_delete(struct xfrm_policy *xp, int result,
			      uid_t auid, u32 sessionid, u32 secid)
{
	struct audit_buffer *audit_buf;

	audit_buf = xfrm_audit_start("SPD-delete");
	if (audit_buf == NULL)
		return;
	xfrm_audit_helper_usrinfo(auid, sessionid, secid, audit_buf);
	audit_log_format(audit_buf, " res=%u", result);
	xfrm_audit_common_policyinfo(xp, audit_buf);
	audit_log_end(audit_buf);
}
EXPORT_SYMBOL_GPL(xfrm_audit_policy_delete);
#endif

#ifdef CONFIG_XFRM_MIGRATE
static int xfrm_migrate_selector_match(const struct xfrm_selector *sel_cmp,
				       const struct xfrm_selector *sel_tgt)
{
	if (sel_cmp->proto == IPSEC_ULPROTO_ANY) {
		if (sel_tgt->family == sel_cmp->family &&
		    xfrm_addr_cmp(&sel_tgt->daddr, &sel_cmp->daddr,
				  sel_cmp->family) == 0 &&
		    xfrm_addr_cmp(&sel_tgt->saddr, &sel_cmp->saddr,
				  sel_cmp->family) == 0 &&
		    sel_tgt->prefixlen_d == sel_cmp->prefixlen_d &&
		    sel_tgt->prefixlen_s == sel_cmp->prefixlen_s) {
			return 1;
		}
	} else {
		if (memcmp(sel_tgt, sel_cmp, sizeof(*sel_tgt)) == 0) {
			return 1;
		}
	}
	return 0;
}

static struct xfrm_policy * xfrm_migrate_policy_find(const struct xfrm_selector *sel,
						     u8 dir, u8 type)
{
	struct xfrm_policy *pol, *ret = NULL;
	struct hlist_node *entry;
	struct hlist_head *chain;
	u32 priority = ~0U;

	read_lock_bh(&xfrm_policy_lock);
	chain = policy_hash_direct(&init_net, &sel->daddr, &sel->saddr, sel->family, dir);
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type) {
			ret = pol;
			priority = ret->priority;
			break;
		}
	}
	chain = &init_net.xfrm.policy_inexact[dir];
	hlist_for_each_entry(pol, entry, chain, bydst) {
		if (xfrm_migrate_selector_match(sel, &pol->selector) &&
		    pol->type == type &&
		    pol->priority < priority) {
			ret = pol;
			break;
		}
	}

	if (ret)
		xfrm_pol_hold(ret);

	read_unlock_bh(&xfrm_policy_lock);

	return ret;
}

static int migrate_tmpl_match(const struct xfrm_migrate *m, const struct xfrm_tmpl *t)
{
	int match = 0;

	if (t->mode == m->mode && t->id.proto == m->proto &&
	    (m->reqid == 0 || t->reqid == m->reqid)) {
		switch (t->mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			if (xfrm_addr_cmp(&t->id.daddr, &m->old_daddr,
					  m->old_family) == 0 &&
			    xfrm_addr_cmp(&t->saddr, &m->old_saddr,
					  m->old_family) == 0) {
				match = 1;
			}
			break;
		case XFRM_MODE_TRANSPORT:
			/* in case of transport mode, template does not store
			   any IP addresses, hence we just compare mode and
			   protocol */
			match = 1;
			break;
		default:
			break;
		}
	}
	return match;
}

/* update endpoint address(es) of template(s) */
static int xfrm_policy_migrate(struct xfrm_policy *pol,
			       struct xfrm_migrate *m, int num_migrate)
{
	struct xfrm_migrate *mp;
	int i, j, n = 0;

	write_lock_bh(&pol->lock);
	if (unlikely(pol->walk.dead)) {
		/* target policy has been deleted */
		write_unlock_bh(&pol->lock);
		return -ENOENT;
	}

	for (i = 0; i < pol->xfrm_nr; i++) {
		for (j = 0, mp = m; j < num_migrate; j++, mp++) {
			if (!migrate_tmpl_match(mp, &pol->xfrm_vec[i]))
				continue;
			n++;
			if (pol->xfrm_vec[i].mode != XFRM_MODE_TUNNEL &&
			    pol->xfrm_vec[i].mode != XFRM_MODE_BEET)
				continue;
			/* update endpoints */
			memcpy(&pol->xfrm_vec[i].id.daddr, &mp->new_daddr,
			       sizeof(pol->xfrm_vec[i].id.daddr));
			memcpy(&pol->xfrm_vec[i].saddr, &mp->new_saddr,
			       sizeof(pol->xfrm_vec[i].saddr));
			pol->xfrm_vec[i].encap_family = mp->new_family;
			/* flush bundles */
			atomic_inc(&pol->genid);
		}
	}

	write_unlock_bh(&pol->lock);

	if (!n)
		return -ENODATA;

	return 0;
}

static int xfrm_migrate_check(const struct xfrm_migrate *m, int num_migrate)
{
	int i, j;

	if (num_migrate < 1 || num_migrate > XFRM_MAX_DEPTH)
		return -EINVAL;

	for (i = 0; i < num_migrate; i++) {
		if ((xfrm_addr_cmp(&m[i].old_daddr, &m[i].new_daddr,
				   m[i].old_family) == 0) &&
		    (xfrm_addr_cmp(&m[i].old_saddr, &m[i].new_saddr,
				   m[i].old_family) == 0))
			return -EINVAL;
		if (xfrm_addr_any(&m[i].new_daddr, m[i].new_family) ||
		    xfrm_addr_any(&m[i].new_saddr, m[i].new_family))
			return -EINVAL;

		/* check if there is any duplicated entry */
		for (j = i + 1; j < num_migrate; j++) {
			if (!memcmp(&m[i].old_daddr, &m[j].old_daddr,
				    sizeof(m[i].old_daddr)) &&
			    !memcmp(&m[i].old_saddr, &m[j].old_saddr,
				    sizeof(m[i].old_saddr)) &&
			    m[i].proto == m[j].proto &&
			    m[i].mode == m[j].mode &&
			    m[i].reqid == m[j].reqid &&
			    m[i].old_family == m[j].old_family)
				return -EINVAL;
		}
	}

	return 0;
}

int xfrm_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
		 struct xfrm_migrate *m, int num_migrate,
		 struct xfrm_kmaddress *k)
{
	int i, err, nx_cur = 0, nx_new = 0;
	struct xfrm_policy *pol = NULL;
	struct xfrm_state *x, *xc;
	struct xfrm_state *x_cur[XFRM_MAX_DEPTH];
	struct xfrm_state *x_new[XFRM_MAX_DEPTH];
	struct xfrm_migrate *mp;

	if ((err = xfrm_migrate_check(m, num_migrate)) < 0)
		goto out;

	/* Stage 1 - find policy */
	if ((pol = xfrm_migrate_policy_find(sel, dir, type)) == NULL) {
		err = -ENOENT;
		goto out;
	}

	/* Stage 2 - find and update state(s) */
	for (i = 0, mp = m; i < num_migrate; i++, mp++) {
		if ((x = xfrm_migrate_state_find(mp))) {
			x_cur[nx_cur] = x;
			nx_cur++;
			if ((xc = xfrm_state_migrate(x, mp))) {
				x_new[nx_new] = xc;
				nx_new++;
			} else {
				err = -ENODATA;
				goto restore_state;
			}
		}
	}

	/* Stage 3 - update policy */
	if ((err = xfrm_policy_migrate(pol, m, num_migrate)) < 0)
		goto restore_state;

	/* Stage 4 - delete old state(s) */
	if (nx_cur) {
		xfrm_states_put(x_cur, nx_cur);
		xfrm_states_delete(x_cur, nx_cur);
	}

	/* Stage 5 - announce */
	km_migrate(sel, dir, type, m, num_migrate, k);

	xfrm_pol_put(pol);

	return 0;
out:
	return err;

restore_state:
	if (pol)
		xfrm_pol_put(pol);
	if (nx_cur)
		xfrm_states_put(x_cur, nx_cur);
	if (nx_new)
		xfrm_states_delete(x_new, nx_new);

	return err;
}
EXPORT_SYMBOL(xfrm_migrate);
#endif