Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xfrm: Cache used outbound xfrm states at the policy.

Now that we can have percpu xfrm states, the number of active
states might increase. To get a better lookup performance,
we cache the used xfrm states at the policy for outbound
IPsec traffic.

Signed-off-by: Steffen Klassert <steffen.klassert@secunet.com>
Tested-by: Antony Antony <antony.antony@secunet.com>
Tested-by: Tobias Brunner <tobias@strongswan.org>

+71
+4
include/net/xfrm.h
··· 184 184 }; 185 185 struct hlist_node byspi; 186 186 struct hlist_node byseq; 187 + struct hlist_node state_cache; 187 188 188 189 refcount_t refcnt; 189 190 spinlock_t lock; ··· 538 537 * @xp_net: network namespace the policy lives in 539 538 * @bydst: hlist node for SPD hash table or rbtree list 540 539 * @byidx: hlist node for index hash table 540 + * @state_cache_list: hlist head for policy cached xfrm states 541 541 * @lock: serialize changes to policy structure members 542 542 * @refcnt: reference count, freed once it reaches 0 543 543 * @pos: kernel internal tie-breaker to determine age of policy ··· 568 566 possible_net_t xp_net; 569 567 struct hlist_node bydst; 570 568 struct hlist_node byidx; 569 + 570 + struct hlist_head state_cache_list; 571 571 572 572 /* This lock only affects elements except for entry. */ 573 573 rwlock_t lock;
+12
net/xfrm/xfrm_policy.c
··· 434 434 if (policy) { 435 435 write_pnet(&policy->xp_net, net); 436 436 INIT_LIST_HEAD(&policy->walk.all); 437 + INIT_HLIST_HEAD(&policy->state_cache_list); 437 438 INIT_HLIST_NODE(&policy->bydst); 438 439 INIT_HLIST_NODE(&policy->byidx); 439 440 rwlock_init(&policy->lock); ··· 476 475 477 476 static void xfrm_policy_kill(struct xfrm_policy *policy) 478 477 { 478 + struct net *net = xp_net(policy); 479 + struct xfrm_state *x; 480 + 479 481 xfrm_dev_policy_delete(policy); 480 482 481 483 write_lock_bh(&policy->lock); ··· 493 489 494 490 if (del_timer(&policy->timer)) 495 491 xfrm_pol_put(policy); 492 + 493 + /* XXX: Flush state cache */ 494 + spin_lock_bh(&net->xfrm.xfrm_state_lock); 495 + hlist_for_each_entry_rcu(x, &policy->state_cache_list, state_cache) { 496 + hlist_del_init_rcu(&x->state_cache); 497 + } 498 + spin_unlock_bh(&net->xfrm.xfrm_state_lock); 496 499 497 500 xfrm_pol_put(policy); 498 501 } ··· 3286 3275 dst_release(dst); 3287 3276 dst = dst_orig; 3288 3277 } 3278 + 3289 3279 ok: 3290 3280 xfrm_pols_put(pols, drop_pols); 3291 3281 if (dst && dst->xfrm &&
+55
net/xfrm/xfrm_state.c
··· 665 665 refcount_set(&x->refcnt, 1); 666 666 atomic_set(&x->tunnel_users, 0); 667 667 INIT_LIST_HEAD(&x->km.all); 668 + INIT_HLIST_NODE(&x->state_cache); 668 669 INIT_HLIST_NODE(&x->bydst); 669 670 INIT_HLIST_NODE(&x->bysrc); 670 671 INIT_HLIST_NODE(&x->byspi); ··· 745 744 746 745 if (x->km.state != XFRM_STATE_DEAD) { 747 746 x->km.state = XFRM_STATE_DEAD; 747 + 748 748 spin_lock(&net->xfrm.xfrm_state_lock); 749 749 list_del(&x->km.all); 750 750 hlist_del_rcu(&x->bydst); 751 751 hlist_del_rcu(&x->bysrc); 752 752 if (x->km.seq) 753 753 hlist_del_rcu(&x->byseq); 754 + if (!hlist_unhashed(&x->state_cache)) 755 + hlist_del_rcu(&x->state_cache); 754 756 if (x->id.spi) 755 757 hlist_del_rcu(&x->byspi); 756 758 net->xfrm.state_num--; ··· 1226 1222 unsigned int sequence; 1227 1223 struct km_event c; 1228 1224 unsigned int pcpu_id; 1225 + bool cached = false; 1229 1226 1230 1227 /* We need the cpu id just as a lookup key, 1231 1228 * we don't require it to be stable. ··· 1239 1234 sequence = read_seqcount_begin(&net->xfrm.xfrm_state_hash_generation); 1240 1235 1241 1236 rcu_read_lock(); 1237 + hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) { 1238 + if (x->props.family == encap_family && 1239 + x->props.reqid == tmpl->reqid && 1240 + (mark & x->mark.m) == x->mark.v && 1241 + x->if_id == if_id && 1242 + !(x->props.flags & XFRM_STATE_WILDRECV) && 1243 + xfrm_state_addr_check(x, daddr, saddr, encap_family) && 1244 + tmpl->mode == x->props.mode && 1245 + tmpl->id.proto == x->id.proto && 1246 + (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 1247 + xfrm_state_look_at(pol, x, fl, encap_family, 1248 + &best, &acquire_in_progress, &error); 1249 + } 1250 + 1251 + if (best) 1252 + goto cached; 1253 + 1254 + hlist_for_each_entry_rcu(x, &pol->state_cache_list, state_cache) { 1255 + if (x->props.family == encap_family && 1256 + x->props.reqid == tmpl->reqid && 1257 + (mark & x->mark.m) == x->mark.v && 1258 + x->if_id == if_id && 1259 + !(x->props.flags & XFRM_STATE_WILDRECV) &&
1260 + xfrm_addr_equal(&x->id.daddr, daddr, encap_family) && 1261 + tmpl->mode == x->props.mode && 1262 + tmpl->id.proto == x->id.proto && 1263 + (tmpl->id.spi == x->id.spi || !tmpl->id.spi)) 1264 + xfrm_state_look_at(pol, x, fl, family, 1265 + &best, &acquire_in_progress, &error); 1266 + } 1267 + 1268 + cached: 1269 + cached = true; 1270 + if (best) 1271 + goto found; 1272 + else if (error) 1273 + best = NULL; 1274 + else if (acquire_in_progress) /* XXX: acquire_in_progress should not happen */ 1275 + WARN_ON(1); 1276 + 1242 1277 h = xfrm_dst_hash(net, daddr, saddr, tmpl->reqid, encap_family); 1243 1278 hlist_for_each_entry_rcu(x, net->xfrm.state_bydst + h, bydst) { 1244 1279 #ifdef CONFIG_XFRM_OFFLOAD ··· 1428 1383 XFRM_STATE_INSERT(bysrc, &x->bysrc, 1429 1384 net->xfrm.state_bysrc + h, 1430 1385 x->xso.type); 1386 + INIT_HLIST_NODE(&x->state_cache); 1431 1387 if (x->id.spi) { 1432 1388 h = xfrm_spi_hash(net, &x->id.daddr, x->id.spi, x->id.proto, encap_family); 1433 1389 XFRM_STATE_INSERT(byspi, &x->byspi, ··· 1477 1431 } else { 1478 1432 *err = acquire_in_progress ? -EAGAIN : error; 1479 1433 } 1434 + 1435 + if (x && x->km.state == XFRM_STATE_VALID && !cached && 1436 + (!(pol->flags & XFRM_POLICY_CPU_ACQUIRE) || x->pcpu_num == pcpu_id)) { 1437 + spin_lock_bh(&net->xfrm.xfrm_state_lock); 1438 + if (hlist_unhashed(&x->state_cache)) 1439 + hlist_add_head_rcu(&x->state_cache, &pol->state_cache_list); 1440 + spin_unlock_bh(&net->xfrm.xfrm_state_lock); 1441 + } 1442 + 1480 1443 rcu_read_unlock(); 1481 1444 if (to_put) 1482 1445 xfrm_state_put(to_put);