Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net/flow_dissector: switch to siphash

UDP IPv6 packets auto flowlabels are using a 32bit secret
(static u32 hashrnd in net/core/flow_dissector.c) and
apply jhash() over fields known by the receivers.

Attackers can easily infer the 32bit secret and use this information
to identify a device and/or user, since this 32bit secret is only
set at boot time.

Really, using jhash() to generate cookies sent on the wire
is a serious security concern.

Trying to change the rol32(hash, 16) in ip6_make_flowlabel() would be
a dead end. Trying to periodically change the secret (like in sch_sfq.c)
could change paths taken in the network for long lived flows.

Let's switch to siphash, as we did in commit df453700e8d8
("inet: switch IP ID generator to siphash")

Using a cryptographically strong pseudo random function will solve this
privacy issue and more generally remove other weak points in the stack.

Packet schedulers using skb_get_hash_perturb() benefit from this change.

Fixes: b56774163f99 ("ipv6: Enable auto flow labels by default")
Fixes: 42240901f7c4 ("ipv6: Implement different admin modes for automatic flow labels")
Fixes: 67800f9b1f4e ("ipv6: Call skb_get_hash_flowi6 to get skb->hash in ip6_make_flowlabel")
Fixes: cb1ce2ef387b ("ipv6: Implement automatic flow label generation on transmit")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: Jonathan Berger <jonathann1@walla.com>
Reported-by: Amit Klein <aksecurity@gmail.com>
Reported-by: Benny Pinkas <benny@pinkas.net>
Cc: Tom Herbert <tom@herbertland.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Eric Dumazet and committed by
David S. Miller
55667441 6c5d9c2a

+42 -43
+2 -1
include/linux/skbuff.h
··· 1354 1354 return skb->hash; 1355 1355 } 1356 1356 1357 - __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb); 1357 + __u32 skb_get_hash_perturb(const struct sk_buff *skb, 1358 + const siphash_key_t *perturb); 1358 1359 1359 1360 static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) 1360 1361 {
+2 -1
include/net/flow_dissector.h
··· 4 4 5 5 #include <linux/types.h> 6 6 #include <linux/in6.h> 7 + #include <linux/siphash.h> 7 8 #include <uapi/linux/if_ether.h> 8 9 9 10 /** ··· 277 276 struct flow_keys { 278 277 struct flow_dissector_key_control control; 279 278 #define FLOW_KEYS_HASH_START_FIELD basic 280 - struct flow_dissector_key_basic basic; 279 + struct flow_dissector_key_basic basic __aligned(SIPHASH_ALIGNMENT); 281 280 struct flow_dissector_key_tags tags; 282 281 struct flow_dissector_key_vlan vlan; 283 282 struct flow_dissector_key_vlan cvlan;
+1 -1
include/net/fq.h
··· 69 69 struct list_head backlogs; 70 70 spinlock_t lock; 71 71 u32 flows_cnt; 72 - u32 perturbation; 72 + siphash_key_t perturbation; 73 73 u32 limit; 74 74 u32 memory_limit; 75 75 u32 memory_usage;
+2 -2
include/net/fq_impl.h
··· 108 108 109 109 static u32 fq_flow_idx(struct fq *fq, struct sk_buff *skb) 110 110 { 111 - u32 hash = skb_get_hash_perturb(skb, fq->perturbation); 111 + u32 hash = skb_get_hash_perturb(skb, &fq->perturbation); 112 112 113 113 return reciprocal_scale(hash, fq->flows_cnt); 114 114 } ··· 308 308 INIT_LIST_HEAD(&fq->backlogs); 309 309 spin_lock_init(&fq->lock); 310 310 fq->flows_cnt = max_t(u32, flows_cnt, 1); 311 - fq->perturbation = prandom_u32(); 311 + get_random_bytes(&fq->perturbation, sizeof(fq->perturbation)); 312 312 fq->quantum = 300; 313 313 fq->limit = 8192; 314 314 fq->memory_limit = 16 << 20; /* 16 MBytes */
+16 -22
net/core/flow_dissector.c
··· 1350 1350 } 1351 1351 EXPORT_SYMBOL(__skb_flow_dissect); 1352 1352 1353 - static u32 hashrnd __read_mostly; 1353 + static siphash_key_t hashrnd __read_mostly; 1354 1354 static __always_inline void __flow_hash_secret_init(void) 1355 1355 { 1356 1356 net_get_random_once(&hashrnd, sizeof(hashrnd)); 1357 1357 } 1358 1358 1359 - static __always_inline u32 __flow_hash_words(const u32 *words, u32 length, 1360 - u32 keyval) 1359 + static const void *flow_keys_hash_start(const struct flow_keys *flow) 1361 1360 { 1362 - return jhash2(words, length, keyval); 1363 - } 1364 - 1365 - static inline const u32 *flow_keys_hash_start(const struct flow_keys *flow) 1366 - { 1367 - const void *p = flow; 1368 - 1369 - BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % sizeof(u32)); 1370 - return (const u32 *)(p + FLOW_KEYS_HASH_OFFSET); 1361 + BUILD_BUG_ON(FLOW_KEYS_HASH_OFFSET % SIPHASH_ALIGNMENT); 1362 + return &flow->FLOW_KEYS_HASH_START_FIELD; 1371 1363 } 1372 1364 1373 1365 static inline size_t flow_keys_hash_length(const struct flow_keys *flow) 1374 1366 { 1375 1367 size_t diff = FLOW_KEYS_HASH_OFFSET + sizeof(flow->addrs); 1376 - BUILD_BUG_ON((sizeof(*flow) - FLOW_KEYS_HASH_OFFSET) % sizeof(u32)); 1377 1368 BUILD_BUG_ON(offsetof(typeof(*flow), addrs) != 1378 1369 sizeof(*flow) - sizeof(flow->addrs)); ··· 1379 1388 diff -= sizeof(flow->addrs.tipckey); 1380 1389 break; 1381 1390 } 1382 - return (sizeof(*flow) - diff) / sizeof(u32); 1391 + return sizeof(*flow) - diff; 1383 1392 } 1384 1393 1385 1394 __be32 flow_get_u32_src(const struct flow_keys *flow) ··· 1445 1454 } 1446 1455 } 1447 1456 1448 - static inline u32 __flow_hash_from_keys(struct flow_keys *keys, u32 keyval) 1457 + static inline u32 __flow_hash_from_keys(struct flow_keys *keys, 1458 + const siphash_key_t *keyval) 1449 1459 { 1450 1460 u32 hash; 1451 1461 1452 1462 __flow_hash_consistentify(keys); 1453 1463 1454 - hash = __flow_hash_words(flow_keys_hash_start(keys), 1455 - flow_keys_hash_length(keys), keyval); 1464 + hash = siphash(flow_keys_hash_start(keys), 1465 + flow_keys_hash_length(keys), keyval); 1456 1466 if (!hash) 1457 1467 hash = 1; 1458 1468 ··· 1463 1471 u32 flow_hash_from_keys(struct flow_keys *keys) 1464 1472 { 1465 1473 __flow_hash_secret_init(); 1466 - return __flow_hash_from_keys(keys, hashrnd); 1474 + return __flow_hash_from_keys(keys, &hashrnd); 1467 1475 } 1468 1476 EXPORT_SYMBOL(flow_hash_from_keys); 1469 1477 1470 1478 static inline u32 ___skb_get_hash(const struct sk_buff *skb, 1471 - struct flow_keys *keys, u32 keyval) 1479 + struct flow_keys *keys, 1480 + const siphash_key_t *keyval) 1472 1481 { 1473 1482 skb_flow_dissect_flow_keys(skb, keys, 1474 1483 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); ··· 1517 1524 &keys, NULL, 0, 0, 0, 1518 1525 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); 1519 1526 1520 - return __flow_hash_from_keys(&keys, hashrnd); 1527 + return __flow_hash_from_keys(&keys, &hashrnd); 1521 1528 } 1522 1529 EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric); 1523 1530 ··· 1537 1544 1538 1545 __flow_hash_secret_init(); 1539 1546 1540 - hash = ___skb_get_hash(skb, &keys, hashrnd); 1547 + hash = ___skb_get_hash(skb, &keys, &hashrnd); 1541 1548 1542 1549 __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); 1543 1550 } 1544 1551 EXPORT_SYMBOL(__skb_get_hash); 1545 1552 1546 - __u32 skb_get_hash_perturb(const struct sk_buff *skb, u32 perturb) 1553 + __u32 skb_get_hash_perturb(const struct sk_buff *skb, 1554 + const siphash_key_t *perturb) 1547 1555 { 1548 1556 struct flow_keys keys; 1549 1557
+4 -4
net/sched/sch_hhf.c
··· 5 5 * Copyright (C) 2013 Nandita Dukkipati <nanditad@google.com> 6 6 */ 7 7 8 - #include <linux/jhash.h> 9 8 #include <linux/jiffies.h> 10 9 #include <linux/module.h> 11 10 #include <linux/skbuff.h> 12 11 #include <linux/vmalloc.h> 12 + #include <linux/siphash.h> 13 13 #include <net/pkt_sched.h> 14 14 #include <net/sock.h> 15 15 ··· 126 126 127 127 struct hhf_sched_data { 128 128 struct wdrr_bucket buckets[WDRR_BUCKET_CNT]; 129 - u32 perturbation; /* hash perturbation */ 129 + siphash_key_t perturbation; /* hash perturbation */ 130 130 u32 quantum; /* psched_mtu(qdisc_dev(sch)); */ 131 131 u32 drop_overlimit; /* number of times max qdisc packet 132 132 * limit was hit ··· 264 264 } 265 265 266 266 /* Get hashed flow-id of the skb. */ 267 - hash = skb_get_hash_perturb(skb, q->perturbation); 267 + hash = skb_get_hash_perturb(skb, &q->perturbation); 268 268 269 269 /* Check if this packet belongs to an already established HH flow. */ 270 270 flow_pos = hash & HHF_BIT_MASK; ··· 582 582 583 583 sch->limit = 1000; 584 584 q->quantum = psched_mtu(qdisc_dev(sch)); 585 - q->perturbation = prandom_u32(); 585 + get_random_bytes(&q->perturbation, sizeof(q->perturbation)); 586 586 INIT_LIST_HEAD(&q->new_buckets); 587 587 INIT_LIST_HEAD(&q->old_buckets); 588 588
+7 -6
net/sched/sch_sfb.c
··· 18 18 #include <linux/errno.h> 19 19 #include <linux/skbuff.h> 20 20 #include <linux/random.h> 21 - #include <linux/jhash.h> 21 + #include <linux/siphash.h> 22 22 #include <net/ip.h> 23 23 #include <net/pkt_sched.h> 24 24 #include <net/pkt_cls.h> ··· 45 45 * (Section 4.4 of SFB reference : moving hash functions) 46 46 */ 47 47 struct sfb_bins { 48 - u32 perturbation; /* jhash perturbation */ 48 + siphash_key_t perturbation; /* siphash key */ 49 49 struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS]; 50 50 }; 51 51 ··· 217 217 218 218 static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q) 219 219 { 220 - q->bins[slot].perturbation = prandom_u32(); 220 + get_random_bytes(&q->bins[slot].perturbation, 221 + sizeof(q->bins[slot].perturbation)); 221 222 } 222 223 223 224 static void sfb_swap_slot(struct sfb_sched_data *q) ··· 315 314 /* If using external classifiers, get result and record it. */ 316 315 if (!sfb_classify(skb, fl, &ret, &salt)) 317 316 goto other_drop; 318 - sfbhash = jhash_1word(salt, q->bins[slot].perturbation); 317 + sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation); 319 318 } else { 320 - sfbhash = skb_get_hash_perturb(skb, q->bins[slot].perturbation); 319 + sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation); 321 320 } 322 321 323 322 ··· 353 352 /* Inelastic flow */ 354 353 if (q->double_buffering) { 355 354 sfbhash = skb_get_hash_perturb(skb, 356 - q->bins[slot].perturbation); 355 + &q->bins[slot].perturbation); 357 356 if (!sfbhash) 358 357 sfbhash = 1; 359 358 sfb_skb_cb(skb)->hashes[slot] = sfbhash;
+8 -6
net/sched/sch_sfq.c
··· 14 14 #include <linux/errno.h> 15 15 #include <linux/init.h> 16 16 #include <linux/skbuff.h> 17 - #include <linux/jhash.h> 17 + #include <linux/siphash.h> 18 18 #include <linux/slab.h> 19 19 #include <linux/vmalloc.h> 20 20 #include <net/netlink.h> ··· 117 117 u8 headdrop; 118 118 u8 maxdepth; /* limit of packets per flow */ 119 119 120 - u32 perturbation; 120 + siphash_key_t perturbation; 121 121 u8 cur_depth; /* depth of longest slot */ 122 122 u8 flags; 123 123 unsigned short scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */ ··· 157 157 static unsigned int sfq_hash(const struct sfq_sched_data *q, 158 158 const struct sk_buff *skb) 159 159 { 160 - return skb_get_hash_perturb(skb, q->perturbation) & (q->divisor - 1); 160 + return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1); 161 161 } 162 162 163 163 static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch, ··· 607 607 struct sfq_sched_data *q = from_timer(q, t, perturb_timer); 608 608 struct Qdisc *sch = q->sch; 609 609 spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch)); 610 + siphash_key_t nkey; 610 611 612 + get_random_bytes(&nkey, sizeof(nkey)); 611 613 spin_lock(root_lock); 612 - q->perturbation = prandom_u32(); 614 + q->perturbation = nkey; 613 615 if (!q->filter_list && q->tail) 614 616 sfq_rehash(sch); 615 617 spin_unlock(root_lock); ··· 690 688 del_timer(&q->perturb_timer); 691 689 if (q->perturb_period) { 692 690 mod_timer(&q->perturb_timer, jiffies + q->perturb_period); 693 - q->perturbation = prandom_u32(); 691 + get_random_bytes(&q->perturbation, sizeof(q->perturbation)); 694 692 } 695 693 sch_tree_unlock(sch); 696 694 kfree(p); ··· 747 745 q->quantum = psched_mtu(qdisc_dev(sch)); 748 746 q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum); 749 747 q->perturb_period = 0; 750 - q->perturbation = prandom_u32(); 748 + get_random_bytes(&q->perturbation, sizeof(q->perturbation)); 751 749 752 750 if (opt) { 753 751 int err = sfq_change(sch, opt);