Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: hsr: use hlist_head instead of list_head for mac addresses

Currently, HSR manages mac addresses of known HSR nodes by using list_head.
It takes a lot of time when there are a lot of registered nodes due to
finding specific mac address nodes by using linear search. We can
reduce the time by using hlist. Thus, this patch moves list_head to
hlist_head for mac addresses and this allows for further improvement of
network performance.

Condition: registered 10,000 known HSR nodes
Before:
# iperf3 -c 192.168.10.1 -i 1 -t 10
Connecting to host 192.168.10.1, port 5201
[ 5] local 192.168.10.2 port 59442 connected to 192.168.10.1 port 5201
[ ID] Interval Transfer Bitrate Retr Cwnd
[ 5] 0.00-1.49 sec 3.75 MBytes 21.1 Mbits/sec 0 158 KBytes
[ 5] 1.49-2.05 sec 1.25 MBytes 18.7 Mbits/sec 0 166 KBytes
[ 5] 2.05-3.06 sec 2.44 MBytes 20.3 Mbits/sec 56 16.9 KBytes
[ 5] 3.06-4.08 sec 1.43 MBytes 11.7 Mbits/sec 11 38.0 KBytes
[ 5] 4.08-5.00 sec 951 KBytes 8.49 Mbits/sec 0 56.3 KBytes

After:
# iperf3 -c 192.168.10.1 -i 1 -t 10
Connecting to host 192.168.10.1, port 5201
[ 5] local 192.168.10.2 port 36460 connected to 192.168.10.1 port 5201
[ ID] Interval Transfer Bitrate Retr Cwnd
[ 5] 0.00-1.00 sec 7.39 MBytes 62.0 Mbits/sec 3 130 KBytes
[ 5] 1.00-2.00 sec 5.06 MBytes 42.4 Mbits/sec 16 113 KBytes
[ 5] 2.00-3.00 sec 8.58 MBytes 72.0 Mbits/sec 42 94.3 KBytes
[ 5] 3.00-4.00 sec 7.44 MBytes 62.4 Mbits/sec 2 131 KBytes
[ 5] 4.00-5.07 sec 8.13 MBytes 63.5 Mbits/sec 38 92.9 KBytes

Signed-off-by: Juhee Kang <claudiajkang@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

authored by

Juhee Kang and committed by
David S. Miller
4acc45db 5a8fb33e

+166 -102
+23 -15
net/hsr/hsr_debugfs.c
··· 17 17 #include <linux/module.h> 18 18 #include <linux/errno.h> 19 19 #include <linux/debugfs.h> 20 + #include <linux/jhash.h> 20 21 #include "hsr_main.h" 21 22 #include "hsr_framereg.h" 22 23 ··· 29 28 { 30 29 struct hsr_priv *priv = (struct hsr_priv *)sfp->private; 31 30 struct hsr_node *node; 31 + int i; 32 32 33 33 seq_printf(sfp, "Node Table entries for (%s) device\n", 34 34 (priv->prot_version == PRP_V1 ? "PRP" : "HSR")); ··· 41 39 seq_puts(sfp, "DAN-H\n"); 42 40 43 41 rcu_read_lock(); 44 - list_for_each_entry_rcu(node, &priv->node_db, mac_list) { 45 - /* skip self node */ 46 - if (hsr_addr_is_self(priv, node->macaddress_A)) 47 - continue; 48 - seq_printf(sfp, "%pM ", &node->macaddress_A[0]); 49 - seq_printf(sfp, "%pM ", &node->macaddress_B[0]); 50 - seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_A]); 51 - seq_printf(sfp, "%10lx, ", node->time_in[HSR_PT_SLAVE_B]); 52 - seq_printf(sfp, "%14x, ", node->addr_B_port); 53 42 54 - if (priv->prot_version == PRP_V1) 55 - seq_printf(sfp, "%5x, %5x, %5x\n", 56 - node->san_a, node->san_b, 57 - (node->san_a == 0 && node->san_b == 0)); 58 - else 59 - seq_printf(sfp, "%5x\n", 1); 43 + for (i = 0 ; i < priv->hash_buckets; i++) { 44 + hlist_for_each_entry_rcu(node, &priv->node_db[i], mac_list) { 45 + /* skip self node */ 46 + if (hsr_addr_is_self(priv, node->macaddress_A)) 47 + continue; 48 + seq_printf(sfp, "%pM ", &node->macaddress_A[0]); 49 + seq_printf(sfp, "%pM ", &node->macaddress_B[0]); 50 + seq_printf(sfp, "%10lx, ", 51 + node->time_in[HSR_PT_SLAVE_A]); 52 + seq_printf(sfp, "%10lx, ", 53 + node->time_in[HSR_PT_SLAVE_B]); 54 + seq_printf(sfp, "%14x, ", node->addr_B_port); 55 + 56 + if (priv->prot_version == PRP_V1) 57 + seq_printf(sfp, "%5x, %5x, %5x\n", 58 + node->san_a, node->san_b, 59 + (node->san_a == 0 && 60 + node->san_b == 0)); 61 + else 62 + seq_printf(sfp, "%5x\n", 1); 63 + } 60 64 } 61 65 rcu_read_unlock(); 62 66 return 0;
+7 -3
net/hsr/hsr_device.c
··· 485 485 { 486 486 bool unregister = false; 487 487 struct hsr_priv *hsr; 488 - int res; 488 + int res, i; 489 489 490 490 hsr = netdev_priv(hsr_dev); 491 491 INIT_LIST_HEAD(&hsr->ports); 492 - INIT_LIST_HEAD(&hsr->node_db); 493 - INIT_LIST_HEAD(&hsr->self_node_db); 492 + INIT_HLIST_HEAD(&hsr->self_node_db); 493 + hsr->hash_buckets = HSR_HSIZE; 494 + get_random_bytes(&hsr->hash_seed, sizeof(hsr->hash_seed)); 495 + for (i = 0; i < hsr->hash_buckets; i++) 496 + INIT_HLIST_HEAD(&hsr->node_db[i]); 497 + 494 498 spin_lock_init(&hsr->list_lock); 495 499 496 500 eth_hw_addr_set(hsr_dev, slave[0]->dev_addr);
+5 -2
net/hsr/hsr_forward.c
··· 570 570 struct ethhdr *ethhdr; 571 571 __be16 proto; 572 572 int ret; 573 + u32 hash; 573 574 574 575 /* Check if skb contains ethhdr */ 575 576 if (skb->mac_len < sizeof(struct ethhdr)) 576 577 return -EINVAL; 577 578 578 579 memset(frame, 0, sizeof(*frame)); 580 + 581 + ethhdr = (struct ethhdr *)skb_mac_header(skb); 582 + hash = hsr_mac_hash(port->hsr, ethhdr->h_source); 579 583 frame->is_supervision = is_supervision_frame(port->hsr, skb); 580 - frame->node_src = hsr_get_node(port, &hsr->node_db, skb, 584 + frame->node_src = hsr_get_node(port, &hsr->node_db[hash], skb, 581 585 frame->is_supervision, 582 586 port->type); 583 587 if (!frame->node_src) 584 588 return -1; /* Unknown node and !is_supervision, or no mem */ 585 589 586 - ethhdr = (struct ethhdr *)skb_mac_header(skb); 587 590 frame->is_vlan = false; 588 591 proto = ethhdr->h_proto; 589 592
+116 -76
net/hsr/hsr_framereg.c
··· 15 15 #include <linux/etherdevice.h> 16 16 #include <linux/slab.h> 17 17 #include <linux/rculist.h> 18 + #include <linux/jhash.h> 18 19 #include "hsr_main.h" 19 20 #include "hsr_framereg.h" 20 21 #include "hsr_netlink.h" 21 22 22 - /* TODO: use hash lists for mac addresses (linux/jhash.h)? */ 23 + u32 hsr_mac_hash(struct hsr_priv *hsr, const unsigned char *addr) 24 + { 25 + u32 hash = jhash(addr, ETH_ALEN, hsr->hash_seed); 26 + 27 + return reciprocal_scale(hash, hsr->hash_buckets); 28 + } 29 + 30 + struct hsr_node *hsr_node_get_first(struct hlist_head *head) 31 + { 32 + struct hlist_node *first; 33 + 34 + first = rcu_dereference(hlist_first_rcu(head)); 35 + if (first) 36 + return hlist_entry(first, struct hsr_node, mac_list); 37 + 38 + return NULL; 39 + } 23 40 24 41 /* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b, 25 42 * false otherwise. ··· 59 42 { 60 43 struct hsr_node *node; 61 44 62 - node = list_first_or_null_rcu(&hsr->self_node_db, struct hsr_node, 63 - mac_list); 45 + node = hsr_node_get_first(&hsr->self_node_db); 64 46 if (!node) { 65 47 WARN_ONCE(1, "HSR: No self node\n"); 66 48 return false; ··· 75 59 76 60 /* Search for mac entry. Caller must hold rcu read lock. 
77 61 */ 78 - static struct hsr_node *find_node_by_addr_A(struct list_head *node_db, 62 + static struct hsr_node *find_node_by_addr_A(struct hlist_head *node_db, 79 63 const unsigned char addr[ETH_ALEN]) 80 64 { 81 65 struct hsr_node *node; 82 66 83 - list_for_each_entry_rcu(node, node_db, mac_list) { 67 + hlist_for_each_entry_rcu(node, node_db, mac_list) { 84 68 if (ether_addr_equal(node->macaddress_A, addr)) 85 69 return node; 86 70 } ··· 95 79 const unsigned char addr_a[ETH_ALEN], 96 80 const unsigned char addr_b[ETH_ALEN]) 97 81 { 98 - struct list_head *self_node_db = &hsr->self_node_db; 82 + struct hlist_head *self_node_db = &hsr->self_node_db; 99 83 struct hsr_node *node, *oldnode; 100 84 101 85 node = kmalloc(sizeof(*node), GFP_KERNEL); ··· 106 90 ether_addr_copy(node->macaddress_B, addr_b); 107 91 108 92 spin_lock_bh(&hsr->list_lock); 109 - oldnode = list_first_or_null_rcu(self_node_db, 110 - struct hsr_node, mac_list); 93 + oldnode = hsr_node_get_first(self_node_db); 111 94 if (oldnode) { 112 - list_replace_rcu(&oldnode->mac_list, &node->mac_list); 95 + hlist_replace_rcu(&oldnode->mac_list, &node->mac_list); 113 96 spin_unlock_bh(&hsr->list_lock); 114 97 kfree_rcu(oldnode, rcu_head); 115 98 } else { 116 - list_add_tail_rcu(&node->mac_list, self_node_db); 99 + hlist_add_tail_rcu(&node->mac_list, self_node_db); 117 100 spin_unlock_bh(&hsr->list_lock); 118 101 } 119 102 ··· 121 106 122 107 void hsr_del_self_node(struct hsr_priv *hsr) 123 108 { 124 - struct list_head *self_node_db = &hsr->self_node_db; 109 + struct hlist_head *self_node_db = &hsr->self_node_db; 125 110 struct hsr_node *node; 126 111 127 112 spin_lock_bh(&hsr->list_lock); 128 - node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list); 113 + node = hsr_node_get_first(self_node_db); 129 114 if (node) { 130 - list_del_rcu(&node->mac_list); 115 + hlist_del_rcu(&node->mac_list); 131 116 kfree_rcu(node, rcu_head); 132 117 } 133 118 spin_unlock_bh(&hsr->list_lock); 134 119 } 135 120 136 
- void hsr_del_nodes(struct list_head *node_db) 121 + void hsr_del_nodes(struct hlist_head *node_db) 137 122 { 138 123 struct hsr_node *node; 139 - struct hsr_node *tmp; 124 + struct hlist_node *tmp; 140 125 141 - list_for_each_entry_safe(node, tmp, node_db, mac_list) 142 - kfree(node); 126 + hlist_for_each_entry_safe(node, tmp, node_db, mac_list) 127 + kfree_rcu(node, rcu_head); 143 128 } 144 129 145 130 void prp_handle_san_frame(bool san, enum hsr_port_type port, ··· 160 145 * originating from the newly added node. 161 146 */ 162 147 static struct hsr_node *hsr_add_node(struct hsr_priv *hsr, 163 - struct list_head *node_db, 148 + struct hlist_head *node_db, 164 149 unsigned char addr[], 165 150 u16 seq_out, bool san, 166 151 enum hsr_port_type rx_port) ··· 190 175 hsr->proto_ops->handle_san_frame(san, rx_port, new_node); 191 176 192 177 spin_lock_bh(&hsr->list_lock); 193 - list_for_each_entry_rcu(node, node_db, mac_list, 194 - lockdep_is_held(&hsr->list_lock)) { 178 + hlist_for_each_entry_rcu(node, node_db, mac_list, 179 + lockdep_is_held(&hsr->list_lock)) { 195 180 if (ether_addr_equal(node->macaddress_A, addr)) 196 181 goto out; 197 182 if (ether_addr_equal(node->macaddress_B, addr)) 198 183 goto out; 199 184 } 200 - list_add_tail_rcu(&new_node->mac_list, node_db); 185 + hlist_add_tail_rcu(&new_node->mac_list, node_db); 201 186 spin_unlock_bh(&hsr->list_lock); 202 187 return new_node; 203 188 out: ··· 217 202 218 203 /* Get the hsr_node from which 'skb' was sent. 
219 204 */ 220 - struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db, 205 + struct hsr_node *hsr_get_node(struct hsr_port *port, struct hlist_head *node_db, 221 206 struct sk_buff *skb, bool is_sup, 222 207 enum hsr_port_type rx_port) 223 208 { ··· 233 218 234 219 ethhdr = (struct ethhdr *)skb_mac_header(skb); 235 220 236 - list_for_each_entry_rcu(node, node_db, mac_list) { 221 + hlist_for_each_entry_rcu(node, node_db, mac_list) { 237 222 if (ether_addr_equal(node->macaddress_A, ethhdr->h_source)) { 238 223 if (hsr->proto_ops->update_san_info) 239 224 hsr->proto_ops->update_san_info(node, is_sup); ··· 283 268 struct hsr_sup_tlv *hsr_sup_tlv; 284 269 struct hsr_node *node_real; 285 270 struct sk_buff *skb = NULL; 286 - struct list_head *node_db; 271 + struct hlist_head *node_db; 287 272 struct ethhdr *ethhdr; 288 273 int i; 289 274 unsigned int pull_size = 0; 290 275 unsigned int total_pull_size = 0; 276 + u32 hash; 291 277 292 278 /* Here either frame->skb_hsr or frame->skb_prp should be 293 279 * valid as supervision frame always will have protocol ··· 326 310 hsr_sp = (struct hsr_sup_payload *)skb->data; 327 311 328 312 /* Merge node_curr (registered on macaddress_B) into node_real */ 329 - node_db = &port_rcv->hsr->node_db; 330 - node_real = find_node_by_addr_A(node_db, hsr_sp->macaddress_A); 313 + node_db = port_rcv->hsr->node_db; 314 + hash = hsr_mac_hash(hsr, hsr_sp->macaddress_A); 315 + node_real = find_node_by_addr_A(&node_db[hash], hsr_sp->macaddress_A); 331 316 if (!node_real) 332 317 /* No frame received from AddrA of this node yet */ 333 - node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A, 318 + node_real = hsr_add_node(hsr, &node_db[hash], 319 + hsr_sp->macaddress_A, 334 320 HSR_SEQNR_START - 1, true, 335 321 port_rcv->type); 336 322 if (!node_real) ··· 366 348 hsr_sp = (struct hsr_sup_payload *)skb->data; 367 349 368 350 /* Check if redbox mac and node mac are equal. 
*/ 369 - if (!ether_addr_equal(node_real->macaddress_A, hsr_sp->macaddress_A)) { 351 + if (!ether_addr_equal(node_real->macaddress_A, 352 + hsr_sp->macaddress_A)) { 370 353 /* This is a redbox supervision frame for a VDAN! */ 371 354 goto done; 372 355 } ··· 387 368 node_real->addr_B_port = port_rcv->type; 388 369 389 370 spin_lock_bh(&hsr->list_lock); 390 - list_del_rcu(&node_curr->mac_list); 371 + hlist_del_rcu(&node_curr->mac_list); 391 372 spin_unlock_bh(&hsr->list_lock); 392 373 kfree_rcu(node_curr, rcu_head); 393 374 ··· 425 406 struct hsr_port *port) 426 407 { 427 408 struct hsr_node *node_dst; 409 + u32 hash; 428 410 429 411 if (!skb_mac_header_was_set(skb)) { 430 412 WARN_ONCE(1, "%s: Mac header not set\n", __func__); ··· 435 415 if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest)) 436 416 return; 437 417 438 - node_dst = find_node_by_addr_A(&port->hsr->node_db, 418 + hash = hsr_mac_hash(port->hsr, eth_hdr(skb)->h_dest); 419 + node_dst = find_node_by_addr_A(&port->hsr->node_db[hash], 439 420 eth_hdr(skb)->h_dest); 440 421 if (!node_dst) { 441 422 if (net_ratelimit()) ··· 512 491 void hsr_prune_nodes(struct timer_list *t) 513 492 { 514 493 struct hsr_priv *hsr = from_timer(hsr, t, prune_timer); 494 + struct hlist_node *tmp; 515 495 struct hsr_node *node; 516 - struct hsr_node *tmp; 517 496 struct hsr_port *port; 518 497 unsigned long timestamp; 519 498 unsigned long time_a, time_b; 499 + int i; 520 500 521 501 spin_lock_bh(&hsr->list_lock); 522 - list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) { 523 - /* Don't prune own node. Neither time_in[HSR_PT_SLAVE_A] 524 - * nor time_in[HSR_PT_SLAVE_B], will ever be updated for 525 - * the master port. Thus the master node will be repeatedly 526 - * pruned leading to packet loss. 
527 - */ 528 - if (hsr_addr_is_self(hsr, node->macaddress_A)) 529 - continue; 530 502 531 - /* Shorthand */ 532 - time_a = node->time_in[HSR_PT_SLAVE_A]; 533 - time_b = node->time_in[HSR_PT_SLAVE_B]; 503 + for (i = 0; i < hsr->hash_buckets; i++) { 504 + hlist_for_each_entry_safe(node, tmp, &hsr->node_db[i], 505 + mac_list) { 506 + /* Don't prune own node. 507 + * Neither time_in[HSR_PT_SLAVE_A] 508 + * nor time_in[HSR_PT_SLAVE_B], will ever be updated 509 + * for the master port. Thus the master node will be 510 + * repeatedly pruned leading to packet loss. 511 + */ 512 + if (hsr_addr_is_self(hsr, node->macaddress_A)) 513 + continue; 534 514 535 - /* Check for timestamps old enough to risk wrap-around */ 536 - if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2)) 537 - node->time_in_stale[HSR_PT_SLAVE_A] = true; 538 - if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2)) 539 - node->time_in_stale[HSR_PT_SLAVE_B] = true; 515 + /* Shorthand */ 516 + time_a = node->time_in[HSR_PT_SLAVE_A]; 517 + time_b = node->time_in[HSR_PT_SLAVE_B]; 540 518 541 - /* Get age of newest frame from node. 
542 - * At least one time_in is OK here; nodes get pruned long 543 - * before both time_ins can get stale 544 - */ 545 - timestamp = time_a; 546 - if (node->time_in_stale[HSR_PT_SLAVE_A] || 547 - (!node->time_in_stale[HSR_PT_SLAVE_B] && 548 - time_after(time_b, time_a))) 549 - timestamp = time_b; 519 + /* Check for timestamps old enough to 520 + * risk wrap-around 521 + */ 522 + if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET / 2)) 523 + node->time_in_stale[HSR_PT_SLAVE_A] = true; 524 + if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET / 2)) 525 + node->time_in_stale[HSR_PT_SLAVE_B] = true; 550 526 551 - /* Warn of ring error only as long as we get frames at all */ 552 - if (time_is_after_jiffies(timestamp + 553 - msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) { 554 - rcu_read_lock(); 555 - port = get_late_port(hsr, node); 556 - if (port) 557 - hsr_nl_ringerror(hsr, node->macaddress_A, port); 558 - rcu_read_unlock(); 559 - } 527 + /* Get age of newest frame from node. 528 + * At least one time_in is OK here; nodes get pruned 529 + * long before both time_ins can get stale 530 + */ 531 + timestamp = time_a; 532 + if (node->time_in_stale[HSR_PT_SLAVE_A] || 533 + (!node->time_in_stale[HSR_PT_SLAVE_B] && 534 + time_after(time_b, time_a))) 535 + timestamp = time_b; 560 536 561 - /* Prune old entries */ 562 - if (time_is_before_jiffies(timestamp + 563 - msecs_to_jiffies(HSR_NODE_FORGET_TIME))) { 564 - hsr_nl_nodedown(hsr, node->macaddress_A); 565 - list_del_rcu(&node->mac_list); 566 - /* Note that we need to free this entry later: */ 567 - kfree_rcu(node, rcu_head); 537 + /* Warn of ring error only as long as we get 538 + * frames at all 539 + */ 540 + if (time_is_after_jiffies(timestamp + 541 + msecs_to_jiffies(1.5 * MAX_SLAVE_DIFF))) { 542 + rcu_read_lock(); 543 + port = get_late_port(hsr, node); 544 + if (port) 545 + hsr_nl_ringerror(hsr, 546 + node->macaddress_A, 547 + port); 548 + rcu_read_unlock(); 549 + } 550 + 551 + /* Prune old entries */ 552 + if 
(time_is_before_jiffies(timestamp + 553 + msecs_to_jiffies(HSR_NODE_FORGET_TIME))) { 554 + hsr_nl_nodedown(hsr, node->macaddress_A); 555 + hlist_del_rcu(&node->mac_list); 556 + /* Note that we need to free this 557 + * entry later: 558 + */ 559 + kfree_rcu(node, rcu_head); 560 + } 568 561 } 569 562 } 570 563 spin_unlock_bh(&hsr->list_lock); ··· 592 557 unsigned char addr[ETH_ALEN]) 593 558 { 594 559 struct hsr_node *node; 560 + u32 hash; 561 + 562 + hash = hsr_mac_hash(hsr, addr); 595 563 596 564 if (!_pos) { 597 - node = list_first_or_null_rcu(&hsr->node_db, 598 - struct hsr_node, mac_list); 565 + node = hsr_node_get_first(&hsr->node_db[hash]); 599 566 if (node) 600 567 ether_addr_copy(addr, node->macaddress_A); 601 568 return node; 602 569 } 603 570 604 571 node = _pos; 605 - list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) { 572 + hlist_for_each_entry_continue_rcu(node, mac_list) { 606 573 ether_addr_copy(addr, node->macaddress_A); 607 574 return node; 608 575 } ··· 624 587 struct hsr_node *node; 625 588 struct hsr_port *port; 626 589 unsigned long tdiff; 590 + u32 hash; 627 591 628 - node = find_node_by_addr_A(&hsr->node_db, addr); 592 + hash = hsr_mac_hash(hsr, addr); 593 + 594 + node = find_node_by_addr_A(&hsr->node_db[hash], addr); 629 595 if (!node) 630 596 return -ENOENT; 631 597
+5 -3
net/hsr/hsr_framereg.h
··· 28 28 bool is_from_san; 29 29 }; 30 30 31 + u32 hsr_mac_hash(struct hsr_priv *hsr, const unsigned char *addr); 32 + struct hsr_node *hsr_node_get_first(struct hlist_head *head); 31 33 void hsr_del_self_node(struct hsr_priv *hsr); 32 - void hsr_del_nodes(struct list_head *node_db); 33 - struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db, 34 + void hsr_del_nodes(struct hlist_head *node_db); 35 + struct hsr_node *hsr_get_node(struct hsr_port *port, struct hlist_head *node_db, 34 36 struct sk_buff *skb, bool is_sup, 35 37 enum hsr_port_type rx_port); 36 38 void hsr_handle_sup_frame(struct hsr_frame_info *frame); ··· 70 68 void prp_update_san_info(struct hsr_node *node, bool is_sup); 71 69 72 70 struct hsr_node { 73 - struct list_head mac_list; 71 + struct hlist_node mac_list; 74 72 unsigned char macaddress_A[ETH_ALEN]; 75 73 unsigned char macaddress_B[ETH_ALEN]; 76 74 /* Local slave through which AddrB frames are received from this node */
+7 -2
net/hsr/hsr_main.h
··· 63 63 64 64 #define HSR_V1_SUP_LSDUSIZE 52 65 65 66 + #define HSR_HSIZE_SHIFT 8 67 + #define HSR_HSIZE BIT(HSR_HSIZE_SHIFT) 68 + 66 69 /* The helper functions below assumes that 'path' occupies the 4 most 67 70 * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or 68 71 * equivalently, the 4 most significant bits of HSR tag byte 14). ··· 204 201 struct hsr_priv { 205 202 struct rcu_head rcu_head; 206 203 struct list_head ports; 207 - struct list_head node_db; /* Known HSR nodes */ 208 - struct list_head self_node_db; /* MACs of slaves */ 204 + struct hlist_head node_db[HSR_HSIZE]; /* Known HSR nodes */ 205 + struct hlist_head self_node_db; /* MACs of slaves */ 209 206 struct timer_list announce_timer; /* Supervision frame dispatch */ 210 207 struct timer_list prune_timer; 211 208 int announce_count; ··· 215 212 spinlock_t seqnr_lock; /* locking for sequence_nr */ 216 213 spinlock_t list_lock; /* locking for node list */ 217 214 struct hsr_proto_ops *proto_ops; 215 + u32 hash_buckets; 216 + u32 hash_seed; 218 217 #define PRP_LAN_ID 0x5 /* 0x1010 for A and 0x1011 for B. Bit 0 is set 219 218 * based on SLAVE_A or SLAVE_B 220 219 */
+3 -1
net/hsr/hsr_netlink.c
··· 105 105 static void hsr_dellink(struct net_device *dev, struct list_head *head) 106 106 { 107 107 struct hsr_priv *hsr = netdev_priv(dev); 108 + int i; 108 109 109 110 del_timer_sync(&hsr->prune_timer); 110 111 del_timer_sync(&hsr->announce_timer); ··· 114 113 hsr_del_ports(hsr); 115 114 116 115 hsr_del_self_node(hsr); 117 - hsr_del_nodes(&hsr->node_db); 116 + for (i = 0; i < hsr->hash_buckets; i++) 117 + hsr_del_nodes(&hsr->node_db[i]); 118 118 119 119 unregister_netdevice_queue(dev, head); 120 120 }