Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mac80211: annotate and fix RCU in mesh code

This adds proper RCU annotations to the mesh path
table code, and fixes a number of bugs in the code
that I found while checking the sparse warnings I
got as a result of the annotations.

Some things like the changes in mesh_path_add() or
mesh_pathtbl_init() only serve to shut up sparse,
but other changes like the changes surrounding the
for_each_mesh_entry() macro fix real RCU bugs in
the code.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: John W. Linville <linville@tuxdriver.com>

Authored by Johannes Berg and committed by John W. Linville
(parent commit 349eb8cf, commit 1928ecab)

Total diffstat: +103 -57

net/mac80211/mesh.h (-4 lines)
··· 289 289 return sdata->u.mesh.mesh_pp_id == IEEE80211_PATH_PROTOCOL_HWMP; 290 290 } 291 291 292 - #define for_each_mesh_entry(x, p, node, i) \ 293 - for (i = 0; i <= x->hash_mask; i++) \ 294 - hlist_for_each_entry_rcu(node, p, &x->hash_buckets[i], list) 295 - 296 292 void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local); 297 293 298 294 void ieee80211_mesh_quiesce(struct ieee80211_sub_if_data *sdata);
net/mac80211/mesh_pathtbl.c (+103 -53 lines)
··· 36 36 struct mesh_path *mpath; 37 37 }; 38 38 39 - static struct mesh_table *mesh_paths; 40 - static struct mesh_table *mpp_paths; /* Store paths for MPP&MAP */ 39 + static struct mesh_table __rcu *mesh_paths; 40 + static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */ 41 41 42 42 int mesh_paths_generation; 43 43 ··· 46 46 * by RCU 47 47 */ 48 48 static DEFINE_RWLOCK(pathtbl_resize_lock); 49 + 50 + 51 + static inline struct mesh_table *resize_dereference_mesh_paths(void) 52 + { 53 + return rcu_dereference_protected(mesh_paths, 54 + lockdep_is_held(&pathtbl_resize_lock)); 55 + } 56 + 57 + static inline struct mesh_table *resize_dereference_mpp_paths(void) 58 + { 59 + return rcu_dereference_protected(mpp_paths, 60 + lockdep_is_held(&pathtbl_resize_lock)); 61 + } 62 + 63 + /* 64 + * CAREFUL -- "tbl" must not be an expression, 65 + * in particular not an rcu_dereference(), since 66 + * it's used twice. So it is illegal to do 67 + * for_each_mesh_entry(rcu_dereference(...), ...) 
68 + */ 69 + #define for_each_mesh_entry(tbl, p, node, i) \ 70 + for (i = 0; i <= tbl->hash_mask; i++) \ 71 + hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list) 49 72 50 73 51 74 static struct mesh_table *mesh_table_alloc(int size_order) ··· 281 258 */ 282 259 struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata) 283 260 { 261 + struct mesh_table *tbl = rcu_dereference(mesh_paths); 284 262 struct mpath_node *node; 285 263 struct hlist_node *p; 286 264 int i; 287 265 int j = 0; 288 266 289 - for_each_mesh_entry(mesh_paths, p, node, i) { 267 + for_each_mesh_entry(tbl, p, node, i) { 290 268 if (sdata && node->mpath->sdata != sdata) 291 269 continue; 292 270 if (j++ == idx) { ··· 317 293 { 318 294 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 319 295 struct ieee80211_local *local = sdata->local; 296 + struct mesh_table *tbl; 320 297 struct mesh_path *mpath, *new_mpath; 321 298 struct mpath_node *node, *new_node; 322 299 struct hlist_head *bucket; ··· 357 332 spin_lock_init(&new_mpath->state_lock); 358 333 init_timer(&new_mpath->timer); 359 334 360 - hash_idx = mesh_table_hash(dst, sdata, mesh_paths); 361 - bucket = &mesh_paths->hash_buckets[hash_idx]; 335 + tbl = resize_dereference_mesh_paths(); 362 336 363 - spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); 337 + hash_idx = mesh_table_hash(dst, sdata, tbl); 338 + bucket = &tbl->hash_buckets[hash_idx]; 339 + 340 + spin_lock_bh(&tbl->hashwlock[hash_idx]); 364 341 365 342 err = -EEXIST; 366 343 hlist_for_each_entry(node, n, bucket, list) { ··· 372 345 } 373 346 374 347 hlist_add_head_rcu(&new_node->list, bucket); 375 - if (atomic_inc_return(&mesh_paths->entries) >= 376 - mesh_paths->mean_chain_len * (mesh_paths->hash_mask + 1)) 348 + if (atomic_inc_return(&tbl->entries) >= 349 + tbl->mean_chain_len * (tbl->hash_mask + 1)) 377 350 grow = 1; 378 351 379 352 mesh_paths_generation++; 380 353 381 - spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 354 + 
spin_unlock_bh(&tbl->hashwlock[hash_idx]); 382 355 read_unlock_bh(&pathtbl_resize_lock); 383 356 if (grow) { 384 357 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags); ··· 387 360 return 0; 388 361 389 362 err_exists: 390 - spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 363 + spin_unlock_bh(&tbl->hashwlock[hash_idx]); 391 364 read_unlock_bh(&pathtbl_resize_lock); 392 365 kfree(new_node); 393 366 err_node_alloc: ··· 409 382 struct mesh_table *oldtbl, *newtbl; 410 383 411 384 write_lock_bh(&pathtbl_resize_lock); 412 - newtbl = mesh_table_alloc(mesh_paths->size_order + 1); 385 + oldtbl = resize_dereference_mesh_paths(); 386 + newtbl = mesh_table_alloc(oldtbl->size_order + 1); 413 387 if (!newtbl) 414 388 goto out; 415 - oldtbl = mesh_paths; 416 - if (mesh_table_grow(mesh_paths, newtbl) < 0) { 389 + if (mesh_table_grow(oldtbl, newtbl) < 0) { 417 390 __mesh_table_free(newtbl); 418 391 goto out; 419 392 } ··· 430 403 struct mesh_table *oldtbl, *newtbl; 431 404 432 405 write_lock_bh(&pathtbl_resize_lock); 433 - newtbl = mesh_table_alloc(mpp_paths->size_order + 1); 406 + oldtbl = resize_dereference_mpp_paths(); 407 + newtbl = mesh_table_alloc(oldtbl->size_order + 1); 434 408 if (!newtbl) 435 409 goto out; 436 - oldtbl = mpp_paths; 437 - if (mesh_table_grow(mpp_paths, newtbl) < 0) { 410 + if (mesh_table_grow(oldtbl, newtbl) < 0) { 438 411 __mesh_table_free(newtbl); 439 412 goto out; 440 413 } ··· 449 422 { 450 423 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh; 451 424 struct ieee80211_local *local = sdata->local; 425 + struct mesh_table *tbl; 452 426 struct mesh_path *mpath, *new_mpath; 453 427 struct mpath_node *node, *new_node; 454 428 struct hlist_head *bucket; ··· 484 456 new_mpath->exp_time = jiffies; 485 457 spin_lock_init(&new_mpath->state_lock); 486 458 487 - hash_idx = mesh_table_hash(dst, sdata, mpp_paths); 488 - bucket = &mpp_paths->hash_buckets[hash_idx]; 459 + tbl = resize_dereference_mpp_paths(); 489 460 490 - 
spin_lock_bh(&mpp_paths->hashwlock[hash_idx]); 461 + hash_idx = mesh_table_hash(dst, sdata, tbl); 462 + bucket = &tbl->hash_buckets[hash_idx]; 463 + 464 + spin_lock_bh(&tbl->hashwlock[hash_idx]); 491 465 492 466 err = -EEXIST; 493 467 hlist_for_each_entry(node, n, bucket, list) { ··· 499 469 } 500 470 501 471 hlist_add_head_rcu(&new_node->list, bucket); 502 - if (atomic_inc_return(&mpp_paths->entries) >= 503 - mpp_paths->mean_chain_len * (mpp_paths->hash_mask + 1)) 472 + if (atomic_inc_return(&tbl->entries) >= 473 + tbl->mean_chain_len * (tbl->hash_mask + 1)) 504 474 grow = 1; 505 475 506 - spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); 476 + spin_unlock_bh(&tbl->hashwlock[hash_idx]); 507 477 read_unlock_bh(&pathtbl_resize_lock); 508 478 if (grow) { 509 479 set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags); ··· 512 482 return 0; 513 483 514 484 err_exists: 515 - spin_unlock_bh(&mpp_paths->hashwlock[hash_idx]); 485 + spin_unlock_bh(&tbl->hashwlock[hash_idx]); 516 486 read_unlock_bh(&pathtbl_resize_lock); 517 487 kfree(new_node); 518 488 err_node_alloc: ··· 532 502 */ 533 503 void mesh_plink_broken(struct sta_info *sta) 534 504 { 505 + struct mesh_table *tbl; 535 506 static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 536 507 struct mesh_path *mpath; 537 508 struct mpath_node *node; ··· 541 510 int i; 542 511 543 512 rcu_read_lock(); 544 - for_each_mesh_entry(mesh_paths, p, node, i) { 513 + tbl = rcu_dereference(mesh_paths); 514 + for_each_mesh_entry(tbl, p, node, i) { 545 515 mpath = node->mpath; 546 516 spin_lock_bh(&mpath->state_lock); 547 - if (mpath->next_hop == sta && 517 + if (rcu_dereference(mpath->next_hop) == sta && 548 518 mpath->flags & MESH_PATH_ACTIVE && 549 519 !(mpath->flags & MESH_PATH_FIXED)) { 550 520 mpath->flags &= ~MESH_PATH_ACTIVE; ··· 574 542 */ 575 543 void mesh_path_flush_by_nexthop(struct sta_info *sta) 576 544 { 545 + struct mesh_table *tbl; 577 546 struct mesh_path *mpath; 578 547 struct mpath_node *node; 579 548 
struct hlist_node *p; 580 549 int i; 581 550 582 - for_each_mesh_entry(mesh_paths, p, node, i) { 551 + rcu_read_lock(); 552 + tbl = rcu_dereference(mesh_paths); 553 + for_each_mesh_entry(tbl, p, node, i) { 583 554 mpath = node->mpath; 584 - if (mpath->next_hop == sta) 555 + if (rcu_dereference(mpath->next_hop) == sta) 585 556 mesh_path_del(mpath->dst, mpath->sdata); 586 557 } 558 + rcu_read_unlock(); 587 559 } 588 560 589 561 void mesh_path_flush(struct ieee80211_sub_if_data *sdata) 590 562 { 563 + struct mesh_table *tbl; 591 564 struct mesh_path *mpath; 592 565 struct mpath_node *node; 593 566 struct hlist_node *p; 594 567 int i; 595 568 596 - for_each_mesh_entry(mesh_paths, p, node, i) { 569 + rcu_read_lock(); 570 + tbl = rcu_dereference(mesh_paths); 571 + for_each_mesh_entry(tbl, p, node, i) { 597 572 mpath = node->mpath; 598 573 if (mpath->sdata == sdata) 599 574 mesh_path_del(mpath->dst, mpath->sdata); 600 575 } 576 + rcu_read_unlock(); 601 577 } 602 578 603 579 static void mesh_path_node_reclaim(struct rcu_head *rp) ··· 629 589 */ 630 590 int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata) 631 591 { 592 + struct mesh_table *tbl; 632 593 struct mesh_path *mpath; 633 594 struct mpath_node *node; 634 595 struct hlist_head *bucket; ··· 638 597 int err = 0; 639 598 640 599 read_lock_bh(&pathtbl_resize_lock); 641 - hash_idx = mesh_table_hash(addr, sdata, mesh_paths); 642 - bucket = &mesh_paths->hash_buckets[hash_idx]; 600 + tbl = resize_dereference_mesh_paths(); 601 + hash_idx = mesh_table_hash(addr, sdata, tbl); 602 + bucket = &tbl->hash_buckets[hash_idx]; 643 603 644 - spin_lock_bh(&mesh_paths->hashwlock[hash_idx]); 604 + spin_lock_bh(&tbl->hashwlock[hash_idx]); 645 605 hlist_for_each_entry(node, n, bucket, list) { 646 606 mpath = node->mpath; 647 607 if (mpath->sdata == sdata && 648 - memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 608 + memcmp(addr, mpath->dst, ETH_ALEN) == 0) { 649 609 spin_lock_bh(&mpath->state_lock); 650 610 mpath->flags |= 
MESH_PATH_RESOLVING; 651 611 hlist_del_rcu(&node->list); 652 612 call_rcu(&node->rcu, mesh_path_node_reclaim); 653 - atomic_dec(&mesh_paths->entries); 613 + atomic_dec(&tbl->entries); 654 614 spin_unlock_bh(&mpath->state_lock); 655 615 goto enddel; 656 616 } ··· 660 618 err = -ENXIO; 661 619 enddel: 662 620 mesh_paths_generation++; 663 - spin_unlock_bh(&mesh_paths->hashwlock[hash_idx]); 621 + spin_unlock_bh(&tbl->hashwlock[hash_idx]); 664 622 read_unlock_bh(&pathtbl_resize_lock); 665 623 return err; 666 624 } ··· 789 747 790 748 int mesh_pathtbl_init(void) 791 749 { 792 - mesh_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 793 - if (!mesh_paths) 794 - return -ENOMEM; 795 - mesh_paths->free_node = &mesh_path_node_free; 796 - mesh_paths->copy_node = &mesh_path_node_copy; 797 - mesh_paths->mean_chain_len = MEAN_CHAIN_LEN; 750 + struct mesh_table *tbl_path, *tbl_mpp; 798 751 799 - mpp_paths = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 800 - if (!mpp_paths) { 801 - mesh_table_free(mesh_paths, true); 752 + tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 753 + if (!tbl_path) 754 + return -ENOMEM; 755 + tbl_path->free_node = &mesh_path_node_free; 756 + tbl_path->copy_node = &mesh_path_node_copy; 757 + tbl_path->mean_chain_len = MEAN_CHAIN_LEN; 758 + 759 + tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER); 760 + if (!tbl_mpp) { 761 + mesh_table_free(tbl_path, true); 802 762 return -ENOMEM; 803 763 } 804 - mpp_paths->free_node = &mesh_path_node_free; 805 - mpp_paths->copy_node = &mesh_path_node_copy; 806 - mpp_paths->mean_chain_len = MEAN_CHAIN_LEN; 764 + tbl_mpp->free_node = &mesh_path_node_free; 765 + tbl_mpp->copy_node = &mesh_path_node_copy; 766 + tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN; 767 + 768 + /* Need no locking since this is during init */ 769 + RCU_INIT_POINTER(mesh_paths, tbl_path); 770 + RCU_INIT_POINTER(mpp_paths, tbl_mpp); 807 771 808 772 return 0; 809 773 } 810 774 811 775 void mesh_path_expire(struct ieee80211_sub_if_data *sdata) 812 776 { 777 + struct 
mesh_table *tbl; 813 778 struct mesh_path *mpath; 814 779 struct mpath_node *node; 815 780 struct hlist_node *p; 816 781 int i; 817 782 818 - read_lock_bh(&pathtbl_resize_lock); 819 - for_each_mesh_entry(mesh_paths, p, node, i) { 783 + rcu_read_lock(); 784 + tbl = rcu_dereference(mesh_paths); 785 + for_each_mesh_entry(tbl, p, node, i) { 820 786 if (node->mpath->sdata != sdata) 821 787 continue; 822 788 mpath = node->mpath; 823 789 spin_lock_bh(&mpath->state_lock); 824 790 if ((!(mpath->flags & MESH_PATH_RESOLVING)) && 825 791 (!(mpath->flags & MESH_PATH_FIXED)) && 826 - time_after(jiffies, 827 - mpath->exp_time + MESH_PATH_EXPIRE)) { 792 + time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE)) { 828 793 spin_unlock_bh(&mpath->state_lock); 829 794 mesh_path_del(mpath->dst, mpath->sdata); 830 795 } else 831 796 spin_unlock_bh(&mpath->state_lock); 832 797 } 833 - read_unlock_bh(&pathtbl_resize_lock); 798 + rcu_read_unlock(); 834 799 } 835 800 836 801 void mesh_pathtbl_unregister(void) 837 802 { 838 - mesh_table_free(mesh_paths, true); 839 - mesh_table_free(mpp_paths, true); 803 + /* no need for locking during exit path */ 804 + mesh_table_free(rcu_dereference_raw(mesh_paths), true); 805 + mesh_table_free(rcu_dereference_raw(mpp_paths), true); 840 806 }