x86, UV: Modularize BAU send and wait

Streamline the large uv_flush_send_and_wait() function by using
a couple of helper functions.

Also remove some excess comments.

Signed-off-by: Cliff Wickman <cpw@sgi.com>
Cc: gregkh@suse.de
LKML-Reference: <E1OJvNy-0004ay-IH@eag09.americas.sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>

authored by Cliff Wickman and committed by Ingo Molnar f6d8a566 450a007e

+44 -39
-1
arch/x86/include/asm/uv/uv_bau.h
··· 75 #define DESC_STATUS_DESTINATION_TIMEOUT 2 76 #define DESC_STATUS_SOURCE_TIMEOUT 3 77 78 - #define TIMEOUT_DELAY 10 79 /* 80 * delay for 'plugged' timeout retries, in microseconds 81 */
··· 75 #define DESC_STATUS_DESTINATION_TIMEOUT 2 76 #define DESC_STATUS_SOURCE_TIMEOUT 3 77 78 /* 79 * delay for 'plugged' timeout retries, in microseconds 80 */
+44 -38
arch/x86/kernel/tlb_uv.c
··· 485 } 486 487 /* 488 * Completions are taking a very long time due to a congested numalink 489 * network. 490 */ ··· 559 * 560 * Send a broadcast and wait for it to complete. 561 * 562 - * The flush_mask contains the cpus the broadcast is to be sent to, plus 563 * cpus that are on the local uvhub. 564 * 565 * Returns 0 if all flushing represented in the mask was done. ··· 594 &hmaster->active_descriptor_count, 595 hmaster->max_bau_concurrent)); 596 } 597 - 598 while (hmaster->uvhub_quiesce) 599 cpu_relax(); 600 ··· 624 right_shift, this_cpu, bcp, smaster, try); 625 626 if (completion_status == FLUSH_RETRY_PLUGGED) { 627 - /* 628 - * Our retries may be blocked by all destination swack 629 - * resources being consumed, and a timeout pending. In 630 - * that case hardware immediately returns the ERROR 631 - * that looks like a destination timeout. 632 - */ 633 - udelay(bcp->plugged_delay); 634 - bcp->plugged_tries++; 635 - if (bcp->plugged_tries >= bcp->plugsb4reset) { 636 - bcp->plugged_tries = 0; 637 - quiesce_local_uvhub(hmaster); 638 - spin_lock(&hmaster->queue_lock); 639 - uv_reset_with_ipi(&bau_desc->distribution, 640 - this_cpu); 641 - spin_unlock(&hmaster->queue_lock); 642 - end_uvhub_quiesce(hmaster); 643 - bcp->ipi_attempts++; 644 - stat->s_resets_plug++; 645 - } 646 } else if (completion_status == FLUSH_RETRY_TIMEOUT) { 647 - hmaster->max_bau_concurrent = 1; 648 - bcp->timeout_tries++; 649 - udelay(TIMEOUT_DELAY); 650 - if (bcp->timeout_tries >= bcp->timeoutsb4reset) { 651 - bcp->timeout_tries = 0; 652 - quiesce_local_uvhub(hmaster); 653 - spin_lock(&hmaster->queue_lock); 654 - uv_reset_with_ipi(&bau_desc->distribution, 655 - this_cpu); 656 - spin_unlock(&hmaster->queue_lock); 657 - end_uvhub_quiesce(hmaster); 658 - bcp->ipi_attempts++; 659 - stat->s_resets_timeout++; 660 - } 661 } 662 if (bcp->ipi_attempts >= bcp->ipi_reset_limit) { 663 bcp->ipi_attempts = 0; ··· 637 } while ((completion_status == FLUSH_RETRY_PLUGGED) || 638 (completion_status == 
FLUSH_RETRY_TIMEOUT)); 639 time2 = get_cycles(); 640 - 641 bcp->plugged_tries = 0; 642 bcp->timeout_tries = 0; 643 - 644 if ((completion_status == FLUSH_COMPLETE) && 645 (bcp->conseccompletes > bcp->complete_threshold) && 646 (hmaster->max_bau_concurrent < ··· 747 748 bau_desc = bcp->descriptor_base; 749 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; 750 - 751 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 752 753 /* cpu statistics */
··· 485 } 486 487 /* 488 + * Our retries are blocked by all destination swack resources being 489 + * in use, and a timeout is pending. In that case hardware immediately 490 + * returns the ERROR that looks like a destination timeout. 491 + */ 492 + static void 493 + destination_plugged(struct bau_desc *bau_desc, struct bau_control *bcp, 494 + struct bau_control *hmaster, struct ptc_stats *stat) 495 + { 496 + udelay(bcp->plugged_delay); 497 + bcp->plugged_tries++; 498 + if (bcp->plugged_tries >= bcp->plugsb4reset) { 499 + bcp->plugged_tries = 0; 500 + quiesce_local_uvhub(hmaster); 501 + spin_lock(&hmaster->queue_lock); 502 + uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu); 503 + spin_unlock(&hmaster->queue_lock); 504 + end_uvhub_quiesce(hmaster); 505 + bcp->ipi_attempts++; 506 + stat->s_resets_plug++; 507 + } 508 + } 509 + 510 + static void 511 + destination_timeout(struct bau_desc *bau_desc, struct bau_control *bcp, 512 + struct bau_control *hmaster, struct ptc_stats *stat) 513 + { 514 + hmaster->max_bau_concurrent = 1; 515 + bcp->timeout_tries++; 516 + if (bcp->timeout_tries >= bcp->timeoutsb4reset) { 517 + bcp->timeout_tries = 0; 518 + quiesce_local_uvhub(hmaster); 519 + spin_lock(&hmaster->queue_lock); 520 + uv_reset_with_ipi(&bau_desc->distribution, bcp->cpu); 521 + spin_unlock(&hmaster->queue_lock); 522 + end_uvhub_quiesce(hmaster); 523 + bcp->ipi_attempts++; 524 + stat->s_resets_timeout++; 525 + } 526 + } 527 + 528 + /* 529 * Completions are taking a very long time due to a congested numalink 530 * network. 531 */ ··· 518 * 519 * Send a broadcast and wait for it to complete. 520 * 521 + * The flush_mask contains the cpus the broadcast is to be sent to including 522 * cpus that are on the local uvhub. 523 * 524 * Returns 0 if all flushing represented in the mask was done. 
··· 553 &hmaster->active_descriptor_count, 554 hmaster->max_bau_concurrent)); 555 } 556 while (hmaster->uvhub_quiesce) 557 cpu_relax(); 558 ··· 584 right_shift, this_cpu, bcp, smaster, try); 585 586 if (completion_status == FLUSH_RETRY_PLUGGED) { 587 + destination_plugged(bau_desc, bcp, hmaster, stat); 588 } else if (completion_status == FLUSH_RETRY_TIMEOUT) { 589 + destination_timeout(bau_desc, bcp, hmaster, stat); 590 } 591 if (bcp->ipi_attempts >= bcp->ipi_reset_limit) { 592 bcp->ipi_attempts = 0; ··· 628 } while ((completion_status == FLUSH_RETRY_PLUGGED) || 629 (completion_status == FLUSH_RETRY_TIMEOUT)); 630 time2 = get_cycles(); 631 bcp->plugged_tries = 0; 632 bcp->timeout_tries = 0; 633 if ((completion_status == FLUSH_COMPLETE) && 634 (bcp->conseccompletes > bcp->complete_threshold) && 635 (hmaster->max_bau_concurrent < ··· 740 741 bau_desc = bcp->descriptor_base; 742 bau_desc += UV_ITEMS_PER_DESCRIPTOR * bcp->uvhub_cpu; 743 bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE); 744 745 /* cpu statistics */