Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

firmware: arm_scmi: Refactor message response path

Refactor code path waiting for message responses into a dedicated helper
function.

No functional change.

Link: https://lore.kernel.org/r/20211129191156.29322-4-cristian.marussi@arm.com
Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>

Authored by Cristian Marussi and committed by Sudeep Holla
5a731aeb 582730b9

+56 -32
drivers/firmware/arm_scmi/driver.c
··· 739 739 } 740 740 741 741 /** 742 + * scmi_wait_for_message_response - An helper to group all the possible ways of 743 + * waiting for a synchronous message response. 744 + * 745 + * @cinfo: SCMI channel info 746 + * @xfer: Reference to the transfer being waited for. 747 + * 748 + * Chooses waiting strategy (sleep-waiting vs busy-waiting) depending on 749 + * configuration flags like xfer->hdr.poll_completion. 750 + * 751 + * Return: 0 on Success, error otherwise. 752 + */ 753 + static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, 754 + struct scmi_xfer *xfer) 755 + { 756 + struct scmi_info *info = handle_to_scmi_info(cinfo->handle); 757 + struct device *dev = info->dev; 758 + int ret = 0, timeout_ms = info->desc->max_rx_timeout_ms; 759 + 760 + if (xfer->hdr.poll_completion) { 761 + ktime_t stop = ktime_add_ms(ktime_get(), timeout_ms); 762 + 763 + spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); 764 + if (ktime_before(ktime_get(), stop)) { 765 + unsigned long flags; 766 + 767 + /* 768 + * Do not fetch_response if an out-of-order delayed 769 + * response is being processed. 770 + */ 771 + spin_lock_irqsave(&xfer->lock, flags); 772 + if (xfer->state == SCMI_XFER_SENT_OK) { 773 + info->desc->ops->fetch_response(cinfo, xfer); 774 + xfer->state = SCMI_XFER_RESP_OK; 775 + } 776 + spin_unlock_irqrestore(&xfer->lock, flags); 777 + } else { 778 + dev_err(dev, 779 + "timed out in resp(caller: %pS) - polling\n", 780 + (void *)_RET_IP_); 781 + ret = -ETIMEDOUT; 782 + } 783 + } else { 784 + /* And we wait for the response. 
*/ 785 + if (!wait_for_completion_timeout(&xfer->done, 786 + msecs_to_jiffies(timeout_ms))) { 787 + dev_err(dev, "timed out in resp(caller: %pS)\n", 788 + (void *)_RET_IP_); 789 + ret = -ETIMEDOUT; 790 + } 791 + } 792 + 793 + return ret; 794 + } 795 + 796 + /** 742 797 * do_xfer() - Do one transfer 743 798 * 744 799 * @ph: Pointer to SCMI protocol handle ··· 807 752 struct scmi_xfer *xfer) 808 753 { 809 754 int ret; 810 - int timeout; 811 755 const struct scmi_protocol_instance *pi = ph_to_pi(ph); 812 756 struct scmi_info *info = handle_to_scmi_info(pi->handle); 813 757 struct device *dev = info->dev; ··· 850 796 return ret; 851 797 } 852 798 853 - if (xfer->hdr.poll_completion) { 854 - ktime_t stop = ktime_add_ms(ktime_get(), 855 - info->desc->max_rx_timeout_ms); 856 - 857 - spin_until_cond(scmi_xfer_done_no_timeout(cinfo, xfer, stop)); 858 - if (ktime_before(ktime_get(), stop)) { 859 - unsigned long flags; 860 - 861 - /* 862 - * Do not fetch_response if an out-of-order delayed 863 - * response is being processed. 864 - */ 865 - spin_lock_irqsave(&xfer->lock, flags); 866 - if (xfer->state == SCMI_XFER_SENT_OK) { 867 - info->desc->ops->fetch_response(cinfo, xfer); 868 - xfer->state = SCMI_XFER_RESP_OK; 869 - } 870 - spin_unlock_irqrestore(&xfer->lock, flags); 871 - } else { 872 - ret = -ETIMEDOUT; 873 - } 874 - } else { 875 - /* And we wait for the response. */ 876 - timeout = msecs_to_jiffies(info->desc->max_rx_timeout_ms); 877 - if (!wait_for_completion_timeout(&xfer->done, timeout)) { 878 - dev_err(dev, "timed out in resp(caller: %pS)\n", 879 - (void *)_RET_IP_); 880 - ret = -ETIMEDOUT; 881 - } 882 - } 883 - 799 + ret = scmi_wait_for_message_response(cinfo, xfer); 884 800 if (!ret && xfer->hdr.status) 885 801 ret = scmi_to_linux_errno(xfer->hdr.status); 886 802