Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bus: mhi: ep: Add support for suspending and resuming channels

Add support for suspending and resuming the channels in MHI endpoint stack.
The channels will be moved to the suspended state during M3 state
transition and will be resumed during M0 transition.

Reviewed-by: Alex Elder <elder@linaro.org>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Link: https://lore.kernel.org/r/20220405135754.6622-18-manivannan.sadhasivam@linaro.org
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Manivannan Sadhasivam; committed by Greg Kroah-Hartman.
e4b7b5f0 2d945a39

+65
+2
drivers/bus/mhi/ep/internal.h
··· 212 212 int mhi_ep_set_m3_state(struct mhi_ep_cntrl *mhi_cntrl); 213 213 int mhi_ep_set_ready_state(struct mhi_ep_cntrl *mhi_cntrl); 214 214 void mhi_ep_handle_syserr(struct mhi_ep_cntrl *mhi_cntrl); 215 + void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl); 216 + void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl); 215 217 216 218 #endif
+58
drivers/bus/mhi/ep/main.c
··· 1097 1097 } 1098 1098 EXPORT_SYMBOL_GPL(mhi_ep_power_down); 1099 1099
/*
 * mhi_ep_suspend_channels - move every RUNNING channel to SUSPENDED.
 * @mhi_cntrl: MHI endpoint controller whose channels are scanned.
 *
 * Walks all max_chan channels; entries with no client device (mhi_dev == NULL)
 * are skipped. For each running channel, the state field of the cached channel
 * context (ch_ctx_cache[i].chcfg, little-endian on the wire, hence the
 * le32_to_cpu/cpu_to_le32 round trip) is rewritten to SUSPENDED under the
 * per-channel mutex. NOTE(review): only the local context cache is updated
 * here — presumably the host observes this via the shared context memory;
 * confirm against the rest of the EP stack. Called from the M3 state
 * transition path (see the sm.c hunk below).
 */
1100 + void mhi_ep_suspend_channels(struct mhi_ep_cntrl *mhi_cntrl) 1101 + { 1102 + struct mhi_ep_chan *mhi_chan; 1103 + u32 tmp; 1104 + int i; 1105 + 1106 + for (i = 0; i < mhi_cntrl->max_chan; i++) { 1107 + mhi_chan = &mhi_cntrl->mhi_chan[i]; 1108 + 1109 + if (!mhi_chan->mhi_dev) 1110 + continue; 1111 + 1112 + mutex_lock(&mhi_chan->lock); 1113 + /* Skip if the channel is not currently running */ 1114 + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); 1115 + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_RUNNING) { 1116 + mutex_unlock(&mhi_chan->lock); 1117 + continue; 1118 + } 1119 + 1120 + dev_dbg(&mhi_chan->mhi_dev->dev, "Suspending channel\n"); 1121 + /* Set channel state to SUSPENDED */ 1122 + tmp &= ~CHAN_CTX_CHSTATE_MASK; 1123 + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_SUSPENDED); 1124 + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); 1125 + mutex_unlock(&mhi_chan->lock); 1126 + } 1127 + } 1128 +
/*
 * mhi_ep_resume_channels - exact inverse of mhi_ep_suspend_channels:
 * move every SUSPENDED channel back to RUNNING in the cached channel
 * context, under the same per-channel locking. Called from the M0 state
 * transition path when the previous state was M3.
 */
1129 + void mhi_ep_resume_channels(struct mhi_ep_cntrl *mhi_cntrl) 1130 + { 1131 + struct mhi_ep_chan *mhi_chan; 1132 + u32 tmp; 1133 + int i; 1134 + 1135 + for (i = 0; i < mhi_cntrl->max_chan; i++) { 1136 + mhi_chan = &mhi_cntrl->mhi_chan[i]; 1137 + 1138 + if (!mhi_chan->mhi_dev) 1139 + continue; 1140 + 1141 + mutex_lock(&mhi_chan->lock); 1142 + /* Skip if the channel is not currently suspended */ 1143 + tmp = le32_to_cpu(mhi_cntrl->ch_ctx_cache[i].chcfg); 1144 + if (FIELD_GET(CHAN_CTX_CHSTATE_MASK, tmp) != MHI_CH_STATE_SUSPENDED) { 1145 + mutex_unlock(&mhi_chan->lock); 1146 + continue; 1147 + } 1148 + 1149 + dev_dbg(&mhi_chan->mhi_dev->dev, "Resuming channel\n"); 1150 + /* Set channel state to RUNNING */ 1151 + tmp &= ~CHAN_CTX_CHSTATE_MASK; 1152 + tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_RUNNING); 1153 + mhi_cntrl->ch_ctx_cache[i].chcfg = cpu_to_le32(tmp); 1154 + mutex_unlock(&mhi_chan->lock); 1155 + } 1156 + } 1157 + 1100 1158 static void 
mhi_ep_release_device(struct device *dev) 1101 1159 { 1102 1160 struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
+5
drivers/bus/mhi/ep/sm.c
··· 62 62 enum mhi_state old_state; 63 63 int ret; 64 64 65 + /* If MHI is in M3, resume suspended channels */ 65 66 spin_lock_bh(&mhi_cntrl->state_lock); 66 67 old_state = mhi_cntrl->mhi_state; 68 + if (old_state == MHI_STATE_M3) 69 + mhi_ep_resume_channels(mhi_cntrl); 67 70 68 71 ret = mhi_ep_set_mhi_state(mhi_cntrl, MHI_STATE_M0); 69 72 spin_unlock_bh(&mhi_cntrl->state_lock); ··· 108 105 mhi_ep_handle_syserr(mhi_cntrl); 109 106 return ret; 110 107 } 108 + 109 + mhi_ep_suspend_channels(mhi_cntrl); 111 110 112 111 /* Signal host that the device moved to M3 */ 113 112 ret = mhi_ep_send_state_change_event(mhi_cntrl, MHI_STATE_M3);