Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ALSA: xen: Use guard() for mutex locks

Replace the manual mutex lock/unlock pairs with guard() for code
simplification.

This is only code refactoring; there is no behavior change.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
Link: https://patch.msgid.link/20250829151335.7342-9-tiwai@suse.de

+52 -61
+37 -43
sound/xen/xen_snd_front.c
··· 62 62 struct xensnd_req *req; 63 63 int ret; 64 64 65 - mutex_lock(&evtchnl->u.req.req_io_lock); 65 + guard(mutex)(&evtchnl->u.req.req_io_lock); 66 66 67 - mutex_lock(&evtchnl->ring_io_lock); 68 - req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY); 69 - req->op.hw_param = *hw_param_req; 70 - mutex_unlock(&evtchnl->ring_io_lock); 67 + scoped_guard(mutex, &evtchnl->ring_io_lock) { 68 + req = be_stream_prepare_req(evtchnl, XENSND_OP_HW_PARAM_QUERY); 69 + req->op.hw_param = *hw_param_req; 70 + } 71 71 72 72 ret = be_stream_do_io(evtchnl); 73 73 ··· 77 77 if (ret == 0) 78 78 *hw_param_resp = evtchnl->u.req.resp.hw_param; 79 79 80 - mutex_unlock(&evtchnl->u.req.req_io_lock); 81 80 return ret; 82 81 } 83 82 ··· 89 90 struct xensnd_req *req; 90 91 int ret; 91 92 92 - mutex_lock(&evtchnl->u.req.req_io_lock); 93 + guard(mutex)(&evtchnl->u.req.req_io_lock); 93 94 94 - mutex_lock(&evtchnl->ring_io_lock); 95 - req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN); 96 - req->op.open.pcm_format = format; 97 - req->op.open.pcm_channels = channels; 98 - req->op.open.pcm_rate = rate; 99 - req->op.open.buffer_sz = buffer_sz; 100 - req->op.open.period_sz = period_sz; 101 - req->op.open.gref_directory = 102 - xen_front_pgdir_shbuf_get_dir_start(shbuf); 103 - mutex_unlock(&evtchnl->ring_io_lock); 95 + scoped_guard(mutex, &evtchnl->ring_io_lock) { 96 + req = be_stream_prepare_req(evtchnl, XENSND_OP_OPEN); 97 + req->op.open.pcm_format = format; 98 + req->op.open.pcm_channels = channels; 99 + req->op.open.pcm_rate = rate; 100 + req->op.open.buffer_sz = buffer_sz; 101 + req->op.open.period_sz = period_sz; 102 + req->op.open.gref_directory = 103 + xen_front_pgdir_shbuf_get_dir_start(shbuf); 104 + } 104 105 105 106 ret = be_stream_do_io(evtchnl); 106 107 107 108 if (ret == 0) 108 109 ret = be_stream_wait_io(evtchnl); 109 110 110 - mutex_unlock(&evtchnl->u.req.req_io_lock); 111 111 return ret; 112 112 } 113 113 ··· 115 117 __always_unused struct xensnd_req *req; 116 118 int ret; 
117 119 118 - mutex_lock(&evtchnl->u.req.req_io_lock); 120 + guard(mutex)(&evtchnl->u.req.req_io_lock); 119 121 120 - mutex_lock(&evtchnl->ring_io_lock); 121 - req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE); 122 - mutex_unlock(&evtchnl->ring_io_lock); 122 + scoped_guard(mutex, &evtchnl->ring_io_lock) { 123 + req = be_stream_prepare_req(evtchnl, XENSND_OP_CLOSE); 124 + } 123 125 124 126 ret = be_stream_do_io(evtchnl); 125 127 126 128 if (ret == 0) 127 129 ret = be_stream_wait_io(evtchnl); 128 130 129 - mutex_unlock(&evtchnl->u.req.req_io_lock); 130 131 return ret; 131 132 } 132 133 ··· 135 138 struct xensnd_req *req; 136 139 int ret; 137 140 138 - mutex_lock(&evtchnl->u.req.req_io_lock); 141 + guard(mutex)(&evtchnl->u.req.req_io_lock); 139 142 140 - mutex_lock(&evtchnl->ring_io_lock); 141 - req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE); 142 - req->op.rw.length = count; 143 - req->op.rw.offset = pos; 144 - mutex_unlock(&evtchnl->ring_io_lock); 143 + scoped_guard(mutex, &evtchnl->ring_io_lock) { 144 + req = be_stream_prepare_req(evtchnl, XENSND_OP_WRITE); 145 + req->op.rw.length = count; 146 + req->op.rw.offset = pos; 147 + } 145 148 146 149 ret = be_stream_do_io(evtchnl); 147 150 148 151 if (ret == 0) 149 152 ret = be_stream_wait_io(evtchnl); 150 153 151 - mutex_unlock(&evtchnl->u.req.req_io_lock); 152 154 return ret; 153 155 } 154 156 ··· 157 161 struct xensnd_req *req; 158 162 int ret; 159 163 160 - mutex_lock(&evtchnl->u.req.req_io_lock); 164 + guard(mutex)(&evtchnl->u.req.req_io_lock); 161 165 162 - mutex_lock(&evtchnl->ring_io_lock); 163 - req = be_stream_prepare_req(evtchnl, XENSND_OP_READ); 164 - req->op.rw.length = count; 165 - req->op.rw.offset = pos; 166 - mutex_unlock(&evtchnl->ring_io_lock); 166 + scoped_guard(mutex, &evtchnl->ring_io_lock) { 167 + req = be_stream_prepare_req(evtchnl, XENSND_OP_READ); 168 + req->op.rw.length = count; 169 + req->op.rw.offset = pos; 170 + } 167 171 168 172 ret = be_stream_do_io(evtchnl); 169 173 170 174 if 
(ret == 0) 171 175 ret = be_stream_wait_io(evtchnl); 172 176 173 - mutex_unlock(&evtchnl->u.req.req_io_lock); 174 177 return ret; 175 178 } 176 179 ··· 179 184 struct xensnd_req *req; 180 185 int ret; 181 186 182 - mutex_lock(&evtchnl->u.req.req_io_lock); 187 + guard(mutex)(&evtchnl->u.req.req_io_lock); 183 188 184 - mutex_lock(&evtchnl->ring_io_lock); 185 - req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER); 186 - req->op.trigger.type = type; 187 - mutex_unlock(&evtchnl->ring_io_lock); 189 + scoped_guard(mutex, &evtchnl->ring_io_lock) { 190 + req = be_stream_prepare_req(evtchnl, XENSND_OP_TRIGGER); 191 + req->op.trigger.type = type; 192 + } 188 193 189 194 ret = be_stream_do_io(evtchnl); 190 195 191 196 if (ret == 0) 192 197 ret = be_stream_wait_io(evtchnl); 193 198 194 - mutex_unlock(&evtchnl->u.req.req_io_lock); 195 199 return ret; 196 200 } 197 201
+15 -18
sound/xen/xen_snd_front_evtchnl.c
··· 28 28 if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED)) 29 29 return IRQ_HANDLED; 30 30 31 - mutex_lock(&channel->ring_io_lock); 31 + guard(mutex)(&channel->ring_io_lock); 32 32 33 33 again: 34 34 rp = channel->u.req.ring.sring->rsp_prod; ··· 80 80 channel->u.req.ring.sring->rsp_event = i + 1; 81 81 } 82 82 83 - mutex_unlock(&channel->ring_io_lock); 84 83 return IRQ_HANDLED; 85 84 } 86 85 ··· 92 93 if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED)) 93 94 return IRQ_HANDLED; 94 95 95 - mutex_lock(&channel->ring_io_lock); 96 + guard(mutex)(&channel->ring_io_lock); 96 97 97 98 prod = page->in_prod; 98 99 /* Ensure we see ring contents up to prod. */ 99 100 virt_rmb(); 100 101 if (prod == page->in_cons) 101 - goto out; 102 + return IRQ_HANDLED; 102 103 103 104 /* 104 105 * Assume that the backend is trusted to always write sane values ··· 124 125 /* Ensure ring contents. */ 125 126 virt_wmb(); 126 127 127 - out: 128 - mutex_unlock(&channel->ring_io_lock); 129 128 return IRQ_HANDLED; 130 129 } 131 130 ··· 441 444 else 442 445 state = EVTCHNL_STATE_DISCONNECTED; 443 446 444 - mutex_lock(&evt_pair->req.ring_io_lock); 445 - evt_pair->req.state = state; 446 - mutex_unlock(&evt_pair->req.ring_io_lock); 447 + scoped_guard(mutex, &evt_pair->req.ring_io_lock) { 448 + evt_pair->req.state = state; 449 + } 447 450 448 - mutex_lock(&evt_pair->evt.ring_io_lock); 449 - evt_pair->evt.state = state; 450 - mutex_unlock(&evt_pair->evt.ring_io_lock); 451 + scoped_guard(mutex, &evt_pair->evt.ring_io_lock) { 452 + evt_pair->evt.state = state; 453 + } 451 454 } 452 455 453 456 void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair) 454 457 { 455 - mutex_lock(&evt_pair->req.ring_io_lock); 456 - evt_pair->req.evt_next_id = 0; 457 - mutex_unlock(&evt_pair->req.ring_io_lock); 458 + scoped_guard(mutex, &evt_pair->req.ring_io_lock) { 459 + evt_pair->req.evt_next_id = 0; 460 + } 458 461 459 - mutex_lock(&evt_pair->evt.ring_io_lock); 460 - 
evt_pair->evt.evt_next_id = 0; 461 - mutex_unlock(&evt_pair->evt.ring_io_lock); 462 + scoped_guard(mutex, &evt_pair->evt.ring_io_lock) { 463 + evt_pair->evt.evt_next_id = 0; 464 + } 462 465 } 463 466