Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ALSA: xen-front: Implement Xen event channel handling

Handle Xen event channels:
- create for all configured streams and publish
corresponding ring references and event channels in Xen store,
so backend can connect
- implement event channels interrupt handlers
- create and destroy event channels with respect to Xen bus state

Signed-off-by: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
Signed-off-by: Takashi Iwai <tiwai@suse.de>

Authored by Oleksandr Andrushchenko; committed by Takashi Iwai.
788ef64a fd3b3604

+604 -2
+2 -1
sound/xen/Makefile
# SPDX-License-Identifier: GPL-2.0 OR MIT

# Objects composing the Xen para-virtual sound frontend module.
snd_xen_front-objs := xen_snd_front.o \
		      xen_snd_front_cfg.o \
		      xen_snd_front_evtchnl.o

obj-$(CONFIG_SND_XEN_FRONTEND) += snd_xen_front.o
+8 -1
sound/xen/xen_snd_front.c
··· 18 18 #include <xen/interface/io/sndif.h> 19 19 20 20 #include "xen_snd_front.h" 21 + #include "xen_snd_front_evtchnl.h" 21 22 22 23 static void xen_snd_drv_fini(struct xen_snd_front_info *front_info) 23 24 { 25 + xen_snd_front_evtchnl_free_all(front_info); 24 26 } 25 27 26 28 static int sndback_initwait(struct xen_snd_front_info *front_info) ··· 34 32 if (ret < 0) 35 33 return ret; 36 34 37 - return 0; 35 + /* create event channels for all streams and publish */ 36 + ret = xen_snd_front_evtchnl_create_all(front_info, num_streams); 37 + if (ret < 0) 38 + return ret; 39 + 40 + return xen_snd_front_evtchnl_publish_all(front_info); 38 41 } 39 42 40 43 static int sndback_connect(struct xen_snd_front_info *front_info)
+5
sound/xen/xen_snd_front.h
··· 13 13 14 14 #include "xen_snd_front_cfg.h" 15 15 16 + struct xen_snd_front_evtchnl_pair; 17 + 16 18 struct xen_snd_front_info { 17 19 struct xenbus_device *xb_dev; 20 + 21 + int num_evt_pairs; 22 + struct xen_snd_front_evtchnl_pair *evt_pairs; 18 23 19 24 struct xen_front_cfg_card cfg; 20 25 };
+494
sound/xen/xen_snd_front_evtchnl.c
··· 1 + // SPDX-License-Identifier: GPL-2.0 OR MIT 2 + 3 + /* 4 + * Xen para-virtual sound device 5 + * 6 + * Copyright (C) 2016-2018 EPAM Systems Inc. 7 + * 8 + * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> 9 + */ 10 + 11 + #include <xen/events.h> 12 + #include <xen/grant_table.h> 13 + #include <xen/xen.h> 14 + #include <xen/xenbus.h> 15 + 16 + #include "xen_snd_front.h" 17 + #include "xen_snd_front_cfg.h" 18 + #include "xen_snd_front_evtchnl.h" 19 + 20 + static irqreturn_t evtchnl_interrupt_req(int irq, void *dev_id) 21 + { 22 + struct xen_snd_front_evtchnl *channel = dev_id; 23 + struct xen_snd_front_info *front_info = channel->front_info; 24 + struct xensnd_resp *resp; 25 + RING_IDX i, rp; 26 + 27 + if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED)) 28 + return IRQ_HANDLED; 29 + 30 + mutex_lock(&channel->ring_io_lock); 31 + 32 + again: 33 + rp = channel->u.req.ring.sring->rsp_prod; 34 + /* Ensure we see queued responses up to rp. */ 35 + rmb(); 36 + 37 + /* 38 + * Assume that the backend is trusted to always write sane values 39 + * to the ring counters, so no overflow checks on frontend side 40 + * are required. 
41 + */ 42 + for (i = channel->u.req.ring.rsp_cons; i != rp; i++) { 43 + resp = RING_GET_RESPONSE(&channel->u.req.ring, i); 44 + if (resp->id != channel->evt_id) 45 + continue; 46 + switch (resp->operation) { 47 + case XENSND_OP_OPEN: 48 + /* fall through */ 49 + case XENSND_OP_CLOSE: 50 + /* fall through */ 51 + case XENSND_OP_READ: 52 + /* fall through */ 53 + case XENSND_OP_WRITE: 54 + /* fall through */ 55 + case XENSND_OP_TRIGGER: 56 + channel->u.req.resp_status = resp->status; 57 + complete(&channel->u.req.completion); 58 + break; 59 + case XENSND_OP_HW_PARAM_QUERY: 60 + channel->u.req.resp_status = resp->status; 61 + channel->u.req.resp.hw_param = 62 + resp->resp.hw_param; 63 + complete(&channel->u.req.completion); 64 + break; 65 + 66 + default: 67 + dev_err(&front_info->xb_dev->dev, 68 + "Operation %d is not supported\n", 69 + resp->operation); 70 + break; 71 + } 72 + } 73 + 74 + channel->u.req.ring.rsp_cons = i; 75 + if (i != channel->u.req.ring.req_prod_pvt) { 76 + int more_to_do; 77 + 78 + RING_FINAL_CHECK_FOR_RESPONSES(&channel->u.req.ring, 79 + more_to_do); 80 + if (more_to_do) 81 + goto again; 82 + } else { 83 + channel->u.req.ring.sring->rsp_event = i + 1; 84 + } 85 + 86 + mutex_unlock(&channel->ring_io_lock); 87 + return IRQ_HANDLED; 88 + } 89 + 90 + static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id) 91 + { 92 + struct xen_snd_front_evtchnl *channel = dev_id; 93 + struct xensnd_event_page *page = channel->u.evt.page; 94 + u32 cons, prod; 95 + 96 + if (unlikely(channel->state != EVTCHNL_STATE_CONNECTED)) 97 + return IRQ_HANDLED; 98 + 99 + mutex_lock(&channel->ring_io_lock); 100 + 101 + prod = page->in_prod; 102 + /* Ensure we see ring contents up to prod. */ 103 + virt_rmb(); 104 + if (prod == page->in_cons) 105 + goto out; 106 + 107 + /* 108 + * Assume that the backend is trusted to always write sane values 109 + * to the ring counters, so no overflow checks on frontend side 110 + * are required. 
111 + */ 112 + for (cons = page->in_cons; cons != prod; cons++) { 113 + struct xensnd_evt *event; 114 + 115 + event = &XENSND_IN_RING_REF(page, cons); 116 + if (unlikely(event->id != channel->evt_id++)) 117 + continue; 118 + 119 + switch (event->type) { 120 + case XENSND_EVT_CUR_POS: 121 + /* Do nothing at the moment. */ 122 + break; 123 + } 124 + } 125 + 126 + page->in_cons = cons; 127 + /* Ensure ring contents. */ 128 + virt_wmb(); 129 + 130 + out: 131 + mutex_unlock(&channel->ring_io_lock); 132 + return IRQ_HANDLED; 133 + } 134 + 135 + void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *channel) 136 + { 137 + int notify; 138 + 139 + channel->u.req.ring.req_prod_pvt++; 140 + RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&channel->u.req.ring, notify); 141 + if (notify) 142 + notify_remote_via_irq(channel->irq); 143 + } 144 + 145 + static void evtchnl_free(struct xen_snd_front_info *front_info, 146 + struct xen_snd_front_evtchnl *channel) 147 + { 148 + unsigned long page = 0; 149 + 150 + if (channel->type == EVTCHNL_TYPE_REQ) 151 + page = (unsigned long)channel->u.req.ring.sring; 152 + else if (channel->type == EVTCHNL_TYPE_EVT) 153 + page = (unsigned long)channel->u.evt.page; 154 + 155 + if (!page) 156 + return; 157 + 158 + channel->state = EVTCHNL_STATE_DISCONNECTED; 159 + if (channel->type == EVTCHNL_TYPE_REQ) { 160 + /* Release all who still waits for response if any. */ 161 + channel->u.req.resp_status = -EIO; 162 + complete_all(&channel->u.req.completion); 163 + } 164 + 165 + if (channel->irq) 166 + unbind_from_irqhandler(channel->irq, channel); 167 + 168 + if (channel->port) 169 + xenbus_free_evtchn(front_info->xb_dev, channel->port); 170 + 171 + /* End access and free the page. 
*/ 172 + if (channel->gref != GRANT_INVALID_REF) 173 + gnttab_end_foreign_access(channel->gref, 0, page); 174 + else 175 + free_page(page); 176 + 177 + memset(channel, 0, sizeof(*channel)); 178 + } 179 + 180 + void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info) 181 + { 182 + int i; 183 + 184 + if (!front_info->evt_pairs) 185 + return; 186 + 187 + for (i = 0; i < front_info->num_evt_pairs; i++) { 188 + evtchnl_free(front_info, &front_info->evt_pairs[i].req); 189 + evtchnl_free(front_info, &front_info->evt_pairs[i].evt); 190 + } 191 + 192 + kfree(front_info->evt_pairs); 193 + front_info->evt_pairs = NULL; 194 + } 195 + 196 + static int evtchnl_alloc(struct xen_snd_front_info *front_info, int index, 197 + struct xen_snd_front_evtchnl *channel, 198 + enum xen_snd_front_evtchnl_type type) 199 + { 200 + struct xenbus_device *xb_dev = front_info->xb_dev; 201 + unsigned long page; 202 + grant_ref_t gref; 203 + irq_handler_t handler; 204 + char *handler_name = NULL; 205 + int ret; 206 + 207 + memset(channel, 0, sizeof(*channel)); 208 + channel->type = type; 209 + channel->index = index; 210 + channel->front_info = front_info; 211 + channel->state = EVTCHNL_STATE_DISCONNECTED; 212 + channel->gref = GRANT_INVALID_REF; 213 + page = get_zeroed_page(GFP_KERNEL); 214 + if (!page) { 215 + ret = -ENOMEM; 216 + goto fail; 217 + } 218 + 219 + handler_name = kasprintf(GFP_KERNEL, "%s-%s", XENSND_DRIVER_NAME, 220 + type == EVTCHNL_TYPE_REQ ? 
221 + XENSND_FIELD_RING_REF : 222 + XENSND_FIELD_EVT_RING_REF); 223 + if (!handler_name) { 224 + ret = -ENOMEM; 225 + goto fail; 226 + } 227 + 228 + mutex_init(&channel->ring_io_lock); 229 + 230 + if (type == EVTCHNL_TYPE_REQ) { 231 + struct xen_sndif_sring *sring = (struct xen_sndif_sring *)page; 232 + 233 + init_completion(&channel->u.req.completion); 234 + mutex_init(&channel->u.req.req_io_lock); 235 + SHARED_RING_INIT(sring); 236 + FRONT_RING_INIT(&channel->u.req.ring, sring, XEN_PAGE_SIZE); 237 + 238 + ret = xenbus_grant_ring(xb_dev, sring, 1, &gref); 239 + if (ret < 0) { 240 + channel->u.req.ring.sring = NULL; 241 + goto fail; 242 + } 243 + 244 + handler = evtchnl_interrupt_req; 245 + } else { 246 + ret = gnttab_grant_foreign_access(xb_dev->otherend_id, 247 + virt_to_gfn((void *)page), 0); 248 + if (ret < 0) 249 + goto fail; 250 + 251 + channel->u.evt.page = (struct xensnd_event_page *)page; 252 + gref = ret; 253 + handler = evtchnl_interrupt_evt; 254 + } 255 + 256 + channel->gref = gref; 257 + 258 + ret = xenbus_alloc_evtchn(xb_dev, &channel->port); 259 + if (ret < 0) 260 + goto fail; 261 + 262 + ret = bind_evtchn_to_irq(channel->port); 263 + if (ret < 0) { 264 + dev_err(&xb_dev->dev, 265 + "Failed to bind IRQ for domid %d port %d: %d\n", 266 + front_info->xb_dev->otherend_id, channel->port, ret); 267 + goto fail; 268 + } 269 + 270 + channel->irq = ret; 271 + 272 + ret = request_threaded_irq(channel->irq, NULL, handler, 273 + IRQF_ONESHOT, handler_name, channel); 274 + if (ret < 0) { 275 + dev_err(&xb_dev->dev, "Failed to request IRQ %d: %d\n", 276 + channel->irq, ret); 277 + goto fail; 278 + } 279 + 280 + kfree(handler_name); 281 + return 0; 282 + 283 + fail: 284 + if (page) 285 + free_page(page); 286 + kfree(handler_name); 287 + dev_err(&xb_dev->dev, "Failed to allocate ring: %d\n", ret); 288 + return ret; 289 + } 290 + 291 + int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info, 292 + int num_streams) 293 + { 294 + struct 
xen_front_cfg_card *cfg = &front_info->cfg; 295 + struct device *dev = &front_info->xb_dev->dev; 296 + int d, ret = 0; 297 + 298 + front_info->evt_pairs = 299 + kcalloc(num_streams, 300 + sizeof(struct xen_snd_front_evtchnl_pair), 301 + GFP_KERNEL); 302 + if (!front_info->evt_pairs) 303 + return -ENOMEM; 304 + 305 + /* Iterate over devices and their streams and create event channels. */ 306 + for (d = 0; d < cfg->num_pcm_instances; d++) { 307 + struct xen_front_cfg_pcm_instance *pcm_instance; 308 + int s, index; 309 + 310 + pcm_instance = &cfg->pcm_instances[d]; 311 + 312 + for (s = 0; s < pcm_instance->num_streams_pb; s++) { 313 + index = pcm_instance->streams_pb[s].index; 314 + 315 + ret = evtchnl_alloc(front_info, index, 316 + &front_info->evt_pairs[index].req, 317 + EVTCHNL_TYPE_REQ); 318 + if (ret < 0) { 319 + dev_err(dev, "Error allocating control channel\n"); 320 + goto fail; 321 + } 322 + 323 + ret = evtchnl_alloc(front_info, index, 324 + &front_info->evt_pairs[index].evt, 325 + EVTCHNL_TYPE_EVT); 326 + if (ret < 0) { 327 + dev_err(dev, "Error allocating in-event channel\n"); 328 + goto fail; 329 + } 330 + } 331 + 332 + for (s = 0; s < pcm_instance->num_streams_cap; s++) { 333 + index = pcm_instance->streams_cap[s].index; 334 + 335 + ret = evtchnl_alloc(front_info, index, 336 + &front_info->evt_pairs[index].req, 337 + EVTCHNL_TYPE_REQ); 338 + if (ret < 0) { 339 + dev_err(dev, "Error allocating control channel\n"); 340 + goto fail; 341 + } 342 + 343 + ret = evtchnl_alloc(front_info, index, 344 + &front_info->evt_pairs[index].evt, 345 + EVTCHNL_TYPE_EVT); 346 + if (ret < 0) { 347 + dev_err(dev, "Error allocating in-event channel\n"); 348 + goto fail; 349 + } 350 + } 351 + } 352 + if (ret < 0) 353 + goto fail; 354 + 355 + front_info->num_evt_pairs = num_streams; 356 + return 0; 357 + 358 + fail: 359 + xen_snd_front_evtchnl_free_all(front_info); 360 + return ret; 361 + } 362 + 363 + static int evtchnl_publish(struct xenbus_transaction xbt, 364 + struct 
xen_snd_front_evtchnl *channel, 365 + const char *path, const char *node_ring, 366 + const char *node_chnl) 367 + { 368 + struct xenbus_device *xb_dev = channel->front_info->xb_dev; 369 + int ret; 370 + 371 + /* Write control channel ring reference. */ 372 + ret = xenbus_printf(xbt, path, node_ring, "%u", channel->gref); 373 + if (ret < 0) { 374 + dev_err(&xb_dev->dev, "Error writing ring-ref: %d\n", ret); 375 + return ret; 376 + } 377 + 378 + /* Write event channel ring reference. */ 379 + ret = xenbus_printf(xbt, path, node_chnl, "%u", channel->port); 380 + if (ret < 0) { 381 + dev_err(&xb_dev->dev, "Error writing event channel: %d\n", ret); 382 + return ret; 383 + } 384 + 385 + return 0; 386 + } 387 + 388 + int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info) 389 + { 390 + struct xen_front_cfg_card *cfg = &front_info->cfg; 391 + struct xenbus_transaction xbt; 392 + int ret, d; 393 + 394 + again: 395 + ret = xenbus_transaction_start(&xbt); 396 + if (ret < 0) { 397 + xenbus_dev_fatal(front_info->xb_dev, ret, 398 + "starting transaction"); 399 + return ret; 400 + } 401 + 402 + for (d = 0; d < cfg->num_pcm_instances; d++) { 403 + struct xen_front_cfg_pcm_instance *pcm_instance; 404 + int s, index; 405 + 406 + pcm_instance = &cfg->pcm_instances[d]; 407 + 408 + for (s = 0; s < pcm_instance->num_streams_pb; s++) { 409 + index = pcm_instance->streams_pb[s].index; 410 + 411 + ret = evtchnl_publish(xbt, 412 + &front_info->evt_pairs[index].req, 413 + pcm_instance->streams_pb[s].xenstore_path, 414 + XENSND_FIELD_RING_REF, 415 + XENSND_FIELD_EVT_CHNL); 416 + if (ret < 0) 417 + goto fail; 418 + 419 + ret = evtchnl_publish(xbt, 420 + &front_info->evt_pairs[index].evt, 421 + pcm_instance->streams_pb[s].xenstore_path, 422 + XENSND_FIELD_EVT_RING_REF, 423 + XENSND_FIELD_EVT_EVT_CHNL); 424 + if (ret < 0) 425 + goto fail; 426 + } 427 + 428 + for (s = 0; s < pcm_instance->num_streams_cap; s++) { 429 + index = pcm_instance->streams_cap[s].index; 430 + 431 + 
ret = evtchnl_publish(xbt, 432 + &front_info->evt_pairs[index].req, 433 + pcm_instance->streams_cap[s].xenstore_path, 434 + XENSND_FIELD_RING_REF, 435 + XENSND_FIELD_EVT_CHNL); 436 + if (ret < 0) 437 + goto fail; 438 + 439 + ret = evtchnl_publish(xbt, 440 + &front_info->evt_pairs[index].evt, 441 + pcm_instance->streams_cap[s].xenstore_path, 442 + XENSND_FIELD_EVT_RING_REF, 443 + XENSND_FIELD_EVT_EVT_CHNL); 444 + if (ret < 0) 445 + goto fail; 446 + } 447 + } 448 + ret = xenbus_transaction_end(xbt, 0); 449 + if (ret < 0) { 450 + if (ret == -EAGAIN) 451 + goto again; 452 + 453 + xenbus_dev_fatal(front_info->xb_dev, ret, 454 + "completing transaction"); 455 + goto fail_to_end; 456 + } 457 + return 0; 458 + fail: 459 + xenbus_transaction_end(xbt, 1); 460 + fail_to_end: 461 + xenbus_dev_fatal(front_info->xb_dev, ret, "writing XenStore"); 462 + return ret; 463 + } 464 + 465 + void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair, 466 + bool is_connected) 467 + { 468 + enum xen_snd_front_evtchnl_state state; 469 + 470 + if (is_connected) 471 + state = EVTCHNL_STATE_CONNECTED; 472 + else 473 + state = EVTCHNL_STATE_DISCONNECTED; 474 + 475 + mutex_lock(&evt_pair->req.ring_io_lock); 476 + evt_pair->req.state = state; 477 + mutex_unlock(&evt_pair->req.ring_io_lock); 478 + 479 + mutex_lock(&evt_pair->evt.ring_io_lock); 480 + evt_pair->evt.state = state; 481 + mutex_unlock(&evt_pair->evt.ring_io_lock); 482 + } 483 + 484 + void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair) 485 + { 486 + mutex_lock(&evt_pair->req.ring_io_lock); 487 + evt_pair->req.evt_next_id = 0; 488 + mutex_unlock(&evt_pair->req.ring_io_lock); 489 + 490 + mutex_lock(&evt_pair->evt.ring_io_lock); 491 + evt_pair->evt.evt_next_id = 0; 492 + mutex_unlock(&evt_pair->evt.ring_io_lock); 493 + } 494 +
+95
sound/xen/xen_snd_front_evtchnl.h
··· 1 + /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 + 3 + /* 4 + * Xen para-virtual sound device 5 + * 6 + * Copyright (C) 2016-2018 EPAM Systems Inc. 7 + * 8 + * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com> 9 + */ 10 + 11 + #ifndef __XEN_SND_FRONT_EVTCHNL_H 12 + #define __XEN_SND_FRONT_EVTCHNL_H 13 + 14 + #include <xen/interface/io/sndif.h> 15 + 16 + struct xen_snd_front_info; 17 + 18 + #ifndef GRANT_INVALID_REF 19 + /* 20 + * FIXME: usage of grant reference 0 as invalid grant reference: 21 + * grant reference 0 is valid, but never exposed to a PV driver, 22 + * because of the fact it is already in use/reserved by the PV console. 23 + */ 24 + #define GRANT_INVALID_REF 0 25 + #endif 26 + 27 + /* Timeout in ms to wait for backend to respond. */ 28 + #define VSND_WAIT_BACK_MS 3000 29 + 30 + enum xen_snd_front_evtchnl_state { 31 + EVTCHNL_STATE_DISCONNECTED, 32 + EVTCHNL_STATE_CONNECTED, 33 + }; 34 + 35 + enum xen_snd_front_evtchnl_type { 36 + EVTCHNL_TYPE_REQ, 37 + EVTCHNL_TYPE_EVT, 38 + }; 39 + 40 + struct xen_snd_front_evtchnl { 41 + struct xen_snd_front_info *front_info; 42 + int gref; 43 + int port; 44 + int irq; 45 + int index; 46 + /* State of the event channel. */ 47 + enum xen_snd_front_evtchnl_state state; 48 + enum xen_snd_front_evtchnl_type type; 49 + /* Either response id or incoming event id. */ 50 + u16 evt_id; 51 + /* Next request id or next expected event id. */ 52 + u16 evt_next_id; 53 + /* Shared ring access lock. */ 54 + struct mutex ring_io_lock; 55 + union { 56 + struct { 57 + struct xen_sndif_front_ring ring; 58 + struct completion completion; 59 + /* Serializer for backend IO: request/response. */ 60 + struct mutex req_io_lock; 61 + 62 + /* Latest response status. */ 63 + int resp_status; 64 + union { 65 + struct xensnd_query_hw_param hw_param; 66 + } resp; 67 + } req; 68 + struct { 69 + struct xensnd_event_page *page; 70 + /* This is needed to handle XENSND_EVT_CUR_POS event. 
*/ 71 + struct snd_pcm_substream *substream; 72 + } evt; 73 + } u; 74 + }; 75 + 76 + struct xen_snd_front_evtchnl_pair { 77 + struct xen_snd_front_evtchnl req; 78 + struct xen_snd_front_evtchnl evt; 79 + }; 80 + 81 + int xen_snd_front_evtchnl_create_all(struct xen_snd_front_info *front_info, 82 + int num_streams); 83 + 84 + void xen_snd_front_evtchnl_free_all(struct xen_snd_front_info *front_info); 85 + 86 + int xen_snd_front_evtchnl_publish_all(struct xen_snd_front_info *front_info); 87 + 88 + void xen_snd_front_evtchnl_flush(struct xen_snd_front_evtchnl *evtchnl); 89 + 90 + void xen_snd_front_evtchnl_pair_set_connected(struct xen_snd_front_evtchnl_pair *evt_pair, 91 + bool is_connected); 92 + 93 + void xen_snd_front_evtchnl_pair_clear(struct xen_snd_front_evtchnl_pair *evt_pair); 94 + 95 + #endif /* __XEN_SND_FRONT_EVTCHNL_H */