// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * (c) 2017 Stefano Stabellini <stefano@aporeto.com>
 */

#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/module.h>
#include <linux/semaphore.h>
#include <linux/wait.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_connection_sock.h>
#include <net/request_sock.h>
#include <trace/events/sock.h>

#include <xen/events.h>
#include <xen/grant_table.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/interface/io/pvcalls.h>

#define PVCALLS_VERSIONS "1"
#define MAX_RING_ORDER XENBUS_MAX_RING_GRANT_ORDER

static struct pvcalls_back_global {
	struct list_head frontends;
	struct semaphore frontends_lock;
} pvcalls_back_global;

/*
 * Per-frontend data structure. It contains pointers to the command
 * ring, its event channel, a list of active sockets and a tree of
 * passive sockets.
 */
struct pvcalls_fedata {
	struct list_head list;
	struct xenbus_device *dev;
	struct xen_pvcalls_sring *sring;
	struct xen_pvcalls_back_ring ring;
	int irq;
	struct list_head socket_mappings;
	struct radix_tree_root socketpass_mappings;
	struct semaphore socket_lock;
};

struct pvcalls_ioworker {
	struct work_struct register_work;
	struct workqueue_struct *wq;
};

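/*
 * Per active socket. The atomic fields are used as flags and counters
 * by the ioworker: "read" and "write" record that inbound or outbound
 * data may be pending, "io" counts outstanding reasons to keep the
 * worker looping, "release" asks the worker to stop, and "eoi"
 * requests a delayed end-of-interrupt once pending writes drain.
 */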
struct sock_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *sockpass;
	struct socket *sock;
	uint64_t id;
	grant_ref_t ref;
	struct pvcalls_data_intf *ring;
	void *bytes;
	struct pvcalls_data data;
	uint32_t ring_order;
	int irq;
	atomic_t read;
	atomic_t write;
	atomic_t io;
	atomic_t release;
	atomic_t eoi;
	void (*saved_data_ready)(struct sock *sk);
	struct pvcalls_ioworker ioworker;
};

struct sockpass_mapping {
	struct list_head list;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	uint64_t id;
	struct xen_pvcalls_request reqcopy;
	spinlock_t copy_lock;
	struct workqueue_struct *wq;
	struct work_struct register_work;
	void (*saved_data_ready)(struct sock *sk);
};

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map);
static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map);

static bool pvcalls_conn_back_read(void *opaque)
{
	struct sock_mapping *map = (struct sock_mapping *)opaque;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, wanted, array_size, masked_prod, masked_cons;
	int32_t error;
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	unsigned long flags;
	int ret;

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	cons = intf->in_cons;
	prod = intf->in_prod;
	error = intf->in_error;
	/* read the indexes first, then deal with the data */
	virt_mb();

	if (error)
		return false;

	size = pvcalls_queued(prod, cons, array_size);
	if (size >= array_size)
		return false;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (skb_queue_empty(&map->sock->sk->sk_receive_queue)) {
		atomic_set(&map->read, 0);
		spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock,
				flags);
		return true;
	}
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);
	wanted = array_size - size;
	masked_prod = pvcalls_mask(prod, array_size);
	masked_cons = pvcalls_mask(cons, array_size);

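	/*
	 * Receive into the free span of the "in" ring. If the free space
	 * wraps past the end of the array, split it across two kvecs.
	 */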
	memset(&msg, 0, sizeof(msg));
	if (masked_prod < masked_cons) {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = wanted;
		iov_iter_kvec(&msg.msg_iter, ITER_DEST, vec, 1, wanted);
	} else {
		vec[0].iov_base = data->in + masked_prod;
		vec[0].iov_len = array_size - masked_prod;
		vec[1].iov_base = data->in;
		vec[1].iov_len = wanted - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, ITER_DEST, vec, 2, wanted);
	}

	atomic_set(&map->read, 0);
	ret = inet_recvmsg(map->sock, &msg, wanted, MSG_DONTWAIT);
	WARN_ON(ret > wanted);
	if (ret == -EAGAIN) /* shouldn't happen */
		return true;
	if (!ret)
		ret = -ENOTCONN;
	spin_lock_irqsave(&map->sock->sk->sk_receive_queue.lock, flags);
	if (ret > 0 && !skb_queue_empty(&map->sock->sk->sk_receive_queue))
		atomic_inc(&map->read);
	spin_unlock_irqrestore(&map->sock->sk->sk_receive_queue.lock, flags);

	/* write the data, then modify the indexes */
	virt_wmb();
	if (ret < 0) {
		atomic_set(&map->read, 0);
		intf->in_error = ret;
	} else
		intf->in_prod = prod + ret;
	/* update the indexes, then notify the other end */
	virt_wmb();
	notify_remote_via_irq(map->irq);

	return true;
}

static bool pvcalls_conn_back_write(struct sock_mapping *map)
{
	struct pvcalls_data_intf *intf = map->ring;
	struct pvcalls_data *data = &map->data;
	struct msghdr msg;
	struct kvec vec[2];
	RING_IDX cons, prod, size, array_size;
	int ret;

	atomic_set(&map->write, 0);

	cons = intf->out_cons;
	prod = intf->out_prod;
	/* read the indexes before dealing with the data */
	virt_mb();

	array_size = XEN_FLEX_RING_SIZE(map->ring_order);
	size = pvcalls_queued(prod, cons, array_size);
	if (size == 0)
		return false;

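	/* The queued data may wrap around the ring end: use two kvecs then. */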
	memset(&msg, 0, sizeof(msg));
	msg.msg_flags |= MSG_DONTWAIT;
	if (pvcalls_mask(prod, array_size) > pvcalls_mask(cons, array_size)) {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = size;
		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 1, size);
	} else {
		vec[0].iov_base = data->out + pvcalls_mask(cons, array_size);
		vec[0].iov_len = array_size - pvcalls_mask(cons, array_size);
		vec[1].iov_base = data->out;
		vec[1].iov_len = size - vec[0].iov_len;
		iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, vec, 2, size);
	}

	ret = inet_sendmsg(map->sock, &msg, size);
	if (ret == -EAGAIN) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
		return true;
	}

	/* write the data, then update the indexes */
	virt_wmb();
	if (ret < 0) {
		intf->out_error = ret;
	} else {
		intf->out_error = 0;
		intf->out_cons = cons + ret;
		prod = intf->out_prod;
	}
	/* update the indexes, then notify the other end */
	virt_wmb();
	if (prod != cons + ret) {
		atomic_inc(&map->write);
		atomic_inc(&map->io);
	}
	notify_remote_via_irq(map->irq);

	return true;
}

static void pvcalls_back_ioworker(struct work_struct *work)
{
	struct pvcalls_ioworker *ioworker = container_of(work,
		struct pvcalls_ioworker, register_work);
	struct sock_mapping *map = container_of(ioworker, struct sock_mapping,
		ioworker);
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

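	/*
	 * Keep draining while there are outstanding events. Any handled
	 * read or write clears the spurious flag; the pending EOI is only
	 * issued once all queued writes have been processed.
	 */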
	while (atomic_read(&map->io) > 0) {
		if (atomic_read(&map->release) > 0) {
			atomic_set(&map->release, 0);
			return;
		}

		if (atomic_read(&map->read) > 0 &&
		    pvcalls_conn_back_read(map))
			eoi_flags = 0;
		if (atomic_read(&map->write) > 0 &&
		    pvcalls_conn_back_write(map))
			eoi_flags = 0;

		if (atomic_read(&map->eoi) > 0 && !atomic_read(&map->write)) {
			atomic_set(&map->eoi, 0);
			xen_irq_lateeoi(map->irq, eoi_flags);
			eoi_flags = XEN_EOI_FLAG_SPURIOUS;
		}

		atomic_dec(&map->io);
	}
}

static int pvcalls_back_socket(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.socket.domain != AF_INET ||
	    req->u.socket.type != SOCK_STREAM ||
	    (req->u.socket.protocol != IPPROTO_IP &&
	     req->u.socket.protocol != AF_INET))
		ret = -EAFNOSUPPORT;
	else
		ret = 0;

	/* leave the actual socket allocation for later */

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.socket.id = req->u.socket.id;
	rsp->ret = ret;

	return 0;
}

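/*
 * sk callbacks installed on active sockets: a state change just kicks
 * the frontend event channel, while data-ready additionally schedules
 * the ioworker to drain the socket into the shared ring.
 */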
static void pvcalls_sk_state_change(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;

	if (map == NULL)
		return;

	atomic_inc(&map->read);
	notify_remote_via_irq(map->irq);
}

static void pvcalls_sk_data_ready(struct sock *sock)
{
	struct sock_mapping *map = sock->sk_user_data;
	struct pvcalls_ioworker *iow;

	trace_sk_data_ready(sock);

	if (map == NULL)
		return;

	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);
}

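/*
 * Set up the mapping for a new active socket: map the interface page
 * and the data ring granted by the frontend, bind the per-connection
 * event channel, start the ioworker and hook the sk callbacks.
 */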
static struct sock_mapping *pvcalls_new_active_socket(
		struct pvcalls_fedata *fedata,
		uint64_t id,
		grant_ref_t ref,
		evtchn_port_t evtchn,
		struct socket *sock)
{
	int ret;
	struct sock_mapping *map;
	void *page;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL)
		return NULL;

	map->fedata = fedata;
	map->sock = sock;
	map->id = id;
	map->ref = ref;

	ret = xenbus_map_ring_valloc(fedata->dev, &ref, 1, &page);
	if (ret < 0)
		goto out;
	map->ring = page;
	map->ring_order = map->ring->ring_order;
	/* first read the order, then map the data ring */
	virt_rmb();
	if (map->ring_order > MAX_RING_ORDER) {
		pr_warn("%s frontend requested ring_order %u, which is > MAX (%u)\n",
			__func__, map->ring_order, MAX_RING_ORDER);
		goto out;
	}
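	/*
	 * The page mapped above only carries the indexes and the grant
	 * references of the data ring; now map the data ring itself,
	 * (1 << ring_order) pages in total.
	 */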
	ret = xenbus_map_ring_valloc(fedata->dev, map->ring->ref,
				     (1 << map->ring_order), &page);
	if (ret < 0)
		goto out;
	map->bytes = page;

	ret = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			fedata->dev, evtchn,
			pvcalls_back_conn_event, 0, "pvcalls-backend", map);
	if (ret < 0)
		goto out;
	map->irq = ret;

	map->data.in = map->bytes;
	map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order);

	map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1);
	if (!map->ioworker.wq)
		goto out;
	atomic_set(&map->io, 1);
	INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker);

	down(&fedata->socket_lock);
	list_add_tail(&map->list, &fedata->socket_mappings);
	up(&fedata->socket_lock);

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_sk_data_ready;
	map->sock->sk->sk_state_change = pvcalls_sk_state_change;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

	return map;
out:
	down(&fedata->socket_lock);
	list_del(&map->list);
	pvcalls_back_release_active(fedata->dev, fedata, map);
	up(&fedata->socket_lock);
	return NULL;
}

static int pvcalls_back_connect(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct socket *sock;
	struct sock_mapping *map;
	struct xen_pvcalls_response *rsp;
	struct sockaddr *sa = (struct sockaddr *)&req->u.connect.addr;

	fedata = dev_get_drvdata(&dev->dev);

	if (req->u.connect.len < sizeof(sa->sa_family) ||
	    req->u.connect.len > sizeof(req->u.connect.addr) ||
	    sa->sa_family != AF_INET)
		goto out;

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &sock);
	if (ret < 0)
		goto out;
	ret = inet_stream_connect(sock, sa, req->u.connect.len, 0);
	if (ret < 0) {
		sock_release(sock);
		goto out;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.connect.id,
					req->u.connect.ref,
					req->u.connect.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
	}

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.connect.id = req->u.connect.id;
	rsp->ret = ret;

	return 0;
}

static int pvcalls_back_release_active(struct xenbus_device *dev,
				       struct pvcalls_fedata *fedata,
				       struct sock_mapping *map)
{
	disable_irq(map->irq);
	if (map->sock->sk != NULL) {
		write_lock_bh(&map->sock->sk->sk_callback_lock);
		map->sock->sk->sk_user_data = NULL;
		map->sock->sk->sk_data_ready = map->saved_data_ready;
		write_unlock_bh(&map->sock->sk->sk_callback_lock);
	}

	atomic_set(&map->release, 1);
	flush_work(&map->ioworker.register_work);

	xenbus_unmap_ring_vfree(dev, map->bytes);
	xenbus_unmap_ring_vfree(dev, (void *)map->ring);
	unbind_from_irqhandler(map->irq, map);

	sock_release(map->sock);
	kfree(map);

	return 0;
}

static int pvcalls_back_release_passive(struct xenbus_device *dev,
					struct pvcalls_fedata *fedata,
					struct sockpass_mapping *mappass)
{
	if (mappass->sock->sk != NULL) {
		write_lock_bh(&mappass->sock->sk->sk_callback_lock);
		mappass->sock->sk->sk_user_data = NULL;
		mappass->sock->sk->sk_data_ready = mappass->saved_data_ready;
		write_unlock_bh(&mappass->sock->sk->sk_callback_lock);
	}
	sock_release(mappass->sock);
	destroy_workqueue(mappass->wq);
	kfree(mappass);

	return 0;
}

static int pvcalls_back_release(struct xenbus_device *dev,
				struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	int ret = 0;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		if (map->id == req->u.release.id) {
			list_del(&map->list);
			up(&fedata->socket_lock);
			ret = pvcalls_back_release_active(dev, fedata, map);
			goto out;
		}
	}
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.release.id);
	if (mappass != NULL) {
		radix_tree_delete(&fedata->socketpass_mappings, mappass->id);
		up(&fedata->socket_lock);
		ret = pvcalls_back_release_passive(dev, fedata, mappass);
	} else
		up(&fedata->socket_lock);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->u.release.id = req->u.release.id;
	rsp->cmd = req->cmd;
	rsp->ret = ret;
	return 0;
}

static void __pvcalls_back_accept(struct work_struct *work)
{
	struct sockpass_mapping *mappass = container_of(
		work, struct sockpass_mapping, register_work);
	struct sock_mapping *map;
	struct pvcalls_ioworker *iow;
	struct pvcalls_fedata *fedata;
	struct socket *sock;
	struct xen_pvcalls_response *rsp;
	struct xen_pvcalls_request *req;
	int notify;
	int ret = -EINVAL;
	unsigned long flags;

	fedata = mappass->fedata;
	/*
	 * __pvcalls_back_accept can race against pvcalls_back_accept.
	 * We only need to check the value of "cmd" on read. It could be
	 * done atomically, but to simplify the code on the write side, we
	 * use a spinlock.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	req = &mappass->reqcopy;
	if (req->cmd != PVCALLS_ACCEPT) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		return;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	sock = sock_alloc();
	if (sock == NULL)
		goto out_error;
	sock->type = mappass->sock->type;
	sock->ops = mappass->sock->ops;

	ret = inet_accept(mappass->sock, sock, O_NONBLOCK, true);
	if (ret == -EAGAIN) {
		sock_release(sock);
		return;
	}

	map = pvcalls_new_active_socket(fedata,
					req->u.accept.id_new,
					req->u.accept.ref,
					req->u.accept.evtchn,
					sock);
	if (!map) {
		ret = -EFAULT;
		sock_release(sock);
		goto out_error;
	}

	map->sockpass = mappass;
	iow = &map->ioworker;
	atomic_inc(&map->read);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
	if (notify)
		notify_remote_via_irq(fedata->irq);

	mappass->reqcopy.cmd = 0;
}

static void pvcalls_pass_sk_data_ready(struct sock *sock)
{
	struct sockpass_mapping *mappass = sock->sk_user_data;
	struct pvcalls_fedata *fedata;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;
	int notify;

	trace_sk_data_ready(sock);

	if (mappass == NULL)
		return;

	fedata = mappass->fedata;
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd == PVCALLS_POLL) {
		rsp = RING_GET_RESPONSE(&fedata->ring,
					fedata->ring.rsp_prod_pvt++);
		rsp->req_id = mappass->reqcopy.req_id;
		rsp->u.poll.id = mappass->reqcopy.u.poll.id;
		rsp->cmd = mappass->reqcopy.cmd;
		rsp->ret = 0;

		mappass->reqcopy.cmd = 0;
		spin_unlock_irqrestore(&mappass->copy_lock, flags);

		RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&fedata->ring, notify);
		if (notify)
			notify_remote_via_irq(mappass->fedata->irq);
	} else {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		queue_work(mappass->wq, &mappass->register_work);
	}
}

static int pvcalls_back_bind(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (map == NULL) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_WORK(&map->register_work, __pvcalls_back_accept);
	spin_lock_init(&map->copy_lock);
	map->wq = alloc_workqueue("pvcalls_wq", WQ_UNBOUND, 1);
	if (!map->wq) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sock_create(AF_INET, SOCK_STREAM, 0, &map->sock);
	if (ret < 0)
		goto out;

	ret = inet_bind(map->sock, (struct sockaddr *)&req->u.bind.addr,
			req->u.bind.len);
	if (ret < 0)
		goto out;

	map->fedata = fedata;
	map->id = req->u.bind.id;

	down(&fedata->socket_lock);
	ret = radix_tree_insert(&fedata->socketpass_mappings, map->id,
				map);
	up(&fedata->socket_lock);
	if (ret)
		goto out;

	write_lock_bh(&map->sock->sk->sk_callback_lock);
	map->saved_data_ready = map->sock->sk->sk_data_ready;
	map->sock->sk->sk_user_data = map;
	map->sock->sk->sk_data_ready = pvcalls_pass_sk_data_ready;
	write_unlock_bh(&map->sock->sk->sk_callback_lock);

out:
	if (ret) {
		if (map && map->sock)
			sock_release(map->sock);
		if (map && map->wq)
			destroy_workqueue(map->wq);
		kfree(map);
	}
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.bind.id = req->u.bind.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_listen(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	int ret = -EINVAL;
	struct sockpass_mapping *map;
	struct xen_pvcalls_response *rsp;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	map = radix_tree_lookup(&fedata->socketpass_mappings, req->u.listen.id);
	up(&fedata->socket_lock);
	if (map == NULL)
		goto out;

	ret = inet_listen(map->sock, req->u.listen.backlog);

out:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.listen.id = req->u.listen.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_accept(struct xenbus_device *dev,
			       struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	int ret = -EINVAL;
	struct xen_pvcalls_response *rsp;
	unsigned long flags;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
		req->u.accept.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		goto out_error;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		spin_unlock_irqrestore(&mappass->copy_lock, flags);
		ret = -EINTR;
		goto out_error;
	}

	mappass->reqcopy = *req;
	spin_unlock_irqrestore(&mappass->copy_lock, flags);
	queue_work(mappass->wq, &mappass->register_work);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out_error:
	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.accept.id = req->u.accept.id;
	rsp->ret = ret;
	return 0;
}

static int pvcalls_back_poll(struct xenbus_device *dev,
			     struct xen_pvcalls_request *req)
{
	struct pvcalls_fedata *fedata;
	struct sockpass_mapping *mappass;
	struct xen_pvcalls_response *rsp;
	struct inet_connection_sock *icsk;
	struct request_sock_queue *queue;
	unsigned long flags;
	int ret;
	bool data;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	mappass = radix_tree_lookup(&fedata->socketpass_mappings,
				    req->u.poll.id);
	up(&fedata->socket_lock);
	if (mappass == NULL)
		return -EINVAL;

	/*
	 * Limitation of the current implementation: only support one
	 * concurrent accept or poll call on one socket.
	 */
	spin_lock_irqsave(&mappass->copy_lock, flags);
	if (mappass->reqcopy.cmd != 0) {
		ret = -EINTR;
		goto out;
	}

	mappass->reqcopy = *req;
	icsk = inet_csk(mappass->sock->sk);
	queue = &icsk->icsk_accept_queue;
	data = READ_ONCE(queue->rskq_accept_head) != NULL;
	if (data) {
		mappass->reqcopy.cmd = 0;
		ret = 0;
		goto out;
	}
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	/* Tell the caller we don't need to send back a notification yet */
	return -1;

out:
	spin_unlock_irqrestore(&mappass->copy_lock, flags);

	rsp = RING_GET_RESPONSE(&fedata->ring, fedata->ring.rsp_prod_pvt++);
	rsp->req_id = req->req_id;
	rsp->cmd = req->cmd;
	rsp->u.poll.id = req->u.poll.id;
	rsp->ret = ret;
	return 0;
}

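/*
 * Dispatch a single request. A nonzero return value means the response
 * is deferred (accept and poll answer later, from a different context),
 * so the caller must not push one now.
 */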
static int pvcalls_back_handle_cmd(struct xenbus_device *dev,
				   struct xen_pvcalls_request *req)
{
	int ret = 0;

	switch (req->cmd) {
	case PVCALLS_SOCKET:
		ret = pvcalls_back_socket(dev, req);
		break;
	case PVCALLS_CONNECT:
		ret = pvcalls_back_connect(dev, req);
		break;
	case PVCALLS_RELEASE:
		ret = pvcalls_back_release(dev, req);
		break;
	case PVCALLS_BIND:
		ret = pvcalls_back_bind(dev, req);
		break;
	case PVCALLS_LISTEN:
		ret = pvcalls_back_listen(dev, req);
		break;
	case PVCALLS_ACCEPT:
		ret = pvcalls_back_accept(dev, req);
		break;
	case PVCALLS_POLL:
		ret = pvcalls_back_poll(dev, req);
		break;
	default:
	{
		struct pvcalls_fedata *fedata;
		struct xen_pvcalls_response *rsp;

		fedata = dev_get_drvdata(&dev->dev);
		rsp = RING_GET_RESPONSE(
				&fedata->ring, fedata->ring.rsp_prod_pvt++);
		rsp->req_id = req->req_id;
		rsp->cmd = req->cmd;
		rsp->ret = -ENOTSUPP;
		break;
	}
	}
	return ret;
}

static void pvcalls_back_work(struct pvcalls_fedata *fedata)
{
	int notify, notify_all = 0, more = 1;
	struct xen_pvcalls_request req;
	struct xenbus_device *dev = fedata->dev;

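	/*
	 * Requests are copied off the shared ring before being handled
	 * (RING_COPY_REQUEST), so the frontend cannot change a request
	 * while the backend is processing it.
	 */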
	while (more) {
		while (RING_HAS_UNCONSUMED_REQUESTS(&fedata->ring)) {
			RING_COPY_REQUEST(&fedata->ring,
					  fedata->ring.req_cons++,
					  &req);

			if (!pvcalls_back_handle_cmd(dev, &req)) {
				RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(
					&fedata->ring, notify);
				notify_all += notify;
			}
		}

		if (notify_all) {
			notify_remote_via_irq(fedata->irq);
			notify_all = 0;
		}

		RING_FINAL_CHECK_FOR_REQUESTS(&fedata->ring, more);
	}
}

static irqreturn_t pvcalls_back_event(int irq, void *dev_id)
{
	struct xenbus_device *dev = dev_id;
	struct pvcalls_fedata *fedata = NULL;
	unsigned int eoi_flags = XEN_EOI_FLAG_SPURIOUS;

	if (dev) {
		fedata = dev_get_drvdata(&dev->dev);
		if (fedata) {
			pvcalls_back_work(fedata);
			eoi_flags = 0;
		}
	}

	xen_irq_lateeoi(irq, eoi_flags);

	return IRQ_HANDLED;
}

static irqreturn_t pvcalls_back_conn_event(int irq, void *sock_map)
{
	struct sock_mapping *map = sock_map;
	struct pvcalls_ioworker *iow;

	if (map == NULL || map->sock == NULL || map->sock->sk == NULL ||
		map->sock->sk->sk_user_data != map) {
		xen_irq_lateeoi(irq, 0);
		return IRQ_HANDLED;
	}

	iow = &map->ioworker;

	atomic_inc(&map->write);
	atomic_inc(&map->eoi);
	atomic_inc(&map->io);
	queue_work(iow->wq, &iow->register_work);

	return IRQ_HANDLED;
}

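/*
 * Connect to a frontend: read the event channel and the command ring
 * reference from xenstore, bind the channel with a threaded handler,
 * map the ring and register the new fedata on the global list.
 */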
static int backend_connect(struct xenbus_device *dev)
{
	int err;
	evtchn_port_t evtchn;
	grant_ref_t ring_ref;
	struct pvcalls_fedata *fedata = NULL;

	fedata = kzalloc(sizeof(struct pvcalls_fedata), GFP_KERNEL);
	if (!fedata)
		return -ENOMEM;

	fedata->irq = -1;
	err = xenbus_scanf(XBT_NIL, dev->otherend, "port", "%u",
			   &evtchn);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/port",
				 dev->otherend);
		goto error;
	}

	err = xenbus_scanf(XBT_NIL, dev->otherend, "ring-ref", "%u", &ring_ref);
	if (err != 1) {
		err = -EINVAL;
		xenbus_dev_fatal(dev, err, "reading %s/ring-ref",
				 dev->otherend);
		goto error;
	}

	err = bind_interdomain_evtchn_to_irq_lateeoi(dev, evtchn);
	if (err < 0)
		goto error;
	fedata->irq = err;

	err = request_threaded_irq(fedata->irq, NULL, pvcalls_back_event,
				   IRQF_ONESHOT, "pvcalls-back", dev);
	if (err < 0)
		goto error;

	err = xenbus_map_ring_valloc(dev, &ring_ref, 1,
				     (void **)&fedata->sring);
	if (err < 0)
		goto error;

	BACK_RING_INIT(&fedata->ring, fedata->sring, XEN_PAGE_SIZE * 1);
	fedata->dev = dev;

	INIT_LIST_HEAD(&fedata->socket_mappings);
	INIT_RADIX_TREE(&fedata->socketpass_mappings, GFP_KERNEL);
	sema_init(&fedata->socket_lock, 1);
	dev_set_drvdata(&dev->dev, fedata);

	down(&pvcalls_back_global.frontends_lock);
	list_add_tail(&fedata->list, &pvcalls_back_global.frontends);
	up(&pvcalls_back_global.frontends_lock);

	return 0;

 error:
	if (fedata->irq >= 0)
		unbind_from_irqhandler(fedata->irq, dev);
	if (fedata->sring != NULL)
		xenbus_unmap_ring_vfree(dev, fedata->sring);
	kfree(fedata);
	return err;
}

static int backend_disconnect(struct xenbus_device *dev)
{
	struct pvcalls_fedata *fedata;
	struct sock_mapping *map, *n;
	struct sockpass_mapping *mappass;
	struct radix_tree_iter iter;
	void **slot;

	fedata = dev_get_drvdata(&dev->dev);

	down(&fedata->socket_lock);
	list_for_each_entry_safe(map, n, &fedata->socket_mappings, list) {
		list_del(&map->list);
		pvcalls_back_release_active(dev, fedata, map);
	}

	radix_tree_for_each_slot(slot, &fedata->socketpass_mappings, &iter, 0) {
		mappass = radix_tree_deref_slot(slot);
		if (!mappass)
			continue;
		if (radix_tree_exception(mappass)) {
			if (radix_tree_deref_retry(mappass))
				slot = radix_tree_iter_retry(&iter);
		} else {
			radix_tree_delete(&fedata->socketpass_mappings,
					  mappass->id);
			pvcalls_back_release_passive(dev, fedata, mappass);
		}
	}
	up(&fedata->socket_lock);

	unbind_from_irqhandler(fedata->irq, dev);
	xenbus_unmap_ring_vfree(dev, fedata->sring);

	list_del(&fedata->list);
	kfree(fedata);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

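/*
 * Advertise the backend ABI in xenstore ("versions", "max-page-order"
 * and "function-calls") in one transaction, retrying on -EAGAIN, then
 * move to XenbusStateInitWait so the frontend can connect.
 */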
static int pvcalls_back_probe(struct xenbus_device *dev,
			      const struct xenbus_device_id *id)
{
	int err, abort;
	struct xenbus_transaction xbt;

again:
	abort = 1;

	err = xenbus_transaction_start(&xbt);
	if (err) {
		pr_warn("%s cannot create xenstore transaction\n", __func__);
		return err;
	}

	err = xenbus_printf(xbt, dev->nodename, "versions", "%s",
			    PVCALLS_VERSIONS);
	if (err) {
		pr_warn("%s write out 'versions' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "max-page-order", "%u",
			    MAX_RING_ORDER);
	if (err) {
		pr_warn("%s write out 'max-page-order' failed\n", __func__);
		goto abort;
	}

	err = xenbus_printf(xbt, dev->nodename, "function-calls",
			    XENBUS_FUNCTIONS_CALLS);
	if (err) {
		pr_warn("%s write out 'function-calls' failed\n", __func__);
		goto abort;
	}

	abort = 0;
abort:
	err = xenbus_transaction_end(xbt, abort);
	if (err) {
		if (err == -EAGAIN && !abort)
			goto again;
		pr_warn("%s cannot complete xenstore transaction\n", __func__);
		return err;
	}

	if (abort)
		return -EFAULT;

	xenbus_switch_state(dev, XenbusStateInitWait);

	return 0;
}

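/*
 * Walk the xenbus state machine one transition at a time until the
 * requested state is reached; unexpected combinations trip WARN_ON.
 */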
static void set_backend_state(struct xenbus_device *dev,
			      enum xenbus_state state)
{
	while (dev->state != state) {
		switch (dev->state) {
		case XenbusStateClosed:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
				xenbus_switch_state(dev, XenbusStateInitWait);
				break;
			case XenbusStateClosing:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateInitWait:
		case XenbusStateInitialised:
			switch (state) {
			case XenbusStateConnected:
				if (backend_connect(dev))
					return;
				xenbus_switch_state(dev, XenbusStateConnected);
				break;
			case XenbusStateClosing:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateConnected:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateClosing:
			case XenbusStateClosed:
				down(&pvcalls_back_global.frontends_lock);
				backend_disconnect(dev);
				up(&pvcalls_back_global.frontends_lock);
				xenbus_switch_state(dev, XenbusStateClosing);
				break;
			default:
				WARN_ON(1);
			}
			break;
		case XenbusStateClosing:
			switch (state) {
			case XenbusStateInitWait:
			case XenbusStateConnected:
			case XenbusStateClosed:
				xenbus_switch_state(dev, XenbusStateClosed);
				break;
			default:
				WARN_ON(1);
			}
			break;
		default:
			WARN_ON(1);
		}
	}
}

static void pvcalls_back_changed(struct xenbus_device *dev,
				 enum xenbus_state frontend_state)
{
	switch (frontend_state) {
	case XenbusStateInitialising:
		set_backend_state(dev, XenbusStateInitWait);
		break;

	case XenbusStateInitialised:
	case XenbusStateConnected:
		set_backend_state(dev, XenbusStateConnected);
		break;

	case XenbusStateClosing:
		set_backend_state(dev, XenbusStateClosing);
		break;

	case XenbusStateClosed:
		set_backend_state(dev, XenbusStateClosed);
		if (xenbus_dev_is_online(dev))
			break;
		device_unregister(&dev->dev);
		break;
	case XenbusStateUnknown:
		set_backend_state(dev, XenbusStateClosed);
		device_unregister(&dev->dev);
		break;

	default:
		xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
				 frontend_state);
		break;
	}
}

static void pvcalls_back_remove(struct xenbus_device *dev)
{
}

static int pvcalls_back_uevent(const struct xenbus_device *xdev,
			       struct kobj_uevent_env *env)
{
	return 0;
}

static const struct xenbus_device_id pvcalls_back_ids[] = {
	{ "pvcalls" },
	{ "" }
};

static struct xenbus_driver pvcalls_back_driver = {
	.ids = pvcalls_back_ids,
	.probe = pvcalls_back_probe,
	.remove = pvcalls_back_remove,
	.uevent = pvcalls_back_uevent,
	.otherend_changed = pvcalls_back_changed,
};

static int __init pvcalls_back_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xenbus_register_backend(&pvcalls_back_driver);
	if (ret < 0)
		return ret;

	sema_init(&pvcalls_back_global.frontends_lock, 1);
	INIT_LIST_HEAD(&pvcalls_back_global.frontends);
	return 0;
}
module_init(pvcalls_back_init);

static void __exit pvcalls_back_fin(void)
{
	struct pvcalls_fedata *fedata, *nfedata;

	down(&pvcalls_back_global.frontends_lock);
	list_for_each_entry_safe(fedata, nfedata,
				 &pvcalls_back_global.frontends, list) {
		backend_disconnect(fedata->dev);
	}
	up(&pvcalls_back_global.frontends_lock);

	xenbus_unregister_driver(&pvcalls_back_driver);
}

module_exit(pvcalls_back_fin);

MODULE_DESCRIPTION("Xen PV Calls backend driver");
MODULE_AUTHOR("Stefano Stabellini <sstabellini@kernel.org>");
MODULE_LICENSE("GPL");