/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uio.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/skbuff.h>

#include "vmci_handle_array.h"
#include "vmci_queue_pair.h"
#include "vmci_datagram.h"
#include "vmci_resource.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#include "vmci_route.h"

/*
 * In the following, we will distinguish between two kinds of VMX processes -
 * the ones with versions lower than VMCI_VERSION_NOVMVM that use specialized
 * VMCI page files in the VMX and supporting VM to VM communication, and the
 * newer ones that use the guest memory directly. We will in the following
 * refer to the older VMX versions as old-style VMX'en, and the newer ones as
 * new-style VMX'en.
 *
 * The state transition diagram is as follows (the VMCIQPB_ prefix has been
 * removed for readability) - see below for more details on the transitions:
 *
 *            --------------  NEW  -------------
 *            |                                |
 *           \_/                              \_/
 *     CREATED_NO_MEM <-----------------> CREATED_MEM
 *            |    |                           |
 *            |    o-----------------------o   |
 *            |                            |   |
 *           \_/                          \_/ \_/
 *     ATTACHED_NO_MEM <----------------> ATTACHED_MEM
 *            |                            |   |
 *            |     o----------------------o   |
 *            |     |                          |
 *           \_/   \_/                        \_/
 *     SHUTDOWN_NO_MEM <----------------> SHUTDOWN_MEM
 *            |                                |
 *            |                                |
 *            -------------> gone <-------------
 *
 * In more detail. When a VMCI queue pair is first created, it will be in the
 * VMCIQPB_NEW state. It will then move into one of the following states:
 *
 * - VMCIQPB_CREATED_NO_MEM: this state indicates that either:
 *
 *     - the create was performed by a host endpoint, in which case there is
 *       no backing memory yet.
 *
 *     - the create was initiated by an old-style VMX, that uses
 *       vmci_qp_broker_set_page_store to specify the UVAs of the queue pair at
 *       a later point in time. This state can be distinguished from the one
 *       above by the context ID of the creator. A host side is not allowed to
 *       attach until the page store has been set.
 *
 * - VMCIQPB_CREATED_MEM: this state is the result when the queue pair
 *     is created by a VMX using the queue pair device backend that
 *     sets the UVAs of the queue pair immediately and stores the
 *     information for later attachers. At this point, it is ready for
 *     the host side to attach to it.
 *
 * Once the queue pair is in one of the created states (with the exception of
 * the case mentioned for older VMX'en above), it is possible to attach to the
 * queue pair. Again we have two new states possible:
 *
 * - VMCIQPB_ATTACHED_MEM: this state can be reached through the following
 *   paths:
 *
 *     - from VMCIQPB_CREATED_NO_MEM when a new-style VMX allocates a queue
 *       pair, and attaches to a queue pair previously created by the host side.
 *
 *     - from VMCIQPB_CREATED_MEM when the host side attaches to a queue pair
 *       already created by a guest.
 *
 *     - from VMCIQPB_ATTACHED_NO_MEM, when an old-style VMX calls
 *       vmci_qp_broker_set_page_store (see below).
 *
 * - VMCIQPB_ATTACHED_NO_MEM: If the queue pair already was in the
 *     VMCIQPB_CREATED_NO_MEM state due to a host side create, an old-style
 *     VMX will bring the queue pair into this state. Once
 *     vmci_qp_broker_set_page_store is called to register the user memory,
 *     the VMCIQPB_ATTACHED_MEM state will be entered.
 *
 * From the attached queue pair, the queue pair can enter the shutdown states
 * when either side of the queue pair detaches. If the guest side detaches
 * first, the queue pair will enter the VMCIQPB_SHUTDOWN_NO_MEM state, where
 * the content of the queue pair will no longer be available. If the host
 * side detaches first, the queue pair will either enter the
 * VMCIQPB_SHUTDOWN_MEM, if the guest memory is currently mapped, or
 * VMCIQPB_SHUTDOWN_NO_MEM, if the guest memory is not mapped
 * (e.g., the host detaches while a guest is stunned).
 *
 * New-style VMX'en will also unmap guest memory, if the guest is
 * quiesced, e.g., during a snapshot operation. In that case, the guest
 * memory will no longer be available, and the queue pair will transition from
 * a *_MEM state to a *_NO_MEM state. The VMX may later map the memory once
 * more, in which case the queue pair will transition from the *_NO_MEM state
 * at that point back to the *_MEM state. Note that the *_NO_MEM state may
 * have changed, since the peer may have either attached or detached in the
 * meantime. The values are laid out such that ++ on a state will move from a
 * *_NO_MEM to a *_MEM state, and vice versa.
 */

/* The Kernel specific component of the struct vmci_queue structure. */
struct vmci_queue_kern_if {
        struct mutex __mutex;   /* Protects the queue. */
        struct mutex *mutex;    /* Shared by producer and consumer queues. */
        size_t num_pages;       /* Number of pages incl. header. */
        bool host;              /* Host or guest? */
        union {
                struct {
                        dma_addr_t *pas;
                        void **vas;
                } g;            /* Used by the guest. */
                struct {
                        struct page **page;
                        struct page **header_page;
                } h;            /* Used by the host. */
        } u;
};

/*
 * This structure is opaque to the clients.
 */
struct vmci_qp {
        struct vmci_handle handle;
        struct vmci_queue *produce_q;
        struct vmci_queue *consume_q;
        u64 produce_q_size;
        u64 consume_q_size;
        u32 peer;
        u32 flags;
        u32 priv_flags;
        bool guest_endpoint;
        unsigned int blocked;
        unsigned int generation;
        wait_queue_head_t event;
};

enum qp_broker_state {
        VMCIQPB_NEW,
        VMCIQPB_CREATED_NO_MEM,
        VMCIQPB_CREATED_MEM,
        VMCIQPB_ATTACHED_NO_MEM,
        VMCIQPB_ATTACHED_MEM,
        VMCIQPB_SHUTDOWN_NO_MEM,
        VMCIQPB_SHUTDOWN_MEM,
        VMCIQPB_GONE
};

#define QPBROKERSTATE_HAS_MEM(_qpb) (_qpb->state == VMCIQPB_CREATED_MEM || \
                                     _qpb->state == VMCIQPB_ATTACHED_MEM || \
                                     _qpb->state == VMCIQPB_SHUTDOWN_MEM)
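
/*
 * Editor's illustrative sketch, not part of the driver: the enum values
 * above are laid out so that incrementing a *_NO_MEM state yields the
 * matching *_MEM state and vice versa, as noted at the end of the state
 * diagram comment. A hypothetical helper relying on that property (struct
 * qp_broker_entry is defined below) could look like this:
 */
#if 0
static void qp_example_set_mem_state(struct qp_broker_entry *qpb, bool has_mem)
{
        /* Assumes qpb->state is a CREATED/ATTACHED/SHUTDOWN state. */
        if (has_mem && !QPBROKERSTATE_HAS_MEM(qpb))
                qpb->state++;   /* e.g. ATTACHED_NO_MEM -> ATTACHED_MEM */
        else if (!has_mem && QPBROKERSTATE_HAS_MEM(qpb))
                qpb->state--;   /* e.g. ATTACHED_MEM -> ATTACHED_NO_MEM */
}
#endif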

/*
 * In the queue pair broker, we always use the guest point of view for
 * the produce and consume queue values and references, e.g., the
 * produce queue size stored is the guest's produce queue size. The
 * host endpoint will need to swap these around. The only exception is
 * the local queue pairs on the host, in which case the host endpoint
 * that creates the queue pair will have the right orientation, and
 * the attaching host endpoint will need to swap.
 */
struct qp_entry {
        struct list_head list_item;
        struct vmci_handle handle;
        u32 peer;
        u32 flags;
        u64 produce_size;
        u64 consume_size;
        u32 ref_count;
};

struct qp_broker_entry {
        struct vmci_resource resource;
        struct qp_entry qp;
        u32 create_id;
        u32 attach_id;
        enum qp_broker_state state;
        bool require_trusted_attach;
        bool created_by_trusted;
        bool vmci_page_files;   /* Created by VMX using VMCI page files */
        struct vmci_queue *produce_q;
        struct vmci_queue *consume_q;
        struct vmci_queue_header saved_produce_q;
        struct vmci_queue_header saved_consume_q;
        vmci_event_release_cb wakeup_cb;
        void *client_data;
        void *local_mem;        /* Kernel memory for local queue pair */
};

struct qp_guest_endpoint {
        struct vmci_resource resource;
        struct qp_entry qp;
        u64 num_ppns;
        void *produce_q;
        void *consume_q;
        struct ppn_set ppn_set;
};

struct qp_list {
        struct list_head head;
        struct mutex mutex;     /* Protect queue list. */
};

static struct qp_list qp_broker_list = {
        .head = LIST_HEAD_INIT(qp_broker_list.head),
        .mutex = __MUTEX_INITIALIZER(qp_broker_list.mutex),
};

static struct qp_list qp_guest_endpoints = {
        .head = LIST_HEAD_INIT(qp_guest_endpoints.head),
        .mutex = __MUTEX_INITIALIZER(qp_guest_endpoints.mutex),
};

#define INVALID_VMCI_GUEST_MEM_ID  0
#define QPE_NUM_PAGES(_QPE) ((u32) \
                             (DIV_ROUND_UP(_QPE.produce_size, PAGE_SIZE) + \
                              DIV_ROUND_UP(_QPE.consume_size, PAGE_SIZE) + 2))
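
/*
 * Worked example (editor's addition): with 4 KiB pages, a queue pair
 * entry with produce_size = 64 KiB and consume_size = 256 KiB gives
 * QPE_NUM_PAGES() = 16 + 64 + 2 = 82 pages: the data pages of both
 * queues plus one header page for each queue.
 */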

/*
 * Frees kernel VA space for a given queue and its queue header, and
 * frees physical data pages.
 */
static void qp_free_queue(void *q, u64 size)
{
        struct vmci_queue *queue = q;

        if (queue) {
                u64 i;

                /* Given size does not include header, so add in a page here. */
                for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE) + 1; i++) {
                        dma_free_coherent(&vmci_pdev->dev, PAGE_SIZE,
                                          queue->kernel_if->u.g.vas[i],
                                          queue->kernel_if->u.g.pas[i]);
                }

                vfree(queue);
        }
}

/*
 * Allocates kernel queue pages of specified size with IOMMU mappings,
 * plus space for the queue structure/kernel interface and the queue
 * header.
 */
static void *qp_alloc_queue(u64 size, u32 flags)
{
        u64 i;
        struct vmci_queue *queue;
        size_t pas_size;
        size_t vas_size;
        size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);
        u64 num_pages;

        if (size > SIZE_MAX - PAGE_SIZE)
                return NULL;
        num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
        if (num_pages >
                 (SIZE_MAX - queue_size) /
                 (sizeof(*queue->kernel_if->u.g.pas) +
                  sizeof(*queue->kernel_if->u.g.vas)))
                return NULL;

        pas_size = num_pages * sizeof(*queue->kernel_if->u.g.pas);
        vas_size = num_pages * sizeof(*queue->kernel_if->u.g.vas);
        queue_size += pas_size + vas_size;

        queue = vmalloc(queue_size);
        if (!queue)
                return NULL;

        queue->q_header = NULL;
        queue->saved_header = NULL;
        queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
        queue->kernel_if->mutex = NULL;
        queue->kernel_if->num_pages = num_pages;
        queue->kernel_if->u.g.pas = (dma_addr_t *)(queue->kernel_if + 1);
        queue->kernel_if->u.g.vas =
                (void **)((u8 *)queue->kernel_if->u.g.pas + pas_size);
        queue->kernel_if->host = false;

        for (i = 0; i < num_pages; i++) {
                queue->kernel_if->u.g.vas[i] =
                        dma_alloc_coherent(&vmci_pdev->dev, PAGE_SIZE,
                                           &queue->kernel_if->u.g.pas[i],
                                           GFP_KERNEL);
                if (!queue->kernel_if->u.g.vas[i]) {
                        /* Size excl. the header. */
                        qp_free_queue(queue, i * PAGE_SIZE);
                        return NULL;
                }
        }

        /* Queue header is the first page. */
        queue->q_header = queue->kernel_if->u.g.vas[0];

        return queue;
}
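
/*
 * Minimal usage sketch (editor's addition, hypothetical sizes): pairing
 * qp_alloc_queue() with qp_free_queue(). Both take the payload size only;
 * the extra header page is accounted for internally.
 */
#if 0
static int qp_example_alloc_free(void)
{
        u64 size = 2 * PAGE_SIZE;       /* two payload pages */
        struct vmci_queue *q = qp_alloc_queue(size, 0);

        if (!q)
                return VMCI_ERROR_NO_MEM;
        /* ... q->q_header now points at the first (header) page ... */
        qp_free_queue(q, size);
        return VMCI_SUCCESS;
}
#endif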

/*
 * Copies from a given buffer or iovector to a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_to_queue_iter(struct vmci_queue *queue,
                                   u64 queue_offset,
                                   struct iov_iter *from,
                                   size_t size)
{
        struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
        size_t bytes_copied = 0;

        while (bytes_copied < size) {
                const u64 page_index =
                        (queue_offset + bytes_copied) / PAGE_SIZE;
                const size_t page_offset =
                        (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
                void *va;
                size_t to_copy;

                if (kernel_if->host)
                        va = kmap(kernel_if->u.h.page[page_index]);
                else
                        va = kernel_if->u.g.vas[page_index + 1];
                        /* Skip header. */

                if (size - bytes_copied > PAGE_SIZE - page_offset)
                        /* Enough payload to fill up from this page. */
                        to_copy = PAGE_SIZE - page_offset;
                else
                        to_copy = size - bytes_copied;

                if (!copy_from_iter_full((u8 *)va + page_offset, to_copy,
                                         from)) {
                        if (kernel_if->host)
                                kunmap(kernel_if->u.h.page[page_index]);
                        return VMCI_ERROR_INVALID_ARGS;
                }
                bytes_copied += to_copy;
                if (kernel_if->host)
                        kunmap(kernel_if->u.h.page[page_index]);
        }

        return VMCI_SUCCESS;
}

/*
 * Copies to a given buffer or iovector from a VMCI Queue. Uses
 * kmap()/kunmap() to dynamically map/unmap required portions of the queue
 * by traversing the offset -> page translation structure for the queue.
 * Assumes that offset + size does not wrap around in the queue.
 */
static int qp_memcpy_from_queue_iter(struct iov_iter *to,
                                     const struct vmci_queue *queue,
                                     u64 queue_offset, size_t size)
{
        struct vmci_queue_kern_if *kernel_if = queue->kernel_if;
        size_t bytes_copied = 0;

        while (bytes_copied < size) {
                const u64 page_index =
                        (queue_offset + bytes_copied) / PAGE_SIZE;
                const size_t page_offset =
                        (queue_offset + bytes_copied) & (PAGE_SIZE - 1);
                void *va;
                size_t to_copy;
                int err;

                if (kernel_if->host)
                        va = kmap(kernel_if->u.h.page[page_index]);
                else
                        va = kernel_if->u.g.vas[page_index + 1];
                        /* Skip header. */

                if (size - bytes_copied > PAGE_SIZE - page_offset)
                        /* Enough payload to fill up this page. */
                        to_copy = PAGE_SIZE - page_offset;
                else
                        to_copy = size - bytes_copied;

                err = copy_to_iter((u8 *)va + page_offset, to_copy, to);
                if (err != to_copy) {
                        if (kernel_if->host)
                                kunmap(kernel_if->u.h.page[page_index]);
                        return VMCI_ERROR_INVALID_ARGS;
                }
                bytes_copied += to_copy;
                if (kernel_if->host)
                        kunmap(kernel_if->u.h.page[page_index]);
        }

        return VMCI_SUCCESS;
}
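
/*
 * Sketch (editor's addition): feeding a kernel buffer to the copy routine
 * above via an iov_iter. This assumes the iov_iter_kvec() signature of
 * this file's vintage, where a source iterator is created with direction
 * WRITE.
 */
#if 0
static int qp_example_copy_in(struct vmci_queue *produce_q,
                              void *buf, size_t len)
{
        struct kvec v = { .iov_base = buf, .iov_len = len };
        struct iov_iter from;

        iov_iter_kvec(&from, WRITE, &v, 1, len);
        /* Copy into the queue payload starting at offset 0. */
        return qp_memcpy_to_queue_iter(produce_q, 0, &from, len);
}
#endif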

/*
 * Allocates two lists of PPNs --- one for the pages in the produce queue,
 * and the other for the pages in the consume queue. Initializes the lists
 * of PPNs with the page frame numbers of the KVA for the two queues (and
 * the queue headers).
 */
static int qp_alloc_ppn_set(void *prod_q,
                            u64 num_produce_pages,
                            void *cons_q,
                            u64 num_consume_pages, struct ppn_set *ppn_set)
{
        u64 *produce_ppns;
        u64 *consume_ppns;
        struct vmci_queue *produce_q = prod_q;
        struct vmci_queue *consume_q = cons_q;
        u64 i;

        if (!produce_q || !num_produce_pages || !consume_q ||
            !num_consume_pages || !ppn_set)
                return VMCI_ERROR_INVALID_ARGS;

        if (ppn_set->initialized)
                return VMCI_ERROR_ALREADY_EXISTS;

        produce_ppns =
                kmalloc_array(num_produce_pages, sizeof(*produce_ppns),
                              GFP_KERNEL);
        if (!produce_ppns)
                return VMCI_ERROR_NO_MEM;

        consume_ppns =
                kmalloc_array(num_consume_pages, sizeof(*consume_ppns),
                              GFP_KERNEL);
        if (!consume_ppns) {
                kfree(produce_ppns);
                return VMCI_ERROR_NO_MEM;
        }

        for (i = 0; i < num_produce_pages; i++)
                produce_ppns[i] =
                        produce_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

        for (i = 0; i < num_consume_pages; i++)
                consume_ppns[i] =
                        consume_q->kernel_if->u.g.pas[i] >> PAGE_SHIFT;

        ppn_set->num_produce_pages = num_produce_pages;
        ppn_set->num_consume_pages = num_consume_pages;
        ppn_set->produce_ppns = produce_ppns;
        ppn_set->consume_ppns = consume_ppns;
        ppn_set->initialized = true;
        return VMCI_SUCCESS;
}

/*
 * Frees the two lists of PPNs for a queue pair.
 */
static void qp_free_ppn_set(struct ppn_set *ppn_set)
{
        if (ppn_set->initialized) {
                /* Do not call these functions on NULL inputs. */
                kfree(ppn_set->produce_ppns);
                kfree(ppn_set->consume_ppns);
        }
        memset(ppn_set, 0, sizeof(*ppn_set));
}

/*
 * Populates the list of PPNs in the hypercall structure with the PPNs
 * of the produce queue and the consume queue.
 */
static int qp_populate_ppn_set(u8 *call_buf, const struct ppn_set *ppn_set)
{
        if (vmci_use_ppn64()) {
                memcpy(call_buf, ppn_set->produce_ppns,
                       ppn_set->num_produce_pages *
                       sizeof(*ppn_set->produce_ppns));
                memcpy(call_buf +
                       ppn_set->num_produce_pages *
                       sizeof(*ppn_set->produce_ppns),
                       ppn_set->consume_ppns,
                       ppn_set->num_consume_pages *
                       sizeof(*ppn_set->consume_ppns));
        } else {
                int i;
                u32 *ppns = (u32 *) call_buf;

                for (i = 0; i < ppn_set->num_produce_pages; i++)
                        ppns[i] = (u32) ppn_set->produce_ppns[i];

                ppns = &ppns[ppn_set->num_produce_pages];

                for (i = 0; i < ppn_set->num_consume_pages; i++)
                        ppns[i] = (u32) ppn_set->consume_ppns[i];
        }

        return VMCI_SUCCESS;
}
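
/*
 * Layout sketch (editor's addition): for num_produce_pages = 2 and
 * num_consume_pages = 3 (queue headers included), the hypercall buffer
 * filled in above looks like
 *
 *     [P0][P1][C0][C1][C2]
 *
 * i.e. all produce-queue PPNs first, then all consume-queue PPNs, stored
 * as 8-byte values on PPN64-capable hypervisors and truncated to 4-byte
 * values otherwise.
 */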

/*
 * Allocates kernel VA space of specified size plus space for the queue
 * and kernel interface. This is different from the guest queue allocator,
 * because we do not allocate our own queue header/data pages here but
 * share those of the guest.
 */
static struct vmci_queue *qp_host_alloc_queue(u64 size)
{
        struct vmci_queue *queue;
        size_t queue_page_size;
        u64 num_pages;
        const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));

        if (size > SIZE_MAX - PAGE_SIZE)
                return NULL;
        num_pages = DIV_ROUND_UP(size, PAGE_SIZE) + 1;
        if (num_pages > (SIZE_MAX - queue_size) /
                 sizeof(*queue->kernel_if->u.h.page))
                return NULL;

        queue_page_size = num_pages * sizeof(*queue->kernel_if->u.h.page);

        queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);
        if (queue) {
                queue->q_header = NULL;
                queue->saved_header = NULL;
                queue->kernel_if = (struct vmci_queue_kern_if *)(queue + 1);
                queue->kernel_if->host = true;
                queue->kernel_if->mutex = NULL;
                queue->kernel_if->num_pages = num_pages;
                queue->kernel_if->u.h.header_page =
                        (struct page **)((u8 *)queue + queue_size);
                queue->kernel_if->u.h.page =
                        &queue->kernel_if->u.h.header_page[1];
        }

        return queue;
}

/*
 * Frees kernel memory for a given queue (header plus translation
 * structure).
 */
static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)
{
        kfree(queue);
}

/*
 * Initialize the mutex for the pair of queues. This mutex is used to
 * protect the q_header and the buffer from changing out from under any
 * users of either queue. Of course, it's only any good if the mutexes
 * are actually acquired. Queue structure must lie on non-paged memory
 * or we cannot guarantee access to the mutex.
 */
static void qp_init_queue_mutex(struct vmci_queue *produce_q,
                                struct vmci_queue *consume_q)
{
        /*
         * Only the host queue has shared state - the guest queues do not
         * need to synchronize access using a queue mutex.
         */

        if (produce_q->kernel_if->host) {
                produce_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
                consume_q->kernel_if->mutex = &produce_q->kernel_if->__mutex;
                mutex_init(produce_q->kernel_if->mutex);
        }
}

/*
 * Cleans up the mutex for the pair of queues.
 */
static void qp_cleanup_queue_mutex(struct vmci_queue *produce_q,
                                   struct vmci_queue *consume_q)
{
        if (produce_q->kernel_if->host) {
                produce_q->kernel_if->mutex = NULL;
                consume_q->kernel_if->mutex = NULL;
        }
}

/*
 * Acquire the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_acquire_queue_mutex(struct vmci_queue *queue)
{
        if (queue->kernel_if->host)
                mutex_lock(queue->kernel_if->mutex);
}

/*
 * Release the mutex for the queue. Note that the produce_q and
 * the consume_q share a mutex, so only one of the two needs to
 * be passed in to this routine. Either will work just fine.
 */
static void qp_release_queue_mutex(struct vmci_queue *queue)
{
        if (queue->kernel_if->host)
                mutex_unlock(queue->kernel_if->mutex);
}
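
/*
 * Usage sketch (editor's addition): the helpers above bracket any access
 * to a host-side queue pair's headers; either queue of the pair may be
 * passed since both share one mutex. A hypothetical reader of the
 * producer tail:
 */
#if 0
static u64 qp_example_peek_producer_tail(struct vmci_queue *produce_q)
{
        u64 tail;

        qp_acquire_queue_mutex(produce_q);
        tail = vmci_q_header_producer_tail(produce_q->q_header);
        qp_release_queue_mutex(produce_q);
        return tail;
}
#endif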

/*
 * Helper function to release pages in the PageStoreAttachInfo
 * previously obtained using get_user_pages.
 */
static void qp_release_pages(struct page **pages,
                             u64 num_pages, bool dirty)
{
        int i;

        for (i = 0; i < num_pages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                put_page(pages[i]);
                pages[i] = NULL;
        }
}

/*
 * Lock the user pages referenced by the {produce,consume}Buffer
 * struct into memory and populate the {produce,consume}Pages
 * arrays in the attach structure with them.
 */
static int qp_host_get_user_memory(u64 produce_uva,
                                   u64 consume_uva,
                                   struct vmci_queue *produce_q,
                                   struct vmci_queue *consume_q)
{
        int retval;
        int err = VMCI_SUCCESS;

        retval = get_user_pages_fast((uintptr_t) produce_uva,
                                     produce_q->kernel_if->num_pages, 1,
                                     produce_q->kernel_if->u.h.header_page);
        if (retval < (int)produce_q->kernel_if->num_pages) {
                pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
                         retval);
                qp_release_pages(produce_q->kernel_if->u.h.header_page,
                                 retval, false);
                err = VMCI_ERROR_NO_MEM;
                goto out;
        }

        retval = get_user_pages_fast((uintptr_t) consume_uva,
                                     consume_q->kernel_if->num_pages, 1,
                                     consume_q->kernel_if->u.h.header_page);
        if (retval < (int)consume_q->kernel_if->num_pages) {
                pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
                         retval);
                qp_release_pages(consume_q->kernel_if->u.h.header_page,
                                 retval, false);
                qp_release_pages(produce_q->kernel_if->u.h.header_page,
                                 produce_q->kernel_if->num_pages, false);
                err = VMCI_ERROR_NO_MEM;
        }

 out:
        return err;
}

/*
 * Registers the specification of the user pages used for backing a queue
 * pair. Enough information to map in pages is stored in the OS specific
 * part of the struct vmci_queue structure.
 */
static int qp_host_register_user_memory(struct vmci_qp_page_store *page_store,
                                        struct vmci_queue *produce_q,
                                        struct vmci_queue *consume_q)
{
        u64 produce_uva;
        u64 consume_uva;

        /*
         * The new style and the old style mapping only differ in
         * that we either get a single or two UVAs, so we split the
         * single UVA range at the appropriate spot.
         */
        produce_uva = page_store->pages;
        consume_uva = page_store->pages +
            produce_q->kernel_if->num_pages * PAGE_SIZE;
        return qp_host_get_user_memory(produce_uva, consume_uva, produce_q,
                                       consume_q);
}

/*
 * Releases and removes the references to user pages stored in the attach
 * struct. Pages are released from the page cache and may become
 * swappable again.
 */
static void qp_host_unregister_user_memory(struct vmci_queue *produce_q,
                                           struct vmci_queue *consume_q)
{
        qp_release_pages(produce_q->kernel_if->u.h.header_page,
                         produce_q->kernel_if->num_pages, true);
        memset(produce_q->kernel_if->u.h.header_page, 0,
               sizeof(*produce_q->kernel_if->u.h.header_page) *
               produce_q->kernel_if->num_pages);
        qp_release_pages(consume_q->kernel_if->u.h.header_page,
                         consume_q->kernel_if->num_pages, true);
        memset(consume_q->kernel_if->u.h.header_page, 0,
               sizeof(*consume_q->kernel_if->u.h.header_page) *
               consume_q->kernel_if->num_pages);
}

/*
 * Once qp_host_register_user_memory has been performed on a
 * queue, the queue pair headers can be mapped into the
 * kernel. Once mapped, they must be unmapped with
 * qp_host_unmap_queues prior to calling
 * qp_host_unregister_user_memory.
 * Pages are pinned.
 */
static int qp_host_map_queues(struct vmci_queue *produce_q,
                              struct vmci_queue *consume_q)
{
        int result;

        if (!produce_q->q_header || !consume_q->q_header) {
                struct page *headers[2];

                if (produce_q->q_header != consume_q->q_header)
                        return VMCI_ERROR_QUEUEPAIR_MISMATCH;

                if (produce_q->kernel_if->u.h.header_page == NULL ||
                    *produce_q->kernel_if->u.h.header_page == NULL)
                        return VMCI_ERROR_UNAVAILABLE;

                headers[0] = *produce_q->kernel_if->u.h.header_page;
                headers[1] = *consume_q->kernel_if->u.h.header_page;

                produce_q->q_header = vmap(headers, 2, VM_MAP, PAGE_KERNEL);
                if (produce_q->q_header != NULL) {
                        consume_q->q_header =
                            (struct vmci_queue_header *)((u8 *)
                                                         produce_q->q_header +
                                                         PAGE_SIZE);
                        result = VMCI_SUCCESS;
                } else {
                        pr_warn("vmap failed\n");
                        result = VMCI_ERROR_NO_MEM;
                }
        } else {
                result = VMCI_SUCCESS;
        }

        return result;
}

/*
 * Unmaps previously mapped queue pair headers from the kernel.
 * Pages are unpinned.
 */
static int qp_host_unmap_queues(u32 gid,
                                struct vmci_queue *produce_q,
                                struct vmci_queue *consume_q)
{
        if (produce_q->q_header) {
                if (produce_q->q_header < consume_q->q_header)
                        vunmap(produce_q->q_header);
                else
                        vunmap(consume_q->q_header);

                produce_q->q_header = NULL;
                consume_q->q_header = NULL;
        }

        return VMCI_SUCCESS;
}
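
/*
 * Lifecycle sketch (editor's addition, hypothetical caller): register,
 * map, use, unmap, then unregister, in the order the comments above
 * require.
 */
#if 0
static int qp_example_header_lifecycle(struct vmci_qp_page_store *page_store,
                                       struct vmci_queue *produce_q,
                                       struct vmci_queue *consume_q)
{
        int result = qp_host_register_user_memory(page_store,
                                                  produce_q, consume_q);
        if (result < VMCI_SUCCESS)
                return result;

        result = qp_host_map_queues(produce_q, consume_q);
        if (result == VMCI_SUCCESS) {
                /* ... access produce_q->q_header / consume_q->q_header ... */
                qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
                                     produce_q, consume_q);
        }
        qp_host_unregister_user_memory(produce_q, consume_q);
        return result;
}
#endif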

/*
 * Finds the entry in the list corresponding to a given handle. Assumes
 * that the list is locked.
 */
static struct qp_entry *qp_list_find(struct qp_list *qp_list,
                                     struct vmci_handle handle)
{
        struct qp_entry *entry;

        if (vmci_handle_is_invalid(handle))
                return NULL;

        list_for_each_entry(entry, &qp_list->head, list_item) {
                if (vmci_handle_is_equal(entry->handle, handle))
                        return entry;
        }

        return NULL;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_guest_endpoint *
qp_guest_handle_to_entry(struct vmci_handle handle)
{
        struct qp_guest_endpoint *entry;
        struct qp_entry *qp = qp_list_find(&qp_guest_endpoints, handle);

        entry = qp ? container_of(qp, struct qp_guest_endpoint, qp) : NULL;
        return entry;
}

/*
 * Finds the entry in the list corresponding to a given handle.
 */
static struct qp_broker_entry *
qp_broker_handle_to_entry(struct vmci_handle handle)
{
        struct qp_broker_entry *entry;
        struct qp_entry *qp = qp_list_find(&qp_broker_list, handle);

        entry = qp ? container_of(qp, struct qp_broker_entry, qp) : NULL;
        return entry;
}

/*
 * Dispatches a queue pair event message directly into the local event
 * queue.
 */
static int qp_notify_peer_local(bool attach, struct vmci_handle handle)
{
        u32 context_id = vmci_get_context_id();
        struct vmci_event_qp ev;

        ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER);
        ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                          VMCI_CONTEXT_RESOURCE_ID);
        ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
        ev.msg.event_data.event =
            attach ? VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
        ev.payload.peer_id = context_id;
        ev.payload.handle = handle;

        return vmci_event_dispatch(&ev.msg.hdr);
}

/*
 * Allocates and initializes a qp_guest_endpoint structure.
 * Allocates a queue_pair rid (and handle) iff the given entry has
 * an invalid handle. 0 through VMCI_RESERVED_RESOURCE_ID_MAX
 * are reserved handles. Assumes that the QP list mutex is held
 * by the caller.
 */
static struct qp_guest_endpoint *
qp_guest_endpoint_create(struct vmci_handle handle,
                         u32 peer,
                         u32 flags,
                         u64 produce_size,
                         u64 consume_size,
                         void *produce_q,
                         void *consume_q)
{
        int result;
        struct qp_guest_endpoint *entry;
        /* One page each for the queue headers. */
        const u64 num_ppns = DIV_ROUND_UP(produce_size, PAGE_SIZE) +
                             DIV_ROUND_UP(consume_size, PAGE_SIZE) + 2;

        if (vmci_handle_is_invalid(handle)) {
                u32 context_id = vmci_get_context_id();

                handle = vmci_make_handle(context_id, VMCI_INVALID_ID);
        }

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (entry) {
                entry->qp.peer = peer;
                entry->qp.flags = flags;
                entry->qp.produce_size = produce_size;
                entry->qp.consume_size = consume_size;
                entry->qp.ref_count = 0;
                entry->num_ppns = num_ppns;
                entry->produce_q = produce_q;
                entry->consume_q = consume_q;
                INIT_LIST_HEAD(&entry->qp.list_item);

                /* Add resource obj */
                result = vmci_resource_add(&entry->resource,
                                           VMCI_RESOURCE_TYPE_QPAIR_GUEST,
                                           handle);
                entry->qp.handle = vmci_resource_handle(&entry->resource);
                if ((result != VMCI_SUCCESS) ||
                    qp_list_find(&qp_guest_endpoints, entry->qp.handle)) {
                        pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
                                handle.context, handle.resource, result);
                        kfree(entry);
                        entry = NULL;
                }
        }
        return entry;
}

/*
 * Frees a qp_guest_endpoint structure.
 */
static void qp_guest_endpoint_destroy(struct qp_guest_endpoint *entry)
{
        qp_free_ppn_set(&entry->ppn_set);
        qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
        qp_free_queue(entry->produce_q, entry->qp.produce_size);
        qp_free_queue(entry->consume_q, entry->qp.consume_size);
        /* Unlink from resource hash table and free callback */
        vmci_resource_remove(&entry->resource);

        kfree(entry);
}

/*
 * Helper to make a queue_pairAlloc hypercall when the driver is
 * supporting a guest device.
 */
static int qp_alloc_hypercall(const struct qp_guest_endpoint *entry)
{
        struct vmci_qp_alloc_msg *alloc_msg;
        size_t msg_size;
        size_t ppn_size;
        int result;

        if (!entry || entry->num_ppns <= 2)
                return VMCI_ERROR_INVALID_ARGS;

        ppn_size = vmci_use_ppn64() ? sizeof(u64) : sizeof(u32);
        msg_size = sizeof(*alloc_msg) +
            (size_t) entry->num_ppns * ppn_size;
        alloc_msg = kmalloc(msg_size, GFP_KERNEL);
        if (!alloc_msg)
                return VMCI_ERROR_NO_MEM;

        alloc_msg->hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                              VMCI_QUEUEPAIR_ALLOC);
        alloc_msg->hdr.src = VMCI_ANON_SRC_HANDLE;
        alloc_msg->hdr.payload_size = msg_size - VMCI_DG_HEADERSIZE;
        alloc_msg->handle = entry->qp.handle;
        alloc_msg->peer = entry->qp.peer;
        alloc_msg->flags = entry->qp.flags;
        alloc_msg->produce_size = entry->qp.produce_size;
        alloc_msg->consume_size = entry->qp.consume_size;
        alloc_msg->num_ppns = entry->num_ppns;

        result = qp_populate_ppn_set((u8 *)alloc_msg + sizeof(*alloc_msg),
                                     &entry->ppn_set);
        if (result == VMCI_SUCCESS)
                result = vmci_send_datagram(&alloc_msg->hdr);

        kfree(alloc_msg);

        return result;
}

/*
 * Helper to make a queue_pairDetach hypercall when the driver is
 * supporting a guest device.
 */
static int qp_detatch_hypercall(struct vmci_handle handle)
{
        struct vmci_qp_detach_msg detach_msg;

        detach_msg.hdr.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                              VMCI_QUEUEPAIR_DETACH);
        detach_msg.hdr.src = VMCI_ANON_SRC_HANDLE;
        detach_msg.hdr.payload_size = sizeof(handle);
        detach_msg.handle = handle;

        return vmci_send_datagram(&detach_msg.hdr);
}

/*
 * Adds the given entry to the list. Assumes that the list is locked.
 */
static void qp_list_add_entry(struct qp_list *qp_list, struct qp_entry *entry)
{
        if (entry)
                list_add(&entry->list_item, &qp_list->head);
}

/*
 * Removes the given entry from the list. Assumes that the list is locked.
 */
static void qp_list_remove_entry(struct qp_list *qp_list,
                                 struct qp_entry *entry)
{
        if (entry)
                list_del(&entry->list_item);
}

/*
 * Helper for VMCI queue_pair detach interface. Frees the physical
 * pages for the queue pair.
 */
static int qp_detatch_guest_work(struct vmci_handle handle)
{
        int result;
        struct qp_guest_endpoint *entry;
        u32 ref_count = ~0;     /* To avoid compiler warning below */

        mutex_lock(&qp_guest_endpoints.mutex);

        entry = qp_guest_handle_to_entry(handle);
        if (!entry) {
                mutex_unlock(&qp_guest_endpoints.mutex);
                return VMCI_ERROR_NOT_FOUND;
        }

        if (entry->qp.flags & VMCI_QPFLAG_LOCAL) {
                result = VMCI_SUCCESS;

                if (entry->qp.ref_count > 1) {
                        result = qp_notify_peer_local(false, handle);
                        /*
                         * We can fail to notify a local queuepair
                         * because we can't allocate. We still want
                         * to release the entry if that happens, so
                         * don't bail out yet.
                         */
                }
        } else {
                result = qp_detatch_hypercall(handle);
                if (result < VMCI_SUCCESS) {
                        /*
                         * We failed to notify a non-local queuepair.
                         * That other queuepair might still be
                         * accessing the shared memory, so don't
                         * release the entry yet. It will get cleaned
                         * up by VMCIqueue_pair_Exit() if necessary
                         * (assuming we are going away, otherwise why
                         * did this fail?).
                         */

                        mutex_unlock(&qp_guest_endpoints.mutex);
                        return result;
                }
        }

        /*
         * If we get here then we either failed to notify a local queuepair, or
         * we succeeded in all cases. Release the entry if required.
         */

        entry->qp.ref_count--;
        if (entry->qp.ref_count == 0)
                qp_list_remove_entry(&qp_guest_endpoints, &entry->qp);

        /* If we didn't remove the entry, this could change once we unlock. */
        if (entry)
                ref_count = entry->qp.ref_count;

        mutex_unlock(&qp_guest_endpoints.mutex);

        if (ref_count == 0)
                qp_guest_endpoint_destroy(entry);

        return result;
}

/*
 * This function handles the actual allocation of a VMCI queue
 * pair guest endpoint. Allocates physical pages for the queue
 * pair. It makes OS dependent calls through generic wrappers.
 */
static int qp_alloc_guest_work(struct vmci_handle *handle,
                               struct vmci_queue **produce_q,
                               u64 produce_size,
                               struct vmci_queue **consume_q,
                               u64 consume_size,
                               u32 peer,
                               u32 flags,
                               u32 priv_flags)
{
        const u64 num_produce_pages =
            DIV_ROUND_UP(produce_size, PAGE_SIZE) + 1;
        const u64 num_consume_pages =
            DIV_ROUND_UP(consume_size, PAGE_SIZE) + 1;
        void *my_produce_q = NULL;
        void *my_consume_q = NULL;
        int result;
        struct qp_guest_endpoint *queue_pair_entry = NULL;

        if (priv_flags != VMCI_NO_PRIVILEGE_FLAGS)
                return VMCI_ERROR_NO_ACCESS;

        mutex_lock(&qp_guest_endpoints.mutex);

        queue_pair_entry = qp_guest_handle_to_entry(*handle);
        if (queue_pair_entry) {
                if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
                        /* Local attach case. */
                        if (queue_pair_entry->qp.ref_count > 1) {
                                pr_devel("Error attempting to attach more than once\n");
                                result = VMCI_ERROR_UNAVAILABLE;
                                goto error_keep_entry;
                        }

                        if (queue_pair_entry->qp.produce_size != consume_size ||
                            queue_pair_entry->qp.consume_size !=
                            produce_size ||
                            queue_pair_entry->qp.flags !=
                            (flags & ~VMCI_QPFLAG_ATTACH_ONLY)) {
                                pr_devel("Error mismatched queue pair in local attach\n");
                                result = VMCI_ERROR_QUEUEPAIR_MISMATCH;
                                goto error_keep_entry;
                        }

                        /*
                         * Do a local attach. We swap the consume and
                         * produce queues for the attacher and deliver
                         * an attach event.
                         */
                        result = qp_notify_peer_local(true, *handle);
                        if (result < VMCI_SUCCESS)
                                goto error_keep_entry;

                        my_produce_q = queue_pair_entry->consume_q;
                        my_consume_q = queue_pair_entry->produce_q;
                        goto out;
                }

                result = VMCI_ERROR_ALREADY_EXISTS;
                goto error_keep_entry;
        }

        my_produce_q = qp_alloc_queue(produce_size, flags);
        if (!my_produce_q) {
                pr_warn("Error allocating pages for produce queue\n");
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        my_consume_q = qp_alloc_queue(consume_size, flags);
        if (!my_consume_q) {
                pr_warn("Error allocating pages for consume queue\n");
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
                                                    produce_size, consume_size,
                                                    my_produce_q, my_consume_q);
        if (!queue_pair_entry) {
                pr_warn("Error allocating memory in %s\n", __func__);
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        result = qp_alloc_ppn_set(my_produce_q, num_produce_pages, my_consume_q,
                                  num_consume_pages,
                                  &queue_pair_entry->ppn_set);
        if (result < VMCI_SUCCESS) {
                pr_warn("qp_alloc_ppn_set failed\n");
                goto error;
        }

        /*
         * It's only necessary to notify the host if this queue pair will be
         * attached to from another context.
         */
        if (queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) {
                /* Local create case. */
                u32 context_id = vmci_get_context_id();

                /*
                 * Enforce similar checks on local queue pairs as we
                 * do for regular ones. The handle's context must
                 * match the creator or attacher context id (here they
                 * are both the current context id) and the
                 * attach-only flag cannot exist during create. We
                 * also ensure specified peer is this context or an
                 * invalid one.
                 */
                if (queue_pair_entry->qp.handle.context != context_id ||
                    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
                     queue_pair_entry->qp.peer != context_id)) {
                        result = VMCI_ERROR_NO_ACCESS;
                        goto error;
                }

                if (queue_pair_entry->qp.flags & VMCI_QPFLAG_ATTACH_ONLY) {
                        result = VMCI_ERROR_NOT_FOUND;
                        goto error;
                }
        } else {
                result = qp_alloc_hypercall(queue_pair_entry);
                if (result < VMCI_SUCCESS) {
                        pr_warn("qp_alloc_hypercall result = %d\n", result);
                        goto error;
                }
        }

        qp_init_queue_mutex((struct vmci_queue *)my_produce_q,
                            (struct vmci_queue *)my_consume_q);

        qp_list_add_entry(&qp_guest_endpoints, &queue_pair_entry->qp);

 out:
        queue_pair_entry->qp.ref_count++;
        *handle = queue_pair_entry->qp.handle;
        *produce_q = (struct vmci_queue *)my_produce_q;
        *consume_q = (struct vmci_queue *)my_consume_q;

        /*
         * We should initialize the queue pair header pages on a local
         * queue pair create. For non-local queue pairs, the
         * hypervisor initializes the header pages in the create step.
         */
        if ((queue_pair_entry->qp.flags & VMCI_QPFLAG_LOCAL) &&
            queue_pair_entry->qp.ref_count == 1) {
                vmci_q_header_init((*produce_q)->q_header, *handle);
                vmci_q_header_init((*consume_q)->q_header, *handle);
        }

        mutex_unlock(&qp_guest_endpoints.mutex);

        return VMCI_SUCCESS;

 error:
        mutex_unlock(&qp_guest_endpoints.mutex);
        if (queue_pair_entry) {
                /* The queues will be freed inside the destroy routine. */
                qp_guest_endpoint_destroy(queue_pair_entry);
        } else {
                qp_free_queue(my_produce_q, produce_size);
                qp_free_queue(my_consume_q, consume_size);
        }
        return result;

 error_keep_entry:
        /* This path should only be used when an existing entry was found. */
        mutex_unlock(&qp_guest_endpoints.mutex);
        return result;
}

/*
 * The first endpoint issuing a queue pair allocation will create the state
 * of the queue pair in the queue pair broker.
 *
 * If the creator is a guest, it will associate a VMX virtual address range
 * with the queue pair as specified by the page_store. For compatibility with
 * older VMX'en, that would use a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later using
 * vmci_qp_broker_set_page_store. In that case, a page_store of NULL should be
 * used.
 *
 * If the creator is the host, a page_store of NULL should be used as well,
 * since the host is not able to supply a page store for the queue pair.
 *
 * For older VMX and host callers, the queue pair will be created in the
 * VMCIQPB_CREATED_NO_MEM state, and for current VMX callers, it will be
 * created in the VMCIQPB_CREATED_MEM state.
 */
static int qp_broker_create(struct vmci_handle handle,
                            u32 peer,
                            u32 flags,
                            u32 priv_flags,
                            u64 produce_size,
                            u64 consume_size,
                            struct vmci_qp_page_store *page_store,
                            struct vmci_ctx *context,
                            vmci_event_release_cb wakeup_cb,
                            void *client_data, struct qp_broker_entry **ent)
{
        struct qp_broker_entry *entry = NULL;
        const u32 context_id = vmci_ctx_get_id(context);
        bool is_local = flags & VMCI_QPFLAG_LOCAL;
        int result;
        u64 guest_produce_size;
        u64 guest_consume_size;

        /* Do not create if the caller asked not to. */
        if (flags & VMCI_QPFLAG_ATTACH_ONLY)
                return VMCI_ERROR_NOT_FOUND;

        /*
         * Creator's context ID should match handle's context ID or the creator
         * must allow the context in handle's context ID as the "peer".
         */
        if (handle.context != context_id && handle.context != peer)
                return VMCI_ERROR_NO_ACCESS;

        if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
                return VMCI_ERROR_DST_UNREACHABLE;

        /*
         * Creator's context ID for local queue pairs should match the
         * peer, if a peer is specified.
         */
        if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
                return VMCI_ERROR_NO_ACCESS;

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return VMCI_ERROR_NO_MEM;

        if (vmci_ctx_get_id(context) == VMCI_HOST_CONTEXT_ID && !is_local) {
                /*
                 * The queue pair broker entry stores values from the guest
                 * point of view, so a creating host side endpoint should swap
                 * produce and consume values -- unless it is a local queue
                 * pair, in which case no swapping is necessary, since the local
                 * attacher will swap queues.
                 */

                guest_produce_size = consume_size;
                guest_consume_size = produce_size;
        } else {
                guest_produce_size = produce_size;
                guest_consume_size = consume_size;
        }

        entry->qp.handle = handle;
        entry->qp.peer = peer;
        entry->qp.flags = flags;
        entry->qp.produce_size = guest_produce_size;
        entry->qp.consume_size = guest_consume_size;
        entry->qp.ref_count = 1;
        entry->create_id = context_id;
        entry->attach_id = VMCI_INVALID_ID;
        entry->state = VMCIQPB_NEW;
        entry->require_trusted_attach =
            !!(context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED);
        entry->created_by_trusted =
            !!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED);
        entry->vmci_page_files = false;
        entry->wakeup_cb = wakeup_cb;
        entry->client_data = client_data;
        entry->produce_q = qp_host_alloc_queue(guest_produce_size);
        if (entry->produce_q == NULL) {
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }
        entry->consume_q = qp_host_alloc_queue(guest_consume_size);
        if (entry->consume_q == NULL) {
                result = VMCI_ERROR_NO_MEM;
                goto error;
        }

        qp_init_queue_mutex(entry->produce_q, entry->consume_q);

        INIT_LIST_HEAD(&entry->qp.list_item);

        if (is_local) {
                u8 *tmp;

                entry->local_mem = kcalloc(QPE_NUM_PAGES(entry->qp),
                                           PAGE_SIZE, GFP_KERNEL);
                if (entry->local_mem == NULL) {
                        result = VMCI_ERROR_NO_MEM;
                        goto error;
                }
                entry->state = VMCIQPB_CREATED_MEM;
                entry->produce_q->q_header = entry->local_mem;
                tmp = (u8 *)entry->local_mem + PAGE_SIZE *
                    (DIV_ROUND_UP(entry->qp.produce_size, PAGE_SIZE) + 1);
                entry->consume_q->q_header = (struct vmci_queue_header *)tmp;
        } else if (page_store) {
                /*
                 * The VMX already initialized the queue pair headers, so no
                 * need for the kernel side to do that.
                 */
                result = qp_host_register_user_memory(page_store,
                                                      entry->produce_q,
                                                      entry->consume_q);
                if (result < VMCI_SUCCESS)
                        goto error;

                entry->state = VMCIQPB_CREATED_MEM;
        } else {
                /*
                 * A create without a page_store may be either a host
                 * side create (in which case we are waiting for the
                 * guest side to supply the memory) or an old style
                 * queue pair create (in which case we will expect a
                 * set page store call as the next step).
                 */
                entry->state = VMCIQPB_CREATED_NO_MEM;
        }

        qp_list_add_entry(&qp_broker_list, &entry->qp);
        if (ent != NULL)
                *ent = entry;

        /* Add to resource obj */
        result = vmci_resource_add(&entry->resource,
                                   VMCI_RESOURCE_TYPE_QPAIR_HOST,
                                   handle);
        if (result != VMCI_SUCCESS) {
                pr_warn("Failed to add new resource (handle=0x%x:0x%x), error: %d",
                        handle.context, handle.resource, result);
                goto error;
        }

        entry->qp.handle = vmci_resource_handle(&entry->resource);
        if (is_local) {
                vmci_q_header_init(entry->produce_q->q_header,
                                   entry->qp.handle);
                vmci_q_header_init(entry->consume_q->q_header,
                                   entry->qp.handle);
        }

        vmci_ctx_qp_create(context, entry->qp.handle);

        return VMCI_SUCCESS;

 error:
        if (entry != NULL) {
                qp_host_free_queue(entry->produce_q, guest_produce_size);
                qp_host_free_queue(entry->consume_q, guest_consume_size);
                kfree(entry);
        }

        return result;
}

/*
 * Enqueues an event datagram to notify the peer VM attached to
 * the given queue pair handle about attach/detach event by the
 * given VM. Returns Payload size of datagram enqueued on
 * success, error code otherwise.
 */
static int qp_notify_peer(bool attach,
                          struct vmci_handle handle,
                          u32 my_id,
                          u32 peer_id)
{
        int rv;
        struct vmci_event_qp ev;

        if (vmci_handle_is_invalid(handle) || my_id == VMCI_INVALID_ID ||
            peer_id == VMCI_INVALID_ID)
                return VMCI_ERROR_INVALID_ARGS;

        /*
         * In vmci_ctx_enqueue_datagram() we enforce the upper limit on
         * number of pending events from the hypervisor to a given VM
         * otherwise a rogue VM could do an arbitrary number of attach
         * and detach operations causing memory pressure in the host
         * kernel.
         */

        ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER);
        ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                          VMCI_CONTEXT_RESOURCE_ID);
        ev.msg.hdr.payload_size = sizeof(ev) - sizeof(ev.msg.hdr);
        ev.msg.event_data.event = attach ?
            VMCI_EVENT_QP_PEER_ATTACH : VMCI_EVENT_QP_PEER_DETACH;
        ev.payload.handle = handle;
        ev.payload.peer_id = my_id;

        rv = vmci_datagram_dispatch(VMCI_HYPERVISOR_CONTEXT_ID,
                                    &ev.msg.hdr, false);
        if (rv < VMCI_SUCCESS)
                pr_warn("Failed to enqueue queue_pair %s event datagram for context (ID=0x%x)\n",
                        attach ? "ATTACH" : "DETACH", peer_id);

        return rv;
}

/*
 * The second endpoint issuing a queue pair allocation will attach to
 * the queue pair registered with the queue pair broker.
 *
 * If the attacher is a guest, it will associate a VMX virtual address
 * range with the queue pair as specified by the page_store. At this
 * point, the already attached host endpoint may start using the queue
 * pair, and an attach event is sent to it. For compatibility with
 * older VMX'en, that used a separate step to set the VMX virtual
 * address range, the virtual address range can be registered later
 * using vmci_qp_broker_set_page_store. In that case, a page_store of
 * NULL should be used, and the attach event will be generated once
 * the actual page store has been set.
 *
 * If the attacher is the host, a page_store of NULL should be used as
 * well, since the page store information is already set by the guest.
 *
 * For new VMX and host callers, the queue pair will be moved to the
 * VMCIQPB_ATTACHED_MEM state, and for older VMX callers, it will be
 * moved to the VMCIQPB_ATTACHED_NO_MEM state.
 */
static int qp_broker_attach(struct qp_broker_entry *entry,
                            u32 peer,
                            u32 flags,
                            u32 priv_flags,
                            u64 produce_size,
                            u64 consume_size,
                            struct vmci_qp_page_store *page_store,
                            struct vmci_ctx *context,
                            vmci_event_release_cb wakeup_cb,
                            void *client_data,
                            struct qp_broker_entry **ent)
{
        const u32 context_id = vmci_ctx_get_id(context);
        bool is_local = flags & VMCI_QPFLAG_LOCAL;
        int result;

        if (entry->state != VMCIQPB_CREATED_NO_MEM &&
            entry->state != VMCIQPB_CREATED_MEM)
                return VMCI_ERROR_UNAVAILABLE;

        if (is_local) {
                if (!(entry->qp.flags & VMCI_QPFLAG_LOCAL) ||
                    context_id != entry->create_id) {
                        return VMCI_ERROR_INVALID_ARGS;
                }
        } else if (context_id == entry->create_id ||
                   context_id == entry->attach_id) {
                return VMCI_ERROR_ALREADY_EXISTS;
        }

        if (VMCI_CONTEXT_IS_VM(context_id) &&
            VMCI_CONTEXT_IS_VM(entry->create_id))
                return VMCI_ERROR_DST_UNREACHABLE;

        /*
         * If we are attaching from a restricted context then the queuepair
         * must have been created by a trusted endpoint.
         */
        if ((context->priv_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) &&
            !entry->created_by_trusted)
                return VMCI_ERROR_NO_ACCESS;

        /*
         * If we are attaching to a queuepair that was created by a restricted
         * context then we must be trusted.
         */
        if (entry->require_trusted_attach &&
            (!(priv_flags & VMCI_PRIVILEGE_FLAG_TRUSTED)))
                return VMCI_ERROR_NO_ACCESS;

        /*
         * If the creator specifies VMCI_INVALID_ID in "peer" field, access
         * control check is not performed.
         */
        if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
                return VMCI_ERROR_NO_ACCESS;

        if (entry->create_id == VMCI_HOST_CONTEXT_ID) {
                /*
                 * Do not attach if the caller doesn't support Host Queue Pairs
                 * and a host created this queue pair.
                 */

                if (!vmci_ctx_supports_host_qp(context))
                        return VMCI_ERROR_INVALID_RESOURCE;

        } else if (context_id == VMCI_HOST_CONTEXT_ID) {
                struct vmci_ctx *create_context;
                bool supports_host_qp;

                /*
                 * Do not attach a host to a user created queue pair if that
                 * user doesn't support host queue pair end points.
                 */

                create_context = vmci_ctx_get(entry->create_id);
                supports_host_qp = vmci_ctx_supports_host_qp(create_context);
                vmci_ctx_put(create_context);

                if (!supports_host_qp)
                        return VMCI_ERROR_INVALID_RESOURCE;
        }

        if ((entry->qp.flags & ~VMCI_QP_ASYMM) != (flags & ~VMCI_QP_ASYMM_PEER))
                return VMCI_ERROR_QUEUEPAIR_MISMATCH;

        if (context_id != VMCI_HOST_CONTEXT_ID) {
                /*
                 * The queue pair broker entry stores values from the guest
                 * point of view, so an attaching guest should match the values
                 * stored in the entry.
                 */

                if (entry->qp.produce_size != produce_size ||
                    entry->qp.consume_size != consume_size) {
                        return VMCI_ERROR_QUEUEPAIR_MISMATCH;
                }
        } else if (entry->qp.produce_size != consume_size ||
                   entry->qp.consume_size != produce_size) {
                return VMCI_ERROR_QUEUEPAIR_MISMATCH;
        }

        if (context_id != VMCI_HOST_CONTEXT_ID) {
                /*
                 * If a guest attached to a queue pair, it will supply
                 * the backing memory. If this is a pre NOVMVM vmx,
                 * the backing memory will be supplied by calling
                 * vmci_qp_broker_set_page_store() following the
                 * return of the vmci_qp_broker_alloc() call. If it is
                 * a vmx of version NOVMVM or later, the page store
                 * must be supplied as part of the
                 * vmci_qp_broker_alloc call. Under all circumstances,
                 * the initially created queue pair must not have any
                 * memory associated with it already.
                 */

                if (entry->state != VMCIQPB_CREATED_NO_MEM)
                        return VMCI_ERROR_INVALID_ARGS;

                if (page_store != NULL) {
                        /*
                         * Patch up host state to point to guest
                         * supplied memory. The VMX already
                         * initialized the queue pair headers, so no
                         * need for the kernel side to do that.
                         */

                        result = qp_host_register_user_memory(page_store,
                                                              entry->produce_q,
                                                              entry->consume_q);
                        if (result < VMCI_SUCCESS)
                                return result;

                        entry->state = VMCIQPB_ATTACHED_MEM;
                } else {
                        entry->state = VMCIQPB_ATTACHED_NO_MEM;
                }
        } else if (entry->state == VMCIQPB_CREATED_NO_MEM) {
                /*
                 * The host side is attempting to attach to a queue
                 * pair that doesn't have any memory associated with
                 * it. This must be a pre NOVMVM vmx that hasn't set
                 * the page store information yet, or a quiesced VM.
                 */

                return VMCI_ERROR_UNAVAILABLE;
        } else {
                /* The host side has successfully attached to a queue pair. */
                entry->state = VMCIQPB_ATTACHED_MEM;
        }

        if (entry->state == VMCIQPB_ATTACHED_MEM) {
                result =
                    qp_notify_peer(true, entry->qp.handle, context_id,
                                   entry->create_id);
                if (result < VMCI_SUCCESS)
                        pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
                                entry->create_id, entry->qp.handle.context,
                                entry->qp.handle.resource);
        }

        entry->attach_id = context_id;
        entry->qp.ref_count++;
        if (wakeup_cb) {
                entry->wakeup_cb = wakeup_cb;
                entry->client_data = client_data;
        }

        /*
         * When attaching to local queue pairs, the context already has
         * an entry tracking the queue pair, so don't add another one.
         */
        if (!is_local)
                vmci_ctx_qp_create(context, entry->qp.handle);

        if (ent != NULL)
                *ent = entry;

        return VMCI_SUCCESS;
}

/*
 * queue_pair_Alloc for use when setting up queue pair endpoints
 * on the host.
 */
static int qp_broker_alloc(struct vmci_handle handle,
                           u32 peer,
                           u32 flags,
                           u32 priv_flags,
                           u64 produce_size,
                           u64 consume_size,
                           struct vmci_qp_page_store *page_store,
                           struct vmci_ctx *context,
                           vmci_event_release_cb wakeup_cb,
                           void *client_data,
                           struct qp_broker_entry **ent,
                           bool *swap)
{
        const u32 context_id = vmci_ctx_get_id(context);
        bool create;
        struct qp_broker_entry *entry = NULL;
        bool is_local = flags & VMCI_QPFLAG_LOCAL;
        int result;

        if (vmci_handle_is_invalid(handle) ||
            (flags & ~VMCI_QP_ALL_FLAGS) || is_local ||
            !(produce_size || consume_size) ||
            !context || context_id == VMCI_INVALID_ID ||
            handle.context == VMCI_INVALID_ID) {
                return VMCI_ERROR_INVALID_ARGS;
        }

        if (page_store && !VMCI_QP_PAGESTORE_IS_WELLFORMED(page_store))
                return VMCI_ERROR_INVALID_ARGS;

        /*
         * In the initial argument check, we ensure that non-vmkernel hosts
         * are not allowed to create local queue pairs.
         */

        mutex_lock(&qp_broker_list.mutex);

        if (!is_local && vmci_ctx_qp_exists(context, handle)) {
                pr_devel("Context (ID=0x%x) already attached to queue pair (handle=0x%x:0x%x)\n",
                         context_id, handle.context, handle.resource);
                mutex_unlock(&qp_broker_list.mutex);
                return VMCI_ERROR_ALREADY_EXISTS;
        }

        if (handle.resource != VMCI_INVALID_ID)
                entry = qp_broker_handle_to_entry(handle);

        if (!entry) {
                create = true;
                result =
                    qp_broker_create(handle, peer, flags, priv_flags,
                                     produce_size, consume_size, page_store,
                                     context, wakeup_cb, client_data, ent);
        } else {
                create = false;
                result =
                    qp_broker_attach(entry, peer, flags, priv_flags,
                                     produce_size, consume_size, page_store,
                                     context, wakeup_cb, client_data, ent);
        }

        mutex_unlock(&qp_broker_list.mutex);

        if (swap)
                *swap = (context_id == VMCI_HOST_CONTEXT_ID) &&
                    !(create && is_local);

        return result;
}

/*
 * This function implements the kernel API for allocating a queue
 * pair.
 */
static int qp_alloc_host_work(struct vmci_handle *handle,
                              struct vmci_queue **produce_q,
                              u64 produce_size,
                              struct vmci_queue **consume_q,
                              u64 consume_size,
                              u32 peer,
                              u32 flags,
                              u32 priv_flags,
                              vmci_event_release_cb wakeup_cb,
                              void *client_data)
{
        struct vmci_handle new_handle;
        struct vmci_ctx *context;
        struct qp_broker_entry *entry;
        int result;
        bool swap;

        if (vmci_handle_is_invalid(*handle)) {
                new_handle = vmci_make_handle(
                        VMCI_HOST_CONTEXT_ID, VMCI_INVALID_ID);
        } else {
                new_handle = *handle;
        }

        context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
        entry = NULL;
        result =
            qp_broker_alloc(new_handle, peer, flags, priv_flags,
                            produce_size, consume_size, NULL, context,
                            wakeup_cb, client_data, &entry, &swap);
        if (result == VMCI_SUCCESS) {
                if (swap) {
                        /*
                         * If this is a local queue pair, the attacher
                         * will swap around produce and consume
                         * queues.
                         */

                        *produce_q = entry->consume_q;
                        *consume_q = entry->produce_q;
                } else {
                        *produce_q = entry->produce_q;
                        *consume_q = entry->consume_q;
                }

                *handle = vmci_resource_handle(&entry->resource);
        } else {
                *handle = VMCI_INVALID_HANDLE;
                pr_devel("queue pair broker failed to alloc (result=%d)\n",
                         result);
        }
        vmci_ctx_put(context);
        return result;
}

/*
 * Allocates a VMCI queue_pair. Only checks validity of input
 * arguments. The real work is done in the host or guest
 * specific function.
 */
int vmci_qp_alloc(struct vmci_handle *handle,
                  struct vmci_queue **produce_q,
                  u64 produce_size,
                  struct vmci_queue **consume_q,
                  u64 consume_size,
                  u32 peer,
                  u32 flags,
                  u32 priv_flags,
                  bool guest_endpoint,
                  vmci_event_release_cb wakeup_cb,
                  void *client_data)
{
        if (!handle || !produce_q || !consume_q ||
            (!produce_size && !consume_size) || (flags & ~VMCI_QP_ALL_FLAGS))
                return VMCI_ERROR_INVALID_ARGS;

        if (guest_endpoint) {
                return qp_alloc_guest_work(handle, produce_q,
                                           produce_size, consume_q,
                                           consume_size, peer,
                                           flags, priv_flags);
        } else {
                return qp_alloc_host_work(handle, produce_q,
                                          produce_size, consume_q,
                                          consume_size, peer, flags,
                                          priv_flags, wakeup_cb, client_data);
        }
}
1854
1855/*
1856 * This function implements the host kernel API for detaching from
1857 * a queue pair.
1858 */
1859static int qp_detatch_host_work(struct vmci_handle handle)
1860{
1861 int result;
1862 struct vmci_ctx *context;
1863
1864 context = vmci_ctx_get(VMCI_HOST_CONTEXT_ID);
1865
1866 result = vmci_qp_broker_detach(handle, context);
1867
1868 vmci_ctx_put(context);
1869 return result;
1870}
1871
1872/*
1873 * Detaches from a VMCI queue_pair. Only checks validity of input argument.
1874 * Real work is done in the host or guest specific function.
1875 */
1876static int qp_detatch(struct vmci_handle handle, bool guest_endpoint)
1877{
1878 if (vmci_handle_is_invalid(handle))
1879 return VMCI_ERROR_INVALID_ARGS;
1880
1881 if (guest_endpoint)
1882 return qp_detatch_guest_work(handle);
1883 else
1884 return qp_detatch_host_work(handle);
1885}
1886
1887/*
1888 * Returns the entry from the head of the list. Assumes that the list is
1889 * locked.
1890 */
1891static struct qp_entry *qp_list_get_head(struct qp_list *qp_list)
1892{
1893 if (!list_empty(&qp_list->head)) {
1894 struct qp_entry *entry =
1895 list_first_entry(&qp_list->head, struct qp_entry,
1896 list_item);
1897 return entry;
1898 }
1899
1900 return NULL;
1901}
1902
1903void vmci_qp_broker_exit(void)
1904{
1905 struct qp_entry *entry;
1906 struct qp_broker_entry *be;
1907
1908 mutex_lock(&qp_broker_list.mutex);
1909
1910 while ((entry = qp_list_get_head(&qp_broker_list))) {
1911 be = (struct qp_broker_entry *)entry;
1912
1913 qp_list_remove_entry(&qp_broker_list, entry);
1914 kfree(be);
1915 }
1916
1917 mutex_unlock(&qp_broker_list.mutex);
1918}
1919
1920/*
1921 * Requests that a queue pair be allocated with the VMCI queue
1922 * pair broker. Allocates a queue pair entry if one does not
1923 * exist. Attaches to one if it exists, and retrieves the page
1924 * files backing that queue_pair. Assumes that the queue pair
1925 * broker lock is held.
1926 */
1927int vmci_qp_broker_alloc(struct vmci_handle handle,
1928 u32 peer,
1929 u32 flags,
1930 u32 priv_flags,
1931 u64 produce_size,
1932 u64 consume_size,
1933 struct vmci_qp_page_store *page_store,
1934 struct vmci_ctx *context)
1935{
1936 return qp_broker_alloc(handle, peer, flags, priv_flags,
1937 produce_size, consume_size,
1938 page_store, context, NULL, NULL, NULL, NULL);
1939}
1940
1941/*
1942 * VMX'en with versions lower than VMCI_VERSION_NOVMVM use a separate
1943 * step to add the UVAs of the VMX mapping of the queue pair. This function
1944 * provides backwards compatibility with such VMX'en, and takes care of
1945 * registering the page store for a queue pair previously allocated by the
1946 * VMX during create or attach. This function will move the queue pair state
1947 * either from VMCIQPB_CREATED_NO_MEM to VMCIQPB_CREATED_MEM or from
1948 * VMCIQPB_ATTACHED_NO_MEM to VMCIQPB_ATTACHED_MEM. If moving to the
1949 * attached state with memory, the queue pair is ready to be used by the
1950 * host peer, and an attached event will be generated.
1951 *
1952 * The queue pair broker lock is acquired internally.
1953 *
1954 * This function is only used by the hosted platform, since there is no
1955 * issue with backwards compatibility for vmkernel.
1956 */
1957int vmci_qp_broker_set_page_store(struct vmci_handle handle,
1958 u64 produce_uva,
1959 u64 consume_uva,
1960 struct vmci_ctx *context)
1961{
1962 struct qp_broker_entry *entry;
1963 int result;
1964 const u32 context_id = vmci_ctx_get_id(context);
1965
1966 if (vmci_handle_is_invalid(handle) || !context ||
1967 context_id == VMCI_INVALID_ID)
1968 return VMCI_ERROR_INVALID_ARGS;
1969
1970 /*
1971 * We only support guest to host queue pairs, so the VMX must
1972 * supply UVAs for the mapped page files.
1973 */
1974
1975 if (produce_uva == 0 || consume_uva == 0)
1976 return VMCI_ERROR_INVALID_ARGS;
1977
1978 mutex_lock(&qp_broker_list.mutex);
1979
1980 if (!vmci_ctx_qp_exists(context, handle)) {
1981 pr_warn("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
1982 context_id, handle.context, handle.resource);
1983 result = VMCI_ERROR_NOT_FOUND;
1984 goto out;
1985 }
1986
1987 entry = qp_broker_handle_to_entry(handle);
1988 if (!entry) {
1989 result = VMCI_ERROR_NOT_FOUND;
1990 goto out;
1991 }
1992
1993 /*
1994 * If I'm the owner then I can set the page store.
1995 *
1996 * Or, if a host created the queue_pair and I'm the attached peer
1997 * then I can set the page store.
1998 */
1999 if (entry->create_id != context_id &&
2000 (entry->create_id != VMCI_HOST_CONTEXT_ID ||
2001 entry->attach_id != context_id)) {
2002 result = VMCI_ERROR_QUEUEPAIR_NOTOWNER;
2003 goto out;
2004 }
2005
2006 if (entry->state != VMCIQPB_CREATED_NO_MEM &&
2007 entry->state != VMCIQPB_ATTACHED_NO_MEM) {
2008 result = VMCI_ERROR_UNAVAILABLE;
2009 goto out;
2010 }
2011
2012 result = qp_host_get_user_memory(produce_uva, consume_uva,
2013 entry->produce_q, entry->consume_q);
2014 if (result < VMCI_SUCCESS)
2015 goto out;
2016
2017 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2018 if (result < VMCI_SUCCESS) {
2019 qp_host_unregister_user_memory(entry->produce_q,
2020 entry->consume_q);
2021 goto out;
2022 }
2023
2024 if (entry->state == VMCIQPB_CREATED_NO_MEM)
2025 entry->state = VMCIQPB_CREATED_MEM;
2026 else
2027 entry->state = VMCIQPB_ATTACHED_MEM;
2028
2029 entry->vmci_page_files = true;
2030
2031 if (entry->state == VMCIQPB_ATTACHED_MEM) {
2032 result =
2033 qp_notify_peer(true, handle, context_id, entry->create_id);
2034 if (result < VMCI_SUCCESS) {
2035 pr_warn("Failed to notify peer (ID=0x%x) of attach to queue pair (handle=0x%x:0x%x)\n",
2036 entry->create_id, entry->qp.handle.context,
2037 entry->qp.handle.resource);
2038 }
2039 }
2040
2041 result = VMCI_SUCCESS;
2042 out:
2043 mutex_unlock(&qp_broker_list.mutex);
2044 return result;
2045}
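
/*
 * Illustrative call sequence for an old-style VMX (a sketch only; the
 * real calls are dispatched from the host ioctl layer, and the UVA
 * values below are hypothetical):
 *
 *	result = vmci_qp_broker_alloc(handle, peer, flags, priv_flags,
 *				      produce_size, consume_size,
 *				      NULL, vmx_context);
 *	if (result >= VMCI_SUCCESS)
 *		result = vmci_qp_broker_set_page_store(handle,
 *						       produce_uva,
 *						       consume_uva,
 *						       vmx_context);
 */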
2046
2047/*
2048 * Resets saved queue headers for the given QP broker
2049 * entry. Should be used when guest memory becomes available
2050 * again, or the guest detaches.
2051 */
2052static void qp_reset_saved_headers(struct qp_broker_entry *entry)
2053{
2054 entry->produce_q->saved_header = NULL;
2055 entry->consume_q->saved_header = NULL;
2056}
2057
2058/*
2059 * The main entry point for detaching from a queue pair registered with the
2060 * queue pair broker. If more than one endpoint is attached to the queue
2061 * pair, the first endpoint will mainly decrement a reference count and
2062 * generate a notification to its peer. The last endpoint will clean up
2063 * the queue pair state registered with the broker.
2064 *
2065 * When a guest endpoint detaches, it will unmap and unregister the guest
2066 * memory backing the queue pair. If the host is still attached, it will
2067 * no longer be able to access the queue pair content.
2068 *
2069 * If the queue pair is already in a state where there is no memory
2070 * registered for the queue pair (any *_NO_MEM state), it will transition to
2071 * the VMCIQPB_SHUTDOWN_NO_MEM state. This will also happen, if a guest
2072 * endpoint is the first of two endpoints to detach. If the host endpoint is
2073 * the first out of two to detach, the queue pair will move to the
2074 * VMCIQPB_SHUTDOWN_MEM state.
2075 */
2076int vmci_qp_broker_detach(struct vmci_handle handle, struct vmci_ctx *context)
2077{
2078 struct qp_broker_entry *entry;
2079 const u32 context_id = vmci_ctx_get_id(context);
2080 u32 peer_id;
2081 bool is_local = false;
2082 int result;
2083
2084 if (vmci_handle_is_invalid(handle) || !context ||
2085 context_id == VMCI_INVALID_ID) {
2086 return VMCI_ERROR_INVALID_ARGS;
2087 }
2088
2089 mutex_lock(&qp_broker_list.mutex);
2090
2091 if (!vmci_ctx_qp_exists(context, handle)) {
2092 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2093 context_id, handle.context, handle.resource);
2094 result = VMCI_ERROR_NOT_FOUND;
2095 goto out;
2096 }
2097
2098 entry = qp_broker_handle_to_entry(handle);
2099 if (!entry) {
2100		pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2101 context_id, handle.context, handle.resource);
2102 result = VMCI_ERROR_NOT_FOUND;
2103 goto out;
2104 }
2105
2106 if (context_id != entry->create_id && context_id != entry->attach_id) {
2107 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2108 goto out;
2109 }
2110
2111 if (context_id == entry->create_id) {
2112 peer_id = entry->attach_id;
2113 entry->create_id = VMCI_INVALID_ID;
2114 } else {
2115 peer_id = entry->create_id;
2116 entry->attach_id = VMCI_INVALID_ID;
2117 }
2118 entry->qp.ref_count--;
2119
2120 is_local = entry->qp.flags & VMCI_QPFLAG_LOCAL;
2121
2122 if (context_id != VMCI_HOST_CONTEXT_ID) {
2123 bool headers_mapped;
2124
2125 /*
2126	 * Pre-NOVMVM VMX'en may detach from a queue pair
2127 * before setting the page store, and in that case
2128 * there is no user memory to detach from. Also, more
2129 * recent VMX'en may detach from a queue pair in the
2130 * quiesced state.
2131 */
2132
2133 qp_acquire_queue_mutex(entry->produce_q);
2134 headers_mapped = entry->produce_q->q_header ||
2135 entry->consume_q->q_header;
2136 if (QPBROKERSTATE_HAS_MEM(entry)) {
2137 result =
2138 qp_host_unmap_queues(INVALID_VMCI_GUEST_MEM_ID,
2139 entry->produce_q,
2140 entry->consume_q);
2141 if (result < VMCI_SUCCESS)
2142 pr_warn("Failed to unmap queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2143 handle.context, handle.resource,
2144 result);
2145
2146 qp_host_unregister_user_memory(entry->produce_q,
2147 entry->consume_q);
2148
2149 }
2150
2151 if (!headers_mapped)
2152 qp_reset_saved_headers(entry);
2153
2154 qp_release_queue_mutex(entry->produce_q);
2155
2156 if (!headers_mapped && entry->wakeup_cb)
2157 entry->wakeup_cb(entry->client_data);
2158
2159 } else {
2160 if (entry->wakeup_cb) {
2161 entry->wakeup_cb = NULL;
2162 entry->client_data = NULL;
2163 }
2164 }
2165
2166 if (entry->qp.ref_count == 0) {
2167 qp_list_remove_entry(&qp_broker_list, &entry->qp);
2168
2169 if (is_local)
2170 kfree(entry->local_mem);
2171
2172 qp_cleanup_queue_mutex(entry->produce_q, entry->consume_q);
2173 qp_host_free_queue(entry->produce_q, entry->qp.produce_size);
2174 qp_host_free_queue(entry->consume_q, entry->qp.consume_size);
2175 /* Unlink from resource hash table and free callback */
2176 vmci_resource_remove(&entry->resource);
2177
2178 kfree(entry);
2179
2180 vmci_ctx_qp_destroy(context, handle);
2181 } else {
2182 qp_notify_peer(false, handle, context_id, peer_id);
2183 if (context_id == VMCI_HOST_CONTEXT_ID &&
2184 QPBROKERSTATE_HAS_MEM(entry)) {
2185 entry->state = VMCIQPB_SHUTDOWN_MEM;
2186 } else {
2187 entry->state = VMCIQPB_SHUTDOWN_NO_MEM;
2188 }
2189
2190 if (!is_local)
2191 vmci_ctx_qp_destroy(context, handle);
2192
2193 }
2194 result = VMCI_SUCCESS;
2195 out:
2196 mutex_unlock(&qp_broker_list.mutex);
2197 return result;
2198}
2199
2200/*
2201 * Establishes the necessary mappings for a queue pair given a
2202 * reference to the queue pair guest memory. This is usually
2203 * called when a guest is unquiesced and the VMX is allowed to
2204 * map guest memory once again.
2205 */
2206int vmci_qp_broker_map(struct vmci_handle handle,
2207 struct vmci_ctx *context,
2208 u64 guest_mem)
2209{
2210 struct qp_broker_entry *entry;
2211 const u32 context_id = vmci_ctx_get_id(context);
2212 int result;
2213
2214 if (vmci_handle_is_invalid(handle) || !context ||
2215 context_id == VMCI_INVALID_ID)
2216 return VMCI_ERROR_INVALID_ARGS;
2217
2218 mutex_lock(&qp_broker_list.mutex);
2219
2220 if (!vmci_ctx_qp_exists(context, handle)) {
2221 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2222 context_id, handle.context, handle.resource);
2223 result = VMCI_ERROR_NOT_FOUND;
2224 goto out;
2225 }
2226
2227 entry = qp_broker_handle_to_entry(handle);
2228 if (!entry) {
2229 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2230 context_id, handle.context, handle.resource);
2231 result = VMCI_ERROR_NOT_FOUND;
2232 goto out;
2233 }
2234
2235 if (context_id != entry->create_id && context_id != entry->attach_id) {
2236 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2237 goto out;
2238 }
2239
2240 result = VMCI_SUCCESS;
2241
2242 if (context_id != VMCI_HOST_CONTEXT_ID) {
2243 struct vmci_qp_page_store page_store;
2244
2245 page_store.pages = guest_mem;
2246 page_store.len = QPE_NUM_PAGES(entry->qp);
2247
2248 qp_acquire_queue_mutex(entry->produce_q);
2249 qp_reset_saved_headers(entry);
2250 result =
2251 qp_host_register_user_memory(&page_store,
2252 entry->produce_q,
2253 entry->consume_q);
2254 qp_release_queue_mutex(entry->produce_q);
2255 if (result == VMCI_SUCCESS) {
2256 /* Move state from *_NO_MEM to *_MEM */
2257
2258 entry->state++;
2259
2260 if (entry->wakeup_cb)
2261 entry->wakeup_cb(entry->client_data);
2262 }
2263 }
2264
2265 out:
2266 mutex_unlock(&qp_broker_list.mutex);
2267 return result;
2268}
2269
2270/*
2271 * Saves a snapshot of the queue headers for the given QP broker
2272 * entry. Should be used when guest memory is unmapped.
2273 * Results:
2274 * VMCI_SUCCESS on success, appropriate error code if guest memory
2275 * can't be accessed.
2276 */
2277static int qp_save_headers(struct qp_broker_entry *entry)
2278{
2279 int result;
2280
2281 if (entry->produce_q->saved_header != NULL &&
2282 entry->consume_q->saved_header != NULL) {
2283 /*
2284 * If the headers have already been saved, we don't need to do
2285 * it again, and we don't want to map in the headers
2286 * unnecessarily.
2287 */
2288
2289 return VMCI_SUCCESS;
2290 }
2291
2292	if (!entry->produce_q->q_header ||
2293	    !entry->consume_q->q_header) {
2294 result = qp_host_map_queues(entry->produce_q, entry->consume_q);
2295 if (result < VMCI_SUCCESS)
2296 return result;
2297 }
2298
2299 memcpy(&entry->saved_produce_q, entry->produce_q->q_header,
2300 sizeof(entry->saved_produce_q));
2301 entry->produce_q->saved_header = &entry->saved_produce_q;
2302 memcpy(&entry->saved_consume_q, entry->consume_q->q_header,
2303 sizeof(entry->saved_consume_q));
2304 entry->consume_q->saved_header = &entry->saved_consume_q;
2305
2306 return VMCI_SUCCESS;
2307}
2308
2309/*
2310 * Removes all references to the guest memory of a given queue pair, and
2311 * will move the queue pair from state *_MEM to *_NO_MEM. It is usually
2312 * called when a VM is being quiesced, where access to guest memory
2313 * should be avoided.
2314 */
2315int vmci_qp_broker_unmap(struct vmci_handle handle,
2316 struct vmci_ctx *context,
2317 u32 gid)
2318{
2319 struct qp_broker_entry *entry;
2320 const u32 context_id = vmci_ctx_get_id(context);
2321 int result;
2322
2323 if (vmci_handle_is_invalid(handle) || !context ||
2324 context_id == VMCI_INVALID_ID)
2325 return VMCI_ERROR_INVALID_ARGS;
2326
2327 mutex_lock(&qp_broker_list.mutex);
2328
2329 if (!vmci_ctx_qp_exists(context, handle)) {
2330 pr_devel("Context (ID=0x%x) not attached to queue pair (handle=0x%x:0x%x)\n",
2331 context_id, handle.context, handle.resource);
2332 result = VMCI_ERROR_NOT_FOUND;
2333 goto out;
2334 }
2335
2336 entry = qp_broker_handle_to_entry(handle);
2337 if (!entry) {
2338 pr_devel("Context (ID=0x%x) reports being attached to queue pair (handle=0x%x:0x%x) that isn't present in broker\n",
2339 context_id, handle.context, handle.resource);
2340 result = VMCI_ERROR_NOT_FOUND;
2341 goto out;
2342 }
2343
2344 if (context_id != entry->create_id && context_id != entry->attach_id) {
2345 result = VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2346 goto out;
2347 }
2348
2349 if (context_id != VMCI_HOST_CONTEXT_ID) {
2350 qp_acquire_queue_mutex(entry->produce_q);
2351 result = qp_save_headers(entry);
2352 if (result < VMCI_SUCCESS)
2353 pr_warn("Failed to save queue headers for queue pair (handle=0x%x:0x%x,result=%d)\n",
2354 handle.context, handle.resource, result);
2355
2356 qp_host_unmap_queues(gid, entry->produce_q, entry->consume_q);
2357
2358 /*
2359 * On hosted, when we unmap queue pairs, the VMX will also
2360 * unmap the guest memory, so we invalidate the previously
2361 * registered memory. If the queue pair is mapped again at a
2362 * later point in time, we will need to reregister the user
2363 * memory with a possibly new user VA.
2364 */
2365 qp_host_unregister_user_memory(entry->produce_q,
2366 entry->consume_q);
2367
2368 /*
2369 * Move state from *_MEM to *_NO_MEM.
2370 */
2371 entry->state--;
2372
2373 qp_release_queue_mutex(entry->produce_q);
2374 }
2375
2376 result = VMCI_SUCCESS;
2377
2378 out:
2379 mutex_unlock(&qp_broker_list.mutex);
2380 return result;
2381}
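
/*
 * Map/unmap are paired across guest quiesce and resume (sketch; the
 * "gid" and "guest_mem" values are supplied by the hypervisor side):
 *
 *	vmci_qp_broker_unmap(handle, context, gid);	 moves *_MEM to *_NO_MEM
 *	... guest memory is inaccessible while quiesced ...
 *	vmci_qp_broker_map(handle, context, guest_mem);  moves *_NO_MEM to *_MEM
 */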
2382
2383/*
2384 * Destroys all guest queue pair endpoints. If active guest queue
2385 * pairs still exist, hypercalls to detach from these queue pairs
2386 * will be made. Any failure to detach is silently
2387 * ignored.
2388 */
2389void vmci_qp_guest_endpoints_exit(void)
2390{
2391 struct qp_entry *entry;
2392 struct qp_guest_endpoint *ep;
2393
2394 mutex_lock(&qp_guest_endpoints.mutex);
2395
2396 while ((entry = qp_list_get_head(&qp_guest_endpoints))) {
2397 ep = (struct qp_guest_endpoint *)entry;
2398
2399 /* Don't make a hypercall for local queue_pairs. */
2400 if (!(entry->flags & VMCI_QPFLAG_LOCAL))
2401 qp_detatch_hypercall(entry->handle);
2402
2403 /* We cannot fail the exit, so let's reset ref_count. */
2404 entry->ref_count = 0;
2405 qp_list_remove_entry(&qp_guest_endpoints, entry);
2406
2407 qp_guest_endpoint_destroy(ep);
2408 }
2409
2410 mutex_unlock(&qp_guest_endpoints.mutex);
2411}
2412
2413/*
2414 * Helper routine that will lock the queue pair before subsequent
2415 * operations.
2416 * Note: Non-blocking on the host side is currently only implemented in ESX.
2417 * Since non-blocking isn't yet implemented on the host personality, we
2418 * have no reason to acquire a spin lock, so we simply acquire the
2419 * mutex; callers are always allowed to block.
2420 */
2421static void qp_lock(const struct vmci_qp *qpair)
2422{
2423 qp_acquire_queue_mutex(qpair->produce_q);
2424}
2425
2426/*
2427 * Helper routine that unlocks the queue pair after calling
2428 * qp_lock.
2429 */
2430static void qp_unlock(const struct vmci_qp *qpair)
2431{
2432 qp_release_queue_mutex(qpair->produce_q);
2433}
2434
2435/*
2436 * The queue headers may not be mapped at all times. If a queue is
2437 * currently not mapped, an attempt is made to map it.
2438 */
2439static int qp_map_queue_headers(struct vmci_queue *produce_q,
2440 struct vmci_queue *consume_q)
2441{
2442 int result;
2443
2444	if (!produce_q->q_header || !consume_q->q_header) {
2445 result = qp_host_map_queues(produce_q, consume_q);
2446 if (result < VMCI_SUCCESS)
2447 return (produce_q->saved_header &&
2448 consume_q->saved_header) ?
2449 VMCI_ERROR_QUEUEPAIR_NOT_READY :
2450 VMCI_ERROR_QUEUEPAIR_NOTATTACHED;
2451 }
2452
2453 return VMCI_SUCCESS;
2454}
2455
2456/*
2457 * Helper routine that will retrieve the produce and consume
2458 * headers of a given queue pair. If the guest memory of the
2459 * queue pair is currently not available, the saved queue headers
2460 * will be returned, if these are available.
2461 */
2462static int qp_get_queue_headers(const struct vmci_qp *qpair,
2463 struct vmci_queue_header **produce_q_header,
2464 struct vmci_queue_header **consume_q_header)
2465{
2466 int result;
2467
2468 result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q);
2469 if (result == VMCI_SUCCESS) {
2470 *produce_q_header = qpair->produce_q->q_header;
2471 *consume_q_header = qpair->consume_q->q_header;
2472 } else if (qpair->produce_q->saved_header &&
2473 qpair->consume_q->saved_header) {
2474 *produce_q_header = qpair->produce_q->saved_header;
2475 *consume_q_header = qpair->consume_q->saved_header;
2476 result = VMCI_SUCCESS;
2477 }
2478
2479 return result;
2480}
2481
2482/*
2483 * Callback from VMCI queue pair broker indicating that a queue
2484 * pair that was previously not ready, now either is ready or
2485 * gone forever.
2486 */
2487static int qp_wakeup_cb(void *client_data)
2488{
2489 struct vmci_qp *qpair = (struct vmci_qp *)client_data;
2490
2491 qp_lock(qpair);
2492 while (qpair->blocked > 0) {
2493 qpair->blocked--;
2494 qpair->generation++;
2495 wake_up(&qpair->event);
2496 }
2497 qp_unlock(qpair);
2498
2499 return VMCI_SUCCESS;
2500}
2501
2502/*
2503 * Makes the calling thread wait for the queue pair to become
2504 * ready for host side access. Returns true once the thread is
2505 * woken up after a queue pair state change.
2506 */
2507static bool qp_wait_for_ready_queue(struct vmci_qp *qpair)
2508{
2509 unsigned int generation;
2510
2511 qpair->blocked++;
2512 generation = qpair->generation;
2513 qp_unlock(qpair);
2514 wait_event(qpair->event, generation != qpair->generation);
2515 qp_lock(qpair);
2516
2517 return true;
2518}
2519
2520/*
2521 * Enqueues a given buffer to the produce queue using the provided
2522 * function. As many bytes as possible (space available in the queue)
2523 * are enqueued. Assumes the queue->mutex has been acquired. Returns
2524 * VMCI_ERROR_QUEUEPAIR_NOSPACE if no space was available to enqueue
2525 * data, VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the
2526 * queue (as defined by the queue size), VMCI_ERROR_INVALID_ARGS, if
2527 * an error occurred when accessing the buffer,
2528 * VMCI_ERROR_QUEUEPAIR_NOTATTACHED, if the queue pair pages aren't
2529 * available. Otherwise, the number of bytes written to the queue is
2530 * returned. Updates the tail pointer of the produce queue.
2531 */
2532static ssize_t qp_enqueue_locked(struct vmci_queue *produce_q,
2533 struct vmci_queue *consume_q,
2534 const u64 produce_q_size,
2535 struct iov_iter *from)
2536{
2537 s64 free_space;
2538 u64 tail;
2539 size_t buf_size = iov_iter_count(from);
2540 size_t written;
2541 ssize_t result;
2542
2543 result = qp_map_queue_headers(produce_q, consume_q);
2544 if (unlikely(result != VMCI_SUCCESS))
2545 return result;
2546
2547 free_space = vmci_q_header_free_space(produce_q->q_header,
2548 consume_q->q_header,
2549 produce_q_size);
2550 if (free_space == 0)
2551 return VMCI_ERROR_QUEUEPAIR_NOSPACE;
2552
2553 if (free_space < VMCI_SUCCESS)
2554 return (ssize_t) free_space;
2555
2556 written = (size_t) (free_space > buf_size ? buf_size : free_space);
2557 tail = vmci_q_header_producer_tail(produce_q->q_header);
2558 if (likely(tail + written < produce_q_size)) {
2559 result = qp_memcpy_to_queue_iter(produce_q, tail, from, written);
2560 } else {
2561 /* Tail pointer wraps around. */
2562
2563 const size_t tmp = (size_t) (produce_q_size - tail);
2564
2565 result = qp_memcpy_to_queue_iter(produce_q, tail, from, tmp);
2566 if (result >= VMCI_SUCCESS)
2567 result = qp_memcpy_to_queue_iter(produce_q, 0, from,
2568 written - tmp);
2569 }
2570
2571 if (result < VMCI_SUCCESS)
2572 return result;
2573
2574 vmci_q_header_add_producer_tail(produce_q->q_header, written,
2575 produce_q_size);
2576 return written;
2577}
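
/*
 * Worked example of the wrap-around path above (values illustrative):
 * with produce_q_size = 1024, tail = 1000 and written = 100, the first
 * qp_memcpy_to_queue_iter() copies tmp = 1024 - 1000 = 24 bytes at
 * offset 1000, the second copies the remaining 76 bytes at offset 0,
 * and vmci_q_header_add_producer_tail() leaves the tail at
 * (1000 + 100) % 1024 = 76.
 */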
2578
2579/*
2580 * Dequeues data (if available) from the given consume queue. Writes data
2581 * to the user provided buffer using the provided function.
2582 * Assumes the queue->mutex has been acquired.
2583 * Results:
2584 * VMCI_ERROR_QUEUEPAIR_NODATA if no data was available to dequeue.
2585 * VMCI_ERROR_INVALID_SIZE, if any queue pointer is outside the queue
2586 * (as defined by the queue size).
2587 * VMCI_ERROR_INVALID_ARGS, if an error occurred when accessing the buffer.
2588 * Otherwise the number of bytes dequeued is returned.
2589 * Side effects:
2590 * Updates the head pointer of the consume queue.
2591 */
2592static ssize_t qp_dequeue_locked(struct vmci_queue *produce_q,
2593 struct vmci_queue *consume_q,
2594 const u64 consume_q_size,
2595 struct iov_iter *to,
2596 bool update_consumer)
2597{
2598 size_t buf_size = iov_iter_count(to);
2599 s64 buf_ready;
2600 u64 head;
2601 size_t read;
2602 ssize_t result;
2603
2604 result = qp_map_queue_headers(produce_q, consume_q);
2605 if (unlikely(result != VMCI_SUCCESS))
2606 return result;
2607
2608 buf_ready = vmci_q_header_buf_ready(consume_q->q_header,
2609 produce_q->q_header,
2610 consume_q_size);
2611 if (buf_ready == 0)
2612 return VMCI_ERROR_QUEUEPAIR_NODATA;
2613
2614 if (buf_ready < VMCI_SUCCESS)
2615 return (ssize_t) buf_ready;
2616
2617 read = (size_t) (buf_ready > buf_size ? buf_size : buf_ready);
2618 head = vmci_q_header_consumer_head(produce_q->q_header);
2619 if (likely(head + read < consume_q_size)) {
2620 result = qp_memcpy_from_queue_iter(to, consume_q, head, read);
2621 } else {
2622 /* Head pointer wraps around. */
2623
2624 const size_t tmp = (size_t) (consume_q_size - head);
2625
2626 result = qp_memcpy_from_queue_iter(to, consume_q, head, tmp);
2627 if (result >= VMCI_SUCCESS)
2628 result = qp_memcpy_from_queue_iter(to, consume_q, 0,
2629 read - tmp);
2630
2631 }
2632
2633 if (result < VMCI_SUCCESS)
2634 return result;
2635
2636 if (update_consumer)
2637 vmci_q_header_add_consumer_head(produce_q->q_header,
2638 read, consume_q_size);
2639
2640 return read;
2641}
2642
2643/*
2644 * vmci_qpair_alloc() - Allocates a queue pair.
2645 * @qpair: Pointer for the new vmci_qp struct.
2646 * @handle: Handle to track the resource.
2647 * @produce_qsize: Desired size of the producer queue.
2648 * @consume_qsize: Desired size of the consumer queue.
2649 * @peer: ContextID of the peer.
2650 * @flags: VMCI flags.
2651 * @priv_flags: VMCI privilege flags.
2652 *
2653 * This is the client interface for allocating the memory for a
2654 * vmci_qp structure and then attaching to the underlying
2655 * queue. If an error occurs allocating the memory for the
2656 * vmci_qp structure no attempt is made to attach. If an
2657 * error occurs attaching, then the structure is freed.
2658 */
2659int vmci_qpair_alloc(struct vmci_qp **qpair,
2660 struct vmci_handle *handle,
2661 u64 produce_qsize,
2662 u64 consume_qsize,
2663 u32 peer,
2664 u32 flags,
2665 u32 priv_flags)
2666{
2667 struct vmci_qp *my_qpair;
2668 int retval;
2669 struct vmci_handle src = VMCI_INVALID_HANDLE;
2670 struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
2671 enum vmci_route route;
2672 vmci_event_release_cb wakeup_cb;
2673 void *client_data;
2674
2675 /*
2676 * Restrict the size of a queuepair. The device already
2677 * enforces a limit on the total amount of memory that can be
2678 * allocated to queuepairs for a guest. However, we try to
2679 * allocate this memory before we make the queuepair
2680 * allocation hypercall. On Linux, we allocate each page
2681 * separately, which means rather than fail, the guest will
2682 * thrash while it tries to allocate, and will become
2683 * increasingly unresponsive to the point where it appears to
2684 * be hung. So we place a limit on the size of an individual
2685 * queuepair here, and leave the device to enforce the
2686 * restriction on total queuepair memory. (Note that this
2687 * doesn't prevent all cases; a user with only this much
2688 * physical memory could still get into trouble.) The error
2689 * used by the device is NO_RESOURCES, so use that here too.
2690 */
2691
2692 if (produce_qsize + consume_qsize < max(produce_qsize, consume_qsize) ||
2693 produce_qsize + consume_qsize > VMCI_MAX_GUEST_QP_MEMORY)
2694 return VMCI_ERROR_NO_RESOURCES;
2695
2696 retval = vmci_route(&src, &dst, false, &route);
2697 if (retval < VMCI_SUCCESS)
2698 route = vmci_guest_code_active() ?
2699 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST;
2700
2701 if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) {
2702 pr_devel("NONBLOCK OR PINNED set");
2703 return VMCI_ERROR_INVALID_ARGS;
2704 }
2705
2706 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL);
2707 if (!my_qpair)
2708 return VMCI_ERROR_NO_MEM;
2709
2710 my_qpair->produce_q_size = produce_qsize;
2711 my_qpair->consume_q_size = consume_qsize;
2712 my_qpair->peer = peer;
2713 my_qpair->flags = flags;
2714 my_qpair->priv_flags = priv_flags;
2715
2716 wakeup_cb = NULL;
2717 client_data = NULL;
2718
2719	if (route == VMCI_ROUTE_AS_HOST) {
2720 my_qpair->guest_endpoint = false;
2721 if (!(flags & VMCI_QPFLAG_LOCAL)) {
2722 my_qpair->blocked = 0;
2723 my_qpair->generation = 0;
2724 init_waitqueue_head(&my_qpair->event);
2725 wakeup_cb = qp_wakeup_cb;
2726 client_data = (void *)my_qpair;
2727 }
2728 } else {
2729 my_qpair->guest_endpoint = true;
2730 }
2731
2732 retval = vmci_qp_alloc(handle,
2733 &my_qpair->produce_q,
2734 my_qpair->produce_q_size,
2735 &my_qpair->consume_q,
2736 my_qpair->consume_q_size,
2737 my_qpair->peer,
2738 my_qpair->flags,
2739 my_qpair->priv_flags,
2740 my_qpair->guest_endpoint,
2741 wakeup_cb, client_data);
2742
2743 if (retval < VMCI_SUCCESS) {
2744 kfree(my_qpair);
2745 return retval;
2746 }
2747
2748 *qpair = my_qpair;
2749 my_qpair->handle = *handle;
2750
2751 return retval;
2752}
2753EXPORT_SYMBOL_GPL(vmci_qpair_alloc);
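
/*
 * Example usage (an illustrative sketch, not part of this driver): a
 * host-side client creates a blocking, non-local queue pair with 4 KiB
 * queues and later tears it down. "peer_cid", "buf" and "len" are
 * hypothetical values obtained elsewhere.
 *
 *	struct vmci_qp *qpair;
 *	struct vmci_handle handle = VMCI_INVALID_HANDLE;
 *	int rv;
 *
 *	rv = vmci_qpair_alloc(&qpair, &handle, 4096, 4096,
 *			      peer_cid, 0, VMCI_NO_PRIVILEGE_FLAGS);
 *	if (rv < VMCI_SUCCESS)
 *		return rv;
 *
 *	vmci_qpair_enqueue(qpair, buf, len, 0);
 *	...
 *	vmci_qpair_detach(&qpair);
 */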
2754
2755/*
2756 * vmci_qpair_detach() - Detaches the client from a queue pair.
2757 * @qpair: Reference of a pointer to the qpair struct.
2758 *
2759 * This is the client interface for detaching from a VMCIQPair.
2760 * Note that this routine will free the memory allocated for the
2761 * vmci_qp structure too.
2762 */
2763int vmci_qpair_detach(struct vmci_qp **qpair)
2764{
2765 int result;
2766 struct vmci_qp *old_qpair;
2767
2768 if (!qpair || !(*qpair))
2769 return VMCI_ERROR_INVALID_ARGS;
2770
2771 old_qpair = *qpair;
2772 result = qp_detatch(old_qpair->handle, old_qpair->guest_endpoint);
2773
2774 /*
2775 * The guest can fail to detach for a number of reasons, and
2776 * if it does so, it will cleanup the entry (if there is one).
2777 * The host can fail too, but it won't cleanup the entry
2778 * immediately, it will do that later when the context is
2779 * freed. Either way, we need to release the qpair struct
2780 * here; there isn't much the caller can do, and we don't want
2781 * to leak.
2782 */
2783
2784 memset(old_qpair, 0, sizeof(*old_qpair));
2785 old_qpair->handle = VMCI_INVALID_HANDLE;
2786 old_qpair->peer = VMCI_INVALID_ID;
2787 kfree(old_qpair);
2788 *qpair = NULL;
2789
2790 return result;
2791}
2792EXPORT_SYMBOL_GPL(vmci_qpair_detach);
2793
2794/*
2795 * vmci_qpair_get_produce_indexes() - Retrieves the indexes of the producer.
2796 * @qpair: Pointer to the queue pair struct.
2797 * @producer_tail: Reference used for storing producer tail index.
2798 * @consumer_head: Reference used for storing the consumer head index.
2799 *
2800 * This is the client interface for getting the current indexes of the
2801 * QPair from the point of view of the caller as the producer.
2802 */
2803int vmci_qpair_get_produce_indexes(const struct vmci_qp *qpair,
2804 u64 *producer_tail,
2805 u64 *consumer_head)
2806{
2807 struct vmci_queue_header *produce_q_header;
2808 struct vmci_queue_header *consume_q_header;
2809 int result;
2810
2811 if (!qpair)
2812 return VMCI_ERROR_INVALID_ARGS;
2813
2814 qp_lock(qpair);
2815 result =
2816 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2817 if (result == VMCI_SUCCESS)
2818 vmci_q_header_get_pointers(produce_q_header, consume_q_header,
2819 producer_tail, consumer_head);
2820 qp_unlock(qpair);
2821
2822 if (result == VMCI_SUCCESS &&
2823 ((producer_tail && *producer_tail >= qpair->produce_q_size) ||
2824 (consumer_head && *consumer_head >= qpair->produce_q_size)))
2825 return VMCI_ERROR_INVALID_SIZE;
2826
2827 return result;
2828}
2829EXPORT_SYMBOL_GPL(vmci_qpair_get_produce_indexes);
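
/*
 * Example (sketch): a producer can derive the number of bytes still in
 * flight from these indexes; "produce_qsize" stands for the size that
 * was passed to vmci_qpair_alloc(), and the modular arithmetic mirrors
 * what the queue header helpers do internally.
 *
 *	u64 tail, head, used;
 *
 *	if (vmci_qpair_get_produce_indexes(qpair, &tail, &head) ==
 *	    VMCI_SUCCESS)
 *		used = (tail >= head) ? tail - head
 *				      : produce_qsize - (head - tail);
 */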
2830
2831/*
2832 * vmci_qpair_get_consume_indexes() - Retrieves the indexes of the consumer.
2833 * @qpair: Pointer to the queue pair struct.
2834 * @consumer_tail: Reference used for storing consumer tail index.
2835 * @producer_head: Reference used for storing the producer head index.
2836 *
2837 * This is the client interface for getting the current indexes of the
2838 * QPair from the point of view of the caller as the consumer.
2839 */
2840int vmci_qpair_get_consume_indexes(const struct vmci_qp *qpair,
2841 u64 *consumer_tail,
2842 u64 *producer_head)
2843{
2844 struct vmci_queue_header *produce_q_header;
2845 struct vmci_queue_header *consume_q_header;
2846 int result;
2847
2848 if (!qpair)
2849 return VMCI_ERROR_INVALID_ARGS;
2850
2851 qp_lock(qpair);
2852 result =
2853 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2854 if (result == VMCI_SUCCESS)
2855 vmci_q_header_get_pointers(consume_q_header, produce_q_header,
2856 consumer_tail, producer_head);
2857 qp_unlock(qpair);
2858
2859 if (result == VMCI_SUCCESS &&
2860 ((consumer_tail && *consumer_tail >= qpair->consume_q_size) ||
2861 (producer_head && *producer_head >= qpair->consume_q_size)))
2862 return VMCI_ERROR_INVALID_SIZE;
2863
2864 return result;
2865}
2866EXPORT_SYMBOL_GPL(vmci_qpair_get_consume_indexes);
2867
2868/*
2869 * vmci_qpair_produce_free_space() - Retrieves free space in producer queue.
2870 * @qpair: Pointer to the queue pair struct.
2871 *
2872 * This is the client interface for getting the amount of free
2873 * space in the QPair from the point of view of the caller as
2874 * the producer, which is the common case. Returns < 0 on error;
2875 * otherwise the number of bytes into which data can be enqueued.
2876 */
2877s64 vmci_qpair_produce_free_space(const struct vmci_qp *qpair)
2878{
2879 struct vmci_queue_header *produce_q_header;
2880 struct vmci_queue_header *consume_q_header;
2881 s64 result;
2882
2883 if (!qpair)
2884 return VMCI_ERROR_INVALID_ARGS;
2885
2886 qp_lock(qpair);
2887 result =
2888 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2889 if (result == VMCI_SUCCESS)
2890 result = vmci_q_header_free_space(produce_q_header,
2891 consume_q_header,
2892 qpair->produce_q_size);
2893 else
2894 result = 0;
2895
2896 qp_unlock(qpair);
2897
2898 return result;
2899}
2900EXPORT_SYMBOL_GPL(vmci_qpair_produce_free_space);
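
/*
 * Worked example (values illustrative): the header helpers in
 * vmw_vmci_defs.h keep one byte of the ring unused so that
 * tail == head unambiguously means "empty". With an 8-byte produce
 * queue:
 *
 *	tail = 5, head = 2 (no wrap):  free = 8 - (5 - 2) - 1 = 4
 *	tail = 1, head = 5 (wrapped):  free = 5 - 1 - 1 = 3
 */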
2901
2902/*
2903 * vmci_qpair_consume_free_space() - Retrieves free space in consumer queue.
2904 * @qpair: Pointer to the queue pair struct.
2905 *
2906 * This is the client interface for getting the amount of free
2907 * space in the QPair from the point of view of the caller as
2908 * the consumer, which is not the common case. Returns < 0 on error;
2909 * otherwise the number of bytes into which data can be enqueued.
2910 */
2911s64 vmci_qpair_consume_free_space(const struct vmci_qp *qpair)
2912{
2913 struct vmci_queue_header *produce_q_header;
2914 struct vmci_queue_header *consume_q_header;
2915 s64 result;
2916
2917 if (!qpair)
2918 return VMCI_ERROR_INVALID_ARGS;
2919
2920 qp_lock(qpair);
2921 result =
2922 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2923 if (result == VMCI_SUCCESS)
2924 result = vmci_q_header_free_space(consume_q_header,
2925 produce_q_header,
2926 qpair->consume_q_size);
2927 else
2928 result = 0;
2929
2930 qp_unlock(qpair);
2931
2932 return result;
2933}
2934EXPORT_SYMBOL_GPL(vmci_qpair_consume_free_space);
2935
2936/*
2937 * vmci_qpair_produce_buf_ready() - Gets bytes ready to read from
2938 * producer queue.
2939 * @qpair: Pointer to the queue pair struct.
2940 *
2941 * This is the client interface for getting the amount of
2942 * enqueued data in the QPair from the point of view of the
2943 * caller as the producer, which is not the common case. Returns < 0
2944 * on error; otherwise the number of bytes that may be read.
2945 */
2946s64 vmci_qpair_produce_buf_ready(const struct vmci_qp *qpair)
2947{
2948 struct vmci_queue_header *produce_q_header;
2949 struct vmci_queue_header *consume_q_header;
2950 s64 result;
2951
2952 if (!qpair)
2953 return VMCI_ERROR_INVALID_ARGS;
2954
2955 qp_lock(qpair);
2956 result =
2957 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2958 if (result == VMCI_SUCCESS)
2959 result = vmci_q_header_buf_ready(produce_q_header,
2960 consume_q_header,
2961 qpair->produce_q_size);
2962 else
2963 result = 0;
2964
2965 qp_unlock(qpair);
2966
2967 return result;
2968}
2969EXPORT_SYMBOL_GPL(vmci_qpair_produce_buf_ready);
2970
2971/*
2972 * vmci_qpair_consume_buf_ready() - Gets bytes ready to read from
2973 * consumer queue.
2974 * @qpair: Pointer to the queue pair struct.
2975 *
2976 * This is the client interface for getting the amount of
2977 * enqueued data in the QPair from the point of view of the
2978 * caller as the consumer, which is the normal case. Returns < 0
2979 * on error; otherwise the number of bytes that may be read.
2980 */
2981s64 vmci_qpair_consume_buf_ready(const struct vmci_qp *qpair)
2982{
2983 struct vmci_queue_header *produce_q_header;
2984 struct vmci_queue_header *consume_q_header;
2985 s64 result;
2986
2987 if (!qpair)
2988 return VMCI_ERROR_INVALID_ARGS;
2989
2990 qp_lock(qpair);
2991 result =
2992 qp_get_queue_headers(qpair, &produce_q_header, &consume_q_header);
2993 if (result == VMCI_SUCCESS)
2994 result = vmci_q_header_buf_ready(consume_q_header,
2995 produce_q_header,
2996 qpair->consume_q_size);
2997 else
2998 result = 0;
2999
3000 qp_unlock(qpair);
3001
3002 return result;
3003}
3004EXPORT_SYMBOL_GPL(vmci_qpair_consume_buf_ready);
3005
3006/*
3007 * vmci_qpair_enqueue() - Throw data on the queue.
3008 * @qpair: Pointer to the queue pair struct.
3009 * @buf: Pointer to buffer containing data
3010 * @buf_size: Length of buffer.
3011 * @buf_type: Buffer type (Unused).
3012 *
3013 * This is the client interface for enqueueing data into the queue.
3014 * Returns number of bytes enqueued or < 0 on error.
3015 */
3016ssize_t vmci_qpair_enqueue(struct vmci_qp *qpair,
3017 const void *buf,
3018 size_t buf_size,
3019 int buf_type)
3020{
3021 ssize_t result;
3022 struct iov_iter from;
3023 struct kvec v = {.iov_base = (void *)buf, .iov_len = buf_size};
3024
3025 if (!qpair || !buf)
3026 return VMCI_ERROR_INVALID_ARGS;
3027
3028 iov_iter_kvec(&from, WRITE, &v, 1, buf_size);
3029
3030 qp_lock(qpair);
3031
3032 do {
3033 result = qp_enqueue_locked(qpair->produce_q,
3034 qpair->consume_q,
3035 qpair->produce_q_size,
3036 &from);
3037
3038 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3039 !qp_wait_for_ready_queue(qpair))
3040 result = VMCI_ERROR_WOULD_BLOCK;
3041
3042 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3043
3044 qp_unlock(qpair);
3045
3046 return result;
3047}
3048EXPORT_SYMBOL_GPL(vmci_qpair_enqueue);
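
/*
 * Example producer loop (sketch): vmci_qpair_enqueue() fails with
 * VMCI_ERROR_QUEUEPAIR_NOSPACE rather than blocking on a full ring,
 * so a client typically waits for a peer notification (e.g. a VMCI
 * doorbell or datagram) before retrying. "wait_for_peer()" is a
 * hypothetical helper.
 *
 *	for (;;) {
 *		ssize_t sent = vmci_qpair_enqueue(qpair, buf, len, 0);
 *
 *		if (sent != VMCI_ERROR_QUEUEPAIR_NOSPACE)
 *			return sent;
 *		wait_for_peer();
 *	}
 */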
3049
3050/*
3051 * vmci_qpair_dequeue() - Get data from the queue.
3052 * @qpair: Pointer to the queue pair struct.
3053 * @buf: Pointer to buffer for the data
3054 * @buf_size: Length of buffer.
3055 * @buf_type: Buffer type (Unused).
3056 *
3057 * This is the client interface for dequeueing data from the queue.
3058 * Returns number of bytes dequeued or < 0 on error.
3059 */
3060ssize_t vmci_qpair_dequeue(struct vmci_qp *qpair,
3061 void *buf,
3062 size_t buf_size,
3063 int buf_type)
3064{
3065 ssize_t result;
3066 struct iov_iter to;
3067 struct kvec v = {.iov_base = buf, .iov_len = buf_size};
3068
3069 if (!qpair || !buf)
3070 return VMCI_ERROR_INVALID_ARGS;
3071
3072 iov_iter_kvec(&to, READ, &v, 1, buf_size);
3073
3074 qp_lock(qpair);
3075
3076 do {
3077 result = qp_dequeue_locked(qpair->produce_q,
3078 qpair->consume_q,
3079 qpair->consume_q_size,
3080 &to, true);
3081
3082 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3083 !qp_wait_for_ready_queue(qpair))
3084 result = VMCI_ERROR_WOULD_BLOCK;
3085
3086 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3087
3088 qp_unlock(qpair);
3089
3090 return result;
3091}
3092EXPORT_SYMBOL_GPL(vmci_qpair_dequeue);
3093
3094/*
3095 * vmci_qpair_peek() - Peek at the data in the queue.
3096 * @qpair: Pointer to the queue pair struct.
3097 * @buf: Pointer to buffer for the data
3098 * @buf_size: Length of buffer.
3099 * @buf_type: Buffer type (Unused on Linux).
3100 *
3101 * This is the client interface for peeking into a queue. (I.e.,
3102 * copy data from the queue without updating the head pointer.)
3103 * Returns number of bytes dequeued or < 0 on error.
3104 */
3105ssize_t vmci_qpair_peek(struct vmci_qp *qpair,
3106 void *buf,
3107 size_t buf_size,
3108 int buf_type)
3109{
3110 struct iov_iter to;
3111 struct kvec v = {.iov_base = buf, .iov_len = buf_size};
3112 ssize_t result;
3113
3114 if (!qpair || !buf)
3115 return VMCI_ERROR_INVALID_ARGS;
3116
3117 iov_iter_kvec(&to, READ, &v, 1, buf_size);
3118
3119 qp_lock(qpair);
3120
3121 do {
3122 result = qp_dequeue_locked(qpair->produce_q,
3123 qpair->consume_q,
3124 qpair->consume_q_size,
3125 &to, false);
3126
3127 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3128 !qp_wait_for_ready_queue(qpair))
3129 result = VMCI_ERROR_WOULD_BLOCK;
3130
3131 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3132
3133 qp_unlock(qpair);
3134
3135 return result;
3136}
3137EXPORT_SYMBOL_GPL(vmci_qpair_peek);
3138
3139/*
3140 * vmci_qpair_enquev() - Throw data on the queue using iov.
3141 * @qpair: Pointer to the queue pair struct.
3142 * @msg: Pointer to a msghdr whose msg_iter supplies the data
3143 * @iov_size: Length of the data.
3144 * @buf_type: Buffer type (Unused).
3145 *
3146 * This is the client interface for enqueueing data into the queue.
3147 * This function uses IO vectors to handle the work. Returns number
3148 * of bytes enqueued or < 0 on error.
3149 */
3150ssize_t vmci_qpair_enquev(struct vmci_qp *qpair,
3151 struct msghdr *msg,
3152 size_t iov_size,
3153 int buf_type)
3154{
3155 ssize_t result;
3156
3157 if (!qpair)
3158 return VMCI_ERROR_INVALID_ARGS;
3159
3160 qp_lock(qpair);
3161
3162 do {
3163 result = qp_enqueue_locked(qpair->produce_q,
3164 qpair->consume_q,
3165 qpair->produce_q_size,
3166 &msg->msg_iter);
3167
3168 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3169 !qp_wait_for_ready_queue(qpair))
3170 result = VMCI_ERROR_WOULD_BLOCK;
3171
3172 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3173
3174 qp_unlock(qpair);
3175
3176 return result;
3177}
3178EXPORT_SYMBOL_GPL(vmci_qpair_enquev);
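
/*
 * Example (sketch): driving vmci_qpair_enquev() from a kernel buffer.
 * The names below are hypothetical; in-tree callers such as the vsock
 * transport simply pass through the msghdr they receive from the
 * socket layer.
 *
 *	struct msghdr msg = { };
 *	struct kvec vec = { .iov_base = data, .iov_len = len };
 *
 *	iov_iter_kvec(&msg.msg_iter, WRITE, &vec, 1, len);
 *	written = vmci_qpair_enquev(qpair, &msg, len, 0);
 */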
3179
3180/*
3181 * vmci_qpair_dequev() - Get data from the queue using iov.
3182 * @qpair: Pointer to the queue pair struct.
3183 * @msg: Pointer to a msghdr whose msg_iter receives the data
3184 * @iov_size: Length of the buffer.
3185 * @buf_type: Buffer type (Unused).
3186 *
3187 * This is the client interface for dequeueing data from the queue.
3188 * This function uses IO vectors to handle the work. Returns number
3189 * of bytes dequeued or < 0 on error.
3190 */
3191ssize_t vmci_qpair_dequev(struct vmci_qp *qpair,
3192 struct msghdr *msg,
3193 size_t iov_size,
3194 int buf_type)
3195{
3196 ssize_t result;
3197
3198 if (!qpair)
3199 return VMCI_ERROR_INVALID_ARGS;
3200
3201 qp_lock(qpair);
3202
3203 do {
3204 result = qp_dequeue_locked(qpair->produce_q,
3205 qpair->consume_q,
3206 qpair->consume_q_size,
3207 &msg->msg_iter, true);
3208
3209 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3210 !qp_wait_for_ready_queue(qpair))
3211 result = VMCI_ERROR_WOULD_BLOCK;
3212
3213 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3214
3215 qp_unlock(qpair);
3216
3217 return result;
3218}
3219EXPORT_SYMBOL_GPL(vmci_qpair_dequev);
3220
3221/*
3222 * vmci_qpair_peekv() - Peek at the data in the queue using iov.
3223 * @qpair: Pointer to the queue pair struct.
3224 * @msg: Pointer to a msghdr whose msg_iter receives the data
3225 * @iov_size: Length of the buffer.
3226 * @buf_type: Buffer type (Unused on Linux).
3227 *
3228 * This is the client interface for peeking into a queue. (I.e.,
3229 * copy data from the queue without updating the head pointer.)
3230 * This function uses IO vectors to handle the work. Returns number
3231 * of bytes peeked or < 0 on error.
3232 */
3233ssize_t vmci_qpair_peekv(struct vmci_qp *qpair,
3234 struct msghdr *msg,
3235 size_t iov_size,
3236 int buf_type)
3237{
3238 ssize_t result;
3239
3240 if (!qpair)
3241 return VMCI_ERROR_INVALID_ARGS;
3242
3243 qp_lock(qpair);
3244
3245 do {
3246 result = qp_dequeue_locked(qpair->produce_q,
3247 qpair->consume_q,
3248 qpair->consume_q_size,
3249 &msg->msg_iter, false);
3250
3251 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY &&
3252 !qp_wait_for_ready_queue(qpair))
3253 result = VMCI_ERROR_WOULD_BLOCK;
3254
3255 } while (result == VMCI_ERROR_QUEUEPAIR_NOT_READY);
3256
3257 qp_unlock(qpair);
3258 return result;
3259}
3260EXPORT_SYMBOL_GPL(vmci_qpair_peekv);
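
/*
 * Example (sketch): a client exchanging framed messages can peek at a
 * fixed-size header to learn the payload length without consuming it,
 * and dequeue only once the whole frame is available. "struct my_hdr"
 * and its "len" field are hypothetical.
 *
 *	struct my_hdr hdr;
 *
 *	if (vmci_qpair_peek(qpair, &hdr, sizeof(hdr), 0) == sizeof(hdr) &&
 *	    vmci_qpair_consume_buf_ready(qpair) >= sizeof(hdr) + hdr.len)
 *		vmci_qpair_dequeue(qpair, frame, sizeof(hdr) + hdr.len, 0);
 */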