Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

VMCI: Remove non-blocking/pinned queuepair support

We added this for a special case that doesn't exist on Linux. Remove
the non-blocking/pinned queuepair code and simplify the driver in
preparation for adding virtual IOMMU support.

Acked-by: Aditya Sarwade <asarwade@vmware.com>
Signed-off-by: Andy King <acking@vmware.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Andy King; committed by Greg Kroah-Hartman
45412bef 440ab3b3

+22 -145
+22 -127
drivers/misc/vmw_vmci/vmci_queue_pair.c
··· 148 148 struct vmci_queue_kern_if { 149 149 struct page **page; 150 150 struct page **header_page; 151 - void *va; 152 151 struct mutex __mutex; /* Protects the queue. */ 153 152 struct mutex *mutex; /* Shared by producer and consumer queues. */ 154 153 bool host; 155 154 size_t num_pages; 156 - bool mapped; 157 155 }; 158 156 159 157 /* ··· 265 267 if (queue) { 266 268 u64 i = DIV_ROUND_UP(size, PAGE_SIZE); 267 269 268 - if (queue->kernel_if->mapped) { 269 - vunmap(queue->kernel_if->va); 270 - queue->kernel_if->va = NULL; 271 - } 272 - 273 270 while (i) 274 271 __free_page(queue->kernel_if->page[--i]); 275 272 ··· 304 311 queue->kernel_if->header_page = NULL; /* Unused in guest. */ 305 312 queue->kernel_if->page = (struct page **)(queue->kernel_if + 1); 306 313 queue->kernel_if->host = false; 307 - queue->kernel_if->va = NULL; 308 - queue->kernel_if->mapped = false; 309 314 310 315 for (i = 0; i < num_data_pages; i++) { 311 316 queue->kernel_if->page[i] = alloc_pages(GFP_KERNEL, 0); 312 317 if (!queue->kernel_if->page[i]) 313 318 goto fail; 314 - } 315 - 316 - if (vmci_qp_pinned(flags)) { 317 - queue->kernel_if->va = 318 - vmap(queue->kernel_if->page, num_data_pages, VM_MAP, 319 - PAGE_KERNEL); 320 - if (!queue->kernel_if->va) 321 - goto fail; 322 - 323 - queue->kernel_if->mapped = true; 324 319 } 325 320 326 321 return (void *)queue; ··· 340 359 void *va; 341 360 size_t to_copy; 342 361 343 - if (!kernel_if->mapped) 344 - va = kmap(kernel_if->page[page_index]); 345 - else 346 - va = (void *)((u8 *)kernel_if->va + 347 - (page_index * PAGE_SIZE)); 362 + va = kmap(kernel_if->page[page_index]); 348 363 349 364 if (size - bytes_copied > PAGE_SIZE - page_offset) 350 365 /* Enough payload to fill up from this page. 
*/ ··· 365 388 } 366 389 367 390 bytes_copied += to_copy; 368 - if (!kernel_if->mapped) 369 - kunmap(kernel_if->page[page_index]); 391 + kunmap(kernel_if->page[page_index]); 370 392 } 371 393 372 394 return VMCI_SUCCESS; ··· 393 417 void *va; 394 418 size_t to_copy; 395 419 396 - if (!kernel_if->mapped) 397 - va = kmap(kernel_if->page[page_index]); 398 - else 399 - va = (void *)((u8 *)kernel_if->va + 400 - (page_index * PAGE_SIZE)); 420 + va = kmap(kernel_if->page[page_index]); 401 421 402 422 if (size - bytes_copied > PAGE_SIZE - page_offset) 403 423 /* Enough payload to fill up this page. */ ··· 418 446 } 419 447 420 448 bytes_copied += to_copy; 421 - if (!kernel_if->mapped) 422 - kunmap(kernel_if->page[page_index]); 449 + kunmap(kernel_if->page[page_index]); 423 450 } 424 451 425 452 return VMCI_SUCCESS; ··· 605 634 queue->kernel_if->header_page = 606 635 (struct page **)((u8 *)queue + queue_size); 607 636 queue->kernel_if->page = &queue->kernel_if->header_page[1]; 608 - queue->kernel_if->va = NULL; 609 - queue->kernel_if->mapped = false; 610 637 } 611 638 612 639 return queue; ··· 1689 1720 if (result < VMCI_SUCCESS) 1690 1721 return result; 1691 1722 1692 - /* 1693 - * Preemptively load in the headers if non-blocking to 1694 - * prevent blocking later. 1695 - */ 1696 - if (entry->qp.flags & VMCI_QPFLAG_NONBLOCK) { 1697 - result = qp_host_map_queues(entry->produce_q, 1698 - entry->consume_q); 1699 - if (result < VMCI_SUCCESS) { 1700 - qp_host_unregister_user_memory( 1701 - entry->produce_q, 1702 - entry->consume_q); 1703 - return result; 1704 - } 1705 - } 1706 - 1707 1723 entry->state = VMCIQPB_ATTACHED_MEM; 1708 1724 } else { 1709 1725 entry->state = VMCIQPB_ATTACHED_NO_MEM; ··· 1703 1749 1704 1750 return VMCI_ERROR_UNAVAILABLE; 1705 1751 } else { 1706 - /* 1707 - * For non-blocking queue pairs, we cannot rely on 1708 - * enqueue/dequeue to map in the pages on the 1709 - * host-side, since it may block, so we make an 1710 - * attempt here. 
1711 - */ 1712 - 1713 - if (flags & VMCI_QPFLAG_NONBLOCK) { 1714 - result = 1715 - qp_host_map_queues(entry->produce_q, 1716 - entry->consume_q); 1717 - if (result < VMCI_SUCCESS) 1718 - return result; 1719 - 1720 - entry->qp.flags |= flags & 1721 - (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED); 1722 - } 1723 - 1724 1752 /* The host side has successfully attached to a queue pair. */ 1725 1753 entry->state = VMCIQPB_ATTACHED_MEM; 1726 1754 } ··· 2479 2543 * Since non-blocking isn't yet implemented on the host personality we 2480 2544 * have no reason to acquire a spin lock. So to avoid the use of an 2481 2545 * unnecessary lock only acquire the mutex if we can block. 2482 - * Note: It is assumed that QPFLAG_PINNED implies QPFLAG_NONBLOCK. Therefore 2483 - * we can use the same locking function for access to both the queue 2484 - * and the queue headers as it is the same logic. Assert this behvior. 2485 2546 */ 2486 2547 static void qp_lock(const struct vmci_qp *qpair) 2487 2548 { 2488 - if (vmci_can_block(qpair->flags)) 2489 - qp_acquire_queue_mutex(qpair->produce_q); 2549 + qp_acquire_queue_mutex(qpair->produce_q); 2490 2550 } 2491 2551 2492 2552 /* 2493 2553 * Helper routine that unlocks the queue pair after calling 2494 - * qp_lock. Respects non-blocking and pinning flags. 2554 + * qp_lock. 2495 2555 */ 2496 2556 static void qp_unlock(const struct vmci_qp *qpair) 2497 2557 { 2498 - if (vmci_can_block(qpair->flags)) 2499 - qp_release_queue_mutex(qpair->produce_q); 2558 + qp_release_queue_mutex(qpair->produce_q); 2500 2559 } 2501 2560 2502 2561 /* ··· 2499 2568 * currently not mapped, it will be attempted to do so. 
2500 2569 */ 2501 2570 static int qp_map_queue_headers(struct vmci_queue *produce_q, 2502 - struct vmci_queue *consume_q, 2503 - bool can_block) 2571 + struct vmci_queue *consume_q) 2504 2572 { 2505 2573 int result; 2506 2574 2507 2575 if (NULL == produce_q->q_header || NULL == consume_q->q_header) { 2508 - if (can_block) 2509 - result = qp_host_map_queues(produce_q, consume_q); 2510 - else 2511 - result = VMCI_ERROR_QUEUEPAIR_NOT_READY; 2512 - 2576 + result = qp_host_map_queues(produce_q, consume_q); 2513 2577 if (result < VMCI_SUCCESS) 2514 2578 return (produce_q->saved_header && 2515 2579 consume_q->saved_header) ? ··· 2527 2601 { 2528 2602 int result; 2529 2603 2530 - result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q, 2531 - vmci_can_block(qpair->flags)); 2604 + result = qp_map_queue_headers(qpair->produce_q, qpair->consume_q); 2532 2605 if (result == VMCI_SUCCESS) { 2533 2606 *produce_q_header = qpair->produce_q->q_header; 2534 2607 *consume_q_header = qpair->consume_q->q_header; ··· 2570 2645 { 2571 2646 unsigned int generation; 2572 2647 2573 - if (qpair->flags & VMCI_QPFLAG_NONBLOCK) 2574 - return false; 2575 - 2576 2648 qpair->blocked++; 2577 2649 generation = qpair->generation; 2578 2650 qp_unlock(qpair); ··· 2596 2674 const u64 produce_q_size, 2597 2675 const void *buf, 2598 2676 size_t buf_size, 2599 - vmci_memcpy_to_queue_func memcpy_to_queue, 2600 - bool can_block) 2677 + vmci_memcpy_to_queue_func memcpy_to_queue) 2601 2678 { 2602 2679 s64 free_space; 2603 2680 u64 tail; 2604 2681 size_t written; 2605 2682 ssize_t result; 2606 2683 2607 - result = qp_map_queue_headers(produce_q, consume_q, can_block); 2684 + result = qp_map_queue_headers(produce_q, consume_q); 2608 2685 if (unlikely(result != VMCI_SUCCESS)) 2609 2686 return result; 2610 2687 ··· 2658 2737 void *buf, 2659 2738 size_t buf_size, 2660 2739 vmci_memcpy_from_queue_func memcpy_from_queue, 2661 - bool update_consumer, 2662 - bool can_block) 2740 + bool update_consumer) 2663 
2741 { 2664 2742 s64 buf_ready; 2665 2743 u64 head; 2666 2744 size_t read; 2667 2745 ssize_t result; 2668 2746 2669 - result = qp_map_queue_headers(produce_q, consume_q, can_block); 2747 + result = qp_map_queue_headers(produce_q, consume_q); 2670 2748 if (unlikely(result != VMCI_SUCCESS)) 2671 2749 return result; 2672 2750 ··· 2762 2842 route = vmci_guest_code_active() ? 2763 2843 VMCI_ROUTE_AS_GUEST : VMCI_ROUTE_AS_HOST; 2764 2844 2765 - /* If NONBLOCK or PINNED is set, we better be the guest personality. */ 2766 - if ((!vmci_can_block(flags) || vmci_qp_pinned(flags)) && 2767 - VMCI_ROUTE_AS_GUEST != route) { 2768 - pr_devel("Not guest personality w/ NONBLOCK OR PINNED set"); 2845 + if (flags & (VMCI_QPFLAG_NONBLOCK | VMCI_QPFLAG_PINNED)) { 2846 + pr_devel("NONBLOCK OR PINNED set"); 2769 2847 return VMCI_ERROR_INVALID_ARGS; 2770 - } 2771 - 2772 - /* 2773 - * Limit the size of pinned QPs and check sanity. 2774 - * 2775 - * Pinned pages implies non-blocking mode. Mutexes aren't acquired 2776 - * when the NONBLOCK flag is set in qpair code; and also should not be 2777 - * acquired when the PINNED flagged is set. Since pinning pages 2778 - * implies we want speed, it makes no sense not to have NONBLOCK 2779 - * set if PINNED is set. Hence enforce this implication. 
2780 - */ 2781 - if (vmci_qp_pinned(flags)) { 2782 - if (vmci_can_block(flags)) { 2783 - pr_err("Attempted to enable pinning w/o non-blocking"); 2784 - return VMCI_ERROR_INVALID_ARGS; 2785 - } 2786 - 2787 - if (produce_qsize + consume_qsize > VMCI_MAX_PINNED_QP_MEMORY) 2788 - return VMCI_ERROR_NO_RESOURCES; 2789 2848 } 2790 2849 2791 2850 my_qpair = kzalloc(sizeof(*my_qpair), GFP_KERNEL); ··· 3094 3195 qpair->consume_q, 3095 3196 qpair->produce_q_size, 3096 3197 buf, buf_size, 3097 - qp_memcpy_to_queue, 3098 - vmci_can_block(qpair->flags)); 3198 + qp_memcpy_to_queue); 3099 3199 3100 3200 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3101 3201 !qp_wait_for_ready_queue(qpair)) ··· 3135 3237 qpair->consume_q, 3136 3238 qpair->consume_q_size, 3137 3239 buf, buf_size, 3138 - qp_memcpy_from_queue, true, 3139 - vmci_can_block(qpair->flags)); 3240 + qp_memcpy_from_queue, true); 3140 3241 3141 3242 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3142 3243 !qp_wait_for_ready_queue(qpair)) ··· 3177 3280 qpair->consume_q, 3178 3281 qpair->consume_q_size, 3179 3282 buf, buf_size, 3180 - qp_memcpy_from_queue, false, 3181 - vmci_can_block(qpair->flags)); 3283 + qp_memcpy_from_queue, false); 3182 3284 3183 3285 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3184 3286 !qp_wait_for_ready_queue(qpair)) ··· 3219 3323 qpair->consume_q, 3220 3324 qpair->produce_q_size, 3221 3325 iov, iov_size, 3222 - qp_memcpy_to_queue_iov, 3223 - vmci_can_block(qpair->flags)); 3326 + qp_memcpy_to_queue_iov); 3224 3327 3225 3328 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3226 3329 !qp_wait_for_ready_queue(qpair)) ··· 3262 3367 qpair->consume_q_size, 3263 3368 iov, iov_size, 3264 3369 qp_memcpy_from_queue_iov, 3265 - true, vmci_can_block(qpair->flags)); 3370 + true); 3266 3371 3267 3372 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3268 3373 !qp_wait_for_ready_queue(qpair)) ··· 3306 3411 qpair->consume_q_size, 3307 3412 iov, iov_size, 3308 3413 qp_memcpy_from_queue_iov, 3309 - false, 
vmci_can_block(qpair->flags)); 3414 + false); 3310 3415 3311 3416 if (result == VMCI_ERROR_QUEUEPAIR_NOT_READY && 3312 3417 !qp_wait_for_ready_queue(qpair))
-18
drivers/misc/vmw_vmci/vmci_queue_pair.h
··· 146 146 return page_store->len >= 2; 147 147 } 148 148 149 - /* 150 - * Helper function to check if the non-blocking flag 151 - * is set for a given queue pair. 152 - */ 153 - static inline bool vmci_can_block(u32 flags) 154 - { 155 - return !(flags & VMCI_QPFLAG_NONBLOCK); 156 - } 157 - 158 - /* 159 - * Helper function to check if the queue pair is pinned 160 - * into memory. 161 - */ 162 - static inline bool vmci_qp_pinned(u32 flags) 163 - { 164 - return flags & VMCI_QPFLAG_PINNED; 165 - } 166 - 167 149 void vmci_qp_broker_exit(void); 168 150 int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer, 169 151 u32 flags, u32 priv_flags,