Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

virt: Add vboxguest driver for Virtual Box Guest integration

This commit adds a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add support for
Virtual Box Guest integration features such as copy-and-paste, seamless
mode and OpenGL pass-through.

This driver also offers vboxguest IPC functionality, which is needed
by the vboxfs driver that provides folder-sharing support.

Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Reviewed-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

authored by

Hans de Goede and committed by
Greg Kroah-Hartman
0ba002bc 579db9d4

+2253
+1
drivers/virt/Kconfig
··· 30 30 4) A kernel interface for receiving callbacks when a managed 31 31 partition shuts down. 32 32 33 + source "drivers/virt/vboxguest/Kconfig" 33 34 endif
+1
drivers/virt/Makefile
··· 3 3 # 4 4 5 5 obj-$(CONFIG_FSL_HV_MANAGER) += fsl_hypervisor.o 6 + obj-y += vboxguest/
+18
drivers/virt/vboxguest/Kconfig
··· 1 + config VBOXGUEST 2 + tristate "Virtual Box Guest integration support" 3 + depends on X86 && PCI && INPUT 4 + help 5 + This is a driver for the Virtual Box Guest PCI device used in 6 + Virtual Box virtual machines. Enabling this driver will add 7 + support for Virtual Box Guest integration features such as 8 + copy-and-paste, seamless mode and OpenGL pass-through. 9 + 10 + This driver also offers vboxguest IPC functionality which is needed 11 + for the vboxfs driver which offers folder sharing support. 12 + 13 + If you enable this driver you should also enable the VBOXVIDEO option. 14 + 15 + Although it is possible to build this module in, it is advised 16 + to build this driver as a module, so that it can be updated 17 + independently of the kernel. Select M to build this driver as a 18 + module.
+3
drivers/virt/vboxguest/Makefile
··· 1 + vboxguest-y := vboxguest_linux.o vboxguest_core.o vboxguest_utils.o 2 + 3 + obj-$(CONFIG_VBOXGUEST) += vboxguest.o
+1571
drivers/virt/vboxguest/vboxguest_core.c
··· 1 + /* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ 2 + /* 3 + * vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn. 4 + * 5 + * Copyright (C) 2007-2016 Oracle Corporation 6 + */ 7 + 8 + #include <linux/device.h> 9 + #include <linux/mm.h> 10 + #include <linux/sched.h> 11 + #include <linux/sizes.h> 12 + #include <linux/slab.h> 13 + #include <linux/vbox_err.h> 14 + #include <linux/vbox_utils.h> 15 + #include <linux/vmalloc.h> 16 + #include "vboxguest_core.h" 17 + #include "vboxguest_version.h" 18 + 19 + /* Get the pointer to the first HGCM parameter. */ 20 + #define VBG_IOCTL_HGCM_CALL_PARMS(a) \ 21 + ((struct vmmdev_hgcm_function_parameter *)( \ 22 + (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call))) 23 + /* Get the pointer to the first HGCM parameter in a 32-bit request. */ 24 + #define VBG_IOCTL_HGCM_CALL_PARMS32(a) \ 25 + ((struct vmmdev_hgcm_function_parameter32 *)( \ 26 + (u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call))) 27 + 28 + #define GUEST_MAPPINGS_TRIES 5 29 + 30 + /** 31 + * Reserves memory in which the VMM can relocate any guest mappings 32 + * that are floating around. 33 + * 34 + * This operation is a little bit tricky since the VMM might not accept 35 + * just any address because of address clashes between the three contexts 36 + * it operates in, so we try several times. 37 + * 38 + * Failure to reserve the guest mappings is ignored. 39 + * 40 + * @gdev: The Guest extension device. 41 + */ 42 + static void vbg_guest_mappings_init(struct vbg_dev *gdev) 43 + { 44 + struct vmmdev_hypervisorinfo *req; 45 + void *guest_mappings[GUEST_MAPPINGS_TRIES]; 46 + struct page **pages = NULL; 47 + u32 size, hypervisor_size; 48 + int i, rc; 49 + 50 + /* Query the required space. 
*/ 51 + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO); 52 + if (!req) 53 + return; 54 + 55 + req->hypervisor_start = 0; 56 + req->hypervisor_size = 0; 57 + rc = vbg_req_perform(gdev, req); 58 + if (rc < 0) 59 + goto out; 60 + 61 + /* 62 + * The VMM will report back if there is nothing it wants to map, like 63 + * for instance in VT-x and AMD-V mode. 64 + */ 65 + if (req->hypervisor_size == 0) 66 + goto out; 67 + 68 + hypervisor_size = req->hypervisor_size; 69 + /* Add 4M so that we can align the vmap to 4MiB as the host requires. */ 70 + size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M; 71 + 72 + pages = kmalloc(sizeof(*pages) * (size >> PAGE_SHIFT), GFP_KERNEL); 73 + if (!pages) 74 + goto out; 75 + 76 + gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER); 77 + if (!gdev->guest_mappings_dummy_page) 78 + goto out; 79 + 80 + for (i = 0; i < (size >> PAGE_SHIFT); i++) 81 + pages[i] = gdev->guest_mappings_dummy_page; 82 + 83 + /* 84 + * Try several times, the VMM might not accept some addresses because 85 + * of address clashes between the three contexts. 86 + */ 87 + for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) { 88 + guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT), 89 + VM_MAP, PAGE_KERNEL_RO); 90 + if (!guest_mappings[i]) 91 + break; 92 + 93 + req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO; 94 + req->header.rc = VERR_INTERNAL_ERROR; 95 + req->hypervisor_size = hypervisor_size; 96 + req->hypervisor_start = 97 + (unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M); 98 + 99 + rc = vbg_req_perform(gdev, req); 100 + if (rc >= 0) { 101 + gdev->guest_mappings = guest_mappings[i]; 102 + break; 103 + } 104 + } 105 + 106 + /* Free vmap's from failed attempts. 
*/ 107 + while (--i >= 0) 108 + vunmap(guest_mappings[i]); 109 + 110 + /* On failure free the dummy-page backing the vmap */ 111 + if (!gdev->guest_mappings) { 112 + __free_page(gdev->guest_mappings_dummy_page); 113 + gdev->guest_mappings_dummy_page = NULL; 114 + } 115 + 116 + out: 117 + kfree(req); 118 + kfree(pages); 119 + } 120 + 121 + /** 122 + * Undo what vbg_guest_mappings_init did. 123 + * 124 + * @gdev: The Guest extension device. 125 + */ 126 + static void vbg_guest_mappings_exit(struct vbg_dev *gdev) 127 + { 128 + struct vmmdev_hypervisorinfo *req; 129 + int rc; 130 + 131 + if (!gdev->guest_mappings) 132 + return; 133 + 134 + /* 135 + * Tell the host that we're going to free the memory we reserved for 136 + * it, the free it up. (Leak the memory if anything goes wrong here.) 137 + */ 138 + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO); 139 + if (!req) 140 + return; 141 + 142 + req->hypervisor_start = 0; 143 + req->hypervisor_size = 0; 144 + 145 + rc = vbg_req_perform(gdev, req); 146 + 147 + kfree(req); 148 + 149 + if (rc < 0) { 150 + vbg_err("%s error: %d\n", __func__, rc); 151 + return; 152 + } 153 + 154 + vunmap(gdev->guest_mappings); 155 + gdev->guest_mappings = NULL; 156 + 157 + __free_page(gdev->guest_mappings_dummy_page); 158 + gdev->guest_mappings_dummy_page = NULL; 159 + } 160 + 161 + /** 162 + * Report the guest information to the host. 163 + * Return: 0 or negative errno value. 164 + * @gdev: The Guest extension device. 165 + */ 166 + static int vbg_report_guest_info(struct vbg_dev *gdev) 167 + { 168 + /* 169 + * Allocate and fill in the two guest info reports. 
170 + */ 171 + struct vmmdev_guest_info *req1 = NULL; 172 + struct vmmdev_guest_info2 *req2 = NULL; 173 + int rc, ret = -ENOMEM; 174 + 175 + req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO); 176 + req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2); 177 + if (!req1 || !req2) 178 + goto out_free; 179 + 180 + req1->interface_version = VMMDEV_VERSION; 181 + req1->os_type = VMMDEV_OSTYPE_LINUX26; 182 + #if __BITS_PER_LONG == 64 183 + req1->os_type |= VMMDEV_OSTYPE_X64; 184 + #endif 185 + 186 + req2->additions_major = VBG_VERSION_MAJOR; 187 + req2->additions_minor = VBG_VERSION_MINOR; 188 + req2->additions_build = VBG_VERSION_BUILD; 189 + req2->additions_revision = VBG_SVN_REV; 190 + /* (no features defined yet) */ 191 + req2->additions_features = 0; 192 + strlcpy(req2->name, VBG_VERSION_STRING, 193 + sizeof(req2->name)); 194 + 195 + /* 196 + * There are two protocols here: 197 + * 1. INFO2 + INFO1. Supported by >=3.2.51. 198 + * 2. INFO1 and optionally INFO2. The old protocol. 199 + * 200 + * We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED 201 + * if not supported by the VMMDev (message ordering requirement). 202 + */ 203 + rc = vbg_req_perform(gdev, req2); 204 + if (rc >= 0) { 205 + rc = vbg_req_perform(gdev, req1); 206 + } else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) { 207 + rc = vbg_req_perform(gdev, req1); 208 + if (rc >= 0) { 209 + rc = vbg_req_perform(gdev, req2); 210 + if (rc == VERR_NOT_IMPLEMENTED) 211 + rc = VINF_SUCCESS; 212 + } 213 + } 214 + ret = vbg_status_code_to_errno(rc); 215 + 216 + out_free: 217 + kfree(req2); 218 + kfree(req1); 219 + return ret; 220 + } 221 + 222 + /** 223 + * Report the guest driver status to the host. 224 + * Return: 0 or negative errno value. 225 + * @gdev: The Guest extension device. 226 + * @active: Flag whether the driver is now active or not. 
227 + */ 228 + static int vbg_report_driver_status(struct vbg_dev *gdev, bool active) 229 + { 230 + struct vmmdev_guest_status *req; 231 + int rc; 232 + 233 + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS); 234 + if (!req) 235 + return -ENOMEM; 236 + 237 + req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER; 238 + if (active) 239 + req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE; 240 + else 241 + req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE; 242 + req->flags = 0; 243 + 244 + rc = vbg_req_perform(gdev, req); 245 + if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */ 246 + rc = VINF_SUCCESS; 247 + 248 + kfree(req); 249 + 250 + return vbg_status_code_to_errno(rc); 251 + } 252 + 253 + /** 254 + * Inflate the balloon by one chunk. The caller owns the balloon mutex. 255 + * Return: 0 or negative errno value. 256 + * @gdev: The Guest extension device. 257 + * @chunk_idx: Index of the chunk. 258 + */ 259 + static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx) 260 + { 261 + struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req; 262 + struct page **pages; 263 + int i, rc, ret; 264 + 265 + pages = kmalloc(sizeof(*pages) * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES, 266 + GFP_KERNEL | __GFP_NOWARN); 267 + if (!pages) 268 + return -ENOMEM; 269 + 270 + req->header.size = sizeof(*req); 271 + req->inflate = true; 272 + req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; 273 + 274 + for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) { 275 + pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN); 276 + if (!pages[i]) { 277 + ret = -ENOMEM; 278 + goto out_error; 279 + } 280 + 281 + req->phys_page[i] = page_to_phys(pages[i]); 282 + } 283 + 284 + rc = vbg_req_perform(gdev, req); 285 + if (rc < 0) { 286 + vbg_err("%s error, rc: %d\n", __func__, rc); 287 + ret = vbg_status_code_to_errno(rc); 288 + goto out_error; 289 + } 290 + 291 + gdev->mem_balloon.pages[chunk_idx] = pages; 292 + 293 + return 0; 294 + 295 + out_error: 296 + while 
(--i >= 0) 297 + __free_page(pages[i]); 298 + kfree(pages); 299 + 300 + return ret; 301 + } 302 + 303 + /** 304 + * Deflate the balloon by one chunk. The caller owns the balloon mutex. 305 + * Return: 0 or negative errno value. 306 + * @gdev: The Guest extension device. 307 + * @chunk_idx: Index of the chunk. 308 + */ 309 + static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx) 310 + { 311 + struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req; 312 + struct page **pages = gdev->mem_balloon.pages[chunk_idx]; 313 + int i, rc; 314 + 315 + req->header.size = sizeof(*req); 316 + req->inflate = false; 317 + req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; 318 + 319 + for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) 320 + req->phys_page[i] = page_to_phys(pages[i]); 321 + 322 + rc = vbg_req_perform(gdev, req); 323 + if (rc < 0) { 324 + vbg_err("%s error, rc: %d\n", __func__, rc); 325 + return vbg_status_code_to_errno(rc); 326 + } 327 + 328 + for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) 329 + __free_page(pages[i]); 330 + kfree(pages); 331 + gdev->mem_balloon.pages[chunk_idx] = NULL; 332 + 333 + return 0; 334 + } 335 + 336 + /** 337 + * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size 338 + * the host wants the balloon to be and adjust accordingly. 339 + */ 340 + static void vbg_balloon_work(struct work_struct *work) 341 + { 342 + struct vbg_dev *gdev = 343 + container_of(work, struct vbg_dev, mem_balloon.work); 344 + struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req; 345 + u32 i, chunks; 346 + int rc, ret; 347 + 348 + /* 349 + * Setting this bit means that we request the value from the host and 350 + * change the guest memory balloon according to the returned value. 
351 + */ 352 + req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST; 353 + rc = vbg_req_perform(gdev, req); 354 + if (rc < 0) { 355 + vbg_err("%s error, rc: %d)\n", __func__, rc); 356 + return; 357 + } 358 + 359 + /* 360 + * The host always returns the same maximum amount of chunks, so 361 + * we do this once. 362 + */ 363 + if (!gdev->mem_balloon.max_chunks) { 364 + gdev->mem_balloon.pages = 365 + devm_kcalloc(gdev->dev, req->phys_mem_chunks, 366 + sizeof(struct page **), GFP_KERNEL); 367 + if (!gdev->mem_balloon.pages) 368 + return; 369 + 370 + gdev->mem_balloon.max_chunks = req->phys_mem_chunks; 371 + } 372 + 373 + chunks = req->balloon_chunks; 374 + if (chunks > gdev->mem_balloon.max_chunks) { 375 + vbg_err("%s: illegal balloon size %u (max=%u)\n", 376 + __func__, chunks, gdev->mem_balloon.max_chunks); 377 + return; 378 + } 379 + 380 + if (chunks > gdev->mem_balloon.chunks) { 381 + /* inflate */ 382 + for (i = gdev->mem_balloon.chunks; i < chunks; i++) { 383 + ret = vbg_balloon_inflate(gdev, i); 384 + if (ret < 0) 385 + return; 386 + 387 + gdev->mem_balloon.chunks++; 388 + } 389 + } else { 390 + /* deflate */ 391 + for (i = gdev->mem_balloon.chunks; i-- > chunks;) { 392 + ret = vbg_balloon_deflate(gdev, i); 393 + if (ret < 0) 394 + return; 395 + 396 + gdev->mem_balloon.chunks--; 397 + } 398 + } 399 + } 400 + 401 + /** 402 + * Callback for heartbeat timer. 403 + */ 404 + static void vbg_heartbeat_timer(struct timer_list *t) 405 + { 406 + struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer); 407 + 408 + vbg_req_perform(gdev, gdev->guest_heartbeat_req); 409 + mod_timer(&gdev->heartbeat_timer, 410 + msecs_to_jiffies(gdev->heartbeat_interval_ms)); 411 + } 412 + 413 + /** 414 + * Configure the host to check guest's heartbeat 415 + * and get heartbeat interval from the host. 416 + * Return: 0 or negative errno value. 417 + * @gdev: The Guest extension device. 418 + * @enabled: Set true to enable guest heartbeat checks on host. 
419 + */ 420 + static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled) 421 + { 422 + struct vmmdev_heartbeat *req; 423 + int rc; 424 + 425 + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE); 426 + if (!req) 427 + return -ENOMEM; 428 + 429 + req->enabled = enabled; 430 + req->interval_ns = 0; 431 + rc = vbg_req_perform(gdev, req); 432 + do_div(req->interval_ns, 1000000); /* ns -> ms */ 433 + gdev->heartbeat_interval_ms = req->interval_ns; 434 + kfree(req); 435 + 436 + return vbg_status_code_to_errno(rc); 437 + } 438 + 439 + /** 440 + * Initializes the heartbeat timer. This feature may be disabled by the host. 441 + * Return: 0 or negative errno value. 442 + * @gdev: The Guest extension device. 443 + */ 444 + static int vbg_heartbeat_init(struct vbg_dev *gdev) 445 + { 446 + int ret; 447 + 448 + /* Make sure that heartbeat checking is disabled if we fail. */ 449 + ret = vbg_heartbeat_host_config(gdev, false); 450 + if (ret < 0) 451 + return ret; 452 + 453 + ret = vbg_heartbeat_host_config(gdev, true); 454 + if (ret < 0) 455 + return ret; 456 + 457 + /* 458 + * Preallocate the request to use it from the timer callback because: 459 + * 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL 460 + * and the timer callback runs at DISPATCH_LEVEL; 461 + * 2) avoid repeated allocations. 462 + */ 463 + gdev->guest_heartbeat_req = vbg_req_alloc( 464 + sizeof(*gdev->guest_heartbeat_req), 465 + VMMDEVREQ_GUEST_HEARTBEAT); 466 + if (!gdev->guest_heartbeat_req) 467 + return -ENOMEM; 468 + 469 + vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n", 470 + __func__, gdev->heartbeat_interval_ms); 471 + mod_timer(&gdev->heartbeat_timer, 0); 472 + 473 + return 0; 474 + } 475 + 476 + /** 477 + * Cleanup hearbeat code, stop HB timer and disable host heartbeat checking. 478 + * @gdev: The Guest extension device. 
479 + */ 480 + static void vbg_heartbeat_exit(struct vbg_dev *gdev) 481 + { 482 + del_timer_sync(&gdev->heartbeat_timer); 483 + vbg_heartbeat_host_config(gdev, false); 484 + kfree(gdev->guest_heartbeat_req); 485 + 486 + } 487 + 488 + /** 489 + * Applies a change to the bit usage tracker. 490 + * Return: true if the mask changed, false if not. 491 + * @tracker: The bit usage tracker. 492 + * @changed: The bits to change. 493 + * @previous: The previous value of the bits. 494 + */ 495 + static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker, 496 + u32 changed, u32 previous) 497 + { 498 + bool global_change = false; 499 + 500 + while (changed) { 501 + u32 bit = ffs(changed) - 1; 502 + u32 bitmask = BIT(bit); 503 + 504 + if (bitmask & previous) { 505 + tracker->per_bit_usage[bit] -= 1; 506 + if (tracker->per_bit_usage[bit] == 0) { 507 + global_change = true; 508 + tracker->mask &= ~bitmask; 509 + } 510 + } else { 511 + tracker->per_bit_usage[bit] += 1; 512 + if (tracker->per_bit_usage[bit] == 1) { 513 + global_change = true; 514 + tracker->mask |= bitmask; 515 + } 516 + } 517 + 518 + changed &= ~bitmask; 519 + } 520 + 521 + return global_change; 522 + } 523 + 524 + /** 525 + * Init and termination worker for resetting the (host) event filter on the host 526 + * Return: 0 or negative errno value. 527 + * @gdev: The Guest extension device. 528 + * @fixed_events: Fixed events (init time). 
529 + */ 530 + static int vbg_reset_host_event_filter(struct vbg_dev *gdev, 531 + u32 fixed_events) 532 + { 533 + struct vmmdev_mask *req; 534 + int rc; 535 + 536 + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); 537 + if (!req) 538 + return -ENOMEM; 539 + 540 + req->not_mask = U32_MAX & ~fixed_events; 541 + req->or_mask = fixed_events; 542 + rc = vbg_req_perform(gdev, req); 543 + if (rc < 0) 544 + vbg_err("%s error, rc: %d\n", __func__, rc); 545 + 546 + kfree(req); 547 + return vbg_status_code_to_errno(rc); 548 + } 549 + 550 + /** 551 + * Changes the event filter mask for the given session. 552 + * 553 + * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to 554 + * do session cleanup. Takes the session spinlock. 555 + * 556 + * Return: 0 or negative errno value. 557 + * @gdev: The Guest extension device. 558 + * @session: The session. 559 + * @or_mask: The events to add. 560 + * @not_mask: The events to remove. 561 + * @session_termination: Set if we're called by the session cleanup code. 562 + * This tweaks the error handling so we perform 563 + * proper session cleanup even if the host 564 + * misbehaves. 565 + */ 566 + static int vbg_set_session_event_filter(struct vbg_dev *gdev, 567 + struct vbg_session *session, 568 + u32 or_mask, u32 not_mask, 569 + bool session_termination) 570 + { 571 + struct vmmdev_mask *req; 572 + u32 changed, previous; 573 + int rc, ret = 0; 574 + 575 + /* Allocate a request buffer before taking the spinlock */ 576 + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK); 577 + if (!req) { 578 + if (!session_termination) 579 + return -ENOMEM; 580 + /* Ignore allocation failure, we must do session cleanup. */ 581 + } 582 + 583 + mutex_lock(&gdev->session_mutex); 584 + 585 + /* Apply the changes to the session mask. 
*/ 586 + previous = session->event_filter; 587 + session->event_filter |= or_mask; 588 + session->event_filter &= ~not_mask; 589 + 590 + /* If anything actually changed, update the global usage counters. */ 591 + changed = previous ^ session->event_filter; 592 + if (!changed) 593 + goto out; 594 + 595 + vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous); 596 + or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask; 597 + 598 + if (gdev->event_filter_host == or_mask || !req) 599 + goto out; 600 + 601 + gdev->event_filter_host = or_mask; 602 + req->or_mask = or_mask; 603 + req->not_mask = ~or_mask; 604 + rc = vbg_req_perform(gdev, req); 605 + if (rc < 0) { 606 + ret = vbg_status_code_to_errno(rc); 607 + 608 + /* Failed, roll back (unless it's session termination time). */ 609 + gdev->event_filter_host = U32_MAX; 610 + if (session_termination) 611 + goto out; 612 + 613 + vbg_track_bit_usage(&gdev->event_filter_tracker, changed, 614 + session->event_filter); 615 + session->event_filter = previous; 616 + } 617 + 618 + out: 619 + mutex_unlock(&gdev->session_mutex); 620 + kfree(req); 621 + 622 + return ret; 623 + } 624 + 625 + /** 626 + * Init and termination worker for set guest capabilities to zero on the host. 627 + * Return: 0 or negative errno value. 628 + * @gdev: The Guest extension device. 629 + */ 630 + static int vbg_reset_host_capabilities(struct vbg_dev *gdev) 631 + { 632 + struct vmmdev_mask *req; 633 + int rc; 634 + 635 + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); 636 + if (!req) 637 + return -ENOMEM; 638 + 639 + req->not_mask = U32_MAX; 640 + req->or_mask = 0; 641 + rc = vbg_req_perform(gdev, req); 642 + if (rc < 0) 643 + vbg_err("%s error, rc: %d\n", __func__, rc); 644 + 645 + kfree(req); 646 + return vbg_status_code_to_errno(rc); 647 + } 648 + 649 + /** 650 + * Sets the guest capabilities for a session. Takes the session spinlock. 651 + * Return: 0 or negative errno value. 
652 + * @gdev: The Guest extension device. 653 + * @session: The session. 654 + * @or_mask: The capabilities to add. 655 + * @not_mask: The capabilities to remove. 656 + * @session_termination: Set if we're called by the session cleanup code. 657 + * This tweaks the error handling so we perform 658 + * proper session cleanup even if the host 659 + * misbehaves. 660 + */ 661 + static int vbg_set_session_capabilities(struct vbg_dev *gdev, 662 + struct vbg_session *session, 663 + u32 or_mask, u32 not_mask, 664 + bool session_termination) 665 + { 666 + struct vmmdev_mask *req; 667 + u32 changed, previous; 668 + int rc, ret = 0; 669 + 670 + /* Allocate a request buffer before taking the spinlock */ 671 + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES); 672 + if (!req) { 673 + if (!session_termination) 674 + return -ENOMEM; 675 + /* Ignore allocation failure, we must do session cleanup. */ 676 + } 677 + 678 + mutex_lock(&gdev->session_mutex); 679 + 680 + /* Apply the changes to the session mask. */ 681 + previous = session->guest_caps; 682 + session->guest_caps |= or_mask; 683 + session->guest_caps &= ~not_mask; 684 + 685 + /* If anything actually changed, update the global usage counters. */ 686 + changed = previous ^ session->guest_caps; 687 + if (!changed) 688 + goto out; 689 + 690 + vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous); 691 + or_mask = gdev->guest_caps_tracker.mask; 692 + 693 + if (gdev->guest_caps_host == or_mask || !req) 694 + goto out; 695 + 696 + gdev->guest_caps_host = or_mask; 697 + req->or_mask = or_mask; 698 + req->not_mask = ~or_mask; 699 + rc = vbg_req_perform(gdev, req); 700 + if (rc < 0) { 701 + ret = vbg_status_code_to_errno(rc); 702 + 703 + /* Failed, roll back (unless it's session termination time). 
*/ 704 + gdev->guest_caps_host = U32_MAX; 705 + if (session_termination) 706 + goto out; 707 + 708 + vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, 709 + session->guest_caps); 710 + session->guest_caps = previous; 711 + } 712 + 713 + out: 714 + mutex_unlock(&gdev->session_mutex); 715 + kfree(req); 716 + 717 + return ret; 718 + } 719 + 720 + /** 721 + * vbg_query_host_version get the host feature mask and version information. 722 + * Return: 0 or negative errno value. 723 + * @gdev: The Guest extension device. 724 + */ 725 + static int vbg_query_host_version(struct vbg_dev *gdev) 726 + { 727 + struct vmmdev_host_version *req; 728 + int rc, ret; 729 + 730 + req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION); 731 + if (!req) 732 + return -ENOMEM; 733 + 734 + rc = vbg_req_perform(gdev, req); 735 + ret = vbg_status_code_to_errno(rc); 736 + if (ret) 737 + goto out; 738 + 739 + snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u", 740 + req->major, req->minor, req->build, req->revision); 741 + gdev->host_features = req->features; 742 + 743 + vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version, 744 + gdev->host_features); 745 + 746 + if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) { 747 + vbg_err("vboxguest: Error host too old (does not support page-lists)\n"); 748 + ret = -ENODEV; 749 + } 750 + 751 + out: 752 + kfree(req); 753 + return ret; 754 + } 755 + 756 + /** 757 + * Initializes the VBoxGuest device extension when the 758 + * device driver is loaded. 759 + * 760 + * The native code locates the VMMDev on the PCI bus and retrieve 761 + * the MMIO and I/O port ranges, this function will take care of 762 + * mapping the MMIO memory (if present). Upon successful return 763 + * the native code should set up the interrupt handler. 764 + * 765 + * Return: 0 or negative errno value. 766 + * 767 + * @gdev: The Guest extension device. 
768 + * @fixed_events: Events that will be enabled upon init and no client 769 + * will ever be allowed to mask. 770 + */ 771 + int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events) 772 + { 773 + int ret = -ENOMEM; 774 + 775 + gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM; 776 + gdev->event_filter_host = U32_MAX; /* forces a report */ 777 + gdev->guest_caps_host = U32_MAX; /* forces a report */ 778 + 779 + init_waitqueue_head(&gdev->event_wq); 780 + init_waitqueue_head(&gdev->hgcm_wq); 781 + spin_lock_init(&gdev->event_spinlock); 782 + mutex_init(&gdev->session_mutex); 783 + mutex_init(&gdev->cancel_req_mutex); 784 + timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0); 785 + INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work); 786 + 787 + gdev->mem_balloon.get_req = 788 + vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req), 789 + VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ); 790 + gdev->mem_balloon.change_req = 791 + vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req), 792 + VMMDEVREQ_CHANGE_MEMBALLOON); 793 + gdev->cancel_req = 794 + vbg_req_alloc(sizeof(*(gdev->cancel_req)), 795 + VMMDEVREQ_HGCM_CANCEL2); 796 + gdev->ack_events_req = 797 + vbg_req_alloc(sizeof(*gdev->ack_events_req), 798 + VMMDEVREQ_ACKNOWLEDGE_EVENTS); 799 + gdev->mouse_status_req = 800 + vbg_req_alloc(sizeof(*gdev->mouse_status_req), 801 + VMMDEVREQ_GET_MOUSE_STATUS); 802 + 803 + if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req || 804 + !gdev->cancel_req || !gdev->ack_events_req || 805 + !gdev->mouse_status_req) 806 + goto err_free_reqs; 807 + 808 + ret = vbg_query_host_version(gdev); 809 + if (ret) 810 + goto err_free_reqs; 811 + 812 + ret = vbg_report_guest_info(gdev); 813 + if (ret) { 814 + vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret); 815 + goto err_free_reqs; 816 + } 817 + 818 + ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events); 819 + if (ret) { 820 + vbg_err("vboxguest: Error setting fixed event filter: %d\n", 821 + ret); 822 + goto 
err_free_reqs; 823 + } 824 + 825 + ret = vbg_reset_host_capabilities(gdev); 826 + if (ret) { 827 + vbg_err("vboxguest: Error clearing guest capabilities: %d\n", 828 + ret); 829 + goto err_free_reqs; 830 + } 831 + 832 + ret = vbg_core_set_mouse_status(gdev, 0); 833 + if (ret) { 834 + vbg_err("vboxguest: Error clearing mouse status: %d\n", ret); 835 + goto err_free_reqs; 836 + } 837 + 838 + /* These may fail without requiring the driver init to fail. */ 839 + vbg_guest_mappings_init(gdev); 840 + vbg_heartbeat_init(gdev); 841 + 842 + /* All Done! */ 843 + ret = vbg_report_driver_status(gdev, true); 844 + if (ret < 0) 845 + vbg_err("vboxguest: Error reporting driver status: %d\n", ret); 846 + 847 + return 0; 848 + 849 + err_free_reqs: 850 + kfree(gdev->mouse_status_req); 851 + kfree(gdev->ack_events_req); 852 + kfree(gdev->cancel_req); 853 + kfree(gdev->mem_balloon.change_req); 854 + kfree(gdev->mem_balloon.get_req); 855 + return ret; 856 + } 857 + 858 + /** 859 + * Call this on exit to clean-up vboxguest-core managed resources. 860 + * 861 + * The native code should call this before the driver is loaded, 862 + * but don't call this on shutdown. 863 + * @gdev: The Guest extension device. 864 + */ 865 + void vbg_core_exit(struct vbg_dev *gdev) 866 + { 867 + vbg_heartbeat_exit(gdev); 868 + vbg_guest_mappings_exit(gdev); 869 + 870 + /* Clear the host flags (mouse status etc). */ 871 + vbg_reset_host_event_filter(gdev, 0); 872 + vbg_reset_host_capabilities(gdev); 873 + vbg_core_set_mouse_status(gdev, 0); 874 + 875 + kfree(gdev->mouse_status_req); 876 + kfree(gdev->ack_events_req); 877 + kfree(gdev->cancel_req); 878 + kfree(gdev->mem_balloon.change_req); 879 + kfree(gdev->mem_balloon.get_req); 880 + } 881 + 882 + /** 883 + * Creates a VBoxGuest user session. 884 + * 885 + * vboxguest_linux.c calls this when userspace opens the char-device. 886 + * Return: A pointer to the new session or an ERR_PTR on error. 887 + * @gdev: The Guest extension device. 
 * @user: Set if this is a session for the vboxuser device.
 */
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
{
	struct vbg_session *session;

	session = kzalloc(sizeof(*session), GFP_KERNEL);
	if (!session)
		return ERR_PTR(-ENOMEM);

	session->gdev = gdev;
	session->user_session = user;

	return session;
}

/**
 * Closes a VBoxGuest session.
 * @session: The session to close (and free).
 */
void vbg_core_close_session(struct vbg_session *session)
{
	struct vbg_dev *gdev = session->gdev;
	int i, rc;

	/* Release every capability / event-filter bit this session claimed. */
	vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
	vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);

	/* Auto-disconnect any HGCM clients still connected by this session. */
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i])
			continue;

		vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
	}

	kfree(session);
}

/*
 * Validate that an ioctl header specifies exactly the expected payload
 * sizes (header + fixed in-size, header + fixed out-size).
 * Returns 0 on success, -EINVAL on mismatch.
 */
static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
			 size_t out_size)
{
	if (hdr->size_in != (sizeof(*hdr) + in_size) ||
	    hdr->size_out != (sizeof(*hdr) + out_size))
		return -EINVAL;

	return 0;
}

/*
 * VBG_IOCTL_DRIVER_VERSION_INFO handler: negotiate the ioctl interface
 * version with userspace. Major versions must match; the minimum version
 * requested may not exceed the requested version.
 */
static int vbg_ioctl_driver_version_info(
	struct vbg_ioctl_driver_version_info *info)
{
	const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
	u16 min_maj_version, req_maj_version;

	if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
		return -EINVAL;

	req_maj_version = info->u.in.req_version >> 16;
	min_maj_version = info->u.in.min_version >> 16;

	if (info->u.in.min_version > info->u.in.req_version ||
	    min_maj_version != req_maj_version)
		return -EINVAL;

	if (info->u.in.min_version <= VBG_IOC_VERSION &&
	    min_maj_version == vbg_maj_version) {
		info->u.out.session_version = VBG_IOC_VERSION;
	} else {
		info->u.out.session_version = U32_MAX;
		info->hdr.rc = VERR_VERSION_MISMATCH;
	}

	info->u.out.driver_version  = VBG_IOC_VERSION;
	info->u.out.driver_revision = 0;
	info->u.out.reserved1      = 0;
	info->u.out.reserved2      = 0;

	return 0;
}

/* Wake-up condition for vbg_ioctl_wait_for_events() below. */
static bool vbg_wait_event_cond(struct vbg_dev *gdev,
				struct vbg_session *session,
				u32 event_mask)
{
	unsigned long flags;
	bool wakeup;
	u32 events;

	spin_lock_irqsave(&gdev->event_spinlock, flags);

	events = gdev->pending_events & event_mask;
	wakeup = events || session->cancel_waiters;

	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	return wakeup;
}

/* Must be called with the event_lock held */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     u32 event_mask)
{
	u32 events = gdev->pending_events & event_mask;

	gdev->pending_events &= ~events;
	return events;
}

/*
 * VBG_IOCTL_WAIT_FOR_EVENTS handler: sleep until one of the requested
 * host events is pending (or timeout / interruption / cancellation).
 */
static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_wait_for_events *wait)
{
	u32 timeout_ms = wait->u.in.timeout_ms;
	u32 event_mask = wait->u.in.events;
	unsigned long flags;
	long timeout;
	int ret = 0;

	if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
		return -EINVAL;

	/* U32_MAX means "wait forever". */
	if (timeout_ms == U32_MAX)
		timeout = MAX_SCHEDULE_TIMEOUT;
	else
		timeout = msecs_to_jiffies(timeout_ms);

	wait->u.out.events = 0;
	do {
		timeout = wait_event_interruptible_timeout(
				gdev->event_wq,
				vbg_wait_event_cond(gdev, session, event_mask),
				timeout);

		spin_lock_irqsave(&gdev->event_spinlock, flags);

		if (timeout < 0 || session->cancel_waiters) {
			ret = -EINTR;
		} else if (timeout == 0) {
			ret = -ETIMEDOUT;
		} else {
			wait->u.out.events =
			   vbg_consume_events_locked(gdev, session, event_mask);
		}

		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		/*
		 * Someone else may have consumed the event(s) first, in
		 * which case we go back to waiting.
		 */
	} while (ret == 0 && wait->u.out.events == 0);

	return ret;
}

/*
 * VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS handler: mark this session's
 * waiters as cancelled and wake all waiters so they return -EINTR.
 */
static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
					       struct vbg_session *session,
					       struct vbg_ioctl_hdr *hdr)
{
	unsigned long flags;

	if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
		return -EINVAL;

	spin_lock_irqsave(&gdev->event_spinlock, flags);
	session->cancel_waiters = true;
	spin_unlock_irqrestore(&gdev->event_spinlock, flags);

	wake_up(&gdev->event_wq);

	return 0;
}

/**
 * Checks if the VMM request is allowed in the context of the given session.
 * Return: 0 or negative errno value.
 * @gdev:    The Guest extension device.
 * @session: The calling session.
 * @req:     The request.
 */
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
			   const struct vmmdev_request_header *req)
{
	const struct vmmdev_guest_status *guest_status;
	bool trusted_apps_only;

	switch (req->request_type) {
	/* Trusted users apps only. */
	case VMMDEVREQ_QUERY_CREDENTIALS:
	case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
	case VMMDEVREQ_REGISTER_SHARED_MODULE:
	case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
	case VMMDEVREQ_WRITE_COREDUMP:
	case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
	case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
	case VMMDEVREQ_CHECK_SHARED_MODULES:
	case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
	case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
	case VMMDEVREQ_REPORT_GUEST_STATS:
	case VMMDEVREQ_REPORT_GUEST_USER_STATE:
	case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
		trusted_apps_only = true;
		break;

	/* Anyone. */
	case VMMDEVREQ_GET_MOUSE_STATUS:
	case VMMDEVREQ_SET_MOUSE_STATUS:
	case VMMDEVREQ_SET_POINTER_SHAPE:
	case VMMDEVREQ_GET_HOST_VERSION:
	case VMMDEVREQ_IDLE:
	case VMMDEVREQ_GET_HOST_TIME:
	case VMMDEVREQ_SET_POWER_STATUS:
	case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
	case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
	case VMMDEVREQ_REPORT_GUEST_STATUS:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
	case VMMDEVREQ_VIDEMODE_SUPPORTED:
	case VMMDEVREQ_GET_HEIGHT_REDUCTION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
	case VMMDEVREQ_VIDEMODE_SUPPORTED2:
	case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
	case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
	case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
	case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
	case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
	case VMMDEVREQ_GET_VRDPCHANGE_REQ:
	case VMMDEVREQ_LOG_STRING:
	case VMMDEVREQ_GET_SESSION_ID:
		trusted_apps_only = false;
		break;

	/* Depends on the request parameters... */
	case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
		guest_status = (const struct vmmdev_guest_status *)req;
		switch (guest_status->facility) {
		/* Kernel-reserved facilities: userspace may never report. */
		case VBOXGUEST_FACILITY_TYPE_ALL:
		case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
			vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
				guest_status->facility);
			return -EPERM;
		case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
			trusted_apps_only = true;
			break;
		case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
		case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
		case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
		default:
			trusted_apps_only = false;
			break;
		}
		break;

	/* Anything else is not allowed. */
	default:
		vbg_err("Denying userspace vmm call type %#08x\n",
			req->request_type);
		return -EPERM;
	}

	/* user_session is set for the world-accessible vboxuser device. */
	if (trusted_apps_only && session->user_session) {
		vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
			req->request_type);
		return -EPERM;
	}

	return 0;
}

/*
 * VBG_IOCTL_VMMDEV_REQUEST handler: pass a raw, pre-validated VMMDev
 * request buffer straight through to the host.
 */
static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
				struct vbg_session *session, void *data)
{
	struct vbg_ioctl_hdr *hdr = data;
	int ret;

	if (hdr->size_in != hdr->size_out)
		return -EINVAL;

	if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
		return -E2BIG;

	if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	ret = vbg_req_allowed(gdev, session, data);
	if (ret < 0)
		return ret;

	vbg_req_perform(gdev, data);
	WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);

	return 0;
}

/*
 * VBG_IOCTL_HGCM_CONNECT handler: connect to an HGCM service and record
 * the client id in the session so it gets auto-disconnected on close.
 */
static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
				  struct vbg_session *session,
				  struct vbg_ioctl_hgcm_connect *conn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
		return -EINVAL;

	/* Find a free place in the sessions clients array and claim it */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (!session->hgcm_client_ids[i]) {
			/* U32_MAX = slot reserved while connect is pending. */
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EMFILE;

	ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
			       &conn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && conn->hdr.rc >= 0) {
		conn->u.out.client_id = client_id;
		session->hgcm_client_ids[i] = client_id;
	} else {
		conn->u.out.client_id = 0;
		session->hgcm_client_ids[i] = 0;
	}
	mutex_unlock(&gdev->session_mutex);

	return ret;
}

/*
 * VBG_IOCTL_HGCM_DISCONNECT handler: disconnect an HGCM client owned by
 * this session and free its slot in the session's client-id array.
 */
static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
				     struct vbg_session *session,
				     struct vbg_ioctl_hgcm_disconnect *disconn)
{
	u32 client_id;
	int i, ret;

	if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
		return -EINVAL;

	client_id = disconn->u.in.client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	/* Claim the slot (U32_MAX) while the disconnect is in flight. */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
		if (session->hgcm_client_ids[i] == client_id) {
			session->hgcm_client_ids[i] = U32_MAX;
			break;
		}
	}
	mutex_unlock(&gdev->session_mutex);

	if (i >= ARRAY_SIZE(session->hgcm_client_ids))
		return -EINVAL;

	ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);

	mutex_lock(&gdev->session_mutex);
	if (ret == 0 && disconn->hdr.rc >= 0)
		session->hgcm_client_ids[i] = 0;
	else
		session->hgcm_client_ids[i] = client_id;
	mutex_unlock(&gdev->session_mutex);

	return ret;
}

/*
 * VBG_IOCTL_HGCM_CALL handler: validate the variable-sized call buffer
 * and client id, then forward the call to the host.
 * @f32bit: true when called from a 32-bit process on a 64-bit kernel
 *          (parameters use the 32-bit layout).
 */
static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
			       struct vbg_session *session, bool f32bit,
			       struct vbg_ioctl_hgcm_call *call)
{
	size_t actual_size;
	u32 client_id;
	int i, ret;

	if (call->hdr.size_in < sizeof(*call))
		return -EINVAL;

	if (call->hdr.size_in != call->hdr.size_out)
		return -EINVAL;

	if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
		return -E2BIG;

	client_id = call->client_id;
	if (client_id == 0 || client_id == U32_MAX)
		return -EINVAL;

	/* The buffer must be big enough for parm_count parameters. */
	actual_size = sizeof(*call);
	if (f32bit)
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter32);
	else
		actual_size += call->parm_count *
			       sizeof(struct vmmdev_hgcm_function_parameter);
	if (call->hdr.size_in < actual_size) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
			  call->hdr.size_in, actual_size);
		return -EINVAL;
	}
	call->hdr.size_out = actual_size;

	/*
	 * Validate the client id.
	 */
	mutex_lock(&gdev->session_mutex);
	for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
		if (session->hgcm_client_ids[i] == client_id)
			break;
	mutex_unlock(&gdev->session_mutex);
	if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
		vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
			  client_id);
		return -EINVAL;
	}

	if (f32bit)
		ret = vbg_hgcm_call32(gdev, client_id,
				      call->function, call->timeout_ms,
				      VBG_IOCTL_HGCM_CALL_PARMS32(call),
				      call->parm_count, &call->hdr.rc);
	else
		ret = vbg_hgcm_call(gdev, client_id,
				    call->function, call->timeout_ms,
				    VBG_IOCTL_HGCM_CALL_PARMS(call),
				    call->parm_count, &call->hdr.rc);

	if (ret == -E2BIG) {
		/* E2BIG needs to be reported through the hdr.rc field. */
		call->hdr.rc = VERR_OUT_OF_RANGE;
		ret = 0;
	}

	if (ret && ret != -EINTR && ret != -ETIMEDOUT)
		vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);

	return ret;
}

/*
 * VBG_IOCTL_LOG handler: forward a userspace-supplied message to the
 * kernel log. The message length is derived from hdr.size_in.
 */
static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
	if (log->hdr.size_out != sizeof(log->hdr))
		return -EINVAL;

	vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
		 log->u.in.msg);

	return 0;
}

/*
 * VBG_IOCTL_CHANGE_FILTER_MASK handler: adjust this session's host-event
 * filter (bits to set / bits to clear).
 */
static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
					struct vbg_session *session,
					struct vbg_ioctl_change_filter *filter)
{
	u32 or_mask, not_mask;

	if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
		return -EINVAL;

	or_mask = filter->u.in.or_mask;
	not_mask = filter->u.in.not_mask;

	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
					    false);
}

/*
 * VBG_IOCTL_CHANGE_GUEST_CAPABILITIES handler: adjust this session's
 * guest capabilities and report the resulting session/global caps back.
 */
static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
	     struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
	u32 or_mask, not_mask;
	int ret;

	if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
		return -EINVAL;

	or_mask = caps->u.in.or_mask;
	not_mask = caps->u.in.not_mask;

	/*
	 * NOTE(review): this validates *capability* bits against the *event*
	 * mask (VMMDEV_EVENT_VALID_EVENT_MASK) - likely a copy/paste from
	 * vbg_ioctl_change_filter_mask(). Confirm against the VMMDev
	 * capability bit definitions and use a capabilities mask here.
	 */
	if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
		return -EINVAL;

	ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
					   false);
	if (ret)
		return ret;

	caps->u.out.session_caps = session->guest_caps;
	caps->u.out.global_caps = gdev->guest_caps_host;

	return 0;
}

/*
 * VBG_IOCTL_CHECK_BALLOON handler: report the current balloon size and
 * that balloon events are handled in-kernel.
 */
static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
				   struct vbg_ioctl_check_balloon *balloon_info)
{
	if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
		return -EINVAL;

	balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
	/*
	 * Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
	 * events entirely in the kernel, see vbg_core_isr().
	 */
	balloon_info->u.out.handle_in_r3 = false;

	return 0;
}

/*
 * VBG_IOCTL_WRITE_CORE_DUMP handler: ask the host to write a core dump
 * of the guest. The VMMDev status is returned via hdr.rc.
 */
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
				     struct vbg_ioctl_write_coredump *dump)
{
	struct vmmdev_write_core_dump *req;

	if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
		return -EINVAL;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
	if (!req)
		return -ENOMEM;

	req->flags = dump->u.in.flags;
	dump->hdr.rc = vbg_req_perform(gdev, req);

	kfree(req);
	return 0;
}

/**
 * Common IOCtl for user to kernel communication.
 * Return: 0 or negative errno value.
 * @session: The client session.
 * @req:     The requested function.
 * @data:    The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
 */
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
	unsigned int req_no_size = req & ~IOCSIZE_MASK;
	struct vbg_dev *gdev = session->gdev;
	struct vbg_ioctl_hdr *hdr = data;
	bool f32bit = false;

	hdr->rc = VINF_SUCCESS;
	if (!hdr->size_out)
		hdr->size_out = hdr->size_in;

	/*
	 * hdr->version and hdr->size_in / hdr->size_out minimum size are
	 * already checked by vbg_misc_device_ioctl().
	 */

	/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
	if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
	    req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
		return vbg_ioctl_vmmrequest(gdev, session, data);

	if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
		return -EINVAL;

	/* Fixed size requests. */
	switch (req) {
	case VBG_IOCTL_DRIVER_VERSION_INFO:
		return vbg_ioctl_driver_version_info(data);
	case VBG_IOCTL_HGCM_CONNECT:
		return vbg_ioctl_hgcm_connect(gdev, session, data);
	case VBG_IOCTL_HGCM_DISCONNECT:
		return vbg_ioctl_hgcm_disconnect(gdev, session, data);
	case VBG_IOCTL_WAIT_FOR_EVENTS:
		return vbg_ioctl_wait_for_events(gdev, session, data);
	case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
		return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
	case VBG_IOCTL_CHANGE_FILTER_MASK:
		return vbg_ioctl_change_filter_mask(gdev, session, data);
	case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
		return vbg_ioctl_change_guest_capabilities(gdev, session, data);
	case VBG_IOCTL_CHECK_BALLOON:
		return vbg_ioctl_check_balloon(gdev, data);
	case VBG_IOCTL_WRITE_CORE_DUMP:
		return vbg_ioctl_write_core_dump(gdev, data);
	}

	/* Variable sized requests. */
	switch (req_no_size) {
#ifdef CONFIG_COMPAT
	case VBG_IOCTL_HGCM_CALL_32(0):
		f32bit = true;
		/* Fall through */
#endif
	case VBG_IOCTL_HGCM_CALL(0):
		return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
	case VBG_IOCTL_LOG(0):
		return vbg_ioctl_log(data);
	}

	vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
	return -ENOTTY;
}

/**
 * Report guest supported mouse-features to the host.
 *
 * Return: 0 or negative errno value.
 * @gdev:     The Guest extension device.
 * @features: The set of features to report to the host.
 */
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
	struct vmmdev_mouse_status *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
	if (!req)
		return -ENOMEM;

	req->mouse_features = features;
	req->pointer_pos_x = 0;
	req->pointer_pos_y = 0;

	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	kfree(req);
	return vbg_status_code_to_errno(rc);
}

/** Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
	struct vbg_dev *gdev = dev_id;
	struct vmmdev_events *req = gdev->ack_events_req;
	bool mouse_position_changed = false;
	unsigned long flags;
	u32 events = 0;
	int rc;

	/* Shared IRQ line: bail out if the event is not for us. */
	if (!gdev->mmio->V.V1_04.have_events)
		return IRQ_NONE;

	/* Get and acknowledge events. */
	req->header.rc = VERR_INTERNAL_ERROR;
	req->events = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0) {
		vbg_err("Error performing events req, rc: %d\n", rc);
		return IRQ_NONE;
	}

	events = req->events;

	/* Mouse events are delivered to the input device, not to waiters. */
	if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
		mouse_position_changed = true;
		events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
	}

	if (events & VMMDEV_EVENT_HGCM) {
		wake_up(&gdev->hgcm_wq);
		events &= ~VMMDEV_EVENT_HGCM;
	}

	/* Balloon resizing is handled entirely in-kernel via a workqueue. */
	if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
		schedule_work(&gdev->mem_balloon.work);
		events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	}

	if (events) {
		spin_lock_irqsave(&gdev->event_spinlock, flags);
		gdev->pending_events |= events;
		spin_unlock_irqrestore(&gdev->event_spinlock, flags);

		wake_up(&gdev->event_wq);
	}

	if (mouse_position_changed)
		vbg_linux_mouse_event(gdev);

	return IRQ_HANDLED;
}
+174
drivers/virt/vboxguest/vboxguest_core.h
/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/* Copyright (C) 2010-2016 Oracle Corporation */

#ifndef __VBOXGUEST_CORE_H__
#define __VBOXGUEST_CORE_H__

#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/vboxguest.h>
#include "vmmdev.h"

struct vbg_session;

/** VBox guest memory balloon. */
struct vbg_mem_balloon {
	/** Work handling VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events */
	struct work_struct work;
	/** Pre-allocated vmmdev_memballoon_info req for query */
	struct vmmdev_memballoon_info *get_req;
	/** Pre-allocated vmmdev_memballoon_change req for inflate / deflate */
	struct vmmdev_memballoon_change *change_req;
	/** The current number of chunks in the balloon. */
	u32 chunks;
	/** The maximum number of chunks in the balloon. */
	u32 max_chunks;
	/**
	 * Array of pointers to page arrays. A page array is allocated for
	 * each chunk when inflating, and freed when deflating.
	 */
	struct page ***pages;
};

/**
 * Per bit usage tracker for a u32 mask.
 *
 * Used for optimal handling of guest properties and event filter.
 */
struct vbg_bit_usage_tracker {
	/** Per bit usage counters. */
	u32 per_bit_usage[32];
	/** The current mask according to per_bit_usage. */
	u32 mask;
};

/** VBox guest device (data) extension. */
struct vbg_dev {
	struct device *dev;
	/** The base of the adapter I/O ports. */
	u16 io_port;
	/** Pointer to the mapping of the VMMDev adapter memory. */
	struct vmmdev_memory *mmio;
	/** Host version */
	char host_version[64];
	/** Host features */
	unsigned int host_features;
	/**
	 * Dummy page and vmap address for reserved kernel virtual-address
	 * space for the guest mappings, only used on hosts lacking vtx.
	 */
	struct page *guest_mappings_dummy_page;
	void *guest_mappings;
	/** Spinlock protecting pending_events. */
	spinlock_t event_spinlock;
	/** Preallocated struct vmmdev_events for the IRQ handler. */
	struct vmmdev_events *ack_events_req;
	/** Wait-for-event list for threads waiting for multiple events. */
	wait_queue_head_t event_wq;
	/** Mask of pending events. */
	u32 pending_events;
	/** Wait-for-event list for threads waiting on HGCM async completion. */
	wait_queue_head_t hgcm_wq;
	/** Pre-allocated hgcm cancel2 req. for cancellation on timeout */
	struct vmmdev_hgcm_cancel2 *cancel_req;
	/** Mutex protecting cancel_req accesses */
	struct mutex cancel_req_mutex;
	/** Pre-allocated mouse-status request for the input-device handling. */
	struct vmmdev_mouse_status *mouse_status_req;
	/** Input device for reporting abs mouse coordinates to the guest. */
	struct input_dev *input;

	/** Memory balloon information. */
	struct vbg_mem_balloon mem_balloon;

	/** Lock for session related items in vbg_dev and vbg_session */
	struct mutex session_mutex;
	/** Events we won't permit anyone to filter out. */
	u32 fixed_events;
	/**
	 * Usage counters for the host events (excludes fixed events),
	 * Protected by session_mutex.
	 */
	struct vbg_bit_usage_tracker event_filter_tracker;
	/**
	 * The event filter last reported to the host (or UINT32_MAX).
	 * Protected by session_mutex.
	 */
	u32 event_filter_host;

	/**
	 * Usage counters for guest capabilities. Indexed by capability bit
	 * number, one count per session using a capability.
	 * Protected by session_mutex.
	 */
	struct vbg_bit_usage_tracker guest_caps_tracker;
	/**
	 * The guest capabilities last reported to the host (or UINT32_MAX).
	 * Protected by session_mutex.
	 */
	u32 guest_caps_host;

	/**
	 * Heartbeat timer which fires every heartbeat_interval_ms and whose
	 * handler sends VMMDEVREQ_GUEST_HEARTBEAT to VMMDev.
	 */
	struct timer_list heartbeat_timer;
	/** Heartbeat timer interval in ms. */
	int heartbeat_interval_ms;
	/** Preallocated VMMDEVREQ_GUEST_HEARTBEAT request. */
	struct vmmdev_request_header *guest_heartbeat_req;

	/** "vboxguest" char-device */
	struct miscdevice misc_device;
	/** "vboxuser" char-device */
	struct miscdevice misc_device_user;
};

/** The VBoxGuest per session data. */
struct vbg_session {
	/** Pointer to the device extension. */
	struct vbg_dev *gdev;

	/**
	 * Array containing HGCM client IDs associated with this session.
	 * These will be automatically disconnected when the session is closed.
	 * Protected by vbg_gdev.session_mutex.
	 */
	u32 hgcm_client_ids[64];
	/**
	 * Host events requested by the session.
	 * An event type requested in any guest session will be added to the
	 * host filter. Protected by vbg_gdev.session_mutex.
	 */
	u32 event_filter;
	/**
	 * Guest capabilities for this session.
	 * A capability claimed by any guest session will be reported to the
	 * host. Protected by vbg_gdev.session_mutex.
	 */
	u32 guest_caps;
	/** Does this session belong to a root process or a user one? */
	bool user_session;
	/** Set on CANCEL_ALL_WAITEVENTS, protected by vbg_dev.event_spinlock. */
	bool cancel_waiters;
};

int  vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
void vbg_core_exit(struct vbg_dev *gdev);
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
void vbg_core_close_session(struct vbg_session *session);
int  vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
int  vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);

irqreturn_t vbg_core_isr(int irq, void *dev_id);

void vbg_linux_mouse_event(struct vbg_dev *gdev);

#endif
+466
drivers/virt/vboxguest/vboxguest_linux.c
··· 1 + /* SPDX-License-Identifier: GPL-2.0 */ 2 + /* 3 + * vboxguest linux pci driver, char-dev and input-device code, 4 + * 5 + * Copyright (C) 2006-2016 Oracle Corporation 6 + */ 7 + 8 + #include <linux/input.h> 9 + #include <linux/kernel.h> 10 + #include <linux/miscdevice.h> 11 + #include <linux/module.h> 12 + #include <linux/pci.h> 13 + #include <linux/poll.h> 14 + #include <linux/vbox_utils.h> 15 + #include "vboxguest_core.h" 16 + 17 + /** The device name. */ 18 + #define DEVICE_NAME "vboxguest" 19 + /** The device name for the device node open to everyone. */ 20 + #define DEVICE_NAME_USER "vboxuser" 21 + /** VirtualBox PCI vendor ID. */ 22 + #define VBOX_VENDORID 0x80ee 23 + /** VMMDev PCI card product ID. */ 24 + #define VMMDEV_DEVICEID 0xcafe 25 + 26 + /** Mutex protecting the global vbg_gdev pointer used by vbg_get/put_gdev. */ 27 + static DEFINE_MUTEX(vbg_gdev_mutex); 28 + /** Global vbg_gdev pointer used by vbg_get/put_gdev. */ 29 + static struct vbg_dev *vbg_gdev; 30 + 31 + static int vbg_misc_device_open(struct inode *inode, struct file *filp) 32 + { 33 + struct vbg_session *session; 34 + struct vbg_dev *gdev; 35 + 36 + /* misc_open sets filp->private_data to our misc device */ 37 + gdev = container_of(filp->private_data, struct vbg_dev, misc_device); 38 + 39 + session = vbg_core_open_session(gdev, false); 40 + if (IS_ERR(session)) 41 + return PTR_ERR(session); 42 + 43 + filp->private_data = session; 44 + return 0; 45 + } 46 + 47 + static int vbg_misc_device_user_open(struct inode *inode, struct file *filp) 48 + { 49 + struct vbg_session *session; 50 + struct vbg_dev *gdev; 51 + 52 + /* misc_open sets filp->private_data to our misc device */ 53 + gdev = container_of(filp->private_data, struct vbg_dev, 54 + misc_device_user); 55 + 56 + session = vbg_core_open_session(gdev, false); 57 + if (IS_ERR(session)) 58 + return PTR_ERR(session); 59 + 60 + filp->private_data = session; 61 + return 0; 62 + } 63 + 64 + /** 65 + * Close device. 
66 + * Return: 0 on success, negated errno on failure. 67 + * @inode: Pointer to inode info structure. 68 + * @filp: Associated file pointer. 69 + */ 70 + static int vbg_misc_device_close(struct inode *inode, struct file *filp) 71 + { 72 + vbg_core_close_session(filp->private_data); 73 + filp->private_data = NULL; 74 + return 0; 75 + } 76 + 77 + /** 78 + * Device I/O Control entry point. 79 + * Return: 0 on success, negated errno on failure. 80 + * @filp: Associated file pointer. 81 + * @req: The request specified to ioctl(). 82 + * @arg: The argument specified to ioctl(). 83 + */ 84 + static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, 85 + unsigned long arg) 86 + { 87 + struct vbg_session *session = filp->private_data; 88 + size_t returned_size, size; 89 + struct vbg_ioctl_hdr hdr; 90 + int ret = 0; 91 + void *buf; 92 + 93 + if (copy_from_user(&hdr, (void *)arg, sizeof(hdr))) 94 + return -EFAULT; 95 + 96 + if (hdr.version != VBG_IOCTL_HDR_VERSION) 97 + return -EINVAL; 98 + 99 + if (hdr.size_in < sizeof(hdr) || 100 + (hdr.size_out && hdr.size_out < sizeof(hdr))) 101 + return -EINVAL; 102 + 103 + size = max(hdr.size_in, hdr.size_out); 104 + if (_IOC_SIZE(req) && _IOC_SIZE(req) != size) 105 + return -EINVAL; 106 + if (size > SZ_16M) 107 + return -E2BIG; 108 + 109 + /* __GFP_DMA32 because IOCTL_VMMDEV_REQUEST passes this to the host */ 110 + buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32); 111 + if (!buf) 112 + return -ENOMEM; 113 + 114 + if (copy_from_user(buf, (void *)arg, hdr.size_in)) { 115 + ret = -EFAULT; 116 + goto out; 117 + } 118 + if (hdr.size_in < size) 119 + memset(buf + hdr.size_in, 0, size - hdr.size_in); 120 + 121 + ret = vbg_core_ioctl(session, req, buf); 122 + if (ret) 123 + goto out; 124 + 125 + returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out; 126 + if (returned_size > size) { 127 + vbg_debug("%s: too much output data %zu > %zu\n", 128 + __func__, returned_size, size); 129 + returned_size = size; 130 + } 131 + if 
(copy_to_user((void *)arg, buf, returned_size) != 0) 132 + ret = -EFAULT; 133 + 134 + out: 135 + kfree(buf); 136 + 137 + return ret; 138 + } 139 + 140 + /** The file_operations structures. */ 141 + static const struct file_operations vbg_misc_device_fops = { 142 + .owner = THIS_MODULE, 143 + .open = vbg_misc_device_open, 144 + .release = vbg_misc_device_close, 145 + .unlocked_ioctl = vbg_misc_device_ioctl, 146 + #ifdef CONFIG_COMPAT 147 + .compat_ioctl = vbg_misc_device_ioctl, 148 + #endif 149 + }; 150 + static const struct file_operations vbg_misc_device_user_fops = { 151 + .owner = THIS_MODULE, 152 + .open = vbg_misc_device_user_open, 153 + .release = vbg_misc_device_close, 154 + .unlocked_ioctl = vbg_misc_device_ioctl, 155 + #ifdef CONFIG_COMPAT 156 + .compat_ioctl = vbg_misc_device_ioctl, 157 + #endif 158 + }; 159 + 160 + /** 161 + * Called when the input device is first opened. 162 + * 163 + * Sets up absolute mouse reporting. 164 + */ 165 + static int vbg_input_open(struct input_dev *input) 166 + { 167 + struct vbg_dev *gdev = input_get_drvdata(input); 168 + u32 feat = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_NEW_PROTOCOL; 169 + int ret; 170 + 171 + ret = vbg_core_set_mouse_status(gdev, feat); 172 + if (ret) 173 + return ret; 174 + 175 + return 0; 176 + } 177 + 178 + /** 179 + * Called if all open handles to the input device are closed. 180 + * 181 + * Disables absolute reporting. 182 + */ 183 + static void vbg_input_close(struct input_dev *input) 184 + { 185 + struct vbg_dev *gdev = input_get_drvdata(input); 186 + 187 + vbg_core_set_mouse_status(gdev, 0); 188 + } 189 + 190 + /** 191 + * Creates the kernel input device. 192 + * 193 + * Return: 0 on success, negated errno on failure. 
194 + */ 195 + static int vbg_create_input_device(struct vbg_dev *gdev) 196 + { 197 + struct input_dev *input; 198 + 199 + input = devm_input_allocate_device(gdev->dev); 200 + if (!input) 201 + return -ENOMEM; 202 + 203 + input->id.bustype = BUS_PCI; 204 + input->id.vendor = VBOX_VENDORID; 205 + input->id.product = VMMDEV_DEVICEID; 206 + input->open = vbg_input_open; 207 + input->close = vbg_input_close; 208 + input->dev.parent = gdev->dev; 209 + input->name = "VirtualBox mouse integration"; 210 + 211 + input_set_abs_params(input, ABS_X, VMMDEV_MOUSE_RANGE_MIN, 212 + VMMDEV_MOUSE_RANGE_MAX, 0, 0); 213 + input_set_abs_params(input, ABS_Y, VMMDEV_MOUSE_RANGE_MIN, 214 + VMMDEV_MOUSE_RANGE_MAX, 0, 0); 215 + input_set_capability(input, EV_KEY, BTN_MOUSE); 216 + input_set_drvdata(input, gdev); 217 + 218 + gdev->input = input; 219 + 220 + return input_register_device(gdev->input); 221 + } 222 + 223 + static ssize_t host_version_show(struct device *dev, 224 + struct device_attribute *attr, char *buf) 225 + { 226 + struct vbg_dev *gdev = dev_get_drvdata(dev); 227 + 228 + return sprintf(buf, "%s\n", gdev->host_version); 229 + } 230 + 231 + static ssize_t host_features_show(struct device *dev, 232 + struct device_attribute *attr, char *buf) 233 + { 234 + struct vbg_dev *gdev = dev_get_drvdata(dev); 235 + 236 + return sprintf(buf, "%#x\n", gdev->host_features); 237 + } 238 + 239 + static DEVICE_ATTR_RO(host_version); 240 + static DEVICE_ATTR_RO(host_features); 241 + 242 + /** 243 + * Does the PCI detection and init of the device. 244 + * 245 + * Return: 0 on success, negated errno on failure. 
246 + */ 247 + static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id) 248 + { 249 + struct device *dev = &pci->dev; 250 + resource_size_t io, io_len, mmio, mmio_len; 251 + struct vmmdev_memory *vmmdev; 252 + struct vbg_dev *gdev; 253 + int ret; 254 + 255 + gdev = devm_kzalloc(dev, sizeof(*gdev), GFP_KERNEL); 256 + if (!gdev) 257 + return -ENOMEM; 258 + 259 + ret = pci_enable_device(pci); 260 + if (ret != 0) { 261 + vbg_err("vboxguest: Error enabling device: %d\n", ret); 262 + return ret; 263 + } 264 + 265 + ret = -ENODEV; 266 + 267 + io = pci_resource_start(pci, 0); 268 + io_len = pci_resource_len(pci, 0); 269 + if (!io || !io_len) { 270 + vbg_err("vboxguest: Error IO-port resource (0) is missing\n"); 271 + goto err_disable_pcidev; 272 + } 273 + if (devm_request_region(dev, io, io_len, DEVICE_NAME) == NULL) { 274 + vbg_err("vboxguest: Error could not claim IO resource\n"); 275 + ret = -EBUSY; 276 + goto err_disable_pcidev; 277 + } 278 + 279 + mmio = pci_resource_start(pci, 1); 280 + mmio_len = pci_resource_len(pci, 1); 281 + if (!mmio || !mmio_len) { 282 + vbg_err("vboxguest: Error MMIO resource (1) is missing\n"); 283 + goto err_disable_pcidev; 284 + } 285 + 286 + if (devm_request_mem_region(dev, mmio, mmio_len, DEVICE_NAME) == NULL) { 287 + vbg_err("vboxguest: Error could not claim MMIO resource\n"); 288 + ret = -EBUSY; 289 + goto err_disable_pcidev; 290 + } 291 + 292 + vmmdev = devm_ioremap(dev, mmio, mmio_len); 293 + if (!vmmdev) { 294 + vbg_err("vboxguest: Error ioremap failed; MMIO addr=%p size=%d\n", 295 + (void *)mmio, (int)mmio_len); 296 + goto err_disable_pcidev; 297 + } 298 + 299 + /* Validate MMIO region version and size. 
*/ 300 + if (vmmdev->version != VMMDEV_MEMORY_VERSION || 301 + vmmdev->size < 32 || vmmdev->size > mmio_len) { 302 + vbg_err("vboxguest: Bogus VMMDev memory; version=%08x (expected %08x) size=%d (expected <= %d)\n", 303 + vmmdev->version, VMMDEV_MEMORY_VERSION, 304 + vmmdev->size, (int)mmio_len); 305 + goto err_disable_pcidev; 306 + } 307 + 308 + gdev->io_port = io; 309 + gdev->mmio = vmmdev; 310 + gdev->dev = dev; 311 + gdev->misc_device.minor = MISC_DYNAMIC_MINOR; 312 + gdev->misc_device.name = DEVICE_NAME; 313 + gdev->misc_device.fops = &vbg_misc_device_fops; 314 + gdev->misc_device_user.minor = MISC_DYNAMIC_MINOR; 315 + gdev->misc_device_user.name = DEVICE_NAME_USER; 316 + gdev->misc_device_user.fops = &vbg_misc_device_user_fops; 317 + 318 + ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED); 319 + if (ret) 320 + goto err_disable_pcidev; 321 + 322 + ret = vbg_create_input_device(gdev); 323 + if (ret) { 324 + vbg_err("vboxguest: Error creating input device: %d\n", ret); 325 + goto err_vbg_core_exit; 326 + } 327 + 328 + ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED, 329 + DEVICE_NAME, gdev); 330 + if (ret) { 331 + vbg_err("vboxguest: Error requesting irq: %d\n", ret); 332 + goto err_vbg_core_exit; 333 + } 334 + 335 + ret = misc_register(&gdev->misc_device); 336 + if (ret) { 337 + vbg_err("vboxguest: Error misc_register %s failed: %d\n", 338 + DEVICE_NAME, ret); 339 + goto err_vbg_core_exit; 340 + } 341 + 342 + ret = misc_register(&gdev->misc_device_user); 343 + if (ret) { 344 + vbg_err("vboxguest: Error misc_register %s failed: %d\n", 345 + DEVICE_NAME_USER, ret); 346 + goto err_unregister_misc_device; 347 + } 348 + 349 + mutex_lock(&vbg_gdev_mutex); 350 + if (!vbg_gdev) 351 + vbg_gdev = gdev; 352 + else 353 + ret = -EBUSY; 354 + mutex_unlock(&vbg_gdev_mutex); 355 + 356 + if (ret) { 357 + vbg_err("vboxguest: Error more then 1 vbox guest pci device\n"); 358 + goto err_unregister_misc_device_user; 359 + } 360 + 361 + 
pci_set_drvdata(pci, gdev); 362 + device_create_file(dev, &dev_attr_host_version); 363 + device_create_file(dev, &dev_attr_host_features); 364 + 365 + vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %p (size %d)\n", 366 + gdev->misc_device.minor, pci->irq, gdev->io_port, 367 + (void *)mmio, (int)mmio_len); 368 + 369 + return 0; 370 + 371 + err_unregister_misc_device_user: 372 + misc_deregister(&gdev->misc_device_user); 373 + err_unregister_misc_device: 374 + misc_deregister(&gdev->misc_device); 375 + err_vbg_core_exit: 376 + vbg_core_exit(gdev); 377 + err_disable_pcidev: 378 + pci_disable_device(pci); 379 + 380 + return ret; 381 + } 382 + 383 + static void vbg_pci_remove(struct pci_dev *pci) 384 + { 385 + struct vbg_dev *gdev = pci_get_drvdata(pci); 386 + 387 + mutex_lock(&vbg_gdev_mutex); 388 + vbg_gdev = NULL; 389 + mutex_unlock(&vbg_gdev_mutex); 390 + 391 + device_remove_file(gdev->dev, &dev_attr_host_features); 392 + device_remove_file(gdev->dev, &dev_attr_host_version); 393 + misc_deregister(&gdev->misc_device_user); 394 + misc_deregister(&gdev->misc_device); 395 + vbg_core_exit(gdev); 396 + pci_disable_device(pci); 397 + } 398 + 399 + struct vbg_dev *vbg_get_gdev(void) 400 + { 401 + mutex_lock(&vbg_gdev_mutex); 402 + 403 + /* 404 + * Note on success we keep the mutex locked until vbg_put_gdev(), 405 + * this stops vbg_pci_remove from removing the device from underneath 406 + * vboxsf. vboxsf will only hold a reference for a short while. 407 + */ 408 + if (vbg_gdev) 409 + return vbg_gdev; 410 + 411 + mutex_unlock(&vbg_gdev_mutex); 412 + return ERR_PTR(-ENODEV); 413 + } 414 + EXPORT_SYMBOL(vbg_get_gdev); 415 + 416 + void vbg_put_gdev(struct vbg_dev *gdev) 417 + { 418 + WARN_ON(gdev != vbg_gdev); 419 + mutex_unlock(&vbg_gdev_mutex); 420 + } 421 + EXPORT_SYMBOL(vbg_put_gdev); 422 + 423 + /** 424 + * Callback for mouse events. 
425 + * 426 + * This is called at the end of the ISR, after leaving the event spinlock, if 427 + * VMMDEV_EVENT_MOUSE_POSITION_CHANGED was raised by the host. 428 + * 429 + * @gdev: The device extension. 430 + */ 431 + void vbg_linux_mouse_event(struct vbg_dev *gdev) 432 + { 433 + int rc; 434 + 435 + /* Report events to the kernel input device */ 436 + gdev->mouse_status_req->mouse_features = 0; 437 + gdev->mouse_status_req->pointer_pos_x = 0; 438 + gdev->mouse_status_req->pointer_pos_y = 0; 439 + rc = vbg_req_perform(gdev, gdev->mouse_status_req); 440 + if (rc >= 0) { 441 + input_report_abs(gdev->input, ABS_X, 442 + gdev->mouse_status_req->pointer_pos_x); 443 + input_report_abs(gdev->input, ABS_Y, 444 + gdev->mouse_status_req->pointer_pos_y); 445 + input_sync(gdev->input); 446 + } 447 + } 448 + 449 + static const struct pci_device_id vbg_pci_ids[] = { 450 + { .vendor = VBOX_VENDORID, .device = VMMDEV_DEVICEID }, 451 + {} 452 + }; 453 + MODULE_DEVICE_TABLE(pci, vbg_pci_ids); 454 + 455 + static struct pci_driver vbg_pci_driver = { 456 + .name = DEVICE_NAME, 457 + .id_table = vbg_pci_ids, 458 + .probe = vbg_pci_probe, 459 + .remove = vbg_pci_remove, 460 + }; 461 + 462 + module_pci_driver(vbg_pci_driver); 463 + 464 + MODULE_AUTHOR("Oracle Corporation"); 465 + MODULE_DESCRIPTION("Oracle VM VirtualBox Guest Additions for Linux Module"); 466 + MODULE_LICENSE("GPL");
+19
drivers/virt/vboxguest/vboxguest_version.h
/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
 * VBox Guest additions version info, this is used by the host to determine
 * supported guest-addition features in some cases. So this will need to be
 * synced with vbox upstreams versioning scheme when we implement / port
 * new features from the upstream out-of-tree vboxguest driver.
 */

#ifndef __VBOX_VERSION_H__
#define __VBOX_VERSION_H__

/* Last synced October 4th 2017 */
#define VBG_VERSION_MAJOR 5
#define VBG_VERSION_MINOR 2
#define VBG_VERSION_BUILD 0
#define VBG_SVN_REV 68940
/* Must stay consistent with the MAJOR/MINOR/BUILD numbers above. */
#define VBG_VERSION_STRING "5.2.0"

#endif