Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/vmwgfx: Add basic support for SVGA3

SVGA3 is the next version of our PCI device. Some of the changes
include using MMIO for register accesses instead of ioports,
deprecating the FIFO MMIO and removing a lot of the old and
legacy functionality. SVGA3 doesn't support guest-backed
objects right now, so everything except 3D is working.

v2: Fixes all the static analyzer warnings

Signed-off-by: Zack Rusin <zackr@vmware.com>
Cc: Martin Krastev <krastevm@vmware.com>
Reviewed-by: Roland Scheidegger <sroland@vmware.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20210505191007.305872-1-zackr@vmware.com

+403 -308
+48 -7
drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
··· 1 1 /* SPDX-License-Identifier: GPL-2.0 OR MIT */ 2 2 /********************************************************** 3 - * Copyright 1998-2015 VMware, Inc. 3 + * Copyright 1998-2021 VMware, Inc. 4 4 * 5 5 * Permission is hereby granted, free of charge, to any person 6 6 * obtaining a copy of this software and associated documentation ··· 98 98 #define SVGA_MAGIC 0x900000UL 99 99 #define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver)) 100 100 101 + /* Version 3 has the control bar instead of the FIFO */ 102 + #define SVGA_VERSION_3 3 103 + #define SVGA_ID_3 SVGA_MAKE_ID(SVGA_VERSION_3) 104 + 101 105 /* Version 2 let the address of the frame buffer be unsigned on Win32 */ 102 106 #define SVGA_VERSION_2 2 103 107 #define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2) ··· 133 129 * Interrupts are only supported when the 134 130 * SVGA_CAP_IRQMASK capability is present. 135 131 */ 136 - #define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */ 137 - #define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */ 138 - #define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */ 139 - #define SVGA_IRQFLAG_COMMAND_BUFFER 0x8 /* Command buffer completed */ 140 - #define SVGA_IRQFLAG_ERROR 0x10 /* Error while processing commands */ 132 + #define SVGA_IRQFLAG_ANY_FENCE (1 << 0) /* Any fence was passed */ 133 + #define SVGA_IRQFLAG_FIFO_PROGRESS (1 << 1) /* Made forward progress in the FIFO */ 134 + #define SVGA_IRQFLAG_FENCE_GOAL (1 << 2) /* SVGA_FIFO_FENCE_GOAL reached */ 135 + #define SVGA_IRQFLAG_COMMAND_BUFFER (1 << 3) /* Command buffer completed */ 136 + #define SVGA_IRQFLAG_ERROR (1 << 4) /* Error while processing commands */ 137 + #define SVGA_IRQFLAG_MAX (1 << 5) 141 138 142 139 /* 143 140 * The byte-size is the size of the actual cursor data, ··· 291 286 */ 292 287 SVGA_REG_GBOBJECT_MEM_SIZE_KB = 76, 293 288 294 - SVGA_REG_TOP = 77, /* Must be 1 more than the last register */ 289 + /* 290 + + * These registers are for the addresses of the memory 
BARs for SVGA3 291 + */ 292 + SVGA_REG_REGS_START_HIGH32 = 77, 293 + SVGA_REG_REGS_START_LOW32 = 78, 294 + SVGA_REG_FB_START_HIGH32 = 79, 295 + SVGA_REG_FB_START_LOW32 = 80, 296 + 297 + /* 298 + * A hint register that recommends which quality level the guest should 299 + * currently use to define multisample surfaces. 300 + * 301 + * If the register is SVGA_REG_MSHINT_DISABLED, 302 + * the guest is only allowed to use SVGA3D_MS_QUALITY_FULL. 303 + * 304 + * Otherwise, this is a live value that can change while the VM is 305 + * powered on with the hint suggestion for which quality level the guest 306 + * should be using. Guests are free to ignore the hint and use either 307 + * RESOLVE or FULL quality. 308 + */ 309 + SVGA_REG_MSHINT = 81, 310 + 311 + SVGA_REG_IRQ_STATUS = 82, 312 + SVGA_REG_DIRTY_TRACKING = 83, 313 + 314 + SVGA_REG_TOP = 84, /* Must be 1 more than the last register */ 295 315 296 316 SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */ 297 317 /* Next 768 (== 256*3) registers exist for colormap */ ··· 339 309 340 310 SVGA_REG_GUEST_DRIVER_ID_SUBMIT = MAX_UINT32, 341 311 } SVGARegGuestDriverId; 312 + 313 + typedef enum SVGARegMSHint { 314 + SVGA_REG_MSHINT_DISABLED = 0, 315 + SVGA_REG_MSHINT_FULL = 1, 316 + SVGA_REG_MSHINT_RESOLVED = 2, 317 + } SVGARegMSHint; 318 + 319 + typedef enum SVGARegDirtyTracking { 320 + SVGA_REG_DIRTY_TRACKING_PER_IMAGE = 0, 321 + SVGA_REG_DIRTY_TRACKING_PER_SURFACE = 1, 322 + } SVGARegDirtyTracking; 342 323 343 324 344 325 /*
+4 -4
drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
··· 788 788 } 789 789 790 790 /** 791 - * vmw_binding_emit_set_sr - Issue delayed DX shader resource binding commands 791 + * vmw_emit_set_sr - Issue delayed DX shader resource binding commands 792 792 * 793 793 * @cbs: Pointer to the context's struct vmw_ctx_binding_state 794 794 * @shader_slot: The shader slot of the binding. ··· 832 832 } 833 833 834 834 /** 835 - * vmw_binding_emit_set_rt - Issue delayed DX rendertarget binding commands 835 + * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands 836 836 * 837 837 * @cbs: Pointer to the context's struct vmw_ctx_binding_state 838 838 */ ··· 1024 1024 } 1025 1025 1026 1026 /** 1027 - * vmw_binding_emit_set_vb - Issue delayed vertex buffer binding commands 1027 + * vmw_emit_set_vb - Issue delayed vertex buffer binding commands 1028 1028 * 1029 1029 * @cbs: Pointer to the context's struct vmw_ctx_binding_state 1030 1030 * ··· 1394 1394 } 1395 1395 1396 1396 /** 1397 - * vmwgfx_binding_state_reset - clear a struct vmw_ctx_binding_state 1397 + * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state 1398 1398 * 1399 1399 * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared 1400 1400 *
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
··· 421 421 } 422 422 423 423 /** 424 - * ttm_bo_cpu_blit - in-kernel cpu blit. 424 + * vmw_bo_cpu_blit - in-kernel cpu blit. 425 425 * 426 426 * @dst: Destination buffer object. 427 427 * @dst_offset: Destination offset of blit start in bytes.
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
··· 600 600 601 601 602 602 /** 603 - * vmw_user_bo_ref_obj-release - TTM synccpu reference object release callback 603 + * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback 604 604 * for vmw user buffer objects 605 605 * 606 606 * @base: Pointer to the TTM base object
+55 -63
drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
··· 31 31 32 32 #include "vmwgfx_drv.h" 33 33 34 - struct vmw_temp_set_context { 35 - SVGA3dCmdHeader header; 36 - SVGA3dCmdDXTempSetContext body; 37 - }; 38 - 39 34 bool vmw_supports_3d(struct vmw_private *dev_priv) 40 35 { 41 36 uint32_t fifo_min, hwversion; 42 - const struct vmw_fifo_state *fifo = &dev_priv->fifo; 37 + const struct vmw_fifo_state *fifo = dev_priv->fifo; 43 38 44 39 if (!(dev_priv->capabilities & SVGA_CAP_3D)) 45 40 return false; ··· 55 60 56 61 if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)) 57 62 return false; 63 + 64 + BUG_ON(vmw_is_svga_v3(dev_priv)); 58 65 59 66 fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); 60 67 if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int)) ··· 95 98 return false; 96 99 } 97 100 98 - int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 101 + struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv) 99 102 { 103 + struct vmw_fifo_state *fifo; 100 104 uint32_t max; 101 105 uint32_t min; 102 106 103 - fifo->dx = false; 107 + if (!dev_priv->fifo_mem) 108 + return NULL; 109 + 110 + fifo = kzalloc(sizeof(*fifo), GFP_KERNEL); 104 111 fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; 105 112 fifo->static_buffer = vmalloc(fifo->static_buffer_size); 106 113 if (unlikely(fifo->static_buffer == NULL)) 107 - return -ENOMEM; 114 + return ERR_PTR(-ENOMEM); 108 115 109 116 fifo->dynamic_buffer = NULL; 110 117 fifo->reserved_size = 0; ··· 116 115 117 116 mutex_init(&fifo->fifo_mutex); 118 117 init_rwsem(&fifo->rwsem); 119 - 120 - DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH)); 121 - DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); 122 - DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); 123 - 124 - dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 125 - dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 126 - dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); 127 - 128 - 
vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE | 129 - SVGA_REG_ENABLE_HIDE); 130 - 131 - vmw_write(dev_priv, SVGA_REG_TRACES, 0); 132 - 133 118 min = 4; 134 119 if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO) 135 120 min = vmw_read(dev_priv, SVGA_REG_MEM_REGS); ··· 142 155 (unsigned int) max, 143 156 (unsigned int) min, 144 157 (unsigned int) fifo->capabilities); 145 - 146 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); 147 - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno); 148 - 149 - return 0; 158 + return fifo; 150 159 } 151 160 152 161 void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason) 153 162 { 154 163 u32 *fifo_mem = dev_priv->fifo_mem; 155 - 156 - if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0) 164 + if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0) 157 165 vmw_write(dev_priv, SVGA_REG_SYNC, reason); 166 + 158 167 } 159 168 160 - void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo) 169 + void vmw_fifo_destroy(struct vmw_private *dev_priv) 161 170 { 162 - vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); 163 - while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0) 164 - ; 171 + struct vmw_fifo_state *fifo = dev_priv->fifo; 165 172 166 - dev_priv->last_read_seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE); 167 - 168 - vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 169 - dev_priv->config_done_state); 170 - vmw_write(dev_priv, SVGA_REG_ENABLE, 171 - dev_priv->enable_state); 172 - vmw_write(dev_priv, SVGA_REG_TRACES, 173 - dev_priv->traces_state); 173 + if (!fifo) 174 + return; 174 175 175 176 if (likely(fifo->static_buffer != NULL)) { 176 177 vfree(fifo->static_buffer); ··· 169 194 vfree(fifo->dynamic_buffer); 170 195 fifo->dynamic_buffer = NULL; 171 196 } 197 + kfree(fifo); 198 + dev_priv->fifo = NULL; 172 199 } 173 200 174 201 static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes) ··· 266 289 static void 
*vmw_local_fifo_reserve(struct vmw_private *dev_priv, 267 290 uint32_t bytes) 268 291 { 269 - struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 292 + struct vmw_fifo_state *fifo_state = dev_priv->fifo; 270 293 u32 *fifo_mem = dev_priv->fifo_mem; 271 294 uint32_t max; 272 295 uint32_t min; ··· 415 438 416 439 static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes) 417 440 { 418 - struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 441 + struct vmw_fifo_state *fifo_state = dev_priv->fifo; 419 442 uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD); 420 443 uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX); 421 444 uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN); 422 445 bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; 423 446 424 - if (fifo_state->dx) 425 - bytes += sizeof(struct vmw_temp_set_context); 426 - 427 - fifo_state->dx = false; 428 447 BUG_ON((bytes & 3) != 0); 429 448 BUG_ON(bytes > fifo_state->reserved_size); 430 449 ··· 468 495 469 496 470 497 /** 471 - * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands. 498 + * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands. 472 499 * 473 500 * @dev_priv: Pointer to device private structure. 474 501 * @bytes: Number of bytes to commit. ··· 482 509 } 483 510 484 511 /** 485 - * vmw_fifo_flush - Flush any buffered commands and make sure command processing 512 + * vmw_cmd_flush - Flush any buffered commands and make sure command processing 486 513 * starts. 487 514 * 488 515 * @dev_priv: Pointer to device private structure. 
··· 500 527 501 528 int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno) 502 529 { 503 - struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 504 530 struct svga_fifo_cmd_fence *cmd_fence; 505 531 u32 *fm; 506 532 int ret = 0; ··· 518 546 *seqno = atomic_add_return(1, &dev_priv->marker_seq); 519 547 } while (*seqno == 0); 520 548 521 - if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) { 549 + if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) { 522 550 523 551 /* 524 552 * Don't request hardware to send a fence. The ··· 533 561 cmd_fence = (struct svga_fifo_cmd_fence *) fm; 534 562 cmd_fence->fence = *seqno; 535 563 vmw_cmd_commit_flush(dev_priv, bytes); 536 - vmw_update_seqno(dev_priv, fifo_state); 564 + vmw_update_seqno(dev_priv); 537 565 538 566 out_err: 539 567 return ret; 540 568 } 541 569 542 570 /** 543 - * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using 571 + * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using 544 572 * legacy query commands. 545 573 * 546 574 * @dev_priv: The device private structure. 547 575 * @cid: The hardware context id used for the query. 548 576 * 549 - * See the vmw_fifo_emit_dummy_query documentation. 577 + * See the vmw_cmd_emit_dummy_query documentation. 550 578 */ 551 - static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv, 579 + static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv, 552 580 uint32_t cid) 553 581 { 554 582 /* ··· 586 614 } 587 615 588 616 /** 589 - * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using 617 + * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using 590 618 * guest-backed resource query commands. 591 619 * 592 620 * @dev_priv: The device private structure. 593 621 * @cid: The hardware context id used for the query. 594 622 * 595 - * See the vmw_fifo_emit_dummy_query documentation. 623 + * See the vmw_cmd_emit_dummy_query documentation. 
596 624 */ 597 - static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv, 598 - uint32_t cid) 625 + static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv, 626 + uint32_t cid) 599 627 { 600 628 /* 601 629 * A query wait without a preceding query end will ··· 628 656 629 657 630 658 /** 631 - * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using 659 + * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using 632 660 * appropriate resource query commands. 633 661 * 634 662 * @dev_priv: The device private structure. ··· 649 677 uint32_t cid) 650 678 { 651 679 if (dev_priv->has_mob) 652 - return vmw_fifo_emit_dummy_gb_query(dev_priv, cid); 680 + return vmw_cmd_emit_dummy_gb_query(dev_priv, cid); 653 681 654 - return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); 682 + return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid); 683 + } 684 + 685 + 686 + /** 687 + * vmw_cmd_supported - returns true if the given device supports 688 + * command queues. 689 + * 690 + * @vmw: The device private structure. 691 + * 692 + * Returns true if we can issue commands. 693 + */ 694 + bool vmw_cmd_supported(struct vmw_private *vmw) 695 + { 696 + if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS | 697 + SVGA_CAP_CMD_BUFFERS_2)) != 0) 698 + return true; 699 + /* 700 + * We have FIFO cmd's 701 + */ 702 + return vmw->fifo_mem != NULL; 655 703 }
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
··· 295 295 296 296 297 297 /** 298 - * vmw_cmbuf_header_submit: Submit a command buffer to hardware. 298 + * vmw_cmdbuf_header_submit: Submit a command buffer to hardware. 299 299 * 300 300 * @header: The header of the buffer to submit. 301 301 */ ··· 620 620 } 621 621 622 622 /** 623 - * vmw_cmdbuf_man idle - Check whether the command buffer manager is idle. 623 + * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle. 624 624 * 625 625 * @man: The command buffer manager. 626 626 * @check_preempted: Check also the preempted queue for pending command buffers.
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 653 653 } 654 654 655 655 /** 656 - * vmw_cotable_add_view - add a view to the cotable's list of active views. 656 + * vmw_cotable_add_resource - add a view to the cotable's list of active views. 657 657 * 658 658 * @res: pointer struct vmw_resource representing the cotable. 659 659 * @head: pointer to the struct list_head member of the resource, dedicated
+108 -26
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 50 50 #define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) 51 51 52 52 53 - /** 53 + /* 54 54 * Fully encoded drm commands. Might move to vmw_drm.h 55 55 */ 56 56 ··· 246 246 247 247 static const struct pci_device_id vmw_pci_id_list[] = { 248 248 { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) }, 249 + { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) }, 249 250 { } 250 251 }; 251 252 MODULE_DEVICE_TABLE(pci, vmw_pci_id_list); ··· 394 393 return ret; 395 394 } 396 395 396 + static int vmw_device_init(struct vmw_private *dev_priv) 397 + { 398 + bool uses_fb_traces = false; 399 + 400 + DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH)); 401 + DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT)); 402 + DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL)); 403 + 404 + dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE); 405 + dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE); 406 + dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES); 407 + 408 + vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE | 409 + SVGA_REG_ENABLE_HIDE); 410 + 411 + uses_fb_traces = !vmw_cmd_supported(dev_priv) && 412 + (dev_priv->capabilities & SVGA_CAP_TRACES) != 0; 413 + 414 + vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces); 415 + dev_priv->fifo = vmw_fifo_create(dev_priv); 416 + if (IS_ERR(dev_priv->fifo)) { 417 + int err = PTR_ERR(dev_priv->fifo); 418 + dev_priv->fifo = NULL; 419 + return err; 420 + } else if (!dev_priv->fifo) { 421 + vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1); 422 + } 423 + 424 + dev_priv->last_read_seqno = vmw_fence_read(dev_priv); 425 + atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno); 426 + return 0; 427 + } 428 + 429 + static void vmw_device_fini(struct vmw_private *vmw) 430 + { 431 + /* 432 + * Legacy sync 433 + */ 434 + vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC); 435 + while (vmw_read(vmw, SVGA_REG_BUSY) != 0) 436 + ; 437 + 438 + vmw->last_read_seqno = vmw_fence_read(vmw); 439 + 
440 + vmw_write(vmw, SVGA_REG_CONFIG_DONE, 441 + vmw->config_done_state); 442 + vmw_write(vmw, SVGA_REG_ENABLE, 443 + vmw->enable_state); 444 + vmw_write(vmw, SVGA_REG_TRACES, 445 + vmw->traces_state); 446 + 447 + vmw_fifo_destroy(vmw); 448 + } 449 + 397 450 /** 398 451 * vmw_request_device_late - Perform late device setup 399 452 * ··· 488 433 { 489 434 int ret; 490 435 491 - ret = vmw_fifo_init(dev_priv, &dev_priv->fifo); 436 + ret = vmw_device_init(dev_priv); 492 437 if (unlikely(ret != 0)) { 493 - DRM_ERROR("Unable to initialize FIFO.\n"); 438 + DRM_ERROR("Unable to initialize the device.\n"); 494 439 return ret; 495 440 } 496 441 vmw_fence_fifo_up(dev_priv->fman); ··· 524 469 vmw_cmdbuf_man_destroy(dev_priv->cman); 525 470 out_no_mob: 526 471 vmw_fence_fifo_down(dev_priv->fman); 527 - vmw_fifo_release(dev_priv, &dev_priv->fifo); 472 + vmw_device_fini(dev_priv); 528 473 return ret; 529 474 } 530 475 ··· 572 517 if (dev_priv->cman) 573 518 vmw_cmdbuf_man_destroy(dev_priv->cman); 574 519 575 - vmw_fifo_release(dev_priv, &dev_priv->fifo); 520 + vmw_device_fini(dev_priv); 576 521 } 577 522 578 523 /* ··· 693 638 static int vmw_setup_pci_resources(struct vmw_private *dev, 694 639 unsigned long pci_id) 695 640 { 641 + resource_size_t rmmio_start; 642 + resource_size_t rmmio_size; 696 643 resource_size_t fifo_start; 697 644 resource_size_t fifo_size; 698 645 int ret; ··· 706 649 if (ret) 707 650 return ret; 708 651 709 - dev->io_start = pci_resource_start(pdev, 0); 710 - dev->vram_start = pci_resource_start(pdev, 1); 711 - dev->vram_size = pci_resource_len(pdev, 1); 712 - fifo_start = pci_resource_start(pdev, 2); 713 - fifo_size = pci_resource_len(pdev, 2); 652 + dev->pci_id = pci_id; 653 + if (pci_id == VMWGFX_PCI_ID_SVGA3) { 654 + rmmio_start = pci_resource_start(pdev, 0); 655 + rmmio_size = pci_resource_len(pdev, 0); 656 + dev->vram_start = pci_resource_start(pdev, 2); 657 + dev->vram_size = pci_resource_len(pdev, 2); 714 658 715 - DRM_INFO("FIFO at %pa size is 
%llu kiB\n", 716 - &fifo_start, (uint64_t)fifo_size / 1024); 717 - dev->fifo_mem = devm_memremap(dev->drm.dev, 718 - fifo_start, 719 - fifo_size, 720 - MEMREMAP_WB); 659 + DRM_INFO("Register MMIO at 0x%pa size is %llu kiB\n", 660 + &rmmio_start, (uint64_t)rmmio_size / 1024); 661 + dev->rmmio = devm_ioremap(dev->drm.dev, 662 + rmmio_start, 663 + rmmio_size); 664 + if (IS_ERR(dev->rmmio)) { 665 + DRM_ERROR("Failed mapping registers mmio memory.\n"); 666 + pci_release_regions(pdev); 667 + return PTR_ERR(dev->rmmio); 668 + } 669 + } else if (pci_id == VMWGFX_PCI_ID_SVGA2) { 670 + dev->io_start = pci_resource_start(pdev, 0); 671 + dev->vram_start = pci_resource_start(pdev, 1); 672 + dev->vram_size = pci_resource_len(pdev, 1); 673 + fifo_start = pci_resource_start(pdev, 2); 674 + fifo_size = pci_resource_len(pdev, 2); 721 675 722 - if (IS_ERR(dev->fifo_mem)) { 723 - DRM_ERROR("Failed mapping FIFO memory.\n"); 676 + DRM_INFO("FIFO at %pa size is %llu kiB\n", 677 + &fifo_start, (uint64_t)fifo_size / 1024); 678 + dev->fifo_mem = devm_memremap(dev->drm.dev, 679 + fifo_start, 680 + fifo_size, 681 + MEMREMAP_WB); 682 + 683 + if (IS_ERR(dev->fifo_mem)) { 684 + DRM_ERROR("Failed mapping FIFO memory.\n"); 685 + pci_release_regions(pdev); 686 + return PTR_ERR(dev->fifo_mem); 687 + } 688 + } else { 724 689 pci_release_regions(pdev); 725 - return PTR_ERR(dev->fifo_mem); 690 + return -EINVAL; 726 691 } 727 692 728 693 /* ··· 763 684 { 764 685 uint32_t svga_id; 765 686 766 - vmw_write(dev, SVGA_REG_ID, SVGA_ID_2); 687 + vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ? 
688 + SVGA_ID_3 : SVGA_ID_2); 767 689 svga_id = vmw_read(dev, SVGA_REG_ID); 768 - if (svga_id != SVGA_ID_2) { 690 + if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) { 769 691 DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n", 770 692 svga_id, dev->vmw_chipset); 771 693 return -ENOSYS; 772 694 } 695 + BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3)); 696 + DRM_INFO("Running on SVGA version %d.\n", (svga_id & 0xff)); 773 697 return 0; 774 698 } 775 699 ··· 785 703 struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev); 786 704 787 705 dev_priv->vmw_chipset = pci_id; 788 - dev_priv->last_read_seqno = (uint32_t) -100; 789 706 dev_priv->drm.dev_private = dev_priv; 790 707 791 708 mutex_init(&dev_priv->cmdbuf_mutex); ··· 905 824 vmw_print_capabilities(dev_priv->capabilities); 906 825 if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER) 907 826 vmw_print_capabilities2(dev_priv->capabilities2); 827 + DRM_INFO("Supports command queues = %d\n", 828 + vmw_cmd_supported((dev_priv))); 908 829 909 830 ret = vmw_dma_masks(dev_priv); 910 831 if (unlikely(ret != 0)) ··· 1473 1390 struct vmw_private *dev_priv = vmw_priv(dev); 1474 1391 int ret; 1475 1392 1476 - vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2); 1477 - (void) vmw_read(dev_priv, SVGA_REG_ID); 1393 + vmw_detect_version(dev_priv); 1478 1394 1479 1395 if (dev_priv->enable_fb) 1480 1396 vmw_fifo_resource_inc(dev_priv); ··· 1510 1428 .release = drm_release, 1511 1429 .unlocked_ioctl = vmw_unlocked_ioctl, 1512 1430 .mmap = vmw_mmap, 1513 - .poll = vmw_fops_poll, 1514 - .read = vmw_fops_read, 1431 + .poll = drm_poll, 1432 + .read = drm_read, 1515 1433 #if defined(CONFIG_COMPAT) 1516 1434 .compat_ioctl = vmw_compat_ioctl, 1517 1435 #endif
+96 -24
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 66 66 #define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1 67 67 68 68 #define VMWGFX_PCI_ID_SVGA2 0x0405 69 + #define VMWGFX_PCI_ID_SVGA3 0x0406 69 70 70 71 /* 71 72 * Perhaps we should have sysfs entries for these. ··· 285 284 uint32_t capabilities; 286 285 struct mutex fifo_mutex; 287 286 struct rw_semaphore rwsem; 288 - bool dx; 289 287 }; 290 288 291 289 /** ··· 485 485 struct drm_device drm; 486 486 struct ttm_device bdev; 487 487 488 - struct vmw_fifo_state fifo; 489 - 490 488 struct drm_vma_offset_manager vma_manager; 489 + unsigned long pci_id; 491 490 u32 vmw_chipset; 492 491 resource_size_t io_start; 493 492 resource_size_t vram_start; 494 493 resource_size_t vram_size; 495 494 resource_size_t prim_bb_mem; 495 + void __iomem *rmmio; 496 496 u32 *fifo_mem; 497 497 resource_size_t fifo_mem_size; 498 498 uint32_t fb_max_width; ··· 623 623 */ 624 624 struct vmw_otable_batch otable_batch; 625 625 626 + struct vmw_fifo_state *fifo; 626 627 struct vmw_cmdbuf_man *cman; 627 628 DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX); 628 629 ··· 647 646 } 648 647 649 648 /* 649 + * SVGA v3 has mmio register access and lacks fifo cmds 650 + */ 651 + static inline bool vmw_is_svga_v3(const struct vmw_private *dev) 652 + { 653 + return dev->pci_id == VMWGFX_PCI_ID_SVGA3; 654 + } 655 + 656 + /* 650 657 * The locking here is fine-grained, so that it is performed once 651 658 * for every read- and write operation. This is of course costly, but we 652 659 * don't perform much register access in the timing critical paths anyway. 
··· 664 655 static inline void vmw_write(struct vmw_private *dev_priv, 665 656 unsigned int offset, uint32_t value) 666 657 { 667 - spin_lock(&dev_priv->hw_lock); 668 - outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); 669 - outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT); 670 - spin_unlock(&dev_priv->hw_lock); 658 + if (vmw_is_svga_v3(dev_priv)) { 659 + iowrite32(value, dev_priv->rmmio + offset); 660 + } else { 661 + spin_lock(&dev_priv->hw_lock); 662 + outl(offset, dev_priv->io_start + SVGA_INDEX_PORT); 663 + outl(value, dev_priv->io_start + SVGA_VALUE_PORT); 664 + spin_unlock(&dev_priv->hw_lock); 665 + } 671 666 } 672 667 673 668 static inline uint32_t vmw_read(struct vmw_private *dev_priv, ··· 679 666 { 680 667 u32 val; 681 668 682 - spin_lock(&dev_priv->hw_lock); 683 - outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT); 684 - val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT); 685 - spin_unlock(&dev_priv->hw_lock); 669 + if (vmw_is_svga_v3(dev_priv)) { 670 + val = ioread32(dev_priv->rmmio + offset); 671 + } else { 672 + spin_lock(&dev_priv->hw_lock); 673 + outl(offset, dev_priv->io_start + SVGA_INDEX_PORT); 674 + val = inl(dev_priv->io_start + SVGA_VALUE_PORT); 675 + spin_unlock(&dev_priv->hw_lock); 676 + } 686 677 687 678 return val; 688 679 } ··· 949 932 struct drm_file *file_priv); 950 933 extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data, 951 934 struct drm_file *file_priv); 952 - extern __poll_t vmw_fops_poll(struct file *filp, 953 - struct poll_table_struct *wait); 954 - extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer, 955 - size_t count, loff_t *offset); 956 935 957 936 /** 958 937 * Fifo utilities - vmwgfx_fifo.c 959 938 */ 960 939 961 - extern int vmw_fifo_init(struct vmw_private *dev_priv, 962 - struct vmw_fifo_state *fifo); 963 - extern void vmw_fifo_release(struct vmw_private *dev_priv, 964 - struct vmw_fifo_state *fifo); 940 + extern struct vmw_fifo_state *vmw_fifo_create(struct vmw_private 
*dev_priv); 941 + extern void vmw_fifo_destroy(struct vmw_private *dev_priv); 942 + extern bool vmw_cmd_supported(struct vmw_private *vmw); 965 943 extern void * 966 944 vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id); 967 945 extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes); ··· 981 969 982 970 #define VMW_CMD_RESERVE(__priv, __bytes) \ 983 971 VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID) 972 + 973 + 974 + /** 975 + * vmw_fifo_caps - Returns the capabilities of the FIFO command 976 + * queue or 0 if fifo memory isn't present. 977 + * @dev_priv: The device private context 978 + */ 979 + static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv) 980 + { 981 + if (!dev_priv->fifo_mem || !dev_priv->fifo) 982 + return 0; 983 + return dev_priv->fifo->capabilities; 984 + } 985 + 986 + 987 + /** 988 + * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3 989 + * is enabled in the FIFO. 990 + * @dev_priv: The device private context 991 + */ 992 + static inline bool 993 + vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv) 994 + { 995 + return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0; 996 + } 984 997 985 998 /** 986 999 * TTM glue - vmwgfx_ttm_glue.c ··· 1116 1079 * IRQs and wating - vmwgfx_irq.c 1117 1080 */ 1118 1081 1119 - extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy, 1120 - uint32_t seqno, bool interruptible, 1121 - unsigned long timeout); 1122 1082 extern int vmw_irq_install(struct drm_device *dev, int irq); 1123 1083 extern void vmw_irq_uninstall(struct drm_device *dev); 1124 1084 extern bool vmw_seqno_passed(struct vmw_private *dev_priv, ··· 1126 1092 uint32_t seqno, 1127 1093 bool interruptible, 1128 1094 unsigned long timeout); 1129 - extern void vmw_update_seqno(struct vmw_private *dev_priv, 1130 - struct vmw_fifo_state *fifo_state); 1095 + extern void vmw_update_seqno(struct vmw_private *dev_priv); 1131 1096 extern 
void vmw_seqno_waiter_add(struct vmw_private *dev_priv); 1132 1097 extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv); 1133 1098 extern void vmw_goal_waiter_add(struct vmw_private *dev_priv); ··· 1605 1572 */ 1606 1573 static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg) 1607 1574 { 1575 + BUG_ON(vmw_is_svga_v3(vmw)); 1608 1576 return READ_ONCE(*(vmw->fifo_mem + fifo_reg)); 1609 1577 } 1610 1578 ··· 1620 1586 static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg, 1621 1587 u32 value) 1622 1588 { 1589 + BUG_ON(vmw_is_svga_v3(vmw)); 1623 1590 WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value); 1624 1591 } 1592 + 1593 + static inline u32 vmw_fence_read(struct vmw_private *dev_priv) 1594 + { 1595 + u32 fence; 1596 + if (vmw_is_svga_v3(dev_priv)) 1597 + fence = vmw_read(dev_priv, SVGA_REG_FENCE); 1598 + else 1599 + fence = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE); 1600 + return fence; 1601 + } 1602 + 1603 + static inline void vmw_fence_write(struct vmw_private *dev_priv, 1604 + u32 fence) 1605 + { 1606 + BUG_ON(vmw_is_svga_v3(dev_priv)); 1607 + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, fence); 1608 + } 1609 + 1610 + static inline u32 vmw_irq_status_read(struct vmw_private *vmw) 1611 + { 1612 + u32 status; 1613 + if (vmw_is_svga_v3(vmw)) 1614 + status = vmw_read(vmw, SVGA_REG_IRQ_STATUS); 1615 + else 1616 + status = inl(vmw->io_start + SVGA_IRQSTATUS_PORT); 1617 + return status; 1618 + } 1619 + 1620 + static inline void vmw_irq_status_write(struct vmw_private *vmw, 1621 + uint32 status) 1622 + { 1623 + if (vmw_is_svga_v3(vmw)) 1624 + vmw_write(vmw, SVGA_REG_IRQ_STATUS, status); 1625 + else 1626 + outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT); 1627 + } 1628 + 1625 1629 #endif
+7 -7
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 711 711 } 712 712 713 713 /** 714 - * vmw_rebind_dx_query - Rebind DX query associated with the context 714 + * vmw_rebind_all_dx_query - Rebind DX query associated with the context 715 715 * 716 716 * @ctx_res: context the query belongs to 717 717 * ··· 1140 1140 } 1141 1141 1142 1142 /** 1143 - * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle 1143 + * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle 1144 1144 * to a MOB id. 1145 1145 * 1146 1146 * @dev_priv: Pointer to a device private structure. ··· 1195 1195 } 1196 1196 1197 1197 /** 1198 - * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle 1198 + * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle 1199 1199 * to a valid SVGAGuestPtr 1200 1200 * 1201 1201 * @dev_priv: Pointer to a device private structure. ··· 2308 2308 } 2309 2309 2310 2310 /** 2311 - * vmw_cmd_dx_ia_set_vertex_buffers - Validate 2311 + * vmw_cmd_dx_set_index_buffer - Validate 2312 2312 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command. 2313 2313 * 2314 2314 * @dev_priv: Pointer to a device private struct. ··· 2347 2347 } 2348 2348 2349 2349 /** 2350 - * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS 2350 + * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS 2351 2351 * command 2352 2352 * 2353 2353 * @dev_priv: Pointer to a device private struct. ··· 2402 2402 } 2403 2403 2404 2404 /** 2405 - * vmw_cmd_dx_clear_rendertarget_view - Validate 2405 + * vmw_cmd_dx_clear_depthstencil_view - Validate 2406 2406 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command 2407 2407 * 2408 2408 * @dev_priv: Pointer to a device private struct. ··· 3841 3841 3842 3842 fence_rep.handle = fence_handle; 3843 3843 fence_rep.seqno = fence->base.seqno; 3844 - vmw_update_seqno(dev_priv, &dev_priv->fifo); 3844 + vmw_update_seqno(dev_priv); 3845 3845 fence_rep.passed_seqno = dev_priv->last_read_seqno; 3846 3846 } 3847 3847
+4 -14
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
··· 139 139 struct vmw_fence_manager *fman = fman_from_fence(fence); 140 140 struct vmw_private *dev_priv = fman->dev_priv; 141 141 142 - u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE); 142 + u32 seqno = vmw_fence_read(dev_priv); 143 143 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) 144 144 return false; 145 - 146 - vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); 147 145 148 146 return true; 149 147 } ··· 175 177 if (likely(vmw_fence_obj_signaled(fence))) 176 178 return timeout; 177 179 178 - vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); 179 180 vmw_seqno_waiter_add(dev_priv); 180 181 181 182 spin_lock(f->lock); ··· 461 464 bool needs_rerun; 462 465 uint32_t seqno, new_seqno; 463 466 464 - seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE); 467 + seqno = vmw_fence_read(fman->dev_priv); 465 468 rerun: 466 469 list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) { 467 470 if (seqno - fence->base.seqno < VMW_FENCE_WRAP) { ··· 483 486 484 487 needs_rerun = vmw_fence_goal_new_locked(fman, seqno); 485 488 if (unlikely(needs_rerun)) { 486 - new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE); 489 + new_seqno = vmw_fence_read(fman->dev_priv); 487 490 if (new_seqno != seqno) { 488 491 seqno = new_seqno; 489 492 goto rerun; ··· 524 527 return -EBUSY; 525 528 else 526 529 return ret; 527 - } 528 - 529 - void vmw_fence_obj_flush(struct vmw_fence_obj *fence) 530 - { 531 - struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv; 532 - 533 - vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); 534 530 } 535 531 536 532 static void vmw_fence_destroy(struct vmw_fence_obj *fence) ··· 982 992 } 983 993 984 994 /** 985 - * vmw_event_fence_action_create - Post an event for sending when a fence 995 + * vmw_event_fence_action_queue - Post an event for sending when a fence 986 996 * object seqno has passed. 987 997 * 988 998 * @file_priv: The file connection on which the event should be posted.
-2
drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
··· 94 94 bool lazy, 95 95 bool interruptible, unsigned long timeout); 96 96 97 - extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence); 98 - 99 97 extern int vmw_fence_create(struct vmw_fence_manager *fman, 100 98 uint32_t seqno, 101 99 struct vmw_fence_obj **p_fence);
+2 -47
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
··· 60 60 param->value = dev_priv->capabilities2; 61 61 break; 62 62 case DRM_VMW_PARAM_FIFO_CAPS: 63 - param->value = dev_priv->fifo.capabilities; 63 + param->value = vmw_fifo_caps(dev_priv); 64 64 break; 65 65 case DRM_VMW_PARAM_MAX_FB_SIZE: 66 66 param->value = dev_priv->prim_bb_mem; 67 67 break; 68 68 case DRM_VMW_PARAM_FIFO_HW_VERSION: 69 69 { 70 - const struct vmw_fifo_state *fifo = &dev_priv->fifo; 71 - 72 70 if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) { 73 71 param->value = SVGA3D_HWVERSION_WS8_B1; 74 72 break; ··· 74 76 75 77 param->value = 76 78 vmw_fifo_mem_read(dev_priv, 77 - ((fifo->capabilities & 79 + ((vmw_fifo_caps(dev_priv) & 78 80 SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ? 79 81 SVGA_FIFO_3D_HWVERSION_REVISED : 80 82 SVGA_FIFO_3D_HWVERSION)); ··· 395 397 kfree(clips); 396 398 out_clips: 397 399 return ret; 398 - } 399 - 400 - 401 - /** 402 - * vmw_fops_poll - wrapper around the drm_poll function 403 - * 404 - * @filp: See the linux fops poll documentation. 405 - * @wait: See the linux fops poll documentation. 406 - * 407 - * Wrapper around the drm_poll function that makes sure the device is 408 - * processing the fifo if drm_poll decides to wait. 409 - */ 410 - __poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait) 411 - { 412 - struct drm_file *file_priv = filp->private_data; 413 - struct vmw_private *dev_priv = 414 - vmw_priv(file_priv->minor->dev); 415 - 416 - vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); 417 - return drm_poll(filp, wait); 418 - } 419 - 420 - 421 - /** 422 - * vmw_fops_read - wrapper around the drm_read function 423 - * 424 - * @filp: See the linux fops read documentation. 425 - * @buffer: See the linux fops read documentation. 426 - * @count: See the linux fops read documentation. 427 - * @offset: See the linux fops read documentation. 428 - * 429 - * Wrapper around the drm_read function that makes sure the device is 430 - * processing the fifo if drm_read decides to wait. 
431 - */ 432 - ssize_t vmw_fops_read(struct file *filp, char __user *buffer, 433 - size_t count, loff_t *offset) 434 - { 435 - struct drm_file *file_priv = filp->private_data; 436 - struct vmw_private *dev_priv = 437 - vmw_priv(file_priv->minor->dev); 438 - 439 - vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); 440 - return drm_read(filp, buffer, count, offset); 441 400 }
+14 -63
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
··· 65 65 } 66 66 67 67 /** 68 - * vmw_irq_handler irq handler 68 + * vmw_irq_handler: irq handler 69 69 * 70 70 * @irq: irq number 71 71 * @arg: Closure argument. Pointer to a struct drm_device cast to void * ··· 82 82 uint32_t status, masked_status; 83 83 irqreturn_t ret = IRQ_HANDLED; 84 84 85 - status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 85 + status = vmw_irq_status_read(dev_priv); 86 86 masked_status = status & READ_ONCE(dev_priv->irq_mask); 87 87 88 88 if (likely(status)) 89 - outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 89 + vmw_irq_status_write(dev_priv, status); 90 90 91 91 if (!status) 92 92 return IRQ_NONE; ··· 114 114 return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0); 115 115 } 116 116 117 - void vmw_update_seqno(struct vmw_private *dev_priv, 118 - struct vmw_fifo_state *fifo_state) 117 + void vmw_update_seqno(struct vmw_private *dev_priv) 119 118 { 120 - uint32_t seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE); 119 + uint32_t seqno = vmw_fence_read(dev_priv); 121 120 122 121 if (dev_priv->last_read_seqno != seqno) { 123 122 dev_priv->last_read_seqno = seqno; ··· 127 128 bool vmw_seqno_passed(struct vmw_private *dev_priv, 128 129 uint32_t seqno) 129 130 { 130 - struct vmw_fifo_state *fifo_state; 131 131 bool ret; 132 132 133 133 if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) 134 134 return true; 135 135 136 - fifo_state = &dev_priv->fifo; 137 - vmw_update_seqno(dev_priv, fifo_state); 136 + vmw_update_seqno(dev_priv); 138 137 if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) 139 138 return true; 140 139 141 - if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) && 140 + if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) && 142 141 vmw_fifo_idle(dev_priv, seqno)) 143 142 return true; 144 143 ··· 158 161 bool interruptible, 159 162 unsigned long timeout) 160 163 { 161 - struct vmw_fifo_state *fifo_state = &dev_priv->fifo; 164 + struct vmw_fifo_state *fifo_state = dev_priv->fifo; 162 165 163 
166 uint32_t count = 0; 164 167 uint32_t signal_seq; ··· 218 221 } 219 222 finish_wait(&dev_priv->fence_queue, &__wait); 220 223 if (ret == 0 && fifo_idle) 221 - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, signal_seq); 224 + vmw_fence_write(dev_priv, signal_seq); 222 225 223 226 wake_up_all(&dev_priv->fence_queue); 224 227 out_err: ··· 233 236 { 234 237 spin_lock_bh(&dev_priv->waiter_lock); 235 238 if ((*waiter_count)++ == 0) { 236 - outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 239 + vmw_irq_status_write(dev_priv, flag); 237 240 dev_priv->irq_mask |= flag; 238 241 vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask); 239 242 } ··· 275 278 &dev_priv->goal_queue_waiters); 276 279 } 277 280 278 - int vmw_wait_seqno(struct vmw_private *dev_priv, 279 - bool lazy, uint32_t seqno, 280 - bool interruptible, unsigned long timeout) 281 - { 282 - long ret; 283 - struct vmw_fifo_state *fifo = &dev_priv->fifo; 284 - 285 - if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP)) 286 - return 0; 287 - 288 - if (likely(vmw_seqno_passed(dev_priv, seqno))) 289 - return 0; 290 - 291 - vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC); 292 - 293 - if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE)) 294 - return vmw_fallback_wait(dev_priv, lazy, true, seqno, 295 - interruptible, timeout); 296 - 297 - if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK)) 298 - return vmw_fallback_wait(dev_priv, lazy, false, seqno, 299 - interruptible, timeout); 300 - 301 - vmw_seqno_waiter_add(dev_priv); 302 - 303 - if (interruptible) 304 - ret = wait_event_interruptible_timeout 305 - (dev_priv->fence_queue, 306 - vmw_seqno_passed(dev_priv, seqno), 307 - timeout); 308 - else 309 - ret = wait_event_timeout 310 - (dev_priv->fence_queue, 311 - vmw_seqno_passed(dev_priv, seqno), 312 - timeout); 313 - 314 - vmw_seqno_waiter_remove(dev_priv); 315 - 316 - if (unlikely(ret == 0)) 317 - ret = -EBUSY; 318 - else if (likely(ret > 0)) 319 - ret = 0; 320 - 321 - return ret; 322 - } 323 - 324 281 
static void vmw_irq_preinstall(struct drm_device *dev) 325 282 { 326 283 struct vmw_private *dev_priv = vmw_priv(dev); 327 284 uint32_t status; 328 285 329 - status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 330 - outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 286 + status = vmw_irq_status_read(dev_priv); 287 + vmw_irq_status_write(dev_priv, status); 331 288 } 332 289 333 290 void vmw_irq_uninstall(struct drm_device *dev) ··· 297 346 298 347 vmw_write(dev_priv, SVGA_REG_IRQMASK, 0); 299 348 300 - status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 301 - outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT); 349 + status = vmw_irq_status_read(dev_priv); 350 + vmw_irq_status_write(dev_priv, status); 302 351 303 352 dev->irq_enabled = false; 304 353 free_irq(dev->irq, dev);
+19 -10
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 38 38 39 39 void vmw_du_cleanup(struct vmw_display_unit *du) 40 40 { 41 + struct vmw_private *dev_priv = vmw_priv(du->primary.dev); 41 42 drm_plane_cleanup(&du->primary); 42 - drm_plane_cleanup(&du->cursor); 43 + if (vmw_cmd_supported(dev_priv)) 44 + drm_plane_cleanup(&du->cursor); 43 45 44 46 drm_connector_unregister(&du->connector); 45 47 drm_crtc_cleanup(&du->crtc); ··· 130 128 uint32_t count; 131 129 132 130 spin_lock(&dev_priv->cursor_lock); 133 - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0); 134 - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x); 135 - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y); 136 - count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT); 137 - vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count); 131 + if (vmw_is_cursor_bypass3_enabled(dev_priv)) { 132 + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0); 133 + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x); 134 + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y); 135 + count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT); 136 + vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count); 137 + } else { 138 + vmw_write(dev_priv, SVGA_REG_CURSOR_X, x); 139 + vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y); 140 + vmw_write(dev_priv, SVGA_REG_CURSOR_ON, show ? 1 : 0); 141 + } 138 142 spin_unlock(&dev_priv->cursor_lock); 139 143 } 140 144 ··· 297 289 298 290 299 291 /** 300 - * vmw_du_vps_unpin_surf - unpins resource associated with a framebuffer surface 292 + * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface 301 293 * 302 294 * @vps: plane state associated with the display surface 303 295 * @unreference: true if we also want to unreference the display. 
··· 482 474 * vmw_du_cursor_plane_atomic_check - check if the new state is okay 483 475 * 484 476 * @plane: cursor plane 485 - * @new_state: info on the new plane state 477 + * @state: info on the new plane state 486 478 * 487 479 * This is a chance to fail if the new cursor state does not fit 488 480 * our requirements. ··· 1053 1045 { 1054 1046 struct vmw_private *dev_priv = vmw_priv(framebuffer->dev); 1055 1047 1056 - if (dev_priv->active_display_unit == vmw_du_legacy) 1048 + if (dev_priv->active_display_unit == vmw_du_legacy && 1049 + vmw_cmd_supported(dev_priv)) 1057 1050 return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags, 1058 1051 color, clips, num_clips); 1059 1052 ··· 2642 2633 } 2643 2634 2644 2635 /** 2645 - * vmw_kms_create_implicit_placement_proparty - Set up the implicit placement 2636 + * vmw_kms_create_implicit_placement_property - Set up the implicit placement 2646 2637 * property. 2647 2638 * 2648 2639 * @dev_priv: Pointer to a device private struct.
+21 -15
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
··· 404 404 405 405 drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs); 406 406 407 - /* Initialize cursor plane */ 408 - ret = drm_universal_plane_init(dev, &ldu->base.cursor, 409 - 0, &vmw_ldu_cursor_funcs, 410 - vmw_cursor_plane_formats, 411 - ARRAY_SIZE(vmw_cursor_plane_formats), 412 - NULL, DRM_PLANE_TYPE_CURSOR, NULL); 413 - if (ret) { 414 - DRM_ERROR("Failed to initialize cursor plane"); 415 - drm_plane_cleanup(&ldu->base.primary); 416 - goto err_free; 417 - } 407 + /* 408 + * We're going to be using traces and software cursors 409 + */ 410 + if (vmw_cmd_supported(dev_priv)) { 411 + /* Initialize cursor plane */ 412 + ret = drm_universal_plane_init(dev, &ldu->base.cursor, 413 + 0, &vmw_ldu_cursor_funcs, 414 + vmw_cursor_plane_formats, 415 + ARRAY_SIZE(vmw_cursor_plane_formats), 416 + NULL, DRM_PLANE_TYPE_CURSOR, NULL); 417 + if (ret) { 418 + DRM_ERROR("Failed to initialize cursor plane"); 419 + drm_plane_cleanup(&ldu->base.primary); 420 + goto err_free; 421 + } 418 422 419 - drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs); 423 + drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs); 424 + } 420 425 421 426 ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs, 422 427 DRM_MODE_CONNECTOR_VIRTUAL); ··· 450 445 goto err_free_encoder; 451 446 } 452 447 453 - ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary, 454 - &ldu->base.cursor, 455 - &vmw_legacy_crtc_funcs, NULL); 448 + ret = drm_crtc_init_with_planes( 449 + dev, crtc, &ldu->base.primary, 450 + vmw_cmd_supported(dev_priv) ? &ldu->base.cursor : NULL, 451 + &vmw_legacy_crtc_funcs, NULL); 456 452 if (ret) { 457 453 DRM_ERROR("Failed to initialize CRTC\n"); 458 454 goto err_free_unregister;
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
··· 421 421 static bool vmw_overlay_available(const struct vmw_private *dev_priv) 422 422 { 423 423 return (dev_priv->overlay_priv != NULL && 424 - ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) == 424 + ((vmw_fifo_caps(dev_priv) & VMW_OVERLAY_CAP_MASK) == 425 425 VMW_OVERLAY_CAP_MASK)); 426 426 } 427 427
-4
drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
··· 34 34 35 35 #include <linux/types.h> 36 36 37 - #define VMWGFX_INDEX_PORT 0x0 38 - #define VMWGFX_VALUE_PORT 0x1 39 - #define VMWGFX_IRQSTATUS_PORT 0x8 40 - 41 37 struct svga_guest_mem_descriptor { 42 38 u32 ppn; 43 39 u32 num_pages;
+2 -2
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 280 280 } 281 281 282 282 /** 283 - * vmw_user_resource_lookup_handle - lookup a struct resource from a 283 + * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a 284 284 * TTM user-space handle and perform basic type checks 285 285 * 286 286 * @dev_priv: Pointer to a device private struct ··· 1075 1075 } 1076 1076 1077 1077 /** 1078 - * vmw_resource_update_dirty - Update a resource's dirty tracker with a 1078 + * vmw_resource_dirty_update - Update a resource's dirty tracker with a 1079 1079 * sequential range of touched backing store memory. 1080 1080 * @res: The resource. 1081 1081 * @start: The first page touched.
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
··· 1222 1222 } 1223 1223 1224 1224 /** 1225 - * vmw_kms_do_bo_dirty - Dirty part of a buffer-object backed framebuffer 1225 + * vmw_kms_sou_do_bo_dirty - Dirty part of a buffer-object backed framebuffer 1226 1226 * 1227 1227 * @dev_priv: Pointer to the device private structure. 1228 1228 * @framebuffer: Pointer to the buffer-object backed framebuffer.
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_so.c
··· 90 90 }; 91 91 92 92 /** 93 - * struct vmw_view - view define command body stub 93 + * struct vmw_view_define - view define command body stub 94 94 * 95 95 * @view_id: The device id of the view being defined 96 96 * @sid: The surface id of the view being defined
+3 -3
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
··· 742 742 } 743 743 744 744 /** 745 - * vmw_stdu_surface_clip - Callback to encode a surface copy command cliprect 745 + * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect 746 746 * 747 747 * @dirty: The closure structure. 748 748 * ··· 780 780 } 781 781 782 782 /** 783 - * vmw_stdu_surface_fifo_commit - Callback to fill in and submit a surface 783 + * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface 784 784 * copy command. 785 785 * 786 786 * @dirty: The closure structure. ··· 1571 1571 /** 1572 1572 * vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane 1573 1573 * @plane: display plane 1574 - * @old_state: Only used to get crtc info 1574 + * @state: Only used to get crtc info 1575 1575 * 1576 1576 * Formally update stdu->display_srf to the new plane, and bind the new 1577 1577 * plane STDU. This function is called during the commit phase when
+5 -5
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 680 680 } 681 681 682 682 /** 683 - * vmw_user_surface_free - User visible surface TTM base object destructor 683 + * vmw_user_surface_base_release - User visible surface TTM base object destructor 684 684 * 685 685 * @p_base: Pointer to a pointer to a TTM base object 686 686 * embedded in a struct vmw_user_surface. ··· 702 702 } 703 703 704 704 /** 705 - * vmw_user_surface_destroy_ioctl - Ioctl function implementing 705 + * vmw_surface_destroy_ioctl - Ioctl function implementing 706 706 * the user surface destroy functionality. 707 707 * 708 708 * @dev: Pointer to a struct drm_device. ··· 719 719 } 720 720 721 721 /** 722 - * vmw_user_surface_define_ioctl - Ioctl function implementing 722 + * vmw_surface_define_ioctl - Ioctl function implementing 723 723 * the user surface define functionality. 724 724 * 725 725 * @dev: Pointer to a struct drm_device. ··· 1001 1001 } 1002 1002 1003 1003 /** 1004 - * vmw_user_surface_define_ioctl - Ioctl function implementing 1004 + * vmw_surface_reference_ioctl - Ioctl function implementing 1005 1005 * the user surface reference functionality. 1006 1006 * 1007 1007 * @dev: Pointer to a struct drm_device. ··· 1055 1055 } 1056 1056 1057 1057 /** 1058 - * vmw_surface_define_encode - Encode a surface_define command. 1058 + * vmw_gb_surface_create - Encode a surface_define command. 1059 1059 * 1060 1060 * @res: Pointer to a struct vmw_resource embedded in a struct 1061 1061 * vmw_surface.
+6 -3
drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
··· 200 200 const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt); 201 201 202 202 /** 203 - * Helper functions to advance a struct vmw_piter iterator. 203 + * __vmw_piter_non_sg_next: Helper functions to advance 204 + * a struct vmw_piter iterator. 204 205 * 205 206 * @viter: Pointer to the iterator. 206 207 * ··· 223 222 224 223 225 224 /** 226 - * Helper functions to return a pointer to the current page. 225 + * __vmw_piter_non_sg_page: Helper functions to return a pointer 226 + * to the current page. 227 227 * 228 228 * @viter: Pointer to the iterator 229 229 * ··· 238 236 } 239 237 240 238 /** 241 - * Helper functions to return the DMA address of the current page. 239 + * __vmw_piter_phys_addr: Helper functions to return the DMA 240 + * address of the current page. 242 241 * 243 242 * @viter: Pointer to the iterator 244 243 *
+1 -1
drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
··· 809 809 } 810 810 811 811 /** 812 - * vmw_validation_cone - Commit validation actions after command submission 812 + * vmw_validation_done - Commit validation actions after command submission 813 813 * success. 814 814 * @ctx: The validation context. 815 815 * @fence: Fence with which to fence all buffer objects taking part in the