Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v3.18-rc4 655 lines 18 kB view raw
/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/drmP.h>
#include <drm/ttm/ttm_placement.h>

/**
 * vmw_fifo_have_3d - Check whether the device can provide 3D support.
 *
 * @dev_priv: The device private structure.
 *
 * For guest-backed-object devices (SVGA_CAP_GBOBJECTS) the answer is
 * queried from the device via the SVGA_REG_DEV_CAP register pair, which
 * requires holding hw_mutex around the write/read sequence. For older
 * devices it is derived from the extended-FIFO 3D hardware version.
 *
 * Returns true if 3D is supported, false otherwise.
 */
bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t fifo_min, hwversion;
	const struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (!(dev_priv->capabilities & SVGA_CAP_3D))
		return false;

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint32_t result;

		/* 3D on a GB device additionally requires MOB support. */
		if (!dev_priv->has_mob)
			return false;

		mutex_lock(&dev_priv->hw_mutex);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
		result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		mutex_unlock(&dev_priv->hw_mutex);

		return (result != 0);
	}

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	/*
	 * The 3D hwversion register must lie within the mapped FIFO
	 * register area; otherwise it cannot be read.
	 */
	fifo_min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
		return false;

	/* Prefer the revised hwversion register when the FIFO exports it. */
	hwversion = ioread32(fifo_mem +
			     ((fifo->capabilities &
			       SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
			      SVGA_FIFO_3D_HWVERSION_REVISED :
			      SVGA_FIFO_3D_HWVERSION));

	if (hwversion == 0)
		return false;

	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
		return false;

	/* Non-Screen Object path does not support surfaces */
	if (!dev_priv->sou_priv)
		return false;

	return true;
}

/**
 * vmw_fifo_have_pitchlock - Check for FIFO pitchlock capability.
 *
 * @dev_priv: The device private structure.
 *
 * Returns true if the extended FIFO advertises
 * SVGA_FIFO_CAP_PITCHLOCK, false otherwise.
 */
bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t caps;

	if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
		return false;

	caps = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);
	if (caps & SVGA_FIFO_CAP_PITCHLOCK)
		return true;

	return false;
}

/**
 * vmw_fifo_init - Initialize the command FIFO.
 *
 * @dev_priv: The device private structure.
 * @fifo: The software FIFO state to initialize.
 *
 * Allocates the static bounce buffer, saves the pre-init values of
 * SVGA_REG_ENABLE / SVGA_REG_CONFIG_DONE / SVGA_REG_TRACES (restored
 * later by vmw_fifo_release()), enables the device, programs the FIFO
 * MIN/MAX/NEXT_CMD/STOP registers and finally emits an initial fence.
 *
 * Returns 0 on success, -ENOMEM if the bounce buffer allocation fails,
 * or the error returned by vmw_fifo_send_fence().
 */
int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t dummy;

	fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
	fifo->static_buffer = vmalloc(fifo->static_buffer_size);
	if (unlikely(fifo->static_buffer == NULL))
		return -ENOMEM;

	fifo->dynamic_buffer = NULL;
	fifo->reserved_size = 0;
	fifo->using_bounce_buffer = false;

	mutex_init(&fifo->fifo_mutex);
	init_rwsem(&fifo->rwsem);

	/*
	 * Allow mapping the first page read-only to user-space.
	 */

	DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
	DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
	DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));

	mutex_lock(&dev_priv->hw_mutex);
	/* Save device state so vmw_fifo_release() can restore it. */
	dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
	dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
	dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
	vmw_write(dev_priv, SVGA_REG_ENABLE, 1);

	/* Size of the FIFO register area, in 32-bit words shifted to bytes. */
	min = 4;
	if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
		min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
	min <<= 2;

	if (min < PAGE_SIZE)
		min = PAGE_SIZE;

	iowrite32(min, fifo_mem + SVGA_FIFO_MIN);
	iowrite32(dev_priv->mmio_size, fifo_mem + SVGA_FIFO_MAX);
	/* Make MIN/MAX visible before the queue pointers below. */
	wmb();
	iowrite32(min, fifo_mem + SVGA_FIFO_NEXT_CMD);
	iowrite32(min, fifo_mem + SVGA_FIFO_STOP);
	iowrite32(0, fifo_mem + SVGA_FIFO_BUSY);
	mb();

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
	mutex_unlock(&dev_priv->hw_mutex);

	/* Read back what the device actually accepted. */
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	fifo->capabilities = ioread32(fifo_mem + SVGA_FIFO_CAPABILITIES);

	DRM_INFO("Fifo max 0x%08x min 0x%08x cap 0x%08x\n",
		 (unsigned int) max,
		 (unsigned int) min,
		 (unsigned int) fifo->capabilities);

	atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
	iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
	vmw_marker_queue_init(&fifo->marker_queue);
	return vmw_fifo_send_fence(dev_priv, &dummy);
}

/**
 * vmw_fifo_ping_host_locked - Ask the host to process the FIFO.
 *
 * @dev_priv: The device private structure.
 * @reason: SVGA_SYNC_* reason code written to SVGA_REG_SYNC.
 *
 * Only writes the sync register when the FIFO is not already marked
 * busy, to avoid redundant device round trips. Caller is expected to
 * hold dev_priv->hw_mutex (see vmw_fifo_ping_host()).
 */
void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
		iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
		vmw_write(dev_priv, SVGA_REG_SYNC, reason);
	}
}

/**
 * vmw_fifo_ping_host - Lock-taking wrapper around
 * vmw_fifo_ping_host_locked().
 *
 * @dev_priv: The device private structure.
 * @reason: SVGA_SYNC_* reason code.
 */
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
	mutex_lock(&dev_priv->hw_mutex);

	vmw_fifo_ping_host_locked(dev_priv, reason);

	mutex_unlock(&dev_priv->hw_mutex);
}

/**
 * vmw_fifo_release - Tear down the command FIFO.
 *
 * @dev_priv: The device private structure.
 * @fifo: The software FIFO state to tear down.
 *
 * Syncs the device and busy-waits until it goes idle, records the last
 * fence seqno, restores the register state saved by vmw_fifo_init(),
 * and frees the bounce buffers.
 */
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;

	mutex_lock(&dev_priv->hw_mutex);

	vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
	/* Busy-wait for the device to drain; no timeout here. */
	while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
		;

	dev_priv->last_read_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
		  dev_priv->config_done_state);
	vmw_write(dev_priv, SVGA_REG_ENABLE,
		  dev_priv->enable_state);
	vmw_write(dev_priv, SVGA_REG_TRACES,
		  dev_priv->traces_state);

	mutex_unlock(&dev_priv->hw_mutex);
	vmw_marker_queue_takedown(&fifo->marker_queue);

	if (likely(fifo->static_buffer != NULL)) {
		vfree(fifo->static_buffer);
		fifo->static_buffer = NULL;
	}

	if (likely(fifo->dynamic_buffer != NULL)) {
		vfree(fifo->dynamic_buffer);
		fifo->dynamic_buffer = NULL;
	}
}

/**
 * vmw_fifo_is_full - Check whether @bytes of command space is
 * unavailable in the ring.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes we would like to enqueue.
 *
 * Computes the free space of the circular buffer from the MAX/MIN/
 * NEXT_CMD/STOP FIFO registers. Returns true if fewer than or exactly
 * @bytes are free (i.e. the caller must wait or bounce).
 */
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);

	return ((max - next_cmd) + (stop - min) <= bytes);
}

/**
 * vmw_fifo_wait_noirq - Poll for FIFO space without interrupt support.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the sleep may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Fallback for devices without SVGA_CAP_IRQMASK: sleeps one jiffy at a
 * time and rechecks. Returns 0 on success, -EBUSY on timeout
 * (suspected device lockup), or -ERESTARTSYS on a pending signal when
 * @interruptible.
 */
static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
			       uint32_t bytes, bool interruptible,
			       unsigned long timeout)
{
	int ret = 0;
	unsigned long end_jiffies = jiffies + timeout;
	DEFINE_WAIT(__wait);

	DRM_INFO("Fifo wait noirq.\n");

	for (;;) {
		prepare_to_wait(&dev_priv->fifo_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (!vmw_fifo_is_full(dev_priv, bytes))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			ret = -EBUSY;
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		schedule_timeout(1);
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fifo_queue, &__wait);
	/* Let other waiters recheck the ring as well. */
	wake_up_all(&dev_priv->fifo_queue);
	DRM_INFO("Fifo noirq exit.\n");
	return ret;
}

/**
 * vmw_fifo_wait - Wait for @bytes of FIFO command space.
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes of FIFO space to wait for.
 * @interruptible: Whether the sleep may be interrupted by a signal.
 * @timeout: Timeout in jiffies.
 *
 * Pings the host with SVGA_SYNC_FIFOFULL, then either polls (no IRQ
 * support) or enables the FIFO_PROGRESS interrupt while this task is
 * the first waiter and sleeps on fifo_queue. The interrupt is masked
 * again when the last waiter leaves. Returns 0 on success, -EBUSY on
 * timeout, or -ERESTARTSYS on signal.
 */
static int vmw_fifo_wait(struct vmw_private *dev_priv,
			 uint32_t bytes, bool interruptible,
			 unsigned long timeout)
{
	long ret = 1L;
	unsigned long irq_flags;

	if (likely(!vmw_fifo_is_full(dev_priv, bytes)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_FIFOFULL);
	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fifo_wait_noirq(dev_priv, bytes,
					   interruptible, timeout);

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		/* Clear any stale pending FIFO_PROGRESS status first. */
		outl(SVGA_IRQFLAG_FIFO_PROGRESS,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	if (interruptible)
		ret = wait_event_interruptible_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);
	else
		ret = wait_event_timeout
		    (dev_priv->fifo_queue,
		     !vmw_fifo_is_full(dev_priv, bytes), timeout);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	mutex_lock(&dev_priv->hw_mutex);
	if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	mutex_unlock(&dev_priv->hw_mutex);

	return ret;
}

/**
 * Reserve @bytes number of bytes in the fifo.
 *
 * This function will return NULL (error) on two conditions:
 * If it timeouts waiting for fifo space, or if @bytes is larger than the
 * available fifo space.
 *
 * Returns:
 * Pointer to the fifo, or null on error (possible hardware hang).
 *
 * On success the FIFO mutex is held until the matching
 * vmw_fifo_commit() releases it. The returned pointer is either
 * directly into the FIFO MMIO area, or one of the driver's bounce
 * buffers when in-place reservation is not possible; which one was
 * used is recorded in fifo_state->using_bounce_buffer.
 */
void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t max;
	uint32_t min;
	uint32_t next_cmd;
	uint32_t reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
	int ret;

	mutex_lock(&fifo_state->fifo_mutex);
	max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);

	/* The request can never fit, not even after waiting. */
	if (unlikely(bytes >= (max - min)))
		goto out_err;

	/* Nested reservation is not supported. */
	BUG_ON(fifo_state->reserved_size != 0);
	BUG_ON(fifo_state->dynamic_buffer != NULL);

	fifo_state->reserved_size = bytes;

	while (1) {
		uint32_t stop = ioread32(fifo_mem + SVGA_FIFO_STOP);
		bool need_bounce = false;
		bool reserve_in_place = false;

		if (next_cmd >= stop) {
			/* Free space does not wrap around. */
			if (likely((next_cmd + bytes < max ||
				    (next_cmd + bytes == max && stop > min))))
				reserve_in_place = true;

			else if (vmw_fifo_is_full(dev_priv, bytes)) {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			} else
				need_bounce = true;

		} else {
			/* Free region lies between next_cmd and stop. */
			if (likely((next_cmd + bytes < stop)))
				reserve_in_place = true;
			else {
				ret = vmw_fifo_wait(dev_priv, bytes,
						    false, 3 * HZ);
				if (unlikely(ret != 0))
					goto out_err;
			}
		}

		if (reserve_in_place) {
			/*
			 * Without SVGA_FIFO_CAP_RESERVE only a single
			 * dword may be written in place.
			 */
			if (reserveable || bytes <= sizeof(uint32_t)) {
				fifo_state->using_bounce_buffer = false;

				if (reserveable)
					iowrite32(bytes, fifo_mem +
						  SVGA_FIFO_RESERVED);
				return fifo_mem + (next_cmd >> 2);
			} else {
				need_bounce = true;
			}
		}

		if (need_bounce) {
			fifo_state->using_bounce_buffer = true;
			if (bytes < fifo_state->static_buffer_size)
				return fifo_state->static_buffer;
			else {
				/*
				 * NOTE(review): vmalloc() result is
				 * returned unchecked here; a failure
				 * surfaces as a NULL return, same as
				 * the error path.
				 */
				fifo_state->dynamic_buffer = vmalloc(bytes);
				return fifo_state->dynamic_buffer;
			}
		}
	}
out_err:
	fifo_state->reserved_size = 0;
	mutex_unlock(&fifo_state->fifo_mutex);
	return NULL;
}

/**
 * vmw_fifo_res_copy - Copy a bounce buffer into the FIFO using the
 * RESERVED register.
 *
 * @fifo_state: The software FIFO state.
 * @fifo_mem: Mapped FIFO MMIO area.
 * @next_cmd: Current value of SVGA_FIFO_NEXT_CMD.
 * @max: Current value of SVGA_FIFO_MAX.
 * @min: Current value of SVGA_FIFO_MIN.
 * @bytes: Number of bytes to copy.
 *
 * Writes SVGA_FIFO_RESERVED first, then copies in at most two chunks
 * (tail of the ring, then wrap to @min). Used when
 * SVGA_FIFO_CAP_RESERVE is available.
 */
static void vmw_fifo_res_copy(struct vmw_fifo_state *fifo_state,
			      __le32 __iomem *fifo_mem,
			      uint32_t next_cmd,
			      uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t chunk_size = max - next_cmd;
	uint32_t rest;
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	if (bytes < chunk_size)
		chunk_size = bytes;

	iowrite32(bytes, fifo_mem + SVGA_FIFO_RESERVED);
	/* Ensure the reservation is visible before the command data. */
	mb();
	memcpy_toio(fifo_mem + (next_cmd >> 2), buffer, chunk_size);
	rest = bytes - chunk_size;
	if (rest)
		memcpy_toio(fifo_mem + (min >> 2), buffer + (chunk_size >> 2),
			    rest);
}

/**
 * vmw_fifo_slow_copy - Copy a bounce buffer into the FIFO one dword at
 * a time.
 *
 * @fifo_state: The software FIFO state.
 * @fifo_mem: Mapped FIFO MMIO area.
 * @next_cmd: Current value of SVGA_FIFO_NEXT_CMD.
 * @max: Current value of SVGA_FIFO_MAX.
 * @min: Current value of SVGA_FIFO_MIN.
 * @bytes: Number of bytes to copy (multiple of 4).
 *
 * Fallback for devices without SVGA_FIFO_CAP_RESERVE: NEXT_CMD is
 * advanced after every dword, with barriers, so the device never sees
 * a partially written command word.
 */
static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
			       __le32 __iomem *fifo_mem,
			       uint32_t next_cmd,
			       uint32_t max, uint32_t min, uint32_t bytes)
{
	uint32_t *buffer = (fifo_state->dynamic_buffer != NULL) ?
	    fifo_state->dynamic_buffer : fifo_state->static_buffer;

	while (bytes > 0) {
		iowrite32(*buffer++, fifo_mem + (next_cmd >> 2));
		next_cmd += sizeof(uint32_t);
		if (unlikely(next_cmd == max))
			next_cmd = min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
		mb();
		bytes -= sizeof(uint32_t);
	}
}

/**
 * vmw_fifo_commit - Commit @bytes of a previous vmw_fifo_reserve().
 *
 * @dev_priv: The device private structure.
 * @bytes: Number of bytes to commit; must be a multiple of 4 and no
 * larger than the reserved size.
 *
 * If a bounce buffer was used, its contents are copied into the ring
 * first. NEXT_CMD is then advanced (with wraparound), the RESERVED
 * register is cleared, the host is pinged, and the FIFO mutex taken
 * by vmw_fifo_reserve() is released.
 */
void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t next_cmd = ioread32(fifo_mem + SVGA_FIFO_NEXT_CMD);
	uint32_t max = ioread32(fifo_mem + SVGA_FIFO_MAX);
	uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN);
	bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;

	BUG_ON((bytes & 3) != 0);
	BUG_ON(bytes > fifo_state->reserved_size);

	fifo_state->reserved_size = 0;

	if (fifo_state->using_bounce_buffer) {
		if (reserveable)
			vmw_fifo_res_copy(fifo_state, fifo_mem,
					  next_cmd, max, min, bytes);
		else
			vmw_fifo_slow_copy(fifo_state, fifo_mem,
					   next_cmd, max, min, bytes);

		if (fifo_state->dynamic_buffer) {
			vfree(fifo_state->dynamic_buffer);
			fifo_state->dynamic_buffer = NULL;
		}

	}

	down_write(&fifo_state->rwsem);
	/* The slow-copy path has already advanced NEXT_CMD itself. */
	if (fifo_state->using_bounce_buffer || reserveable) {
		next_cmd += bytes;
		if (next_cmd >= max)
			next_cmd -= max - min;
		mb();
		iowrite32(next_cmd, fifo_mem + SVGA_FIFO_NEXT_CMD);
	}

	if (reserveable)
		iowrite32(0, fifo_mem + SVGA_FIFO_RESERVED);
	mb();
	up_write(&fifo_state->rwsem);
	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	mutex_unlock(&fifo_state->fifo_mutex);
}

/**
 * vmw_fifo_send_fence - Emit a fence command and return its seqno.
 *
 * @dev_priv: The device private structure.
 * @seqno: Out: the sequence number assigned to the fence.
 *
 * Allocates the next non-zero marker seqno and writes an
 * SVGA_CMD_FENCE to the FIFO. If the device lacks
 * SVGA_FIFO_CAP_FENCE, the reservation is committed empty and fencing
 * is emulated by the IRQ code. Returns 0 on success or -ENOMEM if
 * FIFO space could not be reserved (a fallback wait is performed in
 * that case).
 */
int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
	struct svga_fifo_cmd_fence *cmd_fence;
	void *fm;
	int ret = 0;
	uint32_t bytes = sizeof(__le32) + sizeof(*cmd_fence);

	fm = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(fm == NULL)) {
		*seqno = atomic_read(&dev_priv->marker_seq);
		ret = -ENOMEM;
		(void)vmw_fallback_wait(dev_priv, false, true, *seqno,
					false, 3*HZ);
		goto out_err;
	}

	/* Seqno 0 is reserved; skip it on wraparound. */
	do {
		*seqno = atomic_add_return(1, &dev_priv->marker_seq);
	} while (*seqno == 0);

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {

		/*
		 * Don't request hardware to send a fence. The
		 * waiting code in vmwgfx_irq.c will emulate this.
		 */

		vmw_fifo_commit(dev_priv, 0);
		return 0;
	}

	*(__le32 *) fm = cpu_to_le32(SVGA_CMD_FENCE);
	cmd_fence = (struct svga_fifo_cmd_fence *)
	    ((unsigned long)fm + sizeof(__le32));

	iowrite32(*seqno, &cmd_fence->fence);
	vmw_fifo_commit(dev_priv, bytes);
	(void) vmw_marker_push(&fifo_state->marker_queue, *seqno);
	vmw_update_seqno(dev_priv, fifo_state);

out_err:
	return ret;
}

/**
 * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
 * legacy query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
					    uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;

	/* Address the result either in VRAM or through a GMR. */
	if (bo->mem.mem_type == TTM_PL_VRAM) {
		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
		cmd->body.guestResult.offset = bo->offset;
	} else {
		cmd->body.guestResult.gmrId = bo->mem.start;
		cmd->body.guestResult.offset = 0;
	}

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
 * guest-backed resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * See the vmw_fifo_emit_dummy_query documentation.
 */
static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
					uint32_t cid)
{
	/*
	 * A query wait without a preceding query end will
	 * actually finish all queries for this cid
	 * without writing to the query result structure.
	 */

	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForGBQuery body;
	} *cmd;

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Out of fifo space for dummy query.\n");
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = cid;
	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
	/* The guest-backed path requires the result bo to live in a MOB. */
	BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
	cmd->body.mobid = bo->mem.start;
	cmd->body.offset = 0;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	return 0;
}


/**
 * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo using
 * appropriate resource query commands.
 *
 * @dev_priv: The device private structure.
 * @cid: The hardware context id used for the query.
 *
 * This function is used to emit a dummy occlusion query with
 * no primitives rendered between query begin and query end.
 * It's used to provide a query barrier, in order to know that when
 * this query is finished, all preceding queries are also finished.
 *
 * A Query results structure should have been initialized at the start
 * of the dev_priv->dummy_query_bo buffer object. And that buffer object
 * must also be either reserved or pinned when this function is called.
 *
 * Returns -ENOMEM on failure to reserve fifo space.
 */
int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
			      uint32_t cid)
{
	if (dev_priv->has_mob)
		return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);

	return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
}