Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

drm/vmwgfx: Initial DX support

Initial DX support.
Co-authored with Sinclair Yeh, Charmaine Lee and Jakob Bornecrantz.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Sinclair Yeh <syeh@vmware.com>
Signed-off-by: Charmaine Lee <charmainel@vmware.com>

+5362 -790
+1
drivers/gpu/drm/vmwgfx/Makefile
··· 8 8 vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \ 9 9 vmwgfx_surface.o vmwgfx_prime.o vmwgfx_mob.o vmwgfx_shader.o \ 10 10 vmwgfx_cmdbuf_res.o vmwgfx_cmdbuf.o vmwgfx_stdu.o \ 11 + vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o 11 12 12 13 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
+1294
drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
··· 1 + /************************************************************************** 2 + * 3 + * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA 4 + * All Rights Reserved. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the 8 + * "Software"), to deal in the Software without restriction, including 9 + * without limitation the rights to use, copy, modify, merge, publish, 10 + * distribute, sub license, and/or sell copies of the Software, and to 11 + * permit persons to whom the Software is furnished to do so, subject to 12 + * the following conditions: 13 + * 14 + * The above copyright notice and this permission notice (including the 15 + * next paragraph) shall be included in all copies or substantial portions 16 + * of the Software. 17 + * 18 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 + * 26 + **************************************************************************/ 27 + /* 28 + * This file implements the vmwgfx context binding manager, 29 + * The sole reason for having to use this code is that vmware guest 30 + * backed contexts can be swapped out to their backing mobs by the device 31 + * at any time, also swapped in at any time. At swapin time, the device 32 + * validates the context bindings to make sure they point to valid resources. 33 + * It's this outside-of-drawcall validation (that can happen at any time), 34 + * that makes this code necessary. 
35 + * 36 + * We therefore need to kill any context bindings pointing to a resource 37 + * when the resource is swapped out. Furthermore, if the vmwgfx driver has 38 + * swapped out the context we can't swap it in again to kill bindings because 39 + * of backing mob reservation lockdep violations, so as part of 40 + * context swapout, also kill all bindings of a context, so that they are 41 + * already killed if a resource to which a binding points 42 + * needs to be swapped out. 43 + * 44 + * Note that a resource can be pointed to by bindings from multiple contexts, 45 + * Therefore we can't easily protect this data by a per context mutex 46 + * (unless we use deadlock-safe WW mutexes). So we use a global binding_mutex 47 + * to protect all binding manager data. 48 + * 49 + * Finally, any association between a context and a global resource 50 + * (surface, shader or even DX query) is conceptually a context binding that 51 + * needs to be tracked by this code. 52 + */ 53 + 54 + #include "vmwgfx_drv.h" 55 + #include "vmwgfx_binding.h" 56 + #include "device_include/svga3d_reg.h" 57 + 58 + #define VMW_BINDING_RT_BIT 0 59 + #define VMW_BINDING_PS_BIT 1 60 + #define VMW_BINDING_SO_BIT 2 61 + #define VMW_BINDING_VB_BIT 3 62 + #define VMW_BINDING_NUM_BITS 4 63 + 64 + #define VMW_BINDING_PS_SR_BIT 0 65 + 66 + /** 67 + * struct vmw_ctx_binding_state - per context binding state 68 + * 69 + * @dev_priv: Pointer to device private structure. 70 + * @list: linked list of individual active bindings. 71 + * @render_targets: Render target bindings. 72 + * @texture_units: Texture units bindings. 73 + * @ds_view: Depth-stencil view binding. 74 + * @so_targets: StreamOutput target bindings. 75 + * @vertex_buffers: Vertex buffer bindings. 76 + * @index_buffer: Index buffer binding. 77 + * @per_shader: Per shader-type bindings. 78 + * @dirty: Bitmap tracking per binding-type changes that have not yet 79 + * been emitted to the device. 
80 + * @dirty_vb: Bitmap tracking individual vertex buffer binding changes that 81 + * have not yet been emitted to the device. 82 + * @bind_cmd_buffer: Scratch space used to construct binding commands. 83 + * @bind_cmd_count: Number of binding command data entries in @bind_cmd_buffer 84 + * @bind_first_slot: Used together with @bind_cmd_buffer to indicate the 85 + * device binding slot of the first command data entry in @bind_cmd_buffer. 86 + * 87 + * Note that this structure also provides storage space for the individual 88 + * struct vmw_ctx_binding objects, so that no dynamic allocation is needed 89 + * for individual bindings. 90 + * 91 + */ 92 + struct vmw_ctx_binding_state { 93 + struct vmw_private *dev_priv; 94 + struct list_head list; 95 + struct vmw_ctx_bindinfo_view render_targets[SVGA3D_RT_MAX]; 96 + struct vmw_ctx_bindinfo_tex texture_units[SVGA3D_NUM_TEXTURE_UNITS]; 97 + struct vmw_ctx_bindinfo_view ds_view; 98 + struct vmw_ctx_bindinfo_so so_targets[SVGA3D_DX_MAX_SOTARGETS]; 99 + struct vmw_ctx_bindinfo_vb vertex_buffers[SVGA3D_DX_MAX_VERTEXBUFFERS]; 100 + struct vmw_ctx_bindinfo_ib index_buffer; 101 + struct vmw_dx_shader_bindings per_shader[SVGA3D_NUM_SHADERTYPE_DX10]; 102 + 103 + unsigned long dirty; 104 + DECLARE_BITMAP(dirty_vb, SVGA3D_DX_MAX_VERTEXBUFFERS); 105 + 106 + u32 bind_cmd_buffer[VMW_MAX_VIEW_BINDINGS]; 107 + u32 bind_cmd_count; 108 + u32 bind_first_slot; 109 + }; 110 + 111 + static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); 112 + static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi, 113 + bool rebind); 114 + static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); 115 + static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind); 116 + static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind); 117 + static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind); 118 + static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo 
*bi, bool rebind); 119 + static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs); 120 + static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, 121 + bool rebind); 122 + static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind); 123 + static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind); 124 + static void vmw_binding_build_asserts(void) __attribute__ ((unused)); 125 + 126 + typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); 127 + 128 + /** 129 + * struct vmw_binding_info - Per binding type information for the binding 130 + * manager 131 + * 132 + * @size: The size of the struct binding derived from a struct vmw_ctx_bindinfo. 133 + * @offsets: array[shader_slot] of offsets to the array[slot] 134 + * of struct bindings for the binding type. 135 + * @scrub_func: Pointer to the scrub function for this binding type. 136 + * 137 + * Holds static information to help optimize the binding manager and avoid 138 + * an excessive amount of switch statements. 139 + */ 140 + struct vmw_binding_info { 141 + size_t size; 142 + const size_t *offsets; 143 + vmw_scrub_func scrub_func; 144 + }; 145 + 146 + /* 147 + * A number of static variables that help determine the scrub func and the 148 + * location of the struct vmw_ctx_bindinfo slots for each binding type. 
149 + */ 150 + static const size_t vmw_binding_shader_offsets[] = { 151 + offsetof(struct vmw_ctx_binding_state, per_shader[0].shader), 152 + offsetof(struct vmw_ctx_binding_state, per_shader[1].shader), 153 + offsetof(struct vmw_ctx_binding_state, per_shader[2].shader), 154 + }; 155 + static const size_t vmw_binding_rt_offsets[] = { 156 + offsetof(struct vmw_ctx_binding_state, render_targets), 157 + }; 158 + static const size_t vmw_binding_tex_offsets[] = { 159 + offsetof(struct vmw_ctx_binding_state, texture_units), 160 + }; 161 + static const size_t vmw_binding_cb_offsets[] = { 162 + offsetof(struct vmw_ctx_binding_state, per_shader[0].const_buffers), 163 + offsetof(struct vmw_ctx_binding_state, per_shader[1].const_buffers), 164 + offsetof(struct vmw_ctx_binding_state, per_shader[2].const_buffers), 165 + }; 166 + static const size_t vmw_binding_dx_ds_offsets[] = { 167 + offsetof(struct vmw_ctx_binding_state, ds_view), 168 + }; 169 + static const size_t vmw_binding_sr_offsets[] = { 170 + offsetof(struct vmw_ctx_binding_state, per_shader[0].shader_res), 171 + offsetof(struct vmw_ctx_binding_state, per_shader[1].shader_res), 172 + offsetof(struct vmw_ctx_binding_state, per_shader[2].shader_res), 173 + }; 174 + static const size_t vmw_binding_so_offsets[] = { 175 + offsetof(struct vmw_ctx_binding_state, so_targets), 176 + }; 177 + static const size_t vmw_binding_vb_offsets[] = { 178 + offsetof(struct vmw_ctx_binding_state, vertex_buffers), 179 + }; 180 + static const size_t vmw_binding_ib_offsets[] = { 181 + offsetof(struct vmw_ctx_binding_state, index_buffer), 182 + }; 183 + 184 + static const struct vmw_binding_info vmw_binding_infos[] = { 185 + [vmw_ctx_binding_shader] = { 186 + .size = sizeof(struct vmw_ctx_bindinfo_shader), 187 + .offsets = vmw_binding_shader_offsets, 188 + .scrub_func = vmw_binding_scrub_shader}, 189 + [vmw_ctx_binding_rt] = { 190 + .size = sizeof(struct vmw_ctx_bindinfo_view), 191 + .offsets = vmw_binding_rt_offsets, 192 + .scrub_func = 
vmw_binding_scrub_render_target}, 193 + [vmw_ctx_binding_tex] = { 194 + .size = sizeof(struct vmw_ctx_bindinfo_tex), 195 + .offsets = vmw_binding_tex_offsets, 196 + .scrub_func = vmw_binding_scrub_texture}, 197 + [vmw_ctx_binding_cb] = { 198 + .size = sizeof(struct vmw_ctx_bindinfo_cb), 199 + .offsets = vmw_binding_cb_offsets, 200 + .scrub_func = vmw_binding_scrub_cb}, 201 + [vmw_ctx_binding_dx_shader] = { 202 + .size = sizeof(struct vmw_ctx_bindinfo_shader), 203 + .offsets = vmw_binding_shader_offsets, 204 + .scrub_func = vmw_binding_scrub_dx_shader}, 205 + [vmw_ctx_binding_dx_rt] = { 206 + .size = sizeof(struct vmw_ctx_bindinfo_view), 207 + .offsets = vmw_binding_rt_offsets, 208 + .scrub_func = vmw_binding_scrub_dx_rt}, 209 + [vmw_ctx_binding_sr] = { 210 + .size = sizeof(struct vmw_ctx_bindinfo_view), 211 + .offsets = vmw_binding_sr_offsets, 212 + .scrub_func = vmw_binding_scrub_sr}, 213 + [vmw_ctx_binding_ds] = { 214 + .size = sizeof(struct vmw_ctx_bindinfo_view), 215 + .offsets = vmw_binding_dx_ds_offsets, 216 + .scrub_func = vmw_binding_scrub_dx_rt}, 217 + [vmw_ctx_binding_so] = { 218 + .size = sizeof(struct vmw_ctx_bindinfo_so), 219 + .offsets = vmw_binding_so_offsets, 220 + .scrub_func = vmw_binding_scrub_so}, 221 + [vmw_ctx_binding_vb] = { 222 + .size = sizeof(struct vmw_ctx_bindinfo_vb), 223 + .offsets = vmw_binding_vb_offsets, 224 + .scrub_func = vmw_binding_scrub_vb}, 225 + [vmw_ctx_binding_ib] = { 226 + .size = sizeof(struct vmw_ctx_bindinfo_ib), 227 + .offsets = vmw_binding_ib_offsets, 228 + .scrub_func = vmw_binding_scrub_ib}, 229 + }; 230 + 231 + /** 232 + * vmw_cbs_context - Return a pointer to the context resource of a 233 + * context binding state tracker. 234 + * 235 + * @cbs: The context binding state tracker. 236 + * 237 + * Provided there are any active bindings, this function will return an 238 + * unreferenced pointer to the context resource that owns the context 239 + * binding state tracker. 
If there are no active bindings, this function 240 + * will return NULL. Note that the caller must somehow ensure that a reference 241 + * is held on the context resource prior to calling this function. 242 + */ 243 + static const struct vmw_resource * 244 + vmw_cbs_context(const struct vmw_ctx_binding_state *cbs) 245 + { 246 + if (list_empty(&cbs->list)) 247 + return NULL; 248 + 249 + return list_first_entry(&cbs->list, struct vmw_ctx_bindinfo, 250 + ctx_list)->ctx; 251 + } 252 + 253 + /** 254 + * vmw_binding_loc - determine the struct vmw_ctx_bindinfo slot location. 255 + * 256 + * @cbs: Pointer to a struct vmw_ctx_binding state which holds the slot. 257 + * @bt: The binding type. 258 + * @shader_slot: The shader slot of the binding. If none, then set to 0. 259 + * @slot: The slot of the binding. 260 + */ 261 + static struct vmw_ctx_bindinfo * 262 + vmw_binding_loc(struct vmw_ctx_binding_state *cbs, 263 + enum vmw_ctx_binding_type bt, u32 shader_slot, u32 slot) 264 + { 265 + const struct vmw_binding_info *b = &vmw_binding_infos[bt]; 266 + size_t offset = b->offsets[shader_slot] + b->size*slot; 267 + 268 + return (struct vmw_ctx_bindinfo *)((u8 *) cbs + offset); 269 + } 270 + 271 + /** 272 + * vmw_binding_drop: Stop tracking a context binding 273 + * 274 + * @bi: Pointer to binding tracker storage. 275 + * 276 + * Stops tracking a context binding, and re-initializes its storage. 277 + * Typically used when the context binding is replaced with a binding to 278 + * another (or the same, for that matter) resource. 279 + */ 280 + static void vmw_binding_drop(struct vmw_ctx_bindinfo *bi) 281 + { 282 + list_del(&bi->ctx_list); 283 + if (!list_empty(&bi->res_list)) 284 + list_del(&bi->res_list); 285 + bi->ctx = NULL; 286 + } 287 + 288 + /** 289 + * vmw_binding_add: Start tracking a context binding 290 + * 291 + * @cbs: Pointer to the context binding state tracker. 292 + * @bi: Information about the binding to track. 
293 + * 294 + * Starts tracking the binding in the context binding 295 + * state structure @cbs. 296 + */ 297 + void vmw_binding_add(struct vmw_ctx_binding_state *cbs, 298 + const struct vmw_ctx_bindinfo *bi, 299 + u32 shader_slot, u32 slot) 300 + { 301 + struct vmw_ctx_bindinfo *loc = 302 + vmw_binding_loc(cbs, bi->bt, shader_slot, slot); 303 + const struct vmw_binding_info *b = &vmw_binding_infos[bi->bt]; 304 + 305 + if (loc->ctx != NULL) 306 + vmw_binding_drop(loc); 307 + 308 + memcpy(loc, bi, b->size); 309 + loc->scrubbed = false; 310 + list_add(&loc->ctx_list, &cbs->list); 311 + INIT_LIST_HEAD(&loc->res_list); 312 + } 313 + 314 + /** 315 + * vmw_binding_transfer: Transfer a context binding tracking entry. 316 + * 317 + * @cbs: Pointer to the persistent context binding state tracker. 318 + * @bi: Information about the binding to track. 319 + * 320 + */ 321 + static void vmw_binding_transfer(struct vmw_ctx_binding_state *cbs, 322 + const struct vmw_ctx_binding_state *from, 323 + const struct vmw_ctx_bindinfo *bi) 324 + { 325 + size_t offset = (unsigned long)bi - (unsigned long)from; 326 + struct vmw_ctx_bindinfo *loc = (struct vmw_ctx_bindinfo *) 327 + ((unsigned long) cbs + offset); 328 + 329 + if (loc->ctx != NULL) { 330 + WARN_ON(bi->scrubbed); 331 + 332 + vmw_binding_drop(loc); 333 + } 334 + 335 + if (bi->res != NULL) { 336 + memcpy(loc, bi, vmw_binding_infos[bi->bt].size); 337 + list_add_tail(&loc->ctx_list, &cbs->list); 338 + list_add_tail(&loc->res_list, &loc->res->binding_head); 339 + } 340 + } 341 + 342 + /** 343 + * vmw_binding_state_kill - Kill all bindings associated with a 344 + * struct vmw_ctx_binding state structure, and re-initialize the structure. 345 + * 346 + * @cbs: Pointer to the context binding state tracker. 347 + * 348 + * Emits commands to scrub all bindings associated with the 349 + * context binding state tracker. Then re-initializes the whole structure. 
350 + */ 351 + void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs) 352 + { 353 + struct vmw_ctx_bindinfo *entry, *next; 354 + 355 + vmw_binding_state_scrub(cbs); 356 + list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) 357 + vmw_binding_drop(entry); 358 + } 359 + 360 + /** 361 + * vmw_binding_state_scrub - Scrub all bindings associated with a 362 + * struct vmw_ctx_binding state structure. 363 + * 364 + * @cbs: Pointer to the context binding state tracker. 365 + * 366 + * Emits commands to scrub all bindings associated with the 367 + * context binding state tracker. 368 + */ 369 + void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs) 370 + { 371 + struct vmw_ctx_bindinfo *entry; 372 + 373 + list_for_each_entry(entry, &cbs->list, ctx_list) { 374 + if (!entry->scrubbed) { 375 + (void) vmw_binding_infos[entry->bt].scrub_func 376 + (entry, false); 377 + entry->scrubbed = true; 378 + } 379 + } 380 + 381 + (void) vmw_binding_emit_dirty(cbs); 382 + } 383 + 384 + /** 385 + * vmw_binding_res_list_kill - Kill all bindings on a 386 + * resource binding list 387 + * 388 + * @head: list head of resource binding list 389 + * 390 + * Kills all bindings associated with a specific resource. Typically 391 + * called before the resource is destroyed. 392 + */ 393 + void vmw_binding_res_list_kill(struct list_head *head) 394 + { 395 + struct vmw_ctx_bindinfo *entry, *next; 396 + 397 + vmw_binding_res_list_scrub(head); 398 + list_for_each_entry_safe(entry, next, head, res_list) 399 + vmw_binding_drop(entry); 400 + } 401 + 402 + /** 403 + * vmw_binding_res_list_scrub - Scrub all bindings on a 404 + * resource binding list 405 + * 406 + * @head: list head of resource binding list 407 + * 408 + * Scrub all bindings associated with a specific resource. Typically 409 + * called before the resource is evicted. 
410 + */ 411 + void vmw_binding_res_list_scrub(struct list_head *head) 412 + { 413 + struct vmw_ctx_bindinfo *entry; 414 + 415 + list_for_each_entry(entry, head, res_list) { 416 + if (!entry->scrubbed) { 417 + (void) vmw_binding_infos[entry->bt].scrub_func 418 + (entry, false); 419 + entry->scrubbed = true; 420 + } 421 + } 422 + 423 + list_for_each_entry(entry, head, res_list) { 424 + struct vmw_ctx_binding_state *cbs = 425 + vmw_context_binding_state(entry->ctx); 426 + 427 + (void) vmw_binding_emit_dirty(cbs); 428 + } 429 + } 430 + 431 + 432 + /** 433 + * vmw_binding_state_commit - Commit staged binding info 434 + * 435 + * @ctx: Pointer to context to commit the staged binding info to. 436 + * @from: Staged binding info built during execbuf. 437 + * @scrubbed: Transfer only scrubbed bindings. 438 + * 439 + * Transfers binding info from a temporary structure 440 + * (typically used by execbuf) to the persistent 441 + * structure in the context. This can be done once commands have been 442 + * submitted to hardware 443 + */ 444 + void vmw_binding_state_commit(struct vmw_ctx_binding_state *to, 445 + struct vmw_ctx_binding_state *from) 446 + { 447 + struct vmw_ctx_bindinfo *entry, *next; 448 + 449 + list_for_each_entry_safe(entry, next, &from->list, ctx_list) { 450 + vmw_binding_transfer(to, from, entry); 451 + vmw_binding_drop(entry); 452 + } 453 + } 454 + 455 + /** 456 + * vmw_binding_rebind_all - Rebind all scrubbed bindings of a context 457 + * 458 + * @ctx: The context resource 459 + * 460 + * Walks through the context binding list and rebinds all scrubbed 461 + * resources. 
462 + */ 463 + int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs) 464 + { 465 + struct vmw_ctx_bindinfo *entry; 466 + int ret; 467 + 468 + list_for_each_entry(entry, &cbs->list, ctx_list) { 469 + if (likely(!entry->scrubbed)) 470 + continue; 471 + 472 + if ((entry->res == NULL || entry->res->id == 473 + SVGA3D_INVALID_ID)) 474 + continue; 475 + 476 + ret = vmw_binding_infos[entry->bt].scrub_func(entry, true); 477 + if (unlikely(ret != 0)) 478 + return ret; 479 + 480 + entry->scrubbed = false; 481 + } 482 + 483 + return vmw_binding_emit_dirty(cbs); 484 + } 485 + 486 + /** 487 + * vmw_binding_scrub_shader - scrub a shader binding from a context. 488 + * 489 + * @bi: single binding information. 490 + * @rebind: Whether to issue a bind instead of scrub command. 491 + */ 492 + static int vmw_binding_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) 493 + { 494 + struct vmw_ctx_bindinfo_shader *binding = 495 + container_of(bi, typeof(*binding), bi); 496 + struct vmw_private *dev_priv = bi->ctx->dev_priv; 497 + struct { 498 + SVGA3dCmdHeader header; 499 + SVGA3dCmdSetShader body; 500 + } *cmd; 501 + 502 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 503 + if (unlikely(cmd == NULL)) { 504 + DRM_ERROR("Failed reserving FIFO space for shader " 505 + "unbinding.\n"); 506 + return -ENOMEM; 507 + } 508 + 509 + cmd->header.id = SVGA_3D_CMD_SET_SHADER; 510 + cmd->header.size = sizeof(cmd->body); 511 + cmd->body.cid = bi->ctx->id; 512 + cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; 513 + cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 514 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 515 + 516 + return 0; 517 + } 518 + 519 + /** 520 + * vmw_binding_scrub_render_target - scrub a render target binding 521 + * from a context. 522 + * 523 + * @bi: single binding information. 524 + * @rebind: Whether to issue a bind instead of scrub command. 
525 + */ 526 + static int vmw_binding_scrub_render_target(struct vmw_ctx_bindinfo *bi, 527 + bool rebind) 528 + { 529 + struct vmw_ctx_bindinfo_view *binding = 530 + container_of(bi, typeof(*binding), bi); 531 + struct vmw_private *dev_priv = bi->ctx->dev_priv; 532 + struct { 533 + SVGA3dCmdHeader header; 534 + SVGA3dCmdSetRenderTarget body; 535 + } *cmd; 536 + 537 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 538 + if (unlikely(cmd == NULL)) { 539 + DRM_ERROR("Failed reserving FIFO space for render target " 540 + "unbinding.\n"); 541 + return -ENOMEM; 542 + } 543 + 544 + cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; 545 + cmd->header.size = sizeof(cmd->body); 546 + cmd->body.cid = bi->ctx->id; 547 + cmd->body.type = binding->slot; 548 + cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 549 + cmd->body.target.face = 0; 550 + cmd->body.target.mipmap = 0; 551 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 552 + 553 + return 0; 554 + } 555 + 556 + /** 557 + * vmw_binding_scrub_texture - scrub a texture binding from a context. 558 + * 559 + * @bi: single binding information. 560 + * @rebind: Whether to issue a bind instead of scrub command. 561 + * 562 + * TODO: Possibly complement this function with a function that takes 563 + * a list of texture bindings and combines them to a single command. 
564 + */ 565 + static int vmw_binding_scrub_texture(struct vmw_ctx_bindinfo *bi, 566 + bool rebind) 567 + { 568 + struct vmw_ctx_bindinfo_tex *binding = 569 + container_of(bi, typeof(*binding), bi); 570 + struct vmw_private *dev_priv = bi->ctx->dev_priv; 571 + struct { 572 + SVGA3dCmdHeader header; 573 + struct { 574 + SVGA3dCmdSetTextureState c; 575 + SVGA3dTextureState s1; 576 + } body; 577 + } *cmd; 578 + 579 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 580 + if (unlikely(cmd == NULL)) { 581 + DRM_ERROR("Failed reserving FIFO space for texture " 582 + "unbinding.\n"); 583 + return -ENOMEM; 584 + } 585 + 586 + cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; 587 + cmd->header.size = sizeof(cmd->body); 588 + cmd->body.c.cid = bi->ctx->id; 589 + cmd->body.s1.stage = binding->texture_stage; 590 + cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; 591 + cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 592 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 593 + 594 + return 0; 595 + } 596 + 597 + /** 598 + * vmw_binding_scrub_dx_shader - scrub a dx shader binding from a context. 599 + * 600 + * @bi: single binding information. 601 + * @rebind: Whether to issue a bind instead of scrub command. 602 + */ 603 + static int vmw_binding_scrub_dx_shader(struct vmw_ctx_bindinfo *bi, bool rebind) 604 + { 605 + struct vmw_ctx_bindinfo_shader *binding = 606 + container_of(bi, typeof(*binding), bi); 607 + struct vmw_private *dev_priv = bi->ctx->dev_priv; 608 + struct { 609 + SVGA3dCmdHeader header; 610 + SVGA3dCmdDXSetShader body; 611 + } *cmd; 612 + 613 + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id); 614 + if (unlikely(cmd == NULL)) { 615 + DRM_ERROR("Failed reserving FIFO space for DX shader " 616 + "unbinding.\n"); 617 + return -ENOMEM; 618 + } 619 + cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER; 620 + cmd->header.size = sizeof(cmd->body); 621 + cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; 622 + cmd->body.shaderId = ((rebind) ? 
bi->res->id : SVGA3D_INVALID_ID); 623 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 624 + 625 + return 0; 626 + } 627 + 628 + /** 629 + * vmw_binding_scrub_cb - scrub a constant buffer binding from a context. 630 + * 631 + * @bi: single binding information. 632 + * @rebind: Whether to issue a bind instead of scrub command. 633 + */ 634 + static int vmw_binding_scrub_cb(struct vmw_ctx_bindinfo *bi, bool rebind) 635 + { 636 + struct vmw_ctx_bindinfo_cb *binding = 637 + container_of(bi, typeof(*binding), bi); 638 + struct vmw_private *dev_priv = bi->ctx->dev_priv; 639 + struct { 640 + SVGA3dCmdHeader header; 641 + SVGA3dCmdDXSetSingleConstantBuffer body; 642 + } *cmd; 643 + 644 + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id); 645 + if (unlikely(cmd == NULL)) { 646 + DRM_ERROR("Failed reserving FIFO space for DX shader " 647 + "unbinding.\n"); 648 + return -ENOMEM; 649 + } 650 + 651 + cmd->header.id = SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER; 652 + cmd->header.size = sizeof(cmd->body); 653 + cmd->body.slot = binding->slot; 654 + cmd->body.type = binding->shader_slot + SVGA3D_SHADERTYPE_MIN; 655 + if (rebind) { 656 + cmd->body.offsetInBytes = binding->offset; 657 + cmd->body.sizeInBytes = binding->size; 658 + cmd->body.sid = bi->res->id; 659 + } else { 660 + cmd->body.offsetInBytes = 0; 661 + cmd->body.sizeInBytes = 0; 662 + cmd->body.sid = SVGA3D_INVALID_ID; 663 + } 664 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 665 + 666 + return 0; 667 + } 668 + 669 + /** 670 + * vmw_collect_view_ids - Build view id data for a view binding command 671 + * without checking which bindings actually need to be emitted 672 + * 673 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state 674 + * @bi: Pointer to where the binding info array is stored in @cbs 675 + * @max_num: Maximum number of entries in the @bi array. 676 + * 677 + * Scans the @bi array for bindings and builds a buffer of view id data. 678 + * Stops at the first non-existing binding in the @bi array. 
679 + * On output, @cbs->bind_cmd_count contains the number of bindings to be 680 + * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer 681 + * contains the command data. 682 + */ 683 + static void vmw_collect_view_ids(struct vmw_ctx_binding_state *cbs, 684 + const struct vmw_ctx_bindinfo *bi, 685 + u32 max_num) 686 + { 687 + const struct vmw_ctx_bindinfo_view *biv = 688 + container_of(bi, struct vmw_ctx_bindinfo_view, bi); 689 + unsigned long i; 690 + 691 + cbs->bind_cmd_count = 0; 692 + cbs->bind_first_slot = 0; 693 + 694 + for (i = 0; i < max_num; ++i, ++biv) { 695 + if (!biv->bi.ctx) 696 + break; 697 + 698 + cbs->bind_cmd_buffer[cbs->bind_cmd_count++] = 699 + ((biv->bi.scrubbed) ? 700 + SVGA3D_INVALID_ID : biv->bi.res->id); 701 + } 702 + } 703 + 704 + /** 705 + * vmw_collect_dirty_view_ids - Build view id data for a view binding command 706 + * 707 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state 708 + * @bi: Pointer to where the binding info array is stored in @cbs 709 + * @dirty: Bitmap indicating which bindings need to be emitted. 710 + * @max_num: Maximum number of entries in the @bi array. 711 + * 712 + * Scans the @bi array for bindings that need to be emitted and 713 + * builds a buffer of view id data. 714 + * On output, @cbs->bind_cmd_count contains the number of bindings to be 715 + * emitted, @cbs->bind_first_slot indicates the index of the first emitted 716 + * binding, and @cbs->bind_cmd_buffer contains the command data. 
717 + */ 718 + static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs, 719 + const struct vmw_ctx_bindinfo *bi, 720 + unsigned long *dirty, 721 + u32 max_num) 722 + { 723 + const struct vmw_ctx_bindinfo_view *biv = 724 + container_of(bi, struct vmw_ctx_bindinfo_view, bi); 725 + unsigned long i, next_bit; 726 + 727 + cbs->bind_cmd_count = 0; 728 + i = find_first_bit(dirty, max_num); 729 + next_bit = i; 730 + cbs->bind_first_slot = i; 731 + 732 + biv += i; 733 + for (; i < max_num; ++i, ++biv) { 734 + cbs->bind_cmd_buffer[cbs->bind_cmd_count++] = 735 + ((!biv->bi.ctx || biv->bi.scrubbed) ? 736 + SVGA3D_INVALID_ID : biv->bi.res->id); 737 + 738 + if (next_bit == i) { 739 + next_bit = find_next_bit(dirty, max_num, i + 1); 740 + if (next_bit >= max_num) 741 + break; 742 + } 743 + } 744 + } 745 + 746 + /** 747 + * vmw_binding_emit_set_sr - Issue delayed DX shader resource binding commands 748 + * 749 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state 750 + */ 751 + static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs, 752 + int shader_slot) 753 + { 754 + const struct vmw_ctx_bindinfo *loc = 755 + &cbs->per_shader[shader_slot].shader_res[0].bi; 756 + struct { 757 + SVGA3dCmdHeader header; 758 + SVGA3dCmdDXSetShaderResources body; 759 + } *cmd; 760 + size_t cmd_size, view_id_size; 761 + const struct vmw_resource *ctx = vmw_cbs_context(cbs); 762 + 763 + vmw_collect_dirty_view_ids(cbs, loc, 764 + cbs->per_shader[shader_slot].dirty_sr, 765 + SVGA3D_DX_MAX_SRVIEWS); 766 + if (cbs->bind_cmd_count == 0) 767 + return 0; 768 + 769 + view_id_size = cbs->bind_cmd_count*sizeof(uint32); 770 + cmd_size = sizeof(*cmd) + view_id_size; 771 + cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id); 772 + if (unlikely(cmd == NULL)) { 773 + DRM_ERROR("Failed reserving FIFO space for DX shader" 774 + " resource binding.\n"); 775 + return -ENOMEM; 776 + } 777 + 778 + cmd->header.id = SVGA_3D_CMD_DX_SET_SHADER_RESOURCES; 779 + cmd->header.size = 
sizeof(cmd->body) + view_id_size; 780 + cmd->body.type = shader_slot + SVGA3D_SHADERTYPE_MIN; 781 + cmd->body.startView = cbs->bind_first_slot; 782 + 783 + memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); 784 + 785 + vmw_fifo_commit(ctx->dev_priv, cmd_size); 786 + bitmap_clear(cbs->per_shader[shader_slot].dirty_sr, 787 + cbs->bind_first_slot, cbs->bind_cmd_count); 788 + 789 + return 0; 790 + } 791 + 792 + /** 793 + * vmw_binding_emit_set_rt - Issue delayed DX rendertarget binding commands 794 + * 795 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state 796 + */ 797 + static int vmw_emit_set_rt(struct vmw_ctx_binding_state *cbs) 798 + { 799 + const struct vmw_ctx_bindinfo *loc = &cbs->render_targets[0].bi; 800 + struct { 801 + SVGA3dCmdHeader header; 802 + SVGA3dCmdDXSetRenderTargets body; 803 + } *cmd; 804 + size_t cmd_size, view_id_size; 805 + const struct vmw_resource *ctx = vmw_cbs_context(cbs); 806 + 807 + vmw_collect_view_ids(cbs, loc, SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS); 808 + view_id_size = cbs->bind_cmd_count*sizeof(uint32); 809 + cmd_size = sizeof(*cmd) + view_id_size; 810 + cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id); 811 + if (unlikely(cmd == NULL)) { 812 + DRM_ERROR("Failed reserving FIFO space for DX render-target" 813 + " binding.\n"); 814 + return -ENOMEM; 815 + } 816 + 817 + cmd->header.id = SVGA_3D_CMD_DX_SET_RENDERTARGETS; 818 + cmd->header.size = sizeof(cmd->body) + view_id_size; 819 + 820 + if (cbs->ds_view.bi.ctx && !cbs->ds_view.bi.scrubbed) 821 + cmd->body.depthStencilViewId = cbs->ds_view.bi.res->id; 822 + else 823 + cmd->body.depthStencilViewId = SVGA3D_INVALID_ID; 824 + 825 + memcpy(&cmd[1], cbs->bind_cmd_buffer, view_id_size); 826 + 827 + vmw_fifo_commit(ctx->dev_priv, cmd_size); 828 + 829 + return 0; 830 + 831 + } 832 + 833 + /** 834 + * vmw_collect_so_targets - Build SVGA3dSoTarget data for a binding command 835 + * without checking which bindings actually need to be emitted 836 + * 837 + * @cbs: 
Pointer to the context's struct vmw_ctx_binding_state 838 + * @bi: Pointer to where the binding info array is stored in @cbs 839 + * @max_num: Maximum number of entries in the @bi array. 840 + * 841 + * Scans the @bi array for bindings and builds a buffer of SVGA3dSoTarget data. 842 + * Stops at the first non-existing binding in the @bi array. 843 + * On output, @cbs->bind_cmd_count contains the number of bindings to be 844 + * emitted, @cbs->bind_first_slot is set to zero, and @cbs->bind_cmd_buffer 845 + * contains the command data. 846 + */ 847 + static void vmw_collect_so_targets(struct vmw_ctx_binding_state *cbs, 848 + const struct vmw_ctx_bindinfo *bi, 849 + u32 max_num) 850 + { 851 + const struct vmw_ctx_bindinfo_so *biso = 852 + container_of(bi, struct vmw_ctx_bindinfo_so, bi); 853 + unsigned long i; 854 + SVGA3dSoTarget *so_buffer = (SVGA3dSoTarget *) cbs->bind_cmd_buffer; 855 + 856 + cbs->bind_cmd_count = 0; 857 + cbs->bind_first_slot = 0; 858 + 859 + for (i = 0; i < max_num; ++i, ++biso, ++so_buffer, 860 + ++cbs->bind_cmd_count) { 861 + if (!biso->bi.ctx) 862 + break; 863 + 864 + if (!biso->bi.scrubbed) { 865 + so_buffer->sid = biso->bi.res->id; 866 + so_buffer->offset = biso->offset; 867 + so_buffer->sizeInBytes = biso->size; 868 + } else { 869 + so_buffer->sid = SVGA3D_INVALID_ID; 870 + so_buffer->offset = 0; 871 + so_buffer->sizeInBytes = 0; 872 + } 873 + } 874 + } 875 + 876 + /** 877 + * vmw_binding_emit_set_so - Issue delayed streamout binding commands 878 + * 879 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state 880 + */ 881 + static int vmw_emit_set_so(struct vmw_ctx_binding_state *cbs) 882 + { 883 + const struct vmw_ctx_bindinfo *loc = &cbs->so_targets[0].bi; 884 + struct { 885 + SVGA3dCmdHeader header; 886 + SVGA3dCmdDXSetSOTargets body; 887 + } *cmd; 888 + size_t cmd_size, so_target_size; 889 + const struct vmw_resource *ctx = vmw_cbs_context(cbs); 890 + 891 + vmw_collect_so_targets(cbs, loc, SVGA3D_DX_MAX_SOTARGETS); 892 + if 
(cbs->bind_cmd_count == 0) 893 + return 0; 894 + 895 + so_target_size = cbs->bind_cmd_count*sizeof(SVGA3dSoTarget); 896 + cmd_size = sizeof(*cmd) + so_target_size; 897 + cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id); 898 + if (unlikely(cmd == NULL)) { 899 + DRM_ERROR("Failed reserving FIFO space for DX SO target" 900 + " binding.\n"); 901 + return -ENOMEM; 902 + } 903 + 904 + cmd->header.id = SVGA_3D_CMD_DX_SET_SOTARGETS; 905 + cmd->header.size = sizeof(cmd->body) + so_target_size; 906 + memcpy(&cmd[1], cbs->bind_cmd_buffer, so_target_size); 907 + 908 + vmw_fifo_commit(ctx->dev_priv, cmd_size); 909 + 910 + return 0; 911 + 912 + } 913 + 914 + /** 915 + * vmw_binding_emit_dirty_ps - Issue delayed per shader binding commands 916 + * 917 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state 918 + * 919 + */ 920 + static int vmw_binding_emit_dirty_ps(struct vmw_ctx_binding_state *cbs) 921 + { 922 + struct vmw_dx_shader_bindings *sb = &cbs->per_shader[0]; 923 + u32 i; 924 + int ret; 925 + 926 + for (i = 0; i < SVGA3D_NUM_SHADERTYPE_DX10; ++i, ++sb) { 927 + if (!test_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty)) 928 + continue; 929 + 930 + ret = vmw_emit_set_sr(cbs, i); 931 + if (ret) 932 + break; 933 + 934 + __clear_bit(VMW_BINDING_PS_SR_BIT, &sb->dirty); 935 + } 936 + 937 + return 0; 938 + } 939 + 940 + /** 941 + * vmw_collect_dirty_vbs - Build SVGA3dVertexBuffer data for a 942 + * SVGA3dCmdDXSetVertexBuffers command 943 + * 944 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state 945 + * @bi: Pointer to where the binding info array is stored in @cbs 946 + * @dirty: Bitmap indicating which bindings need to be emitted. 947 + * @max_num: Maximum number of entries in the @bi array. 948 + * 949 + * Scans the @bi array for bindings that need to be emitted and 950 + * builds a buffer of SVGA3dVertexBuffer data. 
951 + * On output, @cbs->bind_cmd_count contains the number of bindings to be 952 + * emitted, @cbs->bind_first_slot indicates the index of the first emitted 953 + * binding, and @cbs->bind_cmd_buffer contains the command data. 954 + */ 955 + static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs, 956 + const struct vmw_ctx_bindinfo *bi, 957 + unsigned long *dirty, 958 + u32 max_num) 959 + { 960 + const struct vmw_ctx_bindinfo_vb *biv = 961 + container_of(bi, struct vmw_ctx_bindinfo_vb, bi); 962 + unsigned long i, next_bit; 963 + SVGA3dVertexBuffer *vbs = (SVGA3dVertexBuffer *) &cbs->bind_cmd_buffer; 964 + 965 + cbs->bind_cmd_count = 0; 966 + i = find_first_bit(dirty, max_num); 967 + next_bit = i; 968 + cbs->bind_first_slot = i; 969 + 970 + biv += i; 971 + for (; i < max_num; ++i, ++biv, ++vbs) { 972 + if (!biv->bi.ctx || biv->bi.scrubbed) { 973 + vbs->sid = SVGA3D_INVALID_ID; 974 + vbs->stride = 0; 975 + vbs->offset = 0; 976 + } else { 977 + vbs->sid = biv->bi.res->id; 978 + vbs->stride = biv->stride; 979 + vbs->offset = biv->offset; 980 + } 981 + cbs->bind_cmd_count++; 982 + if (next_bit == i) { 983 + next_bit = find_next_bit(dirty, max_num, i + 1); 984 + if (next_bit >= max_num) 985 + break; 986 + } 987 + } 988 + } 989 + 990 + /** 991 + * vmw_binding_emit_set_vb - Issue delayed vertex buffer binding commands 992 + * 993 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state 994 + * 995 + */ 996 + static int vmw_emit_set_vb(struct vmw_ctx_binding_state *cbs) 997 + { 998 + const struct vmw_ctx_bindinfo *loc = 999 + &cbs->vertex_buffers[0].bi; 1000 + struct { 1001 + SVGA3dCmdHeader header; 1002 + SVGA3dCmdDXSetVertexBuffers body; 1003 + } *cmd; 1004 + size_t cmd_size, set_vb_size; 1005 + const struct vmw_resource *ctx = vmw_cbs_context(cbs); 1006 + 1007 + vmw_collect_dirty_vbs(cbs, loc, cbs->dirty_vb, 1008 + SVGA3D_DX_MAX_VERTEXBUFFERS); 1009 + if (cbs->bind_cmd_count == 0) 1010 + return 0; 1011 + 1012 + set_vb_size = 
cbs->bind_cmd_count*sizeof(SVGA3dVertexBuffer); 1013 + cmd_size = sizeof(*cmd) + set_vb_size; 1014 + cmd = vmw_fifo_reserve_dx(ctx->dev_priv, cmd_size, ctx->id); 1015 + if (unlikely(cmd == NULL)) { 1016 + DRM_ERROR("Failed reserving FIFO space for DX vertex buffer" 1017 + " binding.\n"); 1018 + return -ENOMEM; 1019 + } 1020 + 1021 + cmd->header.id = SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS; 1022 + cmd->header.size = sizeof(cmd->body) + set_vb_size; 1023 + cmd->body.startBuffer = cbs->bind_first_slot; 1024 + 1025 + memcpy(&cmd[1], cbs->bind_cmd_buffer, set_vb_size); 1026 + 1027 + vmw_fifo_commit(ctx->dev_priv, cmd_size); 1028 + bitmap_clear(cbs->dirty_vb, 1029 + cbs->bind_first_slot, cbs->bind_cmd_count); 1030 + 1031 + return 0; 1032 + } 1033 + 1034 + /** 1035 + * vmw_binding_emit_dirty - Issue delayed binding commands 1036 + * 1037 + * @cbs: Pointer to the context's struct vmw_ctx_binding_state 1038 + * 1039 + * This function issues the delayed binding commands that arise from 1040 + * previous scrub / unscrub calls. These binding commands are typically 1041 + * commands that batch a number of bindings and therefore it makes sense 1042 + * to delay them. 
1043 + */ 1044 + static int vmw_binding_emit_dirty(struct vmw_ctx_binding_state *cbs) 1045 + { 1046 + int ret = 0; 1047 + unsigned long hit = 0; 1048 + 1049 + while ((hit = find_next_bit(&cbs->dirty, VMW_BINDING_NUM_BITS, hit)) 1050 + < VMW_BINDING_NUM_BITS) { 1051 + 1052 + switch (hit) { 1053 + case VMW_BINDING_RT_BIT: 1054 + ret = vmw_emit_set_rt(cbs); 1055 + break; 1056 + case VMW_BINDING_PS_BIT: 1057 + ret = vmw_binding_emit_dirty_ps(cbs); 1058 + break; 1059 + case VMW_BINDING_SO_BIT: 1060 + ret = vmw_emit_set_so(cbs); 1061 + break; 1062 + case VMW_BINDING_VB_BIT: 1063 + ret = vmw_emit_set_vb(cbs); 1064 + break; 1065 + default: 1066 + BUG(); 1067 + } 1068 + if (ret) 1069 + return ret; 1070 + 1071 + __clear_bit(hit, &cbs->dirty); 1072 + hit++; 1073 + } 1074 + 1075 + return 0; 1076 + } 1077 + 1078 + /** 1079 + * vmw_binding_scrub_sr - Schedule a dx shaderresource binding 1080 + * scrub from a context 1081 + * 1082 + * @bi: single binding information. 1083 + * @rebind: Whether to issue a bind instead of scrub command. 1084 + */ 1085 + static int vmw_binding_scrub_sr(struct vmw_ctx_bindinfo *bi, bool rebind) 1086 + { 1087 + struct vmw_ctx_bindinfo_view *biv = 1088 + container_of(bi, struct vmw_ctx_bindinfo_view, bi); 1089 + struct vmw_ctx_binding_state *cbs = 1090 + vmw_context_binding_state(bi->ctx); 1091 + 1092 + __set_bit(biv->slot, cbs->per_shader[biv->shader_slot].dirty_sr); 1093 + __set_bit(VMW_BINDING_PS_SR_BIT, 1094 + &cbs->per_shader[biv->shader_slot].dirty); 1095 + __set_bit(VMW_BINDING_PS_BIT, &cbs->dirty); 1096 + 1097 + return 0; 1098 + } 1099 + 1100 + /** 1101 + * vmw_binding_scrub_dx_rt - Schedule a dx rendertarget binding 1102 + * scrub from a context 1103 + * 1104 + * @bi: single binding information. 1105 + * @rebind: Whether to issue a bind instead of scrub command. 
1106 + */ 1107 + static int vmw_binding_scrub_dx_rt(struct vmw_ctx_bindinfo *bi, bool rebind) 1108 + { 1109 + struct vmw_ctx_binding_state *cbs = 1110 + vmw_context_binding_state(bi->ctx); 1111 + 1112 + __set_bit(VMW_BINDING_RT_BIT, &cbs->dirty); 1113 + 1114 + return 0; 1115 + } 1116 + 1117 + /** 1118 + * vmw_binding_scrub_so - Schedule a dx streamoutput buffer binding 1119 + * scrub from a context 1120 + * 1121 + * @bi: single binding information. 1122 + * @rebind: Whether to issue a bind instead of scrub command. 1123 + */ 1124 + static int vmw_binding_scrub_so(struct vmw_ctx_bindinfo *bi, bool rebind) 1125 + { 1126 + struct vmw_ctx_binding_state *cbs = 1127 + vmw_context_binding_state(bi->ctx); 1128 + 1129 + __set_bit(VMW_BINDING_SO_BIT, &cbs->dirty); 1130 + 1131 + return 0; 1132 + } 1133 + 1134 + /** 1135 + * vmw_binding_scrub_vb - Schedule a dx vertex buffer binding 1136 + * scrub from a context 1137 + * 1138 + * @bi: single binding information. 1139 + * @rebind: Whether to issue a bind instead of scrub command. 1140 + */ 1141 + static int vmw_binding_scrub_vb(struct vmw_ctx_bindinfo *bi, bool rebind) 1142 + { 1143 + struct vmw_ctx_bindinfo_vb *bivb = 1144 + container_of(bi, struct vmw_ctx_bindinfo_vb, bi); 1145 + struct vmw_ctx_binding_state *cbs = 1146 + vmw_context_binding_state(bi->ctx); 1147 + 1148 + __set_bit(bivb->slot, cbs->dirty_vb); 1149 + __set_bit(VMW_BINDING_VB_BIT, &cbs->dirty); 1150 + 1151 + return 0; 1152 + } 1153 + 1154 + /** 1155 + * vmw_binding_scrub_ib - scrub a dx index buffer binding from a context 1156 + * 1157 + * @bi: single binding information. 1158 + * @rebind: Whether to issue a bind instead of scrub command. 
1159 + */ 1160 + static int vmw_binding_scrub_ib(struct vmw_ctx_bindinfo *bi, bool rebind) 1161 + { 1162 + struct vmw_ctx_bindinfo_ib *binding = 1163 + container_of(bi, typeof(*binding), bi); 1164 + struct vmw_private *dev_priv = bi->ctx->dev_priv; 1165 + struct { 1166 + SVGA3dCmdHeader header; 1167 + SVGA3dCmdDXSetIndexBuffer body; 1168 + } *cmd; 1169 + 1170 + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), bi->ctx->id); 1171 + if (unlikely(cmd == NULL)) { 1172 + DRM_ERROR("Failed reserving FIFO space for DX index buffer " 1173 + "binding.\n"); 1174 + return -ENOMEM; 1175 + } 1176 + cmd->header.id = SVGA_3D_CMD_DX_SET_INDEX_BUFFER; 1177 + cmd->header.size = sizeof(cmd->body); 1178 + if (rebind) { 1179 + cmd->body.sid = bi->res->id; 1180 + cmd->body.format = binding->format; 1181 + cmd->body.offset = binding->offset; 1182 + } else { 1183 + cmd->body.sid = SVGA3D_INVALID_ID; 1184 + cmd->body.format = 0; 1185 + cmd->body.offset = 0; 1186 + } 1187 + 1188 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 1189 + 1190 + return 0; 1191 + } 1192 + 1193 + /** 1194 + * vmw_binding_state_alloc - Allocate a struct vmw_ctx_binding_state with 1195 + * memory accounting. 1196 + * 1197 + * @dev_priv: Pointer to a device private structure. 1198 + * 1199 + * Returns a pointer to a newly allocated struct or an error pointer on error. 
1200 + */ 1201 + struct vmw_ctx_binding_state * 1202 + vmw_binding_state_alloc(struct vmw_private *dev_priv) 1203 + { 1204 + struct vmw_ctx_binding_state *cbs; 1205 + int ret; 1206 + 1207 + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), sizeof(*cbs), 1208 + false, false); 1209 + if (ret) 1210 + return ERR_PTR(ret); 1211 + 1212 + cbs = vzalloc(sizeof(*cbs)); 1213 + if (!cbs) { 1214 + ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); 1215 + return ERR_PTR(-ENOMEM); 1216 + } 1217 + 1218 + cbs->dev_priv = dev_priv; 1219 + INIT_LIST_HEAD(&cbs->list); 1220 + 1221 + return cbs; 1222 + } 1223 + 1224 + /** 1225 + * vmw_binding_state_free - Free a struct vmw_ctx_binding_state and its 1226 + * memory accounting info. 1227 + * 1228 + * @cbs: Pointer to the struct vmw_ctx_binding_state to be freed. 1229 + */ 1230 + void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs) 1231 + { 1232 + struct vmw_private *dev_priv = cbs->dev_priv; 1233 + 1234 + vfree(cbs); 1235 + ttm_mem_global_free(vmw_mem_glob(dev_priv), sizeof(*cbs)); 1236 + } 1237 + 1238 + /** 1239 + * vmw_binding_state_list - Get the binding list of a 1240 + * struct vmw_ctx_binding_state 1241 + * 1242 + * @cbs: Pointer to the struct vmw_ctx_binding_state 1243 + * 1244 + * Returns the binding list which can be used to traverse through the bindings 1245 + * and access the resource information of all bindings. 1246 + */ 1247 + struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs) 1248 + { 1249 + return &cbs->list; 1250 + } 1251 + 1252 + /** 1253 + * vmwgfx_binding_state_reset - clear a struct vmw_ctx_binding_state 1254 + * 1255 + * @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared 1256 + * 1257 + * Drops all bindings registered in @cbs. No device binding actions are 1258 + * performed. 
1259 + */ 1260 + void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs) 1261 + { 1262 + struct vmw_ctx_bindinfo *entry, *next; 1263 + 1264 + list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) 1265 + vmw_binding_drop(entry); 1266 + } 1267 + 1268 + /* 1269 + * This function is unused at run-time, and only used to hold various build 1270 + * asserts important for code optimization assumptions. 1271 + */ 1272 + static void vmw_binding_build_asserts(void) 1273 + { 1274 + BUILD_BUG_ON(SVGA3D_NUM_SHADERTYPE_DX10 != 3); 1275 + BUILD_BUG_ON(SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS > SVGA3D_RT_MAX); 1276 + BUILD_BUG_ON(sizeof(uint32) != sizeof(u32)); 1277 + 1278 + /* 1279 + * struct vmw_ctx_binding_state::bind_cmd_buffer is used for various 1280 + * view id arrays. 1281 + */ 1282 + BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_RT_MAX); 1283 + BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_SRVIEWS); 1284 + BUILD_BUG_ON(VMW_MAX_VIEW_BINDINGS < SVGA3D_DX_MAX_CONSTBUFFERS); 1285 + 1286 + /* 1287 + * struct vmw_ctx_binding_state::bind_cmd_buffer is used for 1288 + * u32 view ids, SVGA3dSoTargets and SVGA3dVertexBuffers 1289 + */ 1290 + BUILD_BUG_ON(SVGA3D_DX_MAX_SOTARGETS*sizeof(SVGA3dSoTarget) > 1291 + VMW_MAX_VIEW_BINDINGS*sizeof(u32)); 1292 + BUILD_BUG_ON(SVGA3D_DX_MAX_VERTEXBUFFERS*sizeof(SVGA3dVertexBuffer) > 1293 + VMW_MAX_VIEW_BINDINGS*sizeof(u32)); 1294 + }
+209
drivers/gpu/drm/vmwgfx/vmwgfx_binding.h
··· 1 + /************************************************************************** 2 + * 3 + * Copyright © 2015 VMware, Inc., Palo Alto, CA., USA 4 + * All Rights Reserved. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the 8 + * "Software"), to deal in the Software without restriction, including 9 + * without limitation the rights to use, copy, modify, merge, publish, 10 + * distribute, sub license, and/or sell copies of the Software, and to 11 + * permit persons to whom the Software is furnished to do so, subject to 12 + * the following conditions: 13 + * 14 + * The above copyright notice and this permission notice (including the 15 + * next paragraph) shall be included in all copies or substantial portions 16 + * of the Software. 17 + * 18 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
25 + * 26 + **************************************************************************/ 27 + #ifndef _VMWGFX_BINDING_H_ 28 + #define _VMWGFX_BINDING_H_ 29 + 30 + #include "device_include/svga3d_reg.h" 31 + #include <linux/list.h> 32 + 33 + #define VMW_MAX_VIEW_BINDINGS 128 34 + 35 + struct vmw_private; 36 + struct vmw_ctx_binding_state; 37 + 38 + /* 39 + * enum vmw_ctx_binding_type - abstract resource to context binding types 40 + */ 41 + enum vmw_ctx_binding_type { 42 + vmw_ctx_binding_shader, 43 + vmw_ctx_binding_rt, 44 + vmw_ctx_binding_tex, 45 + vmw_ctx_binding_cb, 46 + vmw_ctx_binding_dx_shader, 47 + vmw_ctx_binding_dx_rt, 48 + vmw_ctx_binding_sr, 49 + vmw_ctx_binding_ds, 50 + vmw_ctx_binding_so, 51 + vmw_ctx_binding_vb, 52 + vmw_ctx_binding_ib, 53 + vmw_ctx_binding_max 54 + }; 55 + 56 + /** 57 + * struct vmw_ctx_bindinfo - single binding metadata 58 + * 59 + * @ctx_list: List head for the context's list of bindings. 60 + * @res_list: List head for a resource's list of bindings. 61 + * @ctx: Non-refcounted pointer to the context that owns the binding. NULL 62 + * indicates no binding present. 63 + * @res: Non-refcounted pointer to the resource the binding points to. This 64 + * is typically a surface or a view. 65 + * @bt: Binding type. 66 + * @scrubbed: Whether the binding has been scrubbed from the context. 67 + */ 68 + struct vmw_ctx_bindinfo { 69 + struct list_head ctx_list; 70 + struct list_head res_list; 71 + struct vmw_resource *ctx; 72 + struct vmw_resource *res; 73 + enum vmw_ctx_binding_type bt; 74 + bool scrubbed; 75 + }; 76 + 77 + /** 78 + * struct vmw_ctx_bindinfo_tex - texture stage binding metadata 79 + * 80 + * @bi: struct vmw_ctx_bindinfo we derive from. 81 + * @texture_stage: Device data used to reconstruct binding command. 
82 + */ 83 + struct vmw_ctx_bindinfo_tex { 84 + struct vmw_ctx_bindinfo bi; 85 + uint32 texture_stage; 86 + }; 87 + 88 + /** 89 + * struct vmw_ctx_bindinfo_shader - Shader binding metadata 90 + * 91 + * @bi: struct vmw_ctx_bindinfo we derive from. 92 + * @shader_slot: Device data used to reconstruct binding command. 93 + */ 94 + struct vmw_ctx_bindinfo_shader { 95 + struct vmw_ctx_bindinfo bi; 96 + SVGA3dShaderType shader_slot; 97 + }; 98 + 99 + /** 100 + * struct vmw_ctx_bindinfo_cb - Constant buffer binding metadata 101 + * 102 + * @bi: struct vmw_ctx_bindinfo we derive from. 103 + * @shader_slot: Device data used to reconstruct binding command. 104 + * @offset: Device data used to reconstruct binding command. 105 + * @size: Device data used to reconstruct binding command. 106 + * @slot: Device data used to reconstruct binding command. 107 + */ 108 + struct vmw_ctx_bindinfo_cb { 109 + struct vmw_ctx_bindinfo bi; 110 + SVGA3dShaderType shader_slot; 111 + uint32 offset; 112 + uint32 size; 113 + uint32 slot; 114 + }; 115 + 116 + /** 117 + * struct vmw_ctx_bindinfo_view - View binding metadata 118 + * 119 + * @bi: struct vmw_ctx_bindinfo we derive from. 120 + * @shader_slot: Device data used to reconstruct binding command. 121 + * @slot: Device data used to reconstruct binding command. 122 + */ 123 + struct vmw_ctx_bindinfo_view { 124 + struct vmw_ctx_bindinfo bi; 125 + SVGA3dShaderType shader_slot; 126 + uint32 slot; 127 + }; 128 + 129 + /** 130 + * struct vmw_ctx_bindinfo_so - StreamOutput binding metadata 131 + * 132 + * @bi: struct vmw_ctx_bindinfo we derive from. 133 + * @offset: Device data used to reconstruct binding command. 134 + * @size: Device data used to reconstruct binding command. 135 + * @slot: Device data used to reconstruct binding command. 
136 + */ 137 + struct vmw_ctx_bindinfo_so { 138 + struct vmw_ctx_bindinfo bi; 139 + uint32 offset; 140 + uint32 size; 141 + uint32 slot; 142 + }; 143 + 144 + /** 145 + * struct vmw_ctx_bindinfo_vb - Vertex buffer binding metadata 146 + * 147 + * @bi: struct vmw_ctx_bindinfo we derive from. 148 + * @offset: Device data used to reconstruct binding command. 149 + * @stride: Device data used to reconstruct binding command. 150 + * @slot: Device data used to reconstruct binding command. 151 + */ 152 + struct vmw_ctx_bindinfo_vb { 153 + struct vmw_ctx_bindinfo bi; 154 + uint32 offset; 155 + uint32 stride; 156 + uint32 slot; 157 + }; 158 + 159 + /** 160 + * struct vmw_ctx_bindinfo_ib - StreamOutput binding metadata 161 + * 162 + * @bi: struct vmw_ctx_bindinfo we derive from. 163 + * @offset: Device data used to reconstruct binding command. 164 + * @format: Device data used to reconstruct binding command. 165 + */ 166 + struct vmw_ctx_bindinfo_ib { 167 + struct vmw_ctx_bindinfo bi; 168 + uint32 offset; 169 + uint32 format; 170 + }; 171 + 172 + /** 173 + * struct vmw_dx_shader_bindings - per shader type context binding state 174 + * 175 + * @shader: The shader binding for this shader type 176 + * @const_buffer: Const buffer bindings for this shader type. 177 + * @shader_res: Shader resource view bindings for this shader type. 178 + * @dirty_sr: Bitmap tracking individual shader resource bindings changes 179 + * that have not yet been emitted to the device. 180 + * @dirty: Bitmap tracking per-binding type binding changes that have not 181 + * yet been emitted to the device. 
182 + */ 183 + struct vmw_dx_shader_bindings { 184 + struct vmw_ctx_bindinfo_shader shader; 185 + struct vmw_ctx_bindinfo_cb const_buffers[SVGA3D_DX_MAX_CONSTBUFFERS]; 186 + struct vmw_ctx_bindinfo_view shader_res[SVGA3D_DX_MAX_SRVIEWS]; 187 + DECLARE_BITMAP(dirty_sr, SVGA3D_DX_MAX_SRVIEWS); 188 + unsigned long dirty; 189 + }; 190 + 191 + extern void vmw_binding_add(struct vmw_ctx_binding_state *cbs, 192 + const struct vmw_ctx_bindinfo *ci, 193 + u32 shader_slot, u32 slot); 194 + extern void 195 + vmw_binding_state_commit(struct vmw_ctx_binding_state *to, 196 + struct vmw_ctx_binding_state *from); 197 + extern void vmw_binding_res_list_kill(struct list_head *head); 198 + extern void vmw_binding_res_list_scrub(struct list_head *head); 199 + extern int vmw_binding_rebind_all(struct vmw_ctx_binding_state *cbs); 200 + extern void vmw_binding_state_kill(struct vmw_ctx_binding_state *cbs); 201 + extern void vmw_binding_state_scrub(struct vmw_ctx_binding_state *cbs); 202 + extern struct vmw_ctx_binding_state * 203 + vmw_binding_state_alloc(struct vmw_private *dev_priv); 204 + extern void vmw_binding_state_free(struct vmw_ctx_binding_state *cbs); 205 + extern struct list_head * 206 + vmw_binding_state_list(struct vmw_ctx_binding_state *cbs); 207 + extern void vmw_binding_state_reset(struct vmw_ctx_binding_state *cbs); 208 + 209 + #endif
+2 -3
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
··· 916 916 917 917 cur = man->cur; 918 918 if (cur && (size + man->cur_pos > cur->size || 919 - (ctx_id != SVGA3D_INVALID_ID && 920 - (cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) && 921 - ctx_id != cur->cb_header->dxContext))) 919 + ((cur->cb_header->flags & SVGA_CB_FLAG_DX_CONTEXT) && 920 + ctx_id != cur->cb_header->dxContext))) 922 921 __vmw_cmdbuf_cur_flush(man); 923 922 924 923 if (!man->cur) {
+14 -10
drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
··· 26 26 **************************************************************************/ 27 27 28 28 #include "vmwgfx_drv.h" 29 + #include "vmwgfx_resource_priv.h" 29 30 30 31 #define VMW_CMDBUF_RES_MAN_HT_ORDER 12 31 - 32 - enum vmw_cmdbuf_res_state { 33 - VMW_CMDBUF_RES_COMMITED, 34 - VMW_CMDBUF_RES_ADD, 35 - VMW_CMDBUF_RES_DEL 36 - }; 37 32 38 33 /** 39 34 * struct vmw_cmdbuf_res - Command buffer managed resource entry. ··· 127 132 128 133 list_for_each_entry_safe(entry, next, list, head) { 129 134 list_del(&entry->head); 135 + if (entry->res->func->commit_notify) 136 + entry->res->func->commit_notify(entry->res, 137 + entry->state); 130 138 switch (entry->state) { 131 139 case VMW_CMDBUF_RES_ADD: 132 - entry->state = VMW_CMDBUF_RES_COMMITED; 140 + entry->state = VMW_CMDBUF_RES_COMMITTED; 133 141 list_add_tail(&entry->head, &entry->man->list); 134 142 break; 135 143 case VMW_CMDBUF_RES_DEL: ··· 173 175 &entry->hash); 174 176 list_del(&entry->head); 175 177 list_add_tail(&entry->head, &entry->man->list); 176 - entry->state = VMW_CMDBUF_RES_COMMITED; 178 + entry->state = VMW_CMDBUF_RES_COMMITTED; 177 179 break; 178 180 default: 179 181 BUG(); ··· 229 231 * @res_type: The resource type. 230 232 * @user_key: The user-space id of the resource. 231 233 * @list: The staging list. 234 + * @res_p: If the resource is in an already committed state, points to the 235 + * struct vmw_resource on successful return. The pointer will be 236 + * non ref-counted. 232 237 * 233 238 * This function looks up the struct vmw_cmdbuf_res entry from the manager 234 239 * hash table and, if it exists, removes it. 
Depending on its current staging ··· 241 240 int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, 242 241 enum vmw_cmdbuf_res_type res_type, 243 242 u32 user_key, 244 - struct list_head *list) 243 + struct list_head *list, 244 + struct vmw_resource **res_p) 245 245 { 246 246 struct vmw_cmdbuf_res *entry; 247 247 struct drm_hash_item *hash; ··· 258 256 switch (entry->state) { 259 257 case VMW_CMDBUF_RES_ADD: 260 258 vmw_cmdbuf_res_free(man, entry); 259 + *res_p = NULL; 261 260 break; 262 - case VMW_CMDBUF_RES_COMMITED: 261 + case VMW_CMDBUF_RES_COMMITTED: 263 262 (void) drm_ht_remove_item(&man->resources, &entry->hash); 264 263 list_del(&entry->head); 265 264 entry->state = VMW_CMDBUF_RES_DEL; 266 265 list_add_tail(&entry->head, list); 266 + *res_p = entry->res; 267 267 break; 268 268 default: 269 269 BUG();
+346 -399
drivers/gpu/drm/vmwgfx/vmwgfx_context.c
··· 27 27 28 28 #include "vmwgfx_drv.h" 29 29 #include "vmwgfx_resource_priv.h" 30 + #include "vmwgfx_binding.h" 30 31 #include "ttm/ttm_placement.h" 31 32 32 33 struct vmw_user_context { 33 34 struct ttm_base_object base; 34 35 struct vmw_resource res; 35 - struct vmw_ctx_binding_state cbs; 36 + struct vmw_ctx_binding_state *cbs; 36 37 struct vmw_cmdbuf_res_manager *man; 38 + struct vmw_resource *cotables[SVGA_COTABLE_DX10_MAX]; 39 + spinlock_t cotable_lock; 37 40 }; 38 - 39 - 40 - 41 - typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool); 42 41 43 42 static void vmw_user_context_free(struct vmw_resource *res); 44 43 static struct vmw_resource * ··· 50 51 bool readback, 51 52 struct ttm_validate_buffer *val_buf); 52 53 static int vmw_gb_context_destroy(struct vmw_resource *res); 53 - static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind); 54 - static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, 55 - bool rebind); 56 - static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind); 57 - static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs); 58 - static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs); 54 + static int vmw_dx_context_create(struct vmw_resource *res); 55 + static int vmw_dx_context_bind(struct vmw_resource *res, 56 + struct ttm_validate_buffer *val_buf); 57 + static int vmw_dx_context_unbind(struct vmw_resource *res, 58 + bool readback, 59 + struct ttm_validate_buffer *val_buf); 60 + static int vmw_dx_context_destroy(struct vmw_resource *res); 61 + 59 62 static uint64_t vmw_user_context_size; 60 63 61 64 static const struct vmw_user_resource_conv user_context_conv = { ··· 94 93 .unbind = vmw_gb_context_unbind 95 94 }; 96 95 97 - static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = { 98 - [vmw_ctx_binding_shader] = vmw_context_scrub_shader, 99 - [vmw_ctx_binding_rt] = vmw_context_scrub_render_target, 100 - 
[vmw_ctx_binding_tex] = vmw_context_scrub_texture }; 96 + static const struct vmw_res_func vmw_dx_context_func = { 97 + .res_type = vmw_res_dx_context, 98 + .needs_backup = true, 99 + .may_evict = true, 100 + .type_name = "dx contexts", 101 + .backup_placement = &vmw_mob_placement, 102 + .create = vmw_dx_context_create, 103 + .destroy = vmw_dx_context_destroy, 104 + .bind = vmw_dx_context_bind, 105 + .unbind = vmw_dx_context_unbind 106 + }; 101 107 102 108 /** 103 109 * Context management: 104 110 */ 111 + 112 + static void vmw_context_cotables_unref(struct vmw_user_context *uctx) 113 + { 114 + struct vmw_resource *res; 115 + int i; 116 + 117 + for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { 118 + spin_lock(&uctx->cotable_lock); 119 + res = uctx->cotables[i]; 120 + uctx->cotables[i] = NULL; 121 + spin_unlock(&uctx->cotable_lock); 122 + vmw_resource_unreference(&res); 123 + } 124 + } 105 125 106 126 static void vmw_hw_context_destroy(struct vmw_resource *res) 107 127 { ··· 135 113 } *cmd; 136 114 137 115 138 - if (res->func->destroy == vmw_gb_context_destroy) { 116 + if (res->func->destroy == vmw_gb_context_destroy || 117 + res->func->destroy == vmw_dx_context_destroy) { 139 118 mutex_lock(&dev_priv->cmdbuf_mutex); 140 119 vmw_cmdbuf_res_man_destroy(uctx->man); 141 120 mutex_lock(&dev_priv->binding_mutex); 142 - (void) vmw_context_binding_state_kill(&uctx->cbs); 143 - (void) vmw_gb_context_destroy(res); 121 + vmw_binding_state_kill(uctx->cbs); 122 + (void) res->func->destroy(res); 144 123 mutex_unlock(&dev_priv->binding_mutex); 145 124 if (dev_priv->pinned_bo != NULL && 146 125 !dev_priv->query_cid_valid) 147 126 __vmw_execbuf_release_pinned_bo(dev_priv, NULL); 148 127 mutex_unlock(&dev_priv->cmdbuf_mutex); 128 + vmw_context_cotables_unref(uctx); 149 129 return; 150 130 } 151 131 ··· 168 144 } 169 145 170 146 static int vmw_gb_context_init(struct vmw_private *dev_priv, 147 + bool dx, 171 148 struct vmw_resource *res, 172 - void (*res_free) (struct vmw_resource *res)) 
149 + void (*res_free)(struct vmw_resource *res)) 173 150 { 174 - int ret; 151 + int ret, i; 175 152 struct vmw_user_context *uctx = 176 153 container_of(res, struct vmw_user_context, res); 177 154 155 + res->backup_size = (dx ? sizeof(SVGADXContextMobFormat) : 156 + SVGA3D_CONTEXT_DATA_SIZE); 178 157 ret = vmw_resource_init(dev_priv, res, true, 179 - res_free, &vmw_gb_context_func); 180 - res->backup_size = SVGA3D_CONTEXT_DATA_SIZE; 158 + res_free, 159 + dx ? &vmw_dx_context_func : 160 + &vmw_gb_context_func); 181 161 if (unlikely(ret != 0)) 182 162 goto out_err; 183 163 ··· 194 166 } 195 167 } 196 168 197 - memset(&uctx->cbs, 0, sizeof(uctx->cbs)); 198 - INIT_LIST_HEAD(&uctx->cbs.list); 169 + uctx->cbs = vmw_binding_state_alloc(dev_priv); 170 + if (IS_ERR(uctx->cbs)) { 171 + ret = PTR_ERR(uctx->cbs); 172 + goto out_err; 173 + } 174 + 175 + spin_lock_init(&uctx->cotable_lock); 176 + 177 + if (dx) { 178 + for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { 179 + uctx->cotables[i] = vmw_cotable_alloc(dev_priv, 180 + &uctx->res, i); 181 + if (unlikely(uctx->cotables[i] == NULL)) { 182 + ret = -ENOMEM; 183 + goto out_cotables; 184 + } 185 + } 186 + } 187 + 188 + 199 189 200 190 vmw_resource_activate(res, vmw_hw_context_destroy); 201 191 return 0; 202 192 193 + out_cotables: 194 + vmw_context_cotables_unref(uctx); 203 195 out_err: 204 196 if (res_free) 205 197 res_free(res); ··· 230 182 231 183 static int vmw_context_init(struct vmw_private *dev_priv, 232 184 struct vmw_resource *res, 233 - void (*res_free) (struct vmw_resource *res)) 185 + void (*res_free)(struct vmw_resource *res), 186 + bool dx) 234 187 { 235 188 int ret; 236 189 ··· 241 192 } *cmd; 242 193 243 194 if (dev_priv->has_mob) 244 - return vmw_gb_context_init(dev_priv, res, res_free); 195 + return vmw_gb_context_init(dev_priv, dx, res, res_free); 245 196 246 197 ret = vmw_resource_init(dev_priv, res, false, 247 198 res_free, &vmw_legacy_context_func); ··· 281 232 return ret; 282 233 } 283 234 284 - struct 
vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv) 285 - { 286 - struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL); 287 - int ret; 288 235 289 - if (unlikely(res == NULL)) 290 - return NULL; 291 - 292 - ret = vmw_context_init(dev_priv, res, NULL); 293 - 294 - return (ret == 0) ? res : NULL; 295 - } 296 - 236 + /* 237 + * GB context. 238 + */ 297 239 298 240 static int vmw_gb_context_create(struct vmw_resource *res) 299 241 { ··· 349 309 "binding.\n"); 350 310 return -ENOMEM; 351 311 } 352 - 353 312 cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT; 354 313 cmd->header.size = sizeof(cmd->body); 355 314 cmd->body.cid = res->id; ··· 385 346 BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 386 347 387 348 mutex_lock(&dev_priv->binding_mutex); 388 - vmw_context_binding_state_scrub(&uctx->cbs); 349 + vmw_binding_state_scrub(uctx->cbs); 389 350 390 351 submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); 391 352 ··· 458 419 return 0; 459 420 } 460 421 422 + /* 423 + * DX context. 
424 + */ 425 + 426 + static int vmw_dx_context_create(struct vmw_resource *res) 427 + { 428 + struct vmw_private *dev_priv = res->dev_priv; 429 + int ret; 430 + struct { 431 + SVGA3dCmdHeader header; 432 + SVGA3dCmdDXDefineContext body; 433 + } *cmd; 434 + 435 + if (likely(res->id != -1)) 436 + return 0; 437 + 438 + ret = vmw_resource_alloc_id(res); 439 + if (unlikely(ret != 0)) { 440 + DRM_ERROR("Failed to allocate a context id.\n"); 441 + goto out_no_id; 442 + } 443 + 444 + if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) { 445 + ret = -EBUSY; 446 + goto out_no_fifo; 447 + } 448 + 449 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 450 + if (unlikely(cmd == NULL)) { 451 + DRM_ERROR("Failed reserving FIFO space for context " 452 + "creation.\n"); 453 + ret = -ENOMEM; 454 + goto out_no_fifo; 455 + } 456 + 457 + cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT; 458 + cmd->header.size = sizeof(cmd->body); 459 + cmd->body.cid = res->id; 460 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 461 + vmw_fifo_resource_inc(dev_priv); 462 + 463 + return 0; 464 + 465 + out_no_fifo: 466 + vmw_resource_release_id(res); 467 + out_no_id: 468 + return ret; 469 + } 470 + 471 + static int vmw_dx_context_bind(struct vmw_resource *res, 472 + struct ttm_validate_buffer *val_buf) 473 + { 474 + struct vmw_private *dev_priv = res->dev_priv; 475 + struct { 476 + SVGA3dCmdHeader header; 477 + SVGA3dCmdDXBindContext body; 478 + } *cmd; 479 + struct ttm_buffer_object *bo = val_buf->bo; 480 + 481 + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 482 + 483 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 484 + if (unlikely(cmd == NULL)) { 485 + DRM_ERROR("Failed reserving FIFO space for context " 486 + "binding.\n"); 487 + return -ENOMEM; 488 + } 489 + 490 + cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT; 491 + cmd->header.size = sizeof(cmd->body); 492 + cmd->body.cid = res->id; 493 + cmd->body.mobid = bo->mem.start; 494 + cmd->body.validContents = res->backup_dirty; 495 + res->backup_dirty = false; 496 + 
vmw_fifo_commit(dev_priv, sizeof(*cmd)); 497 + 498 + 499 + return 0; 500 + } 501 + 502 + /** 503 + * vmw_dx_context_scrub_cotables - Scrub all bindings and 504 + * cotables from a context 505 + * 506 + * @ctx: Pointer to the context resource 507 + * @readback: Whether to save the otable contents on scrubbing. 508 + * 509 + * COtables must be unbound before their context, but unbinding requires 510 + * the backup buffer being reserved, whereas scrubbing does not. 511 + * This function scrubs all cotables of a context, potentially reading back 512 + * the contents into their backup buffers. However, scrubbing cotables 513 + * also makes the device context invalid, so scrub all bindings first so 514 + * that doesn't have to be done later with an invalid context. 515 + */ 516 + void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, 517 + bool readback) 518 + { 519 + struct vmw_user_context *uctx = 520 + container_of(ctx, struct vmw_user_context, res); 521 + int i; 522 + 523 + vmw_binding_state_scrub(uctx->cbs); 524 + for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { 525 + struct vmw_resource *res; 526 + 527 + /* Avoid racing with ongoing cotable destruction. 
*/ 528 + spin_lock(&uctx->cotable_lock); 529 + res = uctx->cotables[vmw_cotable_scrub_order[i]]; 530 + if (res) 531 + res = vmw_resource_reference_unless_doomed(res); 532 + spin_unlock(&uctx->cotable_lock); 533 + if (!res) 534 + continue; 535 + 536 + WARN_ON(vmw_cotable_scrub(res, readback)); 537 + vmw_resource_unreference(&res); 538 + } 539 + } 540 + 541 + static int vmw_dx_context_unbind(struct vmw_resource *res, 542 + bool readback, 543 + struct ttm_validate_buffer *val_buf) 544 + { 545 + struct vmw_private *dev_priv = res->dev_priv; 546 + struct ttm_buffer_object *bo = val_buf->bo; 547 + struct vmw_fence_obj *fence; 548 + 549 + struct { 550 + SVGA3dCmdHeader header; 551 + SVGA3dCmdDXReadbackContext body; 552 + } *cmd1; 553 + struct { 554 + SVGA3dCmdHeader header; 555 + SVGA3dCmdDXBindContext body; 556 + } *cmd2; 557 + uint32_t submit_size; 558 + uint8_t *cmd; 559 + 560 + 561 + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 562 + 563 + mutex_lock(&dev_priv->binding_mutex); 564 + vmw_dx_context_scrub_cotables(res, readback); 565 + 566 + submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0); 567 + 568 + cmd = vmw_fifo_reserve(dev_priv, submit_size); 569 + if (unlikely(cmd == NULL)) { 570 + DRM_ERROR("Failed reserving FIFO space for context " 571 + "unbinding.\n"); 572 + mutex_unlock(&dev_priv->binding_mutex); 573 + return -ENOMEM; 574 + } 575 + 576 + cmd2 = (void *) cmd; 577 + if (readback) { 578 + cmd1 = (void *) cmd; 579 + cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT; 580 + cmd1->header.size = sizeof(cmd1->body); 581 + cmd1->body.cid = res->id; 582 + cmd2 = (void *) (&cmd1[1]); 583 + } 584 + cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT; 585 + cmd2->header.size = sizeof(cmd2->body); 586 + cmd2->body.cid = res->id; 587 + cmd2->body.mobid = SVGA3D_INVALID_ID; 588 + 589 + vmw_fifo_commit(dev_priv, submit_size); 590 + mutex_unlock(&dev_priv->binding_mutex); 591 + 592 + /* 593 + * Create a fence object and fence the backup buffer. 
594 + */ 595 + 596 + (void) vmw_execbuf_fence_commands(NULL, dev_priv, 597 + &fence, NULL); 598 + 599 + vmw_fence_single_bo(bo, fence); 600 + 601 + if (likely(fence != NULL)) 602 + vmw_fence_obj_unreference(&fence); 603 + 604 + return 0; 605 + } 606 + 607 + static int vmw_dx_context_destroy(struct vmw_resource *res) 608 + { 609 + struct vmw_private *dev_priv = res->dev_priv; 610 + struct { 611 + SVGA3dCmdHeader header; 612 + SVGA3dCmdDXDestroyContext body; 613 + } *cmd; 614 + 615 + if (likely(res->id == -1)) 616 + return 0; 617 + 618 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 619 + if (unlikely(cmd == NULL)) { 620 + DRM_ERROR("Failed reserving FIFO space for context " 621 + "destruction.\n"); 622 + return -ENOMEM; 623 + } 624 + 625 + cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT; 626 + cmd->header.size = sizeof(cmd->body); 627 + cmd->body.cid = res->id; 628 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 629 + if (dev_priv->query_cid == res->id) 630 + dev_priv->query_cid_valid = false; 631 + vmw_resource_release_id(res); 632 + vmw_fifo_resource_dec(dev_priv); 633 + 634 + return 0; 635 + } 636 + 461 637 /** 462 638 * User-space context management: 463 639 */ ··· 689 435 container_of(res, struct vmw_user_context, res); 690 436 struct vmw_private *dev_priv = res->dev_priv; 691 437 438 + if (ctx->cbs) 439 + vmw_binding_state_free(ctx->cbs); 692 440 ttm_base_object_kfree(ctx, base); 693 441 ttm_mem_global_free(vmw_mem_glob(dev_priv), 694 442 vmw_user_context_size); ··· 721 465 return ttm_ref_object_base_unref(tfile, arg->cid, TTM_REF_USAGE); 722 466 } 723 467 724 - int vmw_context_define_ioctl(struct drm_device *dev, void *data, 725 - struct drm_file *file_priv) 468 + static int vmw_context_define(struct drm_device *dev, void *data, 469 + struct drm_file *file_priv, bool dx) 726 470 { 727 471 struct vmw_private *dev_priv = vmw_priv(dev); 728 472 struct vmw_user_context *ctx; ··· 732 476 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 733 477 int ret; 
734 478 479 + if (!dev_priv->has_dx && dx) { 480 + DRM_ERROR("DX contexts not supported by device.\n"); 481 + return -EINVAL; 482 + } 735 483 736 484 /* 737 485 * Approximate idr memory usage with 128 bytes. It will be limited ··· 776 516 * From here on, the destructor takes over resource freeing. 777 517 */ 778 518 779 - ret = vmw_context_init(dev_priv, res, vmw_user_context_free); 519 + ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx); 780 520 if (unlikely(ret != 0)) 781 521 goto out_unlock; 782 522 ··· 795 535 out_unlock: 796 536 ttm_read_unlock(&dev_priv->reservation_sem); 797 537 return ret; 798 - 799 538 } 800 539 801 - /** 802 - * vmw_context_scrub_shader - scrub a shader binding from a context. 803 - * 804 - * @bi: single binding information. 805 - * @rebind: Whether to issue a bind instead of scrub command. 806 - */ 807 - static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind) 540 + int vmw_context_define_ioctl(struct drm_device *dev, void *data, 541 + struct drm_file *file_priv) 808 542 { 809 - struct vmw_private *dev_priv = bi->ctx->dev_priv; 810 - struct { 811 - SVGA3dCmdHeader header; 812 - SVGA3dCmdSetShader body; 813 - } *cmd; 814 - 815 - cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 816 - if (unlikely(cmd == NULL)) { 817 - DRM_ERROR("Failed reserving FIFO space for shader " 818 - "unbinding.\n"); 819 - return -ENOMEM; 820 - } 821 - 822 - cmd->header.id = SVGA_3D_CMD_SET_SHADER; 823 - cmd->header.size = sizeof(cmd->body); 824 - cmd->body.cid = bi->ctx->id; 825 - cmd->body.type = bi->i1.shader_type; 826 - cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 827 - vmw_fifo_commit(dev_priv, sizeof(*cmd)); 828 - 829 - return 0; 543 + return vmw_context_define(dev, data, file_priv, false); 830 544 } 831 545 832 - /** 833 - * vmw_context_scrub_render_target - scrub a render target binding 834 - * from a context. 835 - * 836 - * @bi: single binding information. 
837 - * @rebind: Whether to issue a bind instead of scrub command. 838 - */ 839 - static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi, 840 - bool rebind) 546 + int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data, 547 + struct drm_file *file_priv) 841 548 { 842 - struct vmw_private *dev_priv = bi->ctx->dev_priv; 843 - struct { 844 - SVGA3dCmdHeader header; 845 - SVGA3dCmdSetRenderTarget body; 846 - } *cmd; 549 + union drm_vmw_extended_context_arg *arg = (typeof(arg)) data; 550 + struct drm_vmw_context_arg *rep = &arg->rep; 847 551 848 - cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 849 - if (unlikely(cmd == NULL)) { 850 - DRM_ERROR("Failed reserving FIFO space for render target " 851 - "unbinding.\n"); 852 - return -ENOMEM; 853 - } 854 - 855 - cmd->header.id = SVGA_3D_CMD_SETRENDERTARGET; 856 - cmd->header.size = sizeof(cmd->body); 857 - cmd->body.cid = bi->ctx->id; 858 - cmd->body.type = bi->i1.rt_type; 859 - cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 860 - cmd->body.target.face = 0; 861 - cmd->body.target.mipmap = 0; 862 - vmw_fifo_commit(dev_priv, sizeof(*cmd)); 863 - 864 - return 0; 865 - } 866 - 867 - /** 868 - * vmw_context_scrub_texture - scrub a texture binding from a context. 869 - * 870 - * @bi: single binding information. 871 - * @rebind: Whether to issue a bind instead of scrub command. 872 - * 873 - * TODO: Possibly complement this function with a function that takes 874 - * a list of texture bindings and combines them to a single command. 
875 - */ 876 - static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, 877 - bool rebind) 878 - { 879 - struct vmw_private *dev_priv = bi->ctx->dev_priv; 880 - struct { 881 - SVGA3dCmdHeader header; 882 - struct { 883 - SVGA3dCmdSetTextureState c; 884 - SVGA3dTextureState s1; 885 - } body; 886 - } *cmd; 887 - 888 - cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 889 - if (unlikely(cmd == NULL)) { 890 - DRM_ERROR("Failed reserving FIFO space for texture " 891 - "unbinding.\n"); 892 - return -ENOMEM; 893 - } 894 - 895 - 896 - cmd->header.id = SVGA_3D_CMD_SETTEXTURESTATE; 897 - cmd->header.size = sizeof(cmd->body); 898 - cmd->body.c.cid = bi->ctx->id; 899 - cmd->body.s1.stage = bi->i1.texture_stage; 900 - cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE; 901 - cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID); 902 - vmw_fifo_commit(dev_priv, sizeof(*cmd)); 903 - 904 - return 0; 905 - } 906 - 907 - /** 908 - * vmw_context_binding_drop: Stop tracking a context binding 909 - * 910 - * @cb: Pointer to binding tracker storage. 911 - * 912 - * Stops tracking a context binding, and re-initializes its storage. 913 - * Typically used when the context binding is replaced with a binding to 914 - * another (or the same, for that matter) resource. 915 - */ 916 - static void vmw_context_binding_drop(struct vmw_ctx_binding *cb) 917 - { 918 - list_del(&cb->ctx_list); 919 - if (!list_empty(&cb->res_list)) 920 - list_del(&cb->res_list); 921 - cb->bi.ctx = NULL; 922 - } 923 - 924 - /** 925 - * vmw_context_binding_add: Start tracking a context binding 926 - * 927 - * @cbs: Pointer to the context binding state tracker. 928 - * @bi: Information about the binding to track. 929 - * 930 - * Performs basic checks on the binding to make sure arguments are within 931 - * bounds and then starts tracking the binding in the context binding 932 - * state structure @cbs. 
933 - */ 934 - int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, 935 - const struct vmw_ctx_bindinfo *bi) 936 - { 937 - struct vmw_ctx_binding *loc; 938 - 939 - switch (bi->bt) { 940 - case vmw_ctx_binding_rt: 941 - if (unlikely((unsigned)bi->i1.rt_type >= SVGA3D_RT_MAX)) { 942 - DRM_ERROR("Illegal render target type %u.\n", 943 - (unsigned) bi->i1.rt_type); 944 - return -EINVAL; 945 - } 946 - loc = &cbs->render_targets[bi->i1.rt_type]; 947 - break; 948 - case vmw_ctx_binding_tex: 949 - if (unlikely((unsigned)bi->i1.texture_stage >= 950 - SVGA3D_NUM_TEXTURE_UNITS)) { 951 - DRM_ERROR("Illegal texture/sampler unit %u.\n", 952 - (unsigned) bi->i1.texture_stage); 953 - return -EINVAL; 954 - } 955 - loc = &cbs->texture_units[bi->i1.texture_stage]; 956 - break; 957 - case vmw_ctx_binding_shader: 958 - if (unlikely((unsigned)bi->i1.shader_type >= 959 - SVGA3D_SHADERTYPE_PREDX_MAX)) { 960 - DRM_ERROR("Illegal shader type %u.\n", 961 - (unsigned) bi->i1.shader_type); 962 - return -EINVAL; 963 - } 964 - loc = &cbs->shaders[bi->i1.shader_type]; 965 - break; 552 + switch (arg->req) { 553 + case drm_vmw_context_legacy: 554 + return vmw_context_define(dev, rep, file_priv, false); 555 + case drm_vmw_context_dx: 556 + return vmw_context_define(dev, rep, file_priv, true); 966 557 default: 967 - BUG(); 968 - } 969 - 970 - if (loc->bi.ctx != NULL) 971 - vmw_context_binding_drop(loc); 972 - 973 - loc->bi = *bi; 974 - loc->bi.scrubbed = false; 975 - list_add_tail(&loc->ctx_list, &cbs->list); 976 - INIT_LIST_HEAD(&loc->res_list); 977 - 978 - return 0; 979 - } 980 - 981 - /** 982 - * vmw_context_binding_transfer: Transfer a context binding tracking entry. 983 - * 984 - * @cbs: Pointer to the persistent context binding state tracker. 985 - * @bi: Information about the binding to track. 
986 - * 987 - */ 988 - static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs, 989 - const struct vmw_ctx_bindinfo *bi) 990 - { 991 - struct vmw_ctx_binding *loc; 992 - 993 - switch (bi->bt) { 994 - case vmw_ctx_binding_rt: 995 - loc = &cbs->render_targets[bi->i1.rt_type]; 996 558 break; 997 - case vmw_ctx_binding_tex: 998 - loc = &cbs->texture_units[bi->i1.texture_stage]; 999 - break; 1000 - case vmw_ctx_binding_shader: 1001 - loc = &cbs->shaders[bi->i1.shader_type]; 1002 - break; 1003 - default: 1004 - BUG(); 1005 559 } 1006 - 1007 - if (loc->bi.ctx != NULL) 1008 - vmw_context_binding_drop(loc); 1009 - 1010 - if (bi->res != NULL) { 1011 - loc->bi = *bi; 1012 - list_add_tail(&loc->ctx_list, &cbs->list); 1013 - list_add_tail(&loc->res_list, &bi->res->binding_head); 1014 - } 1015 - } 1016 - 1017 - /** 1018 - * vmw_context_binding_kill - Kill a binding on the device 1019 - * and stop tracking it. 1020 - * 1021 - * @cb: Pointer to binding tracker storage. 1022 - * 1023 - * Emits FIFO commands to scrub a binding represented by @cb. 1024 - * Then stops tracking the binding and re-initializes its storage. 1025 - */ 1026 - static void vmw_context_binding_kill(struct vmw_ctx_binding *cb) 1027 - { 1028 - if (!cb->bi.scrubbed) { 1029 - (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false); 1030 - cb->bi.scrubbed = true; 1031 - } 1032 - vmw_context_binding_drop(cb); 1033 - } 1034 - 1035 - /** 1036 - * vmw_context_binding_state_kill - Kill all bindings associated with a 1037 - * struct vmw_ctx_binding state structure, and re-initialize the structure. 1038 - * 1039 - * @cbs: Pointer to the context binding state tracker. 1040 - * 1041 - * Emits commands to scrub all bindings associated with the 1042 - * context binding state tracker. Then re-initializes the whole structure. 
1043 - */ 1044 - static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs) 1045 - { 1046 - struct vmw_ctx_binding *entry, *next; 1047 - 1048 - list_for_each_entry_safe(entry, next, &cbs->list, ctx_list) 1049 - vmw_context_binding_kill(entry); 1050 - } 1051 - 1052 - /** 1053 - * vmw_context_binding_state_scrub - Scrub all bindings associated with a 1054 - * struct vmw_ctx_binding state structure. 1055 - * 1056 - * @cbs: Pointer to the context binding state tracker. 1057 - * 1058 - * Emits commands to scrub all bindings associated with the 1059 - * context binding state tracker. 1060 - */ 1061 - static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs) 1062 - { 1063 - struct vmw_ctx_binding *entry; 1064 - 1065 - list_for_each_entry(entry, &cbs->list, ctx_list) { 1066 - if (!entry->bi.scrubbed) { 1067 - (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); 1068 - entry->bi.scrubbed = true; 1069 - } 1070 - } 1071 - } 1072 - 1073 - /** 1074 - * vmw_context_binding_res_list_kill - Kill all bindings on a 1075 - * resource binding list 1076 - * 1077 - * @head: list head of resource binding list 1078 - * 1079 - * Kills all bindings associated with a specific resource. Typically 1080 - * called before the resource is destroyed. 1081 - */ 1082 - void vmw_context_binding_res_list_kill(struct list_head *head) 1083 - { 1084 - struct vmw_ctx_binding *entry, *next; 1085 - 1086 - list_for_each_entry_safe(entry, next, head, res_list) 1087 - vmw_context_binding_kill(entry); 1088 - } 1089 - 1090 - /** 1091 - * vmw_context_binding_res_list_scrub - Scrub all bindings on a 1092 - * resource binding list 1093 - * 1094 - * @head: list head of resource binding list 1095 - * 1096 - * Scrub all bindings associated with a specific resource. Typically 1097 - * called before the resource is evicted. 
1098 - */ 1099 - void vmw_context_binding_res_list_scrub(struct list_head *head) 1100 - { 1101 - struct vmw_ctx_binding *entry; 1102 - 1103 - list_for_each_entry(entry, head, res_list) { 1104 - if (!entry->bi.scrubbed) { 1105 - (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false); 1106 - entry->bi.scrubbed = true; 1107 - } 1108 - } 1109 - } 1110 - 1111 - /** 1112 - * vmw_context_binding_state_transfer - Commit staged binding info 1113 - * 1114 - * @ctx: Pointer to context to commit the staged binding info to. 1115 - * @from: Staged binding info built during execbuf. 1116 - * 1117 - * Transfers binding info from a temporary structure to the persistent 1118 - * structure in the context. This can be done once commands 1119 - */ 1120 - void vmw_context_binding_state_transfer(struct vmw_resource *ctx, 1121 - struct vmw_ctx_binding_state *from) 1122 - { 1123 - struct vmw_user_context *uctx = 1124 - container_of(ctx, struct vmw_user_context, res); 1125 - struct vmw_ctx_binding *entry, *next; 1126 - 1127 - list_for_each_entry_safe(entry, next, &from->list, ctx_list) 1128 - vmw_context_binding_transfer(&uctx->cbs, &entry->bi); 1129 - } 1130 - 1131 - /** 1132 - * vmw_context_rebind_all - Rebind all scrubbed bindings of a context 1133 - * 1134 - * @ctx: The context resource 1135 - * 1136 - * Walks through the context binding list and rebinds all scrubbed 1137 - * resources. 
1138 - */ 1139 - int vmw_context_rebind_all(struct vmw_resource *ctx) 1140 - { 1141 - struct vmw_ctx_binding *entry; 1142 - struct vmw_user_context *uctx = 1143 - container_of(ctx, struct vmw_user_context, res); 1144 - struct vmw_ctx_binding_state *cbs = &uctx->cbs; 1145 - int ret; 1146 - 1147 - list_for_each_entry(entry, &cbs->list, ctx_list) { 1148 - if (likely(!entry->bi.scrubbed)) 1149 - continue; 1150 - 1151 - if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id == 1152 - SVGA3D_INVALID_ID)) 1153 - continue; 1154 - 1155 - ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true); 1156 - if (unlikely(ret != 0)) 1157 - return ret; 1158 - 1159 - entry->bi.scrubbed = false; 1160 - } 1161 - 1162 - return 0; 560 + return -EINVAL; 1163 561 } 1164 562 1165 563 /** ··· 830 912 */ 831 913 struct list_head *vmw_context_binding_list(struct vmw_resource *ctx) 832 914 { 833 - return &(container_of(ctx, struct vmw_user_context, res)->cbs.list); 915 + struct vmw_user_context *uctx = 916 + container_of(ctx, struct vmw_user_context, res); 917 + 918 + return vmw_binding_state_list(uctx->cbs); 834 919 } 835 920 836 921 struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx) 837 922 { 838 923 return container_of(ctx, struct vmw_user_context, res)->man; 924 + } 925 + 926 + struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx, 927 + SVGACOTableType cotable_type) 928 + { 929 + if (cotable_type >= SVGA_COTABLE_DX10_MAX) 930 + return ERR_PTR(-EINVAL); 931 + 932 + return vmw_resource_reference 933 + (container_of(ctx, struct vmw_user_context, res)-> 934 + cotables[cotable_type]); 935 + } 936 + 937 + /** 938 + * vmw_context_binding_state - 939 + * Return a pointer to a context binding state structure 940 + * 941 + * @ctx: The context resource 942 + * 943 + * Returns the current state of bindings of the given context. Note that 944 + * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked. 
945 + */ 946 + struct vmw_ctx_binding_state * 947 + vmw_context_binding_state(struct vmw_resource *ctx) 948 + { 949 + return container_of(ctx, struct vmw_user_context, res)->cbs; 839 950 }
+662
drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
··· 1 + /************************************************************************** 2 + * 3 + * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA 4 + * All Rights Reserved. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the 8 + * "Software"), to deal in the Software without restriction, including 9 + * without limitation the rights to use, copy, modify, merge, publish, 10 + * distribute, sub license, and/or sell copies of the Software, and to 11 + * permit persons to whom the Software is furnished to do so, subject to 12 + * the following conditions: 13 + * 14 + * The above copyright notice and this permission notice (including the 15 + * next paragraph) shall be included in all copies or substantial portions 16 + * of the Software. 17 + * 18 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 + * 26 + **************************************************************************/ 27 + /* 28 + * Treat context OTables as resources to make use of the resource 29 + * backing MOB eviction mechanism, that is used to read back the COTable 30 + * whenever the backing MOB is evicted. 31 + */ 32 + 33 + #include "vmwgfx_drv.h" 34 + #include "vmwgfx_resource_priv.h" 35 + #include <ttm/ttm_placement.h> 36 + #include "vmwgfx_so.h" 37 + 38 + /** 39 + * struct vmw_cotable - Context Object Table resource 40 + * 41 + * @res: struct vmw_resource we are deriving from. 
42 + * @ctx: non-refcounted pointer to the owning context. 43 + * @size_read_back: Size of data read back during eviction. 44 + * @seen_entries: Seen entries in command stream for this cotable. 45 + * @type: The cotable type. 46 + * @scrubbed: Whether the cotable has been scrubbed. 47 + * @resource_list: List of resources in the cotable. 48 + */ 49 + struct vmw_cotable { 50 + struct vmw_resource res; 51 + struct vmw_resource *ctx; 52 + size_t size_read_back; 53 + int seen_entries; 54 + u32 type; 55 + bool scrubbed; 56 + struct list_head resource_list; 57 + }; 58 + 59 + /** 60 + * struct vmw_cotable_info - Static info about cotable types 61 + * 62 + * @min_initial_entries: Min number of initial intries at cotable allocation 63 + * for this cotable type. 64 + * @size: Size of each entry. 65 + */ 66 + struct vmw_cotable_info { 67 + u32 min_initial_entries; 68 + u32 size; 69 + void (*unbind_func)(struct vmw_private *, struct list_head *, 70 + bool); 71 + }; 72 + 73 + static const struct vmw_cotable_info co_info[] = { 74 + {1, sizeof(SVGACOTableDXRTViewEntry), &vmw_view_cotable_list_destroy}, 75 + {1, sizeof(SVGACOTableDXDSViewEntry), &vmw_view_cotable_list_destroy}, 76 + {1, sizeof(SVGACOTableDXSRViewEntry), &vmw_view_cotable_list_destroy}, 77 + {1, sizeof(SVGACOTableDXElementLayoutEntry), NULL}, 78 + {1, sizeof(SVGACOTableDXBlendStateEntry), NULL}, 79 + {1, sizeof(SVGACOTableDXDepthStencilEntry), NULL}, 80 + {1, sizeof(SVGACOTableDXRasterizerStateEntry), NULL}, 81 + {1, sizeof(SVGACOTableDXSamplerEntry), NULL}, 82 + {1, sizeof(SVGACOTableDXStreamOutputEntry), NULL}, 83 + {1, sizeof(SVGACOTableDXQueryEntry), NULL}, 84 + {1, sizeof(SVGACOTableDXShaderEntry), &vmw_dx_shader_cotable_list_scrub} 85 + }; 86 + 87 + /* 88 + * Cotables with bindings that we remove must be scrubbed first, 89 + * otherwise, the device will swap in an invalid context when we remove 90 + * bindings before scrubbing a cotable... 
91 + */ 92 + const SVGACOTableType vmw_cotable_scrub_order[] = { 93 + SVGA_COTABLE_RTVIEW, 94 + SVGA_COTABLE_DSVIEW, 95 + SVGA_COTABLE_SRVIEW, 96 + SVGA_COTABLE_DXSHADER, 97 + SVGA_COTABLE_ELEMENTLAYOUT, 98 + SVGA_COTABLE_BLENDSTATE, 99 + SVGA_COTABLE_DEPTHSTENCIL, 100 + SVGA_COTABLE_RASTERIZERSTATE, 101 + SVGA_COTABLE_SAMPLER, 102 + SVGA_COTABLE_STREAMOUTPUT, 103 + SVGA_COTABLE_DXQUERY, 104 + }; 105 + 106 + static int vmw_cotable_bind(struct vmw_resource *res, 107 + struct ttm_validate_buffer *val_buf); 108 + static int vmw_cotable_unbind(struct vmw_resource *res, 109 + bool readback, 110 + struct ttm_validate_buffer *val_buf); 111 + static int vmw_cotable_create(struct vmw_resource *res); 112 + static int vmw_cotable_destroy(struct vmw_resource *res); 113 + 114 + static const struct vmw_res_func vmw_cotable_func = { 115 + .res_type = vmw_res_cotable, 116 + .needs_backup = true, 117 + .may_evict = true, 118 + .type_name = "context guest backed object tables", 119 + .backup_placement = &vmw_mob_placement, 120 + .create = vmw_cotable_create, 121 + .destroy = vmw_cotable_destroy, 122 + .bind = vmw_cotable_bind, 123 + .unbind = vmw_cotable_unbind, 124 + }; 125 + 126 + /** 127 + * vmw_cotable - Convert a struct vmw_resource pointer to a struct 128 + * vmw_cotable pointer 129 + * 130 + * @res: Pointer to the resource. 131 + */ 132 + static struct vmw_cotable *vmw_cotable(struct vmw_resource *res) 133 + { 134 + return container_of(res, struct vmw_cotable, res); 135 + } 136 + 137 + /** 138 + * vmw_cotable_destroy - Cotable resource destroy callback 139 + * 140 + * @res: Pointer to the cotable resource. 141 + * 142 + * There is no device cotable destroy command, so this function only 143 + * makes sure that the resource id is set to invalid. 
144 + */ 145 + static int vmw_cotable_destroy(struct vmw_resource *res) 146 + { 147 + res->id = -1; 148 + return 0; 149 + } 150 + 151 + /** 152 + * vmw_cotable_unscrub - Undo a cotable unscrub operation 153 + * 154 + * @res: Pointer to the cotable resource 155 + * 156 + * This function issues commands to (re)bind the cotable to 157 + * its backing mob, which needs to be validated and reserved at this point. 158 + * This is identical to bind() except the function interface looks different. 159 + */ 160 + static int vmw_cotable_unscrub(struct vmw_resource *res) 161 + { 162 + struct vmw_cotable *vcotbl = vmw_cotable(res); 163 + struct vmw_private *dev_priv = res->dev_priv; 164 + struct ttm_buffer_object *bo = &res->backup->base; 165 + struct { 166 + SVGA3dCmdHeader header; 167 + SVGA3dCmdDXSetCOTable body; 168 + } *cmd; 169 + 170 + WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); 171 + lockdep_assert_held(&bo->resv->lock.base); 172 + 173 + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), SVGA3D_INVALID_ID); 174 + if (!cmd) { 175 + DRM_ERROR("Failed reserving FIFO space for cotable " 176 + "binding.\n"); 177 + return -ENOMEM; 178 + } 179 + 180 + WARN_ON(vcotbl->ctx->id == SVGA3D_INVALID_ID); 181 + WARN_ON(bo->mem.mem_type != VMW_PL_MOB); 182 + cmd->header.id = SVGA_3D_CMD_DX_SET_COTABLE; 183 + cmd->header.size = sizeof(cmd->body); 184 + cmd->body.cid = vcotbl->ctx->id; 185 + cmd->body.type = vcotbl->type; 186 + cmd->body.mobid = bo->mem.start; 187 + cmd->body.validSizeInBytes = vcotbl->size_read_back; 188 + 189 + vmw_fifo_commit_flush(dev_priv, sizeof(*cmd)); 190 + vcotbl->scrubbed = false; 191 + 192 + return 0; 193 + } 194 + 195 + /** 196 + * vmw_cotable_bind - Undo a cotable unscrub operation 197 + * 198 + * @res: Pointer to the cotable resource 199 + * @val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller 200 + * for convenience / fencing. 
201 + * 202 + * This function issues commands to (re)bind the cotable to 203 + * its backing mob, which needs to be validated and reserved at this point. 204 + */ 205 + static int vmw_cotable_bind(struct vmw_resource *res, 206 + struct ttm_validate_buffer *val_buf) 207 + { 208 + /* 209 + * The create() callback may have changed @res->backup without 210 + * the caller noticing, and with val_buf->bo still pointing to 211 + * the old backup buffer. Although hackish, and not used currently, 212 + * take the opportunity to correct the value here so that it's not 213 + * misused in the future. 214 + */ 215 + val_buf->bo = &res->backup->base; 216 + 217 + return vmw_cotable_unscrub(res); 218 + } 219 + 220 + /** 221 + * vmw_cotable_scrub - Scrub the cotable from the device. 222 + * 223 + * @res: Pointer to the cotable resource. 224 + * @readback: Whether initiate a readback of the cotable data to the backup 225 + * buffer. 226 + * 227 + * In some situations (context swapouts) it might be desirable to make the 228 + * device forget about the cotable without performing a full unbind. A full 229 + * unbind requires reserved backup buffers and it might not be possible to 230 + * reserve them due to locking order violation issues. The vmw_cotable_scrub 231 + * function implements a partial unbind() without that requirement but with the 232 + * following restrictions. 233 + * 1) Before the cotable is again used by the GPU, vmw_cotable_unscrub() must 234 + * be called. 235 + * 2) Before the cotable backing buffer is used by the CPU, or during the 236 + * resource destruction, vmw_cotable_unbind() must be called. 
237 + */ 238 + int vmw_cotable_scrub(struct vmw_resource *res, bool readback) 239 + { 240 + struct vmw_cotable *vcotbl = vmw_cotable(res); 241 + struct vmw_private *dev_priv = res->dev_priv; 242 + size_t submit_size; 243 + 244 + struct { 245 + SVGA3dCmdHeader header; 246 + SVGA3dCmdDXReadbackCOTable body; 247 + } *cmd0; 248 + struct { 249 + SVGA3dCmdHeader header; 250 + SVGA3dCmdDXSetCOTable body; 251 + } *cmd1; 252 + 253 + if (vcotbl->scrubbed) 254 + return 0; 255 + 256 + if (co_info[vcotbl->type].unbind_func) 257 + co_info[vcotbl->type].unbind_func(dev_priv, 258 + &vcotbl->resource_list, 259 + readback); 260 + submit_size = sizeof(*cmd1); 261 + if (readback) 262 + submit_size += sizeof(*cmd0); 263 + 264 + cmd1 = vmw_fifo_reserve_dx(dev_priv, submit_size, SVGA3D_INVALID_ID); 265 + if (!cmd1) { 266 + DRM_ERROR("Failed reserving FIFO space for cotable " 267 + "unbinding.\n"); 268 + return -ENOMEM; 269 + } 270 + 271 + vcotbl->size_read_back = 0; 272 + if (readback) { 273 + cmd0 = (void *) cmd1; 274 + cmd0->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE; 275 + cmd0->header.size = sizeof(cmd0->body); 276 + cmd0->body.cid = vcotbl->ctx->id; 277 + cmd0->body.type = vcotbl->type; 278 + cmd1 = (void *) &cmd0[1]; 279 + vcotbl->size_read_back = res->backup_size; 280 + } 281 + cmd1->header.id = SVGA_3D_CMD_DX_SET_COTABLE; 282 + cmd1->header.size = sizeof(cmd1->body); 283 + cmd1->body.cid = vcotbl->ctx->id; 284 + cmd1->body.type = vcotbl->type; 285 + cmd1->body.mobid = SVGA3D_INVALID_ID; 286 + cmd1->body.validSizeInBytes = 0; 287 + vmw_fifo_commit_flush(dev_priv, submit_size); 288 + vcotbl->scrubbed = true; 289 + 290 + /* Trigger a create() on next validate. */ 291 + res->id = -1; 292 + 293 + return 0; 294 + } 295 + 296 + /** 297 + * vmw_cotable_unbind - Cotable resource unbind callback 298 + * 299 + * @res: Pointer to the cotable resource. 300 + * @readback: Whether to read back cotable data to the backup buffer. 
301 + * val_buf: Pointer to a struct ttm_validate_buffer prepared by the caller 302 + * for convenience / fencing. 303 + * 304 + * Unbinds the cotable from the device and fences the backup buffer. 305 + */ 306 + static int vmw_cotable_unbind(struct vmw_resource *res, 307 + bool readback, 308 + struct ttm_validate_buffer *val_buf) 309 + { 310 + struct vmw_cotable *vcotbl = vmw_cotable(res); 311 + struct vmw_private *dev_priv = res->dev_priv; 312 + struct ttm_buffer_object *bo = val_buf->bo; 313 + struct vmw_fence_obj *fence; 314 + int ret; 315 + 316 + if (list_empty(&res->mob_head)) 317 + return 0; 318 + 319 + WARN_ON_ONCE(bo->mem.mem_type != VMW_PL_MOB); 320 + lockdep_assert_held(&bo->resv->lock.base); 321 + 322 + mutex_lock(&dev_priv->binding_mutex); 323 + if (!vcotbl->scrubbed) 324 + vmw_dx_context_scrub_cotables(vcotbl->ctx, readback); 325 + mutex_unlock(&dev_priv->binding_mutex); 326 + (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); 327 + vmw_fence_single_bo(bo, fence); 328 + if (likely(fence != NULL)) 329 + vmw_fence_obj_unreference(&fence); 330 + 331 + return ret; 332 + } 333 + 334 + /** 335 + * vmw_cotable_readback - Read back a cotable without unbinding. 336 + * 337 + * @res: The cotable resource. 338 + * 339 + * Reads back a cotable to its backing mob without scrubbing the MOB from 340 + * the cotable. The MOB is fenced for subsequent CPU access. 
341 + */ 
342 + static int vmw_cotable_readback(struct vmw_resource *res) 
343 + { 
344 + struct vmw_cotable *vcotbl = vmw_cotable(res); 
345 + struct vmw_private *dev_priv = res->dev_priv; 
346 + 
347 + struct { 
348 + SVGA3dCmdHeader header; 
349 + SVGA3dCmdDXReadbackCOTable body; 
350 + } *cmd; 
351 + struct vmw_fence_obj *fence; 
352 + 
 /* If already scrubbed, the device copy is stale; only fence the MOB. */
353 + if (!vcotbl->scrubbed) { 
354 + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), 
355 + SVGA3D_INVALID_ID); 
356 + if (!cmd) { 
357 + DRM_ERROR("Failed reserving FIFO space for cotable " 
358 + "readback.\n"); 
359 + return -ENOMEM; 
360 + } 
361 + cmd->header.id = SVGA_3D_CMD_DX_READBACK_COTABLE; 
362 + cmd->header.size = sizeof(cmd->body); 
363 + cmd->body.cid = vcotbl->ctx->id; 
364 + cmd->body.type = vcotbl->type; 
365 + vcotbl->size_read_back = res->backup_size; 
366 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 
367 + } 
368 + 
369 + (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL); 
 /* NOTE(review): unlike vmw_cotable_unbind(), no NULL check before the
  * unreference here -- presumably vmw_fence_obj_unreference() tolerates
  * a NULL fence; confirm against vmwgfx_fence.h. */
370 + vmw_fence_single_bo(&res->backup->base, fence); 
371 + vmw_fence_obj_unreference(&fence); 
372 + 
373 + return 0; 
374 + } 
375 + 
376 + /** 
377 + * vmw_cotable_resize - Resize a cotable. 
378 + * 
379 + * @res: The cotable resource. 
380 + * @new_size: The new size. 
381 + * 
382 + * Resizes a cotable and binds the new backup buffer. 
383 + * On failure the cotable is left intact. 
384 + * Important! This function may not fail once the MOB switch has been 
385 + * committed to hardware. That would put the device context in an 
386 + * invalid state which we can't currently recover from. 
387 + */ 
388 + static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) 
389 + { 
390 + struct vmw_private *dev_priv = res->dev_priv; 
391 + struct vmw_cotable *vcotbl = vmw_cotable(res); 
392 + struct vmw_dma_buffer *buf, *old_buf = res->backup; 
393 + struct ttm_buffer_object *bo, *old_bo = &res->backup->base; 
394 + size_t old_size = res->backup_size; 
395 + size_t old_size_read_back = vcotbl->size_read_back; 
396 + size_t cur_size_read_back; 
397 + struct ttm_bo_kmap_obj old_map, new_map; 
398 + int ret; 
399 + size_t i; 
400 + 
 /* Flush current cotable contents to the old MOB before copying. */
401 + ret = vmw_cotable_readback(res); 
402 + if (ret) 
403 + return ret; 
404 + 
 /* Stash the post-readback size; restore it only once the switch to the
  * new MOB has succeeded (see below), so a failure leaves @res intact. */
405 + cur_size_read_back = vcotbl->size_read_back; 
406 + vcotbl->size_read_back = old_size_read_back; 
407 + 
408 + /* 
409 + * While the device is processing, allocate and reserve a buffer object 
410 + * for the new COTable. Initially pin the buffer object to make sure 
411 + * we can use tryreserve without failure. 
412 + */ 
413 + buf = kzalloc(sizeof(*buf), GFP_KERNEL); 
414 + if (!buf) 
415 + return -ENOMEM; 
416 + 
 /* NOTE(review): on failure vmw_dmabuf_init() presumably frees @buf via
  * vmw_dmabuf_bo_free, hence no kfree here -- confirm. */
417 + ret = vmw_dmabuf_init(dev_priv, buf, new_size, &vmw_mob_ne_placement, 
418 + true, vmw_dmabuf_bo_free); 
419 + if (ret) { 
420 + DRM_ERROR("Failed initializing new cotable MOB.\n"); 
421 + return ret; 
422 + } 
423 + 
424 + bo = &buf->base; 
425 + WARN_ON_ONCE(ttm_bo_reserve(bo, false, true, false, NULL)); 
426 + 
 /* Wait for the device to finish the readback into the old MOB. */
427 + ret = ttm_bo_wait(old_bo, false, false, false); 
428 + if (unlikely(ret != 0)) { 
429 + DRM_ERROR("Failed waiting for cotable unbind.\n"); 
430 + goto out_wait; 
431 + } 
432 + 
433 + /* 
434 + * Do a page by page copy of COTables. This eliminates slow vmap()s. 
435 + * This should really be a TTM utility. 
436 + */ 
437 + for (i = 0; i < old_bo->num_pages; ++i) { 
438 + bool dummy; 
439 + 
440 + ret = ttm_bo_kmap(old_bo, i, 1, &old_map); 
441 + if (unlikely(ret != 0)) { 
442 + DRM_ERROR("Failed mapping old COTable on resize.\n"); 
443 + goto out_wait; 
444 + } 
445 + ret = ttm_bo_kmap(bo, i, 1, &new_map); 
446 + if (unlikely(ret != 0)) { 
447 + DRM_ERROR("Failed mapping new COTable on resize.\n"); 
448 + goto out_map_new; 
449 + } 
450 + memcpy(ttm_kmap_obj_virtual(&new_map, &dummy), 
451 + ttm_kmap_obj_virtual(&old_map, &dummy), 
452 + PAGE_SIZE); 
453 + ttm_bo_kunmap(&new_map); 
454 + ttm_bo_kunmap(&old_map); 
455 + } 
456 + 
457 + /* Unpin new buffer, and switch backup buffers. */ 
458 + ret = ttm_bo_validate(bo, &vmw_mob_placement, false, false); 
459 + if (unlikely(ret != 0)) { 
460 + DRM_ERROR("Failed validating new COTable backup buffer.\n"); 
461 + goto out_wait; 
462 + } 
463 + 
464 + res->backup = buf; 
465 + res->backup_size = new_size; 
466 + vcotbl->size_read_back = cur_size_read_back; 
467 + 
468 + /* 
469 + * Now tell the device to switch. If this fails, then we need to 
470 + * revert the full resize. 
471 + */ 
472 + ret = vmw_cotable_unscrub(res); 
473 + if (ret) { 
474 + DRM_ERROR("Failed switching COTable backup buffer.\n"); 
475 + res->backup = old_buf; 
476 + res->backup_size = old_size; 
477 + vcotbl->size_read_back = old_size_read_back; 
478 + goto out_wait; 
479 + } 
480 + 
481 + /* Let go of the old mob. */ 
482 + list_del(&res->mob_head); 
483 + list_add_tail(&res->mob_head, &buf->res_list); 
484 + vmw_dmabuf_unreference(&old_buf); 
485 + res->id = vcotbl->type; 
486 + 
487 + return 0; 
488 + 
489 + out_map_new: 
490 + ttm_bo_kunmap(&old_map); 
491 + out_wait: 
492 + ttm_bo_unreserve(bo); 
493 + vmw_dmabuf_unreference(&buf); 
494 + 
495 + return ret; 
496 + } 
497 + 
498 + /** 
499 + * vmw_cotable_create - Cotable resource create callback 
500 + * 
501 + * @res: Pointer to a cotable resource. 
502 + * 
503 + * There is no separate create command for cotables, so this callback, which 
504 + * is called before bind() in the validation sequence is instead used for two 
505 + * things. 
506 + * 1) Unscrub the cotable if it is scrubbed and still attached to a backup 
507 + * buffer, that is, if @res->mob_head is non-empty. 
508 + * 2) Resize the cotable if needed. 
509 + */ 
510 + static int vmw_cotable_create(struct vmw_resource *res) 
511 + { 
512 + struct vmw_cotable *vcotbl = vmw_cotable(res); 
513 + size_t new_size = res->backup_size; 
514 + size_t needed_size; 
515 + int ret; 
516 + 
517 + /* Check whether we need to resize the cotable */ 
 /* seen_entries is the highest id seen (-1 if none), hence the +1.
  * Grow by doubling so resizes stay O(log n) in entry count. */
518 + needed_size = (vcotbl->seen_entries + 1) * co_info[vcotbl->type].size; 
519 + while (needed_size > new_size) 
520 + new_size *= 2; 
521 + 
522 + if (likely(new_size <= res->backup_size)) { 
523 + if (vcotbl->scrubbed && !list_empty(&res->mob_head)) { 
524 + ret = vmw_cotable_unscrub(res); 
525 + if (ret) 
526 + return ret; 
527 + } 
528 + res->id = vcotbl->type; 
529 + return 0; 
530 + } 
531 + 
532 + return vmw_cotable_resize(res, new_size); 
533 + } 
534 + 
535 + /** 
536 + * vmw_hw_cotable_destroy - Cotable hw_destroy callback 
537 + * 
538 + * @res: Pointer to a cotable resource. 
539 + * 
540 + * The final (part of resource destruction) destroy callback. 
541 + */ 
542 + static void vmw_hw_cotable_destroy(struct vmw_resource *res) 
543 + { 
544 + (void) vmw_cotable_destroy(res); 
545 + } 
546 + 
 /* Accounted size of a cotable, lazily computed in vmw_cotable_alloc(). */
547 + static size_t cotable_acc_size; 
548 + 
549 + /** 
550 + * vmw_cotable_free - Cotable resource destructor 
551 + * 
552 + * @res: Pointer to a cotable resource. 
553 + */ 
554 + static void vmw_cotable_free(struct vmw_resource *res) 
555 + { 
556 + struct vmw_private *dev_priv = res->dev_priv; 
557 + 
558 + kfree(res); 
559 + ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); 
560 + } 
561 + 
562 + /** 
563 + * vmw_cotable_alloc - Create a cotable resource 
564 + * 
565 + * @dev_priv: Pointer to a device private struct. 
566 + * @ctx: Pointer to the context resource. 
567 + * The cotable resource will not add a refcount. 
568 + * @type: The cotable type. 
569 + */ 
570 + struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, 
571 + struct vmw_resource *ctx, 
572 + u32 type) 
573 + { 
574 + struct vmw_cotable *vcotbl; 
575 + int ret; 
576 + u32 num_entries; 
577 + 
 /* Lazy one-time init; the value is identical on every path, so a
  * benign race here only recomputes the same constant. */
578 + if (unlikely(cotable_acc_size == 0)) 
579 + cotable_acc_size = ttm_round_pot(sizeof(struct vmw_cotable)); 
580 + 
581 + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), 
582 + cotable_acc_size, false, true); 
583 + if (unlikely(ret)) 
584 + return ERR_PTR(ret); 
585 + 
586 + vcotbl = kzalloc(sizeof(*vcotbl), GFP_KERNEL); 
587 + if (unlikely(vcotbl == NULL)) { 
588 + ret = -ENOMEM; 
589 + goto out_no_alloc; 
590 + } 
591 + 
592 + ret = vmw_resource_init(dev_priv, &vcotbl->res, true, 
593 + vmw_cotable_free, &vmw_cotable_func); 
594 + if (unlikely(ret != 0)) 
595 + goto out_no_init; 
596 + 
597 + INIT_LIST_HEAD(&vcotbl->resource_list); 
598 + vcotbl->res.id = type; 
599 + vcotbl->res.backup_size = PAGE_SIZE; 
600 + num_entries = PAGE_SIZE / co_info[type].size; 
601 + if (num_entries < co_info[type].min_initial_entries) { 
602 + vcotbl->res.backup_size = co_info[type].min_initial_entries * 
603 + co_info[type].size; 
 /* Round up to a whole page (PAGE_MASK == ~(PAGE_SIZE - 1)). */
604 + vcotbl->res.backup_size = 
605 + (vcotbl->res.backup_size + PAGE_SIZE - 1) & PAGE_MASK; 
606 + } 
607 + 
 /* Start scrubbed: the device learns about the cotable on first
  * validate (create()/unscrub). seen_entries == -1 means "empty". */
608 + vcotbl->scrubbed = true; 
609 + vcotbl->seen_entries = -1; 
610 + vcotbl->type = type; 
611 + vcotbl->ctx = ctx; 
612 + 
613 + vmw_resource_activate(&vcotbl->res, vmw_hw_cotable_destroy); 
614 + 
615 + return &vcotbl->res; 
616 + 
617 + out_no_init: 
618 + kfree(vcotbl); 
619 + out_no_alloc: 
620 + ttm_mem_global_free(vmw_mem_glob(dev_priv), cotable_acc_size); 
621 + return ERR_PTR(ret); 
622 + } 
623 + 
624 + /** 
625 + * vmw_cotable_notify - Notify the cotable about an item creation 
626 + * 
627 + * @res: Pointer to a cotable resource. 
628 + * @id: Item id. 
629 + */ 
630 + int vmw_cotable_notify(struct vmw_resource *res, int id) 
631 + { 
632 + struct vmw_cotable *vcotbl = vmw_cotable(res); 
633 + 
634 + if (id < 0 || id >= SVGA_COTABLE_MAX_IDS) { 
635 + DRM_ERROR("Illegal COTable id. Type is %u. Id is %d\n", 
636 + (unsigned) vcotbl->type, id); 
637 + return -EINVAL; 
638 + } 
639 + 
 /* A new high-water mark may require a resize, so force create(). */
640 + if (vcotbl->seen_entries < id) { 
641 + /* Trigger a call to create() on next validate */ 
642 + res->id = -1; 
643 + vcotbl->seen_entries = id; 
644 + } 
645 + 
646 + return 0; 
647 + } 
648 + 
649 + /** 
650 + * vmw_cotable_add_resource - add a resource to the cotable's list of active resources. 
651 + * 
652 + * @res: pointer to struct vmw_resource representing the cotable. 
653 + * @head: pointer to the struct list_head member of the resource, dedicated 
654 + * to the cotable active resource list. 
655 + */ 
656 + void vmw_cotable_add_resource(struct vmw_resource *res, struct list_head *head) 
657 + { 
658 + struct vmw_cotable *vcotbl = 
659 + container_of(res, struct vmw_cotable, res); 
660 + 
661 + list_add_tail(head, &vcotbl->resource_list); 
662 + }
+46 -8
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
··· 28 28 29 29 #include <drm/drmP.h> 30 30 #include "vmwgfx_drv.h" 31 + #include "vmwgfx_binding.h" 31 32 #include <drm/ttm/ttm_placement.h> 32 33 #include <drm/ttm/ttm_bo_driver.h> 33 34 #include <drm/ttm/ttm_object.h> ··· 128 127 #define DRM_IOCTL_VMW_SYNCCPU \ 129 128 DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_SYNCCPU, \ 130 129 struct drm_vmw_synccpu_arg) 130 + #define DRM_IOCTL_VMW_CREATE_EXTENDED_CONTEXT \ 131 + DRM_IOWR(DRM_COMMAND_BASE + DRM_VMW_CREATE_EXTENDED_CONTEXT, \ 132 + struct drm_vmw_context_arg) 131 133 132 134 /** 133 135 * The core DRM version of this macro doesn't account for ··· 172 168 DRM_UNLOCKED | DRM_RENDER_ALLOW), 173 169 VMW_IOCTL_DEF(VMW_REF_SURFACE, vmw_surface_reference_ioctl, 174 170 DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 175 - VMW_IOCTL_DEF(VMW_EXECBUF, vmw_execbuf_ioctl, 176 - DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 171 + VMW_IOCTL_DEF(VMW_EXECBUF, NULL, DRM_AUTH | DRM_UNLOCKED | 172 + DRM_RENDER_ALLOW), 177 173 VMW_IOCTL_DEF(VMW_FENCE_WAIT, vmw_fence_obj_wait_ioctl, 178 174 DRM_UNLOCKED | DRM_RENDER_ALLOW), 179 175 VMW_IOCTL_DEF(VMW_FENCE_SIGNALED, ··· 210 206 VMW_IOCTL_DEF(VMW_SYNCCPU, 211 207 vmw_user_dmabuf_synccpu_ioctl, 212 208 DRM_UNLOCKED | DRM_RENDER_ALLOW), 209 + VMW_IOCTL_DEF(VMW_CREATE_EXTENDED_CONTEXT, 210 + vmw_extended_context_define_ioctl, 211 + DRM_AUTH | DRM_UNLOCKED | DRM_RENDER_ALLOW), 213 212 }; 214 213 215 214 static struct pci_device_id vmw_pci_id_list[] = { ··· 397 390 } 398 391 vmw_fence_fifo_up(dev_priv->fman); 399 392 dev_priv->cman = vmw_cmdbuf_man_create(dev_priv); 400 - if (IS_ERR(dev_priv->cman)) 393 + if (IS_ERR(dev_priv->cman)) { 401 394 dev_priv->cman = NULL; 395 + dev_priv->has_dx = false; 396 + } 402 397 403 398 ret = vmw_request_device_late(dev_priv); 404 399 if (ret) ··· 857 848 } 858 849 } 859 850 851 + if (dev_priv->has_mob) { 852 + spin_lock(&dev_priv->cap_lock); 853 + vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX); 854 + dev_priv->has_dx = !!vmw_read(dev_priv, 
SVGA_REG_DEV_CAP); 855 + spin_unlock(&dev_priv->cap_lock); 856 + } 857 + 858 + 860 859 ret = vmw_kms_init(dev_priv); 861 860 if (unlikely(ret != 0)) 862 861 goto out_no_kms; ··· 873 856 ret = vmw_request_device(dev_priv); 874 857 if (ret) 875 858 goto out_no_fifo; 859 + 860 + DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no."); 876 861 877 862 if (dev_priv->enable_fb) { 878 863 vmw_fifo_resource_inc(dev_priv); ··· 919 900 for (i = vmw_res_context; i < vmw_res_max; ++i) 920 901 idr_destroy(&dev_priv->res_idr[i]); 921 902 903 + if (dev_priv->ctx.staged_bindings) 904 + vmw_binding_state_free(dev_priv->ctx.staged_bindings); 922 905 kfree(dev_priv); 923 906 return ret; 924 907 } ··· 966 945 iounmap(dev_priv->mmio_virt); 967 946 arch_phys_wc_del(dev_priv->mmio_mtrr); 968 947 (void)ttm_bo_device_release(&dev_priv->bdev); 948 + if (dev_priv->ctx.staged_bindings) 949 + vmw_binding_state_free(dev_priv->ctx.staged_bindings); 969 950 vmw_ttm_global_release(dev_priv); 970 951 971 952 for (i = vmw_res_context; i < vmw_res_max; ++i) ··· 1105 1082 const struct drm_ioctl_desc *ioctl = 1106 1083 &vmw_ioctls[nr - DRM_COMMAND_BASE]; 1107 1084 1108 - if (unlikely(ioctl->cmd != cmd)) { 1109 - DRM_ERROR("Invalid command format, ioctl %d\n", 1110 - nr - DRM_COMMAND_BASE); 1111 - return -EINVAL; 1085 + if (nr == DRM_COMMAND_BASE + DRM_VMW_EXECBUF) { 1086 + ret = (long) drm_ioctl_permit(ioctl->flags, file_priv); 1087 + if (unlikely(ret != 0)) 1088 + return ret; 1089 + 1090 + if (unlikely((cmd & (IOC_IN | IOC_OUT)) != IOC_IN)) 1091 + goto out_io_encoding; 1092 + 1093 + return (long) vmw_execbuf_ioctl(dev, arg, file_priv, 1094 + _IOC_SIZE(cmd)); 1112 1095 } 1096 + 1097 + if (unlikely(ioctl->cmd != cmd)) 1098 + goto out_io_encoding; 1099 + 1113 1100 flags = ioctl->flags; 1114 1101 } else if (!drm_ioctl_flags(nr, &flags)) 1115 1102 return -EINVAL; ··· 1139 1106 ttm_read_unlock(&vmaster->lock); 1140 1107 1141 1108 return ret; 1109 + 1110 + out_io_encoding: 1111 + DRM_ERROR("Invalid command 
format, ioctl %d\n", 1112 + nr - DRM_COMMAND_BASE); 1113 + 1114 + return -EINVAL; 1142 1115 } 1143 1116 1144 1117 static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd, ··· 1194 1155 master->driver_priv = NULL; 1195 1156 kfree(vmaster); 1196 1157 } 1197 - 1198 1158 1199 1159 static int vmw_master_set(struct drm_device *dev, 1200 1160 struct drm_file *file_priv,
+86 -89
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
··· 59 59 #define VMWGFX_NUM_GB_SHADER 20000 60 60 #define VMWGFX_NUM_GB_SURFACE 32768 61 61 #define VMWGFX_NUM_GB_SCREEN_TARGET VMWGFX_MAX_DISPLAYS 62 + #define VMWGFX_NUM_DXCONTEXT 256 63 + #define VMWGFX_NUM_DXQUERY 512 62 64 #define VMWGFX_NUM_MOB (VMWGFX_NUM_GB_CONTEXT +\ 63 65 VMWGFX_NUM_GB_SHADER +\ 64 66 VMWGFX_NUM_GB_SURFACE +\ ··· 134 132 vmw_res_surface, 135 133 vmw_res_stream, 136 134 vmw_res_shader, 135 + vmw_res_dx_context, 136 + vmw_res_cotable, 137 + vmw_res_view, 137 138 vmw_res_max 138 139 }; 139 140 ··· 144 139 * Resources that are managed using command streams. 145 140 */ 146 141 enum vmw_cmdbuf_res_type { 147 - vmw_cmdbuf_res_compat_shader 142 + vmw_cmdbuf_res_shader, 143 + vmw_cmdbuf_res_view 148 144 }; 149 145 150 146 struct vmw_cmdbuf_res_manager; ··· 168 162 struct drm_vmw_size *sizes; 169 163 uint32_t num_sizes; 170 164 bool scanout; 165 + uint32_t array_size; 171 166 /* TODO so far just a extra pointer */ 172 167 struct vmw_cursor_snooper snooper; 173 168 struct vmw_surface_offset *offsets; 174 169 SVGA3dTextureFilter autogen_filter; 175 170 uint32_t multisample_count; 171 + struct list_head view_list; 176 172 }; 177 173 178 174 struct vmw_marker_queue { ··· 194 186 struct mutex fifo_mutex; 195 187 struct rw_semaphore rwsem; 196 188 struct vmw_marker_queue marker_queue; 189 + bool dx; 197 190 }; 198 191 199 192 struct vmw_relocation { ··· 275 266 }; 276 267 277 268 /* 278 - * enum vmw_ctx_binding_type - abstract resource to context binding types 279 - */ 280 - enum vmw_ctx_binding_type { 281 - vmw_ctx_binding_shader, 282 - vmw_ctx_binding_rt, 283 - vmw_ctx_binding_tex, 284 - vmw_ctx_binding_max 285 - }; 286 - 287 - /** 288 - * struct vmw_ctx_bindinfo - structure representing a single context binding 289 - * 290 - * @ctx: Pointer to the context structure. NULL means the binding is not 291 - * active. 292 - * @res: Non ref-counted pointer to the bound resource. 293 - * @bt: The binding type. 
294 - * @i1: Union of information needed to unbind. 295 - */ 296 - struct vmw_ctx_bindinfo { 297 - struct vmw_resource *ctx; 298 - struct vmw_resource *res; 299 - enum vmw_ctx_binding_type bt; 300 - bool scrubbed; 301 - union { 302 - SVGA3dShaderType shader_type; 303 - SVGA3dRenderTargetType rt_type; 304 - uint32 texture_stage; 305 - } i1; 306 - }; 307 - 308 - /** 309 - * struct vmw_ctx_binding - structure representing a single context binding 310 - * - suitable for tracking in a context 311 - * 312 - * @ctx_list: List head for context. 313 - * @res_list: List head for bound resource. 314 - * @bi: Binding info 315 - */ 316 - struct vmw_ctx_binding { 317 - struct list_head ctx_list; 318 - struct list_head res_list; 319 - struct vmw_ctx_bindinfo bi; 320 - }; 321 - 322 - 323 - /** 324 - * struct vmw_ctx_binding_state - context binding state 325 - * 326 - * @list: linked list of individual bindings. 327 - * @render_targets: Render target bindings. 328 - * @texture_units: Texture units/samplers bindings. 329 - * @shaders: Shader bindings. 330 - * 331 - * Note that this structure also provides storage space for the individual 332 - * struct vmw_ctx_binding objects, so that no dynamic allocation is needed 333 - * for individual bindings. 
334 - * 335 - */ 336 - struct vmw_ctx_binding_state { 337 - struct list_head list; 338 - struct vmw_ctx_binding render_targets[SVGA3D_RT_MAX]; 339 - struct vmw_ctx_binding texture_units[SVGA3D_NUM_TEXTURE_UNITS]; 340 - struct vmw_ctx_binding shaders[SVGA3D_SHADERTYPE_PREDX_MAX]; 341 - }; 342 - 343 - 344 - /* 345 269 * enum vmw_display_unit_type - Describes the display unit 346 270 */ 347 271 enum vmw_display_unit_type { ··· 298 356 uint32_t *cmd_bounce; 299 357 uint32_t cmd_bounce_size; 300 358 struct list_head resource_list; 359 + struct list_head ctx_resource_list; /* For contexts and cotables */ 301 360 struct vmw_dma_buffer *cur_query_bo; 302 361 struct list_head res_relocations; 303 362 uint32_t *buf_start; ··· 306 363 struct vmw_resource *last_query_ctx; 307 364 bool needs_post_query_barrier; 308 365 struct vmw_resource *error_resource; 309 - struct vmw_ctx_binding_state staged_bindings; 366 + struct vmw_ctx_binding_state *staged_bindings; 367 + bool staged_bindings_inuse; 310 368 struct list_head staged_cmd_res; 369 + struct vmw_resource_val_node *dx_ctx_node; 370 + struct vmw_dma_buffer *dx_query_mob; 371 + struct vmw_resource *dx_query_ctx; 372 + struct vmw_cmdbuf_res_manager *man; 311 373 }; 312 374 313 375 struct vmw_legacy_display; ··· 328 380 uint32_t primary; 329 381 uint32_t pos_x; 330 382 uint32_t pos_y; 383 + }; 384 + 385 + 386 + /* 387 + * struct vmw_otable - Guest Memory OBject table metadata 388 + * 389 + * @size: Size of the table (page-aligned). 390 + * @page_table: Pointer to a struct vmw_mob holding the page table. 
391 + */ 392 + struct vmw_otable { 393 + unsigned long size; 394 + struct vmw_mob *page_table; 395 + bool enabled; 396 + }; 397 + 398 + struct vmw_otable_batch { 399 + unsigned num_otables; 400 + struct vmw_otable *otables; 401 + struct vmw_resource *context; 402 + struct ttm_buffer_object *otable_bo; 331 403 }; 332 404 333 405 struct vmw_private { ··· 385 417 bool has_mob; 386 418 spinlock_t hw_lock; 387 419 spinlock_t cap_lock; 420 + bool has_dx; 388 421 389 422 /* 390 423 * VGA registers. ··· 521 552 /* 522 553 * Guest Backed stuff 523 554 */ 524 - struct ttm_buffer_object *otable_bo; 525 - struct vmw_otable *otables; 555 + struct vmw_otable_batch otable_batch; 526 556 527 557 struct vmw_cmdbuf_man *cman; 528 558 }; ··· 653 685 uint32_t *inout_id, 654 686 struct vmw_resource **out); 655 687 extern void vmw_resource_unreserve(struct vmw_resource *res, 688 + bool switch_backup, 656 689 struct vmw_dma_buffer *new_backup, 657 690 unsigned long new_backup_offset); 658 691 extern void vmw_resource_move_notify(struct ttm_buffer_object *bo, ··· 711 742 extern void vmw_fifo_release(struct vmw_private *dev_priv, 712 743 struct vmw_fifo_state *fifo); 713 744 extern void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes); 745 + extern void * 746 + vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id); 714 747 extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes); 748 + extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes); 715 749 extern int vmw_fifo_send_fence(struct vmw_private *dev_priv, 716 750 uint32_t *seqno); 717 751 extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason); ··· 800 828 * Command submission - vmwgfx_execbuf.c 801 829 */ 802 830 803 - extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data, 804 - struct drm_file *file_priv); 831 + extern int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, 832 + struct drm_file *file_priv, size_t 
size); 805 833 extern int vmw_execbuf_process(struct drm_file *file_priv, 806 834 struct vmw_private *dev_priv, 807 835 void __user *user_commands, 808 836 void *kernel_commands, 809 837 uint32_t command_size, 810 838 uint64_t throttle_us, 839 + uint32_t dx_context_handle, 811 840 struct drm_vmw_fence_rep __user 812 841 *user_fence_rep, 813 842 struct vmw_fence_obj **out_fence); ··· 933 960 uint32_t handle); 934 961 extern int vmw_resource_pin(struct vmw_resource *res, bool interruptible); 935 962 extern void vmw_resource_unpin(struct vmw_resource *res); 963 + extern enum vmw_res_type vmw_res_type(const struct vmw_resource *res); 936 964 937 965 /** 938 966 * Overlay control - vmwgfx_overlay.c ··· 990 1016 991 1017 extern const struct vmw_user_resource_conv *user_context_converter; 992 1018 993 - extern struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv); 994 - 995 1019 extern int vmw_context_check(struct vmw_private *dev_priv, 996 1020 struct ttm_object_file *tfile, 997 1021 int id, 998 1022 struct vmw_resource **p_res); 999 1023 extern int vmw_context_define_ioctl(struct drm_device *dev, void *data, 1000 1024 struct drm_file *file_priv); 1025 + extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data, 1026 + struct drm_file *file_priv); 1001 1027 extern int vmw_context_destroy_ioctl(struct drm_device *dev, void *data, 1002 1028 struct drm_file *file_priv); 1003 - extern int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs, 1004 - const struct vmw_ctx_bindinfo *ci); 1005 - extern void 1006 - vmw_context_binding_state_transfer(struct vmw_resource *res, 1007 - struct vmw_ctx_binding_state *cbs); 1008 - extern void vmw_context_binding_res_list_kill(struct list_head *head); 1009 - extern void vmw_context_binding_res_list_scrub(struct list_head *head); 1010 - extern int vmw_context_rebind_all(struct vmw_resource *ctx); 1011 1029 extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); 1012 1030 extern 
struct vmw_cmdbuf_res_manager * 1013 1031 vmw_context_res_man(struct vmw_resource *ctx); 1032 + extern struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx, 1033 + SVGACOTableType cotable_type); 1034 + extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx); 1035 + struct vmw_ctx_binding_state; 1036 + extern struct vmw_ctx_binding_state * 1037 + vmw_context_binding_state(struct vmw_resource *ctx); 1038 + extern void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx, 1039 + bool readback); 1040 + 1014 1041 /* 1015 1042 * Surface management - vmwgfx_surface.c 1016 1043 */ ··· 1041 1066 bool for_scanout, 1042 1067 uint32_t num_mip_levels, 1043 1068 uint32_t multisample_count, 1069 + uint32_t array_size, 1044 1070 struct drm_vmw_size size, 1045 1071 struct vmw_surface **srf_out); 1046 1072 ··· 1061 1085 SVGA3dShaderType shader_type, 1062 1086 size_t size, 1063 1087 struct list_head *list); 1064 - extern int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, 1065 - u32 user_key, SVGA3dShaderType shader_type, 1066 - struct list_head *list); 1088 + extern int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man, 1089 + u32 user_key, SVGA3dShaderType shader_type, 1090 + struct list_head *list); 1091 + extern int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, 1092 + struct vmw_resource *ctx, 1093 + u32 user_key, 1094 + SVGA3dShaderType shader_type, 1095 + struct list_head *list); 1096 + extern void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, 1097 + struct list_head *list, 1098 + bool readback); 1099 + 1067 1100 extern struct vmw_resource * 1068 - vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, 1069 - u32 user_key, SVGA3dShaderType shader_type); 1101 + vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man, 1102 + u32 user_key, SVGA3dShaderType shader_type); 1070 1103 1071 1104 /* 1072 1105 * Command buffer managed resources - vmwgfx_cmdbuf_res.c ··· 1099 1114 extern int 
vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man, 1100 1115 enum vmw_cmdbuf_res_type res_type, 1101 1116 u32 user_key, 1102 - struct list_head *list); 1117 + struct list_head *list, 1118 + struct vmw_resource **res); 1103 1119 1120 + /* 1121 + * COTable management - vmwgfx_cotable.c 1122 + */ 1123 + extern const SVGACOTableType vmw_cotable_scrub_order[]; 1124 + extern struct vmw_resource *vmw_cotable_alloc(struct vmw_private *dev_priv, 1125 + struct vmw_resource *ctx, 1126 + u32 type); 1127 + extern int vmw_cotable_notify(struct vmw_resource *res, int id); 1128 + extern int vmw_cotable_scrub(struct vmw_resource *res, bool readback); 1129 + extern void vmw_cotable_add_resource(struct vmw_resource *ctx, 1130 + struct list_head *head); 1104 1131 1105 1132 /* 1106 1133 * Command buffer managerment vmwgfx_cmdbuf.c
+1231 -115
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
··· 29 29 #include "vmwgfx_reg.h" 30 30 #include <drm/ttm/ttm_bo_api.h> 31 31 #include <drm/ttm/ttm_placement.h> 32 + #include "vmwgfx_so.h" 33 + #include "vmwgfx_binding.h" 32 34 33 35 #define VMW_RES_HT_ORDER 12 34 36 ··· 61 59 * @new_backup_offset: New backup buffer offset if @new_backup is non-NUll. 62 60 * @first_usage: Set to true the first time the resource is referenced in 63 61 * the command stream. 64 - * @no_buffer_needed: Resources do not need to allocate buffer backup on 65 - * reservation. The command stream will provide one. 62 + * @switching_backup: The command stream provides a new backup buffer for a 63 + * resource. 64 + * @no_buffer_needed: This means @switching_backup is true on first buffer 65 + * reference. So resource reservation does not need to allocate a backup 66 + * buffer for the resource. 66 67 */ 67 68 struct vmw_resource_val_node { 68 69 struct list_head head; ··· 74 69 struct vmw_dma_buffer *new_backup; 75 70 struct vmw_ctx_binding_state *staged_bindings; 76 71 unsigned long new_backup_offset; 77 - bool first_usage; 78 - bool no_buffer_needed; 72 + u32 first_usage : 1; 73 + u32 switching_backup : 1; 74 + u32 no_buffer_needed : 1; 79 75 }; 80 76 81 77 /** ··· 98 92 [(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),\ 99 93 (_gb_disable), (_gb_enable)} 100 94 95 + static int vmw_resource_context_res_add(struct vmw_private *dev_priv, 96 + struct vmw_sw_context *sw_context, 97 + struct vmw_resource *ctx); 98 + 101 99 /** 102 100 * vmw_resource_unreserve - unreserve resources previously reserved for 103 101 * command submission. ··· 109 99 * @list_head: list of resources to unreserve. 110 100 * @backoff: Whether command submission failed. 
111 101 */ 112 - static void vmw_resource_list_unreserve(struct list_head *list, 102 + static void vmw_resource_list_unreserve(struct vmw_sw_context *sw_context, 103 + struct list_head *list, 113 104 bool backoff) 114 105 { 115 106 struct vmw_resource_val_node *val; 116 107 117 108 list_for_each_entry(val, list, head) { 118 109 struct vmw_resource *res = val->res; 119 - struct vmw_dma_buffer *new_backup = 120 - backoff ? NULL : val->new_backup; 110 + bool switch_backup = 111 + (backoff) ? false : val->switching_backup; 121 112 122 113 /* 123 114 * Transfer staged context bindings to the ··· 126 115 */ 127 116 if (unlikely(val->staged_bindings)) { 128 117 if (!backoff) { 129 - vmw_context_binding_state_transfer 130 - (val->res, val->staged_bindings); 118 + vmw_binding_state_commit 119 + (vmw_context_binding_state(val->res), 120 + val->staged_bindings); 131 121 } 132 - kfree(val->staged_bindings); 122 + 123 + if (val->staged_bindings != sw_context->staged_bindings) 124 + vmw_binding_state_free(val->staged_bindings); 125 + else 126 + sw_context->staged_bindings_inuse = false; 133 127 val->staged_bindings = NULL; 134 128 } 135 - vmw_resource_unreserve(res, new_backup, 136 - val->new_backup_offset); 129 + vmw_resource_unreserve(res, switch_backup, val->new_backup, 130 + val->new_backup_offset); 137 131 vmw_dmabuf_unreference(&val->new_backup); 138 132 } 139 133 } 140 134 135 + /** 136 + * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is 137 + * added to the validate list. 138 + * 139 + * @dev_priv: Pointer to the device private: 140 + * @sw_context: The validation context: 141 + * @node: The validation node holding this context. 
142 + */ 143 + static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv, 144 + struct vmw_sw_context *sw_context, 145 + struct vmw_resource_val_node *node) 146 + { 147 + int ret; 148 + 149 + ret = vmw_resource_context_res_add(dev_priv, sw_context, node->res); 150 + if (unlikely(ret != 0)) 151 + goto out_err; 152 + 153 + if (!sw_context->staged_bindings) { 154 + sw_context->staged_bindings = 155 + vmw_binding_state_alloc(dev_priv); 156 + if (IS_ERR(sw_context->staged_bindings)) { 157 + DRM_ERROR("Failed to allocate context binding " 158 + "information.\n"); 159 + ret = PTR_ERR(sw_context->staged_bindings); 160 + sw_context->staged_bindings = NULL; 161 + goto out_err; 162 + } 163 + } 164 + 165 + if (sw_context->staged_bindings_inuse) { 166 + node->staged_bindings = vmw_binding_state_alloc(dev_priv); 167 + if (IS_ERR(node->staged_bindings)) { 168 + DRM_ERROR("Failed to allocate context binding " 169 + "information.\n"); 170 + ret = PTR_ERR(node->staged_bindings); 171 + node->staged_bindings = NULL; 172 + goto out_err; 173 + } 174 + } else { 175 + node->staged_bindings = sw_context->staged_bindings; 176 + sw_context->staged_bindings_inuse = true; 177 + } 178 + 179 + return 0; 180 + out_err: 181 + return ret; 182 + } 141 183 142 184 /** 143 185 * vmw_resource_val_add - Add a resource to the software context's ··· 205 141 struct vmw_resource *res, 206 142 struct vmw_resource_val_node **p_node) 207 143 { 144 + struct vmw_private *dev_priv = res->dev_priv; 208 145 struct vmw_resource_val_node *node; 209 146 struct drm_hash_item *hash; 210 147 int ret; ··· 234 169 kfree(node); 235 170 return ret; 236 171 } 237 - list_add_tail(&node->head, &sw_context->resource_list); 238 172 node->res = vmw_resource_reference(res); 239 173 node->first_usage = true; 240 - 241 174 if (unlikely(p_node != NULL)) 242 175 *p_node = node; 243 176 244 - return 0; 177 + if (!dev_priv->has_mob) { 178 + list_add_tail(&node->head, &sw_context->resource_list); 179 + return 0; 180 + } 181 + 182 + 
switch (vmw_res_type(res)) { 183 + case vmw_res_context: 184 + case vmw_res_dx_context: 185 + list_add(&node->head, &sw_context->ctx_resource_list); 186 + ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, node); 187 + break; 188 + case vmw_res_cotable: 189 + list_add_tail(&node->head, &sw_context->ctx_resource_list); 190 + break; 191 + default: 192 + list_add_tail(&node->head, &sw_context->resource_list); 193 + break; 194 + } 195 + 196 + return ret; 197 + } 198 + 199 + /** 200 + * vmw_view_res_val_add - Add a view and the surface it's pointing to 201 + * to the validation list 202 + * 203 + * @sw_context: The software context holding the validation list. 204 + * @view: Pointer to the view resource. 205 + * 206 + * Returns 0 if success, negative error code otherwise. 207 + */ 208 + static int vmw_view_res_val_add(struct vmw_sw_context *sw_context, 209 + struct vmw_resource *view) 210 + { 211 + int ret; 212 + 213 + /* 214 + * First add the resource the view is pointing to, otherwise 215 + * it may be swapped out when the view is validated. 216 + */ 217 + ret = vmw_resource_val_add(sw_context, vmw_view_srf(view), NULL); 218 + if (ret) 219 + return ret; 220 + 221 + return vmw_resource_val_add(sw_context, view, NULL); 222 + } 223 + 224 + /** 225 + * vmw_view_id_val_add - Look up a view and add it and the surface it's 226 + * pointing to to the validation list. 227 + * 228 + * @sw_context: The software context holding the validation list. 229 + * @view_type: The view type to look up. 230 + * @id: view id of the view. 231 + * 232 + * The view is represented by a view id and the DX context it's created on, 233 + * or scheduled for creation on. If there is no DX context set, the function 234 + * will return -EINVAL. Otherwise returns 0 on success and -EINVAL on failure. 
235 + */ 236 + static int vmw_view_id_val_add(struct vmw_sw_context *sw_context, 237 + enum vmw_view_type view_type, u32 id) 238 + { 239 + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 240 + struct vmw_resource *view; 241 + int ret; 242 + 243 + if (!ctx_node) { 244 + DRM_ERROR("DX Context not set.\n"); 245 + return -EINVAL; 246 + } 247 + 248 + view = vmw_view_lookup(sw_context->man, view_type, id); 249 + if (IS_ERR(view)) 250 + return PTR_ERR(view); 251 + 252 + ret = vmw_view_res_val_add(sw_context, view); 253 + vmw_resource_unreference(&view); 254 + 255 + return ret; 245 256 } 246 257 247 258 /** ··· 336 195 struct vmw_resource *ctx) 337 196 { 338 197 struct list_head *binding_list; 339 - struct vmw_ctx_binding *entry; 198 + struct vmw_ctx_bindinfo *entry; 340 199 int ret = 0; 341 200 struct vmw_resource *res; 201 + u32 i; 342 202 203 + /* Add all cotables to the validation list. */ 204 + if (dev_priv->has_dx && vmw_res_type(ctx) == vmw_res_dx_context) { 205 + for (i = 0; i < SVGA_COTABLE_DX10_MAX; ++i) { 206 + res = vmw_context_cotable(ctx, i); 207 + if (IS_ERR(res)) 208 + continue; 209 + 210 + ret = vmw_resource_val_add(sw_context, res, NULL); 211 + vmw_resource_unreference(&res); 212 + if (unlikely(ret != 0)) 213 + return ret; 214 + } 215 + } 216 + 217 + 218 + /* Add all resources bound to the context to the validation list */ 343 219 mutex_lock(&dev_priv->binding_mutex); 344 220 binding_list = vmw_context_binding_list(ctx); 345 221 346 222 list_for_each_entry(entry, binding_list, ctx_list) { 347 - res = vmw_resource_reference_unless_doomed(entry->bi.res); 223 + /* entry->res is not refcounted */ 224 + res = vmw_resource_reference_unless_doomed(entry->res); 348 225 if (unlikely(res == NULL)) 349 226 continue; 350 227 351 - ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL); 228 + if (vmw_res_type(entry->res) == vmw_res_view) 229 + ret = vmw_view_res_val_add(sw_context, entry->res); 230 + else 231 + ret = 
vmw_resource_val_add(sw_context, entry->res, 232 + NULL); 352 233 vmw_resource_unreference(&res); 353 234 if (unlikely(ret != 0)) 354 235 break; ··· 572 409 573 410 list_for_each_entry(val, &sw_context->resource_list, head) { 574 411 struct vmw_resource *res = val->res; 412 + struct vmw_dma_buffer *backup = res->backup; 575 413 576 414 ret = vmw_resource_validate(res); 577 415 if (unlikely(ret != 0)) { ··· 580 416 DRM_ERROR("Failed to validate resource.\n"); 581 417 return ret; 582 418 } 419 + 420 + /* Check if the resource switched backup buffer */ 421 + if (backup && res->backup && (backup != res->backup)) { 422 + struct vmw_dma_buffer *vbo = res->backup; 423 + 424 + ret = vmw_bo_to_validate_list 425 + (sw_context, vbo, 426 + vmw_resource_needs_backup(res), NULL); 427 + if (ret) { 428 + ttm_bo_unreserve(&vbo->base); 429 + return ret; 430 + } 431 + } 583 432 } 584 433 return 0; 585 434 } 586 - 587 435 588 436 /** 589 437 * vmw_cmd_res_reloc_add - Add a resource to a software context's ··· 603 427 * 604 428 * @dev_priv: Pointer to a struct vmw_private identifying the device. 605 429 * @sw_context: Pointer to the software context. 606 - * @res_type: Resource type. 607 430 * @id_loc: Pointer to where the id that needs translation is located. 608 431 * @res: Valid pointer to a struct vmw_resource. 609 432 * @p_val: If non null, a pointer to the struct vmw_resource_validate_node ··· 610 435 */ 611 436 static int vmw_cmd_res_reloc_add(struct vmw_private *dev_priv, 612 437 struct vmw_sw_context *sw_context, 613 - enum vmw_res_type res_type, 614 438 uint32_t *id_loc, 615 439 struct vmw_resource *res, 616 440 struct vmw_resource_val_node **p_val) ··· 627 453 ret = vmw_resource_val_add(sw_context, res, &node); 628 454 if (unlikely(ret != 0)) 629 455 return ret; 630 - 631 - if (res_type == vmw_res_context && dev_priv->has_mob && 632 - node->first_usage) { 633 - 634 - /* 635 - * Put contexts first on the list to be able to exit 636 - * list traversal for contexts early. 
637 - */ 
638 - list_del(&node->head); 
639 - list_add(&node->head, &sw_context->resource_list); 
640 - 
641 - ret = vmw_resource_context_res_add(dev_priv, sw_context, res); 
642 - if (unlikely(ret != 0)) 
643 - return ret; 
644 - node->staged_bindings = 
645 - kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL); 
646 - if (node->staged_bindings == NULL) { 
647 - DRM_ERROR("Failed to allocate context binding " 
648 - "information.\n"); 
649 - return -ENOMEM; 
650 - } 
651 - INIT_LIST_HEAD(&node->staged_bindings->list); 
652 - } 
653 456 
654 457 if (p_val) 
655 458 *p_val = node; 
··· 705 554 rcache->res = res; 
706 555 rcache->handle = *id_loc; 
707 556 
708 - ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, res_type, id_loc, 
557 + ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, id_loc, 
709 558 res, &node); 
710 559 if (unlikely(ret != 0)) 
711 560 goto out_no_reloc; 
··· 740 589 if (unlikely(!val->staged_bindings)) 
741 590 break; 
742 591 
743 - ret = vmw_context_rebind_all(val->res); 
592 + ret = vmw_binding_rebind_all 
593 + (vmw_context_binding_state(val->res)); 
744 594 if (unlikely(ret != 0)) { 
745 595 if (ret != -ERESTARTSYS) 
746 596 DRM_ERROR("Failed to rebind context.\n"); 
747 597 return ret; 
748 598 } 
599 + } 
600 + 
601 + return 0; 
602 + } 
603 + 
604 + /** 
605 + * vmw_view_bindings_add - Add an array of view bindings to a context 
606 + * binding state tracker. 
607 + * 
608 + * @sw_context: The execbuf state used for this command. 
609 + * @view_type: View type for the bindings. 
610 + * @binding_type: Binding type for the bindings. 
611 + * @shader_slot: The shader slot to use for the bindings. 
612 + * @view_ids: Array of view ids to be bound. 
613 + * @num_views: Number of view ids in @view_ids. 
614 + * @first_slot: The binding slot to be used for the first view id in @view_ids. 
615 + */ 616 + static int vmw_view_bindings_add(struct vmw_sw_context *sw_context, 617 + enum vmw_view_type view_type, 618 + enum vmw_ctx_binding_type binding_type, 619 + uint32 shader_slot, 620 + uint32 view_ids[], u32 num_views, 621 + u32 first_slot) 622 + { 623 + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 624 + struct vmw_cmdbuf_res_manager *man; 625 + u32 i; 626 + int ret; 627 + 628 + if (!ctx_node) { 629 + DRM_ERROR("DX Context not set.\n"); 630 + return -EINVAL; 631 + } 632 + 633 + man = sw_context->man; 634 + for (i = 0; i < num_views; ++i) { 635 + struct vmw_ctx_bindinfo_view binding; 636 + struct vmw_resource *view = NULL; 637 + 638 + if (view_ids[i] != SVGA3D_INVALID_ID) { 639 + view = vmw_view_lookup(man, view_type, view_ids[i]); 640 + if (IS_ERR(view)) { 641 + DRM_ERROR("View not found.\n"); 642 + return PTR_ERR(view); 643 + } 644 + 645 + ret = vmw_view_res_val_add(sw_context, view); 646 + if (ret) { 647 + DRM_ERROR("Could not add view to " 648 + "validation list.\n"); 649 + vmw_resource_unreference(&view); 650 + return ret; 651 + } 652 + } 653 + binding.bi.ctx = ctx_node->res; 654 + binding.bi.res = view; 655 + binding.bi.bt = binding_type; 656 + binding.shader_slot = shader_slot; 657 + binding.slot = first_slot + i; 658 + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 659 + shader_slot, binding.slot); 660 + if (view) 661 + vmw_resource_unreference(&view); 749 662 } 750 663 751 664 return 0; ··· 853 638 854 639 cmd = container_of(header, struct vmw_sid_cmd, header); 855 640 641 + if (cmd->body.type >= SVGA3D_RT_MAX) { 642 + DRM_ERROR("Illegal render target type %u.\n", 643 + (unsigned) cmd->body.type); 644 + return -EINVAL; 645 + } 646 + 856 647 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 857 648 user_context_converter, &cmd->body.cid, 858 649 &ctx_node); ··· 872 651 return ret; 873 652 874 653 if (dev_priv->has_mob) { 875 - struct vmw_ctx_bindinfo bi; 654 + struct vmw_ctx_bindinfo_view binding; 876 
655 877 - bi.ctx = ctx_node->res; 878 - bi.res = res_node ? res_node->res : NULL; 879 - bi.bt = vmw_ctx_binding_rt; 880 - bi.i1.rt_type = cmd->body.type; 881 - return vmw_context_binding_add(ctx_node->staged_bindings, &bi); 656 + binding.bi.ctx = ctx_node->res; 657 + binding.bi.res = res_node ? res_node->res : NULL; 658 + binding.bi.bt = vmw_ctx_binding_rt; 659 + binding.slot = cmd->body.type; 660 + vmw_binding_add(ctx_node->staged_bindings, 661 + &binding.bi, 0, binding.slot); 882 662 } 883 663 884 664 return 0; ··· 1586 1364 if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE)) 1587 1365 continue; 1588 1366 1367 + if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) { 1368 + DRM_ERROR("Illegal texture/sampler unit %u.\n", 1369 + (unsigned) cur_state->stage); 1370 + return -EINVAL; 1371 + } 1372 + 1589 1373 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1590 1374 user_surface_converter, 1591 1375 &cur_state->value, &res_node); ··· 1599 1371 return ret; 1600 1372 1601 1373 if (dev_priv->has_mob) { 1602 - struct vmw_ctx_bindinfo bi; 1374 + struct vmw_ctx_bindinfo_tex binding; 1603 1375 1604 - bi.ctx = ctx_node->res; 1605 - bi.res = res_node ? res_node->res : NULL; 1606 - bi.bt = vmw_ctx_binding_tex; 1607 - bi.i1.texture_stage = cur_state->stage; 1608 - vmw_context_binding_add(ctx_node->staged_bindings, 1609 - &bi); 1376 + binding.bi.ctx = ctx_node->res; 1377 + binding.bi.res = res_node ? res_node->res : NULL; 1378 + binding.bi.bt = vmw_ctx_binding_tex; 1379 + binding.texture_stage = cur_state->stage; 1380 + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 1381 + 0, binding.texture_stage); 1610 1382 } 1611 1383 } 1612 1384 ··· 1636 1408 return ret; 1637 1409 } 1638 1410 1411 + 1412 + /** 1413 + * vmw_cmd_res_switch_backup - Utility function to handle backup buffer 1414 + * switching 1415 + * 1416 + * @dev_priv: Pointer to a device private struct. 1417 + * @sw_context: The software context being used for this batch. 
1418 + * @val_node: The validation node representing the resource. 
1419 + * @buf_id: Pointer to the user-space backup buffer handle in the command 
1420 + * stream. 
1421 + * @backup_offset: Offset of backup into MOB. 
1422 + * 
1423 + * This function prepares for registering a switch of backup buffers 
1424 + * in the resource metadata just prior to unreserving. vmw_cmd_switch_backup 
1425 + * is a wrapper around this function with a different interface. 
1426 + */ 
1427 + static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv, 
1428 + struct vmw_sw_context *sw_context, 
1429 + struct vmw_resource_val_node *val_node, 
1430 + uint32_t *buf_id, 
1431 + unsigned long backup_offset) 
1432 + { 
1433 + struct vmw_dma_buffer *dma_buf; 
1434 + int ret; 
1435 + 
1436 + ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); 
1437 + if (ret) 
1438 + return ret; 
1439 + 
1440 + val_node->switching_backup = true; 
1441 + if (val_node->first_usage) 
1442 + val_node->no_buffer_needed = true; 
1443 + 
1444 + vmw_dmabuf_unreference(&val_node->new_backup); 
1445 + val_node->new_backup = dma_buf; 
1446 + val_node->new_backup_offset = backup_offset; 
1447 + 
1448 + return 0; 
1449 + } 
1450 + 
1451 + 
1639 1452 /** 
1640 1453 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching 
1641 1454 * 
··· 1690 1421 * @backup_offset: Offset of backup into MOB. 
1691 1422 * 
1692 1423 * This function prepares for registering a switch of backup buffers 
1693 - * in the resource metadata just prior to unreserving. 
1424 + * in the resource metadata just prior to unreserving. It's basically a wrapper 
1425 + * around vmw_cmd_res_switch_backup with a different interface. 
1694 1426 */ 1695 1427 static int vmw_cmd_switch_backup(struct vmw_private *dev_priv, 1696 1428 struct vmw_sw_context *sw_context, ··· 1702 1432 uint32_t *buf_id, 1703 1433 unsigned long backup_offset) 1704 1434 { 1705 - int ret; 1706 - struct vmw_dma_buffer *dma_buf; 1707 1435 struct vmw_resource_val_node *val_node; 1436 + int ret; 1708 1437 1709 1438 ret = vmw_cmd_res_check(dev_priv, sw_context, res_type, 1710 1439 converter, res_id, &val_node); 1711 - if (unlikely(ret != 0)) 1440 + if (ret) 1712 1441 return ret; 1713 1442 1714 - ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &dma_buf); 1715 - if (unlikely(ret != 0)) 1716 - return ret; 1717 - 1718 - if (val_node->first_usage) 1719 - val_node->no_buffer_needed = true; 1720 - 1721 - vmw_dmabuf_unreference(&val_node->new_backup); 1722 - val_node->new_backup = dma_buf; 1723 - val_node->new_backup_offset = backup_offset; 1724 - 1725 - return 0; 1443 + return vmw_cmd_res_switch_backup(dev_priv, sw_context, val_node, 1444 + buf_id, backup_offset); 1726 1445 } 1727 1446 1728 1447 /** ··· 1963 1704 if (unlikely(!dev_priv->has_mob)) 1964 1705 return 0; 1965 1706 1966 - ret = vmw_compat_shader_remove(vmw_context_res_man(val->res), 1967 - cmd->body.shid, 1968 - cmd->body.type, 1969 - &sw_context->staged_cmd_res); 1707 + ret = vmw_shader_remove(vmw_context_res_man(val->res), 1708 + cmd->body.shid, 1709 + cmd->body.type, 1710 + &sw_context->staged_cmd_res); 1970 1711 if (unlikely(ret != 0)) 1971 1712 return ret; 1972 1713 ··· 1994 1735 SVGA3dCmdSetShader body; 1995 1736 } *cmd; 1996 1737 struct vmw_resource_val_node *ctx_node, *res_node = NULL; 1997 - struct vmw_ctx_bindinfo bi; 1738 + struct vmw_ctx_bindinfo_shader binding; 1998 1739 struct vmw_resource *res = NULL; 1999 1740 int ret; 2000 1741 2001 1742 cmd = container_of(header, struct vmw_set_shader_cmd, 2002 1743 header); 1744 + 1745 + if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) { 1746 + DRM_ERROR("Illegal shader type %u.\n", 1747 + (unsigned) 
cmd->body.type); 1748 + return -EINVAL; 1749 + } 2003 1750 2004 1751 ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2005 1752 user_context_converter, &cmd->body.cid, ··· 2017 1752 return 0; 2018 1753 2019 1754 if (cmd->body.shid != SVGA3D_INVALID_ID) { 2020 - res = vmw_compat_shader_lookup 2021 - (vmw_context_res_man(ctx_node->res), 2022 - cmd->body.shid, 2023 - cmd->body.type); 1755 + res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), 1756 + cmd->body.shid, 1757 + cmd->body.type); 2024 1758 2025 1759 if (!IS_ERR(res)) { 2026 1760 ret = vmw_cmd_res_reloc_add(dev_priv, sw_context, 2027 - vmw_res_shader, 2028 1761 &cmd->body.shid, res, 2029 1762 &res_node); 2030 1763 vmw_resource_unreference(&res); ··· 2040 1777 return ret; 2041 1778 } 2042 1779 2043 - bi.ctx = ctx_node->res; 2044 - bi.res = res_node ? res_node->res : NULL; 2045 - bi.bt = vmw_ctx_binding_shader; 2046 - bi.i1.shader_type = cmd->body.type; 2047 - return vmw_context_binding_add(ctx_node->staged_bindings, &bi); 1780 + binding.bi.ctx = ctx_node->res; 1781 + binding.bi.res = res_node ? res_node->res : NULL; 1782 + binding.bi.bt = vmw_ctx_binding_shader; 1783 + binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; 1784 + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 1785 + binding.shader_slot, 0); 1786 + return 0; 2048 1787 } 2049 1788 2050 1789 /** ··· 2106 1841 user_shader_converter, 2107 1842 &cmd->body.shid, &cmd->body.mobid, 2108 1843 cmd->body.offsetInBytes); 1844 + } 1845 + 1846 + /** 1847 + * vmw_cmd_dx_set_single_constant_buffer - Validate an 1848 + * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command. 1849 + * 1850 + * @dev_priv: Pointer to a device private struct. 1851 + * @sw_context: The software context being used for this batch. 1852 + * @header: Pointer to the command header in the command stream. 
1853 + */ 1854 + static int 1855 + vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv, 1856 + struct vmw_sw_context *sw_context, 1857 + SVGA3dCmdHeader *header) 1858 + { 1859 + struct { 1860 + SVGA3dCmdHeader header; 1861 + SVGA3dCmdDXSetSingleConstantBuffer body; 1862 + } *cmd; 1863 + struct vmw_resource_val_node *res_node = NULL; 1864 + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 1865 + struct vmw_ctx_bindinfo_cb binding; 1866 + int ret; 1867 + 1868 + if (unlikely(ctx_node == NULL)) { 1869 + DRM_ERROR("DX Context not set.\n"); 1870 + return -EINVAL; 1871 + } 1872 + 1873 + cmd = container_of(header, typeof(*cmd), header); 1874 + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 1875 + user_surface_converter, 1876 + &cmd->body.sid, &res_node); 1877 + if (unlikely(ret != 0)) 1878 + return ret; 1879 + 1880 + binding.bi.ctx = ctx_node->res; 1881 + binding.bi.res = res_node ? res_node->res : NULL; 1882 + binding.bi.bt = vmw_ctx_binding_cb; 1883 + binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; 1884 + binding.offset = cmd->body.offsetInBytes; 1885 + binding.size = cmd->body.sizeInBytes; 1886 + binding.slot = cmd->body.slot; 1887 + 1888 + if (binding.shader_slot >= SVGA3D_NUM_SHADERTYPE_DX10 || 1889 + binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) { 1890 + DRM_ERROR("Illegal const buffer shader %u slot %u.\n", 1891 + (unsigned) cmd->body.type, 1892 + (unsigned) binding.slot); 1893 + return -EINVAL; 1894 + } 1895 + 1896 + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 1897 + binding.shader_slot, binding.slot); 1898 + 1899 + return 0; 1900 + } 1901 + 1902 + /** 1903 + * vmw_cmd_dx_set_shader_res - Validate an 1904 + * SVGA_3D_CMD_DX_SET_SHADER_RESOURCES command 1905 + * 1906 + * @dev_priv: Pointer to a device private struct. 1907 + * @sw_context: The software context being used for this batch. 1908 + * @header: Pointer to the command header in the command stream. 
1909 + */ 1910 + static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv, 1911 + struct vmw_sw_context *sw_context, 1912 + SVGA3dCmdHeader *header) 1913 + { 1914 + struct { 1915 + SVGA3dCmdHeader header; 1916 + SVGA3dCmdDXSetShaderResources body; 1917 + } *cmd = container_of(header, typeof(*cmd), header); 1918 + u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) / 1919 + sizeof(SVGA3dShaderResourceViewId); 1920 + 1921 + if ((u64) cmd->body.startView + (u64) num_sr_view > 1922 + (u64) SVGA3D_DX_MAX_SRVIEWS || 1923 + cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { 1924 + DRM_ERROR("Invalid shader binding.\n"); 1925 + return -EINVAL; 1926 + } 1927 + 1928 + return vmw_view_bindings_add(sw_context, vmw_view_sr, 1929 + vmw_ctx_binding_sr, 1930 + cmd->body.type - SVGA3D_SHADERTYPE_MIN, 1931 + (void *) &cmd[1], num_sr_view, 1932 + cmd->body.startView); 1933 + } 1934 + 1935 + /** 1936 + * vmw_cmd_dx_set_shader - Validate an SVGA_3D_CMD_DX_SET_SHADER 1937 + * command 1938 + * 1939 + * @dev_priv: Pointer to a device private struct. 1940 + * @sw_context: The software context being used for this batch. 1941 + * @header: Pointer to the command header in the command stream. 
1942 + */ 1943 + static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv, 1944 + struct vmw_sw_context *sw_context, 1945 + SVGA3dCmdHeader *header) 1946 + { 1947 + struct { 1948 + SVGA3dCmdHeader header; 1949 + SVGA3dCmdDXSetShader body; 1950 + } *cmd; 1951 + struct vmw_resource *res = NULL; 1952 + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 1953 + struct vmw_ctx_bindinfo_shader binding; 1954 + int ret = 0; 1955 + 1956 + if (unlikely(ctx_node == NULL)) { 1957 + DRM_ERROR("DX Context not set.\n"); 1958 + return -EINVAL; 1959 + } 1960 + 1961 + cmd = container_of(header, typeof(*cmd), header); 1962 + 1963 + if (cmd->body.type >= SVGA3D_SHADERTYPE_DX10_MAX) { 1964 + DRM_ERROR("Illegal shader type %u.\n", 1965 + (unsigned) cmd->body.type); 1966 + return -EINVAL; 1967 + } 1968 + 1969 + if (cmd->body.shaderId != SVGA3D_INVALID_ID) { 1970 + res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0); 1971 + if (IS_ERR(res)) { 1972 + DRM_ERROR("Could not find shader for binding.\n"); 1973 + return PTR_ERR(res); 1974 + } 1975 + 1976 + ret = vmw_resource_val_add(sw_context, res, NULL); 1977 + if (ret) 1978 + goto out_unref; 1979 + } 1980 + 1981 + binding.bi.ctx = ctx_node->res; 1982 + binding.bi.res = res; 1983 + binding.bi.bt = vmw_ctx_binding_dx_shader; 1984 + binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN; 1985 + 1986 + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 1987 + binding.shader_slot, 0); 1988 + out_unref: 1989 + if (res) 1990 + vmw_resource_unreference(&res); 1991 + 1992 + return ret; 1993 + } 1994 + 1995 + /** 1996 + * vmw_cmd_dx_set_vertex_buffers - Validates an 1997 + * SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS command 1998 + * 1999 + * @dev_priv: Pointer to a device private struct. 2000 + * @sw_context: The software context being used for this batch. 2001 + * @header: Pointer to the command header in the command stream. 
2002 + */ 
2003 + static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv, 
2004 + struct vmw_sw_context *sw_context, 
2005 + SVGA3dCmdHeader *header) 
2006 + { 
2007 + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 
2008 + struct vmw_ctx_bindinfo_vb binding; 
2009 + struct vmw_resource_val_node *res_node; 
2010 + struct { 
2011 + SVGA3dCmdHeader header; 
2012 + SVGA3dCmdDXSetVertexBuffers body; 
2013 + SVGA3dVertexBuffer buf[]; 
2014 + } *cmd; 
2015 + int i, ret, num; 
2016 + 
2017 + if (unlikely(ctx_node == NULL)) { 
2018 + DRM_ERROR("DX Context not set.\n"); 
2019 + return -EINVAL; 
2020 + } 
2021 + 
2022 + cmd = container_of(header, typeof(*cmd), header); 
2023 + num = (cmd->header.size - sizeof(cmd->body)) / 
2024 + sizeof(SVGA3dVertexBuffer); 
2025 + if ((u64)num + (u64)cmd->body.startBuffer > 
2026 + (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) { 
2027 + DRM_ERROR("Invalid number of vertex buffers.\n"); 
2028 + return -EINVAL; 
2029 + } 
2030 + 
2031 + for (i = 0; i < num; i++) { 
2032 + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 
2033 + user_surface_converter, 
2034 + &cmd->buf[i].sid, &res_node); 
2035 + if (unlikely(ret != 0)) 
2036 + return ret; 
2037 + 
2038 + binding.bi.ctx = ctx_node->res; 
2039 + binding.bi.bt = vmw_ctx_binding_vb; 
2040 + binding.bi.res = ((res_node) ? res_node->res : NULL); 
2041 + binding.offset = cmd->buf[i].offset; 
2042 + binding.stride = cmd->buf[i].stride; 
2043 + binding.slot = i + cmd->body.startBuffer; 
2044 + 
2045 + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 
2046 + 0, binding.slot); 
2047 + } 
2048 + 
2049 + return 0; 
2050 + } 
2051 + 
2052 + /** 
2053 + * vmw_cmd_dx_set_index_buffer - Validate an 
2054 + * SVGA_3D_CMD_DX_SET_INDEX_BUFFER command. 
2055 + * 
2056 + * @dev_priv: Pointer to a device private struct. 
2057 + * @sw_context: The software context being used for this batch. 
2058 + * @header: Pointer to the command header in the command stream. 
2059 + */ 
2060 + static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv, 
2061 + struct vmw_sw_context *sw_context, 
2062 + SVGA3dCmdHeader *header) 
2063 + { 
2064 + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 
2065 + struct vmw_ctx_bindinfo_ib binding; 
2066 + struct vmw_resource_val_node *res_node; 
2067 + struct { 
2068 + SVGA3dCmdHeader header; 
2069 + SVGA3dCmdDXSetIndexBuffer body; 
2070 + } *cmd; 
2071 + int ret; 
2072 + 
2073 + if (unlikely(ctx_node == NULL)) { 
2074 + DRM_ERROR("DX Context not set.\n"); 
2075 + return -EINVAL; 
2076 + } 
2077 + 
2078 + cmd = container_of(header, typeof(*cmd), header); 
2079 + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 
2080 + user_surface_converter, 
2081 + &cmd->body.sid, &res_node); 
2082 + if (unlikely(ret != 0)) 
2083 + return ret; 
2084 + 
2085 + binding.bi.ctx = ctx_node->res; 
2086 + binding.bi.res = ((res_node) ? res_node->res : NULL); 
2087 + binding.bi.bt = vmw_ctx_binding_ib; 
2088 + binding.offset = cmd->body.offset; 
2089 + binding.format = cmd->body.format; 
2090 + 
2091 + vmw_binding_add(ctx_node->staged_bindings, &binding.bi, 0, 0); 
2092 + 
2093 + return 0; 
2094 + } 
2095 + 
2096 + /** 
2097 + * vmw_cmd_dx_set_rendertargets - Validate an 
2098 + * SVGA_3D_CMD_DX_SET_RENDERTARGETS command 
2099 + * 
2100 + * @dev_priv: Pointer to a device private struct. 
2101 + * @sw_context: The software context being used for this batch. 
2102 + * @header: Pointer to the command header in the command stream. 
2103 + */ 
2104 + static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv, 
2105 + struct vmw_sw_context *sw_context, 
2106 + SVGA3dCmdHeader *header) 
2107 + { 
2108 + struct { 
2109 + SVGA3dCmdHeader header; 
2110 + SVGA3dCmdDXSetRenderTargets body; 
2111 + } *cmd = container_of(header, typeof(*cmd), header); 
2112 + int ret; 
2113 + u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) / 
2114 + sizeof(SVGA3dRenderTargetViewId); 
2115 + 
2116 + if (num_rt_view > SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS) { 
2117 + DRM_ERROR("Invalid DX Rendertarget binding.\n"); 
2118 + return -EINVAL; 
2119 + } 
2120 + 
2121 + ret = vmw_view_bindings_add(sw_context, vmw_view_ds, 
2122 + vmw_ctx_binding_ds, 0, 
2123 + &cmd->body.depthStencilViewId, 1, 0); 
2124 + if (ret) 
2125 + return ret; 
2126 + 
2127 + return vmw_view_bindings_add(sw_context, vmw_view_rt, 
2128 + vmw_ctx_binding_dx_rt, 0, 
2129 + (void *)&cmd[1], num_rt_view, 0); 
2130 + } 
2131 + 
2132 + /** 
2133 + * vmw_cmd_dx_clear_rendertarget_view - Validate an 
2134 + * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command 
2135 + * 
2136 + * @dev_priv: Pointer to a device private struct. 
2137 + * @sw_context: The software context being used for this batch. 
2138 + * @header: Pointer to the command header in the command stream. 
2139 + */ 
2140 + static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv, 
2141 + struct vmw_sw_context *sw_context, 
2142 + SVGA3dCmdHeader *header) 
2143 + { 
2144 + struct { 
2145 + SVGA3dCmdHeader header; 
2146 + SVGA3dCmdDXClearRenderTargetView body; 
2147 + } *cmd = container_of(header, typeof(*cmd), header); 
2148 + 
2149 + return vmw_view_id_val_add(sw_context, vmw_view_rt, 
2150 + cmd->body.renderTargetViewId); 
2151 + } 
2152 + 
2153 + /** 
2154 + * vmw_cmd_dx_clear_depthstencil_view - Validate an 
2155 + * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command 
2156 + * 
2157 + * @dev_priv: Pointer to a device private struct. 
2158 + * @sw_context: The software context being used for this batch. 
2159 + * @header: Pointer to the command header in the command stream. 2160 + */ 2161 + static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv, 2162 + struct vmw_sw_context *sw_context, 2163 + SVGA3dCmdHeader *header) 2164 + { 2165 + struct { 2166 + SVGA3dCmdHeader header; 2167 + SVGA3dCmdDXClearDepthStencilView body; 2168 + } *cmd = container_of(header, typeof(*cmd), header); 2169 + 2170 + return vmw_view_id_val_add(sw_context, vmw_view_ds, 2171 + cmd->body.depthStencilViewId); 2172 + } 2173 + 2174 + static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv, 2175 + struct vmw_sw_context *sw_context, 2176 + SVGA3dCmdHeader *header) 2177 + { 2178 + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2179 + struct vmw_resource_val_node *srf_node; 2180 + struct vmw_resource *res; 2181 + enum vmw_view_type view_type; 2182 + int ret; 2183 + /* 2184 + * This is based on the fact that all affected define commands have 2185 + * the same initial command body layout. 
2186 + */ 2187 + struct { 2188 + SVGA3dCmdHeader header; 2189 + uint32 defined_id; 2190 + uint32 sid; 2191 + } *cmd; 2192 + 2193 + if (unlikely(ctx_node == NULL)) { 2194 + DRM_ERROR("DX Context not set.\n"); 2195 + return -EINVAL; 2196 + } 2197 + 2198 + view_type = vmw_view_cmd_to_type(header->id); 2199 + cmd = container_of(header, typeof(*cmd), header); 2200 + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2201 + user_surface_converter, 2202 + &cmd->sid, &srf_node); 2203 + if (unlikely(ret != 0)) 2204 + return ret; 2205 + 2206 + res = vmw_context_cotable(ctx_node->res, vmw_view_cotables[view_type]); 2207 + ret = vmw_cotable_notify(res, cmd->defined_id); 2208 + vmw_resource_unreference(&res); 2209 + if (unlikely(ret != 0)) 2210 + return ret; 2211 + 2212 + return vmw_view_add(sw_context->man, 2213 + ctx_node->res, 2214 + srf_node->res, 2215 + view_type, 2216 + cmd->defined_id, 2217 + header, 2218 + header->size + sizeof(*header), 2219 + &sw_context->staged_cmd_res); 2220 + } 2221 + 2222 + static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv, 2223 + struct vmw_sw_context *sw_context, 2224 + SVGA3dCmdHeader *header) 2225 + { 2226 + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2227 + struct vmw_resource *res; 2228 + /* 2229 + * This is based on the fact that all affected define commands have 2230 + * the same initial command body layout. 
2231 + */ 2232 + struct { 2233 + SVGA3dCmdHeader header; 2234 + uint32 defined_id; 2235 + } *cmd; 2236 + enum vmw_so_type so_type; 2237 + int ret; 2238 + 2239 + if (unlikely(ctx_node == NULL)) { 2240 + DRM_ERROR("DX Context not set.\n"); 2241 + return -EINVAL; 2242 + } 2243 + 2244 + so_type = vmw_so_cmd_to_type(header->id); 2245 + res = vmw_context_cotable(ctx_node->res, vmw_so_cotables[so_type]); 2246 + cmd = container_of(header, typeof(*cmd), header); 2247 + ret = vmw_cotable_notify(res, cmd->defined_id); 2248 + vmw_resource_unreference(&res); 2249 + 2250 + return ret; 2251 + } 2252 + 2253 + /** 2254 + * vmw_cmd_dx_check_subresource - Validate an 2255 + * SVGA_3D_CMD_DX_[X]_SUBRESOURCE command 2256 + * 2257 + * @dev_priv: Pointer to a device private struct. 2258 + * @sw_context: The software context being used for this batch. 2259 + * @header: Pointer to the command header in the command stream. 2260 + */ 2261 + static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv, 2262 + struct vmw_sw_context *sw_context, 2263 + SVGA3dCmdHeader *header) 2264 + { 2265 + struct { 2266 + SVGA3dCmdHeader header; 2267 + union { 2268 + SVGA3dCmdDXReadbackSubResource r_body; 2269 + SVGA3dCmdDXInvalidateSubResource i_body; 2270 + SVGA3dCmdDXUpdateSubResource u_body; 2271 + SVGA3dSurfaceId sid; 2272 + }; 2273 + } *cmd; 2274 + 2275 + BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) != 2276 + offsetof(typeof(*cmd), sid)); 2277 + BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) != 2278 + offsetof(typeof(*cmd), sid)); 2279 + BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) != 2280 + offsetof(typeof(*cmd), sid)); 2281 + 2282 + cmd = container_of(header, typeof(*cmd), header); 2283 + 2284 + return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface, 2285 + user_surface_converter, 2286 + &cmd->sid, NULL); 2287 + } 2288 + 2289 + static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv, 2290 + struct vmw_sw_context *sw_context, 2291 + SVGA3dCmdHeader *header) 2292 + { 2293 + 
struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2294 + 2295 + if (unlikely(ctx_node == NULL)) { 2296 + DRM_ERROR("DX Context not set.\n"); 2297 + return -EINVAL; 2298 + } 2299 + 2300 + return 0; 2301 + } 2302 + 2303 + /** 2304 + * vmw_cmd_dx_view_remove - validate a view remove command and 2305 + * schedule the view resource for removal. 2306 + * 2307 + * @dev_priv: Pointer to a device private struct. 2308 + * @sw_context: The software context being used for this batch. 2309 + * @header: Pointer to the command header in the command stream. 2310 + * 2311 + * Check that the view exists, and if it was not created using this 2312 + * command batch, make sure it's validated (present in the device) so that 2313 + * the remove command will not confuse the device. 2314 + */ 2315 + static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv, 2316 + struct vmw_sw_context *sw_context, 2317 + SVGA3dCmdHeader *header) 2318 + { 2319 + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2320 + struct { 2321 + SVGA3dCmdHeader header; 2322 + union vmw_view_destroy body; 2323 + } *cmd = container_of(header, typeof(*cmd), header); 2324 + enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id); 2325 + struct vmw_resource *view; 2326 + int ret; 2327 + 2328 + if (!ctx_node) { 2329 + DRM_ERROR("DX Context not set.\n"); 2330 + return -EINVAL; 2331 + } 2332 + 2333 + ret = vmw_view_remove(sw_context->man, 2334 + cmd->body.view_id, view_type, 2335 + &sw_context->staged_cmd_res, 2336 + &view); 2337 + if (ret || !view) 2338 + return ret; 2339 + 2340 + /* 2341 + * Add view to the validate list iff it was not created using this 2342 + * command batch. 2343 + */ 2344 + return vmw_view_res_val_add(sw_context, view); 2345 + } 2346 + 2347 + /** 2348 + * vmw_cmd_dx_define_shader - Validate an SVGA_3D_CMD_DX_DEFINE_SHADER 2349 + * command 2350 + * 2351 + * @dev_priv: Pointer to a device private struct. 
2352 + * @sw_context: The software context being used for this batch. 2353 + * @header: Pointer to the command header in the command stream. 2354 + */ 2355 + static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv, 2356 + struct vmw_sw_context *sw_context, 2357 + SVGA3dCmdHeader *header) 2358 + { 2359 + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2360 + struct vmw_resource *res; 2361 + struct { 2362 + SVGA3dCmdHeader header; 2363 + SVGA3dCmdDXDefineShader body; 2364 + } *cmd = container_of(header, typeof(*cmd), header); 2365 + int ret; 2366 + 2367 + if (!ctx_node) { 2368 + DRM_ERROR("DX Context not set.\n"); 2369 + return -EINVAL; 2370 + } 2371 + 2372 + res = vmw_context_cotable(ctx_node->res, SVGA_COTABLE_DXSHADER); 2373 + ret = vmw_cotable_notify(res, cmd->body.shaderId); 2374 + vmw_resource_unreference(&res); 2375 + if (ret) 2376 + return ret; 2377 + 2378 + return vmw_dx_shader_add(sw_context->man, ctx_node->res, 2379 + cmd->body.shaderId, cmd->body.type, 2380 + &sw_context->staged_cmd_res); 2381 + } 2382 + 2383 + /** 2384 + * vmw_cmd_dx_destroy_shader - Validate an SVGA_3D_CMD_DX_DESTROY_SHADER 2385 + * command 2386 + * 2387 + * @dev_priv: Pointer to a device private struct. 2388 + * @sw_context: The software context being used for this batch. 2389 + * @header: Pointer to the command header in the command stream. 
2390 + */ 2391 + static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv, 2392 + struct vmw_sw_context *sw_context, 2393 + SVGA3dCmdHeader *header) 2394 + { 2395 + struct vmw_resource_val_node *ctx_node = sw_context->dx_ctx_node; 2396 + struct { 2397 + SVGA3dCmdHeader header; 2398 + SVGA3dCmdDXDestroyShader body; 2399 + } *cmd = container_of(header, typeof(*cmd), header); 2400 + int ret; 2401 + 2402 + if (!ctx_node) { 2403 + DRM_ERROR("DX Context not set.\n"); 2404 + return -EINVAL; 2405 + } 2406 + 2407 + ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0, 2408 + &sw_context->staged_cmd_res); 2409 + if (ret) 2410 + DRM_ERROR("Could not find shader to remove.\n"); 2411 + 2412 + return ret; 2413 + } 2414 + 2415 + /** 2416 + * vmw_cmd_dx_bind_shader - Validate an SVGA_3D_CMD_DX_BIND_SHADER 2417 + * command 2418 + * 2419 + * @dev_priv: Pointer to a device private struct. 2420 + * @sw_context: The software context being used for this batch. 2421 + * @header: Pointer to the command header in the command stream. 
2422 + */ 2423 + static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv, 2424 + struct vmw_sw_context *sw_context, 2425 + SVGA3dCmdHeader *header) 2426 + { 2427 + struct vmw_resource_val_node *ctx_node; 2428 + struct vmw_resource_val_node *res_node; 2429 + struct vmw_resource *res; 2430 + struct { 2431 + SVGA3dCmdHeader header; 2432 + SVGA3dCmdDXBindShader body; 2433 + } *cmd = container_of(header, typeof(*cmd), header); 2434 + int ret; 2435 + 2436 + if (cmd->body.cid != SVGA3D_INVALID_ID) { 2437 + ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context, 2438 + user_context_converter, 2439 + &cmd->body.cid, &ctx_node); 2440 + if (ret) 2441 + return ret; 2442 + } else { 2443 + ctx_node = sw_context->dx_ctx_node; 2444 + if (!ctx_node) { 2445 + DRM_ERROR("DX Context not set.\n"); 2446 + return -EINVAL; 2447 + } 2448 + } 2449 + 2450 + res = vmw_shader_lookup(vmw_context_res_man(ctx_node->res), 2451 + cmd->body.shid, 0); 2452 + if (IS_ERR(res)) { 2453 + DRM_ERROR("Could not find shader to bind.\n"); 2454 + return PTR_ERR(res); 2455 + } 2456 + 2457 + ret = vmw_resource_val_add(sw_context, res, &res_node); 2458 + if (ret) { 2459 + DRM_ERROR("Error creating resource validation node.\n"); 2460 + goto out_unref; 2461 + } 2462 + 2463 + 2464 + ret = vmw_cmd_res_switch_backup(dev_priv, sw_context, res_node, 2465 + &cmd->body.mobid, 2466 + cmd->body.offsetInBytes); 2467 + out_unref: 2468 + vmw_resource_unreference(&res); 2469 + 2470 + return ret; 2109 2471 } 2110 2472 2111 2473 static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv, ··· 2942 2050 VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid, 2943 2051 false, false, true), 2944 2052 VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check, 2945 - true, false, true) 2053 + true, false, true), 2054 + VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid, 2055 + false, false, true), 2056 + VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid, 2057 + false, 
false, true), 2058 + VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid, 2059 + false, false, true), 2060 + VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid, 2061 + false, false, true), 2062 + 2063 + /* 2064 + * DX commands 2065 + */ 2066 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid, 2067 + false, false, true), 2068 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid, 2069 + false, false, true), 2070 + VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid, 2071 + false, false, true), 2072 + VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid, 2073 + false, false, true), 2074 + VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid, 2075 + false, false, true), 2076 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER, 2077 + &vmw_cmd_dx_set_single_constant_buffer, true, false, true), 2078 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES, 2079 + &vmw_cmd_dx_set_shader_res, true, false, true), 2080 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader, 2081 + true, false, true), 2082 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_invalid, 2083 + true, false, true), 2084 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED, &vmw_cmd_invalid, 2085 + true, false, true), 2086 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_invalid, 2087 + true, false, true), 2088 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS, 2089 + &vmw_cmd_dx_set_vertex_buffers, true, false, true), 2090 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER, 2091 + &vmw_cmd_dx_set_index_buffer, true, false, true), 2092 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS, 2093 + &vmw_cmd_dx_set_rendertargets, true, false, true), 2094 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check, 2095 + true, false, true), 2096 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE, &vmw_cmd_dx_cid_check, 2097 + true, false, true), 2098 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE, 2099 + &vmw_cmd_dx_cid_check, 2100 + 
true, false, true), 2101 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_invalid, 2102 + true, false, true), 2103 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_invalid, 2104 + true, false, true), 2105 + VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_invalid, 2106 + true, false, true), 2107 + VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_invalid, 2108 + true, false, true), 2109 + VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_invalid, 2110 + true, false, true), 2111 + VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid, 2112 + true, false, true), 2113 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid, 2114 + true, false, true), 2115 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check, 2116 + true, false, true), 2117 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check, 2118 + true, false, true), 2119 + VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW, 2120 + &vmw_cmd_dx_clear_rendertarget_view, true, false, true), 2121 + VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW, 2122 + &vmw_cmd_dx_clear_depthstencil_view, true, false, true), 2123 + VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION, &vmw_cmd_invalid, 2124 + true, false, true), 2125 + VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid, 2126 + true, false, true), 2127 + VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_invalid, 2128 + true, false, true), 2129 + VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE, 2130 + &vmw_cmd_dx_check_subresource, true, false, true), 2131 + VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE, 2132 + &vmw_cmd_dx_check_subresource, true, false, true), 2133 + VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE, 2134 + &vmw_cmd_dx_check_subresource, true, false, true), 2135 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW, 2136 + &vmw_cmd_dx_view_define, true, false, true), 2137 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW, 2138 + &vmw_cmd_dx_view_remove, true, false, true), 2139 + 
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW, 2140 + &vmw_cmd_dx_view_define, true, false, true), 2141 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW, 2142 + &vmw_cmd_dx_view_remove, true, false, true), 2143 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW, 2144 + &vmw_cmd_dx_view_define, true, false, true), 2145 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW, 2146 + &vmw_cmd_dx_view_remove, true, false, true), 2147 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT, 2148 + &vmw_cmd_dx_so_define, true, false, true), 2149 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT, 2150 + &vmw_cmd_dx_cid_check, true, false, true), 2151 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE, 2152 + &vmw_cmd_dx_so_define, true, false, true), 2153 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE, 2154 + &vmw_cmd_dx_cid_check, true, false, true), 2155 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE, 2156 + &vmw_cmd_dx_so_define, true, false, true), 2157 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE, 2158 + &vmw_cmd_dx_cid_check, true, false, true), 2159 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE, 2160 + &vmw_cmd_dx_so_define, true, false, true), 2161 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE, 2162 + &vmw_cmd_dx_cid_check, true, false, true), 2163 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE, 2164 + &vmw_cmd_dx_so_define, true, false, true), 2165 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE, 2166 + &vmw_cmd_dx_cid_check, true, false, true), 2167 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER, 2168 + &vmw_cmd_dx_define_shader, true, false, true), 2169 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER, 2170 + &vmw_cmd_dx_destroy_shader, true, false, true), 2171 + VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER, 2172 + &vmw_cmd_dx_bind_shader, true, false, true), 2173 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT, 2174 + &vmw_cmd_dx_so_define, true, false, true), 2175 + VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT, 2176 + 
&vmw_cmd_dx_cid_check, true, false, true), 2177 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT, &vmw_cmd_invalid, 2178 + true, false, true), 2179 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT, 2180 + &vmw_cmd_dx_cid_check, true, false, true), 2181 + VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY, 2182 + &vmw_cmd_dx_cid_check, true, false, true), 2946 2183 }; 2947 2184 2948 2185 static int vmw_cmd_check(struct vmw_private *dev_priv, ··· 3204 2183 * 3205 2184 * @list: The resource list. 3206 2185 */ 3207 - static void vmw_resource_list_unreference(struct list_head *list) 2186 + static void vmw_resource_list_unreference(struct vmw_sw_context *sw_context, 2187 + struct list_head *list) 3208 2188 { 3209 2189 struct vmw_resource_val_node *val, *val_next; 3210 2190 ··· 3216 2194 list_for_each_entry_safe(val, val_next, list, head) { 3217 2195 list_del_init(&val->head); 3218 2196 vmw_resource_unreference(&val->res); 3219 - if (unlikely(val->staged_bindings)) 3220 - kfree(val->staged_bindings); 2197 + 2198 + if (val->staged_bindings) { 2199 + if (val->staged_bindings != sw_context->staged_bindings) 2200 + vmw_binding_state_free(val->staged_bindings); 2201 + else 2202 + sw_context->staged_bindings_inuse = false; 2203 + val->staged_bindings = NULL; 2204 + } 2205 + 3221 2206 kfree(val); 3222 2207 } 3223 2208 } ··· 3460 2431 u32 command_size, 3461 2432 struct vmw_sw_context *sw_context) 3462 2433 { 3463 - void *cmd = vmw_fifo_reserve(dev_priv, command_size); 2434 + void *cmd; 3464 2435 2436 + if (sw_context->dx_ctx_node) 2437 + cmd = vmw_fifo_reserve_dx(dev_priv, command_size, 2438 + sw_context->dx_ctx_node->res->id); 2439 + else 2440 + cmd = vmw_fifo_reserve(dev_priv, command_size); 3465 2441 if (!cmd) { 3466 2442 DRM_ERROR("Failed reserving fifo space for commands.\n"); 3467 2443 return -ENOMEM; ··· 3498 2464 u32 command_size, 3499 2465 struct vmw_sw_context *sw_context) 3500 2466 { 2467 + u32 id = ((sw_context->dx_ctx_node) ? 
sw_context->dx_ctx_node->res->id : 2468 + SVGA3D_INVALID_ID); 3501 2469 void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, 3502 - SVGA3D_INVALID_ID, false, header); 2470 + id, false, header); 3503 2471 3504 2472 vmw_apply_relocations(sw_context); 3505 2473 vmw_resource_relocations_apply(cmd, &sw_context->res_relocations); ··· 3571 2535 return kernel_commands; 3572 2536 } 3573 2537 2538 + static int vmw_execbuf_tie_context(struct vmw_private *dev_priv, 2539 + struct vmw_sw_context *sw_context, 2540 + uint32_t handle) 2541 + { 2542 + struct vmw_resource_val_node *ctx_node; 2543 + struct vmw_resource *res; 2544 + int ret; 2545 + 2546 + if (handle == SVGA3D_INVALID_ID) 2547 + return 0; 2548 + 2549 + ret = vmw_user_resource_lookup_handle(dev_priv, sw_context->fp->tfile, 2550 + handle, user_context_converter, 2551 + &res); 2552 + if (unlikely(ret != 0)) { 2553 + DRM_ERROR("Could not find or user DX context 0x%08x.\n", 2554 + (unsigned) handle); 2555 + return ret; 2556 + } 2557 + 2558 + ret = vmw_resource_val_add(sw_context, res, &ctx_node); 2559 + if (unlikely(ret != 0)) 2560 + goto out_err; 2561 + 2562 + sw_context->dx_ctx_node = ctx_node; 2563 + sw_context->man = vmw_context_res_man(res); 2564 + out_err: 2565 + vmw_resource_unreference(&res); 2566 + return ret; 2567 + } 2568 + 3574 2569 int vmw_execbuf_process(struct drm_file *file_priv, 3575 2570 struct vmw_private *dev_priv, 3576 2571 void __user *user_commands, 3577 2572 void *kernel_commands, 3578 2573 uint32_t command_size, 3579 2574 uint64_t throttle_us, 2575 + uint32_t dx_context_handle, 3580 2576 struct drm_vmw_fence_rep __user *user_fence_rep, 3581 2577 struct vmw_fence_obj **out_fence) 3582 2578 { ··· 3664 2596 sw_context->cur_reloc = 0; 3665 2597 sw_context->cur_val_buf = 0; 3666 2598 INIT_LIST_HEAD(&sw_context->resource_list); 2599 + INIT_LIST_HEAD(&sw_context->ctx_resource_list); 3667 2600 sw_context->cur_query_bo = dev_priv->pinned_bo; 3668 2601 sw_context->last_query_ctx = NULL; 3669 2602 
sw_context->needs_post_query_barrier = false; 2603 + sw_context->dx_ctx_node = NULL; 3670 2604 memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache)); 3671 2605 INIT_LIST_HEAD(&sw_context->validate_nodes); 3672 2606 INIT_LIST_HEAD(&sw_context->res_relocations); 2607 + if (sw_context->staged_bindings) 2608 + vmw_binding_state_reset(sw_context->staged_bindings); 2609 + 3673 2610 if (!sw_context->res_ht_initialized) { 3674 2611 ret = drm_ht_create(&sw_context->res_ht, VMW_RES_HT_ORDER); 3675 2612 if (unlikely(ret != 0)) ··· 3683 2610 } 3684 2611 INIT_LIST_HEAD(&sw_context->staged_cmd_res); 3685 2612 INIT_LIST_HEAD(&resource_list); 2613 + ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle); 2614 + if (unlikely(ret != 0)) { 2615 + list_splice_init(&sw_context->ctx_resource_list, 2616 + &sw_context->resource_list); 2617 + goto out_err_nores; 2618 + } 2619 + 3686 2620 ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands, 3687 2621 command_size); 3688 2622 if (unlikely(ret != 0)) 3689 2623 goto out_err_nores; 3690 2624 2625 + list_splice_init(&sw_context->ctx_resource_list, 2626 + &sw_context->resource_list); 3691 2627 ret = vmw_resources_reserve(sw_context); 3692 2628 if (unlikely(ret != 0)) 3693 2629 goto out_err_nores; ··· 3704 2622 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, 3705 2623 true, NULL); 3706 2624 if (unlikely(ret != 0)) 3707 - goto out_err; 2625 + goto out_err_nores; 3708 2626 3709 2627 ret = vmw_validate_buffers(dev_priv, sw_context); 3710 2628 if (unlikely(ret != 0)) ··· 3734 2652 sw_context); 3735 2653 header = NULL; 3736 2654 } 2655 + mutex_unlock(&dev_priv->binding_mutex); 3737 2656 if (ret) 3738 - goto out_unlock_binding; 2657 + goto out_err; 3739 2658 3740 2659 vmw_query_bo_switch_commit(dev_priv, sw_context); 3741 2660 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, ··· 3751 2668 if (ret != 0) 3752 2669 DRM_ERROR("Fence submission error. 
Syncing.\n"); 3753 2670 3754 - vmw_resource_list_unreserve(&sw_context->resource_list, false); 3755 - mutex_unlock(&dev_priv->binding_mutex); 2671 + vmw_resource_list_unreserve(sw_context, &sw_context->resource_list, 2672 + false); 3756 2673 3757 2674 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, 3758 2675 (void *) fence); ··· 3781 2698 * Unreference resources outside of the cmdbuf_mutex to 3782 2699 * avoid deadlocks in resource destruction paths. 3783 2700 */ 3784 - vmw_resource_list_unreference(&resource_list); 2701 + vmw_resource_list_unreference(sw_context, &resource_list); 3785 2702 3786 2703 return 0; 3787 2704 ··· 3790 2707 out_err: 3791 2708 ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes); 3792 2709 out_err_nores: 3793 - vmw_resource_list_unreserve(&sw_context->resource_list, true); 2710 + vmw_resource_list_unreserve(sw_context, &sw_context->resource_list, 2711 + true); 3794 2712 vmw_resource_relocations_free(&sw_context->res_relocations); 3795 2713 vmw_free_relocations(sw_context); 3796 2714 vmw_clear_validations(sw_context); ··· 3809 2725 * Unreference resources outside of the cmdbuf_mutex to 3810 2726 * avoid deadlocks in resource destruction paths. 
3811 2727 */ 3812 - vmw_resource_list_unreference(&resource_list); 2728 + vmw_resource_list_unreference(sw_context, &resource_list); 3813 2729 if (unlikely(error_resource != NULL)) 3814 2730 vmw_resource_unreference(&error_resource); 3815 2731 out_free_header: ··· 3961 2877 mutex_unlock(&dev_priv->cmdbuf_mutex); 3962 2878 } 3963 2879 3964 - 3965 - int vmw_execbuf_ioctl(struct drm_device *dev, void *data, 3966 - struct drm_file *file_priv) 2880 + int vmw_execbuf_ioctl(struct drm_device *dev, unsigned long data, 2881 + struct drm_file *file_priv, size_t size) 3967 2882 { 3968 2883 struct vmw_private *dev_priv = vmw_priv(dev); 3969 - struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data; 2884 + struct drm_vmw_execbuf_arg arg; 3970 2885 int ret; 2886 + static const size_t copy_offset[] = { 2887 + offsetof(struct drm_vmw_execbuf_arg, context_handle), 2888 + sizeof(struct drm_vmw_execbuf_arg)}; 2889 + 2890 + if (unlikely(size < copy_offset[0])) { 2891 + DRM_ERROR("Invalid command size, ioctl %d\n", 2892 + DRM_VMW_EXECBUF); 2893 + return -EINVAL; 2894 + } 2895 + 2896 + if (copy_from_user(&arg, (void __user *) data, copy_offset[0]) != 0) 2897 + return -EFAULT; 3971 2898 3972 2899 /* 3973 - * This will allow us to extend the ioctl argument while 2900 + * Extend the ioctl argument while 3974 2901 * maintaining backwards compatibility: 3975 2902 * We take different code paths depending on the value of 3976 - * arg->version. 2903 + * arg.version. 
3977 2904 */ 3978 2905 3979 - if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) { 2906 + if (unlikely(arg.version > DRM_VMW_EXECBUF_VERSION || 2907 + arg.version == 0)) { 3980 2908 DRM_ERROR("Incorrect execbuf version.\n"); 3981 - DRM_ERROR("You're running outdated experimental " 3982 - "vmwgfx user-space drivers."); 3983 2909 return -EINVAL; 2910 + } 2911 + 2912 + if (arg.version > 1 && 2913 + copy_from_user(&arg.context_handle, 2914 + (void __user *) (data + copy_offset[0]), 2915 + copy_offset[arg.version - 1] - 2916 + copy_offset[0]) != 0) 2917 + return -EFAULT; 2918 + 2919 + switch (arg.version) { 2920 + case 1: 2921 + arg.context_handle = (uint32_t) -1; 2922 + break; 2923 + case 2: 2924 + if (arg.pad64 != 0) { 2925 + DRM_ERROR("Unused IOCTL data not set to zero.\n"); 2926 + return -EINVAL; 2927 + } 2928 + break; 2929 + default: 2930 + break; 3984 2931 } 3985 2932 3986 2933 ret = ttm_read_lock(&dev_priv->reservation_sem, true); ··· 4019 2904 return ret; 4020 2905 4021 2906 ret = vmw_execbuf_process(file_priv, dev_priv, 4022 - (void __user *)(unsigned long)arg->commands, 4023 - NULL, arg->command_size, arg->throttle_us, 4024 - (void __user *)(unsigned long)arg->fence_rep, 2907 + (void __user *)(unsigned long)arg.commands, 2908 + NULL, arg.command_size, arg.throttle_us, 2909 + arg.context_handle, 2910 + (void __user *)(unsigned long)arg.fence_rep, 4025 2911 NULL); 4026 2912 ttm_read_unlock(&dev_priv->reservation_sem); 4027 2913 if (unlikely(ret != 0))
+24 -4
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
··· 29 29 #include <drm/drmP.h> 30 30 #include <drm/ttm/ttm_placement.h> 31 31 32 + struct vmw_temp_set_context { 33 + SVGA3dCmdHeader header; 34 + SVGA3dCmdDXTempSetContext body; 35 + }; 36 + 32 37 bool vmw_fifo_have_3d(struct vmw_private *dev_priv) 33 38 { 34 39 u32 __iomem *fifo_mem = dev_priv->mmio_virt; ··· 104 99 uint32_t max; 105 100 uint32_t min; 106 101 102 + fifo->dx = false; 107 103 fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE; 108 104 fifo->static_buffer = vmalloc(fifo->static_buffer_size); 109 105 if (unlikely(fifo->static_buffer == NULL)) ··· 402 396 return NULL; 403 397 } 404 398 405 - void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) 399 + void *vmw_fifo_reserve_dx(struct vmw_private *dev_priv, uint32_t bytes, 400 + int ctx_id) 406 401 { 407 402 void *ret; 408 403 409 404 if (dev_priv->cman) 410 405 ret = vmw_cmdbuf_reserve(dev_priv->cman, bytes, 411 - SVGA3D_INVALID_ID, false, NULL); 412 - else 406 + ctx_id, false, NULL); 407 + else if (ctx_id == SVGA3D_INVALID_ID) 413 408 ret = vmw_local_fifo_reserve(dev_priv, bytes); 409 + else { 410 + WARN_ON("Command buffer has not been allocated.\n"); 411 + ret = NULL; 412 + } 414 413 if (IS_ERR_OR_NULL(ret)) { 415 414 DRM_ERROR("Fifo reserve failure of %u bytes.\n", 416 415 (unsigned) bytes); ··· 477 466 uint32_t min = ioread32(fifo_mem + SVGA_FIFO_MIN); 478 467 bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE; 479 468 469 + if (fifo_state->dx) 470 + bytes += sizeof(struct vmw_temp_set_context); 471 + 472 + fifo_state->dx = false; 480 473 BUG_ON((bytes & 3) != 0); 481 474 BUG_ON(bytes > fifo_state->reserved_size); 482 475 ··· 533 518 * @dev_priv: Pointer to device private structure. 534 519 * @bytes: Number of bytes to commit. 
535 520 */ 536 - static void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) 521 + void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes) 537 522 { 538 523 if (dev_priv->cman) 539 524 vmw_cmdbuf_commit(dev_priv->cman, bytes, NULL, true); ··· 720 705 return vmw_fifo_emit_dummy_gb_query(dev_priv, cid); 721 706 722 707 return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid); 708 + } 709 + 710 + void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes) 711 + { 712 + return vmw_fifo_reserve_dx(dev_priv, bytes, SVGA3D_INVALID_ID); 723 713 }
+5 -2
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
··· 110 110 param->value = 111 111 (dev_priv->active_display_unit == vmw_du_screen_target); 112 112 break; 113 + case DRM_VMW_PARAM_DX: 114 + param->value = dev_priv->has_dx; 115 + break; 113 116 default: 114 117 DRM_ERROR("Illegal vmwgfx get param request: %d\n", 115 118 param->param); ··· 196 193 uint32_t *bounce32 = (uint32_t *) bounce; 197 194 198 195 num = size / sizeof(uint32_t); 199 - if (num > SVGA3D_DEVCAP_MAX) 200 - num = SVGA3D_DEVCAP_MAX; 196 + if (num > SVGA3D_DEVCAP_DX) 197 + num = SVGA3D_DEVCAP_DX; 201 198 202 199 spin_lock(&dev_priv->cap_lock); 203 200 for (i = 0; i < num; ++i) {
+10 -5
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
··· 528 528 return -EINVAL; 529 529 } 530 530 531 - if (unlikely(format != surface->format)) { 531 + /* 532 + * For DX, surface format validation is done when surface->scanout 533 + * is set. 534 + */ 535 + if (!dev_priv->has_dx && format != surface->format) { 532 536 DRM_ERROR("Invalid surface format for requested mode.\n"); 533 537 return -EINVAL; 534 538 } ··· 758 754 true, /* can be a scanout buffer */ 759 755 1, /* num of mip levels */ 760 756 0, 757 + 0, 761 758 content_base_size, 762 759 srf_out); 763 760 if (ret) { ··· 774 769 vmw_dmabuf_unreference(&res->backup); 775 770 res->backup = vmw_dmabuf_reference(dmabuf_mob); 776 771 res->backup_offset = 0; 777 - vmw_resource_unreserve(res, NULL, 0); 772 + vmw_resource_unreserve(res, false, NULL, 0); 778 773 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 779 774 780 775 return 0; ··· 1874 1869 void vmw_kms_helper_resource_revert(struct vmw_resource *res) 1875 1870 { 1876 1871 vmw_kms_helper_buffer_revert(res->backup); 1877 - vmw_resource_unreserve(res, NULL, 0); 1872 + vmw_resource_unreserve(res, false, NULL, 0); 1878 1873 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 1879 1874 } 1880 1875 ··· 1921 1916 out_revert: 1922 1917 vmw_kms_helper_buffer_revert(res->backup); 1923 1918 out_unreserve: 1924 - vmw_resource_unreserve(res, NULL, 0); 1919 + vmw_resource_unreserve(res, false, NULL, 0); 1925 1920 out_unlock: 1926 1921 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 1927 1922 return ret; ··· 1942 1937 vmw_kms_helper_buffer_finish(res->dev_priv, NULL, res->backup, 1943 1938 out_fence, NULL); 1944 1939 1945 - vmw_resource_unreserve(res, NULL, 0); 1940 + vmw_resource_unreserve(res, false, NULL, 0); 1946 1941 mutex_unlock(&res->dev_priv->cmdbuf_mutex); 1947 1942 } 1948 1943
+112 -71
drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
··· 67 67 * @size: Size of the table (page-aligned). 68 68 * @page_table: Pointer to a struct vmw_mob holding the page table. 69 69 */ 70 - struct vmw_otable { 71 - unsigned long size; 72 - struct vmw_mob *page_table; 70 + static const struct vmw_otable pre_dx_tables[] = { 71 + {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true}, 72 + {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true}, 73 + {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true}, 74 + {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true}, 75 + {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE, 76 + NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE} 77 + }; 78 + 79 + static const struct vmw_otable dx_tables[] = { 80 + {VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE, NULL, true}, 81 + {VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE, NULL, true}, 82 + {VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE, NULL, true}, 83 + {VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE, NULL, true}, 84 + {VMWGFX_NUM_GB_SCREEN_TARGET * SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE, 85 + NULL, VMWGFX_ENABLE_SCREEN_TARGET_OTABLE}, 86 + {VMWGFX_NUM_DXCONTEXT * sizeof(SVGAOTableDXContextEntry), NULL, true}, 73 87 }; 74 88 75 89 static int vmw_mob_pt_populate(struct vmw_private *dev_priv, ··· 106 92 */ 107 93 static int vmw_setup_otable_base(struct vmw_private *dev_priv, 108 94 SVGAOTableType type, 95 + struct ttm_buffer_object *otable_bo, 109 96 unsigned long offset, 110 97 struct vmw_otable *otable) 111 98 { ··· 121 106 122 107 BUG_ON(otable->page_table != NULL); 123 108 124 - vsgt = vmw_bo_sg_table(dev_priv->otable_bo); 109 + vsgt = vmw_bo_sg_table(otable_bo); 125 110 vmw_piter_start(&iter, vsgt, offset >> PAGE_SHIFT); 126 111 WARN_ON(!vmw_piter_next(&iter)); 127 112 ··· 208 193 "takedown.\n"); 209 194 return; 210 195 } 211 - 196 + 212 197 memset(cmd, 0, sizeof(*cmd)); 213 198 cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE; 214 199 
cmd->header.size = sizeof(cmd->body); ··· 233 218 otable->page_table = NULL; 234 219 } 235 220 236 - /* 237 - * vmw_otables_setup - Set up guest backed memory object tables 238 - * 239 - * @dev_priv: Pointer to a device private structure 240 - * 241 - * Takes care of the device guest backed surface 242 - * initialization, by setting up the guest backed memory object tables. 243 - * Returns 0 on success and various error codes on failure. A succesful return 244 - * means the object tables can be taken down using the vmw_otables_takedown 245 - * function. 246 - */ 247 - int vmw_otables_setup(struct vmw_private *dev_priv) 221 + 222 + static int vmw_otable_batch_setup(struct vmw_private *dev_priv, 223 + struct vmw_otable_batch *batch) 248 224 { 249 225 unsigned long offset; 250 226 unsigned long bo_size; 251 - struct vmw_otable *otables; 227 + struct vmw_otable *otables = batch->otables; 252 228 SVGAOTableType i; 253 229 int ret; 254 230 255 - otables = kzalloc(SVGA_OTABLE_DX9_MAX * sizeof(*otables), 256 - GFP_KERNEL); 257 - if (unlikely(otables == NULL)) { 258 - DRM_ERROR("Failed to allocate space for otable " 259 - "metadata.\n"); 260 - return -ENOMEM; 261 - } 262 - 263 - otables[SVGA_OTABLE_MOB].size = 264 - VMWGFX_NUM_MOB * SVGA3D_OTABLE_MOB_ENTRY_SIZE; 265 - otables[SVGA_OTABLE_SURFACE].size = 266 - VMWGFX_NUM_GB_SURFACE * SVGA3D_OTABLE_SURFACE_ENTRY_SIZE; 267 - otables[SVGA_OTABLE_CONTEXT].size = 268 - VMWGFX_NUM_GB_CONTEXT * SVGA3D_OTABLE_CONTEXT_ENTRY_SIZE; 269 - otables[SVGA_OTABLE_SHADER].size = 270 - VMWGFX_NUM_GB_SHADER * SVGA3D_OTABLE_SHADER_ENTRY_SIZE; 271 - otables[SVGA_OTABLE_SCREENTARGET].size = 272 - VMWGFX_NUM_GB_SCREEN_TARGET * 273 - SVGA3D_OTABLE_SCREEN_TARGET_ENTRY_SIZE; 274 - 275 231 bo_size = 0; 276 - for (i = 0; i < SVGA_OTABLE_DX9_MAX; ++i) { 232 + for (i = 0; i < batch->num_otables; ++i) { 233 + if (!otables[i].enabled) 234 + continue; 235 + 277 236 otables[i].size = 278 237 (otables[i].size + PAGE_SIZE - 1) & PAGE_MASK; 279 238 bo_size += 
otables[i].size; ··· 257 268 ttm_bo_type_device, 258 269 &vmw_sys_ne_placement, 259 270 0, false, NULL, 260 - &dev_priv->otable_bo); 271 + &batch->otable_bo); 261 272 262 273 if (unlikely(ret != 0)) 263 274 goto out_no_bo; 264 275 265 - ret = ttm_bo_reserve(dev_priv->otable_bo, false, true, false, NULL); 276 + ret = ttm_bo_reserve(batch->otable_bo, false, true, false, NULL); 266 277 BUG_ON(ret != 0); 267 - ret = vmw_bo_driver.ttm_tt_populate(dev_priv->otable_bo->ttm); 278 + ret = vmw_bo_driver.ttm_tt_populate(batch->otable_bo->ttm); 268 279 if (unlikely(ret != 0)) 269 280 goto out_unreserve; 270 - ret = vmw_bo_map_dma(dev_priv->otable_bo); 281 + ret = vmw_bo_map_dma(batch->otable_bo); 271 282 if (unlikely(ret != 0)) 272 283 goto out_unreserve; 273 284 274 - ttm_bo_unreserve(dev_priv->otable_bo); 285 + ttm_bo_unreserve(batch->otable_bo); 275 286 276 287 offset = 0; 277 - for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) { 278 - ret = vmw_setup_otable_base(dev_priv, i, offset, 288 + for (i = 0; i < batch->num_otables; ++i) { 289 + if (!batch->otables[i].enabled) 290 + continue; 291 + 292 + ret = vmw_setup_otable_base(dev_priv, i, batch->otable_bo, 293 + offset, 279 294 &otables[i]); 280 295 if (unlikely(ret != 0)) 281 296 goto out_no_setup; 282 297 offset += otables[i].size; 283 298 } 284 299 285 - dev_priv->otables = otables; 286 300 return 0; 287 301 288 302 out_unreserve: 289 - ttm_bo_unreserve(dev_priv->otable_bo); 303 + ttm_bo_unreserve(batch->otable_bo); 290 304 out_no_setup: 291 - for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) 292 - vmw_takedown_otable_base(dev_priv, i, &otables[i]); 305 + for (i = 0; i < batch->num_otables; ++i) { 306 + if (batch->otables[i].enabled) 307 + vmw_takedown_otable_base(dev_priv, i, 308 + &batch->otables[i]); 309 + } 293 310 294 - ttm_bo_unref(&dev_priv->otable_bo); 311 + ttm_bo_unref(&batch->otable_bo); 295 312 out_no_bo: 296 - kfree(otables); 297 313 return ret; 298 314 } 299 315 316 + /* 317 + * 
vmw_otables_setup - Set up guest backed memory object tables 318 + * 319 + * @dev_priv: Pointer to a device private structure 320 + * 321 + * Takes care of the device guest backed surface 322 + * initialization, by setting up the guest backed memory object tables. 323 + * Returns 0 on success and various error codes on failure. A successful return 324 + * means the object tables can be taken down using the vmw_otables_takedown 325 + * function. 326 + */ 327 + int vmw_otables_setup(struct vmw_private *dev_priv) 328 + { 329 + struct vmw_otable **otables = &dev_priv->otable_batch.otables; 330 + int ret; 331 + 332 + if (dev_priv->has_dx) { 333 + *otables = kmalloc(sizeof(dx_tables), GFP_KERNEL); 334 + if (*otables == NULL) 335 + return -ENOMEM; 336 + 337 + memcpy(*otables, dx_tables, sizeof(dx_tables)); 338 + dev_priv->otable_batch.num_otables = ARRAY_SIZE(dx_tables); 339 + } else { 340 + *otables = kmalloc(sizeof(pre_dx_tables), GFP_KERNEL); 341 + if (*otables == NULL) 342 + return -ENOMEM; 343 + 344 + memcpy(*otables, pre_dx_tables, sizeof(pre_dx_tables)); 345 + dev_priv->otable_batch.num_otables = ARRAY_SIZE(pre_dx_tables); 346 + } 347 + 348 + ret = vmw_otable_batch_setup(dev_priv, &dev_priv->otable_batch); 349 + if (unlikely(ret != 0)) 350 + goto out_setup; 351 + 352 + return 0; 353 + 354 + out_setup: 355 + kfree(*otables); 356 + return ret; 357 + } 358 + 359 + static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, 360 + struct vmw_otable_batch *batch) 361 + { 362 + SVGAOTableType i; 363 + struct ttm_buffer_object *bo = batch->otable_bo; 364 + int ret; 365 + 366 + for (i = 0; i < batch->num_otables; ++i) 367 + if (batch->otables[i].enabled) 368 + vmw_takedown_otable_base(dev_priv, i, 369 + &batch->otables[i]); 370 + 371 + ret = ttm_bo_reserve(bo, false, true, false, NULL); 372 + BUG_ON(ret != 0); 373 + 374 + vmw_fence_single_bo(bo, NULL); 375 + ttm_bo_unreserve(bo); 376 + 377 + ttm_bo_unref(&batch->otable_bo); 378 + } 300 379 301 380 /* 302 381 * 
vmw_otables_takedown - Take down guest backed memory object tables ··· 375 318 */ 376 319 void vmw_otables_takedown(struct vmw_private *dev_priv) 377 320 { 378 - SVGAOTableType i; 379 - struct ttm_buffer_object *bo = dev_priv->otable_bo; 380 - int ret; 381 - 382 - for (i = 0; i < SVGA_OTABLE_DX9_MAX - VMW_OTABLE_SETUP_SUB; ++i) 383 - vmw_takedown_otable_base(dev_priv, i, 384 - &dev_priv->otables[i]); 385 - 386 - ret = ttm_bo_reserve(bo, false, true, false, NULL); 387 - BUG_ON(ret != 0); 388 - 389 - vmw_fence_single_bo(bo, NULL); 390 - ttm_bo_unreserve(bo); 391 - 392 - ttm_bo_unref(&dev_priv->otable_bo); 393 - kfree(dev_priv->otables); 394 - dev_priv->otables = NULL; 321 + vmw_otable_batch_takedown(dev_priv, &dev_priv->otable_batch); 322 + kfree(dev_priv->otable_batch.otables); 395 323 } 396 - 397 324 398 325 /* 399 326 * vmw_mob_calculate_pt_pages - Calculate the number of page table pages ··· 451 410 goto out_unreserve; 452 411 453 412 ttm_bo_unreserve(mob->pt_bo); 454 - 413 + 455 414 return 0; 456 415 457 416 out_unreserve:
+34 -14
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
··· 31 31 #include <drm/ttm/ttm_placement.h> 32 32 #include <drm/drmP.h> 33 33 #include "vmwgfx_resource_priv.h" 34 + #include "vmwgfx_binding.h" 34 35 35 36 #define VMW_RES_EVICT_ERR_COUNT 10 36 37 ··· 145 144 } 146 145 147 146 if (likely(res->hw_destroy != NULL)) { 148 - res->hw_destroy(res); 149 147 mutex_lock(&dev_priv->binding_mutex); 150 - vmw_context_binding_res_list_kill(&res->binding_head); 148 + vmw_binding_res_list_kill(&res->binding_head); 151 149 mutex_unlock(&dev_priv->binding_mutex); 150 + res->hw_destroy(res); 152 151 } 153 152 154 153 id = res->id; ··· 1150 1149 * command submission. 1151 1150 * 1152 1151 * @res: Pointer to the struct vmw_resource to unreserve. 1152 + * @switch_backup: Backup buffer has been switched. 1153 1153 * @new_backup: Pointer to new backup buffer if command submission 1154 - * switched. 1155 - * @new_backup_offset: New backup offset if @new_backup is !NULL. 1154 + * switched. May be NULL. 1155 + * @new_backup_offset: New backup offset if @switch_backup is true. 1156 1156 * 1157 1157 * Currently unreserving a resource means putting it back on the device's 1158 1158 * resource lru list, so that it can be evicted if necessary. 
1159 1159 */ 1160 1160 void vmw_resource_unreserve(struct vmw_resource *res, 1161 + bool switch_backup, 1161 1162 struct vmw_dma_buffer *new_backup, 1162 1163 unsigned long new_backup_offset) 1163 1164 { ··· 1168 1165 if (!list_empty(&res->lru_head)) 1169 1166 return; 1170 1167 1171 - if (new_backup && new_backup != res->backup) { 1172 - 1168 + if (switch_backup && new_backup != res->backup) { 1173 1169 if (res->backup) { 1174 1170 lockdep_assert_held(&res->backup->base.resv->lock.base); 1175 1171 list_del_init(&res->mob_head); 1176 1172 vmw_dmabuf_unreference(&res->backup); 1177 1173 } 1178 1174 1179 - res->backup = vmw_dmabuf_reference(new_backup); 1180 - lockdep_assert_held(&new_backup->base.resv->lock.base); 1181 - list_add_tail(&res->mob_head, &new_backup->res_list); 1175 + if (new_backup) { 1176 + res->backup = vmw_dmabuf_reference(new_backup); 1177 + lockdep_assert_held(&new_backup->base.resv->lock.base); 1178 + list_add_tail(&res->mob_head, &new_backup->res_list); 1179 + } else { 1180 + res->backup = NULL; 1181 + } 1182 1182 } 1183 - if (new_backup) 1183 + if (switch_backup) 1184 1184 res->backup_offset = new_backup_offset; 1185 1185 1186 1186 if (!res->func->may_evict || res->id == -1 || res->pin_count) ··· 1275 1269 if (res->func->needs_backup && res->backup == NULL && 1276 1270 !no_backup) { 1277 1271 ret = vmw_resource_buf_alloc(res, interruptible); 1278 - if (unlikely(ret != 0)) 1272 + if (unlikely(ret != 0)) { 1273 + DRM_ERROR("Failed to allocate a backup buffer " 1274 + "of size %lu. 
bytes\n", 1275 + (unsigned long) res->backup_size); 1279 1276 return ret; 1277 + } 1280 1278 } 1281 1279 1282 1280 return 0; ··· 1364 1354 struct ttm_validate_buffer val_buf; 1365 1355 unsigned err_count = 0; 1366 1356 1367 - if (likely(!res->func->may_evict)) 1357 + if (!res->func->create) 1368 1358 return 0; 1369 1359 1370 1360 val_buf.bo = NULL; ··· 1634 1624 res->pin_count++; 1635 1625 1636 1626 out_no_validate: 1637 - vmw_resource_unreserve(res, NULL, 0UL); 1627 + vmw_resource_unreserve(res, false, NULL, 0UL); 1638 1628 out_no_reserve: 1639 1629 mutex_unlock(&dev_priv->cmdbuf_mutex); 1640 1630 ttm_write_unlock(&dev_priv->reservation_sem); ··· 1670 1660 ttm_bo_unreserve(&vbo->base); 1671 1661 } 1672 1662 1673 - vmw_resource_unreserve(res, NULL, 0UL); 1663 + vmw_resource_unreserve(res, false, NULL, 0UL); 1674 1664 1675 1665 mutex_unlock(&dev_priv->cmdbuf_mutex); 1676 1666 ttm_read_unlock(&dev_priv->reservation_sem); 1667 + } 1668 + 1669 + /** 1670 + * vmw_res_type - Return the resource type 1671 + * 1672 + * @res: Pointer to the resource 1673 + */ 1674 + enum vmw_res_type vmw_res_type(const struct vmw_resource *res) 1675 + { 1676 + return res->func->res_type; 1677 1677 }
+11 -1
drivers/gpu/drm/vmwgfx/vmwgfx_resource_priv.h
··· 30 30 31 31 #include "vmwgfx_drv.h" 32 32 33 + enum vmw_cmdbuf_res_state { 34 + VMW_CMDBUF_RES_COMMITTED, 35 + VMW_CMDBUF_RES_ADD, 36 + VMW_CMDBUF_RES_DEL 37 + }; 38 + 33 39 /** 34 40 * struct vmw_user_resource_conv - Identify a derived user-exported resource 35 41 * type and provide a function to convert its ttm_base_object pointer to ··· 61 55 * @bind: Bind a hardware resource to persistent buffer storage. 62 56 * @unbind: Unbind a hardware resource from persistent 63 57 * buffer storage. 58 + * @commit_notify: If the resource is a command buffer managed resource, 59 + * callback to notify that a define or remove command 60 + * has been committed to the device. 64 61 */ 65 - 66 62 struct vmw_res_func { 67 63 enum vmw_res_type res_type; 68 64 bool needs_backup; ··· 79 71 int (*unbind) (struct vmw_resource *res, 80 72 bool readback, 81 73 struct ttm_validate_buffer *val_buf); 74 + void (*commit_notify)(struct vmw_resource *res, 75 + enum vmw_cmdbuf_res_state state); 82 76 }; 83 77 84 78 int vmw_resource_alloc_id(struct vmw_resource *res);
+440 -44
drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
··· 27 27 28 28 #include "vmwgfx_drv.h" 29 29 #include "vmwgfx_resource_priv.h" 30 + #include "vmwgfx_binding.h" 30 31 #include "ttm/ttm_placement.h" 31 32 32 33 struct vmw_shader { 33 34 struct vmw_resource res; 34 35 SVGA3dShaderType type; 35 36 uint32_t size; 37 + uint8_t num_input_sig; 38 + uint8_t num_output_sig; 36 39 }; 37 40 38 41 struct vmw_user_shader { ··· 43 40 struct vmw_shader shader; 44 41 }; 45 42 43 + struct vmw_dx_shader { 44 + struct vmw_resource res; 45 + struct vmw_resource *ctx; 46 + struct vmw_resource *cotable; 47 + u32 id; 48 + bool committed; 49 + struct list_head cotable_head; 50 + }; 51 + 46 52 static uint64_t vmw_user_shader_size; 47 53 static uint64_t vmw_shader_size; 54 + static size_t vmw_shader_dx_size; 48 55 49 56 static void vmw_user_shader_free(struct vmw_resource *res); 50 57 static struct vmw_resource * ··· 67 54 bool readback, 68 55 struct ttm_validate_buffer *val_buf); 69 56 static int vmw_gb_shader_destroy(struct vmw_resource *res); 57 + 58 + static int vmw_dx_shader_create(struct vmw_resource *res); 59 + static int vmw_dx_shader_bind(struct vmw_resource *res, 60 + struct ttm_validate_buffer *val_buf); 61 + static int vmw_dx_shader_unbind(struct vmw_resource *res, 62 + bool readback, 63 + struct ttm_validate_buffer *val_buf); 64 + static void vmw_dx_shader_commit_notify(struct vmw_resource *res, 65 + enum vmw_cmdbuf_res_state state); 66 + static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type); 67 + static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type); 68 + static uint64_t vmw_user_shader_size; 70 69 71 70 static const struct vmw_user_resource_conv user_shader_conv = { 72 71 .object_type = VMW_RES_SHADER, ··· 102 77 .unbind = vmw_gb_shader_unbind 103 78 }; 104 79 80 + static const struct vmw_res_func vmw_dx_shader_func = { 81 + .res_type = vmw_res_shader, 82 + .needs_backup = true, 83 + .may_evict = false, 84 + .type_name = "dx shaders", 85 + .backup_placement = &vmw_mob_placement, 86 + 
.create = vmw_dx_shader_create, 87 + /* 88 + * The destroy callback is only called with a committed resource on 89 + * context destroy, in which case we destroy the cotable anyway, 90 + * so there's no need to destroy DX shaders separately. 91 + */ 92 + .destroy = NULL, 93 + .bind = vmw_dx_shader_bind, 94 + .unbind = vmw_dx_shader_unbind, 95 + .commit_notify = vmw_dx_shader_commit_notify, 96 + }; 97 + 105 98 /** 106 99 * Shader management: 107 100 */ ··· 130 87 return container_of(res, struct vmw_shader, res); 131 88 } 132 89 90 + /** 91 + * vmw_res_to_dx_shader - typecast a struct vmw_resource to a 92 + * struct vmw_dx_shader 93 + * 94 + * @res: Pointer to the struct vmw_resource. 95 + */ 96 + static inline struct vmw_dx_shader * 97 + vmw_res_to_dx_shader(struct vmw_resource *res) 98 + { 99 + return container_of(res, struct vmw_dx_shader, res); 100 + } 101 + 133 102 static void vmw_hw_shader_destroy(struct vmw_resource *res) 134 103 { 135 - (void) vmw_gb_shader_destroy(res); 104 + if (likely(res->func->destroy)) 105 + (void) res->func->destroy(res); 106 + else 107 + res->id = -1; 136 108 } 109 + 137 110 138 111 static int vmw_gb_shader_init(struct vmw_private *dev_priv, 139 112 struct vmw_resource *res, 140 113 uint32_t size, 141 114 uint64_t offset, 142 115 SVGA3dShaderType type, 116 + uint8_t num_input_sig, 117 + uint8_t num_output_sig, 143 118 struct vmw_dma_buffer *byte_code, 144 119 void (*res_free) (struct vmw_resource *res)) 145 120 { 146 121 struct vmw_shader *shader = vmw_res_to_shader(res); 147 122 int ret; 148 123 149 - ret = vmw_resource_init(dev_priv, res, true, 150 - res_free, &vmw_gb_shader_func); 151 - 124 + ret = vmw_resource_init(dev_priv, res, true, res_free, 125 + &vmw_gb_shader_func); 152 126 153 127 if (unlikely(ret != 0)) { 154 128 if (res_free) ··· 182 122 } 183 123 shader->size = size; 184 124 shader->type = type; 125 + shader->num_input_sig = num_input_sig; 126 + shader->num_output_sig = num_output_sig; 185 127 186 128 
vmw_resource_activate(res, vmw_hw_shader_destroy); 187 129 return 0; 188 130 } 131 + 132 + /* 133 + * GB shader code: 134 + */ 189 135 190 136 static int vmw_gb_shader_create(struct vmw_resource *res) 191 137 { ··· 325 259 return 0; 326 260 327 261 mutex_lock(&dev_priv->binding_mutex); 328 - vmw_context_binding_res_list_scrub(&res->binding_head); 262 + vmw_binding_res_list_scrub(&res->binding_head); 329 263 330 264 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 331 265 if (unlikely(cmd == NULL)) { ··· 345 279 346 280 return 0; 347 281 } 282 + 283 + /* 284 + * DX shader code: 285 + */ 286 + 287 + /** 288 + * vmw_dx_shader_commit_notify - Notify that a shader operation has been 289 + * committed to hardware from a user-supplied command stream. 290 + * 291 + * @res: Pointer to the shader resource. 292 + * @state: Indicating whether a creation or removal has been committed. 293 + * 294 + */ 295 + static void vmw_dx_shader_commit_notify(struct vmw_resource *res, 296 + enum vmw_cmdbuf_res_state state) 297 + { 298 + struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); 299 + struct vmw_private *dev_priv = res->dev_priv; 300 + 301 + if (state == VMW_CMDBUF_RES_ADD) { 302 + mutex_lock(&dev_priv->binding_mutex); 303 + vmw_cotable_add_resource(shader->cotable, 304 + &shader->cotable_head); 305 + shader->committed = true; 306 + res->id = shader->id; 307 + mutex_unlock(&dev_priv->binding_mutex); 308 + } else { 309 + mutex_lock(&dev_priv->binding_mutex); 310 + list_del_init(&shader->cotable_head); 311 + shader->committed = false; 312 + res->id = -1; 313 + mutex_unlock(&dev_priv->binding_mutex); 314 + } 315 + } 316 + 317 + /** 318 + * vmw_dx_shader_unscrub - Have the device reattach a MOB to a DX shader. 319 + * 320 + * @res: The shader resource 321 + * 322 + * This function reverts a scrub operation. 
323 + */ 324 + static int vmw_dx_shader_unscrub(struct vmw_resource *res) 325 + { 326 + struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); 327 + struct vmw_private *dev_priv = res->dev_priv; 328 + struct { 329 + SVGA3dCmdHeader header; 330 + SVGA3dCmdDXBindShader body; 331 + } *cmd; 332 + 333 + if (!list_empty(&shader->cotable_head) || !shader->committed) 334 + return 0; 335 + 336 + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), 337 + shader->ctx->id); 338 + if (unlikely(cmd == NULL)) { 339 + DRM_ERROR("Failed reserving FIFO space for shader " 340 + "scrubbing.\n"); 341 + return -ENOMEM; 342 + } 343 + 344 + cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER; 345 + cmd->header.size = sizeof(cmd->body); 346 + cmd->body.cid = shader->ctx->id; 347 + cmd->body.shid = shader->id; 348 + cmd->body.mobid = res->backup->base.mem.start; 349 + cmd->body.offsetInBytes = res->backup_offset; 350 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 351 + 352 + vmw_cotable_add_resource(shader->cotable, &shader->cotable_head); 353 + 354 + return 0; 355 + } 356 + 357 + /** 358 + * vmw_dx_shader_create - The DX shader create callback 359 + * 360 + * @res: The DX shader resource 361 + * 362 + * The create callback is called as part of resource validation and 363 + * makes sure that we unscrub the shader if it's previously been scrubbed. 
364 + */ 365 + static int vmw_dx_shader_create(struct vmw_resource *res) 366 + { 367 + struct vmw_private *dev_priv = res->dev_priv; 368 + struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); 369 + int ret = 0; 370 + 371 + WARN_ON_ONCE(!shader->committed); 372 + 373 + if (!list_empty(&res->mob_head)) { 374 + mutex_lock(&dev_priv->binding_mutex); 375 + ret = vmw_dx_shader_unscrub(res); 376 + mutex_unlock(&dev_priv->binding_mutex); 377 + } 378 + 379 + res->id = shader->id; 380 + return ret; 381 + } 382 + 383 + /** 384 + * vmw_dx_shader_bind - The DX shader bind callback 385 + * 386 + * @res: The DX shader resource 387 + * @val_buf: Pointer to the validate buffer. 388 + * 389 + */ 390 + static int vmw_dx_shader_bind(struct vmw_resource *res, 391 + struct ttm_validate_buffer *val_buf) 392 + { 393 + struct vmw_private *dev_priv = res->dev_priv; 394 + struct ttm_buffer_object *bo = val_buf->bo; 395 + 396 + BUG_ON(bo->mem.mem_type != VMW_PL_MOB); 397 + mutex_lock(&dev_priv->binding_mutex); 398 + vmw_dx_shader_unscrub(res); 399 + mutex_unlock(&dev_priv->binding_mutex); 400 + 401 + return 0; 402 + } 403 + 404 + /** 405 + * vmw_dx_shader_scrub - Have the device unbind a MOB from a DX shader. 406 + * 407 + * @res: The shader resource 408 + * 409 + * This function unbinds a MOB from the DX shader without requiring the 410 + * MOB dma_buffer to be reserved. The driver still considers the MOB bound. 411 + * However, once the driver eventually decides to unbind the MOB, it doesn't 412 + * need to access the context. 
413 + */ 414 + static int vmw_dx_shader_scrub(struct vmw_resource *res) 415 + { 416 + struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); 417 + struct vmw_private *dev_priv = res->dev_priv; 418 + struct { 419 + SVGA3dCmdHeader header; 420 + SVGA3dCmdDXBindShader body; 421 + } *cmd; 422 + 423 + if (list_empty(&shader->cotable_head)) 424 + return 0; 425 + 426 + WARN_ON_ONCE(!shader->committed); 427 + cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 428 + if (unlikely(cmd == NULL)) { 429 + DRM_ERROR("Failed reserving FIFO space for shader " 430 + "scrubbing.\n"); 431 + return -ENOMEM; 432 + } 433 + 434 + cmd->header.id = SVGA_3D_CMD_DX_BIND_SHADER; 435 + cmd->header.size = sizeof(cmd->body); 436 + cmd->body.cid = shader->ctx->id; 437 + cmd->body.shid = res->id; 438 + cmd->body.mobid = SVGA3D_INVALID_ID; 439 + cmd->body.offsetInBytes = 0; 440 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 441 + res->id = -1; 442 + list_del_init(&shader->cotable_head); 443 + 444 + return 0; 445 + } 446 + 447 + /** 448 + * vmw_dx_shader_unbind - The dx shader unbind callback. 449 + * 450 + * @res: The shader resource 451 + * @readback: Whether this is a readback unbind. Currently unused. 452 + * @val_buf: MOB buffer information. 
453 + */ 454 + static int vmw_dx_shader_unbind(struct vmw_resource *res, 455 + bool readback, 456 + struct ttm_validate_buffer *val_buf) 457 + { 458 + struct vmw_private *dev_priv = res->dev_priv; 459 + struct vmw_fence_obj *fence; 460 + int ret; 461 + 462 + BUG_ON(res->backup->base.mem.mem_type != VMW_PL_MOB); 463 + 464 + mutex_lock(&dev_priv->binding_mutex); 465 + ret = vmw_dx_shader_scrub(res); 466 + mutex_unlock(&dev_priv->binding_mutex); 467 + 468 + if (ret) 469 + return ret; 470 + 471 + (void) vmw_execbuf_fence_commands(NULL, dev_priv, 472 + &fence, NULL); 473 + vmw_fence_single_bo(val_buf->bo, fence); 474 + 475 + if (likely(fence != NULL)) 476 + vmw_fence_obj_unreference(&fence); 477 + 478 + return 0; 479 + } 480 + 481 + /** 482 + * vmw_dx_shader_cotable_list_scrub - The cotable unbind_func callback for 483 + * DX shaders. 484 + * 485 + * @dev_priv: Pointer to device private structure. 486 + * @list: The list of cotable resources. 487 + * @readback: Whether the call was part of a readback unbind. 488 + * 489 + * Scrubs all shader MOBs so that any subsequent shader unbind or shader 490 + * destroy operation won't need to swap in the context. 491 + */ 492 + void vmw_dx_shader_cotable_list_scrub(struct vmw_private *dev_priv, 493 + struct list_head *list, 494 + bool readback) 495 + { 496 + struct vmw_dx_shader *entry, *next; 497 + 498 + WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); 499 + 500 + list_for_each_entry_safe(entry, next, list, cotable_head) { 501 + WARN_ON(vmw_dx_shader_scrub(&entry->res)); 502 + if (!readback) 503 + entry->committed = false; 504 + } 505 + } 506 + 507 + /** 508 + * vmw_dx_shader_res_free - The DX shader free callback 509 + * 510 + * @res: The shader resource 511 + * 512 + * Frees the DX shader resource and updates memory accounting. 
513 + */ 514 + static void vmw_dx_shader_res_free(struct vmw_resource *res) 515 + { 516 + struct vmw_private *dev_priv = res->dev_priv; 517 + struct vmw_dx_shader *shader = vmw_res_to_dx_shader(res); 518 + 519 + vmw_resource_unreference(&shader->cotable); 520 + kfree(shader); 521 + ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); 522 + } 523 + 524 + /** 525 + * vmw_dx_shader_add - Add a shader resource as a command buffer managed 526 + * resource. 527 + * 528 + * @man: The command buffer resource manager. 529 + * @ctx: Pointer to the context resource. 530 + * @user_key: The id used for this shader. 531 + * @shader_type: The shader type. 532 + * @list: The list of staged command buffer managed resources. 533 + */ 534 + int vmw_dx_shader_add(struct vmw_cmdbuf_res_manager *man, 535 + struct vmw_resource *ctx, 536 + u32 user_key, 537 + SVGA3dShaderType shader_type, 538 + struct list_head *list) 539 + { 540 + struct vmw_dx_shader *shader; 541 + struct vmw_resource *res; 542 + struct vmw_private *dev_priv = ctx->dev_priv; 543 + int ret; 544 + 545 + if (!vmw_shader_dx_size) 546 + vmw_shader_dx_size = ttm_round_pot(sizeof(*shader)); 547 + 548 + if (!vmw_shader_id_ok(user_key, shader_type)) 549 + return -EINVAL; 550 + 551 + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), vmw_shader_dx_size, 552 + false, true); 553 + if (ret) { 554 + if (ret != -ERESTARTSYS) 555 + DRM_ERROR("Out of graphics memory for shader " 556 + "creation.\n"); 557 + return ret; 558 + } 559 + 560 + shader = kmalloc(sizeof(*shader), GFP_KERNEL); 561 + if (!shader) { 562 + ttm_mem_global_free(vmw_mem_glob(dev_priv), vmw_shader_dx_size); 563 + return -ENOMEM; 564 + } 565 + 566 + res = &shader->res; 567 + shader->ctx = ctx; 568 + shader->cotable = vmw_context_cotable(ctx, SVGA_COTABLE_DXSHADER); 569 + shader->id = user_key; 570 + shader->committed = false; 571 + INIT_LIST_HEAD(&shader->cotable_head); 572 + ret = vmw_resource_init(dev_priv, res, true, 573 + vmw_dx_shader_res_free, 
&vmw_dx_shader_func); 574 + if (ret) 575 + goto out_resource_init; 576 + 577 + /* 578 + * The user_key name-space is not per shader type for DX shaders, 579 + * so when hashing, use a single zero shader type. 580 + */ 581 + ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, 582 + vmw_shader_key(user_key, 0), 583 + res, list); 584 + if (ret) 585 + goto out_resource_init; 586 + 587 + res->id = shader->id; 588 + vmw_resource_activate(res, vmw_hw_shader_destroy); 589 + 590 + out_resource_init: 591 + vmw_resource_unreference(&res); 592 + 593 + return ret; 594 + } 595 + 596 + 348 597 349 598 /** 350 599 * User-space shader management: ··· 722 341 size_t shader_size, 723 342 size_t offset, 724 343 SVGA3dShaderType shader_type, 344 + uint8_t num_input_sig, 345 + uint8_t num_output_sig, 725 346 struct ttm_object_file *tfile, 726 347 u32 *handle) 727 348 { ··· 766 383 */ 767 384 768 385 ret = vmw_gb_shader_init(dev_priv, res, shader_size, 769 - offset, shader_type, buffer, 386 + offset, shader_type, num_input_sig, 387 + num_output_sig, buffer, 770 388 vmw_user_shader_free); 771 389 if (unlikely(ret != 0)) 772 390 goto out; ··· 833 449 * From here on, the destructor takes over resource freeing. 
834 450 */ 835 451 ret = vmw_gb_shader_init(dev_priv, res, shader_size, 836 - offset, shader_type, buffer, 452 + offset, shader_type, 0, 0, buffer, 837 453 vmw_shader_free); 838 454 839 455 out_err: ··· 841 457 } 842 458 843 459 844 - int vmw_shader_define_ioctl(struct drm_device *dev, void *data, 845 - struct drm_file *file_priv) 460 + static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv, 461 + enum drm_vmw_shader_type shader_type_drm, 462 + u32 buffer_handle, size_t size, size_t offset, 463 + uint8_t num_input_sig, uint8_t num_output_sig, 464 + uint32_t *shader_handle) 846 465 { 847 466 struct vmw_private *dev_priv = vmw_priv(dev); 848 - struct drm_vmw_shader_create_arg *arg = 849 - (struct drm_vmw_shader_create_arg *)data; 850 467 struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; 851 468 struct vmw_dma_buffer *buffer = NULL; 852 469 SVGA3dShaderType shader_type; 853 470 int ret; 854 471 855 - if (arg->buffer_handle != SVGA3D_INVALID_ID) { 856 - ret = vmw_user_dmabuf_lookup(tfile, arg->buffer_handle, 472 + if (buffer_handle != SVGA3D_INVALID_ID) { 473 + ret = vmw_user_dmabuf_lookup(tfile, buffer_handle, 857 474 &buffer); 858 475 if (unlikely(ret != 0)) { 859 476 DRM_ERROR("Could not find buffer for shader " ··· 863 478 } 864 479 865 480 if ((u64)buffer->base.num_pages * PAGE_SIZE < 866 - (u64)arg->size + (u64)arg->offset) { 481 + (u64)size + (u64)offset) { 867 482 DRM_ERROR("Illegal buffer- or shader size.\n"); 868 483 ret = -EINVAL; 869 484 goto out_bad_arg; 870 485 } 871 486 } 872 487 873 - switch (arg->shader_type) { 488 + switch (shader_type_drm) { 874 489 case drm_vmw_shader_type_vs: 875 490 shader_type = SVGA3D_SHADERTYPE_VS; 876 491 break; 877 492 case drm_vmw_shader_type_ps: 878 493 shader_type = SVGA3D_SHADERTYPE_PS; 879 - break; 880 - case drm_vmw_shader_type_gs: 881 - shader_type = SVGA3D_SHADERTYPE_GS; 882 494 break; 883 495 default: 884 496 DRM_ERROR("Illegal shader type.\n"); ··· 887 505 if (unlikely(ret != 0)) 
888 506 goto out_bad_arg; 889 507 890 - ret = vmw_user_shader_alloc(dev_priv, buffer, arg->size, arg->offset, 891 - shader_type, tfile, &arg->shader_handle); 508 + ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset, 509 + shader_type, num_input_sig, 510 + num_output_sig, tfile, shader_handle); 892 511 893 512 ttm_read_unlock(&dev_priv->reservation_sem); 894 513 out_bad_arg: ··· 898 515 } 899 516 900 517 /** 901 - * vmw_compat_shader_id_ok - Check whether a compat shader user key and 518 + * vmw_shader_id_ok - Check whether a compat shader user key and 902 519 * shader type are within valid bounds. 903 520 * 904 521 * @user_key: User space id of the shader. ··· 906 523 * 907 524 * Returns true if valid false if not. 908 525 */ 909 - static bool vmw_compat_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) 526 + static bool vmw_shader_id_ok(u32 user_key, SVGA3dShaderType shader_type) 910 527 { 911 528 return user_key <= ((1 << 20) - 1) && (unsigned) shader_type < 16; 912 529 } 913 530 914 531 /** 915 - * vmw_compat_shader_key - Compute a hash key suitable for a compat shader. 532 + * vmw_shader_key - Compute a hash key suitable for a compat shader. 916 533 * 917 534 * @user_key: User space id of the shader. 918 535 * @shader_type: Shader type. ··· 920 537 * Returns a hash key suitable for a command buffer managed resource 921 538 * manager hash table. 922 539 */ 923 - static u32 vmw_compat_shader_key(u32 user_key, SVGA3dShaderType shader_type) 540 + static u32 vmw_shader_key(u32 user_key, SVGA3dShaderType shader_type) 924 541 { 925 542 return user_key | (shader_type << 20); 926 543 } 927 544 928 545 /** 929 - * vmw_compat_shader_remove - Stage a compat shader for removal. 546 + * vmw_shader_remove - Stage a compat shader for removal. 930 547 * 931 548 * @man: Pointer to the compat shader manager identifying the shader namespace. 932 549 * @user_key: The key that is used to identify the shader. The key is ··· 934 551 * @shader_type: Shader type. 
935 552 * @list: Caller's list of staged command buffer resource actions. 936 553 */ 937 - int vmw_compat_shader_remove(struct vmw_cmdbuf_res_manager *man, 938 - u32 user_key, SVGA3dShaderType shader_type, 939 - struct list_head *list) 554 + int vmw_shader_remove(struct vmw_cmdbuf_res_manager *man, 555 + u32 user_key, SVGA3dShaderType shader_type, 556 + struct list_head *list) 940 557 { 941 - if (!vmw_compat_shader_id_ok(user_key, shader_type)) 558 + struct vmw_resource *dummy; 559 + 560 + if (!vmw_shader_id_ok(user_key, shader_type)) 942 561 return -EINVAL; 943 562 944 - return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_compat_shader, 945 - vmw_compat_shader_key(user_key, 946 - shader_type), 947 - list); 563 + return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_shader, 564 + vmw_shader_key(user_key, shader_type), 565 + list, &dummy); 948 566 } 949 567 950 568 /** ··· 975 591 int ret; 976 592 struct vmw_resource *res; 977 593 978 - if (!vmw_compat_shader_id_ok(user_key, shader_type)) 594 + if (!vmw_shader_id_ok(user_key, shader_type)) 979 595 return -EINVAL; 980 596 981 597 /* Allocate and pin a DMA buffer */ ··· 1012 628 if (unlikely(ret != 0)) 1013 629 goto no_reserve; 1014 630 1015 - ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_compat_shader, 1016 - vmw_compat_shader_key(user_key, shader_type), 631 + ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_shader, 632 + vmw_shader_key(user_key, shader_type), 1017 633 res, list); 1018 634 vmw_resource_unreference(&res); 1019 635 no_reserve: ··· 1023 639 } 1024 640 1025 641 /** 1026 - * vmw_compat_shader_lookup - Look up a compat shader 642 + * vmw_shader_lookup - Look up a compat shader 1027 643 * 1028 644 * @man: Pointer to the command buffer managed resource manager identifying 1029 645 * the shader namespace. ··· 1034 650 * found. An error pointer otherwise. 
1035 651 */ 1036 652 struct vmw_resource * 1037 - vmw_compat_shader_lookup(struct vmw_cmdbuf_res_manager *man, 1038 - u32 user_key, 1039 - SVGA3dShaderType shader_type) 653 + vmw_shader_lookup(struct vmw_cmdbuf_res_manager *man, 654 + u32 user_key, 655 + SVGA3dShaderType shader_type) 1040 656 { 1041 - if (!vmw_compat_shader_id_ok(user_key, shader_type)) 657 + if (!vmw_shader_id_ok(user_key, shader_type)) 1042 658 return ERR_PTR(-EINVAL); 1043 659 1044 - return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_compat_shader, 1045 - vmw_compat_shader_key(user_key, 1046 - shader_type)); 660 + return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_shader, 661 + vmw_shader_key(user_key, shader_type)); 662 + } 663 + 664 + int vmw_shader_define_ioctl(struct drm_device *dev, void *data, 665 + struct drm_file *file_priv) 666 + { 667 + struct drm_vmw_shader_create_arg *arg = 668 + (struct drm_vmw_shader_create_arg *)data; 669 + 670 + return vmw_shader_define(dev, file_priv, arg->shader_type, 671 + arg->buffer_handle, 672 + arg->size, arg->offset, 673 + 0, 0, 674 + &arg->shader_handle); 1047 675 }
+555
drivers/gpu/drm/vmwgfx/vmwgfx_so.c
··· 1 + /************************************************************************** 2 + * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA 3 + * All Rights Reserved. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * The above copyright notice and this permission notice (including the 14 + * next paragraph) shall be included in all copies or substantial portions 15 + * of the Software. 16 + * 17 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 20 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 21 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 22 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 23 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 24 + * 25 + **************************************************************************/ 26 + 27 + #include "vmwgfx_drv.h" 28 + #include "vmwgfx_resource_priv.h" 29 + #include "vmwgfx_so.h" 30 + #include "vmwgfx_binding.h" 31 + 32 + /* 33 + * Currently, the only reason we need to keep track of views is that if we 34 + * destroy a hardware surface, all views pointing to it must also be destroyed, 35 + * otherwise the device will error. 36 + * So in particular if a surface is evicted, we must destroy all views pointing 37 + * to it, and all context bindings of that view. 
Similarly we must restore 38 + * the view bindings, views and surfaces pointed to by the views when a 39 + * context is referenced in the command stream. 40 + */ 41 + 42 + /** 43 + * struct vmw_view - view metadata 44 + * 45 + * @res: The struct vmw_resource we derive from 46 + * @ctx: Non-refcounted pointer to the context this view belongs to. 47 + * @srf: Refcounted pointer to the surface pointed to by this view. 48 + * @cotable: Refcounted pointer to the cotable holding this view. 49 + * @srf_head: List head for the surface-to-view list. 50 + * @cotable_head: List head for the cotable-to-view list. 51 + * @view_type: View type. 52 + * @view_id: User-space per context view id. Currently used also as per 53 + * context device view id. 54 + * @cmd_size: Size of the SVGA3D define view command that we've copied from the 55 + * command stream. 56 + * @committed: Whether the view is actually created or pending creation at the 57 + * device level. 58 + * @cmd: The SVGA3D define view command copied from the command stream. 
59 + */ 60 + struct vmw_view { 61 + struct rcu_head rcu; 62 + struct vmw_resource res; 63 + struct vmw_resource *ctx; /* Immutable */ 64 + struct vmw_resource *srf; /* Immutable */ 65 + struct vmw_resource *cotable; /* Immutable */ 66 + struct list_head srf_head; /* Protected by binding_mutex */ 67 + struct list_head cotable_head; /* Protected by binding_mutex */ 68 + unsigned view_type; /* Immutable */ 69 + unsigned view_id; /* Immutable */ 70 + u32 cmd_size; /* Immutable */ 71 + bool committed; /* Protected by binding_mutex */ 72 + u32 cmd[1]; /* Immutable */ 73 + }; 74 + 75 + static int vmw_view_create(struct vmw_resource *res); 76 + static int vmw_view_destroy(struct vmw_resource *res); 77 + static void vmw_hw_view_destroy(struct vmw_resource *res); 78 + static void vmw_view_commit_notify(struct vmw_resource *res, 79 + enum vmw_cmdbuf_res_state state); 80 + 81 + static const struct vmw_res_func vmw_view_func = { 82 + .res_type = vmw_res_view, 83 + .needs_backup = false, 84 + .may_evict = false, 85 + .type_name = "DX view", 86 + .backup_placement = NULL, 87 + .create = vmw_view_create, 88 + .commit_notify = vmw_view_commit_notify, 89 + }; 90 + 91 + /** 92 + * struct vmw_view - view define command body stub 93 + * 94 + * @view_id: The device id of the view being defined 95 + * @sid: The surface id of the view being defined 96 + * 97 + * This generic struct is used by the code to change @view_id and @sid of a 98 + * saved view define command. 99 + */ 100 + struct vmw_view_define { 101 + uint32 view_id; 102 + uint32 sid; 103 + }; 104 + 105 + /** 106 + * vmw_view - Convert a struct vmw_resource to a struct vmw_view 107 + * 108 + * @res: Pointer to the resource to convert. 109 + * 110 + * Returns a pointer to a struct vmw_view. 
111 + */ 112 + static struct vmw_view *vmw_view(struct vmw_resource *res) 113 + { 114 + return container_of(res, struct vmw_view, res); 115 + } 116 + 117 + /** 118 + * vmw_view_commit_notify - Notify that a view operation has been committed to 119 + * hardware from a user-supplied command stream. 120 + * 121 + * @res: Pointer to the view resource. 122 + * @state: Indicating whether a creation or removal has been committed. 123 + * 124 + */ 125 + static void vmw_view_commit_notify(struct vmw_resource *res, 126 + enum vmw_cmdbuf_res_state state) 127 + { 128 + struct vmw_view *view = vmw_view(res); 129 + struct vmw_private *dev_priv = res->dev_priv; 130 + 131 + mutex_lock(&dev_priv->binding_mutex); 132 + if (state == VMW_CMDBUF_RES_ADD) { 133 + struct vmw_surface *srf = vmw_res_to_srf(view->srf); 134 + 135 + list_add_tail(&view->srf_head, &srf->view_list); 136 + vmw_cotable_add_resource(view->cotable, &view->cotable_head); 137 + view->committed = true; 138 + res->id = view->view_id; 139 + 140 + } else { 141 + list_del_init(&view->cotable_head); 142 + list_del_init(&view->srf_head); 143 + view->committed = false; 144 + res->id = -1; 145 + } 146 + mutex_unlock(&dev_priv->binding_mutex); 147 + } 148 + 149 + /** 150 + * vmw_view_create - Create a hardware view. 151 + * 152 + * @res: Pointer to the view resource. 153 + * 154 + * Create a hardware view. Typically used if that view has previously been 155 + * destroyed by an eviction operation. 
156 + */ 157 + static int vmw_view_create(struct vmw_resource *res) 158 + { 159 + struct vmw_view *view = vmw_view(res); 160 + struct vmw_surface *srf = vmw_res_to_srf(view->srf); 161 + struct vmw_private *dev_priv = res->dev_priv; 162 + struct { 163 + SVGA3dCmdHeader header; 164 + struct vmw_view_define body; 165 + } *cmd; 166 + 167 + mutex_lock(&dev_priv->binding_mutex); 168 + if (!view->committed) { 169 + mutex_unlock(&dev_priv->binding_mutex); 170 + return 0; 171 + } 172 + 173 + cmd = vmw_fifo_reserve_dx(res->dev_priv, view->cmd_size, 174 + view->ctx->id); 175 + if (!cmd) { 176 + DRM_ERROR("Failed reserving FIFO space for view creation.\n"); 177 + mutex_unlock(&dev_priv->binding_mutex); 178 + return -ENOMEM; 179 + } 180 + memcpy(cmd, &view->cmd, view->cmd_size); 181 + WARN_ON(cmd->body.view_id != view->view_id); 182 + /* Sid may have changed due to surface eviction. */ 183 + WARN_ON(view->srf->id == SVGA3D_INVALID_ID); 184 + cmd->body.sid = view->srf->id; 185 + vmw_fifo_commit(res->dev_priv, view->cmd_size); 186 + res->id = view->view_id; 187 + list_add_tail(&view->srf_head, &srf->view_list); 188 + vmw_cotable_add_resource(view->cotable, &view->cotable_head); 189 + mutex_unlock(&dev_priv->binding_mutex); 190 + 191 + return 0; 192 + } 193 + 194 + /** 195 + * vmw_view_destroy - Destroy a hardware view. 196 + * 197 + * @res: Pointer to the view resource. 198 + * 199 + * Destroy a hardware view. Typically used on unexpected termination of the 200 + * owning process or if the surface the view is pointing to is destroyed. 
201 + */ 202 + static int vmw_view_destroy(struct vmw_resource *res) 203 + { 204 + struct vmw_private *dev_priv = res->dev_priv; 205 + struct vmw_view *view = vmw_view(res); 206 + struct { 207 + SVGA3dCmdHeader header; 208 + union vmw_view_destroy body; 209 + } *cmd; 210 + 211 + WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); 212 + vmw_binding_res_list_scrub(&res->binding_head); 213 + 214 + if (!view->committed || res->id == -1) 215 + return 0; 216 + 217 + cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), view->ctx->id); 218 + if (!cmd) { 219 + DRM_ERROR("Failed reserving FIFO space for view " 220 + "destruction.\n"); 221 + return -ENOMEM; 222 + } 223 + 224 + cmd->header.id = vmw_view_destroy_cmds[view->view_type]; 225 + cmd->header.size = sizeof(cmd->body); 226 + cmd->body.view_id = view->view_id; 227 + vmw_fifo_commit(dev_priv, sizeof(*cmd)); 228 + res->id = -1; 229 + list_del_init(&view->cotable_head); 230 + list_del_init(&view->srf_head); 231 + 232 + return 0; 233 + } 234 + 235 + /** 236 + * vmw_hw_view_destroy - Destroy a hardware view as part of resource cleanup. 237 + * 238 + * @res: Pointer to the view resource. 239 + * 240 + * Destroy a hardware view if it's still present. 241 + */ 242 + static void vmw_hw_view_destroy(struct vmw_resource *res) 243 + { 244 + struct vmw_private *dev_priv = res->dev_priv; 245 + 246 + mutex_lock(&dev_priv->binding_mutex); 247 + WARN_ON(vmw_view_destroy(res)); 248 + res->id = -1; 249 + mutex_unlock(&dev_priv->binding_mutex); 250 + } 251 + 252 + /** 253 + * vmw_view_key - Compute a view key suitable for the cmdbuf resource manager 254 + * 255 + * @user_key: The user-space id used for the view. 256 + * @view_type: The view type. 257 + * 258 + * Destroy a hardware view if it's still present. 259 + */ 260 + static u32 vmw_view_key(u32 user_key, enum vmw_view_type view_type) 261 + { 262 + return user_key | (view_type << 20); 263 + } 264 + 265 + /** 266 + * vmw_view_id_ok - Basic view id and type range checks. 
267 + * 268 + * @user_key: The user-space id used for the view. 269 + * @view_type: The view type. 270 + * 271 + * Checks that the view id and type (typically provided by user-space) is 272 + * valid. 273 + */ 274 + static bool vmw_view_id_ok(u32 user_key, enum vmw_view_type view_type) 275 + { 276 + return (user_key < SVGA_COTABLE_MAX_IDS && 277 + view_type < vmw_view_max); 278 + } 279 + 280 + /** 281 + * vmw_view_res_free - resource res_free callback for view resources 282 + * 283 + * @res: Pointer to a struct vmw_resource 284 + * 285 + * Frees memory and memory accounting held by a struct vmw_view. 286 + */ 287 + static void vmw_view_res_free(struct vmw_resource *res) 288 + { 289 + struct vmw_view *view = vmw_view(res); 290 + size_t size = offsetof(struct vmw_view, cmd) + view->cmd_size; 291 + struct vmw_private *dev_priv = res->dev_priv; 292 + 293 + vmw_resource_unreference(&view->cotable); 294 + vmw_resource_unreference(&view->srf); 295 + kfree_rcu(view, rcu); 296 + ttm_mem_global_free(vmw_mem_glob(dev_priv), size); 297 + } 298 + 299 + /** 300 + * vmw_view_add - Create a view resource and stage it for addition 301 + * as a command buffer managed resource. 302 + * 303 + * @man: Pointer to the compat shader manager identifying the shader namespace. 304 + * @ctx: Pointer to a struct vmw_resource identifying the active context. 305 + * @srf: Pointer to a struct vmw_resource identifying the surface the view 306 + * points to. 307 + * @view_type: The view type deduced from the view create command. 308 + * @user_key: The key that is used to identify the shader. The key is 309 + * unique to the view type and to the context. 310 + * @cmd: Pointer to the view create command in the command stream. 311 + * @cmd_size: Size of the view create command in the command stream. 312 + * @list: Caller's list of staged command buffer resource actions. 
313 + */ 314 + int vmw_view_add(struct vmw_cmdbuf_res_manager *man, 315 + struct vmw_resource *ctx, 316 + struct vmw_resource *srf, 317 + enum vmw_view_type view_type, 318 + u32 user_key, 319 + const void *cmd, 320 + size_t cmd_size, 321 + struct list_head *list) 322 + { 323 + static const size_t vmw_view_define_sizes[] = { 324 + [vmw_view_sr] = sizeof(SVGA3dCmdDXDefineShaderResourceView), 325 + [vmw_view_rt] = sizeof(SVGA3dCmdDXDefineRenderTargetView), 326 + [vmw_view_ds] = sizeof(SVGA3dCmdDXDefineDepthStencilView) 327 + }; 328 + 329 + struct vmw_private *dev_priv = ctx->dev_priv; 330 + struct vmw_resource *res; 331 + struct vmw_view *view; 332 + size_t size; 333 + int ret; 334 + 335 + if (cmd_size != vmw_view_define_sizes[view_type] + 336 + sizeof(SVGA3dCmdHeader)) { 337 + DRM_ERROR("Illegal view create command size.\n"); 338 + return -EINVAL; 339 + } 340 + 341 + if (!vmw_view_id_ok(user_key, view_type)) { 342 + DRM_ERROR("Illegal view add view id.\n"); 343 + return -EINVAL; 344 + } 345 + 346 + size = offsetof(struct vmw_view, cmd) + cmd_size; 347 + 348 + ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), size, false, true); 349 + if (ret) { 350 + if (ret != -ERESTARTSYS) 351 + DRM_ERROR("Out of graphics memory for view" 352 + " creation.\n"); 353 + return ret; 354 + } 355 + 356 + view = kmalloc(size, GFP_KERNEL); 357 + if (!view) { 358 + ttm_mem_global_free(vmw_mem_glob(dev_priv), size); 359 + return -ENOMEM; 360 + } 361 + 362 + res = &view->res; 363 + view->ctx = ctx; 364 + view->srf = vmw_resource_reference(srf); 365 + view->cotable = vmw_context_cotable(ctx, vmw_view_cotables[view_type]); 366 + view->view_type = view_type; 367 + view->view_id = user_key; 368 + view->cmd_size = cmd_size; 369 + view->committed = false; 370 + INIT_LIST_HEAD(&view->srf_head); 371 + INIT_LIST_HEAD(&view->cotable_head); 372 + memcpy(&view->cmd, cmd, cmd_size); 373 + ret = vmw_resource_init(dev_priv, res, true, 374 + vmw_view_res_free, &vmw_view_func); 375 + if (ret) 376 + goto 
out_resource_init; 377 + 378 + ret = vmw_cmdbuf_res_add(man, vmw_cmdbuf_res_view, 379 + vmw_view_key(user_key, view_type), 380 + res, list); 381 + if (ret) 382 + goto out_resource_init; 383 + 384 + res->id = view->view_id; 385 + vmw_resource_activate(res, vmw_hw_view_destroy); 386 + 387 + out_resource_init: 388 + vmw_resource_unreference(&res); 389 + 390 + return ret; 391 + } 392 + 393 + /** 394 + * vmw_view_remove - Stage a view for removal. 395 + * 396 + * @man: Pointer to the view manager identifying the shader namespace. 397 + * @user_key: The key that is used to identify the view. The key is 398 + * unique to the view type. 399 + * @view_type: View type 400 + * @list: Caller's list of staged command buffer resource actions. 401 + * @res_p: If the resource is in an already committed state, points to the 402 + * struct vmw_resource on successful return. The pointer will be 403 + * non ref-counted. 404 + */ 405 + int vmw_view_remove(struct vmw_cmdbuf_res_manager *man, 406 + u32 user_key, enum vmw_view_type view_type, 407 + struct list_head *list, 408 + struct vmw_resource **res_p) 409 + { 410 + if (!vmw_view_id_ok(user_key, view_type)) { 411 + DRM_ERROR("Illegal view remove view id.\n"); 412 + return -EINVAL; 413 + } 414 + 415 + return vmw_cmdbuf_res_remove(man, vmw_cmdbuf_res_view, 416 + vmw_view_key(user_key, view_type), 417 + list, res_p); 418 + } 419 + 420 + /** 421 + * vmw_view_cotable_list_destroy - Evict all views belonging to a cotable. 422 + * 423 + * @dev_priv: Pointer to a device private struct. 424 + * @list: List of views belonging to a cotable. 425 + * @readback: Unused. Needed for function interface only. 426 + * 427 + * This function evicts all views belonging to a cotable. 428 + * It must be called with the binding_mutex held, and the caller must hold 429 + * a reference to the view resource. This is typically called before the 430 + * cotable is paged out. 
431 + */ 432 + void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv, 433 + struct list_head *list, 434 + bool readback) 435 + { 436 + struct vmw_view *entry, *next; 437 + 438 + WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); 439 + 440 + list_for_each_entry_safe(entry, next, list, cotable_head) 441 + WARN_ON(vmw_view_destroy(&entry->res)); 442 + } 443 + 444 + /** 445 + * vmw_view_surface_list_destroy - Evict all views pointing to a surface 446 + * 447 + * @dev_priv: Pointer to a device private struct. 448 + * @list: List of views pointing to a surface. 449 + * 450 + * This function evicts all views pointing to a surface. This is typically 451 + * called before the surface is evicted. 452 + */ 453 + void vmw_view_surface_list_destroy(struct vmw_private *dev_priv, 454 + struct list_head *list) 455 + { 456 + struct vmw_view *entry, *next; 457 + 458 + WARN_ON_ONCE(!mutex_is_locked(&dev_priv->binding_mutex)); 459 + 460 + list_for_each_entry_safe(entry, next, list, srf_head) 461 + WARN_ON(vmw_view_destroy(&entry->res)); 462 + } 463 + 464 + /** 465 + * vmw_view_srf - Return a non-refcounted pointer to the surface a view is 466 + * pointing to. 467 + * 468 + * @res: pointer to a view resource. 469 + * 470 + * Note that the view itself is holding a reference, so as long 471 + * the view resource is alive, the surface resource will be. 472 + */ 473 + struct vmw_resource *vmw_view_srf(struct vmw_resource *res) 474 + { 475 + return vmw_view(res)->srf; 476 + } 477 + 478 + /** 479 + * vmw_view_lookup - Look up a view. 480 + * 481 + * @man: The context's cmdbuf ref manager. 482 + * @view_type: The view type. 483 + * @user_key: The view user id. 484 + * 485 + * returns a refcounted pointer to a view or an error pointer if not found. 
486 + */ 487 + struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man, 488 + enum vmw_view_type view_type, 489 + u32 user_key) 490 + { 491 + return vmw_cmdbuf_res_lookup(man, vmw_cmdbuf_res_view, 492 + vmw_view_key(user_key, view_type)); 493 + } 494 + 495 + const u32 vmw_view_destroy_cmds[] = { 496 + [vmw_view_sr] = SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW, 497 + [vmw_view_rt] = SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW, 498 + [vmw_view_ds] = SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW, 499 + }; 500 + 501 + const SVGACOTableType vmw_view_cotables[] = { 502 + [vmw_view_sr] = SVGA_COTABLE_SRVIEW, 503 + [vmw_view_rt] = SVGA_COTABLE_RTVIEW, 504 + [vmw_view_ds] = SVGA_COTABLE_DSVIEW, 505 + }; 506 + 507 + const SVGACOTableType vmw_so_cotables[] = { 508 + [vmw_so_el] = SVGA_COTABLE_ELEMENTLAYOUT, 509 + [vmw_so_bs] = SVGA_COTABLE_BLENDSTATE, 510 + [vmw_so_ds] = SVGA_COTABLE_DEPTHSTENCIL, 511 + [vmw_so_rs] = SVGA_COTABLE_RASTERIZERSTATE, 512 + [vmw_so_ss] = SVGA_COTABLE_SAMPLER, 513 + [vmw_so_so] = SVGA_COTABLE_STREAMOUTPUT 514 + }; 515 + 516 + 517 + /* To remove unused function warning */ 518 + static void vmw_so_build_asserts(void) __attribute__((used)); 519 + 520 + 521 + /* 522 + * This function is unused at run-time, and only used to dump various build 523 + * asserts important for code optimization assumptions. 524 + */ 525 + static void vmw_so_build_asserts(void) 526 + { 527 + /* Assert that our vmw_view_cmd_to_type() function is correct. 
*/ 528 + BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW != 529 + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 1); 530 + BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW != 531 + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 2); 532 + BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW != 533 + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 3); 534 + BUILD_BUG_ON(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW != 535 + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 4); 536 + BUILD_BUG_ON(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW != 537 + SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW + 5); 538 + 539 + /* Assert that our "one body fits all" assumption is valid */ 540 + BUILD_BUG_ON(sizeof(union vmw_view_destroy) != sizeof(u32)); 541 + 542 + /* Assert that the view key space can hold all view ids. */ 543 + BUILD_BUG_ON(SVGA_COTABLE_MAX_IDS >= ((1 << 20) - 1)); 544 + 545 + /* 546 + * Assert that the offset of sid in all view define commands 547 + * is what we assume it to be. 548 + */ 549 + BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != 550 + offsetof(SVGA3dCmdDXDefineShaderResourceView, sid)); 551 + BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != 552 + offsetof(SVGA3dCmdDXDefineRenderTargetView, sid)); 553 + BUILD_BUG_ON(offsetof(struct vmw_view_define, sid) != 554 + offsetof(SVGA3dCmdDXDefineDepthStencilView, sid)); 555 + }
+160
drivers/gpu/drm/vmwgfx/vmwgfx_so.h
··· 1 + /************************************************************************** 2 + * Copyright © 2014 VMware, Inc., Palo Alto, CA., USA 3 + * All Rights Reserved. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * The above copyright notice and this permission notice (including the 14 + * next paragraph) shall be included in all copies or substantial portions 15 + * of the Software. 16 + * 17 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 20 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 21 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 22 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 23 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
24 + * 25 + **************************************************************************/ 26 + #ifndef VMW_SO_H 27 + #define VMW_SO_H 28 + 29 + enum vmw_view_type { 30 + vmw_view_sr, 31 + vmw_view_rt, 32 + vmw_view_ds, 33 + vmw_view_max, 34 + }; 35 + 36 + enum vmw_so_type { 37 + vmw_so_el, 38 + vmw_so_bs, 39 + vmw_so_ds, 40 + vmw_so_rs, 41 + vmw_so_ss, 42 + vmw_so_so, 43 + vmw_so_max, 44 + }; 45 + 46 + /** 47 + * union vmw_view_destroy - view destruction command body 48 + * 49 + * @rtv: RenderTarget view destruction command body 50 + * @srv: ShaderResource view destruction command body 51 + * @dsv: DepthStencil view destruction command body 52 + * @view_id: A single u32 view id. 53 + * 54 + * The assumption here is that all union members are really represented by a 55 + * single u32 in the command stream. If that's not the case, 56 + * the size of this union will not equal the size of an u32, and the 57 + * assumption is invalid, and we detect that at compile time in the 58 + * vmw_so_build_asserts() function. 59 + */ 60 + union vmw_view_destroy { 61 + struct SVGA3dCmdDXDestroyRenderTargetView rtv; 62 + struct SVGA3dCmdDXDestroyShaderResourceView srv; 63 + struct SVGA3dCmdDXDestroyDepthStencilView dsv; 64 + u32 view_id; 65 + }; 66 + 67 + /* Map enum vmw_view_type to view destroy command ids*/ 68 + extern const u32 vmw_view_destroy_cmds[]; 69 + 70 + /* Map enum vmw_view_type to SVGACOTableType */ 71 + extern const SVGACOTableType vmw_view_cotables[]; 72 + 73 + /* Map enum vmw_so_type to SVGACOTableType */ 74 + extern const SVGACOTableType vmw_so_cotables[]; 75 + 76 + /* 77 + * vmw_view_cmd_to_type - Return the view type for a create or destroy command 78 + * 79 + * @id: The SVGA3D command id. 80 + * 81 + * For a given view create or destroy command id, return the corresponding 82 + * enum vmw_view_type. If the command is unknown, return vmw_view_max. 83 + * The validity of the simplified calculation is verified in the 84 + * vmw_so_build_asserts() function. 
85 + */ 86 + static inline enum vmw_view_type vmw_view_cmd_to_type(u32 id) 87 + { 88 + u32 tmp = (id - SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW) / 2; 89 + 90 + if (tmp > (u32)vmw_view_max) 91 + return vmw_view_max; 92 + 93 + return (enum vmw_view_type) tmp; 94 + } 95 + 96 + /* 97 + * vmw_so_cmd_to_type - Return the state object type for a 98 + * create or destroy command 99 + * 100 + * @id: The SVGA3D command id. 101 + * 102 + * For a given state object create or destroy command id, 103 + * return the corresponding enum vmw_so_type. If the command is uknown, 104 + * return vmw_so_max. We should perhaps optimize this function using 105 + * a similar strategy as vmw_view_cmd_to_type(). 106 + */ 107 + static inline enum vmw_so_type vmw_so_cmd_to_type(u32 id) 108 + { 109 + switch (id) { 110 + case SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT: 111 + case SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT: 112 + return vmw_so_el; 113 + case SVGA_3D_CMD_DX_DEFINE_BLEND_STATE: 114 + case SVGA_3D_CMD_DX_DESTROY_BLEND_STATE: 115 + return vmw_so_bs; 116 + case SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE: 117 + case SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE: 118 + return vmw_so_ds; 119 + case SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE: 120 + case SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE: 121 + return vmw_so_rs; 122 + case SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE: 123 + case SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE: 124 + return vmw_so_ss; 125 + case SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT: 126 + case SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT: 127 + return vmw_so_so; 128 + default: 129 + break; 130 + } 131 + return vmw_so_max; 132 + } 133 + 134 + /* 135 + * View management - vmwgfx_so.c 136 + */ 137 + extern int vmw_view_add(struct vmw_cmdbuf_res_manager *man, 138 + struct vmw_resource *ctx, 139 + struct vmw_resource *srf, 140 + enum vmw_view_type view_type, 141 + u32 user_key, 142 + const void *cmd, 143 + size_t cmd_size, 144 + struct list_head *list); 145 + 146 + extern int vmw_view_remove(struct vmw_cmdbuf_res_manager *man, 
147 + u32 user_key, enum vmw_view_type view_type, 148 + struct list_head *list, 149 + struct vmw_resource **res_p); 150 + 151 + extern void vmw_view_surface_list_destroy(struct vmw_private *dev_priv, 152 + struct list_head *view_list); 153 + extern void vmw_view_cotable_list_destroy(struct vmw_private *dev_priv, 154 + struct list_head *list, 155 + bool readback); 156 + extern struct vmw_resource *vmw_view_srf(struct vmw_resource *res); 157 + extern struct vmw_resource *vmw_view_lookup(struct vmw_cmdbuf_res_manager *man, 158 + enum vmw_view_type view_type, 159 + u32 user_key); 160 + #endif
+1
drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
··· 561 561 true, /* a scanout buffer */ 562 562 content_srf.mip_levels[0], 563 563 content_srf.multisample_count, 564 + 0, 564 565 display_base_size, 565 566 &display_srf); 566 567 if (unlikely(ret != 0)) {
+87 -22
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
··· 27 27 28 28 #include "vmwgfx_drv.h" 29 29 #include "vmwgfx_resource_priv.h" 30 + #include "vmwgfx_so.h" 31 + #include "vmwgfx_binding.h" 30 32 #include <ttm/ttm_placement.h> 31 33 #include "device_include/svga3d_surfacedefs.h" 34 + 32 35 33 36 /** 34 37 * struct vmw_user_surface - User-space visible surface resource ··· 596 593 * surface validate. 597 594 */ 598 595 596 + INIT_LIST_HEAD(&srf->view_list); 599 597 vmw_resource_activate(res, vmw_hw_surface_destroy); 600 598 return ret; 601 599 } ··· 727 723 desc = svga3dsurface_get_desc(req->format); 728 724 if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { 729 725 DRM_ERROR("Invalid surface format for surface creation.\n"); 726 + DRM_ERROR("Format requested is: %d\n", req->format); 730 727 return -EINVAL; 731 728 } 732 729 ··· 1023 1018 { 1024 1019 struct vmw_private *dev_priv = res->dev_priv; 1025 1020 struct vmw_surface *srf = vmw_res_to_srf(res); 1026 - uint32_t cmd_len, submit_len; 1021 + uint32_t cmd_len, cmd_id, submit_len; 1027 1022 int ret; 1028 1023 struct { 1029 1024 SVGA3dCmdHeader header; 1030 1025 SVGA3dCmdDefineGBSurface body; 1031 1026 } *cmd; 1027 + struct { 1028 + SVGA3dCmdHeader header; 1029 + SVGA3dCmdDefineGBSurface_v2 body; 1030 + } *cmd2; 1032 1031 1033 1032 if (likely(res->id != -1)) 1034 1033 return 0; ··· 1049 1040 goto out_no_fifo; 1050 1041 } 1051 1042 1052 - cmd_len = sizeof(cmd->body); 1053 - submit_len = sizeof(*cmd); 1043 + if (srf->array_size > 0) { 1044 + /* has_dx checked on creation time. 
*/ 1045 + cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE_V2; 1046 + cmd_len = sizeof(cmd2->body); 1047 + submit_len = sizeof(*cmd2); 1048 + } else { 1049 + cmd_id = SVGA_3D_CMD_DEFINE_GB_SURFACE; 1050 + cmd_len = sizeof(cmd->body); 1051 + submit_len = sizeof(*cmd); 1052 + } 1053 + 1054 1054 cmd = vmw_fifo_reserve(dev_priv, submit_len); 1055 + cmd2 = (typeof(cmd2))cmd; 1055 1056 if (unlikely(cmd == NULL)) { 1056 1057 DRM_ERROR("Failed reserving FIFO space for surface " 1057 1058 "creation.\n"); ··· 1069 1050 goto out_no_fifo; 1070 1051 } 1071 1052 1072 - cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; 1073 - cmd->header.size = cmd_len; 1074 - cmd->body.sid = srf->res.id; 1075 - cmd->body.surfaceFlags = srf->flags; 1076 - cmd->body.format = srf->format; 1077 - cmd->body.numMipLevels = srf->mip_levels[0]; 1078 - cmd->body.multisampleCount = srf->multisample_count; 1079 - cmd->body.autogenFilter = srf->autogen_filter; 1080 - cmd->body.size.width = srf->base_size.width; 1081 - cmd->body.size.height = srf->base_size.height; 1082 - cmd->body.size.depth = srf->base_size.depth; 1053 + if (srf->array_size > 0) { 1054 + cmd2->header.id = cmd_id; 1055 + cmd2->header.size = cmd_len; 1056 + cmd2->body.sid = srf->res.id; 1057 + cmd2->body.surfaceFlags = srf->flags; 1058 + cmd2->body.format = cpu_to_le32(srf->format); 1059 + cmd2->body.numMipLevels = srf->mip_levels[0]; 1060 + cmd2->body.multisampleCount = srf->multisample_count; 1061 + cmd2->body.autogenFilter = srf->autogen_filter; 1062 + cmd2->body.size.width = srf->base_size.width; 1063 + cmd2->body.size.height = srf->base_size.height; 1064 + cmd2->body.size.depth = srf->base_size.depth; 1065 + cmd2->body.arraySize = srf->array_size; 1066 + } else { 1067 + cmd->header.id = cmd_id; 1068 + cmd->header.size = cmd_len; 1069 + cmd->body.sid = srf->res.id; 1070 + cmd->body.surfaceFlags = srf->flags; 1071 + cmd->body.format = cpu_to_le32(srf->format); 1072 + cmd->body.numMipLevels = srf->mip_levels[0]; 1073 + cmd->body.multisampleCount = 
srf->multisample_count; 1074 + cmd->body.autogenFilter = srf->autogen_filter; 1075 + cmd->body.size.width = srf->base_size.width; 1076 + cmd->body.size.height = srf->base_size.height; 1077 + cmd->body.size.depth = srf->base_size.depth; 1078 + } 1079 + 1083 1080 vmw_fifo_commit(dev_priv, submit_len); 1084 1081 1085 1082 return 0; ··· 1223 1188 static int vmw_gb_surface_destroy(struct vmw_resource *res) 1224 1189 { 1225 1190 struct vmw_private *dev_priv = res->dev_priv; 1191 + struct vmw_surface *srf = vmw_res_to_srf(res); 1226 1192 struct { 1227 1193 SVGA3dCmdHeader header; 1228 1194 SVGA3dCmdDestroyGBSurface body; ··· 1233 1197 return 0; 1234 1198 1235 1199 mutex_lock(&dev_priv->binding_mutex); 1236 - vmw_context_binding_res_list_scrub(&res->binding_head); 1200 + vmw_view_surface_list_destroy(dev_priv, &srf->view_list); 1201 + vmw_binding_res_list_scrub(&res->binding_head); 1237 1202 1238 1203 cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); 1239 1204 if (unlikely(cmd == NULL)) { ··· 1296 1259 req->drm_surface_flags & drm_vmw_surface_flag_scanout, 1297 1260 req->mip_levels, 1298 1261 req->multisample_count, 1262 + req->array_size, 1299 1263 req->base_size, 1300 1264 &srf); 1301 1265 if (unlikely(ret != 0)) ··· 1313 1275 res = &user_srf->srf.res; 1314 1276 1315 1277 1316 - if (req->buffer_handle != SVGA3D_INVALID_ID) 1278 + if (req->buffer_handle != SVGA3D_INVALID_ID) { 1317 1279 ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, 1318 1280 &res->backup); 1319 - else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) 1281 + if (ret == 0 && res->backup->base.num_pages * PAGE_SIZE < 1282 + res->backup_size) { 1283 + DRM_ERROR("Surface backup buffer is too small.\n"); 1284 + vmw_dmabuf_unreference(&res->backup); 1285 + ret = -EINVAL; 1286 + goto out_unlock; 1287 + } 1288 + } else if (req->drm_surface_flags & drm_vmw_surface_flag_create_buffer) 1320 1289 ret = vmw_user_dmabuf_alloc(dev_priv, tfile, 1321 1290 res->backup_size, 1322 1291 
req->drm_surface_flags & ··· 1423 1378 rep->creq.drm_surface_flags = 0; 1424 1379 rep->creq.multisample_count = srf->multisample_count; 1425 1380 rep->creq.autogen_filter = srf->autogen_filter; 1381 + rep->creq.array_size = srf->array_size; 1426 1382 rep->creq.buffer_handle = backup_handle; 1427 1383 rep->creq.base_size = srf->base_size; 1428 1384 rep->crep.handle = user_srf->prime.base.hash.key; ··· 1450 1404 * @for_scanout: true if inteded to be used for scanout buffer 1451 1405 * @num_mip_levels: number of MIP levels 1452 1406 * @multisample_count: 1407 + * @array_size: Surface array size. 1453 1408 * @size: width, heigh, depth of the surface requested 1454 1409 * @user_srf_out: allocated user_srf. Set to NULL on failure. 1455 1410 * ··· 1466 1419 bool for_scanout, 1467 1420 uint32_t num_mip_levels, 1468 1421 uint32_t multisample_count, 1422 + uint32_t array_size, 1469 1423 struct drm_vmw_size size, 1470 1424 struct vmw_surface **srf_out) 1471 1425 { ··· 1474 1426 struct vmw_user_surface *user_srf; 1475 1427 struct vmw_surface *srf; 1476 1428 int ret; 1477 - 1429 + u32 num_layers; 1478 1430 1479 1431 *srf_out = NULL; 1480 1432 ··· 1491 1443 DRM_ERROR("Invalid surface format.\n"); 1492 1444 return -EINVAL; 1493 1445 } 1446 + } 1447 + 1448 + /* array_size must be null for non-GL3 host. 
*/ 1449 + if (array_size > 0 && !dev_priv->has_dx) { 1450 + DRM_ERROR("Tried to create DX surface on non-DX host.\n"); 1451 + return -EINVAL; 1494 1452 } 1495 1453 1496 1454 ret = ttm_read_lock(&dev_priv->reservation_sem, true); ··· 1535 1481 srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; 1536 1482 srf->multisample_count = multisample_count; 1537 1483 1538 - srf->res.backup_size = svga3dsurface_get_serialized_size(srf->format, 1539 - srf->base_size, 1540 - srf->mip_levels[0], 1541 - srf->flags & SVGA3D_SURFACE_CUBEMAP); 1484 + if (array_size) 1485 + num_layers = array_size; 1486 + else if (svga3d_flags & SVGA3D_SURFACE_CUBEMAP) 1487 + num_layers = SVGA3D_MAX_SURFACE_FACES; 1488 + else 1489 + num_layers = 1; 1490 + 1491 + srf->res.backup_size = 1492 + svga3dsurface_get_serialized_size(srf->format, 1493 + srf->base_size, 1494 + srf->mip_levels[0], 1495 + num_layers); 1496 + 1497 + if (srf->flags & SVGA3D_SURFACE_BIND_STREAM_OUTPUT) 1498 + srf->res.backup_size += sizeof(SVGA3dDXSOState); 1542 1499 1543 1500 if (dev_priv->active_display_unit == vmw_du_screen_target && 1544 1501 for_scanout)
+32 -3
include/uapi/drm/vmwgfx_drm.h
··· 64 64 #define DRM_VMW_GB_SURFACE_CREATE 23 65 65 #define DRM_VMW_GB_SURFACE_REF 24 66 66 #define DRM_VMW_SYNCCPU 25 67 + #define DRM_VMW_CREATE_EXTENDED_CONTEXT 26 67 68 68 69 /*************************************************************************/ 69 70 /** ··· 90 89 #define DRM_VMW_PARAM_MAX_MOB_MEMORY 9 91 90 #define DRM_VMW_PARAM_MAX_MOB_SIZE 10 92 91 #define DRM_VMW_PARAM_SCREEN_TARGET 11 92 + #define DRM_VMW_PARAM_DX 12 93 93 94 94 /** 95 95 * enum drm_vmw_handle_type - handle type for ref ioctls ··· 299 297 * Argument to the DRM_VMW_EXECBUF Ioctl. 300 298 */ 301 299 302 - #define DRM_VMW_EXECBUF_VERSION 1 300 + #define DRM_VMW_EXECBUF_VERSION 2 303 301 304 302 struct drm_vmw_execbuf_arg { 305 303 uint64_t commands; ··· 308 306 uint64_t fence_rep; 309 307 uint32_t version; 310 308 uint32_t flags; 309 + uint32_t context_handle; 310 + uint32_t pad64; 311 311 }; 312 312 313 313 /** ··· 830 826 enum drm_vmw_shader_type { 831 827 drm_vmw_shader_type_vs = 0, 832 828 drm_vmw_shader_type_ps, 833 - drm_vmw_shader_type_gs 834 829 }; 835 830 836 831 ··· 911 908 * @buffer_handle Buffer handle of backup buffer. SVGA3D_INVALID_ID 912 909 * if none. 913 910 * @base_size Size of the base mip level for all faces. 911 + * @array_size Must be zero for non-DX hardware, and if non-zero 912 + * svga3d_flags must have proper bind flags setup. 914 913 * 915 914 * Input argument to the DRM_VMW_GB_SURFACE_CREATE Ioctl. 916 915 * Part of output argument for the DRM_VMW_GB_SURFACE_REF Ioctl. ··· 925 920 uint32_t multisample_count; 926 921 uint32_t autogen_filter; 927 922 uint32_t buffer_handle; 928 - uint32_t pad64; 923 + uint32_t array_size; 929 924 struct drm_vmw_size base_size; 930 925 }; 931 926 ··· 1065 1060 uint32_t pad64; 1066 1061 }; 1067 1062 1063 + /*************************************************************************/ 1064 + /** 1065 + * DRM_VMW_CREATE_EXTENDED_CONTEXT - Create a host context. 
1066 + * 1067 + * Allocates a device unique context id, and queues a create context command 1068 + * for the host. Does not wait for host completion. 1069 + */ 1070 + enum drm_vmw_extended_context { 1071 + drm_vmw_context_legacy, 1072 + drm_vmw_context_dx 1073 + }; 1074 + 1075 + /** 1076 + * union drm_vmw_extended_context_arg 1077 + * 1078 + * @req: Context type. 1079 + * @rep: Context identifier. 1080 + * 1081 + * Argument to the DRM_VMW_CREATE_EXTENDED_CONTEXT Ioctl. 1082 + */ 1083 + union drm_vmw_extended_context_arg { 1084 + enum drm_vmw_extended_context req; 1085 + struct drm_vmw_context_arg rep; 1086 + }; 1068 1087 #endif