Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'gvt-fixes-2017-01-25' of https://github.com/01org/gvt-linux into drm-intel-fixes

gvt-fixes-2017-01-25

- Re-enable shadow batch buffer scanning for security; it had been mistakenly turned off.
- Fix kvmgt/mdev attribute-name typo (available_instance → available_instances) so the correct sysfs ABI is exposed.
- Update the GVT mailing list address in MAINTAINERS (igvt-g-dev@lists.01.org → intel-gvt-dev@lists.freedesktop.org).

Signed-off-by: Jani Nikula <jani.nikula@intel.com>

+25 -57
+1 -1
MAINTAINERS
··· 4153 4153 INTEL GVT-g DRIVERS (Intel GPU Virtualization) 4154 4154 M: Zhenyu Wang <zhenyuw@linux.intel.com> 4155 4155 M: Zhi Wang <zhi.a.wang@intel.com> 4156 - L: igvt-g-dev@lists.01.org 4156 + L: intel-gvt-dev@lists.freedesktop.org 4157 4157 L: intel-gfx@lists.freedesktop.org 4158 4158 W: https://01.org/igvt-g 4159 4159 T: git https://github.com/01org/gvt-linux.git
-4
drivers/gpu/drm/i915/gvt/cmd_parser.c
··· 481 481 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2) 482 482 483 483 static unsigned long bypass_scan_mask = 0; 484 - static bool bypass_batch_buffer_scan = true; 485 484 486 485 /* ring ALL, type = 0 */ 487 486 static struct sub_op_bits sub_op_mi[] = { ··· 1523 1524 static int batch_buffer_needs_scan(struct parser_exec_state *s) 1524 1525 { 1525 1526 struct intel_gvt *gvt = s->vgpu->gvt; 1526 - 1527 - if (bypass_batch_buffer_scan) 1528 - return 0; 1529 1527 1530 1528 if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) { 1531 1529 /* BDW decides privilege based on address space */
+19 -47
drivers/gpu/drm/i915/gvt/execlist.c
··· 364 364 #define get_desc_from_elsp_dwords(ed, i) \ 365 365 ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2])) 366 366 367 - 368 - #define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2)) 369 - #define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U)) 370 - static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj, 371 - unsigned long add, int gmadr_bytes) 372 - { 373 - if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) 374 - return -1; 375 - 376 - *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add & 377 - BATCH_BUFFER_ADDR_MASK; 378 - if (gmadr_bytes == 8) { 379 - *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) = 380 - add & BATCH_BUFFER_ADDR_HIGH_MASK; 381 - } 382 - 383 - return 0; 384 - } 385 - 386 367 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload) 387 368 { 388 - int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; 369 + const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd; 370 + struct intel_shadow_bb_entry *entry_obj; 389 371 390 372 /* pin the gem object to ggtt */ 391 - if (!list_empty(&workload->shadow_bb)) { 392 - struct intel_shadow_bb_entry *entry_obj = 393 - list_first_entry(&workload->shadow_bb, 394 - struct intel_shadow_bb_entry, 395 - list); 396 - struct intel_shadow_bb_entry *temp; 373 + list_for_each_entry(entry_obj, &workload->shadow_bb, list) { 374 + struct i915_vma *vma; 397 375 398 - list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb, 399 - list) { 400 - struct i915_vma *vma; 401 - 402 - vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 403 - 4, 0); 404 - if (IS_ERR(vma)) { 405 - gvt_err("Cannot pin\n"); 406 - return; 407 - } 408 - 409 - /* FIXME: we are not tracking our pinned VMA leaving it 410 - * up to the core to fix up the stray pin_count upon 411 - * free. 
412 - */ 413 - 414 - /* update the relocate gma with shadow batch buffer*/ 415 - set_gma_to_bb_cmd(entry_obj, 416 - i915_ggtt_offset(vma), 417 - gmadr_bytes); 376 + vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0); 377 + if (IS_ERR(vma)) { 378 + gvt_err("Cannot pin\n"); 379 + return; 418 380 } 381 + 382 + /* FIXME: we are not tracking our pinned VMA leaving it 383 + * up to the core to fix up the stray pin_count upon 384 + * free. 385 + */ 386 + 387 + /* update the relocate gma with shadow batch buffer*/ 388 + entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma); 389 + if (gmadr_bytes == 8) 390 + entry_obj->bb_start_cmd_va[2] = 0; 419 391 } 420 392 } 421 393 ··· 798 826 INIT_LIST_HEAD(&vgpu->workload_q_head[i]); 799 827 } 800 828 801 - vgpu->workloads = kmem_cache_create("gvt-g vgpu workload", 829 + vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload", 802 830 sizeof(struct intel_vgpu_workload), 0, 803 831 SLAB_HWCACHE_ALIGN, 804 832 NULL);
+4 -4
drivers/gpu/drm/i915/gvt/kvmgt.c
··· 230 230 return NULL; 231 231 } 232 232 233 - static ssize_t available_instance_show(struct kobject *kobj, struct device *dev, 234 - char *buf) 233 + static ssize_t available_instances_show(struct kobject *kobj, 234 + struct device *dev, char *buf) 235 235 { 236 236 struct intel_vgpu_type *type; 237 237 unsigned int num = 0; ··· 269 269 type->fence); 270 270 } 271 271 272 - static MDEV_TYPE_ATTR_RO(available_instance); 272 + static MDEV_TYPE_ATTR_RO(available_instances); 273 273 static MDEV_TYPE_ATTR_RO(device_api); 274 274 static MDEV_TYPE_ATTR_RO(description); 275 275 276 276 static struct attribute *type_attrs[] = { 277 - &mdev_type_attr_available_instance.attr, 277 + &mdev_type_attr_available_instances.attr, 278 278 &mdev_type_attr_device_api.attr, 279 279 &mdev_type_attr_description.attr, 280 280 NULL,
+1 -1
drivers/gpu/drm/i915/gvt/scheduler.h
··· 113 113 struct drm_i915_gem_object *obj; 114 114 void *va; 115 115 unsigned long len; 116 - void *bb_start_cmd_va; 116 + u32 *bb_start_cmd_va; 117 117 }; 118 118 119 119 #define workload_q_head(vgpu, ring_id) \