Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'gvt-fixes-2016-12-26' of https://github.com/01org/gvt-linux into drm-intel-fixes

From Zhenyu, "This is current GVT-g device model fixes for 4.10. I need
to base on v4.10-rc1 for merged vfio and KVMGT support."

Signed-off-by: Jani Nikula <jani.nikula@intel.com>

6 files changed, 100 insertions(+), 14 deletions(-)
drivers/gpu/drm/i915/gvt/cfg_space.c (+2 -2)
···
 	u8 changed = old ^ new;
 	int ret;
 
+	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
 	if (!(changed & PCI_COMMAND_MEMORY))
 		return 0;
 
···
 		return ret;
 	}
 
-	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
 	return 0;
 }
 
···
 	if (WARN_ON(bytes > 4))
 		return -EINVAL;
 
-	if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
 		return -EINVAL;
 
 	/* First check if it's PCI_COMMAND */
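The cfg_space.c change latches the guest's write into the virtual config space before the PCI_COMMAND_MEMORY early return, so writes that do not toggle the memory-enable bit are no longer dropped, and it relaxes the size check from >= to >, since a write that ends exactly at the end of config space is still in bounds. A minimal sketch of that bounds rule, assuming an invented CFG_SPACE_SZ and helper name rather than the real INTEL_GVT_MAX_CFG_SPACE_SZ value:

/*
 * Illustration only (not part of the patch): why the check must reject
 * "offset + bytes > size" rather than ">=".  With a 4096-byte space, a
 * 4-byte write at offset 4092 ends exactly at the boundary and is legal;
 * ">=" would wrongly reject it.  CFG_SPACE_SZ and write_in_bounds() are
 * invented stand-ins, not the real INTEL_GVT_MAX_CFG_SPACE_SZ.
 */
#include <stdbool.h>
#include <stdio.h>

#define CFG_SPACE_SZ 4096u

static bool write_in_bounds(unsigned int offset, unsigned int bytes)
{
	/* Equivalent to rejecting when offset + bytes > CFG_SPACE_SZ. */
	return offset + bytes <= CFG_SPACE_SZ;
}

int main(void)
{
	printf("offset 4092, 4 bytes: %s\n",
	       write_in_bounds(4092, 4) ? "accepted" : "rejected");
	printf("offset 4093, 4 bytes: %s\n",
	       write_in_bounds(4093, 4) ? "accepted" : "rejected");
	return 0;
}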
drivers/gpu/drm/i915/gvt/gtt.c (+55)
···
 	INIT_LIST_HEAD(&gtt->oos_page_list_head);
 	INIT_LIST_HEAD(&gtt->post_shadow_list_head);
 
+	intel_vgpu_reset_ggtt(vgpu);
+
 	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
 			NULL, 1, 0);
 	if (IS_ERR(ggtt_mm)) {
···
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
+	void *page_addr;
 
 	gvt_dbg_core("init gtt\n");
 
···
 		gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
 	} else {
 		return -ENODEV;
+	}
+
+	gvt->gtt.scratch_ggtt_page =
+		alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+	if (!gvt->gtt.scratch_ggtt_page) {
+		gvt_err("fail to allocate scratch ggtt page\n");
+		return -ENOMEM;
+	}
+
+	page_addr = page_address(gvt->gtt.scratch_ggtt_page);
+
+	gvt->gtt.scratch_ggtt_mfn =
+		intel_gvt_hypervisor_virt_to_mfn(page_addr);
+	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
+		gvt_err("fail to translate scratch ggtt page\n");
+		__free_page(gvt->gtt.scratch_ggtt_page);
+		return -EFAULT;
 	}
 
 	if (enable_out_of_sync) {
···
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+	__free_page(gvt->gtt.scratch_ggtt_page);
+
 	if (enable_out_of_sync)
 		clean_spt_oos(gvt);
+}
+
+/**
+ * intel_vgpu_reset_ggtt - reset the GGTT entry
+ * @vgpu: a vGPU
+ *
+ * This function is called at the vGPU create stage
+ * to reset all the GGTT entries.
+ *
+ */
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	u32 index;
+	u32 offset;
+	u32 num_entries;
+	struct intel_gvt_gtt_entry e;
+
+	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
+	e.type = GTT_TYPE_GGTT_PTE;
+	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+	e.val64 |= _PAGE_PRESENT;
+
+	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+	for (offset = 0; offset < num_entries; offset++)
+		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+	for (offset = 0; offset < num_entries; offset++)
+		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
 }
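The gtt.c change allocates one zeroed scratch page per GVT device in intel_gvt_init_gtt(), records its MFN, frees it in intel_gvt_clean_gtt(), and adds intel_vgpu_reset_ggtt(), called at vGPU creation so every GGTT entry in the vGPU's aperture and hidden ranges points at that scratch page (with _PAGE_PRESENT set) rather than at stale translations. The entry range is plain shift arithmetic; a small sketch with invented base and size values, not a real per-vGPU layout:

/*
 * Sketch only: how intel_vgpu_reset_ggtt() derives the range of GGTT
 * entries to rewrite.  Each PTE maps one 4 KiB page, so a graphics memory
 * address becomes a PTE index by shifting out PAGE_SHIFT bits, and a
 * region size becomes an entry count the same way.  The base and size
 * below are invented example values, not a real vGPU aperture.
 */
#include <stdio.h>

#define DEMO_PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

int main(void)
{
	unsigned long aperture_base = 0x10000000UL;	/* example gmadr base */
	unsigned long aperture_size = 0x08000000UL;	/* example 128 MiB    */

	unsigned long first_index = aperture_base >> DEMO_PAGE_SHIFT;
	unsigned long num_entries = aperture_size >> DEMO_PAGE_SHIFT;

	printf("first GGTT index: %lu, entries to point at scratch: %lu\n",
	       first_index, num_entries);
	return 0;
}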
drivers/gpu/drm/i915/gvt/gtt.h (+4)
···
 	struct list_head oos_page_use_list_head;
 	struct list_head oos_page_free_list_head;
 	struct list_head mm_lru_list_head;
+
+	struct page *scratch_ggtt_page;
+	unsigned long scratch_ggtt_mfn;
 };
 
 enum {
···
 
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
drivers/gpu/drm/i915/gvt/gvt.h (+1)
···
 		struct notifier_block group_notifier;
 		struct kvm *kvm;
 		struct work_struct release_work;
+		atomic_t released;
 	} vdev;
 #endif
 };
drivers/gpu/drm/i915/gvt/kvmgt.c (+37 -11)
···
 static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
+	kvm_pfn_t pfn;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
-	entry = __gvt_cache_find(vgpu, gfn);
-	mutex_unlock(&vgpu->vdev.cache_lock);
 
-	return entry == NULL ? 0 : entry->pfn;
+	entry = __gvt_cache_find(vgpu, gfn);
+	pfn = (entry == NULL) ? 0 : entry->pfn;
+
+	mutex_unlock(&vgpu->vdev.cache_lock);
+	return pfn;
 }
 
 static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
···
 		goto undo_iommu;
 	}
 
-	return kvmgt_guest_init(mdev);
+	ret = kvmgt_guest_init(mdev);
+	if (ret)
+		goto undo_group;
+
+	atomic_set(&vgpu->vdev.released, 0);
+	return ret;
+
+undo_group:
+	vfio_unregister_notifier(&mdev->dev, VFIO_GROUP_NOTIFY,
+					&vgpu->vdev.group_notifier);
 
 undo_iommu:
 	vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
···
 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
 	struct kvmgt_guest_info *info;
+	int ret;
 
 	if (!handle_valid(vgpu->handle))
 		return;
 
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+		return;
+
+	ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+
+	ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
 					&vgpu->vdev.group_notifier);
+	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
 
 	info = (struct kvmgt_guest_info *)vgpu->handle;
 	kvmgt_guest_exit(info);
+
+	vgpu->vdev.kvm = NULL;
 	vgpu->handle = 0;
 }
 
···
 {
 	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
 					vdev.release_work);
+
 	__intel_vgpu_release(vgpu);
 }
 
···
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
 
···
 
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
 
···
 
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-	struct intel_vgpu *vgpu;
-
 	if (!info) {
 		gvt_err("kvmgt_guest_info invalid\n");
 		return false;
 	}
 
-	vgpu = info->vgpu;
-
 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
 	kvmgt_protect_table_destroy(info);
-	gvt_cache_destroy(vgpu);
+	gvt_cache_destroy(info->vgpu);
 	vfree(info);
 
 	return true;
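In kvmgt.c, gvt_cache_find() now reads the pfn before dropping cache_lock; intel_vgpu_open() unwinds the group notifier if kvmgt_guest_init() fails and clears the new released flag; the page-track helpers bail out when gfn_to_memslot() returns no slot; and __intel_vgpu_release() uses atomic_cmpxchg() on released so the teardown runs at most once even though it can be reached from more than one path (for example the deferred release work shown above). That run-once flag is the core of the race fix; a userspace sketch of the same pattern with C11 atomics, where release_once() and its callers are invented names rather than kernel code:

/*
 * Illustration only: the run-once pattern the patch adds with
 * atomic_cmpxchg(&vgpu->vdev.released, 0, 1).  The kernel helper returns
 * the old value, so only the caller that observes 0 performs the teardown;
 * later callers see 1 and return early.  This userspace sketch shows the
 * same idea with C11 atomics; release_once() and the callers are invented.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int released;	/* zero-initialized: not yet released */

static void release_once(const char *caller)
{
	int expected = 0;

	/* Only the first successful 0 -> 1 exchange does the work. */
	if (!atomic_compare_exchange_strong(&released, &expected, 1)) {
		printf("%s: already released, skipping\n", caller);
		return;
	}
	printf("%s: performing teardown\n", caller);
}

int main(void)
{
	release_once("direct release");	/* does the teardown */
	release_once("release work");	/* skips it          */
	return 0;
}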
drivers/gpu/drm/i915/gvt/opregion.c (+1 -1)
···
 	int i, ret;
 
 	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
-		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
 				+ i * PAGE_SIZE);
 		if (mfn == INTEL_GVT_INVALID_ADDR) {
 			gvt_err("fail to get MFN from VA\n");
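The opregion.c fix passes the mapped opregion buffer, vgpu_opregion(vgpu)->va, to the VA-to-MFN translation rather than the address of the opregion bookkeeping struct itself. A simplified illustration of the difference, with an invented struct layout:

/*
 * Illustration only (invented types): the bug fixed here.  The old code
 * handed the address of the opregion bookkeeping struct to the VA-to-MFN
 * translation; the intended address is the mapped buffer stored in its
 * ->va member.  struct demo_opregion is a stand-in, not the GVT struct.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096

struct demo_opregion {
	void *va;	/* mapped opregion pages */
	int pages;
};

int main(void)
{
	struct demo_opregion opregion = { .pages = 2 };

	opregion.va = malloc(opregion.pages * DEMO_PAGE_SIZE);
	if (!opregion.va)
		return 1;

	/* Wrong: address of the struct itself. */
	printf("struct address: %p\n", (void *)&opregion);
	/* Right: the mapped buffer that should be translated page by page. */
	printf("mapped buffer : %p\n", opregion.va);

	free(opregion.va);
	return 0;
}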