Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'gvt-fixes-2017-01-16' of https://github.com/01org/gvt-linux into drm-intel-fixes

gvt-fixes-2017-01-16

vGPU reset fixes from Changbin.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>

+298 -142
+27 -2
drivers/gpu/drm/i915/gvt/aperture_gm.c
··· 158 158 POSTING_READ(fence_reg_lo); 159 159 } 160 160 161 + static void _clear_vgpu_fence(struct intel_vgpu *vgpu) 162 + { 163 + int i; 164 + 165 + for (i = 0; i < vgpu_fence_sz(vgpu); i++) 166 + intel_vgpu_write_fence(vgpu, i, 0); 167 + } 168 + 161 169 static void free_vgpu_fence(struct intel_vgpu *vgpu) 162 170 { 163 171 struct intel_gvt *gvt = vgpu->gvt; ··· 179 171 intel_runtime_pm_get(dev_priv); 180 172 181 173 mutex_lock(&dev_priv->drm.struct_mutex); 174 + _clear_vgpu_fence(vgpu); 182 175 for (i = 0; i < vgpu_fence_sz(vgpu); i++) { 183 176 reg = vgpu->fence.regs[i]; 184 - intel_vgpu_write_fence(vgpu, i, 0); 185 177 list_add_tail(&reg->link, 186 178 &dev_priv->mm.fence_list); 187 179 } ··· 209 201 continue; 210 202 list_del(pos); 211 203 vgpu->fence.regs[i] = reg; 212 - intel_vgpu_write_fence(vgpu, i, 0); 213 204 if (++i == vgpu_fence_sz(vgpu)) 214 205 break; 215 206 } 216 207 if (i != vgpu_fence_sz(vgpu)) 217 208 goto out_free_fence; 209 + 210 + _clear_vgpu_fence(vgpu); 218 211 219 212 mutex_unlock(&dev_priv->drm.struct_mutex); 220 213 intel_runtime_pm_put(dev_priv); ··· 313 304 free_vgpu_gm(vgpu); 314 305 free_vgpu_fence(vgpu); 315 306 free_resource(vgpu); 307 + } 308 + 309 + /** 310 + * intel_vgpu_reset_resource - reset resource state owned by a vGPU 311 + * @vgpu: a vGPU 312 + * 313 + * This function is used to reset resource state owned by a vGPU. 314 + * 315 + */ 316 + void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) 317 + { 318 + struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 319 + 320 + intel_runtime_pm_get(dev_priv); 321 + _clear_vgpu_fence(vgpu); 322 + intel_runtime_pm_put(dev_priv); 316 323 } 317 324 318 325 /**
+74
drivers/gpu/drm/i915/gvt/cfg_space.c
··· 282 282 } 283 283 return 0; 284 284 } 285 + 286 + /** 287 + * intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU 288 + * 289 + * @vgpu: a vGPU 290 + * @primary: is the vGPU presented as primary 291 + * 292 + */ 293 + void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, 294 + bool primary) 295 + { 296 + struct intel_gvt *gvt = vgpu->gvt; 297 + const struct intel_gvt_device_info *info = &gvt->device_info; 298 + u16 *gmch_ctl; 299 + int i; 300 + 301 + memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, 302 + info->cfg_space_size); 303 + 304 + if (!primary) { 305 + vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = 306 + INTEL_GVT_PCI_CLASS_VGA_OTHER; 307 + vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = 308 + INTEL_GVT_PCI_CLASS_VGA_OTHER; 309 + } 310 + 311 + /* Show guest that there isn't any stolen memory.*/ 312 + gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); 313 + *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); 314 + 315 + intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, 316 + gvt_aperture_pa_base(gvt), true); 317 + 318 + vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO 319 + | PCI_COMMAND_MEMORY 320 + | PCI_COMMAND_MASTER); 321 + /* 322 + * Clear the bar upper 32bit and let guest to assign the new value 323 + */ 324 + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); 325 + memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); 326 + memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); 327 + 328 + for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { 329 + vgpu->cfg_space.bar[i].size = pci_resource_len( 330 + gvt->dev_priv->drm.pdev, i * 2); 331 + vgpu->cfg_space.bar[i].tracked = false; 332 + } 333 + } 334 + 335 + /** 336 + * intel_vgpu_reset_cfg_space - reset vGPU configuration space 337 + * 338 + * @vgpu: a vGPU 339 + * 340 + */ 341 + void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu) 342 + { 343 + u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND]; 344 + bool primary = 
vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] != 345 + INTEL_GVT_PCI_CLASS_VGA_OTHER; 346 + 347 + if (cmd & PCI_COMMAND_MEMORY) { 348 + trap_gttmmio(vgpu, false); 349 + map_aperture(vgpu, false); 350 + } 351 + 352 + /** 353 + * Currently we only do such reset when vGPU is not 354 + * owned by any VM, so we simply restore entire cfg 355 + * space to default value. 356 + */ 357 + intel_vgpu_init_cfg_space(vgpu, primary); 358 + }
+27
drivers/gpu/drm/i915/gvt/gtt.c
··· 2277 2277 for (offset = 0; offset < num_entries; offset++) 2278 2278 ops->set_entry(NULL, &e, index + offset, false, 0, vgpu); 2279 2279 } 2280 + 2281 + /** 2282 + * intel_vgpu_reset_gtt - reset all the GTT related status 2283 + * @vgpu: a vGPU 2284 + * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset 2285 + * 2286 + * This function is called from vfio core to reset all 2287 + * GTT related status, including GGTT, PPGTT, scratch page. 2288 + * 2289 + */ 2290 + void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr) 2291 + { 2292 + int i; 2293 + 2294 + ppgtt_free_all_shadow_page(vgpu); 2295 + if (!dmlr) 2296 + return; 2297 + 2298 + intel_vgpu_reset_ggtt(vgpu); 2299 + 2300 + /* clear scratch page for security */ 2301 + for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) { 2302 + if (vgpu->gtt.scratch_pt[i].page != NULL) 2303 + memset(page_address(vgpu->gtt.scratch_pt[i].page), 2304 + 0, PAGE_SIZE); 2305 + } 2306 + }
+1
drivers/gpu/drm/i915/gvt/gtt.h
··· 208 208 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); 209 209 210 210 extern int intel_gvt_init_gtt(struct intel_gvt *gvt); 211 + extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr); 211 212 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt); 212 213 213 214 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
+7 -1
drivers/gpu/drm/i915/gvt/gvt.h
··· 323 323 324 324 int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, 325 325 struct intel_vgpu_creation_params *param); 326 + void intel_vgpu_reset_resource(struct intel_vgpu *vgpu); 326 327 void intel_vgpu_free_resource(struct intel_vgpu *vgpu); 327 328 void intel_vgpu_write_fence(struct intel_vgpu *vgpu, 328 329 u32 fence, u64 value); ··· 376 375 struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt, 377 376 struct intel_vgpu_type *type); 378 377 void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu); 378 + void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, 379 + unsigned int engine_mask); 379 380 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu); 380 381 381 382 ··· 414 411 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, 415 412 unsigned long *g_index); 416 413 414 + void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu, 415 + bool primary); 416 + void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu); 417 + 417 418 int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset, 418 419 void *p_data, unsigned int bytes); 419 420 ··· 431 424 int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); 432 425 433 426 int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); 434 - int setup_vgpu_mmio(struct intel_vgpu *vgpu); 435 427 void populate_pvinfo_page(struct intel_vgpu *vgpu); 436 428 437 429 struct intel_gvt_ops {
+29 -61
drivers/gpu/drm/i915/gvt/handlers.c
··· 231 231 return 0; 232 232 } 233 233 234 - static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset, 235 - void *p_data, unsigned int bytes, unsigned long bitmap) 236 - { 237 - struct intel_gvt_workload_scheduler *scheduler = 238 - &vgpu->gvt->scheduler; 239 - 240 - vgpu->resetting = true; 241 - 242 - intel_vgpu_stop_schedule(vgpu); 243 - /* 244 - * The current_vgpu will set to NULL after stopping the 245 - * scheduler when the reset is triggered by current vgpu. 246 - */ 247 - if (scheduler->current_vgpu == NULL) { 248 - mutex_unlock(&vgpu->gvt->lock); 249 - intel_gvt_wait_vgpu_idle(vgpu); 250 - mutex_lock(&vgpu->gvt->lock); 251 - } 252 - 253 - intel_vgpu_reset_execlist(vgpu, bitmap); 254 - 255 - /* full GPU reset */ 256 - if (bitmap == 0xff) { 257 - mutex_unlock(&vgpu->gvt->lock); 258 - intel_vgpu_clean_gtt(vgpu); 259 - mutex_lock(&vgpu->gvt->lock); 260 - setup_vgpu_mmio(vgpu); 261 - populate_pvinfo_page(vgpu); 262 - intel_vgpu_init_gtt(vgpu); 263 - } 264 - 265 - vgpu->resetting = false; 266 - 267 - return 0; 268 - } 269 - 270 234 static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, 271 - void *p_data, unsigned int bytes) 235 + void *p_data, unsigned int bytes) 272 236 { 237 + unsigned int engine_mask = 0; 273 238 u32 data; 274 - u64 bitmap = 0; 275 239 276 240 write_vreg(vgpu, offset, p_data, bytes); 277 241 data = vgpu_vreg(vgpu, offset); 278 242 279 243 if (data & GEN6_GRDOM_FULL) { 280 244 gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id); 281 - bitmap = 0xff; 245 + engine_mask = ALL_ENGINES; 246 + } else { 247 + if (data & GEN6_GRDOM_RENDER) { 248 + gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); 249 + engine_mask |= (1 << RCS); 250 + } 251 + if (data & GEN6_GRDOM_MEDIA) { 252 + gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); 253 + engine_mask |= (1 << VCS); 254 + } 255 + if (data & GEN6_GRDOM_BLT) { 256 + gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); 257 + engine_mask |= (1 << BCS); 
258 + } 259 + if (data & GEN6_GRDOM_VECS) { 260 + gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); 261 + engine_mask |= (1 << VECS); 262 + } 263 + if (data & GEN8_GRDOM_MEDIA2) { 264 + gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); 265 + if (HAS_BSD2(vgpu->gvt->dev_priv)) 266 + engine_mask |= (1 << VCS2); 267 + } 282 268 } 283 - if (data & GEN6_GRDOM_RENDER) { 284 - gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id); 285 - bitmap |= (1 << RCS); 286 - } 287 - if (data & GEN6_GRDOM_MEDIA) { 288 - gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id); 289 - bitmap |= (1 << VCS); 290 - } 291 - if (data & GEN6_GRDOM_BLT) { 292 - gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id); 293 - bitmap |= (1 << BCS); 294 - } 295 - if (data & GEN6_GRDOM_VECS) { 296 - gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id); 297 - bitmap |= (1 << VECS); 298 - } 299 - if (data & GEN8_GRDOM_MEDIA2) { 300 - gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id); 301 - if (HAS_BSD2(vgpu->gvt->dev_priv)) 302 - bitmap |= (1 << VCS2); 303 - } 304 - return handle_device_reset(vgpu, offset, p_data, bytes, bitmap); 269 + 270 + intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask); 271 + 272 + return 0; 305 273 } 306 274 307 275 static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
+53
drivers/gpu/drm/i915/gvt/mmio.c
··· 303 303 mutex_unlock(&gvt->lock); 304 304 return ret; 305 305 } 306 + 307 + 308 + /** 309 + * intel_vgpu_reset_mmio - reset virtual MMIO space 310 + * @vgpu: a vGPU 311 + * 312 + */ 313 + void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu) 314 + { 315 + struct intel_gvt *gvt = vgpu->gvt; 316 + const struct intel_gvt_device_info *info = &gvt->device_info; 317 + 318 + memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); 319 + memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); 320 + 321 + vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; 322 + 323 + /* set the bit 0:2(Core C-State ) to C0 */ 324 + vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; 325 + } 326 + 327 + /** 328 + * intel_vgpu_init_mmio - init MMIO space 329 + * @vgpu: a vGPU 330 + * 331 + * Returns: 332 + * Zero on success, negative error code if failed 333 + */ 334 + int intel_vgpu_init_mmio(struct intel_vgpu *vgpu) 335 + { 336 + const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; 337 + 338 + vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); 339 + if (!vgpu->mmio.vreg) 340 + return -ENOMEM; 341 + 342 + vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; 343 + 344 + intel_vgpu_reset_mmio(vgpu); 345 + 346 + return 0; 347 + } 348 + 349 + /** 350 + * intel_vgpu_clean_mmio - clean MMIO space 351 + * @vgpu: a vGPU 352 + * 353 + */ 354 + void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu) 355 + { 356 + vfree(vgpu->mmio.vreg); 357 + vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; 358 + }
+4
drivers/gpu/drm/i915/gvt/mmio.h
··· 86 86 *offset; \ 87 87 }) 88 88 89 + int intel_vgpu_init_mmio(struct intel_vgpu *vgpu); 90 + void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu); 91 + void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu); 92 + 89 93 int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa); 90 94 91 95 int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
+76 -78
drivers/gpu/drm/i915/gvt/vgpu.c
··· 35 35 #include "gvt.h" 36 36 #include "i915_pvinfo.h" 37 37 38 - static void clean_vgpu_mmio(struct intel_vgpu *vgpu) 39 - { 40 - vfree(vgpu->mmio.vreg); 41 - vgpu->mmio.vreg = vgpu->mmio.sreg = NULL; 42 - } 43 - 44 - int setup_vgpu_mmio(struct intel_vgpu *vgpu) 45 - { 46 - struct intel_gvt *gvt = vgpu->gvt; 47 - const struct intel_gvt_device_info *info = &gvt->device_info; 48 - 49 - if (vgpu->mmio.vreg) 50 - memset(vgpu->mmio.vreg, 0, info->mmio_size * 2); 51 - else { 52 - vgpu->mmio.vreg = vzalloc(info->mmio_size * 2); 53 - if (!vgpu->mmio.vreg) 54 - return -ENOMEM; 55 - } 56 - 57 - vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size; 58 - 59 - memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size); 60 - memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size); 61 - 62 - vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0; 63 - 64 - /* set the bit 0:2(Core C-State ) to C0 */ 65 - vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; 66 - return 0; 67 - } 68 - 69 - static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu, 70 - struct intel_vgpu_creation_params *param) 71 - { 72 - struct intel_gvt *gvt = vgpu->gvt; 73 - const struct intel_gvt_device_info *info = &gvt->device_info; 74 - u16 *gmch_ctl; 75 - int i; 76 - 77 - memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space, 78 - info->cfg_space_size); 79 - 80 - if (!param->primary) { 81 - vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] = 82 - INTEL_GVT_PCI_CLASS_VGA_OTHER; 83 - vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] = 84 - INTEL_GVT_PCI_CLASS_VGA_OTHER; 85 - } 86 - 87 - /* Show guest that there isn't any stolen memory.*/ 88 - gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL); 89 - *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT); 90 - 91 - intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2, 92 - gvt_aperture_pa_base(gvt), true); 93 - 94 - vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO 95 - | PCI_COMMAND_MEMORY 96 - | PCI_COMMAND_MASTER); 97 - /* 98 - * Clear the bar upper 32bit and let guest to 
assign the new value 99 - */ 100 - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4); 101 - memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4); 102 - memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4); 103 - 104 - for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) { 105 - vgpu->cfg_space.bar[i].size = pci_resource_len( 106 - gvt->dev_priv->drm.pdev, i * 2); 107 - vgpu->cfg_space.bar[i].tracked = false; 108 - } 109 - } 110 - 111 38 void populate_pvinfo_page(struct intel_vgpu *vgpu) 112 39 { 113 40 /* setup the ballooning information */ ··· 195 268 intel_vgpu_clean_gtt(vgpu); 196 269 intel_gvt_hypervisor_detach_vgpu(vgpu); 197 270 intel_vgpu_free_resource(vgpu); 198 - clean_vgpu_mmio(vgpu); 271 + intel_vgpu_clean_mmio(vgpu); 199 272 vfree(vgpu); 200 273 201 274 intel_gvt_update_vgpu_types(gvt); ··· 227 300 vgpu->gvt = gvt; 228 301 bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES); 229 302 230 - setup_vgpu_cfg_space(vgpu, param); 303 + intel_vgpu_init_cfg_space(vgpu, param->primary); 231 304 232 - ret = setup_vgpu_mmio(vgpu); 305 + ret = intel_vgpu_init_mmio(vgpu); 233 306 if (ret) 234 307 goto out_clean_idr; 235 308 ··· 281 354 out_clean_vgpu_resource: 282 355 intel_vgpu_free_resource(vgpu); 283 356 out_clean_vgpu_mmio: 284 - clean_vgpu_mmio(vgpu); 357 + intel_vgpu_clean_mmio(vgpu); 285 358 out_clean_idr: 286 359 idr_remove(&gvt->vgpu_idr, vgpu->id); 287 360 out_free_vgpu: ··· 327 400 } 328 401 329 402 /** 330 - * intel_gvt_reset_vgpu - reset a virtual GPU 403 + * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset 404 + * @vgpu: virtual GPU 405 + * @dmlr: vGPU Device Model Level Reset or GT Reset 406 + * @engine_mask: engines to reset for GT reset 407 + * 408 + * This function is called when user wants to reset a virtual GPU through 409 + * device model reset or GT reset. The caller should hold the gvt lock. 
410 + * 411 + * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset 412 + * the whole vGPU to default state as when it is created. This vGPU function 413 + * is required both for functionality and security concerns. The ultimate goal 414 + * of vGPU FLR is to reuse a vGPU instance by virtual machines. When we 415 + * assign a vGPU to a virtual machine we must issue such reset first. 416 + * 417 + * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines 418 + * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec. 419 + * Unlike the FLR, GT reset only reset particular resource of a vGPU per 420 + * the reset request. Guest driver can issue a GT reset by programming the 421 + * virtual GDRST register to reset specific virtual GPU engine or all 422 + * engines. 423 + * 424 + * The parameter dmlr is to identify if we will do DMLR or GT reset. 425 + * The parameter engine_mask is to specify the engines that need to be 426 + * reset. If value ALL_ENGINES is given for engine_mask, it means 427 + * the caller requests a full GT reset that we will reset all virtual 428 + * GPU engines. For FLR, engine_mask is ignored. 429 + */ 430 + void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr, 431 + unsigned int engine_mask) 432 + { 433 + struct intel_gvt *gvt = vgpu->gvt; 434 + struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 435 + 436 + gvt_dbg_core("------------------------------------------\n"); 437 + gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n", 438 + vgpu->id, dmlr, engine_mask); 439 + vgpu->resetting = true; 440 + 441 + intel_vgpu_stop_schedule(vgpu); 442 + /* 443 + * The current_vgpu will set to NULL after stopping the 444 + * scheduler when the reset is triggered by current vgpu. 
445 + */ 446 + if (scheduler->current_vgpu == NULL) { 447 + mutex_unlock(&gvt->lock); 448 + intel_gvt_wait_vgpu_idle(vgpu); 449 + mutex_lock(&gvt->lock); 450 + } 451 + 452 + intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask); 453 + 454 + /* full GPU reset or device model level reset */ 455 + if (engine_mask == ALL_ENGINES || dmlr) { 456 + intel_vgpu_reset_gtt(vgpu, dmlr); 457 + intel_vgpu_reset_resource(vgpu); 458 + intel_vgpu_reset_mmio(vgpu); 459 + populate_pvinfo_page(vgpu); 460 + 461 + if (dmlr) 462 + intel_vgpu_reset_cfg_space(vgpu); 463 + } 464 + 465 + vgpu->resetting = false; 466 + gvt_dbg_core("reset vgpu%d done\n", vgpu->id); 467 + gvt_dbg_core("------------------------------------------\n"); 468 + } 469 + 470 + /** 471 + * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level) 331 472 * @vgpu: virtual GPU 332 473 * 333 474 * This function is called when user wants to reset a virtual GPU. ··· 403 408 */ 404 409 void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu) 405 410 { 411 + mutex_lock(&vgpu->gvt->lock); 412 + intel_gvt_reset_vgpu_locked(vgpu, true, 0); 413 + mutex_unlock(&vgpu->gvt->lock); 406 414 }