Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'gvt-fixes-2017-03-17' of https://github.com/01org/gvt-linux into drm-intel-fixes

gvt-fixes-2017-03-17

- force_nonpriv reg handling in cmd parser (Yan)
- gvt error message cleanup (Tina)
- i915_wait_request fix from Chris
- KVM srcu warning fix (Changbin)
- ensure shadow ctx pinned (Chuanxiao)
- critical gvt scheduler interval time fix (Zhenyu)
- etc.

Signed-off-by: Jani Nikula <jani.nikula@intel.com>

+268 -178
+4 -4
drivers/gpu/drm/i915/gvt/aperture_gm.c
···
 	const char *item;
 
 	if (!param->low_gm_sz || !param->high_gm_sz || !param->fence_sz) {
-		gvt_err("Invalid vGPU creation params\n");
+		gvt_vgpu_err("Invalid vGPU creation params\n");
 		return -EINVAL;
 	}
···
 	return 0;
 
 no_enough_resource:
-	gvt_err("vgpu%d: fail to allocate resource %s\n", vgpu->id, item);
-	gvt_err("vgpu%d: request %luMB avail %luMB max %luMB taken %luMB\n",
-		vgpu->id, BYTES_TO_MB(request), BYTES_TO_MB(avail),
+	gvt_vgpu_err("fail to allocate resource %s\n", item);
+	gvt_vgpu_err("request %luMB avail %luMB max %luMB taken %luMB\n",
+		BYTES_TO_MB(request), BYTES_TO_MB(avail),
 		BYTES_TO_MB(max), BYTES_TO_MB(taken));
 	return -ENOSPC;
 }
+72 -37
drivers/gpu/drm/i915/gvt/cmd_parser.c
··· 817 817 return ret; 818 818 } 819 819 820 + static inline bool is_force_nonpriv_mmio(unsigned int offset) 821 + { 822 + return (offset >= 0x24d0 && offset < 0x2500); 823 + } 824 + 825 + static int force_nonpriv_reg_handler(struct parser_exec_state *s, 826 + unsigned int offset, unsigned int index) 827 + { 828 + struct intel_gvt *gvt = s->vgpu->gvt; 829 + unsigned int data = cmd_val(s, index + 1); 830 + 831 + if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) { 832 + gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n", 833 + offset, data); 834 + return -EINVAL; 835 + } 836 + return 0; 837 + } 838 + 820 839 static int cmd_reg_handler(struct parser_exec_state *s, 821 840 unsigned int offset, unsigned int index, char *cmd) 822 841 { ··· 843 824 struct intel_gvt *gvt = vgpu->gvt; 844 825 845 826 if (offset + 4 > gvt->device_info.mmio_size) { 846 - gvt_err("%s access to (%x) outside of MMIO range\n", 827 + gvt_vgpu_err("%s access to (%x) outside of MMIO range\n", 847 828 cmd, offset); 848 829 return -EINVAL; 849 830 } 850 831 851 832 if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) { 852 - gvt_err("vgpu%d: %s access to non-render register (%x)\n", 853 - s->vgpu->id, cmd, offset); 833 + gvt_vgpu_err("%s access to non-render register (%x)\n", 834 + cmd, offset); 854 835 return 0; 855 836 } 856 837 857 838 if (is_shadowed_mmio(offset)) { 858 - gvt_err("vgpu%d: found access of shadowed MMIO %x\n", 859 - s->vgpu->id, offset); 839 + gvt_vgpu_err("found access of shadowed MMIO %x\n", offset); 860 840 return 0; 861 841 } 842 + 843 + if (is_force_nonpriv_mmio(offset) && 844 + force_nonpriv_reg_handler(s, offset, index)) 845 + return -EINVAL; 862 846 863 847 if (offset == i915_mmio_reg_offset(DERRMR) || 864 848 offset == i915_mmio_reg_offset(FORCEWAKE_MT)) { ··· 1030 1008 ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl"); 1031 1009 else if (post_sync == 1) { 1032 1010 /* check ggtt*/ 1033 - if ((cmd_val(s, 2) & (1 << 2))) { 1011 + if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) { 1034 1012 gma = cmd_val(s, 2) & GENMASK(31, 3); 1035 1013 if (gmadr_bytes == 8) 1036 1014 gma |= (cmd_gma_hi(s, 3)) << 32; ··· 1151 1129 struct mi_display_flip_command_info *info) 1152 1130 { 1153 1131 struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv; 1132 + struct intel_vgpu *vgpu = s->vgpu; 1154 1133 u32 dword0 = cmd_val(s, 0); 1155 1134 u32 dword1 = cmd_val(s, 1); 1156 1135 u32 dword2 = cmd_val(s, 2); ··· 1190 1167 break; 1191 1168 1192 1169 default: 1193 - gvt_err("unknown plane code %d\n", plane); 1170 + gvt_vgpu_err("unknown plane code %d\n", plane); 1194 1171 return -EINVAL; 1195 1172 } 1196 1173 ··· 1297 1274 static int cmd_handler_mi_display_flip(struct parser_exec_state *s) 1298 1275 { 1299 1276 struct mi_display_flip_command_info info; 1277 + struct intel_vgpu *vgpu = s->vgpu; 1300 1278 int ret; 1301 1279 int i; 1302 1280 int len = cmd_length(s); 1303 1281 1304 1282 ret = decode_mi_display_flip(s, &info); 1305 1283 if (ret) { 1306 - gvt_err("fail to decode MI display flip command\n"); 1284 + gvt_vgpu_err("fail to decode MI display flip command\n"); 1307 1285 return ret; 1308 1286 } 1309 1287 1310 1288 ret = check_mi_display_flip(s, &info); 1311 1289 if (ret) { 1312 - gvt_err("invalid MI display flip command\n"); 1290 + gvt_vgpu_err("invalid MI display flip command\n"); 1313 1291 return ret; 1314 1292 } 1315 1293 1316 1294 ret = update_plane_mmio_from_mi_display_flip(s, &info); 1317 1295 if (ret) { 1318 - gvt_err("fail to update plane mmio\n"); 1296 + gvt_vgpu_err("fail to update plane 
mmio\n"); 1319 1297 return ret; 1320 1298 } 1321 1299 ··· 1374 1350 int ret; 1375 1351 1376 1352 if (op_size > max_surface_size) { 1377 - gvt_err("command address audit fail name %s\n", s->info->name); 1353 + gvt_vgpu_err("command address audit fail name %s\n", 1354 + s->info->name); 1378 1355 return -EINVAL; 1379 1356 } 1380 1357 ··· 1392 1367 } 1393 1368 return 0; 1394 1369 err: 1395 - gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", 1370 + gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n", 1396 1371 s->info->name, guest_gma, op_size); 1397 1372 1398 1373 pr_err("cmd dump: "); ··· 1437 1412 1438 1413 static inline int unexpected_cmd(struct parser_exec_state *s) 1439 1414 { 1440 - gvt_err("vgpu%d: Unexpected %s in command buffer!\n", 1441 - s->vgpu->id, s->info->name); 1415 + struct intel_vgpu *vgpu = s->vgpu; 1416 + 1417 + gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name); 1418 + 1442 1419 return -EINVAL; 1443 1420 } 1444 1421 ··· 1543 1516 while (gma != end_gma) { 1544 1517 gpa = intel_vgpu_gma_to_gpa(mm, gma); 1545 1518 if (gpa == INTEL_GVT_INVALID_ADDR) { 1546 - gvt_err("invalid gma address: %lx\n", gma); 1519 + gvt_vgpu_err("invalid gma address: %lx\n", gma); 1547 1520 return -EFAULT; 1548 1521 } 1549 1522 ··· 1584 1557 uint32_t bb_size = 0; 1585 1558 uint32_t cmd_len = 0; 1586 1559 bool met_bb_end = false; 1560 + struct intel_vgpu *vgpu = s->vgpu; 1587 1561 u32 cmd; 1588 1562 1589 1563 /* get the start gm address of the batch buffer */ ··· 1593 1565 1594 1566 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 1595 1567 if (info == NULL) { 1596 - gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 1568 + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", 1597 1569 cmd, get_opcode(cmd, s->ring_id)); 1598 1570 return -EINVAL; 1599 1571 } ··· 1602 1574 gma, gma + 4, &cmd); 1603 1575 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 1604 1576 if (info == NULL) { 1605 - gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 1577 + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", 1606 1578 cmd, get_opcode(cmd, s->ring_id)); 1607 1579 return -EINVAL; 1608 1580 } ··· 1627 1599 static int perform_bb_shadow(struct parser_exec_state *s) 1628 1600 { 1629 1601 struct intel_shadow_bb_entry *entry_obj; 1602 + struct intel_vgpu *vgpu = s->vgpu; 1630 1603 unsigned long gma = 0; 1631 1604 uint32_t bb_size; 1632 1605 void *dst = NULL; ··· 1662 1633 1663 1634 ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false); 1664 1635 if (ret) { 1665 - gvt_err("failed to set shadow batch to CPU\n"); 1636 + gvt_vgpu_err("failed to set shadow batch to CPU\n"); 1666 1637 goto unmap_src; 1667 1638 } 1668 1639 ··· 1674 1645 gma, gma + bb_size, 1675 1646 dst); 1676 1647 if (ret) { 1677 - gvt_err("fail to copy guest ring buffer\n"); 1648 + gvt_vgpu_err("fail to copy guest ring buffer\n"); 1678 1649 goto unmap_src; 1679 1650 } 1680 1651 ··· 1705 1676 { 1706 1677 bool second_level; 1707 1678 int ret = 0; 1679 + struct intel_vgpu *vgpu = s->vgpu; 1708 1680 1709 1681 if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) { 1710 - gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); 1682 + gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n"); 1711 1683 return -EINVAL; 1712 1684 } 1713 1685 1714 1686 second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1; 1715 1687 if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) { 1716 - gvt_err("Jumping to 2nd level BB from RB is not allowed\n"); 1688 + gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n"); 1717 
1689 return -EINVAL; 1718 1690 } 1719 1691 ··· 1732 1702 if (batch_buffer_needs_scan(s)) { 1733 1703 ret = perform_bb_shadow(s); 1734 1704 if (ret < 0) 1735 - gvt_err("invalid shadow batch buffer\n"); 1705 + gvt_vgpu_err("invalid shadow batch buffer\n"); 1736 1706 } else { 1737 1707 /* emulate a batch buffer end to do return right */ 1738 1708 ret = cmd_handler_mi_batch_buffer_end(s); ··· 2459 2429 int ret = 0; 2460 2430 cycles_t t0, t1, t2; 2461 2431 struct parser_exec_state s_before_advance_custom; 2432 + struct intel_vgpu *vgpu = s->vgpu; 2462 2433 2463 2434 t0 = get_cycles(); 2464 2435 ··· 2467 2436 2468 2437 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id); 2469 2438 if (info == NULL) { 2470 - gvt_err("unknown cmd 0x%x, opcode=0x%x\n", 2439 + gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n", 2471 2440 cmd, get_opcode(cmd, s->ring_id)); 2472 2441 return -EINVAL; 2473 2442 } ··· 2483 2452 if (info->handler) { 2484 2453 ret = info->handler(s); 2485 2454 if (ret < 0) { 2486 - gvt_err("%s handler error\n", info->name); 2455 + gvt_vgpu_err("%s handler error\n", info->name); 2487 2456 return ret; 2488 2457 } 2489 2458 } ··· 2494 2463 if (!(info->flag & F_IP_ADVANCE_CUSTOM)) { 2495 2464 ret = cmd_advance_default(s); 2496 2465 if (ret) { 2497 - gvt_err("%s IP advance error\n", info->name); 2466 + gvt_vgpu_err("%s IP advance error\n", info->name); 2498 2467 return ret; 2499 2468 } 2500 2469 } ··· 2517 2486 2518 2487 unsigned long gma_head, gma_tail, gma_bottom; 2519 2488 int ret = 0; 2489 + struct intel_vgpu *vgpu = s->vgpu; 2520 2490 2521 2491 gma_head = rb_start + rb_head; 2522 2492 gma_tail = rb_start + rb_tail; ··· 2529 2497 if (s->buf_type == RING_BUFFER_INSTRUCTION) { 2530 2498 if (!(s->ip_gma >= rb_start) || 2531 2499 !(s->ip_gma < gma_bottom)) { 2532 - gvt_err("ip_gma %lx out of ring scope." 2500 + gvt_vgpu_err("ip_gma %lx out of ring scope." 2533 2501 "(base:0x%lx, bottom: 0x%lx)\n", 2534 2502 s->ip_gma, rb_start, 2535 2503 gma_bottom); ··· 2537 2505 return -EINVAL; 2538 2506 } 2539 2507 if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) { 2540 - gvt_err("ip_gma %lx out of range." 2508 + gvt_vgpu_err("ip_gma %lx out of range." 
2541 2509 "base 0x%lx head 0x%lx tail 0x%lx\n", 2542 2510 s->ip_gma, rb_start, 2543 2511 rb_head, rb_tail); ··· 2547 2515 } 2548 2516 ret = cmd_parser_exec(s); 2549 2517 if (ret) { 2550 - gvt_err("cmd parser error\n"); 2518 + gvt_vgpu_err("cmd parser error\n"); 2551 2519 parser_exec_state_dump(s); 2552 2520 break; 2553 2521 } ··· 2671 2639 gma_head, gma_top, 2672 2640 workload->shadow_ring_buffer_va); 2673 2641 if (ret) { 2674 - gvt_err("fail to copy guest ring buffer\n"); 2642 + gvt_vgpu_err("fail to copy guest ring buffer\n"); 2675 2643 return ret; 2676 2644 } 2677 2645 copy_len = gma_top - gma_head; ··· 2683 2651 gma_head, gma_tail, 2684 2652 workload->shadow_ring_buffer_va + copy_len); 2685 2653 if (ret) { 2686 - gvt_err("fail to copy guest ring buffer\n"); 2654 + gvt_vgpu_err("fail to copy guest ring buffer\n"); 2687 2655 return ret; 2688 2656 } 2689 2657 ring->tail += workload->rb_len; ··· 2694 2662 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload) 2695 2663 { 2696 2664 int ret; 2665 + struct intel_vgpu *vgpu = workload->vgpu; 2697 2666 2698 2667 ret = shadow_workload_ring_buffer(workload); 2699 2668 if (ret) { 2700 - gvt_err("fail to shadow workload ring_buffer\n"); 2669 + gvt_vgpu_err("fail to shadow workload ring_buffer\n"); 2701 2670 return ret; 2702 2671 } 2703 2672 2704 2673 ret = scan_workload(workload); 2705 2674 if (ret) { 2706 - gvt_err("scan workload error\n"); 2675 + gvt_vgpu_err("scan workload error\n"); 2707 2676 return ret; 2708 2677 } 2709 2678 return 0; ··· 2714 2681 { 2715 2682 int ctx_size = wa_ctx->indirect_ctx.size; 2716 2683 unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma; 2684 + struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; 2717 2685 struct drm_i915_gem_object *obj; 2718 2686 int ret = 0; 2719 2687 void *map; ··· 2728 2694 /* get the va of the shadow batch buffer */ 2729 2695 map = i915_gem_object_pin_map(obj, I915_MAP_WB); 2730 2696 if (IS_ERR(map)) { 2731 - gvt_err("failed to vmap shadow indirect ctx\n"); 2697 + gvt_vgpu_err("failed to vmap shadow indirect ctx\n"); 2732 2698 ret = PTR_ERR(map); 2733 2699 goto put_obj; 2734 2700 } 2735 2701 2736 2702 ret = i915_gem_object_set_to_cpu_domain(obj, false); 2737 2703 if (ret) { 2738 - gvt_err("failed to set shadow indirect ctx to CPU\n"); 2704 + gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n"); 2739 2705 goto unmap_src; 2740 2706 } 2741 2707 ··· 2744 2710 guest_gma, guest_gma + ctx_size, 2745 2711 map); 2746 2712 if (ret) { 2747 - gvt_err("fail to copy guest indirect ctx\n"); 2713 + gvt_vgpu_err("fail to copy guest indirect ctx\n"); 2748 2714 goto unmap_src; 2749 2715 } 2750 2716 ··· 2778 2744 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) 2779 2745 { 2780 2746 int ret; 2747 + struct intel_vgpu *vgpu = wa_ctx->workload->vgpu; 2781 2748 2782 2749 if (wa_ctx->indirect_ctx.size == 0) 2783 2750 return 0; 2784 2751 2785 2752 ret = shadow_indirect_ctx(wa_ctx); 2786 2753 if (ret) { 2787 - gvt_err("fail to shadow indirect ctx\n"); 2754 + gvt_vgpu_err("fail to shadow indirect ctx\n"); 2788 2755 return ret; 2789 2756 } 2790 2757 ··· 2793 2758 2794 2759 ret = scan_wa_ctx(wa_ctx); 2795 2760 if (ret) { 2796 - gvt_err("scan wa ctx error\n"); 2761 + gvt_vgpu_err("scan wa ctx error\n"); 2797 2762 return ret; 2798 2763 } 2799 2764
+8
drivers/gpu/drm/i915/gvt/debug.h
···
 #define gvt_err(fmt, args...) \
 	DRM_ERROR("gvt: "fmt, ##args)
 
+#define gvt_vgpu_err(fmt, args...) \
+	do { \
+		if (IS_ERR_OR_NULL(vgpu)) \
+			DRM_DEBUG_DRIVER("gvt: "fmt, ##args); \
+		else \
+			DRM_DEBUG_DRIVER("gvt: vgpu %d: "fmt, vgpu->id, ##args);\
+	} while (0)
+
 #define gvt_dbg_core(fmt, args...) \
 	DRM_DEBUG_DRIVER("gvt: core: "fmt, ##args)
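The new gvt_vgpu_err() macro above expands an unqualified `vgpu` identifier, so every call site converted in this series needs a local `struct intel_vgpu *vgpu` in scope; that is why hunks in cmd_parser.c, execlist.c, gtt.c and other files add lines such as `struct intel_vgpu *vgpu = s->vgpu;` before the first use. A minimal userspace sketch of the same pattern (the struct, the fprintf-based logging and the handler are illustrative stand-ins, not the kernel code):

#include <stdio.h>

struct intel_vgpu { int id; };

/* Stand-in for gvt_vgpu_err(): like the kernel macro, it refers to a
 * variable literally named "vgpu" that the caller must have in scope. */
#define gvt_vgpu_err(fmt, ...) \
	do { \
		if (!vgpu) \
			fprintf(stderr, "gvt: " fmt, ##__VA_ARGS__); \
		else \
			fprintf(stderr, "gvt: vgpu %d: " fmt, vgpu->id, ##__VA_ARGS__); \
	} while (0)

static int cmd_handler(struct intel_vgpu *v, unsigned int offset)
{
	struct intel_vgpu *vgpu = v;	/* local "vgpu" makes the macro usable */

	gvt_vgpu_err("access to non-render register (%x)\n", offset);
	return -1;
}

int main(void)
{
	struct intel_vgpu vgpu0 = { .id = 3 };

	cmd_handler(&vgpu0, 0x24d0);
	return 0;
}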
+6 -7
drivers/gpu/drm/i915/gvt/edid.c
···
 	unsigned char chr = 0;
 
 	if (edid->state == I2C_NOT_SPECIFIED || !edid->slave_selected) {
-		gvt_err("Driver tries to read EDID without proper sequence!\n");
+		gvt_vgpu_err("Driver tries to read EDID without proper sequence!\n");
 		return 0;
 	}
 	if (edid->current_edid_read >= EDID_SIZE) {
-		gvt_err("edid_get_byte() exceeds the size of EDID!\n");
+		gvt_vgpu_err("edid_get_byte() exceeds the size of EDID!\n");
 		return 0;
 	}
 
 	if (!edid->edid_available) {
-		gvt_err("Reading EDID but EDID is not available!\n");
+		gvt_vgpu_err("Reading EDID but EDID is not available!\n");
 		return 0;
 	}
···
 		chr = edid_data->edid_block[edid->current_edid_read];
 		edid->current_edid_read++;
 	} else {
-		gvt_err("No EDID available during the reading?\n");
+		gvt_vgpu_err("No EDID available during the reading?\n");
 	}
 	return chr;
 }
···
 		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
 		break;
 	default:
-		gvt_err("Unknown/reserved GMBUS cycle detected!\n");
+		gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
 		break;
 	}
 	/*
···
 	 */
 	} else {
 		memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
-		gvt_err("vgpu%d: warning: gmbus3 read with nothing returned\n",
-			vgpu->id);
+		gvt_vgpu_err("warning: gmbus3 read with nothing returned\n");
 	}
 	return 0;
 }
+13 -16
drivers/gpu/drm/i915/gvt/execlist.c
···
 		struct intel_vgpu_execlist *execlist,
 		struct execlist_ctx_descriptor_format *ctx)
 {
+	struct intel_vgpu *vgpu = execlist->vgpu;
 	struct intel_vgpu_execlist_slot *running = execlist->running_slot;
 	struct intel_vgpu_execlist_slot *pending = execlist->pending_slot;
 	struct execlist_ctx_descriptor_format *ctx0 = &running->ctx[0];
···
 	gvt_dbg_el("schedule out context id %x\n", ctx->context_id);
 
 	if (WARN_ON(!same_context(ctx, execlist->running_context))) {
-		gvt_err("schedule out context is not running context,"
+		gvt_vgpu_err("schedule out context is not running context,"
 				"ctx id %x running ctx id %x\n",
 				ctx->context_id,
 				execlist->running_context->context_id);
···
 	status.udw = vgpu_vreg(vgpu, status_reg + 4);
 
 	if (status.execlist_queue_full) {
-		gvt_err("virtual execlist slots are full\n");
+		gvt_vgpu_err("virtual execlist slots are full\n");
 		return NULL;
 	}
 
···
 
 	struct execlist_ctx_descriptor_format *ctx0, *ctx1;
 	struct execlist_context_status_format status;
+	struct intel_vgpu *vgpu = execlist->vgpu;
 
 	gvt_dbg_el("emulate schedule-in\n");
 
 	if (!slot) {
-		gvt_err("no available execlist slot\n");
+		gvt_vgpu_err("no available execlist slot\n");
 		return -EINVAL;
 	}
 
···
 
 	vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
 	if (IS_ERR(vma)) {
-		gvt_err("Cannot pin\n");
 		return;
 	}
 
···
 	vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL,
 			0, CACHELINE_BYTES, 0);
 	if (IS_ERR(vma)) {
-		gvt_err("Cannot pin indirect ctx obj\n");
 		return;
 	}
 
···
 {
 	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
 	struct intel_vgpu_mm *mm;
+	struct intel_vgpu *vgpu = workload->vgpu;
 	int page_table_level;
 	u32 pdp[8];
 
···
 	} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
 		page_table_level = 4;
 	} else {
-		gvt_err("Advanced Context mode(SVM) is not supported!\n");
+		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
 		return -EINVAL;
 	}
 
···
 		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
 				pdp, page_table_level, 0);
 		if (IS_ERR(mm)) {
-			gvt_err("fail to create mm object.\n");
+			gvt_vgpu_err("fail to create mm object.\n");
 			return PTR_ERR(mm);
 		}
 	}
···
 	ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
 			(u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
 	if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
-		gvt_err("invalid guest context LRCA: %x\n", desc->lrca);
+		gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
 		return -EINVAL;
 	}
 
···
 			continue;
 
 		if (!desc[i]->privilege_access) {
-			gvt_err("vgpu%d: unexpected GGTT elsp submission\n",
-					vgpu->id);
+			gvt_vgpu_err("unexpected GGTT elsp submission\n");
 			return -EINVAL;
 		}
 
···
 	}
 
 	if (!valid_desc_bitmap) {
-		gvt_err("vgpu%d: no valid desc in a elsp submission\n",
-				vgpu->id);
+		gvt_vgpu_err("no valid desc in a elsp submission\n");
 		return -EINVAL;
 	}
 
 	if (!test_bit(0, (void *)&valid_desc_bitmap) &&
 			test_bit(1, (void *)&valid_desc_bitmap)) {
-		gvt_err("vgpu%d: weird elsp submission, desc 0 is not valid\n",
-				vgpu->id);
+		gvt_vgpu_err("weird elsp submission, desc 0 is not valid\n");
 		return -EINVAL;
 	}
 
···
 		ret = submit_context(vgpu, ring_id, &valid_desc[i],
 				emulate_schedule_in);
 		if (ret) {
-			gvt_err("vgpu%d: fail to schedule workload\n",
-					vgpu->id);
+			gvt_vgpu_err("fail to schedule workload\n");
 			return ret;
 		}
 		emulate_schedule_in = false;
+36 -38
drivers/gpu/drm/i915/gvt/gtt.c
··· 49 49 { 50 50 if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size 51 51 && !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) { 52 - gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n", 53 - vgpu->id, addr, size); 52 + gvt_vgpu_err("invalid range gmadr 0x%llx size 0x%x\n", 53 + addr, size); 54 54 return false; 55 55 } 56 56 return true; ··· 430 430 431 431 mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn); 432 432 if (mfn == INTEL_GVT_INVALID_ADDR) { 433 - gvt_err("fail to translate gfn: 0x%lx\n", gfn); 433 + gvt_vgpu_err("fail to translate gfn: 0x%lx\n", gfn); 434 434 return -ENXIO; 435 435 } 436 436 ··· 611 611 612 612 daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL); 613 613 if (dma_mapping_error(kdev, daddr)) { 614 - gvt_err("fail to map dma addr\n"); 614 + gvt_vgpu_err("fail to map dma addr\n"); 615 615 return -EINVAL; 616 616 } 617 617 ··· 735 735 if (reclaim_one_mm(vgpu->gvt)) 736 736 goto retry; 737 737 738 - gvt_err("fail to allocate ppgtt shadow page\n"); 738 + gvt_vgpu_err("fail to allocate ppgtt shadow page\n"); 739 739 return ERR_PTR(-ENOMEM); 740 740 } 741 741 ··· 750 750 */ 751 751 ret = init_shadow_page(vgpu, &spt->shadow_page, type); 752 752 if (ret) { 753 - gvt_err("fail to initialize shadow page for spt\n"); 753 + gvt_vgpu_err("fail to initialize shadow page for spt\n"); 754 754 goto err; 755 755 } 756 756 757 757 ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page, 758 758 gfn, ppgtt_write_protection_handler, NULL); 759 759 if (ret) { 760 - gvt_err("fail to initialize guest page for spt\n"); 760 + gvt_vgpu_err("fail to initialize guest page for spt\n"); 761 761 goto err; 762 762 } 763 763 ··· 776 776 if (p) 777 777 return shadow_page_to_ppgtt_spt(p); 778 778 779 - gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n", 780 - vgpu->id, mfn); 779 + gvt_vgpu_err("fail to find ppgtt shadow page: 0x%lx\n", mfn); 781 780 return NULL; 782 781 } 783 782 ··· 826 827 } 827 828 s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e)); 828 829 if (!s) { 829 - gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n", 830 - vgpu->id, ops->get_pfn(e)); 830 + gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n", 831 + ops->get_pfn(e)); 831 832 return -ENXIO; 832 833 } 833 834 return ppgtt_invalidate_shadow_page(s); ··· 835 836 836 837 static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt) 837 838 { 839 + struct intel_vgpu *vgpu = spt->vgpu; 838 840 struct intel_gvt_gtt_entry e; 839 841 unsigned long index; 840 842 int ret; ··· 854 854 855 855 for_each_present_shadow_entry(spt, &e, index) { 856 856 if (!gtt_type_is_pt(get_next_pt_type(e.type))) { 857 - gvt_err("GVT doesn't support pse bit for now\n"); 857 + gvt_vgpu_err("GVT doesn't support pse bit for now\n"); 858 858 return -EINVAL; 859 859 } 860 860 ret = ppgtt_invalidate_shadow_page_by_shadow_entry( ··· 868 868 ppgtt_free_shadow_page(spt); 869 869 return 0; 870 870 fail: 871 - gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n", 872 - spt->vgpu->id, spt, e.val64, e.type); 871 + gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n", 872 + spt, e.val64, e.type); 873 873 return ret; 874 874 } 875 875 ··· 914 914 } 915 915 return s; 916 916 fail: 917 - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 918 - vgpu->id, s, we->val64, we->type); 917 + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", 918 + s, we->val64, we->type); 919 919 return ERR_PTR(ret); 920 920 } 921 921 ··· 953 953 954 954 for_each_present_guest_entry(spt, &ge, i) { 
955 955 if (!gtt_type_is_pt(get_next_pt_type(ge.type))) { 956 - gvt_err("GVT doesn't support pse bit now\n"); 956 + gvt_vgpu_err("GVT doesn't support pse bit now\n"); 957 957 ret = -EINVAL; 958 958 goto fail; 959 959 } ··· 969 969 } 970 970 return 0; 971 971 fail: 972 - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 973 - vgpu->id, spt, ge.val64, ge.type); 972 + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", 973 + spt, ge.val64, ge.type); 974 974 return ret; 975 975 } 976 976 ··· 999 999 struct intel_vgpu_ppgtt_spt *s = 1000 1000 ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e)); 1001 1001 if (!s) { 1002 - gvt_err("fail to find guest page\n"); 1002 + gvt_vgpu_err("fail to find guest page\n"); 1003 1003 ret = -ENXIO; 1004 1004 goto fail; 1005 1005 } ··· 1011 1011 ppgtt_set_shadow_entry(spt, &e, index); 1012 1012 return 0; 1013 1013 fail: 1014 - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n", 1015 - vgpu->id, spt, e.val64, e.type); 1014 + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n", 1015 + spt, e.val64, e.type); 1016 1016 return ret; 1017 1017 } 1018 1018 ··· 1046 1046 } 1047 1047 return 0; 1048 1048 fail: 1049 - gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id, 1050 - spt, we->val64, we->type); 1049 + gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n", 1050 + spt, we->val64, we->type); 1051 1051 return ret; 1052 1052 } 1053 1053 ··· 1250 1250 } 1251 1251 return 0; 1252 1252 fail: 1253 - gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n", 1254 - vgpu->id, spt, we->val64, we->type); 1253 + gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n", 1254 + spt, we->val64, we->type); 1255 1255 return ret; 1256 1256 } 1257 1257 ··· 1493 1493 1494 1494 spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge); 1495 1495 if (IS_ERR(spt)) { 1496 - gvt_err("fail to populate guest root pointer\n"); 1496 + gvt_vgpu_err("fail to populate guest root pointer\n"); 1497 1497 ret = PTR_ERR(spt); 1498 1498 goto fail; 1499 1499 } ··· 1566 1566 1567 1567 ret = gtt->mm_alloc_page_table(mm); 1568 1568 if (ret) { 1569 - gvt_err("fail to allocate page table for mm\n"); 1569 + gvt_vgpu_err("fail to allocate page table for mm\n"); 1570 1570 goto fail; 1571 1571 } 1572 1572 ··· 1584 1584 } 1585 1585 return mm; 1586 1586 fail: 1587 - gvt_err("fail to create mm\n"); 1587 + gvt_vgpu_err("fail to create mm\n"); 1588 1588 if (mm) 1589 1589 intel_gvt_mm_unreference(mm); 1590 1590 return ERR_PTR(ret); ··· 1760 1760 mm->page_table_level, gma, gpa); 1761 1761 return gpa; 1762 1762 err: 1763 - gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma); 1763 + gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma); 1764 1764 return INTEL_GVT_INVALID_ADDR; 1765 1765 } 1766 1766 ··· 1836 1836 if (ops->test_present(&e)) { 1837 1837 ret = gtt_entry_p2m(vgpu, &e, &m); 1838 1838 if (ret) { 1839 - gvt_err("vgpu%d: fail to translate guest gtt entry\n", 1840 - vgpu->id); 1839 + gvt_vgpu_err("fail to translate guest gtt entry\n"); 1841 1840 return ret; 1842 1841 } 1843 1842 } else { ··· 1892 1893 1893 1894 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL); 1894 1895 if (!scratch_pt) { 1895 - gvt_err("fail to allocate scratch page\n"); 1896 + gvt_vgpu_err("fail to allocate scratch page\n"); 1896 1897 return -ENOMEM; 1897 1898 } 1898 1899 1899 1900 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 1900 1901 4096, PCI_DMA_BIDIRECTIONAL); 1901 1902 if (dma_mapping_error(dev, daddr)) { 1902 - 
gvt_err("fail to dmamap scratch_pt\n"); 1903 + gvt_vgpu_err("fail to dmamap scratch_pt\n"); 1903 1904 __free_page(virt_to_page(scratch_pt)); 1904 1905 return -ENOMEM; 1905 1906 } ··· 2002 2003 ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT, 2003 2004 NULL, 1, 0); 2004 2005 if (IS_ERR(ggtt_mm)) { 2005 - gvt_err("fail to create mm for ggtt.\n"); 2006 + gvt_vgpu_err("fail to create mm for ggtt.\n"); 2006 2007 return PTR_ERR(ggtt_mm); 2007 2008 } 2008 2009 ··· 2075 2076 for (i = 0; i < preallocated_oos_pages; i++) { 2076 2077 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL); 2077 2078 if (!oos_page) { 2078 - gvt_err("fail to pre-allocate oos page\n"); 2079 2079 ret = -ENOMEM; 2080 2080 goto fail; 2081 2081 } ··· 2164 2166 mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, 2165 2167 pdp, page_table_level, 0); 2166 2168 if (IS_ERR(mm)) { 2167 - gvt_err("fail to create mm\n"); 2169 + gvt_vgpu_err("fail to create mm\n"); 2168 2170 return PTR_ERR(mm); 2169 2171 } 2170 2172 } ··· 2194 2196 2195 2197 mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp); 2196 2198 if (!mm) { 2197 - gvt_err("fail to find ppgtt instance.\n"); 2199 + gvt_vgpu_err("fail to find ppgtt instance.\n"); 2198 2200 return -EINVAL; 2199 2201 } 2200 2202 intel_gvt_mm_unreference(mm);
+30 -15
drivers/gpu/drm/i915/gvt/handlers.c
···
 				GVT_FAILSAFE_UNSUPPORTED_GUEST);
 
 	if (!vgpu->mmio.disable_warn_untrack) {
-		gvt_err("vgpu%d: found oob fence register access\n",
-				vgpu->id);
-		gvt_err("vgpu%d: total fence %d, access fence %d\n",
-				vgpu->id, vgpu_fence_sz(vgpu),
-				fence_num);
+		gvt_vgpu_err("found oob fence register access\n");
+		gvt_vgpu_err("total fence %d, access fence %d\n",
+				vgpu_fence_sz(vgpu), fence_num);
 	}
 	memset(p_data, 0, bytes);
 	return -EINVAL;
···
 			break;
 		default:
 			/*should not hit here*/
-			gvt_err("invalid forcewake offset 0x%x\n", offset);
+			gvt_vgpu_err("invalid forcewake offset 0x%x\n", offset);
 			return -EINVAL;
 		}
 	} else {
···
 		fdi_tx_train_bits = FDI_LINK_TRAIN_PATTERN_2;
 		fdi_iir_check_bits = FDI_RX_SYMBOL_LOCK;
 	} else {
-		gvt_err("Invalid train pattern %d\n", train_pattern);
+		gvt_vgpu_err("Invalid train pattern %d\n", train_pattern);
 		return -EINVAL;
 	}
 
···
 	else if (FDI_RX_IMR_TO_PIPE(offset) != INVALID_INDEX)
 		index = FDI_RX_IMR_TO_PIPE(offset);
 	else {
-		gvt_err("Unsupport registers %x\n", offset);
+		gvt_vgpu_err("Unsupport registers %x\n", offset);
 		return -EINVAL;
 	}
 
···
 	u32 data;
 
 	if (!dpy_is_valid_port(port_index)) {
-		gvt_err("GVT(%d): Unsupported DP port access!\n", vgpu->id);
+		gvt_vgpu_err("Unsupported DP port access!\n");
 		return 0;
 	}
 
···
 
 	if (i == num) {
 		if (num == SBI_REG_MAX) {
-			gvt_err("vgpu%d: SBI caching meets maximum limits\n",
-					vgpu->id);
+			gvt_vgpu_err("SBI caching meets maximum limits\n");
 			return;
 		}
 		display->sbi.number++;
···
 		break;
 	}
 	if (invalid_read)
-		gvt_err("invalid pvinfo read: [%x:%x] = %x\n",
+		gvt_vgpu_err("invalid pvinfo read: [%x:%x] = %x\n",
 				offset, bytes, *(u32 *)p_data);
 	vgpu->pv_notified = true;
 	return 0;
···
 	case 1:	/* Remove this in guest driver. */
 		break;
 	default:
-		gvt_err("Invalid PV notification %d\n", notification);
+		gvt_vgpu_err("Invalid PV notification %d\n", notification);
 	}
 	return ret;
 }
···
 		enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE);
 		break;
 	default:
-		gvt_err("invalid pvinfo write offset %x bytes %x data %x\n",
+		gvt_vgpu_err("invalid pvinfo write offset %x bytes %x data %x\n",
 				offset, bytes, data);
 		break;
 	}
···
 	if (execlist->elsp_dwords.index == 3) {
 		ret = intel_vgpu_submit_execlist(vgpu, ring_id);
 		if(ret)
-			gvt_err("fail submit workload on ring %d\n", ring_id);
+			gvt_vgpu_err("fail submit workload on ring %d\n",
+				ring_id);
 	}
 
 	++execlist->elsp_dwords.index;
···
 {
 	write_vreg(vgpu, offset, p_data, bytes);
 	return 0;
+}
+
+/**
+ * intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
+ * force-nopriv register
+ *
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the register is in force-nonpriv whitelist;
+ * False if outside;
+ */
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+					  unsigned int offset)
+{
+	return in_whitelist(offset);
 }
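Together with the cmd_parser.c changes above, the new intel_gvt_in_force_nonpriv_whitelist() helper implements the force_nonpriv handling mentioned in the changelog: an LRI write that targets one of the FORCE_NONPRIV registers (offsets 0x24d0 up to, but not including, 0x2500) is only accepted if the value being written is itself a whitelisted register offset. A self-contained sketch of that two-step check (the whitelist contents and the check_lri_write() wrapper are illustrative; the real in_whitelist() table is not shown in these hunks):

#include <stdbool.h>
#include <stdio.h>

/* Range check, as in the is_force_nonpriv_mmio() hunk in cmd_parser.c. */
static bool is_force_nonpriv_mmio(unsigned int offset)
{
	return (offset >= 0x24d0 && offset < 0x2500);
}

/* Illustrative whitelist; the real one is a table of register offsets. */
static const unsigned int whitelist[] = { 0x2248, 0x5500 };

static bool in_whitelist(unsigned int reg)
{
	for (unsigned int i = 0; i < sizeof(whitelist) / sizeof(whitelist[0]); i++)
		if (whitelist[i] == reg)
			return true;
	return false;
}

/* Models force_nonpriv_reg_handler(): offset is the register written by
 * the LRI command, data is the payload (itself a register offset). */
static int check_lri_write(unsigned int offset, unsigned int data)
{
	if (is_force_nonpriv_mmio(offset) && !in_whitelist(data)) {
		fprintf(stderr,
			"Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
			offset, data);
		return -1;
	}
	return 0;
}

int main(void)
{
	check_lri_write(0x24d0, 0x2248);	/* allowed */
	check_lri_write(0x24d0, 0xdead);	/* rejected */
	return 0;
}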
+23 -14
drivers/gpu/drm/i915/gvt/kvmgt.c
··· 426 426 427 427 static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev) 428 428 { 429 - struct intel_vgpu *vgpu; 429 + struct intel_vgpu *vgpu = NULL; 430 430 struct intel_vgpu_type *type; 431 431 struct device *pdev; 432 432 void *gvt; ··· 437 437 438 438 type = intel_gvt_find_vgpu_type(gvt, kobject_name(kobj)); 439 439 if (!type) { 440 - gvt_err("failed to find type %s to create\n", 440 + gvt_vgpu_err("failed to find type %s to create\n", 441 441 kobject_name(kobj)); 442 442 ret = -EINVAL; 443 443 goto out; ··· 446 446 vgpu = intel_gvt_ops->vgpu_create(gvt, type); 447 447 if (IS_ERR_OR_NULL(vgpu)) { 448 448 ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu); 449 - gvt_err("failed to create intel vgpu: %d\n", ret); 449 + gvt_vgpu_err("failed to create intel vgpu: %d\n", ret); 450 450 goto out; 451 451 } 452 452 ··· 526 526 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events, 527 527 &vgpu->vdev.iommu_notifier); 528 528 if (ret != 0) { 529 - gvt_err("vfio_register_notifier for iommu failed: %d\n", ret); 529 + gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n", 530 + ret); 530 531 goto out; 531 532 } 532 533 ··· 535 534 ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events, 536 535 &vgpu->vdev.group_notifier); 537 536 if (ret != 0) { 538 - gvt_err("vfio_register_notifier for group failed: %d\n", ret); 537 + gvt_vgpu_err("vfio_register_notifier for group failed: %d\n", 538 + ret); 539 539 goto undo_iommu; 540 540 } 541 541 ··· 637 635 638 636 639 637 if (index >= VFIO_PCI_NUM_REGIONS) { 640 - gvt_err("invalid index: %u\n", index); 638 + gvt_vgpu_err("invalid index: %u\n", index); 641 639 return -EINVAL; 642 640 } 643 641 ··· 671 669 case VFIO_PCI_VGA_REGION_INDEX: 672 670 case VFIO_PCI_ROM_REGION_INDEX: 673 671 default: 674 - gvt_err("unsupported region: %u\n", index); 672 + gvt_vgpu_err("unsupported region: %u\n", index); 675 673 } 676 674 677 675 return ret == 0 ? 
count : ret; ··· 863 861 864 862 trigger = eventfd_ctx_fdget(fd); 865 863 if (IS_ERR(trigger)) { 866 - gvt_err("eventfd_ctx_fdget failed\n"); 864 + gvt_vgpu_err("eventfd_ctx_fdget failed\n"); 867 865 return PTR_ERR(trigger); 868 866 } 869 867 vgpu->vdev.msi_trigger = trigger; ··· 1122 1120 ret = vfio_set_irqs_validate_and_prepare(&hdr, max, 1123 1121 VFIO_PCI_NUM_IRQS, &data_size); 1124 1122 if (ret) { 1125 - gvt_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); 1123 + gvt_vgpu_err("intel:vfio_set_irqs_validate_and_prepare failed\n"); 1126 1124 return -EINVAL; 1127 1125 } 1128 1126 if (data_size) { ··· 1312 1310 1313 1311 kvm = vgpu->vdev.kvm; 1314 1312 if (!kvm || kvm->mm != current->mm) { 1315 - gvt_err("KVM is required to use Intel vGPU\n"); 1313 + gvt_vgpu_err("KVM is required to use Intel vGPU\n"); 1316 1314 return -ESRCH; 1317 1315 } 1318 1316 ··· 1339 1337 1340 1338 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info) 1341 1339 { 1340 + struct intel_vgpu *vgpu = info->vgpu; 1341 + 1342 1342 if (!info) { 1343 - gvt_err("kvmgt_guest_info invalid\n"); 1343 + gvt_vgpu_err("kvmgt_guest_info invalid\n"); 1344 1344 return false; 1345 1345 } 1346 1346 ··· 1387 1383 unsigned long iova, pfn; 1388 1384 struct kvmgt_guest_info *info; 1389 1385 struct device *dev; 1386 + struct intel_vgpu *vgpu; 1390 1387 int rc; 1391 1388 1392 1389 if (!handle_valid(handle)) 1393 1390 return INTEL_GVT_INVALID_ADDR; 1394 1391 1395 1392 info = (struct kvmgt_guest_info *)handle; 1393 + vgpu = info->vgpu; 1396 1394 iova = gvt_cache_find(info->vgpu, gfn); 1397 1395 if (iova != INTEL_GVT_INVALID_ADDR) 1398 1396 return iova; ··· 1403 1397 dev = mdev_dev(info->vgpu->vdev.mdev); 1404 1398 rc = vfio_pin_pages(dev, &gfn, 1, IOMMU_READ | IOMMU_WRITE, &pfn); 1405 1399 if (rc != 1) { 1406 - gvt_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", gfn, rc); 1400 + gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx: %d\n", 1401 + gfn, rc); 1407 1402 return INTEL_GVT_INVALID_ADDR; 1408 1403 } 1409 1404 /* transfer to host iova for GFX to use DMA */ 1410 1405 rc = gvt_dma_map_iova(info->vgpu, pfn, &iova); 1411 1406 if (rc) { 1412 - gvt_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); 1407 + gvt_vgpu_err("gvt_dma_map_iova failed for gfn: 0x%lx\n", gfn); 1413 1408 vfio_unpin_pages(dev, &gfn, 1); 1414 1409 return INTEL_GVT_INVALID_ADDR; 1415 1410 } ··· 1424 1417 { 1425 1418 struct kvmgt_guest_info *info; 1426 1419 struct kvm *kvm; 1427 - int ret; 1420 + int idx, ret; 1428 1421 bool kthread = current->mm == NULL; 1429 1422 1430 1423 if (!handle_valid(handle)) ··· 1436 1429 if (kthread) 1437 1430 use_mm(kvm->mm); 1438 1431 1432 + idx = srcu_read_lock(&kvm->srcu); 1439 1433 ret = write ? kvm_write_guest(kvm, gpa, buf, len) : 1440 1434 kvm_read_guest(kvm, gpa, buf, len); 1435 + srcu_read_unlock(&kvm->srcu, idx); 1441 1436 1442 1437 if (kthread) 1443 1438 unuse_mm(kvm->mm);
+19 -19
drivers/gpu/drm/i915/gvt/mmio.c
··· 142 142 ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, 143 143 p_data, bytes); 144 144 if (ret) { 145 - gvt_err("vgpu%d: guest page read error %d, " 145 + gvt_vgpu_err("guest page read error %d, " 146 146 "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", 147 - vgpu->id, ret, 148 - gp->gfn, pa, *(u32 *)p_data, bytes); 147 + ret, gp->gfn, pa, *(u32 *)p_data, 148 + bytes); 149 149 } 150 150 mutex_unlock(&gvt->lock); 151 151 return ret; ··· 200 200 ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes); 201 201 202 202 if (!vgpu->mmio.disable_warn_untrack) { 203 - gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n", 204 - vgpu->id, offset, bytes, *(u32 *)p_data); 203 + gvt_vgpu_err("read untracked MMIO %x(%dB) val %x\n", 204 + offset, bytes, *(u32 *)p_data); 205 205 206 206 if (offset == 0x206c) { 207 - gvt_err("------------------------------------------\n"); 208 - gvt_err("vgpu%d: likely triggers a gfx reset\n", 209 - vgpu->id); 210 - gvt_err("------------------------------------------\n"); 207 + gvt_vgpu_err("------------------------------------------\n"); 208 + gvt_vgpu_err("likely triggers a gfx reset\n"); 209 + gvt_vgpu_err("------------------------------------------\n"); 211 210 vgpu->mmio.disable_warn_untrack = true; 212 211 } 213 212 } ··· 219 220 mutex_unlock(&gvt->lock); 220 221 return 0; 221 222 err: 222 - gvt_err("vgpu%d: fail to emulate MMIO read %08x len %d\n", 223 - vgpu->id, offset, bytes); 223 + gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n", 224 + offset, bytes); 224 225 mutex_unlock(&gvt->lock); 225 226 return ret; 226 227 } ··· 258 259 if (gp) { 259 260 ret = gp->handler(gp, pa, p_data, bytes); 260 261 if (ret) { 261 - gvt_err("vgpu%d: guest page write error %d, " 262 - "gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n", 263 - vgpu->id, ret, 264 - gp->gfn, pa, *(u32 *)p_data, bytes); 262 + gvt_err("guest page write error %d, " 263 + "gfn 0x%lx, pa 0x%llx, " 264 + "var 0x%x, len %d\n", 265 + ret, gp->gfn, pa, 266 + *(u32 *)p_data, bytes); 265 267 } 266 268 mutex_unlock(&gvt->lock); 267 269 return ret; ··· 329 329 330 330 /* all register bits are RO. */ 331 331 if (ro_mask == ~(u64)0) { 332 - gvt_err("vgpu%d: try to write RO reg %x\n", 333 - vgpu->id, offset); 332 + gvt_vgpu_err("try to write RO reg %x\n", 333 + offset); 334 334 ret = 0; 335 335 goto out; 336 336 } ··· 360 360 mutex_unlock(&gvt->lock); 361 361 return 0; 362 362 err: 363 - gvt_err("vgpu%d: fail to emulate MMIO write %08x len %d\n", 364 - vgpu->id, offset, bytes); 363 + gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset, 364 + bytes); 365 365 mutex_unlock(&gvt->lock); 366 366 return ret; 367 367 }
+3
drivers/gpu/drm/i915/gvt/mmio.h
···
 		void *p_data, unsigned int bytes);
 int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		void *p_data, unsigned int bytes);
+
+bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
+		unsigned int offset);
 #endif
+5 -5
drivers/gpu/drm/i915/gvt/opregion.c
···
 		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
 			+ i * PAGE_SIZE);
 		if (mfn == INTEL_GVT_INVALID_ADDR) {
-			gvt_err("fail to get MFN from VA\n");
+			gvt_vgpu_err("fail to get MFN from VA\n");
 			return -EINVAL;
 		}
 		ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
 				vgpu_opregion(vgpu)->gfn[i],
 				mfn, 1, map);
 		if (ret) {
-			gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
+			gvt_vgpu_err("fail to map GFN to MFN, errno: %d\n",
+				ret);
 			return ret;
 		}
 	}
···
 	parm = vgpu_opregion(vgpu)->va + INTEL_GVT_OPREGION_PARM;
 
 	if (!(swsci & SWSCI_SCI_SELECT)) {
-		gvt_err("vgpu%d: requesting SMI service\n", vgpu->id);
+		gvt_vgpu_err("requesting SMI service\n");
 		return 0;
 	}
 	/* ignore non 0->1 trasitions */
···
 	func = GVT_OPREGION_FUNC(*scic);
 	subfunc = GVT_OPREGION_SUBFUNC(*scic);
 	if (!querying_capabilities(*scic)) {
-		gvt_err("vgpu%d: requesting runtime service: func \"%s\","
+		gvt_vgpu_err("requesting runtime service: func \"%s\","
 				" subfunc \"%s\"\n",
-				vgpu->id,
 				opregion_func_name(func),
 				opregion_subfunc_name(subfunc));
 	/*
+1 -1
drivers/gpu/drm/i915/gvt/render.c
···
 	I915_WRITE_FW(reg, 0x1);
 
 	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
-		gvt_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
 	else
 		vgpu_vreg(vgpu, regs[ring_id]) = 0;
 
+2 -2
drivers/gpu/drm/i915/gvt/sched_policy.c
···
 	struct list_head runq_head;
 };
 
-#define GVT_DEFAULT_TIME_SLICE (1 * HZ / 1000)
+#define GVT_DEFAULT_TIME_SLICE (msecs_to_jiffies(1))
 
 static void tbs_sched_func(struct work_struct *work)
 {
···
 		return;
 
 	list_add_tail(&vgpu_data->list, &sched_data->runq_head);
-	schedule_delayed_work(&sched_data->work, sched_data->period);
+	schedule_delayed_work(&sched_data->work, 0);
 }
 
 static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu)
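The GVT_DEFAULT_TIME_SLICE change above is the "critical gvt scheduler interval time fix" from the changelog: on kernels with HZ below 1000 (e.g. HZ=250), the old expression 1 * HZ / 1000 truncates to 0 jiffies, so the time-based scheduler re-armed its delayed work with a zero period, while msecs_to_jiffies(1) rounds up and never yields less than one jiffy; the second hunk arms the delayed work immediately (delay 0) when scheduling starts rather than waiting a full period first. A small userspace check of the arithmetic (the round-up division is an approximation of msecs_to_jiffies(), not the kernel implementation):

#include <stdio.h>

/* Old and new time-slice expressions, evaluated for a few HZ values. */
static unsigned long old_slice(unsigned long hz)
{
	return 1 * hz / 1000;		/* integer division truncates to 0 for HZ < 1000 */
}

static unsigned long new_slice(unsigned long hz)
{
	return (1 * hz + 999) / 1000;	/* rounds up, so always at least 1 jiffy */
}

int main(void)
{
	const unsigned long hz[] = { 100, 250, 300, 1000 };

	for (unsigned int i = 0; i < sizeof(hz) / sizeof(hz[0]); i++)
		printf("HZ=%-4lu  1*HZ/1000 = %lu   msecs_to_jiffies(1) ~ %lu\n",
		       hz[i], old_slice(hz[i]), new_slice(hz[i]));
	return 0;
}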
+41 -20
drivers/gpu/drm/i915/gvt/scheduler.c
··· 84 84 (u32)((workload->ctx_desc.lrca + i) << 85 85 GTT_PAGE_SHIFT)); 86 86 if (context_gpa == INTEL_GVT_INVALID_ADDR) { 87 - gvt_err("Invalid guest context descriptor\n"); 87 + gvt_vgpu_err("Invalid guest context descriptor\n"); 88 88 return -EINVAL; 89 89 } 90 90 ··· 175 175 int ring_id = workload->ring_id; 176 176 struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx; 177 177 struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv; 178 + struct intel_engine_cs *engine = dev_priv->engine[ring_id]; 178 179 struct drm_i915_gem_request *rq; 180 + struct intel_vgpu *vgpu = workload->vgpu; 179 181 int ret; 180 182 181 183 gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n", ··· 189 187 190 188 mutex_lock(&dev_priv->drm.struct_mutex); 191 189 190 + /* pin shadow context by gvt even the shadow context will be pinned 191 + * when i915 alloc request. That is because gvt will update the guest 192 + * context from shadow context when workload is completed, and at that 193 + * moment, i915 may already unpined the shadow context to make the 194 + * shadow_ctx pages invalid. So gvt need to pin itself. After update 195 + * the guest context, gvt can unpin the shadow_ctx safely. 196 + */ 197 + ret = engine->context_pin(engine, shadow_ctx); 198 + if (ret) { 199 + gvt_vgpu_err("fail to pin shadow context\n"); 200 + workload->status = ret; 201 + mutex_unlock(&dev_priv->drm.struct_mutex); 202 + return ret; 203 + } 204 + 192 205 rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx); 193 206 if (IS_ERR(rq)) { 194 - gvt_err("fail to allocate gem request\n"); 207 + gvt_vgpu_err("fail to allocate gem request\n"); 195 208 ret = PTR_ERR(rq); 196 209 goto out; 197 210 } ··· 219 202 if (ret) 220 203 goto out; 221 204 222 - ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); 223 - if (ret) 224 - goto out; 205 + if ((workload->ring_id == RCS) && 206 + (workload->wa_ctx.indirect_ctx.size != 0)) { 207 + ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx); 208 + if (ret) 209 + goto out; 210 + } 225 211 226 212 ret = populate_shadow_context(workload); 227 213 if (ret) ··· 247 227 248 228 if (!IS_ERR_OR_NULL(rq)) 249 229 i915_add_request_no_flush(rq); 230 + else 231 + engine->context_unpin(engine, shadow_ctx); 232 + 250 233 mutex_unlock(&dev_priv->drm.struct_mutex); 251 234 return ret; 252 235 } ··· 345 322 (u32)((workload->ctx_desc.lrca + i) << 346 323 GTT_PAGE_SHIFT)); 347 324 if (context_gpa == INTEL_GVT_INVALID_ADDR) { 348 - gvt_err("invalid guest context descriptor\n"); 325 + gvt_vgpu_err("invalid guest context descriptor\n"); 349 326 return; 350 327 } 351 328 ··· 399 376 * For the workload w/o request, directly complete the workload. 
400 377 */ 401 378 if (workload->req) { 379 + struct drm_i915_private *dev_priv = 380 + workload->vgpu->gvt->dev_priv; 381 + struct intel_engine_cs *engine = 382 + dev_priv->engine[workload->ring_id]; 402 383 wait_event(workload->shadow_ctx_status_wq, 403 384 !atomic_read(&workload->shadow_ctx_active)); 404 385 ··· 415 388 INTEL_GVT_EVENT_MAX) 416 389 intel_vgpu_trigger_virtual_event(vgpu, event); 417 390 } 391 + mutex_lock(&dev_priv->drm.struct_mutex); 392 + /* unpin shadow ctx as the shadow_ctx update is done */ 393 + engine->context_unpin(engine, workload->vgpu->shadow_ctx); 394 + mutex_unlock(&dev_priv->drm.struct_mutex); 418 395 } 419 396 420 397 gvt_dbg_sched("ring id %d complete workload %p status %d\n", ··· 448 417 int ring_id = p->ring_id; 449 418 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 450 419 struct intel_vgpu_workload *workload = NULL; 420 + struct intel_vgpu *vgpu = NULL; 451 421 int ret; 452 422 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); 453 423 DEFINE_WAIT_FUNC(wait, woken_wake_function); ··· 491 459 mutex_unlock(&gvt->lock); 492 460 493 461 if (ret) { 494 - gvt_err("fail to dispatch workload, skip\n"); 462 + vgpu = workload->vgpu; 463 + gvt_vgpu_err("fail to dispatch workload, skip\n"); 495 464 goto complete; 496 465 } 497 466 498 467 gvt_dbg_sched("ring id %d wait workload %p\n", 499 468 workload->ring_id, workload); 500 - retry: 501 - i915_wait_request(workload->req, 502 - 0, MAX_SCHEDULE_TIMEOUT); 503 - /* I915 has replay mechanism and a request will be replayed 504 - * if there is i915 reset. So the seqno will be updated anyway. 505 - * If the seqno is not updated yet after waiting, which means 506 - * the replay may still be in progress and we can wait again. 507 - */ 508 - if (!i915_gem_request_completed(workload->req)) { 509 - gvt_dbg_sched("workload %p not completed, wait again\n", 510 - workload); 511 - goto retry; 512 - } 469 + i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT); 513 470 514 471 complete: 515 472 gvt_dbg_sched("will complete workload %p, status: %d\n",
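The scheduler.c change above ("ensure shadow ctx pinned" in the changelog) takes an extra pin on the shadow context with engine->context_pin() before the request is allocated, and drops it with engine->context_unpin() in the completion path once the guest context has been updated from the shadow context (or immediately, if request allocation fails). Per the comment added in the patch, without that pin i915 may unpin the shadow context and invalidate its pages as soon as the request retires, i.e. before GVT has copied the results back to the guest. A toy userspace sketch of the same acquire/use/release discipline (types and function names are illustrative, not the i915 API):

#include <stdio.h>

struct shadow_ctx {
	int pin_count;		/* backing pages stay valid while > 0 */
};

static void context_pin(struct shadow_ctx *ctx)   { ctx->pin_count++; }
static void context_unpin(struct shadow_ctx *ctx) { ctx->pin_count--; }

static int dispatch_workload(struct shadow_ctx *ctx)
{
	/* Extra pin taken by GVT so the context survives request retirement. */
	context_pin(ctx);
	printf("dispatched, pin_count=%d\n", ctx->pin_count);
	return 0;
}

static void complete_workload(struct shadow_ctx *ctx)
{
	/* ...copy the shadow context back into the guest context here... */
	context_unpin(ctx);	/* safe to drop only after the copy-back */
	printf("completed, pin_count=%d\n", ctx->pin_count);
}

int main(void)
{
	struct shadow_ctx ctx = { .pin_count = 0 };

	if (dispatch_workload(&ctx) == 0)
		complete_workload(&ctx);
	return 0;
}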
+5
drivers/gpu/drm/i915/intel_gvt.c
···
 		goto bail;
 	}
 
+	if (!i915.enable_execlists) {
+		DRM_INFO("GPU guest virtualisation [GVT-g] disabled due to disabled execlist submission [i915.enable_execlists module parameter]\n");
+		goto bail;
+	}
+
 	/*
 	 * We're not in host or fail to find a MPT module, disable GVT-g
 	 */