Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge tag 'drm-intel-fixes-2017-03-09' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes

flushing out gvt-g fixes

* tag 'drm-intel-fixes-2017-03-09' of git://anongit.freedesktop.org/git/drm-intel: (29 commits)
drm/i915/gvt: change some gvt_err to gvt_dbg_cmd
drm/i915/gvt: protect RO and Rsvd bits of virtual vgpu configuration space
drm/i915/gvt: handle workload lifecycle properly
drm/i915/gvt: fix an error for F_RO flag
drm/i915/gvt: use pfn_valid for better checking
drm/i915/gvt: set SFUSE_STRAP properly for vitual monitor detection
drm/i915/gvt: fix an error for one register
drm/i915/gvt: add more registers into handlers list
drm/i915/gvt: have more registers with F_CMD_ACCESS flags set
drm/i915/gvt: add some new MMIOs to cmd_access white list
drm/i915/gvt: fix pcode mailbox write emulation of BDW
drm/i915/gvt: add resolution definition for vGPU type
drm/i915/gvt: Add more edid definition support
drm/i915/gvt: adjust to fixed vGPU types
drm/i915/gvt: remove unnecessary error msg from gtt write
drm/i915/gvt: refine pcode write emulation
drm/i915/gvt: clear the vGPU reset logic
drm/i915/gvt: decrease priority of output msg for untracked mmio
drm/i915/gvt: set default value to 0 for unhandled mmio regs
drm/i915/gvt: add cmd_access to GEN7_HALF_SLICE_CHICKEN1
...

+681 -251
+54 -3
drivers/gpu/drm/i915/gvt/cfg_space.c
··· 41 41 INTEL_GVT_PCI_BAR_MAX, 42 42 }; 43 43 44 + /* bitmap for writable bits (RW or RW1C bits, but cannot co-exist in one 45 + * byte) byte by byte in standard pci configuration space. (not the full 46 + * 256 bytes.) 47 + */ 48 + static const u8 pci_cfg_space_rw_bmp[PCI_INTERRUPT_LINE + 4] = { 49 + [PCI_COMMAND] = 0xff, 0x07, 50 + [PCI_STATUS] = 0x00, 0xf9, /* the only one RW1C byte */ 51 + [PCI_CACHE_LINE_SIZE] = 0xff, 52 + [PCI_BASE_ADDRESS_0 ... PCI_CARDBUS_CIS - 1] = 0xff, 53 + [PCI_ROM_ADDRESS] = 0x01, 0xf8, 0xff, 0xff, 54 + [PCI_INTERRUPT_LINE] = 0xff, 55 + }; 56 + 57 + /** 58 + * vgpu_pci_cfg_mem_write - write virtual cfg space memory 59 + * 60 + * Use this function to write virtual cfg space memory. 61 + * For standard cfg space, only RW bits can be changed, 62 + * and we emulates the RW1C behavior of PCI_STATUS register. 63 + */ 64 + static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off, 65 + u8 *src, unsigned int bytes) 66 + { 67 + u8 *cfg_base = vgpu_cfg_space(vgpu); 68 + u8 mask, new, old; 69 + int i = 0; 70 + 71 + for (; i < bytes && (off + i < sizeof(pci_cfg_space_rw_bmp)); i++) { 72 + mask = pci_cfg_space_rw_bmp[off + i]; 73 + old = cfg_base[off + i]; 74 + new = src[i] & mask; 75 + 76 + /** 77 + * The PCI_STATUS high byte has RW1C bits, here 78 + * emulates clear by writing 1 for these bits. 79 + * Writing a 0b to RW1C bits has no effect. 80 + */ 81 + if (off + i == PCI_STATUS + 1) 82 + new = (~new & old) & mask; 83 + 84 + cfg_base[off + i] = (old & ~mask) | new; 85 + } 86 + 87 + /* For other configuration space directly copy as it is. */ 88 + if (i < bytes) 89 + memcpy(cfg_base + off + i, src + i, bytes - i); 90 + } 91 + 44 92 /** 45 93 * intel_vgpu_emulate_cfg_read - emulate vGPU configuration space read 46 94 * ··· 171 123 u8 changed = old ^ new; 172 124 int ret; 173 125 174 - memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); 126 + vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); 175 127 if (!(changed & PCI_COMMAND_MEMORY)) 176 128 return 0; 177 129 ··· 285 237 { 286 238 int ret; 287 239 240 + if (vgpu->failsafe) 241 + return 0; 242 + 288 243 if (WARN_ON(bytes > 4)) 289 244 return -EINVAL; 290 245 ··· 325 274 if (ret) 326 275 return ret; 327 276 328 - memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); 277 + vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); 329 278 break; 330 279 default: 331 - memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes); 280 + vgpu_pci_cfg_mem_write(vgpu, offset, p_data, bytes); 332 281 break; 333 282 } 334 283 return 0;
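The cfg_space.c hunk replaces the raw memcpy into the virtual configuration space with a masked write: a per-byte bitmap marks which bits are writable, and the high byte of PCI_STATUS gets write-1-to-clear treatment. Below is a standalone sketch of that masking rule; the helper and the sample values are illustrative, only the general RW/RW1C behaviour mirrors the patch.

#include <stdint.h>
#include <stdio.h>

/*
 * Masked config-space byte write, in the spirit of vgpu_pci_cfg_mem_write():
 * - bits outside both masks are read-only and keep their old value,
 * - bits in rw_mask take the value the guest wrote,
 * - bits in rw1c_mask are cleared when the guest writes 1 and kept when it
 *   writes 0 (the PCI_STATUS error bits behave this way).
 * The two masks are assumed disjoint, as in the patch.
 */
static uint8_t cfg_byte_write(uint8_t old, uint8_t val,
			      uint8_t rw_mask, uint8_t rw1c_mask)
{
	uint8_t ro   = old & ~(rw_mask | rw1c_mask);
	uint8_t rw   = val & rw_mask;
	uint8_t rw1c = old & rw1c_mask & ~val;

	return ro | rw | rw1c;
}

int main(void)
{
	/* High byte of PCI_STATUS: 0xf9 of it is RW1C, as in the patch. */
	uint8_t status_hi = 0xa8;			/* error bits latched */

	status_hi = cfg_byte_write(status_hi, 0x08, 0x00, 0xf9);
	printf("0x%02x\n", status_hi);			/* bit 3 cleared: 0xa0 */
	return 0;
}

Offsets past the standard header (beyond the bitmap) still get a plain copy, as the tail of vgpu_pci_cfg_mem_write() shows.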
+5 -5
drivers/gpu/drm/i915/gvt/cmd_parser.c
··· 668 668 if (d_info == NULL) 669 669 return; 670 670 671 - gvt_err("opcode=0x%x %s sub_ops:", 671 + gvt_dbg_cmd("opcode=0x%x %s sub_ops:", 672 672 cmd >> (32 - d_info->op_len), d_info->name); 673 673 674 674 for (i = 0; i < d_info->nr_sub_op; i++) ··· 693 693 int cnt = 0; 694 694 int i; 695 695 696 - gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)" 696 + gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)" 697 697 " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id, 698 698 s->ring_id, s->ring_start, s->ring_start + s->ring_size, 699 699 s->ring_head, s->ring_tail); 700 700 701 - gvt_err(" %s %s ip_gma(%08lx) ", 701 + gvt_dbg_cmd(" %s %s ip_gma(%08lx) ", 702 702 s->buf_type == RING_BUFFER_INSTRUCTION ? 703 703 "RING_BUFFER" : "BATCH_BUFFER", 704 704 s->buf_addr_type == GTT_BUFFER ? 705 705 "GTT" : "PPGTT", s->ip_gma); 706 706 707 707 if (s->ip_va == NULL) { 708 - gvt_err(" ip_va(NULL)"); 708 + gvt_dbg_cmd(" ip_va(NULL)"); 709 709 return; 710 710 } 711 711 712 - gvt_err(" ip_va=%p: %08x %08x %08x %08x\n", 712 + gvt_dbg_cmd(" ip_va=%p: %08x %08x %08x %08x\n", 713 713 s->ip_va, cmd_val(s, 0), cmd_val(s, 1), 714 714 cmd_val(s, 2), cmd_val(s, 3)); 715 715
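The cmd_parser.c hunk is purely a log-level change: the ring/batch-buffer dump helpers fire on guest-controlled paths, so printing them through gvt_err would flood the host log at error severity. gvt_dbg_cmd keeps the same text but stays silent unless driver debugging is explicitly enabled (the gvt_dbg_* macros are typically routed through the DRM debug machinery). A minimal user-space sketch of that gating pattern; the mask bit and names here are invented for illustration only.

#include <stdio.h>

#define DBG_CAT_CMD	(1u << 0)

static unsigned int debug_mask;			/* stands in for a module parameter */

#define dbg_cmd(fmt, ...)						\
	do {								\
		if (debug_mask & DBG_CAT_CMD)				\
			fprintf(stderr, "gvt: cmd: " fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	dbg_cmd("opcode=0x%x\n", 0x7au);	/* suppressed by default */
	debug_mask |= DBG_CAT_CMD;		/* opted in at run time */
	dbg_cmd("opcode=0x%x\n", 0x7au);	/* now printed */
	return 0;
}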
+94 -45
drivers/gpu/drm/i915/gvt/display.c
··· 83 83 return 0; 84 84 } 85 85 86 + static unsigned char virtual_dp_monitor_edid[GVT_EDID_NUM][EDID_SIZE] = { 87 + { 88 + /* EDID with 1024x768 as its resolution */ 89 + /*Header*/ 90 + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 91 + /* Vendor & Product Identification */ 92 + 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, 93 + /* Version & Revision */ 94 + 0x01, 0x04, 95 + /* Basic Display Parameters & Features */ 96 + 0xa5, 0x34, 0x20, 0x78, 0x23, 97 + /* Color Characteristics */ 98 + 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, 99 + /* Established Timings: maximum resolution is 1024x768 */ 100 + 0x21, 0x08, 0x00, 101 + /* Standard Timings. All invalid */ 102 + 0x00, 0xc0, 0x00, 0xc0, 0x00, 0x40, 0x00, 0x80, 0x00, 0x00, 103 + 0x00, 0x40, 0x00, 0x00, 0x00, 0x01, 104 + /* 18 Byte Data Blocks 1: invalid */ 105 + 0x00, 0x00, 0x80, 0xa0, 0x70, 0xb0, 106 + 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, 107 + /* 18 Byte Data Blocks 2: invalid */ 108 + 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, 109 + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 110 + /* 18 Byte Data Blocks 3: invalid */ 111 + 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, 112 + 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, 113 + /* 18 Byte Data Blocks 4: invalid */ 114 + 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, 115 + 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, 116 + /* Extension Block Count */ 117 + 0x00, 118 + /* Checksum */ 119 + 0xef, 120 + }, 121 + { 86 122 /* EDID with 1920x1200 as its resolution */ 87 - static unsigned char virtual_dp_monitor_edid[] = { 88 - /*Header*/ 89 - 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 90 - /* Vendor & Product Identification */ 91 - 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, 92 - /* Version & Revision */ 93 - 0x01, 0x04, 94 - /* Basic Display Parameters & Features */ 95 - 0xa5, 0x34, 0x20, 0x78, 0x23, 96 - /* Color Characteristics */ 97 - 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, 98 - /* Established Timings: maximum resolution is 1024x768 */ 99 - 0x21, 0x08, 0x00, 100 - /* 101 - * Standard Timings. 
102 - * below new resolutions can be supported: 103 - * 1920x1080, 1280x720, 1280x960, 1280x1024, 104 - * 1440x900, 1600x1200, 1680x1050 105 - */ 106 - 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, 107 - 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, 108 - /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ 109 - 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, 110 - 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, 111 - /* 18 Byte Data Blocks 2: invalid */ 112 - 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, 113 - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 114 - /* 18 Byte Data Blocks 3: invalid */ 115 - 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, 116 - 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, 117 - /* 18 Byte Data Blocks 4: invalid */ 118 - 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, 119 - 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, 120 - /* Extension Block Count */ 121 - 0x00, 122 - /* Checksum */ 123 - 0x45, 123 + /*Header*/ 124 + 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 125 + /* Vendor & Product Identification */ 126 + 0x22, 0xf0, 0x54, 0x29, 0x00, 0x00, 0x00, 0x00, 0x04, 0x17, 127 + /* Version & Revision */ 128 + 0x01, 0x04, 129 + /* Basic Display Parameters & Features */ 130 + 0xa5, 0x34, 0x20, 0x78, 0x23, 131 + /* Color Characteristics */ 132 + 0xfc, 0x81, 0xa4, 0x55, 0x4d, 0x9d, 0x25, 0x12, 0x50, 0x54, 133 + /* Established Timings: maximum resolution is 1024x768 */ 134 + 0x21, 0x08, 0x00, 135 + /* 136 + * Standard Timings. 137 + * below new resolutions can be supported: 138 + * 1920x1080, 1280x720, 1280x960, 1280x1024, 139 + * 1440x900, 1600x1200, 1680x1050 140 + */ 141 + 0xd1, 0xc0, 0x81, 0xc0, 0x81, 0x40, 0x81, 0x80, 0x95, 0x00, 142 + 0xa9, 0x40, 0xb3, 0x00, 0x01, 0x01, 143 + /* 18 Byte Data Blocks 1: max resolution is 1920x1200 */ 144 + 0x28, 0x3c, 0x80, 0xa0, 0x70, 0xb0, 145 + 0x23, 0x40, 0x30, 0x20, 0x36, 0x00, 0x06, 0x44, 0x21, 0x00, 0x00, 0x1a, 146 + /* 18 Byte Data Blocks 2: invalid */ 147 + 0x00, 0x00, 0x00, 0xfd, 0x00, 0x18, 0x3c, 0x18, 0x50, 0x11, 0x00, 0x0a, 148 + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 149 + /* 18 Byte Data Blocks 3: invalid */ 150 + 0x00, 0x00, 0x00, 0xfc, 0x00, 0x48, 151 + 0x50, 0x20, 0x5a, 0x52, 0x32, 0x34, 0x34, 0x30, 0x77, 0x0a, 0x20, 0x20, 152 + /* 18 Byte Data Blocks 4: invalid */ 153 + 0x00, 0x00, 0x00, 0xff, 0x00, 0x43, 0x4e, 0x34, 0x33, 0x30, 0x34, 0x30, 154 + 0x44, 0x58, 0x51, 0x0a, 0x20, 0x20, 155 + /* Extension Block Count */ 156 + 0x00, 157 + /* Checksum */ 158 + 0x45, 159 + }, 124 160 }; 125 161 126 162 #define DPCD_HEADER_SIZE 0xb ··· 176 140 vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT | 177 141 SDE_PORTE_HOTPLUG_SPT); 178 142 179 - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) 143 + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) { 180 144 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; 145 + vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED; 146 + } 181 147 182 - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) 148 + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) { 183 149 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT; 150 + vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; 151 + } 184 152 185 - if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) 153 + if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) { 186 154 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT; 155 + vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; 156 + } 187 157 188 158 if (IS_SKYLAKE(dev_priv) && 189 159 
intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) { ··· 202 160 GEN8_PORT_DP_A_HOTPLUG; 203 161 else 204 162 vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT; 163 + 164 + vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED; 205 165 } 206 166 } 207 167 ··· 219 175 } 220 176 221 177 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num, 222 - int type) 178 + int type, unsigned int resolution) 223 179 { 224 180 struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num); 181 + 182 + if (WARN_ON(resolution >= GVT_EDID_NUM)) 183 + return -EINVAL; 225 184 226 185 port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL); 227 186 if (!port->edid) ··· 236 189 return -ENOMEM; 237 190 } 238 191 239 - memcpy(port->edid->edid_block, virtual_dp_monitor_edid, 192 + memcpy(port->edid->edid_block, virtual_dp_monitor_edid[resolution], 240 193 EDID_SIZE); 241 194 port->edid->data_valid = true; 242 195 ··· 369 322 * Zero on success, negative error code if failed. 370 323 * 371 324 */ 372 - int intel_vgpu_init_display(struct intel_vgpu *vgpu) 325 + int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution) 373 326 { 374 327 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 375 328 376 329 intel_vgpu_init_i2c_edid(vgpu); 377 330 378 331 if (IS_SKYLAKE(dev_priv)) 379 - return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D); 332 + return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D, 333 + resolution); 380 334 else 381 - return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B); 335 + return setup_virtual_dp_monitor(vgpu, PORT_B, GVT_DP_B, 336 + resolution); 382 337 } 383 338 384 339 /**
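display.c now carries one EDID block per supported virtual resolution, indexed by the new GVT_EDID_* enum, and the SFUSE_STRAP / DDI_BUF_CTL bits are set so the guest's detection code actually sees the emulated port. One easy thing to get wrong when hand-editing these 128-byte blocks is the trailing checksum: all 128 bytes must sum to 0 modulo 256, which is why the new 1024x768 block ends in 0xef while the 1920x1200 one keeps 0x45. A small helper for recomputing it (illustrative, not part of the patch):

#include <stdint.h>
#include <stdio.h>

#define EDID_BLOCK_SIZE 128

/* Return the value the last byte must hold so the whole block sums to 0. */
static uint8_t edid_checksum(const uint8_t *block)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < EDID_BLOCK_SIZE - 1; i++)
		sum += block[i];

	return (uint8_t)(0x100 - (sum & 0xff));
}

int main(void)
{
	uint8_t block[EDID_BLOCK_SIZE] = {
		0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,	/* header */
		/* remaining bytes left zero for this demo */
	};

	block[EDID_BLOCK_SIZE - 1] = edid_checksum(block);
	printf("checksum byte: 0x%02x\n", block[EDID_BLOCK_SIZE - 1]);
	return 0;
}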
+19 -1
drivers/gpu/drm/i915/gvt/display.h
··· 154 154 int type; 155 155 }; 156 156 157 + enum intel_vgpu_edid { 158 + GVT_EDID_1024_768, 159 + GVT_EDID_1920_1200, 160 + GVT_EDID_NUM, 161 + }; 162 + 163 + static inline char *vgpu_edid_str(enum intel_vgpu_edid id) 164 + { 165 + switch (id) { 166 + case GVT_EDID_1024_768: 167 + return "1024x768"; 168 + case GVT_EDID_1920_1200: 169 + return "1920x1200"; 170 + default: 171 + return ""; 172 + } 173 + } 174 + 157 175 void intel_gvt_emulate_vblank(struct intel_gvt *gvt); 158 176 void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt); 159 177 160 - int intel_vgpu_init_display(struct intel_vgpu *vgpu); 178 + int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution); 161 179 void intel_vgpu_reset_display(struct intel_vgpu *vgpu); 162 180 void intel_vgpu_clean_display(struct intel_vgpu *vgpu); 163 181
+1 -1
drivers/gpu/drm/i915/gvt/firmware.c
··· 80 80 int ret; 81 81 82 82 size = sizeof(*h) + info->mmio_size + info->cfg_space_size - 1; 83 - firmware = vmalloc(size); 83 + firmware = vzalloc(size); 84 84 if (!firmware) 85 85 return -ENOMEM; 86 86
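The one-liner in firmware.c switches the firmware-snapshot buffer from vmalloc() to vzalloc(): any byte the snapshot code does not explicitly fill would otherwise contain stale kernel memory, so starting from a zeroed buffer avoids leaking it. vzalloc(size) behaves like the open-coded pair below (sketch only, kernel context, using the diff's own variables):

	firmware = vmalloc(size);
	if (!firmware)
		return -ENOMEM;
	memset(firmware, 0, size);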
+26 -14
drivers/gpu/drm/i915/gvt/gtt.c
··· 1825 1825 gma = g_gtt_index << GTT_PAGE_SHIFT; 1826 1826 1827 1827 /* the VM may configure the whole GM space when ballooning is used */ 1828 - if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma), 1829 - "vgpu%d: found oob ggtt write, offset %x\n", 1830 - vgpu->id, off)) { 1828 + if (!vgpu_gmadr_is_valid(vgpu, gma)) 1831 1829 return 0; 1832 - } 1833 1830 1834 1831 ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index); 1835 1832 ··· 2012 2015 return create_scratch_page_tree(vgpu); 2013 2016 } 2014 2017 2018 + static void intel_vgpu_free_mm(struct intel_vgpu *vgpu, int type) 2019 + { 2020 + struct list_head *pos, *n; 2021 + struct intel_vgpu_mm *mm; 2022 + 2023 + list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) { 2024 + mm = container_of(pos, struct intel_vgpu_mm, list); 2025 + if (mm->type == type) { 2026 + vgpu->gvt->gtt.mm_free_page_table(mm); 2027 + list_del(&mm->list); 2028 + list_del(&mm->lru_list); 2029 + kfree(mm); 2030 + } 2031 + } 2032 + } 2033 + 2015 2034 /** 2016 2035 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virulization 2017 2036 * @vgpu: a vGPU ··· 2040 2027 */ 2041 2028 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu) 2042 2029 { 2043 - struct list_head *pos, *n; 2044 - struct intel_vgpu_mm *mm; 2045 - 2046 2030 ppgtt_free_all_shadow_page(vgpu); 2047 2031 release_scratch_page_tree(vgpu); 2048 2032 2049 - list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) { 2050 - mm = container_of(pos, struct intel_vgpu_mm, list); 2051 - vgpu->gvt->gtt.mm_free_page_table(mm); 2052 - list_del(&mm->list); 2053 - list_del(&mm->lru_list); 2054 - kfree(mm); 2055 - } 2033 + intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); 2034 + intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_GGTT); 2056 2035 } 2057 2036 2058 2037 static void clean_spt_oos(struct intel_gvt *gvt) ··· 2327 2322 int i; 2328 2323 2329 2324 ppgtt_free_all_shadow_page(vgpu); 2325 + 2326 + /* Shadow pages are only created when there is no page 2327 + * table tracking data, so remove page tracking data after 2328 + * removing the shadow pages. 2329 + */ 2330 + intel_vgpu_free_mm(vgpu, INTEL_GVT_MM_PPGTT); 2331 + 2330 2332 if (!dmlr) 2331 2333 return; 2332 2334
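The gtt.c rework pulls the mm teardown into intel_vgpu_free_mm(vgpu, type) so a vGPU reset can drop only the PPGTT mms (and only after ppgtt_free_all_shadow_page(), since shadow pages are rebuilt from the page-tracking data), while full cleanup additionally drops the GGTT mm. Because entries are unlinked and freed during the walk, the loop has to use the _safe list iterator. The patch spells it with list_for_each_safe() plus container_of(); an equivalent, slightly more compact entry-based form of the same loop body would be:

	struct intel_vgpu_mm *mm, *next;

	list_for_each_entry_safe(mm, next, &vgpu->gtt.mm_list_head, list) {
		if (mm->type != type)
			continue;
		vgpu->gvt->gtt.mm_free_page_table(mm);
		list_del(&mm->list);		/* safe: 'next' was cached before this */
		list_del(&mm->lru_list);
		kfree(mm);
	}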
+10 -2
drivers/gpu/drm/i915/gvt/gvt.h
··· 143 143 int id; 144 144 unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ 145 145 bool active; 146 + bool pv_notified; 147 + bool failsafe; 146 148 bool resetting; 147 149 void *sched_data; 148 150 ··· 205 203 }; 206 204 207 205 struct intel_gvt_opregion { 208 - void __iomem *opregion_va; 206 + void *opregion_va; 209 207 u32 opregion_pa; 210 208 }; 211 209 212 210 #define NR_MAX_INTEL_VGPU_TYPES 20 213 211 struct intel_vgpu_type { 214 212 char name[16]; 215 - unsigned int max_instance; 216 213 unsigned int avail_instance; 217 214 unsigned int low_gm_size; 218 215 unsigned int high_gm_size; 219 216 unsigned int fence; 217 + enum intel_vgpu_edid resolution; 220 218 }; 221 219 222 220 struct intel_gvt { ··· 319 317 __u64 low_gm_sz; /* in MB */ 320 318 __u64 high_gm_sz; /* in MB */ 321 319 __u64 fence_sz; 320 + __u64 resolution; 322 321 __s32 primary; 323 322 __u64 vgpu_id; 324 323 }; ··· 451 448 void (*vgpu_reset)(struct intel_vgpu *); 452 449 }; 453 450 451 + 452 + enum { 453 + GVT_FAILSAFE_UNSUPPORTED_GUEST, 454 + GVT_FAILSAFE_INSUFFICIENT_RESOURCE, 455 + }; 454 456 455 457 #include "mpt.h" 456 458
+302 -127
drivers/gpu/drm/i915/gvt/handlers.c
··· 121 121 info->size = size; 122 122 info->length = (i + 4) < end ? 4 : (end - i); 123 123 info->addr_mask = addr_mask; 124 + info->ro_mask = ro_mask; 124 125 info->device = device; 125 126 info->read = read ? read : intel_vgpu_default_mmio_read; 126 127 info->write = write ? write : intel_vgpu_default_mmio_write; ··· 151 150 #define fence_num_to_offset(num) \ 152 151 (num * 8 + i915_mmio_reg_offset(FENCE_REG_GEN6_LO(0))) 153 152 153 + 154 + static void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason) 155 + { 156 + switch (reason) { 157 + case GVT_FAILSAFE_UNSUPPORTED_GUEST: 158 + pr_err("Detected your guest driver doesn't support GVT-g.\n"); 159 + break; 160 + case GVT_FAILSAFE_INSUFFICIENT_RESOURCE: 161 + pr_err("Graphics resource is not enough for the guest\n"); 162 + default: 163 + break; 164 + } 165 + pr_err("Now vgpu %d will enter failsafe mode.\n", vgpu->id); 166 + vgpu->failsafe = true; 167 + } 168 + 154 169 static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu, 155 170 unsigned int fence_num, void *p_data, unsigned int bytes) 156 171 { 157 172 if (fence_num >= vgpu_fence_sz(vgpu)) { 158 - gvt_err("vgpu%d: found oob fence register access\n", 159 - vgpu->id); 160 - gvt_err("vgpu%d: total fence num %d access fence num %d\n", 161 - vgpu->id, vgpu_fence_sz(vgpu), fence_num); 173 + 174 + /* When guest access oob fence regs without access 175 + * pv_info first, we treat guest not supporting GVT, 176 + * and we will let vgpu enter failsafe mode. 177 + */ 178 + if (!vgpu->pv_notified) 179 + enter_failsafe_mode(vgpu, 180 + GVT_FAILSAFE_UNSUPPORTED_GUEST); 181 + 182 + if (!vgpu->mmio.disable_warn_untrack) { 183 + gvt_err("vgpu%d: found oob fence register access\n", 184 + vgpu->id); 185 + gvt_err("vgpu%d: total fence %d, access fence %d\n", 186 + vgpu->id, vgpu_fence_sz(vgpu), 187 + fence_num); 188 + } 162 189 memset(p_data, 0, bytes); 190 + return -EINVAL; 163 191 } 164 192 return 0; 165 193 } ··· 397 367 vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE; 398 368 intel_gvt_check_vblank_emulation(vgpu->gvt); 399 369 return 0; 370 + } 371 + 372 + /* ascendingly sorted */ 373 + static i915_reg_t force_nonpriv_white_list[] = { 374 + GEN9_CS_DEBUG_MODE1, //_MMIO(0x20ec) 375 + GEN9_CTX_PREEMPT_REG,//_MMIO(0x2248) 376 + GEN8_CS_CHICKEN1,//_MMIO(0x2580) 377 + _MMIO(0x2690), 378 + _MMIO(0x2694), 379 + _MMIO(0x2698), 380 + _MMIO(0x4de0), 381 + _MMIO(0x4de4), 382 + _MMIO(0x4dfc), 383 + GEN7_COMMON_SLICE_CHICKEN1,//_MMIO(0x7010) 384 + _MMIO(0x7014), 385 + HDC_CHICKEN0,//_MMIO(0x7300) 386 + GEN8_HDC_CHICKEN1,//_MMIO(0x7304) 387 + _MMIO(0x7700), 388 + _MMIO(0x7704), 389 + _MMIO(0x7708), 390 + _MMIO(0x770c), 391 + _MMIO(0xb110), 392 + GEN8_L3SQCREG4,//_MMIO(0xb118) 393 + _MMIO(0xe100), 394 + _MMIO(0xe18c), 395 + _MMIO(0xe48c), 396 + _MMIO(0xe5f4), 397 + }; 398 + 399 + /* a simple bsearch */ 400 + static inline bool in_whitelist(unsigned int reg) 401 + { 402 + int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list); 403 + i915_reg_t *array = force_nonpriv_white_list; 404 + 405 + while (left < right) { 406 + int mid = (left + right)/2; 407 + 408 + if (reg > array[mid].reg) 409 + left = mid + 1; 410 + else if (reg < array[mid].reg) 411 + right = mid; 412 + else 413 + return true; 414 + } 415 + return false; 416 + } 417 + 418 + static int force_nonpriv_write(struct intel_vgpu *vgpu, 419 + unsigned int offset, void *p_data, unsigned int bytes) 420 + { 421 + u32 reg_nonpriv = *(u32 *)p_data; 422 + int ret = -EINVAL; 423 + 424 + if ((bytes != 4) || ((offset & (bytes - 1)) != 0)) { 425 + 
gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n", 426 + vgpu->id, offset, bytes); 427 + return ret; 428 + } 429 + 430 + if (in_whitelist(reg_nonpriv)) { 431 + ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data, 432 + bytes); 433 + } else { 434 + gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x\n", 435 + vgpu->id, reg_nonpriv); 436 + } 437 + return ret; 400 438 } 401 439 402 440 static int ddi_buf_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, ··· 1099 1001 if (invalid_read) 1100 1002 gvt_err("invalid pvinfo read: [%x:%x] = %x\n", 1101 1003 offset, bytes, *(u32 *)p_data); 1004 + vgpu->pv_notified = true; 1102 1005 return 0; 1103 1006 } 1104 1007 ··· 1138 1039 char vmid_str[20]; 1139 1040 char display_ready_str[20]; 1140 1041 1141 - snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d\n", ready); 1042 + snprintf(display_ready_str, 20, "GVT_DISPLAY_READY=%d", ready); 1142 1043 env[0] = display_ready_str; 1143 1044 1144 1045 snprintf(vmid_str, 20, "VMID=%d", vgpu->id); ··· 1176 1077 case _vgtif_reg(pdp[3].hi): 1177 1078 case _vgtif_reg(execlist_context_descriptor_lo): 1178 1079 case _vgtif_reg(execlist_context_descriptor_hi): 1080 + break; 1081 + case _vgtif_reg(rsv5[0])..._vgtif_reg(rsv5[3]): 1082 + enter_failsafe_mode(vgpu, GVT_FAILSAFE_INSUFFICIENT_RESOURCE); 1179 1083 break; 1180 1084 default: 1181 1085 gvt_err("invalid pvinfo write offset %x bytes %x data %x\n", ··· 1305 1203 u32 *data0 = &vgpu_vreg(vgpu, GEN6_PCODE_DATA); 1306 1204 1307 1205 switch (cmd) { 1308 - case 0x6: 1309 - /** 1310 - * "Read memory latency" command on gen9. 1311 - * Below memory latency values are read 1312 - * from skylake platform. 1313 - */ 1314 - if (!*data0) 1315 - *data0 = 0x1e1a1100; 1316 - else 1317 - *data0 = 0x61514b3d; 1206 + case GEN9_PCODE_READ_MEM_LATENCY: 1207 + if (IS_SKYLAKE(vgpu->gvt->dev_priv)) { 1208 + /** 1209 + * "Read memory latency" command on gen9. 1210 + * Below memory latency values are read 1211 + * from skylake platform. 1212 + */ 1213 + if (!*data0) 1214 + *data0 = 0x1e1a1100; 1215 + else 1216 + *data0 = 0x61514b3d; 1217 + } 1318 1218 break; 1319 - case 0x5: 1219 + case SKL_PCODE_CDCLK_CONTROL: 1220 + if (IS_SKYLAKE(vgpu->gvt->dev_priv)) 1221 + *data0 = SKL_CDCLK_READY_FOR_CHANGE; 1222 + break; 1223 + case GEN6_PCODE_READ_RC6VIDS: 1320 1224 *data0 |= 0x1; 1321 1225 break; 1322 1226 } 1323 1227 1324 1228 gvt_dbg_core("VM(%d) write %x to mailbox, return data0 %x\n", 1325 1229 vgpu->id, value, *data0); 1326 - 1327 - value &= ~(1 << 31); 1230 + /** 1231 + * PCODE_READY clear means ready for pcode read/write, 1232 + * PCODE_ERROR_MASK clear means no error happened. In GVT-g we 1233 + * always emulate as pcode read/write success and ready for access 1234 + * anytime, since we don't touch real physical registers here. 1235 + */ 1236 + value &= ~(GEN6_PCODE_READY | GEN6_PCODE_ERROR_MASK); 1328 1237 return intel_vgpu_default_mmio_write(vgpu, offset, &value, bytes); 1329 1238 } 1330 1239 ··· 1431 1318 bool enable_execlist; 1432 1319 1433 1320 write_vreg(vgpu, offset, p_data, bytes); 1321 + 1322 + /* when PPGTT mode enabled, we will check if guest has called 1323 + * pvinfo, if not, we will treat this guest as non-gvtg-aware 1324 + * guest, and stop emulating its cfg space, mmio, gtt, etc. 
1325 + */ 1326 + if (((data & _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)) || 1327 + (data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE))) 1328 + && !vgpu->pv_notified) { 1329 + enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST); 1330 + return 0; 1331 + } 1434 1332 if ((data & _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE)) 1435 1333 || (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) { 1436 1334 enable_execlist = !!(data & GFX_RUN_LIST_ENABLE); ··· 1524 1400 #define MMIO_GM(reg, d, r, w) \ 1525 1401 MMIO_F(reg, 4, F_GMADR, 0xFFFFF000, 0, d, r, w) 1526 1402 1403 + #define MMIO_GM_RDR(reg, d, r, w) \ 1404 + MMIO_F(reg, 4, F_GMADR | F_CMD_ACCESS, 0xFFFFF000, 0, d, r, w) 1405 + 1527 1406 #define MMIO_RO(reg, d, f, rm, r, w) \ 1528 1407 MMIO_F(reg, 4, F_RO | f, 0, rm, d, r, w) 1529 1408 ··· 1546 1419 #define MMIO_RING_GM(prefix, d, r, w) \ 1547 1420 MMIO_RING_F(prefix, 4, F_GMADR, 0xFFFF0000, 0, d, r, w) 1548 1421 1422 + #define MMIO_RING_GM_RDR(prefix, d, r, w) \ 1423 + MMIO_RING_F(prefix, 4, F_GMADR | F_CMD_ACCESS, 0xFFFF0000, 0, d, r, w) 1424 + 1549 1425 #define MMIO_RING_RO(prefix, d, f, rm, r, w) \ 1550 1426 MMIO_RING_F(prefix, 4, F_RO | f, 0, rm, d, r, w) 1551 1427 ··· 1557 1427 struct drm_i915_private *dev_priv = gvt->dev_priv; 1558 1428 int ret; 1559 1429 1560 - MMIO_RING_DFH(RING_IMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); 1430 + MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL, 1431 + intel_vgpu_reg_imr_handler); 1561 1432 1562 1433 MMIO_DFH(SDEIMR, D_ALL, 0, NULL, intel_vgpu_reg_imr_handler); 1563 1434 MMIO_DFH(SDEIER, D_ALL, 0, NULL, intel_vgpu_reg_ier_handler); 1564 1435 MMIO_DFH(SDEIIR, D_ALL, 0, NULL, intel_vgpu_reg_iir_handler); 1565 1436 MMIO_D(SDEISR, D_ALL); 1566 1437 1567 - MMIO_RING_D(RING_HWSTAM, D_ALL); 1438 + MMIO_RING_DFH(RING_HWSTAM, D_ALL, F_CMD_ACCESS, NULL, NULL); 1568 1439 1569 - MMIO_GM(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1570 - MMIO_GM(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1571 - MMIO_GM(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1572 - MMIO_GM(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1440 + MMIO_GM_RDR(RENDER_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1441 + MMIO_GM_RDR(BSD_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1442 + MMIO_GM_RDR(BLT_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1443 + MMIO_GM_RDR(VEBOX_HWS_PGA_GEN7, D_ALL, NULL, NULL); 1573 1444 1574 1445 #define RING_REG(base) (base + 0x28) 1575 - MMIO_RING_D(RING_REG, D_ALL); 1446 + MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL); 1576 1447 #undef RING_REG 1577 1448 1578 1449 #define RING_REG(base) (base + 0x134) 1579 - MMIO_RING_D(RING_REG, D_ALL); 1450 + MMIO_RING_DFH(RING_REG, D_ALL, F_CMD_ACCESS, NULL, NULL); 1580 1451 #undef RING_REG 1581 1452 1582 - MMIO_GM(0x2148, D_ALL, NULL, NULL); 1583 - MMIO_GM(CCID, D_ALL, NULL, NULL); 1584 - MMIO_GM(0x12198, D_ALL, NULL, NULL); 1453 + MMIO_GM_RDR(0x2148, D_ALL, NULL, NULL); 1454 + MMIO_GM_RDR(CCID, D_ALL, NULL, NULL); 1455 + MMIO_GM_RDR(0x12198, D_ALL, NULL, NULL); 1585 1456 MMIO_D(GEN7_CXT_SIZE, D_ALL); 1586 1457 1587 - MMIO_RING_D(RING_TAIL, D_ALL); 1588 - MMIO_RING_D(RING_HEAD, D_ALL); 1589 - MMIO_RING_D(RING_CTL, D_ALL); 1590 - MMIO_RING_D(RING_ACTHD, D_ALL); 1591 - MMIO_RING_GM(RING_START, D_ALL, NULL, NULL); 1458 + MMIO_RING_DFH(RING_TAIL, D_ALL, F_CMD_ACCESS, NULL, NULL); 1459 + MMIO_RING_DFH(RING_HEAD, D_ALL, F_CMD_ACCESS, NULL, NULL); 1460 + MMIO_RING_DFH(RING_CTL, D_ALL, F_CMD_ACCESS, NULL, NULL); 1461 + MMIO_RING_DFH(RING_ACTHD, D_ALL, F_CMD_ACCESS, NULL, NULL); 1462 + MMIO_RING_GM_RDR(RING_START, D_ALL, NULL, NULL); 1592 1463 1593 1464 /* RING MODE */ 1594 1465 #define 
RING_REG(base) (base + 0x29c) 1595 - MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK, NULL, ring_mode_mmio_write); 1466 + MMIO_RING_DFH(RING_REG, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, 1467 + ring_mode_mmio_write); 1596 1468 #undef RING_REG 1597 1469 1598 - MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK, NULL, NULL); 1599 - MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK, NULL, NULL); 1470 + MMIO_RING_DFH(RING_MI_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1471 + NULL, NULL); 1472 + MMIO_RING_DFH(RING_INSTPM, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1473 + NULL, NULL); 1600 1474 MMIO_RING_DFH(RING_TIMESTAMP, D_ALL, F_CMD_ACCESS, 1601 1475 ring_timestamp_mmio_read, NULL); 1602 1476 MMIO_RING_DFH(RING_TIMESTAMP_UDW, D_ALL, F_CMD_ACCESS, 1603 1477 ring_timestamp_mmio_read, NULL); 1604 1478 1605 - MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK, NULL, NULL); 1606 - MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK, NULL, NULL); 1479 + MMIO_DFH(GEN7_GT_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1480 + MMIO_DFH(CACHE_MODE_0_GEN7, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1481 + NULL, NULL); 1607 1482 MMIO_DFH(CACHE_MODE_1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1483 + MMIO_DFH(CACHE_MODE_0, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1484 + MMIO_DFH(0x2124, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1608 1485 1609 - MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK, NULL, NULL); 1610 - MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK, NULL, NULL); 1611 - MMIO_DFH(0x2088, D_ALL, F_MODE_MASK, NULL, NULL); 1612 - MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK, NULL, NULL); 1613 - MMIO_DFH(0x2470, D_ALL, F_MODE_MASK, NULL, NULL); 1614 - MMIO_D(GAM_ECOCHK, D_ALL); 1615 - MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK, NULL, NULL); 1486 + MMIO_DFH(0x20dc, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1487 + MMIO_DFH(_3D_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1488 + MMIO_DFH(0x2088, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1489 + MMIO_DFH(0x20e4, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1490 + MMIO_DFH(0x2470, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1491 + MMIO_DFH(GAM_ECOCHK, D_ALL, F_CMD_ACCESS, NULL, NULL); 1492 + MMIO_DFH(GEN7_COMMON_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, 1493 + NULL, NULL); 1616 1494 MMIO_DFH(COMMON_SLICE_CHICKEN2, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1617 - MMIO_D(0x9030, D_ALL); 1618 - MMIO_D(0x20a0, D_ALL); 1619 - MMIO_D(0x2420, D_ALL); 1620 - MMIO_D(0x2430, D_ALL); 1621 - MMIO_D(0x2434, D_ALL); 1622 - MMIO_D(0x2438, D_ALL); 1623 - MMIO_D(0x243c, D_ALL); 1624 - MMIO_DFH(0x7018, D_ALL, F_MODE_MASK, NULL, NULL); 1495 + MMIO_DFH(0x9030, D_ALL, F_CMD_ACCESS, NULL, NULL); 1496 + MMIO_DFH(0x20a0, D_ALL, F_CMD_ACCESS, NULL, NULL); 1497 + MMIO_DFH(0x2420, D_ALL, F_CMD_ACCESS, NULL, NULL); 1498 + MMIO_DFH(0x2430, D_ALL, F_CMD_ACCESS, NULL, NULL); 1499 + MMIO_DFH(0x2434, D_ALL, F_CMD_ACCESS, NULL, NULL); 1500 + MMIO_DFH(0x2438, D_ALL, F_CMD_ACCESS, NULL, NULL); 1501 + MMIO_DFH(0x243c, D_ALL, F_CMD_ACCESS, NULL, NULL); 1502 + MMIO_DFH(0x7018, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1625 1503 MMIO_DFH(HALF_SLICE_CHICKEN3, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1626 - MMIO_DFH(0xe100, D_ALL, F_MODE_MASK, NULL, NULL); 1504 + MMIO_DFH(GEN7_HALF_SLICE_CHICKEN1, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 1627 1505 1628 1506 /* display */ 1629 1507 MMIO_F(0x60220, 0x20, 0, 0, 0, D_ALL, NULL, NULL); ··· 2160 2022 MMIO_D(FORCEWAKE_ACK, D_ALL); 2161 2023 MMIO_D(GEN6_GT_CORE_STATUS, D_ALL); 2162 2024 MMIO_D(GEN6_GT_THREAD_STATUS_REG, 
D_ALL); 2163 - MMIO_D(GTFIFODBG, D_ALL); 2164 - MMIO_D(GTFIFOCTL, D_ALL); 2025 + MMIO_DFH(GTFIFODBG, D_ALL, F_CMD_ACCESS, NULL, NULL); 2026 + MMIO_DFH(GTFIFOCTL, D_ALL, F_CMD_ACCESS, NULL, NULL); 2165 2027 MMIO_DH(FORCEWAKE_MT, D_PRE_SKL, NULL, mul_force_wake_write); 2166 2028 MMIO_DH(FORCEWAKE_ACK_HSW, D_HSW | D_BDW, NULL, NULL); 2167 2029 MMIO_D(ECOBUS, D_ALL); ··· 2218 2080 2219 2081 MMIO_F(0x4f000, 0x90, 0, 0, 0, D_ALL, NULL, NULL); 2220 2082 2221 - MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_SKL); 2083 + MMIO_D(GEN6_PCODE_MAILBOX, D_PRE_BDW); 2222 2084 MMIO_D(GEN6_PCODE_DATA, D_ALL); 2223 2085 MMIO_D(0x13812c, D_ALL); 2224 2086 MMIO_DH(GEN7_ERR_INT, D_ALL, NULL, NULL); ··· 2297 2159 MMIO_D(0x1a054, D_ALL); 2298 2160 2299 2161 MMIO_D(0x44070, D_ALL); 2300 - 2301 - MMIO_D(0x215c, D_HSW_PLUS); 2162 + MMIO_DFH(0x215c, D_HSW_PLUS, F_CMD_ACCESS, NULL, NULL); 2302 2163 MMIO_DFH(0x2178, D_ALL, F_CMD_ACCESS, NULL, NULL); 2303 2164 MMIO_DFH(0x217c, D_ALL, F_CMD_ACCESS, NULL, NULL); 2304 2165 MMIO_DFH(0x12178, D_ALL, F_CMD_ACCESS, NULL, NULL); 2305 2166 MMIO_DFH(0x1217c, D_ALL, F_CMD_ACCESS, NULL, NULL); 2306 2167 2307 - MMIO_F(0x2290, 8, 0, 0, 0, D_HSW_PLUS, NULL, NULL); 2308 - MMIO_D(GEN7_OACONTROL, D_HSW); 2168 + MMIO_F(0x2290, 8, F_CMD_ACCESS, 0, 0, D_HSW_PLUS, NULL, NULL); 2169 + MMIO_DFH(GEN7_OACONTROL, D_HSW, F_CMD_ACCESS, NULL, NULL); 2309 2170 MMIO_D(0x2b00, D_BDW_PLUS); 2310 2171 MMIO_D(0x2360, D_BDW_PLUS); 2311 - MMIO_F(0x5200, 32, 0, 0, 0, D_ALL, NULL, NULL); 2312 - MMIO_F(0x5240, 32, 0, 0, 0, D_ALL, NULL, NULL); 2313 - MMIO_F(0x5280, 16, 0, 0, 0, D_ALL, NULL, NULL); 2172 + MMIO_F(0x5200, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2173 + MMIO_F(0x5240, 32, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2174 + MMIO_F(0x5280, 16, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2314 2175 2315 2176 MMIO_DFH(0x1c17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2316 2177 MMIO_DFH(0x1c178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2317 - MMIO_D(BCS_SWCTRL, D_ALL); 2178 + MMIO_DFH(BCS_SWCTRL, D_ALL, F_CMD_ACCESS, NULL, NULL); 2318 2179 2319 - MMIO_F(HS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2320 - MMIO_F(DS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2321 - MMIO_F(IA_VERTICES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2322 - MMIO_F(IA_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2323 - MMIO_F(VS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2324 - MMIO_F(GS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2325 - MMIO_F(GS_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2326 - MMIO_F(CL_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2327 - MMIO_F(CL_PRIMITIVES_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2328 - MMIO_F(PS_INVOCATION_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2329 - MMIO_F(PS_DEPTH_COUNT, 8, 0, 0, 0, D_ALL, NULL, NULL); 2180 + MMIO_F(HS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2181 + MMIO_F(DS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2182 + MMIO_F(IA_VERTICES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2183 + MMIO_F(IA_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2184 + MMIO_F(VS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2185 + MMIO_F(GS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2186 + MMIO_F(GS_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2187 + MMIO_F(CL_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2188 + MMIO_F(CL_PRIMITIVES_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2189 + MMIO_F(PS_INVOCATION_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2190 + 
MMIO_F(PS_DEPTH_COUNT, 8, F_CMD_ACCESS, 0, 0, D_ALL, NULL, NULL); 2330 2191 MMIO_DH(0x4260, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); 2331 2192 MMIO_DH(0x4264, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); 2332 2193 MMIO_DH(0x4268, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); ··· 2333 2196 MMIO_DH(0x4270, D_BDW_PLUS, NULL, gvt_reg_tlb_control_handler); 2334 2197 MMIO_DFH(0x4094, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2335 2198 2199 + MMIO_DFH(ARB_MODE, D_ALL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2200 + MMIO_RING_GM_RDR(RING_BBADDR, D_ALL, NULL, NULL); 2201 + MMIO_DFH(0x2220, D_ALL, F_CMD_ACCESS, NULL, NULL); 2202 + MMIO_DFH(0x12220, D_ALL, F_CMD_ACCESS, NULL, NULL); 2203 + MMIO_DFH(0x22220, D_ALL, F_CMD_ACCESS, NULL, NULL); 2204 + MMIO_RING_DFH(RING_SYNC_1, D_ALL, F_CMD_ACCESS, NULL, NULL); 2205 + MMIO_RING_DFH(RING_SYNC_0, D_ALL, F_CMD_ACCESS, NULL, NULL); 2206 + MMIO_DFH(0x22178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2207 + MMIO_DFH(0x1a178, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2208 + MMIO_DFH(0x1a17c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2209 + MMIO_DFH(0x2217c, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2336 2210 return 0; 2337 2211 } 2338 2212 ··· 2352 2204 struct drm_i915_private *dev_priv = gvt->dev_priv; 2353 2205 int ret; 2354 2206 2355 - MMIO_DH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, 2207 + MMIO_DFH(RING_IMR(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, NULL, 2356 2208 intel_vgpu_reg_imr_handler); 2357 2209 2358 2210 MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler); ··· 2417 2269 MMIO_DH(GEN8_MASTER_IRQ, D_BDW_PLUS, NULL, 2418 2270 intel_vgpu_reg_master_irq_handler); 2419 2271 2420 - MMIO_D(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2421 - MMIO_D(0x1c134, D_BDW_PLUS); 2272 + MMIO_DFH(RING_HWSTAM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, 2273 + F_CMD_ACCESS, NULL, NULL); 2274 + MMIO_DFH(0x1c134, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2422 2275 2423 - MMIO_D(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2424 - MMIO_D(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2425 - MMIO_GM(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); 2426 - MMIO_D(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2427 - MMIO_D(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2428 - MMIO_D(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2429 - MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK, NULL, ring_mode_mmio_write); 2430 - MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, 2431 - NULL, NULL); 2432 - MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_MODE_MASK, 2433 - NULL, NULL); 2276 + MMIO_DFH(RING_TAIL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, 2277 + NULL, NULL); 2278 + MMIO_DFH(RING_HEAD(GEN8_BSD2_RING_BASE), D_BDW_PLUS, 2279 + F_CMD_ACCESS, NULL, NULL); 2280 + MMIO_GM_RDR(RING_START(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); 2281 + MMIO_DFH(RING_CTL(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, 2282 + NULL, NULL); 2283 + MMIO_DFH(RING_ACTHD(GEN8_BSD2_RING_BASE), D_BDW_PLUS, 2284 + F_CMD_ACCESS, NULL, NULL); 2285 + MMIO_DFH(RING_ACTHD_UDW(GEN8_BSD2_RING_BASE), D_BDW_PLUS, 2286 + F_CMD_ACCESS, NULL, NULL); 2287 + MMIO_DFH(0x1c29c, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, 2288 + ring_mode_mmio_write); 2289 + MMIO_DFH(RING_MI_MODE(GEN8_BSD2_RING_BASE), D_BDW_PLUS, 2290 + F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2291 + MMIO_DFH(RING_INSTPM(GEN8_BSD2_RING_BASE), D_BDW_PLUS, 2292 + F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2434 2293 MMIO_DFH(RING_TIMESTAMP(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, 2435 2294 
ring_timestamp_mmio_read, NULL); 2436 2295 2437 - MMIO_RING_D(RING_ACTHD_UDW, D_BDW_PLUS); 2296 + MMIO_RING_DFH(RING_ACTHD_UDW, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2438 2297 2439 2298 #define RING_REG(base) (base + 0xd0) 2440 2299 MMIO_RING_F(RING_REG, 4, F_RO, 0, ··· 2458 2303 #undef RING_REG 2459 2304 2460 2305 #define RING_REG(base) (base + 0x234) 2461 - MMIO_RING_F(RING_REG, 8, F_RO, 0, ~0, D_BDW_PLUS, NULL, NULL); 2462 - MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO, 0, ~0LL, D_BDW_PLUS, NULL, NULL); 2306 + MMIO_RING_F(RING_REG, 8, F_RO | F_CMD_ACCESS, 0, ~0, D_BDW_PLUS, 2307 + NULL, NULL); 2308 + MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 4, F_RO | F_CMD_ACCESS, 0, 2309 + ~0LL, D_BDW_PLUS, NULL, NULL); 2463 2310 #undef RING_REG 2464 2311 2465 2312 #define RING_REG(base) (base + 0x244) 2466 - MMIO_RING_D(RING_REG, D_BDW_PLUS); 2467 - MMIO_D(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS); 2313 + MMIO_RING_DFH(RING_REG, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2314 + MMIO_DFH(RING_REG(GEN8_BSD2_RING_BASE), D_BDW_PLUS, F_CMD_ACCESS, 2315 + NULL, NULL); 2468 2316 #undef RING_REG 2469 2317 2470 2318 #define RING_REG(base) (base + 0x370) ··· 2489 2331 MMIO_D(GEN7_MISCCPCTL, D_BDW_PLUS); 2490 2332 MMIO_D(0x1c054, D_BDW_PLUS); 2491 2333 2334 + MMIO_DH(GEN6_PCODE_MAILBOX, D_BDW_PLUS, NULL, mailbox_write); 2335 + 2492 2336 MMIO_D(GEN8_PRIVATE_PAT_LO, D_BDW_PLUS); 2493 2337 MMIO_D(GEN8_PRIVATE_PAT_HI, D_BDW_PLUS); 2494 2338 ··· 2501 2341 MMIO_F(RING_REG(GEN8_BSD2_RING_BASE), 32, 0, 0, 0, D_BDW_PLUS, NULL, NULL); 2502 2342 #undef RING_REG 2503 2343 2504 - MMIO_RING_GM(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); 2505 - MMIO_GM(0x1c080, D_BDW_PLUS, NULL, NULL); 2344 + MMIO_RING_GM_RDR(RING_HWS_PGA, D_BDW_PLUS, NULL, NULL); 2345 + MMIO_GM_RDR(RING_HWS_PGA(GEN8_BSD2_RING_BASE), D_BDW_PLUS, NULL, NULL); 2506 2346 2507 2347 MMIO_DFH(HDC_CHICKEN0, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2508 2348 2509 - MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW); 2510 - MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW); 2511 - MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW); 2349 + MMIO_D(CHICKEN_PIPESL_1(PIPE_A), D_BDW_PLUS); 2350 + MMIO_D(CHICKEN_PIPESL_1(PIPE_B), D_BDW_PLUS); 2351 + MMIO_D(CHICKEN_PIPESL_1(PIPE_C), D_BDW_PLUS); 2512 2352 2513 2353 MMIO_D(WM_MISC, D_BDW); 2514 2354 MMIO_D(BDW_EDP_PSR_BASE, D_BDW); ··· 2522 2362 MMIO_D(GEN8_EU_DISABLE1, D_BDW_PLUS); 2523 2363 MMIO_D(GEN8_EU_DISABLE2, D_BDW_PLUS); 2524 2364 2525 - MMIO_D(0xfdc, D_BDW); 2526 - MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2527 - MMIO_D(GEN7_ROW_CHICKEN2, D_BDW_PLUS); 2528 - MMIO_D(GEN8_UCGCTL6, D_BDW_PLUS); 2365 + MMIO_D(0xfdc, D_BDW_PLUS); 2366 + MMIO_DFH(GEN8_ROW_CHICKEN, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, 2367 + NULL, NULL); 2368 + MMIO_DFH(GEN7_ROW_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, 2369 + NULL, NULL); 2370 + MMIO_DFH(GEN8_UCGCTL6, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2529 2371 2530 - MMIO_D(0xb1f0, D_BDW); 2531 - MMIO_D(0xb1c0, D_BDW); 2372 + MMIO_DFH(0xb1f0, D_BDW, F_CMD_ACCESS, NULL, NULL); 2373 + MMIO_DFH(0xb1c0, D_BDW, F_CMD_ACCESS, NULL, NULL); 2532 2374 MMIO_DFH(GEN8_L3SQCREG4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2533 - MMIO_D(0xb100, D_BDW); 2534 - MMIO_D(0xb10c, D_BDW); 2375 + MMIO_DFH(0xb100, D_BDW, F_CMD_ACCESS, NULL, NULL); 2376 + MMIO_DFH(0xb10c, D_BDW, F_CMD_ACCESS, NULL, NULL); 2535 2377 MMIO_D(0xb110, D_BDW); 2536 2378 2537 - MMIO_DFH(0x24d0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2538 - MMIO_DFH(0x24d4, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2539 - MMIO_DFH(0x24d8, D_BDW_PLUS, F_CMD_ACCESS, 
NULL, NULL); 2540 - MMIO_DFH(0x24dc, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2379 + MMIO_F(0x24d0, 48, F_CMD_ACCESS, 0, 0, D_BDW_PLUS, 2380 + NULL, force_nonpriv_write); 2541 2381 2542 - MMIO_D(0x83a4, D_BDW); 2382 + MMIO_D(0x22040, D_BDW_PLUS); 2383 + MMIO_D(0x44484, D_BDW_PLUS); 2384 + MMIO_D(0x4448c, D_BDW_PLUS); 2385 + 2386 + MMIO_DFH(0x83a4, D_BDW, F_CMD_ACCESS, NULL, NULL); 2543 2387 MMIO_D(GEN8_L3_LRA_1_GPGPU, D_BDW_PLUS); 2544 2388 2545 - MMIO_D(0x8430, D_BDW); 2389 + MMIO_DFH(0x8430, D_BDW, F_CMD_ACCESS, NULL, NULL); 2546 2390 2547 2391 MMIO_D(0x110000, D_BDW_PLUS); 2548 2392 ··· 2558 2394 MMIO_DFH(0xe194, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2559 2395 MMIO_DFH(0xe188, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2560 2396 MMIO_DFH(HALF_SLICE_CHICKEN2, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2561 - MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK, NULL, NULL); 2397 + MMIO_DFH(0x2580, D_BDW_PLUS, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2562 2398 2563 - MMIO_D(0x2248, D_BDW); 2399 + MMIO_DFH(0x2248, D_BDW, F_CMD_ACCESS, NULL, NULL); 2564 2400 2401 + MMIO_DFH(0xe220, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2402 + MMIO_DFH(0xe230, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2403 + MMIO_DFH(0xe240, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2404 + MMIO_DFH(0xe260, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2405 + MMIO_DFH(0xe270, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2406 + MMIO_DFH(0xe280, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2407 + MMIO_DFH(0xe2a0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2408 + MMIO_DFH(0xe2b0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2409 + MMIO_DFH(0xe2c0, D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); 2565 2410 return 0; 2566 2411 } 2567 2412 ··· 2593 2420 MMIO_D(HSW_PWR_WELL_BIOS, D_SKL); 2594 2421 MMIO_DH(HSW_PWR_WELL_DRIVER, D_SKL, NULL, skl_power_well_ctl_write); 2595 2422 2596 - MMIO_DH(GEN6_PCODE_MAILBOX, D_SKL, NULL, mailbox_write); 2597 2423 MMIO_D(0xa210, D_SKL_PLUS); 2598 2424 MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS); 2599 2425 MMIO_D(GEN9_RENDER_PG_IDLE_HYSTERESIS, D_SKL_PLUS); ··· 2750 2578 MMIO_F(0xb020, 0x80, F_CMD_ACCESS, 0, 0, D_SKL, NULL, NULL); 2751 2579 2752 2580 MMIO_D(0xd08, D_SKL); 2753 - MMIO_D(0x20e0, D_SKL); 2754 - MMIO_D(0x20ec, D_SKL); 2581 + MMIO_DFH(0x20e0, D_SKL, F_MODE_MASK, NULL, NULL); 2582 + MMIO_DFH(0x20ec, D_SKL, F_MODE_MASK | F_CMD_ACCESS, NULL, NULL); 2755 2583 2756 2584 /* TRTT */ 2757 - MMIO_D(0x4de0, D_SKL); 2758 - MMIO_D(0x4de4, D_SKL); 2759 - MMIO_D(0x4de8, D_SKL); 2760 - MMIO_D(0x4dec, D_SKL); 2761 - MMIO_D(0x4df0, D_SKL); 2762 - MMIO_DH(0x4df4, D_SKL, NULL, gen9_trtte_write); 2585 + MMIO_DFH(0x4de0, D_SKL, F_CMD_ACCESS, NULL, NULL); 2586 + MMIO_DFH(0x4de4, D_SKL, F_CMD_ACCESS, NULL, NULL); 2587 + MMIO_DFH(0x4de8, D_SKL, F_CMD_ACCESS, NULL, NULL); 2588 + MMIO_DFH(0x4dec, D_SKL, F_CMD_ACCESS, NULL, NULL); 2589 + MMIO_DFH(0x4df0, D_SKL, F_CMD_ACCESS, NULL, NULL); 2590 + MMIO_DFH(0x4df4, D_SKL, F_CMD_ACCESS, NULL, gen9_trtte_write); 2763 2591 MMIO_DH(0x4dfc, D_SKL, NULL, gen9_trtt_chicken_write); 2764 2592 2765 2593 MMIO_D(0x45008, D_SKL); ··· 2783 2611 MMIO_D(0x65f08, D_SKL); 2784 2612 MMIO_D(0x320f0, D_SKL); 2785 2613 2786 - MMIO_D(_REG_VCS2_EXCC, D_SKL); 2614 + MMIO_DFH(_REG_VCS2_EXCC, D_SKL, F_CMD_ACCESS, NULL, NULL); 2787 2615 MMIO_D(0x70034, D_SKL); 2788 2616 MMIO_D(0x71034, D_SKL); 2789 2617 MMIO_D(0x72034, D_SKL); ··· 2796 2624 MMIO_D(_PLANE_KEYMSK_1(PIPE_C), D_SKL); 2797 2625 2798 2626 MMIO_D(0x44500, D_SKL); 2627 + MMIO_DFH(GEN9_CSFE_CHICKEN1_RCS, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL); 2628 + 
MMIO_DFH(GEN8_HDC_CHICKEN1, D_SKL, F_MODE_MASK | F_CMD_ACCESS, 2629 + NULL, NULL); 2799 2630 return 0; 2800 2631 } 2801 2632
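The large handlers.c hunk does several things at once: it introduces failsafe mode (entered when a guest that never touched the PVINFO page starts hitting out-of-bound fence registers or enables PPGTT/execlists, or when a resource-shortage notification arrives through the rsv5 PVINFO range), it rewrites the pcode mailbox emulation in terms of the named GEN9_PCODE_READ_MEM_LATENCY / SKL_PCODE_CDCLK_CONTROL / GEN6_PCODE_READ_RC6VIDS commands, and it tags a long list of registers with F_CMD_ACCESS so the command parser accepts them. The new force_nonpriv_write() handler only allows writes whose payload register sits in an ascending whitelist, found with an open-coded binary search. A standalone version of that search (the offsets are a few entries from the patch's table, the rest is illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint32_t whitelist[] = {	/* must stay ascending */
	0x20ec, 0x2248, 0x2580, 0x2690, 0x2694, 0x2698,
	0x4de0, 0x4de4, 0x4dfc, 0x7010, 0x7014, 0x7300,
};

static bool in_whitelist(uint32_t reg)
{
	size_t left = 0, right = sizeof(whitelist) / sizeof(whitelist[0]);

	while (left < right) {			/* [left, right) half-open */
		size_t mid = left + (right - left) / 2;

		if (reg > whitelist[mid])
			left = mid + 1;
		else if (reg < whitelist[mid])
			right = mid;
		else
			return true;
	}
	return false;
}

int main(void)
{
	printf("%d %d\n", in_whitelist(0x2580), in_whitelist(0x2584));	/* 1 0 */
	return 0;
}

The search silently misses entries if the table ever stops being sorted, hence the "ascendingly sorted" comment in the patch; the kernel's generic bsearch() from lib/bsearch.c would also work, at the cost of a comparison callback per probe.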
+6 -6
drivers/gpu/drm/i915/gvt/kvmgt.c
··· 96 96 struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev; 97 97 dma_addr_t daddr; 98 98 99 - page = pfn_to_page(pfn); 100 - if (is_error_page(page)) 99 + if (unlikely(!pfn_valid(pfn))) 101 100 return -EFAULT; 102 101 102 + page = pfn_to_page(pfn); 103 103 daddr = dma_map_page(dev, page, 0, PAGE_SIZE, 104 104 PCI_DMA_BIDIRECTIONAL); 105 105 if (dma_mapping_error(dev, daddr)) ··· 295 295 return 0; 296 296 297 297 return sprintf(buf, "low_gm_size: %dMB\nhigh_gm_size: %dMB\n" 298 - "fence: %d\n", 299 - BYTES_TO_MB(type->low_gm_size), 300 - BYTES_TO_MB(type->high_gm_size), 301 - type->fence); 298 + "fence: %d\nresolution: %s\n", 299 + BYTES_TO_MB(type->low_gm_size), 300 + BYTES_TO_MB(type->high_gm_size), 301 + type->fence, vgpu_edid_str(type->resolution)); 302 302 } 303 303 304 304 static MDEV_TYPE_ATTR_RO(available_instances);
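Two small kvmgt.c changes: the pfn-to-DMA mapping path now checks pfn_valid() before forming the struct page pointer, because pfn_to_page() on a pfn with no backing struct page is not a meaningful operation and is_error_page() cannot catch that case; and the mdev type's description attribute now also reports the virtual monitor resolution. With the fixed type table added in vgpu.c further down, reading that sysfs attribute for the smallest type would produce output along these lines (values taken from the 64 MB / 512 MB / 4-fence / 1024x768 entry; the exact sysfs path depends on the mdev registration):

low_gm_size: 64MB
high_gm_size: 512MB
fence: 4
resolution: 1024x768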
+65 -1
drivers/gpu/drm/i915/gvt/mmio.c
··· 57 57 (reg >= gvt->device_info.gtt_start_offset \ 58 58 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) 59 59 60 + static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa, 61 + void *p_data, unsigned int bytes, bool read) 62 + { 63 + struct intel_gvt *gvt = NULL; 64 + void *pt = NULL; 65 + unsigned int offset = 0; 66 + 67 + if (!vgpu || !p_data) 68 + return; 69 + 70 + gvt = vgpu->gvt; 71 + mutex_lock(&gvt->lock); 72 + offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa); 73 + if (reg_is_mmio(gvt, offset)) { 74 + if (read) 75 + intel_vgpu_default_mmio_read(vgpu, offset, p_data, 76 + bytes); 77 + else 78 + intel_vgpu_default_mmio_write(vgpu, offset, p_data, 79 + bytes); 80 + } else if (reg_is_gtt(gvt, offset) && 81 + vgpu->gtt.ggtt_mm->virtual_page_table) { 82 + offset -= gvt->device_info.gtt_start_offset; 83 + pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset; 84 + if (read) 85 + memcpy(p_data, pt, bytes); 86 + else 87 + memcpy(pt, p_data, bytes); 88 + 89 + } else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { 90 + struct intel_vgpu_guest_page *gp; 91 + 92 + /* Since we enter the failsafe mode early during guest boot, 93 + * guest may not have chance to set up its ppgtt table, so 94 + * there should not be any wp pages for guest. Keep the wp 95 + * related code here in case we need to handle it in furture. 96 + */ 97 + gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT); 98 + if (gp) { 99 + /* remove write protection to prevent furture traps */ 100 + intel_vgpu_clean_guest_page(vgpu, gp); 101 + if (read) 102 + intel_gvt_hypervisor_read_gpa(vgpu, pa, 103 + p_data, bytes); 104 + else 105 + intel_gvt_hypervisor_write_gpa(vgpu, pa, 106 + p_data, bytes); 107 + } 108 + } 109 + mutex_unlock(&gvt->lock); 110 + } 111 + 60 112 /** 61 113 * intel_vgpu_emulate_mmio_read - emulate MMIO read 62 114 * @vgpu: a vGPU ··· 127 75 unsigned int offset = 0; 128 76 int ret = -EINVAL; 129 77 78 + 79 + if (vgpu->failsafe) { 80 + failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true); 81 + return 0; 82 + } 130 83 mutex_lock(&gvt->lock); 131 84 132 85 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { ··· 245 188 u32 old_vreg = 0, old_sreg = 0; 246 189 int ret = -EINVAL; 247 190 191 + if (vgpu->failsafe) { 192 + failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, false); 193 + return 0; 194 + } 195 + 248 196 mutex_lock(&gvt->lock); 249 197 250 198 if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) { ··· 298 236 299 237 mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4)); 300 238 if (!mmio && !vgpu->mmio.disable_warn_untrack) 301 - gvt_err("vgpu%d: write untracked MMIO %x len %d val %x\n", 239 + gvt_dbg_mmio("vgpu%d: write untracked MMIO %x len %d val %x\n", 302 240 vgpu->id, offset, bytes, *(u32 *)p_data); 303 241 304 242 if (!intel_gvt_mmio_is_unalign(gvt, offset)) { ··· 384 322 385 323 /* set the bit 0:2(Core C-State ) to C0 */ 386 324 vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0; 325 + 326 + vgpu->mmio.disable_warn_untrack = false; 387 327 } 388 328 389 329 /**
+2 -3
drivers/gpu/drm/i915/gvt/opregion.c
··· 27 27 28 28 static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa) 29 29 { 30 - void __iomem *host_va = vgpu->gvt->opregion.opregion_va; 31 30 u8 *buf; 32 31 int i; 33 32 ··· 42 43 if (!vgpu_opregion(vgpu)->va) 43 44 return -ENOMEM; 44 45 45 - memcpy_fromio(vgpu_opregion(vgpu)->va, host_va, 46 - INTEL_GVT_OPREGION_SIZE); 46 + memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va, 47 + INTEL_GVT_OPREGION_SIZE); 47 48 48 49 for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) 49 50 vgpu_opregion(vgpu)->gfn[i] = (gpa >> PAGE_SHIFT) + i;
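This pairs with the gvt.h change above: the host OpRegion snapshot lives in ordinary kernel memory, so the __iomem annotation on opregion_va was wrong and forced memcpy_fromio() where a plain memcpy() is correct. Keeping the annotation honest also lets sparse flag any future mixing of the two pointer flavours. An illustrative fragment of the pairing rule (the ioremap half is an assumption, not gvt code; the memcpy half is the patch's own call):

	void __iomem *regs = ioremap(bar_start, bar_len);	/* device mapping */
	u32 tmp;

	memcpy_fromio(&tmp, regs, sizeof(tmp));		/* __iomem source: io helpers */

	/* ordinary kernel memory, which is what opregion_va now is: */
	memcpy(vgpu_opregion(vgpu)->va, vgpu->gvt->opregion.opregion_va,
	       INTEL_GVT_OPREGION_SIZE);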
+16
drivers/gpu/drm/i915/gvt/render.c
··· 53 53 {RCS, _MMIO(0x24d4), 0, false}, 54 54 {RCS, _MMIO(0x24d8), 0, false}, 55 55 {RCS, _MMIO(0x24dc), 0, false}, 56 + {RCS, _MMIO(0x24e0), 0, false}, 57 + {RCS, _MMIO(0x24e4), 0, false}, 58 + {RCS, _MMIO(0x24e8), 0, false}, 59 + {RCS, _MMIO(0x24ec), 0, false}, 60 + {RCS, _MMIO(0x24f0), 0, false}, 61 + {RCS, _MMIO(0x24f4), 0, false}, 62 + {RCS, _MMIO(0x24f8), 0, false}, 63 + {RCS, _MMIO(0x24fc), 0, false}, 56 64 {RCS, _MMIO(0x7004), 0xffff, true}, 57 65 {RCS, _MMIO(0x7008), 0xffff, true}, 58 66 {RCS, _MMIO(0x7000), 0xffff, true}, ··· 84 76 {RCS, _MMIO(0x24d4), 0, false}, 85 77 {RCS, _MMIO(0x24d8), 0, false}, 86 78 {RCS, _MMIO(0x24dc), 0, false}, 79 + {RCS, _MMIO(0x24e0), 0, false}, 80 + {RCS, _MMIO(0x24e4), 0, false}, 81 + {RCS, _MMIO(0x24e8), 0, false}, 82 + {RCS, _MMIO(0x24ec), 0, false}, 83 + {RCS, _MMIO(0x24f0), 0, false}, 84 + {RCS, _MMIO(0x24f4), 0, false}, 85 + {RCS, _MMIO(0x24f8), 0, false}, 86 + {RCS, _MMIO(0x24fc), 0, false}, 87 87 {RCS, _MMIO(0x7004), 0xffff, true}, 88 88 {RCS, _MMIO(0x7008), 0xffff, true}, 89 89 {RCS, _MMIO(0x7000), 0xffff, true},
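render.c extends the per-ring MMIO save/restore tables so the whole 0x24d0-0x24fc block (the twelve FORCE_TO_NONPRIV slots, matching the 48-byte range the new force_nonpriv_write handler covers) is preserved across vGPU context switches in both the gen8 and gen9 tables. The entries in these tables carrying a 0xffff mask correspond to registers using the i915 masked-write convention, where the upper 16 bits of a write select which of the lower 16 bits take effect; a short sketch of that convention (mirrors _MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE):

#include <stdint.h>
#include <stdio.h>

static uint32_t masked_bit_enable(uint32_t bit)  { return (bit << 16) | bit; }
static uint32_t masked_bit_disable(uint32_t bit) { return bit << 16; }

int main(void)
{
	/* Enabling vs. disabling bit 2 of a masked register: */
	printf("enable  0x%08x\n", masked_bit_enable(1u << 2));		/* 0x00040004 */
	printf("disable 0x%08x\n", masked_bit_disable(1u << 2));	/* 0x00040000 */
	return 0;
}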
+36 -16
drivers/gpu/drm/i915/gvt/scheduler.c
··· 139 139 struct intel_vgpu_workload *workload = 140 140 scheduler->current_workload[req->engine->id]; 141 141 142 + if (unlikely(!workload)) 143 + return NOTIFY_OK; 144 + 142 145 switch (action) { 143 146 case INTEL_CONTEXT_SCHEDULE_IN: 144 147 intel_gvt_load_render_mmio(workload->vgpu, ··· 151 148 case INTEL_CONTEXT_SCHEDULE_OUT: 152 149 intel_gvt_restore_render_mmio(workload->vgpu, 153 150 workload->ring_id); 151 + /* If the status is -EINPROGRESS means this workload 152 + * doesn't meet any issue during dispatching so when 153 + * get the SCHEDULE_OUT set the status to be zero for 154 + * good. If the status is NOT -EINPROGRESS means there 155 + * is something wrong happened during dispatching and 156 + * the status should not be set to zero 157 + */ 158 + if (workload->status == -EINPROGRESS) 159 + workload->status = 0; 154 160 atomic_set(&workload->shadow_ctx_active, 0); 155 161 break; 156 162 default: ··· 371 359 workload = scheduler->current_workload[ring_id]; 372 360 vgpu = workload->vgpu; 373 361 374 - if (!workload->status && !vgpu->resetting) { 362 + /* For the workload w/ request, needs to wait for the context 363 + * switch to make sure request is completed. 364 + * For the workload w/o request, directly complete the workload. 365 + */ 366 + if (workload->req) { 375 367 wait_event(workload->shadow_ctx_status_wq, 376 368 !atomic_read(&workload->shadow_ctx_active)); 377 369 378 - update_guest_context(workload); 370 + i915_gem_request_put(fetch_and_zero(&workload->req)); 379 371 380 - for_each_set_bit(event, workload->pending_events, 381 - INTEL_GVT_EVENT_MAX) 382 - intel_vgpu_trigger_virtual_event(vgpu, event); 372 + if (!workload->status && !vgpu->resetting) { 373 + update_guest_context(workload); 374 + 375 + for_each_set_bit(event, workload->pending_events, 376 + INTEL_GVT_EVENT_MAX) 377 + intel_vgpu_trigger_virtual_event(vgpu, event); 378 + } 383 379 } 384 380 385 381 gvt_dbg_sched("ring id %d complete workload %p status %d\n", ··· 417 397 int ring_id = p->ring_id; 418 398 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler; 419 399 struct intel_vgpu_workload *workload = NULL; 420 - long lret; 421 400 int ret; 422 401 bool need_force_wake = IS_SKYLAKE(gvt->dev_priv); 423 402 DEFINE_WAIT_FUNC(wait, woken_wake_function); ··· 465 446 466 447 gvt_dbg_sched("ring id %d wait workload %p\n", 467 448 workload->ring_id, workload); 468 - 469 - lret = i915_wait_request(workload->req, 449 + retry: 450 + i915_wait_request(workload->req, 470 451 0, MAX_SCHEDULE_TIMEOUT); 471 - if (lret < 0) { 472 - workload->status = lret; 473 - gvt_err("fail to wait workload, skip\n"); 474 - } else { 475 - workload->status = 0; 452 + /* I915 has replay mechanism and a request will be replayed 453 + * if there is i915 reset. So the seqno will be updated anyway. 454 + * If the seqno is not updated yet after waiting, which means 455 + * the replay may still be in progress and we can wait again. 456 + */ 457 + if (!i915_gem_request_completed(workload->req)) { 458 + gvt_dbg_sched("workload %p not completed, wait again\n", 459 + workload); 460 + goto retry; 476 461 } 477 462 478 463 complete: 479 464 gvt_dbg_sched("will complete workload %p, status: %d\n", 480 465 workload, workload->status); 481 - 482 - if (workload->req) 483 - i915_gem_request_put(fetch_and_zero(&workload->req)); 484 466 485 467 complete_current_workload(gvt, ring_id); 486 468
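The scheduler.c changes harden workload completion: the context-switch notifier bails out if no GVT workload is current, workload->status is treated as "still dispatching" while it holds -EINPROGRESS and is only promoted to 0 at SCHEDULE_OUT (so an error recorded during dispatch is never clobbered), and the dispatcher re-waits after i915_wait_request() if the request has not actually completed, because i915 may replay requests across a GPU reset. A tiny model of the status convention (illustrative, not the kernel structures):

#include <errno.h>
#include <stdio.h>

struct workload { int status; };

static void schedule_out(struct workload *w)
{
	/* Only a clean in-flight workload is promoted to success here;
	 * an error set during dispatch survives to completion handling.
	 */
	if (w->status == -EINPROGRESS)
		w->status = 0;
}

int main(void)
{
	struct workload ok  = { .status = -EINPROGRESS };
	struct workload bad = { .status = -EFAULT };

	schedule_out(&ok);
	schedule_out(&bad);
	printf("%d %d\n", ok.status, bad.status);	/* 0 -14 */
	return 0;
}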
+45 -27
drivers/gpu/drm/i915/gvt/vgpu.c
··· 64 64 WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); 65 65 } 66 66 67 + static struct { 68 + unsigned int low_mm; 69 + unsigned int high_mm; 70 + unsigned int fence; 71 + enum intel_vgpu_edid edid; 72 + char *name; 73 + } vgpu_types[] = { 74 + /* Fixed vGPU type table */ 75 + { MB_TO_BYTES(64), MB_TO_BYTES(512), 4, GVT_EDID_1024_768, "8" }, 76 + { MB_TO_BYTES(128), MB_TO_BYTES(512), 4, GVT_EDID_1920_1200, "4" }, 77 + { MB_TO_BYTES(256), MB_TO_BYTES(1024), 4, GVT_EDID_1920_1200, "2" }, 78 + { MB_TO_BYTES(512), MB_TO_BYTES(2048), 4, GVT_EDID_1920_1200, "1" }, 79 + }; 80 + 67 81 /** 68 82 * intel_gvt_init_vgpu_types - initialize vGPU type list 69 83 * @gvt : GVT device ··· 92 78 unsigned int min_low; 93 79 94 80 /* vGPU type name is defined as GVTg_Vx_y which contains 95 - * physical GPU generation type and 'y' means maximum vGPU 96 - * instances user can create on one physical GPU for this 97 - * type. 81 + * physical GPU generation type (e.g V4 as BDW server, V5 as 82 + * SKL server). 98 83 * 99 84 * Depend on physical SKU resource, might see vGPU types like 100 85 * GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create ··· 105 92 */ 106 93 low_avail = gvt_aperture_sz(gvt) - HOST_LOW_GM_SIZE; 107 94 high_avail = gvt_hidden_sz(gvt) - HOST_HIGH_GM_SIZE; 108 - num_types = 4; 95 + num_types = sizeof(vgpu_types) / sizeof(vgpu_types[0]); 109 96 110 97 gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type), 111 98 GFP_KERNEL); ··· 114 101 115 102 min_low = MB_TO_BYTES(32); 116 103 for (i = 0; i < num_types; ++i) { 117 - if (low_avail / min_low == 0) 104 + if (low_avail / vgpu_types[i].low_mm == 0) 118 105 break; 119 - gvt->types[i].low_gm_size = min_low; 120 - gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U)); 121 - gvt->types[i].fence = 4; 122 - gvt->types[i].max_instance = min(low_avail / min_low, 123 - high_avail / gvt->types[i].high_gm_size); 124 - gvt->types[i].avail_instance = gvt->types[i].max_instance; 106 + 107 + gvt->types[i].low_gm_size = vgpu_types[i].low_mm; 108 + gvt->types[i].high_gm_size = vgpu_types[i].high_mm; 109 + gvt->types[i].fence = vgpu_types[i].fence; 110 + gvt->types[i].resolution = vgpu_types[i].edid; 111 + gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm, 112 + high_avail / vgpu_types[i].high_mm); 125 113 126 114 if (IS_GEN8(gvt->dev_priv)) 127 - sprintf(gvt->types[i].name, "GVTg_V4_%u", 128 - gvt->types[i].max_instance); 115 + sprintf(gvt->types[i].name, "GVTg_V4_%s", 116 + vgpu_types[i].name); 129 117 else if (IS_GEN9(gvt->dev_priv)) 130 - sprintf(gvt->types[i].name, "GVTg_V5_%u", 131 - gvt->types[i].max_instance); 118 + sprintf(gvt->types[i].name, "GVTg_V5_%s", 119 + vgpu_types[i].name); 132 120 133 - min_low <<= 1; 134 - gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n", 135 - i, gvt->types[i].name, gvt->types[i].max_instance, 121 + gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u res %s\n", 122 + i, gvt->types[i].name, 136 123 gvt->types[i].avail_instance, 137 124 gvt->types[i].low_gm_size, 138 - gvt->types[i].high_gm_size, gvt->types[i].fence); 125 + gvt->types[i].high_gm_size, gvt->types[i].fence, 126 + vgpu_edid_str(gvt->types[i].resolution)); 139 127 } 140 128 141 129 gvt->num_types = i; ··· 152 138 { 153 139 int i; 154 140 unsigned int low_gm_avail, high_gm_avail, fence_avail; 155 - unsigned int low_gm_min, high_gm_min, fence_min, total_min; 141 + unsigned int low_gm_min, high_gm_min, fence_min; 156 142 157 143 /* Need to depend on maxium hw resource size but keep on 158 144 * 
static config for now. ··· 168 154 low_gm_min = low_gm_avail / gvt->types[i].low_gm_size; 169 155 high_gm_min = high_gm_avail / gvt->types[i].high_gm_size; 170 156 fence_min = fence_avail / gvt->types[i].fence; 171 - total_min = min(min(low_gm_min, high_gm_min), fence_min); 172 - gvt->types[i].avail_instance = min(gvt->types[i].max_instance, 173 - total_min); 157 + gvt->types[i].avail_instance = min(min(low_gm_min, high_gm_min), 158 + fence_min); 174 159 175 - gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n", 176 - i, gvt->types[i].name, gvt->types[i].max_instance, 160 + gvt_dbg_core("update type[%d]: %s avail %u low %u high %u fence %u\n", 161 + i, gvt->types[i].name, 177 162 gvt->types[i].avail_instance, gvt->types[i].low_gm_size, 178 163 gvt->types[i].high_gm_size, gvt->types[i].fence); 179 164 } ··· 261 248 if (ret) 262 249 goto out_detach_hypervisor_vgpu; 263 250 264 - ret = intel_vgpu_init_display(vgpu); 251 + ret = intel_vgpu_init_display(vgpu, param->resolution); 265 252 if (ret) 266 253 goto out_clean_gtt; 267 254 ··· 325 312 param.low_gm_sz = type->low_gm_size; 326 313 param.high_gm_sz = type->high_gm_size; 327 314 param.fence_sz = type->fence; 315 + param.resolution = type->resolution; 328 316 329 317 /* XXX current param based on MB */ 330 318 param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz); ··· 401 387 populate_pvinfo_page(vgpu); 402 388 intel_vgpu_reset_display(vgpu); 403 389 404 - if (dmlr) 390 + if (dmlr) { 405 391 intel_vgpu_reset_cfg_space(vgpu); 392 + /* only reset the failsafe mode when dmlr reset */ 393 + vgpu->failsafe = false; 394 + vgpu->pv_notified = false; 395 + } 406 396 } 407 397 408 398 vgpu->resetting = false;
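vgpu.c drops the old doubling heuristic (and the max_instance field) in favour of a fixed table of four types: the type-name suffix now comes straight from the table, and the number of creatable instances is whatever the aperture allows, avail_instance = min(low_avail / low_mm, high_avail / high_mm), further capped by the remaining fence registers in intel_gvt_update_vgpu_types(). A worked example with assumed aperture sizes (512 MB of low GM and 4096 MB of high GM left after the host reservation; real numbers depend on the SKU):

#include <stdio.h>

struct fixed_type { unsigned int low_mm, high_mm; const char *name; };

int main(void)
{
	static const struct fixed_type t[] = {	/* mirrors the patch's table (MB) */
		{  64,  512, "8" }, { 128,  512, "4" },
		{ 256, 1024, "2" }, { 512, 2048, "1" },
	};
	unsigned int low_avail = 512, high_avail = 4096;	/* MB, assumed */
	unsigned int i;

	for (i = 0; i < sizeof(t) / sizeof(t[0]); i++) {
		unsigned int by_low  = low_avail / t[i].low_mm;
		unsigned int by_high = high_avail / t[i].high_mm;

		printf("GVTg_V4_%s: avail %u\n", t[i].name,
		       by_low < by_high ? by_low : by_high);
	}
	return 0;
}

With these numbers the suffixes still line up with the instance counts (8, 4, 2, 1), but unlike before the type name no longer shifts when less memory is available; only avail_instance does.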