Merge tag 'drm-intel-fixes-2013-09-06' of git://people.freedesktop.org/~danvet/drm-intel into drm-fixes

- Early stolen mem reservation from Jesse in x86 boot code. Acked by Ingo
and hpa. This was ready much earlier, but I somehow thought it'd go in
through the x86 trees, which is why it's late. Keeps the pci resource
code from planting mmio bars in the middle of stolen mem, and avoids
other ugliness.
- vgaarb improvements from Alex Williamson plus the fix from Ville for the
vgacon->fbcon smooth transition "feature".
- Use the render ring for pageflips on ivb/hsw to avoid stalls due to
ring switching when flips were only run on the blitter (Chris).
- Deadlock fixes around our flush_workqueue usage, which crept back in -
lockdep isn't clever enough to catch these :(
- Shrinker recursion fix from Chris - this is the thing that blew up the
vma patches from Ben that I've taken out of 3.12.
- Fixup for the relocation refactoring. Also an igt testcase to make sure
we don't break this again.
- Pile of smaller fixups all over, shortlog has full details.

* tag 'drm-intel-fixes-2013-09-06' of git://people.freedesktop.org/~danvet/drm-intel: (29 commits)
drm/i915: Delay disabling of VGA memory until vgacon->fbcon handoff is done
drm/i915: try not to lose backlight CBLV precision
drm/i915: Confine page flips to BCS on Valleyview
drm/i915: Skip stolen region initialisation if none is reserved
drm/i915: fix gpu hang vs. flip stall deadlocks
drm/i915: Hold an object reference whilst we shrink it
drm/i915: fix i9xx_crtc_clock_get for multiplied pixels
drm/i915: handle sdvo input pixel multiplier correctly again
drm/i915: fix hpd work vs. flush_work in the pageflip code deadlock
drm/i915: fix up the relocate_entry refactoring
drm/i915: Fix pipe config warnings when dealing with LVDS fixed mode
drm/i915: Don't call sg_free_table() if sg_alloc_table() fails
i915: Update VGA arbiter support for newer devices
vgaarb: Fix VGA decodes changes
vgaarb: Don't disable resources that are not owned
drm/i915: Pin pages whilst mapping the dma-buf
drm/i915: enable trickle feed on Haswell
x86: add early quirk for reserving Intel graphics stolen memory v5
drm/i915: split PCI IDs out into i915_drm.h v4
i915_gem: Convert kmem_cache_alloc(...GFP_ZERO) to kmem_cache_zalloc
...

+774 -334
+154
arch/x86/kernel/early-quirks.c
··· 12 #include <linux/pci.h> 13 #include <linux/acpi.h> 14 #include <linux/pci_ids.h> 15 #include <asm/pci-direct.h> 16 #include <asm/dma.h> 17 #include <asm/io_apic.h> ··· 217 218 } 219 220 #define QFLAG_APPLY_ONCE 0x1 221 #define QFLAG_APPLIED 0x2 222 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) ··· 403 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 404 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, 405 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 406 {} 407 }; 408
··· 12 #include <linux/pci.h> 13 #include <linux/acpi.h> 14 #include <linux/pci_ids.h> 15 + #include <drm/i915_drm.h> 16 #include <asm/pci-direct.h> 17 #include <asm/dma.h> 18 #include <asm/io_apic.h> ··· 216 217 } 218 219 + /* 220 + * Systems with Intel graphics controllers set aside memory exclusively 221 + * for gfx driver use. This memory is not marked in the E820 as reserved 222 + * or as RAM, and so is subject to overlap from E820 manipulation later 223 + * in the boot process. On some systems, MMIO space is allocated on top, 224 + * despite the efforts of the "RAM buffer" approach, which simply rounds 225 + * memory boundaries up to 64M to try to catch space that may decode 226 + * as RAM and so is not suitable for MMIO. 227 + * 228 + * And yes, so far on current devices the base addr is always under 4G. 229 + */ 230 + static u32 __init intel_stolen_base(int num, int slot, int func) 231 + { 232 + u32 base; 233 + 234 + /* 235 + * For the PCI IDs in this quirk, the stolen base is always 236 + * in 0x5c, aka the BDSM register (yes that's really what 237 + * it's called). 238 + */ 239 + base = read_pci_config(num, slot, func, 0x5c); 240 + base &= ~((1<<20) - 1); 241 + 242 + return base; 243 + } 244 + 245 + #define KB(x) ((x) * 1024) 246 + #define MB(x) (KB (KB (x))) 247 + #define GB(x) (MB (KB (x))) 248 + 249 + static size_t __init gen3_stolen_size(int num, int slot, int func) 250 + { 251 + size_t stolen_size; 252 + u16 gmch_ctrl; 253 + 254 + gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL); 255 + 256 + switch (gmch_ctrl & I855_GMCH_GMS_MASK) { 257 + case I855_GMCH_GMS_STOLEN_1M: 258 + stolen_size = MB(1); 259 + break; 260 + case I855_GMCH_GMS_STOLEN_4M: 261 + stolen_size = MB(4); 262 + break; 263 + case I855_GMCH_GMS_STOLEN_8M: 264 + stolen_size = MB(8); 265 + break; 266 + case I855_GMCH_GMS_STOLEN_16M: 267 + stolen_size = MB(16); 268 + break; 269 + case I855_GMCH_GMS_STOLEN_32M: 270 + stolen_size = MB(32); 271 + break; 272 + case I915_GMCH_GMS_STOLEN_48M: 273 + stolen_size = MB(48); 274 + break; 275 + case I915_GMCH_GMS_STOLEN_64M: 276 + stolen_size = MB(64); 277 + break; 278 + case G33_GMCH_GMS_STOLEN_128M: 279 + stolen_size = MB(128); 280 + break; 281 + case G33_GMCH_GMS_STOLEN_256M: 282 + stolen_size = MB(256); 283 + break; 284 + case INTEL_GMCH_GMS_STOLEN_96M: 285 + stolen_size = MB(96); 286 + break; 287 + case INTEL_GMCH_GMS_STOLEN_160M: 288 + stolen_size = MB(160); 289 + break; 290 + case INTEL_GMCH_GMS_STOLEN_224M: 291 + stolen_size = MB(224); 292 + break; 293 + case INTEL_GMCH_GMS_STOLEN_352M: 294 + stolen_size = MB(352); 295 + break; 296 + default: 297 + stolen_size = 0; 298 + break; 299 + } 300 + 301 + return stolen_size; 302 + } 303 + 304 + static size_t __init gen6_stolen_size(int num, int slot, int func) 305 + { 306 + u16 gmch_ctrl; 307 + 308 + gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); 309 + gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; 310 + gmch_ctrl &= SNB_GMCH_GMS_MASK; 311 + 312 + return gmch_ctrl << 25; /* 32 MB units */ 313 + } 314 + 315 + typedef size_t (*stolen_size_fn)(int num, int slot, int func); 316 + 317 + static struct pci_device_id intel_stolen_ids[] __initdata = { 318 + INTEL_I915G_IDS(gen3_stolen_size), 319 + INTEL_I915GM_IDS(gen3_stolen_size), 320 + INTEL_I945G_IDS(gen3_stolen_size), 321 + INTEL_I945GM_IDS(gen3_stolen_size), 322 + INTEL_VLV_M_IDS(gen3_stolen_size), 323 + INTEL_VLV_D_IDS(gen3_stolen_size), 324 + INTEL_PINEVIEW_IDS(gen3_stolen_size), 325 + INTEL_I965G_IDS(gen3_stolen_size), 326 + INTEL_G33_IDS(gen3_stolen_size), 327 + 
INTEL_I965GM_IDS(gen3_stolen_size), 328 + INTEL_GM45_IDS(gen3_stolen_size), 329 + INTEL_G45_IDS(gen3_stolen_size), 330 + INTEL_IRONLAKE_D_IDS(gen3_stolen_size), 331 + INTEL_IRONLAKE_M_IDS(gen3_stolen_size), 332 + INTEL_SNB_D_IDS(gen6_stolen_size), 333 + INTEL_SNB_M_IDS(gen6_stolen_size), 334 + INTEL_IVB_M_IDS(gen6_stolen_size), 335 + INTEL_IVB_D_IDS(gen6_stolen_size), 336 + INTEL_HSW_D_IDS(gen6_stolen_size), 337 + INTEL_HSW_M_IDS(gen6_stolen_size), 338 + }; 339 + 340 + static void __init intel_graphics_stolen(int num, int slot, int func) 341 + { 342 + size_t size; 343 + int i; 344 + u32 start; 345 + u16 device, subvendor, subdevice; 346 + 347 + device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); 348 + subvendor = read_pci_config_16(num, slot, func, 349 + PCI_SUBSYSTEM_VENDOR_ID); 350 + subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID); 351 + 352 + for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) { 353 + if (intel_stolen_ids[i].device == device) { 354 + stolen_size_fn stolen_size = 355 + (stolen_size_fn)intel_stolen_ids[i].driver_data; 356 + size = stolen_size(num, slot, func); 357 + start = intel_stolen_base(num, slot, func); 358 + if (size && start) { 359 + /* Mark this space as reserved */ 360 + e820_add_region(start, size, E820_RESERVED); 361 + sanitize_e820_map(e820.map, 362 + ARRAY_SIZE(e820.map), 363 + &e820.nr_map); 364 + } 365 + return; 366 + } 367 + } 368 + } 369 + 370 #define QFLAG_APPLY_ONCE 0x1 371 #define QFLAG_APPLIED 0x2 372 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) ··· 251 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 252 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, 253 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 254 + { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID, 255 + QFLAG_APPLY_ONCE, intel_graphics_stolen }, 256 {} 257 }; 258
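
For illustration, the gen6+ decode above is a plain field extract from the GMCH control word; a minimal standalone sketch, using the SNB_GMCH_GMS_SHIFT/MASK values (3 and 0x1f) visible in the i915_reg.h hunk further below:

    #include <stdint.h>
    #include <stddef.h>

    /* Example: gmch_ctrl = 0x0013 -> GMS field = 2 -> 2 * 32 MB = 64 MB. */
    static size_t decode_gen6_stolen(uint16_t gmch_ctrl)
    {
            gmch_ctrl >>= 3;                /* SNB_GMCH_GMS_SHIFT */
            gmch_ctrl &= 0x1f;              /* SNB_GMCH_GMS_MASK */
            return (size_t)gmch_ctrl << 25; /* 32 MB units */
    }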
+10 -1
drivers/gpu/drm/i915/i915_debugfs.c
··· 857 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 858 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 859 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 860 - u32 rpstat, cagf; 861 u32 rpupei, rpcurup, rpprevup; 862 u32 rpdownei, rpcurdown, rpprevdown; 863 int max_freq; ··· 868 return ret; 869 870 gen6_gt_force_wake_get(dev_priv); 871 872 rpstat = I915_READ(GEN6_RPSTAT1); 873 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); ··· 901 gt_perf_status & 0xff); 902 seq_printf(m, "Render p-state limit: %d\n", 903 rp_state_limits & 0xff); 904 seq_printf(m, "CAGF: %dMHz\n", cagf); 905 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 906 GEN6_CURICONT_MASK);
··· 857 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 858 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 859 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 860 + u32 rpstat, cagf, reqf; 861 u32 rpupei, rpcurup, rpprevup; 862 u32 rpdownei, rpcurdown, rpprevdown; 863 int max_freq; ··· 868 return ret; 869 870 gen6_gt_force_wake_get(dev_priv); 871 + 872 + reqf = I915_READ(GEN6_RPNSWREQ); 873 + reqf &= ~GEN6_TURBO_DISABLE; 874 + if (IS_HASWELL(dev)) 875 + reqf >>= 24; 876 + else 877 + reqf >>= 25; 878 + reqf *= GT_FREQUENCY_MULTIPLIER; 879 880 rpstat = I915_READ(GEN6_RPSTAT1); 881 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); ··· 893 gt_perf_status & 0xff); 894 seq_printf(m, "Render p-state limit: %d\n", 895 rp_state_limits & 0xff); 896 + seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 897 seq_printf(m, "CAGF: %dMHz\n", cagf); 898 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 899 GEN6_CURICONT_MASK);
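
The new RPNSWREQ readout is a straight field decode; a worked example, assuming GT_FREQUENCY_MULTIPLIER is the 50 MHz unit used elsewhere in the driver:

    /*
     * Illustrative value: raw GEN6_RPNSWREQ = 0x12000000 on Haswell.
     * GEN6_TURBO_DISABLE is clear, the shift by 24 extracts the ratio
     * 0x12 = 18, and 18 * 50 MHz = 900 MHz is reported as RPNSWREQ.
     * Pre-Haswell gen6/gen7 parts keep the ratio one bit higher, hence
     * the shift by 25 there.
     */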
+12 -3
drivers/gpu/drm/i915/i915_dma.c
··· 1290 * then we do not take part in VGA arbitration and the 1291 * vga_client_register() fails with -ENODEV. 1292 */ 1293 - ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); 1294 - if (ret && ret != -ENODEV) 1295 - goto out; 1296 1297 intel_register_dsm_handler(); 1298 ··· 1350 * tiny window where we will lose hotplug notifications. 1351 */ 1352 intel_fbdev_initial_config(dev); 1353 1354 /* Only enable hotplug handling once the fbdev is fully set up. */ 1355 dev_priv->enable_hotplug_processing = true;
··· 1290 * then we do not take part in VGA arbitration and the 1291 * vga_client_register() fails with -ENODEV. 1292 */ 1293 + if (!HAS_PCH_SPLIT(dev)) { 1294 + ret = vga_client_register(dev->pdev, dev, NULL, 1295 + i915_vga_set_decode); 1296 + if (ret && ret != -ENODEV) 1297 + goto out; 1298 + } 1299 1300 intel_register_dsm_handler(); 1301 ··· 1347 * tiny window where we will lose hotplug notifications. 1348 */ 1349 intel_fbdev_initial_config(dev); 1350 + 1351 + /* 1352 + * Must do this after fbcon init so that 1353 + * vgacon_save_screen() works during the handover. 1354 + */ 1355 + i915_disable_vga_mem(dev); 1356 1357 /* Only enable hotplug handling once the fbdev is fully set up. */ 1358 dev_priv->enable_hotplug_processing = true;
+34 -130
drivers/gpu/drm/i915/i915_drv.c
··· 157 static struct drm_driver driver; 158 extern int intel_agp_enabled; 159 160 - #define INTEL_VGA_DEVICE(id, info) { \ 161 - .class = PCI_BASE_CLASS_DISPLAY << 16, \ 162 - .class_mask = 0xff0000, \ 163 - .vendor = 0x8086, \ 164 - .device = id, \ 165 - .subvendor = PCI_ANY_ID, \ 166 - .subdevice = PCI_ANY_ID, \ 167 - .driver_data = (unsigned long) info } 168 - 169 - #define INTEL_QUANTA_VGA_DEVICE(info) { \ 170 - .class = PCI_BASE_CLASS_DISPLAY << 16, \ 171 - .class_mask = 0xff0000, \ 172 - .vendor = 0x8086, \ 173 - .device = 0x16a, \ 174 - .subvendor = 0x152d, \ 175 - .subdevice = 0x8990, \ 176 - .driver_data = (unsigned long) info } 177 - 178 - 179 static const struct intel_device_info intel_i830_info = { 180 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, 181 .has_overlay = 1, .overlay_needs_physical = 1, ··· 331 .has_vebox_ring = 1, 332 }; 333 334 static const struct pci_device_id pciidlist[] = { /* aka */ 335 - INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ 336 - INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ 337 - INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ 338 - INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), 339 - INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ 340 - INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ 341 - INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ 342 - INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ 343 - INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ 344 - INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ 345 - INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ 346 - INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ 347 - INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ 348 - INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ 349 - INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ 350 - INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ 351 - INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ 352 - INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ 353 - INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ 354 - INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ 355 - INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ 356 - INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ 357 - INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ 358 - INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ 359 - INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ 360 - INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ 361 - INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ 362 - INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), 363 - INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), 364 - INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 365 - INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), 366 - INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), 367 - INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), 368 - INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), 369 - INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), 370 - INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), 371 - INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), 372 - INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), 373 - INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ 374 - INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ 375 - INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ 376 - INTEL_VGA_DEVICE(0x0162, 
&intel_ivybridge_d_info), /* GT2 desktop */ 377 - INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ 378 - INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */ 379 - INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ 380 - INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ 381 - INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ 382 - INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */ 383 - INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ 384 - INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ 385 - INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */ 386 - INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ 387 - INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ 388 - INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ 389 - INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */ 390 - INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */ 391 - INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */ 392 - INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */ 393 - INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */ 394 - INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */ 395 - INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ 396 - INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ 397 - INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */ 398 - INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ 399 - INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ 400 - INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */ 401 - INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ 402 - INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ 403 - INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */ 404 - INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */ 405 - INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */ 406 - INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */ 407 - INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */ 408 - INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */ 409 - INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */ 410 - INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ 411 - INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ 412 - INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */ 413 - INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ 414 - INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */ 415 - INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */ 416 - INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ 417 - INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ 418 - INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */ 419 - INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */ 420 - INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */ 421 - INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */ 422 - INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */ 423 - INTEL_VGA_DEVICE(0x0A1E, 
&intel_haswell_m_info), /* ULT GT2 reserved */ 424 - INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */ 425 - INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ 426 - INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ 427 - INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */ 428 - INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */ 429 - INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ 430 - INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */ 431 - INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ 432 - INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ 433 - INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */ 434 - INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */ 435 - INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */ 436 - INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */ 437 - INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */ 438 - INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */ 439 - INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */ 440 - INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), 441 - INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info), 442 - INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info), 443 - INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info), 444 - INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), 445 - INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), 446 {0, 0, 0} 447 }; 448
··· 157 static struct drm_driver driver; 158 extern int intel_agp_enabled; 159 160 static const struct intel_device_info intel_i830_info = { 161 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, 162 .has_overlay = 1, .overlay_needs_physical = 1, ··· 350 .has_vebox_ring = 1, 351 }; 352 353 + /* 354 + * Make sure any device matches here are from most specific to most 355 + * general. For example, since the Quanta match is based on the subsystem 356 + * and subvendor IDs, we need it to come before the more general IVB 357 + * PCI ID matches, otherwise we'll use the wrong info struct above. 358 + */ 359 + #define INTEL_PCI_IDS \ 360 + INTEL_I830_IDS(&intel_i830_info), \ 361 + INTEL_I845G_IDS(&intel_845g_info), \ 362 + INTEL_I85X_IDS(&intel_i85x_info), \ 363 + INTEL_I865G_IDS(&intel_i865g_info), \ 364 + INTEL_I915G_IDS(&intel_i915g_info), \ 365 + INTEL_I915GM_IDS(&intel_i915gm_info), \ 366 + INTEL_I945G_IDS(&intel_i945g_info), \ 367 + INTEL_I945GM_IDS(&intel_i945gm_info), \ 368 + INTEL_I965G_IDS(&intel_i965g_info), \ 369 + INTEL_G33_IDS(&intel_g33_info), \ 370 + INTEL_I965GM_IDS(&intel_i965gm_info), \ 371 + INTEL_GM45_IDS(&intel_gm45_info), \ 372 + INTEL_G45_IDS(&intel_g45_info), \ 373 + INTEL_PINEVIEW_IDS(&intel_pineview_info), \ 374 + INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \ 375 + INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \ 376 + INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \ 377 + INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \ 378 + INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \ 379 + INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \ 380 + INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \ 381 + INTEL_HSW_D_IDS(&intel_haswell_d_info), \ 382 + INTEL_HSW_M_IDS(&intel_haswell_m_info), \ 383 + INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ 384 + INTEL_VLV_D_IDS(&intel_valleyview_d_info) 385 + 386 static const struct pci_device_id pciidlist[] = { /* aka */ 387 + INTEL_PCI_IDS, 388 {0, 0, 0} 389 }; 390
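
The INTEL_*_IDS macros come from the shared header added by the "split PCI IDs out into i915_drm.h" commit in the shortlog. A sketch of their shape, reconstructed from the INTEL_VGA_DEVICE macro deleted above (the exact header contents may differ):

    #define INTEL_VGA_DEVICE(id, info) {            \
            .class = PCI_BASE_CLASS_DISPLAY << 16,  \
            .class_mask = 0xff0000,                 \
            .vendor = 0x8086,                       \
            .device = id,                           \
            .subvendor = PCI_ANY_ID,                \
            .subdevice = PCI_ANY_ID,                \
            .driver_data = (unsigned long) info }

    /* Each platform macro then lists its device IDs, e.g.: */
    #define INTEL_I915G_IDS(info) \
            INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \
            INTEL_VGA_DEVICE(0x258a, info)  /* E7221_G */

Since driver_data is just an unsigned long, the same tables can carry &intel_*_info here and a stolen_size_fn in the early-quirks hunk above.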
+7
drivers/gpu/drm/i915/i915_drv.h
··· 1236 1237 unsigned int fsb_freq, mem_freq, is_ddr3; 1238 1239 struct workqueue_struct *wq; 1240 1241 /* Display functions */
··· 1236 1237 unsigned int fsb_freq, mem_freq, is_ddr3; 1238 1239 + /** 1240 + * wq - Driver workqueue for GEM. 1241 + * 1242 + * NOTE: Work items scheduled here are not allowed to grab any modeset 1243 + * locks, for otherwise the flushing done in the pageflip code will 1244 + * result in deadlocks. 1245 + */ 1246 struct workqueue_struct *wq; 1247 1248 /* Display functions */
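
A sketch of the deadlock this comment (and the i915_irq.c hunk below) is about, simplified to two actors:

    /*
     *   pageflip path                    work item on dev_priv->wq
     *   -------------                    -------------------------
     *   mutex_lock(modeset lock);
     *                                    mutex_lock(modeset lock);  <- blocks
     *   flush_workqueue(dev_priv->wq);   <- waits for the work item
     *
     * Neither side can make progress, and lockdep doesn't model the
     * flush_workqueue() dependency, which is why these keep creeping
     * back in. The fix below moves such work items onto the system
     * workqueue via schedule_work().
     */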
+40 -8
drivers/gpu/drm/i915/i915_gem.c
··· 212 void *i915_gem_object_alloc(struct drm_device *dev) 213 { 214 struct drm_i915_private *dev_priv = dev->dev_private; 215 - return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO); 216 } 217 218 void i915_gem_object_free(struct drm_i915_gem_object *obj) ··· 1695 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, 1696 bool purgeable_only) 1697 { 1698 struct drm_i915_gem_object *obj, *next; 1699 long count = 0; 1700 ··· 1710 } 1711 } 1712 1713 - list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list, 1714 - global_list) { 1715 struct i915_vma *vma, *v; 1716 1717 if (!i915_gem_object_is_purgeable(obj) && purgeable_only) 1718 continue; 1719 1720 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) 1721 if (i915_vma_unbind(vma)) 1722 break; 1723 1724 - if (!i915_gem_object_put_pages(obj)) { 1725 count += obj->base.size >> PAGE_SHIFT; 1726 - if (count >= target) 1727 - return count; 1728 - } 1729 } 1730 1731 return count; 1732 } ··· 1807 1808 page_count = obj->base.size / PAGE_SIZE; 1809 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 1810 - sg_free_table(st); 1811 kfree(st); 1812 return -ENOMEM; 1813 }
··· 212 void *i915_gem_object_alloc(struct drm_device *dev) 213 { 214 struct drm_i915_private *dev_priv = dev->dev_private; 215 + return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL); 216 } 217 218 void i915_gem_object_free(struct drm_i915_gem_object *obj) ··· 1695 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, 1696 bool purgeable_only) 1697 { 1698 + struct list_head still_bound_list; 1699 struct drm_i915_gem_object *obj, *next; 1700 long count = 0; 1701 ··· 1709 } 1710 } 1711 1712 + /* 1713 + * As we may completely rewrite the bound list whilst unbinding 1714 + * (due to retiring requests) we have to strictly process only 1715 + * one element of the list at a time, and recheck the list 1716 + * on every iteration. 1717 + */ 1718 + INIT_LIST_HEAD(&still_bound_list); 1719 + while (count < target && !list_empty(&dev_priv->mm.bound_list)) { 1720 struct i915_vma *vma, *v; 1721 + 1722 + obj = list_first_entry(&dev_priv->mm.bound_list, 1723 + typeof(*obj), global_list); 1724 + list_move_tail(&obj->global_list, &still_bound_list); 1725 1726 if (!i915_gem_object_is_purgeable(obj) && purgeable_only) 1727 continue; 1728 + 1729 + /* 1730 + * Hold a reference whilst we unbind this object, as we may 1731 + * end up waiting for and retiring requests. This might 1732 + * release the final reference (held by the active list) 1733 + * and result in the object being freed from under 1734 + * us. 1735 + * 1736 + * Note 1: Shrinking the bound list is special since only active 1737 + * (and hence bound) objects can contain such limbo objects, so 1738 + * we don't need special tricks for shrinking the unbound list. 1739 + * The only other place where we have to be careful with active 1740 + * objects suddenly disappearing due to retiring requests is the 1741 + * eviction code. 1742 + * 1743 + * Note 2: Even though the bound list doesn't hold a reference 1744 + * to the object we can safely grab one here: The final object 1745 + * unreferencing and the bound_list are both protected by the 1746 + * dev->struct_mutex and so we won't ever be able to observe an 1747 + * object on the bound_list with a reference count of 0. 1748 + */ 1749 + drm_gem_object_reference(&obj->base); 1750 1751 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) 1752 if (i915_vma_unbind(vma)) 1753 break; 1754 1755 + if (i915_gem_object_put_pages(obj) == 0) 1756 count += obj->base.size >> PAGE_SHIFT; 1757 + 1758 + drm_gem_object_unreference(&obj->base); 1759 } 1760 + list_splice(&still_bound_list, &dev_priv->mm.bound_list); 1761 1762 return count; 1763 } ··· 1774 1775 page_count = obj->base.size / PAGE_SIZE; 1776 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 1777 kfree(st); 1778 return -ENOMEM; 1779 }
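
The list discipline in the shrinker fix reduces to a reusable pattern: park each element on a private list before operating on it, so concurrent rewrites of the source list can neither lose nor double-visit entries, then splice the survivors back. A generic skeleton with toy types (illustration only, not driver code):

    #include <linux/list.h>

    struct item { struct list_head link; };

    static void drain_carefully(struct list_head *src)
    {
            LIST_HEAD(still_in_use);
            struct item *it;

            while (!list_empty(src)) {
                    it = list_first_entry(src, struct item, link);
                    /* park it; src may be rewritten while we work */
                    list_move_tail(&it->link, &still_in_use);
                    /* ... process it, possibly dropping it entirely ... */
            }
            list_splice(&still_in_use, src);
    }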
+22 -19
drivers/gpu/drm/i915/i915_gem_dmabuf.c
··· 42 43 ret = i915_mutex_lock_interruptible(obj->base.dev); 44 if (ret) 45 - return ERR_PTR(ret); 46 47 ret = i915_gem_object_get_pages(obj); 48 - if (ret) { 49 - st = ERR_PTR(ret); 50 - goto out; 51 - } 52 53 /* Copy sg so that we make an independent mapping */ 54 st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 55 if (st == NULL) { 56 - st = ERR_PTR(-ENOMEM); 57 - goto out; 58 } 59 60 ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL); 61 - if (ret) { 62 - kfree(st); 63 - st = ERR_PTR(ret); 64 - goto out; 65 - } 66 67 src = obj->pages->sgl; 68 dst = st->sgl; ··· 70 } 71 72 if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { 73 - sg_free_table(st); 74 - kfree(st); 75 - st = ERR_PTR(-ENOMEM); 76 - goto out; 77 } 78 79 - i915_gem_object_pin_pages(obj); 80 - 81 - out: 82 mutex_unlock(&obj->base.dev->struct_mutex); 83 return st; 84 } 85 86 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
··· 42 43 ret = i915_mutex_lock_interruptible(obj->base.dev); 44 if (ret) 45 + goto err; 46 47 ret = i915_gem_object_get_pages(obj); 48 + if (ret) 49 + goto err_unlock; 50 + 51 + i915_gem_object_pin_pages(obj); 52 53 /* Copy sg so that we make an independent mapping */ 54 st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 55 if (st == NULL) { 56 + ret = -ENOMEM; 57 + goto err_unpin; 58 } 59 60 ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL); 61 + if (ret) 62 + goto err_free; 63 64 src = obj->pages->sgl; 65 dst = st->sgl; ··· 73 } 74 75 if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { 76 + ret = -ENOMEM; 77 + goto err_free_sg; 78 } 79 80 mutex_unlock(&obj->base.dev->struct_mutex); 81 return st; 82 + 83 + err_free_sg: 84 + sg_free_table(st); 85 + err_free: 86 + kfree(st); 87 + err_unpin: 88 + i915_gem_object_unpin_pages(obj); 89 + err_unlock: 90 + mutex_unlock(&obj->base.dev->struct_mutex); 91 + err: 92 + return ERR_PTR(ret); 93 } 94 95 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
+3
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 310 else 311 ret = relocate_entry_gtt(obj, reloc); 312 313 /* and update the user's relocation entry */ 314 reloc->presumed_offset = target_offset; 315
··· 310 else 311 ret = relocate_entry_gtt(obj, reloc); 312 313 + if (ret) 314 + return ret; 315 + 316 /* and update the user's relocation entry */ 317 reloc->presumed_offset = target_offset; 318
+3
drivers/gpu/drm/i915/i915_gem_stolen.c
··· 201 struct drm_i915_private *dev_priv = dev->dev_private; 202 int bios_reserved = 0; 203 204 dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); 205 if (dev_priv->mm.stolen_base == 0) 206 return 0;
··· 201 struct drm_i915_private *dev_priv = dev->dev_private; 202 int bios_reserved = 0; 203 204 + if (dev_priv->gtt.stolen_size == 0) 205 + return 0; 206 + 207 dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); 208 if (dev_priv->mm.stolen_base == 0) 209 return 0;
+1 -1
drivers/gpu/drm/i915/i915_gpu_error.c
··· 641 if (WARN_ON(ring->id != RCS)) 642 return NULL; 643 644 - obj = ring->private; 645 if (acthd >= i915_gem_obj_ggtt_offset(obj) && 646 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) 647 return i915_error_object_create(dev_priv, obj);
··· 641 if (WARN_ON(ring->id != RCS)) 642 return NULL; 643 644 + obj = ring->scratch.obj; 645 if (acthd >= i915_gem_obj_ggtt_offset(obj) && 646 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) 647 return i915_error_object_create(dev_priv, obj);
+17 -6
drivers/gpu/drm/i915/i915_irq.c
··· 1027 dev_priv->display.hpd_irq_setup(dev); 1028 spin_unlock(&dev_priv->irq_lock); 1029 1030 - queue_work(dev_priv->wq, 1031 - &dev_priv->hotplug_work); 1032 } 1033 1034 static void gmbus_irq_handler(struct drm_device *dev) ··· 1660 wake_up_all(&ring->irq_queue); 1661 } 1662 1663 - queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 1664 } 1665 1666 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) ··· 2038 2039 for_each_ring(ring, dev_priv, i) { 2040 if (ring->hangcheck.score > FIRE) { 2041 - DRM_ERROR("%s on %s\n", 2042 - stuck[i] ? "stuck" : "no progress", 2043 - ring->name); 2044 rings_hung++; 2045 } 2046 }
··· 1027 dev_priv->display.hpd_irq_setup(dev); 1028 spin_unlock(&dev_priv->irq_lock); 1029 1030 + /* 1031 + * Our hotplug handler can grab modeset locks (by calling down into the 1032 + * fb helpers). Hence it must not be run on our own dev_priv->wq work 1033 + * queue for otherwise the flush_work in the pageflip code will 1034 + * deadlock. 1035 + */ 1036 + schedule_work(&dev_priv->hotplug_work); 1037 } 1038 1039 static void gmbus_irq_handler(struct drm_device *dev) ··· 1655 wake_up_all(&ring->irq_queue); 1656 } 1657 1658 + /* 1659 + * Our reset work can grab modeset locks (since it needs to reset the 1660 + * state of outstanding pageflips). Hence it must not be run on our own 1661 + * dev_priv->wq work queue for otherwise the flush_work in the pageflip 1662 + * code will deadlock. 1663 + */ 1664 + schedule_work(&dev_priv->gpu_error.work); 1665 } 1666 1667 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) ··· 2027 2028 for_each_ring(ring, dev_priv, i) { 2029 if (ring->hangcheck.score > FIRE) { 2030 + DRM_INFO("%s on %s\n", 2031 + stuck[i] ? "stuck" : "no progress", 2032 + ring->name); 2033 rings_hung++; 2034 } 2035 }
+19 -15
drivers/gpu/drm/i915/i915_reg.h
··· 33 #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) 34 #define _MASKED_BIT_DISABLE(a) ((a) << 16) 35 36 - /* 37 - * The Bridge device's PCI config space has information about the 38 - * fb aperture size and the amount of pre-reserved memory. 39 - * This is all handled in the intel-gtt.ko module. i915.ko only 40 - * cares about the vga bit for the vga rbiter. 41 - */ 42 - #define INTEL_GMCH_CTRL 0x52 43 - #define INTEL_GMCH_VGA_DISABLE (1 << 1) 44 - #define SNB_GMCH_CTRL 0x50 45 - #define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ 46 - #define SNB_GMCH_GGMS_MASK 0x3 47 - #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ 48 - #define SNB_GMCH_GMS_MASK 0x1f 49 - 50 - 51 /* PCI config space */ 52 53 #define HPLLCC 0xc0 /* 855 only */ ··· 230 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! 231 */ 232 #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 233 #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 234 #define MI_FLUSH_DW_STORE_INDEX (1<<21) 235 #define MI_INVALIDATE_TLB (1<<18) ··· 679 #define FPGA_DBG_RM_NOCLAIM (1<<31) 680 681 #define DERRMR 0x44050 682 683 /* GM45+ chicken bits -- debug workaround bits that may be required 684 * for various sorts of correct behavior. The top 16 bits of each are ··· 3313 #define MCURSOR_PIPE_A 0x00 3314 #define MCURSOR_PIPE_B (1 << 28) 3315 #define MCURSOR_GAMMA_ENABLE (1 << 26) 3316 #define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) 3317 #define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) 3318 #define CURSOR_POS_MASK 0x007FF
··· 33 #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) 34 #define _MASKED_BIT_DISABLE(a) ((a) << 16) 35 36 /* PCI config space */ 37 38 #define HPLLCC 0xc0 /* 855 only */ ··· 245 * address/value pairs. Don't overdo it, though, x <= 2^4 must hold! 246 */ 247 #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 248 + #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) 249 #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 250 #define MI_FLUSH_DW_STORE_INDEX (1<<21) 251 #define MI_INVALIDATE_TLB (1<<18) ··· 693 #define FPGA_DBG_RM_NOCLAIM (1<<31) 694 695 #define DERRMR 0x44050 696 + #define DERRMR_PIPEA_SCANLINE (1<<0) 697 + #define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1) 698 + #define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2) 699 + #define DERRMR_PIPEA_VBLANK (1<<3) 700 + #define DERRMR_PIPEA_HBLANK (1<<5) 701 + #define DERRMR_PIPEB_SCANLINE (1<<8) 702 + #define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9) 703 + #define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10) 704 + #define DERRMR_PIPEB_VBLANK (1<<11) 705 + #define DERRMR_PIPEB_HBLANK (1<<13) 706 + /* Note that PIPEC is not a simple translation of PIPEA/PIPEB */ 707 + #define DERRMR_PIPEC_SCANLINE (1<<14) 708 + #define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15) 709 + #define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20) 710 + #define DERRMR_PIPEC_VBLANK (1<<21) 711 + #define DERRMR_PIPEC_HBLANK (1<<22) 712 + 713 714 /* GM45+ chicken bits -- debug workaround bits that may be required 715 * for various sorts of correct behavior. The top 16 bits of each are ··· 3310 #define MCURSOR_PIPE_A 0x00 3311 #define MCURSOR_PIPE_B (1 << 28) 3312 #define MCURSOR_GAMMA_ENABLE (1 << 26) 3313 + #define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) 3314 #define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) 3315 #define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) 3316 #define CURSOR_POS_MASK 0x007FF
+31 -5
drivers/gpu/drm/i915/i915_sysfs.c
··· 224 return snprintf(buf, PAGE_SIZE, "%d\n", ret); 225 } 226 227 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 228 { 229 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); ··· 378 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); 379 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); 380 381 382 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf); 383 static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); ··· 419 &dev_attr_gt_RP0_freq_mhz.attr, 420 &dev_attr_gt_RP1_freq_mhz.attr, 421 &dev_attr_gt_RPn_freq_mhz.attr, 422 NULL, 423 }; 424 ··· 513 DRM_ERROR("l3 parity sysfs setup failed\n"); 514 } 515 516 - if (INTEL_INFO(dev)->gen >= 6) { 517 ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs); 518 - if (ret) 519 - DRM_ERROR("gen6 sysfs setup failed\n"); 520 - } 521 522 ret = sysfs_create_bin_file(&dev->primary->kdev.kobj, 523 &error_state_attr); ··· 530 void i915_teardown_sysfs(struct drm_device *dev) 531 { 532 sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr); 533 - sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); 534 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 535 #ifdef CONFIG_PM 536 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
··· 224 return snprintf(buf, PAGE_SIZE, "%d\n", ret); 225 } 226 227 + static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, 228 + struct device_attribute *attr, char *buf) 229 + { 230 + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 231 + struct drm_device *dev = minor->dev; 232 + struct drm_i915_private *dev_priv = dev->dev_private; 233 + 234 + return snprintf(buf, PAGE_SIZE, "%d\n", 235 + vlv_gpu_freq(dev_priv->mem_freq, 236 + dev_priv->rps.rpe_delay)); 237 + } 238 + 239 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 240 { 241 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); ··· 366 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); 367 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); 368 369 + static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL); 370 371 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf); 372 static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); ··· 406 &dev_attr_gt_RP0_freq_mhz.attr, 407 &dev_attr_gt_RP1_freq_mhz.attr, 408 &dev_attr_gt_RPn_freq_mhz.attr, 409 + NULL, 410 + }; 411 + 412 + static const struct attribute *vlv_attrs[] = { 413 + &dev_attr_gt_cur_freq_mhz.attr, 414 + &dev_attr_gt_max_freq_mhz.attr, 415 + &dev_attr_gt_min_freq_mhz.attr, 416 + &dev_attr_vlv_rpe_freq_mhz.attr, 417 NULL, 418 }; 419 ··· 492 DRM_ERROR("l3 parity sysfs setup failed\n"); 493 } 494 495 + ret = 0; 496 + if (IS_VALLEYVIEW(dev)) 497 + ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs); 498 + else if (INTEL_INFO(dev)->gen >= 6) 499 ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs); 500 + if (ret) 501 + DRM_ERROR("RPS sysfs setup failed\n"); 502 503 ret = sysfs_create_bin_file(&dev->primary->kdev.kobj, 504 &error_state_attr); ··· 507 void i915_teardown_sysfs(struct drm_device *dev) 508 { 509 sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr); 510 + if (IS_VALLEYVIEW(dev)) 511 + sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs); 512 + else 513 + sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); 514 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 515 #ifdef CONFIG_PM 516 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+1 -1
drivers/gpu/drm/i915/intel_crt.c
··· 688 struct drm_i915_private *dev_priv = dev->dev_private; 689 struct intel_crt *crt = intel_attached_crt(connector); 690 691 - if (HAS_PCH_SPLIT(dev)) { 692 u32 adpa; 693 694 adpa = I915_READ(crt->adpa_reg);
··· 688 struct drm_i915_private *dev_priv = dev->dev_private; 689 struct intel_crt *crt = intel_attached_crt(connector); 690 691 + if (INTEL_INFO(dev)->gen >= 5) { 692 u32 adpa; 693 694 adpa = I915_READ(crt->adpa_reg);
+69 -14
drivers/gpu/drm/i915/intel_display.c
··· 2077 else 2078 dspcntr &= ~DISPPLANE_TILED; 2079 2080 - /* must disable */ 2081 - dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2082 2083 I915_WRITE(reg, dspcntr); 2084 ··· 6764 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 6765 cntl |= CURSOR_MODE_DISABLE; 6766 } 6767 - if (IS_HASWELL(dev)) 6768 cntl |= CURSOR_PIPE_CSC_ENABLE; 6769 I915_WRITE(CURCNTR_IVB(pipe), cntl); 6770 6771 intel_crtc->cursor_visible = visible; ··· 7313 } 7314 } 7315 7316 - pipe_config->adjusted_mode.clock = clock.dot * 7317 - pipe_config->pixel_multiplier; 7318 } 7319 7320 static void ironlake_crtc_clock_get(struct intel_crtc *crtc, ··· 7831 return ret; 7832 } 7833 7834 - /* 7835 - * On gen7 we currently use the blit ring because (in early silicon at least) 7836 - * the render ring doesn't give us interrpts for page flip completion, which 7837 - * means clients will hang after the first flip is queued. Fortunately the 7838 - * blit ring generates interrupts properly, so use it instead. 7839 - */ 7840 static int intel_gen7_queue_flip(struct drm_device *dev, 7841 struct drm_crtc *crtc, 7842 struct drm_framebuffer *fb, ··· 7839 { 7840 struct drm_i915_private *dev_priv = dev->dev_private; 7841 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7842 - struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 7843 uint32_t plane_bit = 0; 7844 - int ret; 7845 7846 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 7847 if (ret) ··· 7867 goto err_unpin; 7868 } 7869 7870 - ret = intel_ring_begin(ring, 4); 7871 if (ret) 7872 goto err_unpin; 7873 7874 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 7875 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); ··· 10047 POSTING_READ(vga_reg); 10048 } 10049 10050 void intel_modeset_init_hw(struct drm_device *dev) 10051 { 10052 intel_init_power_well(dev); ··· 10352 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 10353 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 10354 i915_disable_vga(dev); 10355 } 10356 } 10357 ··· 10565 } 10566 10567 intel_disable_fbc(dev); 10568 10569 intel_disable_gt_powersave(dev); 10570
··· 2077 else 2078 dspcntr &= ~DISPPLANE_TILED; 2079 2080 + if (IS_HASWELL(dev)) 2081 + dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE; 2082 + else 2083 + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2084 2085 I915_WRITE(reg, dspcntr); 2086 ··· 6762 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 6763 cntl |= CURSOR_MODE_DISABLE; 6764 } 6765 + if (IS_HASWELL(dev)) { 6766 cntl |= CURSOR_PIPE_CSC_ENABLE; 6767 + cntl &= ~CURSOR_TRICKLE_FEED_DISABLE; 6768 + } 6769 I915_WRITE(CURCNTR_IVB(pipe), cntl); 6770 6771 intel_crtc->cursor_visible = visible; ··· 7309 } 7310 } 7311 7312 + pipe_config->adjusted_mode.clock = clock.dot; 7313 } 7314 7315 static void ironlake_crtc_clock_get(struct intel_crtc *crtc, ··· 7828 return ret; 7829 } 7830 7831 static int intel_gen7_queue_flip(struct drm_device *dev, 7832 struct drm_crtc *crtc, 7833 struct drm_framebuffer *fb, ··· 7842 { 7843 struct drm_i915_private *dev_priv = dev->dev_private; 7844 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7845 + struct intel_ring_buffer *ring; 7846 uint32_t plane_bit = 0; 7847 + int len, ret; 7848 + 7849 + ring = obj->ring; 7850 + if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS) 7851 + ring = &dev_priv->ring[BCS]; 7852 7853 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 7854 if (ret) ··· 7866 goto err_unpin; 7867 } 7868 7869 + len = 4; 7870 + if (ring->id == RCS) 7871 + len += 6; 7872 + 7873 + ret = intel_ring_begin(ring, len); 7874 if (ret) 7875 goto err_unpin; 7876 + 7877 + /* Unmask the flip-done completion message. Note that the bspec says that 7878 + * we should do this for both the BCS and RCS, and that we must not unmask 7879 + * more than one flip event at any time (or ensure that one flip message 7880 + * can be sent by waiting for flip-done prior to queueing new flips). 7881 + * Experimentation says that BCS works despite DERRMR masking all 7882 + * flip-done completion events and that unmasking all planes at once 7883 + * for the RCS also doesn't appear to drop events. Setting the DERRMR 7884 + * to zero does lead to lockups within MI_DISPLAY_FLIP. 
7885 + */ 7886 + if (ring->id == RCS) { 7887 + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 7888 + intel_ring_emit(ring, DERRMR); 7889 + intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 7890 + DERRMR_PIPEB_PRI_FLIP_DONE | 7891 + DERRMR_PIPEC_PRI_FLIP_DONE)); 7892 + intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1)); 7893 + intel_ring_emit(ring, DERRMR); 7894 + intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 7895 + } 7896 7897 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 7898 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); ··· 10022 POSTING_READ(vga_reg); 10023 } 10024 10025 + static void i915_enable_vga_mem(struct drm_device *dev) 10026 + { 10027 + /* Enable VGA memory on Intel HD */ 10028 + if (HAS_PCH_SPLIT(dev)) { 10029 + vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 10030 + outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE); 10031 + vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO | 10032 + VGA_RSRC_LEGACY_MEM | 10033 + VGA_RSRC_NORMAL_IO | 10034 + VGA_RSRC_NORMAL_MEM); 10035 + vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 10036 + } 10037 + } 10038 + 10039 + void i915_disable_vga_mem(struct drm_device *dev) 10040 + { 10041 + /* Disable VGA memory on Intel HD */ 10042 + if (HAS_PCH_SPLIT(dev)) { 10043 + vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 10044 + outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE); 10045 + vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO | 10046 + VGA_RSRC_NORMAL_IO | 10047 + VGA_RSRC_NORMAL_MEM); 10048 + vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 10049 + } 10050 + } 10051 + 10052 void intel_modeset_init_hw(struct drm_device *dev) 10053 { 10054 intel_init_power_well(dev); ··· 10300 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 10301 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 10302 i915_disable_vga(dev); 10303 + i915_disable_vga_mem(dev); 10304 } 10305 } 10306 ··· 10512 } 10513 10514 intel_disable_fbc(dev); 10515 + 10516 + i915_enable_vga_mem(dev); 10517 10518 intel_disable_gt_powersave(dev); 10519
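
A cross-check on the ring-space arithmetic in the flip hunk above, using the macros from the i915_reg.h hunk:

    /*
     * MI_LOAD_REGISTER_IMM(1) and MI_STORE_REGISTER_MEM(1) are both
     * MI_INSTR(op, 2*1-1); an MI command occupies its length field plus
     * two dwords, so each takes 3 dwords (command, register, value or
     * address). The RCS path therefore needs 4 + 3 + 3 = 10 dwords,
     * matching len += 6 on top of the basic 4-dword flip.
     */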
+2 -1
drivers/gpu/drm/i915/intel_drv.h
··· 551 struct drm_display_mode *fixed_mode); 552 extern void intel_panel_fini(struct intel_panel *panel); 553 554 - extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 555 struct drm_display_mode *adjusted_mode); 556 extern void intel_pch_panel_fitting(struct intel_crtc *crtc, 557 struct intel_crtc_config *pipe_config, ··· 792 extern void hsw_pc8_restore_interrupts(struct drm_device *dev); 793 extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); 794 extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); 795 796 #endif /* __INTEL_DRV_H__ */
··· 551 struct drm_display_mode *fixed_mode); 552 extern void intel_panel_fini(struct intel_panel *panel); 553 554 + extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, 555 struct drm_display_mode *adjusted_mode); 556 extern void intel_pch_panel_fitting(struct intel_crtc *crtc, 557 struct intel_crtc_config *pipe_config, ··· 792 extern void hsw_pc8_restore_interrupts(struct drm_device *dev); 793 extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); 794 extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); 795 + extern void i915_disable_vga_mem(struct drm_device *dev); 796 797 #endif /* __INTEL_DRV_H__ */
+4 -4
drivers/gpu/drm/i915/intel_lvds.c
··· 128 struct drm_device *dev = encoder->base.dev; 129 struct drm_i915_private *dev_priv = dev->dev_private; 130 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 131 - struct drm_display_mode *fixed_mode = 132 - lvds_encoder->attached_connector->base.panel.fixed_mode; 133 int pipe = crtc->pipe; 134 u32 temp; 135 ··· 183 temp &= ~LVDS_ENABLE_DITHER; 184 } 185 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 186 - if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC) 187 temp |= LVDS_HSYNC_POLARITY; 188 - if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC) 189 temp |= LVDS_VSYNC_POLARITY; 190 191 I915_WRITE(lvds_encoder->reg, temp);
··· 128 struct drm_device *dev = encoder->base.dev; 129 struct drm_i915_private *dev_priv = dev->dev_private; 130 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 131 + const struct drm_display_mode *adjusted_mode = 132 + &crtc->config.adjusted_mode; 133 int pipe = crtc->pipe; 134 u32 temp; 135 ··· 183 temp &= ~LVDS_ENABLE_DITHER; 184 } 185 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 186 + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 187 temp |= LVDS_HSYNC_POLARITY; 188 + if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 189 temp |= LVDS_VSYNC_POLARITY; 190 191 I915_WRITE(lvds_encoder->reg, temp);
+1 -1
drivers/gpu/drm/i915/intel_opregion.c
··· 173 return ASLE_BACKLIGHT_FAILED; 174 175 intel_panel_set_backlight(dev, bclp, 255); 176 - iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv); 177 178 return 0; 179 }
··· 173 return ASLE_BACKLIGHT_FAILED; 174 175 intel_panel_set_backlight(dev, bclp, 255); 176 + iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); 177 178 return 0; 179 }
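
The DIV_ROUND_UP conversion matters at the bottom of the scale: the old truncating expression (0x64 is 100, 0xff is 255) reported 0% for a backlight that had just been set to a small nonzero level. Worked values:

    old: (  1 * 0x64) / 0xff          =   0   /* truncates to 0% */
    new: DIV_ROUND_UP(  1 * 100, 255) =   1
    old: (255 * 0x64) / 0xff          = 100
    new: DIV_ROUND_UP(255 * 100, 255) = 100   /* full scale unchanged */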
+3 -11
drivers/gpu/drm/i915/intel_panel.c
··· 36 #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ 37 38 void 39 - intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 40 struct drm_display_mode *adjusted_mode) 41 { 42 - adjusted_mode->hdisplay = fixed_mode->hdisplay; 43 - adjusted_mode->hsync_start = fixed_mode->hsync_start; 44 - adjusted_mode->hsync_end = fixed_mode->hsync_end; 45 - adjusted_mode->htotal = fixed_mode->htotal; 46 47 - adjusted_mode->vdisplay = fixed_mode->vdisplay; 48 - adjusted_mode->vsync_start = fixed_mode->vsync_start; 49 - adjusted_mode->vsync_end = fixed_mode->vsync_end; 50 - adjusted_mode->vtotal = fixed_mode->vtotal; 51 - 52 - adjusted_mode->clock = fixed_mode->clock; 53 } 54 55 /* adjusted_mode has been preset to be the panel's fixed mode */
··· 36 #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ 37 38 void 39 + intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, 40 struct drm_display_mode *adjusted_mode) 41 { 42 + drm_mode_copy(adjusted_mode, fixed_mode); 43 44 + drm_mode_set_crtcinfo(adjusted_mode, 0); 45 } 46 47 /* adjusted_mode has been preset to be the panel's fixed mode */
+11 -3
drivers/gpu/drm/i915/intel_pm.c
··· 3447 static void gen6_enable_rps_interrupts(struct drm_device *dev) 3448 { 3449 struct drm_i915_private *dev_priv = dev->dev_private; 3450 3451 spin_lock_irq(&dev_priv->irq_lock); 3452 WARN_ON(dev_priv->rps.pm_iir); 3453 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 3454 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); 3455 spin_unlock_irq(&dev_priv->irq_lock); 3456 /* only unmask PM interrupts we need. Mask all others. */ 3457 - I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS); 3458 } 3459 3460 static void gen6_enable_rps(struct drm_device *dev) ··· 4959 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 4960 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 4961 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 4962 - 4963 - g4x_disable_trickle_feed(dev); 4964 4965 /* WaVSRefCountFullforceMissDisable:hsw */ 4966 gen7_setup_fixed_func_scheduler(dev_priv);
··· 3447 static void gen6_enable_rps_interrupts(struct drm_device *dev) 3448 { 3449 struct drm_i915_private *dev_priv = dev->dev_private; 3450 + u32 enabled_intrs; 3451 3452 spin_lock_irq(&dev_priv->irq_lock); 3453 WARN_ON(dev_priv->rps.pm_iir); 3454 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 3455 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); 3456 spin_unlock_irq(&dev_priv->irq_lock); 3457 + 3458 /* only unmask PM interrupts we need. Mask all others. */ 3459 + enabled_intrs = GEN6_PM_RPS_EVENTS; 3460 + 3461 + /* IVB and SNB hard hang on a looping batchbuffer 3462 + * if GEN6_PM_UP_EI_EXPIRED is masked. 3463 + */ 3464 + if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 3465 + enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED; 3466 + 3467 + I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs); 3468 } 3469 3470 static void gen6_enable_rps(struct drm_device *dev) ··· 4949 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 4950 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 4951 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 4952 4953 /* WaVSRefCountFullforceMissDisable:hsw */ 4954 gen7_setup_fixed_func_scheduler(dev_priv);
+29 -70
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 33 #include "i915_trace.h" 34 #include "intel_drv.h" 35 36 - /* 37 - * 965+ support PIPE_CONTROL commands, which provide finer grained control 38 - * over cache flushing. 39 - */ 40 - struct pipe_control { 41 - struct drm_i915_gem_object *obj; 42 - volatile u32 *cpu_page; 43 - u32 gtt_offset; 44 - }; 45 - 46 static inline int ring_space(struct intel_ring_buffer *ring) 47 { 48 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); ··· 165 static int 166 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) 167 { 168 - struct pipe_control *pc = ring->private; 169 - u32 scratch_addr = pc->gtt_offset + 128; 170 int ret; 171 172 ··· 202 u32 invalidate_domains, u32 flush_domains) 203 { 204 u32 flags = 0; 205 - struct pipe_control *pc = ring->private; 206 - u32 scratch_addr = pc->gtt_offset + 128; 207 int ret; 208 209 /* Force SNB workarounds for PIPE_CONTROL flushes */ ··· 294 u32 invalidate_domains, u32 flush_domains) 295 { 296 u32 flags = 0; 297 - struct pipe_control *pc = ring->private; 298 - u32 scratch_addr = pc->gtt_offset + 128; 299 int ret; 300 301 /* ··· 468 static int 469 init_pipe_control(struct intel_ring_buffer *ring) 470 { 471 - struct pipe_control *pc; 472 - struct drm_i915_gem_object *obj; 473 int ret; 474 475 - if (ring->private) 476 return 0; 477 478 - pc = kmalloc(sizeof(*pc), GFP_KERNEL); 479 - if (!pc) 480 - return -ENOMEM; 481 - 482 - obj = i915_gem_alloc_object(ring->dev, 4096); 483 - if (obj == NULL) { 484 DRM_ERROR("Failed to allocate seqno page\n"); 485 ret = -ENOMEM; 486 goto err; 487 } 488 489 - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 490 491 - ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); 492 if (ret) 493 goto err_unref; 494 495 - pc->gtt_offset = i915_gem_obj_ggtt_offset(obj); 496 - pc->cpu_page = kmap(sg_page(obj->pages->sgl)); 497 - if (pc->cpu_page == NULL) { 498 ret = -ENOMEM; 499 goto err_unpin; 500 } 501 502 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 503 - ring->name, pc->gtt_offset); 504 - 505 - pc->obj = obj; 506 - ring->private = pc; 507 return 0; 508 509 err_unpin: 510 - i915_gem_object_unpin(obj); 511 err_unref: 512 - drm_gem_object_unreference(&obj->base); 513 err: 514 - kfree(pc); 515 return ret; 516 - } 517 - 518 - static void 519 - cleanup_pipe_control(struct intel_ring_buffer *ring) 520 - { 521 - struct pipe_control *pc = ring->private; 522 - struct drm_i915_gem_object *obj; 523 - 524 - obj = pc->obj; 525 - 526 - kunmap(sg_page(obj->pages->sgl)); 527 - i915_gem_object_unpin(obj); 528 - drm_gem_object_unreference(&obj->base); 529 - 530 - kfree(pc); 531 } 532 533 static int init_render_ring(struct intel_ring_buffer *ring) ··· 569 { 570 struct drm_device *dev = ring->dev; 571 572 - if (!ring->private) 573 return; 574 575 - if (HAS_BROKEN_CS_TLB(dev)) 576 - drm_gem_object_unreference(to_gem_object(ring->private)); 577 578 - if (INTEL_INFO(dev)->gen >= 5) 579 - cleanup_pipe_control(ring); 580 - 581 - ring->private = NULL; 582 } 583 584 static void ··· 704 static int 705 pc_render_add_request(struct intel_ring_buffer *ring) 706 { 707 - struct pipe_control *pc = ring->private; 708 - u32 scratch_addr = pc->gtt_offset + 128; 709 int ret; 710 711 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently ··· 722 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 723 PIPE_CONTROL_WRITE_FLUSH | 724 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 725 - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 726 intel_ring_emit(ring, ring->outstanding_lazy_request); 727 
intel_ring_emit(ring, 0); 728 PIPE_CONTROL_FLUSH(ring, scratch_addr); ··· 741 PIPE_CONTROL_WRITE_FLUSH | 742 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 743 PIPE_CONTROL_NOTIFY); 744 - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 745 intel_ring_emit(ring, ring->outstanding_lazy_request); 746 intel_ring_emit(ring, 0); 747 intel_ring_advance(ring); ··· 775 static u32 776 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 777 { 778 - struct pipe_control *pc = ring->private; 779 - return pc->cpu_page[0]; 780 } 781 782 static void 783 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) 784 { 785 - struct pipe_control *pc = ring->private; 786 - pc->cpu_page[0] = seqno; 787 } 788 789 static bool ··· 1100 intel_ring_emit(ring, MI_NOOP); 1101 intel_ring_advance(ring); 1102 } else { 1103 - struct drm_i915_gem_object *obj = ring->private; 1104 - u32 cs_offset = i915_gem_obj_ggtt_offset(obj); 1105 1106 if (len > I830_BATCH_LIMIT) 1107 return -ENOSPC; ··· 1793 return ret; 1794 } 1795 1796 - ring->private = obj; 1797 } 1798 1799 return intel_init_ring_buffer(dev, ring);
··· 33 #include "i915_trace.h" 34 #include "intel_drv.h" 35 36 static inline int ring_space(struct intel_ring_buffer *ring) 37 { 38 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); ··· 175 static int 176 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) 177 { 178 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 179 int ret; 180 181 ··· 213 u32 invalidate_domains, u32 flush_domains) 214 { 215 u32 flags = 0; 216 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 217 int ret; 218 219 /* Force SNB workarounds for PIPE_CONTROL flushes */ ··· 306 u32 invalidate_domains, u32 flush_domains) 307 { 308 u32 flags = 0; 309 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 310 int ret; 311 312 /* ··· 481 static int 482 init_pipe_control(struct intel_ring_buffer *ring) 483 { 484 int ret; 485 486 + if (ring->scratch.obj) 487 return 0; 488 489 + ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); 490 + if (ring->scratch.obj == NULL) { 491 DRM_ERROR("Failed to allocate seqno page\n"); 492 ret = -ENOMEM; 493 goto err; 494 } 495 496 + i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); 497 498 + ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false); 499 if (ret) 500 goto err_unref; 501 502 + ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj); 503 + ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl)); 504 + if (ring->scratch.cpu_page == NULL) { 505 ret = -ENOMEM; 506 goto err_unpin; 507 } 508 509 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 510 + ring->name, ring->scratch.gtt_offset); 511 return 0; 512 513 err_unpin: 514 + i915_gem_object_unpin(ring->scratch.obj); 515 err_unref: 516 + drm_gem_object_unreference(&ring->scratch.obj->base); 517 err: 518 return ret; 519 } 520 521 static int init_render_ring(struct intel_ring_buffer *ring) ··· 607 { 608 struct drm_device *dev = ring->dev; 609 610 + if (ring->scratch.obj == NULL) 611 return; 612 613 + if (INTEL_INFO(dev)->gen >= 5) { 614 + kunmap(sg_page(ring->scratch.obj->pages->sgl)); 615 + i915_gem_object_unpin(ring->scratch.obj); 616 + } 617 618 + drm_gem_object_unreference(&ring->scratch.obj->base); 619 + ring->scratch.obj = NULL; 620 } 621 622 static void ··· 742 static int 743 pc_render_add_request(struct intel_ring_buffer *ring) 744 { 745 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 746 int ret; 747 748 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently ··· 761 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 762 PIPE_CONTROL_WRITE_FLUSH | 763 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 764 + intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 765 intel_ring_emit(ring, ring->outstanding_lazy_request); 766 intel_ring_emit(ring, 0); 767 PIPE_CONTROL_FLUSH(ring, scratch_addr); ··· 780 PIPE_CONTROL_WRITE_FLUSH | 781 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 782 PIPE_CONTROL_NOTIFY); 783 + intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 784 intel_ring_emit(ring, ring->outstanding_lazy_request); 785 intel_ring_emit(ring, 0); 786 intel_ring_advance(ring); ··· 814 static u32 815 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 816 { 817 + return ring->scratch.cpu_page[0]; 818 } 819 820 static void 821 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) 822 { 823 + ring->scratch.cpu_page[0] = seqno; 824 } 825 826 static bool ··· 1141 intel_ring_emit(ring, MI_NOOP); 1142 intel_ring_advance(ring); 1143 } else { 
1144 + u32 cs_offset = ring->scratch.gtt_offset; 1145 1146 if (len > I830_BATCH_LIMIT) 1147 return -ENOSPC; ··· 1835 return ret; 1836 } 1837 1838 + ring->scratch.obj = obj; 1839 + ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); 1840 } 1841 1842 return intel_init_ring_buffer(dev, ring);
+5 -1
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 155 156 struct intel_ring_hangcheck hangcheck; 157 158 - void *private; 159 }; 160 161 static inline bool
··· 155 156 struct intel_ring_hangcheck hangcheck; 157 158 + struct { 159 + struct drm_i915_gem_object *obj; 160 + u32 gtt_offset; 161 + volatile u32 *cpu_page; 162 + } scratch; 163 }; 164 165 static inline bool
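The new scratch struct above replaces the untyped ring->private, which had been doing double duty (a pipe_control allocation on gen5+ render rings, a bare GEM object for the i830/845 batch workaround). A minimal userspace sketch, not kernel code — struct fake_ring and its fields are invented stand-ins — of how every ring now reads seqnos through one known layout instead of casting a void pointer:

#include <stdio.h>
#include <stdint.h>

struct fake_ring {
	struct {
		void *obj;                   /* backing object (stand-in) */
		uint32_t gtt_offset;         /* GPU address of the scratch page */
		volatile uint32_t *cpu_page; /* CPU mapping used for seqno reads */
	} scratch;
};

int main(void)
{
	static uint32_t page[1024]; /* pretend 4 KiB scratch page */
	struct fake_ring ring = { .scratch = { .cpu_page = page } };

	ring.scratch.cpu_page[0] = 42; /* pc_render_set_seqno() analogue */
	printf("seqno = %u\n", (unsigned)ring.scratch.cpu_page[0]);
	return 0;
}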
+9 -8
drivers/gpu/drm/i915/intel_sdvo.c
··· 1151 { 1152 struct drm_device *dev = intel_encoder->base.dev; 1153 struct drm_i915_private *dev_priv = dev->dev_private; 1154 - struct drm_crtc *crtc = intel_encoder->base.crtc; 1155 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1156 struct drm_display_mode *adjusted_mode = 1157 - &intel_crtc->config.adjusted_mode; 1158 - struct drm_display_mode *mode = &intel_crtc->config.requested_mode; 1159 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); 1160 u32 sdvox; 1161 struct intel_sdvo_in_out_map in_out; ··· 1212 * adjusted_mode. 1213 */ 1214 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1215 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) 1216 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; 1217 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) 1218 DRM_INFO("Setting input timings on %s failed\n", 1219 SDVO_NAME(intel_sdvo)); 1220 1221 - switch (intel_crtc->config.pixel_multiplier) { 1222 default: 1223 WARN(1, "unknown pixel multiplier specified\n"); 1224 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; ··· 1253 } 1254 1255 if (INTEL_PCH_TYPE(dev) >= PCH_CPT) 1256 - sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe); 1257 else 1258 - sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe); 1259 1260 if (intel_sdvo->has_hdmi_audio) 1261 sdvox |= SDVO_AUDIO_ENABLE; ··· 1265 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 1266 /* done in crtc_mode_set as it lives inside the dpll register */ 1267 } else { 1268 - sdvox |= (intel_crtc->config.pixel_multiplier - 1) 1269 << SDVO_PORT_MULTIPLY_SHIFT; 1270 } 1271
··· 1151 { 1152 struct drm_device *dev = intel_encoder->base.dev; 1153 struct drm_i915_private *dev_priv = dev->dev_private; 1154 + struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc); 1155 struct drm_display_mode *adjusted_mode = 1156 + &crtc->config.adjusted_mode; 1157 + struct drm_display_mode *mode = &crtc->config.requested_mode; 1158 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); 1159 u32 sdvox; 1160 struct intel_sdvo_in_out_map in_out; ··· 1213 * adjusted_mode. 1214 */ 1215 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1216 + input_dtd.part1.clock /= crtc->config.pixel_multiplier; 1217 + 1218 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) 1219 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; 1220 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) 1221 DRM_INFO("Setting input timings on %s failed\n", 1222 SDVO_NAME(intel_sdvo)); 1223 1224 + switch (crtc->config.pixel_multiplier) { 1225 default: 1226 WARN(1, "unknown pixel multiplier specified\n"); 1227 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; ··· 1252 } 1253 1254 if (INTEL_PCH_TYPE(dev) >= PCH_CPT) 1255 + sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe); 1256 else 1257 + sdvox |= SDVO_PIPE_SEL(crtc->pipe); 1258 1259 if (intel_sdvo->has_hdmi_audio) 1260 sdvox |= SDVO_AUDIO_ENABLE; ··· 1264 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 1265 /* done in crtc_mode_set as it lives inside the dpll register */ 1266 } else { 1267 + sdvox |= (crtc->config.pixel_multiplier - 1) 1268 << SDVO_PORT_MULTIPLY_SHIFT; 1269 } 1270
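The functional fix here is the new input_dtd.part1.clock /= crtc->config.pixel_multiplier line: adjusted_mode carries the already-multiplied port clock, so the DTD handed back for the SDVO input timing has to be divided down again. A hedged arithmetic sketch (the clock and multiplier values are made up):

#include <stdio.h>

int main(void)
{
	int mode_clock = 25175;   /* requested dotclock in kHz (made up) */
	int pixel_multiplier = 4; /* chosen so the port clock is >= 100 MHz */
	int adjusted_clock = mode_clock * pixel_multiplier;

	/* mirrors: input_dtd.part1.clock /= crtc->config.pixel_multiplier; */
	int input_dtd_clock = adjusted_clock / pixel_multiplier;

	printf("port clock %d kHz, input timing clock %d kHz\n",
	       adjusted_clock, input_dtd_clock);
	return 0;
}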
+5 -2
drivers/gpu/drm/i915/intel_sprite.c
··· 260 if (obj->tiling_mode != I915_TILING_NONE) 261 sprctl |= SPRITE_TILED; 262 263 - /* must disable */ 264 - sprctl |= SPRITE_TRICKLE_FEED_DISABLE; 265 sprctl |= SPRITE_ENABLE; 266 267 if (IS_HASWELL(dev))
··· 260 if (obj->tiling_mode != I915_TILING_NONE) 261 sprctl |= SPRITE_TILED; 262 263 + if (IS_HASWELL(dev)) 264 + sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE; 265 + else 266 + sprctl |= SPRITE_TRICKLE_FEED_DISABLE; 267 + 268 sprctl |= SPRITE_ENABLE; 269 270 if (IS_HASWELL(dev))
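This inverts the old blanket SPRITE_TRICKLE_FEED_DISABLE: on Haswell the bit is now cleared (trickle feed enabled), everywhere else it stays set. A small sketch of the conditional — the bit position below is a placeholder, not the real register layout:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define SPRITE_TRICKLE_FEED_DISABLE (1u << 14) /* placeholder bit */

static uint32_t program_sprctl(uint32_t sprctl, bool is_haswell)
{
	if (is_haswell)
		sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE; /* enable trickle feed */
	else
		sprctl |= SPRITE_TRICKLE_FEED_DISABLE;  /* keep it off */
	return sprctl;
}

int main(void)
{
	printf("hsw: %#x, other: %#x\n",
	       (unsigned)program_sprctl(0, true),
	       (unsigned)program_sprctl(0, false));
	return 0;
}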
+8 -1
drivers/gpu/drm/i915/intel_uncore.c
··· 261 } 262 } 263 264 - void intel_uncore_sanitize(struct drm_device *dev) 265 { 266 struct drm_i915_private *dev_priv = dev->dev_private; 267 ··· 272 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 273 __gen6_gt_force_wake_mt_reset(dev_priv); 274 } 275 276 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 277 intel_disable_gt_powersave(dev); ··· 553 554 /* Spin waiting for the device to ack the reset request */ 555 ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); 556 557 /* If reset with a user forcewake, try to restore, otherwise turn it off */ 558 if (dev_priv->uncore.forcewake_count)
··· 261 } 262 } 263 264 + static void intel_uncore_forcewake_reset(struct drm_device *dev) 265 { 266 struct drm_i915_private *dev_priv = dev->dev_private; 267 ··· 272 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 273 __gen6_gt_force_wake_mt_reset(dev_priv); 274 } 275 + } 276 + 277 + void intel_uncore_sanitize(struct drm_device *dev) 278 + { 279 + intel_uncore_forcewake_reset(dev); 280 281 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 282 intel_disable_gt_powersave(dev); ··· 548 549 /* Spin waiting for the device to ack the reset request */ 550 ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); 551 + 552 + intel_uncore_forcewake_reset(dev); 553 554 /* If reset with a user forcewake, try to restore, otherwise turn it off */ 555 if (dev_priv->uncore.forcewake_count)
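The sanitize body is factored out into intel_uncore_forcewake_reset() so the gen6 GPU-reset path can reuse it — a full GDRST also clobbers forcewake state. A stubbed sketch of the shape of the refactor (the puts() bodies stand in for the real register writes):

#include <stdio.h>

static void forcewake_reset(void)
{
	puts("reset forcewake state"); /* stands in for the MT/legacy reset */
}

static void sanitize(void)
{
	forcewake_reset();
	puts("disable BIOS-enabled RC6");
}

static void gpu_reset(void)
{
	puts("assert GDRST and wait for ack");
	forcewake_reset(); /* the reset also clobbers forcewake */
	puts("restore or drop user forcewake");
}

int main(void)
{
	sanitize();
	gpu_reset();
	return 0;
}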
+22 -29
drivers/gpu/vga/vgaarb.c
··· 257 if (!conflict->bridge_has_one_vga) { 258 vga_irq_set_state(conflict, false); 259 flags |= PCI_VGA_STATE_CHANGE_DECODES; 260 - if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) 261 pci_bits |= PCI_COMMAND_MEMORY; 262 - if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) 263 pci_bits |= PCI_COMMAND_IO; 264 } 265 ··· 267 flags |= PCI_VGA_STATE_CHANGE_BRIDGE; 268 269 pci_set_vga_state(conflict->pdev, false, pci_bits, flags); 270 - conflict->owns &= ~lwants; 271 /* If he also owned non-legacy, that is no longer the case */ 272 - if (lwants & VGA_RSRC_LEGACY_MEM) 273 conflict->owns &= ~VGA_RSRC_NORMAL_MEM; 274 - if (lwants & VGA_RSRC_LEGACY_IO) 275 conflict->owns &= ~VGA_RSRC_NORMAL_IO; 276 } 277 ··· 644 static inline void vga_update_device_decodes(struct vga_device *vgadev, 645 int new_decodes) 646 { 647 - int old_decodes; 648 - struct vga_device *new_vgadev, *conflict; 649 650 old_decodes = vgadev->decodes; 651 vgadev->decodes = new_decodes; 652 653 pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", ··· 658 vga_iostate_to_str(vgadev->decodes), 659 vga_iostate_to_str(vgadev->owns)); 660 661 - 662 - /* if we own the decodes we should move them along to 663 - another card */ 664 - if ((vgadev->owns & old_decodes) && (vga_count > 1)) { 665 - /* set us to own nothing */ 666 - vgadev->owns &= ~old_decodes; 667 - list_for_each_entry(new_vgadev, &vga_list, list) { 668 - if ((new_vgadev != vgadev) && 669 - (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) { 670 - pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev)); 671 - conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK); 672 - if (!conflict) 673 - __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK); 674 - break; 675 - } 676 - } 677 } 678 679 /* change decodes counter */ 680 - if (old_decodes != new_decodes) { 681 - if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)) 682 - vga_decode_count++; 683 - else 684 - vga_decode_count--; 685 - } 686 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); 687 } 688
··· 257 if (!conflict->bridge_has_one_vga) { 258 vga_irq_set_state(conflict, false); 259 flags |= PCI_VGA_STATE_CHANGE_DECODES; 260 + if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) 261 pci_bits |= PCI_COMMAND_MEMORY; 262 + if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) 263 pci_bits |= PCI_COMMAND_IO; 264 } 265 ··· 267 flags |= PCI_VGA_STATE_CHANGE_BRIDGE; 268 269 pci_set_vga_state(conflict->pdev, false, pci_bits, flags); 270 + conflict->owns &= ~match; 271 /* If he also owned non-legacy, that is no longer the case */ 272 + if (match & VGA_RSRC_LEGACY_MEM) 273 conflict->owns &= ~VGA_RSRC_NORMAL_MEM; 274 + if (match & VGA_RSRC_LEGACY_IO) 275 conflict->owns &= ~VGA_RSRC_NORMAL_IO; 276 } 277 ··· 644 static inline void vga_update_device_decodes(struct vga_device *vgadev, 645 int new_decodes) 646 { 647 + int old_decodes, decodes_removed, decodes_unlocked; 648 649 old_decodes = vgadev->decodes; 650 + decodes_removed = ~new_decodes & old_decodes; 651 + decodes_unlocked = vgadev->locks & decodes_removed; 652 + vgadev->owns &= ~decodes_removed; 653 vgadev->decodes = new_decodes; 654 655 pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", ··· 656 vga_iostate_to_str(vgadev->decodes), 657 vga_iostate_to_str(vgadev->owns)); 658 659 + /* if we removed locked decodes, lock count goes to zero, and release */ 660 + if (decodes_unlocked) { 661 + if (decodes_unlocked & VGA_RSRC_LEGACY_IO) 662 + vgadev->io_lock_cnt = 0; 663 + if (decodes_unlocked & VGA_RSRC_LEGACY_MEM) 664 + vgadev->mem_lock_cnt = 0; 665 + __vga_put(vgadev, decodes_unlocked); 666 } 667 668 /* change decodes counter */ 669 + if (old_decodes & VGA_RSRC_LEGACY_MASK && 670 + !(new_decodes & VGA_RSRC_LEGACY_MASK)) 671 + vga_decode_count--; 672 + if (!(old_decodes & VGA_RSRC_LEGACY_MASK) && 673 + new_decodes & VGA_RSRC_LEGACY_MASK) 674 + vga_decode_count++; 675 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); 676 } 677
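The rewritten vga_update_device_decodes() is careful to touch only what actually changed: decodes_removed is the set of decodes being dropped, decodes_unlocked the subset of those still locked, and ownership is stripped only for removed decodes rather than disabling resources the device never owned. A runnable sketch of the mask arithmetic with invented flag values:

#include <stdio.h>

#define RSRC_LEGACY_IO  0x01u
#define RSRC_LEGACY_MEM 0x02u

int main(void)
{
	unsigned int old_decodes = RSRC_LEGACY_IO | RSRC_LEGACY_MEM;
	unsigned int new_decodes = RSRC_LEGACY_MEM; /* driver keeps MEM only */
	unsigned int locks = RSRC_LEGACY_IO;        /* IO decode was locked */

	unsigned int decodes_removed = ~new_decodes & old_decodes;
	unsigned int decodes_unlocked = locks & decodes_removed;
	unsigned int owns = old_decodes & ~decodes_removed;

	printf("removed=%#x unlocked=%#x owns=%#x\n",
	       decodes_removed, decodes_unlocked, owns);
	return 0;
}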
+34
include/drm/i915_drm.h
··· 26 #ifndef _I915_DRM_H_ 27 #define _I915_DRM_H_ 28 29 #include <uapi/drm/i915_drm.h> 30 31 /* For use by IPS driver */ ··· 35 extern bool i915_gpu_lower(void); 36 extern bool i915_gpu_busy(void); 37 extern bool i915_gpu_turbo_disable(void); 38 #endif /* _I915_DRM_H_ */
··· 26 #ifndef _I915_DRM_H_ 27 #define _I915_DRM_H_ 28 29 + #include <drm/i915_pciids.h> 30 #include <uapi/drm/i915_drm.h> 31 32 /* For use by IPS driver */ ··· 34 extern bool i915_gpu_lower(void); 35 extern bool i915_gpu_busy(void); 36 extern bool i915_gpu_turbo_disable(void); 37 + 38 + /* 39 + * The Bridge device's PCI config space has information about the 40 + * fb aperture size and the amount of pre-reserved memory. 41 + * This is all handled in the intel-gtt.ko module. i915.ko only 42 + * cares about the vga bit for the vga arbiter. 43 + */ 44 + #define INTEL_GMCH_CTRL 0x52 45 + #define INTEL_GMCH_VGA_DISABLE (1 << 1) 46 + #define SNB_GMCH_CTRL 0x50 47 + #define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ 48 + #define SNB_GMCH_GGMS_MASK 0x3 49 + #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ 50 + #define SNB_GMCH_GMS_MASK 0x1f 51 + 52 + #define I830_GMCH_CTRL 0x52 53 + 54 + #define I855_GMCH_GMS_MASK 0xF0 55 + #define I855_GMCH_GMS_STOLEN_0M 0x0 56 + #define I855_GMCH_GMS_STOLEN_1M (0x1 << 4) 57 + #define I855_GMCH_GMS_STOLEN_4M (0x2 << 4) 58 + #define I855_GMCH_GMS_STOLEN_8M (0x3 << 4) 59 + #define I855_GMCH_GMS_STOLEN_16M (0x4 << 4) 60 + #define I855_GMCH_GMS_STOLEN_32M (0x5 << 4) 61 + #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4) 62 + #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4) 63 + #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4) 64 + #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4) 65 + #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4) 66 + #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4) 67 + #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4) 68 + #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) 69 + 70 #endif /* _I915_DRM_H_ */
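These defines are what the early-quirks code uses to size stolen memory; on gen6+ the GMS field of SNB_GMCH_CTRL selects the size in 32 MB units, hence the shift by 25 in gen6_stolen_size(). A runnable sketch of the decode (the sample register value is invented):

#include <stdio.h>
#include <stdint.h>

#define SNB_GMCH_GMS_SHIFT 3
#define SNB_GMCH_GMS_MASK  0x1f

int main(void)
{
	uint16_t gmch_ctrl = 0x0211; /* made-up config-space value */
	size_t stolen = (size_t)((gmch_ctrl >> SNB_GMCH_GMS_SHIFT) &
				 SNB_GMCH_GMS_MASK) << 25;

	printf("stolen = %zu MB\n", stolen >> 20);
	return 0;
}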
+211
include/drm/i915_pciids.h
···
··· 1 + /* 2 + * Copyright 2013 Intel Corporation 3 + * All Rights Reserved. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * The above copyright notice and this permission notice (including the 14 + * next paragraph) shall be included in all copies or substantial portions 15 + * of the Software. 16 + * 17 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 + * DEALINGS IN THE SOFTWARE. 24 + */ 25 + #ifndef _I915_PCIIDS_H 26 + #define _I915_PCIIDS_H 27 + 28 + /* 29 + * A pci_device_id struct { 30 + * __u32 vendor, device; 31 + * __u32 subvendor, subdevice; 32 + * __u32 class, class_mask; 33 + * kernel_ulong_t driver_data; 34 + * }; 35 + * Don't use C99 here because "class" is reserved and we want to 36 + * give userspace flexibility. 37 + */ 38 + #define INTEL_VGA_DEVICE(id, info) { \ 39 + 0x8086, id, \ 40 + ~0, ~0, \ 41 + 0x030000, 0xff0000, \ 42 + (unsigned long) info } 43 + 44 + #define INTEL_QUANTA_VGA_DEVICE(info) { \ 45 + 0x8086, 0x16a, \ 46 + 0x152d, 0x8990, \ 47 + 0x030000, 0xff0000, \ 48 + (unsigned long) info } 49 + 50 + #define INTEL_I830_IDS(info) \ 51 + INTEL_VGA_DEVICE(0x3577, info) 52 + 53 + #define INTEL_I845G_IDS(info) \ 54 + INTEL_VGA_DEVICE(0x2562, info) 55 + 56 + #define INTEL_I85X_IDS(info) \ 57 + INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \ 58 + INTEL_VGA_DEVICE(0x358e, info) 59 + 60 + #define INTEL_I865G_IDS(info) \ 61 + INTEL_VGA_DEVICE(0x2572, info) /* I865_G */ 62 + 63 + #define INTEL_I915G_IDS(info) \ 64 + INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \ 65 + INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */ 66 + 67 + #define INTEL_I915GM_IDS(info) \ 68 + INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */ 69 + 70 + #define INTEL_I945G_IDS(info) \ 71 + INTEL_VGA_DEVICE(0x2772, info) /* I945_G */ 72 + 73 + #define INTEL_I945GM_IDS(info) \ 74 + INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \ 75 + INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */ 76 + 77 + #define INTEL_I965G_IDS(info) \ 78 + INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \ 79 + INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \ 80 + INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \ 81 + INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */ 82 + 83 + #define INTEL_G33_IDS(info) \ 84 + INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \ 85 + INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \ 86 + INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */ 87 + 88 + #define INTEL_I965GM_IDS(info) \ 89 + INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \ 90 + INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */ 91 + 92 + #define INTEL_GM45_IDS(info) \ 93 + INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */ 94 + 95 + #define INTEL_G45_IDS(info) \ 96 + INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \ 97 + 
INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \ 98 + INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \ 99 + INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \ 100 + INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \ 101 + INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */ 102 + 103 + #define INTEL_PINEVIEW_IDS(info) \ 104 + INTEL_VGA_DEVICE(0xa001, info), \ 105 + INTEL_VGA_DEVICE(0xa011, info) 106 + 107 + #define INTEL_IRONLAKE_D_IDS(info) \ 108 + INTEL_VGA_DEVICE(0x0042, info) 109 + 110 + #define INTEL_IRONLAKE_M_IDS(info) \ 111 + INTEL_VGA_DEVICE(0x0046, info) 112 + 113 + #define INTEL_SNB_D_IDS(info) \ 114 + INTEL_VGA_DEVICE(0x0102, info), \ 115 + INTEL_VGA_DEVICE(0x0112, info), \ 116 + INTEL_VGA_DEVICE(0x0122, info), \ 117 + INTEL_VGA_DEVICE(0x010A, info) 118 + 119 + #define INTEL_SNB_M_IDS(info) \ 120 + INTEL_VGA_DEVICE(0x0106, info), \ 121 + INTEL_VGA_DEVICE(0x0116, info), \ 122 + INTEL_VGA_DEVICE(0x0126, info) 123 + 124 + #define INTEL_IVB_M_IDS(info) \ 125 + INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \ 126 + INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */ 127 + 128 + #define INTEL_IVB_D_IDS(info) \ 129 + INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \ 130 + INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \ 131 + INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \ 132 + INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */ 133 + 134 + #define INTEL_IVB_Q_IDS(info) \ 135 + INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */ 136 + 137 + #define INTEL_HSW_D_IDS(info) \ 138 + INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \ 139 + INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \ 140 + INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \ 141 + INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \ 142 + INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \ 143 + INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \ 144 + INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \ 145 + INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \ 146 + INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \ 147 + INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \ 148 + INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \ 149 + INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \ 150 + INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \ 151 + INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \ 152 + INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \ 153 + INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \ 154 + INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \ 155 + INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \ 156 + INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \ 157 + INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \ 158 + INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \ 159 + INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \ 160 + INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \ 161 + INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \ 162 + INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \ 163 + INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \ 164 + INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \ 165 + INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \ 166 + INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \ 167 + INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \ 168 + INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \ 169 + INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \ 170 + INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \ 171 + INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \ 172 + 
INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \ 173 + INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \ 174 + INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \ 175 + INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \ 176 + INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \ 177 + INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \ 178 + INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \ 179 + INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \ 180 + INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \ 181 + INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \ 182 + INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */ \ 183 + 184 + #define INTEL_HSW_M_IDS(info) \ 185 + INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \ 186 + INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \ 187 + INTEL_VGA_DEVICE(0x0426, info), /* GT2 mobile */ \ 188 + INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \ 189 + INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \ 190 + INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \ 191 + INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \ 192 + INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \ 193 + INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \ 194 + INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \ 195 + INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \ 196 + INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \ 197 + INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \ 198 + INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \ 199 + INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */ 200 + 201 + #define INTEL_VLV_M_IDS(info) \ 202 + INTEL_VGA_DEVICE(0x0f30, info), \ 203 + INTEL_VGA_DEVICE(0x0f31, info), \ 204 + INTEL_VGA_DEVICE(0x0f32, info), \ 205 + INTEL_VGA_DEVICE(0x0f33, info), \ 206 + INTEL_VGA_DEVICE(0x0157, info) 207 + 208 + #define INTEL_VLV_D_IDS(info) \ 209 + INTEL_VGA_DEVICE(0x0155, info) 210 + 211 + #endif /* _I915_PCIIDS_H */
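Splitting the IDs into i915_pciids.h lets both i915 and the x86 early quirk expand the same macros into their own match tables. A self-contained sketch of that expansion — struct local_pci_id is a stand-in for the kernel's struct pci_device_id so this compiles on its own:

#include <stdio.h>

struct local_pci_id {
	unsigned int vendor, device;
	unsigned int subvendor, subdevice;
	unsigned int classcode, class_mask;
	unsigned long driver_data;
};

#define INTEL_VGA_DEVICE(id, info) { \
	0x8086, id, ~0u, ~0u, 0x030000, 0xff0000, (unsigned long)(info) }

#define INTEL_SNB_M_IDS(info) \
	INTEL_VGA_DEVICE(0x0106, info), \
	INTEL_VGA_DEVICE(0x0116, info), \
	INTEL_VGA_DEVICE(0x0126, info)

static const struct local_pci_id ids[] = {
	INTEL_SNB_M_IDS(0), /* the same macro i915 and early-quirks use */
	{ 0 }
};

int main(void)
{
	printf("%zu Sandybridge mobile IDs\n",
	       sizeof(ids) / sizeof(ids[0]) - 1);
	return 0;
}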
+7
include/linux/vgaarb.h
··· 65 * out of the arbitration process (and can safely take 66 * interrupts at any time). 67 */ 68 extern void vga_set_legacy_decoding(struct pci_dev *pdev, 69 unsigned int decodes); 70 71 /** 72 * vga_get - acquire & lock VGA resources
··· 65 * out of the arbitration process (and can safely take 66 * interrupts at any time). 67 */ 68 + #if defined(CONFIG_VGA_ARB) 69 extern void vga_set_legacy_decoding(struct pci_dev *pdev, 70 unsigned int decodes); 71 + #else 72 + static inline void vga_set_legacy_decoding(struct pci_dev *pdev, 73 + unsigned int decodes) 74 + { 75 + } 76 + #endif 77 78 /** 79 * vga_get - acquire & lock VGA resources
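The #else branch gives !CONFIG_VGA_ARB kernels an empty static inline stub, so callers like the new i915 VGA arbiter code compile unchanged either way. A generic sketch of the pattern with an invented CONFIG_EXAMPLE_FEATURE symbol:

#include <stdio.h>

#if defined(CONFIG_EXAMPLE_FEATURE)
extern void feature_configure(int flags);
#else
static inline void feature_configure(int flags)
{
	(void)flags; /* no-op when the feature is compiled out */
}
#endif

int main(void)
{
	feature_configure(0); /* safe to call whether or not the feature exists */
	puts("ok");
	return 0;
}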