Merge tag 'drm-intel-fixes-2013-09-06' of git://people.freedesktop.org/~danvet/drm-intel into drm-fixes

- Early stolen mem reservation from Jesse in the x86 boot code, acked by Ingo
and hpa. This was ready much earlier, but I had assumed it would go in
through the x86 trees, hence the delay. It stops the pci resource code
from planting mmio bars in the middle of stolen mem, and other ugliness
(see the sketch after this list).
- vgaarb improvements from Alex Williamson plus the fix from Ville for the
vgacon->fbcon smooth transition "feature".
- Render-ring pageflips on ivb/hsw to avoid the stalls caused by ring
switching when flips were queued only on the blitter (Chris).
- Deadlock fixes around our flush_workqueue usage, which crept back in -
lockdep isn't clever enough to catch these :(
- Shrinker recursion fix from Chris - this is the bug that blew up the vma
patches from Ben, which I've taken out of 3.12.
- Fixup for the relocation refactoring. Also an igt testcase to make sure
we don't break this again.
- Pile of smaller fixups all over, shortlog has full details.
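
For reference, the core of Jesse's early quirk is small: read the stolen base
from the BDSM register, decode the stolen size from the GMCH graphics control
word, and mark the range reserved in the E820 map before the PCI core hands
out resources. A condensed sketch of the flow (names taken from the
early-quirks hunk below; the per-generation size decode is looked up from a
PCI ID table, and error handling is elided):

    static void __init intel_graphics_stolen(int num, int slot, int func)
    {
            /* stolen base lives in BDSM (0x5c) and is 1MB aligned */
            u32 base = read_pci_config(num, slot, func, 0x5c) & ~((1 << 20) - 1);
            size_t size = stolen_size(num, slot, func); /* per-gen decode */

            if (base && size) {
                    /* hide the range from later resource allocation */
                    e820_add_region(base, size, E820_RESERVED);
                    sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map),
                                      &e820.nr_map);
            }
    }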

* tag 'drm-intel-fixes-2013-09-06' of git://people.freedesktop.org/~danvet/drm-intel: (29 commits)
drm/i915: Delay disabling of VGA memory until vgacon->fbcon handoff is done
drm/i915: try not to lose backlight CBLV precision
drm/i915: Confine page flips to BCS on Valleyview
drm/i915: Skip stolen region initialisation if none is reserved
drm/i915: fix gpu hang vs. flip stall deadlocks
drm/i915: Hold an object reference whilst we shrink it
drm/i915: fix i9xx_crtc_clock_get for multiplied pixels
drm/i915: handle sdvo input pixel multiplier correctly again
drm/i915: fix hpd work vs. flush_work in the pageflip code deadlock
drm/i915: fix up the relocate_entry refactoring
drm/i915: Fix pipe config warnings when dealing with LVDS fixed mode
drm/i915: Don't call sg_free_table() if sg_alloc_table() fails
i915: Update VGA arbiter support for newer devices
vgaarb: Fix VGA decodes changes
vgaarb: Don't disable resources that are not owned
drm/i915: Pin pages whilst mapping the dma-buf
drm/i915: enable trickle feed on Haswell
x86: add early quirk for reserving Intel graphics stolen memory v5
drm/i915: split PCI IDs out into i915_drm.h v4
i915_gem: Convert kmem_cache_alloc(...GFP_ZERO) to kmem_cache_zalloc
...

+774 -334
+154
arch/x86/kernel/early-quirks.c
··· 12 12 #include <linux/pci.h> 13 13 #include <linux/acpi.h> 14 14 #include <linux/pci_ids.h> 15 + #include <drm/i915_drm.h> 15 16 #include <asm/pci-direct.h> 16 17 #include <asm/dma.h> 17 18 #include <asm/io_apic.h> ··· 217 216 218 217 } 219 218 219 + /* 220 + * Systems with Intel graphics controllers set aside memory exclusively 221 + * for gfx driver use. This memory is not marked in the E820 as reserved 222 + * or as RAM, and so is subject to overlap from E820 manipulation later 223 + * in the boot process. On some systems, MMIO space is allocated on top, 224 + * despite the efforts of the "RAM buffer" approach, which simply rounds 225 + * memory boundaries up to 64M to try to catch space that may decode 226 + * as RAM and so is not suitable for MMIO. 227 + * 228 + * And yes, so far on current devices the base addr is always under 4G. 229 + */ 230 + static u32 __init intel_stolen_base(int num, int slot, int func) 231 + { 232 + u32 base; 233 + 234 + /* 235 + * For the PCI IDs in this quirk, the stolen base is always 236 + * in 0x5c, aka the BDSM register (yes that's really what 237 + * it's called). 238 + */ 239 + base = read_pci_config(num, slot, func, 0x5c); 240 + base &= ~((1<<20) - 1); 241 + 242 + return base; 243 + } 244 + 245 + #define KB(x) ((x) * 1024) 246 + #define MB(x) (KB (KB (x))) 247 + #define GB(x) (MB (KB (x))) 248 + 249 + static size_t __init gen3_stolen_size(int num, int slot, int func) 250 + { 251 + size_t stolen_size; 252 + u16 gmch_ctrl; 253 + 254 + gmch_ctrl = read_pci_config_16(0, 0, 0, I830_GMCH_CTRL); 255 + 256 + switch (gmch_ctrl & I855_GMCH_GMS_MASK) { 257 + case I855_GMCH_GMS_STOLEN_1M: 258 + stolen_size = MB(1); 259 + break; 260 + case I855_GMCH_GMS_STOLEN_4M: 261 + stolen_size = MB(4); 262 + break; 263 + case I855_GMCH_GMS_STOLEN_8M: 264 + stolen_size = MB(8); 265 + break; 266 + case I855_GMCH_GMS_STOLEN_16M: 267 + stolen_size = MB(16); 268 + break; 269 + case I855_GMCH_GMS_STOLEN_32M: 270 + stolen_size = MB(32); 271 + break; 272 + case I915_GMCH_GMS_STOLEN_48M: 273 + stolen_size = MB(48); 274 + break; 275 + case I915_GMCH_GMS_STOLEN_64M: 276 + stolen_size = MB(64); 277 + break; 278 + case G33_GMCH_GMS_STOLEN_128M: 279 + stolen_size = MB(128); 280 + break; 281 + case G33_GMCH_GMS_STOLEN_256M: 282 + stolen_size = MB(256); 283 + break; 284 + case INTEL_GMCH_GMS_STOLEN_96M: 285 + stolen_size = MB(96); 286 + break; 287 + case INTEL_GMCH_GMS_STOLEN_160M: 288 + stolen_size = MB(160); 289 + break; 290 + case INTEL_GMCH_GMS_STOLEN_224M: 291 + stolen_size = MB(224); 292 + break; 293 + case INTEL_GMCH_GMS_STOLEN_352M: 294 + stolen_size = MB(352); 295 + break; 296 + default: 297 + stolen_size = 0; 298 + break; 299 + } 300 + 301 + return stolen_size; 302 + } 303 + 304 + static size_t __init gen6_stolen_size(int num, int slot, int func) 305 + { 306 + u16 gmch_ctrl; 307 + 308 + gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); 309 + gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; 310 + gmch_ctrl &= SNB_GMCH_GMS_MASK; 311 + 312 + return gmch_ctrl << 25; /* 32 MB units */ 313 + } 314 + 315 + typedef size_t (*stolen_size_fn)(int num, int slot, int func); 316 + 317 + static struct pci_device_id intel_stolen_ids[] __initdata = { 318 + INTEL_I915G_IDS(gen3_stolen_size), 319 + INTEL_I915GM_IDS(gen3_stolen_size), 320 + INTEL_I945G_IDS(gen3_stolen_size), 321 + INTEL_I945GM_IDS(gen3_stolen_size), 322 + INTEL_VLV_M_IDS(gen3_stolen_size), 323 + INTEL_VLV_D_IDS(gen3_stolen_size), 324 + INTEL_PINEVIEW_IDS(gen3_stolen_size), 325 + INTEL_I965G_IDS(gen3_stolen_size), 326 + 
INTEL_G33_IDS(gen3_stolen_size), 327 + INTEL_I965GM_IDS(gen3_stolen_size), 328 + INTEL_GM45_IDS(gen3_stolen_size), 329 + INTEL_G45_IDS(gen3_stolen_size), 330 + INTEL_IRONLAKE_D_IDS(gen3_stolen_size), 331 + INTEL_IRONLAKE_M_IDS(gen3_stolen_size), 332 + INTEL_SNB_D_IDS(gen6_stolen_size), 333 + INTEL_SNB_M_IDS(gen6_stolen_size), 334 + INTEL_IVB_M_IDS(gen6_stolen_size), 335 + INTEL_IVB_D_IDS(gen6_stolen_size), 336 + INTEL_HSW_D_IDS(gen6_stolen_size), 337 + INTEL_HSW_M_IDS(gen6_stolen_size), 338 + }; 339 + 340 + static void __init intel_graphics_stolen(int num, int slot, int func) 341 + { 342 + size_t size; 343 + int i; 344 + u32 start; 345 + u16 device, subvendor, subdevice; 346 + 347 + device = read_pci_config_16(num, slot, func, PCI_DEVICE_ID); 348 + subvendor = read_pci_config_16(num, slot, func, 349 + PCI_SUBSYSTEM_VENDOR_ID); 350 + subdevice = read_pci_config_16(num, slot, func, PCI_SUBSYSTEM_ID); 351 + 352 + for (i = 0; i < ARRAY_SIZE(intel_stolen_ids); i++) { 353 + if (intel_stolen_ids[i].device == device) { 354 + stolen_size_fn stolen_size = 355 + (stolen_size_fn)intel_stolen_ids[i].driver_data; 356 + size = stolen_size(num, slot, func); 357 + start = intel_stolen_base(num, slot, func); 358 + if (size && start) { 359 + /* Mark this space as reserved */ 360 + e820_add_region(start, size, E820_RESERVED); 361 + sanitize_e820_map(e820.map, 362 + ARRAY_SIZE(e820.map), 363 + &e820.nr_map); 364 + } 365 + return; 366 + } 367 + } 368 + } 369 + 220 370 #define QFLAG_APPLY_ONCE 0x1 221 371 #define QFLAG_APPLIED 0x2 222 372 #define QFLAG_DONE (QFLAG_APPLY_ONCE|QFLAG_APPLIED) ··· 403 251 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 404 252 { PCI_VENDOR_ID_INTEL, 0x3406, PCI_CLASS_BRIDGE_HOST, 405 253 PCI_BASE_CLASS_BRIDGE, 0, intel_remapping_check }, 254 + { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID, 255 + QFLAG_APPLY_ONCE, intel_graphics_stolen }, 406 256 {} 407 257 }; 408 258
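
To make the gen6+ decode in gen6_stolen_size() concrete, here is a worked
example with made-up register values (the shifts and masks mirror
SNB_GMCH_GMS_SHIFT/MASK above; 0x0251 and 0x7b000001 are purely illustrative):

    u16 gmch_ctrl = 0x0251;              /* illustrative SNB_GMCH_CTRL word */
    u16 gms = (gmch_ctrl >> 3) & 0x1f;   /* bits 7:3 -> 0x0a */
    size_t stolen = (size_t)gms << 25;   /* 10 * 32MB = 320MB stolen */

    u32 bdsm = 0x7b000001;               /* illustrative BDSM (0x5c) value */
    u32 base = bdsm & ~((1u << 20) - 1); /* 1MB-aligned base: 0x7b000000 */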
+10 -1
drivers/gpu/drm/i915/i915_debugfs.c
··· 857 857 u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); 858 858 u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); 859 859 u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); 860 - u32 rpstat, cagf; 860 + u32 rpstat, cagf, reqf; 861 861 u32 rpupei, rpcurup, rpprevup; 862 862 u32 rpdownei, rpcurdown, rpprevdown; 863 863 int max_freq; ··· 868 868 return ret; 869 869 870 870 gen6_gt_force_wake_get(dev_priv); 871 + 872 + reqf = I915_READ(GEN6_RPNSWREQ); 873 + reqf &= ~GEN6_TURBO_DISABLE; 874 + if (IS_HASWELL(dev)) 875 + reqf >>= 24; 876 + else 877 + reqf >>= 25; 878 + reqf *= GT_FREQUENCY_MULTIPLIER; 871 879 872 880 rpstat = I915_READ(GEN6_RPSTAT1); 873 881 rpupei = I915_READ(GEN6_RP_CUR_UP_EI); ··· 901 893 gt_perf_status & 0xff); 902 894 seq_printf(m, "Render p-state limit: %d\n", 903 895 rp_state_limits & 0xff); 896 + seq_printf(m, "RPNSWREQ: %dMHz\n", reqf); 904 897 seq_printf(m, "CAGF: %dMHz\n", cagf); 905 898 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei & 906 899 GEN6_CURICONT_MASK);
+12 -3
drivers/gpu/drm/i915/i915_dma.c
··· 1290 1290 * then we do not take part in VGA arbitration and the 1291 1291 * vga_client_register() fails with -ENODEV. 1292 1292 */ 1293 - ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode); 1294 - if (ret && ret != -ENODEV) 1295 - goto out; 1293 + if (!HAS_PCH_SPLIT(dev)) { 1294 + ret = vga_client_register(dev->pdev, dev, NULL, 1295 + i915_vga_set_decode); 1296 + if (ret && ret != -ENODEV) 1297 + goto out; 1298 + } 1296 1299 1297 1300 intel_register_dsm_handler(); 1298 1301 ··· 1350 1347 * tiny window where we will loose hotplug notifactions. 1351 1348 */ 1352 1349 intel_fbdev_initial_config(dev); 1350 + 1351 + /* 1352 + * Must do this after fbcon init so that 1353 + * vgacon_save_screen() works during the handover. 1354 + */ 1355 + i915_disable_vga_mem(dev); 1353 1356 1354 1357 /* Only enable hotplug handling once the fbdev is fully set up. */ 1355 1358 dev_priv->enable_hotplug_processing = true;
+34 -130
drivers/gpu/drm/i915/i915_drv.c
··· 157 157 static struct drm_driver driver; 158 158 extern int intel_agp_enabled; 159 159 160 - #define INTEL_VGA_DEVICE(id, info) { \ 161 - .class = PCI_BASE_CLASS_DISPLAY << 16, \ 162 - .class_mask = 0xff0000, \ 163 - .vendor = 0x8086, \ 164 - .device = id, \ 165 - .subvendor = PCI_ANY_ID, \ 166 - .subdevice = PCI_ANY_ID, \ 167 - .driver_data = (unsigned long) info } 168 - 169 - #define INTEL_QUANTA_VGA_DEVICE(info) { \ 170 - .class = PCI_BASE_CLASS_DISPLAY << 16, \ 171 - .class_mask = 0xff0000, \ 172 - .vendor = 0x8086, \ 173 - .device = 0x16a, \ 174 - .subvendor = 0x152d, \ 175 - .subdevice = 0x8990, \ 176 - .driver_data = (unsigned long) info } 177 - 178 - 179 160 static const struct intel_device_info intel_i830_info = { 180 161 .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .num_pipes = 2, 181 162 .has_overlay = 1, .overlay_needs_physical = 1, ··· 331 350 .has_vebox_ring = 1, 332 351 }; 333 352 353 + /* 354 + * Make sure any device matches here are from most specific to most 355 + * general. For example, since the Quanta match is based on the subsystem 356 + * and subvendor IDs, we need it to come before the more general IVB 357 + * PCI ID matches, otherwise we'll use the wrong info struct above. 358 + */ 359 + #define INTEL_PCI_IDS \ 360 + INTEL_I830_IDS(&intel_i830_info), \ 361 + INTEL_I845G_IDS(&intel_845g_info), \ 362 + INTEL_I85X_IDS(&intel_i85x_info), \ 363 + INTEL_I865G_IDS(&intel_i865g_info), \ 364 + INTEL_I915G_IDS(&intel_i915g_info), \ 365 + INTEL_I915GM_IDS(&intel_i915gm_info), \ 366 + INTEL_I945G_IDS(&intel_i945g_info), \ 367 + INTEL_I945GM_IDS(&intel_i945gm_info), \ 368 + INTEL_I965G_IDS(&intel_i965g_info), \ 369 + INTEL_G33_IDS(&intel_g33_info), \ 370 + INTEL_I965GM_IDS(&intel_i965gm_info), \ 371 + INTEL_GM45_IDS(&intel_gm45_info), \ 372 + INTEL_G45_IDS(&intel_g45_info), \ 373 + INTEL_PINEVIEW_IDS(&intel_pineview_info), \ 374 + INTEL_IRONLAKE_D_IDS(&intel_ironlake_d_info), \ 375 + INTEL_IRONLAKE_M_IDS(&intel_ironlake_m_info), \ 376 + INTEL_SNB_D_IDS(&intel_sandybridge_d_info), \ 377 + INTEL_SNB_M_IDS(&intel_sandybridge_m_info), \ 378 + INTEL_IVB_Q_IDS(&intel_ivybridge_q_info), /* must be first IVB */ \ 379 + INTEL_IVB_M_IDS(&intel_ivybridge_m_info), \ 380 + INTEL_IVB_D_IDS(&intel_ivybridge_d_info), \ 381 + INTEL_HSW_D_IDS(&intel_haswell_d_info), \ 382 + INTEL_HSW_M_IDS(&intel_haswell_m_info), \ 383 + INTEL_VLV_M_IDS(&intel_valleyview_m_info), \ 384 + INTEL_VLV_D_IDS(&intel_valleyview_d_info) 385 + 334 386 static const struct pci_device_id pciidlist[] = { /* aka */ 335 - INTEL_VGA_DEVICE(0x3577, &intel_i830_info), /* I830_M */ 336 - INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ 337 - INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ 338 - INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), 339 - INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ 340 - INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ 341 - INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ 342 - INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ 343 - INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ 344 - INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ 345 - INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ 346 - INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ 347 - INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ 348 - INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ 349 - INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ 350 - INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ 351 - 
INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ 352 - INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ 353 - INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ 354 - INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ 355 - INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ 356 - INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ 357 - INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ 358 - INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ 359 - INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ 360 - INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ 361 - INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ 362 - INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), 363 - INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), 364 - INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), 365 - INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), 366 - INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), 367 - INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), 368 - INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), 369 - INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), 370 - INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), 371 - INTEL_VGA_DEVICE(0x0126, &intel_sandybridge_m_info), 372 - INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), 373 - INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ 374 - INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ 375 - INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ 376 - INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ 377 - INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ 378 - INTEL_QUANTA_VGA_DEVICE(&intel_ivybridge_q_info), /* Quanta transcode */ 379 - INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ 380 - INTEL_VGA_DEVICE(0x0402, &intel_haswell_d_info), /* GT1 desktop */ 381 - INTEL_VGA_DEVICE(0x0412, &intel_haswell_d_info), /* GT2 desktop */ 382 - INTEL_VGA_DEVICE(0x0422, &intel_haswell_d_info), /* GT3 desktop */ 383 - INTEL_VGA_DEVICE(0x040a, &intel_haswell_d_info), /* GT1 server */ 384 - INTEL_VGA_DEVICE(0x041a, &intel_haswell_d_info), /* GT2 server */ 385 - INTEL_VGA_DEVICE(0x042a, &intel_haswell_d_info), /* GT3 server */ 386 - INTEL_VGA_DEVICE(0x0406, &intel_haswell_m_info), /* GT1 mobile */ 387 - INTEL_VGA_DEVICE(0x0416, &intel_haswell_m_info), /* GT2 mobile */ 388 - INTEL_VGA_DEVICE(0x0426, &intel_haswell_m_info), /* GT2 mobile */ 389 - INTEL_VGA_DEVICE(0x040B, &intel_haswell_d_info), /* GT1 reserved */ 390 - INTEL_VGA_DEVICE(0x041B, &intel_haswell_d_info), /* GT2 reserved */ 391 - INTEL_VGA_DEVICE(0x042B, &intel_haswell_d_info), /* GT3 reserved */ 392 - INTEL_VGA_DEVICE(0x040E, &intel_haswell_d_info), /* GT1 reserved */ 393 - INTEL_VGA_DEVICE(0x041E, &intel_haswell_d_info), /* GT2 reserved */ 394 - INTEL_VGA_DEVICE(0x042E, &intel_haswell_d_info), /* GT3 reserved */ 395 - INTEL_VGA_DEVICE(0x0C02, &intel_haswell_d_info), /* SDV GT1 desktop */ 396 - INTEL_VGA_DEVICE(0x0C12, &intel_haswell_d_info), /* SDV GT2 desktop */ 397 - INTEL_VGA_DEVICE(0x0C22, &intel_haswell_d_info), /* SDV GT3 desktop */ 398 - INTEL_VGA_DEVICE(0x0C0A, &intel_haswell_d_info), /* SDV GT1 server */ 399 - INTEL_VGA_DEVICE(0x0C1A, &intel_haswell_d_info), /* SDV GT2 server */ 400 - INTEL_VGA_DEVICE(0x0C2A, &intel_haswell_d_info), /* SDV GT3 server */ 401 - INTEL_VGA_DEVICE(0x0C06, &intel_haswell_m_info), /* SDV GT1 mobile */ 402 - INTEL_VGA_DEVICE(0x0C16, &intel_haswell_m_info), /* SDV GT2 mobile */ 403 - 
INTEL_VGA_DEVICE(0x0C26, &intel_haswell_m_info), /* SDV GT3 mobile */ 404 - INTEL_VGA_DEVICE(0x0C0B, &intel_haswell_d_info), /* SDV GT1 reserved */ 405 - INTEL_VGA_DEVICE(0x0C1B, &intel_haswell_d_info), /* SDV GT2 reserved */ 406 - INTEL_VGA_DEVICE(0x0C2B, &intel_haswell_d_info), /* SDV GT3 reserved */ 407 - INTEL_VGA_DEVICE(0x0C0E, &intel_haswell_d_info), /* SDV GT1 reserved */ 408 - INTEL_VGA_DEVICE(0x0C1E, &intel_haswell_d_info), /* SDV GT2 reserved */ 409 - INTEL_VGA_DEVICE(0x0C2E, &intel_haswell_d_info), /* SDV GT3 reserved */ 410 - INTEL_VGA_DEVICE(0x0A02, &intel_haswell_d_info), /* ULT GT1 desktop */ 411 - INTEL_VGA_DEVICE(0x0A12, &intel_haswell_d_info), /* ULT GT2 desktop */ 412 - INTEL_VGA_DEVICE(0x0A22, &intel_haswell_d_info), /* ULT GT3 desktop */ 413 - INTEL_VGA_DEVICE(0x0A0A, &intel_haswell_d_info), /* ULT GT1 server */ 414 - INTEL_VGA_DEVICE(0x0A1A, &intel_haswell_d_info), /* ULT GT2 server */ 415 - INTEL_VGA_DEVICE(0x0A2A, &intel_haswell_d_info), /* ULT GT3 server */ 416 - INTEL_VGA_DEVICE(0x0A06, &intel_haswell_m_info), /* ULT GT1 mobile */ 417 - INTEL_VGA_DEVICE(0x0A16, &intel_haswell_m_info), /* ULT GT2 mobile */ 418 - INTEL_VGA_DEVICE(0x0A26, &intel_haswell_m_info), /* ULT GT3 mobile */ 419 - INTEL_VGA_DEVICE(0x0A0B, &intel_haswell_d_info), /* ULT GT1 reserved */ 420 - INTEL_VGA_DEVICE(0x0A1B, &intel_haswell_d_info), /* ULT GT2 reserved */ 421 - INTEL_VGA_DEVICE(0x0A2B, &intel_haswell_d_info), /* ULT GT3 reserved */ 422 - INTEL_VGA_DEVICE(0x0A0E, &intel_haswell_m_info), /* ULT GT1 reserved */ 423 - INTEL_VGA_DEVICE(0x0A1E, &intel_haswell_m_info), /* ULT GT2 reserved */ 424 - INTEL_VGA_DEVICE(0x0A2E, &intel_haswell_m_info), /* ULT GT3 reserved */ 425 - INTEL_VGA_DEVICE(0x0D02, &intel_haswell_d_info), /* CRW GT1 desktop */ 426 - INTEL_VGA_DEVICE(0x0D12, &intel_haswell_d_info), /* CRW GT2 desktop */ 427 - INTEL_VGA_DEVICE(0x0D22, &intel_haswell_d_info), /* CRW GT3 desktop */ 428 - INTEL_VGA_DEVICE(0x0D0A, &intel_haswell_d_info), /* CRW GT1 server */ 429 - INTEL_VGA_DEVICE(0x0D1A, &intel_haswell_d_info), /* CRW GT2 server */ 430 - INTEL_VGA_DEVICE(0x0D2A, &intel_haswell_d_info), /* CRW GT3 server */ 431 - INTEL_VGA_DEVICE(0x0D06, &intel_haswell_m_info), /* CRW GT1 mobile */ 432 - INTEL_VGA_DEVICE(0x0D16, &intel_haswell_m_info), /* CRW GT2 mobile */ 433 - INTEL_VGA_DEVICE(0x0D26, &intel_haswell_m_info), /* CRW GT3 mobile */ 434 - INTEL_VGA_DEVICE(0x0D0B, &intel_haswell_d_info), /* CRW GT1 reserved */ 435 - INTEL_VGA_DEVICE(0x0D1B, &intel_haswell_d_info), /* CRW GT2 reserved */ 436 - INTEL_VGA_DEVICE(0x0D2B, &intel_haswell_d_info), /* CRW GT3 reserved */ 437 - INTEL_VGA_DEVICE(0x0D0E, &intel_haswell_d_info), /* CRW GT1 reserved */ 438 - INTEL_VGA_DEVICE(0x0D1E, &intel_haswell_d_info), /* CRW GT2 reserved */ 439 - INTEL_VGA_DEVICE(0x0D2E, &intel_haswell_d_info), /* CRW GT3 reserved */ 440 - INTEL_VGA_DEVICE(0x0f30, &intel_valleyview_m_info), 441 - INTEL_VGA_DEVICE(0x0f31, &intel_valleyview_m_info), 442 - INTEL_VGA_DEVICE(0x0f32, &intel_valleyview_m_info), 443 - INTEL_VGA_DEVICE(0x0f33, &intel_valleyview_m_info), 444 - INTEL_VGA_DEVICE(0x0157, &intel_valleyview_m_info), 445 - INTEL_VGA_DEVICE(0x0155, &intel_valleyview_d_info), 387 + INTEL_PCI_IDS, 446 388 {0, 0, 0} 447 389 }; 448 390
+7
drivers/gpu/drm/i915/i915_drv.h
··· 1236 1236 1237 1237 unsigned int fsb_freq, mem_freq, is_ddr3; 1238 1238 1239 + /** 1240 + * wq - Driver workqueue for GEM. 1241 + * 1242 + * NOTE: Work items scheduled here are not allowed to grab any modeset 1243 + * locks, for otherwise the flushing done in the pageflip code will 1244 + * result in deadlocks. 1245 + */ 1239 1246 struct workqueue_struct *wq; 1240 1247 1241 1248 /* Display functions */
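
The practical consequence of this rule shows up in the i915_irq.c hunks below:
work items that can end up grabbing modeset locks are moved off dev_priv->wq
and onto the system workqueue. A minimal sketch of the pattern (the work
function name matches the driver; the comments paraphrase the rule rather
than a verified lockdep trace):

    INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);

    /* wrong: the handler takes modeset locks, and the pageflip code calls
     * flush_workqueue(dev_priv->wq) while holding them -> ABBA deadlock */
    queue_work(dev_priv->wq, &dev_priv->hotplug_work);

    /* right: the system workqueue is never flushed with modeset locks held */
    schedule_work(&dev_priv->hotplug_work);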
+40 -8
drivers/gpu/drm/i915/i915_gem.c
··· 212 212 void *i915_gem_object_alloc(struct drm_device *dev) 213 213 { 214 214 struct drm_i915_private *dev_priv = dev->dev_private; 215 - return kmem_cache_alloc(dev_priv->slab, GFP_KERNEL | __GFP_ZERO); 215 + return kmem_cache_zalloc(dev_priv->slab, GFP_KERNEL); 216 216 } 217 217 218 218 void i915_gem_object_free(struct drm_i915_gem_object *obj) ··· 1695 1695 __i915_gem_shrink(struct drm_i915_private *dev_priv, long target, 1696 1696 bool purgeable_only) 1697 1697 { 1698 + struct list_head still_bound_list; 1698 1699 struct drm_i915_gem_object *obj, *next; 1699 1700 long count = 0; 1700 1701 ··· 1710 1709 } 1711 1710 } 1712 1711 1713 - list_for_each_entry_safe(obj, next, &dev_priv->mm.bound_list, 1714 - global_list) { 1712 + /* 1713 + * As we may completely rewrite the bound list whilst unbinding 1714 + * (due to retiring requests) we have to strictly process only 1715 + * one element of the list at the time, and recheck the list 1716 + * on every iteration. 1717 + */ 1718 + INIT_LIST_HEAD(&still_bound_list); 1719 + while (count < target && !list_empty(&dev_priv->mm.bound_list)) { 1715 1720 struct i915_vma *vma, *v; 1721 + 1722 + obj = list_first_entry(&dev_priv->mm.bound_list, 1723 + typeof(*obj), global_list); 1724 + list_move_tail(&obj->global_list, &still_bound_list); 1716 1725 1717 1726 if (!i915_gem_object_is_purgeable(obj) && purgeable_only) 1718 1727 continue; 1728 + 1729 + /* 1730 + * Hold a reference whilst we unbind this object, as we may 1731 + * end up waiting for and retiring requests. This might 1732 + * release the final reference (held by the active list) 1733 + * and result in the object being 1734 + * freed from under us. 1735 + * 1736 + * Note 1: Shrinking the bound list is special since only active 1737 + * (and hence bound objects) can contain such limbo objects, so 1738 + * we don't need special tricks for shrinking the unbound list. 1739 + * The only other place where we have to be careful with active 1740 + * objects suddenly disappearing due to retiring requests is the 1741 + * eviction code. 1742 + * 1743 + * Note 2: Even though the bound list doesn't hold a reference 1744 + * to the object we can safely grab one here: The final object 1745 + * unreferencing and the bound_list are both protected by the 1746 + * dev->struct_mutex and so we won't ever be able to observe an 1747 + * object on the bound_list with a reference count equals 0. 1748 + */ 1749 + drm_gem_object_reference(&obj->base); 1719 1750 1720 1751 list_for_each_entry_safe(vma, v, &obj->vma_list, vma_link) 1721 1752 if (i915_vma_unbind(vma)) 1722 1753 break; 1723 1754 1724 - if (!i915_gem_object_put_pages(obj)) { 1755 + if (i915_gem_object_put_pages(obj) == 0) 1725 1756 count += obj->base.size >> PAGE_SHIFT; 1726 - if (count >= target) 1727 - return count; 1728 - } 1757 + 1758 + drm_gem_object_unreference(&obj->base); 1729 1759 } 1760 + list_splice(&still_bound_list, &dev_priv->mm.bound_list); 1730 1761 1731 1762 return count; 1732 1763 } ··· 1807 1774 1808 1775 page_count = obj->base.size / PAGE_SIZE; 1809 1776 if (sg_alloc_table(st, page_count, GFP_KERNEL)) { 1810 - sg_free_table(st); 1811 1777 kfree(st); 1812 1778 return -ENOMEM; 1813 1779 }
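
A note on why the list_for_each_entry_safe() loop had to go: the _safe
variant only caches the next list entry, so when i915_vma_unbind() retires
requests and rewrites bound_list, the cached cursor can point at a re-linked
or freed node. Taking exactly one element off the list per pass sidesteps
this; the idiom from the hunk above, in isolation:

    obj = list_first_entry(&dev_priv->mm.bound_list,
                           typeof(*obj), global_list);
    list_move_tail(&obj->global_list, &still_bound_list);
    /* ... work on obj; bound_list may now look completely different,
     * but the next iteration re-reads it from the head ... */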
+22 -19
drivers/gpu/drm/i915/i915_gem_dmabuf.c
··· 42 42 43 43 ret = i915_mutex_lock_interruptible(obj->base.dev); 44 44 if (ret) 45 - return ERR_PTR(ret); 45 + goto err; 46 46 47 47 ret = i915_gem_object_get_pages(obj); 48 - if (ret) { 49 - st = ERR_PTR(ret); 50 - goto out; 51 - } 48 + if (ret) 49 + goto err_unlock; 50 + 51 + i915_gem_object_pin_pages(obj); 52 52 53 53 /* Copy sg so that we make an independent mapping */ 54 54 st = kmalloc(sizeof(struct sg_table), GFP_KERNEL); 55 55 if (st == NULL) { 56 - st = ERR_PTR(-ENOMEM); 57 - goto out; 56 + ret = -ENOMEM; 57 + goto err_unpin; 58 58 } 59 59 60 60 ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL); 61 - if (ret) { 62 - kfree(st); 63 - st = ERR_PTR(ret); 64 - goto out; 65 - } 61 + if (ret) 62 + goto err_free; 66 63 67 64 src = obj->pages->sgl; 68 65 dst = st->sgl; ··· 70 73 } 71 74 72 75 if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) { 73 - sg_free_table(st); 74 - kfree(st); 75 - st = ERR_PTR(-ENOMEM); 76 - goto out; 76 + ret = -ENOMEM; 77 + goto err_free_sg; 77 78 } 78 79 79 - i915_gem_object_pin_pages(obj); 80 - 81 - out: 82 80 mutex_unlock(&obj->base.dev->struct_mutex); 83 81 return st; 82 + 83 + err_free_sg: 84 + sg_free_table(st); 85 + err_free: 86 + kfree(st); 87 + err_unpin: 88 + i915_gem_object_unpin_pages(obj); 89 + err_unlock: 90 + mutex_unlock(&obj->base.dev->struct_mutex); 91 + err: 92 + return ERR_PTR(ret); 84 93 } 85 94 86 95 static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
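
The rewrite above converts midstream cleanup into the standard kernel unwind
ladder: acquire resources in order, and on failure jump to the label that
releases everything acquired so far, in reverse order. A generic,
self-contained sketch of the shape (acquire_a()/release_a() and friends are
hypothetical helpers, not i915 API):

    int setup(void)
    {
            int ret;

            ret = acquire_a();
            if (ret)
                    goto err;        /* nothing to undo yet */
            ret = acquire_b();
            if (ret)
                    goto err_a;      /* undo a only */
            ret = acquire_c();
            if (ret)
                    goto err_b;      /* undo b, then a */
            return 0;

    err_b:
            release_b();
    err_a:
            release_a();
    err:
            return ret;
    }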
+3
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 310 310 else 311 311 ret = relocate_entry_gtt(obj, reloc); 312 312 313 + if (ret) 314 + return ret; 315 + 313 316 /* and update the user's relocation entry */ 314 317 reloc->presumed_offset = target_offset; 315 318
+3
drivers/gpu/drm/i915/i915_gem_stolen.c
··· 201 201 struct drm_i915_private *dev_priv = dev->dev_private; 202 202 int bios_reserved = 0; 203 203 204 + if (dev_priv->gtt.stolen_size == 0) 205 + return 0; 206 + 204 207 dev_priv->mm.stolen_base = i915_stolen_to_physical(dev); 205 208 if (dev_priv->mm.stolen_base == 0) 206 209 return 0;
+1 -1
drivers/gpu/drm/i915/i915_gpu_error.c
··· 641 641 if (WARN_ON(ring->id != RCS)) 642 642 return NULL; 643 643 644 - obj = ring->private; 644 + obj = ring->scratch.obj; 645 645 if (acthd >= i915_gem_obj_ggtt_offset(obj) && 646 646 acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) 647 647 return i915_error_object_create(dev_priv, obj);
+17 -6
drivers/gpu/drm/i915/i915_irq.c
··· 1027 1027 dev_priv->display.hpd_irq_setup(dev); 1028 1028 spin_unlock(&dev_priv->irq_lock); 1029 1029 1030 - queue_work(dev_priv->wq, 1031 - &dev_priv->hotplug_work); 1030 + /* 1031 + * Our hotplug handler can grab modeset locks (by calling down into the 1032 + * fb helpers). Hence it must not be run on our own dev_priv->wq work 1033 + * queue for otherwise the flush_work in the pageflip code will 1034 + * deadlock. 1035 + */ 1036 + schedule_work(&dev_priv->hotplug_work); 1032 1037 } 1033 1038 1034 1039 static void gmbus_irq_handler(struct drm_device *dev) ··· 1660 1655 wake_up_all(&ring->irq_queue); 1661 1656 } 1662 1657 1663 - queue_work(dev_priv->wq, &dev_priv->gpu_error.work); 1658 + /* 1659 + * Our reset work can grab modeset locks (since it needs to reset the 1660 + * state of outstanding pageflips). Hence it must not be run on our own 1661 + * dev_priv->wq work queue for otherwise the flush_work in the pageflip 1662 + * code will deadlock. 1663 + */ 1664 + schedule_work(&dev_priv->gpu_error.work); 1664 1665 } 1665 1666 1666 1667 static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) ··· 2038 2027 2039 2028 for_each_ring(ring, dev_priv, i) { 2040 2029 if (ring->hangcheck.score > FIRE) { 2041 - DRM_ERROR("%s on %s\n", 2042 - stuck[i] ? "stuck" : "no progress", 2043 - ring->name); 2030 + DRM_INFO("%s on %s\n", 2031 + stuck[i] ? "stuck" : "no progress", 2032 + ring->name); 2044 2033 rings_hung++; 2045 2034 } 2046 2035 }
+19 -15
drivers/gpu/drm/i915/i915_reg.h
··· 33 33 #define _MASKED_BIT_ENABLE(a) (((a) << 16) | (a)) 34 34 #define _MASKED_BIT_DISABLE(a) ((a) << 16) 35 35 36 - /* 37 - * The Bridge device's PCI config space has information about the 38 - * fb aperture size and the amount of pre-reserved memory. 39 - * This is all handled in the intel-gtt.ko module. i915.ko only 40 - * cares about the vga bit for the vga rbiter. 41 - */ 42 - #define INTEL_GMCH_CTRL 0x52 43 - #define INTEL_GMCH_VGA_DISABLE (1 << 1) 44 - #define SNB_GMCH_CTRL 0x50 45 - #define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */ 46 - #define SNB_GMCH_GGMS_MASK 0x3 47 - #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */ 48 - #define SNB_GMCH_GMS_MASK 0x1f 49 - 50 - 51 36 /* PCI config space */ 52 37 53 38 #define HPLLCC 0xc0 /* 855 only */ ··· 230 245 * address/value pairs. Don't overdue it, though, x <= 2^4 must hold! 231 246 */ 232 247 #define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1) 248 + #define MI_STORE_REGISTER_MEM(x) MI_INSTR(0x24, 2*x-1) 233 249 #define MI_FLUSH_DW MI_INSTR(0x26, 1) /* for GEN6 */ 234 250 #define MI_FLUSH_DW_STORE_INDEX (1<<21) 235 251 #define MI_INVALIDATE_TLB (1<<18) ··· 679 693 #define FPGA_DBG_RM_NOCLAIM (1<<31) 680 694 681 695 #define DERRMR 0x44050 696 + #define DERRMR_PIPEA_SCANLINE (1<<0) 697 + #define DERRMR_PIPEA_PRI_FLIP_DONE (1<<1) 698 + #define DERRMR_PIPEA_SPR_FLIP_DONE (1<<2) 699 + #define DERRMR_PIPEA_VBLANK (1<<3) 700 + #define DERRMR_PIPEA_HBLANK (1<<5) 701 + #define DERRMR_PIPEB_SCANLINE (1<<8) 702 + #define DERRMR_PIPEB_PRI_FLIP_DONE (1<<9) 703 + #define DERRMR_PIPEB_SPR_FLIP_DONE (1<<10) 704 + #define DERRMR_PIPEB_VBLANK (1<<11) 705 + #define DERRMR_PIPEB_HBLANK (1<<13) 706 + /* Note that PIPEC is not a simple translation of PIPEA/PIPEB */ 707 + #define DERRMR_PIPEC_SCANLINE (1<<14) 708 + #define DERRMR_PIPEC_PRI_FLIP_DONE (1<<15) 709 + #define DERRMR_PIPEC_SPR_FLIP_DONE (1<<20) 710 + #define DERRMR_PIPEC_VBLANK (1<<21) 711 + #define DERRMR_PIPEC_HBLANK (1<<22) 712 + 682 713 683 714 /* GM45+ chicken bits -- debug workaround bits that may be required 684 715 * for various sorts of correct behavior. The top 16 bits of each are ··· 3313 3310 #define MCURSOR_PIPE_A 0x00 3314 3311 #define MCURSOR_PIPE_B (1 << 28) 3315 3312 #define MCURSOR_GAMMA_ENABLE (1 << 26) 3313 + #define CURSOR_TRICKLE_FEED_DISABLE (1 << 14) 3316 3314 #define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084) 3317 3315 #define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088) 3318 3316 #define CURSOR_POS_MASK 0x007FF
+31 -5
drivers/gpu/drm/i915/i915_sysfs.c
··· 224 224 return snprintf(buf, PAGE_SIZE, "%d\n", ret); 225 225 } 226 226 227 + static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev, 228 + struct device_attribute *attr, char *buf) 229 + { 230 + struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); 231 + struct drm_device *dev = minor->dev; 232 + struct drm_i915_private *dev_priv = dev->dev_private; 233 + 234 + return snprintf(buf, PAGE_SIZE, "%d\n", 235 + vlv_gpu_freq(dev_priv->mem_freq, 236 + dev_priv->rps.rpe_delay)); 237 + } 238 + 227 239 static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf) 228 240 { 229 241 struct drm_minor *minor = container_of(kdev, struct drm_minor, kdev); ··· 378 366 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store); 379 367 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store); 380 368 369 + static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL); 381 370 382 371 static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf); 383 372 static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL); ··· 419 406 &dev_attr_gt_RP0_freq_mhz.attr, 420 407 &dev_attr_gt_RP1_freq_mhz.attr, 421 408 &dev_attr_gt_RPn_freq_mhz.attr, 409 + NULL, 410 + }; 411 + 412 + static const struct attribute *vlv_attrs[] = { 413 + &dev_attr_gt_cur_freq_mhz.attr, 414 + &dev_attr_gt_max_freq_mhz.attr, 415 + &dev_attr_gt_min_freq_mhz.attr, 416 + &dev_attr_vlv_rpe_freq_mhz.attr, 422 417 NULL, 423 418 }; 424 419 ··· 513 492 DRM_ERROR("l3 parity sysfs setup failed\n"); 514 493 } 515 494 516 - if (INTEL_INFO(dev)->gen >= 6) { 495 + ret = 0; 496 + if (IS_VALLEYVIEW(dev)) 497 + ret = sysfs_create_files(&dev->primary->kdev.kobj, vlv_attrs); 498 + else if (INTEL_INFO(dev)->gen >= 6) 517 499 ret = sysfs_create_files(&dev->primary->kdev.kobj, gen6_attrs); 518 - if (ret) 519 - DRM_ERROR("gen6 sysfs setup failed\n"); 520 - } 500 + if (ret) 501 + DRM_ERROR("RPS sysfs setup failed\n"); 521 502 522 503 ret = sysfs_create_bin_file(&dev->primary->kdev.kobj, 523 504 &error_state_attr); ··· 530 507 void i915_teardown_sysfs(struct drm_device *dev) 531 508 { 532 509 sysfs_remove_bin_file(&dev->primary->kdev.kobj, &error_state_attr); 533 - sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); 510 + if (IS_VALLEYVIEW(dev)) 511 + sysfs_remove_files(&dev->primary->kdev.kobj, vlv_attrs); 512 + else 513 + sysfs_remove_files(&dev->primary->kdev.kobj, gen6_attrs); 534 514 device_remove_bin_file(&dev->primary->kdev, &dpf_attrs); 535 515 #ifdef CONFIG_PM 536 516 sysfs_unmerge_group(&dev->primary->kdev.kobj, &rc6_attr_group);
+1 -1
drivers/gpu/drm/i915/intel_crt.c
··· 688 688 struct drm_i915_private *dev_priv = dev->dev_private; 689 689 struct intel_crt *crt = intel_attached_crt(connector); 690 690 691 - if (HAS_PCH_SPLIT(dev)) { 691 + if (INTEL_INFO(dev)->gen >= 5) { 692 692 u32 adpa; 693 693 694 694 adpa = I915_READ(crt->adpa_reg);
+69 -14
drivers/gpu/drm/i915/intel_display.c
··· 2077 2077 else 2078 2078 dspcntr &= ~DISPPLANE_TILED; 2079 2079 2080 - /* must disable */ 2081 - dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2080 + if (IS_HASWELL(dev)) 2081 + dspcntr &= ~DISPPLANE_TRICKLE_FEED_DISABLE; 2082 + else 2083 + dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; 2082 2084 2083 2085 I915_WRITE(reg, dspcntr); 2084 2086 ··· 6764 6762 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); 6765 6763 cntl |= CURSOR_MODE_DISABLE; 6766 6764 } 6767 - if (IS_HASWELL(dev)) 6765 + if (IS_HASWELL(dev)) { 6768 6766 cntl |= CURSOR_PIPE_CSC_ENABLE; 6767 + cntl &= ~CURSOR_TRICKLE_FEED_DISABLE; 6768 + } 6769 6769 I915_WRITE(CURCNTR_IVB(pipe), cntl); 6770 6770 6771 6771 intel_crtc->cursor_visible = visible; ··· 7313 7309 } 7314 7310 } 7315 7311 7316 - pipe_config->adjusted_mode.clock = clock.dot * 7317 - pipe_config->pixel_multiplier; 7312 + pipe_config->adjusted_mode.clock = clock.dot; 7318 7313 } 7319 7314 7320 7315 static void ironlake_crtc_clock_get(struct intel_crtc *crtc, ··· 7831 7828 return ret; 7832 7829 } 7833 7830 7834 - /* 7835 - * On gen7 we currently use the blit ring because (in early silicon at least) 7836 - * the render ring doesn't give us interrpts for page flip completion, which 7837 - * means clients will hang after the first flip is queued. Fortunately the 7838 - * blit ring generates interrupts properly, so use it instead. 7839 - */ 7840 7831 static int intel_gen7_queue_flip(struct drm_device *dev, 7841 7832 struct drm_crtc *crtc, 7842 7833 struct drm_framebuffer *fb, ··· 7839 7842 { 7840 7843 struct drm_i915_private *dev_priv = dev->dev_private; 7841 7844 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 7842 - struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 7845 + struct intel_ring_buffer *ring; 7843 7846 uint32_t plane_bit = 0; 7844 - int ret; 7847 + int len, ret; 7848 + 7849 + ring = obj->ring; 7850 + if (IS_VALLEYVIEW(dev) || ring == NULL || ring->id != RCS) 7851 + ring = &dev_priv->ring[BCS]; 7845 7852 7846 7853 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 7847 7854 if (ret) ··· 7867 7866 goto err_unpin; 7868 7867 } 7869 7868 7870 - ret = intel_ring_begin(ring, 4); 7869 + len = 4; 7870 + if (ring->id == RCS) 7871 + len += 6; 7872 + 7873 + ret = intel_ring_begin(ring, len); 7871 7874 if (ret) 7872 7875 goto err_unpin; 7876 + 7877 + /* Unmask the flip-done completion message. Note that the bspec says that 7878 + * we should do this for both the BCS and RCS, and that we must not unmask 7879 + * more than one flip event at any time (or ensure that one flip message 7880 + * can be sent by waiting for flip-done prior to queueing new flips). 7881 + * Experimentation says that BCS works despite DERRMR masking all 7882 + * flip-done completion events and that unmasking all planes at once 7883 + * for the RCS also doesn't appear to drop events. Setting the DERRMR 7884 + * to zero does lead to lockups within MI_DISPLAY_FLIP. 
7885 + */ 7886 + if (ring->id == RCS) { 7887 + intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); 7888 + intel_ring_emit(ring, DERRMR); 7889 + intel_ring_emit(ring, ~(DERRMR_PIPEA_PRI_FLIP_DONE | 7890 + DERRMR_PIPEB_PRI_FLIP_DONE | 7891 + DERRMR_PIPEC_PRI_FLIP_DONE)); 7892 + intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1)); 7893 + intel_ring_emit(ring, DERRMR); 7894 + intel_ring_emit(ring, ring->scratch.gtt_offset + 256); 7895 + } 7873 7896 7874 7897 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit); 7875 7898 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); ··· 10047 10022 POSTING_READ(vga_reg); 10048 10023 } 10049 10024 10025 + static void i915_enable_vga_mem(struct drm_device *dev) 10026 + { 10027 + /* Enable VGA memory on Intel HD */ 10028 + if (HAS_PCH_SPLIT(dev)) { 10029 + vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 10030 + outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE); 10031 + vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO | 10032 + VGA_RSRC_LEGACY_MEM | 10033 + VGA_RSRC_NORMAL_IO | 10034 + VGA_RSRC_NORMAL_MEM); 10035 + vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 10036 + } 10037 + } 10038 + 10039 + void i915_disable_vga_mem(struct drm_device *dev) 10040 + { 10041 + /* Disable VGA memory on Intel HD */ 10042 + if (HAS_PCH_SPLIT(dev)) { 10043 + vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); 10044 + outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE); 10045 + vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO | 10046 + VGA_RSRC_NORMAL_IO | 10047 + VGA_RSRC_NORMAL_MEM); 10048 + vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); 10049 + } 10050 + } 10051 + 10050 10052 void intel_modeset_init_hw(struct drm_device *dev) 10051 10053 { 10052 10054 intel_init_power_well(dev); ··· 10352 10300 if (I915_READ(vga_reg) != VGA_DISP_DISABLE) { 10353 10301 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n"); 10354 10302 i915_disable_vga(dev); 10303 + i915_disable_vga_mem(dev); 10355 10304 } 10356 10305 } 10357 10306 ··· 10565 10512 } 10566 10513 10567 10514 intel_disable_fbc(dev); 10515 + 10516 + i915_enable_vga_mem(dev); 10568 10517 10569 10518 intel_disable_gt_powersave(dev); 10570 10519
+2 -1
drivers/gpu/drm/i915/intel_drv.h
··· 551 551 struct drm_display_mode *fixed_mode); 552 552 extern void intel_panel_fini(struct intel_panel *panel); 553 553 554 - extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 554 + extern void intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, 555 555 struct drm_display_mode *adjusted_mode); 556 556 extern void intel_pch_panel_fitting(struct intel_crtc *crtc, 557 557 struct intel_crtc_config *pipe_config, ··· 792 792 extern void hsw_pc8_restore_interrupts(struct drm_device *dev); 793 793 extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv); 794 794 extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv); 795 + extern void i915_disable_vga_mem(struct drm_device *dev); 795 796 796 797 #endif /* __INTEL_DRV_H__ */
+4 -4
drivers/gpu/drm/i915/intel_lvds.c
··· 128 128 struct drm_device *dev = encoder->base.dev; 129 129 struct drm_i915_private *dev_priv = dev->dev_private; 130 130 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 131 - struct drm_display_mode *fixed_mode = 132 - lvds_encoder->attached_connector->base.panel.fixed_mode; 131 + const struct drm_display_mode *adjusted_mode = 132 + &crtc->config.adjusted_mode; 133 133 int pipe = crtc->pipe; 134 134 u32 temp; 135 135 ··· 183 183 temp &= ~LVDS_ENABLE_DITHER; 184 184 } 185 185 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); 186 - if (fixed_mode->flags & DRM_MODE_FLAG_NHSYNC) 186 + if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) 187 187 temp |= LVDS_HSYNC_POLARITY; 188 - if (fixed_mode->flags & DRM_MODE_FLAG_NVSYNC) 188 + if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) 189 189 temp |= LVDS_VSYNC_POLARITY; 190 190 191 191 I915_WRITE(lvds_encoder->reg, temp);
+1 -1
drivers/gpu/drm/i915/intel_opregion.c
··· 173 173 return ASLE_BACKLIGHT_FAILED; 174 174 175 175 intel_panel_set_backlight(dev, bclp, 255); 176 - iowrite32((bclp*0x64)/0xff | ASLE_CBLV_VALID, &asle->cblv); 176 + iowrite32(DIV_ROUND_UP(bclp * 100, 255) | ASLE_CBLV_VALID, &asle->cblv); 177 177 178 178 return 0; 179 179 }
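
The rounding change above is easiest to see with a worked example; bclp is
the requested backlight level in 0-255 and cblv reports it back as a
percentage:

    /* bclp = 1 (lowest non-zero level):
     *   old: (1 * 0x64) / 0xff          = 100 / 255 = 0   (floor -> 0%)
     *   new: DIV_ROUND_UP(1 * 100, 255)              = 1   (ceil  -> 1%)
     * bclp = 128:
     *   old: 12800 / 255 = 50           new: DIV_ROUND_UP(12800, 255) = 51
     * Rounding up keeps a non-zero request from being reported back as 0%.
     */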
+3 -11
drivers/gpu/drm/i915/intel_panel.c
··· 36 36 #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ 37 37 38 38 void 39 - intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, 39 + intel_fixed_panel_mode(const struct drm_display_mode *fixed_mode, 40 40 struct drm_display_mode *adjusted_mode) 41 41 { 42 - adjusted_mode->hdisplay = fixed_mode->hdisplay; 43 - adjusted_mode->hsync_start = fixed_mode->hsync_start; 44 - adjusted_mode->hsync_end = fixed_mode->hsync_end; 45 - adjusted_mode->htotal = fixed_mode->htotal; 42 + drm_mode_copy(adjusted_mode, fixed_mode); 46 43 47 - adjusted_mode->vdisplay = fixed_mode->vdisplay; 48 - adjusted_mode->vsync_start = fixed_mode->vsync_start; 49 - adjusted_mode->vsync_end = fixed_mode->vsync_end; 50 - adjusted_mode->vtotal = fixed_mode->vtotal; 51 - 52 - adjusted_mode->clock = fixed_mode->clock; 44 + drm_mode_set_crtcinfo(adjusted_mode, 0); 53 45 } 54 46 55 47 /* adjusted_mode has been preset to be the panel's fixed mode */
+11 -3
drivers/gpu/drm/i915/intel_pm.c
··· 3447 3447 static void gen6_enable_rps_interrupts(struct drm_device *dev) 3448 3448 { 3449 3449 struct drm_i915_private *dev_priv = dev->dev_private; 3450 + u32 enabled_intrs; 3450 3451 3451 3452 spin_lock_irq(&dev_priv->irq_lock); 3452 3453 WARN_ON(dev_priv->rps.pm_iir); 3453 3454 snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS); 3454 3455 I915_WRITE(GEN6_PMIIR, GEN6_PM_RPS_EVENTS); 3455 3456 spin_unlock_irq(&dev_priv->irq_lock); 3457 + 3456 3458 /* only unmask PM interrupts we need. Mask all others. */ 3457 - I915_WRITE(GEN6_PMINTRMSK, ~GEN6_PM_RPS_EVENTS); 3459 + enabled_intrs = GEN6_PM_RPS_EVENTS; 3460 + 3461 + /* IVB and SNB hard hangs on looping batchbuffer 3462 + * if GEN6_PM_UP_EI_EXPIRED is masked. 3463 + */ 3464 + if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) 3465 + enabled_intrs |= GEN6_PM_RP_UP_EI_EXPIRED; 3466 + 3467 + I915_WRITE(GEN6_PMINTRMSK, ~enabled_intrs); 3458 3468 } 3459 3469 3460 3470 static void gen6_enable_rps(struct drm_device *dev) ··· 4959 4949 I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG, 4960 4950 I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) | 4961 4951 GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB); 4962 - 4963 - g4x_disable_trickle_feed(dev); 4964 4952 4965 4953 /* WaVSRefCountFullforceMissDisable:hsw */ 4966 4954 gen7_setup_fixed_func_scheduler(dev_priv);
+29 -70
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 33 33 #include "i915_trace.h" 34 34 #include "intel_drv.h" 35 35 36 - /* 37 - * 965+ support PIPE_CONTROL commands, which provide finer grained control 38 - * over cache flushing. 39 - */ 40 - struct pipe_control { 41 - struct drm_i915_gem_object *obj; 42 - volatile u32 *cpu_page; 43 - u32 gtt_offset; 44 - }; 45 - 46 36 static inline int ring_space(struct intel_ring_buffer *ring) 47 37 { 48 38 int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); ··· 165 175 static int 166 176 intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) 167 177 { 168 - struct pipe_control *pc = ring->private; 169 - u32 scratch_addr = pc->gtt_offset + 128; 178 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 170 179 int ret; 171 180 172 181 ··· 202 213 u32 invalidate_domains, u32 flush_domains) 203 214 { 204 215 u32 flags = 0; 205 - struct pipe_control *pc = ring->private; 206 - u32 scratch_addr = pc->gtt_offset + 128; 216 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 207 217 int ret; 208 218 209 219 /* Force SNB workarounds for PIPE_CONTROL flushes */ ··· 294 306 u32 invalidate_domains, u32 flush_domains) 295 307 { 296 308 u32 flags = 0; 297 - struct pipe_control *pc = ring->private; 298 - u32 scratch_addr = pc->gtt_offset + 128; 309 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 299 310 int ret; 300 311 301 312 /* ··· 468 481 static int 469 482 init_pipe_control(struct intel_ring_buffer *ring) 470 483 { 471 - struct pipe_control *pc; 472 - struct drm_i915_gem_object *obj; 473 484 int ret; 474 485 475 - if (ring->private) 486 + if (ring->scratch.obj) 476 487 return 0; 477 488 478 - pc = kmalloc(sizeof(*pc), GFP_KERNEL); 479 - if (!pc) 480 - return -ENOMEM; 481 - 482 - obj = i915_gem_alloc_object(ring->dev, 4096); 483 - if (obj == NULL) { 489 + ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096); 490 + if (ring->scratch.obj == NULL) { 484 491 DRM_ERROR("Failed to allocate seqno page\n"); 485 492 ret = -ENOMEM; 486 493 goto err; 487 494 } 488 495 489 - i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); 496 + i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); 490 497 491 - ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); 498 + ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false); 492 499 if (ret) 493 500 goto err_unref; 494 501 495 - pc->gtt_offset = i915_gem_obj_ggtt_offset(obj); 496 - pc->cpu_page = kmap(sg_page(obj->pages->sgl)); 497 - if (pc->cpu_page == NULL) { 502 + ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj); 503 + ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl)); 504 + if (ring->scratch.cpu_page == NULL) { 498 505 ret = -ENOMEM; 499 506 goto err_unpin; 500 507 } 501 508 502 509 DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n", 503 - ring->name, pc->gtt_offset); 504 - 505 - pc->obj = obj; 506 - ring->private = pc; 510 + ring->name, ring->scratch.gtt_offset); 507 511 return 0; 508 512 509 513 err_unpin: 510 - i915_gem_object_unpin(obj); 514 + i915_gem_object_unpin(ring->scratch.obj); 511 515 err_unref: 512 - drm_gem_object_unreference(&obj->base); 516 + drm_gem_object_unreference(&ring->scratch.obj->base); 513 517 err: 514 - kfree(pc); 515 518 return ret; 516 - } 517 - 518 - static void 519 - cleanup_pipe_control(struct intel_ring_buffer *ring) 520 - { 521 - struct pipe_control *pc = ring->private; 522 - struct drm_i915_gem_object *obj; 523 - 524 - obj = pc->obj; 525 - 526 - kunmap(sg_page(obj->pages->sgl)); 527 - i915_gem_object_unpin(obj); 528 - 
drm_gem_object_unreference(&obj->base); 529 - 530 - kfree(pc); 531 519 } 532 520 533 521 static int init_render_ring(struct intel_ring_buffer *ring) ··· 569 607 { 570 608 struct drm_device *dev = ring->dev; 571 609 572 - if (!ring->private) 610 + if (ring->scratch.obj == NULL) 573 611 return; 574 612 575 - if (HAS_BROKEN_CS_TLB(dev)) 576 - drm_gem_object_unreference(to_gem_object(ring->private)); 613 + if (INTEL_INFO(dev)->gen >= 5) { 614 + kunmap(sg_page(ring->scratch.obj->pages->sgl)); 615 + i915_gem_object_unpin(ring->scratch.obj); 616 + } 577 617 578 - if (INTEL_INFO(dev)->gen >= 5) 579 - cleanup_pipe_control(ring); 580 - 581 - ring->private = NULL; 618 + drm_gem_object_unreference(&ring->scratch.obj->base); 619 + ring->scratch.obj = NULL; 582 620 } 583 621 584 622 static void ··· 704 742 static int 705 743 pc_render_add_request(struct intel_ring_buffer *ring) 706 744 { 707 - struct pipe_control *pc = ring->private; 708 - u32 scratch_addr = pc->gtt_offset + 128; 745 + u32 scratch_addr = ring->scratch.gtt_offset + 128; 709 746 int ret; 710 747 711 748 /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently ··· 722 761 intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | 723 762 PIPE_CONTROL_WRITE_FLUSH | 724 763 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); 725 - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 764 + intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 726 765 intel_ring_emit(ring, ring->outstanding_lazy_request); 727 766 intel_ring_emit(ring, 0); 728 767 PIPE_CONTROL_FLUSH(ring, scratch_addr); ··· 741 780 PIPE_CONTROL_WRITE_FLUSH | 742 781 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | 743 782 PIPE_CONTROL_NOTIFY); 744 - intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 783 + intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); 745 784 intel_ring_emit(ring, ring->outstanding_lazy_request); 746 785 intel_ring_emit(ring, 0); 747 786 intel_ring_advance(ring); ··· 775 814 static u32 776 815 pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) 777 816 { 778 - struct pipe_control *pc = ring->private; 779 - return pc->cpu_page[0]; 817 + return ring->scratch.cpu_page[0]; 780 818 } 781 819 782 820 static void 783 821 pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) 784 822 { 785 - struct pipe_control *pc = ring->private; 786 - pc->cpu_page[0] = seqno; 823 + ring->scratch.cpu_page[0] = seqno; 787 824 } 788 825 789 826 static bool ··· 1100 1141 intel_ring_emit(ring, MI_NOOP); 1101 1142 intel_ring_advance(ring); 1102 1143 } else { 1103 - struct drm_i915_gem_object *obj = ring->private; 1104 - u32 cs_offset = i915_gem_obj_ggtt_offset(obj); 1144 + u32 cs_offset = ring->scratch.gtt_offset; 1105 1145 1106 1146 if (len > I830_BATCH_LIMIT) 1107 1147 return -ENOSPC; ··· 1793 1835 return ret; 1794 1836 } 1795 1837 1796 - ring->private = obj; 1838 + ring->scratch.obj = obj; 1839 + ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj); 1797 1840 } 1798 1841 1799 1842 return intel_init_ring_buffer(dev, ring);
+5 -1
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 155 155 156 156 struct intel_ring_hangcheck hangcheck; 157 157 158 - void *private; 158 + struct { 159 + struct drm_i915_gem_object *obj; 160 + u32 gtt_offset; 161 + volatile u32 *cpu_page; 162 + } scratch; 159 163 }; 160 164 161 165 static inline bool
+9 -8
drivers/gpu/drm/i915/intel_sdvo.c
··· 1151 1151 { 1152 1152 struct drm_device *dev = intel_encoder->base.dev; 1153 1153 struct drm_i915_private *dev_priv = dev->dev_private; 1154 - struct drm_crtc *crtc = intel_encoder->base.crtc; 1155 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1154 + struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc); 1156 1155 struct drm_display_mode *adjusted_mode = 1157 - &intel_crtc->config.adjusted_mode; 1158 - struct drm_display_mode *mode = &intel_crtc->config.requested_mode; 1156 + &crtc->config.adjusted_mode; 1157 + struct drm_display_mode *mode = &crtc->config.requested_mode; 1159 1158 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); 1160 1159 u32 sdvox; 1161 1160 struct intel_sdvo_in_out_map in_out; ··· 1212 1213 * adjusted_mode. 1213 1214 */ 1214 1215 intel_sdvo_get_dtd_from_mode(&input_dtd, adjusted_mode); 1216 + input_dtd.part1.clock /= crtc->config.pixel_multiplier; 1217 + 1215 1218 if (intel_sdvo->is_tv || intel_sdvo->is_lvds) 1216 1219 input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags; 1217 1220 if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd)) 1218 1221 DRM_INFO("Setting input timings on %s failed\n", 1219 1222 SDVO_NAME(intel_sdvo)); 1220 1223 1221 - switch (intel_crtc->config.pixel_multiplier) { 1224 + switch (crtc->config.pixel_multiplier) { 1222 1225 default: 1223 1226 WARN(1, "unknown pixel mutlipler specified\n"); 1224 1227 case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; ··· 1253 1252 } 1254 1253 1255 1254 if (INTEL_PCH_TYPE(dev) >= PCH_CPT) 1256 - sdvox |= SDVO_PIPE_SEL_CPT(intel_crtc->pipe); 1255 + sdvox |= SDVO_PIPE_SEL_CPT(crtc->pipe); 1257 1256 else 1258 - sdvox |= SDVO_PIPE_SEL(intel_crtc->pipe); 1257 + sdvox |= SDVO_PIPE_SEL(crtc->pipe); 1259 1258 1260 1259 if (intel_sdvo->has_hdmi_audio) 1261 1260 sdvox |= SDVO_AUDIO_ENABLE; ··· 1265 1264 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) { 1266 1265 /* done in crtc_mode_set as it lives inside the dpll register */ 1267 1266 } else { 1268 - sdvox |= (intel_crtc->config.pixel_multiplier - 1) 1267 + sdvox |= (crtc->config.pixel_multiplier - 1) 1269 1268 << SDVO_PORT_MULTIPLY_SHIFT; 1270 1269 } 1271 1270
+5 -2
drivers/gpu/drm/i915/intel_sprite.c
··· 260 260 if (obj->tiling_mode != I915_TILING_NONE) 261 261 sprctl |= SPRITE_TILED; 262 262 263 - /* must disable */ 264 - sprctl |= SPRITE_TRICKLE_FEED_DISABLE; 263 + if (IS_HASWELL(dev)) 264 + sprctl &= ~SPRITE_TRICKLE_FEED_DISABLE; 265 + else 266 + sprctl |= SPRITE_TRICKLE_FEED_DISABLE; 267 + 265 268 sprctl |= SPRITE_ENABLE; 266 269 267 270 if (IS_HASWELL(dev))
+8 -1
drivers/gpu/drm/i915/intel_uncore.c
··· 261 261 } 262 262 } 263 263 264 - void intel_uncore_sanitize(struct drm_device *dev) 264 + static void intel_uncore_forcewake_reset(struct drm_device *dev) 265 265 { 266 266 struct drm_i915_private *dev_priv = dev->dev_private; 267 267 ··· 272 272 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) 273 273 __gen6_gt_force_wake_mt_reset(dev_priv); 274 274 } 275 + } 276 + 277 + void intel_uncore_sanitize(struct drm_device *dev) 278 + { 279 + intel_uncore_forcewake_reset(dev); 275 280 276 281 /* BIOS often leaves RC6 enabled, but disable it for hw init */ 277 282 intel_disable_gt_powersave(dev); ··· 553 548 554 549 /* Spin waiting for the device to ack the reset request */ 555 550 ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500); 551 + 552 + intel_uncore_forcewake_reset(dev); 556 553 557 554 /* If reset with a user forcewake, try to restore, otherwise turn it off */ 558 555 if (dev_priv->uncore.forcewake_count)
+22 -29
drivers/gpu/vga/vgaarb.c
··· 257 257 if (!conflict->bridge_has_one_vga) { 258 258 vga_irq_set_state(conflict, false); 259 259 flags |= PCI_VGA_STATE_CHANGE_DECODES; 260 - if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) 260 + if (match & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM)) 261 261 pci_bits |= PCI_COMMAND_MEMORY; 262 - if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) 262 + if (match & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO)) 263 263 pci_bits |= PCI_COMMAND_IO; 264 264 } 265 265 ··· 267 267 flags |= PCI_VGA_STATE_CHANGE_BRIDGE; 268 268 269 269 pci_set_vga_state(conflict->pdev, false, pci_bits, flags); 270 - conflict->owns &= ~lwants; 270 + conflict->owns &= ~match; 271 271 /* If he also owned non-legacy, that is no longer the case */ 272 - if (lwants & VGA_RSRC_LEGACY_MEM) 272 + if (match & VGA_RSRC_LEGACY_MEM) 273 273 conflict->owns &= ~VGA_RSRC_NORMAL_MEM; 274 - if (lwants & VGA_RSRC_LEGACY_IO) 274 + if (match & VGA_RSRC_LEGACY_IO) 275 275 conflict->owns &= ~VGA_RSRC_NORMAL_IO; 276 276 } 277 277 ··· 644 644 static inline void vga_update_device_decodes(struct vga_device *vgadev, 645 645 int new_decodes) 646 646 { 647 - int old_decodes; 648 - struct vga_device *new_vgadev, *conflict; 647 + int old_decodes, decodes_removed, decodes_unlocked; 649 648 650 649 old_decodes = vgadev->decodes; 650 + decodes_removed = ~new_decodes & old_decodes; 651 + decodes_unlocked = vgadev->locks & decodes_removed; 652 + vgadev->owns &= ~decodes_removed; 651 653 vgadev->decodes = new_decodes; 652 654 653 655 pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n", ··· 658 656 vga_iostate_to_str(vgadev->decodes), 659 657 vga_iostate_to_str(vgadev->owns)); 660 658 661 - 662 - /* if we own the decodes we should move them along to 663 - another card */ 664 - if ((vgadev->owns & old_decodes) && (vga_count > 1)) { 665 - /* set us to own nothing */ 666 - vgadev->owns &= ~old_decodes; 667 - list_for_each_entry(new_vgadev, &vga_list, list) { 668 - if ((new_vgadev != vgadev) && 669 - (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) { 670 - pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev)); 671 - conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK); 672 - if (!conflict) 673 - __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK); 674 - break; 675 - } 676 - } 659 + /* if we removed locked decodes, lock count goes to zero, and release */ 660 + if (decodes_unlocked) { 661 + if (decodes_unlocked & VGA_RSRC_LEGACY_IO) 662 + vgadev->io_lock_cnt = 0; 663 + if (decodes_unlocked & VGA_RSRC_LEGACY_MEM) 664 + vgadev->mem_lock_cnt = 0; 665 + __vga_put(vgadev, decodes_unlocked); 677 666 } 678 667 679 668 /* change decodes counter */ 680 - if (old_decodes != new_decodes) { 681 - if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM)) 682 - vga_decode_count++; 683 - else 684 - vga_decode_count--; 685 - } 669 + if (old_decodes & VGA_RSRC_LEGACY_MASK && 670 + !(new_decodes & VGA_RSRC_LEGACY_MASK)) 671 + vga_decode_count--; 672 + if (!(old_decodes & VGA_RSRC_LEGACY_MASK) && 673 + new_decodes & VGA_RSRC_LEGACY_MASK) 674 + vga_decode_count++; 686 675 pr_debug("vgaarb: decoding count now is: %d\n", vga_decode_count); 687 676 } 688 677
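
The counter logic at the end of this hunk now reacts only to transitions
across the legacy mask, since vga_decode_count tracks how many devices decode
the legacy VGA ranges at all, not how often their decode flags change. In
table form:

    old & LEGACY_MASK    new & LEGACY_MASK    vga_decode_count
    set                  set                  unchanged
    set                  clear                decremented
    clear                set                  incremented
    clear                clear                unchanged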
+34
include/drm/i915_drm.h
··· 26 26 #ifndef _I915_DRM_H_
27 27 #define _I915_DRM_H_
28 28
29 + #include <drm/i915_pciids.h>
29 30 #include <uapi/drm/i915_drm.h>
30 31
31 32 /* For use by IPS driver */
··· 35 34 extern bool i915_gpu_lower(void);
36 35 extern bool i915_gpu_busy(void);
37 36 extern bool i915_gpu_turbo_disable(void);
37 +
38 + /*
39 + * The Bridge device's PCI config space has information about the
40 + * fb aperture size and the amount of pre-reserved memory.
41 + * This is all handled in the intel-gtt.ko module. i915.ko only
42 + * cares about the vga bit for the vga arbiter.
43 + */
44 + #define INTEL_GMCH_CTRL 0x52
45 + #define INTEL_GMCH_VGA_DISABLE (1 << 1)
46 + #define SNB_GMCH_CTRL 0x50
47 + #define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
48 + #define SNB_GMCH_GGMS_MASK 0x3
49 + #define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
50 + #define SNB_GMCH_GMS_MASK 0x1f
51 +
52 + #define I830_GMCH_CTRL 0x52
53 +
54 + #define I855_GMCH_GMS_MASK 0xF0
55 + #define I855_GMCH_GMS_STOLEN_0M 0x0
56 + #define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
57 + #define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
58 + #define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
59 + #define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
60 + #define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
61 + #define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
62 + #define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
63 + #define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
64 + #define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
65 + #define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
66 + #define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
67 + #define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
68 + #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
69 +
38 70 #endif /* _I915_DRM_H_ */
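
With these SNB_GMCH_* definitions the gen6+ stolen size falls straight out of the host bridge's config space: the GMS field counts stolen memory in 32MB units on these parts. A hedged sketch of the decode, where example_gen6_stolen_size() and bridge_pdev are illustrative names:

#include <linux/pci.h>
#include <drm/i915_drm.h>

static size_t example_gen6_stolen_size(struct pci_dev *bridge_pdev)
{
	u16 gmch_ctrl;

	/* bridge_pdev stands in for the host bridge at 00:00.0 */
	pci_read_config_word(bridge_pdev, SNB_GMCH_CTRL, &gmch_ctrl);

	/* GMS counts stolen memory in 32MB units, hence the << 25;
	 * e.g. a field value of 0x2 yields 2 * 32MB = 64MB */
	return ((gmch_ctrl >> SNB_GMCH_GMS_SHIFT) & SNB_GMCH_GMS_MASK) << 25;
}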
+211
include/drm/i915_pciids.h
··· 1 + /* 2 + * Copyright 2013 Intel Corporation 3 + * All Rights Reserved. 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining a 6 + * copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * The above copyright notice and this permission notice (including the 14 + * next paragraph) shall be included in all copies or substantial portions 15 + * of the Software. 16 + * 17 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 18 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 19 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 20 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 21 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 22 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 23 + * DEALINGS IN THE SOFTWARE. 24 + */ 25 + #ifndef _I915_PCIIDS_H 26 + #define _I915_PCIIDS_H 27 + 28 + /* 29 + * A pci_device_id struct { 30 + * __u32 vendor, device; 31 + * __u32 subvendor, subdevice; 32 + * __u32 class, class_mask; 33 + * kernel_ulong_t driver_data; 34 + * }; 35 + * Don't use C99 here because "class" is reserved and we want to 36 + * give userspace flexibility. 37 + */ 38 + #define INTEL_VGA_DEVICE(id, info) { \ 39 + 0x8086, id, \ 40 + ~0, ~0, \ 41 + 0x030000, 0xff0000, \ 42 + (unsigned long) info } 43 + 44 + #define INTEL_QUANTA_VGA_DEVICE(info) { \ 45 + 0x8086, 0x16a, \ 46 + 0x152d, 0x8990, \ 47 + 0x030000, 0xff0000, \ 48 + (unsigned long) info } 49 + 50 + #define INTEL_I830_IDS(info) \ 51 + INTEL_VGA_DEVICE(0x3577, info) 52 + 53 + #define INTEL_I845G_IDS(info) \ 54 + INTEL_VGA_DEVICE(0x2562, info) 55 + 56 + #define INTEL_I85X_IDS(info) \ 57 + INTEL_VGA_DEVICE(0x3582, info), /* I855_GM */ \ 58 + INTEL_VGA_DEVICE(0x358e, info) 59 + 60 + #define INTEL_I865G_IDS(info) \ 61 + INTEL_VGA_DEVICE(0x2572, info) /* I865_G */ 62 + 63 + #define INTEL_I915G_IDS(info) \ 64 + INTEL_VGA_DEVICE(0x2582, info), /* I915_G */ \ 65 + INTEL_VGA_DEVICE(0x258a, info) /* E7221_G */ 66 + 67 + #define INTEL_I915GM_IDS(info) \ 68 + INTEL_VGA_DEVICE(0x2592, info) /* I915_GM */ 69 + 70 + #define INTEL_I945G_IDS(info) \ 71 + INTEL_VGA_DEVICE(0x2772, info) /* I945_G */ 72 + 73 + #define INTEL_I945GM_IDS(info) \ 74 + INTEL_VGA_DEVICE(0x27a2, info), /* I945_GM */ \ 75 + INTEL_VGA_DEVICE(0x27ae, info) /* I945_GME */ 76 + 77 + #define INTEL_I965G_IDS(info) \ 78 + INTEL_VGA_DEVICE(0x2972, info), /* I946_GZ */ \ 79 + INTEL_VGA_DEVICE(0x2982, info), /* G35_G */ \ 80 + INTEL_VGA_DEVICE(0x2992, info), /* I965_Q */ \ 81 + INTEL_VGA_DEVICE(0x29a2, info) /* I965_G */ 82 + 83 + #define INTEL_G33_IDS(info) \ 84 + INTEL_VGA_DEVICE(0x29b2, info), /* Q35_G */ \ 85 + INTEL_VGA_DEVICE(0x29c2, info), /* G33_G */ \ 86 + INTEL_VGA_DEVICE(0x29d2, info) /* Q33_G */ 87 + 88 + #define INTEL_I965GM_IDS(info) \ 89 + INTEL_VGA_DEVICE(0x2a02, info), /* I965_GM */ \ 90 + INTEL_VGA_DEVICE(0x2a12, info) /* I965_GME */ 91 + 92 + #define INTEL_GM45_IDS(info) \ 93 + INTEL_VGA_DEVICE(0x2a42, info) /* GM45_G */ 94 + 95 + #define INTEL_G45_IDS(info) \ 96 + INTEL_VGA_DEVICE(0x2e02, info), /* IGD_E_G */ \ 97 + 
INTEL_VGA_DEVICE(0x2e12, info), /* Q45_G */ \ 98 + INTEL_VGA_DEVICE(0x2e22, info), /* G45_G */ \ 99 + INTEL_VGA_DEVICE(0x2e32, info), /* G41_G */ \ 100 + INTEL_VGA_DEVICE(0x2e42, info), /* B43_G */ \ 101 + INTEL_VGA_DEVICE(0x2e92, info) /* B43_G.1 */ 102 + 103 + #define INTEL_PINEVIEW_IDS(info) \ 104 + INTEL_VGA_DEVICE(0xa001, info), \ 105 + INTEL_VGA_DEVICE(0xa011, info) 106 + 107 + #define INTEL_IRONLAKE_D_IDS(info) \ 108 + INTEL_VGA_DEVICE(0x0042, info) 109 + 110 + #define INTEL_IRONLAKE_M_IDS(info) \ 111 + INTEL_VGA_DEVICE(0x0046, info) 112 + 113 + #define INTEL_SNB_D_IDS(info) \ 114 + INTEL_VGA_DEVICE(0x0102, info), \ 115 + INTEL_VGA_DEVICE(0x0112, info), \ 116 + INTEL_VGA_DEVICE(0x0122, info), \ 117 + INTEL_VGA_DEVICE(0x010A, info) 118 + 119 + #define INTEL_SNB_M_IDS(info) \ 120 + INTEL_VGA_DEVICE(0x0106, info), \ 121 + INTEL_VGA_DEVICE(0x0116, info), \ 122 + INTEL_VGA_DEVICE(0x0126, info) 123 + 124 + #define INTEL_IVB_M_IDS(info) \ 125 + INTEL_VGA_DEVICE(0x0156, info), /* GT1 mobile */ \ 126 + INTEL_VGA_DEVICE(0x0166, info) /* GT2 mobile */ 127 + 128 + #define INTEL_IVB_D_IDS(info) \ 129 + INTEL_VGA_DEVICE(0x0152, info), /* GT1 desktop */ \ 130 + INTEL_VGA_DEVICE(0x0162, info), /* GT2 desktop */ \ 131 + INTEL_VGA_DEVICE(0x015a, info), /* GT1 server */ \ 132 + INTEL_VGA_DEVICE(0x016a, info) /* GT2 server */ 133 + 134 + #define INTEL_IVB_Q_IDS(info) \ 135 + INTEL_QUANTA_VGA_DEVICE(info) /* Quanta transcode */ 136 + 137 + #define INTEL_HSW_D_IDS(info) \ 138 + INTEL_VGA_DEVICE(0x0402, info), /* GT1 desktop */ \ 139 + INTEL_VGA_DEVICE(0x0412, info), /* GT2 desktop */ \ 140 + INTEL_VGA_DEVICE(0x0422, info), /* GT3 desktop */ \ 141 + INTEL_VGA_DEVICE(0x040a, info), /* GT1 server */ \ 142 + INTEL_VGA_DEVICE(0x041a, info), /* GT2 server */ \ 143 + INTEL_VGA_DEVICE(0x042a, info), /* GT3 server */ \ 144 + INTEL_VGA_DEVICE(0x040B, info), /* GT1 reserved */ \ 145 + INTEL_VGA_DEVICE(0x041B, info), /* GT2 reserved */ \ 146 + INTEL_VGA_DEVICE(0x042B, info), /* GT3 reserved */ \ 147 + INTEL_VGA_DEVICE(0x040E, info), /* GT1 reserved */ \ 148 + INTEL_VGA_DEVICE(0x041E, info), /* GT2 reserved */ \ 149 + INTEL_VGA_DEVICE(0x042E, info), /* GT3 reserved */ \ 150 + INTEL_VGA_DEVICE(0x0C02, info), /* SDV GT1 desktop */ \ 151 + INTEL_VGA_DEVICE(0x0C12, info), /* SDV GT2 desktop */ \ 152 + INTEL_VGA_DEVICE(0x0C22, info), /* SDV GT3 desktop */ \ 153 + INTEL_VGA_DEVICE(0x0C0A, info), /* SDV GT1 server */ \ 154 + INTEL_VGA_DEVICE(0x0C1A, info), /* SDV GT2 server */ \ 155 + INTEL_VGA_DEVICE(0x0C2A, info), /* SDV GT3 server */ \ 156 + INTEL_VGA_DEVICE(0x0C0B, info), /* SDV GT1 reserved */ \ 157 + INTEL_VGA_DEVICE(0x0C1B, info), /* SDV GT2 reserved */ \ 158 + INTEL_VGA_DEVICE(0x0C2B, info), /* SDV GT3 reserved */ \ 159 + INTEL_VGA_DEVICE(0x0C0E, info), /* SDV GT1 reserved */ \ 160 + INTEL_VGA_DEVICE(0x0C1E, info), /* SDV GT2 reserved */ \ 161 + INTEL_VGA_DEVICE(0x0C2E, info), /* SDV GT3 reserved */ \ 162 + INTEL_VGA_DEVICE(0x0A02, info), /* ULT GT1 desktop */ \ 163 + INTEL_VGA_DEVICE(0x0A12, info), /* ULT GT2 desktop */ \ 164 + INTEL_VGA_DEVICE(0x0A22, info), /* ULT GT3 desktop */ \ 165 + INTEL_VGA_DEVICE(0x0A0A, info), /* ULT GT1 server */ \ 166 + INTEL_VGA_DEVICE(0x0A1A, info), /* ULT GT2 server */ \ 167 + INTEL_VGA_DEVICE(0x0A2A, info), /* ULT GT3 server */ \ 168 + INTEL_VGA_DEVICE(0x0A0B, info), /* ULT GT1 reserved */ \ 169 + INTEL_VGA_DEVICE(0x0A1B, info), /* ULT GT2 reserved */ \ 170 + INTEL_VGA_DEVICE(0x0A2B, info), /* ULT GT3 reserved */ \ 171 + INTEL_VGA_DEVICE(0x0D02, info), /* CRW GT1 desktop */ \ 172 + 
INTEL_VGA_DEVICE(0x0D12, info), /* CRW GT2 desktop */ \
173 + INTEL_VGA_DEVICE(0x0D22, info), /* CRW GT3 desktop */ \
174 + INTEL_VGA_DEVICE(0x0D0A, info), /* CRW GT1 server */ \
175 + INTEL_VGA_DEVICE(0x0D1A, info), /* CRW GT2 server */ \
176 + INTEL_VGA_DEVICE(0x0D2A, info), /* CRW GT3 server */ \
177 + INTEL_VGA_DEVICE(0x0D0B, info), /* CRW GT1 reserved */ \
178 + INTEL_VGA_DEVICE(0x0D1B, info), /* CRW GT2 reserved */ \
179 + INTEL_VGA_DEVICE(0x0D2B, info), /* CRW GT3 reserved */ \
180 + INTEL_VGA_DEVICE(0x0D0E, info), /* CRW GT1 reserved */ \
181 + INTEL_VGA_DEVICE(0x0D1E, info), /* CRW GT2 reserved */ \
182 + INTEL_VGA_DEVICE(0x0D2E, info) /* CRW GT3 reserved */
183 +
184 + #define INTEL_HSW_M_IDS(info) \
185 + INTEL_VGA_DEVICE(0x0406, info), /* GT1 mobile */ \
186 + INTEL_VGA_DEVICE(0x0416, info), /* GT2 mobile */ \
187 + INTEL_VGA_DEVICE(0x0426, info), /* GT3 mobile */ \
188 + INTEL_VGA_DEVICE(0x0C06, info), /* SDV GT1 mobile */ \
189 + INTEL_VGA_DEVICE(0x0C16, info), /* SDV GT2 mobile */ \
190 + INTEL_VGA_DEVICE(0x0C26, info), /* SDV GT3 mobile */ \
191 + INTEL_VGA_DEVICE(0x0A06, info), /* ULT GT1 mobile */ \
192 + INTEL_VGA_DEVICE(0x0A16, info), /* ULT GT2 mobile */ \
193 + INTEL_VGA_DEVICE(0x0A26, info), /* ULT GT3 mobile */ \
194 + INTEL_VGA_DEVICE(0x0A0E, info), /* ULT GT1 reserved */ \
195 + INTEL_VGA_DEVICE(0x0A1E, info), /* ULT GT2 reserved */ \
196 + INTEL_VGA_DEVICE(0x0A2E, info), /* ULT GT3 reserved */ \
197 + INTEL_VGA_DEVICE(0x0D06, info), /* CRW GT1 mobile */ \
198 + INTEL_VGA_DEVICE(0x0D16, info), /* CRW GT2 mobile */ \
199 + INTEL_VGA_DEVICE(0x0D26, info) /* CRW GT3 mobile */
200 +
201 + #define INTEL_VLV_M_IDS(info) \
202 + INTEL_VGA_DEVICE(0x0f30, info), \
203 + INTEL_VGA_DEVICE(0x0f31, info), \
204 + INTEL_VGA_DEVICE(0x0f32, info), \
205 + INTEL_VGA_DEVICE(0x0f33, info), \
206 + INTEL_VGA_DEVICE(0x0157, info)
207 +
208 + #define INTEL_VLV_D_IDS(info) \
209 + INTEL_VGA_DEVICE(0x0155, info)
210 +
211 + #endif /* _I915_PCIIDS_H */
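
Each INTEL_*_IDS(info) macro expands to a comma-separated run of pci_device_id initializers with info cast into driver_data, which is what lets i915 and the early quirk share a single list. A hypothetical consumer looks like this; all example_ names are made up for illustration:

#include <linux/pci.h>
#include <drm/i915_pciids.h>

/* driver_data carries whatever pointer the caller hands in; a bare
 * int is enough to show the plumbing here */
static const int example_snb_gen = 6;

static const struct pci_device_id example_ids[] = {
	INTEL_SNB_D_IDS(&example_snb_gen),
	INTEL_SNB_M_IDS(&example_snb_gen),
	{ } /* zeroed sentinel terminates the table */
};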
+7
include/linux/vgaarb.h
··· 65 65 * out of the arbitration process (and it is then safe to take
66 66 * interrupts at any time).
67 67 */
68 + #if defined(CONFIG_VGA_ARB)
68 69 extern void vga_set_legacy_decoding(struct pci_dev *pdev,
69 70 unsigned int decodes);
71 + #else
72 + static inline void vga_set_legacy_decoding(struct pci_dev *pdev,
73 + unsigned int decodes)
74 + {
75 + }
76 + #endif
70 77
71 78 /**
72 79 * vga_get - acquire & lock VGA resources
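
The new !CONFIG_VGA_ARB stub means callers can drop their own ifdeffery and call the function unconditionally. A usage sketch with illustrative names:

#include <linux/vgaarb.h>

/* Sketch: a gfx driver that has disabled its legacy VGA decoding tells
 * the arbiter so, with no #ifdef at the call site; under
 * CONFIG_VGA_ARB=n this resolves to the empty inline above. */
static void example_disable_vga_decode(struct pci_dev *pdev)
{
	vga_set_legacy_decoding(pdev, VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM);
}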