Merge branch 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel

* 'drm-intel-next' of git://git.kernel.org/pub/scm/linux/kernel/git/anholt/drm-intel: (23 commits)
drm/i915: remove full registers dump debug
drm/i915: Add DP dpll limit on ironlake and use existing DPLL search function
drm/i915: Select the correct BPC for LVDS on Ironlake
drm/i915: Make the BPC in FDI rx/transcoder be consistent with that in pipeconf on Ironlake
drm/i915: Enable/disable the dithering for LVDS based on VBT setting
drm/i915: Permit pinning whilst the device is 'suspended'
drm/i915: Hold struct mutex whilst pinning power context bo.
drm/i915: fix unused var
drm/i915: Storage class should be before const qualifier
drm/i915: remove render reclock support
drm/i915: Fix RC6 suspend/resume
drm/i915: execbuf2 support
drm/i915: Reload hangcheck timer too for Ironlake
drm/i915: only enable hotplug for detected outputs
drm/i915: Track whether cursor needs physical address in intel_device_info
drm/i915: Implement IS_* macros using static tables
drm/i915: Move PCI IDs into i915 driver
drm/i915: Update LVDS connector status when receiving ACPI LID event
drm/i915: Add MALATA PC-81005 to ACPI LID quirk list
drm/i915: implement new pm ops for i915
...

+737 -538
+1 -30
drivers/gpu/drm/i915/i915_debugfs.c
···
	mem = kmap_atomic(pages[page], KM_USER0);
	for (i = 0; i < PAGE_SIZE; i += 4)
		seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
-	kunmap_atomic(pages[page], KM_USER0);
+	kunmap_atomic(mem, KM_USER0);
	}
}
···
	return 0;
}

- static int i915_registers_info(struct seq_file *m, void *data) {
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	uint32_t reg;
-
- #define DUMP_RANGE(start, end) \
-	for (reg=start; reg < end; reg += 4) \
-		seq_printf(m, "%08x\t%08x\n", reg, I915_READ(reg));
-
-	DUMP_RANGE(0x00000, 0x00fff);   /* VGA registers */
-	DUMP_RANGE(0x02000, 0x02fff);   /* instruction, memory, interrupt control registers */
-	DUMP_RANGE(0x03000, 0x031ff);   /* FENCE and PPGTT control registers */
-	DUMP_RANGE(0x03200, 0x03fff);   /* frame buffer compression registers */
-	DUMP_RANGE(0x05000, 0x05fff);   /* I/O control registers */
-	DUMP_RANGE(0x06000, 0x06fff);   /* clock control registers */
-	DUMP_RANGE(0x07000, 0x07fff);   /* 3D internal debug registers */
-	DUMP_RANGE(0x07400, 0x088ff);   /* GPE debug registers */
-	DUMP_RANGE(0x0a000, 0x0afff);   /* display palette registers */
-	DUMP_RANGE(0x10000, 0x13fff);   /* MMIO MCHBAR */
-	DUMP_RANGE(0x30000, 0x3ffff);   /* overlay registers */
-	DUMP_RANGE(0x60000, 0x6ffff);   /* display engine pipeline registers */
-	DUMP_RANGE(0x70000, 0x72fff);   /* display and cursor registers */
-	DUMP_RANGE(0x73000, 0x73fff);   /* performance counters */
-
-	return 0;
- }
-
static int
i915_wedged_open(struct inode *inode,
		 struct file *filp)
···
}

static struct drm_info_list i915_debugfs_list[] = {
-	{"i915_regs", i915_registers_info, 0},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
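The kunmap_atomic() fix above is worth calling out: the function takes the kernel virtual address returned by kmap_atomic(), not the struct page that was mapped. A minimal sketch of the correct pairing, using the KM_USER0 slot API of this era (dump_page_words() is a hypothetical helper, not from the patch):

    #include <linux/highmem.h>
    #include <linux/seq_file.h>

    /* Sketch: kunmap_atomic() wants the mapping, not the page. */
    static void dump_page_words(struct seq_file *m, struct page *page)
    {
        u32 *mem = kmap_atomic(page, KM_USER0); /* returns a kernel va */
        int i;

        for (i = 0; i < PAGE_SIZE; i += 4)
            seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
        kunmap_atomic(mem, KM_USER0);           /* pass the va back */
    }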
+13 -13
drivers/gpu/drm/i915/i915_dma.c
···
	case I915_PARAM_HAS_PAGEFLIPPING:
		value = 1;
		break;
+	case I915_PARAM_HAS_EXECBUF2:
+		/* depends on GEM */
+		value = dev_priv->has_gem;
+		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
-				 param->param);
+					param->param);
		return -EINVAL;
	}
···
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *compressed_llb;
-	unsigned long cfb_base, ll_base;
+	unsigned long cfb_base;
+	unsigned long ll_base = 0;

	/* Leave 1M for line length buffer & misc. */
	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
···
	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
		0xff000000;

-	if (IS_MOBILE(dev) || IS_I9XX(dev))
-		dev_priv->cursor_needs_physical = true;
-	else
-		dev_priv->cursor_needs_physical = false;
-
-	if (IS_I965G(dev) || IS_G33(dev))
-		dev_priv->cursor_needs_physical = false;
-
	/* Basic memrange allocator for stolen space (aka vram) */
	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
···
	if (ret)
		goto destroy_ringbuffer;

+	intel_modeset_init(dev);
+
	ret = drm_irq_install(dev);
	if (ret)
		goto destroy_ringbuffer;
···
	 */

	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
-
-	intel_modeset_init(dev);

	drm_helper_initial_config(dev);
···
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	resource_size_t base, size;
-	int ret = 0, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+	int ret = 0, mmio_bar;
	uint32_t agp_size, prealloc_size, prealloc_start;

	/* i915 has 4 more counters */
···
	dev->dev_private = (void *)dev_priv;
	dev_priv->dev = dev;
+	dev_priv->info = (struct intel_device_info *) flags;

	/* Add register map (needed for suspend/resume) */
+	mmio_bar = IS_I9XX(dev) ? 0 : 1;
	base = drm_get_resource_start(dev, mmio_bar);
	size = drm_get_resource_len(dev, mmio_bar);
···
	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER2, i915_gem_execbuffer2, DRM_AUTH),
	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
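Userspace can use the new getparam to detect execbuf2 support before relying on the ioctl. A hedged sketch against the updated i915_drm.h (the helper name is illustrative; fd is an open DRM device node):

    #include <sys/ioctl.h>
    #include "i915_drm.h"

    /* Sketch: returns nonzero when the kernel accepts execbuf2. */
    static int i915_has_execbuf2(int fd)
    {
        struct drm_i915_getparam gp;
        int value = 0;

        gp.param = I915_PARAM_HAS_EXECBUF2;
        gp.value = &value;
        if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
            return 0;   /* older kernel: parameter unknown */
        return value;   /* reports dev_priv->has_gem, per the hunk above */
    }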
+165 -9
drivers/gpu/drm/i915/i915_drv.c
···
#include "i915_drm.h"
#include "i915_drv.h"

- #include "drm_pciids.h"
#include <linux/console.h>
#include "drm_crtc_helper.h"
···

static struct drm_driver driver;

- static struct pci_device_id pciidlist[] = {
-	i915_PCI_IDS
+ #define INTEL_VGA_DEVICE(id, info) {		\
+	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
+	.class_mask = 0xffff00,			\
+	.vendor = 0x8086,			\
+	.device = id,				\
+	.subvendor = PCI_ANY_ID,		\
+	.subdevice = PCI_ANY_ID,		\
+	.driver_data = (unsigned long) info }
+
+ const static struct intel_device_info intel_i830_info = {
+	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ };
+
+ const static struct intel_device_info intel_845g_info = {
+	.is_i8xx = 1,
+ };
+
+ const static struct intel_device_info intel_i85x_info = {
+	.is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+ };
+
+ const static struct intel_device_info intel_i865g_info = {
+	.is_i8xx = 1,
+ };
+
+ const static struct intel_device_info intel_i915g_info = {
+	.is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+ };
+ const static struct intel_device_info intel_i915gm_info = {
+	.is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+	.cursor_needs_physical = 1,
+ };
+ const static struct intel_device_info intel_i945g_info = {
+	.is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+ };
+ const static struct intel_device_info intel_i945gm_info = {
+	.is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1, .has_fbc = 1,
+	.has_hotplug = 1, .cursor_needs_physical = 1,
+ };
+
+ const static struct intel_device_info intel_i965g_info = {
+	.is_i965g = 1, .is_i9xx = 1, .has_hotplug = 1,
+ };
+
+ const static struct intel_device_info intel_i965gm_info = {
+	.is_i965g = 1, .is_mobile = 1, .is_i965gm = 1, .is_i9xx = 1,
+	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1,
+	.has_hotplug = 1,
+ };
+
+ const static struct intel_device_info intel_g33_info = {
+	.is_g33 = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_hotplug = 1,
+ };
+
+ const static struct intel_device_info intel_g45_info = {
+	.is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+ };
+
+ const static struct intel_device_info intel_gm45_info = {
+	.is_i965g = 1, .is_mobile = 1, .is_g4x = 1, .is_i9xx = 1,
+	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+ };
+
+ const static struct intel_device_info intel_pineview_info = {
+	.is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+ };
+
+ const static struct intel_device_info intel_ironlake_d_info = {
+	.is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.has_pipe_cxsr = 1,
+	.has_hotplug = 1,
+ };
+
+ const static struct intel_device_info intel_ironlake_m_info = {
+	.is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+	.need_gfx_hws = 1, .has_rc6 = 1,
+	.has_hotplug = 1,
+ };
+
+ const static struct pci_device_id pciidlist[] = {
+	INTEL_VGA_DEVICE(0x3577, &intel_i830_info),
+	INTEL_VGA_DEVICE(0x2562, &intel_845g_info),
+	INTEL_VGA_DEVICE(0x3582, &intel_i85x_info),
+	INTEL_VGA_DEVICE(0x35e8, &intel_i85x_info),
+	INTEL_VGA_DEVICE(0x2572, &intel_i865g_info),
+	INTEL_VGA_DEVICE(0x2582, &intel_i915g_info),
+	INTEL_VGA_DEVICE(0x258a, &intel_i915g_info),
+	INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info),
+	INTEL_VGA_DEVICE(0x2772, &intel_i945g_info),
+	INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info),
+	INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info),
+	INTEL_VGA_DEVICE(0x2972, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x2982, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x2992, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info),
+	INTEL_VGA_DEVICE(0x29b2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x29c2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x29d2, &intel_g33_info),
+	INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info),
+	INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info),
+	INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info),
+	INTEL_VGA_DEVICE(0x2e02, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e12, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e22, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e32, &intel_g45_info),
+	INTEL_VGA_DEVICE(0x2e42, &intel_g45_info),
+	INTEL_VGA_DEVICE(0xa001, &intel_pineview_info),
+	INTEL_VGA_DEVICE(0xa011, &intel_pineview_info),
+	INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info),
+	INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info),
+	{0, 0, 0}
};

#if defined(CONFIG_DRM_I915_KMS)
···
	return i915_resume(dev);
}

+ static int
+ i915_pm_suspend(struct device *dev)
+ {
+	return i915_pci_suspend(to_pci_dev(dev), PMSG_SUSPEND);
+ }
+
+ static int
+ i915_pm_resume(struct device *dev)
+ {
+	return i915_pci_resume(to_pci_dev(dev));
+ }
+
+ static int
+ i915_pm_freeze(struct device *dev)
+ {
+	return i915_pci_suspend(to_pci_dev(dev), PMSG_FREEZE);
+ }
+
+ static int
+ i915_pm_thaw(struct device *dev)
+ {
+	/* thaw during hibernate, do nothing! */
+	return 0;
+ }
+
+ static int
+ i915_pm_poweroff(struct device *dev)
+ {
+	return i915_pci_suspend(to_pci_dev(dev), PMSG_HIBERNATE);
+ }
+
+ static int
+ i915_pm_restore(struct device *dev)
+ {
+	return i915_pci_resume(to_pci_dev(dev));
+ }
+
+ const struct dev_pm_ops i915_pm_ops = {
+	.suspend = i915_pm_suspend,
+	.resume = i915_pm_resume,
+	.freeze = i915_pm_freeze,
+	.thaw = i915_pm_thaw,
+	.poweroff = i915_pm_poweroff,
+	.restore = i915_pm_restore,
+ };
+
static struct vm_operations_struct i915_gem_vm_ops = {
	.fault = i915_gem_fault,
	.open = drm_gem_vm_open,
···
	.lastclose = i915_driver_lastclose,
	.preclose = i915_driver_preclose,
	.postclose = i915_driver_postclose,
-	.suspend = i915_suspend,
-	.resume = i915_resume,
	.device_is_agp = i915_driver_device_is_agp,
	.enable_vblank = i915_enable_vblank,
	.disable_vblank = i915_disable_vblank,
···
	.id_table = pciidlist,
	.probe = i915_pci_probe,
	.remove = i915_pci_remove,
- #ifdef CONFIG_PM
-	.resume = i915_pci_resume,
-	.suspend = i915_pci_suspend,
- #endif
+	.driver.pm = &i915_pm_ops,
	},

	.name = DRIVER_NAME,
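The INTEL_VGA_DEVICE() entries attach a pointer to the matching intel_device_info through pci_device_id.driver_data, which the load path then stores as dev_priv->info (the i915_dma.c hunk above: dev_priv->info = (struct intel_device_info *) flags). The general pattern, reduced to a sketch with invented names for illustration:

    #include <linux/pci.h>

    struct chip_info {
        unsigned int has_widget:1;  /* capability bit, not a PCI ID test */
    };

    static const struct chip_info chip_a_info = { .has_widget = 1 };

    static const struct pci_device_id ids[] = {
        { PCI_DEVICE(0x8086, 0x1234),
          .driver_data = (unsigned long) &chip_a_info },
        { }
    };

    static int my_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
        const struct chip_info *info =
            (const struct chip_info *) ent->driver_data;

        /* stash info in the driver private and test bits, not device IDs */
        return info->has_widget ? 0 : -ENODEV;
    }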
+54 -67
drivers/gpu/drm/i915/i915_drv.h
···

struct intel_overlay;

+ struct intel_device_info {
+	u8 is_mobile : 1;
+	u8 is_i8xx : 1;
+	u8 is_i915g : 1;
+	u8 is_i9xx : 1;
+	u8 is_i945gm : 1;
+	u8 is_i965g : 1;
+	u8 is_i965gm : 1;
+	u8 is_g33 : 1;
+	u8 need_gfx_hws : 1;
+	u8 is_g4x : 1;
+	u8 is_pineview : 1;
+	u8 is_ironlake : 1;
+	u8 has_fbc : 1;
+	u8 has_rc6 : 1;
+	u8 has_pipe_cxsr : 1;
+	u8 has_hotplug : 1;
+	u8 cursor_needs_physical : 1;
+ };
+
typedef struct drm_i915_private {
	struct drm_device *dev;
+
+	const struct intel_device_info *info;

	int has_gem;
···
	int hangcheck_count;
	uint32_t last_acthd;

-	bool cursor_needs_physical;
-
	struct drm_mm vram;

	unsigned long cfb_size;
···
	u32 saveDSPACNTR;
	u32 saveDSPBCNTR;
	u32 saveDSPARB;
-	u32 saveRENDERSTANDBY;
-	u32 savePWRCTXA;
	u32 saveHWS;
	u32 savePIPEACONF;
	u32 savePIPEBCONF;
···
	u16 orig_clock;
	int child_dev_num;
	struct child_device_config *child_dev;
+	struct drm_connector *int_lvds_connector;
} drm_i915_private_t;

/** driver private structure attached to each drm_gem_object */
···
			struct drm_file *file_priv);
int i915_gem_execbuffer(struct drm_device *dev, void *data,
			struct drm_file *file_priv);
+ int i915_gem_execbuffer2(struct drm_device *dev, void *data,
+			 struct drm_file *file_priv);
int i915_gem_pin_ioctl(struct drm_device *dev, void *data,
		       struct drm_file *file_priv);
int i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
···
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+ bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
+		    int tiling_mode);
+ bool i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj);

/* i915_gem_debug.c */
void i915_gem_dump_object(struct drm_gem_object *obj, int len,
···
extern int i915_wrap_ring(struct drm_device * dev);
extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);

- #define IS_I830(dev) ((dev)->pci_device == 0x3577)
- #define IS_845G(dev) ((dev)->pci_device == 0x2562)
- #define IS_I85X(dev) ((dev)->pci_device == 0x3582)
- #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
- #define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
-
- #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
- #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
- #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
- #define IS_I945GM(dev) ((dev)->pci_device == 0x27A2 ||\
-			(dev)->pci_device == 0x27AE)
- #define IS_I965G(dev) ((dev)->pci_device == 0x2972 || \
-		       (dev)->pci_device == 0x2982 || \
-		       (dev)->pci_device == 0x2992 || \
-		       (dev)->pci_device == 0x29A2 || \
-		       (dev)->pci_device == 0x2A02 || \
-		       (dev)->pci_device == 0x2A12 || \
-		       (dev)->pci_device == 0x2A42 || \
-		       (dev)->pci_device == 0x2E02 || \
-		       (dev)->pci_device == 0x2E12 || \
-		       (dev)->pci_device == 0x2E22 || \
-		       (dev)->pci_device == 0x2E32 || \
-		       (dev)->pci_device == 0x2E42 || \
-		       (dev)->pci_device == 0x0042 || \
-		       (dev)->pci_device == 0x0046)
-
- #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
-			(dev)->pci_device == 0x2A12)
-
- #define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-
- #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
-		     (dev)->pci_device == 0x2E12 || \
-		     (dev)->pci_device == 0x2E22 || \
-		     (dev)->pci_device == 0x2E32 || \
-		     (dev)->pci_device == 0x2E42 || \
-		     IS_GM45(dev))
-
- #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
- #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
- #define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
-
- #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 || \
-			(dev)->pci_device == 0x29B2 || \
-			(dev)->pci_device == 0x29D2 || \
-			(IS_PINEVIEW(dev)))
-
+ #define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+ #define IS_I830(dev) ((dev)->pci_device == 0x3577)
+ #define IS_845G(dev) ((dev)->pci_device == 0x2562)
+ #define IS_I85X(dev) ((dev)->pci_device == 0x3582)
+ #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+ #define IS_I8XX(dev) (INTEL_INFO(dev)->is_i8xx)
+ #define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+ #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+ #define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+ #define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+ #define IS_I965G(dev) (INTEL_INFO(dev)->is_i965g)
+ #define IS_I965GM(dev) (INTEL_INFO(dev)->is_i965gm)
+ #define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+ #define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+ #define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+ #define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+ #define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+ #define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
- #define IS_IRONLAKE(dev) (IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
-
- #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
-		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
-		      IS_IRONLAKE(dev))
-
- #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
-			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
-			IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
-
- #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
-				IS_IRONLAKE(dev))
+ #define IS_IRONLAKE(dev) (INTEL_INFO(dev)->is_ironlake)
+ #define IS_I9XX(dev) (INTEL_INFO(dev)->is_i9xx)
+ #define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+
+ #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
···
#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
#define SUPPORTS_TV(dev) (IS_I9XX(dev) && IS_MOBILE(dev) && \
			  !IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
- #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
+ #define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
/* dsparb controlled by hw only */
#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))

#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
- #define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
- #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
-			   (IS_I9XX(dev) || IS_GM45(dev)) && \
-			   !IS_PINEVIEW(dev) && \
-			   !IS_IRONLAKE(dev))
- #define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
+ #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+ #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+ #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)

#define PRIMARY_RINGBUFFER_SIZE	(128*1024)
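With the table in place, feature tests become single bitfield loads through INTEL_INFO(dev) instead of ever-growing PCI ID comparisons, and supporting a new device is one table entry in i915_drv.c rather than edits to every macro. An illustrative (hypothetical) caller:

    /* Sketch: I915_HAS_FBC(dev) now expands to INTEL_INFO(dev)->has_fbc,
     * i.e. one load from the per-device table. */
    static bool i915_can_enable_fbc(struct drm_device *dev)
    {
        return I915_HAS_FBC(dev);
    }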
+190 -59
drivers/gpu/drm/i915/i915_gem.c
···
	/* blow away mappings if mapped through GTT */
	i915_gem_release_mmap(obj);

-	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-		i915_gem_clear_fence_reg(obj);
-
	/* Move the object to the CPU domain to ensure that
	 * any possible CPU writes while it's not in the GTT
	 * are flushed when we go to remap it. This will
···
	}

	BUG_ON(obj_priv->active);
+
+	/* release the fence reg _after_ flushing */
+	if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+		i915_gem_clear_fence_reg(obj);

	if (obj_priv->agp_mem != NULL) {
		drm_unbind_agp(obj_priv->agp_mem);
···
	struct drm_mm_node *free_space;
	bool retry_alloc = false;
	int ret;
-
-	if (dev_priv->mm.suspended)
-		return -EBUSY;

	if (obj_priv->madv != I915_MADV_WILLNEED) {
		DRM_ERROR("Attempting to bind a purgeable object\n");
···
static int
i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
				 struct drm_file *file_priv,
-				 struct drm_i915_gem_exec_object *entry,
+				 struct drm_i915_gem_exec_object2 *entry,
				 struct drm_i915_gem_relocation_entry *relocs)
{
	struct drm_device *dev = obj->dev;
···
	struct drm_i915_gem_object *obj_priv = obj->driver_private;
	int i, ret;
	void __iomem *reloc_page;
+	bool need_fence;
+
+	need_fence = entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+		     obj_priv->tiling_mode != I915_TILING_NONE;
+
+	/* Check fence reg constraints and rebind if necessary */
+	if (need_fence && !i915_obj_fenceable(dev, obj))
+		i915_gem_object_unbind(obj);

	/* Choose the GTT offset for our buffer and put it there. */
	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
	if (ret)
		return ret;
+
+	/*
+	 * Pre-965 chips need a fence register set up in order to
+	 * properly handle blits to/from tiled surfaces.
+	 */
+	if (need_fence) {
+		ret = i915_gem_object_get_fence_reg(obj);
+		if (ret != 0) {
+			if (ret != -EBUSY && ret != -ERESTARTSYS)
+				DRM_ERROR("Failure to install fence: %d\n",
+					  ret);
+			i915_gem_object_unpin(obj);
+			return ret;
+		}
+	}

	entry->offset = obj_priv->gtt_offset;
···
 */
static int
i915_dispatch_gem_execbuffer(struct drm_device *dev,
-			     struct drm_i915_gem_execbuffer *exec,
+			     struct drm_i915_gem_execbuffer2 *exec,
			     struct drm_clip_rect *cliprects,
			     uint64_t exec_offset)
{
···
}

static int
- i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
+ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object2 *exec_list,
			      uint32_t buffer_count,
			      struct drm_i915_gem_relocation_entry **relocs)
{
···
	}

	*relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
-	if (*relocs == NULL)
+	if (*relocs == NULL) {
+		DRM_ERROR("failed to alloc relocs, count %d\n", reloc_count);
		return -ENOMEM;
+	}

	for (i = 0; i < buffer_count; i++) {
		struct drm_i915_gem_relocation_entry __user *user_relocs;
···
}

static int
- i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
+ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object2 *exec_list,
			    uint32_t buffer_count,
			    struct drm_i915_gem_relocation_entry *relocs)
{
···
}

static int
- i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
+ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec,
			   uint64_t exec_offset)
{
	uint32_t exec_start, exec_len;
···
}

int
- i915_gem_execbuffer(struct drm_device *dev, void *data,
-		    struct drm_file *file_priv)
+ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv,
+		       struct drm_i915_gem_execbuffer2 *args,
+		       struct drm_i915_gem_exec_object2 *exec_list)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_execbuffer *args = data;
-	struct drm_i915_gem_exec_object *exec_list = NULL;
	struct drm_gem_object **object_list = NULL;
	struct drm_gem_object *batch_obj;
	struct drm_i915_gem_object *obj_priv;
	struct drm_clip_rect *cliprects = NULL;
	struct drm_i915_gem_relocation_entry *relocs;
-	int ret, ret2, i, pinned = 0;
+	int ret = 0, ret2, i, pinned = 0;
	uint64_t exec_offset;
	uint32_t seqno, flush_domains, reloc_index;
	int pin_tries, flips;
···
		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
		return -EINVAL;
	}
-	/* Copy in the exec list from userland */
-	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
-	if (exec_list == NULL || object_list == NULL) {
-		DRM_ERROR("Failed to allocate exec or object list "
-			  "for %d buffers\n",
+	if (object_list == NULL) {
+		DRM_ERROR("Failed to allocate object list for %d buffers\n",
			  args->buffer_count);
		ret = -ENOMEM;
-		goto pre_mutex_err;
-	}
-	ret = copy_from_user(exec_list,
-			     (struct drm_i915_relocation_entry __user *)
-			     (uintptr_t) args->buffers_ptr,
-			     sizeof(*exec_list) * args->buffer_count);
-	if (ret != 0) {
-		DRM_ERROR("copy %d exec entries failed %d\n",
-			  args->buffer_count, ret);
		goto pre_mutex_err;
	}
···

	mutex_unlock(&dev->struct_mutex);

-	if (!ret) {
-		/* Copy the new buffer offsets back to the user's exec list. */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_ERROR("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
-		}
-	}
-
	/* Copy the updated relocations out regardless of current error
	 * state. Failure to update the relocs would mean that the next
	 * time userland calls execbuf, it would do so with presumed offset
···

pre_mutex_err:
	drm_free_large(object_list);
-	drm_free_large(exec_list);
	kfree(cliprects);

	return ret;
}
+
+ /*
+  * Legacy execbuffer just creates an exec2 list from the original exec object
+  * list array and passes it to the real function.
+  */
+ int
+ i915_gem_execbuffer(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+ {
+	struct drm_i915_gem_execbuffer *args = data;
+	struct drm_i915_gem_execbuffer2 exec2;
+	struct drm_i915_gem_exec_object *exec_list = NULL;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret, i;
+
+ #if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+ #endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	/* Copy in the exec list from userland */
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec_list == NULL || exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec_list);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	for (i = 0; i < args->buffer_count; i++) {
+		exec2_list[i].handle = exec_list[i].handle;
+		exec2_list[i].relocation_count = exec_list[i].relocation_count;
+		exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+		exec2_list[i].alignment = exec_list[i].alignment;
+		exec2_list[i].offset = exec_list[i].offset;
+		if (!IS_I965G(dev))
+			exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+		else
+			exec2_list[i].flags = 0;
+	}
+
+	exec2.buffers_ptr = args->buffers_ptr;
+	exec2.buffer_count = args->buffer_count;
+	exec2.batch_start_offset = args->batch_start_offset;
+	exec2.batch_len = args->batch_len;
+	exec2.DR1 = args->DR1;
+	exec2.DR4 = args->DR4;
+	exec2.num_cliprects = args->num_cliprects;
+	exec2.cliprects_ptr = args->cliprects_ptr;
+	exec2.flags = 0;
+
+	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		for (i = 0; i < args->buffer_count; i++)
+			exec_list[i].offset = exec2_list[i].offset;
+		/* ... and back out to userspace */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec_list,
+				   sizeof(*exec_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	} else {
+		DRM_ERROR("i915_gem_do_execbuffer returns %d\n", ret);
+	}
+
+	drm_free_large(exec_list);
+	drm_free_large(exec2_list);
+	return ret;
+ }
+
+ int
+ i915_gem_execbuffer2(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+ {
+	struct drm_i915_gem_execbuffer2 *args = data;
+	struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+	int ret;
+
+ #if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		 (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+ #endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+
+	exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+	if (exec2_list == NULL) {
+		DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+			  args->buffer_count);
+		return -ENOMEM;
+	}
+	ret = copy_from_user(exec2_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec2_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		drm_free_large(exec2_list);
+		return -EFAULT;
+	}
+
+	ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+	if (!ret) {
+		/* Copy the new buffer offsets back to the user's exec list. */
+		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+				   (uintptr_t) args->buffers_ptr,
+				   exec2_list,
+				   sizeof(*exec2_list) * args->buffer_count);
+		if (ret) {
+			ret = -EFAULT;
+			DRM_ERROR("failed to copy %d exec entries "
+				  "back to user (%d)\n",
+				  args->buffer_count, ret);
+		}
+	}
+
+	drm_free_large(exec2_list);
+	return ret;
+ }
···
		if (ret)
			return ret;
	}
-	/*
-	 * Pre-965 chips need a fence register set up in order to
-	 * properly handle tiled surfaces.
-	 */
-	if (!IS_I965G(dev) && obj_priv->tiling_mode != I915_TILING_NONE) {
-		ret = i915_gem_object_get_fence_reg(obj);
-		if (ret != 0) {
-			if (ret != -EBUSY && ret != -ERESTARTSYS)
-				DRM_ERROR("Failure to install fence: %d\n",
-					  ret);
-			return ret;
-		}
-	}
+
	obj_priv->pin_count++;

	/* If the object is not active and not pending a flush,
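On the userspace side, the new ioctl takes a drm_i915_gem_execbuffer2 whose per-object flags make fence requirements explicit; the legacy wrapper above sets EXEC_OBJECT_NEEDS_FENCE on every object for pre-965 parts when converting old submissions. A minimal, hedged submission sketch against the updated i915_drm.h (no relocations, one batch buffer; fd and batch_handle assumed to exist):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "i915_drm.h"

    /* Sketch: submit a single batch via DRM_IOCTL_I915_GEM_EXECBUFFER2. */
    static int submit_batch(int fd, uint32_t batch_handle, uint32_t batch_len)
    {
        struct drm_i915_gem_exec_object2 obj;
        struct drm_i915_gem_execbuffer2 execbuf;

        memset(&obj, 0, sizeof(obj));
        obj.handle = batch_handle;
        obj.flags = 0;  /* set EXEC_OBJECT_NEEDS_FENCE for tiled pre-965 targets */

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = (uintptr_t) &obj; /* last entry is the batch */
        execbuf.buffer_count = 1;
        execbuf.batch_start_offset = 0;
        execbuf.batch_len = batch_len;

        return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    }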
+23 -25
drivers/gpu/drm/i915/i915_gem_tiling.c
···


/**
- * Returns the size of the fence for a tiled object of the given size.
+ * Returns whether an object is currently fenceable.  If not, it may need
+ * to be unbound and have its pitch adjusted.
 */
- static int
- i915_get_fence_size(struct drm_device *dev, int size)
+ bool
+ i915_obj_fenceable(struct drm_device *dev, struct drm_gem_object *obj)
{
-	int i;
-	int start;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	if (IS_I965G(dev)) {
		/* The 965 can have fences at any page boundary. */
-		return ALIGN(size, 4096);
+		if (obj->size & 4095)
+			return false;
+		return true;
+	} else if (IS_I9XX(dev)) {
+		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+			return false;
	} else {
-		/* Align the size to a power of two greater than the smallest
-		 * fence size.
-		 */
-		if (IS_I9XX(dev))
-			start = 1024 * 1024;
-		else
-			start = 512 * 1024;
-
-		for (i = start; i < size; i <<= 1)
-			;
-
-		return i;
+		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+			return false;
	}
+
+	/* Power of two sized... */
+	if (obj->size & (obj->size - 1))
+		return false;
+
+	/* Objects must be size aligned as well */
+	if (obj_priv->gtt_offset & (obj->size - 1))
+		return false;
+	return true;
}

/* Check pitch constriants for all chips & tiling formats */
- static bool
+ bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
	int tile_width;
···
		return false;

	if (stride & (stride - 1))
		return false;
-
-	/* We don't handle the aperture area covered by the fence being bigger
-	 * than the object size.
-	 */
-	if (i915_get_fence_size(dev, size) != size)
-		return false;

	return true;
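The fenceable test encodes the pre-965 constraints: power-of-two object size, GTT offset aligned to the object size, and an offset within the fence start mask for the chipset class. Two of those checks, pulled out into a standalone sketch for a worked example:

    #include <stdbool.h>
    #include <stdint.h>

    /* Sketch: the size/alignment half of the i915_obj_fenceable() checks
     * (the I915/I830_FENCE_START_MASK test is omitted here). */
    static bool fence_size_ok(uint64_t size, uint64_t gtt_offset)
    {
        if (size & (size - 1))          /* must be power-of-two sized */
            return false;
        if (gtt_offset & (size - 1))    /* must start size-aligned */
            return false;
        return true;
    }

    /* fence_size_ok(0x100000, 0x180000) == false: a 1 MiB object bound at
     * 1.5 MiB is not size aligned, so execbuf unbinds and rebinds it
     * before installing the fence. */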
+21 -11
drivers/gpu/drm/i915/i915_irq.c
···
		dev_priv->mm.irq_gem_seqno = seqno;
		trace_i915_gem_request_complete(dev, seqno);
		DRM_WAKEUP(&dev_priv->irq_queue);
+		dev_priv->hangcheck_count = 0;
+		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
	}

	if (de_iir & DE_GSE)
···
	(void) I915_READ(IER);
}

+ /*
+  * Must be called after intel_modeset_init or hotplug interrupts won't be
+  * enabled correctly.
+  */
int i915_driver_irq_postinstall(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
···
	if (I915_HAS_HOTPLUG(dev)) {
		u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);

-		/* Leave other bits alone */
-		hotplug_en |= HOTPLUG_EN_MASK;
+		/* Note HDMI and DP share bits */
+		if (dev_priv->hotplug_supported_mask & HDMIB_HOTPLUG_INT_STATUS)
+			hotplug_en |= HDMIB_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & HDMIC_HOTPLUG_INT_STATUS)
+			hotplug_en |= HDMIC_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & HDMID_HOTPLUG_INT_STATUS)
+			hotplug_en |= HDMID_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
+			hotplug_en |= SDVOC_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
+			hotplug_en |= SDVOB_HOTPLUG_INT_EN;
+		if (dev_priv->hotplug_supported_mask & CRT_HOTPLUG_INT_STATUS)
+			hotplug_en |= CRT_HOTPLUG_INT_EN;
+		/* Ignore TV since it's buggy */
+
		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);

-		dev_priv->hotplug_supported_mask = CRT_HOTPLUG_INT_STATUS |
-			TV_HOTPLUG_INT_STATUS | SDVOC_HOTPLUG_INT_STATUS |
-			SDVOB_HOTPLUG_INT_STATUS;
-		if (IS_G4X(dev)) {
-			dev_priv->hotplug_supported_mask |=
-				HDMIB_HOTPLUG_INT_STATUS |
-				HDMIC_HOTPLUG_INT_STATUS |
-				HDMID_HOTPLUG_INT_STATUS;
-		}
		/* Enable in IER... */
		enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
		/* and unmask in IMR */
+4 -7
drivers/gpu/drm/i915/i915_reg.h
···
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV	(1 << 2)
#define CRT_HOTPLUG_MASK			(0x3fc) /* Bits 9-2 */
#define CRT_FORCE_HOTPLUG_MASK			0xfffffe1f
- #define HOTPLUG_EN_MASK (HDMIB_HOTPLUG_INT_EN | \
-			 HDMIC_HOTPLUG_INT_EN | \
-			 HDMID_HOTPLUG_INT_EN | \
-			 SDVOB_HOTPLUG_INT_EN | \
-			 SDVOC_HOTPLUG_INT_EN | \
-			 CRT_HOTPLUG_INT_EN)
-

#define PORT_HOTPLUG_STAT	0x61114
#define HDMIB_HOTPLUG_INT_STATUS		(1 << 29)
···
#define   LVDS_PORT_EN			(1 << 31)
/* Selects pipe B for LVDS data.  Must be set on pre-965. */
#define   LVDS_PIPEB_SELECT		(1 << 30)
+ /* LVDS dithering flag on 965/g4x platform */
+ #define   LVDS_ENABLE_DITHER		(1 << 25)
/* Enable border for unscaled (or aspect-scaled) display */
#define   LVDS_BORDER_ENABLE		(1 << 15)
/*
···

/* Display & cursor control */

+ /* dithering flag on Ironlake */
+ #define PIPE_ENABLE_DITHER	(1 << 4)
/* Pipe A */
#define PIPEADSL		0x70000
#define PIPEACONF		0x70008
-12
drivers/gpu/drm/i915/i915_suspend.c
···

	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);

-	/* Render Standby */
-	if (I915_HAS_RC6(dev)) {
-		dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
-		dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
-	}
-
	/* Hardware status page */
	dev_priv->saveHWS = I915_READ(HWS_PGA);
···
	int i;

	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
-
-	/* Render Standby */
-	if (I915_HAS_RC6(dev)) {
-		I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
-		I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
-	}

	/* Hardware status page */
	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
+2
drivers/gpu/drm/i915/intel_crt.c
···
	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);

	drm_sysfs_connector_add(connector);
+
+	dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
+176 -240
drivers/gpu/drm/i915/intel_display.c
···
#define IRONLAKE_P2_LVDS_FAST      7  /* double channel */
#define IRONLAKE_P2_DOT_LIMIT      225000 /* 225Mhz */

static bool
intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
		    int target, int refclk, intel_clock_t *best_clock);
···
static bool
intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
			int target, int refclk, intel_clock_t *best_clock);
- static bool
- intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			     int target, int refclk, intel_clock_t *best_clock);

static bool
intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
···
	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW,
		 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST },
-	.find_pll = intel_ironlake_find_best_PLL,
};

static const intel_limit_t intel_limits_ironlake_lvds = {
···
	.p2  = { .dot_limit = IRONLAKE_P2_DOT_LIMIT,
		 .p2_slow = IRONLAKE_P2_LVDS_SLOW,
		 .p2_fast = IRONLAKE_P2_LVDS_FAST },
-	.find_pll = intel_ironlake_find_best_PLL,
};

static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
···
	const intel_limit_t *limit;
	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
		limit = &intel_limits_ironlake_lvds;
	else
		limit = &intel_limits_ironlake_sdvo;
···
		found = false;

	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
		    LVDS_CLKB_POWER_UP)
			clock.p2 = limit->p2.p2_fast;
		else
···
{
	struct drm_device *dev = crtc->dev;
	intel_clock_t clock;
	if (target < 200000) {
		clock.n = 1;
		clock.p1 = 2;
···
	}
	intel_clock(dev, refclk, &clock);
	memcpy(best_clock, &clock, sizeof(intel_clock_t));
-	return true;
- }
-
- static bool
- intel_ironlake_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
-			     int target, int refclk, intel_clock_t *best_clock)
- {
-	struct drm_device *dev = crtc->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	intel_clock_t clock;
-	int err_most = 47;
-	int err_min = 10000;
-
-	/* eDP has only 2 clock choice, no n/m/p setting */
-	if (HAS_eDP)
-		return true;
-
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
-		return intel_find_pll_ironlake_dp(limit, crtc, target,
-						  refclk, best_clock);
-
-	if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
-		if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
-		    LVDS_CLKB_POWER_UP)
-			clock.p2 = limit->p2.p2_fast;
-		else
-			clock.p2 = limit->p2.p2_slow;
-	} else {
-		if (target < limit->p2.dot_limit)
-			clock.p2 = limit->p2.p2_slow;
-		else
-			clock.p2 = limit->p2.p2_fast;
-	}
-
-	memset(best_clock, 0, sizeof(*best_clock));
-	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
-		/* based on hardware requriment prefer smaller n to precision */
-		for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
-			/* based on hardware requirment prefere larger m1,m2 */
-			for (clock.m1 = limit->m1.max;
-			     clock.m1 >= limit->m1.min; clock.m1--) {
-				for (clock.m2 = limit->m2.max;
-				     clock.m2 >= limit->m2.min; clock.m2--) {
-					int this_err;
-
-					intel_clock(dev, refclk, &clock);
-					if (!intel_PLL_is_valid(crtc, &clock))
-						continue;
-					this_err = abs((10000 - (target*10000/clock.dot)));
-					if (this_err < err_most) {
-						*best_clock = clock;
-						/* found on first matching */
-						goto out;
-					} else if (this_err < err_min) {
-						*best_clock = clock;
-						err_min = this_err;
-					}
-				}
-			}
-		}
-	}
- out:
	return true;
}
···
	int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
	u32 temp;
	int tries = 5, j, n;

	/* XXX: When our outputs are all unaware of DPMS modes other than off
	 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
···

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	temp = I915_READ(fdi_rx_reg);
	I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
			FDI_SEL_PCDCLK |
			FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
···

	/* enable PCH transcoder */
	temp = I915_READ(transconf_reg);
	I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
	I915_READ(transconf_reg);
···
	I915_READ(fdi_tx_reg);

	temp = I915_READ(fdi_rx_reg);
	I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
	I915_READ(fdi_rx_reg);
···
			}
		}
	}
-
	udelay(100);

	/* disable PCH DPLL */
···
 * A value of 5us seems to be a good balance; safe for very low end
 * platforms but not overly aggressive on lower latency configs.
 */
- const static int latency_ns = 5000;

static int i9xx_get_fifo_size(struct drm_device *dev, int plane)
{
···
	/* Calc sr entries for one plane configs */
	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
		/* self-refresh has much higher latency */
-		const static int sr_latency_ns = 12000;

		sr_clock = planea_clock ? planea_clock : planeb_clock;
		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
···
	/* Calc sr entries for one plane configs */
	if (sr_hdisplay && (!planea_clock || !planeb_clock)) {
		/* self-refresh has much higher latency */
-		const static int sr_latency_ns = 12000;

		sr_clock = planea_clock ? planea_clock : planeb_clock;
		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
···
	if (HAS_FW_BLC(dev) && sr_hdisplay &&
	    (!planea_clock || !planeb_clock)) {
		/* self-refresh has much higher latency */
-		const static int sr_latency_ns = 6000;

		sr_clock = planea_clock ? planea_clock : planeb_clock;
		line_time_us = ((sr_hdisplay * 1000) / sr_clock);
···

	/* determine panel color depth */
	temp = I915_READ(pipeconf_reg);

	switch (temp & PIPE_BPC_MASK) {
	case PIPE_8BPC:
···
	 * appropriately here, but we need to look more thoroughly into how
	 * panels behave in the two modes.
	 */
-
	I915_WRITE(lvds_reg, lvds);
	I915_READ(lvds_reg);
}
···

	/* we only need to pin inside GTT if cursor is non-phy */
	mutex_lock(&dev->struct_mutex);
-	if (!dev_priv->cursor_needs_physical) {
		ret = i915_gem_object_pin(bo, PAGE_SIZE);
		if (ret) {
			DRM_ERROR("failed to pin cursor bo\n");
···
	I915_WRITE(base, addr);

	if (intel_crtc->cursor_bo) {
-		if (dev_priv->cursor_needs_physical) {
			if (intel_crtc->cursor_bo != bo)
				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
		} else
···
	queue_work(dev_priv->wq, &dev_priv->idle_work);
}

- void intel_increase_renderclock(struct drm_device *dev, bool schedule)
- {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (IS_IRONLAKE(dev))
-		return;
-
-	if (!dev_priv->render_reclock_avail) {
-		DRM_DEBUG_DRIVER("not reclocking render clock\n");
-		return;
-	}
-
-	/* Restore render clock frequency to original value */
-	if (IS_G4X(dev) || IS_I9XX(dev))
-		pci_write_config_word(dev->pdev, GCFGC, dev_priv->orig_clock);
-	else if (IS_I85X(dev))
-		pci_write_config_word(dev->pdev, HPLLCC, dev_priv->orig_clock);
-	DRM_DEBUG_DRIVER("increasing render clock frequency\n");
-
-	/* Schedule downclock */
-	if (schedule)
-		mod_timer(&dev_priv->idle_timer, jiffies +
-			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
- }
-
- void intel_decrease_renderclock(struct drm_device *dev)
- {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	if (IS_IRONLAKE(dev))
-		return;
-
-	if (!dev_priv->render_reclock_avail) {
-		DRM_DEBUG_DRIVER("not reclocking render clock\n");
-		return;
-	}
-
-	if (IS_G4X(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~GM45_GC_RENDER_CLOCK_MASK;
-		gcfgc |= GM45_GC_RENDER_CLOCK_266_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I965G(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~I965_GC_RENDER_CLOCK_MASK;
-		gcfgc |= I965_GC_RENDER_CLOCK_267_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I945G(dev) || IS_I945GM(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~I945_GC_RENDER_CLOCK_MASK;
-		gcfgc |= I945_GC_RENDER_CLOCK_166_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I915G(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~I915_GC_RENDER_CLOCK_MASK;
-		gcfgc |= I915_GC_RENDER_CLOCK_166_MHZ;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	} else if (IS_I85X(dev)) {
-		u16 hpllcc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, HPLLCC, &hpllcc);
-
-		/* Up to maximum... */
-		hpllcc &= ~GC_CLOCK_CONTROL_MASK;
-		hpllcc |= GC_CLOCK_133_200;
-
-		pci_write_config_word(dev->pdev, HPLLCC, hpllcc);
-	}
-	DRM_DEBUG_DRIVER("decreasing render clock frequency\n");
- }
-
- /* Note that no increase function is needed for this - increase_renderclock()
-  * will also rewrite these bits
-  */
- void intel_decrease_displayclock(struct drm_device *dev)
- {
-	if (IS_IRONLAKE(dev))
-		return;
-
-	if (IS_I945G(dev) || IS_I945GM(dev) || IS_I915G(dev) ||
-	    IS_I915GM(dev)) {
-		u16 gcfgc;
-
-		/* Adjust render clock... */
-		pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
-
-		/* Down to minimum... */
-		gcfgc &= ~0xf0;
-		gcfgc |= 0x80;
-
-		pci_write_config_word(dev->pdev, GCFGC, gcfgc);
-	}
- }
-
#define CRTC_IDLE_TIMEOUT 1000 /* ms */

static void intel_crtc_idle_timer(unsigned long arg)
···

	mutex_lock(&dev->struct_mutex);

-	/* GPU isn't processing, downclock it. */
-	if (!dev_priv->busy) {
-		intel_decrease_renderclock(dev);
-		intel_decrease_displayclock(dev);
-	}
-
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		/* Skip inactive CRTCs */
		if (!crtc->fb)
···
	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

-	if (!dev_priv->busy) {
		dev_priv->busy = true;
-		intel_increase_renderclock(dev, true);
-	} else {
		mod_timer(&dev_priv->idle_timer, jiffies +
			  msecs_to_jiffies(GPU_IDLE_TIMEOUT));
-	}

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (!crtc->fb)
···
	bool found = false;

	if (I915_READ(SDVOB) & SDVO_DETECTED) {
		found = intel_sdvo_init(dev, SDVOB);
-		if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
			intel_hdmi_init(dev, SDVOB);

-		if (!found && SUPPORTS_INTEGRATED_DP(dev))
			intel_dp_init(dev, DP_B);
	}

	/* Before G4X SDVOC doesn't have its own detect register */

-	if (I915_READ(SDVOB) & SDVO_DETECTED)
		found = intel_sdvo_init(dev, SDVOC);

	if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {

-		if (SUPPORTS_INTEGRATED_HDMI(dev))
			intel_hdmi_init(dev, SDVOC);
-		if (SUPPORTS_INTEGRATED_DP(dev))
			intel_dp_init(dev, DP_C);
	}

-	if (SUPPORTS_INTEGRATED_DP(dev) && (I915_READ(DP_D) & DP_DETECTED))
		intel_dp_init(dev, DP_D);
	} else if (IS_I8XX(dev))
		intel_dvo_init(dev);
···
	.fb_changed = intelfb_probe,
};

void intel_init_clock_gating(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
···
	 * GPU can automatically power down the render unit if given a page
	 * to save state.
	 */
-	if (I915_HAS_RC6(dev)) {
-		struct drm_gem_object *pwrctx;
-		struct drm_i915_gem_object *obj_priv;
-		int ret;

		if (dev_priv->pwrctx) {
			obj_priv = dev_priv->pwrctx->driver_private;
		} else {
-			pwrctx = drm_gem_object_alloc(dev, 4096);
-			if (!pwrctx) {
-				DRM_DEBUG("failed to alloc power context, "
-					  "RC6 disabled\n");
-				goto out;
			}
-
-			ret = i915_gem_object_pin(pwrctx, 4096);
-			if (ret) {
-				DRM_ERROR("failed to pin power context: %d\n",
-					  ret);
-				drm_gem_object_unreference(pwrctx);
-				goto out;
-			}
-
-			i915_gem_object_set_to_gtt_domain(pwrctx, 1);
-
-			dev_priv->pwrctx = pwrctx;
-			obj_priv = pwrctx->driver_private;
		}

-		I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
-		I915_WRITE(MCHBAR_RENDER_STANDBY,
-			   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
	}
-
- out:
-	return;
}
···
		del_timer_sync(&intel_crtc->idle_timer);
	}

-	intel_increase_renderclock(dev, false);
	del_timer_sync(&dev_priv->idle_timer);

	if (dev_priv->display.disable_fbc)
··· 262 #define IRONLAKE_P2_LVDS_FAST 7 /* double channel */ 263 #define IRONLAKE_P2_DOT_LIMIT 225000 /* 225Mhz */ 264 265 + #define IRONLAKE_P_DISPLAY_PORT_MIN 10 266 + #define IRONLAKE_P_DISPLAY_PORT_MAX 20 267 + #define IRONLAKE_P2_DISPLAY_PORT_FAST 10 268 + #define IRONLAKE_P2_DISPLAY_PORT_SLOW 10 269 + #define IRONLAKE_P2_DISPLAY_PORT_LIMIT 0 270 + #define IRONLAKE_P1_DISPLAY_PORT_MIN 1 271 + #define IRONLAKE_P1_DISPLAY_PORT_MAX 2 272 + 273 static bool 274 intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 275 int target, int refclk, intel_clock_t *best_clock); ··· 271 static bool 272 intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 273 int target, int refclk, intel_clock_t *best_clock); 274 275 static bool 276 intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, ··· 496 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 497 .p2_slow = IRONLAKE_P2_SDVO_DAC_SLOW, 498 .p2_fast = IRONLAKE_P2_SDVO_DAC_FAST }, 499 + .find_pll = intel_g4x_find_best_PLL, 500 }; 501 502 static const intel_limit_t intel_limits_ironlake_lvds = { ··· 511 .p2 = { .dot_limit = IRONLAKE_P2_DOT_LIMIT, 512 .p2_slow = IRONLAKE_P2_LVDS_SLOW, 513 .p2_fast = IRONLAKE_P2_LVDS_FAST }, 514 + .find_pll = intel_g4x_find_best_PLL, 515 + }; 516 + 517 + static const intel_limit_t intel_limits_ironlake_display_port = { 518 + .dot = { .min = IRONLAKE_DOT_MIN, 519 + .max = IRONLAKE_DOT_MAX }, 520 + .vco = { .min = IRONLAKE_VCO_MIN, 521 + .max = IRONLAKE_VCO_MAX}, 522 + .n = { .min = IRONLAKE_N_MIN, 523 + .max = IRONLAKE_N_MAX }, 524 + .m = { .min = IRONLAKE_M_MIN, 525 + .max = IRONLAKE_M_MAX }, 526 + .m1 = { .min = IRONLAKE_M1_MIN, 527 + .max = IRONLAKE_M1_MAX }, 528 + .m2 = { .min = IRONLAKE_M2_MIN, 529 + .max = IRONLAKE_M2_MAX }, 530 + .p = { .min = IRONLAKE_P_DISPLAY_PORT_MIN, 531 + .max = IRONLAKE_P_DISPLAY_PORT_MAX }, 532 + .p1 = { .min = IRONLAKE_P1_DISPLAY_PORT_MIN, 533 + .max = IRONLAKE_P1_DISPLAY_PORT_MAX}, 534 + .p2 = { .dot_limit = IRONLAKE_P2_DISPLAY_PORT_LIMIT, 535 + .p2_slow = IRONLAKE_P2_DISPLAY_PORT_SLOW, 536 + .p2_fast = IRONLAKE_P2_DISPLAY_PORT_FAST }, 537 + .find_pll = intel_find_pll_ironlake_dp, 538 }; 539 540 static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc) ··· 519 const intel_limit_t *limit; 520 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 521 limit = &intel_limits_ironlake_lvds; 522 + else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || 523 + HAS_eDP) 524 + limit = &intel_limits_ironlake_display_port; 525 else 526 limit = &intel_limits_ironlake_sdvo; 527 ··· 791 found = false; 792 793 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { 794 + int lvds_reg; 795 + 796 + if (IS_IRONLAKE(dev)) 797 + lvds_reg = PCH_LVDS; 798 + else 799 + lvds_reg = LVDS; 800 + if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == 801 LVDS_CLKB_POWER_UP) 802 clock.p2 = limit->p2.p2_fast; 803 else ··· 839 { 840 struct drm_device *dev = crtc->dev; 841 intel_clock_t clock; 842 + 843 + /* return directly when it is eDP */ 844 + if (HAS_eDP) 845 + return true; 846 + 847 if (target < 200000) { 848 clock.n = 1; 849 clock.p1 = 2; ··· 854 } 855 intel_clock(dev, refclk, &clock); 856 memcpy(best_clock, &clock, sizeof(intel_clock_t)); 857 return true; 858 } 859 ··· 1493 int trans_vsync_reg = (pipe == 0) ? 
TRANS_VSYNC_A : TRANS_VSYNC_B; 1494 u32 temp; 1495 int tries = 5, j, n; 1496 + u32 pipe_bpc; 1497 + 1498 + temp = I915_READ(pipeconf_reg); 1499 + pipe_bpc = temp & PIPE_BPC_MASK; 1500 1501 /* XXX: When our outputs are all unaware of DPMS modes other than off 1502 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. ··· 1524 1525 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ 1526 temp = I915_READ(fdi_rx_reg); 1527 + /* 1528 + * make the BPC in FDI Rx be consistent with that in 1529 + * pipeconf reg. 1530 + */ 1531 + temp &= ~(0x7 << 16); 1532 + temp |= (pipe_bpc << 11); 1533 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE | 1534 FDI_SEL_PCDCLK | 1535 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */ ··· 1666 1667 /* enable PCH transcoder */ 1668 temp = I915_READ(transconf_reg); 1669 + /* 1670 + * make the BPC in transcoder be consistent with 1671 + * that in pipeconf reg. 1672 + */ 1673 + temp &= ~PIPE_BPC_MASK; 1674 + temp |= pipe_bpc; 1675 I915_WRITE(transconf_reg, temp | TRANS_ENABLE); 1676 I915_READ(transconf_reg); 1677 ··· 1745 I915_READ(fdi_tx_reg); 1746 1747 temp = I915_READ(fdi_rx_reg); 1748 + /* BPC in FDI rx is consistent with that in pipeconf */ 1749 + temp &= ~(0x07 << 16); 1750 + temp |= (pipe_bpc << 11); 1751 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE); 1752 I915_READ(fdi_rx_reg); 1753 ··· 1789 } 1790 } 1791 } 1792 + temp = I915_READ(transconf_reg); 1793 + /* BPC in transcoder is consistent with that in pipeconf */ 1794 + temp &= ~PIPE_BPC_MASK; 1795 + temp |= pipe_bpc; 1796 + I915_WRITE(transconf_reg, temp); 1797 + I915_READ(transconf_reg); 1798 udelay(100); 1799 1800 /* disable PCH DPLL */ ··· 2448 * A value of 5us seems to be a good balance; safe for very low end 2449 * platforms but not overly aggressive on lower latency configs. 2450 */ 2451 + static const int latency_ns = 5000; 2452 2453 static int i9xx_get_fifo_size(struct drm_device *dev, int plane) 2454 { ··· 2559 /* Calc sr entries for one plane configs */ 2560 if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 2561 /* self-refresh has much higher latency */ 2562 + static const int sr_latency_ns = 12000; 2563 2564 sr_clock = planea_clock ? planea_clock : planeb_clock; 2565 line_time_us = ((sr_hdisplay * 1000) / sr_clock); ··· 2598 /* Calc sr entries for one plane configs */ 2599 if (sr_hdisplay && (!planea_clock || !planeb_clock)) { 2600 /* self-refresh has much higher latency */ 2601 + static const int sr_latency_ns = 12000; 2602 2603 sr_clock = planea_clock ? planea_clock : planeb_clock; 2604 line_time_us = ((sr_hdisplay * 1000) / sr_clock); ··· 2667 if (HAS_FW_BLC(dev) && sr_hdisplay && 2668 (!planea_clock || !planeb_clock)) { 2669 /* self-refresh has much higher latency */ 2670 + static const int sr_latency_ns = 6000; 2671 2672 sr_clock = planea_clock ? planea_clock : planeb_clock; 2673 line_time_us = ((sr_hdisplay * 1000) / sr_clock); ··· 2969 2970 /* determine panel color depth */ 2971 temp = I915_READ(pipeconf_reg); 2972 + temp &= ~PIPE_BPC_MASK; 2973 + if (is_lvds) { 2974 + int lvds_reg = I915_READ(PCH_LVDS); 2975 + /* the BPC will be 6 if it is 18-bit LVDS panel */ 2976 + if ((lvds_reg & LVDS_A3_POWER_MASK) == LVDS_A3_POWER_UP) 2977 + temp |= PIPE_8BPC; 2978 + else 2979 + temp |= PIPE_6BPC; 2980 + } else 2981 + temp |= PIPE_8BPC; 2982 + I915_WRITE(pipeconf_reg, temp); 2983 + I915_READ(pipeconf_reg); 2984 2985 switch (temp & PIPE_BPC_MASK) { 2986 case PIPE_8BPC: ··· 3195 * appropriately here, but we need to look more thoroughly into how 3196 * panels behave in the two modes. 
3197 */ 3198 + /* set the dithering flag */ 3199 + if (IS_I965G(dev)) { 3200 + if (dev_priv->lvds_dither) { 3201 + if (IS_IRONLAKE(dev)) 3202 + pipeconf |= PIPE_ENABLE_DITHER; 3203 + else 3204 + lvds |= LVDS_ENABLE_DITHER; 3205 + } else { 3206 + if (IS_IRONLAKE(dev)) 3207 + pipeconf &= ~PIPE_ENABLE_DITHER; 3208 + else 3209 + lvds &= ~LVDS_ENABLE_DITHER; 3210 + } 3211 + } 3212 I915_WRITE(lvds_reg, lvds); 3213 I915_READ(lvds_reg); 3214 } ··· 3385 3386 /* we only need to pin inside GTT if cursor is non-phy */ 3387 mutex_lock(&dev->struct_mutex); 3388 + if (!dev_priv->info->cursor_needs_physical) { 3389 ret = i915_gem_object_pin(bo, PAGE_SIZE); 3390 if (ret) { 3391 DRM_ERROR("failed to pin cursor bo\n"); ··· 3420 I915_WRITE(base, addr); 3421 3422 if (intel_crtc->cursor_bo) { 3423 + if (dev_priv->info->cursor_needs_physical) { 3424 if (intel_crtc->cursor_bo != bo) 3425 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 3426 } else ··· 3779 queue_work(dev_priv->wq, &dev_priv->idle_work); 3780 } 3781 3782 #define CRTC_IDLE_TIMEOUT 1000 /* ms */ 3783 3784 static void intel_crtc_idle_timer(unsigned long arg) ··· 4011 4012 mutex_lock(&dev->struct_mutex); 4013 4014 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4015 /* Skip inactive CRTCs */ 4016 if (!crtc->fb) ··· 4050 if (!drm_core_check_feature(dev, DRIVER_MODESET)) 4051 return; 4052 4053 + if (!dev_priv->busy) 4054 dev_priv->busy = true; 4055 + else 4056 mod_timer(&dev_priv->idle_timer, jiffies + 4057 msecs_to_jiffies(GPU_IDLE_TIMEOUT)); 4058 4059 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 4060 if (!crtc->fb) ··· 4400 bool found = false; 4401 4402 if (I915_READ(SDVOB) & SDVO_DETECTED) { 4403 + DRM_DEBUG_KMS("probing SDVOB\n"); 4404 found = intel_sdvo_init(dev, SDVOB); 4405 + if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { 4406 + DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); 4407 intel_hdmi_init(dev, SDVOB); 4408 + } 4409 4410 + if (!found && SUPPORTS_INTEGRATED_DP(dev)) { 4411 + DRM_DEBUG_KMS("probing DP_B\n"); 4412 intel_dp_init(dev, DP_B); 4413 + } 4414 } 4415 4416 /* Before G4X SDVOC doesn't have its own detect register */ 4417 4418 + if (I915_READ(SDVOB) & SDVO_DETECTED) { 4419 + DRM_DEBUG_KMS("probing SDVOC\n"); 4420 found = intel_sdvo_init(dev, SDVOC); 4421 + } 4422 4423 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { 4424 4425 + if (SUPPORTS_INTEGRATED_HDMI(dev)) { 4426 + DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); 4427 intel_hdmi_init(dev, SDVOC); 4428 + } 4429 + if (SUPPORTS_INTEGRATED_DP(dev)) { 4430 + DRM_DEBUG_KMS("probing DP_C\n"); 4431 intel_dp_init(dev, DP_C); 4432 + } 4433 } 4434 4435 + if (SUPPORTS_INTEGRATED_DP(dev) && 4436 + (I915_READ(DP_D) & DP_DETECTED)) { 4437 + DRM_DEBUG_KMS("probing DP_D\n"); 4438 intel_dp_init(dev, DP_D); 4439 + } 4440 } else if (IS_I8XX(dev)) 4441 intel_dvo_init(dev); 4442 ··· 4527 .fb_changed = intelfb_probe, 4528 }; 4529 4530 + static struct drm_gem_object * 4531 + intel_alloc_power_context(struct drm_device *dev) 4532 + { 4533 + struct drm_gem_object *pwrctx; 4534 + int ret; 4535 + 4536 + pwrctx = drm_gem_object_alloc(dev, 4096); 4537 + if (!pwrctx) { 4538 + DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); 4539 + return NULL; 4540 + } 4541 + 4542 + mutex_lock(&dev->struct_mutex); 4543 + ret = i915_gem_object_pin(pwrctx, 4096); 4544 + if (ret) { 4545 + DRM_ERROR("failed to pin power context: %d\n", ret); 4546 + goto err_unref; 4547 + } 4548 + 4549 + ret = i915_gem_object_set_to_gtt_domain(pwrctx, 1); 4550 + if (ret) { 4551 + DRM_ERROR("failed to 
set-domain on power context: %d\n", ret); 4552 + goto err_unpin; 4553 + } 4554 + mutex_unlock(&dev->struct_mutex); 4555 + 4556 + return pwrctx; 4557 + 4558 + err_unpin: 4559 + i915_gem_object_unpin(pwrctx); 4560 + err_unref: 4561 + drm_gem_object_unreference(pwrctx); 4562 + mutex_unlock(&dev->struct_mutex); 4563 + return NULL; 4564 + } 4565 + 4566 void intel_init_clock_gating(struct drm_device *dev) 4567 { 4568 struct drm_i915_private *dev_priv = dev->dev_private; ··· 4579 * GPU can automatically power down the render unit if given a page 4580 * to save state. 4581 */ 4582 + if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) { 4583 + struct drm_i915_gem_object *obj_priv = NULL; 4584 4585 if (dev_priv->pwrctx) { 4586 obj_priv = dev_priv->pwrctx->driver_private; 4587 } else { 4588 + struct drm_gem_object *pwrctx; 4589 + 4590 + pwrctx = intel_alloc_power_context(dev); 4591 + if (pwrctx) { 4592 + dev_priv->pwrctx = pwrctx; 4593 + obj_priv = pwrctx->driver_private; 4594 } 4595 } 4596 4597 + if (obj_priv) { 4598 + I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN); 4599 + I915_WRITE(MCHBAR_RENDER_STANDBY, 4600 + I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT); 4601 + } 4602 } 4603 } 4604 4605 /* Set up chip specific display functions */ ··· 4770 del_timer_sync(&intel_crtc->idle_timer); 4771 } 4772 4773 del_timer_sync(&dev_priv->idle_timer); 4774 4775 if (dev_priv->display.disable_fbc)
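A note on the pipe_bpc hunks in the intel_display.c diff above: pipe_bpc is captured with PIPE_BPC_MASK still applied, i.e. it keeps PIPECONF's field alignment, so the FDI writes of the form `temp |= (pipe_bpc << 11)` are a field relocation rather than a value shift. Below is a minimal standalone sketch of the arithmetic, assuming PIPECONF's BPC select sits at bits 7:5 and the FDI receiver's at bits 18:16 — field positions read off the hunks themselves (the `~(0x7 << 16)` clear mask together with a shift of 11 is only consistent with that layout), not quoted from i915_reg.h.

/* Standalone sketch; field positions are assumptions, see above. */
#include <assert.h>
#include <stdint.h>

#define PIPE_BPC_SHIFT 5                        /* assumed PIPECONF field */
#define PIPE_BPC_MASK  (0x7u << PIPE_BPC_SHIFT)
#define FDI_BPC_SHIFT  16                       /* assumed FDI RX field */

static uint32_t fdi_rx_bpc_bits(uint32_t pipeconf)
{
        uint32_t pipe_bpc = pipeconf & PIPE_BPC_MASK; /* still at bits 7:5 */

        /* 16 - 5 == 11, matching the driver's "pipe_bpc << 11" */
        return pipe_bpc << (FDI_BPC_SHIFT - PIPE_BPC_SHIFT);
}

int main(void)
{
        uint32_t pipeconf = 0x2u << PIPE_BPC_SHIFT; /* a 6bpc-style encoding */

        assert(fdi_rx_bpc_bits(pipeconf) == (0x2u << FDI_BPC_SHIFT));
        return 0;
}

The transcoder writes follow the same pattern, except there the destination field matches PIPECONF's, so pipe_bpc is OR'd in unshifted.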
+6
drivers/gpu/drm/i915/intel_dp.c
··· 1402 break; 1403 case DP_B: 1404 case PCH_DP_B: 1405 name = "DPDDC-B"; 1406 break; 1407 case DP_C: 1408 case PCH_DP_C: 1409 name = "DPDDC-C"; 1410 break; 1411 case DP_D: 1412 case PCH_DP_D: 1413 name = "DPDDC-D"; 1414 break; 1415 }
··· 1402 break; 1403 case DP_B: 1404 case PCH_DP_B: 1405 + dev_priv->hotplug_supported_mask |= 1406 + HDMIB_HOTPLUG_INT_STATUS; 1407 name = "DPDDC-B"; 1408 break; 1409 case DP_C: 1410 case PCH_DP_C: 1411 + dev_priv->hotplug_supported_mask |= 1412 + HDMIC_HOTPLUG_INT_STATUS; 1413 name = "DPDDC-C"; 1414 break; 1415 case DP_D: 1416 case PCH_DP_D: 1417 + dev_priv->hotplug_supported_mask |= 1418 + HDMID_HOTPLUG_INT_STATUS; 1419 name = "DPDDC-D"; 1420 break; 1421 }
+5
drivers/gpu/drm/i915/intel_hdmi.c
··· 303 if (sdvox_reg == SDVOB) { 304 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 305 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 306 } else if (sdvox_reg == SDVOC) { 307 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); 308 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 309 } else if (sdvox_reg == HDMIB) { 310 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); 311 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, 312 "HDMIB"); 313 } else if (sdvox_reg == HDMIC) { 314 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); 315 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, 316 "HDMIC"); 317 } else if (sdvox_reg == HDMID) { 318 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 319 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, 320 "HDMID"); 321 } 322 if (!intel_output->ddc_bus) 323 goto err_connector;
··· 303 if (sdvox_reg == SDVOB) { 304 intel_output->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT); 305 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 306 + dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 307 } else if (sdvox_reg == SDVOC) { 308 intel_output->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT); 309 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 310 + dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 311 } else if (sdvox_reg == HDMIB) { 312 intel_output->clone_mask = (1 << INTEL_HDMID_CLONE_BIT); 313 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE, 314 "HDMIB"); 315 + dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS; 316 } else if (sdvox_reg == HDMIC) { 317 intel_output->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT); 318 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD, 319 "HDMIC"); 320 + dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS; 321 } else if (sdvox_reg == HDMID) { 322 intel_output->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT); 323 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF, 324 "HDMID"); 325 + dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS; 326 } 327 if (!intel_output->ddc_bus) 328 goto err_connector;
+18 -65
drivers/gpu/drm/i915/intel_lvds.c
··· 608 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), 609 }, 610 }, 611 { } 612 }; 613 ··· 686 struct drm_i915_private *dev_priv = 687 container_of(nb, struct drm_i915_private, lid_notifier); 688 struct drm_device *dev = dev_priv->dev; 689 690 if (!acpi_lid_open()) { 691 dev_priv->modeset_on_lid = 1; 692 return NOTIFY_OK; ··· 868 { } /* terminating entry */ 869 }; 870 871 - #ifdef CONFIG_ACPI 872 - /* 873 - * check_lid_device -- check whether @handle is an ACPI LID device. 874 - * @handle: ACPI device handle 875 - * @level : depth in the ACPI namespace tree 876 - * @context: the number of LID device when we find the device 877 - * @rv: a return value to fill if desired (Not use) 878 - */ 879 - static acpi_status 880 - check_lid_device(acpi_handle handle, u32 level, void *context, 881 - void **return_value) 882 - { 883 - struct acpi_device *acpi_dev; 884 - int *lid_present = context; 885 - 886 - acpi_dev = NULL; 887 - /* Get the acpi device for device handle */ 888 - if (acpi_bus_get_device(handle, &acpi_dev) || !acpi_dev) { 889 - /* If there is no ACPI device for handle, return */ 890 - return AE_OK; 891 - } 892 - 893 - if (!strncmp(acpi_device_hid(acpi_dev), "PNP0C0D", 7)) 894 - *lid_present = 1; 895 - 896 - return AE_OK; 897 - } 898 - 899 - /** 900 - * check whether there exists the ACPI LID device by enumerating the ACPI 901 - * device tree. 902 - */ 903 - static int intel_lid_present(void) 904 - { 905 - int lid_present = 0; 906 - 907 - if (acpi_disabled) { 908 - /* If ACPI is disabled, there is no ACPI device tree to 909 - * check, so assume the LID device would have been present. 910 - */ 911 - return 1; 912 - } 913 - 914 - acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 915 - ACPI_UINT32_MAX, 916 - check_lid_device, NULL, &lid_present, NULL); 917 - 918 - return lid_present; 919 - } 920 - #else 921 - static int intel_lid_present(void) 922 - { 923 - /* In the absence of ACPI built in, assume that the LID device would 924 - * have been present. 925 - */ 926 - return 1; 927 - } 928 - #endif 929 - 930 /** 931 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID 932 * @dev: drm device ··· 986 if (dmi_check_system(intel_no_lvds)) 987 return; 988 989 - /* 990 - * Assume LVDS is present if there's an ACPI lid device or if the 991 - * device is present in the VBT. 992 - */ 993 - if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) { 994 - DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n"); 995 return; 996 } 997 ··· 1131 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1132 dev_priv->lid_notifier.notifier_call = NULL; 1133 } 1134 drm_sysfs_connector_add(connector); 1135 return; 1136
··· 608 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire one"), 609 }, 610 }, 611 + { 612 + .ident = "PC-81005", 613 + .matches = { 614 + DMI_MATCH(DMI_SYS_VENDOR, "MALATA"), 615 + DMI_MATCH(DMI_PRODUCT_NAME, "PC-81005"), 616 + }, 617 + }, 618 { } 619 }; ··· 679 struct drm_i915_private *dev_priv = 680 container_of(nb, struct drm_i915_private, lid_notifier); 681 struct drm_device *dev = dev_priv->dev; 682 + struct drm_connector *connector = dev_priv->int_lvds_connector; 683 684 + /* 685 + * check and update the status of the LVDS connector after receiving 686 + * the LID notification event. 687 + */ 688 + if (connector) 689 + connector->status = connector->funcs->detect(connector); 690 if (!acpi_lid_open()) { 691 dev_priv->modeset_on_lid = 1; 692 return NOTIFY_OK; ··· 854 { } /* terminating entry */ 855 }; 856 857 /** 858 * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID 859 * @dev: drm device ··· 1031 if (dmi_check_system(intel_no_lvds)) 1032 return; 1033 1034 + if (!lvds_is_present_in_vbt(dev)) { 1035 + DRM_DEBUG_KMS("LVDS is not present in VBT\n"); 1036 return; 1037 } 1038 ··· 1180 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1181 dev_priv->lid_notifier.notifier_call = NULL; 1182 } 1183 + /* keep the LVDS connector */ 1184 + dev_priv->int_lvds_connector = connector; 1185 drm_sysfs_connector_add(connector); 1186 return; 1187
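The intel_lvds.c hunks above cache the connector in dev_priv->int_lvds_connector so the lid callback can refresh connector->status without walking the connector list; userspace that polls connector status then sees lid changes without a full reprobe. Only the registration failure path is visible in this diff, so the sketch below is a hedged reconstruction of the wiring it implies: the callback name intel_lid_notify and the ACPI button driver's acpi_lid_notifier_register()/acpi_lid_notifier_unregister() pair are assumptions, and driver-internal headers are omitted.

/* Kernel-side sketch only; names and includes are assumed, see above. */
#include <linux/notifier.h>
#include <acpi/button.h>

static int example_setup_lid_notifier(struct drm_i915_private *dev_priv)
{
        /* intel_lid_notify is the (assumed) callback patched above */
        dev_priv->lid_notifier.notifier_call = intel_lid_notify;
        if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
                DRM_DEBUG_KMS("lid notifier registration failed\n");
                dev_priv->lid_notifier.notifier_call = NULL;
                return -ENODEV;
        }
        return 0;
}

static void example_teardown_lid_notifier(struct drm_i915_private *dev_priv)
{
        if (dev_priv->lid_notifier.notifier_call)
                acpi_lid_notifier_unregister(&dev_priv->lid_notifier);
        dev_priv->int_lvds_connector = NULL;
}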
+3
drivers/gpu/drm/i915/intel_sdvo.c
··· 2662 2663 bool intel_sdvo_init(struct drm_device *dev, int output_device) 2664 { 2665 struct drm_connector *connector; 2666 struct intel_output *intel_output; 2667 struct intel_sdvo_priv *sdvo_priv; ··· 2709 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); 2710 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2711 "SDVOB/VGA DDC BUS"); 2712 } else { 2713 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); 2714 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2715 "SDVOC/VGA DDC BUS"); 2716 } 2717 2718 if (intel_output->ddc_bus == NULL)
··· 2662 2663 bool intel_sdvo_init(struct drm_device *dev, int output_device) 2664 { 2665 + struct drm_i915_private *dev_priv = dev->dev_private; 2666 struct drm_connector *connector; 2667 struct intel_output *intel_output; 2668 struct intel_sdvo_priv *sdvo_priv; ··· 2708 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS"); 2709 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2710 "SDVOB/VGA DDC BUS"); 2711 + dev_priv->hotplug_supported_mask |= SDVOB_HOTPLUG_INT_STATUS; 2712 } else { 2713 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS"); 2714 sdvo_priv->analog_ddc_bus = intel_i2c_create(dev, GPIOA, 2715 "SDVOC/VGA DDC BUS"); 2716 + dev_priv->hotplug_supported_mask |= SDVOC_HOTPLUG_INT_STATUS; 2717 } 2718 2719 if (intel_output->ddc_bus == NULL)
+2
drivers/gpu/drm/i915/intel_tv.c
··· 1840 drm_connector_attach_property(connector, 1841 dev->mode_config.tv_bottom_margin_property, 1842 tv_priv->margin[TV_MARGIN_BOTTOM]); 1843 out: 1844 drm_sysfs_connector_add(connector); 1845 }
··· 1840 drm_connector_attach_property(connector, 1841 dev->mode_config.tv_bottom_margin_property, 1842 tv_priv->margin[TV_MARGIN_BOTTOM]); 1843 + 1844 + dev_priv->hotplug_supported_mask |= TV_HOTPLUG_INT_STATUS; 1845 out: 1846 drm_sysfs_connector_add(connector); 1847 }
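Taken together, the intel_dp.c, intel_hdmi.c, intel_sdvo.c and intel_tv.c hunks above have each output's init path advertise its hotplug line by OR-ing a *_HOTPLUG_INT_STATUS bit into dev_priv->hotplug_supported_mask (DP_B/C/D reuse the HDMIB/C/D status bits, since those ports share detection pins). Per the "only enable hotplug for detected outputs" commit, interrupt setup can then enable exactly the lines some connector claimed. A hedged sketch of such a consumer follows, assuming *_HOTPLUG_INT_EN bits that pair one-to-one with the status bits; the real translation lives in the driver's IRQ postinstall path, which is not part of this diff.

/* Sketch of a mask consumer; enable-bit names are assumptions. */
static void example_enable_claimed_hotplug(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 hotplug_en = 0;

        if (dev_priv->hotplug_supported_mask & SDVOB_HOTPLUG_INT_STATUS)
                hotplug_en |= SDVOB_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & SDVOC_HOTPLUG_INT_STATUS)
                hotplug_en |= SDVOC_HOTPLUG_INT_EN;
        if (dev_priv->hotplug_supported_mask & TV_HOTPLUG_INT_STATUS)
                hotplug_en |= TV_HOTPLUG_INT_EN;
        /* ...HDMIB/C/D bits handled the same way... */

        I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
}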
+54
include/drm/i915_drm.h
··· 188 #define DRM_I915_GEM_MADVISE 0x26 189 #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 190 #define DRM_I915_OVERLAY_ATTRS 0x28 191 192 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 193 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) ··· 208 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 209 #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 210 #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 211 #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 212 #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 213 #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) ··· 274 #define I915_PARAM_NUM_FENCES_AVAIL 6 275 #define I915_PARAM_HAS_OVERLAY 7 276 #define I915_PARAM_HAS_PAGEFLIPPING 8 277 278 typedef struct drm_i915_getparam { 279 int param; ··· 568 __u32 num_cliprects; 569 /** This is a struct drm_clip_rect *cliprects */ 570 __u64 cliprects_ptr; 571 }; 572 573 struct drm_i915_gem_pin {
··· 188 #define DRM_I915_GEM_MADVISE 0x26 189 #define DRM_I915_OVERLAY_PUT_IMAGE 0x27 190 #define DRM_I915_OVERLAY_ATTRS 0x28 191 + #define DRM_I915_GEM_EXECBUFFER2 0x29 192 193 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 194 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) ··· 207 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 208 #define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init) 209 #define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer) 210 + #define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2) 211 #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 212 #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 213 #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) ··· 272 #define I915_PARAM_NUM_FENCES_AVAIL 6 273 #define I915_PARAM_HAS_OVERLAY 7 274 #define I915_PARAM_HAS_PAGEFLIPPING 8 275 + #define I915_PARAM_HAS_EXECBUF2 9 276 277 typedef struct drm_i915_getparam { 278 int param; ··· 565 __u32 num_cliprects; 566 /** This is a struct drm_clip_rect *cliprects */ 567 __u64 cliprects_ptr; 568 + }; 569 + 570 + struct drm_i915_gem_exec_object2 { 571 + /** 572 + * User's handle for a buffer to be bound into the GTT for this 573 + * operation. 574 + */ 575 + __u32 handle; 576 + 577 + /** Number of relocations to be performed on this buffer */ 578 + __u32 relocation_count; 579 + /** 580 + * Pointer to array of struct drm_i915_gem_relocation_entry containing 581 + * the relocations to be performed in this buffer. 582 + */ 583 + __u64 relocs_ptr; 584 + 585 + /** Required alignment in graphics aperture */ 586 + __u64 alignment; 587 + 588 + /** 589 + * Returned value of the updated offset of the object, for future 590 + * presumed_offset writes. 591 + */ 592 + __u64 offset; 593 + 594 + #define EXEC_OBJECT_NEEDS_FENCE (1<<0) 595 + __u64 flags; 596 + __u64 rsvd1; 597 + __u64 rsvd2; 598 + }; 599 + 600 + struct drm_i915_gem_execbuffer2 { 601 + /** 602 + * List of gem_exec_object2 structs 603 + */ 604 + __u64 buffers_ptr; 605 + __u32 buffer_count; 606 + 607 + /** Offset in the batchbuffer to start execution from. */ 608 + __u32 batch_start_offset; 609 + /** Bytes used in batchbuffer from batch_start_offset */ 610 + __u32 batch_len; 611 + __u32 DR1; 612 + __u32 DR4; 613 + __u32 num_cliprects; 614 + /** This is a struct drm_clip_rect *cliprects */ 615 + __u64 cliprects_ptr; 616 + __u64 flags; /* currently unused */ 617 + __u64 rsvd1; 618 + __u64 rsvd2; 619 }; 620 621 struct drm_i915_gem_pin {
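A userspace sketch of driving the new ioctl declared above: probe I915_PARAM_HAS_EXECBUF2 first, then submit a single batch through struct drm_i915_gem_execbuffer2. The fd and GEM handle are assumed to come from an earlier open/allocation, and by i915 convention the batch buffer is the last object in the buffer list; EXEC_OBJECT_NEEDS_FENCE would be set on objects that need a fence register (e.g. tiled buffers). Error handling and relocation setup are elided.

/* Hedged userspace sketch; fd and batch_handle are assumed valid. */
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int example_execbuf2(int fd, __u32 batch_handle, __u32 batch_len)
{
        struct drm_i915_getparam gp;
        struct drm_i915_gem_exec_object2 obj;
        struct drm_i915_gem_execbuffer2 execbuf;
        int has_execbuf2 = 0;

        memset(&gp, 0, sizeof(gp));
        gp.param = I915_PARAM_HAS_EXECBUF2;
        gp.value = &has_execbuf2;
        if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) || !has_execbuf2)
                return -1;              /* fall back to old execbuffer */

        memset(&obj, 0, sizeof(obj));
        obj.handle = batch_handle;      /* batch goes last in the list */
        obj.flags = 0;                  /* or EXEC_OBJECT_NEEDS_FENCE */

        memset(&execbuf, 0, sizeof(execbuf));
        execbuf.buffers_ptr = (__u64)(unsigned long)&obj;
        execbuf.buffer_count = 1;
        execbuf.batch_start_offset = 0;
        execbuf.batch_len = batch_len;

        return ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}

On kernels without the new ioctl the GETPARAM probe reports 0 (or the ioctl itself fails), so userspace can keep the original execbuffer path as a fallback; the zeroed rsvd1/rsvd2 and flags fields match the "currently unused" markings in the struct above.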