Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
drm/vblank: update recently added vbl interface to be more future proof.
drm radeon: Return -EINVAL on wrong pm sysfs access
drm/radeon/kms: fix hardcoded EDID handling
Revert "drm/i915: Don't save/restore hardware status page address register"
drm/i915: Avoid unmapping pages from a NULL address space
drm/i915: Fix use after free within tracepoint
drm/i915: Restore missing command flush before interrupt on BLT ring
drm/i915: Disable pagefaults along execbuffer relocation fast path
drm/i915: Fix computation of pitch for dumb bo creator
drm/i915: report correct render clock frequencies on SNB
drm/i915/dp: Correct the order of deletion for ghost eDP devices
drm/i915: Fix tiling corruption from pipelined fencing
drm/i915: Re-enable self-refresh
drm/i915: Prevent racy removal of request from client list
drm/i915: skip redundant operations whilst enabling pipes and planes
drm/i915: Remove surplus POSTING_READs before wait_for_vblank
drm/radeon/kms: prefer legacy pll algo for tv-out
drm: check for modesetting on modeset ioctls
drm/kernel: vblank wait on crtc > 1
drm: Fix use-after-free in drm_gem_vm_close()

+251 -146
+51
drivers/gpu/drm/drm_crtc.c
···
1073 1073 uint32_t __user *encoder_id;
1074 1074 struct drm_mode_group *mode_group;
1075 1075
1076 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
1077 + return -EINVAL;
1078 +
1076 1079 mutex_lock(&dev->mode_config.mutex);
1077 1080
1078 1081 /*
···
1247 1244 struct drm_mode_object *obj;
1248 1245 int ret = 0;
1249 1246
1247 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
1248 + return -EINVAL;
1249 +
1250 1250 mutex_lock(&dev->mode_config.mutex);
1251 1251
1252 1252 obj = drm_mode_object_find(dev, crtc_resp->crtc_id,
···
1317 1311 uint32_t __user *prop_ptr;
1318 1312 uint64_t __user *prop_values;
1319 1313 uint32_t __user *encoder_ptr;
1314 +
1315 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
1316 + return -EINVAL;
1320 1317
1321 1318 memset(&u_mode, 0, sizeof(struct drm_mode_modeinfo));
1322 1319
···
1440 1431 struct drm_encoder *encoder;
1441 1432 int ret = 0;
1442 1433
1434 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
1435 + return -EINVAL;
1436 +
1443 1437 mutex_lock(&dev->mode_config.mutex);
1444 1438 obj = drm_mode_object_find(dev, enc_resp->encoder_id,
1445 1439 DRM_MODE_OBJECT_ENCODER);
···
1497 1485 uint32_t __user *set_connectors_ptr;
1498 1486 int ret = 0;
1499 1487 int i;
1488 +
1489 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
1490 + return -EINVAL;
1500 1491
1501 1492 mutex_lock(&dev->mode_config.mutex);
1502 1493 obj = drm_mode_object_find(dev, crtc_req->crtc_id,
···
1618 1603 struct drm_crtc *crtc;
1619 1604 int ret = 0;
1620 1605
1606 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
1607 + return -EINVAL;
1608 +
1621 1609 if (!req->flags) {
1622 1610 DRM_ERROR("no operation set\n");
1623 1611 return -EINVAL;
···
1685 1667 struct drm_framebuffer *fb;
1686 1668 int ret = 0;
1687 1669
1670 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
1671 + return -EINVAL;
1672 +
1688 1673 if ((config->min_width > r->width) || (r->width > config->max_width)) {
1689 1674 DRM_ERROR("mode new framebuffer width not within limits\n");
1690 1675 return -EINVAL;
···
1745 1724 int ret = 0;
1746 1725 int found = 0;
1747 1726
1727 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
1728 + return -EINVAL;
1729 +
1748 1730 mutex_lock(&dev->mode_config.mutex);
1749 1731 obj = drm_mode_object_find(dev, *id, DRM_MODE_OBJECT_FB);
1750 1732 /* TODO check that we realy get a framebuffer back. */
···
1804 1780 struct drm_framebuffer *fb;
1805 1781 int ret = 0;
1806 1782
1783 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
1784 + return -EINVAL;
1785 +
1807 1786 mutex_lock(&dev->mode_config.mutex);
1808 1787 obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
1809 1788 if (!obj) {
···
1839 1812 unsigned flags;
1840 1813 int num_clips;
1841 1814 int ret = 0;
1815 +
1816 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
1817 + return -EINVAL;
1842 1818
1843 1819 mutex_lock(&dev->mode_config.mutex);
1844 1820 obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
···
2026 1996 struct drm_mode_modeinfo *umode = &mode_cmd->mode;
2027 1997 int ret = 0;
2028 1998
1999 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
2000 + return -EINVAL;
2001 +
2029 2002 mutex_lock(&dev->mode_config.mutex);
2030 2003
2031 2004 obj = drm_mode_object_find(dev, mode_cmd->connector_id, DRM_MODE_OBJECT_CONNECTOR);
···
2074 2041 struct drm_display_mode mode;
2075 2042 struct drm_mode_modeinfo *umode = &mode_cmd->mode;
2076 2043 int ret = 0;
2044 +
2045 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
2046 + return -EINVAL;
2077 2047
2078 2048 mutex_lock(&dev->mode_config.mutex);
2079 2049
···
2247 2211 uint64_t __user *values_ptr;
2248 2212 uint32_t __user *blob_length_ptr;
2249 2213
2214 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
2215 + return -EINVAL;
2216 +
2250 2217 mutex_lock(&dev->mode_config.mutex);
2251 2218 obj = drm_mode_object_find(dev, out_resp->prop_id, DRM_MODE_OBJECT_PROPERTY);
2252 2219 if (!obj) {
···
2372 2333 int ret = 0;
2373 2334 void *blob_ptr;
2374 2335
2336 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
2337 + return -EINVAL;
2338 +
2375 2339 mutex_lock(&dev->mode_config.mutex);
2376 2340 obj = drm_mode_object_find(dev, out_resp->blob_id, DRM_MODE_OBJECT_BLOB);
2377 2341 if (!obj) {
···
2434 2392 struct drm_connector *connector;
2435 2393 int ret = -EINVAL;
2436 2394 int i;
2395 +
2396 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
2397 + return -EINVAL;
2437 2398
2438 2399 mutex_lock(&dev->mode_config.mutex);
2439 2400
···
2554 2509 int size;
2555 2510 int ret = 0;
2556 2511
2512 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
2513 + return -EINVAL;
2514 +
2557 2515 mutex_lock(&dev->mode_config.mutex);
2558 2516 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
2559 2517 if (!obj) {
···
2607 2559 void *r_base, *g_base, *b_base;
2608 2560 int size;
2609 2561 int ret = 0;
2562 +
2563 + if (!drm_core_check_feature(dev, DRIVER_MODESET))
2564 + return -EINVAL;
2610 2565
2611 2566 mutex_lock(&dev->mode_config.mutex);
2612 2567 obj = drm_mode_object_find(dev, crtc_lut->crtc_id, DRM_MODE_OBJECT_CRTC);
+3 -2
drivers/gpu/drm/drm_gem.c
···
499 499 void drm_gem_vm_close(struct vm_area_struct *vma)
500 500 {
501 501 struct drm_gem_object *obj = vma->vm_private_data;
502 + struct drm_device *dev = obj->dev;
502 503
503 - mutex_lock(&obj->dev->struct_mutex);
504 + mutex_lock(&dev->struct_mutex);
504 505 drm_vm_close_locked(vma);
505 506 drm_gem_object_unreference(obj);
506 - mutex_unlock(&obj->dev->struct_mutex);
507 + mutex_unlock(&dev->struct_mutex);
507 508 }
508 509 EXPORT_SYMBOL(drm_gem_vm_close);
509 510
+3
drivers/gpu/drm/drm_ioctl.c
···
280 280 if (dev->driver->dumb_create)
281 281 req->value = 1;
282 282 break;
283 + case DRM_CAP_VBLANK_HIGH_CRTC:
284 + req->value = 1;
285 + break;
283 286 default:
284 287 return -EINVAL;
285 288 }
+10 -5
drivers/gpu/drm/drm_irq.c
···
1125 1125 {
1126 1126 union drm_wait_vblank *vblwait = data;
1127 1127 int ret = 0;
1128 - unsigned int flags, seq, crtc;
1128 + unsigned int flags, seq, crtc, high_crtc;
1129 1129
1130 1130 if ((!drm_dev_to_irq(dev)) || (!dev->irq_enabled))
1131 1131 return -EINVAL;
···
1134 1134 return -EINVAL;
1135 1135
1136 1136 if (vblwait->request.type &
1137 - ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK)) {
1137 + ~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
1138 + _DRM_VBLANK_HIGH_CRTC_MASK)) {
1138 1139 DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
1139 1140 vblwait->request.type,
1140 - (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK));
1141 + (_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
1142 + _DRM_VBLANK_HIGH_CRTC_MASK));
1141 1143 return -EINVAL;
1142 1144 }
1143 1145
1144 1146 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
1145 - crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
1146 -
1147 + high_crtc = (vblwait->request.type & _DRM_VBLANK_HIGH_CRTC_MASK);
1148 + if (high_crtc)
1149 + crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT;
1150 + else
1151 + crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
1147 1152 if (crtc >= dev->num_crtcs)
1148 1153 return -EINVAL;
1149 1154
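The drm_ioctl.c and drm_irq.c changes above expose the new capability and decode the extra CRTC bits from the vblank request type. A minimal userspace sketch of how the pair is meant to be used (illustrative only, not part of this merge; the helper name wait_next_vblank is made up, and the drm.h include path may differ depending on where libdrm installs the UAPI header):

#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>    /* or libdrm's copy of the UAPI header */

static int wait_next_vblank(int drm_fd, unsigned int crtc)
{
        struct drm_get_cap cap = { .capability = DRM_CAP_VBLANK_HIGH_CRTC };
        union drm_wait_vblank vbl;
        unsigned int type = _DRM_VBLANK_RELATIVE;

        if (crtc == 1) {
                type |= _DRM_VBLANK_SECONDARY;  /* legacy two-pipe encoding */
        } else if (crtc > 1) {
                /* CRTCs beyond the second need the new cap and bit encoding */
                if (ioctl(drm_fd, DRM_IOCTL_GET_CAP, &cap) != 0 || !cap.value)
                        return -1;              /* kernel predates high-crtc support */
                type |= (crtc << _DRM_VBLANK_HIGH_CRTC_SHIFT) &
                        _DRM_VBLANK_HIGH_CRTC_MASK;
        }

        memset(&vbl, 0, sizeof(vbl));
        vbl.request.type = (enum drm_vblank_seq_type)type;
        vbl.request.sequence = 1;               /* relative: the next vblank */
        return ioctl(drm_fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
}

The kernel side simply reverses the shift (crtc = high_crtc >> _DRM_VBLANK_HIGH_CRTC_SHIFT), so CRTC 0 and 1 keep working with the old flag while larger indices become reachable.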
+4 -4
drivers/gpu/drm/i915/i915_debugfs.c
···
892 892 seq_printf(m, "Render p-state limit: %d\n",
893 893 rp_state_limits & 0xff);
894 894 seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
895 - GEN6_CAGF_SHIFT) * 100);
895 + GEN6_CAGF_SHIFT) * 50);
896 896 seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
897 897 GEN6_CURICONT_MASK);
898 898 seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
···
908 908
909 909 max_freq = (rp_state_cap & 0xff0000) >> 16;
910 910 seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
911 - max_freq * 100);
911 + max_freq * 50);
912 912
913 913 max_freq = (rp_state_cap & 0xff00) >> 8;
914 914 seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
915 - max_freq * 100);
915 + max_freq * 50);
916 916
917 917 max_freq = rp_state_cap & 0xff;
918 918 seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
919 - max_freq * 100);
919 + max_freq * 50);
920 920
921 921 __gen6_gt_force_wake_put(dev_priv);
922 922 } else {
+34 -36
drivers/gpu/drm/i915/i915_gem.c
···
224 224 struct drm_mode_create_dumb *args)
225 225 {
226 226 /* have to work out size/pitch and return them */
227 - args->pitch = ALIGN(args->width & ((args->bpp + 1) / 8), 64);
227 + args->pitch = ALIGN(args->width * ((args->bpp + 7) / 8), 64);
228 228 args->size = args->pitch * args->height;
229 229 return i915_gem_create(file, dev,
230 230 args->size, &args->handle);
···
1356 1356 if (!obj->fault_mappable)
1357 1357 return;
1358 1358
1359 - unmap_mapping_range(obj->base.dev->dev_mapping,
1360 - (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1361 - obj->base.size, 1);
1359 + if (obj->base.dev->dev_mapping)
1360 + unmap_mapping_range(obj->base.dev->dev_mapping,
1361 + (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
1362 + obj->base.size, 1);
1362 1363
1363 1364 obj->fault_mappable = false;
1364 1365 }
···
1797 1796 return;
1798 1797
1799 1798 spin_lock(&file_priv->mm.lock);
1800 - list_del(&request->client_list);
1801 - request->file_priv = NULL;
1799 + if (request->file_priv) {
1800 + list_del(&request->client_list);
1801 + request->file_priv = NULL;
1802 + }
1802 1803 spin_unlock(&file_priv->mm.lock);
1803 1804 }
1804 1805
···
2220 2217 {
2221 2218 int ret;
2222 2219
2220 + if (((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) == 0)
2221 + return 0;
2222 +
2223 2223 trace_i915_gem_ring_flush(ring, invalidate_domains, flush_domains);
2224 2224
2225 2225 ret = ring->flush(ring, invalidate_domains, flush_domains);
2226 2226 if (ret)
2227 2227 return ret;
2228 2228
2229 - i915_gem_process_flushing_list(ring, flush_domains);
2229 + if (flush_domains & I915_GEM_GPU_DOMAINS)
2230 + i915_gem_process_flushing_list(ring, flush_domains);
2231 +
2230 2232 return 0;
2231 2233 }
2232 2234
···
2587 2579 reg = &dev_priv->fence_regs[obj->fence_reg];
2588 2580 list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
2589 2581
2590 - if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2591 - pipelined = NULL;
2582 + if (obj->tiling_changed) {
2583 + ret = i915_gem_object_flush_fence(obj, pipelined);
2584 + if (ret)
2585 + return ret;
2586 +
2587 + if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2588 + pipelined = NULL;
2589 +
2590 + if (pipelined) {
2591 + reg->setup_seqno =
2592 + i915_gem_next_request_seqno(pipelined);
2593 + obj->last_fenced_seqno = reg->setup_seqno;
2594 + obj->last_fenced_ring = pipelined;
2595 + }
2596 +
2597 + goto update;
2598 + }
2592 2599
2593 2600 if (!pipelined) {
2594 2601 if (reg->setup_seqno) {
···
2622 2599 ret = i915_gem_object_flush_fence(obj, pipelined);
2623 2600 if (ret)
2624 2601 return ret;
2625 - } else if (obj->tiling_changed) {
2626 - if (obj->fenced_gpu_access) {
2627 - if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
2628 - ret = i915_gem_flush_ring(obj->ring,
2629 - 0, obj->base.write_domain);
2630 - if (ret)
2631 - return ret;
2632 - }
2633 -
2634 - obj->fenced_gpu_access = false;
2635 - }
2636 - }
2637 -
2638 - if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
2639 - pipelined = NULL;
2640 - BUG_ON(!pipelined && reg->setup_seqno);
2641 -
2642 - if (obj->tiling_changed) {
2643 - if (pipelined) {
2644 - reg->setup_seqno =
2645 - i915_gem_next_request_seqno(pipelined);
2646 - obj->last_fenced_seqno = reg->setup_seqno;
2647 - obj->last_fenced_ring = pipelined;
2648 - }
2649 - goto update;
2650 2602 }
2651 2603
2652 2604 return 0;
···
3604 3606 return;
3605 3607 }
3606 3608
3609 + trace_i915_gem_object_destroy(obj);
3610 +
3607 3611 if (obj->base.map_list.map)
3608 3612 i915_gem_free_mmap_offset(obj);
3609 3613
···
3615 3615 kfree(obj->page_cpu_valid);
3616 3616 kfree(obj->bit_17);
3617 3617 kfree(obj);
3618 -
3619 - trace_i915_gem_object_destroy(obj);
3620 3618 }
3621 3619
3622 3620 void i915_gem_free_object(struct drm_gem_object *gem_obj)
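The first i915_gem.c hunk is the dumb-buffer pitch fix: the old expression bitwise-ANDed the width with the bytes-per-pixel value instead of multiplying by it. A small worked example of the corrected math (illustrative only; ALIGN() is written out here with its usual power-of-two kernel semantics):

#define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))  /* power-of-two alignment */

/*
 * Example: a 1366x768 dumb buffer at 32 bpp.
 *   bytes per pixel = (32 + 7) / 8    = 4
 *   unaligned pitch = 1366 * 4        = 5464
 *   pitch           = ALIGN(5464, 64) = 5504
 *   size            = 5504 * 768      = 4227072 bytes
 */
unsigned int pitch = ALIGN(1366 * ((32 + 7) / 8), 64);  /* 5504 */
unsigned int size  = pitch * 768;                        /* 4227072 */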
+16 -3
drivers/gpu/drm/i915/i915_gem_execbuffer.c
···
367 367 uint32_t __iomem *reloc_entry;
368 368 void __iomem *reloc_page;
369 369
370 + /* We can't wait for rendering with pagefaults disabled */
371 + if (obj->active && in_atomic())
372 + return -EFAULT;
373 +
370 374 ret = i915_gem_object_set_to_gtt_domain(obj, 1);
371 375 if (ret)
372 376 return ret;
···
444 440 struct list_head *objects)
445 441 {
446 442 struct drm_i915_gem_object *obj;
447 - int ret;
443 + int ret = 0;
448 444
445 + /* This is the fast path and we cannot handle a pagefault whilst
446 + * holding the struct mutex lest the user pass in the relocations
447 + * contained within a mmaped bo. For in such a case we, the page
448 + * fault handler would call i915_gem_fault() and we would try to
449 + * acquire the struct mutex again. Obviously this is bad and so
450 + * lockdep complains vehemently.
451 + */
452 + pagefault_disable();
449 453 list_for_each_entry(obj, objects, exec_list) {
450 454 ret = i915_gem_execbuffer_relocate_object(obj, eb);
451 455 if (ret)
452 - return ret;
456 + break;
453 457 }
458 + pagefault_enable();
454 459
455 - return 0;
460 + return ret;
456 461 }
457 462
458 463 static int
+18 -21
drivers/gpu/drm/i915/intel_display.c
···
1516 1516
1517 1517 reg = PIPECONF(pipe);
1518 1518 val = I915_READ(reg);
1519 - val |= PIPECONF_ENABLE;
1520 - I915_WRITE(reg, val);
1521 - POSTING_READ(reg);
1519 + if (val & PIPECONF_ENABLE)
1520 + return;
1521 +
1522 + I915_WRITE(reg, val | PIPECONF_ENABLE);
1522 1523 intel_wait_for_vblank(dev_priv->dev, pipe);
1523 1524 }
1524 1525
···
1553 1552
1554 1553 reg = PIPECONF(pipe);
1555 1554 val = I915_READ(reg);
1556 - val &= ~PIPECONF_ENABLE;
1557 - I915_WRITE(reg, val);
1558 - POSTING_READ(reg);
1555 + if ((val & PIPECONF_ENABLE) == 0)
1556 + return;
1557 +
1558 + I915_WRITE(reg, val & ~PIPECONF_ENABLE);
1559 1559 intel_wait_for_pipe_off(dev_priv->dev, pipe);
1560 1560 }
1561 1561
···
1579 1577
1580 1578 reg = DSPCNTR(plane);
1581 1579 val = I915_READ(reg);
1582 - val |= DISPLAY_PLANE_ENABLE;
1583 - I915_WRITE(reg, val);
1584 - POSTING_READ(reg);
1580 + if (val & DISPLAY_PLANE_ENABLE)
1581 + return;
1582 +
1583 + I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
1585 1584 intel_wait_for_vblank(dev_priv->dev, pipe);
1586 1585 }
1587 1586
···
1613 1610
1614 1611 reg = DSPCNTR(plane);
1615 1612 val = I915_READ(reg);
1616 - val &= ~DISPLAY_PLANE_ENABLE;
1617 - I915_WRITE(reg, val);
1618 - POSTING_READ(reg);
1613 + if ((val & DISPLAY_PLANE_ENABLE) == 0)
1614 + return;
1615 +
1616 + I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
1619 1617 intel_flush_display_plane(dev_priv, plane);
1620 1618 intel_wait_for_vblank(dev_priv->dev, pipe);
1621 1619 }
···
1773 1769 return;
1774 1770
1775 1771 I915_WRITE(DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1776 - POSTING_READ(DPFC_CONTROL);
1777 1772 intel_wait_for_vblank(dev, intel_crtc->pipe);
1778 1773 }
···
1864 1861 return;
1865 1862
1866 1863 I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl & ~DPFC_CTL_EN);
1867 - POSTING_READ(ILK_DPFC_CONTROL);
1868 1864 intel_wait_for_vblank(dev, intel_crtc->pipe);
1869 1865 }
1870 1866
···
3885 3883 display, cursor);
3886 3884 }
3887 3885
3888 - static inline bool single_plane_enabled(unsigned int mask)
3889 - {
3890 - return mask && (mask & -mask) == 0;
3891 - }
3886 + #define single_plane_enabled(mask) is_power_of_2(mask)
3892 3887
3893 3888 static void g4x_update_wm(struct drm_device *dev)
3894 3889 {
···
5776 5777
5777 5778 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
5778 5779 I915_WRITE(dpll_reg, dpll);
5779 - POSTING_READ(dpll_reg);
5780 5780 intel_wait_for_vblank(dev, pipe);
5781 5781
5782 5782 dpll = I915_READ(dpll_reg);
···
5819 5821
5820 5822 dpll |= DISPLAY_RATE_SELECT_FPA1;
5821 5823 I915_WRITE(dpll_reg, dpll);
5822 - dpll = I915_READ(dpll_reg);
5823 5824 intel_wait_for_vblank(dev, pipe);
5824 5825 dpll = I915_READ(dpll_reg);
5825 5826 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
···
6930 6933 DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
6931 6934 if (pcu_mbox & (1<<31)) { /* OC supported */
6932 6935 max_freq = pcu_mbox & 0xff;
6933 - DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 100);
6936 + DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50);
6934 6937
6935 6938 /* In units of 100MHz */
+2 -2
drivers/gpu/drm/i915/intel_dp.c
···
1957 1957 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
1958 1958 } else {
1959 1959 /* if this fails, presume the device is a ghost */
1960 - DRM_ERROR("failed to retrieve link info\n");
1961 - intel_dp_destroy(&intel_connector->base);
1960 + DRM_INFO("failed to retrieve link info, disabling eDP\n");
1962 1961 intel_dp_encoder_destroy(&intel_dp->base.base);
1962 + intel_dp_destroy(&intel_connector->base);
1963 1963 return;
1964 1964 }
1965 1965 }
+51 -62
drivers/gpu/drm/i915/intel_ringbuffer.c
···
65 65 u32 cmd;
66 66 int ret;
67 67
68 - if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
68 + /*
69 + * read/write caches:
70 + *
71 + * I915_GEM_DOMAIN_RENDER is always invalidated, but is
72 + * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
73 + * also flushed at 2d versus 3d pipeline switches.
74 + *
75 + * read-only caches:
76 + *
77 + * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
78 + * MI_READ_FLUSH is set, and is always flushed on 965.
79 + *
80 + * I915_GEM_DOMAIN_COMMAND may not exist?
81 + *
82 + * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
83 + * invalidated when MI_EXE_FLUSH is set.
84 + *
85 + * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
86 + * invalidated with every MI_FLUSH.
87 + *
88 + * TLBs:
89 + *
90 + * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
91 + * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
92 + * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
93 + * are flushed at any MI_FLUSH.
94 + */
95 +
96 + cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
97 + if ((invalidate_domains|flush_domains) &
98 + I915_GEM_DOMAIN_RENDER)
99 + cmd &= ~MI_NO_WRITE_FLUSH;
100 + if (INTEL_INFO(dev)->gen < 4) {
69 101 /*
70 - * read/write caches:
71 - *
72 - * I915_GEM_DOMAIN_RENDER is always invalidated, but is
73 - * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
74 - * also flushed at 2d versus 3d pipeline switches.
75 - *
76 - * read-only caches:
77 - *
78 - * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
79 - * MI_READ_FLUSH is set, and is always flushed on 965.
80 - *
81 - * I915_GEM_DOMAIN_COMMAND may not exist?
82 - *
83 - * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
84 - * invalidated when MI_EXE_FLUSH is set.
85 - *
86 - * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
87 - * invalidated with every MI_FLUSH.
88 - *
89 - * TLBs:
90 - *
91 - * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
92 - * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and
93 - * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
94 - * are flushed at any MI_FLUSH.
102 + * On the 965, the sampler cache always gets flushed
103 + * and this bit is reserved.
95 104 */
96 -
97 - cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
98 - if ((invalidate_domains|flush_domains) &
99 - I915_GEM_DOMAIN_RENDER)
100 - cmd &= ~MI_NO_WRITE_FLUSH;
101 - if (INTEL_INFO(dev)->gen < 4) {
102 - /*
103 - * On the 965, the sampler cache always gets flushed
104 - * and this bit is reserved.
105 - */
106 - if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
107 - cmd |= MI_READ_FLUSH;
108 - }
109 - if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
110 - cmd |= MI_EXE_FLUSH;
111 -
112 - if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
113 - (IS_G4X(dev) || IS_GEN5(dev)))
114 - cmd |= MI_INVALIDATE_ISP;
115 -
116 - ret = intel_ring_begin(ring, 2);
117 - if (ret)
118 - return ret;
119 -
120 - intel_ring_emit(ring, cmd);
121 - intel_ring_emit(ring, MI_NOOP);
122 - intel_ring_advance(ring);
105 + if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
106 + cmd |= MI_READ_FLUSH;
123 107 }
108 + if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
109 + cmd |= MI_EXE_FLUSH;
110 +
111 + if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
112 + (IS_G4X(dev) || IS_GEN5(dev)))
113 + cmd |= MI_INVALIDATE_ISP;
114 +
115 + ret = intel_ring_begin(ring, 2);
116 + if (ret)
117 + return ret;
118 +
119 + intel_ring_emit(ring, cmd);
120 + intel_ring_emit(ring, MI_NOOP);
121 + intel_ring_advance(ring);
124 122
125 123 return 0;
126 124 }
···
565 567 u32 flush_domains)
566 568 {
567 569 int ret;
568 -
569 - if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
570 - return 0;
571 570
572 571 ret = intel_ring_begin(ring, 2);
573 572 if (ret)
···
1051 1056 uint32_t cmd;
1052 1057 int ret;
1053 1058
1054 - if (((invalidate | flush) & I915_GEM_GPU_DOMAINS) == 0)
1055 - return 0;
1056 -
1057 1059 ret = intel_ring_begin(ring, 4);
1058 1060 if (ret)
1059 1061 return ret;
···
1221 1229 {
1222 1230 uint32_t cmd;
1223 1231 int ret;
1224 -
1225 - if (((invalidate | flush) & I915_GEM_DOMAIN_RENDER) == 0)
1226 - return 0;
1227 1232
1228 1233 ret = blt_ring_begin(ring, 4);
1229 1234 if (ret)
+5 -1
drivers/gpu/drm/radeon/atombios_crtc.c
···
957 957 /* adjust pixel clock as needed */
958 958 adjusted_clock = atombios_adjust_pll(crtc, mode, pll, ss_enabled, &ss);
959 959
960 - if (ASIC_IS_AVIVO(rdev))
960 + if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT))
961 + /* TV seems to prefer the legacy algo on some boards */
962 + radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
963 + &ref_div, &post_div);
964 + else if (ASIC_IS_AVIVO(rdev))
961 965 radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
962 966 &ref_div, &post_div);
963 967 else
+16 -5
drivers/gpu/drm/radeon/radeon_combios.c
···
448 448
449 449 bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
450 450 {
451 - int edid_info;
451 + int edid_info, size;
452 452 struct edid *edid;
453 453 unsigned char *raw;
454 454 edid_info = combios_get_table_offset(rdev->ddev, COMBIOS_HARDCODED_EDID_TABLE);
···
456 456 return false;
457 457
458 458 raw = rdev->bios + edid_info;
459 - edid = kmalloc(EDID_LENGTH * (raw[0x7e] + 1), GFP_KERNEL);
459 + size = EDID_LENGTH * (raw[0x7e] + 1);
460 + edid = kmalloc(size, GFP_KERNEL);
460 461 if (edid == NULL)
461 462 return false;
462 463
463 - memcpy((unsigned char *)edid, raw, EDID_LENGTH * (raw[0x7e] + 1));
464 + memcpy((unsigned char *)edid, raw, size);
464 465
465 466 if (!drm_edid_is_valid(edid)) {
466 467 kfree(edid);
···
469 468 }
470 469
471 470 rdev->mode_info.bios_hardcoded_edid = edid;
471 + rdev->mode_info.bios_hardcoded_edid_size = size;
472 472 return true;
473 473 }
474 474
···
477 475 struct edid *
478 476 radeon_bios_get_hardcoded_edid(struct radeon_device *rdev)
479 477 {
480 - if (rdev->mode_info.bios_hardcoded_edid)
481 - return rdev->mode_info.bios_hardcoded_edid;
478 + struct edid *edid;
479 +
480 + if (rdev->mode_info.bios_hardcoded_edid) {
481 + edid = kmalloc(rdev->mode_info.bios_hardcoded_edid_size, GFP_KERNEL);
482 + if (edid) {
483 + memcpy((unsigned char *)edid,
484 + (unsigned char *)rdev->mode_info.bios_hardcoded_edid,
485 + rdev->mode_info.bios_hardcoded_edid_size);
486 + return edid;
487 + }
488 + }
482 489 return NULL;
483 490 }
484 491
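With the radeon_combios.c change above, radeon_bios_get_hardcoded_edid() now returns a freshly kmalloc'd copy of the hardcoded EDID rather than a pointer into rdev->mode_info, so the caller owns the buffer. A hypothetical caller sketch (usage illustrative, not from this merge):

/*
 * Hypothetical caller: the returned EDID is a private copy, so it can be
 * attached to a connector or freed without touching the cached BIOS copy.
 */
struct edid *edid = radeon_bios_get_hardcoded_edid(rdev);

if (edid) {
        /* ... hand the copy to the connector code, or parse it ... */
        kfree(edid);    /* the caller releases its own copy when done */
}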
+28 -2
drivers/gpu/drm/radeon/radeon_connectors.c
···
633 633 static enum drm_connector_status
634 634 radeon_vga_detect(struct drm_connector *connector, bool force)
635 635 {
636 + struct drm_device *dev = connector->dev;
637 + struct radeon_device *rdev = dev->dev_private;
636 638 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
637 639 struct drm_encoder *encoder;
638 640 struct drm_encoder_helper_funcs *encoder_funcs;
···
685 683
686 684 if (ret == connector_status_connected)
687 685 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
686 +
687 + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
688 + * vbios to deal with KVMs. If we have one and are not able to detect a monitor
689 + * by other means, assume the CRT is connected and use that EDID.
690 + */
691 + if ((!rdev->is_atom_bios) &&
692 + (ret == connector_status_disconnected) &&
693 + rdev->mode_info.bios_hardcoded_edid_size) {
694 + ret = connector_status_connected;
695 + }
696 +
688 697 radeon_connector_update_scratch_regs(connector, ret);
689 698 return ret;
690 699 }
···
807 794 static enum drm_connector_status
808 795 radeon_dvi_detect(struct drm_connector *connector, bool force)
809 796 {
797 + struct drm_device *dev = connector->dev;
798 + struct radeon_device *rdev = dev->dev_private;
810 799 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
811 800 struct drm_encoder *encoder = NULL;
812 801 struct drm_encoder_helper_funcs *encoder_funcs;
···
848 833 * you don't really know what's connected to which port as both are digital.
849 834 */
850 835 if (radeon_connector->shared_ddc && (ret == connector_status_connected)) {
851 - struct drm_device *dev = connector->dev;
852 - struct radeon_device *rdev = dev->dev_private;
853 836 struct drm_connector *list_connector;
854 837 struct radeon_connector *list_radeon_connector;
855 838 list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
···
910 897 if ((ret == connector_status_connected) && (radeon_connector->use_digital == false) &&
911 898 encoder) {
912 899 ret = radeon_connector_analog_encoder_conflict_solve(connector, encoder, ret, true);
900 + }
901 +
902 + /* RN50 and some RV100 asics in servers often have a hardcoded EDID in the
903 + * vbios to deal with KVMs. If we have one and are not able to detect a monitor
904 + * by other means, assume the DFP is connected and use that EDID. In most
905 + * cases the DVI port is actually a virtual KVM port connected to the service
906 + * processor.
907 + */
908 + if ((!rdev->is_atom_bios) &&
909 + (ret == connector_status_disconnected) &&
910 + rdev->mode_info.bios_hardcoded_edid_size) {
911 + radeon_connector->use_digital = true;
912 + ret = connector_status_connected;
913 913 }
914 914
915 915 out:
+1
drivers/gpu/drm/radeon/radeon_mode.h
···
239 239 struct drm_property *underscan_vborder_property;
240 240 /* hardcoded DFP edid from BIOS */
241 241 struct edid *bios_hardcoded_edid;
242 + int bios_hardcoded_edid_size;
242 243
243 244 /* pointer to fbdev info structure */
244 245 struct radeon_fbdev *rfbdev;
+5 -3
drivers/gpu/drm/radeon/radeon_pm.c
···
365 365 else if (strncmp("high", buf, strlen("high")) == 0)
366 366 rdev->pm.profile = PM_PROFILE_HIGH;
367 367 else {
368 - DRM_ERROR("invalid power profile!\n");
368 + count = -EINVAL;
369 369 goto fail;
370 370 }
371 371 radeon_pm_update_profile(rdev);
372 372 radeon_pm_set_clocks(rdev);
373 - }
373 + } else
374 + count = -EINVAL;
375 +
374 376 fail:
375 377 mutex_unlock(&rdev->pm.mutex);
376 378
···
415 413 mutex_unlock(&rdev->pm.mutex);
416 414 cancel_delayed_work_sync(&rdev->pm.dynpm_idle_work);
417 415 } else {
418 - DRM_ERROR("invalid power method!\n");
416 + count = -EINVAL;
419 417 goto fail;
420 418 }
421 419 radeon_pm_compute_clocks(rdev);
+4
include/drm/drm.h
···
463 463 enum drm_vblank_seq_type {
464 464 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
465 465 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
466 + /* bits 1-6 are reserved for high crtcs */
467 + _DRM_VBLANK_HIGH_CRTC_MASK = 0x0000003e,
466 468 _DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
467 469 _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
468 470 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
469 471 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
470 472 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
471 473 };
474 + #define _DRM_VBLANK_HIGH_CRTC_SHIFT 1
472 475
473 476 #define _DRM_VBLANK_TYPES_MASK (_DRM_VBLANK_ABSOLUTE | _DRM_VBLANK_RELATIVE)
474 477 #define _DRM_VBLANK_FLAGS_MASK (_DRM_VBLANK_EVENT | _DRM_VBLANK_SIGNAL | \
···
756 753 };
757 754
758 755 #define DRM_CAP_DUMB_BUFFER 0x1
756 + #define DRM_CAP_VBLANK_HIGH_CRTC 0x2
759 757
760 758 /* typedef area */
761 759 #ifndef __KERNEL__