Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (31 commits)
drm: integer overflow in drm_mode_dirtyfb_ioctl()
drivers/gpu/vga/vgaarb.c: add missing kfree
drm/radeon/kms/atom: unify i2c gpio table handling
drm/radeon/kms: fix up gpio i2c mask bits for r4xx for real
ttm: Don't return the bo reserved on error path
drm/radeon/kms: add a CS ioctl flag not to rewrite tiling flags in the CS
drm/i915: Fix inconsistent backlight level during disabled
drm, i915: Fix memory leak in i915_gem_busy_ioctl().
drm/i915: Use DPCD value for max DP lanes.
drm/i915: Initiate DP link training only on the lanes we'll be using
drm/i915: Remove trailing white space
drm/i915: Try harder during dp pattern 1 link training
drm/i915: Make DP prepare/commit consistent with DP dpms
drm/i915: Let panel power sequencing hardware do its job
drm/i915: Treat PCH eDP like DP in most places
drm/i915: Remove link_status field from intel_dp structure
drm/i915: Move common PCH_PP_CONTROL setup to ironlake_get_pp_control
drm/i915: Module parameters using '-1' as default must be signed type
drm/i915: Turn on another required clock gating bit on gen6.
drm/i915: Turn on a required 3D clock gating bit on Sandybridge.
...

+601 -433
+4
drivers/gpu/drm/drm_crtc.c
···
 	}
 
 	if (num_clips && clips_ptr) {
+		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+			ret = -EINVAL;
+			goto out_err1;
+		}
 		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
 		if (!clips) {
 			ret = -ENOMEM;
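The drm_crtc.c fix above is a classic unchecked-multiplication overflow: num_clips arrives from userspace (signed, hence the `< 0` check too), and on 32-bit kernels num_clips * sizeof(*clips) can wrap so kzalloc() returns a buffer far smaller than the copy loop that follows expects. A minimal userspace sketch of the failure mode and the fix; clip_rect and MAX_CLIPS are local stand-ins for drm_clip_rect and DRM_MODE_FB_DIRTY_MAX_CLIPS, not the kernel definitions:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct clip_rect { uint16_t x1, y1, x2, y2; };	/* stand-in, 8 bytes */
#define MAX_CLIPS 256				/* stand-in bound */

/* Model of the fixed path: reject a hostile count before any size
 * arithmetic can wrap and under-allocate. (calloc also checks the
 * multiplication itself, unlike a bare malloc(n * size).) */
static struct clip_rect *alloc_clips(uint32_t num_clips)
{
	if (num_clips > MAX_CLIPS)
		return NULL;	/* -EINVAL in the kernel */
	return calloc(num_clips, sizeof(struct clip_rect));
}

int main(void)
{
	/* With 32-bit size_t, 0x20000000 * 8 == 0 (mod 2^32): a zero-byte
	 * buffer would come back while the copy still trusts num_clips. */
	uint32_t hostile = 0x20000000;

	printf("wrapped size: %u\n",
	       (uint32_t)(hostile * sizeof(struct clip_rect)));
	printf("alloc_clips(hostile) = %p\n", (void *)alloc_clips(hostile));
	return 0;
}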
+51 -6
drivers/gpu/drm/i915/i915_debugfs.c
···
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	int ret;
 
 	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 	if (ring->size == 0)
 		return 0;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "Ring %s:\n", ring->name);
 	seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
···
 	}
 	seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
 	seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
+
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
···
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u16 crstanddelay = I915_READ16(CRSTANDVID);
+	u16 crstanddelay;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	crstanddelay = I915_READ16(CRSTANDVID);
+
+	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
 
···
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 delayfreq;
-	int i;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	for (i = 0; i < 16; i++) {
 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
 		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
 			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 	}
+
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
···
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 inttoext;
-	int i;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	for (i = 1; i <= 32; i++) {
 		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
 		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
 	}
+
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
···
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 rgvmodectl = I915_READ(MEMMODECTL);
-	u32 rstdbyctl = I915_READ(RSTDBYCTL);
-	u16 crstandvid = I915_READ16(CRSTANDVID);
+	u32 rgvmodectl, rstdbyctl;
+	u16 crstandvid;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	rgvmodectl = I915_READ(MEMMODECTL);
+	rstdbyctl = I915_READ(RSTDBYCTL);
+	crstandvid = I915_READ16(CRSTANDVID);
+
+	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
 		   "yes" : "no");
···
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
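Every i915_debugfs.c hunk above applies the same pattern: take struct_mutex via mutex_lock_interruptible() before touching hardware registers, propagate a non-zero return (the wait was interrupted by a signal) straight out of the show function, and unlock after the reads. A compilable userspace model of just that control flow; the mutex primitives, the -EINTR path, and I915_READ are stubbed locally:

#include <stdio.h>

struct mutex { int locked; };

/* Stub: the real call can sleep and returns -EINTR on a signal. */
static int mutex_lock_interruptible(struct mutex *m) { m->locked = 1; return 0; }
static void mutex_unlock(struct mutex *m) { m->locked = 0; }
static unsigned int read_hw_register(void) { return 0x1234; } /* I915_READ stand-in */

static struct mutex struct_mutex;

static int show_delayfreq(void)
{
	int ret;

	ret = mutex_lock_interruptible(&struct_mutex);
	if (ret)
		return ret;	/* bubble the error up to the debugfs read() */

	for (int i = 0; i < 16; i++)
		printf("P%02dVIDFREQ: 0x%08x\n", i, read_hw_register());

	mutex_unlock(&struct_mutex);
	return 0;
}

int main(void) { return show_delayfreq(); }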
+3 -3
drivers/gpu/drm/i915/i915_drv.c
···
 MODULE_PARM_DESC(i915_enable_rc6,
 		"Enable power-saving render C-state 6 (default: true)");
 
-unsigned int i915_enable_fbc __read_mostly = -1;
+int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
 MODULE_PARM_DESC(i915_enable_fbc,
 		"Enable frame buffer compression for power savings "
···
 		"Use panel (LVDS/eDP) downclocking for power savings "
 		"(default: false)");
 
-unsigned int i915_panel_use_ssc __read_mostly = -1;
+int i915_panel_use_ssc __read_mostly = -1;
 module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
 MODULE_PARM_DESC(lvds_use_ssc,
 		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
···
 extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {		\
-	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
+	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
 	.class_mask = 0xff0000,			\
 	.vendor = 0x8086,			\
 	.device = id,				\
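The type changes matter because these parameters use -1 as a "not set, pick the per-chip default" sentinel: stored in an unsigned int, -1 becomes a huge positive value, so any `param < 0` test in the driver is dead code (and the declaration also disagrees with the `int` type given to module_param_named). A self-contained demonstration; the variable names mirror the parameters above:

#include <stdio.h>

unsigned int bad_enable_fbc = -1;	/* old declaration: -1 stores as 4294967295 */
int          good_enable_fbc = -1;	/* fixed declaration */

int main(void)
{
	/* The driver wants "-1 means unset, fall back to a default": */
	printf("unsigned: param < 0 is %d (default path never taken)\n",
	       bad_enable_fbc < 0);	/* always 0: the comparison is unsigned */
	printf("signed:   param < 0 is %d\n", good_enable_fbc < 0);
	return 0;
}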
+10 -9
drivers/gpu/drm/i915/i915_drv.h
···
 	struct _drm_i915_sarea *sarea_priv;
 };
 #define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 16
+/* 16 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 5
 
 struct drm_i915_fence_reg {
 	struct list_head lru_list;
···
 	u32 instdone1;
 	u32 seqno;
 	u64 bbaddr;
-	u64 fence[16];
+	u64 fence[I915_MAX_NUM_FENCES];
 	struct timeval time;
 	struct drm_i915_error_object {
 		int page_count;
···
 	u32 gtt_offset;
 	u32 read_domains;
 	u32 write_domain;
-	s32 fence_reg:5;
+	s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
 	s32 pinned:2;
 	u32 tiling:2;
 	u32 dirty:1;
···
 	struct notifier_block lid_notifier;
 
 	int crt_ddc_pin;
-	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
···
 	u8 saveAR[21];
 	u8 saveDACMASK;
 	u8 saveCR[37];
-	uint64_t saveFENCE[16];
+	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
 	u32 saveCURACNTR;
 	u32 saveCURAPOS;
 	u32 saveCURABASE;
···
 	 * Fence register bits (if any) for this object. Will be set
 	 * as needed when mapped into the GTT.
 	 * Protected by dev->struct_mutex.
-	 *
-	 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
 	 */
-	signed int fence_reg:5;
+	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
 
 	/**
 	 * Advice: are the backing pages purgeable?
···
 extern unsigned int i915_powersave __read_mostly;
 extern unsigned int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
-extern unsigned int i915_panel_use_ssc __read_mostly;
+extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
 extern unsigned int i915_enable_rc6 __read_mostly;
-extern unsigned int i915_enable_fbc __read_mostly;
+extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
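The new constants also encode a sizing rule the deleted comment got wrong ("4 bits for 16 fences + sign"): a signed bitfield must hold the indexes 0..15 plus the -1 I915_FENCE_REG_NONE sentinel, and a 4-bit signed field only reaches 7, so 5 bits are required. A standalone check, with the struct reduced to the one field:

#include <stdio.h>

#define I915_FENCE_REG_NONE -1
#define I915_MAX_NUM_FENCES 16
/* 16 fences + sign bit for FENCE_REG_NONE */
#define I915_MAX_NUM_FENCE_BITS 5

struct obj {
	signed int fence_reg : I915_MAX_NUM_FENCE_BITS;	/* range -16..15 */
};

int main(void)
{
	struct obj o;

	o.fence_reg = I915_MAX_NUM_FENCES - 1;	/* 15: highest index fits */
	printf("max index: %d\n", o.fence_reg);

	o.fence_reg = I915_FENCE_REG_NONE;	/* -1 sentinel also fits */
	printf("sentinel:  %d\n", o.fence_reg);
	return 0;
}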
+7 -5
drivers/gpu/drm/i915/i915_gem.c
···
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	for (i = 0; i < 16; i++) {
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 		struct drm_i915_gem_object *obj = reg->obj;
···
 		 * so emit a request to do so.
 		 */
 		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request)
+		if (request) {
 			ret = i915_add_request(obj->ring, NULL, request);
-		else
+			if (ret)
+				kfree(request);
+		} else
 			ret = -ENOMEM;
 	}
 
···
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(dev) || IS_GEN7(dev)) {
 		/* On Gen6, we can have the GPU use the LLC (the CPU
 		 * cache) for about a 10% performance improvement
 		 * compared to uncached. Graphics requests other than
···
 	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		init_ring_lists(&dev_priv->ring[i]);
-	for (i = 0; i < 16; i++)
+	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
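The middle i915_gem.c hunk closes a leak: when i915_add_request() fails, the freshly allocated request was previously dropped on the floor. The ownership rule — the callee owns the allocation only on success — in a runnable userspace sketch; add_request() is a stub wired to fail, not the driver function:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct request { int seqno; };

static int should_fail;

/* Stub for i915_add_request(): takes ownership of req only on success. */
static int add_request(struct request *req)
{
	if (should_fail)
		return -EIO;
	free(req);		/* normally queued and freed on retire */
	return 0;
}

static int flush_ring(void)
{
	struct request *request = calloc(1, sizeof(*request));
	int ret;

	if (request) {
		ret = add_request(request);
		if (ret)
			free(request);	/* the fix: don't leak on failure */
	} else
		ret = -ENOMEM;
	return ret;
}

int main(void)
{
	should_fail = 1;
	printf("flush_ring() = %d\n", flush_ring());	/* -5, no leak */
	return 0;
}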
+1
drivers/gpu/drm/i915/i915_irq.c
···
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 7:
 	case 6:
 		for (i = 0; i < 16; i++)
 			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+17 -4
drivers/gpu/drm/i915/i915_reg.h
···
  */
 #define   PP_READY		(1 << 30)
 #define   PP_SEQUENCE_NONE	(0 << 28)
-#define   PP_SEQUENCE_ON	(1 << 28)
-#define   PP_SEQUENCE_OFF	(2 << 28)
-#define   PP_SEQUENCE_MASK	0x30000000
+#define   PP_SEQUENCE_POWER_UP	(1 << 28)
+#define   PP_SEQUENCE_POWER_DOWN (2 << 28)
+#define   PP_SEQUENCE_MASK	(3 << 28)
+#define   PP_SEQUENCE_SHIFT	28
 #define   PP_CYCLE_DELAY_ACTIVE	(1 << 27)
-#define   PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
 #define   PP_SEQUENCE_STATE_MASK 0x0000000f
+#define   PP_SEQUENCE_STATE_OFF_IDLE	(0x0 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_1	(0x1 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_2	(0x2 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_3	(0x3 << 0)
+#define   PP_SEQUENCE_STATE_ON_IDLE	(0x8 << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_0	(0x9 << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_2	(0xa << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_3	(0xb << 0)
+#define   PP_SEQUENCE_STATE_RESET	(0xf << 0)
 #define PP_CONTROL	0x61204
 #define   POWER_TARGET_ON	(1 << 0)
 #define PP_ON_DELAYS	0x61208
···
 
 #define GT_FIFO_FREE_ENTRIES			0x120008
 #define GT_FIFO_NUM_RESERVED_ENTRIES		20
+
+#define GEN6_UCGCTL2				0x9404
+# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE	(1 << 12)
+# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE	(1 << 11)
 
 #define GEN6_RPNSWREQ				0xA008
 #define GEN6_TURBO_DISABLE			(1<<31)
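With PP_SEQUENCE_MASK widened to (3 << 28) and given an explicit shift, the whole panel power sequencer can be decoded from a single PP_STATUS read. A hedged sketch of the decode, reusing the macros defined above; PP_ON is assumed here to be bit 31, and the status word is a made-up sample:

#include <stdio.h>

#define PP_ON				(1u << 31)	/* assumed bit position */
#define PP_SEQUENCE_NONE		(0u << 28)
#define PP_SEQUENCE_MASK		(3u << 28)
#define PP_SEQUENCE_SHIFT		28
#define PP_SEQUENCE_STATE_MASK		0x0000000f
#define PP_SEQUENCE_STATE_ON_IDLE	(0x8 << 0)

int main(void)
{
	unsigned int status = PP_ON | PP_SEQUENCE_NONE | PP_SEQUENCE_STATE_ON_IDLE;

	printf("panel on:  %s\n", (status & PP_ON) ? "yes" : "no");
	printf("sequence:  %u\n", (status & PP_SEQUENCE_MASK) >> PP_SEQUENCE_SHIFT);
	printf("state:     %s\n",
	       (status & PP_SEQUENCE_STATE_MASK) == PP_SEQUENCE_STATE_ON_IDLE ?
	       "on-idle" : "other");
	return 0;
}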
+2
drivers/gpu/drm/i915/i915_suspend.c
···
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 7:
 	case 6:
 		for (i = 0; i < 16; i++)
 			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
···
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 7:
 	case 6:
 		for (i = 0; i < 16; i++)
 			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+24 -9
drivers/gpu/drm/i915/intel_display.c
···
 
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
-	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
 		reg = TRANS_DP_CTL(pipe);
 		temp = I915_READ(reg);
···
 			lvds_bpc = 6;
 
 		if (lvds_bpc < display_bpc) {
-			DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
+			DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
 			display_bpc = lvds_bpc;
 		}
 		continue;
···
 			unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 
 			if (edp_bpc < display_bpc) {
-				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
 				display_bpc = edp_bpc;
 			}
 			continue;
···
 		/* Don't use an invalid EDID bpc value */
 		if (connector->display_info.bpc &&
 		    connector->display_info.bpc < display_bpc) {
-			DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
+			DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
 			display_bpc = connector->display_info.bpc;
 		}
 	}
···
 	 */
 	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
 		if (display_bpc > 8 && display_bpc < 12) {
-			DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
+			DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
 			display_bpc = 12;
 		} else {
-			DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
+			DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
 			display_bpc = 8;
 		}
 	}
···
 
 	display_bpc = min(display_bpc, bpc);
 
-	DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
-			 bpc, display_bpc);
+	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
+		      bpc, display_bpc);
 
 	*pipe_bpp = display_bpc * 3;
 
···
 	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
 	if ((is_lvds && dev_priv->lvds_dither) || dither) {
 		pipeconf |= PIPECONF_DITHER_EN;
-		pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+		pipeconf |= PIPECONF_DITHER_TYPE_SP;
 	}
 	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
···
 	I915_WRITE(WM3_LP_ILK, 0);
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
+
+	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+	 * gating disable must be set. Failure to set it results in
+	 * flickering pixels due to Z write ordering failures after
+	 * some amount of runtime in the Mesa "fire" demo, and Unigine
+	 * Sanctuary and Tropics, and apparently anything else with
+	 * alpha test or pixel discard.
+	 *
+	 * According to the spec, bit 11 (RCCUNIT) must also be set,
+	 * but we didn't debug actual testcases to find it out.
+	 */
+	I915_WRITE(GEN6_UCGCTL2,
+		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
 	/*
 	 * According to the spec the following bits should be
+237 -174
drivers/gpu/drm/i915/intel_dp.c
···
 	struct i2c_algo_dp_aux_data algo;
 	bool is_pch_edp;
 	uint8_t train_set[4];
-	uint8_t link_status[DP_LINK_STATUS_SIZE];
 	int panel_power_up_delay;
 	int panel_power_down_delay;
 	int panel_power_cycle_delay;
···
 	struct drm_display_mode *panel_fixed_mode; /* for eDP */
 	struct delayed_work panel_vdd_work;
 	bool want_panel_vdd;
-	unsigned long panel_off_jiffies;
 };
 
 /**
···
 static int
 intel_dp_max_lane_count(struct intel_dp *intel_dp)
 {
-	int max_lane_count = 4;
-
-	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
-		max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
-		switch (max_lane_count) {
-		case 1: case 2: case 4:
-			break;
-		default:
-			max_lane_count = 4;
-		}
+	int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+	switch (max_lane_count) {
+	case 1: case 2: case 4:
+		break;
+	default:
+		max_lane_count = 4;
 	}
 	return max_lane_count;
 }
···
 			continue;
 
 		intel_dp = enc_to_intel_dp(encoder);
-		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_dp->base.type == INTEL_OUTPUT_EDP)
+		{
 			lane_count = intel_dp->lane_count;
-			break;
-		} else if (is_edp(intel_dp)) {
-			lane_count = dev_priv->edp.lanes;
 			break;
 		}
 	}
···
 		  struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
···
 			ironlake_edp_pll_off(encoder);
 	}
 
-	intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
-	intel_dp->DP |= intel_dp->color_range;
+	/*
+	 * There are three kinds of DP registers:
+	 *
+	 *	IBX PCH
+	 *	CPU
+	 *	CPT PCH
+	 *
+	 * IBX PCH and CPU are the same for almost everything,
+	 * except that the CPU DP PLL is configured in this
+	 * register
+	 *
+	 * CPT PCH is quite different, having many bits moved
+	 * to the TRANS_DP_CTL register instead. That
+	 * configuration happens (oddly) in ironlake_pch_enable
+	 */
 
-	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
-		intel_dp->DP |= DP_SYNC_HS_HIGH;
-	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
-		intel_dp->DP |= DP_SYNC_VS_HIGH;
+	/* Preserve the BIOS-computed detected bit. This is
+	 * supposed to be read-only.
+	 */
+	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
-		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
-	else
-		intel_dp->DP |= DP_LINK_TRAIN_OFF;
+	/* Handle DP bits in common between all three register formats */
+
+	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
 	switch (intel_dp->lane_count) {
 	case 1:
···
 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 		intel_write_eld(encoder, adjusted_mode);
 	}
-
 	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
 	intel_dp->link_configuration[0] = intel_dp->link_bw;
 	intel_dp->link_configuration[1] = intel_dp->lane_count;
 	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-
 	/*
 	 * Check for DPCD version > 1.1 and enhanced framing support
 	 */
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
 	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
 		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-		intel_dp->DP |= DP_ENHANCED_FRAMING;
 	}
 
-	/* CPT DP's pipe select is decided in TRANS_DP_CTL */
-	if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
-		intel_dp->DP |= DP_PIPEB_SELECT;
+	/* Split out the IBX/CPU vs CPT settings */
 
-	if (is_cpu_edp(intel_dp)) {
-		/* don't miss out required setting for eDP */
-		intel_dp->DP |= DP_PLL_ENABLE;
-		if (adjusted_mode->clock < 200000)
-			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
-		else
-			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+	if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+		intel_dp->DP |= intel_dp->color_range;
+
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			intel_dp->DP |= DP_SYNC_HS_HIGH;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			intel_dp->DP |= DP_SYNC_VS_HIGH;
+		intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+			intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+		if (intel_crtc->pipe == 1)
+			intel_dp->DP |= DP_PIPEB_SELECT;
+
+		if (is_cpu_edp(intel_dp)) {
+			/* don't miss out required setting for eDP */
+			intel_dp->DP |= DP_PLL_ENABLE;
+			if (adjusted_mode->clock < 200000)
+				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+			else
+				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+		}
+	} else {
+		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 	}
+}
+
+#define IDLE_ON_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE		(PP_ON | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
+#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+
+#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+				       u32 mask,
+				       u32 value)
+{
+	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+		      mask, value,
+		      I915_READ(PCH_PP_STATUS),
+		      I915_READ(PCH_PP_CONTROL));
+
+	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
+		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+			  I915_READ(PCH_PP_STATUS),
+			  I915_READ(PCH_PP_CONTROL));
+	}
+}
+
+static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power on\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
 }
 
 static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
 {
-	unsigned long off_time;
-	unsigned long delay;
-
 	DRM_DEBUG_KMS("Wait for panel power off time\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
 
-	if (ironlake_edp_have_panel_power(intel_dp) ||
-	    ironlake_edp_have_panel_vdd(intel_dp))
-	{
-		DRM_DEBUG_KMS("Panel still on, no delay needed\n");
-		return;
-	}
+static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power cycle\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
 
-	off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay);
-	if (time_after(jiffies, off_time)) {
-		DRM_DEBUG_KMS("Time already passed");
-		return;
-	}
-	delay = jiffies_to_msecs(off_time - jiffies);
-	if (delay > intel_dp->panel_power_down_delay)
-		delay = intel_dp->panel_power_down_delay;
-	DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay);
-	msleep(delay);
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
+{
+	u32 control = I915_READ(PCH_PP_CONTROL);
+
+	control &= ~PANEL_UNLOCK_MASK;
+	control |= PANEL_UNLOCK_REGS;
+	return control;
 }
 
 static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
···
 	     "eDP VDD already requested on\n");
 
 	intel_dp->want_panel_vdd = true;
+
 	if (ironlake_edp_have_panel_vdd(intel_dp)) {
 		DRM_DEBUG_KMS("eDP VDD already on\n");
 		return;
 	}
 
-	ironlake_wait_panel_off(intel_dp);
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
+	if (!ironlake_edp_have_panel_power(intel_dp))
+		ironlake_wait_panel_power_cycle(intel_dp);
+
+	pp = ironlake_get_pp_control(dev_priv);
 	pp |= EDP_FORCE_VDD;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
···
 	u32 pp;
 
 	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
-		pp = I915_READ(PCH_PP_CONTROL);
-		pp &= ~PANEL_UNLOCK_MASK;
-		pp |= PANEL_UNLOCK_REGS;
+		pp = ironlake_get_pp_control(dev_priv);
 		pp &= ~EDP_FORCE_VDD;
 		I915_WRITE(PCH_PP_CONTROL, pp);
 		POSTING_READ(PCH_PP_CONTROL);
···
 		/* Make sure sequencer is idle before allowing subsequent activity */
 		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
 			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
-		intel_dp->panel_off_jiffies = jiffies;
+
+		msleep(intel_dp->panel_power_down_delay);
 	}
 }
···
 						 struct intel_dp, panel_vdd_work);
 	struct drm_device *dev = intel_dp->base.base.dev;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->mode_config.mutex);
 	ironlake_panel_vdd_off_sync(intel_dp);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 
 static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
···
 
 	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
 	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
-	
+
 	intel_dp->want_panel_vdd = false;
 
 	if (sync) {
···
 	}
 }
 
-/* Returns true if the panel was already on when called */
 static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
+	u32 pp;
 
 	if (!is_edp(intel_dp))
 		return;
-	if (ironlake_edp_have_panel_power(intel_dp))
+
+	DRM_DEBUG_KMS("Turn eDP power on\n");
+
+	if (ironlake_edp_have_panel_power(intel_dp)) {
+		DRM_DEBUG_KMS("eDP power already on\n");
 		return;
+	}
 
-	ironlake_wait_panel_off(intel_dp);
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
+	ironlake_wait_panel_power_cycle(intel_dp);
 
+	pp = ironlake_get_pp_control(dev_priv);
 	if (IS_GEN5(dev)) {
 		/* ILK workaround: disable reset around power sequence */
 		pp &= ~PANEL_POWER_RESET;
···
 	}
 
 	pp |= POWER_TARGET_ON;
+	if (!IS_GEN5(dev))
+		pp |= PANEL_POWER_RESET;
+
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
 
-	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
-		     5000))
-		DRM_ERROR("panel on wait timed out: 0x%08x\n",
-			  I915_READ(PCH_PP_STATUS));
+	ironlake_wait_panel_on(intel_dp);
 
 	if (IS_GEN5(dev)) {
 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
···
 	}
 }
 
-static void ironlake_edp_panel_off(struct drm_encoder *encoder)
+static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = intel_dp->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
-		PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
+	u32 pp;
 
 	if (!is_edp(intel_dp))
 		return;
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
 
-	if (IS_GEN5(dev)) {
-		/* ILK workaround: disable reset around power sequence */
-		pp &= ~PANEL_POWER_RESET;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-	}
+	DRM_DEBUG_KMS("Turn eDP power off\n");
 
-	intel_dp->panel_off_jiffies = jiffies;
+	WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
 
-	if (IS_GEN5(dev)) {
-		pp &= ~POWER_TARGET_ON;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-		pp &= ~POWER_TARGET_ON;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-		msleep(intel_dp->panel_power_cycle_delay);
+	pp = ironlake_get_pp_control(dev_priv);
+	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+	I915_WRITE(PCH_PP_CONTROL, pp);
+	POSTING_READ(PCH_PP_CONTROL);
 
-		if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
-			DRM_ERROR("panel off wait timed out: 0x%08x\n",
-				  I915_READ(PCH_PP_STATUS));
-
-		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-	}
+	ironlake_wait_panel_off(intel_dp);
 }
 
 static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
···
 	 * allowing it to appear.
 	 */
 	msleep(intel_dp->backlight_on_delay);
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
+	pp = ironlake_get_pp_control(dev_priv);
 	pp |= EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
···
 		return;
 
 	DRM_DEBUG_KMS("\n");
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
+	pp = ironlake_get_pp_control(dev_priv);
 	pp &= ~EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
···
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+	ironlake_edp_backlight_off(intel_dp);
+	ironlake_edp_panel_off(intel_dp);
+
 	/* Wake up the sink first */
 	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_link_down(intel_dp);
 	ironlake_edp_panel_vdd_off(intel_dp, false);
 
 	/* Make sure the panel is off before trying to
 	 * change the mode
 	 */
-	ironlake_edp_backlight_off(intel_dp);
-	intel_dp_link_down(intel_dp);
-	ironlake_edp_panel_off(encoder);
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
···
 	intel_dp_start_link_train(intel_dp);
 	ironlake_edp_panel_on(intel_dp);
 	ironlake_edp_panel_vdd_off(intel_dp, true);
-
 	intel_dp_complete_link_train(intel_dp);
 	ironlake_edp_backlight_on(intel_dp);
···
 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
 	if (mode != DRM_MODE_DPMS_ON) {
+		ironlake_edp_backlight_off(intel_dp);
+		ironlake_edp_panel_off(intel_dp);
+
 		ironlake_edp_panel_vdd_on(intel_dp);
-		if (is_edp(intel_dp))
-			ironlake_edp_backlight_off(intel_dp);
 		intel_dp_sink_dpms(intel_dp, mode);
 		intel_dp_link_down(intel_dp);
-		ironlake_edp_panel_off(encoder);
-		if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
-			ironlake_edp_pll_off(encoder);
 		ironlake_edp_panel_vdd_off(intel_dp, false);
+
+		if (is_cpu_edp(intel_dp))
+			ironlake_edp_pll_off(encoder);
 	} else {
+		if (is_cpu_edp(intel_dp))
+			ironlake_edp_pll_on(encoder);
+
 		ironlake_edp_panel_vdd_on(intel_dp);
 		intel_dp_sink_dpms(intel_dp, mode);
 		if (!(dp_reg & DP_PORT_EN)) {
···
 			ironlake_edp_panel_on(intel_dp);
 			ironlake_edp_panel_vdd_off(intel_dp, true);
 			intel_dp_complete_link_train(intel_dp);
-			ironlake_edp_backlight_on(intel_dp);
 		} else
 			ironlake_edp_panel_vdd_off(intel_dp, false);
 		ironlake_edp_backlight_on(intel_dp);
···
  * link status information
  */
 static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp)
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	return intel_dp_aux_native_read_retry(intel_dp,
 					      DP_LANE0_1_STATUS,
-					      intel_dp->link_status,
+					      link_status,
 					      DP_LINK_STATUS_SIZE);
 }
···
 }
 
 static uint8_t
-intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_voltage(uint8_t adjust_request[2],
 				 int lane)
 {
-	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
 	int s = ((lane & 1) ?
 		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
 		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
-	uint8_t l = intel_dp_link_status(link_status, i);
+	uint8_t l = adjust_request[lane>>1];
 
 	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
 }
 
 static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
 				      int lane)
 {
-	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
 	int s = ((lane & 1) ?
 		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
 		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
-	uint8_t l = intel_dp_link_status(link_status, i);
+	uint8_t l = adjust_request[lane>>1];
 
 	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
 }
···
  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
  */
 #define I830_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_800
+#define I830_DP_VOLTAGE_MAX_CPT	    DP_TRAIN_VOLTAGE_SWING_1200
 
 static uint8_t
 intel_dp_pre_emphasis_max(uint8_t voltage_swing)
···
 }
 
 static void
-intel_get_adjust_train(struct intel_dp *intel_dp)
+intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
+	struct drm_device *dev = intel_dp->base.base.dev;
 	uint8_t v = 0;
 	uint8_t p = 0;
 	int lane;
+	uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
+	int voltage_max;
 
 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-		uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
-		uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
+		uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
 
 		if (this_v > v)
 			v = this_v;
···
 			p = this_p;
 	}
 
-	if (v >= I830_DP_VOLTAGE_MAX)
-		v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		voltage_max = I830_DP_VOLTAGE_MAX_CPT;
+	else
+		voltage_max = I830_DP_VOLTAGE_MAX;
+	if (v >= voltage_max)
+		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
 	if (p >= intel_dp_pre_emphasis_max(v))
 		p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
···
 }
 
 static uint32_t
-intel_dp_signal_levels(uint8_t train_set, int lane_count)
+intel_dp_signal_levels(uint8_t train_set)
 {
 	uint32_t signal_levels = 0;
···
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 		      int lane)
 {
-	int i = DP_LANE0_1_STATUS + (lane >> 1);
 	int s = (lane & 1) * 4;
-	uint8_t l = intel_dp_link_status(link_status, i);
+	uint8_t l = link_status[lane>>1];
 
 	return (l >> s) & 0xf;
 }
···
 			 DP_LANE_CHANNEL_EQ_DONE|\
 			 DP_LANE_SYMBOL_LOCKED)
 static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp)
+intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	uint8_t lane_align;
 	uint8_t lane_status;
 	int lane;
 
-	lane_align = intel_dp_link_status(intel_dp->link_status,
+	lane_align = intel_dp_link_status(link_status,
 					  DP_LANE_ALIGN_STATUS_UPDATED);
 	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
 		return false;
 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-		lane_status = intel_get_lane_status(intel_dp->link_status, lane);
+		lane_status = intel_get_lane_status(link_status, lane);
 		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
 			return false;
 	}
···
 
 	ret = intel_dp_aux_native_write(intel_dp,
 					DP_TRAINING_LANE0_SET,
-					intel_dp->train_set, 4);
-	if (ret != 4)
+					intel_dp->train_set,
+					intel_dp->lane_count);
+	if (ret != intel_dp->lane_count)
 		return false;
 
 	return true;
···
 	int i;
 	uint8_t voltage;
 	bool clock_recovery = false;
-	int tries;
+	int voltage_tries, loop_tries;
 	u32 reg;
 	uint32_t DP = intel_dp->DP;
···
 	DP &= ~DP_LINK_TRAIN_MASK;
 	memset(intel_dp->train_set, 0, 4);
 	voltage = 0xff;
-	tries = 0;
+	voltage_tries = 0;
+	loop_tries = 0;
 	clock_recovery = false;
 	for (;;) {
 		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
 		uint32_t signal_levels;
-		if (IS_GEN6(dev) && is_edp(intel_dp)) {
+
+		if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
-			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
+			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
+			DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
···
 		/* Set training pattern 1 */
 
 		udelay(100);
-		if (!intel_dp_get_link_status(intel_dp))
+		if (!intel_dp_get_link_status(intel_dp, link_status)) {
+			DRM_ERROR("failed to get link status\n");
 			break;
+		}
 
-		if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+			DRM_DEBUG_KMS("clock recovery OK\n");
 			clock_recovery = true;
 			break;
 		}
···
 		for (i = 0; i < intel_dp->lane_count; i++)
 			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
 				break;
-		if (i == intel_dp->lane_count)
-			break;
+		if (i == intel_dp->lane_count) {
+			++loop_tries;
+			if (loop_tries == 5) {
+				DRM_DEBUG_KMS("too many full retries, give up\n");
+				break;
+			}
+			memset(intel_dp->train_set, 0, 4);
+			voltage_tries = 0;
+			continue;
+		}
 
 		/* Check to see if we've tried the same voltage 5 times */
 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-			++tries;
-			if (tries == 5)
+			++voltage_tries;
+			if (voltage_tries == 5) {
+				DRM_DEBUG_KMS("too many voltage retries, give up\n");
 				break;
+			}
 		} else
-			tries = 0;
+			voltage_tries = 0;
 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
 		/* Compute new intel_dp->train_set as requested by target */
-		intel_get_adjust_train(intel_dp);
+		intel_get_adjust_train(intel_dp, link_status);
 	}
 
 	intel_dp->DP = DP;
···
 	for (;;) {
 		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
 		uint32_t signal_levels;
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 		if (cr_tries > 5) {
 			DRM_ERROR("failed to train DP, aborting\n");
···
 			break;
 		}
 
-		if (IS_GEN6(dev) && is_edp(intel_dp)) {
+		if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
-			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
+			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
···
 			break;
 
 		udelay(400);
-		if (!intel_dp_get_link_status(intel_dp))
+		if (!intel_dp_get_link_status(intel_dp, link_status))
 			break;
 
 		/* Make sure clock is still ok */
-		if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+		if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			intel_dp_start_link_train(intel_dp);
 			cr_tries++;
 			continue;
 		}
 
-		if (intel_channel_eq_ok(intel_dp)) {
+		if (intel_channel_eq_ok(intel_dp, link_status)) {
 			channel_eq = true;
 			break;
 		}
···
 		}
 
 		/* Compute new intel_dp->train_set as requested by target */
-		intel_get_adjust_train(intel_dp);
+		intel_get_adjust_train(intel_dp, link_status);
 		++tries;
 	}
···
 
 	msleep(17);
 
-	if (is_edp(intel_dp))
-		DP |= DP_LINK_TRAIN_OFF;
+	if (is_edp(intel_dp)) {
+		if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+			DP |= DP_LINK_TRAIN_OFF_CPT;
+		else
+			DP |= DP_LINK_TRAIN_OFF;
+	}
 
 	if (!HAS_PCH_CPT(dev) &&
 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
···
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
 	u8 sink_irq_vector;
+	u8 link_status[DP_LINK_STATUS_SIZE];
 
 	if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
 		return;
···
 		return;
 
 	/* Try to read receiver status if the link appears to be up */
-	if (!intel_dp_get_link_status(intel_dp)) {
+	if (!intel_dp_get_link_status(intel_dp, link_status)) {
 		intel_dp_link_down(intel_dp);
 		return;
 	}
···
 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
 	}
 
-	if (!intel_channel_eq_ok(intel_dp)) {
+	if (!intel_channel_eq_ok(intel_dp, link_status)) {
 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
 			      drm_get_encoder_name(&intel_dp->base.base));
 		intel_dp_start_link_train(intel_dp);
 	}
···
 			continue;
 
 		intel_dp = enc_to_intel_dp(encoder);
-		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_dp->base.type == INTEL_OUTPUT_EDP)
 			return intel_dp->output_reg;
 	}
···
 
 		cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
 			PANEL_LIGHT_ON_DELAY_SHIFT;
-		
+
 		cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
 			PANEL_LIGHT_OFF_DELAY_SHIFT;
···
 		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
 			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
 
-		intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
-
 		ironlake_edp_panel_vdd_on(intel_dp);
 		ret = intel_dp_get_dpcd(intel_dp);
 		ironlake_edp_panel_vdd_off(intel_dp, false);
+
 		if (ret) {
 			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
 				dev_priv->no_aux_handshake =
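The reworked training-pattern-1 loop in intel_dp.c distinguishes two retry budgets: voltage_tries (the sink requested the same voltage five times running) and loop_tries (every lane hit max swing, so the whole train_set is zeroed and retried, at most five times). A compilable model of just that control flow; the AUX reads and training-register writes are replaced by stubs, and the stub sink always demands max swing so the full-retry budget visibly runs out:

#include <stdio.h>
#include <string.h>

#define DP_TRAIN_MAX_SWING_REACHED	(1 << 2)
#define DP_TRAIN_VOLTAGE_SWING_MASK	0x3

static int lane_count = 4;

/* Stub: pretend the sink keeps asking for max swing on every lane. */
static void get_adjust_train(unsigned char *train_set)
{
	for (int i = 0; i < lane_count; i++)
		train_set[i] = DP_TRAIN_VOLTAGE_SWING_MASK | DP_TRAIN_MAX_SWING_REACHED;
}

static int clock_recovery_ok(void) { return 0; }	/* stub: never locks */

int main(void)
{
	unsigned char train_set[4];
	unsigned char voltage = 0xff;
	int voltage_tries = 0, loop_tries = 0;

	memset(train_set, 0, 4);
	for (;;) {
		if (clock_recovery_ok()) {
			printf("clock recovery OK\n");
			break;
		}

		int i;
		for (i = 0; i < lane_count; i++)
			if (!(train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
				break;
		if (i == lane_count) {		/* every lane at max swing */
			if (++loop_tries == 5) {
				printf("too many full retries, give up\n");
				break;
			}
			memset(train_set, 0, 4);	/* start over from zero */
			voltage_tries = 0;
			get_adjust_train(train_set);	/* sink asks for max again */
			continue;
		}

		/* Same voltage requested five times in a row? */
		if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			if (++voltage_tries == 5) {
				printf("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		get_adjust_train(train_set);
	}
	return 0;
}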
+2 -1
drivers/gpu/drm/i915/intel_panel.c
···
 static int intel_panel_get_brightness(struct backlight_device *bd)
 {
 	struct drm_device *dev = bl_get_data(bd);
-	return intel_panel_get_backlight(dev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->backlight_level;
 }
 
 static const struct backlight_ops intel_panel_bl_ops = {
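The intel_panel.c change reports the cached brightness instead of reading it back from hardware, which (as the "Fix inconsistent backlight level during disabled" commit title suggests) avoids the readback collapsing while the backlight is off. A small sketch of the idea; the struct and readback function are reduced stand-ins, not the driver's real ones:

#include <stdio.h>

struct i915_priv {
	unsigned int backlight_level;	/* cached on every successful set */
	int panel_enabled;
};

/* Hardware readback returns 0 while the backlight is disabled, so
 * reporting it directly makes the level appear to bounce around. */
static unsigned int hw_read_backlight(const struct i915_priv *p)
{
	return p->panel_enabled ? p->backlight_level : 0;
}

static unsigned int get_brightness(const struct i915_priv *p)
{
	return p->backlight_level;	/* the fix: report the cache */
}

int main(void)
{
	struct i915_priv priv = { .backlight_level = 180, .panel_enabled = 0 };

	printf("raw readback while disabled: %u\n", hw_read_backlight(&priv));
	printf("reported brightness:         %u\n", get_brightness(&priv));
	return 0;
}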
+50 -42
drivers/gpu/drm/radeon/evergreen_cs.c
···
 		}
 		break;
 	case DB_Z_INFO:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
-		if (r) {
-			dev_warn(p->dev, "bad SET_CONTEXT_REG "
-					"0x%04X\n", reg);
-			return -EINVAL;
-		}
 		track->db_z_info = radeon_get_ib_value(p, idx);
-		ib[idx] &= ~Z_ARRAY_MODE(0xf);
-		track->db_z_info &= ~Z_ARRAY_MODE(0xf);
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-			ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-			track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-		} else {
-			ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-			track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+		if (!p->keep_tiling_flags) {
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			ib[idx] &= ~Z_ARRAY_MODE(0xf);
+			track->db_z_info &= ~Z_ARRAY_MODE(0xf);
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+				track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+			} else {
+				ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+				track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+			}
 		}
 		break;
 	case DB_STENCIL_INFO:
···
 	case CB_COLOR5_INFO:
 	case CB_COLOR6_INFO:
 	case CB_COLOR7_INFO:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
-		if (r) {
-			dev_warn(p->dev, "bad SET_CONTEXT_REG "
-					"0x%04X\n", reg);
-			return -EINVAL;
-		}
 		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+		if (!p->keep_tiling_flags) {
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+				track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+				ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+				track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+			}
 		}
 		break;
 	case CB_COLOR8_INFO:
 	case CB_COLOR9_INFO:
 	case CB_COLOR10_INFO:
 	case CB_COLOR11_INFO:
-		r = evergreen_cs_packet_next_reloc(p, &reloc);
-		if (r) {
-			dev_warn(p->dev, "bad SET_CONTEXT_REG "
-					"0x%04X\n", reg);
-			return -EINVAL;
-		}
 		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
 		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
-			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
-			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
-			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+		if (!p->keep_tiling_flags) {
+			r = evergreen_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				dev_warn(p->dev, "bad SET_CONTEXT_REG "
+						"0x%04X\n", reg);
+				return -EINVAL;
+			}
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
+				ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+				track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+			} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
+				ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+				track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+			}
 		}
 		break;
 	case CB_COLOR0_PITCH:
···
 				return -EINVAL;
 			}
 			ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-				ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
-			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-				ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+			if (!p->keep_tiling_flags) {
+				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
+				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
+			}
 			texture = reloc->robj;
 			/* tex mip base */
 			r = evergreen_cs_packet_next_reloc(p, &reloc);
+52 -44
drivers/gpu/drm/radeon/r300.c
···
 			return r;
 		}
 
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-			tile_flags |= R300_TXO_MACRO_TILE;
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-			tile_flags |= R300_TXO_MICRO_TILE;
-		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
-			tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
+		if (p->keep_tiling_flags) {
+			ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
+				  ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
+		} else {
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_TXO_MACRO_TILE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_TXO_MICRO_TILE;
+			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
 
-		tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
-		tmp |= tile_flags;
-		ib[idx] = tmp;
+			tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
+		}
 		track->textures[i].robj = reloc->robj;
 		track->tex_dirty = true;
 		break;
···
 		/* RB3D_COLORPITCH1 */
 		/* RB3D_COLORPITCH2 */
 		/* RB3D_COLORPITCH3 */
-		r = r100_cs_packet_next_reloc(p, &reloc);
-		if (r) {
-			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
-			return r;
+		if (!p->keep_tiling_flags) {
+			r = r100_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				r100_cs_dump_packet(p, pkt);
+				return r;
+			}
+
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_COLOR_TILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_COLOR_MICROTILE_ENABLE;
+			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
 		}
-
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-			tile_flags |= R300_COLOR_TILE_ENABLE;
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-			tile_flags |= R300_COLOR_MICROTILE_ENABLE;
-		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
-			tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
-
-		tmp = idx_value & ~(0x7 << 16);
-		tmp |= tile_flags;
-		ib[idx] = tmp;
 		i = (reg - 0x4E38) >> 2;
 		track->cb[i].pitch = idx_value & 0x3FFE;
 		switch (((idx_value >> 21) & 0xF)) {
···
 		break;
 	case 0x4F24:
 		/* ZB_DEPTHPITCH */
-		r = r100_cs_packet_next_reloc(p, &reloc);
-		if (r) {
-			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
-				  idx, reg);
-			r100_cs_dump_packet(p, pkt);
-			return r;
+		if (!p->keep_tiling_flags) {
+			r = r100_cs_packet_next_reloc(p, &reloc);
+			if (r) {
+				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
+					  idx, reg);
+				r100_cs_dump_packet(p, pkt);
+				return r;
+			}
+
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+				tile_flags |= R300_DEPTHMACROTILE_ENABLE;
+			if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+				tile_flags |= R300_DEPTHMICROTILE_TILED;
+			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
+				tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
+
+			tmp = idx_value & ~(0x7 << 16);
+			tmp |= tile_flags;
+			ib[idx] = tmp;
 		}
-
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-			tile_flags |= R300_DEPTHMACROTILE_ENABLE;
-		if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-			tile_flags |= R300_DEPTHMICROTILE_TILED;
-		else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
-			tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
-
-		tmp = idx_value & ~(0x7 << 16);
-		tmp |= tile_flags;
-		ib[idx] = tmp;
-
 		track->zb.pitch = idx_value & 0x3FFC;
 		track->zb_dirty = true;
 		break;
+16 -10
drivers/gpu/drm/radeon/r600_cs.c
···
 		track->db_depth_control = radeon_get_ib_value(p, idx);
 		break;
 	case R_028010_DB_DEPTH_INFO:
-		if (r600_cs_packet_next_is_pkt3_nop(p)) {
+		if (!p->keep_tiling_flags &&
+		    r600_cs_packet_next_is_pkt3_nop(p)) {
 			r = r600_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				dev_warn(p->dev, "bad SET_CONTEXT_REG "
···
 	case R_0280B4_CB_COLOR5_INFO:
 	case R_0280B8_CB_COLOR6_INFO:
 	case R_0280BC_CB_COLOR7_INFO:
-		if (r600_cs_packet_next_is_pkt3_nop(p)) {
+		if (!p->keep_tiling_flags &&
+		    r600_cs_packet_next_is_pkt3_nop(p)) {
 			r = r600_cs_packet_next_reloc(p, &reloc);
 			if (r) {
 				dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
···
 	mip_offset <<= 8;
 
 	word0 = radeon_get_ib_value(p, idx + 0);
-	if (tiling_flags & RADEON_TILING_MACRO)
-		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
-	else if (tiling_flags & RADEON_TILING_MICRO)
-		word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+	if (!p->keep_tiling_flags) {
+		if (tiling_flags & RADEON_TILING_MACRO)
+			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+		else if (tiling_flags & RADEON_TILING_MICRO)
+			word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+	}
 	word1 = radeon_get_ib_value(p, idx + 1);
 	w0 = G_038000_TEX_WIDTH(word0) + 1;
 	h0 = G_038004_TEX_HEIGHT(word1) + 1;
···
 				return -EINVAL;
 			}
 			base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
-			if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
-				ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
-			else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
-				ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+			if (!p->keep_tiling_flags) {
+				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
+					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
+				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
+					ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
+			}
 			texture = reloc->robj;
 			/* tex mip base */
 			r = r600_cs_packet_next_reloc(p, &reloc);
+2 -1
drivers/gpu/drm/radeon/radeon.h
··· 611 611 struct radeon_ib *ib; 612 612 void *track; 613 613 unsigned family; 614 - int parser_error; 614 + int parser_error; 615 + bool keep_tiling_flags; 615 616 }; 616 617 617 618 extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
+86 -116
drivers/gpu/drm/radeon/radeon_atombios.c
··· 62 62 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; 63 63 }; 64 64 65 + static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev, 66 + ATOM_GPIO_I2C_ASSIGMENT *gpio, 67 + u8 index) 68 + { 69 + /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ 70 + if ((rdev->family == CHIP_R420) || 71 + (rdev->family == CHIP_R423) || 72 + (rdev->family == CHIP_RV410)) { 73 + if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || 74 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || 75 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { 76 + gpio->ucClkMaskShift = 0x19; 77 + gpio->ucDataMaskShift = 0x18; 78 + } 79 + } 80 + 81 + /* some evergreen boards have bad data for this entry */ 82 + if (ASIC_IS_DCE4(rdev)) { 83 + if ((index == 7) && 84 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && 85 + (gpio->sucI2cId.ucAccess == 0)) { 86 + gpio->sucI2cId.ucAccess = 0x97; 87 + gpio->ucDataMaskShift = 8; 88 + gpio->ucDataEnShift = 8; 89 + gpio->ucDataY_Shift = 8; 90 + gpio->ucDataA_Shift = 8; 91 + } 92 + } 93 + 94 + /* some DCE3 boards have bad data for this entry */ 95 + if (ASIC_IS_DCE3(rdev)) { 96 + if ((index == 4) && 97 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && 98 + (gpio->sucI2cId.ucAccess == 0x94)) 99 + gpio->sucI2cId.ucAccess = 0x14; 100 + } 101 + } 102 + 103 + static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio) 104 + { 105 + struct radeon_i2c_bus_rec i2c; 106 + 107 + memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); 108 + 109 + i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 110 + i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 111 + i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; 112 + i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; 113 + i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; 114 + i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; 115 + i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; 116 + i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; 117 + i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); 118 + i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); 119 + i2c.en_clk_mask = (1 << gpio->ucClkEnShift); 120 + i2c.en_data_mask = (1 << gpio->ucDataEnShift); 121 + i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); 122 + i2c.y_data_mask = (1 << gpio->ucDataY_Shift); 123 + i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); 124 + i2c.a_data_mask = (1 << gpio->ucDataA_Shift); 125 + 126 + if (gpio->sucI2cId.sbfAccess.bfHW_Capable) 127 + i2c.hw_capable = true; 128 + else 129 + i2c.hw_capable = false; 130 + 131 + if (gpio->sucI2cId.ucAccess == 0xa0) 132 + i2c.mm_i2c = true; 133 + else 134 + i2c.mm_i2c = false; 135 + 136 + i2c.i2c_id = gpio->sucI2cId.ucAccess; 137 + 138 + if (i2c.mask_clk_reg) 139 + i2c.valid = true; 140 + else 141 + i2c.valid = false; 142 + 143 + return i2c; 144 + } 145 + 65 146 static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, 66 147 uint8_t id) 67 148 { ··· 166 85 for (i = 0; i < num_indices; i++) { 167 86 gpio = &i2c_info->asGPIO_Info[i]; 168 87 169 - /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ 170 - if ((rdev->family == CHIP_R420) || 171 - (rdev->family == CHIP_R423) || 172 - (rdev->family == CHIP_RV410)) { 173 - if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || 174 - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || 175 - 
(le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { 176 - gpio->ucClkMaskShift = 0x19; 177 - gpio->ucDataMaskShift = 0x18; 178 - } 179 - } 180 - 181 - /* some evergreen boards have bad data for this entry */ 182 - if (ASIC_IS_DCE4(rdev)) { 183 - if ((i == 7) && 184 - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && 185 - (gpio->sucI2cId.ucAccess == 0)) { 186 - gpio->sucI2cId.ucAccess = 0x97; 187 - gpio->ucDataMaskShift = 8; 188 - gpio->ucDataEnShift = 8; 189 - gpio->ucDataY_Shift = 8; 190 - gpio->ucDataA_Shift = 8; 191 - } 192 - } 193 - 194 - /* some DCE3 boards have bad data for this entry */ 195 - if (ASIC_IS_DCE3(rdev)) { 196 - if ((i == 4) && 197 - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && 198 - (gpio->sucI2cId.ucAccess == 0x94)) 199 - gpio->sucI2cId.ucAccess = 0x14; 200 - } 88 + radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); 201 89 202 90 if (gpio->sucI2cId.ucAccess == id) { 203 - i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 204 - i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 205 - i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; 206 - i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; 207 - i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; 208 - i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; 209 - i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; 210 - i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; 211 - i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); 212 - i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); 213 - i2c.en_clk_mask = (1 << gpio->ucClkEnShift); 214 - i2c.en_data_mask = (1 << gpio->ucDataEnShift); 215 - i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); 216 - i2c.y_data_mask = (1 << gpio->ucDataY_Shift); 217 - i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); 218 - i2c.a_data_mask = (1 << gpio->ucDataA_Shift); 219 - 220 - if (gpio->sucI2cId.sbfAccess.bfHW_Capable) 221 - i2c.hw_capable = true; 222 - else 223 - i2c.hw_capable = false; 224 - 225 - if (gpio->sucI2cId.ucAccess == 0xa0) 226 - i2c.mm_i2c = true; 227 - else 228 - i2c.mm_i2c = false; 229 - 230 - i2c.i2c_id = gpio->sucI2cId.ucAccess; 231 - 232 - if (i2c.mask_clk_reg) 233 - i2c.valid = true; 91 + i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); 234 92 break; 235 93 } 236 94 } ··· 189 169 int i, num_indices; 190 170 char stmp[32]; 191 171 192 - memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); 193 - 194 172 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { 195 173 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); 196 174 ··· 197 179 198 180 for (i = 0; i < num_indices; i++) { 199 181 gpio = &i2c_info->asGPIO_Info[i]; 200 - i2c.valid = false; 201 182 202 - /* some evergreen boards have bad data for this entry */ 203 - if (ASIC_IS_DCE4(rdev)) { 204 - if ((i == 7) && 205 - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && 206 - (gpio->sucI2cId.ucAccess == 0)) { 207 - gpio->sucI2cId.ucAccess = 0x97; 208 - gpio->ucDataMaskShift = 8; 209 - gpio->ucDataEnShift = 8; 210 - gpio->ucDataY_Shift = 8; 211 - gpio->ucDataA_Shift = 8; 212 - } 213 - } 183 + radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); 214 184 215 - /* some DCE3 boards have bad data for this entry */ 216 - if (ASIC_IS_DCE3(rdev)) { 217 - if ((i == 4) && 218 - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && 219 - (gpio->sucI2cId.ucAccess == 0x94)) 220 - gpio->sucI2cId.ucAccess = 0x14; 221 - } 185 + i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); 222 186 223 - i2c.mask_clk_reg = 
le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 224 - i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 225 - i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; 226 - i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; 227 - i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; 228 - i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; 229 - i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; 230 - i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; 231 - i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); 232 - i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); 233 - i2c.en_clk_mask = (1 << gpio->ucClkEnShift); 234 - i2c.en_data_mask = (1 << gpio->ucDataEnShift); 235 - i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); 236 - i2c.y_data_mask = (1 << gpio->ucDataY_Shift); 237 - i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); 238 - i2c.a_data_mask = (1 << gpio->ucDataA_Shift); 239 - 240 - if (gpio->sucI2cId.sbfAccess.bfHW_Capable) 241 - i2c.hw_capable = true; 242 - else 243 - i2c.hw_capable = false; 244 - 245 - if (gpio->sucI2cId.ucAccess == 0xa0) 246 - i2c.mm_i2c = true; 247 - else 248 - i2c.mm_i2c = false; 249 - 250 - i2c.i2c_id = gpio->sucI2cId.ucAccess; 251 - 252 - if (i2c.mask_clk_reg) { 253 - i2c.valid = true; 187 + if (i2c.valid) { 254 188 sprintf(stmp, "0x%x", i2c.i2c_id); 255 189 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); 256 190 }
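The atombios change is a pure refactor: the r4xx/DCE3/DCE4 quirk patching and the ATOM-table-to-bus-record conversion, previously duplicated between radeon_lookup_i2c_gpio() and the bus-init loop, now live in radeon_lookup_i2c_gpio_quirks() and radeon_get_bus_rec_for_i2c_gpio(). One detail the helper encodes: ATOM stores register locations as little-endian 16-bit DWORD indices, so they become MMIO byte offsets via the "* 4", and the one-byte shift fields expand into single-bit masks. A host-side sketch of those two conversions; the helper name is illustrative, and the endianness assumption is stated in the comment:

#include <stdint.h>
#include <stdio.h>

static uint32_t atom_dword_index_to_byte_offset(uint16_t index)
{
	/* the kernel wraps the load in le16_to_cpu(); this sketch assumes
	 * a little-endian host */
	return (uint32_t)index * 4;
}

int main(void)
{
	uint16_t clk_mask_index = 0x1936;	/* register index seen in the DCE4 quirk */
	uint8_t clk_mask_shift = 0x19;		/* shift patched in by the r4xx quirk */

	printf("reg offset: 0x%x\n", atom_dword_index_to_byte_offset(clk_mask_index));
	printf("clk mask:   0x%08x\n", 1u << clk_mask_shift);
	return 0;
}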
+10 -1
drivers/gpu/drm/radeon/radeon_cs.c
··· 93 93 { 94 94 struct drm_radeon_cs *cs = data; 95 95 uint64_t *chunk_array_ptr; 96 - unsigned size, i; 96 + unsigned size, i, flags = 0; 97 97 98 98 if (!cs->num_chunks) { 99 99 return 0; ··· 140 140 if (p->chunks[i].length_dw == 0) 141 141 return -EINVAL; 142 142 } 143 + if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS && 144 + !p->chunks[i].length_dw) { 145 + return -EINVAL; 146 + } 143 147 144 148 p->chunks[i].length_dw = user_chunk.length_dw; 145 149 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; ··· 158 154 if (DRM_COPY_FROM_USER(p->chunks[i].kdata, 159 155 p->chunks[i].user_ptr, size)) { 160 156 return -EFAULT; 157 + } 158 + if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { 159 + flags = p->chunks[i].kdata[0]; 161 160 } 162 161 } else { 163 162 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); ··· 181 174 p->chunks[p->chunk_ib_idx].length_dw); 182 175 return -EINVAL; 183 176 } 177 + 178 + p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0; 184 179 return 0; 185 180 } 186 181
+2 -1
drivers/gpu/drm/radeon/radeon_drv.c
··· 53 53 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query 54 54 * 2.10.0 - fusion 2D tiling 55 55 * 2.11.0 - backend map, initial compute support for the CS checker 56 + * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS 56 57 */ 57 58 #define KMS_DRIVER_MAJOR 2 58 - #define KMS_DRIVER_MINOR 11 59 + #define KMS_DRIVER_MINOR 12 59 60 #define KMS_DRIVER_PATCHLEVEL 0 60 61 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 61 62 int radeon_driver_unload_kms(struct drm_device *dev);
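Since the CS flags chunk only exists from KMS version 2.12 on, userspace has to probe the driver version before setting RADEON_CS_KEEP_TILING_FLAGS. A sketch using libdrm's drmGetVersion()/drmFreeVersion(); the helper name is made up, and fd is assumed to be an open radeon DRM device:

#include <stdbool.h>
#include <xf86drm.h>

static bool radeon_can_keep_tiling_flags(int fd)
{
	drmVersionPtr v = drmGetVersion(fd);
	bool ok;

	if (!v)
		return false;
	ok = v->version_major > 2 ||
	     (v->version_major == 2 && v->version_minor >= 12);
	drmFreeVersion(v);
	return ok;
}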
+7 -1
drivers/gpu/drm/ttm/ttm_bo.c
··· 574 574 return ret; 575 575 576 576 spin_lock(&glob->lru_lock); 577 + 578 + if (unlikely(list_empty(&bo->ddestroy))) { 579 + spin_unlock(&glob->lru_lock); 580 + return 0; 581 + } 582 + 577 583 ret = ttm_bo_reserve_locked(bo, interruptible, 578 584 no_wait_reserve, false, 0); 579 585 580 - if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { 586 + if (unlikely(ret != 0)) { 581 587 spin_unlock(&glob->lru_lock); 582 588 return ret; 583 589 }
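The ttm fix is an ordering change: the "already off the delayed-destroy list" test now happens under the lru_lock before the buffer is reserved, so the early-return path can no longer hand the bo back still reserved. In generic terms: re-check the done condition before acquiring the second resource, so every exit releases exactly what it took. A userspace pthread analogue of that shape; all names here are illustrative, not ttm's:

#include <pthread.h>
#include <stdbool.h>

struct obj {
	pthread_mutex_t list_lock;	/* stands in for glob->lru_lock */
	pthread_mutex_t reservation;	/* stands in for the bo reservation */
	bool done;			/* stands in for list_empty(&bo->ddestroy) */
};

static int cleanup_refs(struct obj *o)
{
	pthread_mutex_lock(&o->list_lock);

	if (o->done) {
		/* bail out before touching the reservation at all */
		pthread_mutex_unlock(&o->list_lock);
		return 0;
	}

	if (pthread_mutex_trylock(&o->reservation) != 0) {
		/* error path drops only the lock actually held */
		pthread_mutex_unlock(&o->list_lock);
		return -1;
	}

	/* ... teardown would happen here ... */

	pthread_mutex_unlock(&o->reservation);
	pthread_mutex_unlock(&o->list_lock);
	return 0;
}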
+12 -6
drivers/gpu/vga/vgaarb.c
··· 991 991 uc = &priv->cards[i]; 992 992 } 993 993 994 - if (!uc) 995 - return -EINVAL; 994 + if (!uc) { 995 + ret_val = -EINVAL; 996 + goto done; 997 + } 996 998 997 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) 998 - return -EINVAL; 999 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) { 1000 + ret_val = -EINVAL; 1001 + goto done; 1002 + } 999 1003 1000 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) 1001 - return -EINVAL; 1004 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) { 1005 + ret_val = -EINVAL; 1006 + goto done; 1007 + } 1002 1008 1003 1009 vga_put(pdev, io_state); 1004 1010
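The vgaarb fix converts three early "return -EINVAL" statements in the write handler into "goto done" so the buffer the function kmalloc'ed earlier is always freed. The underlying idiom, modelled in plain userspace C (the parsing details are invented for the example):

#include <stdlib.h>
#include <string.h>

static int handle_write(const char *buf, size_t count)
{
	int ret_val = 0;
	char *kbuf = malloc(count + 1);

	if (!kbuf)
		return -1;	/* nothing allocated yet, a plain return is safe */

	memcpy(kbuf, buf, count);
	kbuf[count] = '\0';

	if (strncmp(kbuf, "trylock ", 8) != 0) {
		ret_val = -1;	/* was a bare return, which leaked kbuf */
		goto done;
	}

	/* ... act on the parsed request ... */

done:
	free(kbuf);		/* the single exit point owns the cleanup */
	return ret_val;
}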
+2
include/drm/drm_mode.h
··· 235 235 #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 236 236 #define DRM_MODE_FB_DIRTY_FLAGS 0x03 237 237 238 + #define DRM_MODE_FB_DIRTY_MAX_CLIPS 256 239 + 238 240 /* 239 241 * Mark a region of a framebuffer as dirty. 240 242 *
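DRM_MODE_FB_DIRTY_MAX_CLIPS caps how many clip rectangles one dirtyfb ioctl will accept, so callers with larger damage lists should now submit them in batches. A sketch using libdrm's drmModeDirtyFB(); the helper is illustrative, and the 256 literal mirrors the new define (which installed userspace headers may not carry yet):

#include <stdint.h>
#include <xf86drmMode.h>

static int dirty_fb_batched(int fd, uint32_t fb_id,
			    drmModeClipPtr clips, uint32_t num_clips)
{
	const uint32_t max_clips = 256;	/* DRM_MODE_FB_DIRTY_MAX_CLIPS */
	uint32_t done = 0;

	while (done < num_clips) {
		uint32_t n = num_clips - done;
		int ret;

		if (n > max_clips)
			n = max_clips;
		ret = drmModeDirtyFB(fd, fb_id, clips + done, n);
		if (ret)
			return ret;
		done += n;
	}
	return 0;
}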
+4
include/drm/radeon_drm.h
··· 874 874 875 875 #define RADEON_CHUNK_ID_RELOCS 0x01 876 876 #define RADEON_CHUNK_ID_IB 0x02 877 + #define RADEON_CHUNK_ID_FLAGS 0x03 878 + 879 + /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */ 880 + #define RADEON_CS_KEEP_TILING_FLAGS 0x01 877 881 878 882 struct drm_radeon_cs_chunk { 879 883 uint32_t chunk_id;
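Putting the new UAPI together: a CS submission that opts out of tiling-flag rewriting carries an extra RADEON_CHUNK_ID_FLAGS chunk whose first dword holds RADEON_CS_KEEP_TILING_FLAGS; as the parser change above shows, the chunk must be at least one dword long. A sketch of the chunk assembly; the reloc chunk a real submission also needs is elided, and the helper name is made up:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "radeon_drm.h"

static int submit_cs_keep_tiling(int fd, uint32_t *ib, uint32_t ib_dw)
{
	uint32_t cs_flags = RADEON_CS_KEEP_TILING_FLAGS;
	struct drm_radeon_cs_chunk chunks[2];
	uint64_t chunk_array[2];
	struct drm_radeon_cs cs;

	chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
	chunks[0].length_dw = ib_dw;		/* zero-length IBs are rejected */
	chunks[0].chunk_data = (uintptr_t)ib;

	chunks[1].chunk_id = RADEON_CHUNK_ID_FLAGS;
	chunks[1].length_dw = 1;		/* zero-length flags chunks are rejected */
	chunks[1].chunk_data = (uintptr_t)&cs_flags;

	chunk_array[0] = (uintptr_t)&chunks[0];
	chunk_array[1] = (uintptr_t)&chunks[1];

	memset(&cs, 0, sizeof(cs));
	cs.num_chunks = 2;
	cs.chunks = (uintptr_t)chunk_array;

	return drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
}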