Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (31 commits)
drm: integer overflow in drm_mode_dirtyfb_ioctl()
drivers/gpu/vga/vgaarb.c: add missing kfree
drm/radeon/kms/atom: unify i2c gpio table handling
drm/radeon/kms: fix up gpio i2c mask bits for r4xx for real
ttm: Don't return the bo reserved on error path
drm/radeon/kms: add a CS ioctl flag not to rewrite tiling flags in the CS
drm/i915: Fix inconsistent backlight level during disabled
drm, i915: Fix memory leak in i915_gem_busy_ioctl().
drm/i915: Use DPCD value for max DP lanes.
drm/i915: Initiate DP link training only on the lanes we'll be using
drm/i915: Remove trailing white space
drm/i915: Try harder during dp pattern 1 link training
drm/i915: Make DP prepare/commit consistent with DP dpms
drm/i915: Let panel power sequencing hardware do its job
drm/i915: Treat PCH eDP like DP in most places
drm/i915: Remove link_status field from intel_dp structure
drm/i915: Move common PCH_PP_CONTROL setup to ironlake_get_pp_control
drm/i915: Module parameters using '-1' as default must be signed type
drm/i915: Turn on another required clock gating bit on gen6.
drm/i915: Turn on a required 3D clock gating bit on Sandybridge.
...

+601 -433
+4
drivers/gpu/drm/drm_crtc.c
···
 	}
 
 	if (num_clips && clips_ptr) {
+		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+			ret = -EINVAL;
+			goto out_err1;
+		}
 		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
 		if (!clips) {
 			ret = -ENOMEM;
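
For context, the drm_crtc.c change closes an integer-overflow window: an oversized user-supplied num_clips could make num_clips * sizeof(*clips) wrap before the kzalloc(). A minimal user-space sketch of the same validate-before-multiply pattern follows; the MAX_CLIPS bound and struct clip_rect are hypothetical stand-ins, not the DRM definitions.

	#include <stdlib.h>
	#include <stdint.h>

	struct clip_rect { uint16_t x1, y1, x2, y2; };	/* hypothetical stand-in */
	#define MAX_CLIPS 256				/* hypothetical upper bound */

	static struct clip_rect *alloc_clips(unsigned int num_clips)
	{
		/* Reject out-of-range counts before they feed the multiply,
		 * so num_clips * sizeof(*clips) cannot wrap around. */
		if (num_clips == 0 || num_clips > MAX_CLIPS)
			return NULL;
		/* calloc() (like kcalloc() in kernel code) also checks the
		 * multiplication for overflow before allocating. */
		return calloc(num_clips, sizeof(struct clip_rect));
	}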
+51 -6
drivers/gpu/drm/i915/i915_debugfs.c
···
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	int ret;
 
 	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 	if (ring->size == 0)
 		return 0;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "Ring %s:\n", ring->name);
 	seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
···
 	}
 	seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
 	seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
+
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
···
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u16 crstanddelay = I915_READ16(CRSTANDVID);
+	u16 crstanddelay;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	crstanddelay = I915_READ16(CRSTANDVID);
+
+	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
···
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 delayfreq;
-	int i;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	for (i = 0; i < 16; i++) {
 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
 		seq_printf(m, "P%02dVIDFREQ: 0x%08x (VID: %d)\n", i, delayfreq,
 			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 	}
+
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
···
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 inttoext;
-	int i;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	for (i = 1; i <= 32; i++) {
 		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
 		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
 	}
+
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
···
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 rgvmodectl = I915_READ(MEMMODECTL);
-	u32 rstdbyctl = I915_READ(RSTDBYCTL);
-	u16 crstandvid = I915_READ16(CRSTANDVID);
+	u32 rgvmodectl, rstdbyctl;
+	u16 crstandvid;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	rgvmodectl = I915_READ(MEMMODECTL);
+	rstdbyctl = I915_READ(RSTDBYCTL);
+	crstandvid = I915_READ16(CRSTANDVID);
+
+	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
 		   "yes" : "no");
···
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
+
+	mutex_unlock(&dev->struct_mutex);
 
 	return 0;
 }
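
The i915_debugfs.c hunks all apply one pattern: take struct_mutex with mutex_lock_interruptible() around the register reads, propagate the error if the wait is interrupted, and drop the lock before returning. A condensed sketch of that pattern is below; example_lock, read_hw_reg() and the function name are hypothetical stand-ins, not the i915 symbols.

	#include <linux/mutex.h>
	#include <linux/seq_file.h>
	#include <linux/types.h>

	static DEFINE_MUTEX(example_lock);		/* hypothetical lock */

	static u32 read_hw_reg(void)
	{
		return 0x1234;				/* stand-in for a real MMIO read */
	}

	static int example_status_show(struct seq_file *m, void *unused)
	{
		u32 val;
		int ret;

		/* Interruptible lock: a signal makes the debugfs read fail
		 * with -EINTR instead of blocking the reader forever. */
		ret = mutex_lock_interruptible(&example_lock);
		if (ret)
			return ret;

		val = read_hw_reg();			/* read hardware state under the lock */
		mutex_unlock(&example_lock);

		seq_printf(m, "STATUS: 0x%08x\n", val);
		return 0;
	}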
+3 -3
drivers/gpu/drm/i915/i915_drv.c
···
 MODULE_PARM_DESC(i915_enable_rc6,
 		"Enable power-saving render C-state 6 (default: true)");
 
-unsigned int i915_enable_fbc __read_mostly = -1;
+int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
 MODULE_PARM_DESC(i915_enable_fbc,
 		"Enable frame buffer compression for power savings "
···
 		"Use panel (LVDS/eDP) downclocking for power savings "
 		"(default: false)");
 
-unsigned int i915_panel_use_ssc __read_mostly = -1;
+int i915_panel_use_ssc __read_mostly = -1;
 module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
 MODULE_PARM_DESC(lvds_use_ssc,
 		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
···
 extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {		\
-	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
+	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
 	.class_mask = 0xff0000,			\
 	.vendor = 0x8086,			\
 	.device = id,				\
+10 -9
drivers/gpu/drm/i915/i915_drv.h
···
 	struct _drm_i915_sarea *sarea_priv;
 };
 #define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 16
+/* 16 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 5
 
 struct drm_i915_fence_reg {
 	struct list_head lru_list;
···
 	u32 instdone1;
 	u32 seqno;
 	u64 bbaddr;
-	u64 fence[16];
+	u64 fence[I915_MAX_NUM_FENCES];
 	struct timeval time;
 	struct drm_i915_error_object {
 		int page_count;
···
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
-		s32 fence_reg:5;
+		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
 		s32 pinned:2;
 		u32 tiling:2;
 		u32 dirty:1;
···
 	struct notifier_block lid_notifier;
 
 	int crt_ddc_pin;
-	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
···
 	u8 saveAR[21];
 	u8 saveDACMASK;
 	u8 saveCR[37];
-	uint64_t saveFENCE[16];
+	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
 	u32 saveCURACNTR;
 	u32 saveCURAPOS;
 	u32 saveCURABASE;
···
 	 * Fence register bits (if any) for this object. Will be set
 	 * as needed when mapped into the GTT.
 	 * Protected by dev->struct_mutex.
-	 *
-	 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
 	 */
-	signed int fence_reg:5;
+	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
 
 	/**
 	 * Advice: are the backing pages purgeable?
···
 extern unsigned int i915_powersave __read_mostly;
 extern unsigned int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
-extern unsigned int i915_panel_use_ssc __read_mostly;
+extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
 extern unsigned int i915_enable_rc6 __read_mostly;
-extern unsigned int i915_enable_fbc __read_mostly;
+extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
+7 -5
drivers/gpu/drm/i915/i915_gem.c
···
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	for (i = 0; i < 16; i++) {
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 		struct drm_i915_gem_object *obj = reg->obj;
···
 		 * so emit a request to do so.
 		 */
 		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request)
+		if (request) {
 			ret = i915_add_request(obj->ring, NULL, request);
-		else
+			if (ret)
+				kfree(request);
+		} else
 			ret = -ENOMEM;
 	}
···
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(dev) || IS_GEN7(dev)) {
 		/* On Gen6, we can have the GPU use the LLC (the CPU
 		 * cache) for about a 10% performance improvement
 		 * compared to uncached. Graphics requests other than
···
 	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		init_ring_lists(&dev_priv->ring[i]);
-	for (i = 0; i < 16; i++)
+	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
+1
drivers/gpu/drm/i915/i915_irq.c
···
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 7:
 	case 6:
 		for (i = 0; i < 16; i++)
 			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+17 -4
drivers/gpu/drm/i915/i915_reg.h
···
  */
 #define PP_READY		(1 << 30)
 #define PP_SEQUENCE_NONE	(0 << 28)
-#define PP_SEQUENCE_ON		(1 << 28)
-#define PP_SEQUENCE_OFF		(2 << 28)
-#define PP_SEQUENCE_MASK	0x30000000
+#define PP_SEQUENCE_POWER_UP	(1 << 28)
+#define PP_SEQUENCE_POWER_DOWN	(2 << 28)
+#define PP_SEQUENCE_MASK	(3 << 28)
+#define PP_SEQUENCE_SHIFT	28
 #define PP_CYCLE_DELAY_ACTIVE	(1 << 27)
-#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
 #define PP_SEQUENCE_STATE_MASK	0x0000000f
+#define PP_SEQUENCE_STATE_OFF_IDLE	(0x0 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_1	(0x1 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_2	(0x2 << 0)
+#define PP_SEQUENCE_STATE_OFF_S0_3	(0x3 << 0)
+#define PP_SEQUENCE_STATE_ON_IDLE	(0x8 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_0	(0x9 << 0)
+#define PP_SEQUENCE_STATE_ON_S1_2	(0xa << 0)
+#define PP_SEQUENCE_STATE_ON_S1_3	(0xb << 0)
+#define PP_SEQUENCE_STATE_RESET	(0xf << 0)
 #define PP_CONTROL	0x61204
 #define POWER_TARGET_ON	(1 << 0)
 #define PP_ON_DELAYS	0x61208
···
 
 #define GT_FIFO_FREE_ENTRIES			0x120008
 #define GT_FIFO_NUM_RESERVED_ENTRIES		20
+
+#define GEN6_UCGCTL2				0x9404
+# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE	(1 << 12)
+# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE	(1 << 11)
 
 #define GEN6_RPNSWREQ				0xA008
 #define GEN6_TURBO_DISABLE			(1<<31)
+2
drivers/gpu/drm/i915/i915_suspend.c
···
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 7:
 	case 6:
 		for (i = 0; i < 16; i++)
 			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
···
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 7:
 	case 6:
 		for (i = 0; i < 16; i++)
 			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+24 -9
drivers/gpu/drm/i915/intel_display.c
···
 
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
-	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
 		reg = TRANS_DP_CTL(pipe);
 		temp = I915_READ(reg);
···
 			lvds_bpc = 6;
 
 		if (lvds_bpc < display_bpc) {
-			DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
+			DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
 			display_bpc = lvds_bpc;
 		}
 		continue;
···
 			unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 
 			if (edp_bpc < display_bpc) {
-				DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+				DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
 				display_bpc = edp_bpc;
 			}
 			continue;
···
 		/* Don't use an invalid EDID bpc value */
 		if (connector->display_info.bpc &&
 		    connector->display_info.bpc < display_bpc) {
-			DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
+			DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
 			display_bpc = connector->display_info.bpc;
 		}
 	}
···
 	 */
 	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
 		if (display_bpc > 8 && display_bpc < 12) {
-			DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
+			DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
 			display_bpc = 12;
 		} else {
-			DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
+			DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
 			display_bpc = 8;
 		}
 	}
···
 
 	display_bpc = min(display_bpc, bpc);
 
-	DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
-			 bpc, display_bpc);
+	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
+		      bpc, display_bpc);
 
 	*pipe_bpp = display_bpc * 3;
···
 	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
 	if ((is_lvds && dev_priv->lvds_dither) || dither) {
 		pipeconf |= PIPECONF_DITHER_EN;
-		pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+		pipeconf |= PIPECONF_DITHER_TYPE_SP;
 	}
 	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
···
 	I915_WRITE(WM3_LP_ILK, 0);
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
+
+	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+	 * gating disable must be set. Failure to set it results in
+	 * flickering pixels due to Z write ordering failures after
+	 * some amount of runtime in the Mesa "fire" demo, and Unigine
+	 * Sanctuary and Tropics, and apparently anything else with
+	 * alpha test or pixel discard.
+	 *
+	 * According to the spec, bit 11 (RCCUNIT) must also be set,
+	 * but we didn't debug actual testcases to find it out.
+	 */
+	I915_WRITE(GEN6_UCGCTL2,
+		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
 
 	/*
 	 * According to the spec the following bits should be
+237 -174
drivers/gpu/drm/i915/intel_dp.c
··· 59 struct i2c_algo_dp_aux_data algo; 60 bool is_pch_edp; 61 uint8_t train_set[4]; 62 - uint8_t link_status[DP_LINK_STATUS_SIZE]; 63 int panel_power_up_delay; 64 int panel_power_down_delay; 65 int panel_power_cycle_delay; ··· 67 struct drm_display_mode *panel_fixed_mode; /* for eDP */ 68 struct delayed_work panel_vdd_work; 69 bool want_panel_vdd; 70 - unsigned long panel_off_jiffies; 71 }; 72 73 /** ··· 155 static int 156 intel_dp_max_lane_count(struct intel_dp *intel_dp) 157 { 158 - int max_lane_count = 4; 159 - 160 - if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 161 - max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; 162 - switch (max_lane_count) { 163 - case 1: case 2: case 4: 164 - break; 165 - default: 166 - max_lane_count = 4; 167 - } 168 } 169 return max_lane_count; 170 } ··· 762 continue; 763 764 intel_dp = enc_to_intel_dp(encoder); 765 - if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { 766 lane_count = intel_dp->lane_count; 767 - break; 768 - } else if (is_edp(intel_dp)) { 769 - lane_count = dev_priv->edp.lanes; 770 break; 771 } 772 } ··· 803 struct drm_display_mode *adjusted_mode) 804 { 805 struct drm_device *dev = encoder->dev; 806 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 807 struct drm_crtc *crtc = intel_dp->base.base.crtc; 808 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ··· 816 ironlake_edp_pll_off(encoder); 817 } 818 819 - intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 820 - intel_dp->DP |= intel_dp->color_range; 821 822 - if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 823 - intel_dp->DP |= DP_SYNC_HS_HIGH; 824 - if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 825 - intel_dp->DP |= DP_SYNC_VS_HIGH; 826 827 - if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 828 - intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 829 - else 830 - intel_dp->DP |= DP_LINK_TRAIN_OFF; 831 832 switch (intel_dp->lane_count) { 833 case 1: ··· 859 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 860 intel_write_eld(encoder, adjusted_mode); 861 } 862 - 863 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 864 intel_dp->link_configuration[0] = intel_dp->link_bw; 865 intel_dp->link_configuration[1] = intel_dp->lane_count; 866 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; 867 - 868 /* 869 * Check for DPCD version > 1.1 and enhanced framing support 870 */ 871 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 872 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 873 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 874 - intel_dp->DP |= DP_ENHANCED_FRAMING; 875 } 876 877 - /* CPT DP's pipe select is decided in TRANS_DP_CTL */ 878 - if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) 879 - intel_dp->DP |= DP_PIPEB_SELECT; 880 881 - if (is_cpu_edp(intel_dp)) { 882 - /* don't miss out required setting for eDP */ 883 - intel_dp->DP |= DP_PLL_ENABLE; 884 - if (adjusted_mode->clock < 200000) 885 - intel_dp->DP |= DP_PLL_FREQ_160MHZ; 886 - else 887 - intel_dp->DP |= DP_PLL_FREQ_270MHZ; 888 } 889 } 890 891 static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 892 { 893 - unsigned long off_time; 894 - unsigned long delay; 895 - 896 DRM_DEBUG_KMS("Wait for panel power off time\n"); 897 898 - if (ironlake_edp_have_panel_power(intel_dp) || 899 - ironlake_edp_have_panel_vdd(intel_dp)) 900 - { 901 - DRM_DEBUG_KMS("Panel still on, no delay needed\n"); 902 - return; 903 - } 904 905 - off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay); 906 - if (time_after(jiffies, off_time)) { 907 - DRM_DEBUG_KMS("Time 
already passed"); 908 - return; 909 - } 910 - delay = jiffies_to_msecs(off_time - jiffies); 911 - if (delay > intel_dp->panel_power_down_delay) 912 - delay = intel_dp->panel_power_down_delay; 913 - DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay); 914 - msleep(delay); 915 } 916 917 static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) ··· 975 "eDP VDD already requested on\n"); 976 977 intel_dp->want_panel_vdd = true; 978 if (ironlake_edp_have_panel_vdd(intel_dp)) { 979 DRM_DEBUG_KMS("eDP VDD already on\n"); 980 return; 981 } 982 983 - ironlake_wait_panel_off(intel_dp); 984 - pp = I915_READ(PCH_PP_CONTROL); 985 - pp &= ~PANEL_UNLOCK_MASK; 986 - pp |= PANEL_UNLOCK_REGS; 987 pp |= EDP_FORCE_VDD; 988 I915_WRITE(PCH_PP_CONTROL, pp); 989 POSTING_READ(PCH_PP_CONTROL); ··· 1007 u32 pp; 1008 1009 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 1010 - pp = I915_READ(PCH_PP_CONTROL); 1011 - pp &= ~PANEL_UNLOCK_MASK; 1012 - pp |= PANEL_UNLOCK_REGS; 1013 pp &= ~EDP_FORCE_VDD; 1014 I915_WRITE(PCH_PP_CONTROL, pp); 1015 POSTING_READ(PCH_PP_CONTROL); ··· 1015 /* Make sure sequencer is idle before allowing subsequent activity */ 1016 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", 1017 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); 1018 - intel_dp->panel_off_jiffies = jiffies; 1019 } 1020 } 1021 ··· 1026 struct intel_dp, panel_vdd_work); 1027 struct drm_device *dev = intel_dp->base.base.dev; 1028 1029 - mutex_lock(&dev->struct_mutex); 1030 ironlake_panel_vdd_off_sync(intel_dp); 1031 - mutex_unlock(&dev->struct_mutex); 1032 } 1033 1034 static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) ··· 1038 1039 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); 1040 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 1041 - 1042 intel_dp->want_panel_vdd = false; 1043 1044 if (sync) { ··· 1054 } 1055 } 1056 1057 - /* Returns true if the panel was already on when called */ 1058 static void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1059 { 1060 struct drm_device *dev = intel_dp->base.base.dev; 1061 struct drm_i915_private *dev_priv = dev->dev_private; 1062 - u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; 1063 1064 if (!is_edp(intel_dp)) 1065 return; 1066 - if (ironlake_edp_have_panel_power(intel_dp)) 1067 return; 1068 1069 - ironlake_wait_panel_off(intel_dp); 1070 - pp = I915_READ(PCH_PP_CONTROL); 1071 - pp &= ~PANEL_UNLOCK_MASK; 1072 - pp |= PANEL_UNLOCK_REGS; 1073 1074 if (IS_GEN5(dev)) { 1075 /* ILK workaround: disable reset around power sequence */ 1076 pp &= ~PANEL_POWER_RESET; ··· 1081 } 1082 1083 pp |= POWER_TARGET_ON; 1084 I915_WRITE(PCH_PP_CONTROL, pp); 1085 POSTING_READ(PCH_PP_CONTROL); 1086 1087 - if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, 1088 - 5000)) 1089 - DRM_ERROR("panel on wait timed out: 0x%08x\n", 1090 - I915_READ(PCH_PP_STATUS)); 1091 1092 if (IS_GEN5(dev)) { 1093 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ ··· 1096 } 1097 } 1098 1099 - static void ironlake_edp_panel_off(struct drm_encoder *encoder) 1100 { 1101 - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1102 - struct drm_device *dev = encoder->dev; 1103 struct drm_i915_private *dev_priv = dev->dev_private; 1104 - u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | 1105 - PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; 1106 1107 if (!is_edp(intel_dp)) 1108 return; 1109 - pp = I915_READ(PCH_PP_CONTROL); 1110 - pp &= ~PANEL_UNLOCK_MASK; 1111 - pp |= PANEL_UNLOCK_REGS; 1112 
1113 - if (IS_GEN5(dev)) { 1114 - /* ILK workaround: disable reset around power sequence */ 1115 - pp &= ~PANEL_POWER_RESET; 1116 - I915_WRITE(PCH_PP_CONTROL, pp); 1117 - POSTING_READ(PCH_PP_CONTROL); 1118 - } 1119 1120 - intel_dp->panel_off_jiffies = jiffies; 1121 1122 - if (IS_GEN5(dev)) { 1123 - pp &= ~POWER_TARGET_ON; 1124 - I915_WRITE(PCH_PP_CONTROL, pp); 1125 - POSTING_READ(PCH_PP_CONTROL); 1126 - pp &= ~POWER_TARGET_ON; 1127 - I915_WRITE(PCH_PP_CONTROL, pp); 1128 - POSTING_READ(PCH_PP_CONTROL); 1129 - msleep(intel_dp->panel_power_cycle_delay); 1130 1131 - if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) 1132 - DRM_ERROR("panel off wait timed out: 0x%08x\n", 1133 - I915_READ(PCH_PP_STATUS)); 1134 - 1135 - pp |= PANEL_POWER_RESET; /* restore panel reset bit */ 1136 - I915_WRITE(PCH_PP_CONTROL, pp); 1137 - POSTING_READ(PCH_PP_CONTROL); 1138 - } 1139 } 1140 1141 static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) ··· 1134 * allowing it to appear. 1135 */ 1136 msleep(intel_dp->backlight_on_delay); 1137 - pp = I915_READ(PCH_PP_CONTROL); 1138 - pp &= ~PANEL_UNLOCK_MASK; 1139 - pp |= PANEL_UNLOCK_REGS; 1140 pp |= EDP_BLC_ENABLE; 1141 I915_WRITE(PCH_PP_CONTROL, pp); 1142 POSTING_READ(PCH_PP_CONTROL); ··· 1150 return; 1151 1152 DRM_DEBUG_KMS("\n"); 1153 - pp = I915_READ(PCH_PP_CONTROL); 1154 - pp &= ~PANEL_UNLOCK_MASK; 1155 - pp |= PANEL_UNLOCK_REGS; 1156 pp &= ~EDP_BLC_ENABLE; 1157 I915_WRITE(PCH_PP_CONTROL, pp); 1158 POSTING_READ(PCH_PP_CONTROL); ··· 1218 { 1219 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1220 1221 /* Wake up the sink first */ 1222 ironlake_edp_panel_vdd_on(intel_dp); 1223 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1224 ironlake_edp_panel_vdd_off(intel_dp, false); 1225 1226 /* Make sure the panel is off before trying to 1227 * change the mode 1228 */ 1229 - ironlake_edp_backlight_off(intel_dp); 1230 - intel_dp_link_down(intel_dp); 1231 - ironlake_edp_panel_off(encoder); 1232 } 1233 1234 static void intel_dp_commit(struct drm_encoder *encoder) ··· 1243 intel_dp_start_link_train(intel_dp); 1244 ironlake_edp_panel_on(intel_dp); 1245 ironlake_edp_panel_vdd_off(intel_dp, true); 1246 - 1247 intel_dp_complete_link_train(intel_dp); 1248 ironlake_edp_backlight_on(intel_dp); 1249 ··· 1261 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1262 1263 if (mode != DRM_MODE_DPMS_ON) { 1264 ironlake_edp_panel_vdd_on(intel_dp); 1265 - if (is_edp(intel_dp)) 1266 - ironlake_edp_backlight_off(intel_dp); 1267 intel_dp_sink_dpms(intel_dp, mode); 1268 intel_dp_link_down(intel_dp); 1269 - ironlake_edp_panel_off(encoder); 1270 - if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) 1271 - ironlake_edp_pll_off(encoder); 1272 ironlake_edp_panel_vdd_off(intel_dp, false); 1273 } else { 1274 ironlake_edp_panel_vdd_on(intel_dp); 1275 intel_dp_sink_dpms(intel_dp, mode); 1276 if (!(dp_reg & DP_PORT_EN)) { ··· 1282 ironlake_edp_panel_on(intel_dp); 1283 ironlake_edp_panel_vdd_off(intel_dp, true); 1284 intel_dp_complete_link_train(intel_dp); 1285 - ironlake_edp_backlight_on(intel_dp); 1286 } else 1287 ironlake_edp_panel_vdd_off(intel_dp, false); 1288 ironlake_edp_backlight_on(intel_dp); ··· 1319 * link status information 1320 */ 1321 static bool 1322 - intel_dp_get_link_status(struct intel_dp *intel_dp) 1323 { 1324 return intel_dp_aux_native_read_retry(intel_dp, 1325 DP_LANE0_1_STATUS, 1326 - intel_dp->link_status, 1327 DP_LINK_STATUS_SIZE); 1328 } 1329 ··· 1335 } 1336 1337 static uint8_t 1338 - intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE], 
1339 int lane) 1340 { 1341 - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); 1342 int s = ((lane & 1) ? 1343 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : 1344 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); 1345 - uint8_t l = intel_dp_link_status(link_status, i); 1346 1347 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; 1348 } 1349 1350 static uint8_t 1351 - intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE], 1352 int lane) 1353 { 1354 - int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1); 1355 int s = ((lane & 1) ? 1356 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : 1357 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); 1358 - uint8_t l = intel_dp_link_status(link_status, i); 1359 1360 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; 1361 } ··· 1376 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1377 */ 1378 #define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 1379 1380 static uint8_t 1381 intel_dp_pre_emphasis_max(uint8_t voltage_swing) ··· 1395 } 1396 1397 static void 1398 - intel_get_adjust_train(struct intel_dp *intel_dp) 1399 { 1400 uint8_t v = 0; 1401 uint8_t p = 0; 1402 int lane; 1403 1404 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1405 - uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane); 1406 - uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane); 1407 1408 if (this_v > v) 1409 v = this_v; ··· 1414 p = this_p; 1415 } 1416 1417 - if (v >= I830_DP_VOLTAGE_MAX) 1418 - v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED; 1419 1420 if (p >= intel_dp_pre_emphasis_max(v)) 1421 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; ··· 1429 } 1430 1431 static uint32_t 1432 - intel_dp_signal_levels(uint8_t train_set, int lane_count) 1433 { 1434 uint32_t signal_levels = 0; 1435 ··· 1498 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 1499 int lane) 1500 { 1501 - int i = DP_LANE0_1_STATUS + (lane >> 1); 1502 int s = (lane & 1) * 4; 1503 - uint8_t l = intel_dp_link_status(link_status, i); 1504 1505 return (l >> s) & 0xf; 1506 } ··· 1524 DP_LANE_CHANNEL_EQ_DONE|\ 1525 DP_LANE_SYMBOL_LOCKED) 1526 static bool 1527 - intel_channel_eq_ok(struct intel_dp *intel_dp) 1528 { 1529 uint8_t lane_align; 1530 uint8_t lane_status; 1531 int lane; 1532 1533 - lane_align = intel_dp_link_status(intel_dp->link_status, 1534 DP_LANE_ALIGN_STATUS_UPDATED); 1535 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) 1536 return false; 1537 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1538 - lane_status = intel_get_lane_status(intel_dp->link_status, lane); 1539 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) 1540 return false; 1541 } ··· 1560 1561 ret = intel_dp_aux_native_write(intel_dp, 1562 DP_TRAINING_LANE0_SET, 1563 - intel_dp->train_set, 4); 1564 - if (ret != 4) 1565 return false; 1566 1567 return true; ··· 1578 int i; 1579 uint8_t voltage; 1580 bool clock_recovery = false; 1581 - int tries; 1582 u32 reg; 1583 uint32_t DP = intel_dp->DP; 1584 ··· 1605 DP &= ~DP_LINK_TRAIN_MASK; 1606 memset(intel_dp->train_set, 0, 4); 1607 voltage = 0xff; 1608 - tries = 0; 1609 clock_recovery = false; 1610 for (;;) { 1611 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1612 uint32_t signal_levels; 1613 - if (IS_GEN6(dev) && is_edp(intel_dp)) { 1614 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1615 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1616 } else { 1617 - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], 
intel_dp->lane_count); 1618 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1619 } 1620 ··· 1634 /* Set training pattern 1 */ 1635 1636 udelay(100); 1637 - if (!intel_dp_get_link_status(intel_dp)) 1638 break; 1639 1640 - if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1641 clock_recovery = true; 1642 break; 1643 } ··· 1649 for (i = 0; i < intel_dp->lane_count; i++) 1650 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1651 break; 1652 - if (i == intel_dp->lane_count) 1653 - break; 1654 1655 /* Check to see if we've tried the same voltage 5 times */ 1656 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1657 - ++tries; 1658 - if (tries == 5) 1659 break; 1660 } else 1661 - tries = 0; 1662 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1663 1664 /* Compute new intel_dp->train_set as requested by target */ 1665 - intel_get_adjust_train(intel_dp); 1666 } 1667 1668 intel_dp->DP = DP; ··· 1695 for (;;) { 1696 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1697 uint32_t signal_levels; 1698 1699 if (cr_tries > 5) { 1700 DRM_ERROR("failed to train DP, aborting\n"); ··· 1703 break; 1704 } 1705 1706 - if (IS_GEN6(dev) && is_edp(intel_dp)) { 1707 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1708 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1709 } else { 1710 - signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); 1711 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1712 } 1713 ··· 1723 break; 1724 1725 udelay(400); 1726 - if (!intel_dp_get_link_status(intel_dp)) 1727 break; 1728 1729 /* Make sure clock is still ok */ 1730 - if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1731 intel_dp_start_link_train(intel_dp); 1732 cr_tries++; 1733 continue; 1734 } 1735 1736 - if (intel_channel_eq_ok(intel_dp)) { 1737 channel_eq = true; 1738 break; 1739 } ··· 1748 } 1749 1750 /* Compute new intel_dp->train_set as requested by target */ 1751 - intel_get_adjust_train(intel_dp); 1752 ++tries; 1753 } 1754 ··· 1793 1794 msleep(17); 1795 1796 - if (is_edp(intel_dp)) 1797 - DP |= DP_LINK_TRAIN_OFF; 1798 1799 if (!HAS_PCH_CPT(dev) && 1800 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { ··· 1884 intel_dp_check_link_status(struct intel_dp *intel_dp) 1885 { 1886 u8 sink_irq_vector; 1887 1888 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) 1889 return; ··· 1893 return; 1894 1895 /* Try to read receiver status if the link appears to be up */ 1896 - if (!intel_dp_get_link_status(intel_dp)) { 1897 intel_dp_link_down(intel_dp); 1898 return; 1899 } ··· 1918 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 1919 } 1920 1921 - if (!intel_channel_eq_ok(intel_dp)) { 1922 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 1923 drm_get_encoder_name(&intel_dp->base.base)); 1924 intel_dp_start_link_train(intel_dp); ··· 2242 continue; 2243 2244 intel_dp = enc_to_intel_dp(encoder); 2245 - if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) 2246 return intel_dp->output_reg; 2247 } 2248 ··· 2385 2386 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2387 PANEL_LIGHT_ON_DELAY_SHIFT; 2388 - 2389 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2390 PANEL_LIGHT_OFF_DELAY_SHIFT; 2391 ··· 2418 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2419 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2420 2421 - intel_dp->panel_off_jiffies = jiffies - 
intel_dp->panel_power_down_delay; 2422 - 2423 ironlake_edp_panel_vdd_on(intel_dp); 2424 ret = intel_dp_get_dpcd(intel_dp); 2425 ironlake_edp_panel_vdd_off(intel_dp, false); 2426 if (ret) { 2427 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2428 dev_priv->no_aux_handshake =
··· 59 struct i2c_algo_dp_aux_data algo; 60 bool is_pch_edp; 61 uint8_t train_set[4]; 62 int panel_power_up_delay; 63 int panel_power_down_delay; 64 int panel_power_cycle_delay; ··· 68 struct drm_display_mode *panel_fixed_mode; /* for eDP */ 69 struct delayed_work panel_vdd_work; 70 bool want_panel_vdd; 71 }; 72 73 /** ··· 157 static int 158 intel_dp_max_lane_count(struct intel_dp *intel_dp) 159 { 160 + int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; 161 + switch (max_lane_count) { 162 + case 1: case 2: case 4: 163 + break; 164 + default: 165 + max_lane_count = 4; 166 } 167 return max_lane_count; 168 } ··· 768 continue; 769 770 intel_dp = enc_to_intel_dp(encoder); 771 + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || 772 + intel_dp->base.type == INTEL_OUTPUT_EDP) 773 + { 774 lane_count = intel_dp->lane_count; 775 break; 776 } 777 } ··· 810 struct drm_display_mode *adjusted_mode) 811 { 812 struct drm_device *dev = encoder->dev; 813 + struct drm_i915_private *dev_priv = dev->dev_private; 814 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 815 struct drm_crtc *crtc = intel_dp->base.base.crtc; 816 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); ··· 822 ironlake_edp_pll_off(encoder); 823 } 824 825 + /* 826 + * There are three kinds of DP registers: 827 + * 828 + * IBX PCH 829 + * CPU 830 + * CPT PCH 831 + * 832 + * IBX PCH and CPU are the same for almost everything, 833 + * except that the CPU DP PLL is configured in this 834 + * register 835 + * 836 + * CPT PCH is quite different, having many bits moved 837 + * to the TRANS_DP_CTL register instead. That 838 + * configuration happens (oddly) in ironlake_pch_enable 839 + */ 840 841 + /* Preserve the BIOS-computed detected bit. This is 842 + * supposed to be read-only. 843 + */ 844 + intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 845 + intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 846 847 + /* Handle DP bits in common between all three register formats */ 848 + 849 + intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 850 851 switch (intel_dp->lane_count) { 852 case 1: ··· 852 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 853 intel_write_eld(encoder, adjusted_mode); 854 } 855 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 856 intel_dp->link_configuration[0] = intel_dp->link_bw; 857 intel_dp->link_configuration[1] = intel_dp->lane_count; 858 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; 859 /* 860 * Check for DPCD version > 1.1 and enhanced framing support 861 */ 862 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 863 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 864 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 865 } 866 867 + /* Split out the IBX/CPU vs CPT settings */ 868 869 + if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 870 + intel_dp->DP |= intel_dp->color_range; 871 + 872 + if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 873 + intel_dp->DP |= DP_SYNC_HS_HIGH; 874 + if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 875 + intel_dp->DP |= DP_SYNC_VS_HIGH; 876 + intel_dp->DP |= DP_LINK_TRAIN_OFF; 877 + 878 + if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 879 + intel_dp->DP |= DP_ENHANCED_FRAMING; 880 + 881 + if (intel_crtc->pipe == 1) 882 + intel_dp->DP |= DP_PIPEB_SELECT; 883 + 884 + if (is_cpu_edp(intel_dp)) { 885 + /* don't miss out required setting for eDP */ 886 + intel_dp->DP |= DP_PLL_ENABLE; 887 + if (adjusted_mode->clock < 200000) 888 + intel_dp->DP |= DP_PLL_FREQ_160MHZ; 889 + else 890 + 
intel_dp->DP |= DP_PLL_FREQ_270MHZ; 891 + } 892 + } else { 893 + intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 894 } 895 + } 896 + 897 + #define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 898 + #define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 899 + 900 + #define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 901 + #define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 902 + 903 + #define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 904 + #define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 905 + 906 + static void ironlake_wait_panel_status(struct intel_dp *intel_dp, 907 + u32 mask, 908 + u32 value) 909 + { 910 + struct drm_device *dev = intel_dp->base.base.dev; 911 + struct drm_i915_private *dev_priv = dev->dev_private; 912 + 913 + DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n", 914 + mask, value, 915 + I915_READ(PCH_PP_STATUS), 916 + I915_READ(PCH_PP_CONTROL)); 917 + 918 + if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) { 919 + DRM_ERROR("Panel status timeout: status %08x control %08x\n", 920 + I915_READ(PCH_PP_STATUS), 921 + I915_READ(PCH_PP_CONTROL)); 922 + } 923 + } 924 + 925 + static void ironlake_wait_panel_on(struct intel_dp *intel_dp) 926 + { 927 + DRM_DEBUG_KMS("Wait for panel power on\n"); 928 + ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE); 929 } 930 931 static void ironlake_wait_panel_off(struct intel_dp *intel_dp) 932 { 933 DRM_DEBUG_KMS("Wait for panel power off time\n"); 934 + ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE); 935 + } 936 937 + static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp) 938 + { 939 + DRM_DEBUG_KMS("Wait for panel power cycle\n"); 940 + ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE); 941 + } 942 943 + 944 + /* Read the current pp_control value, unlocking the register if it 945 + * is locked 946 + */ 947 + 948 + static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv) 949 + { 950 + u32 control = I915_READ(PCH_PP_CONTROL); 951 + 952 + control &= ~PANEL_UNLOCK_MASK; 953 + control |= PANEL_UNLOCK_REGS; 954 + return control; 955 } 956 957 static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) ··· 921 "eDP VDD already requested on\n"); 922 923 intel_dp->want_panel_vdd = true; 924 + 925 if (ironlake_edp_have_panel_vdd(intel_dp)) { 926 DRM_DEBUG_KMS("eDP VDD already on\n"); 927 return; 928 } 929 930 + if (!ironlake_edp_have_panel_power(intel_dp)) 931 + ironlake_wait_panel_power_cycle(intel_dp); 932 + 933 + pp = ironlake_get_pp_control(dev_priv); 934 pp |= EDP_FORCE_VDD; 935 I915_WRITE(PCH_PP_CONTROL, pp); 936 POSTING_READ(PCH_PP_CONTROL); ··· 952 u32 pp; 953 954 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { 955 + pp = ironlake_get_pp_control(dev_priv); 956 pp &= ~EDP_FORCE_VDD; 957 I915_WRITE(PCH_PP_CONTROL, pp); 958 POSTING_READ(PCH_PP_CONTROL); ··· 962 /* Make sure sequencer is idle before allowing subsequent activity */ 963 DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", 964 I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); 965 + 966 + msleep(intel_dp->panel_power_down_delay); 967 } 968 } 969 ··· 972 struct intel_dp, panel_vdd_work); 973 struct drm_device *dev = intel_dp->base.base.dev; 974 975 + mutex_lock(&dev->mode_config.mutex); 976 ironlake_panel_vdd_off_sync(intel_dp); 
977 + mutex_unlock(&dev->mode_config.mutex); 978 } 979 980 static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) ··· 984 985 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); 986 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); 987 + 988 intel_dp->want_panel_vdd = false; 989 990 if (sync) { ··· 1000 } 1001 } 1002 1003 static void ironlake_edp_panel_on(struct intel_dp *intel_dp) 1004 { 1005 struct drm_device *dev = intel_dp->base.base.dev; 1006 struct drm_i915_private *dev_priv = dev->dev_private; 1007 + u32 pp; 1008 1009 if (!is_edp(intel_dp)) 1010 return; 1011 + 1012 + DRM_DEBUG_KMS("Turn eDP power on\n"); 1013 + 1014 + if (ironlake_edp_have_panel_power(intel_dp)) { 1015 + DRM_DEBUG_KMS("eDP power already on\n"); 1016 return; 1017 + } 1018 1019 + ironlake_wait_panel_power_cycle(intel_dp); 1020 1021 + pp = ironlake_get_pp_control(dev_priv); 1022 if (IS_GEN5(dev)) { 1023 /* ILK workaround: disable reset around power sequence */ 1024 pp &= ~PANEL_POWER_RESET; ··· 1025 } 1026 1027 pp |= POWER_TARGET_ON; 1028 + if (!IS_GEN5(dev)) 1029 + pp |= PANEL_POWER_RESET; 1030 + 1031 I915_WRITE(PCH_PP_CONTROL, pp); 1032 POSTING_READ(PCH_PP_CONTROL); 1033 1034 + ironlake_wait_panel_on(intel_dp); 1035 1036 if (IS_GEN5(dev)) { 1037 pp |= PANEL_POWER_RESET; /* restore panel reset bit */ ··· 1040 } 1041 } 1042 1043 + static void ironlake_edp_panel_off(struct intel_dp *intel_dp) 1044 { 1045 + struct drm_device *dev = intel_dp->base.base.dev; 1046 struct drm_i915_private *dev_priv = dev->dev_private; 1047 + u32 pp; 1048 1049 if (!is_edp(intel_dp)) 1050 return; 1051 1052 + DRM_DEBUG_KMS("Turn eDP power off\n"); 1053 1054 + WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n"); 1055 1056 + pp = ironlake_get_pp_control(dev_priv); 1057 + pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE); 1058 + I915_WRITE(PCH_PP_CONTROL, pp); 1059 + POSTING_READ(PCH_PP_CONTROL); 1060 1061 + ironlake_wait_panel_off(intel_dp); 1062 } 1063 1064 static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) ··· 1099 * allowing it to appear. 
1100 */ 1101 msleep(intel_dp->backlight_on_delay); 1102 + pp = ironlake_get_pp_control(dev_priv); 1103 pp |= EDP_BLC_ENABLE; 1104 I915_WRITE(PCH_PP_CONTROL, pp); 1105 POSTING_READ(PCH_PP_CONTROL); ··· 1117 return; 1118 1119 DRM_DEBUG_KMS("\n"); 1120 + pp = ironlake_get_pp_control(dev_priv); 1121 pp &= ~EDP_BLC_ENABLE; 1122 I915_WRITE(PCH_PP_CONTROL, pp); 1123 POSTING_READ(PCH_PP_CONTROL); ··· 1187 { 1188 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 1189 1190 + ironlake_edp_backlight_off(intel_dp); 1191 + ironlake_edp_panel_off(intel_dp); 1192 + 1193 /* Wake up the sink first */ 1194 ironlake_edp_panel_vdd_on(intel_dp); 1195 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 1196 + intel_dp_link_down(intel_dp); 1197 ironlake_edp_panel_vdd_off(intel_dp, false); 1198 1199 /* Make sure the panel is off before trying to 1200 * change the mode 1201 */ 1202 } 1203 1204 static void intel_dp_commit(struct drm_encoder *encoder) ··· 1211 intel_dp_start_link_train(intel_dp); 1212 ironlake_edp_panel_on(intel_dp); 1213 ironlake_edp_panel_vdd_off(intel_dp, true); 1214 intel_dp_complete_link_train(intel_dp); 1215 ironlake_edp_backlight_on(intel_dp); 1216 ··· 1230 uint32_t dp_reg = I915_READ(intel_dp->output_reg); 1231 1232 if (mode != DRM_MODE_DPMS_ON) { 1233 + ironlake_edp_backlight_off(intel_dp); 1234 + ironlake_edp_panel_off(intel_dp); 1235 + 1236 ironlake_edp_panel_vdd_on(intel_dp); 1237 intel_dp_sink_dpms(intel_dp, mode); 1238 intel_dp_link_down(intel_dp); 1239 ironlake_edp_panel_vdd_off(intel_dp, false); 1240 + 1241 + if (is_cpu_edp(intel_dp)) 1242 + ironlake_edp_pll_off(encoder); 1243 } else { 1244 + if (is_cpu_edp(intel_dp)) 1245 + ironlake_edp_pll_on(encoder); 1246 + 1247 ironlake_edp_panel_vdd_on(intel_dp); 1248 intel_dp_sink_dpms(intel_dp, mode); 1249 if (!(dp_reg & DP_PORT_EN)) { ··· 1247 ironlake_edp_panel_on(intel_dp); 1248 ironlake_edp_panel_vdd_off(intel_dp, true); 1249 intel_dp_complete_link_train(intel_dp); 1250 } else 1251 ironlake_edp_panel_vdd_off(intel_dp, false); 1252 ironlake_edp_backlight_on(intel_dp); ··· 1285 * link status information 1286 */ 1287 static bool 1288 + intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1289 { 1290 return intel_dp_aux_native_read_retry(intel_dp, 1291 DP_LANE0_1_STATUS, 1292 + link_status, 1293 DP_LINK_STATUS_SIZE); 1294 } 1295 ··· 1301 } 1302 1303 static uint8_t 1304 + intel_get_adjust_request_voltage(uint8_t adjust_request[2], 1305 int lane) 1306 { 1307 int s = ((lane & 1) ? 1308 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT : 1309 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT); 1310 + uint8_t l = adjust_request[lane>>1]; 1311 1312 return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT; 1313 } 1314 1315 static uint8_t 1316 + intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2], 1317 int lane) 1318 { 1319 int s = ((lane & 1) ? 
1320 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT : 1321 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT); 1322 + uint8_t l = adjust_request[lane>>1]; 1323 1324 return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT; 1325 } ··· 1344 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB 1345 */ 1346 #define I830_DP_VOLTAGE_MAX DP_TRAIN_VOLTAGE_SWING_800 1347 + #define I830_DP_VOLTAGE_MAX_CPT DP_TRAIN_VOLTAGE_SWING_1200 1348 1349 static uint8_t 1350 intel_dp_pre_emphasis_max(uint8_t voltage_swing) ··· 1362 } 1363 1364 static void 1365 + intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1366 { 1367 + struct drm_device *dev = intel_dp->base.base.dev; 1368 uint8_t v = 0; 1369 uint8_t p = 0; 1370 int lane; 1371 + uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS); 1372 + int voltage_max; 1373 1374 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1375 + uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane); 1376 + uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane); 1377 1378 if (this_v > v) 1379 v = this_v; ··· 1378 p = this_p; 1379 } 1380 1381 + if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1382 + voltage_max = I830_DP_VOLTAGE_MAX_CPT; 1383 + else 1384 + voltage_max = I830_DP_VOLTAGE_MAX; 1385 + if (v >= voltage_max) 1386 + v = voltage_max | DP_TRAIN_MAX_SWING_REACHED; 1387 1388 if (p >= intel_dp_pre_emphasis_max(v)) 1389 p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; ··· 1389 } 1390 1391 static uint32_t 1392 + intel_dp_signal_levels(uint8_t train_set) 1393 { 1394 uint32_t signal_levels = 0; 1395 ··· 1458 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE], 1459 int lane) 1460 { 1461 int s = (lane & 1) * 4; 1462 + uint8_t l = link_status[lane>>1]; 1463 1464 return (l >> s) & 0xf; 1465 } ··· 1485 DP_LANE_CHANNEL_EQ_DONE|\ 1486 DP_LANE_SYMBOL_LOCKED) 1487 static bool 1488 + intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE]) 1489 { 1490 uint8_t lane_align; 1491 uint8_t lane_status; 1492 int lane; 1493 1494 + lane_align = intel_dp_link_status(link_status, 1495 DP_LANE_ALIGN_STATUS_UPDATED); 1496 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) 1497 return false; 1498 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1499 + lane_status = intel_get_lane_status(link_status, lane); 1500 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) 1501 return false; 1502 } ··· 1521 1522 ret = intel_dp_aux_native_write(intel_dp, 1523 DP_TRAINING_LANE0_SET, 1524 + intel_dp->train_set, 1525 + intel_dp->lane_count); 1526 + if (ret != intel_dp->lane_count) 1527 return false; 1528 1529 return true; ··· 1538 int i; 1539 uint8_t voltage; 1540 bool clock_recovery = false; 1541 + int voltage_tries, loop_tries; 1542 u32 reg; 1543 uint32_t DP = intel_dp->DP; 1544 ··· 1565 DP &= ~DP_LINK_TRAIN_MASK; 1566 memset(intel_dp->train_set, 0, 4); 1567 voltage = 0xff; 1568 + voltage_tries = 0; 1569 + loop_tries = 0; 1570 clock_recovery = false; 1571 for (;;) { 1572 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1573 + uint8_t link_status[DP_LINK_STATUS_SIZE]; 1574 uint32_t signal_levels; 1575 + 1576 + if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1577 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1578 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1579 } else { 1580 + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1581 + DRM_DEBUG_KMS("training pattern 1 signal 
levels %08x\n", signal_levels); 1582 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1583 } 1584 ··· 1590 /* Set training pattern 1 */ 1591 1592 udelay(100); 1593 + if (!intel_dp_get_link_status(intel_dp, link_status)) { 1594 + DRM_ERROR("failed to get link status\n"); 1595 break; 1596 + } 1597 1598 + if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1599 + DRM_DEBUG_KMS("clock recovery OK\n"); 1600 clock_recovery = true; 1601 break; 1602 } ··· 1602 for (i = 0; i < intel_dp->lane_count; i++) 1603 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1604 break; 1605 + if (i == intel_dp->lane_count) { 1606 + ++loop_tries; 1607 + if (loop_tries == 5) { 1608 + DRM_DEBUG_KMS("too many full retries, give up\n"); 1609 + break; 1610 + } 1611 + memset(intel_dp->train_set, 0, 4); 1612 + voltage_tries = 0; 1613 + continue; 1614 + } 1615 1616 /* Check to see if we've tried the same voltage 5 times */ 1617 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1618 + ++voltage_tries; 1619 + if (voltage_tries == 5) { 1620 + DRM_DEBUG_KMS("too many voltage retries, give up\n"); 1621 break; 1622 + } 1623 } else 1624 + voltage_tries = 0; 1625 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1626 1627 /* Compute new intel_dp->train_set as requested by target */ 1628 + intel_get_adjust_train(intel_dp, link_status); 1629 } 1630 1631 intel_dp->DP = DP; ··· 1638 for (;;) { 1639 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1640 uint32_t signal_levels; 1641 + uint8_t link_status[DP_LINK_STATUS_SIZE]; 1642 1643 if (cr_tries > 5) { 1644 DRM_ERROR("failed to train DP, aborting\n"); ··· 1645 break; 1646 } 1647 1648 + if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) { 1649 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1650 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1651 } else { 1652 + signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]); 1653 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1654 } 1655 ··· 1665 break; 1666 1667 udelay(400); 1668 + if (!intel_dp_get_link_status(intel_dp, link_status)) 1669 break; 1670 1671 /* Make sure clock is still ok */ 1672 + if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) { 1673 intel_dp_start_link_train(intel_dp); 1674 cr_tries++; 1675 continue; 1676 } 1677 1678 + if (intel_channel_eq_ok(intel_dp, link_status)) { 1679 channel_eq = true; 1680 break; 1681 } ··· 1690 } 1691 1692 /* Compute new intel_dp->train_set as requested by target */ 1693 + intel_get_adjust_train(intel_dp, link_status); 1694 ++tries; 1695 } 1696 ··· 1735 1736 msleep(17); 1737 1738 + if (is_edp(intel_dp)) { 1739 + if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 1740 + DP |= DP_LINK_TRAIN_OFF_CPT; 1741 + else 1742 + DP |= DP_LINK_TRAIN_OFF; 1743 + } 1744 1745 if (!HAS_PCH_CPT(dev) && 1746 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { ··· 1822 intel_dp_check_link_status(struct intel_dp *intel_dp) 1823 { 1824 u8 sink_irq_vector; 1825 + u8 link_status[DP_LINK_STATUS_SIZE]; 1826 1827 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) 1828 return; ··· 1830 return; 1831 1832 /* Try to read receiver status if the link appears to be up */ 1833 + if (!intel_dp_get_link_status(intel_dp, link_status)) { 1834 intel_dp_link_down(intel_dp); 1835 return; 1836 } ··· 1855 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 1856 } 1857 1858 + if (!intel_channel_eq_ok(intel_dp, link_status)) { 1859 DRM_DEBUG_KMS("%s: channel EQ not 
ok, retraining\n", 1860 drm_get_encoder_name(&intel_dp->base.base)); 1861 intel_dp_start_link_train(intel_dp); ··· 2179 continue; 2180 2181 intel_dp = enc_to_intel_dp(encoder); 2182 + if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || 2183 + intel_dp->base.type == INTEL_OUTPUT_EDP) 2184 return intel_dp->output_reg; 2185 } 2186 ··· 2321 2322 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2323 PANEL_LIGHT_ON_DELAY_SHIFT; 2324 + 2325 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2326 PANEL_LIGHT_OFF_DELAY_SHIFT; 2327 ··· 2354 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2355 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2356 2357 ironlake_edp_panel_vdd_on(intel_dp); 2358 ret = intel_dp_get_dpcd(intel_dp); 2359 ironlake_edp_panel_vdd_off(intel_dp, false); 2360 + 2361 if (ret) { 2362 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2363 dev_priv->no_aux_handshake =
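The reworked link training above pulls the sink's adjust-request bytes straight out of the freshly read DPCD link-status block rather than from a field cached in intel_dp. A simplified, standalone sketch of how those two bytes decode into per-lane voltage-swing and pre-emphasis requests (the byte layout follows the DisplayPort DPCD ADJUST_REQUEST registers; the helper names are illustrative, not the driver's):

#include <stdint.h>

/* DPCD ADJUST_REQUEST_LANE0_1 / LANE2_3: each byte carries two lanes.
 * Low nibble = even lane, high nibble = odd lane; within a nibble,
 * bits [1:0] request the voltage swing level and bits [3:2] the
 * pre-emphasis level (0-3 each). */
static uint8_t adjust_request_voltage(const uint8_t adjust[2], int lane)
{
        int shift = (lane & 1) ? 4 : 0;

        return (adjust[lane >> 1] >> shift) & 0x3;
}

static uint8_t adjust_request_pre_emphasis(const uint8_t adjust[2], int lane)
{
        int shift = (lane & 1) ? 6 : 2;

        return (adjust[lane >> 1] >> shift) & 0x3;
}

The driver then takes the largest request across the active lanes, clamps the swing to the platform maximum (800 mV, or 1200 mV on CPT PCH ports), limits pre-emphasis accordingly, and writes the combined value back to DP_TRAINING_LANE0_SET for every lane.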
+2 -1
drivers/gpu/drm/i915/intel_panel.c
··· 326 static int intel_panel_get_brightness(struct backlight_device *bd) 327 { 328 struct drm_device *dev = bl_get_data(bd); 329 - return intel_panel_get_backlight(dev); 330 } 331 332 static const struct backlight_ops intel_panel_bl_ops = {
··· 326 static int intel_panel_get_brightness(struct backlight_device *bd) 327 { 328 struct drm_device *dev = bl_get_data(bd); 329 + struct drm_i915_private *dev_priv = dev->dev_private; 330 + return dev_priv->backlight_level; 331 } 332 333 static const struct backlight_ops intel_panel_bl_ops = {
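The getter now reports the level cached in dev_priv->backlight_level instead of reading it back from the hardware, so querying brightness while the backlight is disabled no longer returns 0 and then clobbers the saved level on re-enable. A minimal sketch of the cache-and-report pattern, with purely illustrative names:

/* Illustrative only: remember the last requested level in driver state
 * and report that, rather than a register that reads 0 while disabled. */
struct panel_state {
        unsigned int backlight_level;   /* last level userspace requested */
        int backlight_enabled;
};

static void panel_hw_write_level(unsigned int level)
{
        /* hypothetical PWM register write */
}

static void panel_set_backlight(struct panel_state *p, unsigned int level)
{
        p->backlight_level = level;
        if (p->backlight_enabled)
                panel_hw_write_level(level);
}

static unsigned int panel_get_backlight(const struct panel_state *p)
{
        return p->backlight_level;      /* valid even while disabled */
}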
+50 -42
drivers/gpu/drm/radeon/evergreen_cs.c
··· 480 } 481 break; 482 case DB_Z_INFO: 483 - r = evergreen_cs_packet_next_reloc(p, &reloc); 484 - if (r) { 485 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 486 - "0x%04X\n", reg); 487 - return -EINVAL; 488 - } 489 track->db_z_info = radeon_get_ib_value(p, idx); 490 - ib[idx] &= ~Z_ARRAY_MODE(0xf); 491 - track->db_z_info &= ~Z_ARRAY_MODE(0xf); 492 - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 493 - ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 494 - track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 495 - } else { 496 - ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 497 - track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 498 } 499 break; 500 case DB_STENCIL_INFO: ··· 609 case CB_COLOR5_INFO: 610 case CB_COLOR6_INFO: 611 case CB_COLOR7_INFO: 612 - r = evergreen_cs_packet_next_reloc(p, &reloc); 613 - if (r) { 614 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 615 - "0x%04X\n", reg); 616 - return -EINVAL; 617 - } 618 tmp = (reg - CB_COLOR0_INFO) / 0x3c; 619 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 620 - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 621 - ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 622 - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 623 - } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 624 - ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 625 - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 626 } 627 break; 628 case CB_COLOR8_INFO: 629 case CB_COLOR9_INFO: 630 case CB_COLOR10_INFO: 631 case CB_COLOR11_INFO: 632 - r = evergreen_cs_packet_next_reloc(p, &reloc); 633 - if (r) { 634 - dev_warn(p->dev, "bad SET_CONTEXT_REG " 635 - "0x%04X\n", reg); 636 - return -EINVAL; 637 - } 638 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; 639 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 640 - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 641 - ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 642 - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 643 - } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 644 - ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 645 - track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 646 } 647 break; 648 case CB_COLOR0_PITCH: ··· 1317 return -EINVAL; 1318 } 1319 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1320 - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1321 - ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 1322 - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1323 - ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 1324 texture = reloc->robj; 1325 /* tex mip base */ 1326 r = evergreen_cs_packet_next_reloc(p, &reloc);
··· 480 } 481 break; 482 case DB_Z_INFO: 483 track->db_z_info = radeon_get_ib_value(p, idx); 484 + if (!p->keep_tiling_flags) { 485 + r = evergreen_cs_packet_next_reloc(p, &reloc); 486 + if (r) { 487 + dev_warn(p->dev, "bad SET_CONTEXT_REG " 488 + "0x%04X\n", reg); 489 + return -EINVAL; 490 + } 491 + ib[idx] &= ~Z_ARRAY_MODE(0xf); 492 + track->db_z_info &= ~Z_ARRAY_MODE(0xf); 493 + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 494 + ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 495 + track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 496 + } else { 497 + ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 498 + track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 499 + } 500 } 501 break; 502 case DB_STENCIL_INFO: ··· 607 case CB_COLOR5_INFO: 608 case CB_COLOR6_INFO: 609 case CB_COLOR7_INFO: 610 tmp = (reg - CB_COLOR0_INFO) / 0x3c; 611 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 612 + if (!p->keep_tiling_flags) { 613 + r = evergreen_cs_packet_next_reloc(p, &reloc); 614 + if (r) { 615 + dev_warn(p->dev, "bad SET_CONTEXT_REG " 616 + "0x%04X\n", reg); 617 + return -EINVAL; 618 + } 619 + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 620 + ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 621 + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 622 + } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 623 + ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 624 + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 625 + } 626 } 627 break; 628 case CB_COLOR8_INFO: 629 case CB_COLOR9_INFO: 630 case CB_COLOR10_INFO: 631 case CB_COLOR11_INFO: 632 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; 633 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 634 + if (!p->keep_tiling_flags) { 635 + r = evergreen_cs_packet_next_reloc(p, &reloc); 636 + if (r) { 637 + dev_warn(p->dev, "bad SET_CONTEXT_REG " 638 + "0x%04X\n", reg); 639 + return -EINVAL; 640 + } 641 + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 642 + ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 643 + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 644 + } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 645 + ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 646 + track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 647 + } 648 } 649 break; 650 case CB_COLOR0_PITCH: ··· 1311 return -EINVAL; 1312 } 1313 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1314 + if (!p->keep_tiling_flags) { 1315 + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1316 + ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 1317 + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1318 + ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 1319 + } 1320 texture = reloc->robj; 1321 /* tex mip base */ 1322 r = evergreen_cs_packet_next_reloc(p, &reloc);
+52 -44
drivers/gpu/drm/radeon/r300.c
··· 701 return r; 702 } 703 704 - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 705 - tile_flags |= R300_TXO_MACRO_TILE; 706 - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 707 - tile_flags |= R300_TXO_MICRO_TILE; 708 - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 709 - tile_flags |= R300_TXO_MICRO_TILE_SQUARE; 710 711 - tmp = idx_value + ((u32)reloc->lobj.gpu_offset); 712 - tmp |= tile_flags; 713 - ib[idx] = tmp; 714 track->textures[i].robj = reloc->robj; 715 track->tex_dirty = true; 716 break; ··· 765 /* RB3D_COLORPITCH1 */ 766 /* RB3D_COLORPITCH2 */ 767 /* RB3D_COLORPITCH3 */ 768 - r = r100_cs_packet_next_reloc(p, &reloc); 769 - if (r) { 770 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 771 - idx, reg); 772 - r100_cs_dump_packet(p, pkt); 773 - return r; 774 } 775 - 776 - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 777 - tile_flags |= R300_COLOR_TILE_ENABLE; 778 - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 779 - tile_flags |= R300_COLOR_MICROTILE_ENABLE; 780 - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 781 - tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; 782 - 783 - tmp = idx_value & ~(0x7 << 16); 784 - tmp |= tile_flags; 785 - ib[idx] = tmp; 786 i = (reg - 0x4E38) >> 2; 787 track->cb[i].pitch = idx_value & 0x3FFE; 788 switch (((idx_value >> 21) & 0xF)) { ··· 850 break; 851 case 0x4F24: 852 /* ZB_DEPTHPITCH */ 853 - r = r100_cs_packet_next_reloc(p, &reloc); 854 - if (r) { 855 - DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 856 - idx, reg); 857 - r100_cs_dump_packet(p, pkt); 858 - return r; 859 } 860 - 861 - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 862 - tile_flags |= R300_DEPTHMACROTILE_ENABLE; 863 - if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 864 - tile_flags |= R300_DEPTHMICROTILE_TILED; 865 - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 866 - tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; 867 - 868 - tmp = idx_value & ~(0x7 << 16); 869 - tmp |= tile_flags; 870 - ib[idx] = tmp; 871 - 872 track->zb.pitch = idx_value & 0x3FFC; 873 track->zb_dirty = true; 874 break;
··· 701 return r; 702 } 703 704 + if (p->keep_tiling_flags) { 705 + ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ 706 + ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset); 707 + } else { 708 + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 709 + tile_flags |= R300_TXO_MACRO_TILE; 710 + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 711 + tile_flags |= R300_TXO_MICRO_TILE; 712 + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 713 + tile_flags |= R300_TXO_MICRO_TILE_SQUARE; 714 715 + tmp = idx_value + ((u32)reloc->lobj.gpu_offset); 716 + tmp |= tile_flags; 717 + ib[idx] = tmp; 718 + } 719 track->textures[i].robj = reloc->robj; 720 track->tex_dirty = true; 721 break; ··· 760 /* RB3D_COLORPITCH1 */ 761 /* RB3D_COLORPITCH2 */ 762 /* RB3D_COLORPITCH3 */ 763 + if (!p->keep_tiling_flags) { 764 + r = r100_cs_packet_next_reloc(p, &reloc); 765 + if (r) { 766 + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 767 + idx, reg); 768 + r100_cs_dump_packet(p, pkt); 769 + return r; 770 + } 771 + 772 + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 773 + tile_flags |= R300_COLOR_TILE_ENABLE; 774 + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 775 + tile_flags |= R300_COLOR_MICROTILE_ENABLE; 776 + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 777 + tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; 778 + 779 + tmp = idx_value & ~(0x7 << 16); 780 + tmp |= tile_flags; 781 + ib[idx] = tmp; 782 } 783 i = (reg - 0x4E38) >> 2; 784 track->cb[i].pitch = idx_value & 0x3FFE; 785 switch (((idx_value >> 21) & 0xF)) { ··· 843 break; 844 case 0x4F24: 845 /* ZB_DEPTHPITCH */ 846 + if (!p->keep_tiling_flags) { 847 + r = r100_cs_packet_next_reloc(p, &reloc); 848 + if (r) { 849 + DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 850 + idx, reg); 851 + r100_cs_dump_packet(p, pkt); 852 + return r; 853 + } 854 + 855 + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 856 + tile_flags |= R300_DEPTHMACROTILE_ENABLE; 857 + if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 858 + tile_flags |= R300_DEPTHMICROTILE_TILED; 859 + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 860 + tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; 861 + 862 + tmp = idx_value & ~(0x7 << 16); 863 + tmp |= tile_flags; 864 + ib[idx] = tmp; 865 } 866 track->zb.pitch = idx_value & 0x3FFC; 867 track->zb_dirty = true; 868 break;
+16 -10
drivers/gpu/drm/radeon/r600_cs.c
··· 941 track->db_depth_control = radeon_get_ib_value(p, idx); 942 break; 943 case R_028010_DB_DEPTH_INFO: 944 - if (r600_cs_packet_next_is_pkt3_nop(p)) { 945 r = r600_cs_packet_next_reloc(p, &reloc); 946 if (r) { 947 dev_warn(p->dev, "bad SET_CONTEXT_REG " ··· 993 case R_0280B4_CB_COLOR5_INFO: 994 case R_0280B8_CB_COLOR6_INFO: 995 case R_0280BC_CB_COLOR7_INFO: 996 - if (r600_cs_packet_next_is_pkt3_nop(p)) { 997 r = r600_cs_packet_next_reloc(p, &reloc); 998 if (r) { 999 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); ··· 1293 mip_offset <<= 8; 1294 1295 word0 = radeon_get_ib_value(p, idx + 0); 1296 - if (tiling_flags & RADEON_TILING_MACRO) 1297 - word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1298 - else if (tiling_flags & RADEON_TILING_MICRO) 1299 - word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1300 word1 = radeon_get_ib_value(p, idx + 1); 1301 w0 = G_038000_TEX_WIDTH(word0) + 1; 1302 h0 = G_038004_TEX_HEIGHT(word1) + 1; ··· 1625 return -EINVAL; 1626 } 1627 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1628 - if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1629 - ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1630 - else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1631 - ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1632 texture = reloc->robj; 1633 /* tex mip base */ 1634 r = r600_cs_packet_next_reloc(p, &reloc);
··· 941 track->db_depth_control = radeon_get_ib_value(p, idx); 942 break; 943 case R_028010_DB_DEPTH_INFO: 944 + if (!p->keep_tiling_flags && 945 + r600_cs_packet_next_is_pkt3_nop(p)) { 946 r = r600_cs_packet_next_reloc(p, &reloc); 947 if (r) { 948 dev_warn(p->dev, "bad SET_CONTEXT_REG " ··· 992 case R_0280B4_CB_COLOR5_INFO: 993 case R_0280B8_CB_COLOR6_INFO: 994 case R_0280BC_CB_COLOR7_INFO: 995 + if (!p->keep_tiling_flags && 996 + r600_cs_packet_next_is_pkt3_nop(p)) { 997 r = r600_cs_packet_next_reloc(p, &reloc); 998 if (r) { 999 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); ··· 1291 mip_offset <<= 8; 1292 1293 word0 = radeon_get_ib_value(p, idx + 0); 1294 + if (!p->keep_tiling_flags) { 1295 + if (tiling_flags & RADEON_TILING_MACRO) 1296 + word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1297 + else if (tiling_flags & RADEON_TILING_MICRO) 1298 + word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1299 + } 1300 word1 = radeon_get_ib_value(p, idx + 1); 1301 w0 = G_038000_TEX_WIDTH(word0) + 1; 1302 h0 = G_038004_TEX_HEIGHT(word1) + 1; ··· 1621 return -EINVAL; 1622 } 1623 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1624 + if (!p->keep_tiling_flags) { 1625 + if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1626 + ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1627 + else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1628 + ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1629 + } 1630 texture = reloc->robj; 1631 /* tex mip base */ 1632 r = r600_cs_packet_next_reloc(p, &reloc);
+2 -1
drivers/gpu/drm/radeon/radeon.h
··· 611 struct radeon_ib *ib; 612 void *track; 613 unsigned family; 614 - int parser_error; 615 }; 616 617 extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
··· 611 struct radeon_ib *ib; 612 void *track; 613 unsigned family; 614 + int parser_error; 615 + bool keep_tiling_flags; 616 }; 617 618 extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
+86 -116
drivers/gpu/drm/radeon/radeon_atombios.c
··· 62 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; 63 }; 64 65 static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, 66 uint8_t id) 67 { ··· 166 for (i = 0; i < num_indices; i++) { 167 gpio = &i2c_info->asGPIO_Info[i]; 168 169 - /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ 170 - if ((rdev->family == CHIP_R420) || 171 - (rdev->family == CHIP_R423) || 172 - (rdev->family == CHIP_RV410)) { 173 - if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || 174 - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || 175 - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { 176 - gpio->ucClkMaskShift = 0x19; 177 - gpio->ucDataMaskShift = 0x18; 178 - } 179 - } 180 - 181 - /* some evergreen boards have bad data for this entry */ 182 - if (ASIC_IS_DCE4(rdev)) { 183 - if ((i == 7) && 184 - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && 185 - (gpio->sucI2cId.ucAccess == 0)) { 186 - gpio->sucI2cId.ucAccess = 0x97; 187 - gpio->ucDataMaskShift = 8; 188 - gpio->ucDataEnShift = 8; 189 - gpio->ucDataY_Shift = 8; 190 - gpio->ucDataA_Shift = 8; 191 - } 192 - } 193 - 194 - /* some DCE3 boards have bad data for this entry */ 195 - if (ASIC_IS_DCE3(rdev)) { 196 - if ((i == 4) && 197 - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && 198 - (gpio->sucI2cId.ucAccess == 0x94)) 199 - gpio->sucI2cId.ucAccess = 0x14; 200 - } 201 202 if (gpio->sucI2cId.ucAccess == id) { 203 - i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 204 - i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 205 - i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; 206 - i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; 207 - i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; 208 - i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; 209 - i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; 210 - i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; 211 - i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); 212 - i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); 213 - i2c.en_clk_mask = (1 << gpio->ucClkEnShift); 214 - i2c.en_data_mask = (1 << gpio->ucDataEnShift); 215 - i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); 216 - i2c.y_data_mask = (1 << gpio->ucDataY_Shift); 217 - i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); 218 - i2c.a_data_mask = (1 << gpio->ucDataA_Shift); 219 - 220 - if (gpio->sucI2cId.sbfAccess.bfHW_Capable) 221 - i2c.hw_capable = true; 222 - else 223 - i2c.hw_capable = false; 224 - 225 - if (gpio->sucI2cId.ucAccess == 0xa0) 226 - i2c.mm_i2c = true; 227 - else 228 - i2c.mm_i2c = false; 229 - 230 - i2c.i2c_id = gpio->sucI2cId.ucAccess; 231 - 232 - if (i2c.mask_clk_reg) 233 - i2c.valid = true; 234 break; 235 } 236 } ··· 189 int i, num_indices; 190 char stmp[32]; 191 192 - memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); 193 - 194 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { 195 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); 196 ··· 197 198 for (i = 0; i < num_indices; i++) { 199 gpio = &i2c_info->asGPIO_Info[i]; 200 - i2c.valid = false; 201 202 - /* some evergreen boards have bad data for this entry */ 203 - if (ASIC_IS_DCE4(rdev)) { 204 - if ((i == 7) && 205 - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && 206 - (gpio->sucI2cId.ucAccess == 0)) { 207 - gpio->sucI2cId.ucAccess = 0x97; 208 - gpio->ucDataMaskShift = 8; 209 - gpio->ucDataEnShift = 8; 210 - gpio->ucDataY_Shift = 8; 211 - 
gpio->ucDataA_Shift = 8; 212 - } 213 - } 214 215 - /* some DCE3 boards have bad data for this entry */ 216 - if (ASIC_IS_DCE3(rdev)) { 217 - if ((i == 4) && 218 - (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && 219 - (gpio->sucI2cId.ucAccess == 0x94)) 220 - gpio->sucI2cId.ucAccess = 0x14; 221 - } 222 223 - i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 224 - i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 225 - i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; 226 - i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; 227 - i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; 228 - i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; 229 - i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; 230 - i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; 231 - i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); 232 - i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); 233 - i2c.en_clk_mask = (1 << gpio->ucClkEnShift); 234 - i2c.en_data_mask = (1 << gpio->ucDataEnShift); 235 - i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); 236 - i2c.y_data_mask = (1 << gpio->ucDataY_Shift); 237 - i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); 238 - i2c.a_data_mask = (1 << gpio->ucDataA_Shift); 239 - 240 - if (gpio->sucI2cId.sbfAccess.bfHW_Capable) 241 - i2c.hw_capable = true; 242 - else 243 - i2c.hw_capable = false; 244 - 245 - if (gpio->sucI2cId.ucAccess == 0xa0) 246 - i2c.mm_i2c = true; 247 - else 248 - i2c.mm_i2c = false; 249 - 250 - i2c.i2c_id = gpio->sucI2cId.ucAccess; 251 - 252 - if (i2c.mask_clk_reg) { 253 - i2c.valid = true; 254 sprintf(stmp, "0x%x", i2c.i2c_id); 255 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); 256 }
··· 62 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; 63 }; 64 65 + static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev, 66 + ATOM_GPIO_I2C_ASSIGMENT *gpio, 67 + u8 index) 68 + { 69 + /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ 70 + if ((rdev->family == CHIP_R420) || 71 + (rdev->family == CHIP_R423) || 72 + (rdev->family == CHIP_RV410)) { 73 + if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || 74 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || 75 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { 76 + gpio->ucClkMaskShift = 0x19; 77 + gpio->ucDataMaskShift = 0x18; 78 + } 79 + } 80 + 81 + /* some evergreen boards have bad data for this entry */ 82 + if (ASIC_IS_DCE4(rdev)) { 83 + if ((index == 7) && 84 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && 85 + (gpio->sucI2cId.ucAccess == 0)) { 86 + gpio->sucI2cId.ucAccess = 0x97; 87 + gpio->ucDataMaskShift = 8; 88 + gpio->ucDataEnShift = 8; 89 + gpio->ucDataY_Shift = 8; 90 + gpio->ucDataA_Shift = 8; 91 + } 92 + } 93 + 94 + /* some DCE3 boards have bad data for this entry */ 95 + if (ASIC_IS_DCE3(rdev)) { 96 + if ((index == 4) && 97 + (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && 98 + (gpio->sucI2cId.ucAccess == 0x94)) 99 + gpio->sucI2cId.ucAccess = 0x14; 100 + } 101 + } 102 + 103 + static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio) 104 + { 105 + struct radeon_i2c_bus_rec i2c; 106 + 107 + memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); 108 + 109 + i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 110 + i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; 111 + i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; 112 + i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; 113 + i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; 114 + i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; 115 + i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; 116 + i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; 117 + i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); 118 + i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); 119 + i2c.en_clk_mask = (1 << gpio->ucClkEnShift); 120 + i2c.en_data_mask = (1 << gpio->ucDataEnShift); 121 + i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); 122 + i2c.y_data_mask = (1 << gpio->ucDataY_Shift); 123 + i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); 124 + i2c.a_data_mask = (1 << gpio->ucDataA_Shift); 125 + 126 + if (gpio->sucI2cId.sbfAccess.bfHW_Capable) 127 + i2c.hw_capable = true; 128 + else 129 + i2c.hw_capable = false; 130 + 131 + if (gpio->sucI2cId.ucAccess == 0xa0) 132 + i2c.mm_i2c = true; 133 + else 134 + i2c.mm_i2c = false; 135 + 136 + i2c.i2c_id = gpio->sucI2cId.ucAccess; 137 + 138 + if (i2c.mask_clk_reg) 139 + i2c.valid = true; 140 + else 141 + i2c.valid = false; 142 + 143 + return i2c; 144 + } 145 + 146 static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, 147 uint8_t id) 148 { ··· 85 for (i = 0; i < num_indices; i++) { 86 gpio = &i2c_info->asGPIO_Info[i]; 87 88 + radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); 89 90 if (gpio->sucI2cId.ucAccess == id) { 91 + i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); 92 break; 93 } 94 } ··· 169 int i, num_indices; 170 char stmp[32]; 171 172 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { 173 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); 174 ··· 179 180 for (i = 0; i < 
num_indices; i++) { 181 gpio = &i2c_info->asGPIO_Info[i]; 182 183 + radeon_lookup_i2c_gpio_quirks(rdev, gpio, i); 184 185 + i2c = radeon_get_bus_rec_for_i2c_gpio(gpio); 186 187 + if (i2c.valid) { 188 sprintf(stmp, "0x%x", i2c.i2c_id); 189 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); 190 }
+10 -1
drivers/gpu/drm/radeon/radeon_cs.c
··· 93 { 94 struct drm_radeon_cs *cs = data; 95 uint64_t *chunk_array_ptr; 96 - unsigned size, i; 97 98 if (!cs->num_chunks) { 99 return 0; ··· 140 if (p->chunks[i].length_dw == 0) 141 return -EINVAL; 142 } 143 144 p->chunks[i].length_dw = user_chunk.length_dw; 145 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; ··· 158 if (DRM_COPY_FROM_USER(p->chunks[i].kdata, 159 p->chunks[i].user_ptr, size)) { 160 return -EFAULT; 161 } 162 } else { 163 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); ··· 181 p->chunks[p->chunk_ib_idx].length_dw); 182 return -EINVAL; 183 } 184 return 0; 185 } 186
··· 93 { 94 struct drm_radeon_cs *cs = data; 95 uint64_t *chunk_array_ptr; 96 + unsigned size, i, flags = 0; 97 98 if (!cs->num_chunks) { 99 return 0; ··· 140 if (p->chunks[i].length_dw == 0) 141 return -EINVAL; 142 } 143 + if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS && 144 + !p->chunks[i].length_dw) { 145 + return -EINVAL; 146 + } 147 148 p->chunks[i].length_dw = user_chunk.length_dw; 149 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; ··· 154 if (DRM_COPY_FROM_USER(p->chunks[i].kdata, 155 p->chunks[i].user_ptr, size)) { 156 return -EFAULT; 157 + } 158 + if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) { 159 + flags = p->chunks[i].kdata[0]; 160 } 161 } else { 162 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); ··· 174 p->chunks[p->chunk_ib_idx].length_dw); 175 return -EINVAL; 176 } 177 + 178 + p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0; 179 return 0; 180 } 181
+2 -1
drivers/gpu/drm/radeon/radeon_drv.c
··· 53 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query 54 * 2.10.0 - fusion 2D tiling 55 * 2.11.0 - backend map, initial compute support for the CS checker 56 */ 57 #define KMS_DRIVER_MAJOR 2 58 - #define KMS_DRIVER_MINOR 11 59 #define KMS_DRIVER_PATCHLEVEL 0 60 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 61 int radeon_driver_unload_kms(struct drm_device *dev);
··· 53 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query 54 * 2.10.0 - fusion 2D tiling 55 * 2.11.0 - backend map, initial compute support for the CS checker 56 + * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS 57 */ 58 #define KMS_DRIVER_MAJOR 2 59 + #define KMS_DRIVER_MINOR 12 60 #define KMS_DRIVER_PATCHLEVEL 0 61 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 62 int radeon_driver_unload_kms(struct drm_device *dev);
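The interface bump to 2.12.0 is what lets userspace discover RADEON_CS_KEEP_TILING_FLAGS support at runtime. A sketch of the usual libdrm version probe (assuming a file descriptor opened on the radeon KMS node):

#include <stdbool.h>
#include <xf86drm.h>

/* Returns true if the radeon KMS driver is new enough (>= 2.12) to
 * accept the RADEON_CS_KEEP_TILING_FLAGS chunk flag. */
static bool radeon_has_keep_tiling_flags(int fd)
{
        drmVersionPtr ver = drmGetVersion(fd);
        bool ok;

        if (!ver)
                return false;
        ok = ver->version_major > 2 ||
             (ver->version_major == 2 && ver->version_minor >= 12);
        drmFreeVersion(ver);
        return ok;
}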
+7 -1
drivers/gpu/drm/ttm/ttm_bo.c
··· 574 return ret; 575 576 spin_lock(&glob->lru_lock); 577 ret = ttm_bo_reserve_locked(bo, interruptible, 578 no_wait_reserve, false, 0); 579 580 - if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { 581 spin_unlock(&glob->lru_lock); 582 return ret; 583 }
··· 574 return ret; 575 576 spin_lock(&glob->lru_lock); 577 + 578 + if (unlikely(list_empty(&bo->ddestroy))) { 579 + spin_unlock(&glob->lru_lock); 580 + return 0; 581 + } 582 + 583 ret = ttm_bo_reserve_locked(bo, interruptible, 584 no_wait_reserve, false, 0); 585 586 + if (unlikely(ret != 0)) { 587 spin_unlock(&glob->lru_lock); 588 return ret; 589 }
+12 -6
drivers/gpu/vga/vgaarb.c
··· 991 uc = &priv->cards[i]; 992 } 993 994 - if (!uc) 995 - return -EINVAL; 996 997 - if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) 998 - return -EINVAL; 999 1000 - if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) 1001 - return -EINVAL; 1002 1003 vga_put(pdev, io_state); 1004
··· 991 uc = &priv->cards[i]; 992 } 993 994 + if (!uc) { 995 + ret_val = -EINVAL; 996 + goto done; 997 + } 998 999 + if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) { 1000 + ret_val = -EINVAL; 1001 + goto done; 1002 + } 1003 1004 + if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) { 1005 + ret_val = -EINVAL; 1006 + goto done; 1007 + } 1008 1009 vga_put(pdev, io_state); 1010
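Replacing the early returns with jumps to the function's common exit is what guarantees the buffer copied in from userspace gets freed on every error path. A generic sketch of the allocate/goto-done/kfree pattern, with hypothetical names rather than the vgaarb code itself:

#include <linux/slab.h>
#include <linux/uaccess.h>

/* Hypothetical ioctl-style write handler: one allocation, one exit
 * point that always frees it. */
static ssize_t example_write(const char __user *ubuf, size_t count)
{
        char *kbuf;
        ssize_t ret_val;

        kbuf = kmalloc(count + 1, GFP_KERNEL);
        if (!kbuf)
                return -ENOMEM;

        if (copy_from_user(kbuf, ubuf, count)) {
                ret_val = -EFAULT;
                goto done;
        }
        kbuf[count] = '\0';

        /* ... parse kbuf; on any error set ret_val and goto done ... */
        ret_val = count;
done:
        kfree(kbuf);
        return ret_val;
}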
+2
include/drm/drm_mode.h
··· 235 #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 236 #define DRM_MODE_FB_DIRTY_FLAGS 0x03 237 238 /* 239 * Mark a region of a framebuffer as dirty. 240 *
··· 235 #define DRM_MODE_FB_DIRTY_ANNOTATE_FILL 0x02 236 #define DRM_MODE_FB_DIRTY_FLAGS 0x03 237 238 + #define DRM_MODE_FB_DIRTY_MAX_CLIPS 256 239 + 240 /* 241 * Mark a region of a framebuffer as dirty. 242 *
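DRM_MODE_FB_DIRTY_MAX_CLIPS puts a hard ceiling of 256 clip rectangles on a single dirtyfb ioctl, so userspace that batches up more damage than that needs to flush in several calls. A sketch using libdrm's drmModeDirtyFB (assumed available in the libdrm build; error handling trimmed):

#include <stdint.h>
#include <xf86drmMode.h>

/* Send a damage list to the kernel in chunks of at most 256 clips,
 * matching DRM_MODE_FB_DIRTY_MAX_CLIPS. */
static int flush_damage(int fd, uint32_t fb_id,
                        drmModeClip *clips, uint32_t num_clips)
{
        uint32_t i;

        for (i = 0; i < num_clips; i += 256) {
                uint32_t n = num_clips - i < 256 ? num_clips - i : 256;
                int ret = drmModeDirtyFB(fd, fb_id, &clips[i], n);

                if (ret)
                        return ret;
        }
        return 0;
}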
+4
include/drm/radeon_drm.h
··· 874 875 #define RADEON_CHUNK_ID_RELOCS 0x01 876 #define RADEON_CHUNK_ID_IB 0x02 877 878 struct drm_radeon_cs_chunk { 879 uint32_t chunk_id;
··· 874 875 #define RADEON_CHUNK_ID_RELOCS 0x01 876 #define RADEON_CHUNK_ID_IB 0x02 877 + #define RADEON_CHUNK_ID_FLAGS 0x03 878 + 879 + /* The first dword of RADEON_CHUNK_ID_FLAGS is a uint32 of these flags: */ 880 + #define RADEON_CS_KEEP_TILING_FLAGS 0x01 881 882 struct drm_radeon_cs_chunk { 883 uint32_t chunk_id;
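The new chunk type lets a CS submission tell the checker to leave the tiling bits in the command stream alone, for userspace that fills them in itself. A hedged sketch of appending a flags chunk to a radeon CS ioctl; the IB and relocation chunks are assumed to be built elsewhere, and header paths may vary between libdrm installs:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Build the optional flags chunk and submit the CS. ib_chunk and
 * reloc_chunk are assumed to have been filled in by the caller. */
static int submit_cs_keep_tiling(int fd,
                                 struct drm_radeon_cs_chunk *ib_chunk,
                                 struct drm_radeon_cs_chunk *reloc_chunk)
{
        uint32_t flags[1] = { RADEON_CS_KEEP_TILING_FLAGS };
        struct drm_radeon_cs_chunk flags_chunk = {
                .chunk_id = RADEON_CHUNK_ID_FLAGS,
                .length_dw = 1,
                .chunk_data = (uintptr_t)flags,
        };
        struct drm_radeon_cs_chunk *chunks[3] = {
                ib_chunk, reloc_chunk, &flags_chunk
        };
        uint64_t chunk_array[3];
        struct drm_radeon_cs cs;
        int i;

        for (i = 0; i < 3; i++)
                chunk_array[i] = (uintptr_t)chunks[i];

        memset(&cs, 0, sizeof(cs));
        cs.num_chunks = 3;
        cs.chunks = (uintptr_t)chunk_array;

        return drmCommandWriteRead(fd, DRM_RADEON_CS, &cs, sizeof(cs));
}

Older kernels (KMS interface below 2.12) do not interpret the flag, so submissions should be gated on the version probe shown earlier.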