Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2015-10-10' of git://anongit.freedesktop.org/drm-intel into drm-next

- dmc fixes from Animesh (not yet all) for deeper sleep states
- piles of prep patches from Ville to make mmio functions type-safe
- more fbc work from Paulo all over
- w/a shuffling from Arun Siluvery
- first part of atomic watermark updates from Matt and Ville (later parts had to
be dropped again unfortunately)
- lots of patches to prepare bxt dsi support (Shashank Sharma)
- userptr fixes from Chris
- audio rate interface between i915/snd_hda plus kerneldoc (Libin Yang)
- shrinker improvements and fixes (Chris Wilson)
- lots and lots of small patches all over

* tag 'drm-intel-next-2015-10-10' of git://anongit.freedesktop.org/drm-intel: (134 commits)
drm/i915: Update DRIVER_DATE to 20151010
drm/i915: Partial revert of atomic watermark series
drm/i915: Early exit from semaphore_waits_for for execlist mode.
drm/i915: Remove wrong warning from i915_gem_context_clean
drm/i915: Determine the stolen memory base address on gen2
drm/i915: fix FBC buffer size checks
drm/i915: fix CFB size calculation
drm/i915: remove pre-atomic check from SKL update_primary_plane
drm/i915: don't allocate fbcon from stolen memory if it's too big
Revert "drm/i915: Call encoder hotplug for init and resume cases"
Revert "drm/i915: Add hot_plug hook for hdmi encoder"
drm/i915: use error path
drm/i915/irq: Fix misspelled word register in kernel-doc
drm/i915/irq: Fix kernel-doc warnings
drm/i915: Hook up ring workaround writes at context creation time on Gen6-7.
drm/i915: Don't warn if the workaround list is empty.
drm/i915: Resurrect golden context on gen6/7
drm/i915/chv: remove pre-production hardware workarounds
drm/i915/snb: remove pre-production hardware workaround
drm/i915/bxt: Set time interval unit to 0.833us
...

+2614 -1742
+1
Documentation/DocBook/drm.tmpl
··· 3989 3989 <title>High Definition Audio</title> 3990 3990 !Pdrivers/gpu/drm/i915/intel_audio.c High Definition Audio over HDMI and Display Port 3991 3991 !Idrivers/gpu/drm/i915/intel_audio.c 3992 + !Iinclude/drm/i915_component.h 3992 3993 </sect2> 3993 3994 <sect2> 3994 3995 <title>Panel Self Refresh PSR (PSR/SRD)</title>
+2 -2
drivers/gpu/drm/i915/dvo.h
··· 94 94 * after this function is called. 95 95 */ 96 96 void (*mode_set)(struct intel_dvo_device *dvo, 97 - struct drm_display_mode *mode, 98 - struct drm_display_mode *adjusted_mode); 97 + const struct drm_display_mode *mode, 98 + const struct drm_display_mode *adjusted_mode); 99 99 100 100 /* 101 101 * Probe for a connected output, and return detect_status.
+2 -2
drivers/gpu/drm/i915/dvo_ch7017.c
··· 255 255 } 256 256 257 257 static void ch7017_mode_set(struct intel_dvo_device *dvo, 258 - struct drm_display_mode *mode, 259 - struct drm_display_mode *adjusted_mode) 258 + const struct drm_display_mode *mode, 259 + const struct drm_display_mode *adjusted_mode) 260 260 { 261 261 uint8_t lvds_pll_feedback_div, lvds_pll_vco_control; 262 262 uint8_t outputs_enable, lvds_control_2, lvds_power_down;
+2 -2
drivers/gpu/drm/i915/dvo_ch7xxx.c
··· 275 275 } 276 276 277 277 static void ch7xxx_mode_set(struct intel_dvo_device *dvo, 278 - struct drm_display_mode *mode, 279 - struct drm_display_mode *adjusted_mode) 278 + const struct drm_display_mode *mode, 279 + const struct drm_display_mode *adjusted_mode) 280 280 { 281 281 uint8_t tvco, tpcp, tpd, tlpf, idf; 282 282
+6 -6
drivers/gpu/drm/i915/dvo_ivch.c
··· 394 394 } 395 395 396 396 static void ivch_mode_set(struct intel_dvo_device *dvo, 397 - struct drm_display_mode *mode, 398 - struct drm_display_mode *adjusted_mode) 397 + const struct drm_display_mode *mode, 398 + const struct drm_display_mode *adjusted_mode) 399 399 { 400 400 struct ivch_priv *priv = dvo->dev_priv; 401 401 uint16_t vr40 = 0; ··· 414 414 vr40 = (VR40_STALL_ENABLE | VR40_VERTICAL_INTERP_ENABLE | 415 415 VR40_HORIZONTAL_INTERP_ENABLE); 416 416 417 - if (mode->hdisplay != adjusted_mode->hdisplay || 418 - mode->vdisplay != adjusted_mode->vdisplay) { 417 + if (mode->hdisplay != adjusted_mode->crtc_hdisplay || 418 + mode->vdisplay != adjusted_mode->crtc_vdisplay) { 419 419 uint16_t x_ratio, y_ratio; 420 420 421 421 vr01 |= VR01_PANEL_FIT_ENABLE; 422 422 vr40 |= VR40_CLOCK_GATING_ENABLE; 423 423 x_ratio = (((mode->hdisplay - 1) << 16) / 424 - (adjusted_mode->hdisplay - 1)) >> 2; 424 + (adjusted_mode->crtc_hdisplay - 1)) >> 2; 425 425 y_ratio = (((mode->vdisplay - 1) << 16) / 426 - (adjusted_mode->vdisplay - 1)) >> 2; 426 + (adjusted_mode->crtc_vdisplay - 1)) >> 2; 427 427 ivch_write(dvo, VR42, x_ratio); 428 428 ivch_write(dvo, VR41, y_ratio); 429 429 } else {
+2 -2
drivers/gpu/drm/i915/dvo_ns2501.c
··· 546 546 } 547 547 548 548 static void ns2501_mode_set(struct intel_dvo_device *dvo, 549 - struct drm_display_mode *mode, 550 - struct drm_display_mode *adjusted_mode) 549 + const struct drm_display_mode *mode, 550 + const struct drm_display_mode *adjusted_mode) 551 551 { 552 552 const struct ns2501_configuration *conf; 553 553 struct ns2501_priv *ns = (struct ns2501_priv *)(dvo->dev_priv);
+2 -2
drivers/gpu/drm/i915/dvo_sil164.c
··· 190 190 } 191 191 192 192 static void sil164_mode_set(struct intel_dvo_device *dvo, 193 - struct drm_display_mode *mode, 194 - struct drm_display_mode *adjusted_mode) 193 + const struct drm_display_mode *mode, 194 + const struct drm_display_mode *adjusted_mode) 195 195 { 196 196 /* As long as the basics are set up, since we don't have clock 197 197 * dependencies in the mode setup, we can just leave the
+2 -2
drivers/gpu/drm/i915/dvo_tfp410.c
··· 222 222 } 223 223 224 224 static void tfp410_mode_set(struct intel_dvo_device *dvo, 225 - struct drm_display_mode *mode, 226 - struct drm_display_mode *adjusted_mode) 225 + const struct drm_display_mode *mode, 226 + const struct drm_display_mode *adjusted_mode) 227 227 { 228 228 /* As long as the basics are set up, since we don't have clock dependencies 229 229 * in the mode setup, we can just leave the registers alone and everything
+5 -1
drivers/gpu/drm/i915/i915_cmd_parser.c
··· 448 448 REG32(GEN7_3DPRIM_INSTANCE_COUNT), 449 449 REG32(GEN7_3DPRIM_START_INSTANCE), 450 450 REG32(GEN7_3DPRIM_BASE_VERTEX), 451 + REG32(GEN7_GPGPU_DISPATCHDIMX), 452 + REG32(GEN7_GPGPU_DISPATCHDIMY), 453 + REG32(GEN7_GPGPU_DISPATCHDIMZ), 451 454 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(0)), 452 455 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(1)), 453 456 REG64(GEN7_SO_NUM_PRIMS_WRITTEN(2)), ··· 1217 1214 * MI_PREDICATE_SRC1 registers. 1218 1215 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register. 1219 1216 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3. 1217 + * 5. GPGPU dispatch compute indirect registers. 1220 1218 */ 1221 - return 4; 1219 + return 5; 1222 1220 }
+47 -10
drivers/gpu/drm/i915/i915_debugfs.c
··· 253 253 struct drm_i915_gem_object *b = 254 254 container_of(B, struct drm_i915_gem_object, obj_exec_link); 255 255 256 - return a->stolen->start - b->stolen->start; 256 + if (a->stolen->start < b->stolen->start) 257 + return -1; 258 + if (a->stolen->start > b->stolen->start) 259 + return 1; 260 + return 0; 257 261 } 258 262 259 263 static int i915_gem_stolen_list_info(struct seq_file *m, void *data) ··· 1312 1308 seq_puts(m, "no P-state info available\n"); 1313 1309 } 1314 1310 1311 + seq_printf(m, "Current CD clock frequency: %d kHz\n", dev_priv->cdclk_freq); 1312 + seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); 1313 + seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); 1314 + 1315 1315 out: 1316 1316 intel_runtime_pm_put(dev_priv); 1317 1317 return ret; ··· 2238 2230 for_each_ring(ring, dev_priv, unused) { 2239 2231 seq_printf(m, "%s\n", ring->name); 2240 2232 for (i = 0; i < 4; i++) { 2241 - u32 offset = 0x270 + i * 8; 2242 - u64 pdp = I915_READ(ring->mmio_base + offset + 4); 2233 + u64 pdp = I915_READ(GEN8_RING_PDP_UDW(ring, i)); 2243 2234 pdp <<= 32; 2244 - pdp |= I915_READ(ring->mmio_base + offset); 2235 + pdp |= I915_READ(GEN8_RING_PDP_LDW(ring, i)); 2245 2236 seq_printf(m, "\tPDP%d 0x%016llx\n", i, pdp); 2246 2237 } 2247 2238 } ··· 2297 2290 struct task_struct *task; 2298 2291 2299 2292 task = get_pid_task(file->pid, PIDTYPE_PID); 2300 - if (!task) 2301 - return -ESRCH; 2293 + if (!task) { 2294 + ret = -ESRCH; 2295 + goto out_put; 2296 + } 2302 2297 seq_printf(m, "\nproc: %s\n", task->comm); 2303 2298 put_task_struct(task); 2304 2299 idr_for_each(&file_priv->context_idr, per_file_ctx, 2305 2300 (void *)(unsigned long)m); 2306 2301 } 2307 2302 2303 + out_put: 2308 2304 intel_runtime_pm_put(dev_priv); 2309 2305 mutex_unlock(&dev->struct_mutex); 2310 2306 2311 - return 0; 2307 + return ret; 2312 2308 } 2313 2309 2314 2310 static int count_irq_waiters(struct drm_i915_private *i915) ··· 2919 2909 
u32 state; 2920 2910 2921 2911 if (IS_845G(dev) || IS_I865G(dev)) 2922 - state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 2912 + state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 2923 2913 else 2924 2914 state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 2925 2915 ··· 3157 3147 skl_ddb_entry_size(entry)); 3158 3148 } 3159 3149 3160 - entry = &ddb->cursor[pipe]; 3150 + entry = &ddb->plane[pipe][PLANE_CURSOR]; 3161 3151 seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start, 3162 3152 entry->end, skl_ddb_entry_size(entry)); 3163 3153 } ··· 5050 5040 } 5051 5041 } 5052 5042 5043 + static void broadwell_sseu_device_status(struct drm_device *dev, 5044 + struct sseu_dev_status *stat) 5045 + { 5046 + struct drm_i915_private *dev_priv = dev->dev_private; 5047 + int s; 5048 + u32 slice_info = I915_READ(GEN8_GT_SLICE_INFO); 5049 + 5050 + stat->slice_total = hweight32(slice_info & GEN8_LSLICESTAT_MASK); 5051 + 5052 + if (stat->slice_total) { 5053 + stat->subslice_per_slice = INTEL_INFO(dev)->subslice_per_slice; 5054 + stat->subslice_total = stat->slice_total * 5055 + stat->subslice_per_slice; 5056 + stat->eu_per_subslice = INTEL_INFO(dev)->eu_per_subslice; 5057 + stat->eu_total = stat->eu_per_subslice * stat->subslice_total; 5058 + 5059 + /* subtract fused off EU(s) from enabled slice(s) */ 5060 + for (s = 0; s < stat->slice_total; s++) { 5061 + u8 subslice_7eu = INTEL_INFO(dev)->subslice_7eu[s]; 5062 + 5063 + stat->eu_total -= hweight8(subslice_7eu); 5064 + } 5065 + } 5066 + } 5067 + 5053 5068 static int i915_sseu_status(struct seq_file *m, void *unused) 5054 5069 { 5055 5070 struct drm_info_node *node = (struct drm_info_node *) m->private; 5056 5071 struct drm_device *dev = node->minor->dev; 5057 5072 struct sseu_dev_status stat; 5058 5073 5059 - if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev)) 5074 + if (INTEL_INFO(dev)->gen < 8) 5060 5075 return -ENODEV; 5061 5076 5062 5077 seq_puts(m, "SSEU Device Info\n"); ··· 5106 5071 memset(&stat, 0, sizeof(stat)); 5107 5072 if 
(IS_CHERRYVIEW(dev)) { 5108 5073 cherryview_sseu_device_status(dev, &stat); 5074 + } else if (IS_BROADWELL(dev)) { 5075 + broadwell_sseu_device_status(dev, &stat); 5109 5076 } else if (INTEL_INFO(dev)->gen >= 9) { 5110 5077 gen9_sseu_device_status(dev, &stat); 5111 5078 }
+85 -13
drivers/gpu/drm/i915/i915_dma.c
··· 673 673 info->has_eu_pg = (info->eu_per_subslice > 2); 674 674 } 675 675 676 + static void broadwell_sseu_info_init(struct drm_device *dev) 677 + { 678 + struct drm_i915_private *dev_priv = dev->dev_private; 679 + struct intel_device_info *info; 680 + const int s_max = 3, ss_max = 3, eu_max = 8; 681 + int s, ss; 682 + u32 fuse2, eu_disable[s_max], s_enable, ss_disable; 683 + 684 + fuse2 = I915_READ(GEN8_FUSE2); 685 + s_enable = (fuse2 & GEN8_F2_S_ENA_MASK) >> GEN8_F2_S_ENA_SHIFT; 686 + ss_disable = (fuse2 & GEN8_F2_SS_DIS_MASK) >> GEN8_F2_SS_DIS_SHIFT; 687 + 688 + eu_disable[0] = I915_READ(GEN8_EU_DISABLE0) & GEN8_EU_DIS0_S0_MASK; 689 + eu_disable[1] = (I915_READ(GEN8_EU_DISABLE0) >> GEN8_EU_DIS0_S1_SHIFT) | 690 + ((I915_READ(GEN8_EU_DISABLE1) & GEN8_EU_DIS1_S1_MASK) << 691 + (32 - GEN8_EU_DIS0_S1_SHIFT)); 692 + eu_disable[2] = (I915_READ(GEN8_EU_DISABLE1) >> GEN8_EU_DIS1_S2_SHIFT) | 693 + ((I915_READ(GEN8_EU_DISABLE2) & GEN8_EU_DIS2_S2_MASK) << 694 + (32 - GEN8_EU_DIS1_S2_SHIFT)); 695 + 696 + 697 + info = (struct intel_device_info *)&dev_priv->info; 698 + info->slice_total = hweight32(s_enable); 699 + 700 + /* 701 + * The subslice disable field is global, i.e. it applies 702 + * to each of the enabled slices. 703 + */ 704 + info->subslice_per_slice = ss_max - hweight32(ss_disable); 705 + info->subslice_total = info->slice_total * info->subslice_per_slice; 706 + 707 + /* 708 + * Iterate through enabled slices and subslices to 709 + * count the total enabled EU. 710 + */ 711 + for (s = 0; s < s_max; s++) { 712 + if (!(s_enable & (0x1 << s))) 713 + /* skip disabled slice */ 714 + continue; 715 + 716 + for (ss = 0; ss < ss_max; ss++) { 717 + u32 n_disabled; 718 + 719 + if (ss_disable & (0x1 << ss)) 720 + /* skip disabled subslice */ 721 + continue; 722 + 723 + n_disabled = hweight8(eu_disable[s] >> (ss * eu_max)); 724 + 725 + /* 726 + * Record which subslices have 7 EUs. 
727 + */ 728 + if (eu_max - n_disabled == 7) 729 + info->subslice_7eu[s] |= 1 << ss; 730 + 731 + info->eu_total += eu_max - n_disabled; 732 + } 733 + } 734 + 735 + /* 736 + * BDW is expected to always have a uniform distribution of EU across 737 + * subslices with the exception that any one EU in any one subslice may 738 + * be fused off for die recovery. 739 + */ 740 + info->eu_per_subslice = info->subslice_total ? 741 + DIV_ROUND_UP(info->eu_total, info->subslice_total) : 0; 742 + 743 + /* 744 + * BDW supports slice power gating on devices with more than 745 + * one slice. 746 + */ 747 + info->has_slice_pg = (info->slice_total > 1); 748 + info->has_subslice_pg = 0; 749 + info->has_eu_pg = 0; 750 + } 751 + 676 752 /* 677 753 * Determine various intel_device_info fields at runtime. 678 754 * ··· 819 743 /* Initialize slice/subslice/EU info */ 820 744 if (IS_CHERRYVIEW(dev)) 821 745 cherryview_sseu_info_init(dev); 746 + else if (IS_BROADWELL(dev)) 747 + broadwell_sseu_info_init(dev); 822 748 else if (INTEL_INFO(dev)->gen >= 9) 823 749 gen9_sseu_info_init(dev); 824 750 ··· 896 818 mutex_init(&dev_priv->sb_lock); 897 819 mutex_init(&dev_priv->modeset_restore_lock); 898 820 mutex_init(&dev_priv->csr_lock); 821 + mutex_init(&dev_priv->av_mutex); 899 822 900 823 intel_pm_setup(dev); 901 824 ··· 1124 1045 put_bridge: 1125 1046 pci_dev_put(dev_priv->bridge_dev); 1126 1047 free_priv: 1127 - if (dev_priv->requests) 1128 - kmem_cache_destroy(dev_priv->requests); 1129 - if (dev_priv->vmas) 1130 - kmem_cache_destroy(dev_priv->vmas); 1131 - if (dev_priv->objects) 1132 - kmem_cache_destroy(dev_priv->objects); 1048 + kmem_cache_destroy(dev_priv->requests); 1049 + kmem_cache_destroy(dev_priv->vmas); 1050 + kmem_cache_destroy(dev_priv->objects); 1133 1051 kfree(dev_priv); 1134 1052 return ret; 1135 1053 } ··· 1217 1141 if (dev_priv->regs != NULL) 1218 1142 pci_iounmap(dev->pdev, dev_priv->regs); 1219 1143 1220 - if (dev_priv->requests) 1221 - kmem_cache_destroy(dev_priv->requests); 
1222 - if (dev_priv->vmas) 1223 - kmem_cache_destroy(dev_priv->vmas); 1224 - if (dev_priv->objects) 1225 - kmem_cache_destroy(dev_priv->objects); 1226 - 1144 + kmem_cache_destroy(dev_priv->requests); 1145 + kmem_cache_destroy(dev_priv->vmas); 1146 + kmem_cache_destroy(dev_priv->objects); 1227 1147 pci_dev_put(dev_priv->bridge_dev); 1228 1148 kfree(dev_priv); 1229 1149
+38 -6
drivers/gpu/drm/i915/i915_drv.c
··· 443 443 444 444 MODULE_DEVICE_TABLE(pci, pciidlist); 445 445 446 + static enum intel_pch intel_virt_detect_pch(struct drm_device *dev) 447 + { 448 + enum intel_pch ret = PCH_NOP; 449 + 450 + /* 451 + * In a virtualized passthrough environment we can be in a 452 + * setup where the ISA bridge is not able to be passed through. 453 + * In this case, a south bridge can be emulated and we have to 454 + * make an educated guess as to which PCH is really there. 455 + */ 456 + 457 + if (IS_GEN5(dev)) { 458 + ret = PCH_IBX; 459 + DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n"); 460 + } else if (IS_GEN6(dev) || IS_IVYBRIDGE(dev)) { 461 + ret = PCH_CPT; 462 + DRM_DEBUG_KMS("Assuming CouarPoint PCH\n"); 463 + } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { 464 + ret = PCH_LPT; 465 + DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); 466 + } else if (IS_SKYLAKE(dev)) { 467 + ret = PCH_SPT; 468 + DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); 469 + } 470 + 471 + return ret; 472 + } 473 + 446 474 void intel_detect_pch(struct drm_device *dev) 447 475 { 448 476 struct drm_i915_private *dev_priv = dev->dev_private; ··· 531 503 dev_priv->pch_type = PCH_SPT; 532 504 DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 533 505 WARN_ON(!IS_SKYLAKE(dev)); 506 + } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE) { 507 + dev_priv->pch_type = intel_virt_detect_pch(dev); 534 508 } else 535 509 continue; 536 510 ··· 637 607 "GEM idle failed, resume might fail\n"); 638 608 return error; 639 609 } 610 + 611 + intel_guc_suspend(dev); 640 612 641 613 intel_suspend_gt_powersave(dev); 642 614 ··· 768 736 atomic_or(I915_WEDGED, &dev_priv->gpu_error.reset_counter); 769 737 } 770 738 mutex_unlock(&dev->struct_mutex); 739 + 740 + intel_guc_resume(dev); 771 741 772 742 intel_modeset_init_hw(dev); 773 743 ··· 1054 1020 static int skl_suspend_complete(struct drm_i915_private *dev_priv) 1055 1021 { 1056 1022 /* Enabling DC6 is not a hard requirement to enter runtime D3 */ 1057 - 1058 - /* 1059 - * This is to ensure that CSR 
isn't identified as loaded before 1060 - * CSR-loading program is called during runtime-resume. 1061 - */ 1062 - intel_csr_load_status_set(dev_priv, FW_UNINITIALIZED); 1063 1023 1064 1024 skl_uninit_cdclk(dev_priv); 1065 1025 ··· 1504 1476 i915_gem_release_all_mmaps(dev_priv); 1505 1477 mutex_unlock(&dev->struct_mutex); 1506 1478 1479 + intel_guc_suspend(dev); 1480 + 1507 1481 intel_suspend_gt_powersave(dev); 1508 1482 intel_runtime_pm_disable_interrupts(dev_priv); 1509 1483 ··· 1564 1534 1565 1535 intel_opregion_notify_adapter(dev, PCI_D0); 1566 1536 dev_priv->pm.suspended = false; 1537 + 1538 + intel_guc_resume(dev); 1567 1539 1568 1540 if (IS_GEN6(dev_priv)) 1569 1541 intel_init_pch_refclk(dev);
+28 -31
drivers/gpu/drm/i915/i915_drv.h
··· 57 57 58 58 #define DRIVER_NAME "i915" 59 59 #define DRIVER_DESC "Intel Graphics" 60 - #define DRIVER_DATE "20150928" 60 + #define DRIVER_DATE "20151010" 61 61 62 62 #undef WARN_ON 63 63 /* Many gcc seem to no see through this and fall over :( */ ··· 131 131 #define transcoder_name(t) ((t) + 'A') 132 132 133 133 /* 134 - * This is the maximum (across all platforms) number of planes (primary + 135 - * sprites) that can be active at the same time on one pipe. 136 - * 137 - * This value doesn't count the cursor plane. 134 + * I915_MAX_PLANES in the enum below is the maximum (across all platforms) 135 + * number of planes per CRTC. Not all platforms really have this many planes, 136 + * which means some arrays of size I915_MAX_PLANES may have unused entries 137 + * between the topmost sprite plane and the cursor plane. 138 138 */ 139 - #define I915_MAX_PLANES 4 140 - 141 139 enum plane { 142 140 PLANE_A = 0, 143 141 PLANE_B, 144 142 PLANE_C, 143 + PLANE_CURSOR, 144 + I915_MAX_PLANES, 145 145 }; 146 146 #define plane_name(p) ((p) + 'A') 147 147 ··· 628 628 struct dpll *match_clock, 629 629 struct dpll *best_clock); 630 630 void (*update_wm)(struct drm_crtc *crtc); 631 - void (*update_sprite_wm)(struct drm_plane *plane, 632 - struct drm_crtc *crtc, 633 - uint32_t sprite_width, uint32_t sprite_height, 634 - int pixel_size, bool enable, bool scaled); 635 631 int (*modeset_calc_cdclk)(struct drm_atomic_state *state); 636 632 void (*modeset_commit_cdclk)(struct drm_atomic_state *state); 637 633 /* Returns the active state of the crtc, and if the crtc is active, ··· 642 646 void (*crtc_disable)(struct drm_crtc *crtc); 643 647 void (*audio_codec_enable)(struct drm_connector *connector, 644 648 struct intel_encoder *encoder, 645 - struct drm_display_mode *mode); 649 + const struct drm_display_mode *adjusted_mode); 646 650 void (*audio_codec_disable)(struct intel_encoder *encoder); 647 651 void (*fdi_link_train)(struct drm_crtc *crtc); 648 652 void 
(*init_clock_gating)(struct drm_device *dev); ··· 660 664 /* render clock increase/decrease */ 661 665 /* display clock increase/decrease */ 662 666 /* pll clock increase/decrease */ 663 - 664 - int (*setup_backlight)(struct intel_connector *connector, enum pipe pipe); 665 - uint32_t (*get_backlight)(struct intel_connector *connector); 666 - void (*set_backlight)(struct intel_connector *connector, 667 - uint32_t level); 668 - void (*disable_backlight)(struct intel_connector *connector); 669 - void (*enable_backlight)(struct intel_connector *connector); 670 - uint32_t (*backlight_hz_to_pwm)(struct intel_connector *connector, 671 - uint32_t hz); 672 667 }; 673 668 674 669 enum forcewake_domain_id { ··· 1133 1146 u8 efficient_freq; /* AKA RPe. Pre-determined balanced frequency */ 1134 1147 u8 rp1_freq; /* "less than" RP0 power/freqency */ 1135 1148 u8 rp0_freq; /* Non-overclocked max frequency. */ 1136 - u32 cz_freq; 1137 1149 1138 1150 u8 up_threshold; /* Current %busy required to uplock */ 1139 1151 u8 down_threshold; /* Current %busy required to downclock */ ··· 1574 1588 struct skl_ddb_allocation { 1575 1589 struct skl_ddb_entry pipe[I915_MAX_PIPES]; 1576 1590 struct skl_ddb_entry plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* packed/uv */ 1577 - struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; /* y-plane */ 1578 - struct skl_ddb_entry cursor[I915_MAX_PIPES]; 1591 + struct skl_ddb_entry y_plane[I915_MAX_PIPES][I915_MAX_PLANES]; 1579 1592 }; 1580 1593 1581 1594 struct skl_wm_values { ··· 1582 1597 struct skl_ddb_allocation ddb; 1583 1598 uint32_t wm_linetime[I915_MAX_PIPES]; 1584 1599 uint32_t plane[I915_MAX_PIPES][I915_MAX_PLANES][8]; 1585 - uint32_t cursor[I915_MAX_PIPES][8]; 1586 1600 uint32_t plane_trans[I915_MAX_PIPES][I915_MAX_PLANES]; 1587 - uint32_t cursor_trans[I915_MAX_PIPES]; 1588 1601 }; 1589 1602 1590 1603 struct skl_wm_level { 1591 1604 bool plane_en[I915_MAX_PLANES]; 1592 - bool cursor_en; 1593 1605 uint16_t plane_res_b[I915_MAX_PLANES]; 
1594 1606 uint8_t plane_res_l[I915_MAX_PLANES]; 1595 - uint16_t cursor_res_b; 1596 - uint8_t cursor_res_l; 1597 1607 }; 1598 1608 1599 1609 /* ··· 1789 1809 unsigned int cdclk_freq, max_cdclk_freq; 1790 1810 unsigned int max_dotclk_freq; 1791 1811 unsigned int hpll_freq; 1812 + unsigned int czclk_freq; 1792 1813 1793 1814 /** 1794 1815 * wq - Driver workqueue for GEM. ··· 1878 1897 /* hda/i915 audio component */ 1879 1898 struct i915_audio_component *audio_component; 1880 1899 bool audio_component_registered; 1900 + /** 1901 + * av_mutex - mutex for audio/video sync 1902 + * 1903 + */ 1904 + struct mutex av_mutex; 1881 1905 1882 1906 uint32_t hw_context_size; 1883 1907 struct list_head context_list; ··· 1944 1958 } gt; 1945 1959 1946 1960 bool edp_low_vswing; 1961 + 1962 + /* perform PHY state sanity checks? */ 1963 + bool chv_phy_assert[2]; 1947 1964 1948 1965 /* 1949 1966 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch ··· 2596 2607 #define INTEL_PCH_LPT_LP_DEVICE_ID_TYPE 0x9c00 2597 2608 #define INTEL_PCH_SPT_DEVICE_ID_TYPE 0xA100 2598 2609 #define INTEL_PCH_SPT_LP_DEVICE_ID_TYPE 0x9D00 2610 + #define INTEL_PCH_P2X_DEVICE_ID_TYPE 0x7100 2599 2611 2600 2612 #define INTEL_PCH_TYPE(dev) (__I915__(dev)->pch_type) 2601 2613 #define HAS_PCH_SPT(dev) (INTEL_PCH_TYPE(dev) == PCH_SPT) ··· 2814 2824 #define PIN_OFFSET_BIAS (1<<3) 2815 2825 #define PIN_USER (1<<4) 2816 2826 #define PIN_UPDATE (1<<5) 2827 + #define PIN_ZONE_4G (1<<6) 2828 + #define PIN_HIGH (1<<7) 2817 2829 #define PIN_OFFSET_MASK (~4095) 2818 2830 int __must_check 2819 2831 i915_gem_object_pin(struct drm_i915_gem_object *obj, ··· 2831 2839 int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level, 2832 2840 u32 flags); 2833 2841 int __must_check i915_vma_unbind(struct i915_vma *vma); 2842 + /* 2843 + * BEWARE: Do not use the function below unless you can _absolutely_ 2844 + * _guarantee_ VMA in question is _not in use_ anywhere. 
2845 + */ 2846 + int __must_check __i915_vma_unbind_no_wait(struct i915_vma *vma); 2834 2847 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj); 2835 2848 void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv); 2836 2849 void i915_gem_release_mmap(struct drm_i915_gem_object *obj); ··· 3164 3167 unsigned long end, 3165 3168 unsigned flags); 3166 3169 int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle); 3167 - int i915_gem_evict_everything(struct drm_device *dev); 3168 3170 3169 3171 /* belongs in i915_gem_gtt.h */ 3170 3172 static inline void i915_gem_chipset_flush(struct drm_device *dev) ··· 3194 3198 3195 3199 /* i915_gem_shrinker.c */ 3196 3200 unsigned long i915_gem_shrink(struct drm_i915_private *dev_priv, 3197 - long target, 3201 + unsigned long target, 3198 3202 unsigned flags); 3199 3203 #define I915_SHRINK_PURGEABLE 0x1 3200 3204 #define I915_SHRINK_UNBOUND 0x2 3201 3205 #define I915_SHRINK_BOUND 0x4 3206 + #define I915_SHRINK_ACTIVE 0x8 3202 3207 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv); 3203 3208 void i915_gem_shrinker_init(struct drm_i915_private *dev_priv); 3204 3209
+36 -27
drivers/gpu/drm/i915/i915_gem.c
··· 3208 3208 old_write_domain); 3209 3209 } 3210 3210 3211 - int i915_vma_unbind(struct i915_vma *vma) 3211 + static int __i915_vma_unbind(struct i915_vma *vma, bool wait) 3212 3212 { 3213 3213 struct drm_i915_gem_object *obj = vma->obj; 3214 3214 struct drm_i915_private *dev_priv = obj->base.dev->dev_private; ··· 3227 3227 3228 3228 BUG_ON(obj->pages == NULL); 3229 3229 3230 - ret = i915_gem_object_wait_rendering(obj, false); 3231 - if (ret) 3232 - return ret; 3230 + if (wait) { 3231 + ret = i915_gem_object_wait_rendering(obj, false); 3232 + if (ret) 3233 + return ret; 3234 + } 3233 3235 3234 3236 if (i915_is_ggtt(vma->vm) && 3235 3237 vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL) { ··· 3274 3272 i915_gem_object_unpin_pages(obj); 3275 3273 3276 3274 return 0; 3275 + } 3276 + 3277 + int i915_vma_unbind(struct i915_vma *vma) 3278 + { 3279 + return __i915_vma_unbind(vma, true); 3280 + } 3281 + 3282 + int __i915_vma_unbind_no_wait(struct i915_vma *vma) 3283 + { 3284 + return __i915_vma_unbind(vma, false); 3277 3285 } 3278 3286 3279 3287 int i915_gpu_idle(struct drm_device *dev) ··· 3366 3354 struct drm_device *dev = obj->base.dev; 3367 3355 struct drm_i915_private *dev_priv = dev->dev_private; 3368 3356 u32 fence_alignment, unfenced_alignment; 3357 + u32 search_flag, alloc_flag; 3358 + u64 start, end; 3369 3359 u64 size, fence_size; 3370 - u64 start = 3371 - flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0; 3372 - u64 end = 3373 - flags & PIN_MAPPABLE ? dev_priv->gtt.mappable_end : vm->total; 3374 3360 struct i915_vma *vma; 3375 3361 int ret; 3376 3362 ··· 3408 3398 size = flags & PIN_MAPPABLE ? fence_size : obj->base.size; 3409 3399 } 3410 3400 3401 + start = flags & PIN_OFFSET_BIAS ? 
flags & PIN_OFFSET_MASK : 0; 3402 + end = vm->total; 3403 + if (flags & PIN_MAPPABLE) 3404 + end = min_t(u64, end, dev_priv->gtt.mappable_end); 3405 + if (flags & PIN_ZONE_4G) 3406 + end = min_t(u64, end, (1ULL << 32)); 3407 + 3411 3408 if (alignment == 0) 3412 3409 alignment = flags & PIN_MAPPABLE ? fence_alignment : 3413 3410 unfenced_alignment; ··· 3450 3433 if (IS_ERR(vma)) 3451 3434 goto err_unpin; 3452 3435 3436 + if (flags & PIN_HIGH) { 3437 + search_flag = DRM_MM_SEARCH_BELOW; 3438 + alloc_flag = DRM_MM_CREATE_TOP; 3439 + } else { 3440 + search_flag = DRM_MM_SEARCH_DEFAULT; 3441 + alloc_flag = DRM_MM_CREATE_DEFAULT; 3442 + } 3443 + 3453 3444 search_free: 3454 3445 ret = drm_mm_insert_node_in_range_generic(&vm->mm, &vma->node, 3455 3446 size, alignment, 3456 3447 obj->cache_level, 3457 3448 start, end, 3458 - DRM_MM_SEARCH_DEFAULT, 3459 - DRM_MM_CREATE_DEFAULT); 3449 + search_flag, 3450 + alloc_flag); 3460 3451 if (ret) { 3461 3452 ret = i915_gem_evict_something(dev, vm, size, alignment, 3462 3453 obj->cache_level, ··· 4558 4533 BUG(); 4559 4534 } 4560 4535 4561 - static bool 4562 - intel_enable_blt(struct drm_device *dev) 4563 - { 4564 - if (!HAS_BLT(dev)) 4565 - return false; 4566 - 4567 - /* The blitter was dysfunctional on early prototypes */ 4568 - if (IS_GEN6(dev) && dev->pdev->revision < 8) { 4569 - DRM_INFO("BLT not supported on this pre-production hardware;" 4570 - " graphics performance will be degraded.\n"); 4571 - return false; 4572 - } 4573 - 4574 - return true; 4575 - } 4576 - 4577 4536 static void init_unused_ring(struct drm_device *dev, u32 base) 4578 4537 { 4579 4538 struct drm_i915_private *dev_priv = dev->dev_private; ··· 4600 4591 goto cleanup_render_ring; 4601 4592 } 4602 4593 4603 - if (intel_enable_blt(dev)) { 4594 + if (HAS_BLT(dev)) { 4604 4595 ret = intel_init_blt_ring_buffer(dev); 4605 4596 if (ret) 4606 4597 goto cleanup_bsd_ring;
+24
drivers/gpu/drm/i915/i915_gem_context.c
··· 133 133 return ret; 134 134 } 135 135 136 + static void i915_gem_context_clean(struct intel_context *ctx) 137 + { 138 + struct i915_hw_ppgtt *ppgtt = ctx->ppgtt; 139 + struct i915_vma *vma, *next; 140 + 141 + if (!ppgtt) 142 + return; 143 + 144 + WARN_ON(!list_empty(&ppgtt->base.active_list)); 145 + 146 + list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list, 147 + mm_list) { 148 + if (WARN_ON(__i915_vma_unbind_no_wait(vma))) 149 + break; 150 + } 151 + } 152 + 136 153 void i915_gem_context_free(struct kref *ctx_ref) 137 154 { 138 155 struct intel_context *ctx = container_of(ctx_ref, typeof(*ctx), ref); ··· 158 141 159 142 if (i915.enable_execlists) 160 143 intel_lr_context_free(ctx); 144 + 145 + /* 146 + * This context is going away and we need to remove all VMAs still 147 + * around. This is to handle imported shared objects for which 148 + * destructor did not run when their handles were closed. 149 + */ 150 + i915_gem_context_clean(ctx); 161 151 162 152 i915_ppgtt_put(ctx->ppgtt); 163 153
-45
drivers/gpu/drm/i915/i915_gem_evict.c
··· 237 237 238 238 return 0; 239 239 } 240 - 241 - /** 242 - * i915_gem_evict_everything - Try to evict all objects 243 - * @dev: Device to evict objects for 244 - * 245 - * This functions tries to evict all gem objects from all address spaces. Used 246 - * by the shrinker as a last-ditch effort and for suspend, before releasing the 247 - * backing storage of all unbound objects. 248 - */ 249 - int 250 - i915_gem_evict_everything(struct drm_device *dev) 251 - { 252 - struct drm_i915_private *dev_priv = dev->dev_private; 253 - struct i915_address_space *vm, *v; 254 - bool lists_empty = true; 255 - int ret; 256 - 257 - list_for_each_entry(vm, &dev_priv->vm_list, global_link) { 258 - lists_empty = (list_empty(&vm->inactive_list) && 259 - list_empty(&vm->active_list)); 260 - if (!lists_empty) 261 - lists_empty = false; 262 - } 263 - 264 - if (lists_empty) 265 - return -ENOSPC; 266 - 267 - trace_i915_gem_evict_everything(dev); 268 - 269 - /* The gpu_idle will flush everything in the write domain to the 270 - * active list. Then we must move everything off the active list 271 - * with retire requests. 272 - */ 273 - ret = i915_gpu_idle(dev); 274 - if (ret) 275 - return ret; 276 - 277 - i915_gem_retire_requests(dev); 278 - 279 - /* Having flushed everything, unbind() should never raise an error */ 280 - list_for_each_entry_safe(vm, v, &dev_priv->vm_list, global_link) 281 - WARN_ON(i915_gem_evict_vm(vm, false)); 282 - 283 - return 0; 284 - }
+42 -123
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 590 590 flags |= PIN_GLOBAL; 591 591 592 592 if (!drm_mm_node_allocated(&vma->node)) { 593 + /* Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset, 594 + * limit address to the first 4GBs for unflagged objects. 595 + */ 596 + if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0) 597 + flags |= PIN_ZONE_4G; 593 598 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP) 594 599 flags |= PIN_GLOBAL | PIN_MAPPABLE; 595 600 if (entry->flags & __EXEC_OBJECT_NEEDS_BIAS) 596 601 flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS; 602 + if ((flags & PIN_MAPPABLE) == 0) 603 + flags |= PIN_HIGH; 597 604 } 598 605 599 606 ret = i915_gem_object_pin(obj, vma->vm, entry->alignment, flags); ··· 677 670 /* avoid costly ping-pong once a batch bo ended up non-mappable */ 678 671 if (entry->flags & __EXEC_OBJECT_NEEDS_MAP && !obj->map_and_fenceable) 679 672 return !only_mappable_for_reloc(entry->flags); 673 + 674 + if ((entry->flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) == 0 && 675 + (vma->node.start + vma->node.size - 1) >> 32) 676 + return true; 680 677 681 678 return false; 682 679 } ··· 945 934 if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS) 946 935 return false; 947 936 948 - return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0; 937 + /* Kernel clipping was a DRI1 misfeature */ 938 + if (exec->num_cliprects || exec->cliprects_ptr) 939 + return false; 940 + 941 + if (exec->DR4 == 0xffffffff) { 942 + DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); 943 + exec->DR4 = 0; 944 + } 945 + if (exec->DR1 || exec->DR4) 946 + return false; 947 + 948 + if ((exec->batch_start_offset | exec->batch_len) & 0x7) 949 + return false; 950 + 951 + return true; 949 952 } 950 953 951 954 static int ··· 1123 1098 return 0; 1124 1099 } 1125 1100 1126 - static int 1127 - i915_emit_box(struct drm_i915_gem_request *req, 1128 - struct drm_clip_rect *box, 1129 - int DR1, int DR4) 1130 - { 1131 - struct intel_engine_cs *ring = req->ring; 1132 - int ret; 1133 - 1134 - if (box->y2 <= box->y1 || box->x2 
<= box->x1 || 1135 - box->y2 <= 0 || box->x2 <= 0) { 1136 - DRM_ERROR("Bad box %d,%d..%d,%d\n", 1137 - box->x1, box->y1, box->x2, box->y2); 1138 - return -EINVAL; 1139 - } 1140 - 1141 - if (INTEL_INFO(ring->dev)->gen >= 4) { 1142 - ret = intel_ring_begin(req, 4); 1143 - if (ret) 1144 - return ret; 1145 - 1146 - intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO_I965); 1147 - intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16); 1148 - intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16); 1149 - intel_ring_emit(ring, DR4); 1150 - } else { 1151 - ret = intel_ring_begin(req, 6); 1152 - if (ret) 1153 - return ret; 1154 - 1155 - intel_ring_emit(ring, GFX_OP_DRAWRECT_INFO); 1156 - intel_ring_emit(ring, DR1); 1157 - intel_ring_emit(ring, (box->x1 & 0xffff) | box->y1 << 16); 1158 - intel_ring_emit(ring, ((box->x2 - 1) & 0xffff) | (box->y2 - 1) << 16); 1159 - intel_ring_emit(ring, DR4); 1160 - intel_ring_emit(ring, 0); 1161 - } 1162 - intel_ring_advance(ring); 1163 - 1164 - return 0; 1165 - } 1166 - 1167 1101 static struct drm_i915_gem_object* 1168 1102 i915_gem_execbuffer_parse(struct intel_engine_cs *ring, 1169 1103 struct drm_i915_gem_exec_object2 *shadow_exec_entry, ··· 1181 1197 struct drm_i915_gem_execbuffer2 *args, 1182 1198 struct list_head *vmas) 1183 1199 { 1184 - struct drm_clip_rect *cliprects = NULL; 1185 1200 struct drm_device *dev = params->dev; 1186 1201 struct intel_engine_cs *ring = params->ring; 1187 1202 struct drm_i915_private *dev_priv = dev->dev_private; 1188 1203 u64 exec_start, exec_len; 1189 1204 int instp_mode; 1190 1205 u32 instp_mask; 1191 - int i, ret = 0; 1192 - 1193 - if (args->num_cliprects != 0) { 1194 - if (ring != &dev_priv->ring[RCS]) { 1195 - DRM_DEBUG("clip rectangles are only valid with the render ring\n"); 1196 - return -EINVAL; 1197 - } 1198 - 1199 - if (INTEL_INFO(dev)->gen >= 5) { 1200 - DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); 1201 - return -EINVAL; 1202 - } 1203 - 1204 - if (args->num_cliprects > 
UINT_MAX / sizeof(*cliprects)) { 1205 - DRM_DEBUG("execbuf with %u cliprects\n", 1206 - args->num_cliprects); 1207 - return -EINVAL; 1208 - } 1209 - 1210 - cliprects = kcalloc(args->num_cliprects, 1211 - sizeof(*cliprects), 1212 - GFP_KERNEL); 1213 - if (cliprects == NULL) { 1214 - ret = -ENOMEM; 1215 - goto error; 1216 - } 1217 - 1218 - if (copy_from_user(cliprects, 1219 - to_user_ptr(args->cliprects_ptr), 1220 - sizeof(*cliprects)*args->num_cliprects)) { 1221 - ret = -EFAULT; 1222 - goto error; 1223 - } 1224 - } else { 1225 - if (args->DR4 == 0xffffffff) { 1226 - DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); 1227 - args->DR4 = 0; 1228 - } 1229 - 1230 - if (args->DR1 || args->DR4 || args->cliprects_ptr) { 1231 - DRM_DEBUG("0 cliprects but dirt in cliprects fields\n"); 1232 - return -EINVAL; 1233 - } 1234 - } 1206 + int ret; 1235 1207 1236 1208 ret = i915_gem_execbuffer_move_to_gpu(params->request, vmas); 1237 1209 if (ret) 1238 - goto error; 1210 + return ret; 1239 1211 1240 1212 ret = i915_switch_context(params->request); 1241 1213 if (ret) 1242 - goto error; 1214 + return ret; 1243 1215 1244 1216 WARN(params->ctx->ppgtt && params->ctx->ppgtt->pd_dirty_rings & (1<<ring->id), 1245 1217 "%s didn't clear reload\n", ring->name); ··· 1208 1268 case I915_EXEC_CONSTANTS_REL_SURFACE: 1209 1269 if (instp_mode != 0 && ring != &dev_priv->ring[RCS]) { 1210 1270 DRM_DEBUG("non-0 rel constants mode on non-RCS\n"); 1211 - ret = -EINVAL; 1212 - goto error; 1271 + return -EINVAL; 1213 1272 } 1214 1273 1215 1274 if (instp_mode != dev_priv->relative_constants_mode) { 1216 1275 if (INTEL_INFO(dev)->gen < 4) { 1217 1276 DRM_DEBUG("no rel constants on pre-gen4\n"); 1218 - ret = -EINVAL; 1219 - goto error; 1277 + return -EINVAL; 1220 1278 } 1221 1279 1222 1280 if (INTEL_INFO(dev)->gen > 5 && 1223 1281 instp_mode == I915_EXEC_CONSTANTS_REL_SURFACE) { 1224 1282 DRM_DEBUG("rel surface constants mode invalid on gen5+\n"); 1225 - ret = -EINVAL; 1226 - goto error; 1283 + return 
-EINVAL; 1227 1284 } 1228 1285 1229 1286 /* The HW changed the meaning on this bit on gen6 */ ··· 1230 1293 break; 1231 1294 default: 1232 1295 DRM_DEBUG("execbuf with unknown constants: %d\n", instp_mode); 1233 - ret = -EINVAL; 1234 - goto error; 1296 + return -EINVAL; 1235 1297 } 1236 1298 1237 1299 if (ring == &dev_priv->ring[RCS] && 1238 - instp_mode != dev_priv->relative_constants_mode) { 1300 + instp_mode != dev_priv->relative_constants_mode) { 1239 1301 ret = intel_ring_begin(params->request, 4); 1240 1302 if (ret) 1241 - goto error; 1303 + return ret; 1242 1304 1243 1305 intel_ring_emit(ring, MI_NOOP); 1244 1306 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); ··· 1251 1315 if (args->flags & I915_EXEC_GEN7_SOL_RESET) { 1252 1316 ret = i915_reset_gen7_sol_offsets(dev, params->request); 1253 1317 if (ret) 1254 - goto error; 1318 + return ret; 1255 1319 } 1256 1320 1257 1321 exec_len = args->batch_len; 1258 1322 exec_start = params->batch_obj_vm_offset + 1259 1323 params->args_batch_start_offset; 1260 1324 1261 - if (cliprects) { 1262 - for (i = 0; i < args->num_cliprects; i++) { 1263 - ret = i915_emit_box(params->request, &cliprects[i], 1264 - args->DR1, args->DR4); 1265 - if (ret) 1266 - goto error; 1267 - 1268 - ret = ring->dispatch_execbuffer(params->request, 1269 - exec_start, exec_len, 1270 - params->dispatch_flags); 1271 - if (ret) 1272 - goto error; 1273 - } 1274 - } else { 1275 - ret = ring->dispatch_execbuffer(params->request, 1276 - exec_start, exec_len, 1277 - params->dispatch_flags); 1278 - if (ret) 1279 - return ret; 1280 - } 1325 + ret = ring->dispatch_execbuffer(params->request, 1326 + exec_start, exec_len, 1327 + params->dispatch_flags); 1328 + if (ret) 1329 + return ret; 1281 1330 1282 1331 trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); 1283 1332 1284 1333 i915_gem_execbuffer_move_to_active(vmas, params->request); 1285 1334 i915_gem_execbuffer_retire_commands(params); 1286 1335 1287 - error: 1288 - kfree(cliprects); 
1289 - return ret; 1336 + return 0; 1290 1337 } 1291 1338 1292 1339 /**
+18 -23
drivers/gpu/drm/i915/i915_gem_fence.c
··· 59 59 struct drm_i915_gem_object *obj) 60 60 { 61 61 struct drm_i915_private *dev_priv = dev->dev_private; 62 - int fence_reg; 62 + int fence_reg_lo, fence_reg_hi; 63 63 int fence_pitch_shift; 64 64 65 65 if (INTEL_INFO(dev)->gen >= 6) { 66 - fence_reg = FENCE_REG_SANDYBRIDGE_0; 67 - fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT; 66 + fence_reg_lo = FENCE_REG_GEN6_LO(reg); 67 + fence_reg_hi = FENCE_REG_GEN6_HI(reg); 68 + fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT; 68 69 } else { 69 - fence_reg = FENCE_REG_965_0; 70 + fence_reg_lo = FENCE_REG_965_LO(reg); 71 + fence_reg_hi = FENCE_REG_965_HI(reg); 70 72 fence_pitch_shift = I965_FENCE_PITCH_SHIFT; 71 73 } 72 - 73 - fence_reg += reg * 8; 74 74 75 75 /* To w/a incoherency with non-atomic 64-bit register updates, 76 76 * we split the 64-bit update into two 32-bit writes. In order ··· 81 81 * For extra levels of paranoia, we make sure each step lands 82 82 * before applying the next step. 83 83 */ 84 - I915_WRITE(fence_reg, 0); 85 - POSTING_READ(fence_reg); 84 + I915_WRITE(fence_reg_lo, 0); 85 + POSTING_READ(fence_reg_lo); 86 86 87 87 if (obj) { 88 88 u32 size = i915_gem_obj_ggtt_size(obj); ··· 103 103 val |= 1 << I965_FENCE_TILING_Y_SHIFT; 104 104 val |= I965_FENCE_REG_VALID; 105 105 106 - I915_WRITE(fence_reg + 4, val >> 32); 107 - POSTING_READ(fence_reg + 4); 106 + I915_WRITE(fence_reg_hi, val >> 32); 107 + POSTING_READ(fence_reg_hi); 108 108 109 - I915_WRITE(fence_reg + 0, val); 110 - POSTING_READ(fence_reg); 109 + I915_WRITE(fence_reg_lo, val); 110 + POSTING_READ(fence_reg_lo); 111 111 } else { 112 - I915_WRITE(fence_reg + 4, 0); 113 - POSTING_READ(fence_reg + 4); 112 + I915_WRITE(fence_reg_hi, 0); 113 + POSTING_READ(fence_reg_hi); 114 114 } 115 115 } 116 116 ··· 149 149 } else 150 150 val = 0; 151 151 152 - if (reg < 8) 153 - reg = FENCE_REG_830_0 + reg * 4; 154 - else 155 - reg = FENCE_REG_945_8 + (reg - 8) * 4; 156 - 157 - I915_WRITE(reg, val); 158 - POSTING_READ(reg); 152 + I915_WRITE(FENCE_REG(reg), 
val); 153 + POSTING_READ(FENCE_REG(reg)); 159 154 } 160 155 161 156 static void i830_write_fence_reg(struct drm_device *dev, int reg, ··· 181 186 } else 182 187 val = 0; 183 188 184 - I915_WRITE(FENCE_REG_830_0 + reg * 4, val); 185 - POSTING_READ(FENCE_REG_830_0 + reg * 4); 189 + I915_WRITE(FENCE_REG(reg), val); 190 + POSTING_READ(FENCE_REG(reg)); 186 191 } 187 192 188 193 inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
+4 -4
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 2889 2889 2890 2890 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b 2891 2891 * write would work. */ 2892 - I915_WRITE(GEN8_PRIVATE_PAT, pat); 2893 - I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32); 2892 + I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); 2893 + I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); 2894 2894 } 2895 2895 2896 2896 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) ··· 2924 2924 GEN8_PPAT(6, CHV_PPAT_SNOOP) | 2925 2925 GEN8_PPAT(7, CHV_PPAT_SNOOP); 2926 2926 2927 - I915_WRITE(GEN8_PRIVATE_PAT, pat); 2928 - I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32); 2927 + I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); 2928 + I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); 2929 2929 } 2930 2930 2931 2931 static int gen8_gmch_probe(struct drm_device *dev,
+8 -6
drivers/gpu/drm/i915/i915_gem_gtt.h
··· 394 394 */ 395 395 #define gen6_for_each_pde(pt, pd, start, length, temp, iter) \ 396 396 for (iter = gen6_pde_index(start); \ 397 - pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \ 397 + length > 0 && iter < I915_PDES ? \ 398 + (pt = (pd)->page_table[iter]), 1 : 0; \ 398 399 iter++, \ 399 400 temp = ALIGN(start+1, 1 << GEN6_PDE_SHIFT) - start, \ 400 401 temp = min_t(unsigned, temp, length), \ ··· 460 459 */ 461 460 #define gen8_for_each_pde(pt, pd, start, length, temp, iter) \ 462 461 for (iter = gen8_pde_index(start); \ 463 - pt = (pd)->page_table[iter], length > 0 && iter < I915_PDES; \ 462 + length > 0 && iter < I915_PDES ? \ 463 + (pt = (pd)->page_table[iter]), 1 : 0; \ 464 464 iter++, \ 465 465 temp = ALIGN(start+1, 1 << GEN8_PDE_SHIFT) - start, \ 466 466 temp = min(temp, length), \ ··· 469 467 470 468 #define gen8_for_each_pdpe(pd, pdp, start, length, temp, iter) \ 471 469 for (iter = gen8_pdpe_index(start); \ 472 - pd = (pdp)->page_directory[iter], \ 473 - length > 0 && (iter < I915_PDPES_PER_PDP(dev)); \ 470 + length > 0 && (iter < I915_PDPES_PER_PDP(dev)) ? \ 471 + (pd = (pdp)->page_directory[iter]), 1 : 0; \ 474 472 iter++, \ 475 473 temp = ALIGN(start+1, 1 << GEN8_PDPE_SHIFT) - start, \ 476 474 temp = min(temp, length), \ ··· 478 476 479 477 #define gen8_for_each_pml4e(pdp, pml4, start, length, temp, iter) \ 480 478 for (iter = gen8_pml4e_index(start); \ 481 - pdp = (pml4)->pdps[iter], \ 482 - length > 0 && iter < GEN8_PML4ES_PER_PML4; \ 479 + length > 0 && iter < GEN8_PML4ES_PER_PML4 ? \ 480 + (pdp = (pml4)->pdps[iter]), 1 : 0; \ 483 481 iter++, \ 484 482 temp = ALIGN(start+1, 1ULL << GEN8_PML4E_SHIFT) - start, \ 485 483 temp = min(temp, length), \
+15 -6
drivers/gpu/drm/i915/i915_gem_shrinker.c
··· 73 73 */ 74 74 unsigned long 75 75 i915_gem_shrink(struct drm_i915_private *dev_priv, 76 - long target, unsigned flags) 76 + unsigned long target, unsigned flags) 77 77 { 78 78 const struct { 79 79 struct list_head *list; ··· 84 84 { NULL, 0 }, 85 85 }, *phase; 86 86 unsigned long count = 0; 87 + 88 + trace_i915_gem_shrink(dev_priv, target, flags); 89 + i915_gem_retire_requests(dev_priv->dev); 87 90 88 91 /* 89 92 * As we may completely rewrite the (un)bound list whilst unbinding ··· 126 123 obj->madv != I915_MADV_DONTNEED) 127 124 continue; 128 125 126 + if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active) 127 + continue; 128 + 129 129 drm_gem_object_reference(&obj->base); 130 130 131 131 /* For the unbound phase, this should be a no-op! */ ··· 145 139 list_splice(&still_in_list, phase->list); 146 140 } 147 141 142 + i915_gem_retire_requests(dev_priv->dev); 143 + 148 144 return count; 149 145 } 150 146 151 147 /** 152 - * i915_gem_shrink - Shrink buffer object caches completely 148 + * i915_gem_shrink_all - Shrink buffer object caches completely 153 149 * @dev_priv: i915 device 154 150 * 155 151 * This is a simple wraper around i915_gem_shrink() to aggressively shrink all ··· 166 158 */ 167 159 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv) 168 160 { 169 - i915_gem_evict_everything(dev_priv->dev); 170 - return i915_gem_shrink(dev_priv, LONG_MAX, 171 - I915_SHRINK_BOUND | I915_SHRINK_UNBOUND); 161 + return i915_gem_shrink(dev_priv, -1UL, 162 + I915_SHRINK_BOUND | 163 + I915_SHRINK_UNBOUND | 164 + I915_SHRINK_ACTIVE); 172 165 } 173 166 174 167 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock) ··· 222 213 count += obj->base.size >> PAGE_SHIFT; 223 214 224 215 list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { 225 - if (obj->pages_pin_count == num_vma_bound(obj)) 216 + if (!obj->active && obj->pages_pin_count == num_vma_bound(obj)) 226 217 count += obj->base.size >> PAGE_SHIFT; 227 218 } 228 219
+97 -12
drivers/gpu/drm/i915/i915_gem_stolen.c
··· 30 30 #include <drm/i915_drm.h> 31 31 #include "i915_drv.h" 32 32 33 + #define KB(x) ((x) * 1024) 34 + #define MB(x) (KB(x) * 1024) 35 + 33 36 /* 34 37 * The BIOS typically reserves some of the system's memory for the exclusive 35 38 * use of the integrated graphics. This memory is no longer available for ··· 53 50 54 51 if (!drm_mm_initialized(&dev_priv->mm.stolen)) 55 52 return -ENODEV; 53 + 54 + /* See the comment at the drm_mm_init() call for more about this check. 55 + * WaSkipStolenMemoryFirstPage:bdw,chv (incomplete) */ 56 + if (INTEL_INFO(dev_priv)->gen == 8 && start < 4096) 57 + start = 4096; 56 58 57 59 mutex_lock(&dev_priv->mm.stolen_lock); 58 60 ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size, ··· 94 86 /* Almost universally we can find the Graphics Base of Stolen Memory 95 87 * at offset 0x5c in the igfx configuration space. On a few (desktop) 96 88 * machines this is also mirrored in the bridge device at different 97 - * locations, or in the MCHBAR. On gen2, the layout is again slightly 98 - * different with the Graphics Segment immediately following Top of 99 - * Memory (or Top of Usable DRAM). Note it appears that TOUD is only 100 - * reported by 865g, so we just use the top of memory as determined 101 - * by the e820 probe. 89 + * locations, or in the MCHBAR. 102 90 * 103 - * XXX However gen2 requires an unavailable symbol. 91 + * On 865 we just check the TOUD register. 92 + * 93 + * On 830/845/85x the stolen memory base isn't available in any 94 + * register. We need to calculate it as TOM-TSEG_SIZE-stolen_size. 
95 + * 104 96 */ 105 97 base = 0; 106 98 if (INTEL_INFO(dev)->gen >= 3) { 107 99 /* Read Graphics Base of Stolen Memory directly */ 108 100 pci_read_config_dword(dev->pdev, 0x5c, &base); 109 101 base &= ~((1<<20) - 1); 110 - } else { /* GEN2 */ 111 - #if 0 112 - /* Stolen is immediately above Top of Memory */ 113 - base = max_low_pfn_mapped << PAGE_SHIFT; 114 - #endif 102 + } else if (IS_I865G(dev)) { 103 + u16 toud = 0; 104 + 105 + /* 106 + * FIXME is the graphics stolen memory region 107 + * always at TOUD? Ie. is it always the last 108 + * one to be allocated by the BIOS? 109 + */ 110 + pci_bus_read_config_word(dev->pdev->bus, PCI_DEVFN(0, 0), 111 + I865_TOUD, &toud); 112 + 113 + base = toud << 16; 114 + } else if (IS_I85X(dev)) { 115 + u32 tseg_size = 0; 116 + u32 tom; 117 + u8 tmp; 118 + 119 + pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0), 120 + I85X_ESMRAMC, &tmp); 121 + 122 + if (tmp & TSEG_ENABLE) 123 + tseg_size = MB(1); 124 + 125 + pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 1), 126 + I85X_DRB3, &tmp); 127 + tom = tmp * MB(32); 128 + 129 + base = tom - tseg_size - dev_priv->gtt.stolen_size; 130 + } else if (IS_845G(dev)) { 131 + u32 tseg_size = 0; 132 + u32 tom; 133 + u8 tmp; 134 + 135 + pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0), 136 + I845_ESMRAMC, &tmp); 137 + 138 + if (tmp & TSEG_ENABLE) { 139 + switch (tmp & I845_TSEG_SIZE_MASK) { 140 + case I845_TSEG_SIZE_512K: 141 + tseg_size = KB(512); 142 + break; 143 + case I845_TSEG_SIZE_1M: 144 + tseg_size = MB(1); 145 + break; 146 + } 147 + } 148 + 149 + pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0), 150 + I830_DRB3, &tmp); 151 + tom = tmp * MB(32); 152 + 153 + base = tom - tseg_size - dev_priv->gtt.stolen_size; 154 + } else if (IS_I830(dev)) { 155 + u32 tseg_size = 0; 156 + u32 tom; 157 + u8 tmp; 158 + 159 + pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0), 160 + I830_ESMRAMC, &tmp); 161 + 162 + if (tmp & TSEG_ENABLE) { 163 + if (tmp & I830_TSEG_SIZE_1M) 
164 + tseg_size = MB(1); 165 + else 166 + tseg_size = KB(512); 167 + } 168 + 169 + pci_bus_read_config_byte(dev->pdev->bus, PCI_DEVFN(0, 0), 170 + I830_DRB3, &tmp); 171 + tom = tmp * MB(32); 172 + 173 + base = tom - tseg_size - dev_priv->gtt.stolen_size; 115 174 } 116 175 117 176 if (base == 0) ··· 468 393 dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size - 469 394 reserved_total; 470 395 471 - /* Basic memrange allocator for stolen space */ 396 + /* 397 + * Basic memrange allocator for stolen space. 398 + * 399 + * TODO: Notice that some platforms require us to not use the first page 400 + * of the stolen memory but their BIOSes may still put the framebuffer 401 + * on the first page. So we don't reserve this page for now because of 402 + * that. Our current solution is to just prevent new nodes from being 403 + * inserted on the first page - see the check we have at 404 + * i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon 405 + * problem later. 406 + */ 472 407 drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size); 473 408 474 409 return 0;
+169 -152
drivers/gpu/drm/i915/i915_gem_userptr.c
··· 50 50 struct mmu_notifier mn; 51 51 struct rb_root objects; 52 52 struct list_head linear; 53 - unsigned long serial; 54 53 bool has_linear; 55 54 }; 56 55 ··· 58 59 struct interval_tree_node it; 59 60 struct list_head link; 60 61 struct drm_i915_gem_object *obj; 62 + struct work_struct work; 63 + bool active; 61 64 bool is_linear; 62 65 }; 63 66 64 - static unsigned long cancel_userptr(struct drm_i915_gem_object *obj) 67 + static void __cancel_userptr__worker(struct work_struct *work) 65 68 { 69 + struct i915_mmu_object *mo = container_of(work, typeof(*mo), work); 70 + struct drm_i915_gem_object *obj = mo->obj; 66 71 struct drm_device *dev = obj->base.dev; 67 - unsigned long end; 68 72 69 73 mutex_lock(&dev->struct_mutex); 70 74 /* Cancel any active worker and force us to re-evaluate gup */ ··· 90 88 dev_priv->mm.interruptible = was_interruptible; 91 89 } 92 90 93 - end = obj->userptr.ptr + obj->base.size; 94 - 95 91 drm_gem_object_unreference(&obj->base); 96 92 mutex_unlock(&dev->struct_mutex); 97 - 98 - return end; 99 93 } 100 94 101 - static void *invalidate_range__linear(struct i915_mmu_notifier *mn, 102 - struct mm_struct *mm, 103 - unsigned long start, 104 - unsigned long end) 95 + static unsigned long cancel_userptr(struct i915_mmu_object *mo) 105 96 { 106 - struct i915_mmu_object *mo; 107 - unsigned long serial; 97 + unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size; 108 98 109 - restart: 110 - serial = mn->serial; 111 - list_for_each_entry(mo, &mn->linear, link) { 112 - struct drm_i915_gem_object *obj; 113 - 114 - if (mo->it.last < start || mo->it.start > end) 115 - continue; 116 - 117 - obj = mo->obj; 118 - 119 - if (!kref_get_unless_zero(&obj->base.refcount)) 120 - continue; 121 - 122 - spin_unlock(&mn->lock); 123 - 124 - cancel_userptr(obj); 125 - 126 - spin_lock(&mn->lock); 127 - if (serial != mn->serial) 128 - goto restart; 99 + /* The mmu_object is released late when destroying the 100 + * GEM object so it is entirely possible to 
gain a 101 + * reference on an object in the process of being freed 102 + * since our serialisation is via the spinlock and not 103 + * the struct_mutex - and consequently use it after it 104 + * is freed and then double free it. 105 + */ 106 + if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) { 107 + schedule_work(&mo->work); 108 + /* only schedule one work packet to avoid the refleak */ 109 + mo->active = false; 129 110 } 130 111 131 - return NULL; 112 + return end; 132 113 } 133 114 134 115 static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn, ··· 119 134 unsigned long start, 120 135 unsigned long end) 121 136 { 122 - struct i915_mmu_notifier *mn = container_of(_mn, struct i915_mmu_notifier, mn); 123 - struct interval_tree_node *it = NULL; 124 - unsigned long next = start; 125 - unsigned long serial = 0; 137 + struct i915_mmu_notifier *mn = 138 + container_of(_mn, struct i915_mmu_notifier, mn); 139 + struct i915_mmu_object *mo; 126 140 127 - end--; /* interval ranges are inclusive, but invalidate range is exclusive */ 128 - while (next < end) { 129 - struct drm_i915_gem_object *obj = NULL; 141 + /* interval ranges are inclusive, but invalidate range is exclusive */ 142 + end--; 130 143 131 - spin_lock(&mn->lock); 132 - if (mn->has_linear) 133 - it = invalidate_range__linear(mn, mm, start, end); 134 - else if (serial == mn->serial) 135 - it = interval_tree_iter_next(it, next, end); 136 - else 137 - it = interval_tree_iter_first(&mn->objects, start, end); 138 - if (it != NULL) { 139 - obj = container_of(it, struct i915_mmu_object, it)->obj; 140 - 141 - /* The mmu_object is released late when destroying the 142 - * GEM object so it is entirely possible to gain a 143 - * reference on an object in the process of being freed 144 - * since our serialisation is via the spinlock and not 145 - * the struct_mutex - and consequently use it after it 146 - * is freed and then double free it. 
147 - */ 148 - if (!kref_get_unless_zero(&obj->base.refcount)) { 149 - spin_unlock(&mn->lock); 150 - serial = 0; 144 + spin_lock(&mn->lock); 145 + if (mn->has_linear) { 146 + list_for_each_entry(mo, &mn->linear, link) { 147 + if (mo->it.last < start || mo->it.start > end) 151 148 continue; 152 - } 153 149 154 - serial = mn->serial; 150 + cancel_userptr(mo); 155 151 } 156 - spin_unlock(&mn->lock); 157 - if (obj == NULL) 158 - return; 152 + } else { 153 + struct interval_tree_node *it; 159 154 160 - next = cancel_userptr(obj); 155 + it = interval_tree_iter_first(&mn->objects, start, end); 156 + while (it) { 157 + mo = container_of(it, struct i915_mmu_object, it); 158 + start = cancel_userptr(mo); 159 + it = interval_tree_iter_next(it, start, end); 160 + } 161 161 } 162 + spin_unlock(&mn->lock); 162 163 } 163 164 164 165 static const struct mmu_notifier_ops i915_gem_userptr_notifier = { ··· 164 193 spin_lock_init(&mn->lock); 165 194 mn->mn.ops = &i915_gem_userptr_notifier; 166 195 mn->objects = RB_ROOT; 167 - mn->serial = 1; 168 196 INIT_LIST_HEAD(&mn->linear); 169 197 mn->has_linear = false; 170 198 ··· 175 205 } 176 206 177 207 return mn; 178 - } 179 - 180 - static void __i915_mmu_notifier_update_serial(struct i915_mmu_notifier *mn) 181 - { 182 - if (++mn->serial == 0) 183 - mn->serial = 1; 184 208 } 185 209 186 210 static int ··· 223 259 } else 224 260 interval_tree_insert(&mo->it, &mn->objects); 225 261 226 - if (ret == 0) { 262 + if (ret == 0) 227 263 list_add(&mo->link, &mn->linear); 228 - __i915_mmu_notifier_update_serial(mn); 229 - } 264 + 230 265 spin_unlock(&mn->lock); 231 266 mutex_unlock(&dev->struct_mutex); 232 267 ··· 253 290 mn->has_linear = i915_mmu_notifier_has_linear(mn); 254 291 else 255 292 interval_tree_remove(&mo->it, &mn->objects); 256 - __i915_mmu_notifier_update_serial(mn); 257 293 spin_unlock(&mn->lock); 258 294 } 259 295 ··· 319 357 mo->it.start = obj->userptr.ptr; 320 358 mo->it.last = mo->it.start + obj->base.size - 1; 321 359 mo->obj = 
obj; 360 + INIT_WORK(&mo->work, __cancel_userptr__worker); 322 361 323 362 ret = i915_mmu_notifier_add(obj->base.dev, mn, mo); 324 363 if (ret) { ··· 528 565 return ret; 529 566 } 530 567 568 + static int 569 + __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj, 570 + bool value) 571 + { 572 + int ret = 0; 573 + 574 + /* During mm_invalidate_range we need to cancel any userptr that 575 + * overlaps the range being invalidated. Doing so requires the 576 + * struct_mutex, and that risks recursion. In order to cause 577 + * recursion, the user must alias the userptr address space with 578 + * a GTT mmapping (possible with a MAP_FIXED) - then when we have 579 + * to invalidate that mmaping, mm_invalidate_range is called with 580 + * the userptr address *and* the struct_mutex held. To prevent that 581 + * we set a flag under the i915_mmu_notifier spinlock to indicate 582 + * whether this object is valid. 583 + */ 584 + #if defined(CONFIG_MMU_NOTIFIER) 585 + if (obj->userptr.mmu_object == NULL) 586 + return 0; 587 + 588 + spin_lock(&obj->userptr.mmu_object->mn->lock); 589 + /* In order to serialise get_pages with an outstanding 590 + * cancel_userptr, we must drop the struct_mutex and try again. 
591 + */ 592 + if (!value || !work_pending(&obj->userptr.mmu_object->work)) 593 + obj->userptr.mmu_object->active = value; 594 + else 595 + ret = -EAGAIN; 596 + spin_unlock(&obj->userptr.mmu_object->mn->lock); 597 + #endif 598 + 599 + return ret; 600 + } 601 + 531 602 static void 532 603 __i915_gem_userptr_get_pages_worker(struct work_struct *_work) 533 604 { 534 605 struct get_pages_work *work = container_of(_work, typeof(*work), work); 535 606 struct drm_i915_gem_object *obj = work->obj; 536 607 struct drm_device *dev = obj->base.dev; 537 - const int num_pages = obj->base.size >> PAGE_SHIFT; 608 + const int npages = obj->base.size >> PAGE_SHIFT; 538 609 struct page **pvec; 539 610 int pinned, ret; 540 611 541 612 ret = -ENOMEM; 542 613 pinned = 0; 543 614 544 - pvec = kmalloc(num_pages*sizeof(struct page *), 615 + pvec = kmalloc(npages*sizeof(struct page *), 545 616 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 546 617 if (pvec == NULL) 547 - pvec = drm_malloc_ab(num_pages, sizeof(struct page *)); 618 + pvec = drm_malloc_ab(npages, sizeof(struct page *)); 548 619 if (pvec != NULL) { 549 620 struct mm_struct *mm = obj->userptr.mm->mm; 550 621 551 622 down_read(&mm->mmap_sem); 552 - while (pinned < num_pages) { 623 + while (pinned < npages) { 553 624 ret = get_user_pages(work->task, mm, 554 625 obj->userptr.ptr + pinned * PAGE_SIZE, 555 - num_pages - pinned, 626 + npages - pinned, 556 627 !obj->userptr.read_only, 0, 557 628 pvec + pinned, NULL); 558 629 if (ret < 0) ··· 598 601 } 599 602 600 603 mutex_lock(&dev->struct_mutex); 601 - if (obj->userptr.work != &work->work) { 602 - ret = 0; 603 - } else if (pinned == num_pages) { 604 - ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages); 605 - if (ret == 0) { 606 - list_add_tail(&obj->global_list, &to_i915(dev)->mm.unbound_list); 607 - obj->get_page.sg = obj->pages->sgl; 608 - obj->get_page.last = 0; 609 - 610 - pinned = 0; 604 + if (obj->userptr.work == &work->work) { 605 + if (pinned == npages) { 606 + ret = 
__i915_gem_userptr_set_pages(obj, pvec, npages); 607 + if (ret == 0) { 608 + list_add_tail(&obj->global_list, 609 + &to_i915(dev)->mm.unbound_list); 610 + obj->get_page.sg = obj->pages->sgl; 611 + obj->get_page.last = 0; 612 + pinned = 0; 613 + } 611 614 } 615 + obj->userptr.work = ERR_PTR(ret); 616 + if (ret) 617 + __i915_gem_userptr_set_active(obj, false); 612 618 } 613 619 614 - obj->userptr.work = ERR_PTR(ret); 615 620 obj->userptr.workers--; 616 621 drm_gem_object_unreference(&obj->base); 617 622 mutex_unlock(&dev->struct_mutex); ··· 626 627 } 627 628 628 629 static int 630 + __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj, 631 + bool *active) 632 + { 633 + struct get_pages_work *work; 634 + 635 + /* Spawn a worker so that we can acquire the 636 + * user pages without holding our mutex. Access 637 + * to the user pages requires mmap_sem, and we have 638 + * a strict lock ordering of mmap_sem, struct_mutex - 639 + * we already hold struct_mutex here and so cannot 640 + * call gup without encountering a lock inversion. 641 + * 642 + * Userspace will keep on repeating the operation 643 + * (thanks to EAGAIN) until either we hit the fast 644 + * path or the worker completes. If the worker is 645 + * cancelled or superseded, the task is still run 646 + * but the results ignored. (This leads to 647 + * complications that we may have a stray object 648 + * refcount that we need to be wary of when 649 + * checking for existing objects during creation.) 650 + * If the worker encounters an error, it reports 651 + * that error back to this function through 652 + * obj->userptr.work = ERR_PTR. 
653 + */ 654 + if (obj->userptr.workers >= I915_GEM_USERPTR_MAX_WORKERS) 655 + return -EAGAIN; 656 + 657 + work = kmalloc(sizeof(*work), GFP_KERNEL); 658 + if (work == NULL) 659 + return -ENOMEM; 660 + 661 + obj->userptr.work = &work->work; 662 + obj->userptr.workers++; 663 + 664 + work->obj = obj; 665 + drm_gem_object_reference(&obj->base); 666 + 667 + work->task = current; 668 + get_task_struct(work->task); 669 + 670 + INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker); 671 + schedule_work(&work->work); 672 + 673 + *active = true; 674 + return -EAGAIN; 675 + } 676 + 677 + static int 629 678 i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) 630 679 { 631 680 const int num_pages = obj->base.size >> PAGE_SHIFT; 632 681 struct page **pvec; 633 682 int pinned, ret; 683 + bool active; 634 684 635 685 /* If userspace should engineer that these pages are replaced in 636 686 * the vma between us binding this page into the GTT and completion ··· 697 649 * to the vma (discard or cloning) which should prevent the more 698 650 * egregious cases from causing harm. 
699 651 */ 652 + if (IS_ERR(obj->userptr.work)) { 653 + /* active flag will have been dropped already by the worker */ 654 + ret = PTR_ERR(obj->userptr.work); 655 + obj->userptr.work = NULL; 656 + return ret; 657 + } 658 + if (obj->userptr.work) 659 + /* active flag should still be held for the pending work */ 660 + return -EAGAIN; 661 + 662 + /* Let the mmu-notifier know that we have begun and need cancellation */ 663 + ret = __i915_gem_userptr_set_active(obj, true); 664 + if (ret) 665 + return ret; 700 666 701 667 pvec = NULL; 702 668 pinned = 0; ··· 719 657 GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY); 720 658 if (pvec == NULL) { 721 659 pvec = drm_malloc_ab(num_pages, sizeof(struct page *)); 722 - if (pvec == NULL) 660 + if (pvec == NULL) { 661 + __i915_gem_userptr_set_active(obj, false); 723 662 return -ENOMEM; 663 + } 724 664 } 725 665 726 666 pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages, 727 667 !obj->userptr.read_only, pvec); 728 668 } 729 - if (pinned < num_pages) { 730 - if (pinned < 0) { 731 - ret = pinned; 732 - pinned = 0; 733 - } else { 734 - /* Spawn a worker so that we can acquire the 735 - * user pages without holding our mutex. Access 736 - * to the user pages requires mmap_sem, and we have 737 - * a strict lock ordering of mmap_sem, struct_mutex - 738 - * we already hold struct_mutex here and so cannot 739 - * call gup without encountering a lock inversion. 740 - * 741 - * Userspace will keep on repeating the operation 742 - * (thanks to EAGAIN) until either we hit the fast 743 - * path or the worker completes. If the worker is 744 - * cancelled or superseded, the task is still run 745 - * but the results ignored. (This leads to 746 - * complications that we may have a stray object 747 - * refcount that we need to be wary of when 748 - * checking for existing objects during creation.) 749 - * If the worker encounters an error, it reports 750 - * that error back to this function through 751 - * obj->userptr.work = ERR_PTR. 
752 - */ 753 - ret = -EAGAIN; 754 - if (obj->userptr.work == NULL && 755 - obj->userptr.workers < I915_GEM_USERPTR_MAX_WORKERS) { 756 - struct get_pages_work *work; 757 669 758 - work = kmalloc(sizeof(*work), GFP_KERNEL); 759 - if (work != NULL) { 760 - obj->userptr.work = &work->work; 761 - obj->userptr.workers++; 762 - 763 - work->obj = obj; 764 - drm_gem_object_reference(&obj->base); 765 - 766 - work->task = current; 767 - get_task_struct(work->task); 768 - 769 - INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker); 770 - schedule_work(&work->work); 771 - } else 772 - ret = -ENOMEM; 773 - } else { 774 - if (IS_ERR(obj->userptr.work)) { 775 - ret = PTR_ERR(obj->userptr.work); 776 - obj->userptr.work = NULL; 777 - } 778 - } 779 - } 780 - } else { 670 + active = false; 671 + if (pinned < 0) 672 + ret = pinned, pinned = 0; 673 + else if (pinned < num_pages) 674 + ret = __i915_gem_userptr_get_pages_schedule(obj, &active); 675 + else 781 676 ret = __i915_gem_userptr_set_pages(obj, pvec, num_pages); 782 - if (ret == 0) { 783 - obj->userptr.work = NULL; 784 - pinned = 0; 785 - } 677 + if (ret) { 678 + __i915_gem_userptr_set_active(obj, active); 679 + release_pages(pvec, pinned, 0); 786 680 } 787 - 788 - release_pages(pvec, pinned, 0); 789 681 drm_free_large(pvec); 790 682 return ret; 791 683 } ··· 750 734 struct sg_page_iter sg_iter; 751 735 752 736 BUG_ON(obj->userptr.work != NULL); 737 + __i915_gem_userptr_set_active(obj, false); 753 738 754 739 if (obj->madv != I915_MADV_WILLNEED) 755 740 obj->dirty = 0;
+13 -18
drivers/gpu/drm/i915/i915_gpu_error.c
··· 792 792 int i; 793 793 794 794 if (IS_GEN3(dev) || IS_GEN2(dev)) { 795 - for (i = 0; i < 8; i++) 796 - error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); 797 - if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) 798 - for (i = 0; i < 8; i++) 799 - error->fence[i+8] = I915_READ(FENCE_REG_945_8 + 800 - (i * 4)); 801 - } else if (IS_GEN5(dev) || IS_GEN4(dev)) 802 - for (i = 0; i < 16; i++) 803 - error->fence[i] = I915_READ64(FENCE_REG_965_0 + 804 - (i * 8)); 805 - else if (INTEL_INFO(dev)->gen >= 6) 806 795 for (i = 0; i < dev_priv->num_fence_regs; i++) 807 - error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + 808 - (i * 8)); 796 + error->fence[i] = I915_READ(FENCE_REG(i)); 797 + } else if (IS_GEN5(dev) || IS_GEN4(dev)) { 798 + for (i = 0; i < dev_priv->num_fence_regs; i++) 799 + error->fence[i] = I915_READ64(FENCE_REG_965_LO(i)); 800 + } else if (INTEL_INFO(dev)->gen >= 6) { 801 + for (i = 0; i < dev_priv->num_fence_regs; i++) 802 + error->fence[i] = I915_READ64(FENCE_REG_GEN6_LO(i)); 803 + } 809 804 } 810 805 811 806 ··· 886 891 ering->faddr = I915_READ(DMA_FADD_I8XX); 887 892 ering->ipeir = I915_READ(IPEIR); 888 893 ering->ipehr = I915_READ(IPEHR); 889 - ering->instdone = I915_READ(INSTDONE); 894 + ering->instdone = I915_READ(GEN2_INSTDONE); 890 895 } 891 896 892 897 ering->waiting = waitqueue_active(&ring->irq_queue); ··· 1388 1393 memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); 1389 1394 1390 1395 if (IS_GEN2(dev) || IS_GEN3(dev)) 1391 - instdone[0] = I915_READ(INSTDONE); 1396 + instdone[0] = I915_READ(GEN2_INSTDONE); 1392 1397 else if (IS_GEN4(dev) || IS_GEN5(dev) || IS_GEN6(dev)) { 1393 - instdone[0] = I915_READ(INSTDONE_I965); 1394 - instdone[1] = I915_READ(INSTDONE1); 1398 + instdone[0] = I915_READ(RING_INSTDONE(RENDER_RING_BASE)); 1399 + instdone[1] = I915_READ(GEN4_INSTDONE1); 1395 1400 } else if (INTEL_INFO(dev)->gen >= 7) { 1396 - instdone[0] = I915_READ(GEN7_INSTDONE_1); 1401 + instdone[0] = 
I915_READ(RING_INSTDONE(RENDER_RING_BASE)); 1397 1402 instdone[1] = I915_READ(GEN7_SC_INSTDONE); 1398 1403 instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); 1399 1404 instdone[3] = I915_READ(GEN7_ROW_INSTDONE);
+2 -1
drivers/gpu/drm/i915/i915_guc_reg.h
··· 37 37 #define GS_UKERNEL_READY (0xF0 << GS_UKERNEL_SHIFT) 38 38 #define GS_MIA_SHIFT 16 39 39 #define GS_MIA_MASK (0x07 << GS_MIA_SHIFT) 40 + #define GS_MIA_CORE_STATE (1 << GS_MIA_SHIFT) 40 41 41 42 #define SOFT_SCRATCH(n) (0xc180 + ((n) * 4)) 42 43 43 - #define UOS_RSA_SCRATCH_0 0xc200 44 + #define UOS_RSA_SCRATCH(i) (0xc200 + (i) * 4) 44 45 #define DMA_ADDR_0_LOW 0xc300 45 46 #define DMA_ADDR_0_HIGH 0xc304 46 47 #define DMA_ADDR_1_LOW 0xc308
+61 -2
drivers/gpu/drm/i915/i915_guc_submission.c
··· 155 155 struct i915_guc_client *client) 156 156 { 157 157 struct drm_i915_private *dev_priv = guc_to_i915(guc); 158 + struct drm_device *dev = dev_priv->dev; 158 159 u32 data[2]; 159 160 160 161 data[0] = HOST2GUC_ACTION_SAMPLE_FORCEWAKE; 161 - data[1] = (intel_enable_rc6(dev_priv->dev)) ? 1 : 0; 162 + /* WaRsDisableCoarsePowerGating:skl,bxt */ 163 + if (!intel_enable_rc6(dev_priv->dev) || 164 + (IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) || 165 + (IS_SKL_GT3(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0)) || 166 + (IS_SKL_GT4(dev) && (INTEL_REVID(dev) <= SKL_REVID_E0))) 167 + data[1] = 0; 168 + else 169 + /* bit 0 and 1 are for Render and Media domain separately */ 170 + data[1] = GUC_FORCEWAKE_RENDER | GUC_FORCEWAKE_MEDIA; 162 171 163 - return host2guc_action(guc, data, 2); 172 + return host2guc_action(guc, data, ARRAY_SIZE(data)); 164 173 } 165 174 166 175 /* ··· 922 913 ida_destroy(&guc->ctx_ids); 923 914 gem_release_guc_obj(guc->ctx_pool_obj); 924 915 guc->ctx_pool_obj = NULL; 916 + } 917 + 918 + /** 919 + * intel_guc_suspend() - notify GuC entering suspend state 920 + * @dev: drm device 921 + */ 922 + int intel_guc_suspend(struct drm_device *dev) 923 + { 924 + struct drm_i915_private *dev_priv = dev->dev_private; 925 + struct intel_guc *guc = &dev_priv->guc; 926 + struct intel_context *ctx; 927 + u32 data[3]; 928 + 929 + if (!i915.enable_guc_submission) 930 + return 0; 931 + 932 + ctx = dev_priv->ring[RCS].default_context; 933 + 934 + data[0] = HOST2GUC_ACTION_ENTER_S_STATE; 935 + /* any value greater than GUC_POWER_D0 */ 936 + data[1] = GUC_POWER_D1; 937 + /* first page is shared data with GuC */ 938 + data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state); 939 + 940 + return host2guc_action(guc, data, ARRAY_SIZE(data)); 941 + } 942 + 943 + 944 + /** 945 + * intel_guc_resume() - notify GuC resuming from suspend state 946 + * @dev: drm device 947 + */ 948 + int intel_guc_resume(struct drm_device *dev) 949 + { 950 + struct drm_i915_private 
*dev_priv = dev->dev_private; 951 + struct intel_guc *guc = &dev_priv->guc; 952 + struct intel_context *ctx; 953 + u32 data[3]; 954 + 955 + if (!i915.enable_guc_submission) 956 + return 0; 957 + 958 + ctx = dev_priv->ring[RCS].default_context; 959 + 960 + data[0] = HOST2GUC_ACTION_EXIT_S_STATE; 961 + data[1] = GUC_POWER_D0; 962 + /* first page is shared data with GuC */ 963 + data[2] = i915_gem_obj_ggtt_offset(ctx->engine[RCS].state); 964 + 965 + return host2guc_action(guc, data, ARRAY_SIZE(data)); 925 966 }
+29 -3
drivers/gpu/drm/i915/i915_irq.c
··· 581 581 582 582 /** 583 583 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion 584 + * @dev: drm device 584 585 */ 585 586 static void i915_enable_asle_pipestat(struct drm_device *dev) 586 587 { ··· 998 997 int threshold) 999 998 { 1000 999 u64 time, c0; 1000 + unsigned int mul = 100; 1001 1001 1002 1002 if (old->cz_clock == 0) 1003 1003 return false; 1004 1004 1005 + if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) 1006 + mul <<= 8; 1007 + 1005 1008 time = now->cz_clock - old->cz_clock; 1006 - time *= threshold * dev_priv->mem_freq; 1009 + time *= threshold * dev_priv->czclk_freq; 1007 1010 1008 1011 /* Workload can be split between render + media, e.g. SwapBuffers 1009 1012 * being blitted in X after being rendered in mesa. To account for ··· 1015 1010 */ 1016 1011 c0 = now->render_c0 - old->render_c0; 1017 1012 c0 += now->media_c0 - old->media_c0; 1018 - c0 *= 100 * VLV_CZ_CLOCK_TO_MILLI_SEC * 4 / 1000; 1013 + c0 *= mul * VLV_CZ_CLOCK_TO_MILLI_SEC; 1019 1014 1020 1015 return c0 >= time; 1021 1016 } ··· 2393 2388 2394 2389 /** 2395 2390 * i915_reset_and_wakeup - do process context error handling work 2391 + * @dev: drm device 2396 2392 * 2397 2393 * Fire an error uevent so userspace can see that a hang or error 2398 2394 * was detected. ··· 2571 2565 * i915_handle_error - handle a gpu error 2572 2566 * @dev: drm device 2573 2567 * 2574 - * Do some basic checking of regsiter state at error time and 2568 + * Do some basic checking of register state at error time and 2575 2569 * dump it to the syslog. Also call i915_capture_error_state() to make 2576 2570 * sure we get a record and make it available in debugfs. 
Fire a uevent 2577 2571 * so userspace knows something bad happened (should trigger collection ··· 2783 2777 u32 cmd, ipehr, head; 2784 2778 u64 offset = 0; 2785 2779 int i, backwards; 2780 + 2781 + /* 2782 + * This function does not support execlist mode - any attempt to 2783 + * proceed further into this function will result in a kernel panic 2784 + * when dereferencing ring->buffer, which is not set up in execlist 2785 + * mode. 2786 + * 2787 + * The correct way of doing it would be to derive the currently 2788 + * executing ring buffer from the current context, which is derived 2789 + * from the currently running request. Unfortunately, to get the 2790 + * current request we would have to grab the struct_mutex before doing 2791 + * anything else, which would be ill-advised since some other thread 2792 + * might have grabbed it already and managed to hang itself, causing 2793 + * the hang checker to deadlock. 2794 + * 2795 + * Therefore, this function does not support execlist mode in its 2796 + * current form. Just return NULL and move on. 2797 + */ 2798 + if (ring->buffer == NULL) 2799 + return NULL; 2786 2800 2787 2801 ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); 2788 2802 if (!ipehr_is_semaphore_wait(ring->dev, ipehr))
+156 -30
drivers/gpu/drm/i915/i915_reg.h
··· 105 105 #define GRDOM_RESET_STATUS (1<<1) 106 106 #define GRDOM_RESET_ENABLE (1<<0) 107 107 108 - #define ILK_GDSR 0x2ca4 /* MCHBAR offset */ 108 + #define ILK_GDSR (MCHBAR_MIRROR_BASE + 0x2ca4) 109 109 #define ILK_GRDOM_FULL (0<<1) 110 110 #define ILK_GRDOM_RENDER (1<<1) 111 111 #define ILK_GRDOM_MEDIA (3<<1) ··· 536 536 #define GEN7_3DPRIM_START_INSTANCE 0x243C 537 537 #define GEN7_3DPRIM_BASE_VERTEX 0x2440 538 538 539 + #define GEN7_GPGPU_DISPATCHDIMX 0x2500 540 + #define GEN7_GPGPU_DISPATCHDIMY 0x2504 541 + #define GEN7_GPGPU_DISPATCHDIMZ 0x2508 542 + 539 543 #define OACONTROL 0x2360 540 544 541 545 #define _GEN7_PIPEA_DE_LOAD_SL 0x70068 ··· 732 728 #define DSI_PLL_N1_DIV_MASK (3 << 16) 733 729 #define DSI_PLL_M1_DIV_SHIFT 0 734 730 #define DSI_PLL_M1_DIV_MASK (0x1ff << 0) 731 + #define CCK_CZ_CLOCK_CONTROL 0x62 735 732 #define CCK_DISPLAY_CLOCK_CONTROL 0x6b 736 - #define DISPLAY_TRUNK_FORCE_ON (1 << 17) 737 - #define DISPLAY_TRUNK_FORCE_OFF (1 << 16) 738 - #define DISPLAY_FREQUENCY_STATUS (0x1f << 8) 739 - #define DISPLAY_FREQUENCY_STATUS_SHIFT 8 740 - #define DISPLAY_FREQUENCY_VALUES (0x1f << 0) 733 + #define CCK_TRUNK_FORCE_ON (1 << 17) 734 + #define CCK_TRUNK_FORCE_OFF (1 << 16) 735 + #define CCK_FREQUENCY_STATUS (0x1f << 8) 736 + #define CCK_FREQUENCY_STATUS_SHIFT 8 737 + #define CCK_FREQUENCY_VALUES (0x1f << 0) 741 738 742 739 /** 743 740 * DOC: DPIO ··· 1400 1395 #define BXT_PORT_TX_DW3_LN0(port) _PORT3(port, _PORT_TX_DW3_LN0_A, \ 1401 1396 _PORT_TX_DW3_LN0_B, \ 1402 1397 _PORT_TX_DW3_LN0_C) 1403 - #define UNIQE_TRANGE_EN_METHOD (1 << 27) 1398 + #define SCALE_DCOMP_METHOD (1 << 26) 1399 + #define UNIQUE_TRANGE_EN_METHOD (1 << 27) 1404 1400 1405 1401 #define _PORT_TX_DW4_LN0_A 0x162510 1406 1402 #define _PORT_TX_DW4_LN0_B 0x6C510 ··· 1442 1436 1443 1437 /* 1444 1438 * Fence registers 1439 + * [0-7] @ 0x2000 gen2,gen3 1440 + * [8-15] @ 0x3000 945,g33,pnv 1441 + * 1442 + * [0-15] @ 0x3000 gen4,gen5 1443 + * 1444 + * [0-15] @ 0x100000 gen6,vlv,chv 1445 + 
* [0-31] @ 0x100000 gen7+ 1445 1446 */ 1446 - #define FENCE_REG_830_0 0x2000 1447 - #define FENCE_REG_945_8 0x3000 1447 + #define FENCE_REG(i) (0x2000 + (((i) & 8) << 9) + ((i) & 7) * 4) 1448 1448 #define I830_FENCE_START_MASK 0x07f80000 1449 1449 #define I830_FENCE_TILING_Y_SHIFT 12 1450 1450 #define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) ··· 1463 1451 #define I915_FENCE_START_MASK 0x0ff00000 1464 1452 #define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8) 1465 1453 1466 - #define FENCE_REG_965_0 0x03000 1454 + #define FENCE_REG_965_LO(i) (0x03000 + (i) * 8) 1455 + #define FENCE_REG_965_HI(i) (0x03000 + (i) * 8 + 4) 1467 1456 #define I965_FENCE_PITCH_SHIFT 2 1468 1457 #define I965_FENCE_TILING_Y_SHIFT 1 1469 1458 #define I965_FENCE_REG_VALID (1<<0) 1470 1459 #define I965_FENCE_MAX_PITCH_VAL 0x0400 1471 1460 1472 - #define FENCE_REG_SANDYBRIDGE_0 0x100000 1473 - #define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 1461 + #define FENCE_REG_GEN6_LO(i) (0x100000 + (i) * 8) 1462 + #define FENCE_REG_GEN6_HI(i) (0x100000 + (i) * 8 + 4) 1463 + #define GEN6_FENCE_PITCH_SHIFT 32 1474 1464 #define GEN7_FENCE_MAX_PITCH_VAL 0x0800 1475 1465 1476 1466 ··· 1556 1542 #define RING_FAULT_FAULT_TYPE(x) ((x >> 1) & 0x3) 1557 1543 #define RING_FAULT_VALID (1<<0) 1558 1544 #define DONE_REG 0x40b0 1559 - #define GEN8_PRIVATE_PAT 0x40e0 1545 + #define GEN8_PRIVATE_PAT_LO 0x40e0 1546 + #define GEN8_PRIVATE_PAT_HI (0x40e0 + 4) 1560 1547 #define BSD_HWS_PGA_GEN7 (0x04180) 1561 1548 #define BLT_HWS_PGA_GEN7 (0x04280) 1562 1549 #define VEBOX_HWS_PGA_GEN7 (0x04380) ··· 1597 1582 #endif 1598 1583 #define IPEIR_I965 0x02064 1599 1584 #define IPEHR_I965 0x02068 1600 - #define INSTDONE_I965 0x0206c 1601 - #define GEN7_INSTDONE_1 0x0206c 1602 1585 #define GEN7_SC_INSTDONE 0x07100 1603 1586 #define GEN7_SAMPLER_INSTDONE 0x0e160 1604 1587 #define GEN7_ROW_INSTDONE 0x0e164 1605 1588 #define I915_NUM_INSTDONE_REG 4 1606 1589 #define RING_IPEIR(base) ((base)+0x64) 1607 1590 #define 
RING_IPEHR(base) ((base)+0x68) 1591 + /* 1592 + * On GEN4, only the render ring INSTDONE exists and has a different 1593 + * layout than the GEN7+ version. 1594 + * The GEN2 counterpart of this register is GEN2_INSTDONE. 1595 + */ 1608 1596 #define RING_INSTDONE(base) ((base)+0x6c) 1609 1597 #define RING_INSTPS(base) ((base)+0x70) 1610 1598 #define RING_DMA_FADD(base) ((base)+0x78) ··· 1615 1597 #define RING_INSTPM(base) ((base)+0xc0) 1616 1598 #define RING_MI_MODE(base) ((base)+0x9c) 1617 1599 #define INSTPS 0x02070 /* 965+ only */ 1618 - #define INSTDONE1 0x0207c /* 965+ only */ 1600 + #define GEN4_INSTDONE1 0x0207c /* 965+ only, aka INSTDONE_2 on SNB */ 1619 1601 #define ACTHD_I965 0x02074 1620 1602 #define HWS_PGA 0x02080 1621 1603 #define HWS_ADDRESS_MASK 0xfffff000 ··· 1624 1606 #define PWRCTX_EN (1<<0) 1625 1607 #define IPEIR 0x02088 1626 1608 #define IPEHR 0x0208c 1627 - #define INSTDONE 0x02090 1609 + #define GEN2_INSTDONE 0x02090 1628 1610 #define NOPID 0x02094 1629 1611 #define HWSTAM 0x02098 1630 1612 #define DMA_FADD_I8XX 0x020d0 ··· 1894 1876 #define CHV_FGT_EU_DIS_SS1_R1_MASK (0xf << CHV_FGT_EU_DIS_SS1_R1_SHIFT) 1895 1877 1896 1878 #define GEN8_FUSE2 0x9120 1879 + #define GEN8_F2_SS_DIS_SHIFT 21 1880 + #define GEN8_F2_SS_DIS_MASK (0x7 << GEN8_F2_SS_DIS_SHIFT) 1897 1881 #define GEN8_F2_S_ENA_SHIFT 25 1898 1882 #define GEN8_F2_S_ENA_MASK (0x7 << GEN8_F2_S_ENA_SHIFT) 1899 1883 1900 1884 #define GEN9_F2_SS_DIS_SHIFT 20 1901 1885 #define GEN9_F2_SS_DIS_MASK (0xf << GEN9_F2_SS_DIS_SHIFT) 1886 + 1887 + #define GEN8_EU_DISABLE0 0x9134 1888 + #define GEN8_EU_DIS0_S0_MASK 0xffffff 1889 + #define GEN8_EU_DIS0_S1_SHIFT 24 1890 + #define GEN8_EU_DIS0_S1_MASK (0xff << GEN8_EU_DIS0_S1_SHIFT) 1891 + 1892 + #define GEN8_EU_DISABLE1 0x9138 1893 + #define GEN8_EU_DIS1_S1_MASK 0xffff 1894 + #define GEN8_EU_DIS1_S2_SHIFT 16 1895 + #define GEN8_EU_DIS1_S2_MASK (0xffff << GEN8_EU_DIS1_S2_SHIFT) 1896 + 1897 + #define GEN8_EU_DISABLE2 0x913c 1898 + #define 
GEN8_EU_DIS2_S2_MASK 0xff 1902 1899 1903 1900 #define GEN9_EU_DISABLE(slice) (0x9134 + (slice)*0x4) 1904 1901 ··· 2508 2475 #define PALETTE_A_OFFSET 0xa000 2509 2476 #define PALETTE_B_OFFSET 0xa800 2510 2477 #define CHV_PALETTE_C_OFFSET 0xc000 2511 - #define PALETTE(pipe) (dev_priv->info.palette_offsets[pipe] + \ 2512 - dev_priv->info.display_mmio_offset) 2478 + #define PALETTE(pipe, i) (dev_priv->info.palette_offsets[pipe] + \ 2479 + dev_priv->info.display_mmio_offset + (i) * 4) 2513 2480 2514 2481 /* MCH MMIO space */ 2515 2482 ··· 2840 2807 2841 2808 #define INTERVAL_1_28_US(us) (((us) * 100) >> 7) 2842 2809 #define INTERVAL_1_33_US(us) (((us) * 3) >> 2) 2810 + #define INTERVAL_0_833_US(us) (((us) * 6) / 5) 2843 2811 #define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? \ 2844 - INTERVAL_1_33_US(us) : \ 2812 + (IS_BROXTON(dev_priv) ? \ 2813 + INTERVAL_0_833_US(us) : \ 2814 + INTERVAL_1_33_US(us)) : \ 2845 2815 INTERVAL_1_28_US(us)) 2846 2816 2847 2817 /* ··· 3300 3264 #define GEN3_SDVOC 0x61160 3301 3265 #define GEN4_HDMIB GEN3_SDVOB 3302 3266 #define GEN4_HDMIC GEN3_SDVOC 3303 - #define CHV_HDMID 0x6116C 3267 + #define VLV_HDMIB (VLV_DISPLAY_BASE + GEN4_HDMIB) 3268 + #define VLV_HDMIC (VLV_DISPLAY_BASE + GEN4_HDMIC) 3269 + #define CHV_HDMID (VLV_DISPLAY_BASE + 0x6116C) 3304 3270 #define PCH_SDVOB 0xe1140 3305 3271 #define PCH_HDMIB PCH_SDVOB 3306 3272 #define PCH_HDMIC 0xe1150 ··· 3634 3596 #define UTIL_PIN_CTL 0x48400 3635 3597 #define UTIL_PIN_ENABLE (1 << 31) 3636 3598 3599 + #define UTIL_PIN_PIPE(x) ((x) << 29) 3600 + #define UTIL_PIN_PIPE_MASK (3 << 29) 3601 + #define UTIL_PIN_MODE_PWM (1 << 24) 3602 + #define UTIL_PIN_MODE_MASK (0xf << 24) 3603 + #define UTIL_PIN_POLARITY (1 << 22) 3604 + 3637 3605 /* BXT backlight register definition. 
*/ 3638 - #define BXT_BLC_PWM_CTL1 0xC8250 3606 + #define _BXT_BLC_PWM_CTL1 0xC8250 3639 3607 #define BXT_BLC_PWM_ENABLE (1 << 31) 3640 3608 #define BXT_BLC_PWM_POLARITY (1 << 29) 3641 - #define BXT_BLC_PWM_FREQ1 0xC8254 3642 - #define BXT_BLC_PWM_DUTY1 0xC8258 3609 + #define _BXT_BLC_PWM_FREQ1 0xC8254 3610 + #define _BXT_BLC_PWM_DUTY1 0xC8258 3643 3611 3644 - #define BXT_BLC_PWM_CTL2 0xC8350 3645 - #define BXT_BLC_PWM_FREQ2 0xC8354 3646 - #define BXT_BLC_PWM_DUTY2 0xC8358 3612 + #define _BXT_BLC_PWM_CTL2 0xC8350 3613 + #define _BXT_BLC_PWM_FREQ2 0xC8354 3614 + #define _BXT_BLC_PWM_DUTY2 0xC8358 3647 3615 3616 + #define BXT_BLC_PWM_CTL(controller) _PIPE(controller, \ 3617 + _BXT_BLC_PWM_CTL1, _BXT_BLC_PWM_CTL2) 3618 + #define BXT_BLC_PWM_FREQ(controller) _PIPE(controller, \ 3619 + _BXT_BLC_PWM_FREQ1, _BXT_BLC_PWM_FREQ2) 3620 + #define BXT_BLC_PWM_DUTY(controller) _PIPE(controller, \ 3621 + _BXT_BLC_PWM_DUTY1, _BXT_BLC_PWM_DUTY2) 3648 3622 3649 3623 #define PCH_GTC_CTL 0xe7000 3650 3624 #define PCH_GTC_ENABLE (1 << 31) ··· 4142 4092 #define DP_B 0x64100 4143 4093 #define DP_C 0x64200 4144 4094 #define DP_D 0x64300 4095 + 4096 + #define VLV_DP_B (VLV_DISPLAY_BASE + DP_B) 4097 + #define VLV_DP_C (VLV_DISPLAY_BASE + DP_C) 4098 + #define CHV_DP_D (VLV_DISPLAY_BASE + DP_D) 4145 4099 4146 4100 #define DP_PORT_EN (1 << 31) 4147 4101 #define DP_PIPEB_SELECT (1 << 30) ··· 5685 5631 /* legacy palette */ 5686 5632 #define _LGC_PALETTE_A 0x4a000 5687 5633 #define _LGC_PALETTE_B 0x4a800 5688 - #define LGC_PALETTE(pipe) _PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) 5634 + #define LGC_PALETTE(pipe, i) (_PIPE(pipe, _LGC_PALETTE_A, _LGC_PALETTE_B) + (i) * 4) 5689 5635 5690 5636 #define _GAMMA_MODE_A 0x4a480 5691 5637 #define _GAMMA_MODE_B 0x4ac80 ··· 6922 6868 #define GEN6_RC6 3 6923 6869 #define GEN6_RC7 4 6924 6870 6871 + #define GEN8_GT_SLICE_INFO 0x138064 6872 + #define GEN8_LSLICESTAT_MASK 0x7 6873 + 6925 6874 #define CHV_POWER_SS0_SIG1 0xa720 6926 6875 #define CHV_POWER_SS1_SIG1 
0xa728 6927 6876 #define CHV_SS_PG_ENABLE (1<<1) ··· 7460 7403 #define DPLL_CFGCR2_PDIV_7 (4<<2) 7461 7404 #define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3) 7462 7405 7463 - #define GET_CFG_CR1_REG(id) (DPLL1_CFGCR1 + (id - SKL_DPLL1) * 8) 7464 - #define GET_CFG_CR2_REG(id) (DPLL1_CFGCR2 + (id - SKL_DPLL1) * 8) 7406 + #define DPLL_CFGCR1(id) (DPLL1_CFGCR1 + ((id) - SKL_DPLL1) * 8) 7407 + #define DPLL_CFGCR2(id) (DPLL1_CFGCR2 + ((id) - SKL_DPLL1) * 8) 7465 7408 7466 7409 /* BXT display engine PLL */ 7467 7410 #define BXT_DE_PLL_CTL 0x6d000 ··· 7566 7509 7567 7510 #define _MIPI_PORT(port, a, c) _PORT3(port, a, 0, c) /* ports A and C only */ 7568 7511 7512 + /* BXT MIPI clock controls */ 7513 + #define BXT_MAX_VAR_OUTPUT_KHZ 39500 7514 + 7515 + #define BXT_MIPI_CLOCK_CTL 0x46090 7516 + #define BXT_MIPI1_DIV_SHIFT 26 7517 + #define BXT_MIPI2_DIV_SHIFT 10 7518 + #define BXT_MIPI_DIV_SHIFT(port) \ 7519 + _MIPI_PORT(port, BXT_MIPI1_DIV_SHIFT, \ 7520 + BXT_MIPI2_DIV_SHIFT) 7521 + /* Var clock divider to generate TX source. 
Result must be < 39.5 M */ 7522 + #define BXT_MIPI1_ESCLK_VAR_DIV_MASK (0x3F << 26) 7523 + #define BXT_MIPI2_ESCLK_VAR_DIV_MASK (0x3F << 10) 7524 + #define BXT_MIPI_ESCLK_VAR_DIV_MASK(port) \ 7525 + _MIPI_PORT(port, BXT_MIPI1_ESCLK_VAR_DIV_MASK, \ 7526 + BXT_MIPI2_ESCLK_VAR_DIV_MASK) 7527 + 7528 + #define BXT_MIPI_ESCLK_VAR_DIV(port, val) \ 7529 + (val << BXT_MIPI_DIV_SHIFT(port)) 7530 + /* TX control divider to select actual TX clock output from (8x/var) */ 7531 + #define BXT_MIPI1_TX_ESCLK_SHIFT 21 7532 + #define BXT_MIPI2_TX_ESCLK_SHIFT 5 7533 + #define BXT_MIPI_TX_ESCLK_SHIFT(port) \ 7534 + _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_SHIFT, \ 7535 + BXT_MIPI2_TX_ESCLK_SHIFT) 7536 + #define BXT_MIPI1_TX_ESCLK_FIXDIV_MASK (3 << 21) 7537 + #define BXT_MIPI2_TX_ESCLK_FIXDIV_MASK (3 << 5) 7538 + #define BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port) \ 7539 + _MIPI_PORT(port, BXT_MIPI1_TX_ESCLK_FIXDIV_MASK, \ 7540 + BXT_MIPI2_TX_ESCLK_FIXDIV_MASK) 7541 + #define BXT_MIPI_TX_ESCLK_8XDIV_BY2(port) \ 7542 + (0x0 << BXT_MIPI_TX_ESCLK_SHIFT(port)) 7543 + #define BXT_MIPI_TX_ESCLK_8XDIV_BY4(port) \ 7544 + (0x1 << BXT_MIPI_TX_ESCLK_SHIFT(port)) 7545 + #define BXT_MIPI_TX_ESCLK_8XDIV_BY8(port) \ 7546 + (0x2 << BXT_MIPI_TX_ESCLK_SHIFT(port)) 7547 + /* RX control divider to select actual RX clock output from 8x*/ 7548 + #define BXT_MIPI1_RX_ESCLK_SHIFT 19 7549 + #define BXT_MIPI2_RX_ESCLK_SHIFT 3 7550 + #define BXT_MIPI_RX_ESCLK_SHIFT(port) \ 7551 + _MIPI_PORT(port, BXT_MIPI1_RX_ESCLK_SHIFT, \ 7552 + BXT_MIPI2_RX_ESCLK_SHIFT) 7553 + #define BXT_MIPI1_RX_ESCLK_FIXDIV_MASK (3 << 19) 7554 + #define BXT_MIPI2_RX_ESCLK_FIXDIV_MASK (3 << 3) 7555 + #define BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port) \ 7556 + (3 << BXT_MIPI_RX_ESCLK_SHIFT(port)) 7557 + #define BXT_MIPI_RX_ESCLK_8X_BY2(port) \ 7558 + (1 << BXT_MIPI_RX_ESCLK_SHIFT(port)) 7559 + #define BXT_MIPI_RX_ESCLK_8X_BY3(port) \ 7560 + (2 << BXT_MIPI_RX_ESCLK_SHIFT(port)) 7561 + #define BXT_MIPI_RX_ESCLK_8X_BY4(port) \ 7562 + (3 << 
BXT_MIPI_RX_ESCLK_SHIFT(port)) 7563 + /* BXT-A WA: Always prog DPHY dividers to 00 */ 7564 + #define BXT_MIPI1_DPHY_DIV_SHIFT 16 7565 + #define BXT_MIPI2_DPHY_DIV_SHIFT 0 7566 + #define BXT_MIPI_DPHY_DIV_SHIFT(port) \ 7567 + _MIPI_PORT(port, BXT_MIPI1_DPHY_DIV_SHIFT, \ 7568 + BXT_MIPI2_DPHY_DIV_SHIFT) 7569 + #define BXT_MIPI_1_DPHY_DIVIDER_MASK (3 << 16) 7570 + #define BXT_MIPI_2_DPHY_DIVIDER_MASK (3 << 0) 7571 + #define BXT_MIPI_DPHY_DIVIDER_MASK(port) \ 7572 + (3 << BXT_MIPI_DPHY_DIV_SHIFT(port)) 7573 + 7569 7574 /* BXT MIPI mode configure */ 7570 7575 #define _BXT_MIPIA_TRANS_HACTIVE 0x6B0F8 7571 7576 #define _BXT_MIPIC_TRANS_HACTIVE 0x6B8F8 ··· 7669 7550 #define _MIPIA_PORT_CTRL (VLV_DISPLAY_BASE + 0x61190) 7670 7551 #define _MIPIC_PORT_CTRL (VLV_DISPLAY_BASE + 0x61700) 7671 7552 #define MIPI_PORT_CTRL(port) _MIPI_PORT(port, _MIPIA_PORT_CTRL, _MIPIC_PORT_CTRL) 7553 + 7554 + /* BXT port control */ 7555 + #define _BXT_MIPIA_PORT_CTRL 0x6B0C0 7556 + #define _BXT_MIPIC_PORT_CTRL 0x6B8C0 7557 + #define BXT_MIPI_PORT_CTRL(tc) _MIPI_PORT(tc, _BXT_MIPIA_PORT_CTRL, \ 7558 + _BXT_MIPIC_PORT_CTRL) 7559 + 7672 7560 #define DPI_ENABLE (1 << 31) /* A + C */ 7673 7561 #define MIPIA_MIPI4DPHY_DELAY_COUNT_SHIFT 27 7674 7562 #define MIPIA_MIPI4DPHY_DELAY_COUNT_MASK (0xf << 27)
+6 -28
drivers/gpu/drm/i915/i915_sysfs.c
··· 39 39 { 40 40 struct drm_i915_private *dev_priv = dev->dev_private; 41 41 u64 raw_time; /* 32b value may overflow during fixed point math */ 42 - u64 units = 128ULL, div = 100000ULL, bias = 100ULL; 42 + u64 units = 128ULL, div = 100000ULL; 43 43 u32 ret; 44 44 45 45 if (!intel_enable_rc6(dev)) ··· 49 49 50 50 /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ 51 51 if (IS_VALLEYVIEW(dev)) { 52 - u32 clk_reg, czcount_30ns; 53 - 54 - if (IS_CHERRYVIEW(dev)) 55 - clk_reg = CHV_CLK_CTL1; 56 - else 57 - clk_reg = VLV_CLK_CTL2; 58 - 59 - czcount_30ns = I915_READ(clk_reg) >> CLK_CTL2_CZCOUNT_30NS_SHIFT; 60 - 61 - if (!czcount_30ns) { 62 - WARN(!czcount_30ns, "bogus CZ count value"); 63 - ret = 0; 64 - goto out; 65 - } 66 - 67 - if (IS_CHERRYVIEW(dev) && czcount_30ns == 1) { 68 - /* Special case for 320Mhz */ 69 - div = 10000000ULL; 70 - units = 3125ULL; 71 - } else { 72 - czcount_30ns += 1; 73 - div = 1000000ULL; 74 - units = DIV_ROUND_UP_ULL(30ULL * bias, czcount_30ns); 75 - } 52 + units = 1; 53 + div = dev_priv->czclk_freq; 76 54 77 55 if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH) 78 56 units <<= 8; 79 - 80 - div = div * bias; 57 + } else if (IS_BROXTON(dev)) { 58 + units = 1; 59 + div = 1200; /* 833.33ns */ 81 60 } 82 61 83 62 raw_time = I915_READ(reg) * units; 84 63 ret = DIV_ROUND_UP_ULL(raw_time, div); 85 64 86 - out: 87 65 intel_runtime_pm_put(dev_priv); 88 66 return ret; 89 67 }
+20
drivers/gpu/drm/i915/i915_trace.h
··· 107 107 TP_printk("obj=%p, size=%u", __entry->obj, __entry->size) 108 108 ); 109 109 110 + TRACE_EVENT(i915_gem_shrink, 111 + TP_PROTO(struct drm_i915_private *i915, unsigned long target, unsigned flags), 112 + TP_ARGS(i915, target, flags), 113 + 114 + TP_STRUCT__entry( 115 + __field(int, dev) 116 + __field(unsigned long, target) 117 + __field(unsigned, flags) 118 + ), 119 + 120 + TP_fast_assign( 121 + __entry->dev = i915->dev->primary->index; 122 + __entry->target = target; 123 + __entry->flags = flags; 124 + ), 125 + 126 + TP_printk("dev=%d, target=%lu, flags=%x", 127 + __entry->dev, __entry->target, __entry->flags) 128 + ); 129 + 110 130 TRACE_EVENT(i915_vma_bind, 111 131 TP_PROTO(struct i915_vma *vma, unsigned flags), 112 132 TP_ARGS(vma, flags),
+1
drivers/gpu/drm/i915/intel_atomic.c
··· 94 94 __drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base); 95 95 96 96 crtc_state->update_pipe = false; 97 + crtc_state->disable_lp_wm = false; 97 98 98 99 return &crtc_state->base; 99 100 }
+196 -12
drivers/gpu/drm/i915/intel_audio.c
··· 50 50 * co-operation between the graphics and audio drivers is handled via audio 51 51 * related registers. (The notable exception is the power management, not 52 52 * covered here.) 53 + * 54 + * The struct i915_audio_component is used to interact between the graphics 55 + * and audio drivers. The struct i915_audio_component_ops *ops in it is 56 + * defined in graphics driver and called in audio driver. The 57 + * struct i915_audio_component_audio_ops *audio_ops is called from i915 driver. 53 58 */ 54 59 55 60 static const struct { ··· 73 68 { 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 }, 74 69 }; 75 70 71 + /* HDMI N/CTS table */ 72 + #define TMDS_297M 297000 73 + #define TMDS_296M DIV_ROUND_UP(297000 * 1000, 1001) 74 + static const struct { 75 + int sample_rate; 76 + int clock; 77 + int n; 78 + int cts; 79 + } aud_ncts[] = { 80 + { 44100, TMDS_296M, 4459, 234375 }, 81 + { 44100, TMDS_297M, 4704, 247500 }, 82 + { 48000, TMDS_296M, 5824, 281250 }, 83 + { 48000, TMDS_297M, 5120, 247500 }, 84 + { 32000, TMDS_296M, 5824, 421875 }, 85 + { 32000, TMDS_297M, 3072, 222750 }, 86 + { 88200, TMDS_296M, 8918, 234375 }, 87 + { 88200, TMDS_297M, 9408, 247500 }, 88 + { 96000, TMDS_296M, 11648, 281250 }, 89 + { 96000, TMDS_297M, 10240, 247500 }, 90 + { 176400, TMDS_296M, 17836, 234375 }, 91 + { 176400, TMDS_297M, 18816, 247500 }, 92 + { 192000, TMDS_296M, 23296, 281250 }, 93 + { 192000, TMDS_297M, 20480, 247500 }, 94 + }; 95 + 76 96 /* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */ 77 - static u32 audio_config_hdmi_pixel_clock(struct drm_display_mode *mode) 97 + static u32 audio_config_hdmi_pixel_clock(const struct drm_display_mode *adjusted_mode) 78 98 { 79 99 int i; 80 100 81 101 for (i = 0; i < ARRAY_SIZE(hdmi_audio_clock); i++) { 82 - if (mode->clock == hdmi_audio_clock[i].clock) 102 + if (adjusted_mode->crtc_clock == hdmi_audio_clock[i].clock) 83 103 break; 84 104 } 85 105 86 106 if (i == ARRAY_SIZE(hdmi_audio_clock)) { 87 - DRM_DEBUG_KMS("HDMI audio pixel clock 
setting for %d not found, falling back to defaults\n", mode->clock); 107 + DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n", 108 + adjusted_mode->crtc_clock); 88 109 i = 1; 89 110 } 90 111 ··· 119 88 hdmi_audio_clock[i].config); 120 89 121 90 return hdmi_audio_clock[i].config; 91 + } 92 + 93 + static int audio_config_get_n(const struct drm_display_mode *mode, int rate) 94 + { 95 + int i; 96 + 97 + for (i = 0; i < ARRAY_SIZE(aud_ncts); i++) { 98 + if ((rate == aud_ncts[i].sample_rate) && 99 + (mode->clock == aud_ncts[i].clock)) { 100 + return aud_ncts[i].n; 101 + } 102 + } 103 + return 0; 104 + } 105 + 106 + static uint32_t audio_config_setup_n_reg(int n, uint32_t val) 107 + { 108 + int n_low, n_up; 109 + uint32_t tmp = val; 110 + 111 + n_low = n & 0xfff; 112 + n_up = (n >> 12) & 0xff; 113 + tmp &= ~(AUD_CONFIG_UPPER_N_MASK | AUD_CONFIG_LOWER_N_MASK); 114 + tmp |= ((n_up << AUD_CONFIG_UPPER_N_SHIFT) | 115 + (n_low << AUD_CONFIG_LOWER_N_SHIFT) | 116 + AUD_CONFIG_N_PROG_ENABLE); 117 + return tmp; 118 + } 119 + 120 + /* check whether N/CTS/M need be set manually */ 121 + static bool audio_rate_need_prog(struct intel_crtc *crtc, 122 + const struct drm_display_mode *mode) 123 + { 124 + if (((mode->clock == TMDS_297M) || 125 + (mode->clock == TMDS_296M)) && 126 + intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) 127 + return true; 128 + else 129 + return false; 122 130 } 123 131 124 132 static bool intel_eld_uptodate(struct drm_connector *connector, ··· 208 138 209 139 static void g4x_audio_codec_enable(struct drm_connector *connector, 210 140 struct intel_encoder *encoder, 211 - struct drm_display_mode *mode) 141 + const struct drm_display_mode *adjusted_mode) 212 142 { 213 143 struct drm_i915_private *dev_priv = connector->dev->dev_private; 214 144 uint8_t *eld = connector->eld; ··· 254 184 255 185 DRM_DEBUG_KMS("Disable audio codec on pipe %c\n", pipe_name(pipe)); 256 186 187 + mutex_lock(&dev_priv->av_mutex); 188 + 257 189 /* 
Disable timestamps */ 258 190 tmp = I915_READ(HSW_AUD_CFG(pipe)); 259 191 tmp &= ~AUD_CONFIG_N_VALUE_INDEX; ··· 271 199 tmp &= ~AUDIO_ELD_VALID(pipe); 272 200 tmp &= ~AUDIO_OUTPUT_ENABLE(pipe); 273 201 I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp); 202 + 203 + mutex_unlock(&dev_priv->av_mutex); 274 204 } 275 205 276 206 static void hsw_audio_codec_enable(struct drm_connector *connector, 277 207 struct intel_encoder *encoder, 278 - struct drm_display_mode *mode) 208 + const struct drm_display_mode *adjusted_mode) 279 209 { 280 210 struct drm_i915_private *dev_priv = connector->dev->dev_private; 281 211 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 282 212 enum pipe pipe = intel_crtc->pipe; 213 + struct i915_audio_component *acomp = dev_priv->audio_component; 283 214 const uint8_t *eld = connector->eld; 215 + struct intel_digital_port *intel_dig_port = 216 + enc_to_dig_port(&encoder->base); 217 + enum port port = intel_dig_port->port; 284 218 uint32_t tmp; 285 219 int len, i; 220 + int n, rate; 286 221 287 222 DRM_DEBUG_KMS("Enable audio codec on pipe %c, %u bytes ELD\n", 288 223 pipe_name(pipe), drm_eld_size(eld)); 224 + 225 + mutex_lock(&dev_priv->av_mutex); 289 226 290 227 /* Enable audio presence detect, invalidate ELD */ 291 228 tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD); ··· 327 246 /* Enable timestamps */ 328 247 tmp = I915_READ(HSW_AUD_CFG(pipe)); 329 248 tmp &= ~AUD_CONFIG_N_VALUE_INDEX; 330 - tmp &= ~AUD_CONFIG_N_PROG_ENABLE; 331 249 tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK; 332 250 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) 333 251 tmp |= AUD_CONFIG_N_VALUE_INDEX; 334 252 else 335 - tmp |= audio_config_hdmi_pixel_clock(mode); 253 + tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); 254 + 255 + tmp &= ~AUD_CONFIG_N_PROG_ENABLE; 256 + if (audio_rate_need_prog(intel_crtc, adjusted_mode)) { 257 + if (!acomp) 258 + rate = 0; 259 + else if (port >= PORT_A && port <= PORT_E) 260 + rate = acomp->aud_sample_rate[port]; 261 + else { 
262 + DRM_ERROR("invalid port: %d\n", port); 263 + rate = 0; 264 + } 265 + n = audio_config_get_n(adjusted_mode, rate); 266 + if (n != 0) 267 + tmp = audio_config_setup_n_reg(n, tmp); 268 + else 269 + DRM_DEBUG_KMS("no suitable N value is found\n"); 270 + } 271 + 336 272 I915_WRITE(HSW_AUD_CFG(pipe), tmp); 273 + 274 + mutex_unlock(&dev_priv->av_mutex); 337 275 } 338 276 339 277 static void ilk_audio_codec_disable(struct intel_encoder *encoder) ··· 404 304 405 305 static void ilk_audio_codec_enable(struct drm_connector *connector, 406 306 struct intel_encoder *encoder, 407 - struct drm_display_mode *mode) 307 + const struct drm_display_mode *adjusted_mode) 408 308 { 409 309 struct drm_i915_private *dev_priv = connector->dev->dev_private; 410 310 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); ··· 481 381 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DISPLAYPORT)) 482 382 tmp |= AUD_CONFIG_N_VALUE_INDEX; 483 383 else 484 - tmp |= audio_config_hdmi_pixel_clock(mode); 384 + tmp |= audio_config_hdmi_pixel_clock(adjusted_mode); 485 385 I915_WRITE(aud_config, tmp); 486 386 } 487 387 ··· 496 396 { 497 397 struct drm_encoder *encoder = &intel_encoder->base; 498 398 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc); 499 - struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; 399 + const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 500 400 struct drm_connector *connector; 501 401 struct drm_device *dev = encoder->dev; 502 402 struct drm_i915_private *dev_priv = dev->dev_private; ··· 519 419 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) 520 420 connector->eld[5] |= (1 << 2); 521 421 522 - connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; 422 + connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2; 523 423 524 424 if (dev_priv->display.audio_codec_enable) 525 - dev_priv->display.audio_codec_enable(connector, intel_encoder, mode); 425 + 
dev_priv->display.audio_codec_enable(connector, intel_encoder, 426 + adjusted_mode); 526 427 527 428 if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) 528 429 acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, (int) port); ··· 628 527 return ret; 629 528 } 630 529 530 + static int i915_audio_component_sync_audio_rate(struct device *dev, 531 + int port, int rate) 532 + { 533 + struct drm_i915_private *dev_priv = dev_to_i915(dev); 534 + struct drm_device *drm_dev = dev_priv->dev; 535 + struct intel_encoder *intel_encoder; 536 + struct intel_digital_port *intel_dig_port; 537 + struct intel_crtc *crtc; 538 + struct drm_display_mode *mode; 539 + struct i915_audio_component *acomp = dev_priv->audio_component; 540 + enum pipe pipe = -1; 541 + u32 tmp; 542 + int n; 543 + 544 + /* HSW, BDW SKL need this fix */ 545 + if (!IS_SKYLAKE(dev_priv) && 546 + !IS_BROADWELL(dev_priv) && 547 + !IS_HASWELL(dev_priv)) 548 + return 0; 549 + 550 + mutex_lock(&dev_priv->av_mutex); 551 + /* 1. get the pipe */ 552 + for_each_intel_encoder(drm_dev, intel_encoder) { 553 + if (intel_encoder->type != INTEL_OUTPUT_HDMI) 554 + continue; 555 + intel_dig_port = enc_to_dig_port(&intel_encoder->base); 556 + if (port == intel_dig_port->port) { 557 + crtc = to_intel_crtc(intel_encoder->base.crtc); 558 + if (!crtc) { 559 + DRM_DEBUG_KMS("%s: crtc is NULL\n", __func__); 560 + continue; 561 + } 562 + pipe = crtc->pipe; 563 + break; 564 + } 565 + } 566 + 567 + if (pipe == INVALID_PIPE) { 568 + DRM_DEBUG_KMS("no pipe for the port %c\n", port_name(port)); 569 + mutex_unlock(&dev_priv->av_mutex); 570 + return -ENODEV; 571 + } 572 + DRM_DEBUG_KMS("pipe %c connects port %c\n", 573 + pipe_name(pipe), port_name(port)); 574 + mode = &crtc->config->base.adjusted_mode; 575 + 576 + /* port must be valid now, otherwise the pipe will be invalid */ 577 + acomp->aud_sample_rate[port] = rate; 578 + 579 + /* 2. 
check whether to set the N/CTS/M manually or not */ 580 + if (!audio_rate_need_prog(crtc, mode)) { 581 + tmp = I915_READ(HSW_AUD_CFG(pipe)); 582 + tmp &= ~AUD_CONFIG_N_PROG_ENABLE; 583 + I915_WRITE(HSW_AUD_CFG(pipe), tmp); 584 + mutex_unlock(&dev_priv->av_mutex); 585 + return 0; 586 + } 587 + 588 + n = audio_config_get_n(mode, rate); 589 + if (n == 0) { 590 + DRM_DEBUG_KMS("Using automatic mode for N value on port %c\n", 591 + port_name(port)); 592 + tmp = I915_READ(HSW_AUD_CFG(pipe)); 593 + tmp &= ~AUD_CONFIG_N_PROG_ENABLE; 594 + I915_WRITE(HSW_AUD_CFG(pipe), tmp); 595 + mutex_unlock(&dev_priv->av_mutex); 596 + return 0; 597 + } 598 + 599 + /* 3. set the N/CTS/M */ 600 + tmp = I915_READ(HSW_AUD_CFG(pipe)); 601 + tmp = audio_config_setup_n_reg(n, tmp); 602 + I915_WRITE(HSW_AUD_CFG(pipe), tmp); 603 + 604 + mutex_unlock(&dev_priv->av_mutex); 605 + return 0; 606 + } 607 + 631 608 static const struct i915_audio_component_ops i915_audio_component_ops = { 632 609 .owner = THIS_MODULE, 633 610 .get_power = i915_audio_component_get_power, 634 611 .put_power = i915_audio_component_put_power, 635 612 .codec_wake_override = i915_audio_component_codec_wake_override, 636 613 .get_cdclk_freq = i915_audio_component_get_cdclk_freq, 614 + .sync_audio_rate = i915_audio_component_sync_audio_rate, 637 615 }; 638 616 639 617 static int i915_audio_component_bind(struct device *i915_dev, ··· 720 540 { 721 541 struct i915_audio_component *acomp = data; 722 542 struct drm_i915_private *dev_priv = dev_to_i915(i915_dev); 543 + int i; 723 544 724 545 if (WARN_ON(acomp->ops || acomp->dev)) 725 546 return -EEXIST; ··· 728 547 drm_modeset_lock_all(dev_priv->dev); 729 548 acomp->ops = &i915_audio_component_ops; 730 549 acomp->dev = i915_dev; 550 + BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS); 551 + for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++) 552 + acomp->aud_sample_rate[i] = 0; 731 553 dev_priv->audio_component = acomp; 732 554 drm_modeset_unlock_all(dev_priv->dev); 733 555
+2 -2
drivers/gpu/drm/i915/intel_crt.c
··· 158 158 struct drm_i915_private *dev_priv = dev->dev_private; 159 159 struct intel_crt *crt = intel_encoder_to_crt(encoder); 160 160 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 161 - struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 161 + const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 162 162 u32 adpa; 163 163 164 164 if (INTEL_INFO(dev)->gen >= 5) ··· 890 890 u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT | 891 891 FDI_RX_LINK_REVERSAL_OVERRIDE; 892 892 893 - dev_priv->fdi_rx_config = I915_READ(_FDI_RXA_CTL) & fdi_config; 893 + dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config; 894 894 } 895 895 896 896 intel_crt_reset(connector);
+9
drivers/gpu/drm/i915/intel_csr.c
··· 265 265 return; 266 266 } 267 267 268 + /* 269 + * FIXME: Firmware gets lost on S3/S4, but not when entering system 270 + * standby or suspend-to-idle (which is just like forced runtime pm). 271 + * Unfortunately the ACPI subsystem doesn't yet give us a way to 272 + * differentiate this, hence figure it out with this hack. 273 + */ 274 + if (I915_READ(CSR_PROGRAM(0))) 275 + return; 276 + 268 277 mutex_lock(&dev_priv->csr_lock); 269 278 fw_size = dev_priv->csr.dmc_fw_size; 270 279 for (i = 0; i < fw_size; i++)
+74 -48
drivers/gpu/drm/i915/intel_ddi.c
··· 256 256 bool default_index; /* true if the entry represents default value */ 257 257 }; 258 258 259 - /* BSpec does not define separate vswing/pre-emphasis values for eDP. 260 - * Using DP values for eDP as well. 261 - */ 262 259 static const struct bxt_ddi_buf_trans bxt_ddi_translations_dp[] = { 263 260 /* Idx NT mV diff db */ 264 261 { 52, 0x9A, 0, 128, true }, /* 0: 400 0 */ ··· 268 271 { 102, 0x9A, 0, 128, false }, /* 7: 800 0 */ 269 272 { 154, 0x9A, 0, 85, false }, /* 8: 800 3.5 */ 270 273 { 154, 0x9A, 1, 128, false }, /* 9: 1200 0 */ 274 + }; 275 + 276 + static const struct bxt_ddi_buf_trans bxt_ddi_translations_edp[] = { 277 + /* Idx NT mV diff db */ 278 + { 26, 0, 0, 128, false }, /* 0: 200 0 */ 279 + { 38, 0, 0, 112, false }, /* 1: 200 1.5 */ 280 + { 48, 0, 0, 96, false }, /* 2: 200 4 */ 281 + { 54, 0, 0, 69, false }, /* 3: 200 6 */ 282 + { 32, 0, 0, 128, false }, /* 4: 250 0 */ 283 + { 48, 0, 0, 104, false }, /* 5: 250 1.5 */ 284 + { 54, 0, 0, 85, false }, /* 6: 250 4 */ 285 + { 43, 0, 0, 128, false }, /* 7: 300 0 */ 286 + { 54, 0, 0, 101, false }, /* 8: 300 1.5 */ 287 + { 48, 0, 0, 128, false }, /* 9: 300 0 */ 271 288 }; 272 289 273 290 /* BSpec has 2 recommended values - entries 0 and 8. 
··· 309 298 enum port *port) 310 299 { 311 300 struct drm_encoder *encoder = &intel_encoder->base; 312 - int type = intel_encoder->type; 313 301 314 - if (type == INTEL_OUTPUT_DP_MST) { 302 + switch (intel_encoder->type) { 303 + case INTEL_OUTPUT_DP_MST: 315 304 *dig_port = enc_to_mst(encoder)->primary; 316 305 *port = (*dig_port)->port; 317 - } else if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP || 318 - type == INTEL_OUTPUT_HDMI || type == INTEL_OUTPUT_UNKNOWN) { 306 + break; 307 + case INTEL_OUTPUT_DISPLAYPORT: 308 + case INTEL_OUTPUT_EDP: 309 + case INTEL_OUTPUT_HDMI: 310 + case INTEL_OUTPUT_UNKNOWN: 319 311 *dig_port = enc_to_dig_port(encoder); 320 312 *port = (*dig_port)->port; 321 - } else if (type == INTEL_OUTPUT_ANALOG) { 313 + break; 314 + case INTEL_OUTPUT_ANALOG: 322 315 *dig_port = NULL; 323 316 *port = PORT_E; 324 - } else { 325 - DRM_ERROR("Invalid DDI encoder type %d\n", type); 326 - BUG(); 317 + break; 318 + default: 319 + WARN(1, "Invalid DDI encoder type %d\n", intel_encoder->type); 320 + break; 327 321 } 328 322 } 329 323 ··· 558 542 enum port port; 559 543 bool supports_hdmi; 560 544 561 - ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port); 545 + if (intel_encoder->type == INTEL_OUTPUT_DSI) 546 + continue; 562 547 548 + ddi_get_encoder_port(intel_encoder, &intel_dig_port, &port); 563 549 if (visited[port]) 564 550 continue; 565 551 ··· 610 592 * 611 593 * WaFDIAutoLinkSetTimingOverrride:hsw 612 594 */ 613 - I915_WRITE(_FDI_RXA_MISC, FDI_RX_PWRDN_LANE1_VAL(2) | 595 + I915_WRITE(FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) | 614 596 FDI_RX_PWRDN_LANE0_VAL(2) | 615 597 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90); 616 598 ··· 618 600 rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE | 619 601 FDI_RX_PLL_ENABLE | 620 602 FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes); 621 - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 622 - POSTING_READ(_FDI_RXA_CTL); 603 + I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); 604 + 
POSTING_READ(FDI_RX_CTL(PIPE_A)); 623 605 udelay(220); 624 606 625 607 /* Switch from Rawclk to PCDclk */ 626 608 rx_ctl_val |= FDI_PCDCLK; 627 - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 609 + I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); 628 610 629 611 /* Configure Port Clock Select */ 630 612 I915_WRITE(PORT_CLK_SEL(PORT_E), intel_crtc->config->ddi_pll_sel); ··· 653 635 udelay(600); 654 636 655 637 /* Program PCH FDI Receiver TU */ 656 - I915_WRITE(_FDI_RXA_TUSIZE1, TU_SIZE(64)); 638 + I915_WRITE(FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64)); 657 639 658 640 /* Enable PCH FDI Receiver with auto-training */ 659 641 rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO; 660 - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 661 - POSTING_READ(_FDI_RXA_CTL); 642 + I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); 643 + POSTING_READ(FDI_RX_CTL(PIPE_A)); 662 644 663 645 /* Wait for FDI receiver lane calibration */ 664 646 udelay(30); 665 647 666 648 /* Unset FDI_RX_MISC pwrdn lanes */ 667 - temp = I915_READ(_FDI_RXA_MISC); 649 + temp = I915_READ(FDI_RX_MISC(PIPE_A)); 668 650 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); 669 - I915_WRITE(_FDI_RXA_MISC, temp); 670 - POSTING_READ(_FDI_RXA_MISC); 651 + I915_WRITE(FDI_RX_MISC(PIPE_A), temp); 652 + POSTING_READ(FDI_RX_MISC(PIPE_A)); 671 653 672 654 /* Wait for FDI auto training time */ 673 655 udelay(5); ··· 701 683 intel_wait_ddi_buf_idle(dev_priv, PORT_E); 702 684 703 685 rx_ctl_val &= ~FDI_RX_ENABLE; 704 - I915_WRITE(_FDI_RXA_CTL, rx_ctl_val); 705 - POSTING_READ(_FDI_RXA_CTL); 686 + I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val); 687 + POSTING_READ(FDI_RX_CTL(PIPE_A)); 706 688 707 689 /* Reset FDI_RX_MISC pwrdn lanes */ 708 - temp = I915_READ(_FDI_RXA_MISC); 690 + temp = I915_READ(FDI_RX_MISC(PIPE_A)); 709 691 temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); 710 692 temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); 711 - I915_WRITE(_FDI_RXA_MISC, temp); 712 - POSTING_READ(_FDI_RXA_MISC); 693 + 
I915_WRITE(FDI_RX_MISC(PIPE_A), temp); 694 + POSTING_READ(FDI_RX_MISC(PIPE_A)); 713 695 } 714 696 715 697 DRM_ERROR("FDI link training failed!\n"); ··· 971 953 uint32_t cfgcr1_val, cfgcr2_val; 972 954 uint32_t p0, p1, p2, dco_freq; 973 955 974 - cfgcr1_reg = GET_CFG_CR1_REG(dpll); 975 - cfgcr2_reg = GET_CFG_CR2_REG(dpll); 956 + cfgcr1_reg = DPLL_CFGCR1(dpll); 957 + cfgcr2_reg = DPLL_CFGCR2(dpll); 976 958 977 959 cfgcr1_val = I915_READ(cfgcr1_reg); 978 960 cfgcr2_val = I915_READ(cfgcr2_reg); ··· 2045 2027 void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc) 2046 2028 { 2047 2029 struct drm_crtc *crtc = &intel_crtc->base; 2048 - struct drm_i915_private *dev_priv = crtc->dev->dev_private; 2030 + struct drm_device *dev = crtc->dev; 2031 + struct drm_i915_private *dev_priv = dev->dev_private; 2049 2032 struct intel_encoder *intel_encoder = intel_ddi_get_crtc_encoder(crtc); 2050 2033 enum port port = intel_ddi_get_encoder_port(intel_encoder); 2051 2034 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; ··· 2131 2112 u32 n_entries, i; 2132 2113 uint32_t val; 2133 2114 2134 - if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) { 2115 + if (type == INTEL_OUTPUT_EDP && dev_priv->edp_low_vswing) { 2116 + n_entries = ARRAY_SIZE(bxt_ddi_translations_edp); 2117 + ddi_translations = bxt_ddi_translations_edp; 2118 + } else if (type == INTEL_OUTPUT_DISPLAYPORT 2119 + || type == INTEL_OUTPUT_EDP) { 2135 2120 n_entries = ARRAY_SIZE(bxt_ddi_translations_dp); 2136 2121 ddi_translations = bxt_ddi_translations_dp; 2137 2122 } else if (type == INTEL_OUTPUT_HDMI) { ··· 2173 2150 I915_WRITE(BXT_PORT_TX_DW2_GRP(port), val); 2174 2151 2175 2152 val = I915_READ(BXT_PORT_TX_DW3_LN0(port)); 2176 - val &= ~UNIQE_TRANGE_EN_METHOD; 2153 + val &= ~SCALE_DCOMP_METHOD; 2177 2154 if (ddi_translations[level].enable) 2178 - val |= UNIQE_TRANGE_EN_METHOD; 2155 + val |= SCALE_DCOMP_METHOD; 2156 + 2157 + if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & 
SCALE_DCOMP_METHOD)) 2158 + DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set"); 2159 + 2179 2160 I915_WRITE(BXT_PORT_TX_DW3_GRP(port), val); 2180 2161 2181 2162 val = I915_READ(BXT_PORT_TX_DW4_LN0(port)); ··· 2320 2293 2321 2294 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 2322 2295 intel_dp_start_link_train(intel_dp); 2323 - intel_dp_complete_link_train(intel_dp); 2324 2296 if (port != PORT_A || INTEL_INFO(dev)->gen >= 9) 2325 2297 intel_dp_stop_link_train(intel_dp); 2326 2298 } else if (type == INTEL_OUTPUT_HDMI) { ··· 2506 2480 { 2507 2481 /* DPLL 1 */ 2508 2482 .ctl = LCPLL2_CTL, 2509 - .cfgcr1 = DPLL1_CFGCR1, 2510 - .cfgcr2 = DPLL1_CFGCR2, 2483 + .cfgcr1 = DPLL_CFGCR1(SKL_DPLL1), 2484 + .cfgcr2 = DPLL_CFGCR2(SKL_DPLL1), 2511 2485 }, 2512 2486 { 2513 2487 /* DPLL 2 */ 2514 2488 .ctl = WRPLL_CTL1, 2515 - .cfgcr1 = DPLL2_CFGCR1, 2516 - .cfgcr2 = DPLL2_CFGCR2, 2489 + .cfgcr1 = DPLL_CFGCR1(SKL_DPLL2), 2490 + .cfgcr2 = DPLL_CFGCR2(SKL_DPLL2), 2517 2491 }, 2518 2492 { 2519 2493 /* DPLL 3 */ 2520 2494 .ctl = WRPLL_CTL2, 2521 - .cfgcr1 = DPLL3_CFGCR1, 2522 - .cfgcr2 = DPLL3_CFGCR2, 2495 + .cfgcr1 = DPLL_CFGCR1(SKL_DPLL3), 2496 + .cfgcr2 = DPLL_CFGCR2(SKL_DPLL3), 2523 2497 }, 2524 2498 }; 2525 2499 ··· 3025 2999 3026 3000 intel_ddi_post_disable(intel_encoder); 3027 3001 3028 - val = I915_READ(_FDI_RXA_CTL); 3002 + val = I915_READ(FDI_RX_CTL(PIPE_A)); 3029 3003 val &= ~FDI_RX_ENABLE; 3030 - I915_WRITE(_FDI_RXA_CTL, val); 3004 + I915_WRITE(FDI_RX_CTL(PIPE_A), val); 3031 3005 3032 - val = I915_READ(_FDI_RXA_MISC); 3006 + val = I915_READ(FDI_RX_MISC(PIPE_A)); 3033 3007 val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK); 3034 3008 val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2); 3035 - I915_WRITE(_FDI_RXA_MISC, val); 3009 + I915_WRITE(FDI_RX_MISC(PIPE_A), val); 3036 3010 3037 - val = I915_READ(_FDI_RXA_CTL); 3011 + val = I915_READ(FDI_RX_CTL(PIPE_A)); 3038 3012 val &= ~FDI_PCDCLK; 3039 - I915_WRITE(_FDI_RXA_CTL, val); 3013 + 
I915_WRITE(FDI_RX_CTL(PIPE_A), val); 3040 3014 3041 - val = I915_READ(_FDI_RXA_CTL); 3015 + val = I915_READ(FDI_RX_CTL(PIPE_A)); 3042 3016 val &= ~FDI_RX_PLL_ENABLE; 3043 - I915_WRITE(_FDI_RXA_CTL, val); 3017 + I915_WRITE(FDI_RX_CTL(PIPE_A), val); 3044 3018 } 3045 3019 3046 3020 void intel_ddi_get_config(struct intel_encoder *encoder,
+181 -137
drivers/gpu/drm/i915/intel_display.c
··· 132 132 intel_p2_t p2; 133 133 }; 134 134 135 + /* returns HPLL frequency in kHz */ 136 + static int valleyview_get_vco(struct drm_i915_private *dev_priv) 137 + { 138 + int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 139 + 140 + /* Obtain SKU information */ 141 + mutex_lock(&dev_priv->sb_lock); 142 + hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 143 + CCK_FUSE_HPLL_FREQ_MASK; 144 + mutex_unlock(&dev_priv->sb_lock); 145 + 146 + return vco_freq[hpll_freq] * 1000; 147 + } 148 + 149 + static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, 150 + const char *name, u32 reg) 151 + { 152 + u32 val; 153 + int divider; 154 + 155 + if (dev_priv->hpll_freq == 0) 156 + dev_priv->hpll_freq = valleyview_get_vco(dev_priv); 157 + 158 + mutex_lock(&dev_priv->sb_lock); 159 + val = vlv_cck_read(dev_priv, reg); 160 + mutex_unlock(&dev_priv->sb_lock); 161 + 162 + divider = val & CCK_FREQUENCY_VALUES; 163 + 164 + WARN((val & CCK_FREQUENCY_STATUS) != 165 + (divider << CCK_FREQUENCY_STATUS_SHIFT), 166 + "%s change in progress\n", name); 167 + 168 + return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1); 169 + } 170 + 135 171 int 136 172 intel_pch_rawclk(struct drm_device *dev) 137 173 { ··· 209 173 default: 210 174 return 133; 211 175 } 176 + } 177 + 178 + static void intel_update_czclk(struct drm_i915_private *dev_priv) 179 + { 180 + if (!IS_VALLEYVIEW(dev_priv)) 181 + return; 182 + 183 + dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", 184 + CCK_CZ_CLOCK_CONTROL); 185 + 186 + DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq); 212 187 } 213 188 214 189 static inline u32 /* units of 100MHz */ ··· 1342 1295 bool cur_state; 1343 1296 1344 1297 if (IS_845G(dev) || IS_I865G(dev)) 1345 - cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; 1298 + cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE; 1346 1299 else 1347 1300 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; 1348 1301 ··· 2050 2003 
assert_fdi_rx_enabled(dev_priv, TRANSCODER_A); 2051 2004 2052 2005 /* Workaround: set timing override bit. */ 2053 - val = I915_READ(_TRANSA_CHICKEN2); 2006 + val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 2054 2007 val |= TRANS_CHICKEN2_TIMING_OVERRIDE; 2055 - I915_WRITE(_TRANSA_CHICKEN2, val); 2008 + I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 2056 2009 2057 2010 val = TRANS_ENABLE; 2058 2011 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder)); ··· 2110 2063 DRM_ERROR("Failed to disable PCH transcoder\n"); 2111 2064 2112 2065 /* Workaround: clear timing override bit. */ 2113 - val = I915_READ(_TRANSA_CHICKEN2); 2066 + val = I915_READ(TRANS_CHICKEN2(PIPE_A)); 2114 2067 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE; 2115 - I915_WRITE(_TRANSA_CHICKEN2, val); 2068 + I915_WRITE(TRANS_CHICKEN2(PIPE_A), val); 2116 2069 } 2117 2070 2118 2071 /** ··· 2545 2498 struct intel_initial_plane_config *plane_config) 2546 2499 { 2547 2500 struct drm_device *dev = crtc->base.dev; 2501 + struct drm_i915_private *dev_priv = to_i915(dev); 2548 2502 struct drm_i915_gem_object *obj = NULL; 2549 2503 struct drm_mode_fb_cmd2 mode_cmd = { 0 }; 2550 2504 struct drm_framebuffer *fb = &plane_config->fb->base; ··· 2556 2508 size_aligned -= base_aligned; 2557 2509 2558 2510 if (plane_config->size == 0) 2511 + return false; 2512 + 2513 + /* If the FB is too big, just don't use it since fbdev is not very 2514 + * important and we should probably use that space with FBC or other 2515 + * features. */ 2516 + if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size) 2559 2517 return false; 2560 2518 2561 2519 obj = i915_gem_object_create_stolen_for_preallocated(dev, ··· 3131 3077 fb->pixel_format); 3132 3078 surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0); 3133 3079 3134 - /* 3135 - * FIXME: intel_plane_state->src, dst aren't set when transitional 3136 - * update_plane helpers are called from legacy paths. 3137 - * Once full atomic crtc is available, below check can be avoided. 
3138 - */ 3139 - if (drm_rect_width(&plane_state->src)) { 3140 - scaler_id = plane_state->scaler_id; 3141 - src_x = plane_state->src.x1 >> 16; 3142 - src_y = plane_state->src.y1 >> 16; 3143 - src_w = drm_rect_width(&plane_state->src) >> 16; 3144 - src_h = drm_rect_height(&plane_state->src) >> 16; 3145 - dst_x = plane_state->dst.x1; 3146 - dst_y = plane_state->dst.y1; 3147 - dst_w = drm_rect_width(&plane_state->dst); 3148 - dst_h = drm_rect_height(&plane_state->dst); 3080 + WARN_ON(drm_rect_width(&plane_state->src) == 0); 3149 3081 3150 - WARN_ON(x != src_x || y != src_y); 3151 - } else { 3152 - src_w = intel_crtc->config->pipe_src_w; 3153 - src_h = intel_crtc->config->pipe_src_h; 3154 - } 3082 + scaler_id = plane_state->scaler_id; 3083 + src_x = plane_state->src.x1 >> 16; 3084 + src_y = plane_state->src.y1 >> 16; 3085 + src_w = drm_rect_width(&plane_state->src) >> 16; 3086 + src_h = drm_rect_height(&plane_state->src) >> 16; 3087 + dst_x = plane_state->dst.x1; 3088 + dst_y = plane_state->dst.y1; 3089 + dst_w = drm_rect_width(&plane_state->dst); 3090 + dst_h = drm_rect_height(&plane_state->dst); 3091 + 3092 + WARN_ON(x != src_x || y != src_y); 3155 3093 3156 3094 if (intel_rotation_90_or_270(rotation)) { 3157 3095 /* stride = Surface height in tiles */ ··· 4438 4392 int skl_update_scaler_crtc(struct intel_crtc_state *state) 4439 4393 { 4440 4394 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc); 4441 - struct drm_display_mode *adjusted_mode = 4442 - &state->base.adjusted_mode; 4395 + const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; 4443 4396 4444 4397 DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n", 4445 4398 intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX); ··· 4446 4401 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, 4447 4402 &state->scaler_state.scaler_id, DRM_ROTATE_0, 4448 4403 state->pipe_src_w, state->pipe_src_h, 4449 - adjusted_mode->hdisplay, 
adjusted_mode->vdisplay); 4404 + adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); 4450 4405 } 4451 4406 4452 4407 /** ··· 4639 4594 struct drm_i915_private *dev_priv = dev->dev_private; 4640 4595 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 4641 4596 enum pipe pipe = intel_crtc->pipe; 4642 - int palreg = PALETTE(pipe); 4643 4597 int i; 4644 4598 bool reenable_ips = false; 4645 4599 ··· 4653 4609 assert_pll_enabled(dev_priv, pipe); 4654 4610 } 4655 4611 4656 - /* use legacy palette for Ironlake */ 4657 - if (!HAS_GMCH_DISPLAY(dev)) 4658 - palreg = LGC_PALETTE(pipe); 4659 - 4660 4612 /* Workaround : Do not read or write the pipe palette/gamma data while 4661 4613 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled. 4662 4614 */ ··· 4664 4624 } 4665 4625 4666 4626 for (i = 0; i < 256; i++) { 4667 - I915_WRITE(palreg + 4 * i, 4627 + u32 palreg; 4628 + 4629 + if (HAS_GMCH_DISPLAY(dev)) 4630 + palreg = PALETTE(pipe, i); 4631 + else 4632 + palreg = LGC_PALETTE(pipe, i); 4633 + 4634 + I915_WRITE(palreg, 4668 4635 (intel_crtc->lut_r[i] << 16) | 4669 4636 (intel_crtc->lut_g[i] << 8) | 4670 4637 intel_crtc->lut_b[i]); ··· 4804 4757 struct intel_crtc_atomic_commit *atomic = &crtc->atomic; 4805 4758 struct drm_device *dev = crtc->base.dev; 4806 4759 struct drm_i915_private *dev_priv = dev->dev_private; 4807 - struct drm_plane *plane; 4808 4760 4809 4761 if (atomic->wait_vblank) 4810 4762 intel_wait_for_vblank(dev, crtc->pipe); ··· 4821 4775 4822 4776 if (atomic->post_enable_primary) 4823 4777 intel_post_enable_primary(&crtc->base); 4824 - 4825 - drm_for_each_plane_mask(plane, dev, atomic->update_sprite_watermarks) 4826 - intel_update_sprite_watermarks(plane, &crtc->base, 4827 - 0, 0, 0, false, false); 4828 4778 4829 4779 memset(atomic, 0, sizeof(*atomic)); 4830 4780 } ··· 4964 4922 int pipe = intel_crtc->pipe, hsw_workaround_pipe; 4965 4923 struct intel_crtc_state *pipe_config = 4966 4924 to_intel_crtc_state(crtc->state); 4925 + bool 
is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); 4967 4926 4968 4927 if (WARN_ON(intel_crtc->active)) 4969 4928 return; ··· 4994 4951 intel_crtc->active = true; 4995 4952 4996 4953 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); 4997 - for_each_encoder_on_crtc(dev, crtc, encoder) 4954 + for_each_encoder_on_crtc(dev, crtc, encoder) { 4955 + if (encoder->pre_pll_enable) 4956 + encoder->pre_pll_enable(encoder); 4998 4957 if (encoder->pre_enable) 4999 4958 encoder->pre_enable(encoder); 4959 + } 5000 4960 5001 4961 if (intel_crtc->config->has_pch_encoder) { 5002 4962 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A, ··· 5007 4961 dev_priv->display.fdi_link_train(crtc); 5008 4962 } 5009 4963 5010 - intel_ddi_enable_pipe_clock(intel_crtc); 4964 + if (!is_dsi) 4965 + intel_ddi_enable_pipe_clock(intel_crtc); 5011 4966 5012 4967 if (INTEL_INFO(dev)->gen >= 9) 5013 4968 skylake_pfit_enable(intel_crtc); ··· 5022 4975 intel_crtc_load_lut(crtc); 5023 4976 5024 4977 intel_ddi_set_pipe_settings(crtc); 5025 - intel_ddi_enable_transcoder_func(crtc); 4978 + if (!is_dsi) 4979 + intel_ddi_enable_transcoder_func(crtc); 5026 4980 5027 4981 intel_update_watermarks(crtc); 5028 4982 intel_enable_pipe(intel_crtc); ··· 5031 4983 if (intel_crtc->config->has_pch_encoder) 5032 4984 lpt_pch_enable(crtc); 5033 4985 5034 - if (intel_crtc->config->dp_encoder_is_mst) 4986 + if (intel_crtc->config->dp_encoder_is_mst && !is_dsi) 5035 4987 intel_ddi_set_vc_payload_alloc(crtc, true); 5036 4988 5037 4989 assert_vblank_disabled(crtc); ··· 5115 5067 5116 5068 ironlake_fdi_pll_disable(intel_crtc); 5117 5069 } 5118 - 5119 - intel_crtc->active = false; 5120 - intel_update_watermarks(crtc); 5121 5070 } 5122 5071 5123 5072 static void haswell_crtc_disable(struct drm_crtc *crtc) ··· 5124 5079 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 5125 5080 struct intel_encoder *encoder; 5126 5081 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 5082 + bool 
is_dsi = intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_DSI); 5127 5083 5128 5084 for_each_encoder_on_crtc(dev, crtc, encoder) { 5129 5085 intel_opregion_notify_encoder(encoder, false); ··· 5142 5096 if (intel_crtc->config->dp_encoder_is_mst) 5143 5097 intel_ddi_set_vc_payload_alloc(crtc, false); 5144 5098 5145 - intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 5099 + if (!is_dsi) 5100 + intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder); 5146 5101 5147 5102 if (INTEL_INFO(dev)->gen >= 9) 5148 5103 skylake_scaler_disable(intel_crtc); 5149 5104 else 5150 5105 ironlake_pfit_disable(intel_crtc, false); 5151 5106 5152 - intel_ddi_disable_pipe_clock(intel_crtc); 5107 + if (!is_dsi) 5108 + intel_ddi_disable_pipe_clock(intel_crtc); 5153 5109 5154 5110 if (intel_crtc->config->has_pch_encoder) { 5155 5111 lpt_disable_pch_transcoder(dev_priv); ··· 5161 5113 for_each_encoder_on_crtc(dev, crtc, encoder) 5162 5114 if (encoder->post_disable) 5163 5115 encoder->post_disable(encoder); 5164 - 5165 - intel_crtc->active = false; 5166 - intel_update_watermarks(crtc); 5167 5116 } 5168 5117 5169 5118 static void i9xx_pfit_enable(struct intel_crtc *crtc) ··· 5754 5709 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) 5755 5710 DRM_ERROR("DBuf power disable timeout\n"); 5756 5711 5757 - /* disable DPLL0 */ 5758 - I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE); 5759 - if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) 5760 - DRM_ERROR("Couldn't disable DPLL0\n"); 5712 + /* 5713 + * DMC assumes ownership of LCPLL and will get confused if we touch it. 
5714 + */ 5715 + if (dev_priv->csr.dmc_payload) { 5716 + /* disable DPLL0 */ 5717 + I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & 5718 + ~LCPLL_PLL_ENABLE); 5719 + if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1)) 5720 + DRM_ERROR("Couldn't disable DPLL0\n"); 5721 + } 5761 5722 5762 5723 intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS); 5763 5724 } ··· 5798 5747 5799 5748 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) 5800 5749 DRM_ERROR("DBuf power enable timeout\n"); 5801 - } 5802 - 5803 - /* returns HPLL frequency in kHz */ 5804 - static int valleyview_get_vco(struct drm_i915_private *dev_priv) 5805 - { 5806 - int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; 5807 - 5808 - /* Obtain SKU information */ 5809 - mutex_lock(&dev_priv->sb_lock); 5810 - hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & 5811 - CCK_FUSE_HPLL_FREQ_MASK; 5812 - mutex_unlock(&dev_priv->sb_lock); 5813 - 5814 - return vco_freq[hpll_freq] * 1000; 5815 5750 } 5816 5751 5817 5752 /* Adjust CDclk dividers to allow high res or save power if possible */ ··· 5837 5800 5838 5801 /* adjust cdclk divider */ 5839 5802 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); 5840 - val &= ~DISPLAY_FREQUENCY_VALUES; 5803 + val &= ~CCK_FREQUENCY_VALUES; 5841 5804 val |= divider; 5842 5805 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val); 5843 5806 5844 5807 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) & 5845 - DISPLAY_FREQUENCY_STATUS) == (divider << DISPLAY_FREQUENCY_STATUS_SHIFT), 5808 + CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT), 5846 5809 50)) 5847 5810 DRM_ERROR("timed out waiting for CDclk change\n"); 5848 5811 } ··· 6020 5983 else 6021 5984 default_credits = PFI_CREDIT(8); 6022 5985 6023 - if (DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 1000) >= dev_priv->rps.cz_freq) { 5986 + if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) { 6024 5987 /* CHV suggested value is 31 or 63 */ 6025 5988 if (IS_CHERRYVIEW(dev_priv)) 6026 5989 credits = 
PFI_CREDIT_63; ··· 6251 6214 6252 6215 if (!IS_GEN2(dev)) 6253 6216 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); 6254 - 6255 - intel_crtc->active = false; 6256 - intel_update_watermarks(crtc); 6257 6217 } 6258 6218 6259 6219 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc) ··· 6270 6236 6271 6237 intel_crtc_disable_planes(crtc, crtc->state->plane_mask); 6272 6238 dev_priv->display.crtc_disable(crtc); 6239 + intel_crtc->active = false; 6240 + intel_update_watermarks(crtc); 6273 6241 intel_disable_shared_dpll(intel_crtc); 6274 6242 6275 6243 domains = intel_crtc->enabled_power_domains; ··· 6508 6472 struct intel_crtc_state *pipe_config) 6509 6473 { 6510 6474 struct drm_device *dev = intel_crtc->base.dev; 6511 - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6475 + const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6512 6476 int lane, link_bw, fdi_dotclock, ret; 6513 6477 bool needs_recompute = false; 6514 6478 ··· 6587 6551 { 6588 6552 struct drm_device *dev = crtc->base.dev; 6589 6553 struct drm_i915_private *dev_priv = dev->dev_private; 6590 - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6554 + const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 6591 6555 6592 6556 /* FIXME should check pixel clock limits on all platforms */ 6593 6557 if (INTEL_INFO(dev)->gen < 4) { ··· 6624 6588 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw. 
6625 6589 */ 6626 6590 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) && 6627 - adjusted_mode->hsync_start == adjusted_mode->hdisplay) 6591 + adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay) 6628 6592 return -EINVAL; 6629 6593 6630 6594 if (HAS_IPS(dev)) ··· 6751 6715 6752 6716 static int valleyview_get_display_clock_speed(struct drm_device *dev) 6753 6717 { 6754 - struct drm_i915_private *dev_priv = dev->dev_private; 6755 - u32 val; 6756 - int divider; 6757 - 6758 - if (dev_priv->hpll_freq == 0) 6759 - dev_priv->hpll_freq = valleyview_get_vco(dev_priv); 6760 - 6761 - mutex_lock(&dev_priv->sb_lock); 6762 - val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL); 6763 - mutex_unlock(&dev_priv->sb_lock); 6764 - 6765 - divider = val & DISPLAY_FREQUENCY_VALUES; 6766 - 6767 - WARN((val & DISPLAY_FREQUENCY_STATUS) != 6768 - (divider << DISPLAY_FREQUENCY_STATUS_SHIFT), 6769 - "cdclk change in progress\n"); 6770 - 6771 - return DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, divider + 1); 6718 + return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk", 6719 + CCK_DISPLAY_CLOCK_CONTROL); 6772 6720 } 6773 6721 6774 6722 static int ilk_get_display_clock_speed(struct drm_device *dev) ··· 7639 7619 struct drm_i915_private *dev_priv = dev->dev_private; 7640 7620 enum pipe pipe = intel_crtc->pipe; 7641 7621 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder; 7642 - struct drm_display_mode *adjusted_mode = 7643 - &intel_crtc->config->base.adjusted_mode; 7622 + const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 7644 7623 uint32_t crtc_vtotal, crtc_vblank_end; 7645 7624 int vsyncshift = 0; 7646 7625 ··· 9903 9884 /* On these chipsets we can only modify the base/size/stride 9904 9885 * whilst the cursor is disabled. 
9905 9886 */ 9906 - I915_WRITE(_CURACNTR, 0); 9907 - POSTING_READ(_CURACNTR); 9887 + I915_WRITE(CURCNTR(PIPE_A), 0); 9888 + POSTING_READ(CURCNTR(PIPE_A)); 9908 9889 intel_crtc->cursor_cntl = 0; 9909 9890 } 9910 9891 9911 9892 if (intel_crtc->cursor_base != base) { 9912 - I915_WRITE(_CURABASE, base); 9893 + I915_WRITE(CURBASE(PIPE_A), base); 9913 9894 intel_crtc->cursor_base = base; 9914 9895 } 9915 9896 ··· 9919 9900 } 9920 9901 9921 9902 if (intel_crtc->cursor_cntl != cntl) { 9922 - I915_WRITE(_CURACNTR, cntl); 9923 - POSTING_READ(_CURACNTR); 9903 + I915_WRITE(CURCNTR(PIPE_A), cntl); 9904 + POSTING_READ(CURCNTR(PIPE_A)); 9924 9905 intel_crtc->cursor_cntl = cntl; 9925 9906 } 9926 9907 } ··· 11577 11558 static bool intel_wm_need_update(struct drm_plane *plane, 11578 11559 struct drm_plane_state *state) 11579 11560 { 11580 - /* Update watermarks on tiling changes. */ 11561 + struct intel_plane_state *new = to_intel_plane_state(state); 11562 + struct intel_plane_state *cur = to_intel_plane_state(plane->state); 11563 + 11564 + /* Update watermarks on tiling or size changes. 
*/ 11581 11565 if (!plane->state->fb || !state->fb || 11582 11566 plane->state->fb->modifier[0] != state->fb->modifier[0] || 11583 - plane->state->rotation != state->rotation) 11584 - return true; 11585 - 11586 - if (plane->state->crtc_w != state->crtc_w) 11567 + plane->state->rotation != state->rotation || 11568 + drm_rect_width(&new->src) != drm_rect_width(&cur->src) || 11569 + drm_rect_height(&new->src) != drm_rect_height(&cur->src) || 11570 + drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) || 11571 + drm_rect_height(&new->dst) != drm_rect_height(&cur->dst)) 11587 11572 return true; 11588 11573 11589 11574 return false; 11575 + } 11576 + 11577 + static bool needs_scaling(struct intel_plane_state *state) 11578 + { 11579 + int src_w = drm_rect_width(&state->src) >> 16; 11580 + int src_h = drm_rect_height(&state->src) >> 16; 11581 + int dst_w = drm_rect_width(&state->dst); 11582 + int dst_h = drm_rect_height(&state->dst); 11583 + 11584 + return (src_w != dst_w || src_h != dst_h); 11590 11585 } 11591 11586 11592 11587 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state, ··· 11618 11585 bool mode_changed = needs_modeset(crtc_state); 11619 11586 bool was_crtc_enabled = crtc->state->active; 11620 11587 bool is_crtc_enabled = crtc_state->active; 11621 - 11622 11588 bool turn_off, turn_on, visible, was_visible; 11623 11589 struct drm_framebuffer *fb = plane_state->fb; 11624 11590 ··· 11735 11703 case DRM_PLANE_TYPE_CURSOR: 11736 11704 break; 11737 11705 case DRM_PLANE_TYPE_OVERLAY: 11738 - if (turn_off && !mode_changed) { 11706 + /* 11707 + * WaCxSRDisabledForSpriteScaling:ivb 11708 + * 11709 + * cstate->update_wm was already set above, so this flag will 11710 + * take effect when we commit and program watermarks. 
11711 + */ 11712 + if (IS_IVYBRIDGE(dev) && 11713 + needs_scaling(to_intel_plane_state(plane_state)) && 11714 + !needs_scaling(old_plane_state)) { 11715 + to_intel_crtc_state(crtc_state)->disable_lp_wm = true; 11716 + } else if (turn_off && !mode_changed) { 11739 11717 intel_crtc->atomic.wait_vblank = true; 11740 11718 intel_crtc->atomic.update_sprite_watermarks |= 11741 11719 1 << i; 11742 11720 } 11721 + 11722 + break; 11743 11723 } 11744 11724 return 0; 11745 11725 } ··· 12615 12571 } 12616 12572 12617 12573 /* cursor */ 12618 - hw_entry = &hw_ddb.cursor[pipe]; 12619 - sw_entry = &sw_ddb->cursor[pipe]; 12574 + hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR]; 12575 + sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR]; 12620 12576 12621 12577 if (skl_ddb_entry_equal(hw_entry, sw_entry)) 12622 12578 continue; ··· 12859 12815 * one to the value. 12860 12816 */ 12861 12817 if (IS_GEN2(dev)) { 12862 - const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; 12818 + const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 12863 12819 int vtotal; 12864 12820 12865 - vtotal = mode->crtc_vtotal; 12866 - if (mode->flags & DRM_MODE_FLAG_INTERLACE) 12821 + vtotal = adjusted_mode->crtc_vtotal; 12822 + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 12867 12823 vtotal /= 2; 12868 12824 12869 12825 crtc->scanline_offset = vtotal - 1; ··· 13366 13322 static void intel_shared_dpll_init(struct drm_device *dev) 13367 13323 { 13368 13324 struct drm_i915_private *dev_priv = dev->dev_private; 13369 - 13370 - intel_update_cdclk(dev); 13371 13325 13372 13326 if (HAS_DDI(dev)) 13373 13327 intel_ddi_pll_init(dev); ··· 14025 13983 * On SKL pre-D0 the strap isn't connected, so we assume 14026 13984 * it's there. 
14027 13985 */ 14028 - found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; 13986 + found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED; 14029 13987 /* WaIgnoreDDIAStrap: skl */ 14030 13988 if (found || IS_SKYLAKE(dev)) 14031 13989 intel_ddi_init(dev, PORT_A); ··· 14086 14044 * eDP ports. Consult the VBT as well as DP_DETECTED to 14087 14045 * detect eDP ports. 14088 14046 */ 14089 - if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED && 14047 + if (I915_READ(VLV_HDMIB) & SDVO_DETECTED && 14090 14048 !intel_dp_is_edp(dev, PORT_B)) 14091 - intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, 14092 - PORT_B); 14093 - if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED || 14049 + intel_hdmi_init(dev, VLV_HDMIB, PORT_B); 14050 + if (I915_READ(VLV_DP_B) & DP_DETECTED || 14094 14051 intel_dp_is_edp(dev, PORT_B)) 14095 - intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); 14052 + intel_dp_init(dev, VLV_DP_B, PORT_B); 14096 14053 14097 - if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED && 14054 + if (I915_READ(VLV_HDMIC) & SDVO_DETECTED && 14098 14055 !intel_dp_is_edp(dev, PORT_C)) 14099 - intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, 14100 - PORT_C); 14101 - if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED || 14056 + intel_hdmi_init(dev, VLV_HDMIC, PORT_C); 14057 + if (I915_READ(VLV_DP_C) & DP_DETECTED || 14102 14058 intel_dp_is_edp(dev, PORT_C)) 14103 - intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); 14059 + intel_dp_init(dev, VLV_DP_C, PORT_C); 14104 14060 14105 14061 if (IS_CHERRYVIEW(dev)) { 14106 - if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) 14107 - intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID, 14108 - PORT_D); 14109 14062 /* eDP not supported on port D, so don't check VBT */ 14110 - if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED) 14111 - intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D); 14063 + if (I915_READ(CHV_HDMID) & SDVO_DETECTED) 14064 + intel_hdmi_init(dev, CHV_HDMID, 
PORT_D); 14065 + if (I915_READ(CHV_DP_D) & DP_DETECTED) 14066 + intel_dp_init(dev, CHV_DP_D, PORT_D); 14112 14067 } 14113 14068 14114 14069 intel_dsi_init(dev); ··· 14596 14557 dev_priv->display.queue_flip = intel_default_queue_flip; 14597 14558 } 14598 14559 14599 - intel_panel_init_backlight_funcs(dev); 14600 - 14601 14560 mutex_init(&dev_priv->pps_mutex); 14602 14561 } 14603 14562 ··· 14872 14835 pipe_name(pipe), sprite_name(pipe, sprite), ret); 14873 14836 } 14874 14837 } 14838 + 14839 + intel_update_czclk(dev_priv); 14840 + intel_update_cdclk(dev); 14875 14841 14876 14842 intel_shared_dpll_init(dev); 14877 14843 ··· 15164 15124 /* FIXME read out full plane state for all planes */ 15165 15125 static void readout_plane_state(struct intel_crtc *crtc) 15166 15126 { 15127 + struct drm_plane *primary = crtc->base.primary; 15167 15128 struct intel_plane_state *plane_state = 15168 - to_intel_plane_state(crtc->base.primary->state); 15129 + to_intel_plane_state(primary->state); 15169 15130 15170 15131 plane_state->visible = 15171 - primary_get_hw_state(to_intel_plane(crtc->base.primary)); 15132 + primary_get_hw_state(to_intel_plane(primary)); 15133 + 15134 + if (plane_state->visible) 15135 + crtc->base.state->plane_mask |= 1 << drm_plane_index(primary); 15172 15136 } 15173 15137 15174 15138 static void intel_modeset_readout_hw_state(struct drm_device *dev)
+29 -25
drivers/gpu/drm/i915/intel_dp.c
··· 1587 1587 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 1588 1588 enum port port = dp_to_dig_port(intel_dp)->port; 1589 1589 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 1590 - struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 1590 + const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 1591 1591 1592 1592 intel_dp_set_link_params(intel_dp, crtc->config); 1593 1593 ··· 2604 2604 2605 2605 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); 2606 2606 intel_dp_start_link_train(intel_dp); 2607 - intel_dp_complete_link_train(intel_dp); 2608 2607 intel_dp_stop_link_train(intel_dp); 2609 2608 2610 2609 if (crtc->config->has_audio) { ··· 3416 3417 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); 3417 3418 } 3418 3419 3419 - /* LRC Bypass */ 3420 - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); 3421 - val |= DPIO_LRC_BYPASS; 3422 - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val); 3423 - 3424 3420 mutex_unlock(&dev_priv->sb_lock); 3425 3421 3426 3422 return 0; ··· 3690 3696 } 3691 3697 3692 3698 /* Enable corresponding port and start training pattern 1 */ 3693 - void 3694 - intel_dp_start_link_train(struct intel_dp *intel_dp) 3699 + static void 3700 + intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp) 3695 3701 { 3696 3702 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base; 3697 3703 struct drm_device *dev = encoder->dev; ··· 3804 3810 intel_dp->DP = DP; 3805 3811 } 3806 3812 3807 - void 3808 - intel_dp_complete_link_train(struct intel_dp *intel_dp) 3813 + static void 3814 + intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp) 3809 3815 { 3810 3816 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); 3811 3817 struct drm_device *dev = dig_port->base.base.dev; ··· 3858 3864 if (!drm_dp_clock_recovery_ok(link_status, 3859 3865 intel_dp->lane_count)) { 3860 3866 intel_dp->train_set_valid = false; 3861 - 
intel_dp_start_link_train(intel_dp); 3867 + intel_dp_link_training_clock_recovery(intel_dp); 3862 3868 intel_dp_set_link_train(intel_dp, &DP, 3863 3869 training_pattern | 3864 3870 DP_LINK_SCRAMBLING_DISABLE); ··· 3875 3881 /* Try 5 times, then try clock recovery if that fails */ 3876 3882 if (tries > 5) { 3877 3883 intel_dp->train_set_valid = false; 3878 - intel_dp_start_link_train(intel_dp); 3884 + intel_dp_link_training_clock_recovery(intel_dp); 3879 3885 intel_dp_set_link_train(intel_dp, &DP, 3880 3886 training_pattern | 3881 3887 DP_LINK_SCRAMBLING_DISABLE); ··· 3906 3912 { 3907 3913 intel_dp_set_link_train(intel_dp, &intel_dp->DP, 3908 3914 DP_TRAINING_PATTERN_DISABLE); 3915 + } 3916 + 3917 + void 3918 + intel_dp_start_link_train(struct intel_dp *intel_dp) 3919 + { 3920 + intel_dp_link_training_clock_recovery(intel_dp); 3921 + intel_dp_link_training_channel_equalization(intel_dp); 3909 3922 } 3910 3923 3911 3924 static void ··· 4383 4382 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { 4384 4383 DRM_DEBUG_KMS("channel EQ not ok, retraining\n"); 4385 4384 intel_dp_start_link_train(intel_dp); 4386 - intel_dp_complete_link_train(intel_dp); 4387 4385 intel_dp_stop_link_train(intel_dp); 4388 4386 } 4389 4387 ··· 4473 4473 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 4474 4474 intel_encoder->base.name); 4475 4475 intel_dp_start_link_train(intel_dp); 4476 - intel_dp_complete_link_train(intel_dp); 4477 4476 intel_dp_stop_link_train(intel_dp); 4478 4477 } 4479 4478 } ··· 5999 6000 } 6000 6001 6001 6002 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 6002 - intel_connector->panel.backlight_power = intel_edp_backlight_power; 6003 + intel_connector->panel.backlight.power = intel_edp_backlight_power; 6003 6004 intel_panel_setup_backlight(connector, pipe); 6004 6005 6005 6006 return true; ··· 6168 6169 return; 6169 6170 6170 6171 intel_connector = intel_connector_alloc(); 6171 - if (!intel_connector) { 6172 - kfree(intel_dig_port); 
6173 - return; 6174 - } 6172 + if (!intel_connector) 6173 + goto err_connector_alloc; 6175 6174 6176 6175 intel_encoder = &intel_dig_port->base; 6177 6176 encoder = &intel_encoder->base; ··· 6217 6220 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse; 6218 6221 dev_priv->hotplug.irq_port[port] = intel_dig_port; 6219 6222 6220 - if (!intel_dp_init_connector(intel_dig_port, intel_connector)) { 6221 - drm_encoder_cleanup(encoder); 6222 - kfree(intel_dig_port); 6223 - kfree(intel_connector); 6224 - } 6223 + if (!intel_dp_init_connector(intel_dig_port, intel_connector)) 6224 + goto err_init_connector; 6225 + 6226 + return; 6227 + 6228 + err_init_connector: 6229 + drm_encoder_cleanup(encoder); 6230 + kfree(intel_connector); 6231 + err_connector_alloc: 6232 + kfree(intel_dig_port); 6233 + 6234 + return; 6225 6235 } 6226 6236 6227 6237 void intel_dp_mst_suspend(struct drm_device *dev)
+2 -3
drivers/gpu/drm/i915/intel_dp_mst.c
··· 40 40 struct drm_atomic_state *state; 41 41 int bpp, i; 42 42 int lane_count, slots; 43 - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 43 + const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 44 44 struct drm_connector *drm_connector; 45 45 struct intel_connector *connector, *found = NULL; 46 46 struct drm_connector_state *connector_state; ··· 78 78 return false; 79 79 } 80 80 81 - mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->clock, bpp); 81 + mst_pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, bpp); 82 82 83 83 pipe_config->pbn = mst_pbn; 84 84 slots = drm_dp_find_vcpi_slots(&intel_dp->mst_mgr, mst_pbn); ··· 188 188 189 189 190 190 intel_dp_start_link_train(intel_dp); 191 - intel_dp_complete_link_train(intel_dp); 192 191 intel_dp_stop_link_train(intel_dp); 193 192 } 194 193
+17 -11
drivers/gpu/drm/i915/intel_drv.h
··· 179 179 bool active_low_pwm; 180 180 181 181 /* PWM chip */ 182 + bool util_pin_active_low; /* bxt+ */ 183 + u8 controller; /* bxt+ only */ 182 184 struct pwm_device *pwm; 183 185 184 186 struct backlight_device *device; 185 - } backlight; 186 187 187 - void (*backlight_power)(struct intel_connector *, bool enable); 188 + /* Connector and platform specific backlight functions */ 189 + int (*setup)(struct intel_connector *connector, enum pipe pipe); 190 + uint32_t (*get)(struct intel_connector *connector); 191 + void (*set)(struct intel_connector *connector, uint32_t level); 192 + void (*disable)(struct intel_connector *connector); 193 + void (*enable)(struct intel_connector *connector); 194 + uint32_t (*hz_to_pwm)(struct intel_connector *connector, 195 + uint32_t hz); 196 + void (*power)(struct intel_connector *, bool enable); 197 + } backlight; 188 198 }; 189 199 190 200 struct intel_connector { ··· 468 458 469 459 /* w/a for waiting 2 vblanks during crtc enable */ 470 460 enum pipe hsw_workaround_pipe; 461 + 462 + /* IVB sprite scaling w/a (WaCxSRDisabledForSpriteScaling:ivb) */ 463 + bool disable_lp_wm; 471 464 }; 472 465 473 466 struct vlv_wm_state { ··· 696 683 const void *frame, ssize_t len); 697 684 void (*set_infoframes)(struct drm_encoder *encoder, 698 685 bool enable, 699 - struct drm_display_mode *adjusted_mode); 686 + const struct drm_display_mode *adjusted_mode); 700 687 bool (*infoframe_enabled)(struct drm_encoder *encoder); 701 688 }; 702 689 ··· 1204 1191 void intel_dp_set_link_params(struct intel_dp *intel_dp, 1205 1192 const struct intel_crtc_state *pipe_config); 1206 1193 void intel_dp_start_link_train(struct intel_dp *intel_dp); 1207 - void intel_dp_complete_link_train(struct intel_dp *intel_dp); 1208 1194 void intel_dp_stop_link_train(struct intel_dp *intel_dp); 1209 1195 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode); 1210 1196 void intel_dp_encoder_destroy(struct drm_encoder *encoder); ··· 1312 1300 int 
intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter); 1313 1301 void intel_attach_force_audio_property(struct drm_connector *connector); 1314 1302 void intel_attach_broadcast_rgb_property(struct drm_connector *connector); 1303 + void intel_attach_aspect_ratio_property(struct drm_connector *connector); 1315 1304 1316 1305 1317 1306 /* intel_overlay.c */ ··· 1345 1332 void intel_panel_enable_backlight(struct intel_connector *connector); 1346 1333 void intel_panel_disable_backlight(struct intel_connector *connector); 1347 1334 void intel_panel_destroy_backlight(struct drm_connector *connector); 1348 - void intel_panel_init_backlight_funcs(struct drm_device *dev); 1349 1335 enum drm_connector_status intel_panel_detect(struct drm_device *dev); 1350 1336 extern struct drm_display_mode *intel_find_panel_downclock( 1351 1337 struct drm_device *dev, ··· 1399 1387 void intel_suspend_hw(struct drm_device *dev); 1400 1388 int ilk_wm_max_level(const struct drm_device *dev); 1401 1389 void intel_update_watermarks(struct drm_crtc *crtc); 1402 - void intel_update_sprite_watermarks(struct drm_plane *plane, 1403 - struct drm_crtc *crtc, 1404 - uint32_t sprite_width, 1405 - uint32_t sprite_height, 1406 - int pixel_size, 1407 - bool enabled, bool scaled); 1408 1390 void intel_init_pm(struct drm_device *dev); 1409 1391 void intel_pm_setup(struct drm_device *dev); 1410 1392 void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
+160 -93
drivers/gpu/drm/i915/intel_dsi.c
··· 282 282 return true; 283 283 } 284 284 285 - static void intel_dsi_port_enable(struct intel_encoder *encoder) 285 + static void bxt_dsi_device_ready(struct intel_encoder *encoder) 286 286 { 287 - struct drm_device *dev = encoder->base.dev; 288 - struct drm_i915_private *dev_priv = dev->dev_private; 289 - struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 287 + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 290 288 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 291 289 enum port port; 292 - u32 temp; 290 + u32 val; 293 291 294 - if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { 295 - temp = I915_READ(VLV_CHICKEN_3); 296 - temp &= ~PIXEL_OVERLAP_CNT_MASK | 297 - intel_dsi->pixel_overlap << 298 - PIXEL_OVERLAP_CNT_SHIFT; 299 - I915_WRITE(VLV_CHICKEN_3, temp); 300 - } 292 + DRM_DEBUG_KMS("\n"); 301 293 294 + /* Exit Low power state in 4 steps*/ 302 295 for_each_dsi_port(port, intel_dsi->ports) { 303 - temp = I915_READ(MIPI_PORT_CTRL(port)); 304 - temp &= ~LANE_CONFIGURATION_MASK; 305 - temp &= ~DUAL_LINK_MODE_MASK; 306 296 307 - if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) { 308 - temp |= (intel_dsi->dual_link - 1) 309 - << DUAL_LINK_MODE_SHIFT; 310 - temp |= intel_crtc->pipe ? 311 - LANE_CONFIGURATION_DUAL_LINK_B : 312 - LANE_CONFIGURATION_DUAL_LINK_A; 313 - } 314 - /* assert ip_tg_enable signal */ 315 - I915_WRITE(MIPI_PORT_CTRL(port), temp | DPI_ENABLE); 316 - POSTING_READ(MIPI_PORT_CTRL(port)); 297 + /* 1. Enable MIPI PHY transparent latch */ 298 + val = I915_READ(BXT_MIPI_PORT_CTRL(port)); 299 + I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD); 300 + usleep_range(2000, 2500); 301 + 302 + /* 2. Enter ULPS */ 303 + val = I915_READ(MIPI_DEVICE_READY(port)); 304 + val &= ~ULPS_STATE_MASK; 305 + val |= (ULPS_STATE_ENTER | DEVICE_READY); 306 + I915_WRITE(MIPI_DEVICE_READY(port), val); 307 + usleep_range(2, 3); 308 + 309 + /* 3. 
Exit ULPS */ 310 + val = I915_READ(MIPI_DEVICE_READY(port)); 311 + val &= ~ULPS_STATE_MASK; 312 + val |= (ULPS_STATE_EXIT | DEVICE_READY); 313 + I915_WRITE(MIPI_DEVICE_READY(port), val); 314 + usleep_range(1000, 1500); 315 + 316 + /* Clear ULPS and set device ready */ 317 + val = I915_READ(MIPI_DEVICE_READY(port)); 318 + val &= ~ULPS_STATE_MASK; 319 + val |= DEVICE_READY; 320 + I915_WRITE(MIPI_DEVICE_READY(port), val); 317 321 } 318 322 } 319 323 320 - static void intel_dsi_port_disable(struct intel_encoder *encoder) 321 - { 322 - struct drm_device *dev = encoder->base.dev; 323 - struct drm_i915_private *dev_priv = dev->dev_private; 324 - struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 325 - enum port port; 326 - u32 temp; 327 - 328 - for_each_dsi_port(port, intel_dsi->ports) { 329 - /* de-assert ip_tg_enable signal */ 330 - temp = I915_READ(MIPI_PORT_CTRL(port)); 331 - I915_WRITE(MIPI_PORT_CTRL(port), temp & ~DPI_ENABLE); 332 - POSTING_READ(MIPI_PORT_CTRL(port)); 333 - } 334 - } 335 - 336 - static void intel_dsi_device_ready(struct intel_encoder *encoder) 324 + static void vlv_dsi_device_ready(struct intel_encoder *encoder) 337 325 { 338 326 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 339 327 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); ··· 357 369 358 370 I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY); 359 371 usleep_range(2500, 3000); 372 + } 373 + } 374 + 375 + static void intel_dsi_device_ready(struct intel_encoder *encoder) 376 + { 377 + struct drm_device *dev = encoder->base.dev; 378 + 379 + if (IS_VALLEYVIEW(dev)) 380 + vlv_dsi_device_ready(encoder); 381 + else if (IS_BROXTON(dev)) 382 + bxt_dsi_device_ready(encoder); 383 + } 384 + 385 + static void intel_dsi_port_enable(struct intel_encoder *encoder) 386 + { 387 + struct drm_device *dev = encoder->base.dev; 388 + struct drm_i915_private *dev_priv = dev->dev_private; 389 + struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 390 + 
struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 391 + enum port port; 392 + u32 temp; 393 + u32 port_ctrl; 394 + 395 + if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK) { 396 + temp = I915_READ(VLV_CHICKEN_3); 397 + temp &= ~PIXEL_OVERLAP_CNT_MASK | 398 + intel_dsi->pixel_overlap << 399 + PIXEL_OVERLAP_CNT_SHIFT; 400 + I915_WRITE(VLV_CHICKEN_3, temp); 401 + } 402 + 403 + for_each_dsi_port(port, intel_dsi->ports) { 404 + port_ctrl = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) : 405 + MIPI_PORT_CTRL(port); 406 + 407 + temp = I915_READ(port_ctrl); 408 + 409 + temp &= ~LANE_CONFIGURATION_MASK; 410 + temp &= ~DUAL_LINK_MODE_MASK; 411 + 412 + if (intel_dsi->ports == ((1 << PORT_A) | (1 << PORT_C))) { 413 + temp |= (intel_dsi->dual_link - 1) 414 + << DUAL_LINK_MODE_SHIFT; 415 + temp |= intel_crtc->pipe ? 416 + LANE_CONFIGURATION_DUAL_LINK_B : 417 + LANE_CONFIGURATION_DUAL_LINK_A; 418 + } 419 + /* assert ip_tg_enable signal */ 420 + I915_WRITE(port_ctrl, temp | DPI_ENABLE); 421 + POSTING_READ(port_ctrl); 422 + } 423 + } 424 + 425 + static void intel_dsi_port_disable(struct intel_encoder *encoder) 426 + { 427 + struct drm_device *dev = encoder->base.dev; 428 + struct drm_i915_private *dev_priv = dev->dev_private; 429 + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 430 + enum port port; 431 + u32 temp; 432 + u32 port_ctrl; 433 + 434 + for_each_dsi_port(port, intel_dsi->ports) { 435 + /* de-assert ip_tg_enable signal */ 436 + port_ctrl = IS_BROXTON(dev) ? 
BXT_MIPI_PORT_CTRL(port) : 437 + MIPI_PORT_CTRL(port); 438 + temp = I915_READ(port_ctrl); 439 + I915_WRITE(port_ctrl, temp & ~DPI_ENABLE); 440 + POSTING_READ(port_ctrl); 360 441 } 361 442 } 362 443 ··· 476 419 477 420 msleep(intel_dsi->panel_on_delay); 478 421 479 - /* Disable DPOunit clock gating, can stall pipe 480 - * and we need DPLL REFA always enabled */ 481 - tmp = I915_READ(DPLL(pipe)); 482 - tmp |= DPLL_REF_CLK_ENABLE_VLV; 483 - I915_WRITE(DPLL(pipe), tmp); 422 + if (IS_VALLEYVIEW(dev)) { 423 + /* 424 + * Disable DPOunit clock gating, can stall pipe 425 + * and we need DPLL REFA always enabled 426 + */ 427 + tmp = I915_READ(DPLL(pipe)); 428 + tmp |= DPLL_REF_CLK_ENABLE_VLV; 429 + I915_WRITE(DPLL(pipe), tmp); 484 430 485 - /* update the hw state for DPLL */ 486 - intel_crtc->config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV | 487 - DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 431 + /* update the hw state for DPLL */ 432 + intel_crtc->config->dpll_hw_state.dpll = 433 + DPLL_INTEGRATED_REF_CLK_VLV | 434 + DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS; 488 435 489 - tmp = I915_READ(DSPCLK_GATE_D); 490 - tmp |= DPOUNIT_CLOCK_GATE_DISABLE; 491 - I915_WRITE(DSPCLK_GATE_D, tmp); 436 + tmp = I915_READ(DSPCLK_GATE_D); 437 + tmp |= DPOUNIT_CLOCK_GATE_DISABLE; 438 + I915_WRITE(DSPCLK_GATE_D, tmp); 439 + } 492 440 493 441 /* put device in ready state */ 494 442 intel_dsi_device_ready(encoder); ··· 557 495 /* Panel commands can be sent when clock is in LP11 */ 558 496 I915_WRITE(MIPI_DEVICE_READY(port), 0x0); 559 497 560 - temp = I915_READ(MIPI_CTRL(port)); 561 - temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; 562 - I915_WRITE(MIPI_CTRL(port), temp | 563 - intel_dsi->escape_clk_div << 564 - ESCAPE_CLOCK_DIVIDER_SHIFT); 565 - 498 + intel_dsi_reset_clocks(encoder, port); 566 499 I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); 567 500 568 501 temp = I915_READ(MIPI_DSI_FUNC_PRG(port)); ··· 576 519 577 520 static void intel_dsi_clear_device_ready(struct intel_encoder *encoder) 578 
521 { 522 + struct drm_device *dev = encoder->base.dev; 579 523 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 580 524 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 581 525 enum port port; 582 526 u32 val; 527 + u32 port_ctrl = 0; 583 528 584 529 DRM_DEBUG_KMS("\n"); 585 530 for_each_dsi_port(port, intel_dsi->ports) { ··· 598 539 ULPS_STATE_ENTER); 599 540 usleep_range(2000, 2500); 600 541 542 + if (IS_BROXTON(dev)) 543 + port_ctrl = BXT_MIPI_PORT_CTRL(port); 544 + else if (IS_VALLEYVIEW(dev)) 545 + /* Common bit for both MIPI Port A & MIPI Port C */ 546 + port_ctrl = MIPI_PORT_CTRL(PORT_A); 547 + 601 548 /* Wait till Clock lanes are in LP-00 state for MIPI Port A 602 549 * only. MIPI Port C has no similar bit for checking 603 550 */ 604 - if (wait_for(((I915_READ(MIPI_PORT_CTRL(PORT_A)) & AFE_LATCHOUT) 605 - == 0x00000), 30)) 551 + if (wait_for(((I915_READ(port_ctrl) & AFE_LATCHOUT) 552 + == 0x00000), 30)) 606 553 DRM_ERROR("DSI LP not going Low\n"); 607 554 608 - /* Disable MIPI PHY transparent latch 609 - * Common bit for both MIPI Port A & MIPI Port C 610 - */ 611 - val = I915_READ(MIPI_PORT_CTRL(PORT_A)); 612 - I915_WRITE(MIPI_PORT_CTRL(PORT_A), val & ~LP_OUTPUT_HOLD); 555 + /* Disable MIPI PHY transparent latch */ 556 + val = I915_READ(port_ctrl); 557 + I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD); 613 558 usleep_range(1000, 1500); 614 559 615 560 I915_WRITE(MIPI_DEVICE_READY(port), 0x00); ··· 656 593 struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 657 594 struct drm_device *dev = encoder->base.dev; 658 595 enum intel_display_power_domain power_domain; 659 - u32 dpi_enabled, func; 596 + u32 dpi_enabled, func, ctrl_reg; 660 597 enum port port; 661 598 662 599 DRM_DEBUG_KMS("\n"); ··· 668 605 /* XXX: this only works for one DSI output */ 669 606 for_each_dsi_port(port, intel_dsi->ports) { 670 607 func = I915_READ(MIPI_DSI_FUNC_PRG(port)); 671 - dpi_enabled = I915_READ(MIPI_PORT_CTRL(port)) & 672 - 
DPI_ENABLE; 608 + ctrl_reg = IS_BROXTON(dev) ? BXT_MIPI_PORT_CTRL(port) : 609 + MIPI_PORT_CTRL(port); 610 + dpi_enabled = I915_READ(ctrl_reg) & DPI_ENABLE; 673 611 674 612 /* Due to some hardware limitations on BYT, MIPI Port C DPI 675 613 * Enable bit does not get set. To check whether DSI Port C ··· 695 631 static void intel_dsi_get_config(struct intel_encoder *encoder, 696 632 struct intel_crtc_state *pipe_config) 697 633 { 698 - u32 pclk; 634 + u32 pclk = 0; 699 635 DRM_DEBUG_KMS("\n"); 700 636 701 637 /* ··· 704 640 */ 705 641 pipe_config->dpll_hw_state.dpll_md = 0; 706 642 707 - pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp); 643 + if (IS_BROXTON(encoder->base.dev)) 644 + pclk = bxt_get_dsi_pclk(encoder, pipe_config->pipe_bpp); 645 + else if (IS_VALLEYVIEW(encoder->base.dev)) 646 + pclk = vlv_get_dsi_pclk(encoder, pipe_config->pipe_bpp); 647 + 708 648 if (!pclk) 709 649 return; 710 650 ··· 766 698 } 767 699 768 700 static void set_dsi_timings(struct drm_encoder *encoder, 769 - const struct drm_display_mode *mode) 701 + const struct drm_display_mode *adjusted_mode) 770 702 { 771 703 struct drm_device *dev = encoder->dev; 772 704 struct drm_i915_private *dev_priv = dev->dev_private; ··· 778 710 779 711 u16 hactive, hfp, hsync, hbp, vfp, vsync, vbp; 780 712 781 - hactive = mode->hdisplay; 782 - hfp = mode->hsync_start - mode->hdisplay; 783 - hsync = mode->hsync_end - mode->hsync_start; 784 - hbp = mode->htotal - mode->hsync_end; 713 + hactive = adjusted_mode->crtc_hdisplay; 714 + hfp = adjusted_mode->crtc_hsync_start - adjusted_mode->crtc_hdisplay; 715 + hsync = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start; 716 + hbp = adjusted_mode->crtc_htotal - adjusted_mode->crtc_hsync_end; 785 717 786 718 if (intel_dsi->dual_link) { 787 719 hactive /= 2; ··· 792 724 hbp /= 2; 793 725 } 794 726 795 - vfp = mode->vsync_start - mode->vdisplay; 796 - vsync = mode->vsync_end - mode->vsync_start; 797 - vbp = mode->vtotal - mode->vsync_end; 727 + vfp = 
adjusted_mode->crtc_vsync_start - adjusted_mode->crtc_vdisplay; 728 + vsync = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start; 729 + vbp = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_end; 798 730 799 731 /* horizontal values are in terms of high speed byte clock */ 800 732 hactive = txbyteclkhs(hactive, bpp, lane_count, ··· 813 745 * whereas these values should be based on resolution. 814 746 */ 815 747 I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port), 816 - mode->hdisplay); 748 + adjusted_mode->crtc_hdisplay); 817 749 I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port), 818 - mode->vdisplay); 750 + adjusted_mode->crtc_vdisplay); 819 751 I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port), 820 - mode->vtotal); 752 + adjusted_mode->crtc_vtotal); 821 753 } 822 754 823 755 I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive); ··· 842 774 struct drm_i915_private *dev_priv = dev->dev_private; 843 775 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 844 776 struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); 845 - struct drm_display_mode *adjusted_mode = 846 - &intel_crtc->config->base.adjusted_mode; 777 + const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 847 778 enum port port; 848 779 unsigned int bpp = intel_crtc->config->pipe_bpp; 849 780 u32 val, tmp; ··· 850 783 851 784 DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe)); 852 785 853 - mode_hdisplay = adjusted_mode->hdisplay; 786 + mode_hdisplay = adjusted_mode->crtc_hdisplay; 854 787 855 788 if (intel_dsi->dual_link) { 856 789 mode_hdisplay /= 2; ··· 900 833 I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg); 901 834 902 835 I915_WRITE(MIPI_DPI_RESOLUTION(port), 903 - adjusted_mode->vdisplay << VERTICAL_ADDRESS_SHIFT | 836 + adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | 904 837 mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT); 905 838 } 906 839 ··· 946 879 if (is_vid_mode(intel_dsi) && 947 880 intel_dsi->video_mode_format == VIDEO_MODE_BURST) { 948 881 
I915_WRITE(MIPI_HS_TX_TIMEOUT(port), 949 - txbyteclkhs(adjusted_mode->htotal, bpp, 950 - intel_dsi->lane_count, 951 - intel_dsi->burst_mode_ratio) + 1); 882 + txbyteclkhs(adjusted_mode->crtc_htotal, bpp, 883 + intel_dsi->lane_count, 884 + intel_dsi->burst_mode_ratio) + 1); 952 885 } else { 953 886 I915_WRITE(MIPI_HS_TX_TIMEOUT(port), 954 - txbyteclkhs(adjusted_mode->vtotal * 955 - adjusted_mode->htotal, 956 - bpp, intel_dsi->lane_count, 957 - intel_dsi->burst_mode_ratio) + 1); 887 + txbyteclkhs(adjusted_mode->crtc_vtotal * 888 + adjusted_mode->crtc_htotal, 889 + bpp, intel_dsi->lane_count, 890 + intel_dsi->burst_mode_ratio) + 1); 958 891 } 959 892 I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout); 960 893 I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
+3
drivers/gpu/drm/i915/intel_dsi.h
··· 127 127 extern void intel_enable_dsi_pll(struct intel_encoder *encoder); 128 128 extern void intel_disable_dsi_pll(struct intel_encoder *encoder); 129 129 extern u32 vlv_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp); 130 + extern u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp); 131 + extern void intel_dsi_reset_clocks(struct intel_encoder *encoder, 132 + enum port port); 130 133 131 134 struct drm_panel *vbt_panel_init(struct intel_dsi *intel_dsi, u16 panel_id); 132 135
+116
drivers/gpu/drm/i915/intel_dsi_pll.c
··· 384 384 return pclk; 385 385 } 386 386 387 + u32 bxt_get_dsi_pclk(struct intel_encoder *encoder, int pipe_bpp) 388 + { 389 + u32 pclk; 390 + u32 dsi_clk; 391 + u32 dsi_ratio; 392 + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 393 + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 394 + 395 + /* Divide by zero */ 396 + if (!pipe_bpp) { 397 + DRM_ERROR("Invalid BPP(0)\n"); 398 + return 0; 399 + } 400 + 401 + dsi_ratio = I915_READ(BXT_DSI_PLL_CTL) & 402 + BXT_DSI_PLL_RATIO_MASK; 403 + 404 + /* Invalid DSI ratio ? */ 405 + if (dsi_ratio < BXT_DSI_PLL_RATIO_MIN || 406 + dsi_ratio > BXT_DSI_PLL_RATIO_MAX) { 407 + DRM_ERROR("Invalid DSI pll ratio(%u) programmed\n", dsi_ratio); 408 + return 0; 409 + } 410 + 411 + dsi_clk = (dsi_ratio * BXT_REF_CLOCK_KHZ) / 2; 412 + 413 + /* pixel_format and pipe_bpp should agree */ 414 + assert_bpp_mismatch(intel_dsi->pixel_format, pipe_bpp); 415 + 416 + pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, pipe_bpp); 417 + 418 + DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk); 419 + return pclk; 420 + } 421 + 422 + static void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) 423 + { 424 + u32 temp; 425 + struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 426 + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 427 + 428 + temp = I915_READ(MIPI_CTRL(port)); 429 + temp &= ~ESCAPE_CLOCK_DIVIDER_MASK; 430 + I915_WRITE(MIPI_CTRL(port), temp | 431 + intel_dsi->escape_clk_div << 432 + ESCAPE_CLOCK_DIVIDER_SHIFT); 433 + } 434 + 435 + /* Program BXT Mipi clocks and dividers */ 436 + static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port) 437 + { 438 + u32 tmp; 439 + u32 divider; 440 + u32 dsi_rate; 441 + u32 pll_ratio; 442 + struct drm_i915_private *dev_priv = dev->dev_private; 443 + 444 + /* Clear old configurations */ 445 + tmp = I915_READ(BXT_MIPI_CLOCK_CTL); 446 + tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)); 447 + tmp &= 
~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port)); 448 + tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port)); 449 + tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port)); 450 + 451 + /* Get the current DSI rate(actual) */ 452 + pll_ratio = I915_READ(BXT_DSI_PLL_CTL) & 453 + BXT_DSI_PLL_RATIO_MASK; 454 + dsi_rate = (BXT_REF_CLOCK_KHZ * pll_ratio) / 2; 455 + 456 + /* Max possible output of clock is 39.5 MHz, program value -1 */ 457 + divider = (dsi_rate / BXT_MAX_VAR_OUTPUT_KHZ) - 1; 458 + tmp |= BXT_MIPI_ESCLK_VAR_DIV(port, divider); 459 + 460 + /* 461 + * Tx escape clock must be as close to 20MHz possible, but should 462 + * not exceed it. Hence select divide by 2 463 + */ 464 + tmp |= BXT_MIPI_TX_ESCLK_8XDIV_BY2(port); 465 + 466 + tmp |= BXT_MIPI_RX_ESCLK_8X_BY3(port); 467 + 468 + I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp); 469 + } 470 + 387 471 static bool bxt_configure_dsi_pll(struct intel_encoder *encoder) 388 472 { 389 473 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; ··· 519 435 static void bxt_enable_dsi_pll(struct intel_encoder *encoder) 520 436 { 521 437 struct drm_i915_private *dev_priv = encoder->base.dev->dev_private; 438 + struct intel_dsi *intel_dsi = enc_to_intel_dsi(&encoder->base); 439 + enum port port; 522 440 u32 val; 523 441 524 442 DRM_DEBUG_KMS("\n"); ··· 538 452 DRM_ERROR("Configure DSI PLL failed, abort PLL enable\n"); 539 453 return; 540 454 } 455 + 456 + /* Program TX, RX, Dphy clocks */ 457 + for_each_dsi_port(port, intel_dsi->ports) 458 + bxt_dsi_program_clocks(encoder->base.dev, port); 541 459 542 460 /* Enable DSI PLL */ 543 461 val = I915_READ(BXT_DSI_PLL_ENABLE); ··· 575 485 vlv_disable_dsi_pll(encoder); 576 486 else if (IS_BROXTON(dev)) 577 487 bxt_disable_dsi_pll(encoder); 488 + } 489 + 490 + static void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) 491 + { 492 + u32 tmp; 493 + struct drm_device *dev = encoder->base.dev; 494 + struct drm_i915_private *dev_priv = dev->dev_private; 495 + 496 + /* Clear old configurations */ 
497 + tmp = I915_READ(BXT_MIPI_CLOCK_CTL); 498 + tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port)); 499 + tmp &= ~(BXT_MIPI_RX_ESCLK_FIXDIV_MASK(port)); 500 + tmp &= ~(BXT_MIPI_ESCLK_VAR_DIV_MASK(port)); 501 + tmp &= ~(BXT_MIPI_DPHY_DIVIDER_MASK(port)); 502 + I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp); 503 + I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP); 504 + } 505 + 506 + void intel_dsi_reset_clocks(struct intel_encoder *encoder, enum port port) 507 + { 508 + struct drm_device *dev = encoder->base.dev; 509 + 510 + if (IS_BROXTON(dev)) 511 + bxt_dsi_reset_clocks(encoder, port); 512 + else if (IS_VALLEYVIEW(dev)) 513 + vlv_dsi_reset_clocks(encoder, port); 578 514 }
+27 -32
drivers/gpu/drm/i915/intel_dvo.c
··· 97 97 98 98 struct intel_dvo_device dev; 99 99 100 - struct drm_display_mode *panel_fixed_mode; 100 + struct intel_connector *attached_connector; 101 + 101 102 bool panel_wants_dither; 102 103 }; 103 104 ··· 202 201 struct drm_display_mode *mode) 203 202 { 204 203 struct intel_dvo *intel_dvo = intel_attached_dvo(connector); 204 + const struct drm_display_mode *fixed_mode = 205 + to_intel_connector(connector)->panel.fixed_mode; 205 206 int max_dotclk = to_i915(connector->dev)->max_dotclk_freq; 206 207 int target_clock = mode->clock; 207 208 ··· 212 209 213 210 /* XXX: Validate clock range */ 214 211 215 - if (intel_dvo->panel_fixed_mode) { 216 - if (mode->hdisplay > intel_dvo->panel_fixed_mode->hdisplay) 212 + if (fixed_mode) { 213 + if (mode->hdisplay > fixed_mode->hdisplay) 217 214 return MODE_PANEL; 218 - if (mode->vdisplay > intel_dvo->panel_fixed_mode->vdisplay) 215 + if (mode->vdisplay > fixed_mode->vdisplay) 219 216 return MODE_PANEL; 220 217 221 - target_clock = intel_dvo->panel_fixed_mode->clock; 218 + target_clock = fixed_mode->clock; 222 219 } 223 220 224 221 if (target_clock > max_dotclk) ··· 231 228 struct intel_crtc_state *pipe_config) 232 229 { 233 230 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 231 + const struct drm_display_mode *fixed_mode = 232 + intel_dvo->attached_connector->panel.fixed_mode; 234 233 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 235 234 236 235 /* If we have timings from the BIOS for the panel, put them in ··· 240 235 * with the panel scaling set up to source from the H/VDisplay 241 236 * of the original mode. 
242 237 */ 243 - if (intel_dvo->panel_fixed_mode != NULL) { 244 - #define C(x) adjusted_mode->x = intel_dvo->panel_fixed_mode->x 245 - C(hdisplay); 246 - C(hsync_start); 247 - C(hsync_end); 248 - C(htotal); 249 - C(vdisplay); 250 - C(vsync_start); 251 - C(vsync_end); 252 - C(vtotal); 253 - C(clock); 254 - #undef C 255 - 256 - drm_mode_set_crtcinfo(adjusted_mode, 0); 257 - } 238 + if (fixed_mode) 239 + intel_fixed_panel_mode(fixed_mode, adjusted_mode); 258 240 259 241 return true; 260 242 } ··· 251 259 struct drm_device *dev = encoder->base.dev; 252 260 struct drm_i915_private *dev_priv = dev->dev_private; 253 261 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 254 - struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 262 + const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 255 263 struct intel_dvo *intel_dvo = enc_to_dvo(encoder); 256 264 int pipe = crtc->pipe; 257 265 u32 dvo_val; ··· 285 293 dvo_val |= DVO_VSYNC_ACTIVE_HIGH; 286 294 287 295 /*I915_WRITE(DVOB_SRCDIM, 288 - (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | 289 - (adjusted_mode->VDisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ 296 + (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | 297 + (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/ 290 298 I915_WRITE(dvo_srcdim_reg, 291 - (adjusted_mode->hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | 292 - (adjusted_mode->vdisplay << DVO_SRCDIM_VERTICAL_SHIFT)); 299 + (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | 300 + (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT)); 293 301 /*I915_WRITE(DVOB, dvo_val);*/ 294 302 I915_WRITE(dvo_reg, dvo_val); 295 303 } ··· 310 318 311 319 static int intel_dvo_get_modes(struct drm_connector *connector) 312 320 { 313 - struct intel_dvo *intel_dvo = intel_attached_dvo(connector); 314 321 struct drm_i915_private *dev_priv = connector->dev->dev_private; 322 + const struct drm_display_mode *fixed_mode = 323 + 
to_intel_connector(connector)->panel.fixed_mode; 315 324 316 325 /* We should probably have an i2c driver get_modes function for those 317 326 * devices which will have a fixed set of modes determined by the chip ··· 324 331 if (!list_empty(&connector->probed_modes)) 325 332 return 1; 326 333 327 - if (intel_dvo->panel_fixed_mode != NULL) { 334 + if (fixed_mode) { 328 335 struct drm_display_mode *mode; 329 - mode = drm_mode_duplicate(connector->dev, intel_dvo->panel_fixed_mode); 336 + mode = drm_mode_duplicate(connector->dev, fixed_mode); 330 337 if (mode) { 331 338 drm_mode_probed_add(connector, mode); 332 339 return 1; ··· 339 346 static void intel_dvo_destroy(struct drm_connector *connector) 340 347 { 341 348 drm_connector_cleanup(connector); 349 + intel_panel_fini(&to_intel_connector(connector)->panel); 342 350 kfree(connector); 343 351 } 344 352 ··· 365 371 366 372 if (intel_dvo->dev.dev_ops->destroy) 367 373 intel_dvo->dev.dev_ops->destroy(&intel_dvo->dev); 368 - 369 - kfree(intel_dvo->panel_fixed_mode); 370 374 371 375 intel_encoder_destroy(encoder); 372 376 } ··· 429 437 kfree(intel_dvo); 430 438 return; 431 439 } 440 + 441 + intel_dvo->attached_connector = intel_connector; 432 442 433 443 intel_encoder = &intel_dvo->base; 434 444 drm_encoder_init(dev, &intel_encoder->base, ··· 536 542 * headers, likely), so for now, just get the current 537 543 * mode being output through DVO. 538 544 */ 539 - intel_dvo->panel_fixed_mode = 540 - intel_dvo_get_current_mode(connector); 545 + intel_panel_init(&intel_connector->panel, 546 + intel_dvo_get_current_mode(connector), 547 + NULL); 541 548 intel_dvo->panel_wants_dither = true; 542 549 } 543 550
+91 -24
drivers/gpu/drm/i915/intel_fbc.c
··· 41 41 #include "intel_drv.h" 42 42 #include "i915_drv.h" 43 43 44 + static inline bool fbc_supported(struct drm_i915_private *dev_priv) 45 + { 46 + return dev_priv->fbc.enable_fbc != NULL; 47 + } 48 + 44 49 /* 45 50 * In some platforms where the CRTC's x:0/y:0 coordinates doesn't match the 46 51 * frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's ··· 444 439 */ 445 440 void intel_fbc_disable(struct drm_i915_private *dev_priv) 446 441 { 447 - if (!dev_priv->fbc.enable_fbc) 442 + if (!fbc_supported(dev_priv)) 448 443 return; 449 444 450 445 mutex_lock(&dev_priv->fbc.lock); ··· 462 457 { 463 458 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 464 459 465 - if (!dev_priv->fbc.enable_fbc) 460 + if (!fbc_supported(dev_priv)) 466 461 return; 467 462 468 463 mutex_lock(&dev_priv->fbc.lock); ··· 690 685 691 686 void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv) 692 687 { 693 - if (!dev_priv->fbc.enable_fbc) 688 + if (!fbc_supported(dev_priv)) 694 689 return; 695 690 696 691 mutex_lock(&dev_priv->fbc.lock); ··· 698 693 mutex_unlock(&dev_priv->fbc.lock); 699 694 } 700 695 701 - static int intel_fbc_setup_cfb(struct drm_i915_private *dev_priv, int size, 702 - int fb_cpp) 696 + /* 697 + * For SKL+, the plane source size used by the hardware is based on the value we 698 + * write to the PLANE_SIZE register. For BDW-, the hardware looks at the value 699 + * we wrote to PIPESRC. 
700 + */ 701 + static void intel_fbc_get_plane_source_size(struct intel_crtc *crtc, 702 + int *width, int *height) 703 703 { 704 + struct intel_plane_state *plane_state = 705 + to_intel_plane_state(crtc->base.primary->state); 706 + int w, h; 707 + 708 + if (intel_rotation_90_or_270(plane_state->base.rotation)) { 709 + w = drm_rect_height(&plane_state->src) >> 16; 710 + h = drm_rect_width(&plane_state->src) >> 16; 711 + } else { 712 + w = drm_rect_width(&plane_state->src) >> 16; 713 + h = drm_rect_height(&plane_state->src) >> 16; 714 + } 715 + 716 + if (width) 717 + *width = w; 718 + if (height) 719 + *height = h; 720 + } 721 + 722 + static int intel_fbc_calculate_cfb_size(struct intel_crtc *crtc) 723 + { 724 + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 725 + struct drm_framebuffer *fb = crtc->base.primary->fb; 726 + int lines; 727 + 728 + intel_fbc_get_plane_source_size(crtc, NULL, &lines); 729 + if (INTEL_INFO(dev_priv)->gen >= 7) 730 + lines = min(lines, 2048); 731 + 732 + return lines * fb->pitches[0]; 733 + } 734 + 735 + static int intel_fbc_setup_cfb(struct intel_crtc *crtc) 736 + { 737 + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 738 + struct drm_framebuffer *fb = crtc->base.primary->fb; 739 + int size, cpp; 740 + 741 + size = intel_fbc_calculate_cfb_size(crtc); 742 + cpp = drm_format_plane_cpp(fb->pixel_format, 0); 743 + 704 744 if (size <= dev_priv->fbc.uncompressed_size) 705 745 return 0; 706 746 707 747 /* Release any current block */ 708 748 __intel_fbc_cleanup_cfb(dev_priv); 709 749 710 - return intel_fbc_alloc_cfb(dev_priv, size, fb_cpp); 750 + return intel_fbc_alloc_cfb(dev_priv, size, cpp); 711 751 } 712 752 713 753 static bool stride_is_valid(struct drm_i915_private *dev_priv, ··· 799 749 } 800 750 } 801 751 752 + /* 753 + * For some reason, the hardware tracking starts looking at whatever we 754 + * programmed as the display plane base address register. 
It does not look at 755 + * the X and Y offset registers. That's why we look at the crtc->adjusted{x,y} 756 + * variables instead of just looking at the pipe/plane size. 757 + */ 758 + static bool intel_fbc_hw_tracking_covers_screen(struct intel_crtc *crtc) 759 + { 760 + struct drm_i915_private *dev_priv = crtc->base.dev->dev_private; 761 + unsigned int effective_w, effective_h, max_w, max_h; 762 + 763 + if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) { 764 + max_w = 4096; 765 + max_h = 4096; 766 + } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { 767 + max_w = 4096; 768 + max_h = 2048; 769 + } else { 770 + max_w = 2048; 771 + max_h = 1536; 772 + } 773 + 774 + intel_fbc_get_plane_source_size(crtc, &effective_w, &effective_h); 775 + effective_w += crtc->adjusted_x; 776 + effective_h += crtc->adjusted_y; 777 + 778 + return effective_w <= max_w && effective_h <= max_h; 779 + } 780 + 802 781 /** 803 782 * __intel_fbc_update - enable/disable FBC as needed, unlocked 804 783 * @dev_priv: i915 device instance ··· 854 775 struct drm_framebuffer *fb; 855 776 struct drm_i915_gem_object *obj; 856 777 const struct drm_display_mode *adjusted_mode; 857 - unsigned int max_width, max_height; 858 778 859 779 WARN_ON(!mutex_is_locked(&dev_priv->fbc.lock)); 860 780 ··· 902 824 goto out_disable; 903 825 } 904 826 905 - if (INTEL_INFO(dev_priv)->gen >= 8 || IS_HASWELL(dev_priv)) { 906 - max_width = 4096; 907 - max_height = 4096; 908 - } else if (IS_G4X(dev_priv) || INTEL_INFO(dev_priv)->gen >= 5) { 909 - max_width = 4096; 910 - max_height = 2048; 911 - } else { 912 - max_width = 2048; 913 - max_height = 1536; 914 - } 915 - if (intel_crtc->config->pipe_src_w > max_width || 916 - intel_crtc->config->pipe_src_h > max_height) { 827 + if (!intel_fbc_hw_tracking_covers_screen(intel_crtc)) { 917 828 set_no_fbc_reason(dev_priv, FBC_MODE_TOO_LARGE); 918 829 goto out_disable; 919 830 } 831 + 920 832 if ((INTEL_INFO(dev_priv)->gen < 4 || HAS_DDI(dev_priv)) && 921 833 
intel_crtc->plane != PLANE_A) { 922 834 set_no_fbc_reason(dev_priv, FBC_BAD_PLANE); ··· 951 883 goto out_disable; 952 884 } 953 885 954 - if (intel_fbc_setup_cfb(dev_priv, obj->base.size, 955 - drm_format_plane_cpp(fb->pixel_format, 0))) { 886 + if (intel_fbc_setup_cfb(intel_crtc)) { 956 887 set_no_fbc_reason(dev_priv, FBC_STOLEN_TOO_SMALL); 957 888 goto out_disable; 958 889 } ··· 1015 948 */ 1016 949 void intel_fbc_update(struct drm_i915_private *dev_priv) 1017 950 { 1018 - if (!dev_priv->fbc.enable_fbc) 951 + if (!fbc_supported(dev_priv)) 1019 952 return; 1020 953 1021 954 mutex_lock(&dev_priv->fbc.lock); ··· 1029 962 { 1030 963 unsigned int fbc_bits; 1031 964 1032 - if (!dev_priv->fbc.enable_fbc) 965 + if (!fbc_supported(dev_priv)) 1033 966 return; 1034 967 1035 968 if (origin == ORIGIN_GTT) ··· 1056 989 void intel_fbc_flush(struct drm_i915_private *dev_priv, 1057 990 unsigned int frontbuffer_bits, enum fb_op_origin origin) 1058 991 { 1059 - if (!dev_priv->fbc.enable_fbc) 992 + if (!fbc_supported(dev_priv)) 1060 993 return; 1061 994 1062 995 if (origin == ORIGIN_GTT)
+8 -2
drivers/gpu/drm/i915/intel_fbdev.c
··· 121 121 container_of(helper, struct intel_fbdev, helper); 122 122 struct drm_framebuffer *fb; 123 123 struct drm_device *dev = helper->dev; 124 + struct drm_i915_private *dev_priv = to_i915(dev); 124 125 struct drm_mode_fb_cmd2 mode_cmd = {}; 125 - struct drm_i915_gem_object *obj; 126 + struct drm_i915_gem_object *obj = NULL; 126 127 int size, ret; 127 128 128 129 /* we don't do packed 24bpp */ ··· 140 139 141 140 size = mode_cmd.pitches[0] * mode_cmd.height; 142 141 size = PAGE_ALIGN(size); 143 - obj = i915_gem_object_create_stolen(dev, size); 142 + 143 + /* If the FB is too big, just don't use it since fbdev is not very 144 + * important and we should probably use that space with FBC or other 145 + * features. */ 146 + if (size * 2 < dev_priv->gtt.stolen_usable_size) 147 + obj = i915_gem_object_create_stolen(dev, size); 144 148 if (obj == NULL) 145 149 obj = i915_gem_alloc_object(dev, size); 146 150 if (!obj) {
+2
drivers/gpu/drm/i915/intel_guc.h
··· 110 110 extern int intel_guc_ucode_load(struct drm_device *dev); 111 111 extern void intel_guc_ucode_fini(struct drm_device *dev); 112 112 extern const char *intel_guc_fw_status_repr(enum intel_guc_fw_status status); 113 + extern int intel_guc_suspend(struct drm_device *dev); 114 + extern int intel_guc_resume(struct drm_device *dev); 113 115 114 116 /* i915_guc_submission.c */ 115 117 int i915_guc_submission_init(struct drm_device *dev);
+11
drivers/gpu/drm/i915/intel_guc_fwif.h
··· 218 218 u64 desc_private; 219 219 } __packed; 220 220 221 + #define GUC_FORCEWAKE_RENDER (1 << 0) 222 + #define GUC_FORCEWAKE_MEDIA (1 << 1) 223 + 224 + #define GUC_POWER_UNSPECIFIED 0 225 + #define GUC_POWER_D0 1 226 + #define GUC_POWER_D1 2 227 + #define GUC_POWER_D2 3 228 + #define GUC_POWER_D3 4 229 + 221 230 /* This Action will be programmed in C180 - SOFT_SCRATCH_O_REG */ 222 231 enum host2guc_action { 223 232 HOST2GUC_ACTION_DEFAULT = 0x0, 224 233 HOST2GUC_ACTION_SAMPLE_FORCEWAKE = 0x6, 225 234 HOST2GUC_ACTION_ALLOCATE_DOORBELL = 0x10, 226 235 HOST2GUC_ACTION_DEALLOCATE_DOORBELL = 0x20, 236 + HOST2GUC_ACTION_ENTER_S_STATE = 0x501, 237 + HOST2GUC_ACTION_EXIT_S_STATE = 0x502, 227 238 HOST2GUC_ACTION_SLPC_REQUEST = 0x3003, 228 239 HOST2GUC_ACTION_LIMIT 229 240 };
+7 -14
drivers/gpu/drm/i915/intel_guc_loader.c
··· 90 90 for_each_ring(ring, dev_priv, i) 91 91 I915_WRITE(RING_MODE_GEN7(ring), irqs); 92 92 93 - /* tell DE to send nothing to GuC */ 94 - I915_WRITE(DE_GUCRMR, ~0); 95 - 96 93 /* route all GT interrupts to the host */ 97 94 I915_WRITE(GUC_BCS_RCS_IER, 0); 98 95 I915_WRITE(GUC_VCS2_VCS1_IER, 0); ··· 106 109 irqs |= _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING); 107 110 for_each_ring(ring, dev_priv, i) 108 111 I915_WRITE(RING_MODE_GEN7(ring), irqs); 109 - 110 - /* tell DE to send (all) flip_done to GuC */ 111 - irqs = DERRMR_PIPEA_PRI_FLIP_DONE | DERRMR_PIPEA_SPR_FLIP_DONE | 112 - DERRMR_PIPEB_PRI_FLIP_DONE | DERRMR_PIPEB_SPR_FLIP_DONE | 113 - DERRMR_PIPEC_PRI_FLIP_DONE | DERRMR_PIPEC_SPR_FLIP_DONE; 114 - /* Unmasked bits will cause GuC response message to be sent */ 115 - I915_WRITE(DE_GUCRMR, ~irqs); 116 112 117 113 /* route USER_INTERRUPT to Host, all others are sent to GuC. */ 118 114 irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT | ··· 199 209 u32 *status) 200 210 { 201 211 u32 val = I915_READ(GUC_STATUS); 212 + u32 uk_val = val & GS_UKERNEL_MASK; 202 213 *status = val; 203 - return ((val & GS_UKERNEL_MASK) == GS_UKERNEL_READY || 204 - (val & GS_UKERNEL_MASK) == GS_UKERNEL_LAPIC_DONE); 214 + return (uk_val == GS_UKERNEL_READY || 215 + ((val & GS_MIA_CORE_STATE) && uk_val == GS_UKERNEL_LAPIC_DONE)); 205 216 } 206 217 207 218 /* ··· 248 257 /* Copy RSA signature from the fw image to HW for verification */ 249 258 sg_pcopy_to_buffer(sg->sgl, sg->nents, rsa, UOS_RSA_SIG_SIZE, offset); 250 259 for (i = 0; i < UOS_RSA_SIG_SIZE / sizeof(u32); i++) 251 - I915_WRITE(UOS_RSA_SCRATCH_0 + i * sizeof(u32), rsa[i]); 260 + I915_WRITE(UOS_RSA_SCRATCH(i), rsa[i]); 252 261 253 262 /* Set the source address for the new blob */ 254 263 offset = i915_gem_obj_ggtt_offset(fw_obj); ··· 383 392 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 384 393 385 394 direct_interrupts_to_host(dev_priv); 386 - i915_guc_submission_disable(dev); 387 395 388 396 if 
(guc_fw->guc_fw_fetch_status == GUC_FIRMWARE_NONE) 389 397 return 0; ··· 432 442 intel_guc_fw_status_repr(guc_fw->guc_fw_load_status)); 433 443 434 444 if (i915.enable_guc_submission) { 445 + /* The execbuf_client will be recreated. Release it first. */ 446 + i915_guc_submission_disable(dev); 447 + 435 448 err = i915_guc_submission_enable(dev); 436 449 if (err) 437 450 goto fail;
+14 -31
drivers/gpu/drm/i915/intel_hdmi.c
··· 447 447 } 448 448 449 449 static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder, 450 - struct drm_display_mode *adjusted_mode) 450 + const struct drm_display_mode *adjusted_mode) 451 451 { 452 452 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); 453 453 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); 454 454 union hdmi_infoframe frame; 455 455 int ret; 456 - 457 - /* Set user selected PAR to incoming mode's member */ 458 - adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio; 459 456 460 457 ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi, 461 458 adjusted_mode); ··· 491 494 492 495 static void 493 496 intel_hdmi_set_hdmi_infoframe(struct drm_encoder *encoder, 494 - struct drm_display_mode *adjusted_mode) 497 + const struct drm_display_mode *adjusted_mode) 495 498 { 496 499 union hdmi_infoframe frame; 497 500 int ret; ··· 506 509 507 510 static void g4x_set_infoframes(struct drm_encoder *encoder, 508 511 bool enable, 509 - struct drm_display_mode *adjusted_mode) 512 + const struct drm_display_mode *adjusted_mode) 510 513 { 511 514 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 512 515 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); ··· 658 661 659 662 static void ibx_set_infoframes(struct drm_encoder *encoder, 660 663 bool enable, 661 - struct drm_display_mode *adjusted_mode) 664 + const struct drm_display_mode *adjusted_mode) 662 665 { 663 666 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 664 667 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); ··· 710 713 711 714 static void cpt_set_infoframes(struct drm_encoder *encoder, 712 715 bool enable, 713 - struct drm_display_mode *adjusted_mode) 716 + const struct drm_display_mode *adjusted_mode) 714 717 { 715 718 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 716 719 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); ··· 752 755 753 756 static void 
vlv_set_infoframes(struct drm_encoder *encoder, 754 757 bool enable, 755 - struct drm_display_mode *adjusted_mode) 758 + const struct drm_display_mode *adjusted_mode) 756 759 { 757 760 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 758 761 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder); ··· 804 807 805 808 static void hsw_set_infoframes(struct drm_encoder *encoder, 806 809 bool enable, 807 - struct drm_display_mode *adjusted_mode) 810 + const struct drm_display_mode *adjusted_mode) 808 811 { 809 812 struct drm_i915_private *dev_priv = encoder->dev->dev_private; 810 813 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc); ··· 841 844 struct drm_i915_private *dev_priv = dev->dev_private; 842 845 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 843 846 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 844 - struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 847 + const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 845 848 u32 hdmi_val; 846 849 847 850 hdmi_val = SDVO_ENCODING_HDMI; ··· 1309 1312 return false; 1310 1313 } 1311 1314 1315 + /* Set user selected PAR to incoming mode's member */ 1316 + adjusted_mode->picture_aspect_ratio = intel_hdmi->aspect_ratio; 1317 + 1312 1318 return true; 1313 1319 } 1314 1320 ··· 1537 1537 { 1538 1538 struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base); 1539 1539 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc); 1540 - struct drm_display_mode *adjusted_mode = 1541 - &intel_crtc->config->base.adjusted_mode; 1540 + const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1542 1541 1543 1542 intel_hdmi_prepare(encoder); 1544 1543 ··· 1554 1555 struct drm_i915_private *dev_priv = dev->dev_private; 1555 1556 struct intel_crtc *intel_crtc = 1556 1557 to_intel_crtc(encoder->base.crtc); 1557 - struct drm_display_mode *adjusted_mode = 1558 - 
&intel_crtc->config->base.adjusted_mode; 1558 + const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1559 1559 enum dpio_channel port = vlv_dport_to_channel(dport); 1560 1560 int pipe = intel_crtc->pipe; 1561 1561 u32 val; ··· 1820 1822 struct drm_i915_private *dev_priv = dev->dev_private; 1821 1823 struct intel_crtc *intel_crtc = 1822 1824 to_intel_crtc(encoder->base.crtc); 1823 - struct drm_display_mode *adjusted_mode = 1824 - &intel_crtc->config->base.adjusted_mode; 1825 + const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 1825 1826 enum dpio_channel ch = vlv_dport_to_channel(dport); 1826 1827 int pipe = intel_crtc->pipe; 1827 1828 int data, i, stagger; ··· 1952 1955 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3; 1953 1956 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val); 1954 1957 1955 - /* LRC Bypass */ 1956 - val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30); 1957 - val |= DPIO_LRC_BYPASS; 1958 - vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val); 1959 - 1960 1958 mutex_unlock(&dev_priv->sb_lock); 1961 1959 1962 1960 intel_hdmi->set_infoframes(&encoder->base, ··· 1997 2005 static const struct drm_encoder_funcs intel_hdmi_enc_funcs = { 1998 2006 .destroy = intel_encoder_destroy, 1999 2007 }; 2000 - 2001 - static void 2002 - intel_attach_aspect_ratio_property(struct drm_connector *connector) 2003 - { 2004 - if (!drm_mode_create_aspect_ratio_property(connector->dev)) 2005 - drm_object_attach_property(&connector->base, 2006 - connector->dev->mode_config.aspect_ratio_property, 2007 - DRM_MODE_PICTURE_ASPECT_NONE); 2008 - } 2009 2008 2010 2009 static void 2011 2010 intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
-15
drivers/gpu/drm/i915/intel_lrc.c
··· 904 904 return -EINVAL; 905 905 } 906 906 907 - if (args->num_cliprects != 0) { 908 - DRM_DEBUG("clip rectangles are only valid on pre-gen5\n"); 909 - return -EINVAL; 910 - } else { 911 - if (args->DR4 == 0xffffffff) { 912 - DRM_DEBUG("UXA submitting garbage DR4, fixing up\n"); 913 - args->DR4 = 0; 914 - } 915 - 916 - if (args->DR1 || args->DR4 || args->cliprects_ptr) { 917 - DRM_DEBUG("0 cliprects but dirt in cliprects fields\n"); 918 - return -EINVAL; 919 - } 920 - } 921 - 922 907 if (args->flags & I915_EXEC_GEN7_SOL_RESET) { 923 908 DRM_DEBUG("sol reset is gen7 only\n"); 924 909 return -EINVAL;
+1 -2
drivers/gpu/drm/i915/intel_lvds.c
··· 139 139 struct drm_device *dev = encoder->base.dev; 140 140 struct drm_i915_private *dev_priv = dev->dev_private; 141 141 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc); 142 - const struct drm_display_mode *adjusted_mode = 143 - &crtc->config->base.adjusted_mode; 142 + const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 144 143 int pipe = crtc->pipe; 145 144 u32 temp; 146 145
+9
drivers/gpu/drm/i915/intel_modes.c
··· 126 126 127 127 drm_object_attach_property(&connector->base, prop, 0); 128 128 } 129 + 130 + void 131 + intel_attach_aspect_ratio_property(struct drm_connector *connector) 132 + { 133 + if (!drm_mode_create_aspect_ratio_property(connector->dev)) 134 + drm_object_attach_property(&connector->base, 135 + connector->dev->mode_config.aspect_ratio_property, 136 + DRM_MODE_PICTURE_ASPECT_NONE); 137 + }
+7 -2
drivers/gpu/drm/i915/intel_opregion.c
··· 341 341 if (!HAS_DDI(dev)) 342 342 return 0; 343 343 344 - port = intel_ddi_get_encoder_port(intel_encoder); 345 - if (port == PORT_E) { 344 + if (intel_encoder->type == INTEL_OUTPUT_DSI) 345 + port = 0; 346 + else 347 + port = intel_ddi_get_encoder_port(intel_encoder); 348 + 349 + if (port == PORT_E) { 346 350 port = 0; 347 351 } else { 348 352 parm |= 1 << port; ··· 367 363 type = DISPLAY_TYPE_EXTERNAL_FLAT_PANEL; 368 364 break; 369 365 case INTEL_OUTPUT_EDP: 366 + case INTEL_OUTPUT_DSI: 370 367 type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL; 371 368 break; 372 369 default:
+178 -123
drivers/gpu/drm/i915/intel_panel.c
··· 105 105 struct intel_crtc_state *pipe_config, 106 106 int fitting_mode) 107 107 { 108 - struct drm_display_mode *adjusted_mode; 109 - int x, y, width, height; 110 - 111 - adjusted_mode = &pipe_config->base.adjusted_mode; 112 - 113 - x = y = width = height = 0; 108 + const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 109 + int x = 0, y = 0, width = 0, height = 0; 114 110 115 111 /* Native modes don't need fitting */ 116 - if (adjusted_mode->hdisplay == pipe_config->pipe_src_w && 117 - adjusted_mode->vdisplay == pipe_config->pipe_src_h) 112 + if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w && 113 + adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h) 118 114 goto done; 119 115 120 116 switch (fitting_mode) { 121 117 case DRM_MODE_SCALE_CENTER: 122 118 width = pipe_config->pipe_src_w; 123 119 height = pipe_config->pipe_src_h; 124 - x = (adjusted_mode->hdisplay - width + 1)/2; 125 - y = (adjusted_mode->vdisplay - height + 1)/2; 120 + x = (adjusted_mode->crtc_hdisplay - width + 1)/2; 121 + y = (adjusted_mode->crtc_vdisplay - height + 1)/2; 126 122 break; 127 123 128 124 case DRM_MODE_SCALE_ASPECT: 129 125 /* Scale but preserve the aspect ratio */ 130 126 { 131 - u32 scaled_width = adjusted_mode->hdisplay 127 + u32 scaled_width = adjusted_mode->crtc_hdisplay 132 128 * pipe_config->pipe_src_h; 133 129 u32 scaled_height = pipe_config->pipe_src_w 134 - * adjusted_mode->vdisplay; 130 + * adjusted_mode->crtc_vdisplay; 135 131 if (scaled_width > scaled_height) { /* pillar */ 136 132 width = scaled_height / pipe_config->pipe_src_h; 137 133 if (width & 1) 138 134 width++; 139 - x = (adjusted_mode->hdisplay - width + 1) / 2; 135 + x = (adjusted_mode->crtc_hdisplay - width + 1) / 2; 140 136 y = 0; 141 - height = adjusted_mode->vdisplay; 137 + height = adjusted_mode->crtc_vdisplay; 142 138 } else if (scaled_width < scaled_height) { /* letter */ 143 139 height = scaled_width / pipe_config->pipe_src_w; 144 140 if (height & 1) 145 141 
height++; 146 - y = (adjusted_mode->vdisplay - height + 1) / 2; 142 + y = (adjusted_mode->crtc_vdisplay - height + 1) / 2; 147 143 x = 0; 148 - width = adjusted_mode->hdisplay; 144 + width = adjusted_mode->crtc_hdisplay; 149 145 } else { 150 146 x = y = 0; 151 - width = adjusted_mode->hdisplay; 152 - height = adjusted_mode->vdisplay; 147 + width = adjusted_mode->crtc_hdisplay; 148 + height = adjusted_mode->crtc_vdisplay; 153 149 } 154 150 } 155 151 break; 156 152 157 153 case DRM_MODE_SCALE_FULLSCREEN: 158 154 x = y = 0; 159 - width = adjusted_mode->hdisplay; 160 - height = adjusted_mode->vdisplay; 155 + width = adjusted_mode->crtc_hdisplay; 156 + height = adjusted_mode->crtc_vdisplay; 161 157 break; 162 158 163 159 default: ··· 168 172 } 169 173 170 174 static void 171 - centre_horizontally(struct drm_display_mode *mode, 175 + centre_horizontally(struct drm_display_mode *adjusted_mode, 172 176 int width) 173 177 { 174 178 u32 border, sync_pos, blank_width, sync_width; 175 179 176 180 /* keep the hsync and hblank widths constant */ 177 - sync_width = mode->crtc_hsync_end - mode->crtc_hsync_start; 178 - blank_width = mode->crtc_hblank_end - mode->crtc_hblank_start; 181 + sync_width = adjusted_mode->crtc_hsync_end - adjusted_mode->crtc_hsync_start; 182 + blank_width = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start; 179 183 sync_pos = (blank_width - sync_width + 1) / 2; 180 184 181 - border = (mode->hdisplay - width + 1) / 2; 185 + border = (adjusted_mode->crtc_hdisplay - width + 1) / 2; 182 186 border += border & 1; /* make the border even */ 183 187 184 - mode->crtc_hdisplay = width; 185 - mode->crtc_hblank_start = width + border; 186 - mode->crtc_hblank_end = mode->crtc_hblank_start + blank_width; 188 + adjusted_mode->crtc_hdisplay = width; 189 + adjusted_mode->crtc_hblank_start = width + border; 190 + adjusted_mode->crtc_hblank_end = adjusted_mode->crtc_hblank_start + blank_width; 187 191 188 - mode->crtc_hsync_start = mode->crtc_hblank_start + 
sync_pos; 189 - mode->crtc_hsync_end = mode->crtc_hsync_start + sync_width; 192 + adjusted_mode->crtc_hsync_start = adjusted_mode->crtc_hblank_start + sync_pos; 193 + adjusted_mode->crtc_hsync_end = adjusted_mode->crtc_hsync_start + sync_width; 190 194 } 191 195 192 196 static void 193 - centre_vertically(struct drm_display_mode *mode, 197 + centre_vertically(struct drm_display_mode *adjusted_mode, 194 198 int height) 195 199 { 196 200 u32 border, sync_pos, blank_width, sync_width; 197 201 198 202 /* keep the vsync and vblank widths constant */ 199 - sync_width = mode->crtc_vsync_end - mode->crtc_vsync_start; 200 - blank_width = mode->crtc_vblank_end - mode->crtc_vblank_start; 203 + sync_width = adjusted_mode->crtc_vsync_end - adjusted_mode->crtc_vsync_start; 204 + blank_width = adjusted_mode->crtc_vblank_end - adjusted_mode->crtc_vblank_start; 201 205 sync_pos = (blank_width - sync_width + 1) / 2; 202 206 203 - border = (mode->vdisplay - height + 1) / 2; 207 + border = (adjusted_mode->crtc_vdisplay - height + 1) / 2; 204 208 205 - mode->crtc_vdisplay = height; 206 - mode->crtc_vblank_start = height + border; 207 - mode->crtc_vblank_end = mode->crtc_vblank_start + blank_width; 209 + adjusted_mode->crtc_vdisplay = height; 210 + adjusted_mode->crtc_vblank_start = height + border; 211 + adjusted_mode->crtc_vblank_end = adjusted_mode->crtc_vblank_start + blank_width; 208 212 209 - mode->crtc_vsync_start = mode->crtc_vblank_start + sync_pos; 210 - mode->crtc_vsync_end = mode->crtc_vsync_start + sync_width; 213 + adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vblank_start + sync_pos; 214 + adjusted_mode->crtc_vsync_end = adjusted_mode->crtc_vsync_start + sync_width; 211 215 } 212 216 213 217 static inline u32 panel_fitter_scaling(u32 source, u32 target) ··· 226 230 static void i965_scale_aspect(struct intel_crtc_state *pipe_config, 227 231 u32 *pfit_control) 228 232 { 229 - struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 230 - u32 
scaled_width = adjusted_mode->hdisplay * 233 + const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 234 + u32 scaled_width = adjusted_mode->crtc_hdisplay * 231 235 pipe_config->pipe_src_h; 232 236 u32 scaled_height = pipe_config->pipe_src_w * 233 - adjusted_mode->vdisplay; 237 + adjusted_mode->crtc_vdisplay; 234 238 235 239 /* 965+ is easy, it does everything in hw */ 236 240 if (scaled_width > scaled_height) ··· 239 243 else if (scaled_width < scaled_height) 240 244 *pfit_control |= PFIT_ENABLE | 241 245 PFIT_SCALING_LETTER; 242 - else if (adjusted_mode->hdisplay != pipe_config->pipe_src_w) 246 + else if (adjusted_mode->crtc_hdisplay != pipe_config->pipe_src_w) 243 247 *pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO; 244 248 } 245 249 ··· 248 252 u32 *border) 249 253 { 250 254 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 251 - u32 scaled_width = adjusted_mode->hdisplay * 255 + u32 scaled_width = adjusted_mode->crtc_hdisplay * 252 256 pipe_config->pipe_src_h; 253 257 u32 scaled_height = pipe_config->pipe_src_w * 254 - adjusted_mode->vdisplay; 258 + adjusted_mode->crtc_vdisplay; 255 259 u32 bits; 256 260 257 261 /* ··· 265 269 pipe_config->pipe_src_h); 266 270 267 271 *border = LVDS_BORDER_ENABLE; 268 - if (pipe_config->pipe_src_h != adjusted_mode->vdisplay) { 272 + if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay) { 269 273 bits = panel_fitter_scaling(pipe_config->pipe_src_h, 270 - adjusted_mode->vdisplay); 274 + adjusted_mode->crtc_vdisplay); 271 275 272 276 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | 273 277 bits << PFIT_VERT_SCALE_SHIFT); ··· 281 285 pipe_config->pipe_src_w); 282 286 283 287 *border = LVDS_BORDER_ENABLE; 284 - if (pipe_config->pipe_src_w != adjusted_mode->hdisplay) { 288 + if (pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) { 285 289 bits = panel_fitter_scaling(pipe_config->pipe_src_w, 286 - adjusted_mode->hdisplay); 290 + adjusted_mode->crtc_hdisplay); 287 
291 288 292 *pfit_pgm_ratios |= (bits << PFIT_HORIZ_SCALE_SHIFT | 289 293 bits << PFIT_VERT_SCALE_SHIFT); ··· 306 310 { 307 311 struct drm_device *dev = intel_crtc->base.dev; 308 312 u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0; 309 - struct drm_display_mode *adjusted_mode; 310 - 311 - adjusted_mode = &pipe_config->base.adjusted_mode; 313 + struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 312 314 313 315 /* Native modes don't need fitting */ 314 - if (adjusted_mode->hdisplay == pipe_config->pipe_src_w && 315 - adjusted_mode->vdisplay == pipe_config->pipe_src_h) 316 + if (adjusted_mode->crtc_hdisplay == pipe_config->pipe_src_w && 317 + adjusted_mode->crtc_vdisplay == pipe_config->pipe_src_h) 316 318 goto out; 317 319 318 320 switch (fitting_mode) { ··· 336 342 * Full scaling, even if it changes the aspect ratio. 337 343 * Fortunately this is all done for us in hw. 338 344 */ 339 - if (pipe_config->pipe_src_h != adjusted_mode->vdisplay || 340 - pipe_config->pipe_src_w != adjusted_mode->hdisplay) { 345 + if (pipe_config->pipe_src_h != adjusted_mode->crtc_vdisplay || 346 + pipe_config->pipe_src_w != adjusted_mode->crtc_hdisplay) { 341 347 pfit_control |= PFIT_ENABLE; 342 348 if (INTEL_INFO(dev)->gen >= 4) 343 349 pfit_control |= PFIT_SCALING_AUTO; ··· 536 542 static u32 bxt_get_backlight(struct intel_connector *connector) 537 543 { 538 544 struct drm_device *dev = connector->base.dev; 545 + struct intel_panel *panel = &connector->panel; 539 546 struct drm_i915_private *dev_priv = dev->dev_private; 540 547 541 - return I915_READ(BXT_BLC_PWM_DUTY1); 548 + return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller)); 542 549 } 543 550 544 551 static u32 pwm_get_backlight(struct intel_connector *connector) ··· 561 566 mutex_lock(&dev_priv->backlight_lock); 562 567 563 568 if (panel->backlight.enabled) { 564 - val = dev_priv->display.get_backlight(connector); 569 + val = panel->backlight.get(connector); 565 570 val = 
intel_panel_compute_brightness(connector, val); 566 571 } 567 572 ··· 635 640 { 636 641 struct drm_device *dev = connector->base.dev; 637 642 struct drm_i915_private *dev_priv = dev->dev_private; 643 + struct intel_panel *panel = &connector->panel; 638 644 639 - I915_WRITE(BXT_BLC_PWM_DUTY1, level); 645 + I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level); 640 646 } 641 647 642 648 static void pwm_set_backlight(struct intel_connector *connector, u32 level) ··· 651 655 static void 652 656 intel_panel_actually_set_backlight(struct intel_connector *connector, u32 level) 653 657 { 654 - struct drm_device *dev = connector->base.dev; 655 - struct drm_i915_private *dev_priv = dev->dev_private; 658 + struct intel_panel *panel = &connector->panel; 656 659 657 660 DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level); 658 661 659 662 level = intel_panel_compute_brightness(connector, level); 660 - dev_priv->display.set_backlight(connector, level); 663 + panel->backlight.set(connector, level); 661 664 } 662 665 663 666 /* set backlight brightness to level in range [0..max], scaling wrt hw min */ ··· 788 793 { 789 794 struct drm_device *dev = connector->base.dev; 790 795 struct drm_i915_private *dev_priv = dev->dev_private; 791 - u32 tmp; 796 + struct intel_panel *panel = &connector->panel; 797 + u32 tmp, val; 792 798 793 799 intel_panel_actually_set_backlight(connector, 0); 794 800 795 - tmp = I915_READ(BXT_BLC_PWM_CTL1); 796 - I915_WRITE(BXT_BLC_PWM_CTL1, tmp & ~BXT_BLC_PWM_ENABLE); 801 + tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); 802 + I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), 803 + tmp & ~BXT_BLC_PWM_ENABLE); 804 + 805 + if (panel->backlight.controller == 1) { 806 + val = I915_READ(UTIL_PIN_CTL); 807 + val &= ~UTIL_PIN_ENABLE; 808 + I915_WRITE(UTIL_PIN_CTL, val); 809 + } 797 810 } 798 811 799 812 static void pwm_disable_backlight(struct intel_connector *connector) ··· 839 836 if (panel->backlight.device) 840 837 
panel->backlight.device->props.power = FB_BLANK_POWERDOWN; 841 838 panel->backlight.enabled = false; 842 - dev_priv->display.disable_backlight(connector); 839 + panel->backlight.disable(connector); 843 840 844 841 mutex_unlock(&dev_priv->backlight_lock); 845 842 } ··· 1033 1030 struct drm_device *dev = connector->base.dev; 1034 1031 struct drm_i915_private *dev_priv = dev->dev_private; 1035 1032 struct intel_panel *panel = &connector->panel; 1036 - u32 pwm_ctl; 1033 + enum pipe pipe = intel_get_pipe_from_connector(connector); 1034 + u32 pwm_ctl, val; 1037 1035 1038 - pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1); 1036 + /* To use 2nd set of backlight registers, utility pin has to be 1037 + * enabled with PWM mode. 1038 + * The field should only be changed when the utility pin is disabled 1039 + */ 1040 + if (panel->backlight.controller == 1) { 1041 + val = I915_READ(UTIL_PIN_CTL); 1042 + if (val & UTIL_PIN_ENABLE) { 1043 + DRM_DEBUG_KMS("util pin already enabled\n"); 1044 + val &= ~UTIL_PIN_ENABLE; 1045 + I915_WRITE(UTIL_PIN_CTL, val); 1046 + } 1047 + 1048 + val = 0; 1049 + if (panel->backlight.util_pin_active_low) 1050 + val |= UTIL_PIN_POLARITY; 1051 + I915_WRITE(UTIL_PIN_CTL, val | UTIL_PIN_PIPE(pipe) | 1052 + UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE); 1053 + } 1054 + 1055 + pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); 1039 1056 if (pwm_ctl & BXT_BLC_PWM_ENABLE) { 1040 1057 DRM_DEBUG_KMS("backlight already enabled\n"); 1041 1058 pwm_ctl &= ~BXT_BLC_PWM_ENABLE; 1042 - I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl); 1059 + I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), 1060 + pwm_ctl); 1043 1061 } 1044 1062 1045 - I915_WRITE(BXT_BLC_PWM_FREQ1, panel->backlight.max); 1063 + I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller), 1064 + panel->backlight.max); 1046 1065 1047 1066 intel_panel_actually_set_backlight(connector, panel->backlight.level); 1048 1067 ··· 1072 1047 if (panel->backlight.active_low_pwm) 1073 1048 pwm_ctl |= BXT_BLC_PWM_POLARITY; 
1074 1049 1075 - I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl); 1076 - POSTING_READ(BXT_BLC_PWM_CTL1); 1077 - I915_WRITE(BXT_BLC_PWM_CTL1, pwm_ctl | BXT_BLC_PWM_ENABLE); 1050 + I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl); 1051 + POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); 1052 + I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), 1053 + pwm_ctl | BXT_BLC_PWM_ENABLE); 1078 1054 } 1079 1055 1080 1056 static void pwm_enable_backlight(struct intel_connector *connector) ··· 1111 1085 panel->backlight.device->props.max_brightness); 1112 1086 } 1113 1087 1114 - dev_priv->display.enable_backlight(connector); 1088 + panel->backlight.enable(connector); 1115 1089 panel->backlight.enabled = true; 1116 1090 if (panel->backlight.device) 1117 1091 panel->backlight.device->props.power = FB_BLANK_UNBLANK; ··· 1139 1113 * callback needs to take this into account. 1140 1114 */ 1141 1115 if (panel->backlight.enabled) { 1142 - if (panel->backlight_power) { 1116 + if (panel->backlight.power) { 1143 1117 bool enable = bd->props.power == FB_BLANK_UNBLANK && 1144 1118 bd->props.brightness != 0; 1145 - panel->backlight_power(connector, enable); 1119 + panel->backlight.power(connector, enable); 1146 1120 } 1147 1121 } else { 1148 1122 bd->props.power = FB_BLANK_POWERDOWN; ··· 1367 1341 { 1368 1342 struct drm_device *dev = connector->base.dev; 1369 1343 struct drm_i915_private *dev_priv = dev->dev_private; 1344 + struct intel_panel *panel = &connector->panel; 1370 1345 u16 pwm_freq_hz = dev_priv->vbt.backlight.pwm_freq_hz; 1371 1346 u32 pwm; 1372 1347 ··· 1376 1349 return 0; 1377 1350 } 1378 1351 1379 - if (!dev_priv->display.backlight_hz_to_pwm) { 1352 + if (!panel->backlight.hz_to_pwm) { 1380 1353 DRM_DEBUG_KMS("backlight frequency setting from VBT currently not supported on this platform\n"); 1381 1354 return 0; 1382 1355 } 1383 1356 1384 - pwm = dev_priv->display.backlight_hz_to_pwm(connector, pwm_freq_hz); 1357 + pwm = 
panel->backlight.hz_to_pwm(connector, pwm_freq_hz); 1385 1358 if (!pwm) { 1386 1359 DRM_DEBUG_KMS("backlight frequency conversion failed\n"); 1387 1360 return 0; ··· 1595 1568 struct intel_panel *panel = &connector->panel; 1596 1569 u32 pwm_ctl, val; 1597 1570 1598 - pwm_ctl = I915_READ(BXT_BLC_PWM_CTL1); 1599 - panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; 1571 + /* 1572 + * For BXT hard coding the Backlight controller to 0. 1573 + * TODO : Read the controller value from VBT and generalize 1574 + */ 1575 + panel->backlight.controller = 0; 1600 1576 1601 - panel->backlight.max = I915_READ(BXT_BLC_PWM_FREQ1); 1577 + pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller)); 1578 + 1579 + /* Keeping the check if controller 1 is to be programmed. 1580 + * This will come into affect once the VBT parsing 1581 + * is fixed for controller selection, and controller 1 is used 1582 + * for a prticular display configuration. 1583 + */ 1584 + if (panel->backlight.controller == 1) { 1585 + val = I915_READ(UTIL_PIN_CTL); 1586 + panel->backlight.util_pin_active_low = 1587 + val & UTIL_PIN_POLARITY; 1588 + } 1589 + 1590 + panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY; 1591 + panel->backlight.max = 1592 + I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller)); 1602 1593 1603 1594 if (!panel->backlight.max) 1604 1595 panel->backlight.max = get_backlight_max_vbt(connector); ··· 1684 1639 } 1685 1640 } 1686 1641 1642 + /* ensure intel_panel has been initialized first */ 1643 + if (WARN_ON(!panel->backlight.setup)) 1644 + return -ENODEV; 1645 + 1687 1646 /* set level and max in panel struct */ 1688 1647 mutex_lock(&dev_priv->backlight_lock); 1689 - ret = dev_priv->display.setup_backlight(intel_connector, pipe); 1648 + ret = panel->backlight.setup(intel_connector, pipe); 1690 1649 mutex_unlock(&dev_priv->backlight_lock); 1691 1650 1692 1651 if (ret) { ··· 1722 1673 } 1723 1674 1724 1675 /* Set up chip specific backlight functions */ 1725 
- void intel_panel_init_backlight_funcs(struct drm_device *dev) 1676 + static void 1677 + intel_panel_init_backlight_funcs(struct intel_panel *panel) 1726 1678 { 1679 + struct intel_connector *intel_connector = 1680 + container_of(panel, struct intel_connector, panel); 1681 + struct drm_device *dev = intel_connector->base.dev; 1727 1682 struct drm_i915_private *dev_priv = dev->dev_private; 1728 1683 1729 1684 if (IS_BROXTON(dev)) { 1730 - dev_priv->display.setup_backlight = bxt_setup_backlight; 1731 - dev_priv->display.enable_backlight = bxt_enable_backlight; 1732 - dev_priv->display.disable_backlight = bxt_disable_backlight; 1733 - dev_priv->display.set_backlight = bxt_set_backlight; 1734 - dev_priv->display.get_backlight = bxt_get_backlight; 1685 + panel->backlight.setup = bxt_setup_backlight; 1686 + panel->backlight.enable = bxt_enable_backlight; 1687 + panel->backlight.disable = bxt_disable_backlight; 1688 + panel->backlight.set = bxt_set_backlight; 1689 + panel->backlight.get = bxt_get_backlight; 1735 1690 } else if (HAS_PCH_LPT(dev) || HAS_PCH_SPT(dev)) { 1736 - dev_priv->display.setup_backlight = lpt_setup_backlight; 1737 - dev_priv->display.enable_backlight = lpt_enable_backlight; 1738 - dev_priv->display.disable_backlight = lpt_disable_backlight; 1739 - dev_priv->display.set_backlight = lpt_set_backlight; 1740 - dev_priv->display.get_backlight = lpt_get_backlight; 1691 + panel->backlight.setup = lpt_setup_backlight; 1692 + panel->backlight.enable = lpt_enable_backlight; 1693 + panel->backlight.disable = lpt_disable_backlight; 1694 + panel->backlight.set = lpt_set_backlight; 1695 + panel->backlight.get = lpt_get_backlight; 1741 1696 if (HAS_PCH_LPT(dev)) 1742 - dev_priv->display.backlight_hz_to_pwm = lpt_hz_to_pwm; 1697 + panel->backlight.hz_to_pwm = lpt_hz_to_pwm; 1743 1698 else 1744 - dev_priv->display.backlight_hz_to_pwm = spt_hz_to_pwm; 1699 + panel->backlight.hz_to_pwm = spt_hz_to_pwm; 1745 1700 } else if (HAS_PCH_SPLIT(dev)) { 1746 - 
dev_priv->display.setup_backlight = pch_setup_backlight; 1747 - dev_priv->display.enable_backlight = pch_enable_backlight; 1748 - dev_priv->display.disable_backlight = pch_disable_backlight; 1749 - dev_priv->display.set_backlight = pch_set_backlight; 1750 - dev_priv->display.get_backlight = pch_get_backlight; 1751 - dev_priv->display.backlight_hz_to_pwm = pch_hz_to_pwm; 1701 + panel->backlight.setup = pch_setup_backlight; 1702 + panel->backlight.enable = pch_enable_backlight; 1703 + panel->backlight.disable = pch_disable_backlight; 1704 + panel->backlight.set = pch_set_backlight; 1705 + panel->backlight.get = pch_get_backlight; 1706 + panel->backlight.hz_to_pwm = pch_hz_to_pwm; 1752 1707 } else if (IS_VALLEYVIEW(dev)) { 1753 1708 if (dev_priv->vbt.has_mipi) { 1754 - dev_priv->display.setup_backlight = pwm_setup_backlight; 1755 - dev_priv->display.enable_backlight = pwm_enable_backlight; 1756 - dev_priv->display.disable_backlight = pwm_disable_backlight; 1757 - dev_priv->display.set_backlight = pwm_set_backlight; 1758 - dev_priv->display.get_backlight = pwm_get_backlight; 1709 + panel->backlight.setup = pwm_setup_backlight; 1710 + panel->backlight.enable = pwm_enable_backlight; 1711 + panel->backlight.disable = pwm_disable_backlight; 1712 + panel->backlight.set = pwm_set_backlight; 1713 + panel->backlight.get = pwm_get_backlight; 1759 1714 } else { 1760 - dev_priv->display.setup_backlight = vlv_setup_backlight; 1761 - dev_priv->display.enable_backlight = vlv_enable_backlight; 1762 - dev_priv->display.disable_backlight = vlv_disable_backlight; 1763 - dev_priv->display.set_backlight = vlv_set_backlight; 1764 - dev_priv->display.get_backlight = vlv_get_backlight; 1765 - dev_priv->display.backlight_hz_to_pwm = vlv_hz_to_pwm; 1715 + panel->backlight.setup = vlv_setup_backlight; 1716 + panel->backlight.enable = vlv_enable_backlight; 1717 + panel->backlight.disable = vlv_disable_backlight; 1718 + panel->backlight.set = vlv_set_backlight; 1719 + panel->backlight.get = 
vlv_get_backlight; 1720 + panel->backlight.hz_to_pwm = vlv_hz_to_pwm; 1766 1721 } 1767 1722 } else if (IS_GEN4(dev)) { 1768 - dev_priv->display.setup_backlight = i965_setup_backlight; 1769 - dev_priv->display.enable_backlight = i965_enable_backlight; 1770 - dev_priv->display.disable_backlight = i965_disable_backlight; 1771 - dev_priv->display.set_backlight = i9xx_set_backlight; 1772 - dev_priv->display.get_backlight = i9xx_get_backlight; 1773 - dev_priv->display.backlight_hz_to_pwm = i965_hz_to_pwm; 1723 + panel->backlight.setup = i965_setup_backlight; 1724 + panel->backlight.enable = i965_enable_backlight; 1725 + panel->backlight.disable = i965_disable_backlight; 1726 + panel->backlight.set = i9xx_set_backlight; 1727 + panel->backlight.get = i9xx_get_backlight; 1728 + panel->backlight.hz_to_pwm = i965_hz_to_pwm; 1774 1729 } else { 1775 - dev_priv->display.setup_backlight = i9xx_setup_backlight; 1776 - dev_priv->display.enable_backlight = i9xx_enable_backlight; 1777 - dev_priv->display.disable_backlight = i9xx_disable_backlight; 1778 - dev_priv->display.set_backlight = i9xx_set_backlight; 1779 - dev_priv->display.get_backlight = i9xx_get_backlight; 1780 - dev_priv->display.backlight_hz_to_pwm = i9xx_hz_to_pwm; 1730 + panel->backlight.setup = i9xx_setup_backlight; 1731 + panel->backlight.enable = i9xx_enable_backlight; 1732 + panel->backlight.disable = i9xx_disable_backlight; 1733 + panel->backlight.set = i9xx_set_backlight; 1734 + panel->backlight.get = i9xx_get_backlight; 1735 + panel->backlight.hz_to_pwm = i9xx_hz_to_pwm; 1781 1736 } 1782 1737 } 1783 1738 ··· 1789 1736 struct drm_display_mode *fixed_mode, 1790 1737 struct drm_display_mode *downclock_mode) 1791 1738 { 1739 + intel_panel_init_backlight_funcs(panel); 1740 + 1792 1741 panel->fixed_mode = fixed_mode; 1793 1742 panel->downclock_mode = downclock_mode; 1794 1743
+328 -484
drivers/gpu/drm/i915/intel_pm.c
··· 71 71 72 72 gen9_init_clock_gating(dev); 73 73 74 - if (INTEL_REVID(dev) <= SKL_REVID_B0) { 75 - /* 76 - * WaDisableSDEUnitClockGating:skl 77 - * WaSetGAPSunitClckGateDisable:skl 78 - */ 79 - I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 80 - GEN8_GAPSUNIT_CLOCK_GATE_DISABLE | 81 - GEN8_SDEUNIT_CLOCK_GATE_DISABLE); 82 - 83 - /* WaDisableVFUnitClockGating:skl */ 84 - I915_WRITE(GEN6_UCGCTL2, I915_READ(GEN6_UCGCTL2) | 85 - GEN6_VFUNIT_CLOCK_GATE_DISABLE); 86 - } 87 - 88 74 if (INTEL_REVID(dev) <= SKL_REVID_D0) { 89 75 /* WaDisableHDCInvalidation:skl */ 90 76 I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | ··· 113 127 I915_WRITE(GEN8_UCGCTL6, I915_READ(GEN8_UCGCTL6) | 114 128 GEN8_HDCUNIT_CLOCK_GATE_DISABLE_HDCREQ); 115 129 116 - if (INTEL_REVID(dev) == BXT_REVID_A0) { 117 - /* 118 - * Hardware specification requires this bit to be 119 - * set to 1 for A0 120 - */ 130 + /* WaStoreMultiplePTEenable:bxt */ 131 + /* This is a requirement according to Hardware specification */ 132 + if (INTEL_REVID(dev) == BXT_REVID_A0) 121 133 I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF); 122 - } 123 134 124 135 /* WaSetClckGatingDisableMedia:bxt */ 125 136 if (INTEL_REVID(dev) == BXT_REVID_A0) { ··· 686 703 687 704 crtc = single_enabled_crtc(dev); 688 705 if (crtc) { 689 - const struct drm_display_mode *adjusted_mode; 706 + const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; 690 707 int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; 691 - int clock; 692 - 693 - adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; 694 - clock = adjusted_mode->crtc_clock; 708 + int clock = adjusted_mode->crtc_clock; 695 709 696 710 /* Display SR */ 697 711 wm = intel_calculate_wm(clock, &pineview_display_wm, ··· 1482 1502 if (crtc) { 1483 1503 /* self-refresh has much higher latency */ 1484 1504 static const int sr_latency_ns = 12000; 1485 - const struct drm_display_mode *adjusted_mode = 1486 - 
&to_intel_crtc(crtc)->config->base.adjusted_mode; 1505 + const struct drm_display_mode *adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; 1487 1506 int clock = adjusted_mode->crtc_clock; 1488 1507 int htotal = adjusted_mode->crtc_htotal; 1489 1508 int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; ··· 1629 1650 if (HAS_FW_BLC(dev) && enabled) { 1630 1651 /* self-refresh has much higher latency */ 1631 1652 static const int sr_latency_ns = 6000; 1632 - const struct drm_display_mode *adjusted_mode = 1633 - &to_intel_crtc(enabled)->config->base.adjusted_mode; 1653 + const struct drm_display_mode *adjusted_mode = &to_intel_crtc(enabled)->config->base.adjusted_mode; 1634 1654 int clock = adjusted_mode->crtc_clock; 1635 1655 int htotal = adjusted_mode->crtc_htotal; 1636 1656 int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w; ··· 1765 1787 return DIV_ROUND_UP(pri_val * 64, horiz_pixels * bytes_per_pixel) + 2; 1766 1788 } 1767 1789 1768 - struct skl_pipe_wm_parameters { 1769 - bool active; 1770 - uint32_t pipe_htotal; 1771 - uint32_t pixel_rate; /* in KHz */ 1772 - struct intel_plane_wm_parameters plane[I915_MAX_PLANES]; 1773 - struct intel_plane_wm_parameters cursor; 1774 - }; 1775 - 1776 - struct ilk_pipe_wm_parameters { 1777 - bool active; 1778 - uint32_t pipe_htotal; 1779 - uint32_t pixel_rate; 1780 - struct intel_plane_wm_parameters pri; 1781 - struct intel_plane_wm_parameters spr; 1782 - struct intel_plane_wm_parameters cur; 1783 - }; 1784 - 1785 1790 struct ilk_wm_maximums { 1786 1791 uint16_t pri; 1787 1792 uint16_t spr; ··· 1783 1822 * For both WM_PIPE and WM_LP. 1784 1823 * mem_value must be in 0.1us units. 1785 1824 */ 1786 - static uint32_t ilk_compute_pri_wm(const struct ilk_pipe_wm_parameters *params, 1825 + static uint32_t ilk_compute_pri_wm(const struct intel_crtc_state *cstate, 1826 + const struct intel_plane_state *pstate, 1787 1827 uint32_t mem_value, 1788 1828 bool is_lp) 1789 1829 { 1830 + int bpp = pstate->base.fb ? 
pstate->base.fb->bits_per_pixel / 8 : 0; 1790 1831 uint32_t method1, method2; 1791 1832 1792 - if (!params->active || !params->pri.enabled) 1833 + if (!cstate->base.active || !pstate->visible) 1793 1834 return 0; 1794 1835 1795 - method1 = ilk_wm_method1(params->pixel_rate, 1796 - params->pri.bytes_per_pixel, 1797 - mem_value); 1836 + method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value); 1798 1837 1799 1838 if (!is_lp) 1800 1839 return method1; 1801 1840 1802 - method2 = ilk_wm_method2(params->pixel_rate, 1803 - params->pipe_htotal, 1804 - params->pri.horiz_pixels, 1805 - params->pri.bytes_per_pixel, 1841 + method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate), 1842 + cstate->base.adjusted_mode.crtc_htotal, 1843 + drm_rect_width(&pstate->dst), 1844 + bpp, 1806 1845 mem_value); 1807 1846 1808 1847 return min(method1, method2); ··· 1812 1851 * For both WM_PIPE and WM_LP. 1813 1852 * mem_value must be in 0.1us units. 1814 1853 */ 1815 - static uint32_t ilk_compute_spr_wm(const struct ilk_pipe_wm_parameters *params, 1854 + static uint32_t ilk_compute_spr_wm(const struct intel_crtc_state *cstate, 1855 + const struct intel_plane_state *pstate, 1816 1856 uint32_t mem_value) 1817 1857 { 1858 + int bpp = pstate->base.fb ? 
pstate->base.fb->bits_per_pixel / 8 : 0; 1818 1859 uint32_t method1, method2; 1819 1860 1820 - if (!params->active || !params->spr.enabled) 1861 + if (!cstate->base.active || !pstate->visible) 1821 1862 return 0; 1822 1863 1823 - method1 = ilk_wm_method1(params->pixel_rate, 1824 - params->spr.bytes_per_pixel, 1825 - mem_value); 1826 - method2 = ilk_wm_method2(params->pixel_rate, 1827 - params->pipe_htotal, 1828 - params->spr.horiz_pixels, 1829 - params->spr.bytes_per_pixel, 1864 + method1 = ilk_wm_method1(ilk_pipe_pixel_rate(cstate), bpp, mem_value); 1865 + method2 = ilk_wm_method2(ilk_pipe_pixel_rate(cstate), 1866 + cstate->base.adjusted_mode.crtc_htotal, 1867 + drm_rect_width(&pstate->dst), 1868 + bpp, 1830 1869 mem_value); 1831 1870 return min(method1, method2); 1832 1871 } ··· 1835 1874 * For both WM_PIPE and WM_LP. 1836 1875 * mem_value must be in 0.1us units. 1837 1876 */ 1838 - static uint32_t ilk_compute_cur_wm(const struct ilk_pipe_wm_parameters *params, 1877 + static uint32_t ilk_compute_cur_wm(const struct intel_crtc_state *cstate, 1878 + const struct intel_plane_state *pstate, 1839 1879 uint32_t mem_value) 1840 1880 { 1841 - if (!params->active || !params->cur.enabled) 1881 + int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; 1882 + 1883 + if (!cstate->base.active || !pstate->visible) 1842 1884 return 0; 1843 1885 1844 - return ilk_wm_method2(params->pixel_rate, 1845 - params->pipe_htotal, 1846 - params->cur.horiz_pixels, 1847 - params->cur.bytes_per_pixel, 1886 + return ilk_wm_method2(ilk_pipe_pixel_rate(cstate), 1887 + cstate->base.adjusted_mode.crtc_htotal, 1888 + drm_rect_width(&pstate->dst), 1889 + bpp, 1848 1890 mem_value); 1849 1891 } 1850 1892 1851 1893 /* Only for WM_LP. 
*/ 1852 - static uint32_t ilk_compute_fbc_wm(const struct ilk_pipe_wm_parameters *params, 1894 + static uint32_t ilk_compute_fbc_wm(const struct intel_crtc_state *cstate, 1895 + const struct intel_plane_state *pstate, 1853 1896 uint32_t pri_val) 1854 1897 { 1855 - if (!params->active || !params->pri.enabled) 1898 + int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0; 1899 + 1900 + if (!cstate->base.active || !pstate->visible) 1856 1901 return 0; 1857 1902 1858 - return ilk_wm_fbc(pri_val, 1859 - params->pri.horiz_pixels, 1860 - params->pri.bytes_per_pixel); 1903 + return ilk_wm_fbc(pri_val, drm_rect_width(&pstate->dst), bpp); 1861 1904 } 1862 1905 1863 1906 static unsigned int ilk_display_fifo_size(const struct drm_device *dev) ··· 2026 2061 } 2027 2062 2028 2063 static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv, 2064 + const struct intel_crtc *intel_crtc, 2029 2065 int level, 2030 - const struct ilk_pipe_wm_parameters *p, 2066 + struct intel_crtc_state *cstate, 2031 2067 struct intel_wm_level *result) 2032 2068 { 2069 + struct intel_plane *intel_plane; 2033 2070 uint16_t pri_latency = dev_priv->wm.pri_latency[level]; 2034 2071 uint16_t spr_latency = dev_priv->wm.spr_latency[level]; 2035 2072 uint16_t cur_latency = dev_priv->wm.cur_latency[level]; ··· 2043 2076 cur_latency *= 5; 2044 2077 } 2045 2078 2046 - result->pri_val = ilk_compute_pri_wm(p, pri_latency, level); 2047 - result->spr_val = ilk_compute_spr_wm(p, spr_latency); 2048 - result->cur_val = ilk_compute_cur_wm(p, cur_latency); 2049 - result->fbc_val = ilk_compute_fbc_wm(p, result->pri_val); 2079 + for_each_intel_plane_on_crtc(dev_priv->dev, intel_crtc, intel_plane) { 2080 + struct intel_plane_state *pstate = 2081 + to_intel_plane_state(intel_plane->base.state); 2082 + 2083 + switch (intel_plane->base.type) { 2084 + case DRM_PLANE_TYPE_PRIMARY: 2085 + result->pri_val = ilk_compute_pri_wm(cstate, pstate, 2086 + pri_latency, 2087 + level); 2088 + result->fbc_val = 
ilk_compute_fbc_wm(cstate, pstate, 2089 + result->pri_val); 2090 + break; 2091 + case DRM_PLANE_TYPE_OVERLAY: 2092 + result->spr_val = ilk_compute_spr_wm(cstate, pstate, 2093 + spr_latency); 2094 + break; 2095 + case DRM_PLANE_TYPE_CURSOR: 2096 + result->cur_val = ilk_compute_cur_wm(cstate, pstate, 2097 + cur_latency); 2098 + break; 2099 + } 2100 + } 2101 + 2050 2102 result->enable = true; 2051 2103 } 2052 2104 ··· 2074 2088 { 2075 2089 struct drm_i915_private *dev_priv = dev->dev_private; 2076 2090 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2077 - struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode; 2091 + const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode; 2078 2092 u32 linetime, ips_linetime; 2079 2093 2080 2094 if (!intel_crtc->active) ··· 2083 2097 /* The WM are computed with base on how long it takes to fill a single 2084 2098 * row at the given clock rate, multiplied by 8. 2085 2099 * */ 2086 - linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, 2087 - mode->crtc_clock); 2088 - ips_linetime = DIV_ROUND_CLOSEST(mode->crtc_htotal * 1000 * 8, 2100 + linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2101 + adjusted_mode->crtc_clock); 2102 + ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8, 2089 2103 dev_priv->cdclk_freq); 2090 2104 2091 2105 return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) | ··· 2324 2338 intel_print_wm_latency(dev, "Gen9 Plane", dev_priv->wm.skl_latency); 2325 2339 } 2326 2340 2327 - static void ilk_compute_wm_parameters(struct drm_crtc *crtc, 2328 - struct ilk_pipe_wm_parameters *p) 2329 - { 2330 - struct drm_device *dev = crtc->dev; 2331 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2332 - enum pipe pipe = intel_crtc->pipe; 2333 - struct drm_plane *plane; 2334 - 2335 - if (!intel_crtc->active) 2336 - return; 2337 - 2338 - p->active = true; 2339 - p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; 2340 - 
p->pixel_rate = ilk_pipe_pixel_rate(intel_crtc->config); 2341 - 2342 - if (crtc->primary->state->fb) 2343 - p->pri.bytes_per_pixel = 2344 - crtc->primary->state->fb->bits_per_pixel / 8; 2345 - else 2346 - p->pri.bytes_per_pixel = 4; 2347 - 2348 - p->cur.bytes_per_pixel = 4; 2349 - /* 2350 - * TODO: for now, assume primary and cursor planes are always enabled. 2351 - * Setting them to false makes the screen flicker. 2352 - */ 2353 - p->pri.enabled = true; 2354 - p->cur.enabled = true; 2355 - 2356 - p->pri.horiz_pixels = intel_crtc->config->pipe_src_w; 2357 - p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w; 2358 - 2359 - drm_for_each_legacy_plane(plane, dev) { 2360 - struct intel_plane *intel_plane = to_intel_plane(plane); 2361 - 2362 - if (intel_plane->pipe == pipe) { 2363 - p->spr = intel_plane->wm; 2364 - break; 2365 - } 2366 - } 2367 - } 2368 - 2369 2341 static void ilk_compute_wm_config(struct drm_device *dev, 2370 2342 struct intel_wm_config *config) 2371 2343 { ··· 2343 2399 } 2344 2400 2345 2401 /* Compute new watermarks for the pipe */ 2346 - static bool intel_compute_pipe_wm(struct drm_crtc *crtc, 2347 - const struct ilk_pipe_wm_parameters *params, 2402 + static bool intel_compute_pipe_wm(struct intel_crtc_state *cstate, 2348 2403 struct intel_pipe_wm *pipe_wm) 2349 2404 { 2405 + struct drm_crtc *crtc = cstate->base.crtc; 2350 2406 struct drm_device *dev = crtc->dev; 2351 2407 const struct drm_i915_private *dev_priv = dev->dev_private; 2408 + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2409 + struct intel_plane *intel_plane; 2410 + struct intel_plane_state *sprstate = NULL; 2352 2411 int level, max_level = ilk_wm_max_level(dev); 2353 2412 /* LP0 watermark maximums depend on this pipe alone */ 2354 2413 struct intel_wm_config config = { 2355 2414 .num_pipes_active = 1, 2356 - .sprites_enabled = params->spr.enabled, 2357 - .sprites_scaled = params->spr.scaled, 2358 2415 }; 2359 2416 struct ilk_wm_maximums max; 2360 2417 2361 - 
pipe_wm->pipe_enabled = params->active; 2362 - pipe_wm->sprites_enabled = params->spr.enabled; 2363 - pipe_wm->sprites_scaled = params->spr.scaled; 2418 + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 2419 + if (intel_plane->base.type == DRM_PLANE_TYPE_OVERLAY) { 2420 + sprstate = to_intel_plane_state(intel_plane->base.state); 2421 + break; 2422 + } 2423 + } 2424 + 2425 + config.sprites_enabled = sprstate->visible; 2426 + config.sprites_scaled = sprstate->visible && 2427 + (drm_rect_width(&sprstate->dst) != drm_rect_width(&sprstate->src) >> 16 || 2428 + drm_rect_height(&sprstate->dst) != drm_rect_height(&sprstate->src) >> 16); 2429 + 2430 + pipe_wm->pipe_enabled = cstate->base.active; 2431 + pipe_wm->sprites_enabled = sprstate->visible; 2432 + pipe_wm->sprites_scaled = config.sprites_scaled; 2364 2433 2365 2434 /* ILK/SNB: LP2+ watermarks only w/o sprites */ 2366 - if (INTEL_INFO(dev)->gen <= 6 && params->spr.enabled) 2435 + if (INTEL_INFO(dev)->gen <= 6 && sprstate->visible) 2367 2436 max_level = 1; 2368 2437 2369 2438 /* ILK/SNB/IVB: LP1+ watermarks only w/o scaling */ 2370 - if (params->spr.scaled) 2439 + if (config.sprites_scaled) 2371 2440 max_level = 0; 2372 2441 2373 - ilk_compute_wm_level(dev_priv, 0, params, &pipe_wm->wm[0]); 2442 + ilk_compute_wm_level(dev_priv, intel_crtc, 0, cstate, &pipe_wm->wm[0]); 2374 2443 2375 2444 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 2376 2445 pipe_wm->linetime = hsw_compute_linetime_wm(dev, crtc); ··· 2400 2443 for (level = 1; level <= max_level; level++) { 2401 2444 struct intel_wm_level wm = {}; 2402 2445 2403 - ilk_compute_wm_level(dev_priv, level, params, &wm); 2446 + ilk_compute_wm_level(dev_priv, intel_crtc, level, cstate, &wm); 2404 2447 2405 2448 /* 2406 2449 * Disable any watermark level that exceeds the ··· 2805 2848 #define SKL_DDB_SIZE 896 /* in blocks */ 2806 2849 #define BXT_DDB_SIZE 512 2807 2850 2851 + /* 2852 + * Return the index of a plane in the SKL DDB and wm result arrays. 
Primary 2853 + * plane is always in slot 0, cursor is always in slot I915_MAX_PLANES-1, and 2854 + * other universal planes are in indices 1..n. Note that this may leave unused 2855 + * indices between the top "sprite" plane and the cursor. 2856 + */ 2857 + static int 2858 + skl_wm_plane_id(const struct intel_plane *plane) 2859 + { 2860 + switch (plane->base.type) { 2861 + case DRM_PLANE_TYPE_PRIMARY: 2862 + return 0; 2863 + case DRM_PLANE_TYPE_CURSOR: 2864 + return PLANE_CURSOR; 2865 + case DRM_PLANE_TYPE_OVERLAY: 2866 + return plane->plane + 1; 2867 + default: 2868 + MISSING_CASE(plane->base.type); 2869 + return plane->plane; 2870 + } 2871 + } 2872 + 2808 2873 static void 2809 2874 skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 2810 - struct drm_crtc *for_crtc, 2875 + const struct intel_crtc_state *cstate, 2811 2876 const struct intel_wm_config *config, 2812 - const struct skl_pipe_wm_parameters *params, 2813 2877 struct skl_ddb_entry *alloc /* out */) 2814 2878 { 2879 + struct drm_crtc *for_crtc = cstate->base.crtc; 2815 2880 struct drm_crtc *crtc; 2816 2881 unsigned int pipe_size, ddb_size; 2817 2882 int nth_active_pipe; 2818 2883 2819 - if (!params->active) { 2884 + if (!cstate->base.active) { 2820 2885 alloc->start = 0; 2821 2886 alloc->end = 0; 2822 2887 return; ··· 2898 2919 } 2899 2920 2900 2921 val = I915_READ(CUR_BUF_CFG(pipe)); 2901 - skl_ddb_entry_init_from_hw(&ddb->cursor[pipe], val); 2922 + skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR], 2923 + val); 2902 2924 } 2903 2925 } 2904 2926 2905 2927 static unsigned int 2906 - skl_plane_relative_data_rate(const struct intel_plane_wm_parameters *p, int y) 2928 + skl_plane_relative_data_rate(const struct intel_crtc_state *cstate, 2929 + const struct drm_plane_state *pstate, 2930 + int y) 2907 2931 { 2932 + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 2933 + struct drm_framebuffer *fb = pstate->fb; 2908 2934 2909 2935 /* for planar format */ 2910 - if 
(p->y_bytes_per_pixel) { 2936 + if (fb->pixel_format == DRM_FORMAT_NV12) { 2911 2937 if (y) /* y-plane data rate */ 2912 - return p->horiz_pixels * p->vert_pixels * p->y_bytes_per_pixel; 2938 + return intel_crtc->config->pipe_src_w * 2939 + intel_crtc->config->pipe_src_h * 2940 + drm_format_plane_cpp(fb->pixel_format, 0); 2913 2941 else /* uv-plane data rate */ 2914 - return (p->horiz_pixels/2) * (p->vert_pixels/2) * p->bytes_per_pixel; 2942 + return (intel_crtc->config->pipe_src_w/2) * 2943 + (intel_crtc->config->pipe_src_h/2) * 2944 + drm_format_plane_cpp(fb->pixel_format, 1); 2915 2945 } 2916 2946 2917 2947 /* for packed formats */ 2918 - return p->horiz_pixels * p->vert_pixels * p->bytes_per_pixel; 2948 + return intel_crtc->config->pipe_src_w * 2949 + intel_crtc->config->pipe_src_h * 2950 + drm_format_plane_cpp(fb->pixel_format, 0); 2919 2951 } 2920 2952 2921 2953 /* ··· 2935 2945 * 3 * 4096 * 8192 * 4 < 2^32 2936 2946 */ 2937 2947 static unsigned int 2938 - skl_get_total_relative_data_rate(struct intel_crtc *intel_crtc, 2939 - const struct skl_pipe_wm_parameters *params) 2948 + skl_get_total_relative_data_rate(const struct intel_crtc_state *cstate) 2940 2949 { 2950 + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 2951 + struct drm_device *dev = intel_crtc->base.dev; 2952 + const struct intel_plane *intel_plane; 2941 2953 unsigned int total_data_rate = 0; 2942 - int plane; 2943 2954 2944 - for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { 2945 - const struct intel_plane_wm_parameters *p; 2955 + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 2956 + const struct drm_plane_state *pstate = intel_plane->base.state; 2946 2957 2947 - p = &params->plane[plane]; 2948 - if (!p->enabled) 2958 + if (pstate->fb == NULL) 2949 2959 continue; 2950 2960 2951 - total_data_rate += skl_plane_relative_data_rate(p, 0); /* packed/uv */ 2952 - if (p->y_bytes_per_pixel) { 2953 - total_data_rate += skl_plane_relative_data_rate(p, 1); /* 
y-plane */ 2954 - } 2961 + /* packed/uv */ 2962 + total_data_rate += skl_plane_relative_data_rate(cstate, 2963 + pstate, 2964 + 0); 2965 + 2966 + if (pstate->fb->pixel_format == DRM_FORMAT_NV12) 2967 + /* y-plane */ 2968 + total_data_rate += skl_plane_relative_data_rate(cstate, 2969 + pstate, 2970 + 1); 2955 2971 } 2956 2972 2957 2973 return total_data_rate; 2958 2974 } 2959 2975 2960 2976 static void 2961 - skl_allocate_pipe_ddb(struct drm_crtc *crtc, 2977 + skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, 2962 2978 const struct intel_wm_config *config, 2963 - const struct skl_pipe_wm_parameters *params, 2964 2979 struct skl_ddb_allocation *ddb /* out */) 2965 2980 { 2981 + struct drm_crtc *crtc = cstate->base.crtc; 2966 2982 struct drm_device *dev = crtc->dev; 2967 - struct drm_i915_private *dev_priv = dev->dev_private; 2968 2983 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2984 + struct intel_plane *intel_plane; 2969 2985 enum pipe pipe = intel_crtc->pipe; 2970 2986 struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; 2971 2987 uint16_t alloc_size, start, cursor_blocks; 2972 2988 uint16_t minimum[I915_MAX_PLANES]; 2973 2989 uint16_t y_minimum[I915_MAX_PLANES]; 2974 2990 unsigned int total_data_rate; 2975 - int plane; 2976 2991 2977 - skl_ddb_get_pipe_allocation_limits(dev, crtc, config, params, alloc); 2992 + skl_ddb_get_pipe_allocation_limits(dev, cstate, config, alloc); 2978 2993 alloc_size = skl_ddb_entry_size(alloc); 2979 2994 if (alloc_size == 0) { 2980 2995 memset(ddb->plane[pipe], 0, sizeof(ddb->plane[pipe])); 2981 - memset(&ddb->cursor[pipe], 0, sizeof(ddb->cursor[pipe])); 2996 + memset(&ddb->plane[pipe][PLANE_CURSOR], 0, 2997 + sizeof(ddb->plane[pipe][PLANE_CURSOR])); 2982 2998 return; 2983 2999 } 2984 3000 2985 3001 cursor_blocks = skl_cursor_allocation(config); 2986 - ddb->cursor[pipe].start = alloc->end - cursor_blocks; 2987 - ddb->cursor[pipe].end = alloc->end; 3002 + ddb->plane[pipe][PLANE_CURSOR].start = alloc->end - cursor_blocks; 3003 + 
ddb->plane[pipe][PLANE_CURSOR].end = alloc->end; 2988 3004 2989 3005 alloc_size -= cursor_blocks; 2990 3006 alloc->end -= cursor_blocks; 2991 3007 2992 3008 /* 1. Allocate the mininum required blocks for each active plane */ 2993 - for_each_plane(dev_priv, pipe, plane) { 2994 - const struct intel_plane_wm_parameters *p; 3009 + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3010 + struct drm_plane *plane = &intel_plane->base; 3011 + struct drm_framebuffer *fb = plane->fb; 3012 + int id = skl_wm_plane_id(intel_plane); 2995 3013 2996 - p = &params->plane[plane]; 2997 - if (!p->enabled) 3014 + if (fb == NULL) 3015 + continue; 3016 + if (plane->type == DRM_PLANE_TYPE_CURSOR) 2998 3017 continue; 2999 3018 3000 - minimum[plane] = 8; 3001 - alloc_size -= minimum[plane]; 3002 - y_minimum[plane] = p->y_bytes_per_pixel ? 8 : 0; 3003 - alloc_size -= y_minimum[plane]; 3019 + minimum[id] = 8; 3020 + alloc_size -= minimum[id]; 3021 + y_minimum[id] = (fb->pixel_format == DRM_FORMAT_NV12) ? 8 : 0; 3022 + alloc_size -= y_minimum[id]; 3004 3023 } 3005 3024 3006 3025 /* ··· 3018 3019 * 3019 3020 * FIXME: we may not allocate every single block here. 
3020 3021 */ 3021 - total_data_rate = skl_get_total_relative_data_rate(intel_crtc, params); 3022 + total_data_rate = skl_get_total_relative_data_rate(cstate); 3022 3023 3023 3024 start = alloc->start; 3024 - for (plane = 0; plane < intel_num_planes(intel_crtc); plane++) { 3025 - const struct intel_plane_wm_parameters *p; 3025 + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3026 + struct drm_plane *plane = &intel_plane->base; 3027 + struct drm_plane_state *pstate = intel_plane->base.state; 3026 3028 unsigned int data_rate, y_data_rate; 3027 3029 uint16_t plane_blocks, y_plane_blocks = 0; 3030 + int id = skl_wm_plane_id(intel_plane); 3028 3031 3029 - p = &params->plane[plane]; 3030 - if (!p->enabled) 3032 + if (pstate->fb == NULL) 3033 + continue; 3034 + if (plane->type == DRM_PLANE_TYPE_CURSOR) 3031 3035 continue; 3032 3036 3033 - data_rate = skl_plane_relative_data_rate(p, 0); 3037 + data_rate = skl_plane_relative_data_rate(cstate, pstate, 0); 3034 3038 3035 3039 /* 3036 3040 * allocation for (packed formats) or (uv-plane part of planar format): 3037 3041 * promote the expression to 64 bits to avoid overflowing, the 3038 3042 * result is < available as data_rate / total_data_rate < 1 3039 3043 */ 3040 - plane_blocks = minimum[plane]; 3044 + plane_blocks = minimum[id]; 3041 3045 plane_blocks += div_u64((uint64_t)alloc_size * data_rate, 3042 3046 total_data_rate); 3043 3047 3044 - ddb->plane[pipe][plane].start = start; 3045 - ddb->plane[pipe][plane].end = start + plane_blocks; 3048 + ddb->plane[pipe][id].start = start; 3049 + ddb->plane[pipe][id].end = start + plane_blocks; 3046 3050 3047 3051 start += plane_blocks; 3048 3052 3049 3053 /* 3050 3054 * allocation for y_plane part of planar format: 3051 3055 */ 3052 - if (p->y_bytes_per_pixel) { 3053 - y_data_rate = skl_plane_relative_data_rate(p, 1); 3054 - y_plane_blocks = y_minimum[plane]; 3056 + if (pstate->fb->pixel_format == DRM_FORMAT_NV12) { 3057 + y_data_rate = 
skl_plane_relative_data_rate(cstate, 3058 + pstate, 3059 + 1); 3060 + y_plane_blocks = y_minimum[id]; 3055 3061 y_plane_blocks += div_u64((uint64_t)alloc_size * y_data_rate, 3056 3062 total_data_rate); 3057 3063 3058 - ddb->y_plane[pipe][plane].start = start; 3059 - ddb->y_plane[pipe][plane].end = start + y_plane_blocks; 3064 + ddb->y_plane[pipe][id].start = start; 3065 + ddb->y_plane[pipe][id].end = start + y_plane_blocks; 3060 3066 3061 3067 start += y_plane_blocks; 3062 3068 } ··· 3137 3133 sizeof(new_ddb->plane[pipe]))) 3138 3134 return true; 3139 3135 3140 - if (memcmp(&new_ddb->cursor[pipe], &cur_ddb->cursor[pipe], 3141 - sizeof(new_ddb->cursor[pipe]))) 3136 + if (memcmp(&new_ddb->plane[pipe][PLANE_CURSOR], &cur_ddb->plane[pipe][PLANE_CURSOR], 3137 + sizeof(new_ddb->plane[pipe][PLANE_CURSOR]))) 3142 3138 return true; 3143 3139 3144 3140 return false; ··· 3148 3144 struct intel_wm_config *config) 3149 3145 { 3150 3146 struct drm_crtc *crtc; 3151 - struct drm_plane *plane; 3152 3147 3153 3148 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) 3154 3149 config->num_pipes_active += to_intel_crtc(crtc)->active; 3155 - 3156 - /* FIXME: I don't think we need those two global parameters on SKL */ 3157 - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 3158 - struct intel_plane *intel_plane = to_intel_plane(plane); 3159 - 3160 - config->sprites_enabled |= intel_plane->wm.enabled; 3161 - config->sprites_scaled |= intel_plane->wm.scaled; 3162 - } 3163 - } 3164 - 3165 - static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, 3166 - struct skl_pipe_wm_parameters *p) 3167 - { 3168 - struct drm_device *dev = crtc->dev; 3169 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3170 - enum pipe pipe = intel_crtc->pipe; 3171 - struct drm_plane *plane; 3172 - struct drm_framebuffer *fb; 3173 - int i = 1; /* Index for sprite planes start */ 3174 - 3175 - p->active = intel_crtc->active; 3176 - if (p->active) { 3177 - p->pipe_htotal = 
intel_crtc->config->base.adjusted_mode.crtc_htotal; 3178 - p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config); 3179 - 3180 - fb = crtc->primary->state->fb; 3181 - /* For planar: Bpp is for uv plane, y_Bpp is for y plane */ 3182 - if (fb) { 3183 - p->plane[0].enabled = true; 3184 - p->plane[0].bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? 3185 - drm_format_plane_cpp(fb->pixel_format, 1) : 3186 - drm_format_plane_cpp(fb->pixel_format, 0); 3187 - p->plane[0].y_bytes_per_pixel = fb->pixel_format == DRM_FORMAT_NV12 ? 3188 - drm_format_plane_cpp(fb->pixel_format, 0) : 0; 3189 - p->plane[0].tiling = fb->modifier[0]; 3190 - } else { 3191 - p->plane[0].enabled = false; 3192 - p->plane[0].bytes_per_pixel = 0; 3193 - p->plane[0].y_bytes_per_pixel = 0; 3194 - p->plane[0].tiling = DRM_FORMAT_MOD_NONE; 3195 - } 3196 - p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; 3197 - p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; 3198 - p->plane[0].rotation = crtc->primary->state->rotation; 3199 - 3200 - fb = crtc->cursor->state->fb; 3201 - p->cursor.y_bytes_per_pixel = 0; 3202 - if (fb) { 3203 - p->cursor.enabled = true; 3204 - p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8; 3205 - p->cursor.horiz_pixels = crtc->cursor->state->crtc_w; 3206 - p->cursor.vert_pixels = crtc->cursor->state->crtc_h; 3207 - } else { 3208 - p->cursor.enabled = false; 3209 - p->cursor.bytes_per_pixel = 0; 3210 - p->cursor.horiz_pixels = 64; 3211 - p->cursor.vert_pixels = 64; 3212 - } 3213 - } 3214 - 3215 - list_for_each_entry(plane, &dev->mode_config.plane_list, head) { 3216 - struct intel_plane *intel_plane = to_intel_plane(plane); 3217 - 3218 - if (intel_plane->pipe == pipe && 3219 - plane->type == DRM_PLANE_TYPE_OVERLAY) 3220 - p->plane[i++] = intel_plane->wm; 3221 - } 3222 3150 } 3223 3151 3224 3152 static bool skl_compute_plane_wm(const struct drm_i915_private *dev_priv, 3225 - struct skl_pipe_wm_parameters *p, 3226 - struct intel_plane_wm_parameters *p_params, 3153 + 
struct intel_crtc_state *cstate, 3154 + struct intel_plane *intel_plane, 3227 3155 uint16_t ddb_allocation, 3228 3156 int level, 3229 3157 uint16_t *out_blocks, /* out */ 3230 3158 uint8_t *out_lines /* out */) 3231 3159 { 3160 + struct drm_plane *plane = &intel_plane->base; 3161 + struct drm_framebuffer *fb = plane->state->fb; 3232 3162 uint32_t latency = dev_priv->wm.skl_latency[level]; 3233 3163 uint32_t method1, method2; 3234 3164 uint32_t plane_bytes_per_line, plane_blocks_per_line; ··· 3170 3232 uint32_t selected_result; 3171 3233 uint8_t bytes_per_pixel; 3172 3234 3173 - if (latency == 0 || !p->active || !p_params->enabled) 3235 + if (latency == 0 || !cstate->base.active || !fb) 3174 3236 return false; 3175 3237 3176 - bytes_per_pixel = p_params->y_bytes_per_pixel ? 3177 - p_params->y_bytes_per_pixel : 3178 - p_params->bytes_per_pixel; 3179 - method1 = skl_wm_method1(p->pixel_rate, 3238 + bytes_per_pixel = (fb->pixel_format == DRM_FORMAT_NV12) ? 3239 + drm_format_plane_cpp(DRM_FORMAT_NV12, 0) : 3240 + drm_format_plane_cpp(DRM_FORMAT_NV12, 1); 3241 + method1 = skl_wm_method1(skl_pipe_pixel_rate(cstate), 3180 3242 bytes_per_pixel, 3181 3243 latency); 3182 - method2 = skl_wm_method2(p->pixel_rate, 3183 - p->pipe_htotal, 3184 - p_params->horiz_pixels, 3244 + method2 = skl_wm_method2(skl_pipe_pixel_rate(cstate), 3245 + cstate->base.adjusted_mode.crtc_htotal, 3246 + cstate->pipe_src_w, 3185 3247 bytes_per_pixel, 3186 - p_params->tiling, 3248 + fb->modifier[0], 3187 3249 latency); 3188 3250 3189 - plane_bytes_per_line = p_params->horiz_pixels * bytes_per_pixel; 3251 + plane_bytes_per_line = cstate->pipe_src_w * bytes_per_pixel; 3190 3252 plane_blocks_per_line = DIV_ROUND_UP(plane_bytes_per_line, 512); 3191 3253 3192 - if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || 3193 - p_params->tiling == I915_FORMAT_MOD_Yf_TILED) { 3254 + if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || 3255 + fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) { 3194 3256 uint32_t min_scanlines = 
4; 3195 3257 uint32_t y_tile_minimum; 3196 - if (intel_rotation_90_or_270(p_params->rotation)) { 3197 - switch (p_params->bytes_per_pixel) { 3258 + if (intel_rotation_90_or_270(plane->state->rotation)) { 3259 + int bpp = (fb->pixel_format == DRM_FORMAT_NV12) ? 3260 + drm_format_plane_cpp(fb->pixel_format, 1) : 3261 + drm_format_plane_cpp(fb->pixel_format, 0); 3262 + 3263 + switch (bpp) { 3198 3264 case 1: 3199 3265 min_scanlines = 16; 3200 3266 break; ··· 3222 3280 res_lines = DIV_ROUND_UP(selected_result, plane_blocks_per_line); 3223 3281 3224 3282 if (level >= 1 && level <= 7) { 3225 - if (p_params->tiling == I915_FORMAT_MOD_Y_TILED || 3226 - p_params->tiling == I915_FORMAT_MOD_Yf_TILED) 3283 + if (fb->modifier[0] == I915_FORMAT_MOD_Y_TILED || 3284 + fb->modifier[0] == I915_FORMAT_MOD_Yf_TILED) 3227 3285 res_lines += 4; 3228 3286 else 3229 3287 res_blocks++; ··· 3240 3298 3241 3299 static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, 3242 3300 struct skl_ddb_allocation *ddb, 3243 - struct skl_pipe_wm_parameters *p, 3244 - enum pipe pipe, 3301 + struct intel_crtc_state *cstate, 3245 3302 int level, 3246 - int num_planes, 3247 3303 struct skl_wm_level *result) 3248 3304 { 3305 + struct drm_device *dev = dev_priv->dev; 3306 + struct intel_crtc *intel_crtc = to_intel_crtc(cstate->base.crtc); 3307 + struct intel_plane *intel_plane; 3249 3308 uint16_t ddb_blocks; 3250 - int i; 3309 + enum pipe pipe = intel_crtc->pipe; 3251 3310 3252 - for (i = 0; i < num_planes; i++) { 3311 + for_each_intel_plane_on_crtc(dev, intel_crtc, intel_plane) { 3312 + int i = skl_wm_plane_id(intel_plane); 3313 + 3253 3314 ddb_blocks = skl_ddb_entry_size(&ddb->plane[pipe][i]); 3254 3315 3255 3316 result->plane_en[i] = skl_compute_plane_wm(dev_priv, 3256 - p, &p->plane[i], 3317 + cstate, 3318 + intel_plane, 3257 3319 ddb_blocks, 3258 3320 level, 3259 3321 &result->plane_res_b[i], 3260 3322 &result->plane_res_l[i]); 3261 3323 } 3262 - 3263 - ddb_blocks = 
skl_ddb_entry_size(&ddb->cursor[pipe]); 3264 - result->cursor_en = skl_compute_plane_wm(dev_priv, p, &p->cursor, 3265 - ddb_blocks, level, 3266 - &result->cursor_res_b, 3267 - &result->cursor_res_l); 3268 3324 } 3269 3325 3270 3326 static uint32_t 3271 - skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p) 3327 + skl_compute_linetime_wm(struct intel_crtc_state *cstate) 3272 3328 { 3273 - if (!to_intel_crtc(crtc)->active) 3329 + if (!cstate->base.active) 3274 3330 return 0; 3275 3331 3276 - if (WARN_ON(p->pixel_rate == 0)) 3332 + if (WARN_ON(skl_pipe_pixel_rate(cstate) == 0)) 3277 3333 return 0; 3278 3334 3279 - return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); 3335 + return DIV_ROUND_UP(8 * cstate->base.adjusted_mode.crtc_htotal * 1000, 3336 + skl_pipe_pixel_rate(cstate)); 3280 3337 } 3281 3338 3282 - static void skl_compute_transition_wm(struct drm_crtc *crtc, 3283 - struct skl_pipe_wm_parameters *params, 3339 + static void skl_compute_transition_wm(struct intel_crtc_state *cstate, 3284 3340 struct skl_wm_level *trans_wm /* out */) 3285 3341 { 3342 + struct drm_crtc *crtc = cstate->base.crtc; 3286 3343 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3287 - int i; 3344 + struct intel_plane *intel_plane; 3288 3345 3289 - if (!params->active) 3346 + if (!cstate->base.active) 3290 3347 return; 3291 3348 3292 3349 /* Until we know more, just disable transition WMs */ 3293 - for (i = 0; i < intel_num_planes(intel_crtc); i++) 3350 + for_each_intel_plane_on_crtc(crtc->dev, intel_crtc, intel_plane) { 3351 + int i = skl_wm_plane_id(intel_plane); 3352 + 3294 3353 trans_wm->plane_en[i] = false; 3295 - trans_wm->cursor_en = false; 3354 + } 3296 3355 } 3297 3356 3298 - static void skl_compute_pipe_wm(struct drm_crtc *crtc, 3357 + static void skl_compute_pipe_wm(struct intel_crtc_state *cstate, 3299 3358 struct skl_ddb_allocation *ddb, 3300 - struct skl_pipe_wm_parameters *params, 3301 3359 struct skl_pipe_wm *pipe_wm) 3302 3360 { 
3303 - struct drm_device *dev = crtc->dev; 3361 + struct drm_device *dev = cstate->base.crtc->dev; 3304 3362 const struct drm_i915_private *dev_priv = dev->dev_private; 3305 - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3306 3363 int level, max_level = ilk_wm_max_level(dev); 3307 3364 3308 3365 for (level = 0; level <= max_level; level++) { 3309 - skl_compute_wm_level(dev_priv, ddb, params, intel_crtc->pipe, 3310 - level, intel_num_planes(intel_crtc), 3311 - &pipe_wm->wm[level]); 3366 + skl_compute_wm_level(dev_priv, ddb, cstate, 3367 + level, &pipe_wm->wm[level]); 3312 3368 } 3313 - pipe_wm->linetime = skl_compute_linetime_wm(crtc, params); 3369 + pipe_wm->linetime = skl_compute_linetime_wm(cstate); 3314 3370 3315 - skl_compute_transition_wm(crtc, params, &pipe_wm->trans_wm); 3371 + skl_compute_transition_wm(cstate, &pipe_wm->trans_wm); 3316 3372 } 3317 3373 3318 3374 static void skl_compute_wm_results(struct drm_device *dev, 3319 - struct skl_pipe_wm_parameters *p, 3320 3375 struct skl_pipe_wm *p_wm, 3321 3376 struct skl_wm_values *r, 3322 3377 struct intel_crtc *intel_crtc) ··· 3338 3399 3339 3400 temp = 0; 3340 3401 3341 - temp |= p_wm->wm[level].cursor_res_l << PLANE_WM_LINES_SHIFT; 3342 - temp |= p_wm->wm[level].cursor_res_b; 3402 + temp |= p_wm->wm[level].plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT; 3403 + temp |= p_wm->wm[level].plane_res_b[PLANE_CURSOR]; 3343 3404 3344 - if (p_wm->wm[level].cursor_en) 3405 + if (p_wm->wm[level].plane_en[PLANE_CURSOR]) 3345 3406 temp |= PLANE_WM_EN; 3346 3407 3347 - r->cursor[pipe][level] = temp; 3408 + r->plane[pipe][PLANE_CURSOR][level] = temp; 3348 3409 3349 3410 } 3350 3411 ··· 3360 3421 } 3361 3422 3362 3423 temp = 0; 3363 - temp |= p_wm->trans_wm.cursor_res_l << PLANE_WM_LINES_SHIFT; 3364 - temp |= p_wm->trans_wm.cursor_res_b; 3365 - if (p_wm->trans_wm.cursor_en) 3424 + temp |= p_wm->trans_wm.plane_res_l[PLANE_CURSOR] << PLANE_WM_LINES_SHIFT; 3425 + temp |= p_wm->trans_wm.plane_res_b[PLANE_CURSOR]; 3426 
+ if (p_wm->trans_wm.plane_en[PLANE_CURSOR]) 3366 3427 temp |= PLANE_WM_EN; 3367 3428 3368 - r->cursor_trans[pipe] = temp; 3429 + r->plane_trans[pipe][PLANE_CURSOR] = temp; 3369 3430 3370 3431 r->wm_linetime[pipe] = p_wm->linetime; 3371 3432 } ··· 3399 3460 I915_WRITE(PLANE_WM(pipe, i, level), 3400 3461 new->plane[pipe][i][level]); 3401 3462 I915_WRITE(CUR_WM(pipe, level), 3402 - new->cursor[pipe][level]); 3463 + new->plane[pipe][PLANE_CURSOR][level]); 3403 3464 } 3404 3465 for (i = 0; i < intel_num_planes(crtc); i++) 3405 3466 I915_WRITE(PLANE_WM_TRANS(pipe, i), 3406 3467 new->plane_trans[pipe][i]); 3407 - I915_WRITE(CUR_WM_TRANS(pipe), new->cursor_trans[pipe]); 3468 + I915_WRITE(CUR_WM_TRANS(pipe), 3469 + new->plane_trans[pipe][PLANE_CURSOR]); 3408 3470 3409 3471 for (i = 0; i < intel_num_planes(crtc); i++) { 3410 3472 skl_ddb_entry_write(dev_priv, ··· 3417 3477 } 3418 3478 3419 3479 skl_ddb_entry_write(dev_priv, CUR_BUF_CFG(pipe), 3420 - &new->ddb.cursor[pipe]); 3480 + &new->ddb.plane[pipe][PLANE_CURSOR]); 3421 3481 } 3422 3482 } 3423 3483 ··· 3557 3617 } 3558 3618 3559 3619 static bool skl_update_pipe_wm(struct drm_crtc *crtc, 3560 - struct skl_pipe_wm_parameters *params, 3561 3620 struct intel_wm_config *config, 3562 3621 struct skl_ddb_allocation *ddb, /* out */ 3563 3622 struct skl_pipe_wm *pipe_wm /* out */) 3564 3623 { 3565 3624 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3625 + struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 3566 3626 3567 - skl_compute_wm_pipe_parameters(crtc, params); 3568 - skl_allocate_pipe_ddb(crtc, config, params, ddb); 3569 - skl_compute_pipe_wm(crtc, ddb, params, pipe_wm); 3627 + skl_allocate_pipe_ddb(cstate, config, ddb); 3628 + skl_compute_pipe_wm(cstate, ddb, pipe_wm); 3570 3629 3571 3630 if (!memcmp(&intel_crtc->wm.skl_active, pipe_wm, sizeof(*pipe_wm))) 3572 3631 return false; ··· 3598 3659 */ 3599 3660 list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, 3600 3661 base.head) { 3601 - 
struct skl_pipe_wm_parameters params = {}; 3602 3662 struct skl_pipe_wm pipe_wm = {}; 3603 3663 bool wm_changed; 3604 3664 ··· 3607 3669 if (!intel_crtc->active) 3608 3670 continue; 3609 3671 3610 - wm_changed = skl_update_pipe_wm(&intel_crtc->base, 3611 - &params, config, 3672 + wm_changed = skl_update_pipe_wm(&intel_crtc->base, config, 3612 3673 &r->ddb, &pipe_wm); 3613 3674 3614 3675 /* ··· 3617 3680 */ 3618 3681 WARN_ON(!wm_changed); 3619 3682 3620 - skl_compute_wm_results(dev, &params, &pipe_wm, r, intel_crtc); 3683 + skl_compute_wm_results(dev, &pipe_wm, r, intel_crtc); 3621 3684 r->dirty[intel_crtc->pipe] = true; 3622 3685 } 3623 3686 } ··· 3627 3690 watermarks->wm_linetime[pipe] = 0; 3628 3691 memset(watermarks->plane[pipe], 0, 3629 3692 sizeof(uint32_t) * 8 * I915_MAX_PLANES); 3630 - memset(watermarks->cursor[pipe], 0, sizeof(uint32_t) * 8); 3631 3693 memset(watermarks->plane_trans[pipe], 3632 3694 0, sizeof(uint32_t) * I915_MAX_PLANES); 3633 - watermarks->cursor_trans[pipe] = 0; 3695 + watermarks->plane_trans[pipe][PLANE_CURSOR] = 0; 3634 3696 3635 3697 /* Clear ddb entries for pipe */ 3636 3698 memset(&watermarks->ddb.pipe[pipe], 0, sizeof(struct skl_ddb_entry)); ··· 3637 3701 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); 3638 3702 memset(&watermarks->ddb.y_plane[pipe], 0, 3639 3703 sizeof(struct skl_ddb_entry) * I915_MAX_PLANES); 3640 - memset(&watermarks->ddb.cursor[pipe], 0, sizeof(struct skl_ddb_entry)); 3704 + memset(&watermarks->ddb.plane[pipe][PLANE_CURSOR], 0, 3705 + sizeof(struct skl_ddb_entry)); 3641 3706 3642 3707 } 3643 3708 ··· 3647 3710 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3648 3711 struct drm_device *dev = crtc->dev; 3649 3712 struct drm_i915_private *dev_priv = dev->dev_private; 3650 - struct skl_pipe_wm_parameters params = {}; 3651 3713 struct skl_wm_values *results = &dev_priv->wm.skl_results; 3652 3714 struct skl_pipe_wm pipe_wm = {}; 3653 3715 struct intel_wm_config config = {}; ··· 3659 3723 3660 3724 
skl_compute_wm_global_parameters(dev, &config); 3661 3725 3662 - if (!skl_update_pipe_wm(crtc, &params, &config, 3663 - &results->ddb, &pipe_wm)) 3726 + if (!skl_update_pipe_wm(crtc, &config, &results->ddb, &pipe_wm)) 3664 3727 return; 3665 3728 3666 - skl_compute_wm_results(dev, &params, &pipe_wm, results, intel_crtc); 3729 + skl_compute_wm_results(dev, &pipe_wm, results, intel_crtc); 3667 3730 results->dirty[intel_crtc->pipe] = true; 3668 3731 3669 3732 skl_update_other_pipe_wm(dev, crtc, &config, results); ··· 3673 3738 dev_priv->wm.skl_hw = *results; 3674 3739 } 3675 3740 3676 - static void 3677 - skl_update_sprite_wm(struct drm_plane *plane, struct drm_crtc *crtc, 3678 - uint32_t sprite_width, uint32_t sprite_height, 3679 - int pixel_size, bool enabled, bool scaled) 3680 - { 3681 - struct intel_plane *intel_plane = to_intel_plane(plane); 3682 - struct drm_framebuffer *fb = plane->state->fb; 3683 - 3684 - intel_plane->wm.enabled = enabled; 3685 - intel_plane->wm.scaled = scaled; 3686 - intel_plane->wm.horiz_pixels = sprite_width; 3687 - intel_plane->wm.vert_pixels = sprite_height; 3688 - intel_plane->wm.tiling = DRM_FORMAT_MOD_NONE; 3689 - 3690 - /* For planar: Bpp is for UV plane, y_Bpp is for Y plane */ 3691 - intel_plane->wm.bytes_per_pixel = 3692 - (fb && fb->pixel_format == DRM_FORMAT_NV12) ? 3693 - drm_format_plane_cpp(plane->state->fb->pixel_format, 1) : pixel_size; 3694 - intel_plane->wm.y_bytes_per_pixel = 3695 - (fb && fb->pixel_format == DRM_FORMAT_NV12) ? 3696 - drm_format_plane_cpp(plane->state->fb->pixel_format, 0) : 0; 3697 - 3698 - /* 3699 - * Framebuffer can be NULL on plane disable, but it does not 3700 - * matter for watermarks if we assume no tiling in that case. 
3701 - */ 3702 - if (fb) 3703 - intel_plane->wm.tiling = fb->modifier[0]; 3704 - intel_plane->wm.rotation = plane->state->rotation; 3705 - 3706 - skl_update_wm(crtc); 3707 - } 3708 - 3709 3741 static void ilk_update_wm(struct drm_crtc *crtc) 3710 3742 { 3711 3743 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 3744 + struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 3712 3745 struct drm_device *dev = crtc->dev; 3713 3746 struct drm_i915_private *dev_priv = dev->dev_private; 3714 3747 struct ilk_wm_maximums max; 3715 - struct ilk_pipe_wm_parameters params = {}; 3716 3748 struct ilk_wm_values results = {}; 3717 3749 enum intel_ddb_partitioning partitioning; 3718 3750 struct intel_pipe_wm pipe_wm = {}; 3719 3751 struct intel_pipe_wm lp_wm_1_2 = {}, lp_wm_5_6 = {}, *best_lp_wm; 3720 3752 struct intel_wm_config config = {}; 3721 3753 3722 - ilk_compute_wm_parameters(crtc, &params); 3754 + WARN_ON(cstate->base.active != intel_crtc->active); 3723 3755 3724 - intel_compute_pipe_wm(crtc, &params, &pipe_wm); 3756 + /* 3757 + * IVB workaround: must disable low power watermarks for at least 3758 + * one frame before enabling scaling. LP watermarks can be re-enabled 3759 + * when scaling is disabled. 
3760 + * 3761 + * WaCxSRDisabledForSpriteScaling:ivb 3762 + */ 3763 + if (cstate->disable_lp_wm) { 3764 + ilk_disable_lp_wm(dev); 3765 + intel_wait_for_vblank(dev, intel_crtc->pipe); 3766 + } 3767 + 3768 + intel_compute_pipe_wm(cstate, &pipe_wm); 3725 3769 3726 3770 if (!memcmp(&intel_crtc->wm.active, &pipe_wm, sizeof(pipe_wm))) 3727 3771 return; ··· 3731 3817 ilk_write_wm_values(dev_priv, &results); 3732 3818 } 3733 3819 3734 - static void 3735 - ilk_update_sprite_wm(struct drm_plane *plane, 3736 - struct drm_crtc *crtc, 3737 - uint32_t sprite_width, uint32_t sprite_height, 3738 - int pixel_size, bool enabled, bool scaled) 3739 - { 3740 - struct drm_device *dev = plane->dev; 3741 - struct intel_plane *intel_plane = to_intel_plane(plane); 3742 - 3743 - intel_plane->wm.enabled = enabled; 3744 - intel_plane->wm.scaled = scaled; 3745 - intel_plane->wm.horiz_pixels = sprite_width; 3746 - intel_plane->wm.vert_pixels = sprite_width; 3747 - intel_plane->wm.bytes_per_pixel = pixel_size; 3748 - 3749 - /* 3750 - * IVB workaround: must disable low power watermarks for at least 3751 - * one frame before enabling scaling. LP watermarks can be re-enabled 3752 - * when scaling is disabled. 
3753 - * 3754 - * WaCxSRDisabledForSpriteScaling:ivb 3755 - */ 3756 - if (IS_IVYBRIDGE(dev) && scaled && ilk_disable_lp_wm(dev)) 3757 - intel_wait_for_vblank(dev, intel_plane->pipe); 3758 - 3759 - ilk_update_wm(crtc); 3760 - } 3761 - 3762 3820 static void skl_pipe_wm_active_state(uint32_t val, 3763 3821 struct skl_pipe_wm *active, 3764 3822 bool is_transwm, ··· 3749 3863 (val >> PLANE_WM_LINES_SHIFT) & 3750 3864 PLANE_WM_LINES_MASK; 3751 3865 } else { 3752 - active->wm[level].cursor_en = is_enabled; 3753 - active->wm[level].cursor_res_b = 3866 + active->wm[level].plane_en[PLANE_CURSOR] = is_enabled; 3867 + active->wm[level].plane_res_b[PLANE_CURSOR] = 3754 3868 val & PLANE_WM_BLOCKS_MASK; 3755 - active->wm[level].cursor_res_l = 3869 + active->wm[level].plane_res_l[PLANE_CURSOR] = 3756 3870 (val >> PLANE_WM_LINES_SHIFT) & 3757 3871 PLANE_WM_LINES_MASK; 3758 3872 } ··· 3765 3879 (val >> PLANE_WM_LINES_SHIFT) & 3766 3880 PLANE_WM_LINES_MASK; 3767 3881 } else { 3768 - active->trans_wm.cursor_en = is_enabled; 3769 - active->trans_wm.cursor_res_b = 3882 + active->trans_wm.plane_en[PLANE_CURSOR] = is_enabled; 3883 + active->trans_wm.plane_res_b[PLANE_CURSOR] = 3770 3884 val & PLANE_WM_BLOCKS_MASK; 3771 - active->trans_wm.cursor_res_l = 3885 + active->trans_wm.plane_res_l[PLANE_CURSOR] = 3772 3886 (val >> PLANE_WM_LINES_SHIFT) & 3773 3887 PLANE_WM_LINES_MASK; 3774 3888 } ··· 3794 3908 for (i = 0; i < intel_num_planes(intel_crtc); i++) 3795 3909 hw->plane[pipe][i][level] = 3796 3910 I915_READ(PLANE_WM(pipe, i, level)); 3797 - hw->cursor[pipe][level] = I915_READ(CUR_WM(pipe, level)); 3911 + hw->plane[pipe][PLANE_CURSOR][level] = I915_READ(CUR_WM(pipe, level)); 3798 3912 } 3799 3913 3800 3914 for (i = 0; i < intel_num_planes(intel_crtc); i++) 3801 3915 hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i)); 3802 - hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe)); 3916 + hw->plane_trans[pipe][PLANE_CURSOR] = I915_READ(CUR_WM_TRANS(pipe)); 3803 3917 3804 3918 if 
(!intel_crtc->active) 3805 3919 return; ··· 3814 3928 skl_pipe_wm_active_state(temp, active, false, 3815 3929 false, i, level); 3816 3930 } 3817 - temp = hw->cursor[pipe][level]; 3931 + temp = hw->plane[pipe][PLANE_CURSOR][level]; 3818 3932 skl_pipe_wm_active_state(temp, active, false, true, i, level); 3819 3933 } 3820 3934 ··· 3823 3937 skl_pipe_wm_active_state(temp, active, true, false, i, 0); 3824 3938 } 3825 3939 3826 - temp = hw->cursor_trans[pipe]; 3940 + temp = hw->plane_trans[pipe][PLANE_CURSOR]; 3827 3941 skl_pipe_wm_active_state(temp, active, true, true, i, 0); 3828 3942 } 3829 3943 ··· 4106 4220 4107 4221 if (dev_priv->display.update_wm) 4108 4222 dev_priv->display.update_wm(crtc); 4109 - } 4110 - 4111 - void intel_update_sprite_watermarks(struct drm_plane *plane, 4112 - struct drm_crtc *crtc, 4113 - uint32_t sprite_width, 4114 - uint32_t sprite_height, 4115 - int pixel_size, 4116 - bool enabled, bool scaled) 4117 - { 4118 - struct drm_i915_private *dev_priv = plane->dev->dev_private; 4119 - 4120 - if (dev_priv->display.update_sprite_wm) 4121 - dev_priv->display.update_sprite_wm(plane, crtc, 4122 - sprite_width, sprite_height, 4123 - pixel_size, enabled, scaled); 4124 4223 } 4125 4224 4126 4225 /** ··· 4757 4886 I915_WRITE(GUC_MAX_IDLE_COUNT, 0xA); 4758 4887 4759 4888 I915_WRITE(GEN6_RC_SLEEP, 0); 4760 - I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ 4761 4889 4762 4890 /* 2c: Program Coarse Power Gating Policies. */ 4763 4891 I915_WRITE(GEN9_MEDIA_PG_IDLE_HYSTERESIS, 25); ··· 4767 4897 rc6_mask = GEN6_RC_CTL_RC6_ENABLE; 4768 4898 DRM_INFO("RC6 %s\n", (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ? 
4769 4899 "on" : "off"); 4770 - 4900 + /* WaRsUseTimeoutMode */ 4771 4901 if ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_D0) || 4772 - (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) 4902 + (IS_BROXTON(dev) && INTEL_REVID(dev) <= BXT_REVID_A0)) { 4903 + I915_WRITE(GEN6_RC6_THRESHOLD, 625); /* 800us */ 4773 4904 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 4774 4905 GEN7_RC_CTL_TO_MODE | 4775 4906 rc6_mask); 4776 - else 4907 + } else { 4908 + I915_WRITE(GEN6_RC6_THRESHOLD, 37500); /* 37.5/125ms per EI */ 4777 4909 I915_WRITE(GEN6_RC_CONTROL, GEN6_RC_CTL_HW_ENABLE | 4778 4910 GEN6_RC_CTL_EI_MODE(1) | 4779 4911 rc6_mask); 4912 + } 4780 4913 4781 4914 /* 4782 4915 * 3b: Enable Coarse Power Gating only when RC6 is enabled. ··· 5088 5215 struct drm_device *dev = dev_priv->dev; 5089 5216 u32 val, rp0; 5090 5217 5091 - if (dev->pdev->revision >= 0x20) { 5092 - val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 5218 + val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 5093 5219 5094 - switch (INTEL_INFO(dev)->eu_total) { 5095 - case 8: 5096 - /* (2 * 4) config */ 5097 - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); 5098 - break; 5099 - case 12: 5100 - /* (2 * 6) config */ 5101 - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT); 5102 - break; 5103 - case 16: 5104 - /* (2 * 8) config */ 5105 - default: 5106 - /* Setting (2 * 8) Min RP0 for any other combination */ 5107 - rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT); 5108 - break; 5109 - } 5110 - rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK); 5111 - } else { 5112 - /* For pre-production hardware */ 5113 - val = vlv_punit_read(dev_priv, PUNIT_GPU_STATUS_REG); 5114 - rp0 = (val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & 5115 - PUNIT_GPU_STATUS_MAX_FREQ_MASK; 5220 + switch (INTEL_INFO(dev)->eu_total) { 5221 + case 8: 5222 + /* (2 * 4) config */ 5223 + rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT); 5224 + break; 5225 + case 12: 5226 + /* (2 * 6) config */ 5227 + rp0 = (val >> 
FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT); 5228 + break; 5229 + case 16: 5230 + /* (2 * 8) config */ 5231 + default: 5232 + /* Setting (2 * 8) Min RP0 for any other combination */ 5233 + rp0 = (val >> FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT); 5234 + break; 5116 5235 } 5236 + 5237 + rp0 = (rp0 & FB_GFX_FREQ_FUSE_MASK); 5238 + 5117 5239 return rp0; 5118 5240 } 5119 5241 ··· 5124 5256 5125 5257 static int cherryview_rps_guar_freq(struct drm_i915_private *dev_priv) 5126 5258 { 5127 - struct drm_device *dev = dev_priv->dev; 5128 5259 u32 val, rp1; 5129 5260 5130 - if (dev->pdev->revision >= 0x20) { 5131 - val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 5132 - rp1 = (val & FB_GFX_FREQ_FUSE_MASK); 5133 - } else { 5134 - /* For pre-production hardware */ 5135 - val = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS); 5136 - rp1 = ((val >> PUNIT_GPU_STATUS_MAX_FREQ_SHIFT) & 5137 - PUNIT_GPU_STATUS_MAX_FREQ_MASK); 5138 - } 5261 + val = vlv_punit_read(dev_priv, FB_GFX_FMAX_AT_VMAX_FUSE); 5262 + rp1 = (val & FB_GFX_FREQ_FUSE_MASK); 5263 + 5139 5264 return rp1; 5140 5265 } 5141 5266 ··· 5343 5482 mutex_unlock(&dev_priv->sb_lock); 5344 5483 5345 5484 switch ((val >> 2) & 0x7) { 5346 - case 0: 5347 - case 1: 5348 - dev_priv->rps.cz_freq = 200; 5349 - dev_priv->mem_freq = 1600; 5350 - break; 5351 - case 2: 5352 - dev_priv->rps.cz_freq = 267; 5353 - dev_priv->mem_freq = 1600; 5354 - break; 5355 5485 case 3: 5356 - dev_priv->rps.cz_freq = 333; 5357 5486 dev_priv->mem_freq = 2000; 5358 5487 break; 5359 - case 4: 5360 - dev_priv->rps.cz_freq = 320; 5361 - dev_priv->mem_freq = 1600; 5362 - break; 5363 - case 5: 5364 - dev_priv->rps.cz_freq = 400; 5488 + default: 5365 5489 dev_priv->mem_freq = 1600; 5366 5490 break; 5367 5491 } ··· 6523 6677 PCH_LP_PARTITION_LEVEL_DISABLE); 6524 6678 6525 6679 /* WADPOClockGatingDisable:hsw */ 6526 - I915_WRITE(_TRANSA_CHICKEN1, 6527 - I915_READ(_TRANSA_CHICKEN1) | 6680 + I915_WRITE(TRANS_CHICKEN1(PIPE_A), 6681 + I915_READ(TRANS_CHICKEN1(PIPE_A)) | 
6528 6682 TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); 6529 6683 } 6530 6684 ··· 7022 7176 dev_priv->display.init_clock_gating = 7023 7177 skl_init_clock_gating; 7024 7178 dev_priv->display.update_wm = skl_update_wm; 7025 - dev_priv->display.update_sprite_wm = skl_update_sprite_wm; 7026 7179 } else if (HAS_PCH_SPLIT(dev)) { 7027 7180 ilk_setup_wm_latency(dev); 7028 7181 ··· 7030 7185 (!IS_GEN5(dev) && dev_priv->wm.pri_latency[0] && 7031 7186 dev_priv->wm.spr_latency[0] && dev_priv->wm.cur_latency[0])) { 7032 7187 dev_priv->display.update_wm = ilk_update_wm; 7033 - dev_priv->display.update_sprite_wm = ilk_update_sprite_wm; 7034 7188 } else { 7035 7189 DRM_DEBUG_KMS("Failed to read display plane latency. " 7036 7190 "Disable CxSR\n"); ··· 7171 7327 7172 7328 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val) 7173 7329 { 7174 - int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4); 7330 + int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); 7175 7331 7176 7332 div = vlv_gpu_freq_div(czclk_freq); 7177 7333 if (div < 0) ··· 7182 7338 7183 7339 static int byt_freq_opcode(struct drm_i915_private *dev_priv, int val) 7184 7340 { 7185 - int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->mem_freq, 4); 7341 + int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); 7186 7342 7187 7343 mul = vlv_gpu_freq_div(czclk_freq); 7188 7344 if (mul < 0) ··· 7193 7349 7194 7350 static int chv_gpu_freq(struct drm_i915_private *dev_priv, int val) 7195 7351 { 7196 - int div, czclk_freq = dev_priv->rps.cz_freq; 7352 + int div, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); 7197 7353 7198 7354 div = vlv_gpu_freq_div(czclk_freq) / 2; 7199 7355 if (div < 0) ··· 7204 7360 7205 7361 static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) 7206 7362 { 7207 - int mul, czclk_freq = dev_priv->rps.cz_freq; 7363 + int mul, czclk_freq = DIV_ROUND_CLOSEST(dev_priv->czclk_freq, 1000); 7208 7364 7209 7365 mul = 
vlv_gpu_freq_div(czclk_freq) / 2; 7210 7366 if (mul < 0)
+53 -66
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 717 717 struct drm_i915_private *dev_priv = dev->dev_private; 718 718 struct i915_workarounds *w = &dev_priv->workarounds; 719 719 720 - if (WARN_ON_ONCE(w->count == 0)) 720 + if (w->count == 0) 721 721 return 0; 722 722 723 723 ring->gpu_caches_dirty = true; ··· 800 800 801 801 #define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val) 802 802 803 - static int bdw_init_workarounds(struct intel_engine_cs *ring) 803 + static int gen8_init_workarounds(struct intel_engine_cs *ring) 804 804 { 805 805 struct drm_device *dev = ring->dev; 806 806 struct drm_i915_private *dev_priv = dev->dev_private; 807 807 808 808 WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); 809 809 810 - /* WaDisableAsyncFlipPerfMode:bdw */ 810 + /* WaDisableAsyncFlipPerfMode:bdw,chv */ 811 811 WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE); 812 812 813 - /* WaDisablePartialInstShootdown:bdw */ 814 - /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ 813 + /* WaDisablePartialInstShootdown:bdw,chv */ 815 814 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 816 - PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE | 817 - STALL_DOP_GATING_DISABLE); 818 - 819 - /* WaDisableDopClockGating:bdw */ 820 - WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, 821 - DOP_CLOCK_GATING_DISABLE); 822 - 823 - WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 824 - GEN8_SAMPLER_POWER_BYPASS_DIS); 815 + PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE); 825 816 826 817 /* Use Force Non-Coherent whenever executing a 3D context. This is a 827 818 * workaround for for a possible hang in the unlikely event a TLB 828 819 * invalidation occurs during a PSD flush. 
829 820 */ 821 + /* WaForceEnableNonCoherent:bdw,chv */ 822 + /* WaHdcDisableFetchWhenMasked:bdw,chv */ 830 823 WA_SET_BIT_MASKED(HDC_CHICKEN0, 831 - /* WaForceEnableNonCoherent:bdw */ 832 - HDC_FORCE_NON_COHERENT | 833 - /* WaForceContextSaveRestoreNonCoherent:bdw */ 834 - HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | 835 - /* WaHdcDisableFetchWhenMasked:bdw */ 836 824 HDC_DONOT_FETCH_MEM_WHEN_MASKED | 837 - /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ 838 - (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); 825 + HDC_FORCE_NON_COHERENT); 839 826 840 827 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0: 841 828 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping ··· 830 843 * stalling waiting for the earlier ones to write to Hierarchical Z 831 844 * buffer." 832 845 * 833 - * This optimization is off by default for Broadwell; turn it on. 846 + * This optimization is off by default for BDW and CHV; turn it on. 834 847 */ 835 848 WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE); 836 849 837 - /* Wa4x4STCOptimizationDisable:bdw */ 838 - WA_SET_BIT_MASKED(CACHE_MODE_1, 839 - GEN8_4x4_STC_OPTIMIZATION_DISABLE); 850 + /* Wa4x4STCOptimizationDisable:bdw,chv */ 851 + WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE); 840 852 841 853 /* 842 854 * BSpec recommends 8x4 when MSAA is used, ··· 852 866 return 0; 853 867 } 854 868 855 - static int chv_init_workarounds(struct intel_engine_cs *ring) 869 + static int bdw_init_workarounds(struct intel_engine_cs *ring) 856 870 { 871 + int ret; 857 872 struct drm_device *dev = ring->dev; 858 873 struct drm_i915_private *dev_priv = dev->dev_private; 859 874 860 - WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING); 875 + ret = gen8_init_workarounds(ring); 876 + if (ret) 877 + return ret; 861 878 862 - /* WaDisableAsyncFlipPerfMode:chv */ 863 - WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE); 879 + /* WaDisableThreadStallDopClockGating:bdw (pre-production) */ 
880 + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); 864 881 865 - /* WaDisablePartialInstShootdown:chv */ 866 - /* WaDisableThreadStallDopClockGating:chv */ 867 - WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, 868 - PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE | 869 - STALL_DOP_GATING_DISABLE); 882 + /* WaDisableDopClockGating:bdw */ 883 + WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2, 884 + DOP_CLOCK_GATING_DISABLE); 870 885 871 - /* Use Force Non-Coherent whenever executing a 3D context. This is a 872 - * workaround for a possible hang in the unlikely event a TLB 873 - * invalidation occurs during a PSD flush. 874 - */ 875 - /* WaForceEnableNonCoherent:chv */ 876 - /* WaHdcDisableFetchWhenMasked:chv */ 886 + WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3, 887 + GEN8_SAMPLER_POWER_BYPASS_DIS); 888 + 877 889 WA_SET_BIT_MASKED(HDC_CHICKEN0, 878 - HDC_FORCE_NON_COHERENT | 879 - HDC_DONOT_FETCH_MEM_WHEN_MASKED); 890 + /* WaForceContextSaveRestoreNonCoherent:bdw */ 891 + HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT | 892 + /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */ 893 + (IS_BDW_GT3(dev) ? HDC_FENCE_DEST_SLM_DISABLE : 0)); 880 894 881 - /* According to the CACHE_MODE_0 default value documentation, some 882 - * CHV platforms disable this optimization by default. Turn it on. 883 - */ 884 - WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE); 895 + return 0; 896 + } 885 897 886 - /* Wa4x4STCOptimizationDisable:chv */ 887 - WA_SET_BIT_MASKED(CACHE_MODE_1, 888 - GEN8_4x4_STC_OPTIMIZATION_DISABLE); 898 + static int chv_init_workarounds(struct intel_engine_cs *ring) 899 + { 900 + int ret; 901 + struct drm_device *dev = ring->dev; 902 + struct drm_i915_private *dev_priv = dev->dev_private; 903 + 904 + ret = gen8_init_workarounds(ring); 905 + if (ret) 906 + return ret; 907 + 908 + /* WaDisableThreadStallDopClockGating:chv */ 909 + WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE); 889 910 890 911 /* Improve HiZ throughput on CHV. 
*/ 891 912 WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X); 892 - 893 - /* 894 - * BSpec recommends 8x4 when MSAA is used, 895 - * however in practice 16x4 seems fastest. 896 - * 897 - * Note that PS/WM thread counts depend on the WIZ hashing 898 - * disable bit, which we don't touch here, but it's good 899 - * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM). 900 - */ 901 - WA_SET_FIELD_MASKED(GEN7_GT_MODE, 902 - GEN6_WIZ_HASHING_MASK, 903 - GEN6_WIZ_HASHING_16x4); 904 913 905 914 return 0; 906 915 } ··· 942 961 } 943 962 944 963 /* Wa4x4STCOptimizationDisable:skl,bxt */ 945 - WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE); 946 - 947 964 /* WaDisablePartialResolveInVc:skl,bxt */ 948 - WA_SET_BIT_MASKED(CACHE_MODE_1, GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE); 965 + WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE | 966 + GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE)); 949 967 950 968 /* WaCcsTlbPrefetchDisable:skl,bxt */ 951 969 WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5, ··· 1021 1041 1022 1042 static int skl_init_workarounds(struct intel_engine_cs *ring) 1023 1043 { 1044 + int ret; 1024 1045 struct drm_device *dev = ring->dev; 1025 1046 struct drm_i915_private *dev_priv = dev->dev_private; 1026 1047 1027 - gen9_init_workarounds(ring); 1048 + ret = gen9_init_workarounds(ring); 1049 + if (ret) 1050 + return ret; 1028 1051 1029 1052 /* WaDisablePowerCompilerClockGating:skl */ 1030 1053 if (INTEL_REVID(dev) == SKL_REVID_B0) ··· 1064 1081 1065 1082 static int bxt_init_workarounds(struct intel_engine_cs *ring) 1066 1083 { 1084 + int ret; 1067 1085 struct drm_device *dev = ring->dev; 1068 1086 struct drm_i915_private *dev_priv = dev->dev_private; 1069 1087 1070 - gen9_init_workarounds(ring); 1088 + ret = gen9_init_workarounds(ring); 1089 + if (ret) 1090 + return ret; 1071 1091 1072 1092 /* WaDisableThreadStallDopClockGating:bxt */ 1073 1093 WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, ··· 2623 2637 GEN8_RING_SEMAPHORE_INIT; 2624 2638 } 2625 2639 } else 
if (INTEL_INFO(dev)->gen >= 6) { 2640 + ring->init_context = intel_rcs_ctx_init; 2626 2641 ring->add_request = gen6_add_request; 2627 2642 ring->flush = gen7_render_ring_flush; 2628 2643 if (INTEL_INFO(dev)->gen == 6)
+54 -7
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 657 657 } 658 658 } else { 659 659 if (enable_requested) { 660 - I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask); 661 - POSTING_READ(HSW_PWR_WELL_DRIVER); 662 - DRM_DEBUG_KMS("Disabling %s\n", power_well->name); 660 + if (IS_SKYLAKE(dev) && 661 + (power_well->data == SKL_DISP_PW_1) && 662 + (intel_csr_load_status_get(dev_priv) == FW_LOADED)) 663 + DRM_DEBUG_KMS("Not Disabling PW1, dmc will handle\n"); 664 + else { 665 + I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask); 666 + POSTING_READ(HSW_PWR_WELL_DRIVER); 667 + DRM_DEBUG_KMS("Disabling %s\n", power_well->name); 668 + } 663 669 664 670 if ((GEN9_ENABLE_DC5(dev) || SKL_ENABLE_DC6(dev)) && 665 671 power_well->data == SKL_DISP_PW_2) { ··· 994 988 lookup_power_well(dev_priv, PUNIT_POWER_WELL_DPIO_CMN_D); 995 989 u32 phy_control = dev_priv->chv_phy_control; 996 990 u32 phy_status = 0; 991 + u32 phy_status_mask = 0xffffffff; 997 992 u32 tmp; 993 + 994 + /* 995 + * The BIOS can leave the PHY is some weird state 996 + * where it doesn't fully power down some parts. 997 + * Disable the asserts until the PHY has been fully 998 + * reset (ie. the power well has been disabled at 999 + * least once). 
1000 + */ 1001 + if (!dev_priv->chv_phy_assert[DPIO_PHY0]) 1002 + phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH0) | 1003 + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 0) | 1004 + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH0, 1) | 1005 + PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1) | 1006 + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 0) | 1007 + PHY_STATUS_SPLINE_LDO(DPIO_PHY0, DPIO_CH1, 1)); 1008 + 1009 + if (!dev_priv->chv_phy_assert[DPIO_PHY1]) 1010 + phy_status_mask &= ~(PHY_STATUS_CMN_LDO(DPIO_PHY1, DPIO_CH0) | 1011 + PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 0) | 1012 + PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1)); 998 1013 999 1014 if (cmn_bc->ops->is_enabled(dev_priv, cmn_bc)) { 1000 1015 phy_status |= PHY_POWERGOOD(DPIO_PHY0); ··· 1077 1050 phy_status |= PHY_STATUS_SPLINE_LDO(DPIO_PHY1, DPIO_CH0, 1); 1078 1051 } 1079 1052 1053 + phy_status &= phy_status_mask; 1054 + 1080 1055 /* 1081 1056 * The PHY may be busy with some initial calibration and whatnot, 1082 1057 * so the power state can take a while to actually change. 1083 1058 */ 1084 - if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS)) == phy_status, 10)) 1059 + if (wait_for((tmp = I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask) == phy_status, 10)) 1085 1060 WARN(phy_status != tmp, 1086 1061 "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n", 1087 1062 tmp, phy_status, dev_priv->chv_phy_control); ··· 1176 1147 DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n", 1177 1148 phy, dev_priv->chv_phy_control); 1178 1149 1150 + /* PHY is fully reset now, so we can enable the PHY state asserts */ 1151 + dev_priv->chv_phy_assert[phy] = true; 1152 + 1179 1153 assert_chv_phy_status(dev_priv); 1180 1154 } 1181 1155 ··· 1187 1155 { 1188 1156 enum pipe pipe = phy == DPIO_PHY0 ? PIPE_A : PIPE_C; 1189 1157 u32 reg, val, expected, actual; 1158 + 1159 + /* 1160 + * The BIOS can leave the PHY is some weird state 1161 + * where it doesn't fully power down some parts. 
1162 + * Disable the asserts until the PHY has been fully 1163 + * reset (ie. the power well has been disabled at 1164 + * least once). 1165 + */ 1166 + if (!dev_priv->chv_phy_assert[phy]) 1167 + return; 1190 1168 1191 1169 if (ch == DPIO_CH0) 1192 1170 reg = _CHV_CMN_DW0_CH0; ··· 1865 1823 1866 1824 /* Make sure we're not suspended first. */ 1867 1825 pm_runtime_get_sync(device); 1868 - pm_runtime_disable(device); 1869 1826 } 1870 1827 1871 1828 /** ··· 1953 1912 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY0, DPIO_CH1); 1954 1913 1955 1914 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY0); 1915 + 1916 + dev_priv->chv_phy_assert[DPIO_PHY0] = false; 1917 + } else { 1918 + dev_priv->chv_phy_assert[DPIO_PHY0] = true; 1956 1919 } 1957 1920 1958 1921 if (cmn_d->ops->is_enabled(dev_priv, cmn_d)) { ··· 1975 1930 PHY_CH_POWER_DOWN_OVRD(mask, DPIO_PHY1, DPIO_CH0); 1976 1931 1977 1932 dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(DPIO_PHY1); 1933 + 1934 + dev_priv->chv_phy_assert[DPIO_PHY1] = false; 1935 + } else { 1936 + dev_priv->chv_phy_assert[DPIO_PHY1] = true; 1978 1937 } 1979 1938 1980 1939 I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control); ··· 2163 2114 2164 2115 if (!HAS_RUNTIME_PM(dev)) 2165 2116 return; 2166 - 2167 - pm_runtime_set_active(device); 2168 2117 2169 2118 /* 2170 2119 * RPM depends on RC6 to save restore the GT HW context, so make RC6 a
+32 -5
drivers/gpu/drm/i915/intel_sdvo.c
··· 107 107 bool color_range_auto; 108 108 109 109 /** 110 + * HDMI user specified aspect ratio 111 + */ 112 + enum hdmi_picture_aspect aspect_ratio; 113 + 114 + /** 110 115 * This is set if we're going to treat the device as TV-out. 111 116 * 112 117 * While we have these nice friendly flags for output types that ought ··· 608 603 return false; 609 604 } 610 605 611 - static int intel_sdvo_get_pixel_multiplier(struct drm_display_mode *mode) 606 + static int intel_sdvo_get_pixel_multiplier(const struct drm_display_mode *adjusted_mode) 612 607 { 613 - if (mode->clock >= 100000) 608 + if (adjusted_mode->crtc_clock >= 100000) 614 609 return 1; 615 - else if (mode->clock >= 50000) 610 + else if (adjusted_mode->crtc_clock >= 50000) 616 611 return 2; 617 612 else 618 613 return 4; ··· 1186 1181 if (intel_sdvo->is_tv) 1187 1182 i9xx_adjust_sdvo_tv_clock(pipe_config); 1188 1183 1184 + /* Set user selected PAR to incoming mode's member */ 1185 + if (intel_sdvo->is_hdmi) 1186 + adjusted_mode->picture_aspect_ratio = intel_sdvo->aspect_ratio; 1187 + 1189 1188 return true; 1190 1189 } 1191 1190 ··· 1198 1189 struct drm_device *dev = intel_encoder->base.dev; 1199 1190 struct drm_i915_private *dev_priv = dev->dev_private; 1200 1191 struct intel_crtc *crtc = to_intel_crtc(intel_encoder->base.crtc); 1201 - struct drm_display_mode *adjusted_mode = 1202 - &crtc->config->base.adjusted_mode; 1192 + const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 1203 1193 struct drm_display_mode *mode = &crtc->config->base.mode; 1204 1194 struct intel_sdvo *intel_sdvo = to_sdvo(intel_encoder); 1205 1195 u32 sdvox; ··· 2052 2044 goto done; 2053 2045 } 2054 2046 2047 + if (property == connector->dev->mode_config.aspect_ratio_property) { 2048 + switch (val) { 2049 + case DRM_MODE_PICTURE_ASPECT_NONE: 2050 + intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; 2051 + break; 2052 + case DRM_MODE_PICTURE_ASPECT_4_3: 2053 + intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_4_3; 
2054 + break; 2055 + case DRM_MODE_PICTURE_ASPECT_16_9: 2056 + intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_16_9; 2057 + break; 2058 + default: 2059 + return -EINVAL; 2060 + } 2061 + goto done; 2062 + } 2063 + 2055 2064 #define CHECK_PROPERTY(name, NAME) \ 2056 2065 if (intel_sdvo_connector->name == property) { \ 2057 2066 if (intel_sdvo_connector->cur_##name == temp_value) return 0; \ ··· 2408 2383 intel_attach_broadcast_rgb_property(&connector->base.base); 2409 2384 intel_sdvo->color_range_auto = true; 2410 2385 } 2386 + intel_attach_aspect_ratio_property(&connector->base.base); 2387 + intel_sdvo->aspect_ratio = HDMI_PICTURE_ASPECT_NONE; 2411 2388 } 2412 2389 2413 2390 static struct intel_sdvo_connector *intel_sdvo_connector_alloc(void)
+9 -22
drivers/gpu/drm/i915/intel_sprite.c
··· 53 53 } 54 54 } 55 55 56 - static int usecs_to_scanlines(const struct drm_display_mode *mode, int usecs) 56 + static int usecs_to_scanlines(const struct drm_display_mode *adjusted_mode, 57 + int usecs) 57 58 { 58 59 /* paranoia */ 59 - if (!mode->crtc_htotal) 60 + if (!adjusted_mode->crtc_htotal) 60 61 return 1; 61 62 62 - return DIV_ROUND_UP(usecs * mode->crtc_clock, 1000 * mode->crtc_htotal); 63 + return DIV_ROUND_UP(usecs * adjusted_mode->crtc_clock, 64 + 1000 * adjusted_mode->crtc_htotal); 63 65 } 64 66 65 67 /** ··· 81 79 void intel_pipe_update_start(struct intel_crtc *crtc) 82 80 { 83 81 struct drm_device *dev = crtc->base.dev; 84 - const struct drm_display_mode *mode = &crtc->config->base.adjusted_mode; 82 + const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode; 85 83 enum pipe pipe = crtc->pipe; 86 84 long timeout = msecs_to_jiffies_timeout(1); 87 85 int scanline, min, max, vblank_start; 88 86 wait_queue_head_t *wq = drm_crtc_vblank_waitqueue(&crtc->base); 89 87 DEFINE_WAIT(wait); 90 88 91 - vblank_start = mode->crtc_vblank_start; 92 - if (mode->flags & DRM_MODE_FLAG_INTERLACE) 89 + vblank_start = adjusted_mode->crtc_vblank_start; 90 + if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) 93 91 vblank_start = DIV_ROUND_UP(vblank_start, 2); 94 92 95 93 /* FIXME needs to be calibrated sensibly */ 96 - min = vblank_start - usecs_to_scanlines(mode, 100); 94 + min = vblank_start - usecs_to_scanlines(adjusted_mode, 100); 97 95 max = vblank_start - 1; 98 96 99 97 local_irq_disable(); ··· 192 190 const int pipe = intel_plane->pipe; 193 191 const int plane = intel_plane->plane + 1; 194 192 u32 plane_ctl, stride_div, stride; 195 - int pixel_size = drm_format_plane_cpp(fb->pixel_format, 0); 196 193 const struct drm_intel_sprite_colorkey *key = 197 194 &to_intel_plane_state(drm_plane->state)->ckey; 198 195 unsigned long surf_addr; ··· 209 208 210 209 rotation = drm_plane->state->rotation; 211 210 plane_ctl |= 
skl_plane_ctl_rotation(rotation); 212 - 213 - intel_update_sprite_watermarks(drm_plane, crtc, src_w, src_h, 214 - pixel_size, true, 215 - src_w != crtc_w || src_h != crtc_h); 216 211 217 212 stride_div = intel_fb_stride_alignment(dev, fb->modifier[0], 218 213 fb->pixel_format); ··· 291 294 292 295 I915_WRITE(PLANE_SURF(pipe, plane), 0); 293 296 POSTING_READ(PLANE_SURF(pipe, plane)); 294 - 295 - intel_update_sprite_watermarks(dplane, crtc, 0, 0, 0, false, false); 296 297 } 297 298 298 299 static void ··· 533 538 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) 534 539 sprctl |= SPRITE_PIPE_CSC_ENABLE; 535 540 536 - intel_update_sprite_watermarks(plane, crtc, src_w, src_h, pixel_size, 537 - true, 538 - src_w != crtc_w || src_h != crtc_h); 539 - 540 541 /* Sizes are 0 based */ 541 542 src_w--; 542 543 src_h--; ··· 665 674 666 675 if (IS_GEN6(dev)) 667 676 dvscntr |= DVS_TRICKLE_FEED_DISABLE; /* must disable */ 668 - 669 - intel_update_sprite_watermarks(plane, crtc, src_w, src_h, 670 - pixel_size, true, 671 - src_w != crtc_w || src_h != crtc_h); 672 677 673 678 /* Sizes are 0 based */ 674 679 src_w--;
+5 -5
drivers/gpu/drm/i915/intel_uncore.c
··· 1429 1429 struct drm_i915_private *dev_priv = dev->dev_private; 1430 1430 int ret; 1431 1431 1432 - I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1432 + I915_WRITE(ILK_GDSR, 1433 1433 ILK_GRDOM_RENDER | ILK_GRDOM_RESET_ENABLE); 1434 - ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 1434 + ret = wait_for((I915_READ(ILK_GDSR) & 1435 1435 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1436 1436 if (ret) 1437 1437 return ret; 1438 1438 1439 - I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 1439 + I915_WRITE(ILK_GDSR, 1440 1440 ILK_GRDOM_MEDIA | ILK_GRDOM_RESET_ENABLE); 1441 - ret = wait_for((I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 1441 + ret = wait_for((I915_READ(ILK_GDSR) & 1442 1442 ILK_GRDOM_RESET_ENABLE) == 0, 500); 1443 1443 if (ret) 1444 1444 return ret; 1445 1445 1446 - I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, 0); 1446 + I915_WRITE(ILK_GDSR, 0); 1447 1447 1448 1448 return 0; 1449 1449 }
+45 -17
include/drm/i915_component.h
··· 24 24 #ifndef _I915_COMPONENT_H_ 25 25 #define _I915_COMPONENT_H_ 26 26 27 + /* MAX_PORT is the number of port 28 + * It must be sync with I915_MAX_PORTS defined i915_drv.h 29 + * 5 should be enough as only HSW, BDW, SKL need such fix. 30 + */ 31 + #define MAX_PORTS 5 32 + 33 + /** 34 + * struct i915_audio_component_ops - callbacks defined in gfx driver 35 + * @owner: the module owner 36 + * @get_power: get the POWER_DOMAIN_AUDIO power well 37 + * @put_power: put the POWER_DOMAIN_AUDIO power well 38 + * @codec_wake_override: Enable/Disable generating the codec wake signal 39 + * @get_cdclk_freq: get the Core Display Clock in KHz 40 + * @sync_audio_rate: set n/cts based on the sample rate 41 + */ 42 + struct i915_audio_component_ops { 43 + struct module *owner; 44 + void (*get_power)(struct device *); 45 + void (*put_power)(struct device *); 46 + void (*codec_wake_override)(struct device *, bool enable); 47 + int (*get_cdclk_freq)(struct device *); 48 + int (*sync_audio_rate)(struct device *, int port, int rate); 49 + }; 50 + 51 + struct i915_audio_component_audio_ops { 52 + void *audio_ptr; 53 + /** 54 + * Call from i915 driver, notifying the HDA driver that 55 + * pin sense and/or ELD information has changed. 
56 + * @audio_ptr: HDA driver object 57 + * @port: Which port has changed (PORTA / PORTB / PORTC etc) 58 + */ 59 + void (*pin_eld_notify)(void *audio_ptr, int port); 60 + }; 61 + 62 + /** 63 + * struct i915_audio_component - used for audio video interaction 64 + * @dev: the device from gfx driver 65 + * @aud_sample_rate: the array of audio sample rate per port 66 + * @ops: callback for audio driver calling 67 + * @audio_ops: Call from i915 driver 68 + */ 27 69 struct i915_audio_component { 28 70 struct device *dev; 71 + int aud_sample_rate[MAX_PORTS]; 29 72 30 - const struct i915_audio_component_ops { 31 - struct module *owner; 32 - void (*get_power)(struct device *); 33 - void (*put_power)(struct device *); 34 - void (*codec_wake_override)(struct device *, bool enable); 35 - int (*get_cdclk_freq)(struct device *); 36 - } *ops; 73 + const struct i915_audio_component_ops *ops; 37 74 38 - const struct i915_audio_component_audio_ops { 39 - void *audio_ptr; 40 - /** 41 - * Call from i915 driver, notifying the HDA driver that 42 - * pin sense and/or ELD information has changed. 43 - * @audio_ptr: HDA driver object 44 - * @port: Which port has changed (PORTA / PORTB / PORTC etc) 45 - */ 46 - void (*pin_eld_notify)(void *audio_ptr, int port); 47 - } *audio_ops; 75 + const struct i915_audio_component_audio_ops *audio_ops; 48 76 }; 49 77 50 78 #endif /* _I915_COMPONENT_H_ */
+2 -1
include/uapi/drm/i915_drm.h
··· 690 690 #define EXEC_OBJECT_NEEDS_FENCE (1<<0) 691 691 #define EXEC_OBJECT_NEEDS_GTT (1<<1) 692 692 #define EXEC_OBJECT_WRITE (1<<2) 693 - #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1) 693 + #define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3) 694 + #define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_SUPPORTS_48B_ADDRESS<<1) 694 695 __u64 flags; 695 696 696 697 __u64 rsvd1;
+19
sound/pci/hda/patch_hdmi.c
··· 1775 1775 return non_pcm; 1776 1776 } 1777 1777 1778 + /* There is a fixed mapping between audio pin node and display port 1779 + * on current Intel platforms: 1780 + * Pin Widget 5 - PORT B (port = 1 in i915 driver) 1781 + * Pin Widget 6 - PORT C (port = 2 in i915 driver) 1782 + * Pin Widget 7 - PORT D (port = 3 in i915 driver) 1783 + */ 1784 + static int intel_pin2port(hda_nid_t pin_nid) 1785 + { 1786 + return pin_nid - 4; 1787 + } 1778 1788 1779 1789 /* 1780 1790 * HDMI callbacks ··· 1801 1791 int pin_idx = hinfo_to_pin_index(codec, hinfo); 1802 1792 struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx); 1803 1793 hda_nid_t pin_nid = per_pin->pin_nid; 1794 + struct snd_pcm_runtime *runtime = substream->runtime; 1795 + struct i915_audio_component *acomp = codec->bus->core.audio_component; 1804 1796 bool non_pcm; 1805 1797 int pinctl; 1806 1798 ··· 1818 1806 intel_verify_pin_cvt_connect(codec, per_pin); 1819 1807 intel_not_share_assigned_cvt(codec, pin_nid, per_pin->mux_idx); 1820 1808 } 1809 + 1810 + /* Call sync_audio_rate to set the N/CTS/M manually if necessary */ 1811 + /* Todo: add DP1.2 MST audio support later */ 1812 + if (acomp && acomp->ops && acomp->ops->sync_audio_rate) 1813 + acomp->ops->sync_audio_rate(acomp->dev, 1814 + intel_pin2port(pin_nid), 1815 + runtime->rate); 1821 1816 1822 1817 non_pcm = check_non_pcm_per_cvt(codec, cvt_nid); 1823 1818 mutex_lock(&per_pin->lock);