Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'drm-intel-next-2018-02-21' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

Driver Changes:

- Lift alpha_support protection from Cannonlake (Rodrigo)
* Meaning the driver should mostly work for the hardware we had
at our disposal when testing
* Used to be preliminary_hw_support
- Add missing Cannonlake PCI device ID of 0x5A4C (Rodrigo)
- Cannonlake port register fix (Mahesh)

- Fix Dell Venue 8 Pro black screen after modeset (Hans)
- Fix for always returning zero out-fence from execbuf (Daniele)
- Fix HDMI audio when no relevant video output is active (Jani)
- Fix memleak of VBT data on driver_unload (Hans)

- Fix for KASAN found locking issue (Maarten)
- RCU barrier consolidation to improve igt/gem_sync/idle (Chris)
- Optimizations to IRQ handlers (Chris)
- vblank tracking improvements (64-bit resolution, PM) (Dhinakaran)
- Pipe select bit corrections (Ville)
- Reduce runtime computed device_info fields (Chris)
- Tune down some WARN_ONs to GEM_BUG_ON now that CI has good coverage (Chris)
- A bunch of kerneldoc warning fixes (Chris)

* tag 'drm-intel-next-2018-02-21' of git://anongit.freedesktop.org/drm/drm-intel: (113 commits)
drm/i915: Update DRIVER_DATE to 20180221
drm/i915/fbc: Use PLANE_HAS_FENCE to determine if the plane is fenced
drm/i915/fbdev: Use the PLANE_HAS_FENCE flags from the time of pinning
drm/i915: Move the policy for placement of the GGTT vma into the caller
drm/i915: Also check view->type for a normal GGTT view
drm/i915: Drop WaDoubleCursorLP3Latency:ivb
drm/i915: Set the primary plane pipe select bits on gen4
drm/i915: Don't set cursor pipe select bits on g4x+
drm/i915: Assert that we don't overflow frontbuffer tracking bits
drm/i915: Track number of pending freed objects
drm/i915/: Initialise trans_min for skl_compute_transition_wm()
drm/i915: Clear the in-use marker on execbuf failure
drm/i915: Prune gen8_gt_irq_handler
drm/i915: Track GT interrupt handling using the master iir
drm/i915: Remove WARN_ONCE for failing to pm_runtime_if_in_use
drm: intel_dpio_phy: fix kernel-doc comments at nested struct
drm/i915: Release connector iterator on a digital port conflict.
drm/i915/execlists: Remove too early assert
drm/i915: Assert that we always complete a submission to guc/execlists
drm: move read_domains and write_domain into i915
...

+1872 -961
+1 -1
drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
··· 210 210 amdgpu_bo_unreserve(new_abo); 211 211 212 212 work->base = base; 213 - work->target_vblank = target - drm_crtc_vblank_count(crtc) + 213 + work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + 214 214 amdgpu_get_vblank_counter_kms(dev, work->crtc_id); 215 215 216 216 /* we borrow the event spin lock for protecting flip_wrok */
+2 -2
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
··· 3842 3842 3843 3843 3844 3844 /* Prepare wait for target vblank early - before the fence-waits */ 3845 - target_vblank = target - drm_crtc_vblank_count(crtc) + 3845 + target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + 3846 3846 amdgpu_get_vblank_counter_kms(crtc->dev, acrtc->crtc_id); 3847 3847 3848 3848 /* TODO This might fail and hence better not used, wait ··· 3988 3988 amdgpu_dm_do_flip( 3989 3989 crtc, 3990 3990 fb, 3991 - drm_crtc_vblank_count(crtc) + *wait_for_vblank, 3991 + (uint32_t)drm_crtc_vblank_count(crtc) + *wait_for_vblank, 3992 3992 dm_state->context); 3993 3993 } 3994 3994
+1 -1
drivers/gpu/drm/drm_plane.c
··· 948 948 if (r) 949 949 return r; 950 950 951 - current_vblank = drm_crtc_vblank_count(crtc); 951 + current_vblank = (u32)drm_crtc_vblank_count(crtc); 952 952 953 953 switch (page_flip->flags & DRM_MODE_PAGE_FLIP_TARGET) { 954 954 case DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE:
+73 -12
drivers/gpu/drm/drm_vblank.c
··· 271 271 store_vblank(dev, pipe, diff, t_vblank, cur_vblank); 272 272 } 273 273 274 - static u32 drm_vblank_count(struct drm_device *dev, unsigned int pipe) 274 + static u64 drm_vblank_count(struct drm_device *dev, unsigned int pipe) 275 275 { 276 276 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 277 277 ··· 292 292 * This is mostly useful for hardware that can obtain the scanout position, but 293 293 * doesn't have a hardware frame counter. 294 294 */ 295 - u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc) 295 + u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc) 296 296 { 297 297 struct drm_device *dev = crtc->dev; 298 298 unsigned int pipe = drm_crtc_index(crtc); 299 - u32 vblank; 299 + u64 vblank; 300 300 unsigned long flags; 301 301 302 302 WARN_ONCE(drm_debug & DRM_UT_VBL && !dev->driver->get_vblank_timestamp, ··· 347 347 spin_lock_irqsave(&dev->vblank_time_lock, irqflags); 348 348 349 349 /* 350 - * Only disable vblank interrupts if they're enabled. This avoids 351 - * calling the ->disable_vblank() operation in atomic context with the 352 - * hardware potentially runtime suspended. 350 + * Update vblank count and disable vblank interrupts only if the 351 + * interrupts were enabled. This avoids calling the ->disable_vblank() 352 + * operation in atomic context with the hardware potentially runtime 353 + * suspended. 353 354 */ 354 - if (vblank->enabled) { 355 - __disable_vblank(dev, pipe); 356 - vblank->enabled = false; 357 - } 355 + if (!vblank->enabled) 356 + goto out; 358 357 359 358 /* 360 - * Always update the count and timestamp to maintain the 359 + * Update the count and timestamp to maintain the 361 360 * appearance that the counter has been ticking all along until 362 361 * this time. This makes the count account for the entire time 363 362 * between drm_crtc_vblank_on() and drm_crtc_vblank_off(). 
364 363 */ 365 364 drm_update_vblank_count(dev, pipe, false); 365 + __disable_vblank(dev, pipe); 366 + vblank->enabled = false; 366 367 368 + out: 367 369 spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags); 368 370 } 369 371 ··· 1057 1055 { 1058 1056 struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; 1059 1057 int ret; 1060 - u32 last; 1058 + u64 last; 1061 1059 1062 1060 if (WARN_ON(pipe >= dev->num_crtcs)) 1063 1061 return; ··· 1236 1234 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 1237 1235 } 1238 1236 EXPORT_SYMBOL(drm_crtc_vblank_on); 1237 + 1238 + /** 1239 + * drm_vblank_restore - estimated vblanks using timestamps and update it. 1240 + * 1241 + * Power manamement features can cause frame counter resets between vblank 1242 + * disable and enable. Drivers can then use this function in their 1243 + * &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since 1244 + * the last &drm_crtc_funcs.disable_vblank. 1245 + * 1246 + * This function is the legacy version of drm_crtc_vblank_restore(). 
1247 + */ 1248 + void drm_vblank_restore(struct drm_device *dev, unsigned int pipe) 1249 + { 1250 + ktime_t t_vblank; 1251 + struct drm_vblank_crtc *vblank; 1252 + int framedur_ns; 1253 + u64 diff_ns; 1254 + u32 cur_vblank, diff = 1; 1255 + int count = DRM_TIMESTAMP_MAXRETRIES; 1256 + 1257 + if (WARN_ON(pipe >= dev->num_crtcs)) 1258 + return; 1259 + 1260 + assert_spin_locked(&dev->vbl_lock); 1261 + assert_spin_locked(&dev->vblank_time_lock); 1262 + 1263 + vblank = &dev->vblank[pipe]; 1264 + WARN_ONCE((drm_debug & DRM_UT_VBL) && !vblank->framedur_ns, 1265 + "Cannot compute missed vblanks without frame duration\n"); 1266 + framedur_ns = vblank->framedur_ns; 1267 + 1268 + do { 1269 + cur_vblank = __get_vblank_counter(dev, pipe); 1270 + drm_get_last_vbltimestamp(dev, pipe, &t_vblank, false); 1271 + } while (cur_vblank != __get_vblank_counter(dev, pipe) && --count > 0); 1272 + 1273 + diff_ns = ktime_to_ns(ktime_sub(t_vblank, vblank->time)); 1274 + if (framedur_ns) 1275 + diff = DIV_ROUND_CLOSEST_ULL(diff_ns, framedur_ns); 1276 + 1277 + 1278 + DRM_DEBUG_VBL("missed %d vblanks in %lld ns, frame duration=%d ns, hw_diff=%d\n", 1279 + diff, diff_ns, framedur_ns, cur_vblank - vblank->last); 1280 + store_vblank(dev, pipe, diff, t_vblank, cur_vblank); 1281 + } 1282 + EXPORT_SYMBOL(drm_vblank_restore); 1283 + 1284 + /** 1285 + * drm_crtc_vblank_restore - estimate vblanks using timestamps and update it. 1286 + * Power manamement features can cause frame counter resets between vblank 1287 + * disable and enable. Drivers can then use this function in their 1288 + * &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since 1289 + * the last &drm_crtc_funcs.disable_vblank. 
1290 + */ 1291 + void drm_crtc_vblank_restore(struct drm_crtc *crtc) 1292 + { 1293 + drm_vblank_restore(crtc->dev, drm_crtc_index(crtc)); 1294 + } 1295 + EXPORT_SYMBOL(drm_crtc_vblank_restore); 1239 1296 1240 1297 static void drm_legacy_vblank_pre_modeset(struct drm_device *dev, 1241 1298 unsigned int pipe)
+1
drivers/gpu/drm/i915/Makefile
··· 17 17 subdir-ccflags-y += $(call cc-disable-warning, type-limits) 18 18 subdir-ccflags-y += $(call cc-disable-warning, missing-field-initializers) 19 19 subdir-ccflags-y += $(call cc-disable-warning, implicit-fallthrough) 20 + subdir-ccflags-y += $(call cc-disable-warning, unused-but-set-variable) 20 21 subdir-ccflags-$(CONFIG_DRM_I915_WERROR) += -Werror 21 22 22 23 # Fine grained warnings disable
+14 -14
drivers/gpu/drm/i915/dvo_ivch.c
··· 59 59 * This must not be set while VR01_DVO_BYPASS_ENABLE is set. 60 60 */ 61 61 # define VR01_LCD_ENABLE (1 << 2) 62 - /** Enables the DVO repeater. */ 62 + /* Enables the DVO repeater. */ 63 63 # define VR01_DVO_BYPASS_ENABLE (1 << 1) 64 - /** Enables the DVO clock */ 64 + /* Enables the DVO clock */ 65 65 # define VR01_DVO_ENABLE (1 << 0) 66 - /** Enable dithering for 18bpp panels. Not documented. */ 66 + /* Enable dithering for 18bpp panels. Not documented. */ 67 67 # define VR01_DITHER_ENABLE (1 << 4) 68 68 69 69 /* 70 70 * LCD Interface Format 71 71 */ 72 72 #define VR10 0x10 73 - /** Enables LVDS output instead of CMOS */ 73 + /* Enables LVDS output instead of CMOS */ 74 74 # define VR10_LVDS_ENABLE (1 << 4) 75 - /** Enables 18-bit LVDS output. */ 75 + /* Enables 18-bit LVDS output. */ 76 76 # define VR10_INTERFACE_1X18 (0 << 2) 77 - /** Enables 24-bit LVDS or CMOS output */ 77 + /* Enables 24-bit LVDS or CMOS output */ 78 78 # define VR10_INTERFACE_1X24 (1 << 2) 79 - /** Enables 2x18-bit LVDS or CMOS output. */ 79 + /* Enables 2x18-bit LVDS or CMOS output. */ 80 80 # define VR10_INTERFACE_2X18 (2 << 2) 81 - /** Enables 2x24-bit LVDS output */ 81 + /* Enables 2x24-bit LVDS output */ 82 82 # define VR10_INTERFACE_2X24 (3 << 2) 83 - /** Mask that defines the depth of the pipeline */ 83 + /* Mask that defines the depth of the pipeline */ 84 84 # define VR10_INTERFACE_DEPTH_MASK (3 << 2) 85 85 86 86 /* ··· 97 97 * Panel power down status 98 98 */ 99 99 #define VR30 0x30 100 - /** Read only bit indicating that the panel is not in a safe poweroff state. */ 100 + /* Read only bit indicating that the panel is not in a safe poweroff state. */ 101 101 # define VR30_PANEL_ON (1 << 15) 102 102 103 103 #define VR40 0x40 ··· 183 183 184 184 185 185 static void ivch_dump_regs(struct intel_dvo_device *dvo); 186 - /** 186 + /* 187 187 * Reads a register on the ivch. 188 188 * 189 189 * Each of the 256 registers are 16 bits long. 
··· 230 230 return false; 231 231 } 232 232 233 - /** Writes a 16-bit register on the ivch */ 233 + /* Writes a 16-bit register on the ivch */ 234 234 static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data) 235 235 { 236 236 struct ivch_priv *priv = dvo->dev_priv; ··· 258 258 return false; 259 259 } 260 260 261 - /** Probes the given bus and slave address for an ivch */ 261 + /* Probes the given bus and slave address for an ivch */ 262 262 static bool ivch_init(struct intel_dvo_device *dvo, 263 263 struct i2c_adapter *adapter) 264 264 { ··· 338 338 ivch_write(dvo, backup_addresses[i], priv->reg_backup[i]); 339 339 } 340 340 341 - /** Sets the power state of the panel connected to the ivch */ 341 + /* Sets the power state of the panel connected to the ivch */ 342 342 static void ivch_dpms(struct intel_dvo_device *dvo, bool enable) 343 343 { 344 344 int i;
+2 -2
drivers/gpu/drm/i915/gvt/dmabuf.c
··· 162 162 info->size << PAGE_SHIFT); 163 163 i915_gem_object_init(obj, &intel_vgpu_gem_ops); 164 164 165 - obj->base.read_domains = I915_GEM_DOMAIN_GTT; 166 - obj->base.write_domain = 0; 165 + obj->read_domains = I915_GEM_DOMAIN_GTT; 166 + obj->write_domain = 0; 167 167 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 168 168 unsigned int tiling_mode = 0; 169 169 unsigned int stride = 0;
+25 -31
drivers/gpu/drm/i915/i915_debugfs.c
··· 49 49 50 50 intel_device_info_dump_flags(info, &p); 51 51 intel_device_info_dump_runtime(info, &p); 52 + intel_driver_caps_print(&dev_priv->caps, &p); 52 53 53 54 kernel_param_lock(THIS_MODULE); 54 55 i915_params_dump(&i915_modparams, &p); ··· 150 149 get_global_flag(obj), 151 150 get_pin_mapped_flag(obj), 152 151 obj->base.size / 1024, 153 - obj->base.read_domains, 154 - obj->base.write_domain, 152 + obj->read_domains, 153 + obj->write_domain, 155 154 i915_cache_level_str(dev_priv, obj->cache_level), 156 155 obj->mm.dirty ? " dirty" : "", 157 156 obj->mm.madv == I915_MADV_DONTNEED ? " purgeable" : ""); ··· 1461 1460 struct drm_i915_private *dev_priv = node_to_i915(m->private); 1462 1461 u32 gt_core_status, rcctl1, rc6vids = 0; 1463 1462 u32 gen9_powergate_enable = 0, gen9_powergate_status = 0; 1464 - unsigned forcewake_count; 1465 - int count = 0; 1466 - 1467 - forcewake_count = READ_ONCE(dev_priv->uncore.fw_domain[FW_DOMAIN_ID_RENDER].wake_count); 1468 - if (forcewake_count) { 1469 - seq_puts(m, "RC information inaccurate because somebody " 1470 - "holds a forcewake reference \n"); 1471 - } else { 1472 - /* NB: we cannot use forcewake, else we read the wrong values */ 1473 - while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) 1474 - udelay(10); 1475 - seq_printf(m, "RC information accurate: %s\n", yesno(count < 51)); 1476 - } 1477 1463 1478 1464 gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS); 1479 1465 trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true); ··· 1471 1483 gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS); 1472 1484 } 1473 1485 1474 - mutex_lock(&dev_priv->pcu_lock); 1475 - sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, &rc6vids); 1476 - mutex_unlock(&dev_priv->pcu_lock); 1486 + if (INTEL_GEN(dev_priv) <= 7) { 1487 + mutex_lock(&dev_priv->pcu_lock); 1488 + sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS, 1489 + &rc6vids); 1490 + mutex_unlock(&dev_priv->pcu_lock); 1491 + } 1477 1492 
1478 1493 seq_printf(m, "RC1e Enabled: %s\n", 1479 1494 yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); ··· 1532 1541 print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p); 1533 1542 print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp); 1534 1543 1535 - seq_printf(m, "RC6 voltage: %dmV\n", 1536 - GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1537 - seq_printf(m, "RC6+ voltage: %dmV\n", 1538 - GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1539 - seq_printf(m, "RC6++ voltage: %dmV\n", 1540 - GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1544 + if (INTEL_GEN(dev_priv) <= 7) { 1545 + seq_printf(m, "RC6 voltage: %dmV\n", 1546 + GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff))); 1547 + seq_printf(m, "RC6+ voltage: %dmV\n", 1548 + GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff))); 1549 + seq_printf(m, "RC6++ voltage: %dmV\n", 1550 + GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff))); 1551 + } 1552 + 1541 1553 return i915_forcewake_domains(m, NULL); 1542 1554 } 1543 1555 ··· 1593 1599 seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason); 1594 1600 1595 1601 if (fbc->work.scheduled) 1596 - seq_printf(m, "FBC worker scheduled on vblank %u, now %llu\n", 1602 + seq_printf(m, "FBC worker scheduled on vblank %llu, now %llu\n", 1597 1603 fbc->work.scheduled_vblank, 1598 1604 drm_crtc_vblank_count(&fbc->crtc->base)); 1599 1605 ··· 2332 2338 return -ENODEV; 2333 2339 2334 2340 GEM_BUG_ON(!guc->execbuf_client); 2335 - GEM_BUG_ON(!guc->preempt_client); 2336 2341 2337 2342 seq_printf(m, "Doorbell map:\n"); 2338 2343 seq_printf(m, "\t%*pb\n", GUC_NUM_DOORBELLS, guc->doorbell_bitmap); ··· 2339 2346 2340 2347 seq_printf(m, "\nGuC execbuf client @ %p:\n", guc->execbuf_client); 2341 2348 i915_guc_client_info(m, dev_priv, guc->execbuf_client); 2342 - seq_printf(m, "\nGuC preempt client @ %p:\n", guc->preempt_client); 2343 - i915_guc_client_info(m, dev_priv, guc->preempt_client); 2349 + if (guc->preempt_client) { 2350 + seq_printf(m, "\nGuC preempt client @ %p:\n", 2351 + 
guc->preempt_client); 2352 + i915_guc_client_info(m, dev_priv, guc->preempt_client); 2353 + } 2344 2354 2345 2355 i915_guc_log_info(m, dev_priv); 2346 2356 ··· 4079 4083 if (val & DROP_IDLE) 4080 4084 drain_delayed_work(&dev_priv->gt.idle_work); 4081 4085 4082 - if (val & DROP_FREED) { 4083 - synchronize_rcu(); 4086 + if (val & DROP_FREED) 4084 4087 i915_gem_drain_freed_objects(dev_priv); 4085 - } 4086 4088 4087 4089 return ret; 4088 4090 }
+135 -147
drivers/gpu/drm/i915/i915_drv.c
··· 122 122 i915_error_injected(dev_priv) ? KERN_DEBUG : KERN_ERR, \ 123 123 fmt, ##__VA_ARGS__) 124 124 125 - 126 - static enum intel_pch intel_virt_detect_pch(struct drm_i915_private *dev_priv) 125 + /* Map PCH device id to PCH type, or PCH_NONE if unknown. */ 126 + static enum intel_pch 127 + intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id) 127 128 { 128 - enum intel_pch ret = PCH_NOP; 129 + switch (id) { 130 + case INTEL_PCH_IBX_DEVICE_ID_TYPE: 131 + DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 132 + WARN_ON(!IS_GEN5(dev_priv)); 133 + return PCH_IBX; 134 + case INTEL_PCH_CPT_DEVICE_ID_TYPE: 135 + DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 136 + WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv)); 137 + return PCH_CPT; 138 + case INTEL_PCH_PPT_DEVICE_ID_TYPE: 139 + DRM_DEBUG_KMS("Found PantherPoint PCH\n"); 140 + WARN_ON(!IS_GEN6(dev_priv) && !IS_IVYBRIDGE(dev_priv)); 141 + /* PantherPoint is CPT compatible */ 142 + return PCH_CPT; 143 + case INTEL_PCH_LPT_DEVICE_ID_TYPE: 144 + DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 145 + WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); 146 + WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); 147 + return PCH_LPT; 148 + case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE: 149 + DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 150 + WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); 151 + WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); 152 + return PCH_LPT; 153 + case INTEL_PCH_WPT_DEVICE_ID_TYPE: 154 + DRM_DEBUG_KMS("Found WildcatPoint PCH\n"); 155 + WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); 156 + WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)); 157 + /* WildcatPoint is LPT compatible */ 158 + return PCH_LPT; 159 + case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE: 160 + DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n"); 161 + WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)); 162 + WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv)); 163 + /* WildcatPoint is LPT compatible */ 
164 + return PCH_LPT; 165 + case INTEL_PCH_SPT_DEVICE_ID_TYPE: 166 + DRM_DEBUG_KMS("Found SunrisePoint PCH\n"); 167 + WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); 168 + return PCH_SPT; 169 + case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE: 170 + DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 171 + WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv)); 172 + return PCH_SPT; 173 + case INTEL_PCH_KBP_DEVICE_ID_TYPE: 174 + DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n"); 175 + WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) && 176 + !IS_COFFEELAKE(dev_priv)); 177 + return PCH_KBP; 178 + case INTEL_PCH_CNP_DEVICE_ID_TYPE: 179 + DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n"); 180 + WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); 181 + return PCH_CNP; 182 + case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE: 183 + DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n"); 184 + WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv)); 185 + return PCH_CNP; 186 + case INTEL_PCH_ICP_DEVICE_ID_TYPE: 187 + DRM_DEBUG_KMS("Found Ice Lake PCH\n"); 188 + WARN_ON(!IS_ICELAKE(dev_priv)); 189 + return PCH_ICP; 190 + default: 191 + return PCH_NONE; 192 + } 193 + } 194 + 195 + static bool intel_is_virt_pch(unsigned short id, 196 + unsigned short svendor, unsigned short sdevice) 197 + { 198 + return (id == INTEL_PCH_P2X_DEVICE_ID_TYPE || 199 + id == INTEL_PCH_P3X_DEVICE_ID_TYPE || 200 + (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE && 201 + svendor == PCI_SUBVENDOR_ID_REDHAT_QUMRANET && 202 + sdevice == PCI_SUBDEVICE_ID_QEMU)); 203 + } 204 + 205 + static unsigned short 206 + intel_virt_detect_pch(const struct drm_i915_private *dev_priv) 207 + { 208 + unsigned short id = 0; 129 209 130 210 /* 131 211 * In a virtualized passthrough environment we can be in a ··· 214 134 * make an educated guess as to which PCH is really there. 
215 135 */ 216 136 217 - if (IS_GEN5(dev_priv)) { 218 - ret = PCH_IBX; 219 - DRM_DEBUG_KMS("Assuming Ibex Peak PCH\n"); 220 - } else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) { 221 - ret = PCH_CPT; 222 - DRM_DEBUG_KMS("Assuming CougarPoint PCH\n"); 223 - } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { 224 - ret = PCH_LPT; 225 - if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 226 - dev_priv->pch_id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; 227 - else 228 - dev_priv->pch_id = INTEL_PCH_LPT_DEVICE_ID_TYPE; 229 - DRM_DEBUG_KMS("Assuming LynxPoint PCH\n"); 230 - } else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 231 - ret = PCH_SPT; 232 - DRM_DEBUG_KMS("Assuming SunrisePoint PCH\n"); 233 - } else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) { 234 - ret = PCH_CNP; 235 - DRM_DEBUG_KMS("Assuming CannonPoint PCH\n"); 236 - } 137 + if (IS_GEN5(dev_priv)) 138 + id = INTEL_PCH_IBX_DEVICE_ID_TYPE; 139 + else if (IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv)) 140 + id = INTEL_PCH_CPT_DEVICE_ID_TYPE; 141 + else if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv)) 142 + id = INTEL_PCH_LPT_LP_DEVICE_ID_TYPE; 143 + else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 144 + id = INTEL_PCH_LPT_DEVICE_ID_TYPE; 145 + else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) 146 + id = INTEL_PCH_SPT_DEVICE_ID_TYPE; 147 + else if (IS_COFFEELAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) 148 + id = INTEL_PCH_CNP_DEVICE_ID_TYPE; 237 149 238 - return ret; 150 + if (id) 151 + DRM_DEBUG_KMS("Assuming PCH ID %04x\n", id); 152 + else 153 + DRM_DEBUG_KMS("Assuming no PCH\n"); 154 + 155 + return id; 239 156 } 240 157 241 158 static void intel_detect_pch(struct drm_i915_private *dev_priv) ··· 260 183 */ 261 184 while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { 262 185 unsigned short id; 186 + enum intel_pch pch_type; 263 187 264 188 if (pch->vendor != PCI_VENDOR_ID_INTEL) 265 189 continue; 266 190 267 191 id = pch->device & INTEL_PCH_DEVICE_ID_MASK; 268 192 
269 - dev_priv->pch_id = id; 270 - 271 - if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { 272 - dev_priv->pch_type = PCH_IBX; 273 - DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); 274 - WARN_ON(!IS_GEN5(dev_priv)); 275 - } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { 276 - dev_priv->pch_type = PCH_CPT; 277 - DRM_DEBUG_KMS("Found CougarPoint PCH\n"); 278 - WARN_ON(!IS_GEN6(dev_priv) && 279 - !IS_IVYBRIDGE(dev_priv)); 280 - } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { 281 - /* PantherPoint is CPT compatible */ 282 - dev_priv->pch_type = PCH_CPT; 283 - DRM_DEBUG_KMS("Found PantherPoint PCH\n"); 284 - WARN_ON(!IS_GEN6(dev_priv) && 285 - !IS_IVYBRIDGE(dev_priv)); 286 - } else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) { 287 - dev_priv->pch_type = PCH_LPT; 288 - DRM_DEBUG_KMS("Found LynxPoint PCH\n"); 289 - WARN_ON(!IS_HASWELL(dev_priv) && 290 - !IS_BROADWELL(dev_priv)); 291 - WARN_ON(IS_HSW_ULT(dev_priv) || 292 - IS_BDW_ULT(dev_priv)); 293 - } else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) { 294 - dev_priv->pch_type = PCH_LPT; 295 - DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); 296 - WARN_ON(!IS_HASWELL(dev_priv) && 297 - !IS_BROADWELL(dev_priv)); 298 - WARN_ON(!IS_HSW_ULT(dev_priv) && 299 - !IS_BDW_ULT(dev_priv)); 300 - } else if (id == INTEL_PCH_WPT_DEVICE_ID_TYPE) { 301 - /* WildcatPoint is LPT compatible */ 302 - dev_priv->pch_type = PCH_LPT; 303 - DRM_DEBUG_KMS("Found WildcatPoint PCH\n"); 304 - WARN_ON(!IS_HASWELL(dev_priv) && 305 - !IS_BROADWELL(dev_priv)); 306 - WARN_ON(IS_HSW_ULT(dev_priv) || 307 - IS_BDW_ULT(dev_priv)); 308 - } else if (id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) { 309 - /* WildcatPoint is LPT compatible */ 310 - dev_priv->pch_type = PCH_LPT; 311 - DRM_DEBUG_KMS("Found WildcatPoint LP PCH\n"); 312 - WARN_ON(!IS_HASWELL(dev_priv) && 313 - !IS_BROADWELL(dev_priv)); 314 - WARN_ON(!IS_HSW_ULT(dev_priv) && 315 - !IS_BDW_ULT(dev_priv)); 316 - } else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) { 317 - dev_priv->pch_type = PCH_SPT; 318 - DRM_DEBUG_KMS("Found SunrisePoint 
PCH\n"); 319 - WARN_ON(!IS_SKYLAKE(dev_priv) && 320 - !IS_KABYLAKE(dev_priv)); 321 - } else if (id == INTEL_PCH_SPT_LP_DEVICE_ID_TYPE) { 322 - dev_priv->pch_type = PCH_SPT; 323 - DRM_DEBUG_KMS("Found SunrisePoint LP PCH\n"); 324 - WARN_ON(!IS_SKYLAKE(dev_priv) && 325 - !IS_KABYLAKE(dev_priv)); 326 - } else if (id == INTEL_PCH_KBP_DEVICE_ID_TYPE) { 327 - dev_priv->pch_type = PCH_KBP; 328 - DRM_DEBUG_KMS("Found Kaby Lake PCH (KBP)\n"); 329 - WARN_ON(!IS_SKYLAKE(dev_priv) && 330 - !IS_KABYLAKE(dev_priv) && 331 - !IS_COFFEELAKE(dev_priv)); 332 - } else if (id == INTEL_PCH_CNP_DEVICE_ID_TYPE) { 333 - dev_priv->pch_type = PCH_CNP; 334 - DRM_DEBUG_KMS("Found Cannon Lake PCH (CNP)\n"); 335 - WARN_ON(!IS_CANNONLAKE(dev_priv) && 336 - !IS_COFFEELAKE(dev_priv)); 337 - } else if (id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) { 338 - dev_priv->pch_type = PCH_CNP; 339 - DRM_DEBUG_KMS("Found Cannon Lake LP PCH (CNP-LP)\n"); 340 - WARN_ON(!IS_CANNONLAKE(dev_priv) && 341 - !IS_COFFEELAKE(dev_priv)); 342 - } else if (id == INTEL_PCH_ICP_DEVICE_ID_TYPE) { 343 - dev_priv->pch_type = PCH_ICP; 344 - DRM_DEBUG_KMS("Found Ice Lake PCH\n"); 345 - WARN_ON(!IS_ICELAKE(dev_priv)); 346 - } else if (id == INTEL_PCH_P2X_DEVICE_ID_TYPE || 347 - id == INTEL_PCH_P3X_DEVICE_ID_TYPE || 348 - (id == INTEL_PCH_QEMU_DEVICE_ID_TYPE && 349 - pch->subsystem_vendor == 350 - PCI_SUBVENDOR_ID_REDHAT_QUMRANET && 351 - pch->subsystem_device == 352 - PCI_SUBDEVICE_ID_QEMU)) { 353 - dev_priv->pch_type = intel_virt_detect_pch(dev_priv); 354 - } else { 355 - continue; 193 + pch_type = intel_pch_type(dev_priv, id); 194 + if (pch_type != PCH_NONE) { 195 + dev_priv->pch_type = pch_type; 196 + dev_priv->pch_id = id; 197 + break; 198 + } else if (intel_is_virt_pch(id, pch->subsystem_vendor, 199 + pch->subsystem_device)) { 200 + id = intel_virt_detect_pch(dev_priv); 201 + if (id) { 202 + pch_type = intel_pch_type(dev_priv, id); 203 + if (WARN_ON(pch_type == PCH_NONE)) 204 + pch_type = PCH_NOP; 205 + } else { 206 + pch_type = 
PCH_NOP; 207 + } 208 + dev_priv->pch_type = pch_type; 209 + dev_priv->pch_id = id; 210 + break; 356 211 } 357 - 358 - break; 359 212 } 360 213 if (!pch) 361 214 DRM_DEBUG_KMS("No PCH found.\n"); ··· 293 286 pci_dev_put(pch); 294 287 } 295 288 296 - static int i915_getparam(struct drm_device *dev, void *data, 297 - struct drm_file *file_priv) 289 + static int i915_getparam_ioctl(struct drm_device *dev, void *data, 290 + struct drm_file *file_priv) 298 291 { 299 292 struct drm_i915_private *dev_priv = to_i915(dev); 300 293 struct pci_dev *pdev = dev_priv->drm.pdev; ··· 388 381 value = i915_gem_mmap_gtt_version(); 389 382 break; 390 383 case I915_PARAM_HAS_SCHEDULER: 391 - value = 0; 392 - if (dev_priv->engine[RCS] && dev_priv->engine[RCS]->schedule) { 393 - value |= I915_SCHEDULER_CAP_ENABLED; 394 - value |= I915_SCHEDULER_CAP_PRIORITY; 395 - if (HAS_LOGICAL_RING_PREEMPTION(dev_priv)) 396 - value |= I915_SCHEDULER_CAP_PREEMPTION; 397 - } 384 + value = dev_priv->caps.scheduler; 398 385 break; 399 386 400 387 case I915_PARAM_MMAP_VERSION: ··· 880 879 /** 881 880 * i915_driver_init_early - setup state not requiring device access 882 881 * @dev_priv: device private 882 + * @ent: the matching pci_device_id 883 883 * 884 884 * Initialize everything that is a "SW-only" state, that is state not 885 885 * requiring accessing the device or exposing the driver via kernel internal ··· 906 904 907 905 BUILD_BUG_ON(INTEL_MAX_PLATFORMS > 908 906 sizeof(device_info->platform_mask) * BITS_PER_BYTE); 909 - device_info->platform_mask = BIT(device_info->platform); 910 - 911 907 BUG_ON(device_info->gen > sizeof(device_info->gen_mask) * BITS_PER_BYTE); 912 - device_info->gen_mask = BIT(device_info->gen - 1); 913 - 914 908 spin_lock_init(&dev_priv->irq_lock); 915 909 spin_lock_init(&dev_priv->gpu_error.lock); 916 910 mutex_init(&dev_priv->backlight_lock); ··· 1444 1446 1445 1447 intel_modeset_cleanup(dev); 1446 1448 1447 - /* 1448 - * free the memory space allocated for the child device 
1449 - * config parsed from VBT 1450 - */ 1451 - if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) { 1452 - kfree(dev_priv->vbt.child_dev); 1453 - dev_priv->vbt.child_dev = NULL; 1454 - dev_priv->vbt.child_dev_num = 0; 1455 - } 1456 - kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); 1457 - dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; 1458 - kfree(dev_priv->vbt.lfp_lvds_vbt_mode); 1459 - dev_priv->vbt.lfp_lvds_vbt_mode = NULL; 1449 + intel_bios_cleanup(dev_priv); 1460 1450 1461 1451 vga_switcheroo_unregister_client(pdev); 1462 1452 vga_client_register(pdev, NULL, NULL, NULL); ··· 1911 1925 ret = i915_gem_reset_prepare(i915); 1912 1926 if (ret) { 1913 1927 dev_err(i915->drm.dev, "GPU recovery failed\n"); 1914 - intel_gpu_reset(i915, ALL_ENGINES); 1915 1928 goto taint; 1916 1929 } 1917 1930 ··· 1942 1957 */ 1943 1958 ret = i915_ggtt_enable_hw(i915); 1944 1959 if (ret) { 1945 - DRM_ERROR("Failed to re-enable GGTT following reset %d\n", ret); 1960 + DRM_ERROR("Failed to re-enable GGTT following reset (%d)\n", 1961 + ret); 1946 1962 goto error; 1947 1963 } 1948 1964 ··· 1960 1974 */ 1961 1975 ret = i915_gem_init_hw(i915); 1962 1976 if (ret) { 1963 - DRM_ERROR("Failed hw init on reset %d\n", ret); 1977 + DRM_ERROR("Failed to initialise HW following reset (%d)\n", 1978 + ret); 1964 1979 goto error; 1965 1980 } 1966 1981 ··· 1993 2006 error: 1994 2007 i915_gem_set_wedged(i915); 1995 2008 i915_gem_retire_requests(i915); 2009 + intel_gpu_reset(i915, ALL_ENGINES); 1996 2010 goto finish; 1997 2011 } 1998 2012 ··· 2783 2795 DRM_IOCTL_DEF_DRV(I915_BATCHBUFFER, drm_noop, DRM_AUTH), 2784 2796 DRM_IOCTL_DEF_DRV(I915_IRQ_EMIT, drm_noop, DRM_AUTH), 2785 2797 DRM_IOCTL_DEF_DRV(I915_IRQ_WAIT, drm_noop, DRM_AUTH), 2786 - DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam, DRM_AUTH|DRM_RENDER_ALLOW), 2798 + DRM_IOCTL_DEF_DRV(I915_GETPARAM, i915_getparam_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2787 2799 DRM_IOCTL_DEF_DRV(I915_SETPARAM, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2788 2800 
DRM_IOCTL_DEF_DRV(I915_ALLOC, drm_noop, DRM_AUTH), 2789 2801 DRM_IOCTL_DEF_DRV(I915_FREE, drm_noop, DRM_AUTH), ··· 2795 2807 DRM_IOCTL_DEF_DRV(I915_VBLANK_SWAP, drm_noop, DRM_AUTH), 2796 2808 DRM_IOCTL_DEF_DRV(I915_HWS_ADDR, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2797 2809 DRM_IOCTL_DEF_DRV(I915_GEM_INIT, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 2798 - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), 2799 - DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2, DRM_AUTH|DRM_RENDER_ALLOW), 2810 + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER, i915_gem_execbuffer_ioctl, DRM_AUTH), 2811 + DRM_IOCTL_DEF_DRV(I915_GEM_EXECBUFFER2_WR, i915_gem_execbuffer2_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2800 2812 DRM_IOCTL_DEF_DRV(I915_GEM_PIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 2801 2813 DRM_IOCTL_DEF_DRV(I915_GEM_UNPIN, i915_gem_reject_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 2802 2814 DRM_IOCTL_DEF_DRV(I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), ··· 2815 2827 DRM_IOCTL_DEF_DRV(I915_GEM_SET_TILING, i915_gem_set_tiling_ioctl, DRM_RENDER_ALLOW), 2816 2828 DRM_IOCTL_DEF_DRV(I915_GEM_GET_TILING, i915_gem_get_tiling_ioctl, DRM_RENDER_ALLOW), 2817 2829 DRM_IOCTL_DEF_DRV(I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, DRM_RENDER_ALLOW), 2818 - DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0), 2830 + DRM_IOCTL_DEF_DRV(I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id_ioctl, 0), 2819 2831 DRM_IOCTL_DEF_DRV(I915_GEM_MADVISE, i915_gem_madvise_ioctl, DRM_RENDER_ALLOW), 2820 2832 DRM_IOCTL_DEF_DRV(I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 2821 2833 DRM_IOCTL_DEF_DRV(I915_OVERLAY_ATTRS, intel_overlay_attrs_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW), 2822 - DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey, DRM_MASTER|DRM_CONTROL_ALLOW), 2834 + DRM_IOCTL_DEF_DRV(I915_SET_SPRITE_COLORKEY, intel_sprite_set_colorkey_ioctl, 
DRM_MASTER|DRM_CONTROL_ALLOW), 2823 2835 DRM_IOCTL_DEF_DRV(I915_GET_SPRITE_COLORKEY, drm_noop, DRM_MASTER|DRM_CONTROL_ALLOW), 2824 2836 DRM_IOCTL_DEF_DRV(I915_GEM_WAIT, i915_gem_wait_ioctl, DRM_AUTH|DRM_RENDER_ALLOW), 2825 2837 DRM_IOCTL_DEF_DRV(I915_GEM_CONTEXT_CREATE, i915_gem_context_create_ioctl, DRM_RENDER_ALLOW),
+40 -18
drivers/gpu/drm/i915/i915_drv.h
··· 83 83 84 84 #define DRIVER_NAME "i915" 85 85 #define DRIVER_DESC "Intel Graphics" 86 - #define DRIVER_DATE "20180207" 87 - #define DRIVER_TIMESTAMP 1517988364 86 + #define DRIVER_DATE "20180221" 87 + #define DRIVER_TIMESTAMP 1519219289 88 88 89 89 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and 90 90 * WARN_ON()) for hw state sanity checks to check for unexpected conditions ··· 472 472 u32 reset_count; 473 473 u32 suspend_count; 474 474 struct intel_device_info device_info; 475 + struct intel_driver_caps driver_caps; 475 476 struct i915_params params; 476 477 477 478 struct i915_error_uc { ··· 667 666 */ 668 667 struct intel_fbc_state_cache { 669 668 struct i915_vma *vma; 669 + unsigned long flags; 670 670 671 671 struct { 672 672 unsigned int mode_flags; ··· 706 704 */ 707 705 struct intel_fbc_reg_params { 708 706 struct i915_vma *vma; 707 + unsigned long flags; 709 708 710 709 struct { 711 710 enum pipe pipe; ··· 725 722 726 723 struct intel_fbc_work { 727 724 bool scheduled; 728 - u32 scheduled_vblank; 725 + u64 scheduled_vblank; 729 726 struct work_struct work; 730 727 } work; 731 728 ··· 949 946 950 947 struct intel_rc6 { 951 948 bool enabled; 949 + u64 prev_hw_residency[4]; 950 + u64 cur_residency[4]; 952 951 }; 953 952 954 953 struct intel_llc_pstate { ··· 1097 1092 struct llist_head free_list; 1098 1093 struct work_struct free_work; 1099 1094 spinlock_t free_lock; 1095 + /** 1096 + * Count of objects pending destructions. Used to skip needlessly 1097 + * waiting on an RCU barrier if no objects are waiting to be freed. 
1098 + */ 1099 + atomic_t free_count; 1100 1100 1101 1101 /** 1102 1102 * Small stash of WC pages ··· 1366 1356 u32 size; 1367 1357 u8 *data; 1368 1358 const u8 *sequence[MIPI_SEQ_MAX]; 1359 + u8 *deassert_seq; /* Used by fixup_mipi_sequences() */ 1369 1360 } dsi; 1370 1361 1371 1362 int crt_ddc_pin; ··· 1826 1815 struct kmem_cache *priorities; 1827 1816 1828 1817 const struct intel_device_info info; 1818 + struct intel_driver_caps caps; 1829 1819 1830 1820 /** 1831 1821 * Data Stolen Memory - aka "i915 stolen memory" gives us the start and ··· 2431 2419 * We have one bit per pipe and per scanout plane type. 2432 2420 */ 2433 2421 #define INTEL_FRONTBUFFER_BITS_PER_PIPE 8 2434 - #define INTEL_FRONTBUFFER(pipe, plane_id) \ 2435 - (1 << ((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2422 + #define INTEL_FRONTBUFFER(pipe, plane_id) ({ \ 2423 + BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES > 32); \ 2424 + BUILD_BUG_ON(I915_MAX_PLANES > INTEL_FRONTBUFFER_BITS_PER_PIPE); \ 2425 + BIT((plane_id) + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)); \ 2426 + }) 2436 2427 #define INTEL_FRONTBUFFER_OVERLAY(pipe) \ 2437 - (1 << (INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2428 + BIT(INTEL_FRONTBUFFER_BITS_PER_PIPE - 1 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) 2438 2429 #define INTEL_FRONTBUFFER_ALL_MASK(pipe) \ 2439 - (0xff << (INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe))) 2430 + GENMASK(INTEL_FRONTBUFFER_BITS_PER_PIPE * ((pipe) + 1) - 1, \ 2431 + INTEL_FRONTBUFFER_BITS_PER_PIPE * (pipe)) 2440 2432 2441 2433 /* 2442 2434 * Optimised SGL iterator for GEM objects ··· 2815 2799 2816 2800 #define HAS_FW_BLC(dev_priv) (INTEL_GEN(dev_priv) > 2) 2817 2801 #define HAS_FBC(dev_priv) ((dev_priv)->info.has_fbc) 2818 - #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_INFO(dev_priv)->gen >= 7) 2802 + #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 7) 2819 2803 2820 2804 #define 
HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) 2821 2805 ··· 2878 2862 #define INTEL_PCH_QEMU_DEVICE_ID_TYPE 0x2900 /* qemu q35 has 2918 */ 2879 2863 2880 2864 #define INTEL_PCH_TYPE(dev_priv) ((dev_priv)->pch_type) 2865 + #define INTEL_PCH_ID(dev_priv) ((dev_priv)->pch_id) 2881 2866 #define HAS_PCH_ICP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_ICP) 2882 2867 #define HAS_PCH_CNP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CNP) 2883 2868 #define HAS_PCH_CNP_LP(dev_priv) \ 2884 - ((dev_priv)->pch_id == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) 2869 + (INTEL_PCH_ID(dev_priv) == INTEL_PCH_CNP_LP_DEVICE_ID_TYPE) 2885 2870 #define HAS_PCH_KBP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_KBP) 2886 2871 #define HAS_PCH_SPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_SPT) 2887 2872 #define HAS_PCH_LPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_LPT) 2888 2873 #define HAS_PCH_LPT_LP(dev_priv) \ 2889 - ((dev_priv)->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \ 2890 - (dev_priv)->pch_id == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) 2874 + (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE || \ 2875 + INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_LP_DEVICE_ID_TYPE) 2891 2876 #define HAS_PCH_LPT_H(dev_priv) \ 2892 - ((dev_priv)->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE || \ 2893 - (dev_priv)->pch_id == INTEL_PCH_WPT_DEVICE_ID_TYPE) 2877 + (INTEL_PCH_ID(dev_priv) == INTEL_PCH_LPT_DEVICE_ID_TYPE || \ 2878 + INTEL_PCH_ID(dev_priv) == INTEL_PCH_WPT_DEVICE_ID_TYPE) 2894 2879 #define HAS_PCH_CPT(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_CPT) 2895 2880 #define HAS_PCH_IBX(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_IBX) 2896 2881 #define HAS_PCH_NOP(dev_priv) (INTEL_PCH_TYPE(dev_priv) == PCH_NOP) ··· 3098 3081 struct drm_file *file_priv); 3099 3082 int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 3100 3083 struct drm_file *file_priv); 3101 - int i915_gem_execbuffer(struct drm_device *dev, void *data, 3102 - struct drm_file *file_priv); 3103 - int 
i915_gem_execbuffer2(struct drm_device *dev, void *data, 3104 - struct drm_file *file_priv); 3084 + int i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, 3085 + struct drm_file *file_priv); 3086 + int i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, 3087 + struct drm_file *file_priv); 3105 3088 int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 3106 3089 struct drm_file *file_priv); 3107 3090 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data, ··· 3145 3128 3146 3129 static inline void i915_gem_drain_freed_objects(struct drm_i915_private *i915) 3147 3130 { 3131 + if (!atomic_read(&i915->mm.free_count)) 3132 + return; 3133 + 3148 3134 /* A single pass should suffice to release all the freed objects (along 3149 3135 * most call paths) , but be a little more paranoid in that freeing 3150 3136 * the objects does take a little amount of time, during which the rcu ··· 3419 3399 struct i915_vma * __must_check 3420 3400 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 3421 3401 u32 alignment, 3422 - const struct i915_ggtt_view *view); 3402 + const struct i915_ggtt_view *view, 3403 + unsigned int flags); 3423 3404 void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); 3424 3405 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, 3425 3406 int align); ··· 3696 3675 3697 3676 /* intel_bios.c */ 3698 3677 void intel_bios_init(struct drm_i915_private *dev_priv); 3678 + void intel_bios_cleanup(struct drm_i915_private *dev_priv); 3699 3679 bool intel_bios_is_valid_vbt(const void *buf, size_t size); 3700 3680 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv); 3701 3681 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
+66 -61
drivers/gpu/drm/i915/i915_gem.c
··· 240 240 241 241 static void __start_cpu_write(struct drm_i915_gem_object *obj) 242 242 { 243 - obj->base.read_domains = I915_GEM_DOMAIN_CPU; 244 - obj->base.write_domain = I915_GEM_DOMAIN_CPU; 243 + obj->read_domains = I915_GEM_DOMAIN_CPU; 244 + obj->write_domain = I915_GEM_DOMAIN_CPU; 245 245 if (cpu_write_needs_clflush(obj)) 246 246 obj->cache_dirty = true; 247 247 } ··· 257 257 obj->mm.dirty = false; 258 258 259 259 if (needs_clflush && 260 - (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 && 260 + (obj->read_domains & I915_GEM_DOMAIN_CPU) == 0 && 261 261 !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)) 262 262 drm_clflush_sg(pages); 263 263 ··· 703 703 struct drm_i915_private *dev_priv = to_i915(obj->base.dev); 704 704 struct i915_vma *vma; 705 705 706 - if (!(obj->base.write_domain & flush_domains)) 706 + if (!(obj->write_domain & flush_domains)) 707 707 return; 708 708 709 - switch (obj->base.write_domain) { 709 + switch (obj->write_domain) { 710 710 case I915_GEM_DOMAIN_GTT: 711 711 i915_gem_flush_ggtt_writes(dev_priv); 712 712 ··· 731 731 break; 732 732 } 733 733 734 - obj->base.write_domain = 0; 734 + obj->write_domain = 0; 735 735 } 736 736 737 737 static inline int ··· 831 831 * anyway again before the next pread happens. 832 832 */ 833 833 if (!obj->cache_dirty && 834 - !(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 834 + !(obj->read_domains & I915_GEM_DOMAIN_CPU)) 835 835 *needs_clflush = CLFLUSH_BEFORE; 836 836 837 837 out: ··· 890 890 * Same trick applies to invalidate partially written 891 891 * cachelines read before writing. 
892 892 */ 893 - if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) 893 + if (!(obj->read_domains & I915_GEM_DOMAIN_CPU)) 894 894 *needs_clflush |= CLFLUSH_BEFORE; 895 895 } 896 896 ··· 2391 2391 * wasn't in the GTT, there shouldn't be any way it could have been in 2392 2392 * a GPU cache 2393 2393 */ 2394 - GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS); 2395 - GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS); 2394 + GEM_BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS); 2395 + GEM_BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS); 2396 2396 2397 2397 st = kmalloc(sizeof(*st), GFP_KERNEL); 2398 2398 if (st == NULL) ··· 3205 3205 intel_engine_dump(engine, &p, "%s\n", engine->name); 3206 3206 } 3207 3207 3208 + set_bit(I915_WEDGED, &i915->gpu_error.flags); 3209 + smp_mb__after_atomic(); 3210 + 3208 3211 /* 3209 3212 * First, stop submission to hw, but do not yet complete requests by 3210 3213 * rolling the global seqno forward (since this would complete requests ··· 3232 3229 * start to complete all requests. 3233 3230 */ 3234 3231 engine->submit_request = nop_complete_submit_request; 3232 + engine->schedule = NULL; 3235 3233 } 3234 + 3235 + i915->caps.scheduler = 0; 3236 3236 3237 3237 /* 3238 3238 * Make sure no request can slip through without getting completed by ··· 3247 3241 for_each_engine(engine, i915, id) { 3248 3242 unsigned long flags; 3249 3243 3250 - /* Mark all pending requests as complete so that any concurrent 3244 + /* 3245 + * Mark all pending requests as complete so that any concurrent 3251 3246 * (lockless) lookup doesn't try and wait upon the request as we 3252 3247 * reset it. 
3253 3248 */ ··· 3258 3251 spin_unlock_irqrestore(&engine->timeline->lock, flags); 3259 3252 } 3260 3253 3261 - set_bit(I915_WEDGED, &i915->gpu_error.flags); 3262 3254 wake_up_all(&i915->gpu_error.reset_queue); 3263 3255 } 3264 3256 ··· 3703 3697 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 3704 3698 if (obj->cache_dirty) 3705 3699 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE); 3706 - obj->base.write_domain = 0; 3700 + obj->write_domain = 0; 3707 3701 } 3708 3702 3709 3703 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj) ··· 3740 3734 if (ret) 3741 3735 return ret; 3742 3736 3743 - if (obj->base.write_domain == I915_GEM_DOMAIN_WC) 3737 + if (obj->write_domain == I915_GEM_DOMAIN_WC) 3744 3738 return 0; 3745 3739 3746 3740 /* Flush and acquire obj->pages so that we are coherent through ··· 3761 3755 * coherent writes from the GPU, by effectively invalidating the 3762 3756 * WC domain upon first access. 3763 3757 */ 3764 - if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0) 3758 + if ((obj->read_domains & I915_GEM_DOMAIN_WC) == 0) 3765 3759 mb(); 3766 3760 3767 3761 /* It should now be out of any other write domains, and we can update 3768 3762 * the domain values for our changes. 
3769 3763 */ 3770 - GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0); 3771 - obj->base.read_domains |= I915_GEM_DOMAIN_WC; 3764 + GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_WC) != 0); 3765 + obj->read_domains |= I915_GEM_DOMAIN_WC; 3772 3766 if (write) { 3773 - obj->base.read_domains = I915_GEM_DOMAIN_WC; 3774 - obj->base.write_domain = I915_GEM_DOMAIN_WC; 3767 + obj->read_domains = I915_GEM_DOMAIN_WC; 3768 + obj->write_domain = I915_GEM_DOMAIN_WC; 3775 3769 obj->mm.dirty = true; 3776 3770 } 3777 3771 ··· 3803 3797 if (ret) 3804 3798 return ret; 3805 3799 3806 - if (obj->base.write_domain == I915_GEM_DOMAIN_GTT) 3800 + if (obj->write_domain == I915_GEM_DOMAIN_GTT) 3807 3801 return 0; 3808 3802 3809 3803 /* Flush and acquire obj->pages so that we are coherent through ··· 3824 3818 * coherent writes from the GPU, by effectively invalidating the 3825 3819 * GTT domain upon first access. 3826 3820 */ 3827 - if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) 3821 + if ((obj->read_domains & I915_GEM_DOMAIN_GTT) == 0) 3828 3822 mb(); 3829 3823 3830 3824 /* It should now be out of any other write domains, and we can update 3831 3825 * the domain values for our changes. 
3832 3826 */ 3833 - GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3834 - obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 3827 + GEM_BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 3828 + obj->read_domains |= I915_GEM_DOMAIN_GTT; 3835 3829 if (write) { 3836 - obj->base.read_domains = I915_GEM_DOMAIN_GTT; 3837 - obj->base.write_domain = I915_GEM_DOMAIN_GTT; 3830 + obj->read_domains = I915_GEM_DOMAIN_GTT; 3831 + obj->write_domain = I915_GEM_DOMAIN_GTT; 3838 3832 obj->mm.dirty = true; 3839 3833 } 3840 3834 ··· 4078 4072 struct i915_vma * 4079 4073 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, 4080 4074 u32 alignment, 4081 - const struct i915_ggtt_view *view) 4075 + const struct i915_ggtt_view *view, 4076 + unsigned int flags) 4082 4077 { 4083 4078 struct i915_vma *vma; 4084 4079 int ret; ··· 4116 4109 * try to preserve the existing ABI). 4117 4110 */ 4118 4111 vma = ERR_PTR(-ENOSPC); 4119 - if (!view || view->type == I915_GGTT_VIEW_NORMAL) 4112 + if ((flags & PIN_MAPPABLE) == 0 && 4113 + (!view || view->type == I915_GGTT_VIEW_NORMAL)) 4120 4114 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, 4121 - PIN_MAPPABLE | PIN_NONBLOCK); 4122 - if (IS_ERR(vma)) { 4123 - struct drm_i915_private *i915 = to_i915(obj->base.dev); 4124 - unsigned int flags; 4125 - 4126 - /* Valleyview is definitely limited to scanning out the first 4127 - * 512MiB. Lets presume this behaviour was inherited from the 4128 - * g4x display engine and that all earlier gen are similarly 4129 - * limited. Testing suggests that it is a little more 4130 - * complicated than this. For example, Cherryview appears quite 4131 - * happy to scanout from anywhere within its global aperture. 
4132 - */ 4133 - flags = 0; 4134 - if (HAS_GMCH_DISPLAY(i915)) 4135 - flags = PIN_MAPPABLE; 4115 + flags | 4116 + PIN_MAPPABLE | 4117 + PIN_NONBLOCK); 4118 + if (IS_ERR(vma)) 4136 4119 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags); 4137 - } 4138 4120 if (IS_ERR(vma)) 4139 4121 goto err_unpin_global; 4140 4122 ··· 4136 4140 /* It should now be out of any other write domains, and we can update 4137 4141 * the domain values for our changes. 4138 4142 */ 4139 - obj->base.read_domains |= I915_GEM_DOMAIN_GTT; 4143 + obj->read_domains |= I915_GEM_DOMAIN_GTT; 4140 4144 4141 4145 return vma; 4142 4146 ··· 4189 4193 flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU); 4190 4194 4191 4195 /* Flush the CPU cache if it's still invalid. */ 4192 - if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) { 4196 + if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { 4193 4197 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC); 4194 - obj->base.read_domains |= I915_GEM_DOMAIN_CPU; 4198 + obj->read_domains |= I915_GEM_DOMAIN_CPU; 4195 4199 } 4196 4200 4197 4201 /* It should now be out of any other write domains, and we can update 4198 4202 * the domain values for our changes. 4199 4203 */ 4200 - GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU); 4204 + GEM_BUG_ON(obj->write_domain & ~I915_GEM_DOMAIN_CPU); 4201 4205 4202 4206 /* If we're writing through the CPU, then the GPU read domains will 4203 4207 * need to be invalidated at next use. 
··· 4272 4276 4273 4277 lockdep_assert_held(&obj->base.dev->struct_mutex); 4274 4278 4275 - if (!view && flags & PIN_MAPPABLE) { 4279 + if (flags & PIN_MAPPABLE && 4280 + (!view || view->type == I915_GGTT_VIEW_NORMAL)) { 4276 4281 /* If the required space is larger than the available 4277 4282 * aperture, we will not able to find a slot for the 4278 4283 * object and unbinding the object now will be in ··· 4634 4637 4635 4638 i915_gem_object_init(obj, &i915_gem_object_ops); 4636 4639 4637 - obj->base.write_domain = I915_GEM_DOMAIN_CPU; 4638 - obj->base.read_domains = I915_GEM_DOMAIN_CPU; 4640 + obj->write_domain = I915_GEM_DOMAIN_CPU; 4641 + obj->read_domains = I915_GEM_DOMAIN_CPU; 4639 4642 4640 4643 if (HAS_LLC(dev_priv)) 4641 4644 /* On some devices, we can have the GPU use the LLC (the CPU ··· 4749 4752 kfree(obj->bit_17); 4750 4753 i915_gem_object_free(obj); 4751 4754 4755 + GEM_BUG_ON(!atomic_read(&i915->mm.free_count)); 4756 + atomic_dec(&i915->mm.free_count); 4757 + 4752 4758 if (on) 4753 4759 cond_resched(); 4754 4760 } ··· 4840 4840 * i915_gem_busy_ioctl(). For the corresponding synchronized 4841 4841 * lookup see i915_gem_object_lookup_rcu(). 4842 4842 */ 4843 + atomic_inc(&to_i915(obj->base.dev)->mm.free_count); 4843 4844 call_rcu(&obj->rcu, __i915_gem_free_object_rcu); 4844 4845 } 4845 4846 ··· 4883 4882 * it may impact the display and we are uncertain about the stability 4884 4883 * of the reset, so this could be applied to even earlier gen. 
4885 4884 */ 4886 - if (INTEL_GEN(i915) >= 5) { 4887 - int reset = intel_gpu_reset(i915, ALL_ENGINES); 4888 - WARN_ON(reset && reset != -ENODEV); 4889 - } 4885 + if (INTEL_GEN(i915) >= 5 && intel_has_gpu_reset(i915)) 4886 + WARN_ON(intel_gpu_reset(i915, ALL_ENGINES)); 4890 4887 } 4891 4888 4892 4889 int i915_gem_suspend(struct drm_i915_private *dev_priv) ··· 5064 5065 5065 5066 for_each_engine(engine, i915, id) { 5066 5067 err = engine->init_hw(engine); 5067 - if (err) 5068 + if (err) { 5069 + DRM_ERROR("Failed to restart %s (%d)\n", 5070 + engine->name, err); 5068 5071 return err; 5072 + } 5069 5073 } 5070 5074 5071 5075 return 0; ··· 5120 5118 5121 5119 ret = i915_ppgtt_init_hw(dev_priv); 5122 5120 if (ret) { 5123 - DRM_ERROR("PPGTT enable HW failed %d\n", ret); 5121 + DRM_ERROR("Enabling PPGTT failed (%d)\n", ret); 5124 5122 goto out; 5125 5123 } 5126 5124 5127 5125 /* We can't enable contexts until all firmware is loaded */ 5128 5126 ret = intel_uc_init_hw(dev_priv); 5129 - if (ret) 5127 + if (ret) { 5128 + DRM_ERROR("Enabling uc failed (%d)\n", ret); 5130 5129 goto out; 5130 + } 5131 5131 5132 5132 intel_mocs_init_l3cc_table(dev_priv); 5133 5133 ··· 5419 5415 { 5420 5416 int i; 5421 5417 5422 - if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) && 5418 + if (INTEL_GEN(dev_priv) >= 7 && !IS_VALLEYVIEW(dev_priv) && 5423 5419 !IS_CHERRYVIEW(dev_priv)) 5424 5420 dev_priv->num_fence_regs = 32; 5425 - else if (INTEL_INFO(dev_priv)->gen >= 4 || 5421 + else if (INTEL_GEN(dev_priv) >= 4 || 5426 5422 IS_I945G(dev_priv) || IS_I945GM(dev_priv) || 5427 5423 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) 5428 5424 dev_priv->num_fence_regs = 16; ··· 5541 5537 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv) 5542 5538 { 5543 5539 i915_gem_drain_freed_objects(dev_priv); 5544 - WARN_ON(!llist_empty(&dev_priv->mm.free_list)); 5540 + GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list)); 5541 + GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count)); 5545 5542 
WARN_ON(dev_priv->mm.object_count); 5546 5543 5547 5544 mutex_lock(&dev_priv->drm.struct_mutex); ··· 5698 5693 if (IS_ERR(obj)) 5699 5694 return obj; 5700 5695 5701 - GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU); 5696 + GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); 5702 5697 5703 5698 file = obj->base.filp; 5704 5699 offset = 0;
+1 -1
drivers/gpu/drm/i915/i915_gem_clflush.c
··· 177 177 } else if (obj->mm.pages) { 178 178 __i915_do_clflush(obj); 179 179 } else { 180 - GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU); 180 + GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU); 181 181 } 182 182 183 183 obj->cache_dirty = false;
+18 -22
drivers/gpu/drm/i915/i915_gem_context.c
··· 338 338 context_close(ctx); 339 339 } 340 340 341 - /** 342 - * The default context needs to exist per ring that uses contexts. It stores the 343 - * context state of the GPU for applications that don't utilize HW contexts, as 344 - * well as an idle case. 345 - */ 346 341 static struct i915_gem_context * 347 342 i915_gem_create_context(struct drm_i915_private *dev_priv, 348 343 struct drm_i915_file_private *file_priv) ··· 444 449 i915_gem_context_free(ctx); 445 450 } 446 451 452 + static bool needs_preempt_context(struct drm_i915_private *i915) 453 + { 454 + return HAS_LOGICAL_RING_PREEMPTION(i915); 455 + } 456 + 447 457 int i915_gem_contexts_init(struct drm_i915_private *dev_priv) 448 458 { 449 459 struct i915_gem_context *ctx; 450 - int err; 451 460 461 + /* Reassure ourselves we are only called once */ 452 462 GEM_BUG_ON(dev_priv->kernel_context); 463 + GEM_BUG_ON(dev_priv->preempt_context); 453 464 454 465 INIT_LIST_HEAD(&dev_priv->contexts.list); 455 466 INIT_WORK(&dev_priv->contexts.free_work, contexts_free_worker); ··· 469 468 ctx = i915_gem_context_create_kernel(dev_priv, I915_PRIORITY_MIN); 470 469 if (IS_ERR(ctx)) { 471 470 DRM_ERROR("Failed to create default global context\n"); 472 - err = PTR_ERR(ctx); 473 - goto err; 471 + return PTR_ERR(ctx); 474 472 } 475 473 /* 476 474 * For easy recognisablity, we want the kernel context to be 0 and then ··· 479 479 dev_priv->kernel_context = ctx; 480 480 481 481 /* highest priority; preempting task */ 482 - ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX); 483 - if (IS_ERR(ctx)) { 484 - DRM_ERROR("Failed to create default preempt context\n"); 485 - err = PTR_ERR(ctx); 486 - goto err_kernel_context; 482 + if (needs_preempt_context(dev_priv)) { 483 + ctx = i915_gem_context_create_kernel(dev_priv, INT_MAX); 484 + if (!IS_ERR(ctx)) 485 + dev_priv->preempt_context = ctx; 486 + else 487 + DRM_ERROR("Failed to create preempt context; disabling preemption\n"); 487 488 } 488 - dev_priv->preempt_context = ctx; 
489 489 490 490 DRM_DEBUG_DRIVER("%s context support initialized\n", 491 491 dev_priv->engine[RCS]->context_size ? "logical" : 492 492 "fake"); 493 493 return 0; 494 - 495 - err_kernel_context: 496 - destroy_kernel_context(&dev_priv->kernel_context); 497 - err: 498 - return err; 499 494 } 500 495 501 496 void i915_gem_contexts_lost(struct drm_i915_private *dev_priv) ··· 516 521 { 517 522 lockdep_assert_held(&i915->drm.struct_mutex); 518 523 519 - destroy_kernel_context(&i915->preempt_context); 524 + if (i915->preempt_context) 525 + destroy_kernel_context(&i915->preempt_context); 520 526 destroy_kernel_context(&i915->kernel_context); 521 527 522 528 /* Must free all deferred contexts (via flush_workqueue) first */ ··· 799 803 800 804 case I915_CONTEXT_PARAM_PRIORITY: 801 805 { 802 - int priority = args->value; 806 + s64 priority = args->value; 803 807 804 808 if (args->size) 805 809 ret = -EINVAL; 806 - else if (!to_i915(dev)->engine[RCS]->schedule) 810 + else if (!(to_i915(dev)->caps.scheduler & I915_SCHEDULER_CAP_PRIORITY)) 807 811 ret = -ENODEV; 808 812 else if (priority > I915_CONTEXT_MAX_USER_PRIORITY || 809 813 priority < I915_CONTEXT_MIN_USER_PRIORITY)
+3
drivers/gpu/drm/i915/i915_gem_context.h
··· 29 29 #include <linux/list.h> 30 30 #include <linux/radix-tree.h> 31 31 32 + #include "i915_gem.h" 33 + 32 34 struct pid; 33 35 34 36 struct drm_device; ··· 38 36 39 37 struct drm_i915_private; 40 38 struct drm_i915_file_private; 39 + struct drm_i915_gem_request; 41 40 struct i915_hw_ppgtt; 42 41 struct i915_vma; 43 42 struct intel_ring;
+2 -2
drivers/gpu/drm/i915/i915_gem_dmabuf.c
··· 330 330 * write-combined buffer or a delay through the chipset for GTT 331 331 * writes that do require us to treat GTT as a separate cache domain.) 332 332 */ 333 - obj->base.read_domains = I915_GEM_DOMAIN_GTT; 334 - obj->base.write_domain = 0; 333 + obj->read_domains = I915_GEM_DOMAIN_GTT; 334 + obj->write_domain = 0; 335 335 336 336 return &obj->base; 337 337
+13 -11
drivers/gpu/drm/i915/i915_gem_execbuffer.c
··· 505 505 list_add_tail(&vma->exec_link, &eb->unbound); 506 506 if (drm_mm_node_allocated(&vma->node)) 507 507 err = i915_vma_unbind(vma); 508 + if (unlikely(err)) 509 + vma->exec_flags = NULL; 508 510 } 509 511 return err; 510 512 } ··· 1075 1073 u32 *cmd; 1076 1074 int err; 1077 1075 1078 - GEM_BUG_ON(vma->obj->base.write_domain & I915_GEM_DOMAIN_CPU); 1076 + GEM_BUG_ON(vma->obj->write_domain & I915_GEM_DOMAIN_CPU); 1079 1077 1080 1078 obj = i915_gem_batch_pool_get(&eb->engine->batch_pool, PAGE_SIZE); 1081 1079 if (IS_ERR(obj)) ··· 1863 1861 i915_gem_active_set(&vma->last_read[idx], req); 1864 1862 list_move_tail(&vma->vm_link, &vma->vm->active_list); 1865 1863 1866 - obj->base.write_domain = 0; 1864 + obj->write_domain = 0; 1867 1865 if (flags & EXEC_OBJECT_WRITE) { 1868 - obj->base.write_domain = I915_GEM_DOMAIN_RENDER; 1866 + obj->write_domain = I915_GEM_DOMAIN_RENDER; 1869 1867 1870 1868 if (intel_fb_obj_invalidate(obj, ORIGIN_CS)) 1871 1869 i915_gem_active_set(&obj->frontbuffer_write, req); 1872 1870 1873 - obj->base.read_domains = 0; 1871 + obj->read_domains = 0; 1874 1872 } 1875 - obj->base.read_domains |= I915_GEM_GPU_DOMAINS; 1873 + obj->read_domains |= I915_GEM_GPU_DOMAINS; 1876 1874 1877 1875 if (flags & EXEC_OBJECT_NEEDS_FENCE) 1878 1876 i915_gem_active_set(&vma->last_fence, req); ··· 1975 1973 return 0; 1976 1974 } 1977 1975 1978 - /** 1976 + /* 1979 1977 * Find one BSD ring to dispatch the corresponding BSD command. 1980 1978 * The engine index is returned. 1981 1979 */ ··· 2412 2410 if (out_fence) { 2413 2411 if (err == 0) { 2414 2412 fd_install(out_fence_fd, out_fence->file); 2415 - args->rsvd2 &= GENMASK_ULL(0, 31); /* keep in-fence */ 2413 + args->rsvd2 &= GENMASK_ULL(31, 0); /* keep in-fence */ 2416 2414 args->rsvd2 |= (u64)out_fence_fd << 32; 2417 2415 out_fence_fd = -1; 2418 2416 } else { ··· 2465 2463 * list array and passes it to the real function. 
2466 2464 */ 2467 2465 int 2468 - i915_gem_execbuffer(struct drm_device *dev, void *data, 2469 - struct drm_file *file) 2466 + i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data, 2467 + struct drm_file *file) 2470 2468 { 2471 2469 struct drm_i915_gem_execbuffer *args = data; 2472 2470 struct drm_i915_gem_execbuffer2 exec2; ··· 2556 2554 } 2557 2555 2558 2556 int 2559 - i915_gem_execbuffer2(struct drm_device *dev, void *data, 2560 - struct drm_file *file) 2557 + i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data, 2558 + struct drm_file *file) 2561 2559 { 2562 2560 struct drm_i915_gem_execbuffer2 *args = data; 2563 2561 struct drm_i915_gem_exec_object2 *exec2_list;
+1 -1
drivers/gpu/drm/i915/i915_gem_fence_reg.c
··· 64 64 int fence_pitch_shift; 65 65 u64 val; 66 66 67 - if (INTEL_INFO(fence->i915)->gen >= 6) { 67 + if (INTEL_GEN(fence->i915) >= 6) { 68 68 fence_reg_lo = FENCE_REG_GEN6_LO(fence->id); 69 69 fence_reg_hi = FENCE_REG_GEN6_HI(fence->id); 70 70 fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;
+14 -24
drivers/gpu/drm/i915/i915_gem_gtt.c
··· 673 673 static void gen8_initialize_pd(struct i915_address_space *vm, 674 674 struct i915_page_directory *pd) 675 675 { 676 - unsigned int i; 677 - 678 676 fill_px(vm, pd, 679 677 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC)); 680 - for (i = 0; i < I915_PDES; i++) 681 - pd->page_table[i] = vm->scratch_pt; 678 + memset_p((void **)pd->page_table, vm->scratch_pt, I915_PDES); 682 679 } 683 680 684 681 static int __pdp_init(struct i915_address_space *vm, 685 682 struct i915_page_directory_pointer *pdp) 686 683 { 687 684 const unsigned int pdpes = i915_pdpes_per_pdp(vm); 688 - unsigned int i; 689 685 690 686 pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory), 691 687 GFP_KERNEL | __GFP_NOWARN); 692 688 if (unlikely(!pdp->page_directory)) 693 689 return -ENOMEM; 694 690 695 - for (i = 0; i < pdpes; i++) 696 - pdp->page_directory[i] = vm->scratch_pd; 691 + memset_p((void **)pdp->page_directory, vm->scratch_pd, pdpes); 697 692 698 693 return 0; 699 694 } ··· 710 715 struct i915_page_directory_pointer *pdp; 711 716 int ret = -ENOMEM; 712 717 713 - WARN_ON(!use_4lvl(vm)); 718 + GEM_BUG_ON(!use_4lvl(vm)); 714 719 715 720 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL); 716 721 if (!pdp) ··· 759 764 static void gen8_initialize_pml4(struct i915_address_space *vm, 760 765 struct i915_pml4 *pml4) 761 766 { 762 - unsigned int i; 763 - 764 767 fill_px(vm, pml4, 765 768 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC)); 766 - for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) 767 - pml4->pdps[i] = vm->scratch_pdp; 769 + memset_p((void **)pml4->pdps, vm->scratch_pdp, GEN8_PML4ES_PER_PML4); 768 770 } 769 771 770 772 /* Broadwell Page Directory Pointer Descriptors */ ··· 2101 2109 ppgtt->base.i915 = dev_priv; 2102 2110 ppgtt->base.dma = &dev_priv->drm.pdev->dev; 2103 2111 2104 - if (INTEL_INFO(dev_priv)->gen < 8) 2112 + if (INTEL_GEN(dev_priv) < 8) 2105 2113 return gen6_ppgtt_init(ppgtt); 2106 2114 else 2107 2115 return gen8_ppgtt_init(ppgtt); ··· 2249 2257 
trace_i915_ppgtt_release(&ppgtt->base); 2250 2258 2251 2259 /* vmas should already be unbound and destroyed */ 2252 - WARN_ON(!list_empty(&ppgtt->base.active_list)); 2253 - WARN_ON(!list_empty(&ppgtt->base.inactive_list)); 2254 - WARN_ON(!list_empty(&ppgtt->base.unbound_list)); 2260 + GEM_BUG_ON(!list_empty(&ppgtt->base.active_list)); 2261 + GEM_BUG_ON(!list_empty(&ppgtt->base.inactive_list)); 2262 + GEM_BUG_ON(!list_empty(&ppgtt->base.unbound_list)); 2255 2263 2256 2264 ppgtt->base.cleanup(&ppgtt->base); 2257 2265 i915_address_space_fini(&ppgtt->base); ··· 2814 2822 2815 2823 i915->mm.aliasing_ppgtt = ppgtt; 2816 2824 2817 - WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma); 2825 + GEM_BUG_ON(ggtt->base.bind_vma != ggtt_bind_vma); 2818 2826 ggtt->base.bind_vma = aliasing_gtt_bind_vma; 2819 2827 2820 - WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma); 2828 + GEM_BUG_ON(ggtt->base.unbind_vma != ggtt_unbind_vma); 2821 2829 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma; 2822 2830 2823 2831 return 0; ··· 2908 2916 ggtt->base.closed = true; 2909 2917 2910 2918 mutex_lock(&dev_priv->drm.struct_mutex); 2911 - WARN_ON(!list_empty(&ggtt->base.active_list)); 2919 + GEM_BUG_ON(!list_empty(&ggtt->base.active_list)); 2912 2920 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link) 2913 2921 WARN_ON(i915_vma_unbind(vma)); 2914 2922 mutex_unlock(&dev_priv->drm.struct_mutex); ··· 3801 3809 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj)); 3802 3810 3803 3811 switch (vma->ggtt_view.type) { 3812 + default: 3813 + GEM_BUG_ON(vma->ggtt_view.type); 3814 + /* fall through */ 3804 3815 case I915_GGTT_VIEW_NORMAL: 3805 3816 vma->pages = vma->obj->mm.pages; 3806 3817 return 0; ··· 3816 3821 case I915_GGTT_VIEW_PARTIAL: 3817 3822 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj); 3818 3823 break; 3819 - 3820 - default: 3821 - WARN_ONCE(1, "GGTT view %u not implemented!\n", 3822 - vma->ggtt_view.type); 3823 - return -EINVAL; 3824 3824 } 3825 3825 3826 3826 ret 
= 0;
+6 -2
drivers/gpu/drm/i915/i915_gem_internal.c
··· 167 167 }; 168 168 169 169 /** 170 + * i915_gem_object_create_internal: create an object with volatile pages 171 + * @i915: the i915 device 172 + * @size: the size in bytes of backing storage to allocate for the object 173 + * 170 174 * Creates a new object that wraps some internal memory for private use. 171 175 * This object is not backed by swappable storage, and as such its contents 172 176 * are volatile and only valid whilst pinned. If the object is reaped by the ··· 201 197 drm_gem_private_object_init(&i915->drm, &obj->base, size); 202 198 i915_gem_object_init(obj, &i915_gem_object_internal_ops); 203 199 204 - obj->base.read_domains = I915_GEM_DOMAIN_CPU; 205 - obj->base.write_domain = I915_GEM_DOMAIN_CPU; 200 + obj->read_domains = I915_GEM_DOMAIN_CPU; 201 + obj->write_domain = I915_GEM_DOMAIN_CPU; 206 202 207 203 cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 208 204 i915_gem_object_set_cache_coherency(obj, cache_level);
+15
drivers/gpu/drm/i915/i915_gem_object.h
··· 148 148 #define I915_BO_CACHE_COHERENT_FOR_WRITE BIT(1) 149 149 unsigned int cache_dirty:1; 150 150 151 + /** 152 + * @read_domains: Read memory domains. 153 + * 154 + * These monitor which caches contain read/write data related to the 155 + * object. When transitioning from one set of domains to another, 156 + * the driver is called to ensure that caches are suitably flushed and 157 + * invalidated. 158 + */ 159 + u16 read_domains; 160 + 161 + /** 162 + * @write_domain: Corresponding unique write memory domain. 163 + */ 164 + u16 write_domain; 165 + 151 166 atomic_t frontbuffer_bits; 152 167 unsigned int frontbuffer_ggtt_origin; /* write once */ 153 168 struct i915_gem_active frontbuffer_write;
+34 -8
drivers/gpu/drm/i915/i915_gem_request.c
··· 443 443 engine->last_retired_context = request->ctx; 444 444 445 445 spin_lock_irq(&request->lock); 446 - if (request->waitboost) 447 - atomic_dec(&request->i915->gt_pm.rps.num_waiters); 448 446 if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags)) 449 447 dma_fence_signal_locked(&request->fence); 450 448 if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) 451 449 intel_engine_cancel_signaling(request); 450 + if (request->waitboost) { 451 + GEM_BUG_ON(!atomic_read(&request->i915->gt_pm.rps.num_waiters)); 452 + atomic_dec(&request->i915->gt_pm.rps.num_waiters); 453 + } 452 454 spin_unlock_irq(&request->lock); 453 455 454 456 i915_priotree_fini(request->i915, &request->priotree); ··· 918 916 919 917 /** 920 918 * i915_gem_request_await_object - set this request to (async) wait upon a bo 921 - * 922 919 * @to: request we are wishing to use 923 920 * @obj: object which may be in use on another ring. 921 + * @write: whether the wait is on behalf of a writer 924 922 * 925 923 * This code is meant to abstract object synchronization with the GPU. 926 924 * Conceptually we serialise writes between engines inside the GPU. ··· 995 993 lockdep_assert_held(&request->i915->drm.struct_mutex); 996 994 trace_i915_gem_request_add(request); 997 995 998 - /* Make sure that no request gazumped us - if it was allocated after 996 + /* 997 + * Make sure that no request gazumped us - if it was allocated after 999 998 * our i915_gem_request_alloc() and called __i915_add_request() before 1000 999 * us, the timeline will hold its seqno which is later than ours. 
1001 1000 */ ··· 1023 1020 WARN(err, "engine->emit_flush() failed: %d!\n", err); 1024 1021 } 1025 1022 1026 - /* Record the position of the start of the breadcrumb so that 1023 + /* 1024 + * Record the position of the start of the breadcrumb so that 1027 1025 * should we detect the updated seqno part-way through the 1028 1026 * GPU processing the request, we never over-estimate the 1029 1027 * position of the ring's HEAD. ··· 1033 1029 GEM_BUG_ON(IS_ERR(cs)); 1034 1030 request->postfix = intel_ring_offset(request, cs); 1035 1031 1036 - /* Seal the request and mark it as pending execution. Note that 1032 + /* 1033 + * Seal the request and mark it as pending execution. Note that 1037 1034 * we may inspect this state, without holding any locks, during 1038 1035 * hangcheck. Hence we apply the barrier to ensure that we do not 1039 1036 * see a more recent value in the hws than we are tracking. ··· 1042 1037 1043 1038 prev = i915_gem_active_raw(&timeline->last_request, 1044 1039 &request->i915->drm.struct_mutex); 1045 - if (prev) { 1040 + if (prev && !i915_gem_request_completed(prev)) { 1046 1041 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, 1047 1042 &request->submitq); 1048 1043 if (engine->schedule) ··· 1062 1057 list_add_tail(&request->ring_link, &ring->request_list); 1063 1058 request->emitted_jiffies = jiffies; 1064 1059 1065 - /* Let the backend know a new request has arrived that may need 1060 + /* 1061 + * Let the backend know a new request has arrived that may need 1066 1062 * to adjust the existing execution schedule due to a high priority 1067 1063 * request - i.e. 
we may want to preempt the current request in order 1068 1064 * to run a high priority dependency chain *before* we can execute this ··· 1079 1073 local_bh_disable(); 1080 1074 i915_sw_fence_commit(&request->submit); 1081 1075 local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ 1076 + 1077 + /* 1078 + * In typical scenarios, we do not expect the previous request on 1079 + * the timeline to be still tracked by timeline->last_request if it 1080 + * has been completed. If the completed request is still here, that 1081 + * implies that request retirement is a long way behind submission, 1082 + * suggesting that we haven't been retiring frequently enough from 1083 + * the combination of retire-before-alloc, waiters and the background 1084 + * retirement worker. So if the last request on this timeline was 1085 + * already completed, do a catch up pass, flushing the retirement queue 1086 + * up to this client. Since we have now moved the heaviest operations 1087 + * during retirement onto secondary workers, such as freeing objects 1088 + * or contexts, retiring a bunch of requests is mostly list management 1089 + * (and cache misses), and so we should not be overly penalizing this 1090 + * client by performing excess work, though we may still performing 1091 + * work on behalf of others -- but instead we should benefit from 1092 + * improved resource management. (Well, that's the theory at least.) 1093 + */ 1094 + if (prev && i915_gem_request_completed(prev)) 1095 + i915_gem_request_retire_upto(prev); 1082 1096 } 1083 1097 1084 1098 static unsigned long local_clock_us(unsigned int *cpu)
+2 -2
drivers/gpu/drm/i915/i915_gem_stolen.c
··· 356 356 reserved_base = 0; 357 357 reserved_size = 0; 358 358 359 - switch (INTEL_INFO(dev_priv)->gen) { 359 + switch (INTEL_GEN(dev_priv)) { 360 360 case 2: 361 361 case 3: 362 362 break; ··· 516 516 i915_gem_object_init(obj, &i915_gem_object_stolen_ops); 517 517 518 518 obj->stolen = stolen; 519 - obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; 519 + obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT; 520 520 cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE; 521 521 i915_gem_object_set_cache_coherency(obj, cache_level); 522 522
+6 -4
drivers/gpu/drm/i915/i915_gem_userptr.c
··· 721 721 .release = i915_gem_userptr_release, 722 722 }; 723 723 724 - /** 724 + /* 725 725 * Creates a new mm object that wraps some normal memory from the process 726 726 * context - user memory. 727 727 * ··· 757 757 * dma-buf instead. 758 758 */ 759 759 int 760 - i915_gem_userptr_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 760 + i915_gem_userptr_ioctl(struct drm_device *dev, 761 + void *data, 762 + struct drm_file *file) 761 763 { 762 764 struct drm_i915_private *dev_priv = to_i915(dev); 763 765 struct drm_i915_gem_userptr *args = data; ··· 798 796 799 797 drm_gem_private_object_init(dev, &obj->base, args->user_size); 800 798 i915_gem_object_init(obj, &i915_gem_userptr_ops); 801 - obj->base.read_domains = I915_GEM_DOMAIN_CPU; 802 - obj->base.write_domain = I915_GEM_DOMAIN_CPU; 799 + obj->read_domains = I915_GEM_DOMAIN_CPU; 800 + obj->write_domain = I915_GEM_DOMAIN_CPU; 803 801 i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC); 804 802 805 803 obj->userptr.ptr = args->user_ptr;
+19 -14
drivers/gpu/drm/i915/i915_gpu_error.c
··· 579 579 } 580 580 581 581 static void err_print_capabilities(struct drm_i915_error_state_buf *m, 582 - const struct intel_device_info *info) 582 + const struct intel_device_info *info, 583 + const struct intel_driver_caps *caps) 583 584 { 584 585 struct drm_printer p = i915_error_printer(m); 585 586 586 587 intel_device_info_dump_flags(info, &p); 588 + intel_driver_caps_print(caps, &p); 587 589 } 588 590 589 591 static void err_print_params(struct drm_i915_error_state_buf *m, ··· 810 808 if (error->display) 811 809 intel_display_print_error_state(m, error->display); 812 810 813 - err_print_capabilities(m, &error->device_info); 811 + err_print_capabilities(m, &error->device_info, &error->driver_caps); 814 812 err_print_params(m, &error->params); 815 813 err_print_uc(m, &error->uc); 816 814 ··· 1021 1019 err->engine = __active_get_engine_id(&obj->frontbuffer_write); 1022 1020 1023 1021 err->gtt_offset = vma->node.start; 1024 - err->read_domains = obj->base.read_domains; 1025 - err->write_domain = obj->base.write_domain; 1022 + err->read_domains = obj->read_domains; 1023 + err->write_domain = obj->write_domain; 1026 1024 err->fence_reg = vma->fence ? 
vma->fence->id : -1; 1027 1025 err->tiling = i915_gem_object_get_tiling(obj); 1028 1026 err->dirty = obj->mm.dirty; ··· 1742 1740 memcpy(&error->device_info, 1743 1741 INTEL_INFO(dev_priv), 1744 1742 sizeof(error->device_info)); 1743 + error->driver_caps = dev_priv->caps; 1745 1744 } 1746 1745 1747 1746 static __always_inline void dup_param(const char *type, void *x) ··· 1805 1802 1806 1803 /** 1807 1804 * i915_capture_error_state - capture an error record for later analysis 1808 - * @dev: drm device 1805 + * @i915: i915 device 1806 + * @engine_mask: the mask of engines triggering the hang 1807 + * @error_msg: a message to insert into the error capture header 1809 1808 * 1810 1809 * Should be called when an error is detected (either a hang or an error 1811 1810 * interrupt) to capture error state from the time of the error. Fills 1812 1811 * out a structure which becomes available in debugfs for user level tools 1813 1812 * to pick up. 1814 1813 */ 1815 - void i915_capture_error_state(struct drm_i915_private *dev_priv, 1814 + void i915_capture_error_state(struct drm_i915_private *i915, 1816 1815 u32 engine_mask, 1817 1816 const char *error_msg) 1818 1817 { ··· 1825 1820 if (!i915_modparams.error_capture) 1826 1821 return; 1827 1822 1828 - if (READ_ONCE(dev_priv->gpu_error.first_error)) 1823 + if (READ_ONCE(i915->gpu_error.first_error)) 1829 1824 return; 1830 1825 1831 - error = i915_capture_gpu_state(dev_priv); 1826 + error = i915_capture_gpu_state(i915); 1832 1827 if (!error) { 1833 1828 DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); 1834 1829 return; 1835 1830 } 1836 1831 1837 - i915_error_capture_msg(dev_priv, error, engine_mask, error_msg); 1832 + i915_error_capture_msg(i915, error, engine_mask, error_msg); 1838 1833 DRM_INFO("%s\n", error->error_msg); 1839 1834 1840 1835 if (!error->simulated) { 1841 - spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); 1842 - if (!dev_priv->gpu_error.first_error) { 1843 - dev_priv->gpu_error.first_error = 
error; 1836 + spin_lock_irqsave(&i915->gpu_error.lock, flags); 1837 + if (!i915->gpu_error.first_error) { 1838 + i915->gpu_error.first_error = error; 1844 1839 error = NULL; 1845 1840 } 1846 - spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); 1841 + spin_unlock_irqrestore(&i915->gpu_error.lock, flags); 1847 1842 } 1848 1843 1849 1844 if (error) { ··· 1858 1853 DRM_INFO("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n"); 1859 1854 DRM_INFO("The gpu crash dump is required to analyze gpu hangs, so please always attach it.\n"); 1860 1855 DRM_INFO("GPU crash dump saved to /sys/class/drm/card%d/error\n", 1861 - dev_priv->drm.primary->index); 1856 + i915->drm.primary->index); 1862 1857 warned = true; 1863 1858 } 1864 1859 }
+12 -15
drivers/gpu/drm/i915/i915_ioc32.c
··· 1 - /** 2 - * \file i915_ioc32.c 3 - * 1 + /* 4 2 * 32-bit ioctl compatibility routines for the i915 DRM. 5 - * 6 - * \author Alan Hourihane <alanh@fairlite.demon.co.uk> 7 - * 8 3 * 9 4 * Copyright (C) Paul Mackerras 2005 10 5 * Copyright (C) Alan Hourihane 2005 ··· 23 28 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 24 29 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 25 30 * IN THE SOFTWARE. 31 + * 32 + * Author: Alan Hourihane <alanh@fairlite.demon.co.uk> 26 33 */ 27 34 #include <linux/compat.h> 28 35 ··· 52 55 return -EFAULT; 53 56 54 57 request = compat_alloc_user_space(sizeof(*request)); 55 - if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) 56 - || __put_user(req32.param, &request->param) 57 - || __put_user((void __user *)(unsigned long)req32.value, 58 - &request->value)) 58 + if (!access_ok(VERIFY_WRITE, request, sizeof(*request)) || 59 + __put_user(req32.param, &request->param) || 60 + __put_user((void __user *)(unsigned long)req32.value, 61 + &request->value)) 59 62 return -EFAULT; 60 63 61 64 return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM, ··· 67 70 }; 68 71 69 72 /** 73 + * i915_compat_ioctl - handle the mistakes of the past 74 + * @filp: the file pointer 75 + * @cmd: the ioctl command (and encoded flags) 76 + * @arg: the ioctl argument (from userspace) 77 + * 70 78 * Called whenever a 32-bit process running under a 64-bit kernel 71 79 * performs an ioctl on /dev/dri/card<n>. 72 - * 73 - * \param filp file pointer. 74 - * \param cmd command. 75 - * \param arg user argument. 76 - * \return zero on success or negative number on failure. 77 80 */ 78 81 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 79 82 {
+68 -47
drivers/gpu/drm/i915/i915_irq.c
··· 1413 1413 tasklet_hi_schedule(&execlists->tasklet); 1414 1414 } 1415 1415 1416 - static void gen8_gt_irq_ack(struct drm_i915_private *dev_priv, 1416 + static void gen8_gt_irq_ack(struct drm_i915_private *i915, 1417 1417 u32 master_ctl, u32 gt_iir[4]) 1418 1418 { 1419 + void __iomem * const regs = i915->regs; 1420 + 1421 + #define GEN8_GT_IRQS (GEN8_GT_RCS_IRQ | \ 1422 + GEN8_GT_BCS_IRQ | \ 1423 + GEN8_GT_VCS1_IRQ | \ 1424 + GEN8_GT_VCS2_IRQ | \ 1425 + GEN8_GT_VECS_IRQ | \ 1426 + GEN8_GT_PM_IRQ | \ 1427 + GEN8_GT_GUC_IRQ) 1428 + 1419 1429 if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1420 - gt_iir[0] = I915_READ_FW(GEN8_GT_IIR(0)); 1421 - if (gt_iir[0]) 1422 - I915_WRITE_FW(GEN8_GT_IIR(0), gt_iir[0]); 1430 + gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0)); 1431 + if (likely(gt_iir[0])) 1432 + raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]); 1423 1433 } 1424 1434 1425 1435 if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1426 - gt_iir[1] = I915_READ_FW(GEN8_GT_IIR(1)); 1427 - if (gt_iir[1]) 1428 - I915_WRITE_FW(GEN8_GT_IIR(1), gt_iir[1]); 1429 - } 1430 - 1431 - if (master_ctl & GEN8_GT_VECS_IRQ) { 1432 - gt_iir[3] = I915_READ_FW(GEN8_GT_IIR(3)); 1433 - if (gt_iir[3]) 1434 - I915_WRITE_FW(GEN8_GT_IIR(3), gt_iir[3]); 1436 + gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1)); 1437 + if (likely(gt_iir[1])) 1438 + raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]); 1435 1439 } 1436 1440 1437 1441 if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1438 - gt_iir[2] = I915_READ_FW(GEN8_GT_IIR(2)); 1439 - if (gt_iir[2] & (dev_priv->pm_rps_events | 1440 - dev_priv->pm_guc_events)) { 1441 - I915_WRITE_FW(GEN8_GT_IIR(2), 1442 - gt_iir[2] & (dev_priv->pm_rps_events | 1443 - dev_priv->pm_guc_events)); 1444 - } 1442 + gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2)); 1443 + if (likely(gt_iir[2] & (i915->pm_rps_events | 1444 + i915->pm_guc_events))) 1445 + raw_reg_write(regs, GEN8_GT_IIR(2), 1446 + gt_iir[2] & (i915->pm_rps_events | 1447 + i915->pm_guc_events)); 1448 
+ } 1449 + 1450 + if (master_ctl & GEN8_GT_VECS_IRQ) { 1451 + gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3)); 1452 + if (likely(gt_iir[3])) 1453 + raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]); 1445 1454 } 1446 1455 } 1447 1456 1448 - static void gen8_gt_irq_handler(struct drm_i915_private *dev_priv, 1449 - u32 gt_iir[4]) 1457 + static void gen8_gt_irq_handler(struct drm_i915_private *i915, 1458 + u32 master_ctl, u32 gt_iir[4]) 1450 1459 { 1451 - if (gt_iir[0]) { 1452 - gen8_cs_irq_handler(dev_priv->engine[RCS], 1460 + if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) { 1461 + gen8_cs_irq_handler(i915->engine[RCS], 1453 1462 gt_iir[0], GEN8_RCS_IRQ_SHIFT); 1454 - gen8_cs_irq_handler(dev_priv->engine[BCS], 1463 + gen8_cs_irq_handler(i915->engine[BCS], 1455 1464 gt_iir[0], GEN8_BCS_IRQ_SHIFT); 1456 1465 } 1457 1466 1458 - if (gt_iir[1]) { 1459 - gen8_cs_irq_handler(dev_priv->engine[VCS], 1467 + if (master_ctl & (GEN8_GT_VCS1_IRQ | GEN8_GT_VCS2_IRQ)) { 1468 + gen8_cs_irq_handler(i915->engine[VCS], 1460 1469 gt_iir[1], GEN8_VCS1_IRQ_SHIFT); 1461 - gen8_cs_irq_handler(dev_priv->engine[VCS2], 1470 + gen8_cs_irq_handler(i915->engine[VCS2], 1462 1471 gt_iir[1], GEN8_VCS2_IRQ_SHIFT); 1463 1472 } 1464 1473 1465 - if (gt_iir[3]) 1466 - gen8_cs_irq_handler(dev_priv->engine[VECS], 1474 + if (master_ctl & GEN8_GT_VECS_IRQ) { 1475 + gen8_cs_irq_handler(i915->engine[VECS], 1467 1476 gt_iir[3], GEN8_VECS_IRQ_SHIFT); 1477 + } 1468 1478 1469 - if (gt_iir[2] & dev_priv->pm_rps_events) 1470 - gen6_rps_irq_handler(dev_priv, gt_iir[2]); 1471 - 1472 - if (gt_iir[2] & dev_priv->pm_guc_events) 1473 - gen9_guc_irq_handler(dev_priv, gt_iir[2]); 1479 + if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) { 1480 + gen6_rps_irq_handler(i915, gt_iir[2]); 1481 + gen9_guc_irq_handler(i915, gt_iir[2]); 1482 + } 1474 1483 } 1475 1484 1476 1485 static bool bxt_port_hotplug_long_detect(enum port port, u32 val) ··· 2094 2085 2095 2086 do { 2096 2087 u32 master_ctl, iir; 2097 - u32 gt_iir[4] = {}; 
2098 2088 u32 pipe_stats[I915_MAX_PIPES] = {}; 2099 2089 u32 hotplug_status = 0; 2090 + u32 gt_iir[4]; 2100 2091 u32 ier = 0; 2101 2092 2102 2093 master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL; ··· 2149 2140 I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2150 2141 POSTING_READ(GEN8_MASTER_IRQ); 2151 2142 2152 - gen8_gt_irq_handler(dev_priv, gt_iir); 2143 + gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2153 2144 2154 2145 if (hotplug_status) 2155 2146 i9xx_hpd_irq_handler(dev_priv, hotplug_status); ··· 2684 2675 2685 2676 static irqreturn_t gen8_irq_handler(int irq, void *arg) 2686 2677 { 2687 - struct drm_device *dev = arg; 2688 - struct drm_i915_private *dev_priv = to_i915(dev); 2678 + struct drm_i915_private *dev_priv = to_i915(arg); 2689 2679 u32 master_ctl; 2690 - u32 gt_iir[4] = {}; 2680 + u32 gt_iir[4]; 2691 2681 2692 2682 if (!intel_irqs_enabled(dev_priv)) 2693 2683 return IRQ_NONE; ··· 2698 2690 2699 2691 I915_WRITE_FW(GEN8_MASTER_IRQ, 0); 2700 2692 2701 - /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2702 - disable_rpm_wakeref_asserts(dev_priv); 2703 - 2704 2693 /* Find, clear, then process each source of interrupt */ 2705 2694 gen8_gt_irq_ack(dev_priv, master_ctl, gt_iir); 2706 - gen8_gt_irq_handler(dev_priv, gt_iir); 2707 - gen8_de_irq_handler(dev_priv, master_ctl); 2695 + 2696 + /* IRQs are synced during runtime_suspend, we don't require a wakeref */ 2697 + if (master_ctl & ~GEN8_GT_IRQS) { 2698 + disable_rpm_wakeref_asserts(dev_priv); 2699 + gen8_de_irq_handler(dev_priv, master_ctl); 2700 + enable_rpm_wakeref_asserts(dev_priv); 2701 + } 2708 2702 2709 2703 I915_WRITE_FW(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL); 2710 - POSTING_READ_FW(GEN8_MASTER_IRQ); 2711 2704 2712 - enable_rpm_wakeref_asserts(dev_priv); 2705 + gen8_gt_irq_handler(dev_priv, master_ctl, gt_iir); 2713 2706 2714 2707 return IRQ_HANDLED; 2715 2708 } ··· 2960 2951 ilk_enable_display_irq(dev_priv, bit); 2961 2952 
spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2962 2953 2954 + /* Even though there is no DMC, frame counter can get stuck when 2955 + * PSR is active as no frames are generated. 2956 + */ 2957 + if (HAS_PSR(dev_priv)) 2958 + drm_vblank_restore(dev, pipe); 2959 + 2963 2960 return 0; 2964 2961 } 2965 2962 ··· 2977 2962 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 2978 2963 bdw_enable_pipe_irq(dev_priv, pipe, GEN8_PIPE_VBLANK); 2979 2964 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 2965 + 2966 + /* Even if there is no DMC, frame counter can get stuck when 2967 + * PSR is active as no frames are generated, so check only for PSR. 2968 + */ 2969 + if (HAS_PSR(dev_priv)) 2970 + drm_vblank_restore(dev, pipe); 2980 2971 2981 2972 return 0; 2982 2973 }
+2 -2
drivers/gpu/drm/i915/i915_oa_cflgt3.c
··· 84 84 void 85 85 i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv) 86 86 { 87 - strncpy(dev_priv->perf.oa.test_config.uuid, 87 + strlcpy(dev_priv->perf.oa.test_config.uuid, 88 88 "577e8e2c-3fa0-4875-8743-3538d585e3b0", 89 - UUID_STRING_LEN); 89 + sizeof(dev_priv->perf.oa.test_config.uuid)); 90 90 dev_priv->perf.oa.test_config.id = 1; 91 91 92 92 dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+2 -2
drivers/gpu/drm/i915/i915_oa_cnl.c
··· 96 96 void 97 97 i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv) 98 98 { 99 - strncpy(dev_priv->perf.oa.test_config.uuid, 99 + strlcpy(dev_priv->perf.oa.test_config.uuid, 100 100 "db41edd4-d8e7-4730-ad11-b9a2d6833503", 101 - UUID_STRING_LEN); 101 + sizeof(dev_priv->perf.oa.test_config.uuid)); 102 102 dev_priv->perf.oa.test_config.id = 1; 103 103 104 104 dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
+62 -48
drivers/gpu/drm/i915/i915_pci.c
··· 29 29 #include "i915_drv.h" 30 30 #include "i915_selftest.h" 31 31 32 + #define PLATFORM(x) .platform = (x), .platform_mask = BIT(x) 33 + #define GEN(x) .gen = (x), .gen_mask = BIT((x) - 1) 34 + 32 35 #define GEN_DEFAULT_PIPEOFFSETS \ 33 36 .pipe_offsets = { PIPE_A_OFFSET, PIPE_B_OFFSET, \ 34 37 PIPE_C_OFFSET, PIPE_EDP_OFFSET }, \ ··· 66 63 .page_sizes = I915_GTT_PAGE_SIZE_4K 67 64 68 65 #define GEN2_FEATURES \ 69 - .gen = 2, .num_pipes = 1, \ 66 + GEN(2), \ 67 + .num_pipes = 1, \ 70 68 .has_overlay = 1, .overlay_needs_physical = 1, \ 71 69 .has_gmch_display = 1, \ 72 70 .hws_needs_physical = 1, \ ··· 80 76 81 77 static const struct intel_device_info intel_i830_info = { 82 78 GEN2_FEATURES, 83 - .platform = INTEL_I830, 79 + PLATFORM(INTEL_I830), 84 80 .is_mobile = 1, .cursor_needs_physical = 1, 85 81 .num_pipes = 2, /* legal, last one wins */ 86 82 }; 87 83 88 84 static const struct intel_device_info intel_i845g_info = { 89 85 GEN2_FEATURES, 90 - .platform = INTEL_I845G, 86 + PLATFORM(INTEL_I845G), 91 87 }; 92 88 93 89 static const struct intel_device_info intel_i85x_info = { 94 90 GEN2_FEATURES, 95 - .platform = INTEL_I85X, .is_mobile = 1, 91 + PLATFORM(INTEL_I85X), 92 + .is_mobile = 1, 96 93 .num_pipes = 2, /* legal, last one wins */ 97 94 .cursor_needs_physical = 1, 98 95 .has_fbc = 1, ··· 101 96 102 97 static const struct intel_device_info intel_i865g_info = { 103 98 GEN2_FEATURES, 104 - .platform = INTEL_I865G, 99 + PLATFORM(INTEL_I865G), 105 100 }; 106 101 107 102 #define GEN3_FEATURES \ 108 - .gen = 3, .num_pipes = 2, \ 103 + GEN(3), \ 104 + .num_pipes = 2, \ 109 105 .has_gmch_display = 1, \ 110 106 .ring_mask = RENDER_RING, \ 111 107 .has_snoop = true, \ ··· 116 110 117 111 static const struct intel_device_info intel_i915g_info = { 118 112 GEN3_FEATURES, 119 - .platform = INTEL_I915G, .cursor_needs_physical = 1, 113 + PLATFORM(INTEL_I915G), 114 + .cursor_needs_physical = 1, 120 115 .has_overlay = 1, .overlay_needs_physical = 1, 121 116 
.hws_needs_physical = 1, 122 117 .unfenced_needs_alignment = 1, ··· 125 118 126 119 static const struct intel_device_info intel_i915gm_info = { 127 120 GEN3_FEATURES, 128 - .platform = INTEL_I915GM, 121 + PLATFORM(INTEL_I915GM), 129 122 .is_mobile = 1, 130 123 .cursor_needs_physical = 1, 131 124 .has_overlay = 1, .overlay_needs_physical = 1, ··· 137 130 138 131 static const struct intel_device_info intel_i945g_info = { 139 132 GEN3_FEATURES, 140 - .platform = INTEL_I945G, 133 + PLATFORM(INTEL_I945G), 141 134 .has_hotplug = 1, .cursor_needs_physical = 1, 142 135 .has_overlay = 1, .overlay_needs_physical = 1, 143 136 .hws_needs_physical = 1, ··· 146 139 147 140 static const struct intel_device_info intel_i945gm_info = { 148 141 GEN3_FEATURES, 149 - .platform = INTEL_I945GM, .is_mobile = 1, 142 + PLATFORM(INTEL_I945GM), 143 + .is_mobile = 1, 150 144 .has_hotplug = 1, .cursor_needs_physical = 1, 151 145 .has_overlay = 1, .overlay_needs_physical = 1, 152 146 .supports_tv = 1, ··· 158 150 159 151 static const struct intel_device_info intel_g33_info = { 160 152 GEN3_FEATURES, 161 - .platform = INTEL_G33, 153 + PLATFORM(INTEL_G33), 162 154 .has_hotplug = 1, 163 155 .has_overlay = 1, 164 156 }; 165 157 166 158 static const struct intel_device_info intel_pineview_info = { 167 159 GEN3_FEATURES, 168 - .platform = INTEL_PINEVIEW, .is_mobile = 1, 160 + PLATFORM(INTEL_PINEVIEW), 161 + .is_mobile = 1, 169 162 .has_hotplug = 1, 170 163 .has_overlay = 1, 171 164 }; 172 165 173 166 #define GEN4_FEATURES \ 174 - .gen = 4, .num_pipes = 2, \ 167 + GEN(4), \ 168 + .num_pipes = 2, \ 175 169 .has_hotplug = 1, \ 176 170 .has_gmch_display = 1, \ 177 171 .ring_mask = RENDER_RING, \ ··· 184 174 185 175 static const struct intel_device_info intel_i965g_info = { 186 176 GEN4_FEATURES, 187 - .platform = INTEL_I965G, 177 + PLATFORM(INTEL_I965G), 188 178 .has_overlay = 1, 189 179 .hws_needs_physical = 1, 190 180 .has_snoop = false, ··· 192 182 193 183 static const struct intel_device_info 
intel_i965gm_info = { 194 184 GEN4_FEATURES, 195 - .platform = INTEL_I965GM, 185 + PLATFORM(INTEL_I965GM), 196 186 .is_mobile = 1, .has_fbc = 1, 197 187 .has_overlay = 1, 198 188 .supports_tv = 1, ··· 202 192 203 193 static const struct intel_device_info intel_g45_info = { 204 194 GEN4_FEATURES, 205 - .platform = INTEL_G45, 195 + PLATFORM(INTEL_G45), 206 196 .ring_mask = RENDER_RING | BSD_RING, 207 197 }; 208 198 209 199 static const struct intel_device_info intel_gm45_info = { 210 200 GEN4_FEATURES, 211 - .platform = INTEL_GM45, 201 + PLATFORM(INTEL_GM45), 212 202 .is_mobile = 1, .has_fbc = 1, 213 203 .supports_tv = 1, 214 204 .ring_mask = RENDER_RING | BSD_RING, 215 205 }; 216 206 217 207 #define GEN5_FEATURES \ 218 - .gen = 5, .num_pipes = 2, \ 208 + GEN(5), \ 209 + .num_pipes = 2, \ 219 210 .has_hotplug = 1, \ 220 211 .ring_mask = RENDER_RING | BSD_RING, \ 221 212 .has_snoop = true, \ ··· 228 217 229 218 static const struct intel_device_info intel_ironlake_d_info = { 230 219 GEN5_FEATURES, 231 - .platform = INTEL_IRONLAKE, 220 + PLATFORM(INTEL_IRONLAKE), 232 221 }; 233 222 234 223 static const struct intel_device_info intel_ironlake_m_info = { 235 224 GEN5_FEATURES, 236 - .platform = INTEL_IRONLAKE, 225 + PLATFORM(INTEL_IRONLAKE), 237 226 .is_mobile = 1, .has_fbc = 1, 238 227 }; 239 228 240 229 #define GEN6_FEATURES \ 241 - .gen = 6, .num_pipes = 2, \ 230 + GEN(6), \ 231 + .num_pipes = 2, \ 242 232 .has_hotplug = 1, \ 243 233 .has_fbc = 1, \ 244 234 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ ··· 253 241 254 242 #define SNB_D_PLATFORM \ 255 243 GEN6_FEATURES, \ 256 - .platform = INTEL_SANDYBRIDGE 244 + PLATFORM(INTEL_SANDYBRIDGE) 257 245 258 246 static const struct intel_device_info intel_sandybridge_d_gt1_info = { 259 247 SNB_D_PLATFORM, ··· 267 255 268 256 #define SNB_M_PLATFORM \ 269 257 GEN6_FEATURES, \ 270 - .platform = INTEL_SANDYBRIDGE, \ 258 + PLATFORM(INTEL_SANDYBRIDGE), \ 271 259 .is_mobile = 1 272 260 273 261 ··· 282 270 }; 283 271 284 272 
#define GEN7_FEATURES \ 285 - .gen = 7, .num_pipes = 3, \ 273 + GEN(7), \ 274 + .num_pipes = 3, \ 286 275 .has_hotplug = 1, \ 287 276 .has_fbc = 1, \ 288 277 .ring_mask = RENDER_RING | BSD_RING | BLT_RING, \ ··· 298 285 299 286 #define IVB_D_PLATFORM \ 300 287 GEN7_FEATURES, \ 301 - .platform = INTEL_IVYBRIDGE, \ 288 + PLATFORM(INTEL_IVYBRIDGE), \ 302 289 .has_l3_dpf = 1 303 290 304 291 static const struct intel_device_info intel_ivybridge_d_gt1_info = { ··· 313 300 314 301 #define IVB_M_PLATFORM \ 315 302 GEN7_FEATURES, \ 316 - .platform = INTEL_IVYBRIDGE, \ 303 + PLATFORM(INTEL_IVYBRIDGE), \ 317 304 .is_mobile = 1, \ 318 305 .has_l3_dpf = 1 319 306 ··· 329 316 330 317 static const struct intel_device_info intel_ivybridge_q_info = { 331 318 GEN7_FEATURES, 332 - .platform = INTEL_IVYBRIDGE, 319 + PLATFORM(INTEL_IVYBRIDGE), 333 320 .gt = 2, 334 321 .num_pipes = 0, /* legal, last one wins */ 335 322 .has_l3_dpf = 1, 336 323 }; 337 324 338 325 static const struct intel_device_info intel_valleyview_info = { 339 - .platform = INTEL_VALLEYVIEW, 340 - .gen = 7, 326 + PLATFORM(INTEL_VALLEYVIEW), 327 + GEN(7), 341 328 .is_lp = 1, 342 329 .num_pipes = 2, 343 330 .has_psr = 1, ··· 368 355 369 356 #define HSW_PLATFORM \ 370 357 G75_FEATURES, \ 371 - .platform = INTEL_HASWELL, \ 358 + PLATFORM(INTEL_HASWELL), \ 372 359 .has_l3_dpf = 1 373 360 374 361 static const struct intel_device_info intel_haswell_gt1_info = { ··· 388 375 389 376 #define GEN8_FEATURES \ 390 377 G75_FEATURES, \ 378 + GEN(8), \ 391 379 BDW_COLORS, \ 392 380 .page_sizes = I915_GTT_PAGE_SIZE_4K | \ 393 381 I915_GTT_PAGE_SIZE_2M, \ ··· 399 385 400 386 #define BDW_PLATFORM \ 401 387 GEN8_FEATURES, \ 402 - .gen = 8, \ 403 - .platform = INTEL_BROADWELL 388 + PLATFORM(INTEL_BROADWELL) 404 389 405 390 static const struct intel_device_info intel_broadwell_gt1_info = { 406 391 BDW_PLATFORM, ··· 426 413 }; 427 414 428 415 static const struct intel_device_info intel_cherryview_info = { 429 - .gen = 8, .num_pipes = 3, 416 
+ PLATFORM(INTEL_CHERRYVIEW), 417 + GEN(8), 418 + .num_pipes = 3, 430 419 .has_hotplug = 1, 431 420 .is_lp = 1, 432 421 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, 433 - .platform = INTEL_CHERRYVIEW, 434 422 .has_64bit_reloc = 1, 435 423 .has_psr = 1, 436 424 .has_runtime_pm = 1, ··· 457 443 458 444 #define GEN9_FEATURES \ 459 445 GEN8_FEATURES, \ 446 + GEN(9), \ 460 447 GEN9_DEFAULT_PAGE_SIZES, \ 461 448 .has_logical_ring_preemption = 1, \ 462 449 .has_csr = 1, \ ··· 467 452 468 453 #define SKL_PLATFORM \ 469 454 GEN9_FEATURES, \ 470 - .gen = 9, \ 471 - .platform = INTEL_SKYLAKE 455 + PLATFORM(INTEL_SKYLAKE) 472 456 473 457 static const struct intel_device_info intel_skylake_gt1_info = { 474 458 SKL_PLATFORM, ··· 495 481 }; 496 482 497 483 #define GEN9_LP_FEATURES \ 498 - .gen = 9, \ 484 + GEN(9), \ 499 485 .is_lp = 1, \ 500 486 .has_hotplug = 1, \ 501 487 .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, \ ··· 527 513 528 514 static const struct intel_device_info intel_broxton_info = { 529 515 GEN9_LP_FEATURES, 530 - .platform = INTEL_BROXTON, 516 + PLATFORM(INTEL_BROXTON), 531 517 .ddb_size = 512, 532 518 }; 533 519 534 520 static const struct intel_device_info intel_geminilake_info = { 535 521 GEN9_LP_FEATURES, 536 - .platform = INTEL_GEMINILAKE, 522 + PLATFORM(INTEL_GEMINILAKE), 537 523 .ddb_size = 1024, 538 524 GLK_COLORS, 539 525 }; 540 526 541 527 #define KBL_PLATFORM \ 542 528 GEN9_FEATURES, \ 543 - .gen = 9, \ 544 - .platform = INTEL_KABYLAKE 529 + PLATFORM(INTEL_KABYLAKE) 545 530 546 531 static const struct intel_device_info intel_kabylake_gt1_info = { 547 532 KBL_PLATFORM, ··· 560 547 561 548 #define CFL_PLATFORM \ 562 549 GEN9_FEATURES, \ 563 - .gen = 9, \ 564 - .platform = INTEL_COFFEELAKE 550 + PLATFORM(INTEL_COFFEELAKE) 565 551 566 552 static const struct intel_device_info intel_coffeelake_gt1_info = { 567 553 CFL_PLATFORM, ··· 580 568 581 569 #define GEN10_FEATURES \ 582 570 GEN9_FEATURES, \ 571 + GEN(10), \ 583 572 
.ddb_size = 1024, \ 584 573 GLK_COLORS 585 574 586 575 static const struct intel_device_info intel_cannonlake_info = { 587 576 GEN10_FEATURES, 588 - .is_alpha_support = 1, 589 - .platform = INTEL_CANNONLAKE, 590 - .gen = 10, 577 + PLATFORM(INTEL_CANNONLAKE), 591 578 .gt = 2, 592 579 }; 593 580 594 581 #define GEN11_FEATURES \ 595 582 GEN10_FEATURES, \ 596 - .gen = 11, \ 583 + GEN(11), \ 597 584 .ddb_size = 2048, \ 598 585 .has_csr = 0 599 586 600 587 static const struct intel_device_info intel_icelake_11_info = { 601 588 GEN11_FEATURES, 602 - .platform = INTEL_ICELAKE, 589 + PLATFORM(INTEL_ICELAKE), 603 590 .is_alpha_support = 1, 604 591 .has_resource_streamer = 0, 605 592 }; 593 + 594 + #undef GEN 595 + #undef PLATFORM 606 596 607 597 /* 608 598 * Make sure any device matches here are from most specific to most
+91 -15
drivers/gpu/drm/i915/i915_pmu.c
··· 415 415 return 0; 416 416 } 417 417 418 - static u64 __i915_pmu_event_read(struct perf_event *event) 418 + static u64 __get_rc6(struct drm_i915_private *i915) 419 + { 420 + u64 val; 421 + 422 + val = intel_rc6_residency_ns(i915, 423 + IS_VALLEYVIEW(i915) ? 424 + VLV_GT_RENDER_RC6 : 425 + GEN6_GT_GFX_RC6); 426 + 427 + if (HAS_RC6p(i915)) 428 + val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p); 429 + 430 + if (HAS_RC6pp(i915)) 431 + val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp); 432 + 433 + return val; 434 + } 435 + 436 + static u64 get_rc6(struct drm_i915_private *i915, bool locked) 437 + { 438 + #if IS_ENABLED(CONFIG_PM) 439 + unsigned long flags; 440 + u64 val; 441 + 442 + if (intel_runtime_pm_get_if_in_use(i915)) { 443 + val = __get_rc6(i915); 444 + intel_runtime_pm_put(i915); 445 + 446 + /* 447 + * If we are coming back from being runtime suspended we must 448 + * be careful not to report a larger value than returned 449 + * previously. 450 + */ 451 + 452 + if (!locked) 453 + spin_lock_irqsave(&i915->pmu.lock, flags); 454 + 455 + if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { 456 + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0; 457 + i915->pmu.sample[__I915_SAMPLE_RC6].cur = val; 458 + } else { 459 + val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; 460 + } 461 + 462 + if (!locked) 463 + spin_unlock_irqrestore(&i915->pmu.lock, flags); 464 + } else { 465 + struct pci_dev *pdev = i915->drm.pdev; 466 + struct device *kdev = &pdev->dev; 467 + unsigned long flags2; 468 + 469 + /* 470 + * We are runtime suspended. 471 + * 472 + * Report the delta from when the device was suspended to now, 473 + * on top of the last known real value, as the approximated RC6 474 + * counter value. 
475 + */ 476 + if (!locked) 477 + spin_lock_irqsave(&i915->pmu.lock, flags); 478 + 479 + spin_lock_irqsave(&kdev->power.lock, flags2); 480 + 481 + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) 482 + i915->pmu.suspended_jiffies_last = 483 + kdev->power.suspended_jiffies; 484 + 485 + val = kdev->power.suspended_jiffies - 486 + i915->pmu.suspended_jiffies_last; 487 + val += jiffies - kdev->power.accounting_timestamp; 488 + 489 + spin_unlock_irqrestore(&kdev->power.lock, flags2); 490 + 491 + val = jiffies_to_nsecs(val); 492 + val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; 493 + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; 494 + 495 + if (!locked) 496 + spin_unlock_irqrestore(&i915->pmu.lock, flags); 497 + } 498 + 499 + return val; 500 + #else 501 + return __get_rc6(i915); 502 + #endif 503 + } 504 + 505 + static u64 __i915_pmu_event_read(struct perf_event *event, bool locked) 419 506 { 420 507 struct drm_i915_private *i915 = 421 508 container_of(event->pmu, typeof(*i915), pmu.base); ··· 540 453 val = count_interrupts(i915); 541 454 break; 542 455 case I915_PMU_RC6_RESIDENCY: 543 - intel_runtime_pm_get(i915); 544 - val = intel_rc6_residency_ns(i915, 545 - IS_VALLEYVIEW(i915) ? 546 - VLV_GT_RENDER_RC6 : 547 - GEN6_GT_GFX_RC6); 548 - if (HAS_RC6p(i915)) 549 - val += intel_rc6_residency_ns(i915, 550 - GEN6_GT_GFX_RC6p); 551 - if (HAS_RC6pp(i915)) 552 - val += intel_rc6_residency_ns(i915, 553 - GEN6_GT_GFX_RC6pp); 554 - intel_runtime_pm_put(i915); 456 + val = get_rc6(i915, locked); 555 457 break; 556 458 } 557 459 } ··· 555 479 556 480 again: 557 481 prev = local64_read(&hwc->prev_count); 558 - new = __i915_pmu_event_read(event); 482 + new = __i915_pmu_event_read(event, false); 559 483 560 484 if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev) 561 485 goto again; ··· 610 534 * for all listeners. Even when the event was already enabled and has 611 535 * an existing non-zero value. 
612 536 */ 613 - local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); 537 + local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true)); 614 538 615 539 spin_unlock_irqrestore(&i915->pmu.lock, flags); 616 540 }
+6
drivers/gpu/drm/i915/i915_pmu.h
··· 27 27 enum { 28 28 __I915_SAMPLE_FREQ_ACT = 0, 29 29 __I915_SAMPLE_FREQ_REQ, 30 + __I915_SAMPLE_RC6, 31 + __I915_SAMPLE_RC6_ESTIMATED, 30 32 __I915_NUM_PMU_SAMPLERS 31 33 }; 32 34 ··· 96 94 * struct intel_engine_cs. 97 95 */ 98 96 struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS]; 97 + /** 98 + * @suspended_jiffies_last: Cached suspend time from PM core. 99 + */ 100 + unsigned long suspended_jiffies_last; 99 101 /** 100 102 * @i915_attr: Memory block holding device attributes. 101 103 */
+60 -19
drivers/gpu/drm/i915/i915_reg.h
··· 1906 1906 #define CL_POWER_DOWN_ENABLE (1 << 4) 1907 1907 #define SUS_CLOCK_CONFIG (3 << 0) 1908 1908 1909 + #define _ICL_PORT_CL_DW5_A 0x162014 1910 + #define _ICL_PORT_CL_DW5_B 0x6C014 1911 + #define ICL_PORT_CL_DW5(port) _MMIO_PORT(port, _ICL_PORT_CL_DW5_A, \ 1912 + _ICL_PORT_CL_DW5_B) 1913 + 1909 1914 #define _PORT_CL1CM_DW9_A 0x162024 1910 1915 #define _PORT_CL1CM_DW9_BC 0x6C024 1911 1916 #define IREF0RC_OFFSET_SHIFT 8 ··· 2034 2029 #define _CNL_PORT_TX_DW5_LN0_AE 0x162454 2035 2030 #define _CNL_PORT_TX_DW5_LN0_B 0x162654 2036 2031 #define _CNL_PORT_TX_DW5_LN0_C 0x162C54 2037 - #define _CNL_PORT_TX_DW5_LN0_D 0x162ED4 2032 + #define _CNL_PORT_TX_DW5_LN0_D 0x162E54 2038 2033 #define _CNL_PORT_TX_DW5_LN0_F 0x162854 2039 2034 #define CNL_PORT_TX_DW5_GRP(port) _MMIO_PORT6(port, \ 2040 2035 _CNL_PORT_TX_DW5_GRP_AE, \ ··· 2065 2060 #define _CNL_PORT_TX_DW7_LN0_AE 0x16245C 2066 2061 #define _CNL_PORT_TX_DW7_LN0_B 0x16265C 2067 2062 #define _CNL_PORT_TX_DW7_LN0_C 0x162C5C 2068 - #define _CNL_PORT_TX_DW7_LN0_D 0x162EDC 2063 + #define _CNL_PORT_TX_DW7_LN0_D 0x162E5C 2069 2064 #define _CNL_PORT_TX_DW7_LN0_F 0x16285C 2070 2065 #define CNL_PORT_TX_DW7_GRP(port) _MMIO_PORT6(port, \ 2071 2066 _CNL_PORT_TX_DW7_GRP_AE, \ ··· 2108 2103 #define VOLTAGE_INFO_SHIFT 24 2109 2104 #define CNL_PORT_COMP_DW9 _MMIO(0x162124) 2110 2105 #define CNL_PORT_COMP_DW10 _MMIO(0x162128) 2106 + 2107 + #define _ICL_PORT_COMP_DW0_A 0x162100 2108 + #define _ICL_PORT_COMP_DW0_B 0x6C100 2109 + #define ICL_PORT_COMP_DW0(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW0_A, \ 2110 + _ICL_PORT_COMP_DW0_B) 2111 + #define _ICL_PORT_COMP_DW1_A 0x162104 2112 + #define _ICL_PORT_COMP_DW1_B 0x6C104 2113 + #define ICL_PORT_COMP_DW1(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW1_A, \ 2114 + _ICL_PORT_COMP_DW1_B) 2115 + #define _ICL_PORT_COMP_DW3_A 0x16210C 2116 + #define _ICL_PORT_COMP_DW3_B 0x6C10C 2117 + #define ICL_PORT_COMP_DW3(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW3_A, \ 2118 + _ICL_PORT_COMP_DW3_B) 2119 + #define 
_ICL_PORT_COMP_DW9_A 0x162124 2120 + #define _ICL_PORT_COMP_DW9_B 0x6C124 2121 + #define ICL_PORT_COMP_DW9(port) _MMIO_PORT(port, _ICL_PORT_COMP_DW9_A, \ 2122 + _ICL_PORT_COMP_DW9_B) 2123 + #define _ICL_PORT_COMP_DW10_A 0x162128 2124 + #define _ICL_PORT_COMP_DW10_B 0x6C128 2125 + #define ICL_PORT_COMP_DW10(port) _MMIO_PORT(port, \ 2126 + _ICL_PORT_COMP_DW10_A, \ 2127 + _ICL_PORT_COMP_DW10_B) 2111 2128 2112 2129 /* BXT PHY Ref registers */ 2113 2130 #define _PORT_REF_DW3_A 0x16218C ··· 7165 7138 #define DISP_DATA_PARTITION_5_6 (1<<6) 7166 7139 #define DISP_IPC_ENABLE (1<<3) 7167 7140 #define DBUF_CTL _MMIO(0x45008) 7141 + #define DBUF_CTL_S1 _MMIO(0x45008) 7142 + #define DBUF_CTL_S2 _MMIO(0x44FE8) 7168 7143 #define DBUF_POWER_REQUEST (1<<31) 7169 7144 #define DBUF_POWER_STATE (1<<30) 7170 7145 #define GEN7_MSG_CTL _MMIO(0x45010) ··· 7176 7147 #define RESET_PCH_HANDSHAKE_ENABLE (1<<4) 7177 7148 7178 7149 #define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430) 7179 - #define SKL_SELECT_ALTERNATE_DC_EXIT (1<<30) 7180 - #define MASK_WAKEMEM (1<<13) 7150 + #define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30) 7151 + #define MASK_WAKEMEM (1 << 13) 7152 + #define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7) 7181 7153 7182 7154 #define SKL_DFSM _MMIO(0x51000) 7183 7155 #define SKL_DFSM_CDCLK_LIMIT_MASK (3 << 23) ··· 7190 7160 #define SKL_DFSM_PIPE_B_DISABLE (1 << 21) 7191 7161 #define SKL_DFSM_PIPE_C_DISABLE (1 << 28) 7192 7162 7193 - #define SKL_DSSM _MMIO(0x51004) 7194 - #define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31) 7163 + #define SKL_DSSM _MMIO(0x51004) 7164 + #define CNL_DSSM_CDCLK_PLL_REFCLK_24MHz (1 << 31) 7165 + #define ICL_DSSM_CDCLK_PLL_REFCLK_MASK (7 << 29) 7166 + #define ICL_DSSM_CDCLK_PLL_REFCLK_24MHz (0 << 29) 7167 + #define ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz (1 << 29) 7168 + #define ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz (2 << 29) 7195 7169 7196 7170 #define GEN7_FF_SLICE_CS_CHICKEN1 _MMIO(0x20e0) 7197 7171 #define GEN9_FFSC_PERCTX_PREEMPT_CTRL (1<<14) ··· 8828 8794 8829 8795 /* CDCLK_CTL 
*/ 8830 8796 #define CDCLK_CTL _MMIO(0x46000) 8831 - #define CDCLK_FREQ_SEL_MASK (3<<26) 8832 - #define CDCLK_FREQ_450_432 (0<<26) 8833 - #define CDCLK_FREQ_540 (1<<26) 8834 - #define CDCLK_FREQ_337_308 (2<<26) 8835 - #define CDCLK_FREQ_675_617 (3<<26) 8836 - #define BXT_CDCLK_CD2X_DIV_SEL_MASK (3<<22) 8837 - #define BXT_CDCLK_CD2X_DIV_SEL_1 (0<<22) 8838 - #define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1<<22) 8839 - #define BXT_CDCLK_CD2X_DIV_SEL_2 (2<<22) 8840 - #define BXT_CDCLK_CD2X_DIV_SEL_4 (3<<22) 8841 - #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe)<<20) 8842 - #define CDCLK_DIVMUX_CD_OVERRIDE (1<<19) 8797 + #define CDCLK_FREQ_SEL_MASK (3 << 26) 8798 + #define CDCLK_FREQ_450_432 (0 << 26) 8799 + #define CDCLK_FREQ_540 (1 << 26) 8800 + #define CDCLK_FREQ_337_308 (2 << 26) 8801 + #define CDCLK_FREQ_675_617 (3 << 26) 8802 + #define BXT_CDCLK_CD2X_DIV_SEL_MASK (3 << 22) 8803 + #define BXT_CDCLK_CD2X_DIV_SEL_1 (0 << 22) 8804 + #define BXT_CDCLK_CD2X_DIV_SEL_1_5 (1 << 22) 8805 + #define BXT_CDCLK_CD2X_DIV_SEL_2 (2 << 22) 8806 + #define BXT_CDCLK_CD2X_DIV_SEL_4 (3 << 22) 8807 + #define BXT_CDCLK_CD2X_PIPE(pipe) ((pipe) << 20) 8808 + #define CDCLK_DIVMUX_CD_OVERRIDE (1 << 19) 8843 8809 #define BXT_CDCLK_CD2X_PIPE_NONE BXT_CDCLK_CD2X_PIPE(3) 8844 - #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1<<16) 8810 + #define ICL_CDCLK_CD2X_PIPE_NONE (7 << 19) 8811 + #define BXT_CDCLK_SSA_PRECHARGE_ENABLE (1 << 16) 8845 8812 #define CDCLK_FREQ_DECIMAL_MASK (0x7ff) 8846 8813 8847 8814 /* LCPLL_CTL */ ··· 9750 9715 #define MMCD_MISC_CTRL _MMIO(0x4ddc) /* skl+ */ 9751 9716 #define MMCD_PCLA (1 << 31) 9752 9717 #define MMCD_HOTSPOT_EN (1 << 27) 9718 + 9719 + #define _ICL_PHY_MISC_A 0x64C00 9720 + #define _ICL_PHY_MISC_B 0x64C04 9721 + #define ICL_PHY_MISC(port) _MMIO_PORT(port, _ICL_PHY_MISC_A, \ 9722 + _ICL_PHY_MISC_B) 9723 + #define ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN (1 << 23) 9753 9724 9754 9725 #endif /* _I915_REG_H_ */
+8 -8
drivers/gpu/drm/i915/i915_syncmap.c
··· 86 86 87 87 /** 88 88 * i915_syncmap_init -- initialise the #i915_syncmap 89 - * @root - pointer to the #i915_syncmap 89 + * @root: pointer to the #i915_syncmap 90 90 */ 91 91 void i915_syncmap_init(struct i915_syncmap **root) 92 92 { ··· 139 139 140 140 /** 141 141 * i915_syncmap_is_later -- compare against the last know sync point 142 - * @root - pointer to the #i915_syncmap 143 - * @id - the context id (other timeline) we are synchronising to 144 - * @seqno - the sequence number along the other timeline 142 + * @root: pointer to the #i915_syncmap 143 + * @id: the context id (other timeline) we are synchronising to 144 + * @seqno: the sequence number along the other timeline 145 145 * 146 146 * If we have already synchronised this @root timeline with another (@id) then 147 147 * we can omit any repeated or earlier synchronisation requests. If the two ··· 339 339 340 340 /** 341 341 * i915_syncmap_set -- mark the most recent syncpoint between contexts 342 - * @root - pointer to the #i915_syncmap 343 - * @id - the context id (other timeline) we have synchronised to 344 - * @seqno - the sequence number along the other timeline 342 + * @root: pointer to the #i915_syncmap 343 + * @id: the context id (other timeline) we have synchronised to 344 + * @seqno: the sequence number along the other timeline 345 345 * 346 346 * When we synchronise this @root timeline with another (@id), we also know 347 347 * that we have synchronized with all previous seqno along that timeline. If ··· 382 382 383 383 /** 384 384 * i915_syncmap_free -- free all memory associated with the syncmap 385 - * @root - pointer to the #i915_syncmap 385 + * @root: pointer to the #i915_syncmap 386 386 * 387 387 * Either when the timeline is to be freed and we no longer need the sync 388 388 * point tracking, or when the fences are all known to be signaled and the
+3 -2
drivers/gpu/drm/i915/intel_atomic.c
··· 188 188 /** 189 189 * intel_crtc_destroy_state - destroy crtc state 190 190 * @crtc: drm crtc 191 + * @state: the state to destroy 191 192 * 192 193 * Destroys the crtc state (both common and Intel-specific) for the 193 194 * specified crtc. 194 195 */ 195 196 void 196 197 intel_crtc_destroy_state(struct drm_crtc *crtc, 197 - struct drm_crtc_state *state) 198 + struct drm_crtc_state *state) 198 199 { 199 200 drm_atomic_helper_crtc_destroy_state(crtc, state); 200 201 } ··· 203 202 /** 204 203 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests 205 204 * @dev_priv: i915 device 206 - * @crtc: intel crtc 205 + * @intel_crtc: intel crtc 207 206 * @crtc_state: incoming crtc_state to validate and setup scalers 208 207 * 209 208 * This function sets up scalers based on staged scaling requests for
+1
drivers/gpu/drm/i915/intel_atomic_plane.c
··· 85 85 __drm_atomic_helper_plane_duplicate_state(plane, state); 86 86 87 87 intel_state->vma = NULL; 88 + intel_state->flags = 0; 88 89 89 90 return state; 90 91 }
+4 -4
drivers/gpu/drm/i915/intel_audio.c
··· 704 704 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 705 705 dev_priv->display.audio_codec_enable = ilk_audio_codec_enable; 706 706 dev_priv->display.audio_codec_disable = ilk_audio_codec_disable; 707 - } else if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) { 707 + } else if (IS_HASWELL(dev_priv) || INTEL_GEN(dev_priv) >= 8) { 708 708 dev_priv->display.audio_codec_enable = hsw_audio_codec_enable; 709 709 dev_priv->display.audio_codec_disable = hsw_audio_codec_disable; 710 710 } else if (HAS_PCH_SPLIT(dev_priv)) { ··· 779 779 { 780 780 struct intel_encoder *encoder; 781 781 782 - if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) 783 - return NULL; 784 - 785 782 /* MST */ 786 783 if (pipe >= 0) { 784 + if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map))) 785 + return NULL; 786 + 787 787 encoder = dev_priv->av_enc_map[pipe]; 788 788 /* 789 789 * when bootup, audio driver may not know it is
+106 -1
drivers/gpu/drm/i915/intel_bios.c
··· 391 391 static int intel_bios_ssc_frequency(struct drm_i915_private *dev_priv, 392 392 bool alternate) 393 393 { 394 - switch (INTEL_INFO(dev_priv)->gen) { 394 + switch (INTEL_GEN(dev_priv)) { 395 395 case 2: 396 396 return alternate ? 66667 : 48000; 397 397 case 3: ··· 947 947 return 0; 948 948 } 949 949 950 + /* 951 + * Get len of pre-fixed deassert fragment from a v1 init OTP sequence, 952 + * skip all delay + gpio operands and stop at the first DSI packet op. 953 + */ 954 + static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv) 955 + { 956 + const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; 957 + int index, len; 958 + 959 + if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1)) 960 + return 0; 961 + 962 + /* index = 1 to skip sequence byte */ 963 + for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) { 964 + switch (data[index]) { 965 + case MIPI_SEQ_ELEM_SEND_PKT: 966 + return index == 1 ? 0 : index; 967 + case MIPI_SEQ_ELEM_DELAY: 968 + len = 5; /* 1 byte for operand + uint32 */ 969 + break; 970 + case MIPI_SEQ_ELEM_GPIO: 971 + len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */ 972 + break; 973 + default: 974 + return 0; 975 + } 976 + } 977 + 978 + return 0; 979 + } 980 + 981 + /* 982 + * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence. 983 + * The deassert must be done before calling intel_dsi_device_ready, so for 984 + * these devices we split the init OTP sequence into a deassert sequence and 985 + * the actual init OTP part. 986 + */ 987 + static void fixup_mipi_sequences(struct drm_i915_private *dev_priv) 988 + { 989 + u8 *init_otp; 990 + int len; 991 + 992 + /* Limit this to VLV for now. 
*/ 993 + if (!IS_VALLEYVIEW(dev_priv)) 994 + return; 995 + 996 + /* Limit this to v1 vid-mode sequences */ 997 + if (dev_priv->vbt.dsi.config->is_cmd_mode || 998 + dev_priv->vbt.dsi.seq_version != 1) 999 + return; 1000 + 1001 + /* Only do this if there are otp and assert seqs and no deassert seq */ 1002 + if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] || 1003 + !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] || 1004 + dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET]) 1005 + return; 1006 + 1007 + /* The deassert-sequence ends at the first DSI packet */ 1008 + len = get_init_otp_deassert_fragment_len(dev_priv); 1009 + if (!len) 1010 + return; 1011 + 1012 + DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n"); 1013 + 1014 + /* Copy the fragment, update seq byte and terminate it */ 1015 + init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP]; 1016 + dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL); 1017 + if (!dev_priv->vbt.dsi.deassert_seq) 1018 + return; 1019 + dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET; 1020 + dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END; 1021 + /* Use the copy for deassert */ 1022 + dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] = 1023 + dev_priv->vbt.dsi.deassert_seq; 1024 + /* Replace the last byte of the fragment with init OTP seq byte */ 1025 + init_otp[len - 1] = MIPI_SEQ_INIT_OTP; 1026 + /* And make MIPI_MIPI_SEQ_INIT_OTP point to it */ 1027 + dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1; 1028 + } 1029 + 950 1030 static void 951 1031 parse_mipi_sequence(struct drm_i915_private *dev_priv, 952 1032 const struct bdb_header *bdb) ··· 1095 1015 dev_priv->vbt.dsi.data = data; 1096 1016 dev_priv->vbt.dsi.size = seq_size; 1097 1017 dev_priv->vbt.dsi.seq_version = sequence->version; 1018 + 1019 + fixup_mipi_sequences(dev_priv); 1098 1020 1099 1021 DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n"); 1100 1022 return; ··· 1690 1608 1691 1609 if 
(bios) 1692 1610 pci_unmap_rom(pdev, bios); 1611 + } 1612 + 1613 + /** 1614 + * intel_bios_cleanup - Free any resources allocated by intel_bios_init() 1615 + * @dev_priv: i915 device instance 1616 + */ 1617 + void intel_bios_cleanup(struct drm_i915_private *dev_priv) 1618 + { 1619 + kfree(dev_priv->vbt.child_dev); 1620 + dev_priv->vbt.child_dev = NULL; 1621 + dev_priv->vbt.child_dev_num = 0; 1622 + kfree(dev_priv->vbt.sdvo_lvds_vbt_mode); 1623 + dev_priv->vbt.sdvo_lvds_vbt_mode = NULL; 1624 + kfree(dev_priv->vbt.lfp_lvds_vbt_mode); 1625 + dev_priv->vbt.lfp_lvds_vbt_mode = NULL; 1626 + kfree(dev_priv->vbt.dsi.data); 1627 + dev_priv->vbt.dsi.data = NULL; 1628 + kfree(dev_priv->vbt.dsi.pps); 1629 + dev_priv->vbt.dsi.pps = NULL; 1630 + kfree(dev_priv->vbt.dsi.config); 1631 + dev_priv->vbt.dsi.config = NULL; 1632 + kfree(dev_priv->vbt.dsi.deassert_seq); 1633 + dev_priv->vbt.dsi.deassert_seq = NULL; 1693 1634 } 1694 1635 1695 1636 /**
+4 -16
drivers/gpu/drm/i915/intel_breadcrumbs.c
··· 588 588 spin_unlock_irq(&b->rb_lock); 589 589 } 590 590 591 - static bool signal_valid(const struct drm_i915_gem_request *request) 592 - { 593 - return intel_wait_check_request(&request->signaling.wait, request); 594 - } 595 - 596 591 static bool signal_complete(const struct drm_i915_gem_request *request) 597 592 { 598 593 if (!request) 599 594 return false; 600 595 601 - /* If another process served as the bottom-half it may have already 602 - * signalled that this wait is already completed. 603 - */ 604 - if (intel_wait_complete(&request->signaling.wait)) 605 - return signal_valid(request); 606 - 607 - /* Carefully check if the request is complete, giving time for the 596 + /* 597 + * Carefully check if the request is complete, giving time for the 608 598 * seqno to be visible or if the GPU hung. 609 599 */ 610 - if (__i915_request_irq_complete(request)) 611 - return true; 612 - 613 - return false; 600 + return __i915_request_irq_complete(request); 614 601 } 615 602 616 603 static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) ··· 699 712 &request->fence.flags)) { 700 713 local_bh_disable(); 701 714 dma_fence_signal(&request->fence); 715 + GEM_BUG_ON(!i915_gem_request_completed(request)); 702 716 local_bh_enable(); /* kick start the tasklets */ 703 717 } 704 718
+236 -3
drivers/gpu/drm/i915/intel_cdclk.c
··· 1778 1778 dev_priv->cdclk.hw.vco = -1; 1779 1779 } 1780 1780 1781 + static int icl_calc_cdclk(int min_cdclk, unsigned int ref) 1782 + { 1783 + int ranges_24[] = { 312000, 552000, 648000 }; 1784 + int ranges_19_38[] = { 307200, 556800, 652800 }; 1785 + int *ranges; 1786 + 1787 + switch (ref) { 1788 + default: 1789 + MISSING_CASE(ref); 1790 + case 24000: 1791 + ranges = ranges_24; 1792 + break; 1793 + case 19200: 1794 + case 38400: 1795 + ranges = ranges_19_38; 1796 + break; 1797 + } 1798 + 1799 + if (min_cdclk > ranges[1]) 1800 + return ranges[2]; 1801 + else if (min_cdclk > ranges[0]) 1802 + return ranges[1]; 1803 + else 1804 + return ranges[0]; 1805 + } 1806 + 1807 + static int icl_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk) 1808 + { 1809 + int ratio; 1810 + 1811 + if (cdclk == dev_priv->cdclk.hw.bypass) 1812 + return 0; 1813 + 1814 + switch (cdclk) { 1815 + default: 1816 + MISSING_CASE(cdclk); 1817 + case 307200: 1818 + case 556800: 1819 + case 652800: 1820 + WARN_ON(dev_priv->cdclk.hw.ref != 19200 && 1821 + dev_priv->cdclk.hw.ref != 38400); 1822 + break; 1823 + case 312000: 1824 + case 552000: 1825 + case 648000: 1826 + WARN_ON(dev_priv->cdclk.hw.ref != 24000); 1827 + } 1828 + 1829 + ratio = cdclk / (dev_priv->cdclk.hw.ref / 2); 1830 + 1831 + return dev_priv->cdclk.hw.ref * ratio; 1832 + } 1833 + 1834 + static void icl_set_cdclk(struct drm_i915_private *dev_priv, 1835 + const struct intel_cdclk_state *cdclk_state) 1836 + { 1837 + unsigned int cdclk = cdclk_state->cdclk; 1838 + unsigned int vco = cdclk_state->vco; 1839 + int ret; 1840 + 1841 + mutex_lock(&dev_priv->pcu_lock); 1842 + ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL, 1843 + SKL_CDCLK_PREPARE_FOR_CHANGE, 1844 + SKL_CDCLK_READY_FOR_CHANGE, 1845 + SKL_CDCLK_READY_FOR_CHANGE, 3); 1846 + mutex_unlock(&dev_priv->pcu_lock); 1847 + if (ret) { 1848 + DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n", 1849 + ret); 1850 + return; 1851 + } 1852 + 1853 + if 
(dev_priv->cdclk.hw.vco != 0 && 1854 + dev_priv->cdclk.hw.vco != vco) 1855 + cnl_cdclk_pll_disable(dev_priv); 1856 + 1857 + if (dev_priv->cdclk.hw.vco != vco) 1858 + cnl_cdclk_pll_enable(dev_priv, vco); 1859 + 1860 + I915_WRITE(CDCLK_CTL, ICL_CDCLK_CD2X_PIPE_NONE | 1861 + skl_cdclk_decimal(cdclk)); 1862 + 1863 + mutex_lock(&dev_priv->pcu_lock); 1864 + /* TODO: add proper DVFS support. */ 1865 + sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, 2); 1866 + mutex_unlock(&dev_priv->pcu_lock); 1867 + 1868 + intel_update_cdclk(dev_priv); 1869 + } 1870 + 1871 + static void icl_get_cdclk(struct drm_i915_private *dev_priv, 1872 + struct intel_cdclk_state *cdclk_state) 1873 + { 1874 + u32 val; 1875 + 1876 + cdclk_state->bypass = 50000; 1877 + 1878 + val = I915_READ(SKL_DSSM); 1879 + switch (val & ICL_DSSM_CDCLK_PLL_REFCLK_MASK) { 1880 + default: 1881 + MISSING_CASE(val); 1882 + case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz: 1883 + cdclk_state->ref = 24000; 1884 + break; 1885 + case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz: 1886 + cdclk_state->ref = 19200; 1887 + break; 1888 + case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz: 1889 + cdclk_state->ref = 38400; 1890 + break; 1891 + } 1892 + 1893 + val = I915_READ(BXT_DE_PLL_ENABLE); 1894 + if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 || 1895 + (val & BXT_DE_PLL_LOCK) == 0) { 1896 + /* 1897 + * CDCLK PLL is disabled, the VCO/ratio doesn't matter, but 1898 + * setting it to zero is a way to signal that. 1899 + */ 1900 + cdclk_state->vco = 0; 1901 + cdclk_state->cdclk = cdclk_state->bypass; 1902 + return; 1903 + } 1904 + 1905 + cdclk_state->vco = (val & BXT_DE_PLL_RATIO_MASK) * cdclk_state->ref; 1906 + 1907 + val = I915_READ(CDCLK_CTL); 1908 + WARN_ON((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0); 1909 + 1910 + cdclk_state->cdclk = cdclk_state->vco / 2; 1911 + } 1912 + 1913 + /** 1914 + * icl_init_cdclk - Initialize CDCLK on ICL 1915 + * @dev_priv: i915 device 1916 + * 1917 + * Initialize CDCLK for ICL. 
This consists mainly of initializing 1918 + * dev_priv->cdclk.hw and sanitizing the state of the hardware if needed. This 1919 + * is generally done only during the display core initialization sequence, after 1920 + * which the DMC will take care of turning CDCLK off/on as needed. 1921 + */ 1922 + void icl_init_cdclk(struct drm_i915_private *dev_priv) 1923 + { 1924 + struct intel_cdclk_state sanitized_state; 1925 + u32 val; 1926 + 1927 + /* This sets dev_priv->cdclk.hw. */ 1928 + intel_update_cdclk(dev_priv); 1929 + intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK"); 1930 + 1931 + /* This means CDCLK disabled. */ 1932 + if (dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) 1933 + goto sanitize; 1934 + 1935 + val = I915_READ(CDCLK_CTL); 1936 + 1937 + if ((val & BXT_CDCLK_CD2X_DIV_SEL_MASK) != 0) 1938 + goto sanitize; 1939 + 1940 + if ((val & CDCLK_FREQ_DECIMAL_MASK) != 1941 + skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk)) 1942 + goto sanitize; 1943 + 1944 + return; 1945 + 1946 + sanitize: 1947 + DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n"); 1948 + 1949 + sanitized_state.ref = dev_priv->cdclk.hw.ref; 1950 + sanitized_state.cdclk = icl_calc_cdclk(0, sanitized_state.ref); 1951 + sanitized_state.vco = icl_calc_cdclk_pll_vco(dev_priv, 1952 + sanitized_state.cdclk); 1953 + 1954 + icl_set_cdclk(dev_priv, &sanitized_state); 1955 + } 1956 + 1957 + /** 1958 + * icl_uninit_cdclk - Uninitialize CDCLK on ICL 1959 + * @dev_priv: i915 device 1960 + * 1961 + * Uninitialize CDCLK for ICL. This is done only during the display core 1962 + * uninitialization sequence. 
1963 + */ 1964 + void icl_uninit_cdclk(struct drm_i915_private *dev_priv) 1965 + { 1966 + struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw; 1967 + 1968 + cdclk_state.cdclk = cdclk_state.bypass; 1969 + cdclk_state.vco = 0; 1970 + 1971 + icl_set_cdclk(dev_priv, &cdclk_state); 1972 + } 1973 + 1781 1974 /** 1782 1975 * cnl_init_cdclk - Initialize CDCLK on CNL 1783 1976 * @dev_priv: i915 device ··· 2409 2216 return 0; 2410 2217 } 2411 2218 2219 + static int icl_modeset_calc_cdclk(struct drm_atomic_state *state) 2220 + { 2221 + struct drm_i915_private *dev_priv = to_i915(state->dev); 2222 + struct intel_atomic_state *intel_state = to_intel_atomic_state(state); 2223 + unsigned int ref = intel_state->cdclk.logical.ref; 2224 + int min_cdclk, cdclk, vco; 2225 + 2226 + min_cdclk = intel_compute_min_cdclk(state); 2227 + if (min_cdclk < 0) 2228 + return min_cdclk; 2229 + 2230 + cdclk = icl_calc_cdclk(min_cdclk, ref); 2231 + vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk); 2232 + 2233 + intel_state->cdclk.logical.vco = vco; 2234 + intel_state->cdclk.logical.cdclk = cdclk; 2235 + 2236 + if (!intel_state->active_crtcs) { 2237 + cdclk = icl_calc_cdclk(0, ref); 2238 + vco = icl_calc_cdclk_pll_vco(dev_priv, cdclk); 2239 + 2240 + intel_state->cdclk.actual.vco = vco; 2241 + intel_state->cdclk.actual.cdclk = cdclk; 2242 + } else { 2243 + intel_state->cdclk.actual = intel_state->cdclk.logical; 2244 + } 2245 + 2246 + return 0; 2247 + } 2248 + 2412 2249 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv) 2413 2250 { 2414 2251 int max_cdclk_freq = dev_priv->max_cdclk_freq; ··· 2456 2233 return max_cdclk_freq; 2457 2234 else if (IS_CHERRYVIEW(dev_priv)) 2458 2235 return max_cdclk_freq*95/100; 2459 - else if (INTEL_INFO(dev_priv)->gen < 4) 2236 + else if (INTEL_GEN(dev_priv) < 4) 2460 2237 return 2*max_cdclk_freq*90/100; 2461 2238 else 2462 2239 return max_cdclk_freq*90/100; ··· 2472 2249 */ 2473 2250 void intel_update_max_cdclk(struct drm_i915_private *dev_priv) 
2474 2251 { 2475 - if (IS_CANNONLAKE(dev_priv)) { 2252 + if (IS_ICELAKE(dev_priv)) { 2253 + if (dev_priv->cdclk.hw.ref == 24000) 2254 + dev_priv->max_cdclk_freq = 648000; 2255 + else 2256 + dev_priv->max_cdclk_freq = 652800; 2257 + } else if (IS_CANNONLAKE(dev_priv)) { 2476 2258 dev_priv->max_cdclk_freq = 528000; 2477 2259 } else if (IS_GEN9_BC(dev_priv)) { 2478 2260 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK; ··· 2701 2473 dev_priv->display.set_cdclk = cnl_set_cdclk; 2702 2474 dev_priv->display.modeset_calc_cdclk = 2703 2475 cnl_modeset_calc_cdclk; 2476 + } else if (IS_ICELAKE(dev_priv)) { 2477 + dev_priv->display.set_cdclk = icl_set_cdclk; 2478 + dev_priv->display.modeset_calc_cdclk = icl_modeset_calc_cdclk; 2704 2479 } 2705 2480 2706 - if (IS_CANNONLAKE(dev_priv)) 2481 + if (IS_ICELAKE(dev_priv)) 2482 + dev_priv->display.get_cdclk = icl_get_cdclk; 2483 + else if (IS_CANNONLAKE(dev_priv)) 2707 2484 dev_priv->display.get_cdclk = cnl_get_cdclk; 2708 2485 else if (IS_GEN9_BC(dev_priv)) 2709 2486 dev_priv->display.get_cdclk = skl_get_cdclk;
+3 -10
drivers/gpu/drm/i915/intel_crt.c
··· 474 474 return ret; 475 475 } 476 476 477 - /** 478 - * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect CRT presence. 479 - * 480 - * Not for i915G/i915GM 481 - * 482 - * \return true if CRT is connected. 483 - * \return false if CRT is disconnected. 484 - */ 485 477 static bool intel_crt_detect_hotplug(struct drm_connector *connector) 486 478 { 487 479 struct drm_device *dev = connector->dev; ··· 799 807 else 800 808 status = connector_status_unknown; 801 809 intel_release_load_detect_pipe(connector, &tmp, ctx); 802 - } else if (ret == 0) 810 + } else if (ret == 0) { 803 811 status = connector_status_unknown; 804 - else if (ret < 0) 812 + } else { 805 813 status = ret; 814 + } 806 815 807 816 out: 808 817 intel_display_power_put(dev_priv, intel_encoder->power_domain);
+1 -1
drivers/gpu/drm/i915/intel_ddi.c
··· 2152 2152 2153 2153 I915_WRITE(DPLL_CTRL2, val); 2154 2154 2155 - } else if (INTEL_INFO(dev_priv)->gen < 9) { 2155 + } else if (INTEL_GEN(dev_priv) < 9) { 2156 2156 I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll)); 2157 2157 } 2158 2158
+6
drivers/gpu/drm/i915/intel_device_info.c
··· 586 586 /* Initialize command stream timestamp frequency */ 587 587 info->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv); 588 588 } 589 + 590 + void intel_driver_caps_print(const struct intel_driver_caps *caps, 591 + struct drm_printer *p) 592 + { 593 + drm_printf(p, "scheduler: %x\n", caps->scheduler); 594 + }
+7
drivers/gpu/drm/i915/intel_device_info.h
··· 167 167 } color; 168 168 }; 169 169 170 + struct intel_driver_caps { 171 + unsigned int scheduler; 172 + }; 173 + 170 174 static inline unsigned int sseu_subslice_total(const struct sseu_dev_info *sseu) 171 175 { 172 176 return hweight8(sseu->slice_mask) * hweight8(sseu->subslice_mask); ··· 185 181 struct drm_printer *p); 186 182 void intel_device_info_dump_runtime(const struct intel_device_info *info, 187 183 struct drm_printer *p); 184 + 185 + void intel_driver_caps_print(const struct intel_driver_caps *caps, 186 + struct drm_printer *p); 188 187 189 188 #endif
+73 -29
drivers/gpu/drm/i915/intel_display.c
··· 558 558 } 559 559 560 560 #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) 561 - /** 561 + 562 + /* 562 563 * Returns whether the given set of divisors are valid for a given refclk with 563 564 * the given connectors. 564 565 */ 565 - 566 566 static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv, 567 567 const struct intel_limit *limit, 568 568 const struct dpll *clock) ··· 2029 2029 2030 2030 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv) 2031 2031 { 2032 - if (INTEL_INFO(dev_priv)->gen >= 9) 2032 + if (INTEL_GEN(dev_priv) >= 9) 2033 2033 return 256 * 1024; 2034 2034 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) || 2035 2035 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) 2036 2036 return 128 * 1024; 2037 - else if (INTEL_INFO(dev_priv)->gen >= 4) 2037 + else if (INTEL_GEN(dev_priv) >= 4) 2038 2038 return 4 * 1024; 2039 2039 else 2040 2040 return 0; ··· 2068 2068 } 2069 2069 2070 2070 struct i915_vma * 2071 - intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation) 2071 + intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 2072 + unsigned int rotation, 2073 + unsigned long *out_flags) 2072 2074 { 2073 2075 struct drm_device *dev = fb->dev; 2074 2076 struct drm_i915_private *dev_priv = to_i915(dev); 2075 2077 struct drm_i915_gem_object *obj = intel_fb_obj(fb); 2076 2078 struct i915_ggtt_view view; 2077 2079 struct i915_vma *vma; 2080 + unsigned int pinctl; 2078 2081 u32 alignment; 2079 2082 2080 2083 WARN_ON(!mutex_is_locked(&dev->struct_mutex)); ··· 2105 2102 2106 2103 atomic_inc(&dev_priv->gpu_error.pending_fb_pin); 2107 2104 2108 - vma = i915_gem_object_pin_to_display_plane(obj, alignment, &view); 2105 + pinctl = 0; 2106 + 2107 + /* Valleyview is definitely limited to scanning out the first 2108 + * 512MiB. Lets presume this behaviour was inherited from the 2109 + * g4x display engine and that all earlier gen are similarly 2110 + * limited. 
Testing suggests that it is a little more 2111 + * complicated than this. For example, Cherryview appears quite 2112 + * happy to scanout from anywhere within its global aperture. 2113 + */ 2114 + if (HAS_GMCH_DISPLAY(dev_priv)) 2115 + pinctl |= PIN_MAPPABLE; 2116 + 2117 + vma = i915_gem_object_pin_to_display_plane(obj, 2118 + alignment, &view, pinctl); 2109 2119 if (IS_ERR(vma)) 2110 2120 goto err; 2111 2121 ··· 2139 2123 * something and try to run the system in a "less than optimal" 2140 2124 * mode that matches the user configuration. 2141 2125 */ 2142 - i915_vma_pin_fence(vma); 2126 + if (i915_vma_pin_fence(vma) == 0 && vma->fence) 2127 + *out_flags |= PLANE_HAS_FENCE; 2143 2128 } 2144 2129 2145 2130 i915_vma_get(vma); ··· 2151 2134 return vma; 2152 2135 } 2153 2136 2154 - void intel_unpin_fb_vma(struct i915_vma *vma) 2137 + void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) 2155 2138 { 2156 2139 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex); 2157 2140 2158 - i915_vma_unpin_fence(vma); 2141 + if (flags & PLANE_HAS_FENCE) 2142 + i915_vma_unpin_fence(vma); 2159 2143 i915_gem_object_unpin_from_display_plane(vma); 2160 2144 i915_vma_put(vma); 2161 2145 } ··· 2826 2808 valid_fb: 2827 2809 mutex_lock(&dev->struct_mutex); 2828 2810 intel_state->vma = 2829 - intel_pin_and_fence_fb_obj(fb, primary->state->rotation); 2811 + intel_pin_and_fence_fb_obj(fb, 2812 + primary->state->rotation, 2813 + &intel_state->flags); 2830 2814 mutex_unlock(&dev->struct_mutex); 2831 2815 if (IS_ERR(intel_state->vma)) { 2832 2816 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n", ··· 3183 3163 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) 3184 3164 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; 3185 3165 3186 - if (INTEL_GEN(dev_priv) < 4) 3166 + if (INTEL_GEN(dev_priv) < 5) 3187 3167 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); 3188 3168 3189 3169 switch (fb->format->format) { ··· 4776 4756 4777 4757 /** 4778 4758 * skl_update_scaler_plane - Stages update to scaler 
state for a given plane. 4779 - * 4780 - * @state: crtc's scaler state 4759 + * @crtc_state: crtc's scaler state 4781 4760 * @plane_state: atomic plane state to update 4782 4761 * 4783 4762 * Return ··· 4973 4954 /** 4974 4955 * intel_post_enable_primary - Perform operations after enabling primary plane 4975 4956 * @crtc: the CRTC whose primary plane was just enabled 4957 + * @new_crtc_state: the enabling state 4976 4958 * 4977 4959 * Performs potentially sleeping operations that must be done after the primary 4978 4960 * plane is enabled, such as updating FBC and IPS. Note that this may be ··· 5438 5418 I915_WRITE(CLKGATE_DIS_PSL(pipe), val); 5439 5419 } 5440 5420 5421 + static void icl_pipe_mbus_enable(struct intel_crtc *crtc) 5422 + { 5423 + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 5424 + enum pipe pipe = crtc->pipe; 5425 + uint32_t val; 5426 + 5427 + val = MBUS_DBOX_BW_CREDIT(1) | MBUS_DBOX_A_CREDIT(2); 5428 + 5429 + /* Program B credit equally to all pipes */ 5430 + val |= MBUS_DBOX_B_CREDIT(24 / INTEL_INFO(dev_priv)->num_pipes); 5431 + 5432 + I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val); 5433 + } 5434 + 5441 5435 static void haswell_crtc_enable(struct intel_crtc_state *pipe_config, 5442 5436 struct drm_atomic_state *old_state) 5443 5437 { ··· 5528 5494 5529 5495 if (dev_priv->display.initial_watermarks != NULL) 5530 5496 dev_priv->display.initial_watermarks(old_intel_state, pipe_config); 5497 + 5498 + if (INTEL_GEN(dev_priv) >= 11) 5499 + icl_pipe_mbus_enable(intel_crtc); 5531 5500 5532 5501 /* XXX: Do the pipe assertions at the right place for BXT DSI. 
*/ 5533 5502 if (!transcoder_is_dsi(cpu_transcoder)) ··· 6344 6307 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); 6345 6308 6346 6309 /* GDG double wide on either pipe, otherwise pipe A only */ 6347 - return INTEL_INFO(dev_priv)->gen < 4 && 6310 + return INTEL_GEN(dev_priv) < 4 && 6348 6311 (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); 6349 6312 } 6350 6313 ··· 8231 8194 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 8232 8195 struct intel_crtc_state *config = intel_crtc->config; 8233 8196 8234 - if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) { 8197 + if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) { 8235 8198 u32 val = 0; 8236 8199 8237 8200 switch (intel_crtc->config->pipe_bpp) { ··· 9574 9537 if (HAS_DDI(dev_priv)) 9575 9538 cntl |= CURSOR_PIPE_CSC_ENABLE; 9576 9539 9577 - cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 9540 + if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) 9541 + cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); 9578 9542 9579 9543 switch (plane_state->base.crtc_w) { 9580 9544 case 64: ··· 10740 10702 struct drm_connector_list_iter conn_iter; 10741 10703 unsigned int used_ports = 0; 10742 10704 unsigned int used_mst_ports = 0; 10705 + bool ret = true; 10743 10706 10744 10707 /* 10745 10708 * Walk the connector list instead of the encoder ··· 10775 10736 10776 10737 /* the same port mustn't appear more than once */ 10777 10738 if (used_ports & port_mask) 10778 - return false; 10739 + ret = false; 10779 10740 10780 10741 used_ports |= port_mask; 10781 10742 break; ··· 10793 10754 if (used_ports & used_mst_ports) 10794 10755 return false; 10795 10756 10796 - return true; 10757 + return ret; 10797 10758 } 10798 10759 10799 10760 static void ··· 12114 12075 struct drm_device *dev = crtc->base.dev; 12115 12076 12116 12077 if (!dev->max_vblank_count) 12117 - return drm_crtc_accurate_vblank_count(&crtc->base); 12078 + return (u32)drm_crtc_accurate_vblank_count(&crtc->base); 12118 12079 12119 12080 return 
dev->driver->get_vblank_counter(dev, crtc->pipe); 12120 12081 } ··· 12655 12616 /** 12656 12617 * intel_prepare_plane_fb - Prepare fb for usage on plane 12657 12618 * @plane: drm plane to prepare for 12658 - * @fb: framebuffer to prepare for presentation 12619 + * @new_state: the plane state being prepared 12659 12620 * 12660 12621 * Prepares a framebuffer for usage on a display plane. Generally this 12661 12622 * involves pinning the underlying object and updating the frontbuffer tracking ··· 12734 12695 } else { 12735 12696 struct i915_vma *vma; 12736 12697 12737 - vma = intel_pin_and_fence_fb_obj(fb, new_state->rotation); 12698 + vma = intel_pin_and_fence_fb_obj(fb, 12699 + new_state->rotation, 12700 + &to_intel_plane_state(new_state)->flags); 12738 12701 if (!IS_ERR(vma)) 12739 12702 to_intel_plane_state(new_state)->vma = vma; 12740 12703 else ··· 12775 12734 /** 12776 12735 * intel_cleanup_plane_fb - Cleans up an fb after plane use 12777 12736 * @plane: drm plane to clean up for 12778 - * @fb: old framebuffer that was on plane 12737 + * @old_state: the state from the previous modeset 12779 12738 * 12780 12739 * Cleans up a framebuffer that has just been removed from a plane. 
12781 12740 * ··· 12791 12750 vma = fetch_and_zero(&to_intel_plane_state(old_state)->vma); 12792 12751 if (vma) { 12793 12752 mutex_lock(&plane->dev->struct_mutex); 12794 - intel_unpin_fb_vma(vma); 12753 + intel_unpin_fb_vma(vma, to_intel_plane_state(old_state)->flags); 12795 12754 mutex_unlock(&plane->dev->struct_mutex); 12796 12755 } 12797 12756 } ··· 13152 13111 goto out_unlock; 13153 13112 } 13154 13113 } else { 13155 - vma = intel_pin_and_fence_fb_obj(fb, new_plane_state->rotation); 13114 + vma = intel_pin_and_fence_fb_obj(fb, 13115 + new_plane_state->rotation, 13116 + &to_intel_plane_state(new_plane_state)->flags); 13156 13117 if (IS_ERR(vma)) { 13157 13118 DRM_DEBUG_KMS("failed to pin object\n"); 13158 13119 ··· 13185 13142 13186 13143 old_vma = fetch_and_zero(&to_intel_plane_state(old_plane_state)->vma); 13187 13144 if (old_vma) 13188 - intel_unpin_fb_vma(old_vma); 13145 + intel_unpin_fb_vma(old_vma, 13146 + to_intel_plane_state(old_plane_state)->flags); 13189 13147 13190 13148 out_unlock: 13191 13149 mutex_unlock(&dev_priv->drm.struct_mutex); ··· 13542 13498 return to_intel_crtc(connector->base.state->crtc)->pipe; 13543 13499 } 13544 13500 13545 - int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 13546 - struct drm_file *file) 13501 + int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 13502 + struct drm_file *file) 13547 13503 { 13548 13504 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; 13549 13505 struct drm_crtc *drmmode_crtc; ··· 13991 13947 * gen2/3 display engine uses the fence if present, 13992 13948 * so the tiling mode must match the fb modifier exactly. 
13993 13949 */ 13994 - if (INTEL_INFO(dev_priv)->gen < 4 && 13950 + if (INTEL_GEN(dev_priv) < 4 && 13995 13951 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) { 13996 13952 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n"); 13997 13953 goto err; ··· 14206 14162 { 14207 14163 intel_init_cdclk_hooks(dev_priv); 14208 14164 14209 - if (INTEL_INFO(dev_priv)->gen >= 9) { 14165 + if (INTEL_GEN(dev_priv) >= 9) { 14210 14166 dev_priv->display.get_pipe_config = haswell_get_pipe_config; 14211 14167 dev_priv->display.get_initial_plane_config = 14212 14168 skylake_get_initial_plane_config;
+3 -3
drivers/gpu/drm/i915/intel_dp.c
··· 258 258 if (IS_CNL_WITH_PORT_F(dev_priv)) 259 259 return 810000; 260 260 261 - /* For other SKUs, max rate on ports A and B is 5.4G */ 261 + /* For other SKUs, max rate on ports A and D is 5.4G */ 262 262 if (port == PORT_A || port == PORT_D) 263 263 return 540000; 264 264 ··· 1467 1467 static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv, 1468 1468 enum port port) 1469 1469 { 1470 - if (INTEL_INFO(dev_priv)->gen >= 9) 1470 + if (INTEL_GEN(dev_priv) >= 9) 1471 1471 return skl_aux_ctl_reg(dev_priv, port); 1472 1472 else if (HAS_PCH_SPLIT(dev_priv)) 1473 1473 return ilk_aux_ctl_reg(dev_priv, port); ··· 1478 1478 static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv, 1479 1479 enum port port, int index) 1480 1480 { 1481 - if (INTEL_INFO(dev_priv)->gen >= 9) 1481 + if (INTEL_GEN(dev_priv) >= 9) 1482 1482 return skl_aux_data_reg(dev_priv, port, index); 1483 1483 else if (HAS_PCH_SPLIT(dev_priv)) 1484 1484 return ilk_aux_data_reg(dev_priv, port, index);
+1 -1
drivers/gpu/drm/i915/intel_dpio_phy.c
··· 147 147 */ 148 148 struct { 149 149 /** 150 - * @port: which port maps to this channel. 150 + * @channel.port: which port maps to this channel. 151 151 */ 152 152 enum port port; 153 153 } channel[2];
+13 -6
drivers/gpu/drm/i915/intel_drv.h
··· 204 204 struct drm_fb_helper helper; 205 205 struct intel_framebuffer *fb; 206 206 struct i915_vma *vma; 207 + unsigned long vma_flags; 207 208 async_cookie_t cookie; 208 209 int preferred_bpp; 209 210 }; ··· 491 490 struct intel_plane_state { 492 491 struct drm_plane_state base; 493 492 struct i915_vma *vma; 493 + unsigned long flags; 494 + #define PLANE_HAS_FENCE BIT(0) 494 495 495 496 struct { 496 497 u32 offset; ··· 1410 1407 void cnl_uninit_cdclk(struct drm_i915_private *dev_priv); 1411 1408 void bxt_init_cdclk(struct drm_i915_private *dev_priv); 1412 1409 void bxt_uninit_cdclk(struct drm_i915_private *dev_priv); 1410 + void icl_init_cdclk(struct drm_i915_private *dev_priv); 1411 + void icl_uninit_cdclk(struct drm_i915_private *dev_priv); 1413 1412 void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv); 1414 1413 void intel_update_max_cdclk(struct drm_i915_private *dev_priv); 1415 1414 void intel_update_cdclk(struct drm_i915_private *dev_priv); ··· 1460 1455 intel_encoder_current_mode(struct intel_encoder *encoder); 1461 1456 1462 1457 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector); 1463 - int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 1464 - struct drm_file *file_priv); 1458 + int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data, 1459 + struct drm_file *file_priv); 1465 1460 enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv, 1466 1461 enum pipe pipe); 1467 1462 static inline bool ··· 1506 1501 struct intel_load_detect_pipe *old, 1507 1502 struct drm_modeset_acquire_ctx *ctx); 1508 1503 struct i915_vma * 1509 - intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, unsigned int rotation); 1510 - void intel_unpin_fb_vma(struct i915_vma *vma); 1504 + intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, 1505 + unsigned int rotation, 1506 + unsigned long *out_flags); 1507 + void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags); 1511 1508 
struct drm_framebuffer * 1512 1509 intel_framebuffer_create(struct drm_i915_gem_object *obj, 1513 1510 struct drm_mode_fb_cmd2 *mode_cmd); ··· 2025 2018 int usecs); 2026 2019 struct intel_plane *intel_sprite_plane_create(struct drm_i915_private *dev_priv, 2027 2020 enum pipe pipe, int plane); 2028 - int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 2029 - struct drm_file *file_priv); 2021 + int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, 2022 + struct drm_file *file_priv); 2030 2023 void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state); 2031 2024 void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state); 2032 2025 void skl_update_plane(struct intel_plane *plane,
+15 -13
drivers/gpu/drm/i915/intel_dvo.c
··· 245 245 intel_dvo->attached_connector->panel.fixed_mode; 246 246 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode; 247 247 248 - /* If we have timings from the BIOS for the panel, put them in 248 + /* 249 + * If we have timings from the BIOS for the panel, put them in 249 250 * to the adjusted mode. The CRTC will be set up for this mode, 250 251 * with the panel scaling set up to source from the H/VDisplay 251 252 * of the original mode. ··· 294 293 I915_WRITE(dvo_reg, dvo_val); 295 294 } 296 295 297 - /** 298 - * Detect the output connection on our DVO device. 299 - * 300 - * Unimplemented. 301 - */ 302 296 static enum drm_connector_status 303 297 intel_dvo_detect(struct drm_connector *connector, bool force) 304 298 { ··· 309 313 const struct drm_display_mode *fixed_mode = 310 314 to_intel_connector(connector)->panel.fixed_mode; 311 315 312 - /* We should probably have an i2c driver get_modes function for those 316 + /* 317 + * We should probably have an i2c driver get_modes function for those 313 318 * devices which will have a fixed set of modes determined by the chip 314 319 * (TV-out, for example), but for now with just TMDS and LVDS, 315 320 * that's not the case. ··· 368 371 .destroy = intel_dvo_enc_destroy, 369 372 }; 370 373 371 - /** 374 + /* 372 375 * Attempts to get a fixed panel timing for LVDS (currently only the i830). 373 376 * 374 377 * Other chips with DVO LVDS will need to extend this to deal with the LVDS ··· 440 443 uint32_t dpll[I915_MAX_PIPES]; 441 444 enum port port; 442 445 443 - /* Allow the I2C driver info to specify the GPIO to be used in 446 + /* 447 + * Allow the I2C driver info to specify the GPIO to be used in 444 448 * special cases, but otherwise default to what's defined 445 449 * in the spec. 446 450 */ ··· 452 454 else 453 455 gpio = GMBUS_PIN_DPB; 454 456 455 - /* Set up the I2C bus necessary for the chip we're probing. 457 + /* 458 + * Set up the I2C bus necessary for the chip we're probing. 
456 459 * It appears that everything is on GPIOE except for panels 457 460 * on i830 laptops, which are on GPIOB (DVOA). 458 461 */ ··· 461 462 462 463 intel_dvo->dev = *dvo; 463 464 464 - /* GMBUS NAK handling seems to be unstable, hence let the 465 + /* 466 + * GMBUS NAK handling seems to be unstable, hence let the 465 467 * transmitter detection run in bit banging mode for now. 466 468 */ 467 469 intel_gmbus_force_bit(i2c, true); 468 470 469 - /* ns2501 requires the DVO 2x clock before it will 471 + /* 472 + * ns2501 requires the DVO 2x clock before it will 470 473 * respond to i2c accesses, so make sure we have 471 474 * have the clock enabled before we attempt to 472 475 * initialize the device. ··· 526 525 527 526 intel_connector_attach_encoder(intel_connector, intel_encoder); 528 527 if (dvo->type == INTEL_DVO_CHIP_LVDS) { 529 - /* For our LVDS chipsets, we should hopefully be able 528 + /* 529 + * For our LVDS chipsets, we should hopefully be able 530 530 * to dig the fixed panel mode out of the BIOS data. 531 531 * However, it's in a different format from the BIOS 532 532 * data on chipsets with integrated LVDS (stored in AIM
+97 -79
drivers/gpu/drm/i915/intel_engine_cs.c
··· 631 631 * Similarly the preempt context must always be available so that 632 632 * we can interrupt the engine at any time. 633 633 */ 634 - if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) { 634 + if (engine->i915->preempt_context) { 635 635 ring = engine->context_pin(engine, 636 636 engine->i915->preempt_context); 637 637 if (IS_ERR(ring)) { ··· 656 656 err_breadcrumbs: 657 657 intel_engine_fini_breadcrumbs(engine); 658 658 err_unpin_preempt: 659 - if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) 659 + if (engine->i915->preempt_context) 660 660 engine->context_unpin(engine, engine->i915->preempt_context); 661 661 err_unpin_kernel: 662 662 engine->context_unpin(engine, engine->i915->kernel_context); ··· 686 686 if (engine->default_state) 687 687 i915_gem_object_put(engine->default_state); 688 688 689 - if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) 689 + if (engine->i915->preempt_context) 690 690 engine->context_unpin(engine, engine->i915->preempt_context); 691 691 engine->context_unpin(engine, engine->i915->kernel_context); 692 692 } 693 693 694 - u64 intel_engine_get_active_head(struct intel_engine_cs *engine) 694 + u64 intel_engine_get_active_head(const struct intel_engine_cs *engine) 695 695 { 696 696 struct drm_i915_private *dev_priv = engine->i915; 697 697 u64 acthd; ··· 707 707 return acthd; 708 708 } 709 709 710 - u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine) 710 + u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine) 711 711 { 712 712 struct drm_i915_private *dev_priv = engine->i915; 713 713 u64 bbaddr; ··· 1464 1464 struct drm_i915_private *dev_priv = engine->i915; 1465 1465 bool idle = true; 1466 1466 1467 - intel_runtime_pm_get(dev_priv); 1467 + /* If the whole device is asleep, the engine must be idle */ 1468 + if (!intel_runtime_pm_get_if_in_use(dev_priv)) 1469 + return true; 1468 1470 1469 1471 /* First check that no commands are left in the ring */ 1470 1472 if ((I915_READ_HEAD(engine) & HEAD_ADDR) != ··· 
1504 1502 1505 1503 if (I915_SELFTEST_ONLY(engine->breadcrumbs.mock)) 1506 1504 return true; 1507 - 1508 - /* Interrupt/tasklet pending? */ 1509 - if (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) 1510 - return false; 1511 1505 1512 1506 /* Waiting to drain ELSP? */ 1513 1507 if (READ_ONCE(engine->execlists.active)) ··· 1705 1707 } 1706 1708 } 1707 1709 1708 - void intel_engine_dump(struct intel_engine_cs *engine, 1709 - struct drm_printer *m, 1710 - const char *header, ...) 1710 + static void intel_engine_print_registers(const struct intel_engine_cs *engine, 1711 + struct drm_printer *m) 1711 1712 { 1712 - struct intel_breadcrumbs * const b = &engine->breadcrumbs; 1713 - const struct intel_engine_execlists * const execlists = &engine->execlists; 1714 - struct i915_gpu_error * const error = &engine->i915->gpu_error; 1715 1713 struct drm_i915_private *dev_priv = engine->i915; 1716 - struct drm_i915_gem_request *rq; 1717 - struct rb_node *rb; 1718 - char hdr[80]; 1714 + const struct intel_engine_execlists * const execlists = 1715 + &engine->execlists; 1719 1716 u64 addr; 1720 1717 1721 - if (header) { 1722 - va_list ap; 1723 - 1724 - va_start(ap, header); 1725 - drm_vprintf(m, header, &ap); 1726 - va_end(ap); 1727 - } 1728 - 1729 - if (i915_terminally_wedged(&engine->i915->gpu_error)) 1730 - drm_printf(m, "*** WEDGED ***\n"); 1731 - 1732 - drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n", 1733 - intel_engine_get_seqno(engine), 1734 - intel_engine_last_submit(engine), 1735 - engine->hangcheck.seqno, 1736 - jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp), 1737 - engine->timeline->inflight_seqnos); 1738 - drm_printf(m, "\tReset count: %d (global %d)\n", 1739 - i915_reset_engine_count(error, engine), 1740 - i915_reset_count(error)); 1741 - 1742 - rcu_read_lock(); 1743 - 1744 - drm_printf(m, "\tRequests:\n"); 1745 - 1746 - rq = list_first_entry(&engine->timeline->requests, 1747 - struct drm_i915_gem_request, link); 1748 
- if (&rq->link != &engine->timeline->requests) 1749 - print_request(m, rq, "\t\tfirst "); 1750 - 1751 - rq = list_last_entry(&engine->timeline->requests, 1752 - struct drm_i915_gem_request, link); 1753 - if (&rq->link != &engine->timeline->requests) 1754 - print_request(m, rq, "\t\tlast "); 1755 - 1756 - rq = i915_gem_find_active_request(engine); 1757 - if (rq) { 1758 - print_request(m, rq, "\t\tactive "); 1759 - drm_printf(m, 1760 - "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n", 1761 - rq->head, rq->postfix, rq->tail, 1762 - rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, 1763 - rq->batch ? lower_32_bits(rq->batch->node.start) : ~0u); 1764 - } 1765 - 1766 - drm_printf(m, "\tRING_START: 0x%08x [0x%08x]\n", 1767 - I915_READ(RING_START(engine->mmio_base)), 1768 - rq ? i915_ggtt_offset(rq->ring->vma) : 0); 1769 - drm_printf(m, "\tRING_HEAD: 0x%08x [0x%08x]\n", 1770 - I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR, 1771 - rq ? rq->ring->head : 0); 1772 - drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n", 1773 - I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR, 1774 - rq ? rq->ring->tail : 0); 1718 + drm_printf(m, "\tRING_START: 0x%08x\n", 1719 + I915_READ(RING_START(engine->mmio_base))); 1720 + drm_printf(m, "\tRING_HEAD: 0x%08x\n", 1721 + I915_READ(RING_HEAD(engine->mmio_base)) & HEAD_ADDR); 1722 + drm_printf(m, "\tRING_TAIL: 0x%08x\n", 1723 + I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR); 1775 1724 drm_printf(m, "\tRING_CTL: 0x%08x%s\n", 1776 1725 I915_READ(RING_CTL(engine->mmio_base)), 1777 1726 I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : ""); ··· 1727 1782 I915_READ(RING_MI_MODE(engine->mmio_base)), 1728 1783 I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? 
" [idle]" : ""); 1729 1784 } 1785 + 1786 + if (INTEL_GEN(dev_priv) >= 6) { 1787 + drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine)); 1788 + } 1789 + 1730 1790 if (HAS_LEGACY_SEMAPHORES(dev_priv)) { 1731 1791 drm_printf(m, "\tSYNC_0: 0x%08x\n", 1732 1792 I915_READ(RING_SYNC_0(engine->mmio_base))); ··· 1741 1791 drm_printf(m, "\tSYNC_2: 0x%08x\n", 1742 1792 I915_READ(RING_SYNC_2(engine->mmio_base))); 1743 1793 } 1744 - 1745 - rcu_read_unlock(); 1746 1794 1747 1795 addr = intel_engine_get_active_head(engine); 1748 1796 drm_printf(m, "\tACTHD: 0x%08x_%08x\n", ··· 1803 1855 1804 1856 rcu_read_lock(); 1805 1857 for (idx = 0; idx < execlists_num_ports(execlists); idx++) { 1858 + struct drm_i915_gem_request *rq; 1806 1859 unsigned int count; 1807 1860 1808 1861 rq = port_unpack(&execlists->port[idx], &count); 1809 1862 if (rq) { 1863 + char hdr[80]; 1864 + 1810 1865 snprintf(hdr, sizeof(hdr), 1811 1866 "\t\tELSP[%d] count=%d, rq: ", 1812 1867 idx, count); ··· 1827 1876 I915_READ(RING_PP_DIR_BASE_READ(engine))); 1828 1877 drm_printf(m, "\tPP_DIR_DCLV: 0x%08x\n", 1829 1878 I915_READ(RING_PP_DIR_DCLV(engine))); 1879 + } 1880 + } 1881 + 1882 + void intel_engine_dump(struct intel_engine_cs *engine, 1883 + struct drm_printer *m, 1884 + const char *header, ...) 
1885 + { 1886 + struct intel_breadcrumbs * const b = &engine->breadcrumbs; 1887 + const struct intel_engine_execlists * const execlists = &engine->execlists; 1888 + struct i915_gpu_error * const error = &engine->i915->gpu_error; 1889 + struct drm_i915_gem_request *rq; 1890 + struct rb_node *rb; 1891 + 1892 + if (header) { 1893 + va_list ap; 1894 + 1895 + va_start(ap, header); 1896 + drm_vprintf(m, header, &ap); 1897 + va_end(ap); 1898 + } 1899 + 1900 + if (i915_terminally_wedged(&engine->i915->gpu_error)) 1901 + drm_printf(m, "*** WEDGED ***\n"); 1902 + 1903 + drm_printf(m, "\tcurrent seqno %x, last %x, hangcheck %x [%d ms], inflight %d\n", 1904 + intel_engine_get_seqno(engine), 1905 + intel_engine_last_submit(engine), 1906 + engine->hangcheck.seqno, 1907 + jiffies_to_msecs(jiffies - engine->hangcheck.action_timestamp), 1908 + engine->timeline->inflight_seqnos); 1909 + drm_printf(m, "\tReset count: %d (global %d)\n", 1910 + i915_reset_engine_count(error, engine), 1911 + i915_reset_count(error)); 1912 + 1913 + rcu_read_lock(); 1914 + 1915 + drm_printf(m, "\tRequests:\n"); 1916 + 1917 + rq = list_first_entry(&engine->timeline->requests, 1918 + struct drm_i915_gem_request, link); 1919 + if (&rq->link != &engine->timeline->requests) 1920 + print_request(m, rq, "\t\tfirst "); 1921 + 1922 + rq = list_last_entry(&engine->timeline->requests, 1923 + struct drm_i915_gem_request, link); 1924 + if (&rq->link != &engine->timeline->requests) 1925 + print_request(m, rq, "\t\tlast "); 1926 + 1927 + rq = i915_gem_find_active_request(engine); 1928 + if (rq) { 1929 + print_request(m, rq, "\t\tactive "); 1930 + drm_printf(m, 1931 + "\t\t[head %04x, postfix %04x, tail %04x, batch 0x%08x_%08x]\n", 1932 + rq->head, rq->postfix, rq->tail, 1933 + rq->batch ? upper_32_bits(rq->batch->node.start) : ~0u, 1934 + rq->batch ? 
lower_32_bits(rq->batch->node.start) : ~0u); 1935 + drm_printf(m, "\t\tring->start: 0x%08x\n", 1936 + i915_ggtt_offset(rq->ring->vma)); 1937 + drm_printf(m, "\t\tring->head: 0x%08x\n", 1938 + rq->ring->head); 1939 + drm_printf(m, "\t\tring->tail: 0x%08x\n", 1940 + rq->ring->tail); 1941 + } 1942 + 1943 + rcu_read_unlock(); 1944 + 1945 + if (intel_runtime_pm_get_if_in_use(engine->i915)) { 1946 + intel_engine_print_registers(engine, m); 1947 + intel_runtime_pm_put(engine->i915); 1948 + } else { 1949 + drm_printf(m, "\tDevice is asleep; skipping register dump\n"); 1830 1950 } 1831 1951 1832 1952 spin_lock_irq(&engine->timeline->lock); ··· 1920 1898 w->tsk->comm, w->tsk->pid, w->seqno); 1921 1899 } 1922 1900 spin_unlock_irq(&b->rb_lock); 1923 - 1924 - if (INTEL_GEN(dev_priv) >= 6) { 1925 - drm_printf(m, "\tRING_IMR: %08x\n", I915_READ_IMR(engine)); 1926 - } 1927 1901 1928 1902 drm_printf(m, "IRQ? 0x%lx (breadcrumbs? %s) (execlists? %s)\n", 1929 1903 engine->irq_posted,
+9 -4
drivers/gpu/drm/i915/intel_fbc.c
··· 183 183 else 184 184 dpfc_ctl |= DPFC_CTL_LIMIT_1X; 185 185 186 - if (params->vma->fence) { 186 + if (params->flags & PLANE_HAS_FENCE) { 187 187 dpfc_ctl |= DPFC_CTL_FENCE_EN | params->vma->fence->id; 188 188 I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset); 189 189 } else { ··· 241 241 break; 242 242 } 243 243 244 - if (params->vma->fence) { 244 + if (params->flags & PLANE_HAS_FENCE) { 245 245 dpfc_ctl |= DPFC_CTL_FENCE_EN; 246 246 if (IS_GEN5(dev_priv)) 247 247 dpfc_ctl |= params->vma->fence->id; ··· 324 324 break; 325 325 } 326 326 327 - if (params->vma->fence) { 327 + if (params->flags & PLANE_HAS_FENCE) { 328 328 dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN; 329 329 I915_WRITE(SNB_DPFC_CTL_SA, 330 330 SNB_CPU_FENCE_ENABLE | ··· 753 753 struct drm_framebuffer *fb = plane_state->base.fb; 754 754 755 755 cache->vma = NULL; 756 + cache->flags = 0; 756 757 757 758 cache->crtc.mode_flags = crtc_state->base.adjusted_mode.flags; 758 759 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) ··· 779 778 cache->fb.stride = fb->pitches[0]; 780 779 781 780 cache->vma = plane_state->vma; 781 + cache->flags = plane_state->flags; 782 + if (WARN_ON(cache->flags & PLANE_HAS_FENCE && !cache->vma->fence)) 783 + cache->flags &= ~PLANE_HAS_FENCE; 782 784 } 783 785 784 786 static bool intel_fbc_can_activate(struct intel_crtc *crtc) ··· 820 816 * so have no fence associated with it) due to aperture constaints 821 817 * at the time of pinning. 822 818 */ 823 - if (!cache->vma->fence) { 819 + if (!(cache->flags & PLANE_HAS_FENCE)) { 824 820 fbc->no_fbc_reason = "framebuffer not tiled or fenced"; 825 821 return false; 826 822 } ··· 901 897 memset(params, 0, sizeof(*params)); 902 898 903 899 params->vma = cache->vma; 900 + params->flags = cache->flags; 904 901 905 902 params->crtc.pipe = crtc->pipe; 906 903 params->crtc.i9xx_plane = to_intel_plane(crtc->base.primary)->i9xx_plane;
+9 -4
drivers/gpu/drm/i915/intel_fbdev.c
··· 48 48 static void intel_fbdev_invalidate(struct intel_fbdev *ifbdev) 49 49 { 50 50 struct drm_i915_gem_object *obj = ifbdev->fb->obj; 51 - unsigned int origin = ifbdev->vma->fence ? ORIGIN_GTT : ORIGIN_CPU; 51 + unsigned int origin = 52 + ifbdev->vma_flags & PLANE_HAS_FENCE ? ORIGIN_GTT : ORIGIN_CPU; 52 53 53 54 intel_fb_obj_invalidate(obj, origin); 54 55 } ··· 178 177 struct fb_info *info; 179 178 struct drm_framebuffer *fb; 180 179 struct i915_vma *vma; 180 + unsigned long flags = 0; 181 181 bool prealloc = false; 182 182 void __iomem *vaddr; 183 183 int ret; ··· 213 211 * This also validates that any existing fb inherited from the 214 212 * BIOS is suitable for own access. 215 213 */ 216 - vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, DRM_MODE_ROTATE_0); 214 + vma = intel_pin_and_fence_fb_obj(&ifbdev->fb->base, 215 + DRM_MODE_ROTATE_0, 216 + &flags); 217 217 if (IS_ERR(vma)) { 218 218 ret = PTR_ERR(vma); 219 219 goto out_unlock; ··· 272 268 DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x\n", 273 269 fb->width, fb->height, i915_ggtt_offset(vma)); 274 270 ifbdev->vma = vma; 271 + ifbdev->vma_flags = flags; 275 272 276 273 intel_runtime_pm_put(dev_priv); 277 274 mutex_unlock(&dev->struct_mutex); ··· 280 275 return 0; 281 276 282 277 out_unpin: 283 - intel_unpin_fb_vma(vma); 278 + intel_unpin_fb_vma(vma, flags); 284 279 out_unlock: 285 280 intel_runtime_pm_put(dev_priv); 286 281 mutex_unlock(&dev->struct_mutex); ··· 518 513 519 514 if (ifbdev->vma) { 520 515 mutex_lock(&ifbdev->helper.dev->struct_mutex); 521 - intel_unpin_fb_vma(ifbdev->vma); 516 + intel_unpin_fb_vma(ifbdev->vma, ifbdev->vma_flags); 522 517 mutex_unlock(&ifbdev->helper.dev->struct_mutex); 523 518 } 524 519
+36 -21
drivers/gpu/drm/i915/intel_guc_submission.c
··· 688 688 goto unlock; 689 689 690 690 if (port_isset(port)) { 691 - if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) { 691 + if (engine->i915->preempt_context) { 692 692 struct guc_preempt_work *preempt_work = 693 693 &engine->i915->guc.preempt_work[engine->id]; 694 694 ··· 747 747 execlists_set_active(execlists, EXECLISTS_ACTIVE_USER); 748 748 guc_submit(engine); 749 749 } 750 + 751 + /* We must always keep the beast fed if we have work piled up */ 752 + GEM_BUG_ON(port_isset(execlists->port) && 753 + !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); 754 + GEM_BUG_ON(execlists->first && !port_isset(execlists->port)); 755 + 750 756 unlock: 751 757 spin_unlock_irq(&engine->timeline->lock); 752 758 } ··· 838 832 if (ret) 839 833 return ret; 840 834 841 - ret = create_doorbell(guc->preempt_client); 842 - if (ret) { 843 - destroy_doorbell(guc->execbuf_client); 844 - return ret; 835 + if (guc->preempt_client) { 836 + ret = create_doorbell(guc->preempt_client); 837 + if (ret) { 838 + destroy_doorbell(guc->execbuf_client); 839 + return ret; 840 + } 845 841 } 846 842 847 843 return 0; ··· 856 848 * Instead of trying (in vain) to communicate with it, let's just 857 849 * cleanup the doorbell HW and our internal state. 
858 850 */ 859 - __destroy_doorbell(guc->preempt_client); 860 - __update_doorbell_desc(guc->preempt_client, GUC_DOORBELL_INVALID); 851 + if (guc->preempt_client) { 852 + __destroy_doorbell(guc->preempt_client); 853 + __update_doorbell_desc(guc->preempt_client, 854 + GUC_DOORBELL_INVALID); 855 + } 861 856 __destroy_doorbell(guc->execbuf_client); 862 857 __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID); 863 858 } ··· 990 979 } 991 980 guc->execbuf_client = client; 992 981 993 - client = guc_client_alloc(dev_priv, 994 - INTEL_INFO(dev_priv)->ring_mask, 995 - GUC_CLIENT_PRIORITY_KMD_HIGH, 996 - dev_priv->preempt_context); 997 - if (IS_ERR(client)) { 998 - DRM_ERROR("Failed to create GuC client for preemption!\n"); 999 - guc_client_free(guc->execbuf_client); 1000 - guc->execbuf_client = NULL; 1001 - return PTR_ERR(client); 982 + if (dev_priv->preempt_context) { 983 + client = guc_client_alloc(dev_priv, 984 + INTEL_INFO(dev_priv)->ring_mask, 985 + GUC_CLIENT_PRIORITY_KMD_HIGH, 986 + dev_priv->preempt_context); 987 + if (IS_ERR(client)) { 988 + DRM_ERROR("Failed to create GuC client for preemption!\n"); 989 + guc_client_free(guc->execbuf_client); 990 + guc->execbuf_client = NULL; 991 + return PTR_ERR(client); 992 + } 993 + guc->preempt_client = client; 1002 994 } 1003 - guc->preempt_client = client; 1004 995 1005 996 return 0; 1006 997 } ··· 1011 998 { 1012 999 struct intel_guc_client *client; 1013 1000 1014 - client = fetch_and_zero(&guc->execbuf_client); 1015 - guc_client_free(client); 1016 - 1017 1001 client = fetch_and_zero(&guc->preempt_client); 1002 + if (client) 1003 + guc_client_free(client); 1004 + 1005 + client = fetch_and_zero(&guc->execbuf_client); 1018 1006 guc_client_free(client); 1019 1007 } 1020 1008 ··· 1174 1160 GEM_BUG_ON(!guc->execbuf_client); 1175 1161 1176 1162 guc_reset_wq(guc->execbuf_client); 1177 - guc_reset_wq(guc->preempt_client); 1163 + if (guc->preempt_client) 1164 + guc_reset_wq(guc->preempt_client); 1178 1165 1179 1166 err 
= intel_guc_sample_forcewake(guc); 1180 1167 if (err)
+2 -1
drivers/gpu/drm/i915/intel_huc.c
··· 118 118 119 119 /** 120 120 * huc_ucode_xfer() - DMA's the firmware 121 - * @dev_priv: the drm_i915_private device 121 + * @huc_fw: the firmware descriptor 122 + * @vma: the firmware image (bound into the GGTT) 122 123 * 123 124 * Transfer the firmware image to RAM for execution by the microcontroller. 124 125 *
+6 -12
drivers/gpu/drm/i915/intel_lpe_audio.c
··· 74 74 static struct platform_device * 75 75 lpe_audio_platdev_create(struct drm_i915_private *dev_priv) 76 76 { 77 - int ret; 78 77 struct drm_device *dev = &dev_priv->drm; 79 78 struct platform_device_info pinfo = {}; 80 79 struct resource *rsc; ··· 118 119 spin_lock_init(&pdata->lpe_audio_slock); 119 120 120 121 platdev = platform_device_register_full(&pinfo); 121 - if (IS_ERR(platdev)) { 122 - ret = PTR_ERR(platdev); 123 - DRM_ERROR("Failed to allocate LPE audio platform device\n"); 124 - goto err; 125 - } 126 - 127 122 kfree(rsc); 123 + kfree(pdata); 124 + 125 + if (IS_ERR(platdev)) { 126 + DRM_ERROR("Failed to allocate LPE audio platform device\n"); 127 + return platdev; 128 + } 128 129 129 130 pm_runtime_forbid(&platdev->dev); 130 131 pm_runtime_set_active(&platdev->dev); 131 132 pm_runtime_enable(&platdev->dev); 132 133 133 134 return platdev; 134 - 135 - err: 136 - kfree(rsc); 137 - kfree(pdata); 138 - return ERR_PTR(ret); 139 135 } 140 136 141 137 static void lpe_audio_platdev_destroy(struct drm_i915_private *dev_priv)
+23 -5
drivers/gpu/drm/i915/intel_lrc.c
··· 161 161 #define EXECLISTS_REQUEST_SIZE 64 /* bytes */ 162 162 #define WA_TAIL_DWORDS 2 163 163 #define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS) 164 - #define PREEMPT_ID 0x1 165 164 166 165 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx, 167 166 struct intel_engine_cs *engine); ··· 447 448 &engine->i915->preempt_context->engine[engine->id]; 448 449 unsigned int n; 449 450 450 - GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID); 451 + GEM_BUG_ON(engine->execlists.preempt_complete_status != 452 + upper_32_bits(ce->lrc_desc)); 451 453 GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES)); 452 454 453 455 memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES); ··· 528 528 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK)) 529 529 goto unlock; 530 530 531 - if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) && 531 + if (engine->i915->preempt_context && 532 532 rb_entry(rb, struct i915_priolist, node)->priority > 533 533 max(last->priotree.priority, 0)) { 534 534 /* ··· 642 642 execlists->first = rb; 643 643 if (submit) 644 644 port_assign(port, last); 645 + 646 + /* We must always keep the beast fed if we have work piled up */ 647 + GEM_BUG_ON(execlists->first && !port_isset(execlists->port)); 648 + 645 649 unlock: 646 650 spin_unlock_irq(&engine->timeline->lock); 647 651 ··· 653 649 execlists_set_active(execlists, EXECLISTS_ACTIVE_USER); 654 650 execlists_submit_ports(engine); 655 651 } 652 + 653 + GEM_BUG_ON(port_isset(execlists->port) && 654 + !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER)); 656 655 } 657 656 658 657 void ··· 851 844 GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE); 852 845 853 846 if (status & GEN8_CTX_STATUS_COMPLETE && 854 - buf[2*head + 1] == PREEMPT_ID) { 847 + buf[2*head + 1] == execlists->preempt_complete_status) { 855 848 GEM_TRACE("%s preempt-idle\n", engine->name); 856 849 857 850 execlists_cancel_port_requests(execlists); ··· 1970 1963 engine->unpark = NULL; 1971 1964 1972 
1965 engine->flags |= I915_ENGINE_SUPPORTS_STATS; 1966 + 1967 + engine->i915->caps.scheduler = 1968 + I915_SCHEDULER_CAP_ENABLED | 1969 + I915_SCHEDULER_CAP_PRIORITY; 1970 + if (engine->i915->preempt_context) 1971 + engine->i915->caps.scheduler |= I915_SCHEDULER_CAP_PREEMPTION; 1973 1972 } 1974 1973 1975 1974 static void ··· 2051 2038 2052 2039 engine->execlists.elsp = 2053 2040 engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine)); 2041 + 2042 + engine->execlists.preempt_complete_status = ~0u; 2043 + if (engine->i915->preempt_context) 2044 + engine->execlists.preempt_complete_status = 2045 + upper_32_bits(engine->i915->preempt_context->engine[engine->id].lrc_desc); 2054 2046 2055 2047 return 0; 2056 2048 ··· 2319 2301 if (!engine->default_state) 2320 2302 regs[CTX_CONTEXT_CONTROL + 1] |= 2321 2303 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); 2322 - if (ctx->hw_id == PREEMPT_ID) 2304 + if (ctx == ctx->i915->preempt_context) 2323 2305 regs[CTX_CONTEXT_CONTROL + 1] |= 2324 2306 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | 2325 2307 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
+28 -16
drivers/gpu/drm/i915/intel_lvds.c
··· 189 189 /* Convert from 100ms to 100us units */ 190 190 pps->t4 = val * 1000; 191 191 192 - if (INTEL_INFO(dev_priv)->gen <= 4 && 192 + if (INTEL_GEN(dev_priv) <= 4 && 193 193 pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) { 194 194 DRM_DEBUG_KMS("Panel power timings uninitialized, " 195 195 "setting defaults\n"); ··· 268 268 /* set the corresponsding LVDS_BORDER bit */ 269 269 temp &= ~LVDS_BORDER_ENABLE; 270 270 temp |= pipe_config->gmch_pfit.lvds_border_bits; 271 - /* Set the B0-B3 data pairs corresponding to whether we're going to 271 + 272 + /* 273 + * Set the B0-B3 data pairs corresponding to whether we're going to 272 274 * set the DPLLs for dual-channel mode or not. 273 275 */ 274 276 if (lvds_encoder->is_dual_link) ··· 278 276 else 279 277 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); 280 278 281 - /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 279 + /* 280 + * It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) 282 281 * appropriately here, but we need to look more thoroughly into how 283 282 * panels behave in the two modes. For now, let's just maintain the 284 283 * value we got from the BIOS. ··· 287 284 temp &= ~LVDS_A3_POWER_MASK; 288 285 temp |= lvds_encoder->a3_power; 289 286 290 - /* Set the dithering flag on LVDS as needed, note that there is no 287 + /* 288 + * Set the dithering flag on LVDS as needed, note that there is no 291 289 * special lvds dither control bit on pch-split platforms, dithering is 292 - * only controlled through the PIPECONF reg. */ 290 + * only controlled through the PIPECONF reg. 291 + */ 293 292 if (IS_GEN4(dev_priv)) { 294 - /* Bspec wording suggests that LVDS port dithering only exists 295 - * for 18bpp panels. */ 293 + /* 294 + * Bspec wording suggests that LVDS port dithering only exists 295 + * for 18bpp panels. 
296 + */ 296 297 if (pipe_config->dither && pipe_config->pipe_bpp == 18) 297 298 temp |= LVDS_ENABLE_DITHER; 298 299 else ··· 311 304 I915_WRITE(lvds_encoder->reg, temp); 312 305 } 313 306 314 - /** 307 + /* 315 308 * Sets the power state for the panel. 316 309 */ 317 310 static void intel_enable_lvds(struct intel_encoder *encoder, ··· 448 441 return true; 449 442 } 450 443 451 - /** 444 + /* 452 445 * Detect the LVDS connection. 453 446 * 454 447 * Since LVDS doesn't have hotlug, we use the lid as a proxy. Open means ··· 471 464 return connector_status_connected; 472 465 } 473 466 474 - /** 467 + /* 475 468 * Return the list of DDC modes if available, or the BIOS fixed mode otherwise. 476 469 */ 477 470 static int intel_lvds_get_modes(struct drm_connector *connector) ··· 900 893 if (dmi_check_system(intel_dual_link_lvds)) 901 894 return true; 902 895 903 - /* BIOS should set the proper LVDS register value at boot, but 896 + /* 897 + * BIOS should set the proper LVDS register value at boot, but 904 898 * in reality, it doesn't set the value when the lid is closed; 905 899 * we need to check "the value to be set" in VBT when LVDS 906 900 * register is uninitialized. ··· 915 907 916 908 static bool intel_lvds_supported(struct drm_i915_private *dev_priv) 917 909 { 918 - /* With the introduction of the PCH we gained a dedicated 919 - * LVDS presence pin, use it. */ 910 + /* 911 + * With the introduction of the PCH we gained a dedicated 912 + * LVDS presence pin, use it. 
913 + */ 920 914 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) 921 915 return true; 922 916 923 - /* Otherwise LVDS was only attached to mobile products, 924 - * except for the inglorious 830gm */ 917 + /* 918 + * Otherwise LVDS was only attached to mobile products, 919 + * except for the inglorious 830gm 920 + */ 925 921 if (INTEL_GEN(dev_priv) <= 4 && 926 922 IS_MOBILE(dev_priv) && !IS_I830(dev_priv)) 927 923 return true; ··· 935 923 936 924 /** 937 925 * intel_lvds_init - setup LVDS connectors on this device 938 - * @dev: drm device 926 + * @dev_priv: i915 device 939 927 * 940 928 * Create the connector, register the LVDS DDC bus, and try to figure out what 941 929 * modes we can display on the LVDS panel (if present).
+1 -1
drivers/gpu/drm/i915/intel_mocs.c
··· 187 187 table->table = broxton_mocs_table; 188 188 result = true; 189 189 } else { 190 - WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9, 190 + WARN_ONCE(INTEL_GEN(dev_priv) >= 9, 191 191 "Platform that should have a MOCS table does not.\n"); 192 192 } 193 193
+2 -1
drivers/gpu/drm/i915/intel_overlay.c
··· 801 801 802 802 atomic_inc(&dev_priv->gpu_error.pending_fb_pin); 803 803 804 - vma = i915_gem_object_pin_to_display_plane(new_bo, 0, NULL); 804 + vma = i915_gem_object_pin_to_display_plane(new_bo, 805 + 0, NULL, PIN_MAPPABLE); 805 806 if (IS_ERR(vma)) { 806 807 ret = PTR_ERR(vma); 807 808 goto out_pin_section;
+8 -4
drivers/gpu/drm/i915/intel_panel.c
··· 397 397 398 398 /** 399 399 * scale - scale values from one range to another 400 - * 401 400 * @source_val: value in range [@source_min..@source_max] 401 + * @source_min: minimum legal value for @source_val 402 + * @source_max: maximum legal value for @source_val 403 + * @target_min: corresponding target value for @source_min 404 + * @target_max: corresponding target value for @source_max 402 405 * 403 406 * Return @source_val in range [@source_min..@source_max] scaled to range 404 407 * [@target_min..@target_max]. ··· 419 416 source_val = clamp(source_val, source_min, source_max); 420 417 421 418 /* avoid overflows */ 422 - target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) * 423 - (target_max - target_min), source_max - source_min); 419 + target_val = mul_u32_u32(source_val - source_min, 420 + target_max - target_min); 421 + target_val = DIV_ROUND_CLOSEST_ULL(target_val, source_max - source_min); 424 422 target_val += target_min; 425 423 426 424 return target_val; ··· 501 497 u32 val; 502 498 503 499 val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; 504 - if (INTEL_INFO(dev_priv)->gen < 4) 500 + if (INTEL_GEN(dev_priv) < 4) 505 501 val >>= 1; 506 502 507 503 if (panel->backlight.combination_mode) {
+71 -23
drivers/gpu/drm/i915/intel_pm.c
··· 729 729 * intel_calculate_wm - calculate watermark level 730 730 * @pixel_rate: pixel clock 731 731 * @wm: chip FIFO params 732 + * @fifo_size: size of the FIFO buffer 732 733 * @cpp: bytes per pixel 733 734 * @latency_ns: memory latency for the platform 734 735 * ··· 2917 2916 /* ILK cursor LP0 latency is 1300 ns */ 2918 2917 if (IS_GEN5(dev_priv)) 2919 2918 wm[0] = 13; 2920 - 2921 - /* WaDoubleCursorLP3Latency:ivb */ 2922 - if (IS_IVYBRIDGE(dev_priv)) 2923 - wm[3] *= 2; 2924 2919 } 2925 2920 2926 2921 int ilk_wm_max_level(const struct drm_i915_private *dev_priv) ··· 4593 4596 min_disp_buf_needed = res_blocks; 4594 4597 } 4595 4598 4596 - if (res_blocks >= ddb_allocation || res_lines > 31 || 4599 + if ((level > 0 && res_lines > 31) || 4600 + res_blocks >= ddb_allocation || 4597 4601 min_disp_buf_needed >= ddb_allocation) { 4598 4602 *enabled = false; 4599 4603 ··· 4615 4617 } 4616 4618 } 4617 4619 4620 + /* The number of lines are ignored for the level 0 watermark. */ 4621 + *out_lines = level ? res_lines : 0; 4618 4622 *out_blocks = res_blocks; 4619 - *out_lines = res_lines; 4620 4623 *enabled = true; 4621 4624 4622 4625 return 0; ··· 4709 4710 if (!dev_priv->ipc_enabled) 4710 4711 goto exit; 4711 4712 4713 + trans_min = 0; 4712 4714 if (INTEL_GEN(dev_priv) >= 10) 4713 4715 trans_min = 4; 4714 4716 ··· 5864 5864 5865 5865 /** 5866 5866 * intel_update_watermarks - update FIFO watermark values based on current modes 5867 + * @crtc: the #intel_crtc on which to compute the WM 5867 5868 * 5868 5869 * Calculate watermark values for the various WM regs based on current mode 5869 5870 * and plane configuration. 
··· 6373 6372 if (!rps->enabled) 6374 6373 return; 6375 6374 6375 + if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)) 6376 + return; 6377 + 6378 + /* Serializes with i915_gem_request_retire() */ 6376 6379 boost = false; 6377 6380 spin_lock_irqsave(&rq->lock, flags); 6378 - if (!rq->waitboost && !i915_gem_request_completed(rq)) { 6379 - atomic_inc(&rps->num_waiters); 6381 + if (!rq->waitboost && !dma_fence_is_signaled_locked(&rq->fence)) { 6382 + boost = !atomic_fetch_inc(&rps->num_waiters); 6380 6383 rq->waitboost = true; 6381 - boost = true; 6382 6384 } 6383 6385 spin_unlock_irqrestore(&rq->lock, flags); 6384 6386 if (!boost) ··· 6942 6938 * No floor required for ring frequency on SKL. 6943 6939 */ 6944 6940 ring_freq = gpu_freq; 6945 - } else if (INTEL_INFO(dev_priv)->gen >= 8) { 6941 + } else if (INTEL_GEN(dev_priv) >= 8) { 6946 6942 /* max(2 * GT, DDR). NB: GT is 50MHz units */ 6947 6943 ring_freq = max(min_ring_freq, gpu_freq); 6948 6944 } else if (IS_HASWELL(dev_priv)) { ··· 7553 7549 { 7554 7550 unsigned long val; 7555 7551 7556 - if (INTEL_INFO(dev_priv)->gen != 5) 7552 + if (!IS_GEN5(dev_priv)) 7557 7553 return 0; 7558 7554 7559 7555 spin_lock_irq(&mchdev_lock); ··· 7637 7633 7638 7634 void i915_update_gfx_val(struct drm_i915_private *dev_priv) 7639 7635 { 7640 - if (INTEL_INFO(dev_priv)->gen != 5) 7636 + if (!IS_GEN5(dev_priv)) 7641 7637 return; 7642 7638 7643 7639 spin_lock_irq(&mchdev_lock); ··· 7688 7684 { 7689 7685 unsigned long val; 7690 7686 7691 - if (INTEL_INFO(dev_priv)->gen != 5) 7687 + if (!IS_GEN5(dev_priv)) 7692 7688 return 0; 7693 7689 7694 7690 spin_lock_irq(&mchdev_lock); ··· 9419 9415 const i915_reg_t reg) 9420 9416 { 9421 9417 u32 lower, upper, tmp; 9422 - unsigned long flags; 9423 9418 int loop = 2; 9424 9419 9425 - /* The register accessed do not need forcewake. We borrow 9420 + /* 9421 + * The register accessed do not need forcewake. We borrow 9426 9422 * uncore lock to prevent concurrent access to range reg. 
9427 9423 */ 9428 - spin_lock_irqsave(&dev_priv->uncore.lock, flags); 9424 + lockdep_assert_held(&dev_priv->uncore.lock); 9429 9425 9430 - /* vlv and chv residency counters are 40 bits in width. 9426 + /* 9427 + * vlv and chv residency counters are 40 bits in width. 9431 9428 * With a control bit, we can choose between upper or lower 9432 9429 * 32bit window into this counter. 9433 9430 * ··· 9452 9447 upper = I915_READ_FW(reg); 9453 9448 } while (upper != tmp && --loop); 9454 9449 9455 - /* Everywhere else we always use VLV_COUNTER_CONTROL with the 9450 + /* 9451 + * Everywhere else we always use VLV_COUNTER_CONTROL with the 9456 9452 * VLV_COUNT_RANGE_HIGH bit set - so it is safe to leave it set 9457 9453 * now. 9458 9454 */ 9459 - 9460 - spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); 9461 9455 9462 9456 return lower | (u64)upper << 8; 9463 9457 } ··· 9464 9460 u64 intel_rc6_residency_ns(struct drm_i915_private *dev_priv, 9465 9461 const i915_reg_t reg) 9466 9462 { 9467 - u64 time_hw; 9463 + u64 time_hw, prev_hw, overflow_hw; 9464 + unsigned int fw_domains; 9465 + unsigned long flags; 9466 + unsigned int i; 9468 9467 u32 mul, div; 9469 9468 9470 9469 if (!HAS_RC6(dev_priv)) 9471 9470 return 0; 9472 9471 9472 + /* 9473 + * Store previous hw counter values for counter wrap-around handling. 9474 + * 9475 + * There are only four interesting registers and they live next to each 9476 + * other so we can use the relative address, compared to the smallest 9477 + * one as the index into driver storage. 
9478 + */ 9479 + i = (i915_mmio_reg_offset(reg) - 9480 + i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32); 9481 + if (WARN_ON_ONCE(i >= ARRAY_SIZE(dev_priv->gt_pm.rc6.cur_residency))) 9482 + return 0; 9483 + 9484 + fw_domains = intel_uncore_forcewake_for_reg(dev_priv, reg, FW_REG_READ); 9485 + 9486 + spin_lock_irqsave(&dev_priv->uncore.lock, flags); 9487 + intel_uncore_forcewake_get__locked(dev_priv, fw_domains); 9488 + 9473 9489 /* On VLV and CHV, residency time is in CZ units rather than 1.28us */ 9474 9490 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { 9475 9491 mul = 1000000; 9476 9492 div = dev_priv->czclk_freq; 9493 + overflow_hw = BIT_ULL(40); 9477 9494 time_hw = vlv_residency_raw(dev_priv, reg); 9478 9495 } else { 9479 9496 /* 833.33ns units on Gen9LP, 1.28us elsewhere. */ ··· 9506 9481 div = 1; 9507 9482 } 9508 9483 9509 - time_hw = I915_READ(reg); 9484 + overflow_hw = BIT_ULL(32); 9485 + time_hw = I915_READ_FW(reg); 9510 9486 } 9511 9487 9512 - return DIV_ROUND_UP_ULL(time_hw * mul, div); 9488 + /* 9489 + * Counter wrap handling. 9490 + * 9491 + * But relying on a sufficient frequency of queries otherwise counters 9492 + * can still wrap. 9493 + */ 9494 + prev_hw = dev_priv->gt_pm.rc6.prev_hw_residency[i]; 9495 + dev_priv->gt_pm.rc6.prev_hw_residency[i] = time_hw; 9496 + 9497 + /* RC6 delta from last sample. */ 9498 + if (time_hw >= prev_hw) 9499 + time_hw -= prev_hw; 9500 + else 9501 + time_hw += overflow_hw - prev_hw; 9502 + 9503 + /* Add delta to RC6 extended raw driver copy. */ 9504 + time_hw += dev_priv->gt_pm.rc6.cur_residency[i]; 9505 + dev_priv->gt_pm.rc6.cur_residency[i] = time_hw; 9506 + 9507 + intel_uncore_forcewake_put__locked(dev_priv, fw_domains); 9508 + spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); 9509 + 9510 + return mul_u64_u32_div(time_hw, mul, div); 9513 9511 } 9514 9512 9515 9513 u32 intel_get_cagf(struct drm_i915_private *dev_priv, u32 rpstat)
+2 -2
drivers/gpu/drm/i915/intel_psr.c
··· 126 126 static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv, 127 127 enum port port) 128 128 { 129 - if (INTEL_INFO(dev_priv)->gen >= 9) 129 + if (INTEL_GEN(dev_priv) >= 9) 130 130 return DP_AUX_CH_CTL(port); 131 131 else 132 132 return EDP_PSR_AUX_CTL; ··· 135 135 static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv, 136 136 enum port port, int index) 137 137 { 138 - if (INTEL_INFO(dev_priv)->gen >= 9) 138 + if (INTEL_GEN(dev_priv) >= 9) 139 139 return DP_AUX_CH_DATA(port, index); 140 140 else 141 141 return EDP_PSR_AUX_DATA(index);
+11 -19
drivers/gpu/drm/i915/intel_ringbuffer.c
··· 137 137 return 0; 138 138 } 139 139 140 - /** 140 + /* 141 141 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for 142 142 * implementing two workarounds on gen6. From section 1.4.7.1 143 143 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: ··· 453 453 454 454 if (!stop_ring(engine)) { 455 455 /* G45 ring initialization often fails to reset head to zero */ 456 - DRM_DEBUG_KMS("%s head not reset to zero " 457 - "ctl %08x head %08x tail %08x start %08x\n", 458 - engine->name, 459 - I915_READ_CTL(engine), 460 - I915_READ_HEAD(engine), 461 - I915_READ_TAIL(engine), 462 - I915_READ_START(engine)); 456 + DRM_DEBUG_DRIVER("%s head not reset to zero " 457 + "ctl %08x head %08x tail %08x start %08x\n", 458 + engine->name, 459 + I915_READ_CTL(engine), 460 + I915_READ_HEAD(engine), 461 + I915_READ_TAIL(engine), 462 + I915_READ_START(engine)); 463 463 464 464 if (!stop_ring(engine)) { 465 465 DRM_ERROR("failed to set %s head to zero " ··· 492 492 493 493 /* WaClearRingBufHeadRegAtInit:ctg,elk */ 494 494 if (I915_READ_HEAD(engine)) 495 - DRM_DEBUG("%s initialization failed [head=%08x], fudging\n", 496 - engine->name, I915_READ_HEAD(engine)); 495 + DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n", 496 + engine->name, I915_READ_HEAD(engine)); 497 497 498 498 intel_ring_update_space(ring); 499 499 I915_WRITE_HEAD(engine, ring->head); ··· 655 655 if (IS_GEN(dev_priv, 6, 7)) 656 656 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); 657 657 658 - if (INTEL_INFO(dev_priv)->gen >= 6) 658 + if (INTEL_GEN(dev_priv) >= 6) 659 659 I915_WRITE_IMR(engine, ~engine->irq_keep_mask); 660 660 661 661 return init_workarounds_ring(engine); ··· 729 729 730 730 static const int i9xx_emit_breadcrumb_sz = 4; 731 731 732 - /** 733 - * gen6_sema_emit_breadcrumb - Update the semaphore mailbox registers 734 - * 735 - * @request - request to write to the ring 736 - * 737 - * Update the mailbox registers in the *other* rings with the current seqno. 
738 - * This acts like a signal in the canonical semaphore. 739 - */ 740 732 static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs) 741 733 { 742 734 return i9xx_emit_breadcrumb(req,
+8 -3
drivers/gpu/drm/i915/intel_ringbuffer.h
··· 279 279 * @csb_use_mmio: access csb through mmio, instead of hwsp 280 280 */ 281 281 bool csb_use_mmio; 282 + 283 + /** 284 + * @preempt_complete_status: expected CSB upon completing preemption 285 + */ 286 + u32 preempt_complete_status; 282 287 }; 283 288 284 289 #define INTEL_ENGINE_CS_MAX_NAME 8 ··· 659 654 } 660 655 661 656 static inline u32 662 - intel_read_status_page(struct intel_engine_cs *engine, int reg) 657 + intel_read_status_page(const struct intel_engine_cs *engine, int reg) 663 658 { 664 659 /* Ensure that the compiler doesn't optimize away the load. */ 665 660 return READ_ONCE(engine->status_page.page_addr[reg]); ··· 817 812 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine); 818 813 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine); 819 814 820 - u64 intel_engine_get_active_head(struct intel_engine_cs *engine); 821 - u64 intel_engine_get_last_batch_head(struct intel_engine_cs *engine); 815 + u64 intel_engine_get_active_head(const struct intel_engine_cs *engine); 816 + u64 intel_engine_get_last_batch_head(const struct intel_engine_cs *engine); 822 817 823 818 static inline u32 intel_engine_get_seqno(struct intel_engine_cs *engine) 824 819 {
+144 -17
drivers/gpu/drm/i915/intel_runtime_pm.c
··· 2646 2646 DRM_ERROR("DBuf power disable timeout!\n"); 2647 2647 } 2648 2648 2649 + /* 2650 + * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when 2651 + * needed and keep it disabled as much as possible. 2652 + */ 2653 + static void icl_dbuf_enable(struct drm_i915_private *dev_priv) 2654 + { 2655 + I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST); 2656 + I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST); 2657 + POSTING_READ(DBUF_CTL_S2); 2658 + 2659 + udelay(10); 2660 + 2661 + if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 2662 + !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 2663 + DRM_ERROR("DBuf power enable timeout\n"); 2664 + } 2665 + 2666 + static void icl_dbuf_disable(struct drm_i915_private *dev_priv) 2667 + { 2668 + I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST); 2669 + I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST); 2670 + POSTING_READ(DBUF_CTL_S2); 2671 + 2672 + udelay(10); 2673 + 2674 + if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) || 2675 + (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)) 2676 + DRM_ERROR("DBuf power disable timeout!\n"); 2677 + } 2678 + 2679 + static void icl_mbus_init(struct drm_i915_private *dev_priv) 2680 + { 2681 + uint32_t val; 2682 + 2683 + val = MBUS_ABOX_BT_CREDIT_POOL1(16) | 2684 + MBUS_ABOX_BT_CREDIT_POOL2(16) | 2685 + MBUS_ABOX_B_CREDIT(1) | 2686 + MBUS_ABOX_BW_CREDIT(1); 2687 + 2688 + I915_WRITE(MBUS_ABOX_CTL, val); 2689 + } 2690 + 2649 2691 static void skl_display_core_init(struct drm_i915_private *dev_priv, 2650 2692 bool resume) 2651 2693 { ··· 2836 2794 { .dw1 = 0x00440000, .dw9 = 0x9A00AB25, .dw10 = 0x8AE38FF1, }, 2837 2795 }; 2838 2796 2839 - static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv) 2797 + /* 2798 + * CNL has just one set of registers, while ICL has two sets: one for port A and 2799 + * the other for port B. 
The CNL registers are equivalent to the ICL port A 2800 + * registers, that's why we call the ICL macros even though the function has CNL 2801 + * on its name. 2802 + */ 2803 + static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv, 2804 + enum port port) 2840 2805 { 2841 2806 const struct cnl_procmon *procmon; 2842 2807 u32 val; 2843 2808 2844 - val = I915_READ(CNL_PORT_COMP_DW3); 2809 + val = I915_READ(ICL_PORT_COMP_DW3(port)); 2845 2810 switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) { 2846 2811 default: 2847 2812 MISSING_CASE(val); ··· 2869 2820 break; 2870 2821 } 2871 2822 2872 - val = I915_READ(CNL_PORT_COMP_DW1); 2823 + val = I915_READ(ICL_PORT_COMP_DW1(port)); 2873 2824 val &= ~((0xff << 16) | 0xff); 2874 2825 val |= procmon->dw1; 2875 - I915_WRITE(CNL_PORT_COMP_DW1, val); 2826 + I915_WRITE(ICL_PORT_COMP_DW1(port), val); 2876 2827 2877 - I915_WRITE(CNL_PORT_COMP_DW9, procmon->dw9); 2878 - I915_WRITE(CNL_PORT_COMP_DW10, procmon->dw10); 2828 + I915_WRITE(ICL_PORT_COMP_DW9(port), procmon->dw9); 2829 + I915_WRITE(ICL_PORT_COMP_DW10(port), procmon->dw10); 2879 2830 } 2880 2831 2881 2832 static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume) ··· 2896 2847 val &= ~CNL_COMP_PWR_DOWN; 2897 2848 I915_WRITE(CHICKEN_MISC_2, val); 2898 2849 2899 - cnl_set_procmon_ref_values(dev_priv); 2850 + /* Dummy PORT_A to get the correct CNL register from the ICL macro */ 2851 + cnl_set_procmon_ref_values(dev_priv, PORT_A); 2900 2852 2901 2853 val = I915_READ(CNL_PORT_COMP_DW0); 2902 2854 val |= COMP_INIT; ··· 2959 2909 val = I915_READ(CHICKEN_MISC_2); 2960 2910 val |= CNL_COMP_PWR_DOWN; 2961 2911 I915_WRITE(CHICKEN_MISC_2, val); 2912 + } 2913 + 2914 + static void icl_display_core_init(struct drm_i915_private *dev_priv, 2915 + bool resume) 2916 + { 2917 + enum port port; 2918 + u32 val; 2919 + 2920 + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 2921 + 2922 + /* 1. Enable PCH reset handshake. 
*/ 2923 + val = I915_READ(HSW_NDE_RSTWRN_OPT); 2924 + val |= RESET_PCH_HANDSHAKE_ENABLE; 2925 + I915_WRITE(HSW_NDE_RSTWRN_OPT, val); 2926 + 2927 + for (port = PORT_A; port <= PORT_B; port++) { 2928 + /* 2. Enable DDI combo PHY comp. */ 2929 + val = I915_READ(ICL_PHY_MISC(port)); 2930 + val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; 2931 + I915_WRITE(ICL_PHY_MISC(port), val); 2932 + 2933 + cnl_set_procmon_ref_values(dev_priv, port); 2934 + 2935 + val = I915_READ(ICL_PORT_COMP_DW0(port)); 2936 + val |= COMP_INIT; 2937 + I915_WRITE(ICL_PORT_COMP_DW0(port), val); 2938 + 2939 + /* 3. Set power down enable. */ 2940 + val = I915_READ(ICL_PORT_CL_DW5(port)); 2941 + val |= CL_POWER_DOWN_ENABLE; 2942 + I915_WRITE(ICL_PORT_CL_DW5(port), val); 2943 + } 2944 + 2945 + /* 4. Enable power well 1 (PG1) and aux IO power. */ 2946 + /* FIXME: ICL power wells code not here yet. */ 2947 + 2948 + /* 5. Enable CDCLK. */ 2949 + icl_init_cdclk(dev_priv); 2950 + 2951 + /* 6. Enable DBUF. */ 2952 + icl_dbuf_enable(dev_priv); 2953 + 2954 + /* 7. Setup MBUS. */ 2955 + icl_mbus_init(dev_priv); 2956 + 2957 + /* 8. CHICKEN_DCPR_1 */ 2958 + I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) | 2959 + CNL_DDI_CLOCK_REG_ACCESS_ON); 2960 + } 2961 + 2962 + static void icl_display_core_uninit(struct drm_i915_private *dev_priv) 2963 + { 2964 + enum port port; 2965 + u32 val; 2966 + 2967 + gen9_set_dc_state(dev_priv, DC_STATE_DISABLE); 2968 + 2969 + /* 1. Disable all display engine functions -> aready done */ 2970 + 2971 + /* 2. Disable DBUF */ 2972 + icl_dbuf_disable(dev_priv); 2973 + 2974 + /* 3. Disable CD clock */ 2975 + icl_uninit_cdclk(dev_priv); 2976 + 2977 + /* 4. Disable Power Well 1 (PG1) and Aux IO Power */ 2978 + /* FIXME: ICL power wells code not here yet. */ 2979 + 2980 + /* 5. 
Disable Comp */ 2981 + for (port = PORT_A; port <= PORT_B; port++) { 2982 + val = I915_READ(ICL_PHY_MISC(port)); 2983 + val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN; 2984 + I915_WRITE(ICL_PHY_MISC(port), val); 2985 + } 2962 2986 } 2963 2987 2964 2988 static void chv_phy_control_init(struct drm_i915_private *dev_priv) ··· 3167 3043 3168 3044 power_domains->initializing = true; 3169 3045 3170 - if (IS_CANNONLAKE(dev_priv)) { 3046 + if (IS_ICELAKE(dev_priv)) { 3047 + icl_display_core_init(dev_priv, resume); 3048 + } else if (IS_CANNONLAKE(dev_priv)) { 3171 3049 cnl_display_core_init(dev_priv, resume); 3172 3050 } else if (IS_GEN9_BC(dev_priv)) { 3173 3051 skl_display_core_init(dev_priv, resume); ··· 3210 3084 if (!i915_modparams.disable_power_well) 3211 3085 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT); 3212 3086 3213 - if (IS_CANNONLAKE(dev_priv)) 3087 + if (IS_ICELAKE(dev_priv)) 3088 + icl_display_core_uninit(dev_priv); 3089 + else if (IS_CANNONLAKE(dev_priv)) 3214 3090 cnl_display_core_uninit(dev_priv); 3215 3091 else if (IS_GEN9_BC(dev_priv)) 3216 3092 skl_display_core_uninit(dev_priv); ··· 3328 3200 * @dev_priv: i915 device instance 3329 3201 * 3330 3202 * This function grabs a device-level runtime pm reference if the device is 3331 - * already in use and ensures that it is powered up. 3203 + * already in use and ensures that it is powered up. It is illegal to try 3204 + * and access the HW should intel_runtime_pm_get_if_in_use() report failure. 3332 3205 * 3333 3206 * Any runtime pm reference obtained by this function must have a symmetric 3334 3207 * call to intel_runtime_pm_put() to release the reference again. 3208 + * 3209 + * Returns: True if the wakeref was acquired, or False otherwise. 
3335 3210 */ 3336 3211 bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv) 3337 3212 { 3338 - struct pci_dev *pdev = dev_priv->drm.pdev; 3339 - struct device *kdev = &pdev->dev; 3340 - 3341 3213 if (IS_ENABLED(CONFIG_PM)) { 3342 - int ret = pm_runtime_get_if_in_use(kdev); 3214 + struct pci_dev *pdev = dev_priv->drm.pdev; 3215 + struct device *kdev = &pdev->dev; 3343 3216 3344 3217 /* 3345 3218 * In cases runtime PM is disabled by the RPM core and we get ··· 3348 3219 * function, since the power state is undefined. This applies 3349 3220 * atm to the late/early system suspend/resume handlers. 3350 3221 */ 3351 - WARN_ONCE(ret < 0, 3352 - "pm_runtime_get_if_in_use() failed: %d\n", ret); 3353 - if (ret <= 0) 3222 + if (pm_runtime_get_if_in_use(kdev) <= 0) 3354 3223 return false; 3355 3224 } 3356 3225
+53 -29
drivers/gpu/drm/i915/intel_sdvo.c
··· 214 214 intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo, 215 215 struct intel_sdvo_connector *intel_sdvo_connector); 216 216 217 - /** 217 + /* 218 218 * Writes the SDVOB or SDVOC with the given value, but always writes both 219 219 * SDVOB and SDVOC to work around apparent hardware issues (according to 220 220 * comments in the BIOS). ··· 250 250 * writing them only once doesn't appear to 'stick'. 251 251 * The BIOS does this too. Yay, magic 252 252 */ 253 - for (i = 0; i < 2; i++) 254 - { 253 + for (i = 0; i < 2; i++) { 255 254 I915_WRITE(GEN3_SDVOB, bval); 256 255 POSTING_READ(GEN3_SDVOB); 256 + 257 257 I915_WRITE(GEN3_SDVOC, cval); 258 258 POSTING_READ(GEN3_SDVOC); 259 259 } ··· 643 643 &targets, sizeof(targets)); 644 644 } 645 645 646 - /** 646 + /* 647 647 * Return whether each input is trained. 648 648 * 649 649 * This function is making an assumption about the layout of the response, ··· 1061 1061 return true; 1062 1062 } 1063 1063 1064 - /* Asks the sdvo controller for the preferred input mode given the output mode. 1065 - * Unfortunately we have to set up the full output mode to do that. */ 1064 + /* 1065 + * Asks the sdvo controller for the preferred input mode given the output mode. 1066 + * Unfortunately we have to set up the full output mode to do that. 1067 + */ 1066 1068 static bool 1067 1069 intel_sdvo_get_preferred_input_mode(struct intel_sdvo *intel_sdvo, 1068 1070 const struct drm_display_mode *mode, ··· 1097 1095 unsigned dotclock = pipe_config->port_clock; 1098 1096 struct dpll *clock = &pipe_config->dpll; 1099 1097 1100 - /* SDVO TV has fixed PLL values depend on its clock range, 1101 - this mirrors vbios setting. */ 1098 + /* 1099 + * SDVO TV has fixed PLL values depend on its clock range, 1100 + * this mirrors vbios setting. 
1101 + */ 1102 1102 if (dotclock >= 100000 && dotclock < 140500) { 1103 1103 clock->p1 = 2; 1104 1104 clock->p2 = 10; ··· 1136 1132 if (HAS_PCH_SPLIT(to_i915(encoder->base.dev))) 1137 1133 pipe_config->has_pch_encoder = true; 1138 1134 1139 - /* We need to construct preferred input timings based on our 1135 + /* 1136 + * We need to construct preferred input timings based on our 1140 1137 * output timings. To do that, we have to set the output 1141 1138 * timings, even though this isn't really the right place in 1142 1139 * the sequence to do it. Oh well. ··· 1160 1155 adjusted_mode); 1161 1156 } 1162 1157 1163 - /* Make the CRTC code factor in the SDVO pixel multiplier. The 1158 + /* 1159 + * Make the CRTC code factor in the SDVO pixel multiplier. The 1164 1160 * SDVO device will factor out the multiplier during mode_set. 1165 1161 */ 1166 1162 pipe_config->pixel_multiplier = ··· 1175 1169 pipe_config->has_audio = true; 1176 1170 1177 1171 if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) { 1178 - /* See CEA-861-E - 5.1 Default Encoding Parameters */ 1179 - /* FIXME: This bit is only valid when using TMDS encoding and 8 1180 - * bit per color mode. */ 1172 + /* 1173 + * See CEA-861-E - 5.1 Default Encoding Parameters 1174 + * 1175 + * FIXME: This bit is only valid when using TMDS encoding and 8 1176 + * bit per color mode. 1177 + */ 1181 1178 if (pipe_config->has_hdmi_sink && 1182 1179 drm_match_cea_mode(adjusted_mode) > 1) 1183 1180 pipe_config->limited_color_range = true; ··· 1281 1272 1282 1273 intel_sdvo_update_props(intel_sdvo, sdvo_state); 1283 1274 1284 - /* First, set the input mapping for the first input to our controlled 1275 + /* 1276 + * First, set the input mapping for the first input to our controlled 1285 1277 * output. This is only correct if we're a single-input device, in 1286 1278 * which case the first input is the output from the appropriate SDVO 1287 1279 * channel on the motherboard. 
In a two-input device, the first input ··· 1445 1435 1446 1436 ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd); 1447 1437 if (!ret) { 1448 - /* Some sdvo encoders are not spec compliant and don't 1449 - * implement the mandatory get_timings function. */ 1438 + /* 1439 + * Some sdvo encoders are not spec compliant and don't 1440 + * implement the mandatory get_timings function. 1441 + */ 1450 1442 DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n"); 1451 1443 pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS; 1452 1444 } else { ··· 1597 1585 intel_wait_for_vblank(dev_priv, intel_crtc->pipe); 1598 1586 1599 1587 success = intel_sdvo_get_trained_inputs(intel_sdvo, &input1, &input2); 1600 - /* Warn if the device reported failure to sync. 1588 + /* 1589 + * Warn if the device reported failure to sync. 1590 + * 1601 1591 * A lot of SDVO devices fail to notify of sync, but it's 1602 1592 * a given it the status is a success, we succeeded. 1603 1593 */ ··· 1686 1672 if (!I915_HAS_HOTPLUG(dev_priv)) 1687 1673 return 0; 1688 1674 1689 - /* HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise 1690 - * on the line. */ 1675 + /* 1676 + * HW Erratum: SDVO Hotplug is broken on all i945G chips, there's noise 1677 + * on the line. 1678 + */ 1691 1679 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv)) 1692 1680 return 0; 1693 1681 ··· 1973 1957 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", 1974 1958 connector->base.id, connector->name); 1975 1959 1976 - /* Read the list of supported input resolutions for the selected TV 1960 + /* 1961 + * Read the list of supported input resolutions for the selected TV 1977 1962 * format. 1978 1963 */ 1979 1964 format_map = 1 << conn_state->tv.mode; ··· 2285 2268 uint16_t mask = 0; 2286 2269 unsigned int num_bits; 2287 2270 2288 - /* Make a mask of outputs less than or equal to our own priority in the 2271 + /* 2272 + * Make a mask of outputs less than or equal to our own priority in the 2289 2273 * list. 
2290 2274 */ 2291 2275 switch (sdvo->controlled_output) { ··· 2316 2298 sdvo->ddc_bus = 1 << num_bits; 2317 2299 } 2318 2300 2319 - /** 2301 + /* 2320 2302 * Choose the appropriate DDC bus for control bus switch command for this 2321 2303 * SDVO output based on the controlled output. 2322 2304 * ··· 2360 2342 2361 2343 sdvo->i2c = intel_gmbus_get_adapter(dev_priv, pin); 2362 2344 2363 - /* With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow 2345 + /* 2346 + * With gmbus we should be able to drive sdvo i2c at 2MHz, but somehow 2364 2347 * our code totally fails once we start using gmbus. Hence fall back to 2365 - * bit banging for now. */ 2348 + * bit banging for now. 2349 + */ 2366 2350 intel_gmbus_force_bit(sdvo->i2c, true); 2367 2351 } 2368 2352 ··· 2399 2379 if (my_mapping->slave_addr) 2400 2380 return my_mapping->slave_addr; 2401 2381 2402 - /* If the BIOS only described a different SDVO device, use the 2382 + /* 2383 + * If the BIOS only described a different SDVO device, use the 2403 2384 * address that it isn't using. 2404 2385 */ 2405 2386 if (other_mapping->slave_addr) { ··· 2410 2389 return 0x70; 2411 2390 } 2412 2391 2413 - /* No SDVO device info is found for another DVO port, 2392 + /* 2393 + * No SDVO device info is found for another DVO port, 2414 2394 * so use mapping assumption we had before BIOS parsing. 2415 2395 */ 2416 2396 if (sdvo->port == PORT_B) ··· 2512 2490 if (intel_sdvo_get_hotplug_support(intel_sdvo) & 2513 2491 intel_sdvo_connector->output_flag) { 2514 2492 intel_sdvo->hotplug_active |= intel_sdvo_connector->output_flag; 2515 - /* Some SDVO devices have one-shot hotplug interrupts. 2493 + /* 2494 + * Some SDVO devices have one-shot hotplug interrupts. 2516 2495 * Ensure that they get re-enabled when an interrupt happens. 
2517 2496 */ 2518 2497 intel_encoder->hot_plug = intel_sdvo_enable_hotplug; ··· 2812 2789 to_intel_sdvo_connector_state(conn_state); 2813 2790 uint16_t response, data_value[2]; 2814 2791 2815 - /* when horizontal overscan is supported, Add the left/right property */ 2792 + /* when horizontal overscan is supported, Add the left/right property */ 2816 2793 if (enhancements.overscan_h) { 2817 2794 if (!intel_sdvo_get_value(intel_sdvo, 2818 2795 SDVO_CMD_GET_MAX_OVERSCAN_H, ··· 3097 3074 goto err_output; 3098 3075 } 3099 3076 3100 - /* Only enable the hotplug irq if we need it, to work around noisy 3077 + /* 3078 + * Only enable the hotplug irq if we need it, to work around noisy 3101 3079 * hotplug lines. 3102 3080 */ 3103 3081 if (intel_sdvo->hotplug_active) {
+5 -2
drivers/gpu/drm/i915/intel_sprite.c
··· 1063 1063 return 0; 1064 1064 } 1065 1065 1066 - int intel_sprite_set_colorkey(struct drm_device *dev, void *data, 1067 - struct drm_file *file_priv) 1066 + int intel_sprite_set_colorkey_ioctl(struct drm_device *dev, void *data, 1067 + struct drm_file *file_priv) 1068 1068 { 1069 1069 struct drm_i915_private *dev_priv = to_i915(dev); 1070 1070 struct drm_intel_sprite_colorkey *set = data; ··· 1076 1076 1077 1077 /* ignore the pointless "none" flag */ 1078 1078 set->flags &= ~I915_SET_COLORKEY_NONE; 1079 + 1080 + if (set->flags & ~(I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) 1081 + return -EINVAL; 1079 1082 1080 1083 /* Make sure we don't try to enable both src & dest simultaneously */ 1081 1084 if ((set->flags & (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE)) == (I915_SET_COLORKEY_DESTINATION | I915_SET_COLORKEY_SOURCE))
+3 -25
drivers/gpu/drm/i915/intel_tv.c
··· 43 43 TV_MARGIN_RIGHT, TV_MARGIN_BOTTOM 44 44 }; 45 45 46 - /** Private structure for the integrated TV support */ 47 46 struct intel_tv { 48 47 struct intel_encoder base; 49 48 ··· 369 370 * The constants below were all computed using a 107.520MHz clock 370 371 */ 371 372 372 - /** 373 + /* 373 374 * Register programming values for TV modes. 374 375 * 375 376 * These values account for -1s required. 376 377 */ 377 - 378 378 static const struct tv_mode tv_modes[] = { 379 379 { 380 380 .name = "NTSC-M", ··· 1124 1126 }, 1125 1127 }; 1126 1128 1127 - /** 1128 - * Detects TV presence by checking for load. 1129 - * 1130 - * Requires that the current pipe's DPLL is active. 1131 - 1132 - * \return true if TV is connected. 1133 - * \return false if TV is disconnected. 1134 - */ 1135 1129 static int 1136 1130 intel_tv_detect_type(struct intel_tv *intel_tv, 1137 1131 struct drm_connector *connector) ··· 1249 1259 connector->state->tv.mode = i; 1250 1260 } 1251 1261 1252 - /** 1253 - * Detect the TV connection. 1254 - * 1255 - * Currently this always returns CONNECTOR_STATUS_UNKNOWN, as we need to be sure 1256 - * we have a pipe programmed in order to probe the TV. 1257 - */ 1258 1262 static int 1259 1263 intel_tv_detect(struct drm_connector *connector, 1260 1264 struct drm_modeset_acquire_ctx *ctx, ··· 1322 1338 mode_ptr->type |= DRM_MODE_TYPE_PREFERRED; 1323 1339 } 1324 1340 } 1325 - 1326 - /** 1327 - * Stub get_modes function. 1328 - * 1329 - * This should probably return a set of fixed modes, unless we can figure out 1330 - * how to probe modes off of TV connections. 
1331 - */ 1332 1341 1333 1342 static int 1334 1343 intel_tv_get_modes(struct drm_connector *connector) ··· 1489 1512 connector = &intel_connector->base; 1490 1513 state = connector->state; 1491 1514 1492 - /* The documentation, for the older chipsets at least, recommend 1515 + /* 1516 + * The documentation, for the older chipsets at least, recommend 1493 1517 * using a polling method rather than hotplug detection for TVs. 1494 1518 * This is because in order to perform the hotplug detection, the PLLs 1495 1519 * for the TV must be kept alive increasing power drain and starving
+3 -2
drivers/gpu/drm/i915/intel_uc_fw.c
··· 197 197 198 198 /** 199 199 * intel_uc_fw_upload - load uC firmware using custom loader 200 - * 201 200 * @uc_fw: uC firmware 202 - * @loader: custom uC firmware loader function 201 + * @xfer: custom uC firmware loader function 203 202 * 204 203 * Loads uC firmware using custom loader and updates internal flags. 204 + * 205 + * Return: 0 on success, non-zero on failure. 205 206 */ 206 207 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, 207 208 int (*xfer)(struct intel_uc_fw *uc_fw,
+17 -8
drivers/gpu/drm/i915/intel_uncore.c
··· 1522 1522 engine->name); 1523 1523 1524 1524 I915_WRITE_FW(RING_HEAD(base), I915_READ_FW(RING_TAIL(base))); 1525 + POSTING_READ_FW(RING_HEAD(base)); /* paranoia */ 1525 1526 1526 1527 I915_WRITE_FW(RING_HEAD(base), 0); 1527 1528 I915_WRITE_FW(RING_TAIL(base), 0); 1529 + POSTING_READ_FW(RING_TAIL(base)); 1528 1530 1529 1531 /* The ring must be empty before it is disabled */ 1530 1532 I915_WRITE_FW(RING_CTL(base), 0); ··· 1550 1548 gen3_stop_engine(engine); 1551 1549 } 1552 1550 1553 - static bool i915_reset_complete(struct pci_dev *pdev) 1551 + static bool i915_in_reset(struct pci_dev *pdev) 1554 1552 { 1555 1553 u8 gdrst; 1556 1554 1557 1555 pci_read_config_byte(pdev, I915_GDRST, &gdrst); 1558 - return (gdrst & GRDOM_RESET_STATUS) == 0; 1556 + return gdrst & GRDOM_RESET_STATUS; 1559 1557 } 1560 1558 1561 1559 static int i915_do_reset(struct drm_i915_private *dev_priv, unsigned engine_mask) 1562 1560 { 1563 1561 struct pci_dev *pdev = dev_priv->drm.pdev; 1562 + int err; 1564 1563 1565 - /* assert reset for at least 20 usec */ 1564 + /* Assert reset for at least 20 usec, and wait for acknowledgement. */ 1566 1565 pci_write_config_byte(pdev, I915_GDRST, GRDOM_RESET_ENABLE); 1567 1566 usleep_range(50, 200); 1568 - pci_write_config_byte(pdev, I915_GDRST, 0); 1567 + err = wait_for(i915_in_reset(pdev), 500); 1569 1568 1570 - return wait_for(i915_reset_complete(pdev), 500); 1569 + /* Clear the reset request. 
*/ 1570 + pci_write_config_byte(pdev, I915_GDRST, 0); 1571 + usleep_range(50, 200); 1572 + if (!err) 1573 + err = wait_for(!i915_in_reset(pdev), 500); 1574 + 1575 + return err; 1571 1576 } 1572 1577 1573 1578 static bool g4x_reset_complete(struct pci_dev *pdev) ··· 1883 1874 if (!i915_modparams.reset) 1884 1875 return NULL; 1885 1876 1886 - if (INTEL_INFO(dev_priv)->gen >= 8) 1877 + if (INTEL_GEN(dev_priv) >= 8) 1887 1878 return gen8_reset_engines; 1888 - else if (INTEL_INFO(dev_priv)->gen >= 6) 1879 + else if (INTEL_GEN(dev_priv) >= 6) 1889 1880 return gen6_reset_engines; 1890 1881 else if (IS_GEN5(dev_priv)) 1891 1882 return ironlake_do_reset; ··· 1893 1884 return g4x_do_reset; 1894 1885 else if (IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) 1895 1886 return g33_do_reset; 1896 - else if (INTEL_INFO(dev_priv)->gen >= 3) 1887 + else if (INTEL_GEN(dev_priv) >= 3) 1897 1888 return i915_do_reset; 1898 1889 else 1899 1890 return NULL;
+5
drivers/gpu/drm/i915/intel_uncore.h
··· 198 198 2, timeout_ms, NULL); 199 199 } 200 200 201 + #define raw_reg_read(base, reg) \ 202 + readl(base + i915_mmio_reg_offset(reg)) 203 + #define raw_reg_write(base, reg, value) \ 204 + writel(value, base + i915_mmio_reg_offset(reg)) 205 + 201 206 #endif /* !__INTEL_UNCORE_H__ */
+2 -2
drivers/gpu/drm/i915/selftests/huge_gem_object.c
··· 129 129 drm_gem_private_object_init(&i915->drm, &obj->base, dma_size); 130 130 i915_gem_object_init(obj, &huge_ops); 131 131 132 - obj->base.read_domains = I915_GEM_DOMAIN_CPU; 133 - obj->base.write_domain = I915_GEM_DOMAIN_CPU; 132 + obj->read_domains = I915_GEM_DOMAIN_CPU; 133 + obj->write_domain = I915_GEM_DOMAIN_CPU; 134 134 cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE; 135 135 i915_gem_object_set_cache_coherency(obj, cache_level); 136 136 obj->scratch = phys_size;
+4 -4
drivers/gpu/drm/i915/selftests/huge_pages.c
··· 178 178 drm_gem_private_object_init(&i915->drm, &obj->base, size); 179 179 i915_gem_object_init(obj, &huge_page_ops); 180 180 181 - obj->base.write_domain = I915_GEM_DOMAIN_CPU; 182 - obj->base.read_domains = I915_GEM_DOMAIN_CPU; 181 + obj->write_domain = I915_GEM_DOMAIN_CPU; 182 + obj->read_domains = I915_GEM_DOMAIN_CPU; 183 183 obj->cache_level = I915_CACHE_NONE; 184 184 185 185 obj->mm.page_mask = page_mask; ··· 329 329 else 330 330 i915_gem_object_init(obj, &fake_ops); 331 331 332 - obj->base.write_domain = I915_GEM_DOMAIN_CPU; 333 - obj->base.read_domains = I915_GEM_DOMAIN_CPU; 332 + obj->write_domain = I915_GEM_DOMAIN_CPU; 333 + obj->read_domains = I915_GEM_DOMAIN_CPU; 334 334 obj->cache_level = I915_CACHE_NONE; 335 335 336 336 return obj;
+2 -2
drivers/gpu/drm/i915/selftests/i915_gem_context.c
··· 215 215 } 216 216 217 217 i915_gem_obj_finish_shmem_access(obj); 218 - obj->base.read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU; 219 - obj->base.write_domain = 0; 218 + obj->read_domains = I915_GEM_DOMAIN_GTT | I915_GEM_DOMAIN_CPU; 219 + obj->write_domain = 0; 220 220 return 0; 221 221 } 222 222
+3 -3
drivers/gpu/drm/i915/selftests/i915_gem_gtt.c
··· 113 113 drm_gem_private_object_init(&i915->drm, &obj->base, size); 114 114 i915_gem_object_init(obj, &fake_ops); 115 115 116 - obj->base.write_domain = I915_GEM_DOMAIN_CPU; 117 - obj->base.read_domains = I915_GEM_DOMAIN_CPU; 116 + obj->write_domain = I915_GEM_DOMAIN_CPU; 117 + obj->read_domains = I915_GEM_DOMAIN_CPU; 118 118 obj->cache_level = I915_CACHE_NONE; 119 119 120 120 /* Preallocate the "backing storage" */ ··· 927 927 928 928 explode = fake_dma_object(i915, size); 929 929 if (IS_ERR(explode)) { 930 - err = PTR_ERR(purge); 930 + err = PTR_ERR(explode); 931 931 goto err_purge; 932 932 } 933 933
+12 -6
drivers/gpu/drm/i915/selftests/i915_gem_object.c
··· 212 212 return -EINTR; 213 213 214 214 err = i915_gem_object_set_tiling(obj, tile->tiling, tile->stride); 215 - if (err) 215 + if (err) { 216 + pr_err("Failed to set tiling mode=%u, stride=%u, err=%d\n", 217 + tile->tiling, tile->stride, err); 216 218 return err; 219 + } 217 220 218 221 GEM_BUG_ON(i915_gem_object_get_tiling(obj) != tile->tiling); 219 222 GEM_BUG_ON(i915_gem_object_get_stride(obj) != tile->stride); ··· 233 230 GEM_BUG_ON(view.partial.size > nreal); 234 231 235 232 err = i915_gem_object_set_to_gtt_domain(obj, true); 236 - if (err) 233 + if (err) { 234 + pr_err("Failed to flush to GTT write domain; err=%d\n", 235 + err); 237 236 return err; 237 + } 238 238 239 239 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE); 240 240 if (IS_ERR(vma)) { 241 - pr_err("Failed to pin partial view: offset=%lu\n", 242 - page); 241 + pr_err("Failed to pin partial view: offset=%lu; err=%d\n", 242 + page, (int)PTR_ERR(vma)); 243 243 return PTR_ERR(vma); 244 244 } 245 245 ··· 252 246 io = i915_vma_pin_iomap(vma); 253 247 i915_vma_unpin(vma); 254 248 if (IS_ERR(io)) { 255 - pr_err("Failed to iomap partial view: offset=%lu\n", 256 - page); 249 + pr_err("Failed to iomap partial view: offset=%lu; err=%d\n", 250 + page, (int)PTR_ERR(io)); 257 251 return PTR_ERR(io); 258 252 } 259 253
+11 -9
drivers/gpu/drm/i915/selftests/intel_guc.c
··· 87 87 88 88 static bool client_doorbell_in_sync(struct intel_guc_client *client) 89 89 { 90 - return doorbell_ok(client->guc, client->doorbell_id); 90 + return !client || doorbell_ok(client->guc, client->doorbell_id); 91 91 } 92 92 93 93 /* ··· 137 137 goto unlock; 138 138 } 139 139 GEM_BUG_ON(!guc->execbuf_client); 140 - GEM_BUG_ON(!guc->preempt_client); 141 140 142 141 err = validate_client(guc->execbuf_client, 143 142 GUC_CLIENT_PRIORITY_KMD_NORMAL, false); ··· 145 146 goto out; 146 147 } 147 148 148 - err = validate_client(guc->preempt_client, 149 - GUC_CLIENT_PRIORITY_KMD_HIGH, true); 150 - if (err) { 151 - pr_err("preempt client validation failed\n"); 152 - goto out; 149 + if (guc->preempt_client) { 150 + err = validate_client(guc->preempt_client, 151 + GUC_CLIENT_PRIORITY_KMD_HIGH, true); 152 + if (err) { 153 + pr_err("preempt client validation failed\n"); 154 + goto out; 155 + } 153 156 } 154 157 155 158 /* each client should now have reserved a doorbell */ 156 159 if (!has_doorbell(guc->execbuf_client) || 157 - !has_doorbell(guc->preempt_client)) { 160 + (guc->preempt_client && !has_doorbell(guc->preempt_client))) { 158 161 pr_err("guc_clients_create didn't reserve doorbells\n"); 159 162 err = -EINVAL; 160 163 goto out; ··· 225 224 * clients during unload. 226 225 */ 227 226 destroy_doorbell(guc->execbuf_client); 228 - destroy_doorbell(guc->preempt_client); 227 + if (guc->preempt_client) 228 + destroy_doorbell(guc->preempt_client); 229 229 guc_clients_destroy(guc); 230 230 guc_clients_create(guc); 231 231 guc_clients_doorbell_init(guc);
-6
drivers/gpu/drm/i915/selftests/mock_gem_device.c
··· 243 243 if (!i915->kernel_context) 244 244 goto err_engine; 245 245 246 - i915->preempt_context = mock_context(i915, NULL); 247 - if (!i915->preempt_context) 248 - goto err_kernel_context; 249 - 250 246 WARN_ON(i915_gemfs_init(i915)); 251 247 252 248 return i915; 253 249 254 - err_kernel_context: 255 - i915_gem_context_put(i915->kernel_context); 256 250 err_engine: 257 251 for_each_engine(engine, i915, id) 258 252 mock_engine_free(engine);
+1 -1
drivers/gpu/drm/radeon/radeon_display.c
··· 570 570 base &= ~7; 571 571 } 572 572 work->base = base; 573 - work->target_vblank = target - drm_crtc_vblank_count(crtc) + 573 + work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) + 574 574 dev->driver->get_vblank_counter(dev, work->crtc_id); 575 575 576 576 /* We borrow the event spin lock for protecting flip_work */
+1 -1
drivers/gpu/drm/tegra/dc.c
··· 1359 1359 return host1x_syncpt_read(dc->syncpt); 1360 1360 1361 1361 /* fallback to software emulated VBLANK counter */ 1362 - return drm_crtc_vblank_count(&dc->base); 1362 + return (u32)drm_crtc_vblank_count(&dc->base); 1363 1363 } 1364 1364 1365 1365 static int tegra_dc_enable_vblank(struct drm_crtc *crtc)
+1 -1
include/drm/drm_atomic.h
··· 154 154 struct drm_crtc *ptr; 155 155 struct drm_crtc_state *state, *old_state, *new_state; 156 156 s32 __user *out_fence_ptr; 157 - unsigned last_vblank_count; 157 + u64 last_vblank_count; 158 158 }; 159 159 160 160 struct __drm_connnectors_state {
-15
include/drm/drm_gem.h
··· 116 116 int name; 117 117 118 118 /** 119 - * @read_domains: 120 - * 121 - * Read memory domains. These monitor which caches contain read/write data 122 - * related to the object. When transitioning from one set of domains 123 - * to another, the driver is called to ensure that caches are suitably 124 - * flushed and invalidated. 125 - */ 126 - uint32_t read_domains; 127 - 128 - /** 129 - * @write_domain: Corresponding unique write memory domain. 130 - */ 131 - uint32_t write_domain; 132 - 133 - /** 134 119 * @dma_buf: 135 120 * 136 121 * dma-buf associated with this GEM object.
+3 -1
include/drm/drm_vblank.h
··· 195 195 void drm_crtc_vblank_off(struct drm_crtc *crtc); 196 196 void drm_crtc_vblank_reset(struct drm_crtc *crtc); 197 197 void drm_crtc_vblank_on(struct drm_crtc *crtc); 198 - u32 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); 198 + u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc); 199 + void drm_vblank_restore(struct drm_device *dev, unsigned int pipe); 200 + void drm_crtc_vblank_restore(struct drm_crtc *crtc); 199 201 200 202 bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, 201 203 unsigned int pipe, int *max_error,
+8 -7
include/drm/i915_pciids.h
··· 416 416 417 417 /* CNL */ 418 418 #define INTEL_CNL_IDS(info) \ 419 - INTEL_VGA_DEVICE(0x5A52, info), \ 420 - INTEL_VGA_DEVICE(0x5A5A, info), \ 421 - INTEL_VGA_DEVICE(0x5A42, info), \ 422 - INTEL_VGA_DEVICE(0x5A4A, info), \ 423 419 INTEL_VGA_DEVICE(0x5A51, info), \ 424 420 INTEL_VGA_DEVICE(0x5A59, info), \ 425 421 INTEL_VGA_DEVICE(0x5A41, info), \ 426 422 INTEL_VGA_DEVICE(0x5A49, info), \ 427 - INTEL_VGA_DEVICE(0x5A71, info), \ 428 - INTEL_VGA_DEVICE(0x5A79, info), \ 423 + INTEL_VGA_DEVICE(0x5A52, info), \ 424 + INTEL_VGA_DEVICE(0x5A5A, info), \ 425 + INTEL_VGA_DEVICE(0x5A42, info), \ 426 + INTEL_VGA_DEVICE(0x5A4A, info), \ 427 + INTEL_VGA_DEVICE(0x5A50, info), \ 428 + INTEL_VGA_DEVICE(0x5A40, info), \ 429 429 INTEL_VGA_DEVICE(0x5A54, info), \ 430 430 INTEL_VGA_DEVICE(0x5A5C, info), \ 431 - INTEL_VGA_DEVICE(0x5A44, info) 431 + INTEL_VGA_DEVICE(0x5A44, info), \ 432 + INTEL_VGA_DEVICE(0x5A4C, info) 432 433 433 434 #endif /* _I915_PCIIDS_H */