
Merge tag 'drm-intel-next-fixes-2016-12-22' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes

First set of i915 fixes for the code in next.

* tag 'drm-intel-next-fixes-2016-12-22' of git://anongit.freedesktop.org/git/drm-intel:
drm/i915: skip the first 4k of stolen memory on everything >= gen8
drm/i915: Fallback to single PAGE_SIZE segments for DMA remapping
drm/i915: Fix use after free in logical_render_ring_init
drm/i915: disable PSR by default on HSW/BDW
drm/i915: Fix setting of boost freq tunable
drm/i915: tune down the fast link training vs boot fail
drm/i915: Reorder phys backing storage release
drm/i915/gen9: Fix PCODE polling during SAGV disabling
drm/i915/gen9: Fix PCODE polling during CDCLK change notification
drm/i915/dsi: Fix chv_exec_gpio disabling the GPIOs it is setting
drm/i915/dsi: Fix swapping of MIPI_SEQ_DEASSERT_RESET / MIPI_SEQ_ASSERT_RESET
drm/i915/dsi: Do not clear DPOUNIT_CLOCK_GATE_DISABLE from vlv_init_display_clock_gating
drm/i915: drop the struct_mutex when wedged or trying to reset

+186 -95
+2 -0
drivers/gpu/drm/i915/i915_drv.h
···
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+		      u32 reply_mask, u32 reply, int timeout_base_ms);
 
 /* intel_sideband.c */
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
+56 -23
drivers/gpu/drm/i915/i915_gem.c
···
 i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
 	struct address_space *mapping = obj->base.filp->f_mapping;
-	char *vaddr = obj->phys_handle->vaddr;
+	drm_dma_handle_t *phys;
 	struct sg_table *st;
 	struct scatterlist *sg;
+	char *vaddr;
 	int i;
 
 	if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
 		return ERR_PTR(-EINVAL);
 
+	/* Always aligning to the object size, allows a single allocation
+	 * to handle all possible callers, and given typical object sizes,
+	 * the alignment of the buddy allocation will naturally match.
+	 */
+	phys = drm_pci_alloc(obj->base.dev,
+			     obj->base.size,
+			     roundup_pow_of_two(obj->base.size));
+	if (!phys)
+		return ERR_PTR(-ENOMEM);
+
+	vaddr = phys->vaddr;
 	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
 		struct page *page;
 		char *src;
 
 		page = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(page))
-			return ERR_CAST(page);
+		if (IS_ERR(page)) {
+			st = ERR_CAST(page);
+			goto err_phys;
+		}
 
 		src = kmap_atomic(page);
 		memcpy(vaddr, src, PAGE_SIZE);
···
 	i915_gem_chipset_flush(to_i915(obj->base.dev));
 
 	st = kmalloc(sizeof(*st), GFP_KERNEL);
-	if (st == NULL)
-		return ERR_PTR(-ENOMEM);
+	if (!st) {
+		st = ERR_PTR(-ENOMEM);
+		goto err_phys;
+	}
 
 	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
 		kfree(st);
-		return ERR_PTR(-ENOMEM);
+		st = ERR_PTR(-ENOMEM);
+		goto err_phys;
 	}
 
 	sg = st->sgl;
 	sg->offset = 0;
 	sg->length = obj->base.size;
 
-	sg_dma_address(sg) = obj->phys_handle->busaddr;
+	sg_dma_address(sg) = phys->busaddr;
 	sg_dma_len(sg) = obj->base.size;
 
+	obj->phys_handle = phys;
+	return st;
+
+err_phys:
+	drm_pci_free(obj->base.dev, phys);
 	return st;
 }
···
 	sg_free_table(pages);
 	kfree(pages);
+
+	drm_pci_free(obj->base.dev, obj->phys_handle);
 }
 
 static void
 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
 {
-	drm_pci_free(obj->base.dev, obj->phys_handle);
 	i915_gem_object_unpin_pages(obj);
 }
···
 i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 			    int align)
 {
-	drm_dma_handle_t *phys;
 	int ret;
 
-	if (obj->phys_handle) {
-		if ((unsigned long)obj->phys_handle->vaddr & (align -1))
-			return -EBUSY;
+	if (align > obj->base.size)
+		return -EINVAL;
 
+	if (obj->ops == &i915_gem_phys_ops)
 		return 0;
-	}
 
 	if (obj->mm.madv != I915_MADV_WILLNEED)
 		return -EFAULT;
···
 	if (obj->mm.pages)
 		return -EBUSY;
 
-	/* create a new object */
-	phys = drm_pci_alloc(obj->base.dev, obj->base.size, align);
-	if (!phys)
-		return -ENOMEM;
-
-	obj->phys_handle = phys;
 	obj->ops = &i915_gem_phys_ops;
 
 	return i915_gem_object_pin_pages(obj);
···
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
 	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-	int page_count, i;
+	const unsigned long page_count = obj->base.size / PAGE_SIZE;
+	unsigned long i;
 	struct address_space *mapping;
 	struct sg_table *st;
 	struct scatterlist *sg;
···
 	if (st == NULL)
 		return ERR_PTR(-ENOMEM);
 
-	page_count = obj->base.size / PAGE_SIZE;
+rebuild_st:
 	if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
 		kfree(st);
 		return ERR_PTR(-ENOMEM);
···
 	i915_sg_trim(st);
 
 	ret = i915_gem_gtt_prepare_pages(obj, st);
-	if (ret)
-		goto err_pages;
+	if (ret) {
+		/* DMA remapping failed? One possible cause is that
+		 * it could not reserve enough large entries, asking
+		 * for PAGE_SIZE chunks instead may be helpful.
+		 */
+		if (max_segment > PAGE_SIZE) {
+			for_each_sgt_page(page, sgt_iter, st)
+				put_page(page);
+			sg_free_table(st);
+
+			max_segment = PAGE_SIZE;
+			goto rebuild_st;
+		} else {
+			dev_warn(&dev_priv->drm.pdev->dev,
+				 "Failed to DMA remap %lu pages\n",
+				 page_count);
+			goto err_pages;
+		}
+	}
 
 	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj, st);
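The rebuild_st fallback above is the core of the DMA remapping fix: if the IOMMU cannot reserve enough large contiguous entries, the whole scatterlist is rebuilt with single-page segments before giving up. A minimal, hedged sketch of that pattern, where build_sg_table(), map_sg_table() and free_sg_table() are hypothetical stand-ins for the shmem/GTT helpers, not real i915 or DRM API:

#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>

/* Hedged sketch of the retry-with-smaller-segments pattern, assuming
 * hypothetical helpers build_sg_table()/map_sg_table()/free_sg_table().
 */
static struct sg_table *get_pages_with_fallback(struct device *dev,
						unsigned long page_count)
{
	unsigned int max_segment = SZ_64K; /* start with coalesced segments */
	struct sg_table *st;
	int ret;

rebuild:
	st = build_sg_table(page_count, max_segment);
	if (IS_ERR(st))
		return st;

	ret = map_sg_table(dev, st); /* the step that can fail to remap */
	if (ret) {
		free_sg_table(st);
		if (max_segment > PAGE_SIZE) {
			/* The IOMMU may lack contiguous space for large
			 * entries; retry with PAGE_SIZE chunks.
			 */
			max_segment = PAGE_SIZE;
			goto rebuild;
		}
		return ERR_PTR(ret);
	}

	return st;
}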
+2 -3
drivers/gpu/drm/i915/i915_gem_stolen.c
···
 		return -ENODEV;
 
 	/* See the comment at the drm_mm_init() call for more about this check.
-	 * WaSkipStolenMemoryFirstPage:bdw,chv,kbl (incomplete)
+	 * WaSkipStolenMemoryFirstPage:bdw+ (incomplete)
 	 */
-	if (start < 4096 && (IS_GEN8(dev_priv) ||
-			     IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0)))
+	if (start < 4096 && INTEL_GEN(dev_priv) >= 8)
 		start = 4096;
 
 	mutex_lock(&dev_priv->mm.stolen_lock);
+1 -1
drivers/gpu/drm/i915/i915_sysfs.c
···
 
 static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
 static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
-static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
+static DEVICE_ATTR(gt_boost_freq_mhz, S_IRUGO | S_IWUSR, gt_boost_freq_mhz_show, gt_boost_freq_mhz_store);
 static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
 static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);
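The one-liner above fixes a classic sysfs mistake: gt_boost_freq_mhz had a store() callback wired up, but its mode lacked a write bit, so userspace writes were rejected before the callback could ever run. A hedged, generic illustration of the rule — the mode bits, not the presence of store(), decide writability — using hypothetical foo_show()/foo_store() callbacks, not i915 code:

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "%d\n", 42);
}

static ssize_t foo_store(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	return count; /* accept and discard, for illustration only */
}

/* With S_IRUGO alone the attribute is read-only and foo_store() is dead
 * code; S_IRUGO | S_IWUSR lets root actually write it.
 */
static DEVICE_ATTR(foo, S_IRUGO | S_IWUSR, foo_show, foo_store);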
+9 -3
drivers/gpu/drm/i915/intel_bios.h
···
 	u16 t11_t12;
 } __packed;
 
-/* MIPI Sequence Block definitions */
+/*
+ * MIPI Sequence Block definitions
+ *
+ * Note the VBT spec has AssertReset / DeassertReset swapped from their
+ * usual naming, we use the proper names here to avoid confusion when
+ * reading the code.
+ */
 enum mipi_seq {
 	MIPI_SEQ_END = 0,
-	MIPI_SEQ_ASSERT_RESET,
+	MIPI_SEQ_DEASSERT_RESET,	/* Spec says MipiAssertResetPin */
 	MIPI_SEQ_INIT_OTP,
 	MIPI_SEQ_DISPLAY_ON,
 	MIPI_SEQ_DISPLAY_OFF,
-	MIPI_SEQ_DEASSERT_RESET,
+	MIPI_SEQ_ASSERT_RESET,		/* Spec says MipiDeassertResetPin */
 	MIPI_SEQ_BACKLIGHT_ON,		/* sequence block v2+ */
 	MIPI_SEQ_BACKLIGHT_OFF,		/* sequence block v2+ */
 	MIPI_SEQ_TEAR_ON,		/* sequence block v2+ */
+10 -21
drivers/gpu/drm/i915/intel_display.c
···
 	dev_priv->cdclk_pll.vco = 0;
 }
 
-static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
-{
-	int ret;
-	u32 val;
-
-	/* inform PCU we want to change CDCLK */
-	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
-	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-
-	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
-}
-
-static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
-{
-	return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
-}
-
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 {
 	u32 freq_select, pcu_ack;
+	int ret;
 
 	WARN_ON((cdclk == 24000) != (vco == 0));
 
 	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
 
-	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
-		DRM_ERROR("failed to inform PCU about cdclk change\n");
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+				SKL_CDCLK_PREPARE_FOR_CHANGE,
+				SKL_CDCLK_READY_FOR_CHANGE,
+				SKL_CDCLK_READY_FOR_CHANGE, 3);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	if (ret) {
+		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+			  ret);
 		return;
 	}
+2 -2
drivers/gpu/drm/i915/intel_dp.c
···
 		return;
 
 	/* FIXME: we need to synchronize this sort of stuff with hardware
-	 * readout */
-	if (WARN_ON_ONCE(!intel_dp->lane_count))
+	 * readout. Currently fast link training doesn't work on boot-up. */
+	if (!intel_dp->lane_count)
 		return;
 
 	/* if link training is requested we should perform it always */
+4 -3
drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
···
 	mutex_lock(&dev_priv->sb_lock);
 	vlv_iosf_sb_write(dev_priv, port, cfg1, 0);
 	vlv_iosf_sb_write(dev_priv, port, cfg0,
-			  CHV_GPIO_GPIOCFG_GPO | CHV_GPIO_GPIOTXSTATE(value));
+			  CHV_GPIO_GPIOEN | CHV_GPIO_GPIOCFG_GPO |
+			  CHV_GPIO_GPIOTXSTATE(value));
 	mutex_unlock(&dev_priv->sb_lock);
 }
···
  */
 
 static const char * const seq_name[] = {
-	[MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
+	[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
 	[MIPI_SEQ_INIT_OTP] = "MIPI_SEQ_INIT_OTP",
 	[MIPI_SEQ_DISPLAY_ON] = "MIPI_SEQ_DISPLAY_ON",
 	[MIPI_SEQ_DISPLAY_OFF] = "MIPI_SEQ_DISPLAY_OFF",
-	[MIPI_SEQ_DEASSERT_RESET] = "MIPI_SEQ_DEASSERT_RESET",
+	[MIPI_SEQ_ASSERT_RESET] = "MIPI_SEQ_ASSERT_RESET",
 	[MIPI_SEQ_BACKLIGHT_ON] = "MIPI_SEQ_BACKLIGHT_ON",
 	[MIPI_SEQ_BACKLIGHT_OFF] = "MIPI_SEQ_BACKLIGHT_OFF",
 	[MIPI_SEQ_TEAR_ON] = "MIPI_SEQ_TEAR_ON",
+1 -6
drivers/gpu/drm/i915/intel_lrc.c
···
 			  ret);
 	}
 
-	ret = logical_ring_init(engine);
-	if (ret) {
-		lrc_destroy_wa_ctx_obj(engine);
-	}
-
-	return ret;
+	return logical_ring_init(engine);
 }
 
 int logical_xcs_ring_init(struct intel_engine_cs *engine)
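The diff above is tiny but the bug class is common: logical_ring_init() already tears down (and frees) the engine on failure, so the caller's extra lrc_destroy_wa_ctx_obj(engine) ran on freed memory. A hedged sketch of the general pattern, with all widget_* names being hypothetical, not i915 code:

#include <linux/slab.h>

struct widget {
	void *resource;
};

/* On failure this helper owns full cleanup, including freeing @w. */
static int widget_init(struct widget *w)
{
	w->resource = kmalloc(64, GFP_KERNEL);
	if (!w->resource) {
		kfree(w);
		return -ENOMEM;
	}
	return 0;
}

static int widget_setup(struct widget *w)
{
	int ret = widget_init(w);

	if (ret)
		kfree(w->resource);	/* BUG: w was already freed above */
	return ret;
}

/* The fix mirrors the diff: trust the helper's error path and simply
 * return its result:
 *
 *	static int widget_setup(struct widget *w)
 *	{
 *		return widget_init(w);
 *	}
 */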
+84 -25
drivers/gpu/drm/i915/intel_pm.c
···
 	return 0;
 }
 
-static int
-intel_do_sagv_disable(struct drm_i915_private *dev_priv)
-{
-	int ret;
-	uint32_t temp = GEN9_SAGV_DISABLE;
-
-	ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
-				     &temp);
-	if (ret)
-		return ret;
-	else
-		return temp & GEN9_SAGV_IS_DISABLED;
-}
-
 int
 intel_disable_sagv(struct drm_i915_private *dev_priv)
 {
-	int ret, result;
+	int ret;
 
 	if (!intel_has_sagv(dev_priv))
 		return 0;
···
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	/* bspec says to keep retrying for at least 1 ms */
-	ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
+	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+				GEN9_SAGV_DISABLE,
+				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+				1);
 	mutex_unlock(&dev_priv->rps.hw_lock);
-
-	if (ret == -ETIMEDOUT) {
-		DRM_ERROR("Request to disable SAGV timed out\n");
-		return -ETIMEDOUT;
-	}
 
 	/*
 	 * Some skl systems, pre-release machines in particular,
 	 * don't actually have an SAGV.
 	 */
-	if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
+	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
 		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
 		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
 		return 0;
-	} else if (result < 0) {
-		DRM_ERROR("Failed to disable the SAGV\n");
-		return result;
+	} else if (ret < 0) {
+		DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+		return ret;
 	}
 
 	dev_priv->sagv_status = I915_SAGV_DISABLED;
···
 	}
 
 	return 0;
+}
+
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+				  u32 request, u32 reply_mask, u32 reply,
+				  u32 *status)
+{
+	u32 val = request;
+
+	*status = sandybridge_pcode_read(dev_priv, mbox, &val);
+
+	return *status || ((val & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @dev_priv: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and if this times out for another 10 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+		      u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+	u32 status;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
+				   &status)
+
+	/*
+	 * Prime the PCODE by doing a request first. Normally it guarantees
+	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
+	 * _wait_for() doesn't guarantee when its passed condition is evaluated
+	 * first, so send the first request explicitly.
+	 */
+	if (COND) {
+		ret = 0;
+		goto out;
+	}
+	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+	if (!ret)
+		goto out;
+
+	/*
+	 * The above can time out if the number of requests was low (2 in the
+	 * worst case) _and_ PCODE was busy for some reason even after a
+	 * (queued) request and @timeout_base_ms delay. As a workaround retry
+	 * the poll with preemption disabled to maximize the number of
+	 * requests. Increase the timeout from @timeout_base_ms to 10ms to
+	 * account for interrupts that could reduce the number of these
+	 * requests.
+	 */
+	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+	WARN_ON_ONCE(timeout_base_ms > 3);
+	preempt_disable();
+	ret = wait_for_atomic(COND, 10);
+	preempt_enable();
+
+out:
+	return ret ? ret : status;
+#undef COND
 }
 
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
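Both call sites in this pull (SAGV disable and the CDCLK change notification) follow the same shape: take rps.hw_lock, express the acknowledgment as a mask/value pair, and pass a small base timeout. A hedged template for a future caller inside i915, where FOO_MBOX, FOO_REQUEST and FOO_READY are hypothetical placeholders, not real mailbox IDs or bits:

/* Hedged sketch of a skl_pcode_request() call site; the FOO_* names are
 * hypothetical placeholders.
 */
static int foo_wait_for_pcode_ready(struct drm_i915_private *dev_priv)
{
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);
	/* Resend FOO_REQUEST until (reply & FOO_READY) == FOO_READY,
	 * for up to 3 ms (plus the 10 ms preemption-disabled retry).
	 */
	ret = skl_pcode_request(dev_priv, FOO_MBOX, FOO_REQUEST,
				FOO_READY, FOO_READY, 3);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret;
}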
+3 -7
drivers/gpu/drm/i915/intel_psr.c
···
 	dev_priv->psr_mmio_base = IS_HASWELL(dev_priv) ?
 			HSW_EDP_PSR_BASE : BDW_EDP_PSR_BASE;
 
-	/* Per platform default */
-	if (i915.enable_psr == -1) {
-		if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
-			i915.enable_psr = 1;
-		else
-			i915.enable_psr = 0;
-	}
+	/* Per platform default: all disabled. */
+	if (i915.enable_psr == -1)
+		i915.enable_psr = 0;
 
 	/* Set link_standby x link_off defaults */
 	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
+12 -1
drivers/gpu/drm/i915/intel_runtime_pm.c
···
 
 static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
 {
-	I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE);
+	u32 val;
+
+	/*
+	 * On driver load, a pipe may be active and driving a DSI display.
+	 * Preserve DPOUNIT_CLOCK_GATE_DISABLE to avoid the pipe getting stuck
+	 * (and never recovering) in this case. intel_dsi_post_disable() will
+	 * clear it when we turn off the display.
+	 */
+	val = I915_READ(DSPCLK_GATE_D);
+	val &= DPOUNIT_CLOCK_GATE_DISABLE;
+	val |= VRHUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, val);
 
 	/*
 	 * Disable trickle feed and enable pnd deadline calculation
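The replacement above is a read-modify-write that deliberately keeps one bit owned by another code path, instead of the old blind register write. As a hedged generic form of the same pattern, where rmw_preserve() is a hypothetical helper, not part of i915:

#include <linux/io.h>

/* Hedged sketch of the preserve-then-set pattern: keep only the bits
 * owned elsewhere (@keep), then apply this code path's own bits (@set).
 */
static void rmw_preserve(void __iomem *reg, u32 keep, u32 set)
{
	u32 val = readl(reg);

	val &= keep;	/* e.g. DPOUNIT_CLOCK_GATE_DISABLE survives as-is */
	val |= set;	/* e.g. VRHUNIT_CLOCK_GATE_DISABLE gets enabled */
	writel(val, reg);
}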