Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
"Xmas fixes pull:

core:
one atomic fix, revert the WARN_ON dumb buffers patch.

agp:
fixup Dave J.

nouveau:
fix 3.18 regression for old userspace

tegra fixes:
vblank and iommu fixes

amdkfd:
fix bugs shown by testing with userspace, init apertures once

msm:
hdmi fixes and cleanup

i915:
misc fixes

There is also a link ordering fix that I've asked to be cc'ed to you,
putting iommu before gpu, it fixes an issue with amdkfd when things
are all in the kernel, but I didn't like sending it via my tree
without discussion.

I'll probably be a bit on/off for a few weeks with pulls now, due to
holidays and LCA, so don't be surprised if stuff gets a bit backed up,
and things end up a bit large due to lag"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux: (28 commits)
Revert "drm/gem: Warn on illegal use of the dumb buffer interface v2"
agp: Fix up email address & attributions in AGP MODULE_AUTHOR tags
nouveau: bring back legacy mmap handler
drm/msm/hdmi: rework HDMI IRQ handler
drm/msm/hdmi: enable regulators before clocks to avoid warnings
drm/msm/mdp5: update irqs on crtc<->encoder link change
drm/msm: block incoming update on pending updates
drm/atomic: fix potential null ptr on plane enable
drm/msm: Deletion of unnecessary checks before the function call "release_firmware"
drm/msm: Deletion of unnecessary checks before two function calls
drm/tegra: dc: Select root window for event dispatch
drm/tegra: gem: Use the proper size for GEM objects
drm/tegra: gem: Flush buffer objects upon allocation
drm/tegra: dc: Fix a potential race on page-flip completion
drm/tegra: dc: Consistently use the same pipe
drm/irq: Add drm_crtc_vblank_count()
drm/irq: Add drm_crtc_handle_vblank()
drm/irq: Add drm_crtc_send_vblank_event()
drm/i915: Disable PSMI sleep messages on all rings around context switches
drm/i915: Force the CS stall for invalidate flushes
...

+449 -220
+1 -1
drivers/char/agp/ali-agp.c
@@ -417,5 +417,5 @@
 module_init(agp_ali_init);
 module_exit(agp_ali_cleanup);

-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
 MODULE_LICENSE("GPL and additional rights");
+1 -1
drivers/char/agp/amd64-agp.c
@@ -813,6 +813,6 @@
 module_init(agp_amd64_mod_init);
 module_exit(agp_amd64_cleanup);

-MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
+MODULE_AUTHOR("Dave Jones, Andi Kleen");
 module_param(agp_try_unsupported, bool, 0);
 MODULE_LICENSE("GPL");
+1 -1
drivers/char/agp/ati-agp.c
@@ -579,5 +579,5 @@
 module_init(agp_ati_init);
 module_exit(agp_ati_cleanup);

-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
 MODULE_LICENSE("GPL and additional rights");
+1 -1
drivers/char/agp/backend.c
@@ -356,7 +356,7 @@
 __setup("agp=", agp_setup);
 #endif

-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
 MODULE_DESCRIPTION("AGP GART driver");
 MODULE_LICENSE("GPL and additional rights");
 MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
+1 -1
drivers/char/agp/intel-agp.c
@@ -920,5 +920,5 @@
 module_init(agp_intel_init);
 module_exit(agp_intel_cleanup);

-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
 MODULE_LICENSE("GPL and additional rights");
+1 -1
drivers/char/agp/intel-gtt.c
@@ -1438,5 +1438,5 @@
 }
 EXPORT_SYMBOL(intel_gmch_remove);

-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
 MODULE_LICENSE("GPL and additional rights");
+1 -1
drivers/char/agp/nvidia-agp.c
@@ -1,7 +1,7 @@
 /*
  * Nvidia AGPGART routines.
  * Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
- * to work in 2.5 by Dave Jones <davej@redhat.com>
+ * to work in 2.5 by Dave Jones.
  */

 #include <linux/module.h>
+1 -1
drivers/char/agp/via-agp.c
@@ -595,4 +595,4 @@
 module_exit(agp_via_cleanup);

 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
-4
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -121,12 +121,8 @@
         if (IS_ERR(process))
                 return PTR_ERR(process);

-        process->is_32bit_user_mode = is_32bit_user_mode;
-
         dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
                 process->pasid, process->is_32bit_user_mode);
-
-        kfd_init_apertures(process);

         return 0;
 }
+2 -4
drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -299,13 +299,13 @@
         struct kfd_dev *dev;
         struct kfd_process_device *pdd;

-        mutex_lock(&process->mutex);
-
         /*Iterating over all devices*/
         while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
                 id < NUM_OF_SUPPORTED_GPUS) {

                 pdd = kfd_get_process_device_data(dev, process, 1);
+                if (!pdd)
+                        return -1;

                 /*
                  * For 64 bit process aperture will be statically reserved in
@@ -347,8 +347,6 @@

                 id++;
         }
-
-        mutex_unlock(&process->mutex);

         return 0;
 }
+9
drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -26,6 +26,8 @@
 #include <linux/slab.h>
 #include <linux/amd-iommu.h>
 #include <linux/notifier.h>
+#include <linux/compat.h>
+
 struct mm_struct;

 #include "kfd_priv.h"
@@ -285,8 +287,15 @@
         if (err != 0)
                 goto err_process_pqm_init;

+        /* init process apertures*/
+        process->is_32bit_user_mode = is_compat_task();
+        if (kfd_init_apertures(process) != 0)
+                goto err_init_apretures;
+
         return process;

+err_init_apretures:
+        pqm_uninit(&process->pqm);
 err_process_pqm_init:
         hash_del_rcu(&process->kfd_processes);
         synchronize_rcu();
+6 -2
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -700,8 +700,6 @@
                         dev->node_props.simd_per_cu);
         sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
                         dev->node_props.max_slots_scratch_cu);
-        sysfs_show_32bit_prop(buffer, "engine_id",
-                        dev->node_props.engine_id);
         sysfs_show_32bit_prop(buffer, "vendor_id",
                         dev->node_props.vendor_id);
         sysfs_show_32bit_prop(buffer, "device_id",
@@ -713,6 +715,12 @@
                                 dev->gpu->kgd));
                 sysfs_show_64bit_prop(buffer, "local_mem_size",
                                 kfd2kgd->get_vmem_size(dev->gpu->kgd));
+
+                sysfs_show_32bit_prop(buffer, "fw_version",
+                                kfd2kgd->get_fw_version(
+                                                dev->gpu->kgd,
+                                                KGD_ENGINE_MEC1));
+
         }

         ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
+15
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -45,6 +45,17 @@
         KGD_POOL_FRAMEBUFFER = 3,
 };

+enum kgd_engine_type {
+        KGD_ENGINE_PFP = 1,
+        KGD_ENGINE_ME,
+        KGD_ENGINE_CE,
+        KGD_ENGINE_MEC1,
+        KGD_ENGINE_MEC2,
+        KGD_ENGINE_RLC,
+        KGD_ENGINE_SDMA,
+        KGD_ENGINE_MAX
+};
+
 struct kgd2kfd_shared_resources {
         /* Bit n == 1 means VMID n is available for KFD. */
         unsigned int compute_vmid_bitmap;
@@ -137,6 +148,8 @@
  *
  * @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
  *
+ * @get_fw_version: Returns FW versions from the header
+ *
  * This structure contains function pointers to services that the kgd driver
  * provides to amdkfd driver.
  *
@@ -176,6 +189,8 @@
         int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
                                 unsigned int timeout, uint32_t pipe_id,
                                 uint32_t queue_id);
+        uint16_t (*get_fw_version)(struct kgd_dev *kgd,
+                                enum kgd_engine_type type);
 };

 bool kgd2kfd_init(unsigned interface_version,
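Taken together, the new enum and callback let amdkfd ask the underlying kgd driver for a microcode version without knowing anything hardware-specific. A minimal sketch of a call through the interface, reusing the names from the kfd_topology.c hunk above (the wrapper function itself is hypothetical, not part of this patch):

/* Sketch: report the MEC1 firmware version via the kgd->kfd interface.
 * 'kfd2kgd' is the struct kfd2kgd_calls table held by amdkfd; per the
 * radeon implementation further down, only 12 bits are meaningful.
 */
static uint16_t example_mec_fw_version(struct kgd_dev *kgd)
{
        return kfd2kgd->get_fw_version(kgd, KGD_ENGINE_MEC1);
}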
+1 -1
drivers/gpu/drm/drm_atomic_helper.c
@@ -61,7 +61,7 @@
         struct drm_crtc_state *crtc_state;

         if (plane->state->crtc) {
-                crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
+                crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];

                 if (WARN_ON(!crtc_state))
                         return;
+60
drivers/gpu/drm/drm_irq.c
@@ -830,6 +830,8 @@
  * vblank events since the system was booted, including lost events due to
  * modesetting activity.
  *
+ * This is the legacy version of drm_crtc_vblank_count().
+ *
  * Returns:
  * The software vblank counter.
  */
@@ -842,6 +844,25 @@
         return atomic_read(&vblank->count);
 }
 EXPORT_SYMBOL(drm_vblank_count);
+
+/**
+ * drm_crtc_vblank_count - retrieve "cooked" vblank counter value
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ *
+ * This is the native KMS version of drm_vblank_count().
+ *
+ * Returns:
+ * The software vblank counter.
+ */
+u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
+{
+        return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_count);

 /**
  * drm_vblank_count_and_time - retrieve "cooked" vblank counter value
@@ -904,7 +925,9 @@
  *
  * Updates sequence # and timestamp on event, and sends it to userspace.
  * Caller must hold event lock.
+ *
+ * This is the legacy version of drm_crtc_send_vblank_event().
  */
 void drm_send_vblank_event(struct drm_device *dev, int crtc,
                            struct drm_pending_vblank_event *e)
@@ -921,6 +944,23 @@
         send_vblank_event(dev, e, seq, &now);
 }
 EXPORT_SYMBOL(drm_send_vblank_event);
+
+/**
+ * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ *
+ * This is the native KMS version of drm_send_vblank_event().
+ */
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+                                struct drm_pending_vblank_event *e)
+{
+        drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_send_vblank_event);

 /**
  * drm_vblank_enable - enable the vblank interrupt on a CRTC
@@ -1594,6 +1634,8 @@
  *
  * Drivers should call this routine in their vblank interrupt handlers to
  * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the legacy version of drm_crtc_handle_vblank().
  */
 bool drm_handle_vblank(struct drm_device *dev, int crtc)
 {
@@ -1670,3 +1712,21 @@
         return true;
 }
 EXPORT_SYMBOL(drm_handle_vblank);
+
+/**
+ * drm_crtc_handle_vblank - handle a vblank event
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the native KMS version of drm_handle_vblank().
+ *
+ * Returns:
+ * True if the event was successfully handled, false on failure.
+ */
+bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
+{
+        return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_handle_vblank);
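The three new drm_crtc_* helpers are thin wrappers that let a KMS driver pass its struct drm_crtc directly instead of a (dev, pipe) pair. A minimal sketch of how a driver's vblank interrupt path might use them; my_crtc, its event field, and the handler name are illustrative only (the tegra/dc.c hunks below do essentially this):

#include <linux/irqreturn.h>
#include <drm/drmP.h>

/* Hypothetical driver CRTC wrapper. */
struct my_crtc {
        struct drm_crtc base;
        struct drm_pending_vblank_event *event;  /* pending page flip */
};

static irqreturn_t my_vblank_irq(int irq, void *arg)
{
        struct my_crtc *mc = arg;
        unsigned long flags;

        /* bump the software vblank counter and wake vblank waiters */
        drm_crtc_handle_vblank(&mc->base);

        /* complete a pending page flip; event_lock must be held for
         * drm_crtc_send_vblank_event() per its kerneldoc above */
        spin_lock_irqsave(&mc->base.dev->event_lock, flags);
        if (mc->event) {
                drm_crtc_send_vblank_event(&mc->base, mc->event);
                drm_crtc_vblank_put(&mc->base);
                mc->event = NULL;
        }
        spin_unlock_irqrestore(&mc->base.dev->event_lock, flags);

        return IRQ_HANDLED;
}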
+4 -2
drivers/gpu/drm/i915/i915_drv.c
@@ -811,6 +811,8 @@
         if (!i915.reset)
                 return 0;

+        intel_reset_gt_powersave(dev);
+
         mutex_lock(&dev->struct_mutex);

         i915_gem_reset(dev);
@@ -880,7 +882,7 @@
                  * of re-init after reset.
                  */
                 if (INTEL_INFO(dev)->gen > 5)
-                        intel_reset_gt_powersave(dev);
+                        intel_enable_gt_powersave(dev);
         } else {
                 mutex_unlock(&dev->struct_mutex);
         }
@@ -1584,7 +1586,7 @@
         .gem_prime_import = i915_gem_prime_import,

         .dumb_create = i915_gem_dumb_create,
-        .dumb_map_offset = i915_gem_dumb_map_offset,
+        .dumb_map_offset = i915_gem_mmap_gtt,
         .dumb_destroy = drm_gem_dumb_destroy,
         .ioctls = i915_ioctls,
         .fops = &i915_driver_fops,
+2 -3
drivers/gpu/drm/i915/i915_drv.h
@@ -2501,9 +2501,8 @@
 int i915_gem_dumb_create(struct drm_file *file_priv,
                          struct drm_device *dev,
                          struct drm_mode_create_dumb *args);
-int i915_gem_dumb_map_offset(struct drm_file *file_priv,
-                             struct drm_device *dev, uint32_t handle,
-                             uint64_t *offset);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+                      uint32_t handle, uint64_t *offset);
 /**
  * Returns true if seq1 is later than seq2.
  */
+5 -23
drivers/gpu/drm/i915/i915_gem.c
@@ -401,7 +401,6 @@
 i915_gem_create(struct drm_file *file,
                 struct drm_device *dev,
                 uint64_t size,
-                bool dumb,
                 uint32_t *handle_p)
 {
         struct drm_i915_gem_object *obj;
@@ -417,7 +416,6 @@
         if (obj == NULL)
                 return -ENOMEM;

-        obj->base.dumb = dumb;
         ret = drm_gem_handle_create(file, &obj->base, &handle);
         /* drop reference from allocate - handle holds it now */
         drm_gem_object_unreference_unlocked(&obj->base);
@@ -437,5 +435,5 @@
         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
         args->size = args->pitch * args->height;
         return i915_gem_create(file, dev,
-                               args->size, true, &args->handle);
+                               args->size, &args->handle);
 }
@@ -450,5 +448,5 @@
         struct drm_i915_gem_create *args = data;

         return i915_gem_create(file, dev,
-                               args->size, false, &args->handle);
+                               args->size, &args->handle);
 }
@@ -1840,10 +1838,10 @@
         drm_gem_free_mmap_offset(&obj->base);
 }

-static int
+int
 i915_gem_mmap_gtt(struct drm_file *file,
                   struct drm_device *dev,
-                  uint32_t handle, bool dumb,
+                  uint32_t handle,
                   uint64_t *offset)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1859,13 +1857,6 @@
                 ret = -ENOENT;
                 goto unlock;
         }
-
-        /*
-         * We don't allow dumb mmaps on objects created using another
-         * interface.
-         */
-        WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
-                  "Illegal dumb map of accelerated buffer.\n");

         if (obj->base.size > dev_priv->gtt.mappable_end) {
                 ret = -E2BIG;
@@ -1891,15 +1882,6 @@
         return ret;
 }

-int
-i915_gem_dumb_map_offset(struct drm_file *file,
-                         struct drm_device *dev,
-                         uint32_t handle,
-                         uint64_t *offset)
-{
-        return i915_gem_mmap_gtt(file, dev, handle, true, offset);
-}
-
 /**
  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  * @dev: DRM device
@@ -1921,7 +1903,7 @@
 {
         struct drm_i915_gem_mmap_gtt *args = data;

-        return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
+        return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
 }

 static inline int
+40 -8
drivers/gpu/drm/i915/i915_gem_context.c
@@ -473,7 +473,12 @@
                    u32 hw_flags)
 {
         u32 flags = hw_flags | MI_MM_SPACE_GTT;
-        int ret;
+        const int num_rings =
+                /* Use an extended w/a on ivb+ if signalling from other rings */
+                i915_semaphore_is_enabled(ring->dev) ?
+                hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+                0;
+        int len, i, ret;

         /* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
          * invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -490,10 +495,26 @@
         if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
                 flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);

-        ret = intel_ring_begin(ring, 6);
+
+        len = 4;
+        if (INTEL_INFO(ring->dev)->gen >= 7)
+                len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+
+        ret = intel_ring_begin(ring, len);
         if (ret)
                 return ret;

         /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
-        if (INTEL_INFO(ring->dev)->gen >= 7)
+        if (INTEL_INFO(ring->dev)->gen >= 7) {
                 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
-        else
-                intel_ring_emit(ring, MI_NOOP);
+                if (num_rings) {
+                        struct intel_engine_cs *signaller;
+
+                        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+                        for_each_ring(signaller, to_i915(ring->dev), i) {
+                                if (signaller == ring)
+                                        continue;
+
+                                intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+                                intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+                        }
+                }
+        }

         intel_ring_emit(ring, MI_NOOP);
         intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -510,10 +531,22 @@
          */
         intel_ring_emit(ring, MI_NOOP);

-        if (INTEL_INFO(ring->dev)->gen >= 7)
+        if (INTEL_INFO(ring->dev)->gen >= 7) {
+                if (num_rings) {
+                        struct intel_engine_cs *signaller;
+
+                        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+                        for_each_ring(signaller, to_i915(ring->dev), i) {
+                                if (signaller == ring)
+                                        continue;
+
+                                intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+                                intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+                        }
+                }
                 intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
-        else
-                intel_ring_emit(ring, MI_NOOP);
+        }

         intel_ring_advance(ring);
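The ring-begin length arithmetic here is easiest to verify by counting dwords; a worked count of the gen7+ case as a reading aid (this annotation is not part of the patch):

/*
 * len = 4 + 2 + (num_rings ? 4*num_rings + 2 : 0) on gen7+:
 *
 *   4             MI_NOOP, MI_SET_CONTEXT, context descriptor, MI_NOOP
 * + 2             MI_ARB_ON_OFF before and after the context switch
 * + 2             one MI_LOAD_REGISTER_IMM header per workaround block
 * + 4*num_rings   a (RING_PSMI_CTL, mask) register/value pair for every
 *                 other ring, in both the disable and re-enable blocks
 *
 * i.e. each of the two workaround blocks emits 1 (ARB) + 1 (LRI header)
 * + 2*num_rings dwords on top of the base 4.
 */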
-3
drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -121,9 +121,6 @@
                         goto err;
                 }

-                WARN_ONCE(obj->base.dumb,
-                          "GPU use of dumb buffer is illegal.\n");
-
                 drm_gem_object_reference(&obj->base);
                 list_add_tail(&obj->obj_exec_link, &objects);
         }
+14 -4
drivers/gpu/drm/i915/i915_irq.c
@@ -281,8 +281,12 @@
         struct drm_i915_private *dev_priv = dev->dev_private;

         spin_lock_irq(&dev_priv->irq_lock);
+
         WARN_ON(dev_priv->rps.pm_iir);
         WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
         dev_priv->rps.interrupts_enabled = true;
+        I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
+                   dev_priv->pm_rps_events);
         gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+
         spin_unlock_irq(&dev_priv->irq_lock);
@@ -3307,7 +3311,9 @@
         GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);

         if (INTEL_INFO(dev)->gen >= 6) {
-                pm_irqs |= dev_priv->pm_rps_events;
-
+                /*
+                 * RPS interrupts will get enabled/disabled on demand when RPS
+                 * itself is enabled/disabled.
+                 */
                 if (HAS_VEBOX(dev))
                         pm_irqs |= PM_VEBOX_USER_INTERRUPT;
@@ -3520,7 +3526,11 @@
         dev_priv->pm_irq_mask = 0xffffffff;
         GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
         GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
-        GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
+        /*
+         * RPS interrupts will get enabled/disabled on demand when RPS itself
+         * is enabled/disabled.
+         */
+        GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
         GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
 }
@@ -3609,7 +3619,7 @@

         vlv_display_irq_reset(dev_priv);

-        dev_priv->irq_mask = 0;
+        dev_priv->irq_mask = ~0;
 }

 static void valleyview_irq_uninstall(struct drm_device *dev)
+3
drivers/gpu/drm/i915/i915_reg.h
@@ -395,6 +395,7 @@
 #define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
 #define PIPE_CONTROL_CS_STALL (1<<20)
 #define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
+#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
 #define PIPE_CONTROL_QW_WRITE (1<<14)
 #define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
 #define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -1128,6 +1129,7 @@
 #define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
 #define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
 #define GEN6_NOSYNC 0
+#define RING_PSMI_CTL(base) ((base)+0x50)
 #define RING_MAX_IDLE(base) ((base)+0x54)
 #define RING_HWS_PGA(base) ((base)+0x80)
 #define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -1458,6 +1460,7 @@
 #define GEN6_BLITTER_FBC_NOTIFY (1<<3)

 #define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
+#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
 #define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
 #define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
+19 -9
drivers/gpu/drm/i915/intel_pm.c
@@ -6191,6 +6191,20 @@
                 valleyview_cleanup_gt_powersave(dev);
 }

+static void gen6_suspend_rps(struct drm_device *dev)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+
+        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+        /*
+         * TODO: disable RPS interrupts on GEN9+ too once RPS support
+         * is added for it.
+         */
+        if (INTEL_INFO(dev)->gen < 9)
+                gen6_disable_rps_interrupts(dev);
+}
+
 /**
  * intel_suspend_gt_powersave - suspend PM work and helper threads
  * @dev: drm device
@@ -6206,13 +6220,7 @@
         if (INTEL_INFO(dev)->gen < 6)
                 return;

-        flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
-        /*
-         * TODO: disable RPS interrupts on GEN9+ too once RPS support
-         * is added for it.
-         */
-        gen6_disable_rps_interrupts(dev);
+        gen6_suspend_rps(dev);

         /* Force GPU to min freq during suspend */
         gen6_rps_idle(dev_priv);
@@ -6316,6 +6323,9 @@
 {
         struct drm_i915_private *dev_priv = dev->dev_private;

+        if (INTEL_INFO(dev)->gen < 6)
+                return;
+
+        gen6_suspend_rps(dev);
         dev_priv->rps.enabled = false;
-        intel_enable_gt_powersave(dev);
 }

 static void ibx_init_clock_gating(struct drm_device *dev)
+3
drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -362,11 +362,14 @@
                 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+                flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
                 /*
                  * TLB invalidate requires a post-sync write.
                  */
                 flags |= PIPE_CONTROL_QW_WRITE;
                 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+
+                flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;

                 /* Workaround: we must issue a pipe_control with CS-stall bit
                  * set before a pipe_control command that has the state cache
+2 -4
drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -386,9 +386,7 @@
                 msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
                 drm_gem_object_unreference(gpu->memptrs_bo);
         }
-        if (gpu->pm4)
-                release_firmware(gpu->pm4);
-        if (gpu->pfp)
-                release_firmware(gpu->pfp);
+        release_firmware(gpu->pm4);
+        release_firmware(gpu->pfp);
         msm_gpu_cleanup(&gpu->base);
 }
+22 -31
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -141,6 +141,15 @@
         uint32_t hpd_ctrl;
         int i, ret;

+        for (i = 0; i < config->hpd_reg_cnt; i++) {
+                ret = regulator_enable(hdmi->hpd_regs[i]);
+                if (ret) {
+                        dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+                                        config->hpd_reg_names[i], ret);
+                        goto fail;
+                }
+        }
+
         ret = gpio_config(hdmi, true);
         if (ret) {
                 dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
@@ -160,15 +169,6 @@
                 if (ret) {
                         dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
                                         config->hpd_clk_names[i], ret);
-                        goto fail;
-                }
-        }
-
-        for (i = 0; i < config->hpd_reg_cnt; i++) {
-                ret = regulator_enable(hdmi->hpd_regs[i]);
-                if (ret) {
-                        dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
-                                        config->hpd_reg_names[i], ret);
                         goto fail;
                 }
         }
@@ -200,7 +200,7 @@
         return ret;
 }

-static int hdp_disable(struct hdmi_connector *hdmi_connector)
+static void hdp_disable(struct hdmi_connector *hdmi_connector)
 {
         struct hdmi *hdmi = hdmi_connector->hdmi;
         const struct hdmi_platform_config *config = hdmi->config;
@@ -212,26 +212,17 @@

         hdmi_set_mode(hdmi, false);

-        for (i = 0; i < config->hpd_reg_cnt; i++) {
-                ret = regulator_disable(hdmi->hpd_regs[i]);
-                if (ret) {
-                        dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
-                                        config->hpd_reg_names[i], ret);
-                        goto fail;
-                }
-        }
-
         for (i = 0; i < config->hpd_clk_cnt; i++)
                 clk_disable_unprepare(hdmi->hpd_clks[i]);

         ret = gpio_config(hdmi, false);
-        if (ret) {
-                dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
-                goto fail;
+        if (ret)
+                dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
+
+        for (i = 0; i < config->hpd_reg_cnt; i++) {
+                ret = regulator_disable(hdmi->hpd_regs[i]);
+                if (ret)
+                        dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+                                        config->hpd_reg_names[i], ret);
         }
-
-        return 0;
-
-fail:
-        return ret;
 }
@@ -251,11 +260,11 @@
                         (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
                 bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);

-                DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
-
-                /* ack the irq: */
+                /* ack & disable (temporarily) HPD events: */
                 hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
-                        hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+                        HDMI_HPD_INT_CTRL_INT_ACK);
+
+                DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);

                 /* detect disconnect if we are connected or visa versa: */
                 hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+1 -10
drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -331,16 +331,7 @@
                 struct drm_crtc_state *state)
 {
         struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-        struct drm_device *dev = crtc->dev;
-
         DBG("%s: check", mdp4_crtc->name);
-
-        if (mdp4_crtc->event) {
-                dev_err(dev->dev, "already pending flip!\n");
-                return -EBUSY;
-        }
-
         // TODO anything else to check?
-
         return 0;
 }
@@ -357,7 +348,7 @@
         struct drm_device *dev = crtc->dev;
         unsigned long flags;

-        DBG("%s: flush", mdp4_crtc->name);
+        DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);

         WARN_ON(mdp4_crtc->event);
+2 -10
drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -303,11 +303,6 @@

         DBG("%s: check", mdp5_crtc->name);

-        if (mdp5_crtc->event) {
-                dev_err(dev->dev, "already pending flip!\n");
-                return -EBUSY;
-        }
-
         /* request a free CTL, if none is already allocated for this CRTC */
         if (state->enable && !mdp5_crtc->ctl) {
                 mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
@@ -364,7 +359,7 @@
         struct drm_device *dev = crtc->dev;
         unsigned long flags;

-        DBG("%s: flush", mdp5_crtc->name);
+        DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);

         WARN_ON(mdp5_crtc->event);
@@ -460,10 +455,7 @@
         /* now that we know what irq's we want: */
         mdp5_crtc->err.irqmask = intf2err(intf);
         mdp5_crtc->vblank.irqmask = intf2vblank(intf);
-
-        /* when called from modeset_init(), skip the rest until later: */
-        if (!mdp5_kms)
-                return;
+        mdp_irq_update(&mdp5_kms->base);

         spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
         intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
+1 -11
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -216,17 +216,7 @@
                 goto fail;
         }

-        /* NOTE: the vsync and error irq's are actually associated with
-         * the INTF/encoder.. the easiest way to deal with this (ie. what
-         * we do now) is assume a fixed relationship between crtc's and
-         * encoders. I'm not sure if there is ever a need to more freely
-         * assign crtcs to encoders, but if there is then we need to take
-         * care of error and vblank irq's that the crtc has registered,
-         * and also update user-requested vblank_mask.
-         */
-        encoder->possible_crtcs = BIT(0);
-        mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
-
+        encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;;
         priv->encoders[priv->num_encoders++] = encoder;

         /* Construct bridge/connector for HDMI: */
+6 -3
drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -42,7 +42,10 @@
         mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
 }

-static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
+ * link changes, this must be called to figure out the new global irqmask
+ */
+void mdp_irq_update(struct mdp_kms *mdp_kms)
 {
         unsigned long flags;
         spin_lock_irqsave(&list_lock, flags);
@@ -122,7 +125,7 @@
         spin_unlock_irqrestore(&list_lock, flags);

         if (needs_update)
-                update_irq_unlocked(mdp_kms);
+                mdp_irq_update(mdp_kms);
 }

 void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
@@ -141,5 +144,5 @@
         spin_unlock_irqrestore(&list_lock, flags);

         if (needs_update)
-                update_irq_unlocked(mdp_kms);
+                mdp_irq_update(mdp_kms);
 }
+1 -1
drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -75,7 +75,7 @@
 void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
 void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
 void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
-
+void mdp_irq_update(struct mdp_kms *mdp_kms);

 /*
  * pixel format helpers:
+68 -1
drivers/gpu/drm/msm/msm_atomic.c
@@ -23,9 +23,40 @@
         struct drm_atomic_state *state;
         uint32_t fence;
         struct msm_fence_cb fence_cb;
+        uint32_t crtc_mask;
 };

 static void fence_cb(struct msm_fence_cb *cb);
+
+/* block until specified crtcs are no longer pending update, and
+ * atomically mark them as pending update
+ */
+static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+        int ret;
+
+        spin_lock(&priv->pending_crtcs_event.lock);
+        ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
+                        !(priv->pending_crtcs & crtc_mask));
+        if (ret == 0) {
+                DBG("start: %08x", crtc_mask);
+                priv->pending_crtcs |= crtc_mask;
+        }
+        spin_unlock(&priv->pending_crtcs_event.lock);
+
+        return ret;
+}
+
+/* clear specified crtcs (no longer pending update)
+ */
+static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+        spin_lock(&priv->pending_crtcs_event.lock);
+        DBG("end: %08x", crtc_mask);
+        priv->pending_crtcs &= ~crtc_mask;
+        wake_up_all_locked(&priv->pending_crtcs_event);
+        spin_unlock(&priv->pending_crtcs_event.lock);
+}

 static struct msm_commit *new_commit(struct drm_atomic_state *state)
 {
@@ -58,11 +89,26 @@

         drm_atomic_helper_commit_post_planes(dev, state);

+        /* NOTE: _wait_for_vblanks() only waits for vblank on
+         * enabled CRTCs.  So we end up faulting when disabling
+         * due to (potentially) unref'ing the outgoing fb's
+         * before the vblank when the disable has latched.
+         *
+         * But if it did wait on disabled (or newly disabled)
+         * CRTCs, that would be racy (ie. we could have missed
+         * the irq.  We need some way to poll for pipe shut
+         * down.  Or just live with occasionally hitting the
+         * timeout in the CRTC disable path (which really should
+         * not be critical path)
+         */
+
         drm_atomic_helper_wait_for_vblanks(dev, state);

         drm_atomic_helper_cleanup_planes(dev, state);

         drm_atomic_state_free(state);
+
+        end_atomic(dev->dev_private, c->crtc_mask);

         kfree(c);
 }
@@ -97,8 +143,9 @@
 int msm_atomic_commit(struct drm_device *dev,
                 struct drm_atomic_state *state, bool async)
 {
-        struct msm_commit *c;
         int nplanes = dev->mode_config.num_total_plane;
+        int ncrtcs = dev->mode_config.num_crtc;
+        struct msm_commit *c;
         int i, ret;

         ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -106,6 +153,18 @@
                 return ret;

         c = new_commit(state);
+        if (!c)
+                return -ENOMEM;
+
+        /*
+         * Figure out what crtcs we have:
+         */
+        for (i = 0; i < ncrtcs; i++) {
+                struct drm_crtc *crtc = state->crtcs[i];
+                if (!crtc)
+                        continue;
+                c->crtc_mask |= (1 << drm_crtc_index(crtc));
+        }

         /*
          * Figure out what fence to wait for:
@@ -120,6 +179,14 @@
                 if ((plane->state->fb != new_state->fb) && new_state->fb)
                         add_fb(c, new_state->fb);
         }
+
+        /*
+         * Wait for pending updates on any of the same crtc's and then
+         * mark our set of crtc's as busy:
+         */
+        ret = start_atomic(dev->dev_private, c->crtc_mask);
+        if (ret)
+                return ret;

         /*
          * This is the point of no return - everything below never fails except
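start_atomic()/end_atomic() above amount to a small "busy bits" primitive: a commit sleeps until none of its CRTC bits are pending, marks them, and the async completion path clears them and wakes waiters. A condensed sketch of the same pattern under hypothetical names (claim/release/busy_mask are illustrative, not from the patch); note that the _locked waitqueue API expects the queue's own spinlock to be held:

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

static uint32_t busy_mask;                    /* bits currently claimed */
static DECLARE_WAIT_QUEUE_HEAD(busy_event);

/* Claim 'mask': sleep until none of its bits are busy, then set them. */
static int claim(uint32_t mask)
{
        int ret;

        spin_lock(&busy_event.lock);
        /* the _locked variant drops/retakes busy_event.lock while sleeping */
        ret = wait_event_interruptible_locked(busy_event,
                        !(busy_mask & mask));
        if (ret == 0)
                busy_mask |= mask;
        spin_unlock(&busy_event.lock);

        return ret;
}

/* Release 'mask' and wake anyone waiting on overlapping bits. */
static void release(uint32_t mask)
{
        spin_lock(&busy_event.lock);
        busy_mask &= ~mask;
        wake_up_all_locked(&busy_event);
        spin_unlock(&busy_event.lock);
}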
+1
drivers/gpu/drm/msm/msm_drv.c
@@ -193,6 +193,7 @@

         priv->wq = alloc_ordered_workqueue("msm", 0);
         init_waitqueue_head(&priv->fence_event);
+        init_waitqueue_head(&priv->pending_crtcs_event);

         INIT_LIST_HEAD(&priv->inactive_list);
         INIT_LIST_HEAD(&priv->fence_cbs);
+4
drivers/gpu/drm/msm/msm_drv.h
@@ -96,6 +96,10 @@
         /* callbacks deferred until bo is inactive: */
         struct list_head fence_cbs;

+        /* crtcs pending async atomic updates: */
+        uint32_t pending_crtcs;
+        wait_queue_head_t pending_crtcs_event;
+
         /* registered MMUs: */
         unsigned int num_mmus;
         struct msm_mmu *mmus[NUM_DOMAINS];
+1 -2
drivers/gpu/drm/msm/msm_fbdev.c
@@ -190,8 +190,7 @@
 fail:

         if (ret) {
-                if (fbi)
-                        framebuffer_release(fbi);
+                framebuffer_release(fbi);
                 if (fb) {
                         drm_framebuffer_unregister_private(fb);
                         drm_framebuffer_remove(fb);
+1 -2
drivers/gpu/drm/msm/msm_gem.c
@@ -535,8 +535,7 @@
                 drm_free_large(msm_obj->pages);

         } else {
-                if (msm_obj->vaddr)
-                        vunmap(msm_obj->vaddr);
+                vunmap(msm_obj->vaddr);
                 put_pages(obj);
         }
-9
drivers/gpu/drm/nouveau/nouveau_display.c
@@ -876,7 +876,6 @@
         if (ret)
                 return ret;

-        bo->gem.dumb = true;
         ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
         drm_gem_object_unreference_unlocked(&bo->gem);
         return ret;
@@ -892,14 +891,6 @@
         gem = drm_gem_object_lookup(dev, file_priv, handle);
         if (gem) {
                 struct nouveau_bo *bo = nouveau_gem_object(gem);
-
-                /*
-                 * We don't allow dumb mmaps on objects created using another
-                 * interface.
-                 */
-                WARN_ONCE(!(gem->dumb || gem->import_attach),
-                          "Illegal dumb map of accelerated buffer.\n");
-
                 *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
                 drm_gem_object_unreference_unlocked(gem);
                 return 0;
-3
drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -444,9 +444,6 @@
         list_for_each_entry(nvbo, list, entry) {
                 struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];

-                WARN_ONCE(nvbo->gem.dumb,
-                          "GPU use of dumb buffer is illegal.\n");
-
                 ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
                                              b->write_domains,
                                              b->valid_domains);
+2 -1
drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,6 +28,7 @@
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"

+#include "drm_legacy.h"
 static int
 nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
@@ -281,7 +282,7 @@
         struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);

         if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
-                return -EINVAL;
+                return drm_legacy_mmap(filp, vma);

         return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
 }
+4 -22
drivers/gpu/drm/radeon/radeon_gem.c
@@ -394,10 +394,9 @@
         return r;
 }

-static int radeon_mode_mmap(struct drm_file *filp,
-                            struct drm_device *dev,
-                            uint32_t handle, bool dumb,
-                            uint64_t *offset_p)
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+                          struct drm_device *dev,
+                          uint32_t handle, uint64_t *offset_p)
 {
         struct drm_gem_object *gobj;
         struct radeon_bo *robj;
@@ -406,14 +405,6 @@
         if (gobj == NULL) {
                 return -ENOENT;
         }
-
-        /*
-         * We don't allow dumb mmaps on objects created using another
-         * interface.
-         */
-        WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
-                  "Illegal dumb map of GPU buffer.\n");
-
         robj = gem_to_radeon_bo(gobj);
         if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
                 drm_gem_object_unreference_unlocked(gobj);
@@ -424,20 +415,12 @@
         return 0;
 }

-int radeon_mode_dumb_mmap(struct drm_file *filp,
-                          struct drm_device *dev,
-                          uint32_t handle, uint64_t *offset_p)
-{
-        return radeon_mode_mmap(filp, dev, handle, true, offset_p);
-}
-
 int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *filp)
 {
         struct drm_radeon_gem_mmap *args = data;

-        return radeon_mode_mmap(filp, dev, args->handle, false,
-                                &args->addr_ptr);
+        return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
 }

 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -763,7 +746,6 @@
                 return -ENOMEM;

         r = drm_gem_handle_create(file_priv, gobj, &handle);
-        gobj->dumb = true;
         /* drop reference from allocate - handle holds it now */
         drm_gem_object_unreference_unlocked(gobj);
         if (r) {
+53
drivers/gpu/drm/radeon/radeon_kfd.c
@@ -28,6 +28,8 @@
 #include "cikd.h"
 #include "cik_reg.h"
 #include "radeon_kfd.h"
+#include "radeon_ucode.h"
+#include <linux/firmware.h>

 #define CIK_PIPE_PER_MEC (4)
@@ -49,6 +51,7 @@
 static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);

 static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);

 /*
  * Register access functions
@@ -91,6 +94,7 @@
         .hqd_load = kgd_hqd_load,
         .hqd_is_occupies = kgd_hqd_is_occupies,
         .hqd_destroy = kgd_hqd_destroy,
+        .get_fw_version = get_fw_version
 };

 static const struct kgd2kfd_calls *kgd2kfd;
@@ -560,4 +564,53 @@

         release_queue(kgd);
         return 0;
 }
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+        struct radeon_device *rdev = (struct radeon_device *) kgd;
+        const union radeon_firmware_header *hdr;
+
+        BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
+
+        switch (type) {
+        case KGD_ENGINE_PFP:
+                hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
+                break;
+
+        case KGD_ENGINE_ME:
+                hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
+                break;
+
+        case KGD_ENGINE_CE:
+                hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
+                break;
+
+        case KGD_ENGINE_MEC1:
+                hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
+                break;
+
+        case KGD_ENGINE_MEC2:
+                hdr = (const union radeon_firmware_header *)
+                                                        rdev->mec2_fw->data;
+                break;
+
+        case KGD_ENGINE_RLC:
+                hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
+                break;
+
+        case KGD_ENGINE_SDMA:
+                hdr = (const union radeon_firmware_header *)
+                                                        rdev->sdma_fw->data;
+                break;
+
+        default:
+                return 0;
+        }
+
+        if (hdr == NULL)
+                return 0;
+
+        /* Only 12 bit in use*/
+        return hdr->common.ucode_version;
+}
-3
drivers/gpu/drm/radeon/radeon_object.c
@@ -529,9 +529,6 @@
                 u32 current_domain =
                         radeon_mem_type_to_domain(bo->tbo.mem.mem_type);

-                WARN_ONCE(bo->gem_base.dumb,
-                          "GPU use of dumb buffer is illegal.\n");
-
                 /* Check if this buffer will be moved and don't move it
                  * if we have moved too many buffers for this IB already.
                  *
+36 -12
drivers/gpu/drm/tegra/dc.c
@@ -168,7 +168,7 @@
                         const struct tegra_dc_window *window)
 {
         unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
-        unsigned long value;
+        unsigned long value, flags;
         bool yuv, planar;

         /*
@@ -180,6 +180,8 @@
                 bpp = window->bits_per_pixel / 8;
         else
                 bpp = planar ? 1 : 2;
+
+        spin_lock_irqsave(&dc->lock, flags);

         value = WINDOW_A_SELECT << index;
         tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -273,6 +275,7 @@

         case TEGRA_BO_TILING_MODE_BLOCK:
                 DRM_ERROR("hardware doesn't support block linear mode\n");
+                spin_unlock_irqrestore(&dc->lock, flags);
                 return -EINVAL;
         }
@@ -331,6 +334,8 @@

         tegra_dc_window_commit(dc, index);

+        spin_unlock_irqrestore(&dc->lock, flags);
+
         return 0;
 }
@@ -338,9 +343,12 @@
 {
         struct tegra_dc *dc = to_tegra_dc(plane->crtc);
         struct tegra_plane *p = to_tegra_plane(plane);
+        unsigned long flags;
         u32 value;

         if (!plane->crtc)
                 return 0;
+
+        spin_lock_irqsave(&dc->lock, flags);

         value = WINDOW_A_SELECT << p->index;
         tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -351,6 +359,9 @@
         tegra_dc_writel(dc, value, DC_WIN_WIN_OPTIONS);

         tegra_dc_window_commit(dc, p->index);
+
+        spin_unlock_irqrestore(&dc->lock, flags);

         return 0;
 }
@@ -699,12 +709,14 @@
         struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
         unsigned int h_offset = 0, v_offset = 0;
         struct tegra_bo_tiling tiling;
+        unsigned long value, flags;
         unsigned int format, swap;
-        unsigned long value;
         int err;

         err = tegra_fb_get_tiling(fb, &tiling);
         if (err < 0)
                 return err;
+
+        spin_lock_irqsave(&dc->lock, flags);

         tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -752,6 +764,7 @@

         case TEGRA_BO_TILING_MODE_BLOCK:
                 DRM_ERROR("hardware doesn't support block linear mode\n");
+                spin_unlock_irqrestore(&dc->lock, flags);
                 return -EINVAL;
         }
@@ -777,6 +790,9 @@
         value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
         tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
         tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+
+        spin_unlock_irqrestore(&dc->lock, flags);

         return 0;
 }
@@ -814,20 +829,27 @@
         unsigned long flags, base;
         struct tegra_bo *bo;

-        if (!dc->event)
+        spin_lock_irqsave(&drm->event_lock, flags);
+
+        if (!dc->event) {
+                spin_unlock_irqrestore(&drm->event_lock, flags);
                 return;
+        }

         bo = tegra_fb_get_plane(crtc->primary->fb, 0);

+        spin_lock_irqsave(&dc->lock, flags);
+
         /* check if new start address has been latched */
+        tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
         tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
         base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
         tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);

+        spin_unlock_irqrestore(&dc->lock, flags);
+
         if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
-                spin_lock_irqsave(&drm->event_lock, flags);
-                drm_send_vblank_event(drm, dc->pipe, dc->event);
-                drm_vblank_put(drm, dc->pipe);
+                drm_crtc_send_vblank_event(crtc, dc->event);
+                drm_crtc_vblank_put(crtc);
                 dc->event = NULL;
-                spin_unlock_irqrestore(&drm->event_lock, flags);
         }
+
+        spin_unlock_irqrestore(&drm->event_lock, flags);
 }

 void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -843,7 +867,7 @@

         if (dc->event && dc->event->base.file_priv == file) {
                 dc->event->base.destroy(&dc->event->base);
-                drm_vblank_put(drm, dc->pipe);
+                drm_crtc_vblank_put(crtc);
                 dc->event = NULL;
         }
@@ -853,16 +877,16 @@
 static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                               struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
 {
+        unsigned int pipe = drm_crtc_index(crtc);
         struct tegra_dc *dc = to_tegra_dc(crtc);
-        struct drm_device *drm = crtc->dev;

         if (dc->event)
                 return -EBUSY;

         if (event) {
-                event->pipe = dc->pipe;
+                event->pipe = pipe;
                 dc->event = event;
-                drm_vblank_get(drm, dc->pipe);
+                drm_crtc_vblank_get(crtc);
         }

         tegra_dc_set_base(dc, 0, 0, fb);
@@ -1127,7 +1151,7 @@
                 /*
                 dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
                 */
-                drm_handle_vblank(dc->base.dev, dc->pipe);
+                drm_crtc_handle_vblank(&dc->base);
                 tegra_dc_finish_page_flip(dc);
+10 -6
drivers/gpu/drm/tegra/drm.c
@@ -694,24 +694,28 @@
         .llseek = noop_llseek,
 };

-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
+                                             unsigned int pipe)
 {
         struct drm_crtc *crtc;

         list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
-                struct tegra_dc *dc = to_tegra_dc(crtc);
-
-                if (dc->pipe == pipe)
+                if (pipe == drm_crtc_index(crtc))
                         return crtc;
         }

         return NULL;
 }

-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
 {
+        struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+
+        if (!crtc)
+                return 0;
+
         /* TODO: implement real hardware counter using syncpoints */
-        return drm_vblank_count(dev, crtc);
+        return drm_crtc_vblank_count(crtc);
 }

 static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
+39 -11
drivers/gpu/drm/tegra/gem.c
@@ -216,31 +216,58 @@
         }
 }

-static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
-                              size_t size)
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
 {
+        struct scatterlist *s;
+        struct sg_table *sgt;
+        unsigned int i;
+
         bo->pages = drm_gem_get_pages(&bo->gem);
         if (IS_ERR(bo->pages))
                 return PTR_ERR(bo->pages);

-        bo->num_pages = size >> PAGE_SHIFT;
+        bo->num_pages = bo->gem.size >> PAGE_SHIFT;

-        bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
-        if (IS_ERR(bo->sgt)) {
-                drm_gem_put_pages(&bo->gem, bo->pages, false, false);
-                return PTR_ERR(bo->sgt);
+        sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+        if (IS_ERR(sgt))
+                goto put_pages;
+
+        /*
+         * Fake up the SG table so that dma_map_sg() can be used to flush the
+         * pages associated with it. Note that this relies on the fact that
+         * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
+         * only cache maintenance.
+         *
+         * TODO: Replace this by drm_clflash_sg() once it can be implemented
+         * without relying on symbols that are not exported.
+         */
+        for_each_sg(sgt->sgl, s, sgt->nents, i)
+                sg_dma_address(s) = sg_phys(s);
+
+        if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
+                sgt = ERR_PTR(-ENOMEM);
+                goto release_sgt;
         }

+        bo->sgt = sgt;
+
         return 0;
+
+release_sgt:
+        sg_free_table(sgt);
+        kfree(sgt);
+put_pages:
+        drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+        return PTR_ERR(sgt);
 }

-static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
-                          size_t size)
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
 {
         struct tegra_drm *tegra = drm->dev_private;
         int err;

         if (tegra->domain) {
-                err = tegra_bo_get_pages(drm, bo, size);
+                err = tegra_bo_get_pages(drm, bo);
                 if (err < 0)
                         return err;
@@ -251,6 +277,8 @@
                         return err;
                 }
         } else {
+                size_t size = bo->gem.size;
+
                 bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
                                                    GFP_KERNEL | __GFP_NOWARN);
                 if (!bo->vaddr) {
@@ -274,7 +302,7 @@
         if (IS_ERR(bo))
                 return bo;

-        err = tegra_bo_alloc(drm, bo, size);
+        err = tegra_bo_alloc(drm, bo);
         if (err < 0)
                 goto release;
+4
include/drm/drmP.h
@@ -901,11 +901,15 @@
 extern int drm_wait_vblank(struct drm_device *dev, void *data,
                            struct drm_file *filp);
 extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
 extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
                                      struct timeval *vblanktime);
 extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
                                   struct drm_pending_vblank_event *e);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+                                       struct drm_pending_vblank_event *e);
 extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
 extern int drm_vblank_get(struct drm_device *dev, int crtc);
 extern void drm_vblank_put(struct drm_device *dev, int crtc);
 extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
-7
include/drm/drm_gem.h
@@ -119,13 +119,6 @@
          * simply leave it as NULL.
          */
         struct dma_buf_attachment *import_attach;
-
-        /**
-         * dumb - created as dumb buffer
-         * Whether the gem object was created using the dumb buffer interface
-         * as such it may not be used for GPU rendering.
-         */
-        bool dumb;
 };

 void drm_gem_object_release(struct drm_gem_object *obj);