@@ -505,7 +505,6 @@
 	struct drm_local_map *map = NULL;
 	struct drm_gem_object *obj;
 	struct drm_hash_item *hash;
-	unsigned long prot;
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
@@ -537,11 +538,7 @@
 	vma->vm_ops = obj->dev->driver->gem_vm_ops;
 	vma->vm_private_data = map->handle;
 	/* FIXME: use pgprot_writecombine when available */
-	prot = pgprot_val(vma->vm_page_prot);
-#ifdef CONFIG_X86
-	prot |= _PAGE_CACHE_WC;
-#endif
-	vma->vm_page_prot = __pgprot(prot);
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
 	/* Take a ref for this mapping of the object, so that the fault
 	 * handler can dereference the mmap offset's pointer to the object.
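The open-coded x86 _PAGE_CACHE_WC twiddling is replaced by the portable pgprot_writecombine() helper, which falls back to the unmodified protection bits on architectures without a write-combining page type. A minimal sketch of the same pattern in a driver mmap handler (the handler name and the PFN are illustrative, not from this patch):

#include <linux/mm.h>

/* Illustrative mmap handler: map a device memory region write-combined.
 * pgprot_writecombine() degrades gracefully where WC is unavailable. */
static int my_dev_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long pfn = 0xd0000000UL >> PAGE_SHIFT;	/* hypothetical BAR */

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}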
+1
drivers/gpu/drm/drm_sysfs.c
@@ -451,6 +451,7 @@
 
 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
 }
+EXPORT_SYMBOL(drm_sysfs_hotplug_event);
 
 /**
  * drm_sysfs_device_add - adds a class device to sysfs for a character driver
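drm_sysfs_hotplug_event() fires a KOBJ_CHANGE uevent with HOTPLUG=1 on the card's device node; exporting it lets the modular i915 hotplug path call it. A sketch of the intended consumer, matching the work item this series adds to i915_drv.h (the handler body is a sketch, not the i915 code verbatim):

#include "drmP.h"
#include "i915_drv.h"

/* Sketch of the consumer this export enables: the hotplug work handler
 * just forwards the event to userspace as a sysfs uevent. */
static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;

	drm_sysfs_hotplug_event(dev);	/* emits HOTPLUG=1 KOBJ_CHANGE */
}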
+1-8
drivers/gpu/drm/i915/i915_dma.c
@@ -922,7 +922,7 @@
 	 * Some of the preallocated space is taken by the GTT
 	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
 	 */
-	if (IS_G4X(dev))
+	if (IS_G4X(dev) || IS_IGD(dev))
 		overhead = 4096;
 	else
 		overhead = (*aperture_size / 1024) + 4096;
@@ -1029,13 +1029,6 @@
 	ret = drm_irq_install(dev);
 	if (ret)
 		goto destroy_ringbuffer;
-
-	/* FIXME: re-add hotplug support */
-#if 0
-	ret = drm_hotplug_init(dev);
-	if (ret)
-		goto destroy_ringbuffer;
-#endif
 
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
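The dead drm_hotplug_init() block can go because this series reintroduces hotplug through the interrupt path: the ISR latches PORT_HOTPLUG_STAT bits against hotplug_supported_mask and defers the work to the hotplug work item. A rough sketch of that wiring, using the fields added in i915_drv.h below (the handler body is illustrative and simplified, not the actual i915_irq.c change):

#include "drmP.h"
#include "i915_drv.h"
#include "i915_reg.h"

/* Illustrative ISR fragment: detect a supported hotplug event, ack the
 * status register, and defer the uevent to process context. */
static irqreturn_t my_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = arg;
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);

	if (hotplug_status & dev_priv->hotplug_supported_mask) {
		schedule_work(&dev_priv->hotplug_work);
		I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);	/* ack */
	}
	return IRQ_HANDLED;
}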
+5
drivers/gpu/drm/i915/i915_drv.h
@@ -159,6 +159,9 @@
 	u32 irq_mask_reg;
 	u32 pipestat[2];
 
+	u32 hotplug_supported_mask;
+	struct work_struct hotplug_work;
+
 	int tex_lru_log_granularity;
 	int allow_batchbuffer;
 	struct mem_block *agp_heap;
@@ -300,6 +297,7 @@
 	 *
 	 * A reference is held on the buffer while on this list.
 	 */
+	spinlock_t active_list_lock;
 	struct list_head active_list;
 
 	/**
@@ -814,6 +810,7 @@
 #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
 					IS_I915GM(dev)))
 #define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev))
+#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
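active_list is now touched from the interrupt-driven retire path as well as under struct_mutex, so it gets a dedicated spinlock; every i915_gem.c hunk below takes active_list_lock around reads and writes of the list. The pattern in isolation (a standalone sketch, not driver code):

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Minimal sketch of the locking pattern the i915_gem.c hunks apply:
 * every reader or writer of the shared list takes the same spinlock. */
struct obj_lists {
	spinlock_t active_list_lock;
	struct list_head active_list;
};

static void move_to_active(struct obj_lists *m, struct list_head *entry)
{
	spin_lock(&m->active_list_lock);
	list_move_tail(entry, &m->active_list);
	spin_unlock(&m->active_list_lock);
}

static bool active_empty(struct obj_lists *m)
{
	bool empty;

	spin_lock(&m->active_list_lock);
	empty = list_empty(&m->active_list);
	spin_unlock(&m->active_list_lock);
	return empty;
}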
+29-9
drivers/gpu/drm/i915/i915_gem.c
@@ -1072,6 +1072,7 @@
 	case -EAGAIN:
 		return VM_FAULT_OOM;
 	case -EFAULT:
+	case -EINVAL:
 		return VM_FAULT_SIGBUS;
 	default:
 		return VM_FAULT_NOPAGE;
@@ -1325,7 +1324,9 @@
 		obj_priv->active = 1;
 	}
 	/* Move from whatever list we were on to the tail of execution. */
+	spin_lock(&dev_priv->mm.active_list_lock);
 	list_move_tail(&obj_priv->list,
 		       &dev_priv->mm.active_list);
+	spin_unlock(&dev_priv->mm.active_list_lock);
 	obj_priv->last_rendering_seqno = seqno;
 }
@@ -1470,6 +1467,7 @@
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
+	spin_lock(&dev_priv->mm.active_list_lock);
 	while (!list_empty(&dev_priv->mm.active_list)) {
 		struct drm_gem_object *obj;
 		struct drm_i915_gem_object *obj_priv;
@@ -1485,7 +1481,7 @@
 		 * this seqno.
 		 */
 		if (obj_priv->last_rendering_seqno != request->seqno)
-			return;
+			goto out;
 
 #if WATCH_LRU
 		DRM_INFO("%s: retire %d moves to inactive list %p\n",
@@ -1497,6 +1493,8 @@
 		else
 			i915_gem_object_move_to_inactive(obj);
 	}
+out:
+	spin_unlock(&dev_priv->mm.active_list_lock);
 }
 
 /**
@@ -1996,19 +1990,22 @@
 	int regnum = obj_priv->fence_reg;
 	uint32_t val;
 	uint32_t pitch_val;
+	uint32_t fence_size_bits;
 
-	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
 	    (obj_priv->gtt_offset & (obj->size - 1))) {
-		WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
+		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
 		     __func__, obj_priv->gtt_offset);
 		return;
 	}
 
 	pitch_val = (obj_priv->stride / 128) - 1;
-
+	WARN_ON(pitch_val & ~0x0000000f);
 	val = obj_priv->gtt_offset;
 	if (obj_priv->tiling_mode == I915_TILING_Y)
 		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-	val |= I830_FENCE_SIZE_BITS(obj->size);
+	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
+	WARN_ON(fence_size_bits & ~0x00000f00);
+	val |= fence_size_bits;
 	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 	val |= I830_FENCE_REG_VALID;
@@ -2203,7 +2194,7 @@
 		return -EBUSY;
 	if (alignment == 0)
 		alignment = i915_gem_get_gtt_alignment(obj);
-	if (alignment & (PAGE_SIZE - 1)) {
+	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
 		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
 		return -EINVAL;
 	}
@@ -2220,15 +2211,20 @@
 		}
 	}
 	if (obj_priv->gtt_space == NULL) {
+		bool lists_empty;
+
 		/* If the gtt is empty and we're still having trouble
 		 * fitting our object in, we're out of memory.
 		 */
 #if WATCH_LRU
 		DRM_INFO("%s: GTT full, evicting something\n", __func__);
 #endif
-		if (list_empty(&dev_priv->mm.inactive_list) &&
-		    list_empty(&dev_priv->mm.flushing_list) &&
-		    list_empty(&dev_priv->mm.active_list)) {
+		spin_lock(&dev_priv->mm.active_list_lock);
+		lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+			       list_empty(&dev_priv->mm.flushing_list) &&
+			       list_empty(&dev_priv->mm.active_list));
+		spin_unlock(&dev_priv->mm.active_list_lock);
+		if (lists_empty) {
 			DRM_ERROR("GTT full, but LRU list empty\n");
 			return -ENOMEM;
 		}
@@ -3689,6 +3675,7 @@
 
 	i915_gem_retire_requests(dev);
 
+	spin_lock(&dev_priv->mm.active_list_lock);
 	if (!dev_priv->mm.wedged) {
 		/* Active and flushing should now be empty as we've
 		 * waited for a sequence higher than any pending execbuffer
@@ -3716,6 +3701,7 @@
 		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
 		i915_gem_object_move_to_inactive(obj_priv->obj);
 	}
+	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	while (!list_empty(&dev_priv->mm.flushing_list)) {
 		struct drm_i915_gem_object *obj_priv;
@@ -3965,7 +3949,10 @@
 	if (ret != 0)
 		return ret;
 
+	spin_lock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	BUG_ON(!list_empty(&dev_priv->mm.request_list));
@@ -4012,6 +3993,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
+	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
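The i830 fence hunk above now validates exactly what the hardware can encode: a start address at 512K granularity (I830_FENCE_START_MASK), a 4-bit pitch field holding stride/128 - 1, and a size field in bits 11:8, which the two WARN_ONs enforce. A worked packing for a hypothetical 1MB X-tiled object with a 2048-byte stride, assuming the i915_reg.h encoding of I830_FENCE_SIZE_BITS as ((ffs(size >> 19) - 1) << 8):

#include <linux/types.h>
#include <linux/bitops.h>

/* Worked example of the i830 fence register packing, under the field
 * encodings assumed above. For gtt_offset = 0x00300000 (1MB aligned),
 * size = 0x100000, stride = 2048, tile_y = false, the result is
 * 0x003001f1. */
static u32 pack_i830_fence(u32 gtt_offset, u32 size, u32 stride, bool tile_y)
{
	u32 pitch_val = (stride / 128) - 1;		/* 0xf for 2048  */
	u32 size_bits = (ffs(size >> 19) - 1) << 8;	/* 0x100 for 1MB */
	u32 val = gtt_offset;				/* 512K-aligned  */

	if (tile_y)
		val |= 1 << 12;		/* I830_FENCE_TILING_Y_SHIFT */
	val |= size_bits;		/* bits 11:8 */
	val |= pitch_val << 4;		/* I830_FENCE_PITCH_SHIFT */
	val |= 1;			/* I830_FENCE_REG_VALID */
	return val;
}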
+21-1
drivers/gpu/drm/i915/intel_display.c
@@ -636,7 +636,7 @@
 intel_wait_for_vblank(struct drm_device *dev)
 {
 	/* Wait for 20ms, i.e. one cycle at 50hz. */
-	udelay(20000);
+	mdelay(20);
 }
 
 static int
@@ -1104,6 +1104,26 @@
 	if (!ok) {
 		DRM_ERROR("Couldn't find PLL settings for mode!\n");
 		return -EINVAL;
+	}
+
+	/* SDVO TV has fixed PLL values depend on its clock range,
+	   this mirrors vbios setting. */
+	if (is_sdvo && is_tv) {
+		if (adjusted_mode->clock >= 100000
+				&& adjusted_mode->clock < 140500) {
+			clock.p1 = 2;
+			clock.p2 = 10;
+			clock.n = 3;
+			clock.m1 = 16;
+			clock.m2 = 8;
+		} else if (adjusted_mode->clock >= 140500
+				&& adjusted_mode->clock <= 200000) {
+			clock.p1 = 1;
+			clock.p2 = 10;
+			clock.n = 6;
+			clock.m1 = 12;
+			clock.m2 = 8;
+		}
 	}
 
 	if (IS_IGD(dev))
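udelay(20000) is out of spec for a 20ms wait: udelay() targets microsecond-scale busy waits and its delay-loop math can overflow for large arguments on some architectures, which is why checkpatch flags long udelays. mdelay(20) busy-waits in safe 1ms chunks and stays usable in atomic context, where msleep() would not be. For illustration only:

#include <linux/delay.h>

/* Illustrative only: choosing a delay primitive by scale and context. */
static void delay_examples(void)
{
	udelay(20);	/* microsecond-scale busy wait */
	mdelay(20);	/* 20ms busy wait, safe in atomic context */
	msleep(20);	/* preferred when the caller may sleep */
}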
+1
drivers/gpu/drm/i915/intel_modes.c
@@ -76,6 +76,7 @@
 		drm_mode_connector_update_edid_property(&intel_output->base,
 							edid);
 		ret = drm_add_edid_modes(&intel_output->base, edid);
+		···
 		kfree(edid);
 	}
 