@@ -505,7 +505,6 @@
 	struct drm_local_map *map = NULL;
 	struct drm_gem_object *obj;
 	struct drm_hash_item *hash;
-	unsigned long prot;
 	int ret = 0;
 
 	mutex_lock(&dev->struct_mutex);
@@ -537,11 +538,7 @@
 	vma->vm_ops = obj->dev->driver->gem_vm_ops;
 	vma->vm_private_data = map->handle;
 	/* FIXME: use pgprot_writecombine when available */
-	prot = pgprot_val(vma->vm_page_prot);
-#ifdef CONFIG_X86
-	prot |= _PAGE_CACHE_WC;
-#endif
-	vma->vm_page_prot = __pgprot(prot);
+	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
 	/* Take a ref for this mapping of the object, so that the fault
 	 * handler can dereference the mmap offset's pointer to the object.
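The hunk above drops the open-coded x86 _PAGE_CACHE_WC handling in favour of the portable pgprot_writecombine() helper, which picks the right PTE bits on each architecture. As a rough sketch of the same idiom in an mmap handler (the function and the way the PFN is derived are hypothetical, not taken from this patch):

#include <linux/fs.h>
#include <linux/mm.h>

/*
 * Illustrative only: map a device memory range write-combined.
 * my_obj_mmap() and the vm_pgoff handling are made up for this sketch;
 * the point is that pgprot_writecombine() sets the right caching bits
 * portably instead of poking _PAGE_CACHE_WC behind an #ifdef.
 */
static int my_obj_mmap(struct file *filp, struct vm_area_struct *vma)
{
	unsigned long pfn = vma->vm_pgoff;	/* hypothetical: offset encodes the PFN */

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start, pfn,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

On architectures without write-combining support the helper typically falls back to an uncached mapping, so no per-architecture #ifdef is needed in the caller.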
drivers/gpu/drm/drm_sysfs.c (+1)
@@ -451,6 +451,7 @@
 
 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, envp);
 }
+EXPORT_SYMBOL(drm_sysfs_hotplug_event);
 
 /**
 * drm_sysfs_device_add - adds a class device to sysfs for a character driver
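Exporting drm_sysfs_hotplug_event() lets a modular KMS driver send the connector-change uevent itself. Since the uevent has to be generated from process context, the natural pattern is a work item kicked from the interrupt handler. A minimal sketch, where every my_* name is hypothetical and only drm_sysfs_hotplug_event() itself comes from the patch:

#include <linux/workqueue.h>
#include "drmP.h"	/* struct drm_device, drm_sysfs_hotplug_event() */

/* Hypothetical driver-private structure embedding the deferred work. */
struct my_driver_private {
	struct drm_device *dev;
	struct work_struct hotplug_work;
};

/* Runs in process context, so it is safe to send the uevent from here. */
static void my_hotplug_work_func(struct work_struct *work)
{
	struct my_driver_private *priv =
		container_of(work, struct my_driver_private, hotplug_work);

	/* Tell udev/X that connector state may have changed. */
	drm_sysfs_hotplug_event(priv->dev);
}

/* Called once at driver load (sketch). */
static void my_hotplug_init(struct my_driver_private *priv)
{
	INIT_WORK(&priv->hotplug_work, my_hotplug_work_func);
}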
drivers/gpu/drm/i915/i915_dma.c (+1 -8)
@@ -922,7 +922,7 @@
 	 * Some of the preallocated space is taken by the GTT
 	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
 	 */
-	if (IS_G4X(dev))
+	if (IS_G4X(dev) || IS_IGD(dev))
 		overhead = 4096;
 	else
 		overhead = (*aperture_size / 1024) + 4096;
@@ -1029,13 +1029,6 @@
 	ret = drm_irq_install(dev);
 	if (ret)
 		goto destroy_ringbuffer;
-
-	/* FIXME: re-add hotplug support */
-#if 0
-	ret = drm_hotplug_init(dev);
-	if (ret)
-		goto destroy_ringbuffer;
-#endif
 
 	/* Always safe in the mode setting case. */
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
drivers/gpu/drm/i915/i915_drv.h (+5)
@@ -159,6 +159,9 @@
 	u32 irq_mask_reg;
 	u32 pipestat[2];
 
+	u32 hotplug_supported_mask;
+	struct work_struct hotplug_work;
+
 	int tex_lru_log_granularity;
 	int allow_batchbuffer;
 	struct mem_block *agp_heap;
@@ -300,6 +297,7 @@
 		 *
 		 * A reference is held on the buffer while on this list.
 		 */
+		spinlock_t active_list_lock;
 		struct list_head active_list;
 
 		/**
@@ -814,6 +810,7 @@
 #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
 					IS_I915GM(dev)))
 #define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev))
+#define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
 
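The new hotplug_supported_mask and hotplug_work fields (gated to 945/965-class hardware by I915_HAS_HOTPLUG()) imply the usual IRQ/process-context split: the interrupt handler only filters the asserted hotplug sources against what the driver enabled, then defers the rest to the work item. A sketch of that filter, using a stand-in structure rather than the real drm_i915_private:

#include <linux/types.h>
#include <linux/workqueue.h>

/*
 * Sketch of the IRQ-side half of hotplug handling.  This structure only
 * mirrors the two fields added to the i915 private structure above; it
 * is not the real thing, and "hotplug_status" stands in for whatever the
 * hardware reports as asserted hotplug sources.
 */
struct hotplug_state {
	u32 hotplug_supported_mask;		/* sources the driver enabled */
	struct work_struct hotplug_work;	/* set up with INIT_WORK() at load time */
};

static void handle_hotplug_irq(struct hotplug_state *hp, u32 hotplug_status)
{
	/* Only bother userspace for sources we actually advertised. */
	if (hotplug_status & hp->hotplug_supported_mask)
		schedule_work(&hp->hotplug_work);	/* defer to process context */
}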
drivers/gpu/drm/i915/i915_gem.c (+29 -9)
@@ -1072,6 +1072,7 @@
 	case -EAGAIN:
 		return VM_FAULT_OOM;
 	case -EFAULT:
+	case -EINVAL:
 		return VM_FAULT_SIGBUS;
 	default:
 		return VM_FAULT_NOPAGE;
@@ -1325,8 +1324,10 @@
 		obj_priv->active = 1;
 	}
 	/* Move from whatever list we were on to the tail of execution. */
+	spin_lock(&dev_priv->mm.active_list_lock);
 	list_move_tail(&obj_priv->list,
 		       &dev_priv->mm.active_list);
+	spin_unlock(&dev_priv->mm.active_list_lock);
 	obj_priv->last_rendering_seqno = seqno;
 }
 
@@ -1470,6 +1467,7 @@
 	/* Move any buffers on the active list that are no longer referenced
 	 * by the ringbuffer to the flushing/inactive lists as appropriate.
 	 */
+	spin_lock(&dev_priv->mm.active_list_lock);
 	while (!list_empty(&dev_priv->mm.active_list)) {
 		struct drm_gem_object *obj;
 		struct drm_i915_gem_object *obj_priv;
@@ -1485,7 +1481,7 @@
 		 * this seqno.
 		 */
 		if (obj_priv->last_rendering_seqno != request->seqno)
-			return;
+			goto out;
 
 #if WATCH_LRU
 		DRM_INFO("%s: retire %d moves to inactive list %p\n",
@@ -1497,6 +1493,8 @@
 		else
 			i915_gem_object_move_to_inactive(obj);
 	}
+out:
+	spin_unlock(&dev_priv->mm.active_list_lock);
 }
 
 /**
@@ -1996,20 +1990,23 @@
 	int regnum = obj_priv->fence_reg;
 	uint32_t val;
 	uint32_t pitch_val;
+	uint32_t fence_size_bits;
 
-	if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
+	if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
 	    (obj_priv->gtt_offset & (obj->size - 1))) {
-		WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
+		WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
 		     __func__, obj_priv->gtt_offset);
 		return;
 	}
 
 	pitch_val = (obj_priv->stride / 128) - 1;
-
+	WARN_ON(pitch_val & ~0x0000000f);
 	val = obj_priv->gtt_offset;
 	if (obj_priv->tiling_mode == I915_TILING_Y)
 		val |= 1 << I830_FENCE_TILING_Y_SHIFT;
-	val |= I830_FENCE_SIZE_BITS(obj->size);
+	fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
+	WARN_ON(fence_size_bits & ~0x00000f00);
+	val |= fence_size_bits;
 	val |= pitch_val << I830_FENCE_PITCH_SHIFT;
 	val |= I830_FENCE_REG_VALID;
 
@@ -2203,7 +2194,7 @@
 		return -EBUSY;
 	if (alignment == 0)
 		alignment = i915_gem_get_gtt_alignment(obj);
-	if (alignment & (PAGE_SIZE - 1)) {
+	if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
 		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
 		return -EINVAL;
 	}
@@ -2220,15 +2211,20 @@
 		}
 	}
 	if (obj_priv->gtt_space == NULL) {
+		bool lists_empty;
+
 		/* If the gtt is empty and we're still having trouble
 		 * fitting our object in, we're out of memory.
 		 */
 #if WATCH_LRU
 		DRM_INFO("%s: GTT full, evicting something\n", __func__);
 #endif
-		if (list_empty(&dev_priv->mm.inactive_list) &&
-		    list_empty(&dev_priv->mm.flushing_list) &&
-		    list_empty(&dev_priv->mm.active_list)) {
+		spin_lock(&dev_priv->mm.active_list_lock);
+		lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+			       list_empty(&dev_priv->mm.flushing_list) &&
+			       list_empty(&dev_priv->mm.active_list));
+		spin_unlock(&dev_priv->mm.active_list_lock);
+		if (lists_empty) {
 			DRM_ERROR("GTT full, but LRU list empty\n");
 			return -ENOMEM;
 		}
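One detail worth calling out before the remaining i915_gem.c hunks: the bind-time check now masks the requested alignment against the object's minimum GTT alignment instead of PAGE_SIZE. That only works because the minimum alignment is a power of two, so value & (min - 1) is zero exactly when value is a multiple of min. A small standalone illustration (the helper name and the 512K figure are mine, purely for demonstration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustration of the mask test used above: assuming min_align is a
 * power of two, (value & (min_align - 1)) == 0 exactly when value is a
 * multiple of min_align.
 */
static bool is_aligned(uint32_t value, uint32_t min_align)
{
	return (value & (min_align - 1)) == 0;
}

int main(void)
{
	/* Hypothetical 512 KiB minimum, as a fenced object might require. */
	uint32_t min_align = 512 * 1024;

	printf("%d %d\n",
	       is_aligned(1024 * 1024, min_align),	/* 1: multiple of 512K */
	       is_aligned(4096, min_align));		/* 0: page-aligned only */
	return 0;
}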
@@ -3689,6 +3675,7 @@
 
 	i915_gem_retire_requests(dev);
 
+	spin_lock(&dev_priv->mm.active_list_lock);
 	if (!dev_priv->mm.wedged) {
 		/* Active and flushing should now be empty as we've
 		 * waited for a sequence higher than any pending execbuffer
@@ -3716,6 +3701,7 @@
 		obj_priv->obj->write_domain &= ~I915_GEM_GPU_DOMAINS;
 		i915_gem_object_move_to_inactive(obj_priv->obj);
 	}
+	spin_unlock(&dev_priv->mm.active_list_lock);
 
 	while (!list_empty(&dev_priv->mm.flushing_list)) {
 		struct drm_i915_gem_object *obj_priv;
@@ -3965,7 +3949,10 @@
 	if (ret != 0)
 		return ret;
 
+	spin_lock(&dev_priv->mm.active_list_lock);
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	BUG_ON(!list_empty(&dev_priv->mm.request_list));
@@ -4012,6 +3993,7 @@
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
+	spin_lock_init(&dev_priv->mm.active_list_lock);
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
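Taken together, the i915_gem.c hunks put every reader and writer of mm.active_list under the new active_list_lock: the lock is initialised before first use, writers hold it across list_move_tail(), and readers hold it while testing list_empty(). The same pattern, reduced to a self-contained sketch with generic names in place of the i915 structures:

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Generic stand-in for the list the driver protects. */
struct obj_tracker {
	spinlock_t active_lock;		/* guards active_list and membership on it */
	struct list_head active_list;
};

struct tracked_obj {
	struct list_head link;
};

static void tracker_init(struct obj_tracker *t)
{
	spin_lock_init(&t->active_lock);	/* must happen before the first lock/unlock */
	INIT_LIST_HEAD(&t->active_list);
}

static void tracker_mark_active(struct obj_tracker *t, struct tracked_obj *obj)
{
	spin_lock(&t->active_lock);
	list_move_tail(&obj->link, &t->active_list);	/* writer: manipulate under the lock */
	spin_unlock(&t->active_lock);
}

static bool tracker_idle(struct obj_tracker *t)
{
	bool empty;

	spin_lock(&t->active_lock);
	empty = list_empty(&t->active_list);	/* reader: snapshot while holding the lock */
	spin_unlock(&t->active_lock);

	return empty;
}

As in the patch, plain spin_lock() suffices here as long as none of these paths runs from interrupt context.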