Merge branch 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (44 commits)
drm/i915: fix ioremap of a user address for non-root (CVE-2008-3831)
drm: make CONFIG_DRM depend on CONFIG_SHMEM.
radeon: fix PCI bus mastering support enables.
radeon: add RS400 family support.
drm/radeon: add support for RS740 IGP chipsets.
i915: GM45 has GM965-style MCH setup.
i915: Don't run retire work handler while suspended
i915: Map status page cached for chips with GTT-based HWS location.
i915: Fix up ring initialization to cover G45 oddities
i915: Use non-reserved status page index for breadcrumb
drm: Increment dev_priv->irq_received so i915_gem_interrupts count works.
drm: kill drm_device->irq
drm: wbinvd is cache coherent.
i915: add missing return in error path.
i915: fixup permissions on gem ioctls.
drm: Clean up many sparse warnings in i915.
drm: Use ioremap_wc in i915_driver instead of ioremap, since we always want WC.
drm: G33-class hardware has a newer 965-style MCH (no DCC register).
drm: Avoid oops in GEM execbuffers with bad arguments.
DRM: Return -EBADF on bad object in flink, and return current name if it exists.
...

+8816 -1966
+1
arch/x86/mm/highmem_32.c
··· 137 138 return (void*) vaddr; 139 } 140 141 struct page *kmap_atomic_to_page(void *ptr) 142 {
··· 137 138 return (void*) vaddr; 139 } 140 + EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */ 141 142 struct page *kmap_atomic_to_page(void *ptr) 143 {
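The kmap_atomic_pfn export is explicitly temporary: i915 GEM uses it to map single pages of the GTT aperture by page frame number while copying user data, until a vmap-based path exists. A minimal sketch of that usage, assuming the 2.6.27-era kmap_atomic_pfn(pfn, km_type) signature; the helper name and parameters are illustrative, not the actual i915 code:

/* Copy up to one page of user data into a GTT aperture page addressed
 * by pfn. Illustrative only; error handling is reduced to -EFAULT. */
static int copy_user_data_to_gtt_page(unsigned long gtt_page_pfn,
				      const char __user *user_data,
				      unsigned long len)
{
	char *vaddr;
	unsigned long unwritten;

	vaddr = kmap_atomic_pfn(gtt_page_pfn, KM_USER0);
	unwritten = __copy_from_user_inatomic(vaddr, user_data, len);
	kunmap_atomic(vaddr, KM_USER0);

	return unwritten ? -EFAULT : 0;
}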
+2 -1
drivers/gpu/drm/Kconfig
··· 6 # 7 menuconfig DRM 8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" 9 - depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG 10 help 11 Kernel-level support for the Direct Rendering Infrastructure (DRI) 12 introduced in XFree86 4.0. If you say Y here, you need to select ··· 87 config DRM_SIS 88 tristate "SiS video cards" 89 depends on DRM && AGP 90 help 91 Choose this option if you have a SiS 630 or compatible video 92 chipset. If M is selected the module will be called sis. AGP
··· 6 # 7 menuconfig DRM 8 tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)" 9 + depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && SHMEM 10 help 11 Kernel-level support for the Direct Rendering Infrastructure (DRI) 12 introduced in XFree86 4.0. If you say Y here, you need to select ··· 87 config DRM_SIS 88 tristate "SiS video cards" 89 depends on DRM && AGP 90 + depends on FB_SIS || FB_SIS=n 91 help 92 Choose this option if you have a SiS 630 or compatible video 93 chipset. If M is selected the module will be called sis. AGP
+3 -2
drivers/gpu/drm/Makefile
··· 4 5 ccflags-y := -Iinclude/drm 6 7 - drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ 8 - drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \ 9 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 10 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 11 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
··· 4 5 ccflags-y := -Iinclude/drm 6 7 + drm-y := drm_auth.o drm_bufs.o drm_cache.o \ 8 + drm_context.o drm_dma.o drm_drawable.o \ 9 + drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ 10 drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ 11 drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ 12 drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
+51 -1
drivers/gpu/drm/drm_agpsupport.c
··· 33 34 #include "drmP.h" 35 #include <linux/module.h> 36 37 #if __OS_HAS_AGP 38 ··· 453 return agp_unbind_memory(handle); 454 } 455 456 - #endif /* __OS_HAS_AGP */
··· 33 34 #include "drmP.h" 35 #include <linux/module.h> 36 + #include <asm/agp.h> 37 38 #if __OS_HAS_AGP 39 ··· 452 return agp_unbind_memory(handle); 453 } 454 455 + /** 456 + * Binds a collection of pages into AGP memory at the given offset, returning 457 + * the AGP memory structure containing them. 458 + * 459 + * No reference is held on the pages during this time -- it is up to the 460 + * caller to handle that. 461 + */ 462 + DRM_AGP_MEM * 463 + drm_agp_bind_pages(struct drm_device *dev, 464 + struct page **pages, 465 + unsigned long num_pages, 466 + uint32_t gtt_offset, 467 + u32 type) 468 + { 469 + DRM_AGP_MEM *mem; 470 + int ret, i; 471 + 472 + DRM_DEBUG("\n"); 473 + 474 + mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages, 475 + type); 476 + if (mem == NULL) { 477 + DRM_ERROR("Failed to allocate memory for %ld pages\n", 478 + num_pages); 479 + return NULL; 480 + } 481 + 482 + for (i = 0; i < num_pages; i++) 483 + mem->memory[i] = phys_to_gart(page_to_phys(pages[i])); 484 + mem->page_count = num_pages; 485 + 486 + mem->is_flushed = true; 487 + ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE); 488 + if (ret != 0) { 489 + DRM_ERROR("Failed to bind AGP memory: %d\n", ret); 490 + agp_free_memory(mem); 491 + return NULL; 492 + } 493 + 494 + return mem; 495 + } 496 + EXPORT_SYMBOL(drm_agp_bind_pages); 497 + 498 + void drm_agp_chipset_flush(struct drm_device *dev) 499 + { 500 + agp_flush_chipset(dev->agp->bridge); 501 + } 502 + EXPORT_SYMBOL(drm_agp_chipset_flush); 503 + 504 + #endif /* __OS_HAS_AGP */
+69
drivers/gpu/drm/drm_cache.c
···
··· 1 + /************************************************************************** 2 + * 3 + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA 4 + * All Rights Reserved. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the 8 + * "Software"), to deal in the Software without restriction, including 9 + * without limitation the rights to use, copy, modify, merge, publish, 10 + * distribute, sub license, and/or sell copies of the Software, and to 11 + * permit persons to whom the Software is furnished to do so, subject to 12 + * the following conditions: 13 + * 14 + * The above copyright notice and this permission notice (including the 15 + * next paragraph) shall be included in all copies or substantial portions 16 + * of the Software. 17 + * 18 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 + * 26 + **************************************************************************/ 27 + /* 28 + * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> 29 + */ 30 + 31 + #include "drmP.h" 32 + 33 + #if defined(CONFIG_X86) 34 + static void 35 + drm_clflush_page(struct page *page) 36 + { 37 + uint8_t *page_virtual; 38 + unsigned int i; 39 + 40 + if (unlikely(page == NULL)) 41 + return; 42 + 43 + page_virtual = kmap_atomic(page, KM_USER0); 44 + for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 45 + clflush(page_virtual + i); 46 + kunmap_atomic(page_virtual, KM_USER0); 47 + } 48 + #endif 49 + 50 + void 51 + drm_clflush_pages(struct page *pages[], unsigned long num_pages) 52 + { 53 + 54 + #if defined(CONFIG_X86) 55 + if (cpu_has_clflush) { 56 + unsigned long i; 57 + 58 + mb(); 59 + for (i = 0; i < num_pages; ++i) 60 + drm_clflush_page(*pages++); 61 + mb(); 62 + 63 + return; 64 + } 65 + 66 + wbinvd(); 67 + #endif 68 + } 69 + EXPORT_SYMBOL(drm_clflush_pages);
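drm_clflush_pages() is the CPU-cache half of GEM's coherency handling: on x86 it clflushes each page when the CPU supports it and falls back to wbinvd otherwise. A small hedged sketch of the typical call pattern before handing pages to the GPU; the helper name is illustrative:

/* Illustrative helper: push an object's pages out of the CPU caches,
 * then flush the chipset write buffers, before the GPU reads them. */
static void example_flush_object_for_gpu(struct drm_device *dev,
					 struct page **pages,
					 unsigned long num_pages)
{
	drm_clflush_pages(pages, num_pages);
	drm_agp_chipset_flush(dev);
}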
+6
drivers/gpu/drm/drm_drv.c
··· 116 117 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), 118 119 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 120 }; 121 122 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
··· 116 117 DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0), 118 119 + DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), 120 + 121 DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 122 + 123 + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), 124 + DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), 125 + DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), 126 }; 127 128 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
+7 -1
drivers/gpu/drm/drm_fops.c
··· 246 memset(priv, 0, sizeof(*priv)); 247 filp->private_data = priv; 248 priv->filp = filp; 249 - priv->uid = current->euid; 250 priv->pid = task_pid_nr(current); 251 priv->minor = idr_find(&drm_minors_idr, minor_id); 252 priv->ioctl_count = 0; ··· 255 priv->lock_count = 0; 256 257 INIT_LIST_HEAD(&priv->lhead); 258 259 if (dev->driver->open) { 260 ret = dev->driver->open(dev, priv); ··· 402 !dev->driver->reclaim_buffers_locked) { 403 dev->driver->reclaim_buffers(dev, file_priv); 404 } 405 406 drm_fasync(-1, filp, 0); 407
··· 246 memset(priv, 0, sizeof(*priv)); 247 filp->private_data = priv; 248 priv->filp = filp; 249 + priv->uid = current_euid(); 250 priv->pid = task_pid_nr(current); 251 priv->minor = idr_find(&drm_minors_idr, minor_id); 252 priv->ioctl_count = 0; ··· 255 priv->lock_count = 0; 256 257 INIT_LIST_HEAD(&priv->lhead); 258 + 259 + if (dev->driver->driver_features & DRIVER_GEM) 260 + drm_gem_open(dev, priv); 261 262 if (dev->driver->open) { 263 ret = dev->driver->open(dev, priv); ··· 399 !dev->driver->reclaim_buffers_locked) { 400 dev->driver->reclaim_buffers(dev, file_priv); 401 } 402 + 403 + if (dev->driver->driver_features & DRIVER_GEM) 404 + drm_gem_release(dev, file_priv); 405 406 drm_fasync(-1, filp, 0); 407
+421
drivers/gpu/drm/drm_gem.c
···
··· 1 + /* 2 + * Copyright © 2008 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Eric Anholt <eric@anholt.net> 25 + * 26 + */ 27 + 28 + #include <linux/types.h> 29 + #include <linux/slab.h> 30 + #include <linux/mm.h> 31 + #include <linux/uaccess.h> 32 + #include <linux/fs.h> 33 + #include <linux/file.h> 34 + #include <linux/module.h> 35 + #include <linux/mman.h> 36 + #include <linux/pagemap.h> 37 + #include "drmP.h" 38 + 39 + /** @file drm_gem.c 40 + * 41 + * This file provides some of the base ioctls and library routines for 42 + * the graphics memory manager implemented by each device driver. 43 + * 44 + * Because various devices have different requirements in terms of 45 + * synchronization and migration strategies, implementing that is left up to 46 + * the driver, and all that the general API provides should be generic -- 47 + * allocating objects, reading/writing data with the cpu, freeing objects. 48 + * Even there, platform-dependent optimizations for reading/writing data with 49 + * the CPU mean we'll likely hook those out to driver-specific calls. However, 50 + * the DRI2 implementation wants to have at least allocate/mmap be generic. 51 + * 52 + * The goal was to have swap-backed object allocation managed through 53 + * struct file. However, file descriptors as handles to a struct file have 54 + * two major failings: 55 + * - Process limits prevent more than 1024 or so being used at a time by 56 + * default. 57 + * - Inability to allocate high fds will aggravate the X Server's select() 58 + * handling, and likely that of many GL client applications as well. 59 + * 60 + * This led to a plan of using our own integer IDs (called handles, following 61 + * DRM terminology) to mimic fds, and implement the fd syscalls we need as 62 + * ioctls. The objects themselves will still include the struct file so 63 + * that we can transition to fds if the required kernel infrastructure shows 64 + * up at a later date, and as our interface with shmfs for memory allocation. 
65 + */ 66 + 67 + /** 68 + * Initialize the GEM device fields 69 + */ 70 + 71 + int 72 + drm_gem_init(struct drm_device *dev) 73 + { 74 + spin_lock_init(&dev->object_name_lock); 75 + idr_init(&dev->object_name_idr); 76 + atomic_set(&dev->object_count, 0); 77 + atomic_set(&dev->object_memory, 0); 78 + atomic_set(&dev->pin_count, 0); 79 + atomic_set(&dev->pin_memory, 0); 80 + atomic_set(&dev->gtt_count, 0); 81 + atomic_set(&dev->gtt_memory, 0); 82 + return 0; 83 + } 84 + 85 + /** 86 + * Allocate a GEM object of the specified size with shmfs backing store 87 + */ 88 + struct drm_gem_object * 89 + drm_gem_object_alloc(struct drm_device *dev, size_t size) 90 + { 91 + struct drm_gem_object *obj; 92 + 93 + BUG_ON((size & (PAGE_SIZE - 1)) != 0); 94 + 95 + obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); 96 + 97 + obj->dev = dev; 98 + obj->filp = shmem_file_setup("drm mm object", size, 0); 99 + if (IS_ERR(obj->filp)) { 100 + kfree(obj); 101 + return NULL; 102 + } 103 + 104 + kref_init(&obj->refcount); 105 + kref_init(&obj->handlecount); 106 + obj->size = size; 107 + if (dev->driver->gem_init_object != NULL && 108 + dev->driver->gem_init_object(obj) != 0) { 109 + fput(obj->filp); 110 + kfree(obj); 111 + return NULL; 112 + } 113 + atomic_inc(&dev->object_count); 114 + atomic_add(obj->size, &dev->object_memory); 115 + return obj; 116 + } 117 + EXPORT_SYMBOL(drm_gem_object_alloc); 118 + 119 + /** 120 + * Removes the mapping from handle to filp for this object. 121 + */ 122 + static int 123 + drm_gem_handle_delete(struct drm_file *filp, int handle) 124 + { 125 + struct drm_device *dev; 126 + struct drm_gem_object *obj; 127 + 128 + /* This is gross. The idr system doesn't let us try a delete and 129 + * return an error code. It just spews if you fail at deleting. 130 + * So, we have to grab a lock around finding the object and then 131 + * doing the delete on it and dropping the refcount, or the user 132 + * could race us to double-decrement the refcount and cause a 133 + * use-after-free later. Given the frequency of our handle lookups, 134 + * we may want to use ida for number allocation and a hash table 135 + * for the pointers, anyway. 136 + */ 137 + spin_lock(&filp->table_lock); 138 + 139 + /* Check if we currently have a reference on the object */ 140 + obj = idr_find(&filp->object_idr, handle); 141 + if (obj == NULL) { 142 + spin_unlock(&filp->table_lock); 143 + return -EINVAL; 144 + } 145 + dev = obj->dev; 146 + 147 + /* Release reference and decrement refcount. */ 148 + idr_remove(&filp->object_idr, handle); 149 + spin_unlock(&filp->table_lock); 150 + 151 + mutex_lock(&dev->struct_mutex); 152 + drm_gem_object_handle_unreference(obj); 153 + mutex_unlock(&dev->struct_mutex); 154 + 155 + return 0; 156 + } 157 + 158 + /** 159 + * Create a handle for this object. This adds a handle reference 160 + * to the object, which includes a regular reference count. Callers 161 + * will likely want to dereference the object afterwards. 162 + */ 163 + int 164 + drm_gem_handle_create(struct drm_file *file_priv, 165 + struct drm_gem_object *obj, 166 + int *handlep) 167 + { 168 + int ret; 169 + 170 + /* 171 + * Get the user-visible handle using idr. 
172 + */ 173 + again: 174 + /* ensure there is space available to allocate a handle */ 175 + if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) 176 + return -ENOMEM; 177 + 178 + /* do the allocation under our spinlock */ 179 + spin_lock(&file_priv->table_lock); 180 + ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep); 181 + spin_unlock(&file_priv->table_lock); 182 + if (ret == -EAGAIN) 183 + goto again; 184 + 185 + if (ret != 0) 186 + return ret; 187 + 188 + drm_gem_object_handle_reference(obj); 189 + return 0; 190 + } 191 + EXPORT_SYMBOL(drm_gem_handle_create); 192 + 193 + /** Returns a reference to the object named by the handle. */ 194 + struct drm_gem_object * 195 + drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, 196 + int handle) 197 + { 198 + struct drm_gem_object *obj; 199 + 200 + spin_lock(&filp->table_lock); 201 + 202 + /* Check if we currently have a reference on the object */ 203 + obj = idr_find(&filp->object_idr, handle); 204 + if (obj == NULL) { 205 + spin_unlock(&filp->table_lock); 206 + return NULL; 207 + } 208 + 209 + drm_gem_object_reference(obj); 210 + 211 + spin_unlock(&filp->table_lock); 212 + 213 + return obj; 214 + } 215 + EXPORT_SYMBOL(drm_gem_object_lookup); 216 + 217 + /** 218 + * Releases the handle to an mm object. 219 + */ 220 + int 221 + drm_gem_close_ioctl(struct drm_device *dev, void *data, 222 + struct drm_file *file_priv) 223 + { 224 + struct drm_gem_close *args = data; 225 + int ret; 226 + 227 + if (!(dev->driver->driver_features & DRIVER_GEM)) 228 + return -ENODEV; 229 + 230 + ret = drm_gem_handle_delete(file_priv, args->handle); 231 + 232 + return ret; 233 + } 234 + 235 + /** 236 + * Create a global name for an object, returning the name. 237 + * 238 + * Note that the name does not hold a reference; when the object 239 + * is freed, the name goes away. 240 + */ 241 + int 242 + drm_gem_flink_ioctl(struct drm_device *dev, void *data, 243 + struct drm_file *file_priv) 244 + { 245 + struct drm_gem_flink *args = data; 246 + struct drm_gem_object *obj; 247 + int ret; 248 + 249 + if (!(dev->driver->driver_features & DRIVER_GEM)) 250 + return -ENODEV; 251 + 252 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 253 + if (obj == NULL) 254 + return -EBADF; 255 + 256 + again: 257 + if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) 258 + return -ENOMEM; 259 + 260 + spin_lock(&dev->object_name_lock); 261 + if (obj->name) { 262 + args->name = obj->name; 263 + spin_unlock(&dev->object_name_lock); 264 + return 0; 265 + } 266 + ret = idr_get_new_above(&dev->object_name_idr, obj, 1, 267 + &obj->name); 268 + spin_unlock(&dev->object_name_lock); 269 + if (ret == -EAGAIN) 270 + goto again; 271 + 272 + if (ret != 0) { 273 + mutex_lock(&dev->struct_mutex); 274 + drm_gem_object_unreference(obj); 275 + mutex_unlock(&dev->struct_mutex); 276 + return ret; 277 + } 278 + 279 + /* 280 + * Leave the reference from the lookup around as the 281 + * name table now holds one 282 + */ 283 + args->name = (uint64_t) obj->name; 284 + 285 + return 0; 286 + } 287 + 288 + /** 289 + * Open an object using the global name, returning a handle and the size. 290 + * 291 + * This handle (of course) holds a reference to the object, so the object 292 + * will not go away until the handle is deleted. 
293 + */ 294 + int 295 + drm_gem_open_ioctl(struct drm_device *dev, void *data, 296 + struct drm_file *file_priv) 297 + { 298 + struct drm_gem_open *args = data; 299 + struct drm_gem_object *obj; 300 + int ret; 301 + int handle; 302 + 303 + if (!(dev->driver->driver_features & DRIVER_GEM)) 304 + return -ENODEV; 305 + 306 + spin_lock(&dev->object_name_lock); 307 + obj = idr_find(&dev->object_name_idr, (int) args->name); 308 + if (obj) 309 + drm_gem_object_reference(obj); 310 + spin_unlock(&dev->object_name_lock); 311 + if (!obj) 312 + return -ENOENT; 313 + 314 + ret = drm_gem_handle_create(file_priv, obj, &handle); 315 + mutex_lock(&dev->struct_mutex); 316 + drm_gem_object_unreference(obj); 317 + mutex_unlock(&dev->struct_mutex); 318 + if (ret) 319 + return ret; 320 + 321 + args->handle = handle; 322 + args->size = obj->size; 323 + 324 + return 0; 325 + } 326 + 327 + /** 328 + * Called at device open time, sets up the structure for handling refcounting 329 + * of mm objects. 330 + */ 331 + void 332 + drm_gem_open(struct drm_device *dev, struct drm_file *file_private) 333 + { 334 + idr_init(&file_private->object_idr); 335 + spin_lock_init(&file_private->table_lock); 336 + } 337 + 338 + /** 339 + * Called at device close to release the file's 340 + * handle references on objects. 341 + */ 342 + static int 343 + drm_gem_object_release_handle(int id, void *ptr, void *data) 344 + { 345 + struct drm_gem_object *obj = ptr; 346 + 347 + drm_gem_object_handle_unreference(obj); 348 + 349 + return 0; 350 + } 351 + 352 + /** 353 + * Called at close time when the filp is going away. 354 + * 355 + * Releases any remaining references on objects by this filp. 356 + */ 357 + void 358 + drm_gem_release(struct drm_device *dev, struct drm_file *file_private) 359 + { 360 + mutex_lock(&dev->struct_mutex); 361 + idr_for_each(&file_private->object_idr, 362 + &drm_gem_object_release_handle, NULL); 363 + 364 + idr_destroy(&file_private->object_idr); 365 + mutex_unlock(&dev->struct_mutex); 366 + } 367 + 368 + /** 369 + * Called after the last reference to the object has been lost. 370 + * 371 + * Frees the object 372 + */ 373 + void 374 + drm_gem_object_free(struct kref *kref) 375 + { 376 + struct drm_gem_object *obj = (struct drm_gem_object *) kref; 377 + struct drm_device *dev = obj->dev; 378 + 379 + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 380 + 381 + if (dev->driver->gem_free_object != NULL) 382 + dev->driver->gem_free_object(obj); 383 + 384 + fput(obj->filp); 385 + atomic_dec(&dev->object_count); 386 + atomic_sub(obj->size, &dev->object_memory); 387 + kfree(obj); 388 + } 389 + EXPORT_SYMBOL(drm_gem_object_free); 390 + 391 + /** 392 + * Called after the last handle to the object has been closed 393 + * 394 + * Removes any name for the object. Note that this must be 395 + * called before drm_gem_object_free or we'll be touching 396 + * freed memory 397 + */ 398 + void 399 + drm_gem_object_handle_free(struct kref *kref) 400 + { 401 + struct drm_gem_object *obj = container_of(kref, 402 + struct drm_gem_object, 403 + handlecount); 404 + struct drm_device *dev = obj->dev; 405 + 406 + /* Remove any name for this object */ 407 + spin_lock(&dev->object_name_lock); 408 + if (obj->name) { 409 + idr_remove(&dev->object_name_idr, obj->name); 410 + spin_unlock(&dev->object_name_lock); 411 + /* 412 + * The object name held a reference to this object, drop 413 + * that now. 
414 + */ 415 + drm_gem_object_unreference(obj); 416 + } else 417 + spin_unlock(&dev->object_name_lock); 418 + 419 + } 420 + EXPORT_SYMBOL(drm_gem_object_handle_free); 421 +
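The flink/open pair is how two processes share a GEM object: the owner turns a per-file handle into a global name, the peer turns that name back into its own handle (taking a reference), and GEM_CLOSE drops a handle. A userspace sketch using the new ioctls, assuming the drm_gem_flink/drm_gem_open struct layouts this series adds to drm.h; the header path is install-dependent and error handling is omitted:

#include <stdint.h>
#include <sys/ioctl.h>
#include "drm.h"		/* location depends on the libdrm install */

/* Owning process: publish a global name for a handle it already holds. */
static uint32_t publish_name(int fd, uint32_t handle)
{
	struct drm_gem_flink flink = { .handle = handle };

	ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink);
	return flink.name;
}

/* Peer process: convert the global name into a local handle and size. */
static uint32_t open_by_name(int fd, uint32_t name, uint64_t *size)
{
	struct drm_gem_open op = { .name = name };

	ioctl(fd, DRM_IOCTL_GEM_OPEN, &op);
	*size = op.size;
	return op.handle;
}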
+386 -76
drivers/gpu/drm/drm_irq.c
··· 63 p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn)) 64 return -EINVAL; 65 66 - p->irq = dev->irq; 67 68 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, 69 p->irq); ··· 71 return 0; 72 } 73 74 /** 75 * Install IRQ handler. 76 * 77 * \param dev DRM device. 78 - * \param irq IRQ number. 79 * 80 - * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver 81 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions 82 * before and after the installation. 83 */ 84 - static int drm_irq_install(struct drm_device * dev) 85 { 86 - int ret; 87 unsigned long sh_flags = 0; 88 89 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 90 return -EINVAL; 91 92 - if (dev->irq == 0) 93 return -EINVAL; 94 95 mutex_lock(&dev->struct_mutex); ··· 219 dev->irq_enabled = 1; 220 mutex_unlock(&dev->struct_mutex); 221 222 - DRM_DEBUG("irq=%d\n", dev->irq); 223 - 224 - if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) { 225 - init_waitqueue_head(&dev->vbl_queue); 226 - 227 - spin_lock_init(&dev->vbl_lock); 228 - 229 - INIT_LIST_HEAD(&dev->vbl_sigs); 230 - INIT_LIST_HEAD(&dev->vbl_sigs2); 231 - 232 - dev->vbl_pending = 0; 233 - } 234 235 /* Before installing handler */ 236 dev->driver->irq_preinstall(dev); ··· 228 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) 229 sh_flags = IRQF_SHARED; 230 231 - ret = request_irq(dev->irq, dev->driver->irq_handler, 232 sh_flags, dev->devname, dev); 233 if (ret < 0) { 234 mutex_lock(&dev->struct_mutex); 235 dev->irq_enabled = 0; ··· 239 } 240 241 /* After installing handler */ 242 - dev->driver->irq_postinstall(dev); 243 244 - return 0; 245 } 246 247 /** 248 * Uninstall the IRQ handler. ··· 272 if (!irq_enabled) 273 return -EINVAL; 274 275 - DRM_DEBUG("irq=%d\n", dev->irq); 276 277 dev->driver->irq_uninstall(dev); 278 279 - free_irq(dev->irq, dev); 280 281 dev->locked_tasklet_func = NULL; 282 283 return 0; 284 } 285 - 286 EXPORT_SYMBOL(drm_irq_uninstall); 287 288 /** ··· 310 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 311 return 0; 312 if (dev->if_version < DRM_IF_VERSION(1, 2) && 313 - ctl->irq != dev->irq) 314 return -EINVAL; 315 return drm_irq_install(dev); 316 case DRM_UNINST_HANDLER: ··· 320 default: 321 return -EINVAL; 322 } 323 } 324 325 /** ··· 509 * 510 * If a signal is not requested, then calls vblank_wait(). 511 */ 512 - int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv) 513 { 514 union drm_wait_vblank *vblwait = data; 515 - struct timeval now; 516 int ret = 0; 517 - unsigned int flags, seq; 518 519 - if ((!dev->irq) || (!dev->irq_enabled)) 520 return -EINVAL; 521 522 if (vblwait->request.type & ··· 528 } 529 530 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; 531 532 - if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ? 533 - DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL)) 534 return -EINVAL; 535 536 - seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2 537 - : &dev->vbl_received); 538 539 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { 540 case _DRM_VBLANK_RELATIVE: ··· 547 case _DRM_VBLANK_ABSOLUTE: 548 break; 549 default: 550 - return -EINVAL; 551 } 552 553 if ((flags & _DRM_VBLANK_NEXTONMISS) && ··· 558 559 if (flags & _DRM_VBLANK_SIGNAL) { 560 unsigned long irqflags; 561 - struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY) 562 - ? 
&dev->vbl_sigs2 : &dev->vbl_sigs; 563 struct drm_vbl_sig *vbl_sig; 564 565 spin_lock_irqsave(&dev->vbl_lock, irqflags); ··· 579 } 580 } 581 582 - if (dev->vbl_pending >= 100) { 583 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 584 - return -EBUSY; 585 } 586 - 587 - dev->vbl_pending++; 588 589 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 590 591 - if (! 592 - (vbl_sig = 593 - drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) { 594 - return -ENOMEM; 595 } 596 597 - memset((void *)vbl_sig, 0, sizeof(*vbl_sig)); 598 599 vbl_sig->sequence = vblwait->request.sequence; 600 vbl_sig->info.si_signo = vblwait->request.signal; ··· 615 616 vblwait->reply.sequence = seq; 617 } else { 618 - if (flags & _DRM_VBLANK_SECONDARY) { 619 - if (dev->driver->vblank_wait2) 620 - ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence); 621 - } else if (dev->driver->vblank_wait) 622 - ret = 623 - dev->driver->vblank_wait(dev, 624 - &vblwait->request.sequence); 625 626 - do_gettimeofday(&now); 627 - vblwait->reply.tval_sec = now.tv_sec; 628 - vblwait->reply.tval_usec = now.tv_usec; 629 } 630 631 - done: 632 return ret; 633 } 634 ··· 645 * Send the VBLANK signals. 646 * 647 * \param dev DRM device. 648 * 649 * Sends a signal for each task in drm_device::vbl_sigs and empties the list. 650 * 651 * If a signal is not requested, then calls vblank_wait(). 652 */ 653 - void drm_vbl_send_signals(struct drm_device * dev) 654 { 655 unsigned long flags; 656 - int i; 657 658 spin_lock_irqsave(&dev->vbl_lock, flags); 659 660 - for (i = 0; i < 2; i++) { 661 - struct drm_vbl_sig *vbl_sig, *tmp; 662 - struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs; 663 - unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 : 664 - &dev->vbl_received); 665 666 - list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) { 667 - if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { 668 - vbl_sig->info.si_code = vbl_seq; 669 - send_sig_info(vbl_sig->info.si_signo, 670 - &vbl_sig->info, vbl_sig->task); 671 672 - list_del(&vbl_sig->head); 673 674 - drm_free(vbl_sig, sizeof(*vbl_sig), 675 - DRM_MEM_DRIVER); 676 - 677 - dev->vbl_pending--; 678 - } 679 - } 680 } 681 682 spin_unlock_irqrestore(&dev->vbl_lock, flags); 683 } 684 685 - EXPORT_SYMBOL(drm_vbl_send_signals); 686 687 /** 688 * Tasklet wrapper function.
··· 63 p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn)) 64 return -EINVAL; 65 66 + p->irq = dev->pdev->irq; 67 68 DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum, 69 p->irq); ··· 71 return 0; 72 } 73 74 + static void vblank_disable_fn(unsigned long arg) 75 + { 76 + struct drm_device *dev = (struct drm_device *)arg; 77 + unsigned long irqflags; 78 + int i; 79 + 80 + if (!dev->vblank_disable_allowed) 81 + return; 82 + 83 + for (i = 0; i < dev->num_crtcs; i++) { 84 + spin_lock_irqsave(&dev->vbl_lock, irqflags); 85 + if (atomic_read(&dev->vblank_refcount[i]) == 0 && 86 + dev->vblank_enabled[i]) { 87 + DRM_DEBUG("disabling vblank on crtc %d\n", i); 88 + dev->last_vblank[i] = 89 + dev->driver->get_vblank_counter(dev, i); 90 + dev->driver->disable_vblank(dev, i); 91 + dev->vblank_enabled[i] = 0; 92 + } 93 + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 94 + } 95 + } 96 + 97 + static void drm_vblank_cleanup(struct drm_device *dev) 98 + { 99 + /* Bail if the driver didn't call drm_vblank_init() */ 100 + if (dev->num_crtcs == 0) 101 + return; 102 + 103 + del_timer(&dev->vblank_disable_timer); 104 + 105 + vblank_disable_fn((unsigned long)dev); 106 + 107 + drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs, 108 + DRM_MEM_DRIVER); 109 + drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs, 110 + DRM_MEM_DRIVER); 111 + drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) * 112 + dev->num_crtcs, DRM_MEM_DRIVER); 113 + drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) * 114 + dev->num_crtcs, DRM_MEM_DRIVER); 115 + drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) * 116 + dev->num_crtcs, DRM_MEM_DRIVER); 117 + drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs, 118 + DRM_MEM_DRIVER); 119 + drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) * 120 + dev->num_crtcs, DRM_MEM_DRIVER); 121 + 122 + dev->num_crtcs = 0; 123 + } 124 + 125 + int drm_vblank_init(struct drm_device *dev, int num_crtcs) 126 + { 127 + int i, ret = -ENOMEM; 128 + 129 + setup_timer(&dev->vblank_disable_timer, vblank_disable_fn, 130 + (unsigned long)dev); 131 + spin_lock_init(&dev->vbl_lock); 132 + atomic_set(&dev->vbl_signal_pending, 0); 133 + dev->num_crtcs = num_crtcs; 134 + 135 + dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs, 136 + DRM_MEM_DRIVER); 137 + if (!dev->vbl_queue) 138 + goto err; 139 + 140 + dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs, 141 + DRM_MEM_DRIVER); 142 + if (!dev->vbl_sigs) 143 + goto err; 144 + 145 + dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs, 146 + DRM_MEM_DRIVER); 147 + if (!dev->_vblank_count) 148 + goto err; 149 + 150 + dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs, 151 + DRM_MEM_DRIVER); 152 + if (!dev->vblank_refcount) 153 + goto err; 154 + 155 + dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int), 156 + DRM_MEM_DRIVER); 157 + if (!dev->vblank_enabled) 158 + goto err; 159 + 160 + dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER); 161 + if (!dev->last_vblank) 162 + goto err; 163 + 164 + dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int), 165 + DRM_MEM_DRIVER); 166 + if (!dev->vblank_inmodeset) 167 + goto err; 168 + 169 + /* Zero per-crtc vblank stuff */ 170 + for (i = 0; i < num_crtcs; i++) { 171 + init_waitqueue_head(&dev->vbl_queue[i]); 172 + INIT_LIST_HEAD(&dev->vbl_sigs[i]); 173 + atomic_set(&dev->_vblank_count[i], 0); 174 + atomic_set(&dev->vblank_refcount[i], 0); 175 
+ } 176 + 177 + dev->vblank_disable_allowed = 0; 178 + 179 + return 0; 180 + 181 + err: 182 + drm_vblank_cleanup(dev); 183 + return ret; 184 + } 185 + EXPORT_SYMBOL(drm_vblank_init); 186 + 187 /** 188 * Install IRQ handler. 189 * 190 * \param dev DRM device. 191 * 192 + * Initializes the IRQ related data. Installs the handler, calling the driver 193 * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions 194 * before and after the installation. 195 */ 196 + int drm_irq_install(struct drm_device *dev) 197 { 198 + int ret = 0; 199 unsigned long sh_flags = 0; 200 201 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 202 return -EINVAL; 203 204 + if (dev->pdev->irq == 0) 205 return -EINVAL; 206 207 mutex_lock(&dev->struct_mutex); ··· 107 dev->irq_enabled = 1; 108 mutex_unlock(&dev->struct_mutex); 109 110 + DRM_DEBUG("irq=%d\n", dev->pdev->irq); 111 112 /* Before installing handler */ 113 dev->driver->irq_preinstall(dev); ··· 127 if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED)) 128 sh_flags = IRQF_SHARED; 129 130 + ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler, 131 sh_flags, dev->devname, dev); 132 + 133 if (ret < 0) { 134 mutex_lock(&dev->struct_mutex); 135 dev->irq_enabled = 0; ··· 137 } 138 139 /* After installing handler */ 140 + ret = dev->driver->irq_postinstall(dev); 141 + if (ret < 0) { 142 + mutex_lock(&dev->struct_mutex); 143 + dev->irq_enabled = 0; 144 + mutex_unlock(&dev->struct_mutex); 145 + } 146 147 + return ret; 148 } 149 + EXPORT_SYMBOL(drm_irq_install); 150 151 /** 152 * Uninstall the IRQ handler. ··· 164 if (!irq_enabled) 165 return -EINVAL; 166 167 + DRM_DEBUG("irq=%d\n", dev->pdev->irq); 168 169 dev->driver->irq_uninstall(dev); 170 171 + free_irq(dev->pdev->irq, dev); 172 + 173 + drm_vblank_cleanup(dev); 174 175 dev->locked_tasklet_func = NULL; 176 177 return 0; 178 } 179 EXPORT_SYMBOL(drm_irq_uninstall); 180 181 /** ··· 201 if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ)) 202 return 0; 203 if (dev->if_version < DRM_IF_VERSION(1, 2) && 204 + ctl->irq != dev->pdev->irq) 205 return -EINVAL; 206 return drm_irq_install(dev); 207 case DRM_UNINST_HANDLER: ··· 211 default: 212 return -EINVAL; 213 } 214 + } 215 + 216 + /** 217 + * drm_vblank_count - retrieve "cooked" vblank counter value 218 + * @dev: DRM device 219 + * @crtc: which counter to retrieve 220 + * 221 + * Fetches the "cooked" vblank count value that represents the number of 222 + * vblank events since the system was booted, including lost events due to 223 + * modesetting activity. 224 + */ 225 + u32 drm_vblank_count(struct drm_device *dev, int crtc) 226 + { 227 + return atomic_read(&dev->_vblank_count[crtc]); 228 + } 229 + EXPORT_SYMBOL(drm_vblank_count); 230 + 231 + /** 232 + * drm_update_vblank_count - update the master vblank counter 233 + * @dev: DRM device 234 + * @crtc: counter to update 235 + * 236 + * Call back into the driver to update the appropriate vblank counter 237 + * (specified by @crtc). Deal with wraparound, if it occurred, and 238 + * update the last read value so we can deal with wraparound on the next 239 + * call if necessary. 240 + * 241 + * Only necessary when going from off->on, to account for frames we 242 + * didn't get an interrupt for. 243 + * 244 + * Note: caller must hold dev->vbl_lock since this reads & writes 245 + * device vblank fields. 
246 + */ 247 + static void drm_update_vblank_count(struct drm_device *dev, int crtc) 248 + { 249 + u32 cur_vblank, diff; 250 + 251 + /* 252 + * Interrupts were disabled prior to this call, so deal with counter 253 + * wrap if needed. 254 + * NOTE! It's possible we lost a full dev->max_vblank_count events 255 + * here if the register is small or we had vblank interrupts off for 256 + * a long time. 257 + */ 258 + cur_vblank = dev->driver->get_vblank_counter(dev, crtc); 259 + diff = cur_vblank - dev->last_vblank[crtc]; 260 + if (cur_vblank < dev->last_vblank[crtc]) { 261 + diff += dev->max_vblank_count; 262 + 263 + DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n", 264 + crtc, dev->last_vblank[crtc], cur_vblank, diff); 265 + } 266 + 267 + DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n", 268 + crtc, diff); 269 + 270 + atomic_add(diff, &dev->_vblank_count[crtc]); 271 + } 272 + 273 + /** 274 + * drm_vblank_get - get a reference count on vblank events 275 + * @dev: DRM device 276 + * @crtc: which CRTC to own 277 + * 278 + * Acquire a reference count on vblank events to avoid having them disabled 279 + * while in use. 280 + * 281 + * RETURNS 282 + * Zero on success, nonzero on failure. 283 + */ 284 + int drm_vblank_get(struct drm_device *dev, int crtc) 285 + { 286 + unsigned long irqflags; 287 + int ret = 0; 288 + 289 + spin_lock_irqsave(&dev->vbl_lock, irqflags); 290 + /* Going from 0->1 means we have to enable interrupts again */ 291 + if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 && 292 + !dev->vblank_enabled[crtc]) { 293 + ret = dev->driver->enable_vblank(dev, crtc); 294 + DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret); 295 + if (ret) 296 + atomic_dec(&dev->vblank_refcount[crtc]); 297 + else { 298 + dev->vblank_enabled[crtc] = 1; 299 + drm_update_vblank_count(dev, crtc); 300 + } 301 + } 302 + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 303 + 304 + return ret; 305 + } 306 + EXPORT_SYMBOL(drm_vblank_get); 307 + 308 + /** 309 + * drm_vblank_put - give up ownership of vblank events 310 + * @dev: DRM device 311 + * @crtc: which counter to give up 312 + * 313 + * Release ownership of a given vblank counter, turning off interrupts 314 + * if possible. 315 + */ 316 + void drm_vblank_put(struct drm_device *dev, int crtc) 317 + { 318 + /* Last user schedules interrupt disable */ 319 + if (atomic_dec_and_test(&dev->vblank_refcount[crtc])) 320 + mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ); 321 + } 322 + EXPORT_SYMBOL(drm_vblank_put); 323 + 324 + /** 325 + * drm_modeset_ctl - handle vblank event counter changes across mode switch 326 + * @DRM_IOCTL_ARGS: standard ioctl arguments 327 + * 328 + * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET 329 + * ioctls around modesetting so that any lost vblank events are accounted for. 330 + * 331 + * Generally the counter will reset across mode sets. If interrupts are 332 + * enabled around this call, we don't have to do anything since the counter 333 + * will have already been incremented. 
334 + */ 335 + int drm_modeset_ctl(struct drm_device *dev, void *data, 336 + struct drm_file *file_priv) 337 + { 338 + struct drm_modeset_ctl *modeset = data; 339 + unsigned long irqflags; 340 + int crtc, ret = 0; 341 + 342 + /* If drm_vblank_init() hasn't been called yet, just no-op */ 343 + if (!dev->num_crtcs) 344 + goto out; 345 + 346 + crtc = modeset->crtc; 347 + if (crtc >= dev->num_crtcs) { 348 + ret = -EINVAL; 349 + goto out; 350 + } 351 + 352 + /* 353 + * To avoid all the problems that might happen if interrupts 354 + * were enabled/disabled around or between these calls, we just 355 + * have the kernel take a reference on the CRTC (just once though 356 + * to avoid corrupting the count if multiple, mismatch calls occur), 357 + * so that interrupts remain enabled in the interim. 358 + */ 359 + switch (modeset->cmd) { 360 + case _DRM_PRE_MODESET: 361 + if (!dev->vblank_inmodeset[crtc]) { 362 + dev->vblank_inmodeset[crtc] = 1; 363 + drm_vblank_get(dev, crtc); 364 + } 365 + break; 366 + case _DRM_POST_MODESET: 367 + if (dev->vblank_inmodeset[crtc]) { 368 + spin_lock_irqsave(&dev->vbl_lock, irqflags); 369 + dev->vblank_disable_allowed = 1; 370 + dev->vblank_inmodeset[crtc] = 0; 371 + spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 372 + drm_vblank_put(dev, crtc); 373 + } 374 + break; 375 + default: 376 + ret = -EINVAL; 377 + break; 378 + } 379 + 380 + out: 381 + return ret; 382 } 383 384 /** ··· 232 * 233 * If a signal is not requested, then calls vblank_wait(). 234 */ 235 + int drm_wait_vblank(struct drm_device *dev, void *data, 236 + struct drm_file *file_priv) 237 { 238 union drm_wait_vblank *vblwait = data; 239 int ret = 0; 240 + unsigned int flags, seq, crtc; 241 242 + if ((!dev->pdev->irq) || (!dev->irq_enabled)) 243 return -EINVAL; 244 245 if (vblwait->request.type & ··· 251 } 252 253 flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK; 254 + crtc = flags & _DRM_VBLANK_SECONDARY ? 
1 : 0; 255 256 + if (crtc >= dev->num_crtcs) 257 return -EINVAL; 258 259 + ret = drm_vblank_get(dev, crtc); 260 + if (ret) { 261 + DRM_ERROR("failed to acquire vblank counter, %d\n", ret); 262 + return ret; 263 + } 264 + seq = drm_vblank_count(dev, crtc); 265 266 switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) { 267 case _DRM_VBLANK_RELATIVE: ··· 266 case _DRM_VBLANK_ABSOLUTE: 267 break; 268 default: 269 + ret = -EINVAL; 270 + goto done; 271 } 272 273 if ((flags & _DRM_VBLANK_NEXTONMISS) && ··· 276 277 if (flags & _DRM_VBLANK_SIGNAL) { 278 unsigned long irqflags; 279 + struct list_head *vbl_sigs = &dev->vbl_sigs[crtc]; 280 struct drm_vbl_sig *vbl_sig; 281 282 spin_lock_irqsave(&dev->vbl_lock, irqflags); ··· 298 } 299 } 300 301 + if (atomic_read(&dev->vbl_signal_pending) >= 100) { 302 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 303 + ret = -EBUSY; 304 + goto done; 305 } 306 307 spin_unlock_irqrestore(&dev->vbl_lock, irqflags); 308 309 + vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig), 310 + DRM_MEM_DRIVER); 311 + if (!vbl_sig) { 312 + ret = -ENOMEM; 313 + goto done; 314 } 315 316 + ret = drm_vblank_get(dev, crtc); 317 + if (ret) { 318 + drm_free(vbl_sig, sizeof(struct drm_vbl_sig), 319 + DRM_MEM_DRIVER); 320 + return ret; 321 + } 322 + 323 + atomic_inc(&dev->vbl_signal_pending); 324 325 vbl_sig->sequence = vblwait->request.sequence; 326 vbl_sig->info.si_signo = vblwait->request.signal; ··· 327 328 vblwait->reply.sequence = seq; 329 } else { 330 + DRM_DEBUG("waiting on vblank count %d, crtc %d\n", 331 + vblwait->request.sequence, crtc); 332 + DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ, 333 + ((drm_vblank_count(dev, crtc) 334 + - vblwait->request.sequence) <= (1 << 23))); 335 336 + if (ret != -EINTR) { 337 + struct timeval now; 338 + 339 + do_gettimeofday(&now); 340 + 341 + vblwait->reply.tval_sec = now.tv_sec; 342 + vblwait->reply.tval_usec = now.tv_usec; 343 + vblwait->reply.sequence = drm_vblank_count(dev, crtc); 344 + DRM_DEBUG("returning %d to client\n", 345 + vblwait->reply.sequence); 346 + } else { 347 + DRM_DEBUG("vblank wait interrupted by signal\n"); 348 + } 349 } 350 351 + done: 352 + drm_vblank_put(dev, crtc); 353 return ret; 354 } 355 ··· 348 * Send the VBLANK signals. 349 * 350 * \param dev DRM device. 351 + * \param crtc CRTC where the vblank event occurred 352 * 353 * Sends a signal for each task in drm_device::vbl_sigs and empties the list. 354 * 355 * If a signal is not requested, then calls vblank_wait(). 
356 */ 357 + static void drm_vbl_send_signals(struct drm_device *dev, int crtc) 358 { 359 + struct drm_vbl_sig *vbl_sig, *tmp; 360 + struct list_head *vbl_sigs; 361 + unsigned int vbl_seq; 362 unsigned long flags; 363 364 spin_lock_irqsave(&dev->vbl_lock, flags); 365 366 + vbl_sigs = &dev->vbl_sigs[crtc]; 367 + vbl_seq = drm_vblank_count(dev, crtc); 368 369 + list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) { 370 + if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) { 371 + vbl_sig->info.si_code = vbl_seq; 372 + send_sig_info(vbl_sig->info.si_signo, 373 + &vbl_sig->info, vbl_sig->task); 374 375 + list_del(&vbl_sig->head); 376 377 + drm_free(vbl_sig, sizeof(*vbl_sig), 378 + DRM_MEM_DRIVER); 379 + atomic_dec(&dev->vbl_signal_pending); 380 + drm_vblank_put(dev, crtc); 381 + } 382 } 383 384 spin_unlock_irqrestore(&dev->vbl_lock, flags); 385 } 386 387 + /** 388 + * drm_handle_vblank - handle a vblank event 389 + * @dev: DRM device 390 + * @crtc: where this event occurred 391 + * 392 + * Drivers should call this routine in their vblank interrupt handlers to 393 + * update the vblank counter and send any signals that may be pending. 394 + */ 395 + void drm_handle_vblank(struct drm_device *dev, int crtc) 396 + { 397 + atomic_inc(&dev->_vblank_count[crtc]); 398 + DRM_WAKEUP(&dev->vbl_queue[crtc]); 399 + drm_vbl_send_signals(dev, crtc); 400 + } 401 + EXPORT_SYMBOL(drm_handle_vblank); 402 403 /** 404 * Tasklet wrapper function.
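The vblank rework moves counter bookkeeping into the core: a driver allocates per-CRTC state once at load time, reports each interrupt through drm_handle_vblank(), and lets drm_vblank_get()/drm_vblank_put() plus the disable timer decide when the hardware interrupt can actually be turned off (the driver supplies enable_vblank, disable_vblank and get_vblank_counter callbacks). A hedged sketch of the driver-side hookup; the example_* names and register bits are illustrative:

/* Driver load: allocate per-CRTC vblank counters, wait queues and
 * signal lists for a two-pipe device. */
static int example_driver_load(struct drm_device *dev, unsigned long flags)
{
	return drm_vblank_init(dev, 2);
}

/* Interrupt handler: bump the cooked counter, wake waiters and send
 * any pending signals for the pipe that fired. */
static irqreturn_t example_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *) arg;
	u32 status = example_read_and_clear_irq_status(dev);

	if (status & EXAMPLE_PIPE_A_VBLANK)
		drm_handle_vblank(dev, 0);
	if (status & EXAMPLE_PIPE_B_VBLANK)
		drm_handle_vblank(dev, 1);

	return IRQ_HANDLED;
}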
+2
drivers/gpu/drm/drm_memory.c
··· 133 { 134 return drm_agp_free_memory(handle) ? 0 : -EINVAL; 135 } 136 137 /** Wrapper around agp_bind_memory() */ 138 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) ··· 146 { 147 return drm_agp_unbind_memory(handle); 148 } 149 150 #else /* __OS_HAS_AGP */ 151 static inline void *agp_remap(unsigned long offset, unsigned long size,
··· 133 { 134 return drm_agp_free_memory(handle) ? 0 : -EINVAL; 135 } 136 + EXPORT_SYMBOL(drm_free_agp); 137 138 /** Wrapper around agp_bind_memory() */ 139 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) ··· 145 { 146 return drm_agp_unbind_memory(handle); 147 } 148 + EXPORT_SYMBOL(drm_unbind_agp); 149 150 #else /* __OS_HAS_AGP */ 151 static inline void *agp_remap(unsigned long offset, unsigned long size,
+4 -1
drivers/gpu/drm/drm_mm.c
··· 169 170 return child; 171 } 172 173 /* 174 * Put a block. Merge with the previous and / or next block if they are free. ··· 218 drm_free(cur, sizeof(*cur), DRM_MEM_MM); 219 } 220 } 221 222 struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, 223 unsigned long size, ··· 267 268 return (head->next->next == head); 269 } 270 271 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 272 { ··· 276 277 return drm_mm_create_tail_node(mm, start, size); 278 } 279 - 280 281 void drm_mm_takedown(struct drm_mm * mm) 282 {
··· 169 170 return child; 171 } 172 + EXPORT_SYMBOL(drm_mm_get_block); 173 174 /* 175 * Put a block. Merge with the previous and / or next block if they are free. ··· 217 drm_free(cur, sizeof(*cur), DRM_MEM_MM); 218 } 219 } 220 + EXPORT_SYMBOL(drm_mm_put_block); 221 222 struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, 223 unsigned long size, ··· 265 266 return (head->next->next == head); 267 } 268 + EXPORT_SYMBOL(drm_mm_search_free); 269 270 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) 271 { ··· 273 274 return drm_mm_create_tail_node(mm, start, size); 275 } 276 + EXPORT_SYMBOL(drm_mm_init); 277 278 void drm_mm_takedown(struct drm_mm * mm) 279 {
+122 -13
drivers/gpu/drm/drm_proc.c
··· 49 int request, int *eof, void *data); 50 static int drm_bufs_info(char *buf, char **start, off_t offset, 51 int request, int *eof, void *data); 52 #if DRM_DEBUG_CODE 53 static int drm_vma_info(char *buf, char **start, off_t offset, 54 int request, int *eof, void *data); ··· 64 static struct drm_proc_list { 65 const char *name; /**< file name */ 66 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ 67 } drm_proc_list[] = { 68 - {"name", drm_name_info}, 69 - {"mem", drm_mem_info}, 70 - {"vm", drm_vm_info}, 71 - {"clients", drm_clients_info}, 72 - {"queues", drm_queues_info}, 73 - {"bufs", drm_bufs_info}, 74 #if DRM_DEBUG_CODE 75 {"vma", drm_vma_info}, 76 #endif ··· 97 int drm_proc_init(struct drm_minor *minor, int minor_id, 98 struct proc_dir_entry *root) 99 { 100 struct proc_dir_entry *ent; 101 - int i, j; 102 char name[64]; 103 104 sprintf(name, "%d", minor_id); ··· 110 } 111 112 for (i = 0; i < DRM_PROC_ENTRIES; i++) { 113 ent = create_proc_entry(drm_proc_list[i].name, 114 S_IFREG | S_IRUGO, minor->dev_root); 115 if (!ent) { 116 DRM_ERROR("Cannot create /proc/dri/%s/%s\n", 117 name, drm_proc_list[i].name); 118 - for (j = 0; j < i; j++) 119 - remove_proc_entry(drm_proc_list[i].name, 120 - minor->dev_root); 121 - remove_proc_entry(name, root); 122 - minor->dev_root = NULL; 123 - return -1; 124 } 125 ent->read_proc = drm_proc_list[i].f; 126 ent->data = minor; 127 } 128 129 return 0; 130 } 131 132 /** ··· 160 */ 161 int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) 162 { 163 int i; 164 char name[64]; 165 166 if (!root || !minor->dev_root) 167 return 0; 168 169 for (i = 0; i < DRM_PROC_ENTRIES; i++) 170 remove_proc_entry(drm_proc_list[i].name, minor->dev_root); ··· 509 ret = drm__clients_info(buf, start, offset, request, eof, data); 510 mutex_unlock(&dev->struct_mutex); 511 return ret; 512 } 513 514 #if DRM_DEBUG_CODE
··· 49 int request, int *eof, void *data); 50 static int drm_bufs_info(char *buf, char **start, off_t offset, 51 int request, int *eof, void *data); 52 + static int drm_gem_name_info(char *buf, char **start, off_t offset, 53 + int request, int *eof, void *data); 54 + static int drm_gem_object_info(char *buf, char **start, off_t offset, 55 + int request, int *eof, void *data); 56 #if DRM_DEBUG_CODE 57 static int drm_vma_info(char *buf, char **start, off_t offset, 58 int request, int *eof, void *data); ··· 60 static struct drm_proc_list { 61 const char *name; /**< file name */ 62 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ 63 + u32 driver_features; /**< Required driver features for this entry */ 64 } drm_proc_list[] = { 65 + {"name", drm_name_info, 0}, 66 + {"mem", drm_mem_info, 0}, 67 + {"vm", drm_vm_info, 0}, 68 + {"clients", drm_clients_info, 0}, 69 + {"queues", drm_queues_info, 0}, 70 + {"bufs", drm_bufs_info, 0}, 71 + {"gem_names", drm_gem_name_info, DRIVER_GEM}, 72 + {"gem_objects", drm_gem_object_info, DRIVER_GEM}, 73 #if DRM_DEBUG_CODE 74 {"vma", drm_vma_info}, 75 #endif ··· 90 int drm_proc_init(struct drm_minor *minor, int minor_id, 91 struct proc_dir_entry *root) 92 { 93 + struct drm_device *dev = minor->dev; 94 struct proc_dir_entry *ent; 95 + int i, j, ret; 96 char name[64]; 97 98 sprintf(name, "%d", minor_id); ··· 102 } 103 104 for (i = 0; i < DRM_PROC_ENTRIES; i++) { 105 + u32 features = drm_proc_list[i].driver_features; 106 + 107 + if (features != 0 && 108 + (dev->driver->driver_features & features) != features) 109 + continue; 110 + 111 ent = create_proc_entry(drm_proc_list[i].name, 112 S_IFREG | S_IRUGO, minor->dev_root); 113 if (!ent) { 114 DRM_ERROR("Cannot create /proc/dri/%s/%s\n", 115 name, drm_proc_list[i].name); 116 + ret = -1; 117 + goto fail; 118 } 119 ent->read_proc = drm_proc_list[i].f; 120 ent->data = minor; 121 } 122 123 + if (dev->driver->proc_init) { 124 + ret = dev->driver->proc_init(minor); 125 + if (ret) { 126 + DRM_ERROR("DRM: Driver failed to initialize " 127 + "/proc/dri.\n"); 128 + goto fail; 129 + } 130 + } 131 + 132 return 0; 133 + fail: 134 + 135 + for (j = 0; j < i; j++) 136 + remove_proc_entry(drm_proc_list[i].name, 137 + minor->dev_root); 138 + remove_proc_entry(name, root); 139 + minor->dev_root = NULL; 140 + return ret; 141 } 142 143 /** ··· 133 */ 134 int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) 135 { 136 + struct drm_device *dev = minor->dev; 137 int i; 138 char name[64]; 139 140 if (!root || !minor->dev_root) 141 return 0; 142 + 143 + if (dev->driver->proc_cleanup) 144 + dev->driver->proc_cleanup(minor); 145 146 for (i = 0; i < DRM_PROC_ENTRIES; i++) 147 remove_proc_entry(drm_proc_list[i].name, minor->dev_root); ··· 478 ret = drm__clients_info(buf, start, offset, request, eof, data); 479 mutex_unlock(&dev->struct_mutex); 480 return ret; 481 + } 482 + 483 + struct drm_gem_name_info_data { 484 + int len; 485 + char *buf; 486 + int eof; 487 + }; 488 + 489 + static int drm_gem_one_name_info(int id, void *ptr, void *data) 490 + { 491 + struct drm_gem_object *obj = ptr; 492 + struct drm_gem_name_info_data *nid = data; 493 + 494 + DRM_INFO("name %d size %d\n", obj->name, obj->size); 495 + if (nid->eof) 496 + return 0; 497 + 498 + nid->len += sprintf(&nid->buf[nid->len], 499 + "%6d%9d%8d%9d\n", 500 + obj->name, obj->size, 501 + atomic_read(&obj->handlecount.refcount), 502 + atomic_read(&obj->refcount.refcount)); 503 + if (nid->len > DRM_PROC_LIMIT) { 504 + nid->eof = 1; 505 + return 0; 506 
+ } 507 + return 0; 508 + } 509 + 510 + static int drm_gem_name_info(char *buf, char **start, off_t offset, 511 + int request, int *eof, void *data) 512 + { 513 + struct drm_minor *minor = (struct drm_minor *) data; 514 + struct drm_device *dev = minor->dev; 515 + struct drm_gem_name_info_data nid; 516 + 517 + if (offset > DRM_PROC_LIMIT) { 518 + *eof = 1; 519 + return 0; 520 + } 521 + 522 + nid.len = sprintf(buf, " name size handles refcount\n"); 523 + nid.buf = buf; 524 + nid.eof = 0; 525 + idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid); 526 + 527 + *start = &buf[offset]; 528 + *eof = 0; 529 + if (nid.len > request + offset) 530 + return request; 531 + *eof = 1; 532 + return nid.len - offset; 533 + } 534 + 535 + static int drm_gem_object_info(char *buf, char **start, off_t offset, 536 + int request, int *eof, void *data) 537 + { 538 + struct drm_minor *minor = (struct drm_minor *) data; 539 + struct drm_device *dev = minor->dev; 540 + int len = 0; 541 + 542 + if (offset > DRM_PROC_LIMIT) { 543 + *eof = 1; 544 + return 0; 545 + } 546 + 547 + *start = &buf[offset]; 548 + *eof = 0; 549 + DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count)); 550 + DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory)); 551 + DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count)); 552 + DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory)); 553 + DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory)); 554 + DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total); 555 + if (len > request + offset) 556 + return request; 557 + *eof = 1; 558 + return len - offset; 559 } 560 561 #if DRM_DEBUG_CODE
+10 -1
drivers/gpu/drm/drm_stub.c
··· 107 #ifdef __alpha__ 108 dev->hose = pdev->sysdata; 109 #endif 110 - dev->irq = pdev->irq; 111 112 if (drm_ht_create(&dev->map_hash, 12)) { 113 return -ENOMEM; ··· 149 if (retcode) { 150 DRM_ERROR("Cannot allocate memory for context bitmap.\n"); 151 goto error_out_unreg; 152 } 153 154 return 0; ··· 325 int drm_put_minor(struct drm_minor **minor_p) 326 { 327 struct drm_minor *minor = *minor_p; 328 DRM_DEBUG("release secondary minor %d\n", minor->index); 329 330 if (minor->type == DRM_MINOR_LEGACY)
··· 107 #ifdef __alpha__ 108 dev->hose = pdev->sysdata; 109 #endif 110 111 if (drm_ht_create(&dev->map_hash, 12)) { 112 return -ENOMEM; ··· 150 if (retcode) { 151 DRM_ERROR("Cannot allocate memory for context bitmap.\n"); 152 goto error_out_unreg; 153 + } 154 + 155 + if (driver->driver_features & DRIVER_GEM) { 156 + retcode = drm_gem_init(dev); 157 + if (retcode) { 158 + DRM_ERROR("Cannot initialize graphics execution " 159 + "manager (GEM)\n"); 160 + goto error_out_unreg; 161 + } 162 } 163 164 return 0; ··· 317 int drm_put_minor(struct drm_minor **minor_p) 318 { 319 struct drm_minor *minor = *minor_p; 320 + 321 DRM_DEBUG("release secondary minor %d\n", minor->index); 322 323 if (minor->type == DRM_MINOR_LEGACY)
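A driver opts into all of this by setting DRIVER_GEM in its feature mask, which makes drm_fill_in_dev() call drm_gem_init() at load time and drm_fops run drm_gem_open()/drm_gem_release() per file; it also supplies the gem_init_object/gem_free_object hooks that drm_gem_object_alloc() and drm_gem_object_free() call back into. A hedged sketch of the relevant struct drm_driver fields; the example_* functions are the driver's own, and unrelated fields are omitted:

static struct drm_driver example_driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
	/* Per-object driver state setup/teardown used by drm_gem.c. */
	.gem_init_object = example_gem_init_object,
	.gem_free_object = example_gem_free_object,
};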
+1 -1
drivers/gpu/drm/drm_sysfs.c
··· 184 err_out_files: 185 if (i > 0) 186 for (j = 0; j < i; j++) 187 - device_remove_file(&minor->kdev, &device_attrs[i]); 188 device_unregister(&minor->kdev); 189 err_out: 190
··· 184 err_out_files: 185 if (i > 0) 186 for (j = 0; j < i; j++) 187 + device_remove_file(&minor->kdev, &device_attrs[j]); 188 device_unregister(&minor->kdev); 189 err_out: 190
+6 -1
drivers/gpu/drm/i915/Makefile
··· 3 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 4 5 ccflags-y := -Iinclude/drm 6 - i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o 7 8 i915-$(CONFIG_COMPAT) += i915_ioc32.o 9
··· 3 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 4 5 ccflags-y := -Iinclude/drm 6 + i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \ 7 + i915_suspend.o \ 8 + i915_gem.o \ 9 + i915_gem_debug.o \ 10 + i915_gem_proc.o \ 11 + i915_gem_tiling.o 12 13 i915-$(CONFIG_COMPAT) += i915_ioc32.o 14
+226 -106
drivers/gpu/drm/i915/i915_dma.c
··· 40 { 41 drm_i915_private_t *dev_priv = dev->dev_private; 42 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 43 - u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR; 44 int i; 45 46 - for (i = 0; i < 10000; i++) { 47 - ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR; 48 ring->space = ring->head - (ring->tail + 8); 49 if (ring->space < 0) 50 ring->space += ring->Size; 51 if (ring->space >= n) 52 return 0; 53 54 - dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 55 56 if (ring->head != last_head) 57 i = 0; 58 59 last_head = ring->head; 60 } 61 62 return -EBUSY; 63 } 64 65 void i915_kernel_lost_context(struct drm_device * dev) ··· 123 drm_i915_private_t *dev_priv = dev->dev_private; 124 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 125 126 - ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR; 127 - ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR; 128 ring->space = ring->head - (ring->tail + 8); 129 if (ring->space < 0) 130 ring->space += ring->Size; 131 132 - if (ring->head == ring->tail) 133 dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; 134 } 135 ··· 140 * may not have been called from userspace and after dev_private 141 * is freed, it's too late. 142 */ 143 - if (dev->irq) 144 drm_irq_uninstall(dev); 145 146 if (dev_priv->ring.virtual_start) { 147 drm_core_ioremapfree(&dev_priv->ring.map, dev); 148 - dev_priv->ring.virtual_start = 0; 149 - dev_priv->ring.map.handle = 0; 150 dev_priv->ring.map.size = 0; 151 } 152 153 - if (dev_priv->status_page_dmah) { 154 - drm_pci_free(dev, dev_priv->status_page_dmah); 155 - dev_priv->status_page_dmah = NULL; 156 - /* Need to rewrite hardware status page */ 157 - I915_WRITE(0x02080, 0x1ffff000); 158 - } 159 - 160 - if (dev_priv->status_gfx_addr) { 161 - dev_priv->status_gfx_addr = 0; 162 - drm_core_ioremapfree(&dev_priv->hws_map, dev); 163 - I915_WRITE(0x2080, 0x1ffff000); 164 - } 165 166 return 0; 167 } ··· 168 return -EINVAL; 169 } 170 171 - dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset); 172 - if (!dev_priv->mmio_map) { 173 - i915_dma_cleanup(dev); 174 - DRM_ERROR("can not find mmio map!\n"); 175 - return -EINVAL; 176 - } 177 - 178 dev_priv->sarea_priv = (drm_i915_sarea_t *) 179 ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); 180 181 - dev_priv->ring.Start = init->ring_start; 182 - dev_priv->ring.End = init->ring_end; 183 - dev_priv->ring.Size = init->ring_size; 184 - dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; 185 186 - dev_priv->ring.map.offset = init->ring_start; 187 - dev_priv->ring.map.size = init->ring_size; 188 - dev_priv->ring.map.type = 0; 189 - dev_priv->ring.map.flags = 0; 190 - dev_priv->ring.map.mtrr = 0; 191 192 - drm_core_ioremap(&dev_priv->ring.map, dev); 193 194 - if (dev_priv->ring.map.handle == NULL) { 195 - i915_dma_cleanup(dev); 196 - DRM_ERROR("can not ioremap virtual address for" 197 - " ring buffer\n"); 198 - return -ENOMEM; 199 } 200 201 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ··· 206 dev_priv->current_page = 0; 207 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 208 209 - /* We are using separate values as placeholders for mechanisms for 210 - * private backbuffer/depthbuffer usage. 211 - */ 212 - dev_priv->use_mi_batchbuffer_start = 0; 213 - if (IS_I965G(dev)) /* 965 doesn't support older method */ 214 - dev_priv->use_mi_batchbuffer_start = 1; 215 - 216 /* Allow hardware batchbuffers unless told otherwise. 
217 */ 218 dev_priv->allow_batchbuffer = 1; 219 220 - /* Program Hardware Status Page */ 221 - if (!I915_NEED_GFX_HWS(dev)) { 222 - dev_priv->status_page_dmah = 223 - drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); 224 - 225 - if (!dev_priv->status_page_dmah) { 226 - i915_dma_cleanup(dev); 227 - DRM_ERROR("Can not allocate hardware status page\n"); 228 - return -ENOMEM; 229 - } 230 - dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; 231 - dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; 232 - 233 - memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 234 - I915_WRITE(0x02080, dev_priv->dma_status_page); 235 - } 236 - DRM_DEBUG("Enabled hardware status page\n"); 237 return 0; 238 } 239 ··· 221 222 if (!dev_priv->sarea) { 223 DRM_ERROR("can not find sarea!\n"); 224 - return -EINVAL; 225 - } 226 - 227 - if (!dev_priv->mmio_map) { 228 - DRM_ERROR("can not find mmio map!\n"); 229 return -EINVAL; 230 } 231 ··· 238 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 239 240 if (dev_priv->status_gfx_addr != 0) 241 - I915_WRITE(0x02080, dev_priv->status_gfx_addr); 242 else 243 - I915_WRITE(0x02080, dev_priv->dma_status_page); 244 DRM_DEBUG("Enabled hardware status page\n"); 245 246 return 0; ··· 385 return 0; 386 } 387 388 - static int i915_emit_box(struct drm_device * dev, 389 - struct drm_clip_rect __user * boxes, 390 - int i, int DR1, int DR4) 391 { 392 drm_i915_private_t *dev_priv = dev->dev_private; 393 struct drm_clip_rect box; ··· 434 drm_i915_private_t *dev_priv = dev->dev_private; 435 RING_LOCALS; 436 437 - dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter; 438 - 439 if (dev_priv->counter > 0x7FFFFFFFUL) 440 - dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; 441 442 BEGIN_LP_RING(4); 443 - OUT_RING(CMD_STORE_DWORD_IDX); 444 - OUT_RING(20); 445 OUT_RING(dev_priv->counter); 446 OUT_RING(0); 447 ADVANCE_LP_RING(); ··· 506 return ret; 507 } 508 509 - if (dev_priv->use_mi_batchbuffer_start) { 510 BEGIN_LP_RING(2); 511 if (IS_I965G(dev)) { 512 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); ··· 536 drm_i915_private_t *dev_priv = dev->dev_private; 537 RING_LOCALS; 538 539 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", 540 __func__, 541 dev_priv->current_page, ··· 547 i915_kernel_lost_context(dev); 548 549 BEGIN_LP_RING(2); 550 - OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE); 551 OUT_RING(0); 552 ADVANCE_LP_RING(); 553 ··· 572 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; 573 574 BEGIN_LP_RING(4); 575 - OUT_RING(CMD_STORE_DWORD_IDX); 576 - OUT_RING(20); 577 OUT_RING(dev_priv->counter); 578 OUT_RING(0); 579 ADVANCE_LP_RING(); ··· 593 static int i915_flush_ioctl(struct drm_device *dev, void *data, 594 struct drm_file *file_priv) 595 { 596 - LOCK_TEST_WITH_RETURN(dev, file_priv); 597 598 - return i915_quiescent(dev); 599 } 600 601 static int i915_batchbuffer(struct drm_device *dev, void *data, ··· 622 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", 623 batch->start, batch->used, batch->num_cliprects); 624 625 - LOCK_TEST_WITH_RETURN(dev, file_priv); 626 627 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, 628 batch->num_cliprects * 629 sizeof(struct drm_clip_rect))) 630 return -EFAULT; 631 632 ret = i915_dispatch_batchbuffer(dev, batch); 633 634 - sarea_priv->last_dispatch = (int)hw_status[5]; 635 return ret; 636 } 637 ··· 651 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 652 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 653 654 - 
LOCK_TEST_WITH_RETURN(dev, file_priv); 655 656 if (cmdbuf->num_cliprects && 657 DRM_VERIFYAREA_READ(cmdbuf->cliprects, ··· 661 return -EFAULT; 662 } 663 664 ret = i915_dispatch_cmdbuffer(dev, cmdbuf); 665 if (ret) { 666 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 667 return ret; 668 } 669 670 - sarea_priv->last_dispatch = (int)hw_status[5]; 671 return 0; 672 } 673 674 static int i915_flip_bufs(struct drm_device *dev, void *data, 675 struct drm_file *file_priv) 676 { 677 DRM_DEBUG("%s\n", __func__); 678 679 - LOCK_TEST_WITH_RETURN(dev, file_priv); 680 681 - return i915_dispatch_flip(dev); 682 } 683 684 static int i915_getparam(struct drm_device *dev, void *data, ··· 704 705 switch (param->param) { 706 case I915_PARAM_IRQ_ACTIVE: 707 - value = dev->irq ? 1 : 0; 708 break; 709 case I915_PARAM_ALLOW_BATCHBUFFER: 710 value = dev_priv->allow_batchbuffer ? 1 : 0; 711 break; 712 case I915_PARAM_LAST_DISPATCH: 713 value = READ_BREADCRUMB(dev_priv); 714 break; 715 default: 716 DRM_ERROR("Unknown parameter %d\n", param->param); ··· 744 745 switch (param->param) { 746 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 747 - if (!IS_I965G(dev)) 748 - dev_priv->use_mi_batchbuffer_start = param->value; 749 break; 750 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 751 dev_priv->tex_lru_log_granularity = param->value; ··· 794 dev_priv->hw_status_page = dev_priv->hws_map.handle; 795 796 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 797 - I915_WRITE(0x02080, dev_priv->status_gfx_addr); 798 - DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n", 799 dev_priv->status_gfx_addr); 800 DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); 801 return 0; ··· 821 memset(dev_priv, 0, sizeof(drm_i915_private_t)); 822 823 dev->dev_private = (void *)dev_priv; 824 825 /* Add register map (needed for suspend/resume) */ 826 base = drm_get_resource_start(dev, mmio_bar); 827 size = drm_get_resource_len(dev, mmio_bar); 828 829 - ret = drm_addmap(dev, base, size, _DRM_REGISTERS, 830 - _DRM_KERNEL | _DRM_DRIVER, 831 - &dev_priv->mmio_map); 832 return ret; 833 } 834 ··· 860 { 861 struct drm_i915_private *dev_priv = dev->dev_private; 862 863 - if (dev_priv->mmio_map) 864 - drm_rmmap(dev, dev_priv->mmio_map); 865 866 drm_free(dev->dev_private, sizeof(drm_i915_private_t), 867 DRM_MEM_DRIVER); 868 869 return 0; 870 } ··· 902 if (!dev_priv) 903 return; 904 905 if (dev_priv->agp_heap) 906 i915_mem_takedown(&(dev_priv->agp_heap)); 907 ··· 914 { 915 drm_i915_private_t *dev_priv = dev->dev_private; 916 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 917 } 918 919 struct drm_ioctl_desc i915_ioctls[] = { ··· 940 DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), 941 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), 942 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 943 - DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), 944 }; 945 946 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
··· 40 { 41 drm_i915_private_t *dev_priv = dev->dev_private; 42 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 43 + u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD; 44 + u32 last_acthd = I915_READ(acthd_reg); 45 + u32 acthd; 46 + u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 47 int i; 48 49 + for (i = 0; i < 100000; i++) { 50 + ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 51 + acthd = I915_READ(acthd_reg); 52 ring->space = ring->head - (ring->tail + 8); 53 if (ring->space < 0) 54 ring->space += ring->Size; 55 if (ring->space >= n) 56 return 0; 57 58 + if (dev_priv->sarea_priv) 59 + dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 60 61 if (ring->head != last_head) 62 i = 0; 63 + if (acthd != last_acthd) 64 + i = 0; 65 66 last_head = ring->head; 67 + last_acthd = acthd; 68 + msleep_interruptible(10); 69 + 70 } 71 72 return -EBUSY; 73 + } 74 + 75 + /** 76 + * Sets up the hardware status page for devices that need a physical address 77 + * in the register. 78 + */ 79 + static int i915_init_phys_hws(struct drm_device *dev) 80 + { 81 + drm_i915_private_t *dev_priv = dev->dev_private; 82 + /* Program Hardware Status Page */ 83 + dev_priv->status_page_dmah = 84 + drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff); 85 + 86 + if (!dev_priv->status_page_dmah) { 87 + DRM_ERROR("Can not allocate hardware status page\n"); 88 + return -ENOMEM; 89 + } 90 + dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr; 91 + dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr; 92 + 93 + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 94 + 95 + I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 96 + DRM_DEBUG("Enabled hardware status page\n"); 97 + return 0; 98 + } 99 + 100 + /** 101 + * Frees the hardware status page, whether it's a physical address or a virtual 102 + * address set up by the X Server. 103 + */ 104 + static void i915_free_hws(struct drm_device *dev) 105 + { 106 + drm_i915_private_t *dev_priv = dev->dev_private; 107 + if (dev_priv->status_page_dmah) { 108 + drm_pci_free(dev, dev_priv->status_page_dmah); 109 + dev_priv->status_page_dmah = NULL; 110 + } 111 + 112 + if (dev_priv->status_gfx_addr) { 113 + dev_priv->status_gfx_addr = 0; 114 + drm_core_ioremapfree(&dev_priv->hws_map, dev); 115 + } 116 + 117 + /* Need to rewrite hardware status page */ 118 + I915_WRITE(HWS_PGA, 0x1ffff000); 119 } 120 121 void i915_kernel_lost_context(struct drm_device * dev) ··· 67 drm_i915_private_t *dev_priv = dev->dev_private; 68 drm_i915_ring_buffer_t *ring = &(dev_priv->ring); 69 70 + ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 71 + ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR; 72 ring->space = ring->head - (ring->tail + 8); 73 if (ring->space < 0) 74 ring->space += ring->Size; 75 76 + if (ring->head == ring->tail && dev_priv->sarea_priv) 77 dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY; 78 } 79 ··· 84 * may not have been called from userspace and after dev_private 85 * is freed, it's too late. 
86 */ 87 + if (dev->irq_enabled) 88 drm_irq_uninstall(dev); 89 90 if (dev_priv->ring.virtual_start) { 91 drm_core_ioremapfree(&dev_priv->ring.map, dev); 92 + dev_priv->ring.virtual_start = NULL; 93 + dev_priv->ring.map.handle = NULL; 94 dev_priv->ring.map.size = 0; 95 } 96 97 + /* Clear the HWS virtual address at teardown */ 98 + if (I915_NEED_GFX_HWS(dev)) 99 + i915_free_hws(dev); 100 101 return 0; 102 } ··· 121 return -EINVAL; 122 } 123 124 dev_priv->sarea_priv = (drm_i915_sarea_t *) 125 ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); 126 127 + if (init->ring_size != 0) { 128 + if (dev_priv->ring.ring_obj != NULL) { 129 + i915_dma_cleanup(dev); 130 + DRM_ERROR("Client tried to initialize ringbuffer in " 131 + "GEM mode\n"); 132 + return -EINVAL; 133 + } 134 135 + dev_priv->ring.Size = init->ring_size; 136 + dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; 137 138 + dev_priv->ring.map.offset = init->ring_start; 139 + dev_priv->ring.map.size = init->ring_size; 140 + dev_priv->ring.map.type = 0; 141 + dev_priv->ring.map.flags = 0; 142 + dev_priv->ring.map.mtrr = 0; 143 144 + drm_core_ioremap(&dev_priv->ring.map, dev); 145 + 146 + if (dev_priv->ring.map.handle == NULL) { 147 + i915_dma_cleanup(dev); 148 + DRM_ERROR("can not ioremap virtual address for" 149 + " ring buffer\n"); 150 + return -ENOMEM; 151 + } 152 } 153 154 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ··· 159 dev_priv->current_page = 0; 160 dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; 161 162 /* Allow hardware batchbuffers unless told otherwise. 163 */ 164 dev_priv->allow_batchbuffer = 1; 165 166 return 0; 167 } 168 ··· 198 199 if (!dev_priv->sarea) { 200 DRM_ERROR("can not find sarea!\n"); 201 return -EINVAL; 202 } 203 ··· 220 DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page); 221 222 if (dev_priv->status_gfx_addr != 0) 223 + I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 224 else 225 + I915_WRITE(HWS_PGA, dev_priv->dma_status_page); 226 DRM_DEBUG("Enabled hardware status page\n"); 227 228 return 0; ··· 367 return 0; 368 } 369 370 + int 371 + i915_emit_box(struct drm_device *dev, 372 + struct drm_clip_rect __user *boxes, 373 + int i, int DR1, int DR4) 374 { 375 drm_i915_private_t *dev_priv = dev->dev_private; 376 struct drm_clip_rect box; ··· 415 drm_i915_private_t *dev_priv = dev->dev_private; 416 RING_LOCALS; 417 418 + dev_priv->counter++; 419 if (dev_priv->counter > 0x7FFFFFFFUL) 420 + dev_priv->counter = 0; 421 + if (dev_priv->sarea_priv) 422 + dev_priv->sarea_priv->last_enqueue = dev_priv->counter; 423 424 BEGIN_LP_RING(4); 425 + OUT_RING(MI_STORE_DWORD_INDEX); 426 + OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); 427 OUT_RING(dev_priv->counter); 428 OUT_RING(0); 429 ADVANCE_LP_RING(); ··· 486 return ret; 487 } 488 489 + if (!IS_I830(dev) && !IS_845G(dev)) { 490 BEGIN_LP_RING(2); 491 if (IS_I965G(dev)) { 492 OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965); ··· 516 drm_i915_private_t *dev_priv = dev->dev_private; 517 RING_LOCALS; 518 519 + if (!dev_priv->sarea_priv) 520 + return -EINVAL; 521 + 522 DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", 523 __func__, 524 dev_priv->current_page, ··· 524 i915_kernel_lost_context(dev); 525 526 BEGIN_LP_RING(2); 527 + OUT_RING(MI_FLUSH | MI_READ_FLUSH); 528 OUT_RING(0); 529 ADVANCE_LP_RING(); 530 ··· 549 dev_priv->sarea_priv->last_enqueue = dev_priv->counter++; 550 551 BEGIN_LP_RING(4); 552 + OUT_RING(MI_STORE_DWORD_INDEX); 553 + OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); 554 OUT_RING(dev_priv->counter); 555 
OUT_RING(0); 556 ADVANCE_LP_RING(); ··· 570 static int i915_flush_ioctl(struct drm_device *dev, void *data, 571 struct drm_file *file_priv) 572 { 573 + int ret; 574 575 + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 576 + 577 + mutex_lock(&dev->struct_mutex); 578 + ret = i915_quiescent(dev); 579 + mutex_unlock(&dev->struct_mutex); 580 + 581 + return ret; 582 } 583 584 static int i915_batchbuffer(struct drm_device *dev, void *data, ··· 593 DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n", 594 batch->start, batch->used, batch->num_cliprects); 595 596 + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 597 598 if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects, 599 batch->num_cliprects * 600 sizeof(struct drm_clip_rect))) 601 return -EFAULT; 602 603 + mutex_lock(&dev->struct_mutex); 604 ret = i915_dispatch_batchbuffer(dev, batch); 605 + mutex_unlock(&dev->struct_mutex); 606 607 + if (sarea_priv) 608 + sarea_priv->last_dispatch = (int)hw_status[5]; 609 return ret; 610 } 611 ··· 619 DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n", 620 cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects); 621 622 + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 623 624 if (cmdbuf->num_cliprects && 625 DRM_VERIFYAREA_READ(cmdbuf->cliprects, ··· 629 return -EFAULT; 630 } 631 632 + mutex_lock(&dev->struct_mutex); 633 ret = i915_dispatch_cmdbuffer(dev, cmdbuf); 634 + mutex_unlock(&dev->struct_mutex); 635 if (ret) { 636 DRM_ERROR("i915_dispatch_cmdbuffer failed\n"); 637 return ret; 638 } 639 640 + if (sarea_priv) 641 + sarea_priv->last_dispatch = (int)hw_status[5]; 642 return 0; 643 } 644 645 static int i915_flip_bufs(struct drm_device *dev, void *data, 646 struct drm_file *file_priv) 647 { 648 + int ret; 649 + 650 DRM_DEBUG("%s\n", __func__); 651 652 + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 653 654 + mutex_lock(&dev->struct_mutex); 655 + ret = i915_dispatch_flip(dev); 656 + mutex_unlock(&dev->struct_mutex); 657 + 658 + return ret; 659 } 660 661 static int i915_getparam(struct drm_device *dev, void *data, ··· 663 664 switch (param->param) { 665 case I915_PARAM_IRQ_ACTIVE: 666 + value = dev->pdev->irq ? 1 : 0; 667 break; 668 case I915_PARAM_ALLOW_BATCHBUFFER: 669 value = dev_priv->allow_batchbuffer ? 
1 : 0; 670 break; 671 case I915_PARAM_LAST_DISPATCH: 672 value = READ_BREADCRUMB(dev_priv); 673 + break; 674 + case I915_PARAM_CHIPSET_ID: 675 + value = dev->pci_device; 676 + break; 677 + case I915_PARAM_HAS_GEM: 678 + value = 1; 679 break; 680 default: 681 DRM_ERROR("Unknown parameter %d\n", param->param); ··· 697 698 switch (param->param) { 699 case I915_SETPARAM_USE_MI_BATCHBUFFER_START: 700 break; 701 case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY: 702 dev_priv->tex_lru_log_granularity = param->value; ··· 749 dev_priv->hw_status_page = dev_priv->hws_map.handle; 750 751 memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 752 + I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 753 + DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n", 754 dev_priv->status_gfx_addr); 755 DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page); 756 return 0; ··· 776 memset(dev_priv, 0, sizeof(drm_i915_private_t)); 777 778 dev->dev_private = (void *)dev_priv; 779 + dev_priv->dev = dev; 780 781 /* Add register map (needed for suspend/resume) */ 782 base = drm_get_resource_start(dev, mmio_bar); 783 size = drm_get_resource_len(dev, mmio_bar); 784 785 + dev_priv->regs = ioremap(base, size); 786 + 787 + i915_gem_load(dev); 788 + 789 + /* Init HWS */ 790 + if (!I915_NEED_GFX_HWS(dev)) { 791 + ret = i915_init_phys_hws(dev); 792 + if (ret != 0) 793 + return ret; 794 + } 795 + 796 + /* On the 945G/GM, the chipset reports the MSI capability on the 797 + * integrated graphics even though the support isn't actually there 798 + * according to the published specs. It doesn't appear to function 799 + * correctly in testing on 945G. 800 + * This may be a side effect of MSI having been made available for PEG 801 + * and the registers being closely associated. 802 + */ 803 + if (!IS_I945G(dev) && !IS_I945GM(dev)) 804 + if (pci_enable_msi(dev->pdev)) 805 + DRM_ERROR("failed to enable MSI\n"); 806 + 807 + intel_opregion_init(dev); 808 + 809 + spin_lock_init(&dev_priv->user_irq_lock); 810 + 811 return ret; 812 } 813 ··· 791 { 792 struct drm_i915_private *dev_priv = dev->dev_private; 793 794 + if (dev->pdev->msi_enabled) 795 + pci_disable_msi(dev->pdev); 796 + 797 + i915_free_hws(dev); 798 + 799 + if (dev_priv->regs != NULL) 800 + iounmap(dev_priv->regs); 801 + 802 + intel_opregion_free(dev); 803 804 drm_free(dev->dev_private, sizeof(drm_i915_private_t), 805 DRM_MEM_DRIVER); 806 + 807 + return 0; 808 + } 809 + 810 + int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) 811 + { 812 + struct drm_i915_file_private *i915_file_priv; 813 + 814 + DRM_DEBUG("\n"); 815 + i915_file_priv = (struct drm_i915_file_private *) 816 + drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); 817 + 818 + if (!i915_file_priv) 819 + return -ENOMEM; 820 + 821 + file_priv->driver_priv = i915_file_priv; 822 + 823 + i915_file_priv->mm.last_gem_seqno = 0; 824 + i915_file_priv->mm.last_gem_throttle_seqno = 0; 825 826 return 0; 827 } ··· 807 if (!dev_priv) 808 return; 809 810 + i915_gem_lastclose(dev); 811 + 812 if (dev_priv->agp_heap) 813 i915_mem_takedown(&(dev_priv->agp_heap)); 814 ··· 817 { 818 drm_i915_private_t *dev_priv = dev->dev_private; 819 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 820 + } 821 + 822 + void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) 823 + { 824 + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 825 + 826 + drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES); 827 } 828 829 struct drm_ioctl_desc i915_ioctls[] = { ··· 836 
DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE, i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ), 837 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), 838 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 839 + DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 840 + DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 841 + DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), 842 + DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 843 + DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 844 + DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), 845 + DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH), 846 + DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 847 + DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), 848 + DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0), 849 + DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), 850 + DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), 851 + DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), 852 + DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), 853 + DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), 854 + DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), 855 + DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), 856 }; 857 858 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
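The wait-for-ring and lost-context hunks above both derive free ring space from the head and tail offsets with the same wrap-around arithmetic. A minimal, self-contained sketch of that calculation (plain C with illustrative names, not the driver's code; the 8-byte gap mirrors the "tail + 8" used in the hunks):

#include <stdio.h>

/* Free space in a circular command ring tracked by head/tail byte
 * offsets.  A small gap is kept so the tail never runs right up
 * against the head.
 */
static int ring_space(unsigned int head, unsigned int tail,
                      unsigned int size)
{
        int space = (int)head - (int)(tail + 8);

        if (space < 0)
                space += size;  /* free region wraps past the end of the ring */
        return space;
}

int main(void)
{
        /* head ahead of tail: the free region is the span between them */
        printf("%d\n", ring_space(32768, 4096, 64 * 1024));
        /* head behind tail: the free region wraps around the ring end */
        printf("%d\n", ring_space(256, 4096, 64 * 1024));
        return 0;
}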
+14 -462
drivers/gpu/drm/i915/i915_drv.c
··· 38 i915_PCI_IDS 39 }; 40 41 - enum pipe { 42 - PIPE_A = 0, 43 - PIPE_B, 44 - }; 45 - 46 - static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) 47 - { 48 - struct drm_i915_private *dev_priv = dev->dev_private; 49 - 50 - if (pipe == PIPE_A) 51 - return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); 52 - else 53 - return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); 54 - } 55 - 56 - static void i915_save_palette(struct drm_device *dev, enum pipe pipe) 57 - { 58 - struct drm_i915_private *dev_priv = dev->dev_private; 59 - unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); 60 - u32 *array; 61 - int i; 62 - 63 - if (!i915_pipe_enabled(dev, pipe)) 64 - return; 65 - 66 - if (pipe == PIPE_A) 67 - array = dev_priv->save_palette_a; 68 - else 69 - array = dev_priv->save_palette_b; 70 - 71 - for(i = 0; i < 256; i++) 72 - array[i] = I915_READ(reg + (i << 2)); 73 - } 74 - 75 - static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) 76 - { 77 - struct drm_i915_private *dev_priv = dev->dev_private; 78 - unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); 79 - u32 *array; 80 - int i; 81 - 82 - if (!i915_pipe_enabled(dev, pipe)) 83 - return; 84 - 85 - if (pipe == PIPE_A) 86 - array = dev_priv->save_palette_a; 87 - else 88 - array = dev_priv->save_palette_b; 89 - 90 - for(i = 0; i < 256; i++) 91 - I915_WRITE(reg + (i << 2), array[i]); 92 - } 93 - 94 - static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg) 95 - { 96 - outb(reg, index_port); 97 - return inb(data_port); 98 - } 99 - 100 - static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable) 101 - { 102 - inb(st01); 103 - outb(palette_enable | reg, VGA_AR_INDEX); 104 - return inb(VGA_AR_DATA_READ); 105 - } 106 - 107 - static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable) 108 - { 109 - inb(st01); 110 - outb(palette_enable | reg, VGA_AR_INDEX); 111 - outb(val, VGA_AR_DATA_WRITE); 112 - } 113 - 114 - static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val) 115 - { 116 - outb(reg, index_port); 117 - outb(val, data_port); 118 - } 119 - 120 - static void i915_save_vga(struct drm_device *dev) 121 - { 122 - struct drm_i915_private *dev_priv = dev->dev_private; 123 - int i; 124 - u16 cr_index, cr_data, st01; 125 - 126 - /* VGA color palette registers */ 127 - dev_priv->saveDACMASK = inb(VGA_DACMASK); 128 - /* DACCRX automatically increments during read */ 129 - outb(0, VGA_DACRX); 130 - /* Read 3 bytes of color data from each index */ 131 - for (i = 0; i < 256 * 3; i++) 132 - dev_priv->saveDACDATA[i] = inb(VGA_DACDATA); 133 - 134 - /* MSR bits */ 135 - dev_priv->saveMSR = inb(VGA_MSR_READ); 136 - if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { 137 - cr_index = VGA_CR_INDEX_CGA; 138 - cr_data = VGA_CR_DATA_CGA; 139 - st01 = VGA_ST01_CGA; 140 - } else { 141 - cr_index = VGA_CR_INDEX_MDA; 142 - cr_data = VGA_CR_DATA_MDA; 143 - st01 = VGA_ST01_MDA; 144 - } 145 - 146 - /* CRT controller regs */ 147 - i915_write_indexed(cr_index, cr_data, 0x11, 148 - i915_read_indexed(cr_index, cr_data, 0x11) & 149 - (~0x80)); 150 - for (i = 0; i <= 0x24; i++) 151 - dev_priv->saveCR[i] = 152 - i915_read_indexed(cr_index, cr_data, i); 153 - /* Make sure we don't turn off CR group 0 writes */ 154 - dev_priv->saveCR[0x11] &= ~0x80; 155 - 156 - /* Attribute controller registers */ 157 - inb(st01); 158 - dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX); 159 - for (i = 0; i <= 0x14; i++) 160 - dev_priv->saveAR[i] = i915_read_ar(st01, i, 0); 161 - inb(st01); 162 - outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX); 163 
- inb(st01); 164 - 165 - /* Graphics controller registers */ 166 - for (i = 0; i < 9; i++) 167 - dev_priv->saveGR[i] = 168 - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i); 169 - 170 - dev_priv->saveGR[0x10] = 171 - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10); 172 - dev_priv->saveGR[0x11] = 173 - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11); 174 - dev_priv->saveGR[0x18] = 175 - i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18); 176 - 177 - /* Sequencer registers */ 178 - for (i = 0; i < 8; i++) 179 - dev_priv->saveSR[i] = 180 - i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i); 181 - } 182 - 183 - static void i915_restore_vga(struct drm_device *dev) 184 - { 185 - struct drm_i915_private *dev_priv = dev->dev_private; 186 - int i; 187 - u16 cr_index, cr_data, st01; 188 - 189 - /* MSR bits */ 190 - outb(dev_priv->saveMSR, VGA_MSR_WRITE); 191 - if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { 192 - cr_index = VGA_CR_INDEX_CGA; 193 - cr_data = VGA_CR_DATA_CGA; 194 - st01 = VGA_ST01_CGA; 195 - } else { 196 - cr_index = VGA_CR_INDEX_MDA; 197 - cr_data = VGA_CR_DATA_MDA; 198 - st01 = VGA_ST01_MDA; 199 - } 200 - 201 - /* Sequencer registers, don't write SR07 */ 202 - for (i = 0; i < 7; i++) 203 - i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i, 204 - dev_priv->saveSR[i]); 205 - 206 - /* CRT controller regs */ 207 - /* Enable CR group 0 writes */ 208 - i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); 209 - for (i = 0; i <= 0x24; i++) 210 - i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]); 211 - 212 - /* Graphics controller regs */ 213 - for (i = 0; i < 9; i++) 214 - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i, 215 - dev_priv->saveGR[i]); 216 - 217 - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10, 218 - dev_priv->saveGR[0x10]); 219 - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11, 220 - dev_priv->saveGR[0x11]); 221 - i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18, 222 - dev_priv->saveGR[0x18]); 223 - 224 - /* Attribute controller registers */ 225 - inb(st01); 226 - for (i = 0; i <= 0x14; i++) 227 - i915_write_ar(st01, i, dev_priv->saveAR[i], 0); 228 - inb(st01); /* switch back to index mode */ 229 - outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX); 230 - inb(st01); 231 - 232 - /* VGA color palette registers */ 233 - outb(dev_priv->saveDACMASK, VGA_DACMASK); 234 - /* DACCRX automatically increments during read */ 235 - outb(0, VGA_DACWX); 236 - /* Read 3 bytes of color data from each index */ 237 - for (i = 0; i < 256 * 3; i++) 238 - outb(dev_priv->saveDACDATA[i], VGA_DACDATA); 239 - 240 - } 241 - 242 static int i915_suspend(struct drm_device *dev, pm_message_t state) 243 { 244 struct drm_i915_private *dev_priv = dev->dev_private; 245 - int i; 246 247 if (!dev || !dev_priv) { 248 printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); ··· 52 return 0; 53 54 pci_save_state(dev->pdev); 55 - pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 56 57 - /* Display arbitration control */ 58 - dev_priv->saveDSPARB = I915_READ(DSPARB); 59 60 - /* Pipe & plane A info */ 61 - dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 62 - dev_priv->savePIPEASRC = I915_READ(PIPEASRC); 63 - dev_priv->saveFPA0 = I915_READ(FPA0); 64 - dev_priv->saveFPA1 = I915_READ(FPA1); 65 - dev_priv->saveDPLL_A = I915_READ(DPLL_A); 66 - if (IS_I965G(dev)) 67 - dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); 68 - dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); 69 - dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); 70 - dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); 71 - 
dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); 72 - dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); 73 - dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); 74 - dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 75 - 76 - dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); 77 - dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); 78 - dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); 79 - dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); 80 - dev_priv->saveDSPABASE = I915_READ(DSPABASE); 81 - if (IS_I965G(dev)) { 82 - dev_priv->saveDSPASURF = I915_READ(DSPASURF); 83 - dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); 84 - } 85 - i915_save_palette(dev, PIPE_A); 86 - dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT); 87 - 88 - /* Pipe & plane B info */ 89 - dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); 90 - dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); 91 - dev_priv->saveFPB0 = I915_READ(FPB0); 92 - dev_priv->saveFPB1 = I915_READ(FPB1); 93 - dev_priv->saveDPLL_B = I915_READ(DPLL_B); 94 - if (IS_I965G(dev)) 95 - dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); 96 - dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); 97 - dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); 98 - dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); 99 - dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); 100 - dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); 101 - dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); 102 - dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 103 - 104 - dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); 105 - dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); 106 - dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); 107 - dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); 108 - dev_priv->saveDSPBBASE = I915_READ(DSPBBASE); 109 - if (IS_I965GM(dev) || IS_IGD_GM(dev)) { 110 - dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); 111 - dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); 112 - } 113 - i915_save_palette(dev, PIPE_B); 114 - dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT); 115 - 116 - /* CRT state */ 117 - dev_priv->saveADPA = I915_READ(ADPA); 118 - 119 - /* LVDS state */ 120 - dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); 121 - dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 122 - dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 123 - if (IS_I965G(dev)) 124 - dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 125 - if (IS_MOBILE(dev) && !IS_I830(dev)) 126 - dev_priv->saveLVDS = I915_READ(LVDS); 127 - if (!IS_I830(dev) && !IS_845G(dev)) 128 - dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 129 - dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON); 130 - dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF); 131 - dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE); 132 - 133 - /* FIXME: save TV & SDVO state */ 134 - 135 - /* FBC state */ 136 - dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); 137 - dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); 138 - dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 139 - dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); 140 - 141 - /* Interrupt state */ 142 - dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R); 143 - dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R); 144 - dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R); 145 - 146 - /* VGA state */ 147 - dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0); 148 - dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1); 149 - dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV); 150 - dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 151 - 152 - /* Clock gating state */ 153 - dev_priv->saveD_STATE = I915_READ(D_STATE); 154 - 
dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); 155 - 156 - /* Cache mode state */ 157 - dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 158 - 159 - /* Memory Arbitration state */ 160 - dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); 161 - 162 - /* Scratch space */ 163 - for (i = 0; i < 16; i++) { 164 - dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2)); 165 - dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); 166 - } 167 - for (i = 0; i < 3; i++) 168 - dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 169 - 170 - i915_save_vga(dev); 171 172 if (state.event == PM_EVENT_SUSPEND) { 173 /* Shut down the device */ ··· 68 69 static int i915_resume(struct drm_device *dev) 70 { 71 - struct drm_i915_private *dev_priv = dev->dev_private; 72 - int i; 73 - 74 pci_set_power_state(dev->pdev, PCI_D0); 75 pci_restore_state(dev->pdev); 76 if (pci_enable_device(dev->pdev)) 77 return -1; 78 pci_set_master(dev->pdev); 79 80 - pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 81 82 - I915_WRITE(DSPARB, dev_priv->saveDSPARB); 83 - 84 - /* Pipe & plane A info */ 85 - /* Prime the clock */ 86 - if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { 87 - I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & 88 - ~DPLL_VCO_ENABLE); 89 - udelay(150); 90 - } 91 - I915_WRITE(FPA0, dev_priv->saveFPA0); 92 - I915_WRITE(FPA1, dev_priv->saveFPA1); 93 - /* Actually enable it */ 94 - I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); 95 - udelay(150); 96 - if (IS_I965G(dev)) 97 - I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); 98 - udelay(150); 99 - 100 - /* Restore mode */ 101 - I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); 102 - I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); 103 - I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); 104 - I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); 105 - I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); 106 - I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); 107 - I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); 108 - 109 - /* Restore plane info */ 110 - I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); 111 - I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); 112 - I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); 113 - I915_WRITE(DSPABASE, dev_priv->saveDSPABASE); 114 - I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); 115 - if (IS_I965G(dev)) { 116 - I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); 117 - I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); 118 - } 119 - 120 - I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); 121 - 122 - i915_restore_palette(dev, PIPE_A); 123 - /* Enable the plane */ 124 - I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); 125 - I915_WRITE(DSPABASE, I915_READ(DSPABASE)); 126 - 127 - /* Pipe & plane B info */ 128 - if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { 129 - I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & 130 - ~DPLL_VCO_ENABLE); 131 - udelay(150); 132 - } 133 - I915_WRITE(FPB0, dev_priv->saveFPB0); 134 - I915_WRITE(FPB1, dev_priv->saveFPB1); 135 - /* Actually enable it */ 136 - I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); 137 - udelay(150); 138 - if (IS_I965G(dev)) 139 - I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); 140 - udelay(150); 141 - 142 - /* Restore mode */ 143 - I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); 144 - I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); 145 - I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); 146 - I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); 147 - I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); 148 - I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); 149 - I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); 150 - 151 - /* Restore plane info */ 152 - I915_WRITE(DSPBSIZE, 
dev_priv->saveDSPBSIZE); 153 - I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); 154 - I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); 155 - I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE); 156 - I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); 157 - if (IS_I965G(dev)) { 158 - I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); 159 - I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); 160 - } 161 - 162 - I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); 163 - 164 - i915_restore_palette(dev, PIPE_B); 165 - /* Enable the plane */ 166 - I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); 167 - I915_WRITE(DSPBBASE, I915_READ(DSPBBASE)); 168 - 169 - /* CRT state */ 170 - I915_WRITE(ADPA, dev_priv->saveADPA); 171 - 172 - /* LVDS state */ 173 - if (IS_I965G(dev)) 174 - I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); 175 - if (IS_MOBILE(dev) && !IS_I830(dev)) 176 - I915_WRITE(LVDS, dev_priv->saveLVDS); 177 - if (!IS_I830(dev) && !IS_845G(dev)) 178 - I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); 179 - 180 - I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); 181 - I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 182 - I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON); 183 - I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF); 184 - I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE); 185 - I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); 186 - 187 - /* FIXME: restore TV & SDVO state */ 188 - 189 - /* FBC info */ 190 - I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); 191 - I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); 192 - I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); 193 - I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); 194 - 195 - /* VGA state */ 196 - I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); 197 - I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0); 198 - I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1); 199 - I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV); 200 - udelay(150); 201 - 202 - /* Clock gating state */ 203 - I915_WRITE (D_STATE, dev_priv->saveD_STATE); 204 - I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D); 205 - 206 - /* Cache mode state */ 207 - I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 208 - 209 - /* Memory arbitration state */ 210 - I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); 211 - 212 - for (i = 0; i < 16; i++) { 213 - I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]); 214 - I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); 215 - } 216 - for (i = 0; i < 3; i++) 217 - I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 218 - 219 - i915_restore_vga(dev); 220 221 return 0; 222 } ··· 87 */ 88 .driver_features = 89 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 90 - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL | 91 - DRIVER_IRQ_VBL2, 92 .load = i915_driver_load, 93 .unload = i915_driver_unload, 94 .lastclose = i915_driver_lastclose, 95 .preclose = i915_driver_preclose, 96 .suspend = i915_suspend, 97 .resume = i915_resume, 98 .device_is_agp = i915_driver_device_is_agp, 99 - .vblank_wait = i915_driver_vblank_wait, 100 - .vblank_wait2 = i915_driver_vblank_wait2, 101 .irq_preinstall = i915_driver_irq_preinstall, 102 .irq_postinstall = i915_driver_irq_postinstall, 103 .irq_uninstall = i915_driver_irq_uninstall, ··· 107 .reclaim_buffers = drm_core_reclaim_buffers, 108 .get_map_ofs = drm_core_get_map_ofs, 109 .get_reg_ofs = drm_core_get_reg_ofs, 110 .ioctls = i915_ioctls, 111 .fops = { 112 .owner = THIS_MODULE,
··· 38 i915_PCI_IDS 39 }; 40 41 static int i915_suspend(struct drm_device *dev, pm_message_t state) 42 { 43 struct drm_i915_private *dev_priv = dev->dev_private; 44 45 if (!dev || !dev_priv) { 46 printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv); ··· 254 return 0; 255 256 pci_save_state(dev->pdev); 257 258 + i915_save_state(dev); 259 260 + intel_opregion_free(dev); 261 262 if (state.event == PM_EVENT_SUSPEND) { 263 /* Shut down the device */ ··· 382 383 static int i915_resume(struct drm_device *dev) 384 { 385 pci_set_power_state(dev->pdev, PCI_D0); 386 pci_restore_state(dev->pdev); 387 if (pci_enable_device(dev->pdev)) 388 return -1; 389 pci_set_master(dev->pdev); 390 391 + i915_restore_state(dev); 392 393 + intel_opregion_init(dev); 394 395 return 0; 396 } ··· 541 */ 542 .driver_features = 543 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 544 + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, 545 .load = i915_driver_load, 546 .unload = i915_driver_unload, 547 + .open = i915_driver_open, 548 .lastclose = i915_driver_lastclose, 549 .preclose = i915_driver_preclose, 550 + .postclose = i915_driver_postclose, 551 .suspend = i915_suspend, 552 .resume = i915_resume, 553 .device_is_agp = i915_driver_device_is_agp, 554 + .get_vblank_counter = i915_get_vblank_counter, 555 + .enable_vblank = i915_enable_vblank, 556 + .disable_vblank = i915_disable_vblank, 557 .irq_preinstall = i915_driver_irq_preinstall, 558 .irq_postinstall = i915_driver_irq_postinstall, 559 .irq_uninstall = i915_driver_irq_uninstall, ··· 559 .reclaim_buffers = drm_core_reclaim_buffers, 560 .get_map_ofs = drm_core_get_map_ofs, 561 .get_reg_ofs = drm_core_get_reg_ofs, 562 + .proc_init = i915_gem_proc_init, 563 + .proc_cleanup = i915_gem_proc_cleanup, 564 + .gem_init_object = i915_gem_init_object, 565 + .gem_free_object = i915_gem_free_object, 566 .ioctls = i915_ioctls, 567 .fops = { 568 .owner = THIS_MODULE,
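The MI_STORE_DWORD_INDEX emits in the i915_dma.c hunks above and the READ_HWSP macro kept in the header below are two halves of the same breadcrumb mechanism: the GPU writes a monotonically increasing counter into a fixed slot of a CPU-visible status page, and the driver reads that slot to learn which request most recently completed. A hypothetical, self-contained sketch of the idea, with invented names and the status page modeled as a plain array:

#include <stdint.h>
#include <stdio.h>

#define STATUS_PAGE_DWORDS      1024
#define BREADCRUMB_SLOT         5       /* slot index chosen for the example */

static volatile uint32_t status_page[STATUS_PAGE_DWORDS];

/* What the queued store-dword command does once the GPU reaches it. */
static void gpu_store_breadcrumb(uint32_t seqno)
{
        status_page[BREADCRUMB_SLOT] = seqno;
}

/* What the driver's read side boils down to. */
static uint32_t read_breadcrumb(void)
{
        return status_page[BREADCRUMB_SLOT];
}

int main(void)
{
        gpu_store_breadcrumb(42);
        printf("last completed request: %u\n", read_breadcrumb());
        return 0;
}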
+351 -831
drivers/gpu/drm/i915/i915_drv.h
··· 30 #ifndef _I915_DRV_H_ 31 #define _I915_DRV_H_ 32 33 /* General customization: 34 */ 35 ··· 39 40 #define DRIVER_NAME "i915" 41 #define DRIVER_DESC "Intel Graphics" 42 - #define DRIVER_DATE "20060119" 43 44 /* Interface history: 45 * ··· 60 #define DRIVER_MINOR 6 61 #define DRIVER_PATCHLEVEL 0 62 63 typedef struct _drm_i915_ring_buffer { 64 int tail_mask; 65 - unsigned long Start; 66 - unsigned long End; 67 unsigned long Size; 68 u8 *virtual_start; 69 int head; 70 int tail; 71 int space; 72 drm_local_map_t map; 73 } drm_i915_ring_buffer_t; 74 75 struct mem_block { ··· 90 typedef struct _drm_i915_vbl_swap { 91 struct list_head head; 92 drm_drawable_t drw_id; 93 - unsigned int pipe; 94 unsigned int sequence; 95 } drm_i915_vbl_swap_t; 96 97 typedef struct drm_i915_private { 98 drm_local_map_t *sarea; 99 - drm_local_map_t *mmio_map; 100 101 drm_i915_sarea_t *sarea_priv; 102 drm_i915_ring_buffer_t ring; ··· 119 drm_dma_handle_t *status_page_dmah; 120 void *hw_status_page; 121 dma_addr_t dma_status_page; 122 - unsigned long counter; 123 unsigned int status_gfx_addr; 124 drm_local_map_t hws_map; 125 126 unsigned int cpp; 127 int back_offset; 128 int front_offset; 129 int current_page; 130 int page_flipping; 131 - int use_mi_batchbuffer_start; 132 133 wait_queue_head_t irq_queue; 134 atomic_t irq_received; 135 - atomic_t irq_emitted; 136 137 int tex_lru_log_granularity; 138 int allow_batchbuffer; ··· 148 spinlock_t swaps_lock; 149 drm_i915_vbl_swap_t vbl_swaps; 150 unsigned int swaps_pending; 151 152 /* Register state */ 153 u8 saveLBB; ··· 175 u32 saveDSPASTRIDE; 176 u32 saveDSPASIZE; 177 u32 saveDSPAPOS; 178 - u32 saveDSPABASE; 179 u32 saveDSPASURF; 180 u32 saveDSPATILEOFF; 181 u32 savePFIT_PGM_RATIOS; ··· 196 u32 saveDSPBSTRIDE; 197 u32 saveDSPBSIZE; 198 u32 saveDSPBPOS; 199 - u32 saveDSPBBASE; 200 u32 saveDSPBSURF; 201 u32 saveDSPBTILEOFF; 202 - u32 saveVCLK_DIVISOR_VGA0; 203 - u32 saveVCLK_DIVISOR_VGA1; 204 - u32 saveVCLK_POST_DIV; 205 u32 saveVGACNTRL; 206 u32 saveADPA; 207 u32 saveLVDS; 208 - u32 saveLVDSPP_ON; 209 - u32 saveLVDSPP_OFF; 210 u32 saveDVOA; 211 u32 saveDVOB; 212 u32 saveDVOC; 213 u32 savePP_ON; 214 u32 savePP_OFF; 215 u32 savePP_CONTROL; 216 - u32 savePP_CYCLE; 217 u32 savePFIT_CONTROL; 218 u32 save_palette_a[256]; 219 u32 save_palette_b[256]; ··· 226 u32 saveIMR; 227 u32 saveCACHE_MODE_0; 228 u32 saveD_STATE; 229 - u32 saveDSPCLK_GATE_D; 230 u32 saveMI_ARB_STATE; 231 u32 saveSWF0[16]; 232 u32 saveSWF1[16]; ··· 239 u8 saveDACMASK; 240 u8 saveDACDATA[256*3]; /* 256 3-byte colors */ 241 u8 saveCR[37]; 242 } drm_i915_private_t; 243 244 extern struct drm_ioctl_desc i915_ioctls[]; 245 extern int i915_max_ioctl; ··· 420 extern void i915_kernel_lost_context(struct drm_device * dev); 421 extern int i915_driver_load(struct drm_device *, unsigned long flags); 422 extern int i915_driver_unload(struct drm_device *); 423 extern void i915_driver_lastclose(struct drm_device * dev); 424 extern void i915_driver_preclose(struct drm_device *dev, 425 struct drm_file *file_priv); 426 extern int i915_driver_device_is_agp(struct drm_device * dev); 427 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 428 unsigned long arg); 429 430 /* i915_irq.c */ 431 extern int i915_irq_emit(struct drm_device *dev, void *data, 432 struct drm_file *file_priv); 433 extern int i915_irq_wait(struct drm_device *dev, void *data, 434 struct drm_file *file_priv); 435 436 - extern int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence); 437 - extern int 
i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence); 438 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 439 extern void i915_driver_irq_preinstall(struct drm_device * dev); 440 - extern void i915_driver_irq_postinstall(struct drm_device * dev); 441 extern void i915_driver_irq_uninstall(struct drm_device * dev); 442 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 443 struct drm_file *file_priv); 444 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, 445 struct drm_file *file_priv); 446 extern int i915_vblank_swap(struct drm_device *dev, void *data, 447 struct drm_file *file_priv); 448 449 /* i915_mem.c */ 450 extern int i915_mem_alloc(struct drm_device *dev, void *data, ··· 469 extern void i915_mem_takedown(struct mem_block **heap); 470 extern void i915_mem_release(struct drm_device * dev, 471 struct drm_file *file_priv, struct mem_block *heap); 472 473 - #define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg)) 474 - #define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val)) 475 - #define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg)) 476 - #define I915_WRITE16(reg,val) DRM_WRITE16(dev_priv->mmio_map, (reg), (val)) 477 478 #define I915_VERBOSE 0 479 ··· 591 if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \ 592 dev_priv->ring.tail = outring; \ 593 dev_priv->ring.space -= outcount * 4; \ 594 - I915_WRITE(LP_RING + RING_TAIL, outring); \ 595 } while(0) 596 597 - extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 598 - 599 - /* Extended config space */ 600 - #define LBB 0xf4 601 - 602 - /* VGA stuff */ 603 - 604 - #define VGA_ST01_MDA 0x3ba 605 - #define VGA_ST01_CGA 0x3da 606 - 607 - #define VGA_MSR_WRITE 0x3c2 608 - #define VGA_MSR_READ 0x3cc 609 - #define VGA_MSR_MEM_EN (1<<1) 610 - #define VGA_MSR_CGA_MODE (1<<0) 611 - 612 - #define VGA_SR_INDEX 0x3c4 613 - #define VGA_SR_DATA 0x3c5 614 - 615 - #define VGA_AR_INDEX 0x3c0 616 - #define VGA_AR_VID_EN (1<<5) 617 - #define VGA_AR_DATA_WRITE 0x3c0 618 - #define VGA_AR_DATA_READ 0x3c1 619 - 620 - #define VGA_GR_INDEX 0x3ce 621 - #define VGA_GR_DATA 0x3cf 622 - /* GR05 */ 623 - #define VGA_GR_MEM_READ_MODE_SHIFT 3 624 - #define VGA_GR_MEM_READ_MODE_PLANE 1 625 - /* GR06 */ 626 - #define VGA_GR_MEM_MODE_MASK 0xc 627 - #define VGA_GR_MEM_MODE_SHIFT 2 628 - #define VGA_GR_MEM_A0000_AFFFF 0 629 - #define VGA_GR_MEM_A0000_BFFFF 1 630 - #define VGA_GR_MEM_B0000_B7FFF 2 631 - #define VGA_GR_MEM_B0000_BFFFF 3 632 - 633 - #define VGA_DACMASK 0x3c6 634 - #define VGA_DACRX 0x3c7 635 - #define VGA_DACWX 0x3c8 636 - #define VGA_DACDATA 0x3c9 637 - 638 - #define VGA_CR_INDEX_MDA 0x3b4 639 - #define VGA_CR_DATA_MDA 0x3b5 640 - #define VGA_CR_INDEX_CGA 0x3d4 641 - #define VGA_CR_DATA_CGA 0x3d5 642 - 643 - #define GFX_OP_USER_INTERRUPT ((0<<29)|(2<<23)) 644 - #define GFX_OP_BREAKPOINT_INTERRUPT ((0<<29)|(1<<23)) 645 - #define CMD_REPORT_HEAD (7<<23) 646 - #define CMD_STORE_DWORD_IDX ((0x21<<23) | 0x1) 647 - #define CMD_OP_BATCH_BUFFER ((0x0<<29)|(0x30<<23)|0x1) 648 - 649 - #define INST_PARSER_CLIENT 0x00000000 650 - #define INST_OP_FLUSH 0x02000000 651 - #define INST_FLUSH_MAP_CACHE 0x00000001 652 - 653 - #define BB1_START_ADDR_MASK (~0x7) 654 - #define BB1_PROTECTED (1<<0) 655 - #define BB1_UNPROTECTED (0<<0) 656 - #define BB2_END_ADDR_MASK (~0x7) 657 - 658 - /* Framebuffer compression */ 659 - #define FBC_CFB_BASE 0x03200 /* 4k page aligned */ 660 - #define FBC_LL_BASE 0x03204 /* 4k page aligned */ 661 - #define FBC_CONTROL 
0x03208 662 - #define FBC_CTL_EN (1<<31) 663 - #define FBC_CTL_PERIODIC (1<<30) 664 - #define FBC_CTL_INTERVAL_SHIFT (16) 665 - #define FBC_CTL_UNCOMPRESSIBLE (1<<14) 666 - #define FBC_CTL_STRIDE_SHIFT (5) 667 - #define FBC_CTL_FENCENO (1<<0) 668 - #define FBC_COMMAND 0x0320c 669 - #define FBC_CMD_COMPRESS (1<<0) 670 - #define FBC_STATUS 0x03210 671 - #define FBC_STAT_COMPRESSING (1<<31) 672 - #define FBC_STAT_COMPRESSED (1<<30) 673 - #define FBC_STAT_MODIFIED (1<<29) 674 - #define FBC_STAT_CURRENT_LINE (1<<0) 675 - #define FBC_CONTROL2 0x03214 676 - #define FBC_CTL_FENCE_DBL (0<<4) 677 - #define FBC_CTL_IDLE_IMM (0<<2) 678 - #define FBC_CTL_IDLE_FULL (1<<2) 679 - #define FBC_CTL_IDLE_LINE (2<<2) 680 - #define FBC_CTL_IDLE_DEBUG (3<<2) 681 - #define FBC_CTL_CPU_FENCE (1<<1) 682 - #define FBC_CTL_PLANEA (0<<0) 683 - #define FBC_CTL_PLANEB (1<<0) 684 - #define FBC_FENCE_OFF 0x0321b 685 - 686 - #define FBC_LL_SIZE (1536) 687 - #define FBC_LL_PAD (32) 688 - 689 - /* Interrupt bits: 690 - */ 691 - #define USER_INT_FLAG (1<<1) 692 - #define VSYNC_PIPEB_FLAG (1<<5) 693 - #define VSYNC_PIPEA_FLAG (1<<7) 694 - #define HWB_OOM_FLAG (1<<13) /* binner out of memory */ 695 - 696 - #define I915REG_HWSTAM 0x02098 697 - #define I915REG_INT_IDENTITY_R 0x020a4 698 - #define I915REG_INT_MASK_R 0x020a8 699 - #define I915REG_INT_ENABLE_R 0x020a0 700 - 701 - #define I915REG_PIPEASTAT 0x70024 702 - #define I915REG_PIPEBSTAT 0x71024 703 - 704 - #define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) 705 - #define I915_VBLANK_CLEAR (1UL<<1) 706 - 707 - #define SRX_INDEX 0x3c4 708 - #define SRX_DATA 0x3c5 709 - #define SR01 1 710 - #define SR01_SCREEN_OFF (1<<5) 711 - 712 - #define PPCR 0x61204 713 - #define PPCR_ON (1<<0) 714 - 715 - #define DVOB 0x61140 716 - #define DVOB_ON (1<<31) 717 - #define DVOC 0x61160 718 - #define DVOC_ON (1<<31) 719 - #define LVDS 0x61180 720 - #define LVDS_ON (1<<31) 721 - 722 - #define ADPA 0x61100 723 - #define ADPA_DPMS_MASK (~(3<<10)) 724 - #define ADPA_DPMS_ON (0<<10) 725 - #define ADPA_DPMS_SUSPEND (1<<10) 726 - #define ADPA_DPMS_STANDBY (2<<10) 727 - #define ADPA_DPMS_OFF (3<<10) 728 - 729 - #define NOPID 0x2094 730 - #define LP_RING 0x2030 731 - #define HP_RING 0x2040 732 - /* The binner has its own ring buffer: 733 - */ 734 - #define HWB_RING 0x2400 735 - 736 - #define RING_TAIL 0x00 737 - #define TAIL_ADDR 0x001FFFF8 738 - #define RING_HEAD 0x04 739 - #define HEAD_WRAP_COUNT 0xFFE00000 740 - #define HEAD_WRAP_ONE 0x00200000 741 - #define HEAD_ADDR 0x001FFFFC 742 - #define RING_START 0x08 743 - #define START_ADDR 0x0xFFFFF000 744 - #define RING_LEN 0x0C 745 - #define RING_NR_PAGES 0x001FF000 746 - #define RING_REPORT_MASK 0x00000006 747 - #define RING_REPORT_64K 0x00000002 748 - #define RING_REPORT_128K 0x00000004 749 - #define RING_NO_REPORT 0x00000000 750 - #define RING_VALID_MASK 0x00000001 751 - #define RING_VALID 0x00000001 752 - #define RING_INVALID 0x00000000 753 - 754 - /* Instruction parser error reg: 755 - */ 756 - #define IPEIR 0x2088 757 - 758 - /* Scratch pad debug 0 reg: 759 - */ 760 - #define SCPD0 0x209c 761 - 762 - /* Error status reg: 763 - */ 764 - #define ESR 0x20b8 765 - 766 - /* Secondary DMA fetch address debug reg: 767 - */ 768 - #define DMA_FADD_S 0x20d4 769 - 770 - /* Memory Interface Arbitration State 771 - */ 772 - #define MI_ARB_STATE 0x20e4 773 - 774 - /* Cache mode 0 reg. 
775 - * - Manipulating render cache behaviour is central 776 - * to the concept of zone rendering, tuning this reg can help avoid 777 - * unnecessary render cache reads and even writes (for z/stencil) 778 - * at beginning and end of scene. 779 * 780 - * - To change a bit, write to this reg with a mask bit set and the 781 - * bit of interest either set or cleared. EG: (BIT<<16) | BIT to set. 782 */ 783 - #define Cache_Mode_0 0x2120 784 - #define CACHE_MODE_0 0x2120 785 - #define CM0_MASK_SHIFT 16 786 - #define CM0_IZ_OPT_DISABLE (1<<6) 787 - #define CM0_ZR_OPT_DISABLE (1<<5) 788 - #define CM0_DEPTH_EVICT_DISABLE (1<<4) 789 - #define CM0_COLOR_EVICT_DISABLE (1<<3) 790 - #define CM0_DEPTH_WRITE_DISABLE (1<<1) 791 - #define CM0_RC_OP_FLUSH_DISABLE (1<<0) 792 - 793 - 794 - /* Graphics flush control. A CPU write flushes the GWB of all writes. 795 - * The data is discarded. 796 - */ 797 - #define GFX_FLSH_CNTL 0x2170 798 - 799 - /* Binner control. Defines the location of the bin pointer list: 800 - */ 801 - #define BINCTL 0x2420 802 - #define BC_MASK (1 << 9) 803 - 804 - /* Binned scene info. 805 - */ 806 - #define BINSCENE 0x2428 807 - #define BS_OP_LOAD (1 << 8) 808 - #define BS_MASK (1 << 22) 809 - 810 - /* Bin command parser debug reg: 811 - */ 812 - #define BCPD 0x2480 813 - 814 - /* Bin memory control debug reg: 815 - */ 816 - #define BMCD 0x2484 817 - 818 - /* Bin data cache debug reg: 819 - */ 820 - #define BDCD 0x2488 821 - 822 - /* Binner pointer cache debug reg: 823 - */ 824 - #define BPCD 0x248c 825 - 826 - /* Binner scratch pad debug reg: 827 - */ 828 - #define BINSKPD 0x24f0 829 - 830 - /* HWB scratch pad debug reg: 831 - */ 832 - #define HWBSKPD 0x24f4 833 - 834 - /* Binner memory pool reg: 835 - */ 836 - #define BMP_BUFFER 0x2430 837 - #define BMP_PAGE_SIZE_4K (0 << 10) 838 - #define BMP_BUFFER_SIZE_SHIFT 1 839 - #define BMP_ENABLE (1 << 0) 840 - 841 - /* Get/put memory from the binner memory pool: 842 - */ 843 - #define BMP_GET 0x2438 844 - #define BMP_PUT 0x2440 845 - #define BMP_OFFSET_SHIFT 5 846 - 847 - /* 3D state packets: 848 - */ 849 - #define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) 850 - 851 - #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) 852 - #define SC_UPDATE_SCISSOR (0x1<<1) 853 - #define SC_ENABLE_MASK (0x1<<0) 854 - #define SC_ENABLE (0x1<<0) 855 - 856 - #define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) 857 - 858 - #define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) 859 - #define SCI_YMIN_MASK (0xffff<<16) 860 - #define SCI_XMIN_MASK (0xffff<<0) 861 - #define SCI_YMAX_MASK (0xffff<<16) 862 - #define SCI_XMAX_MASK (0xffff<<0) 863 - 864 - #define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) 865 - #define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) 866 - #define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) 867 - #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) 868 - #define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) 869 - #define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) 870 - #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) 871 - 872 - #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) 873 - 874 - #define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) 875 - #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) 876 - #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) 877 - #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) 878 - #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) 879 - #define XY_SRC_COPY_BLT_DST_TILED (1<<11) 880 - 881 - #define MI_BATCH_BUFFER ((0x30<<23)|1) 882 
- #define MI_BATCH_BUFFER_START (0x31<<23) 883 - #define MI_BATCH_BUFFER_END (0xA<<23) 884 - #define MI_BATCH_NON_SECURE (1) 885 - #define MI_BATCH_NON_SECURE_I965 (1<<8) 886 - 887 - #define MI_WAIT_FOR_EVENT ((0x3<<23)) 888 - #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) 889 - #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) 890 - #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) 891 - 892 - #define MI_LOAD_SCAN_LINES_INCL ((0x12<<23)) 893 - 894 - #define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) 895 - #define ASYNC_FLIP (1<<22) 896 - #define DISPLAY_PLANE_A (0<<20) 897 - #define DISPLAY_PLANE_B (1<<20) 898 - 899 - /* Display regs */ 900 - #define DSPACNTR 0x70180 901 - #define DSPBCNTR 0x71180 902 - #define DISPPLANE_SEL_PIPE_MASK (1<<24) 903 - 904 - /* Define the region of interest for the binner: 905 - */ 906 - #define CMD_OP_BIN_CONTROL ((0x3<<29)|(0x1d<<24)|(0x84<<16)|4) 907 - 908 - #define CMD_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) 909 - 910 - #define CMD_MI_FLUSH (0x04 << 23) 911 - #define MI_NO_WRITE_FLUSH (1 << 2) 912 - #define MI_READ_FLUSH (1 << 0) 913 - #define MI_EXE_FLUSH (1 << 1) 914 - #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ 915 - #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ 916 - 917 - #define BREADCRUMB_BITS 31 918 - #define BREADCRUMB_MASK ((1U << BREADCRUMB_BITS) - 1) 919 - 920 - #define READ_BREADCRUMB(dev_priv) (((volatile u32*)(dev_priv->hw_status_page))[5]) 921 #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 922 923 - #define BLC_PWM_CTL 0x61254 924 - #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) 925 - 926 - #define BLC_PWM_CTL2 0x61250 927 - /** 928 - * This is the most significant 15 bits of the number of backlight cycles in a 929 - * complete cycle of the modulated backlight control. 930 - * 931 - * The actual value is this field multiplied by two. 932 - */ 933 - #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) 934 - #define BLM_LEGACY_MODE (1 << 16) 935 - /** 936 - * This is the number of cycles out of the backlight modulation cycle for which 937 - * the backlight is on. 938 - * 939 - * This field must be no greater than the number of cycles in the complete 940 - * backlight modulation cycle. 941 - */ 942 - #define BACKLIGHT_DUTY_CYCLE_SHIFT (0) 943 - #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) 944 - 945 - #define I915_GCFGC 0xf0 946 - #define I915_LOW_FREQUENCY_ENABLE (1 << 7) 947 - #define I915_DISPLAY_CLOCK_190_200_MHZ (0 << 4) 948 - #define I915_DISPLAY_CLOCK_333_MHZ (4 << 4) 949 - #define I915_DISPLAY_CLOCK_MASK (7 << 4) 950 - 951 - #define I855_HPLLCC 0xc0 952 - #define I855_CLOCK_CONTROL_MASK (3 << 0) 953 - #define I855_CLOCK_133_200 (0 << 0) 954 - #define I855_CLOCK_100_200 (1 << 0) 955 - #define I855_CLOCK_100_133 (2 << 0) 956 - #define I855_CLOCK_166_250 (3 << 0) 957 - 958 - /* p317, 319 959 - */ 960 - #define VCLK2_VCO_M 0x6008 /* treat as 16 bit? (includes msbs) */ 961 - #define VCLK2_VCO_N 0x600a 962 - #define VCLK2_VCO_DIV_SEL 0x6012 963 - 964 - #define VCLK_DIVISOR_VGA0 0x6000 965 - #define VCLK_DIVISOR_VGA1 0x6004 966 - #define VCLK_POST_DIV 0x6010 967 - /** Selects a post divisor of 4 instead of 2. */ 968 - # define VGA1_PD_P2_DIV_4 (1 << 15) 969 - /** Overrides the p2 post divisor field */ 970 - # define VGA1_PD_P1_DIV_2 (1 << 13) 971 - # define VGA1_PD_P1_SHIFT 8 972 - /** P1 value is 2 greater than this field */ 973 - # define VGA1_PD_P1_MASK (0x1f << 8) 974 - /** Selects a post divisor of 4 instead of 2. 
*/ 975 - # define VGA0_PD_P2_DIV_4 (1 << 7) 976 - /** Overrides the p2 post divisor field */ 977 - # define VGA0_PD_P1_DIV_2 (1 << 5) 978 - # define VGA0_PD_P1_SHIFT 0 979 - /** P1 value is 2 greater than this field */ 980 - # define VGA0_PD_P1_MASK (0x1f << 0) 981 - 982 - /* PCI D state control register */ 983 - #define D_STATE 0x6104 984 - #define DSPCLK_GATE_D 0x6200 985 - 986 - /* I830 CRTC registers */ 987 - #define HTOTAL_A 0x60000 988 - #define HBLANK_A 0x60004 989 - #define HSYNC_A 0x60008 990 - #define VTOTAL_A 0x6000c 991 - #define VBLANK_A 0x60010 992 - #define VSYNC_A 0x60014 993 - #define PIPEASRC 0x6001c 994 - #define BCLRPAT_A 0x60020 995 - #define VSYNCSHIFT_A 0x60028 996 - 997 - #define HTOTAL_B 0x61000 998 - #define HBLANK_B 0x61004 999 - #define HSYNC_B 0x61008 1000 - #define VTOTAL_B 0x6100c 1001 - #define VBLANK_B 0x61010 1002 - #define VSYNC_B 0x61014 1003 - #define PIPEBSRC 0x6101c 1004 - #define BCLRPAT_B 0x61020 1005 - #define VSYNCSHIFT_B 0x61028 1006 - 1007 - #define PP_STATUS 0x61200 1008 - # define PP_ON (1 << 31) 1009 - /** 1010 - * Indicates that all dependencies of the panel are on: 1011 - * 1012 - * - PLL enabled 1013 - * - pipe enabled 1014 - * - LVDS/DVOB/DVOC on 1015 - */ 1016 - # define PP_READY (1 << 30) 1017 - # define PP_SEQUENCE_NONE (0 << 28) 1018 - # define PP_SEQUENCE_ON (1 << 28) 1019 - # define PP_SEQUENCE_OFF (2 << 28) 1020 - # define PP_SEQUENCE_MASK 0x30000000 1021 - #define PP_CONTROL 0x61204 1022 - # define POWER_TARGET_ON (1 << 0) 1023 - 1024 - #define LVDSPP_ON 0x61208 1025 - #define LVDSPP_OFF 0x6120c 1026 - #define PP_CYCLE 0x61210 1027 - 1028 - #define PFIT_CONTROL 0x61230 1029 - # define PFIT_ENABLE (1 << 31) 1030 - # define PFIT_PIPE_MASK (3 << 29) 1031 - # define PFIT_PIPE_SHIFT 29 1032 - # define VERT_INTERP_DISABLE (0 << 10) 1033 - # define VERT_INTERP_BILINEAR (1 << 10) 1034 - # define VERT_INTERP_MASK (3 << 10) 1035 - # define VERT_AUTO_SCALE (1 << 9) 1036 - # define HORIZ_INTERP_DISABLE (0 << 6) 1037 - # define HORIZ_INTERP_BILINEAR (1 << 6) 1038 - # define HORIZ_INTERP_MASK (3 << 6) 1039 - # define HORIZ_AUTO_SCALE (1 << 5) 1040 - # define PANEL_8TO6_DITHER_ENABLE (1 << 3) 1041 - 1042 - #define PFIT_PGM_RATIOS 0x61234 1043 - # define PFIT_VERT_SCALE_MASK 0xfff00000 1044 - # define PFIT_HORIZ_SCALE_MASK 0x0000fff0 1045 - 1046 - #define PFIT_AUTO_RATIOS 0x61238 1047 - 1048 - 1049 - #define DPLL_A 0x06014 1050 - #define DPLL_B 0x06018 1051 - # define DPLL_VCO_ENABLE (1 << 31) 1052 - # define DPLL_DVO_HIGH_SPEED (1 << 30) 1053 - # define DPLL_SYNCLOCK_ENABLE (1 << 29) 1054 - # define DPLL_VGA_MODE_DIS (1 << 28) 1055 - # define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ 1056 - # define DPLLB_MODE_LVDS (2 << 26) /* i915 */ 1057 - # define DPLL_MODE_MASK (3 << 26) 1058 - # define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ 1059 - # define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ 1060 - # define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ 1061 - # define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ 1062 - # define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 1063 - # define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 1064 - /** 1065 - * The i830 generation, in DAC/serial mode, defines p1 as two plus this 1066 - * bitfield, or just 2 if PLL_P1_DIVIDE_BY_TWO is set. 1067 - */ 1068 - # define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 1069 - /** 1070 - * The i830 generation, in LVDS mode, defines P1 as the bit number set within 1071 - * this field (only one bit may be set). 
1072 - */ 1073 - # define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 1074 - # define DPLL_FPA01_P1_POST_DIV_SHIFT 16 1075 - # define PLL_P2_DIVIDE_BY_4 (1 << 23) /* i830, required in DVO non-gang */ 1076 - # define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ 1077 - # define PLL_REF_INPUT_DREFCLK (0 << 13) 1078 - # define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ 1079 - # define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ 1080 - # define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) 1081 - # define PLL_REF_INPUT_MASK (3 << 13) 1082 - # define PLL_LOAD_PULSE_PHASE_SHIFT 9 1083 - /* 1084 - * Parallel to Serial Load Pulse phase selection. 1085 - * Selects the phase for the 10X DPLL clock for the PCIe 1086 - * digital display port. The range is 4 to 13; 10 or more 1087 - * is just a flip delay. The default is 6 1088 - */ 1089 - # define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) 1090 - # define DISPLAY_RATE_SELECT_FPA1 (1 << 8) 1091 - 1092 - /** 1093 - * SDVO multiplier for 945G/GM. Not used on 965. 1094 - * 1095 - * \sa DPLL_MD_UDI_MULTIPLIER_MASK 1096 - */ 1097 - # define SDVO_MULTIPLIER_MASK 0x000000ff 1098 - # define SDVO_MULTIPLIER_SHIFT_HIRES 4 1099 - # define SDVO_MULTIPLIER_SHIFT_VGA 0 1100 - 1101 - /** @defgroup DPLL_MD 1102 - * @{ 1103 - */ 1104 - /** Pipe A SDVO/UDI clock multiplier/divider register for G965. */ 1105 - #define DPLL_A_MD 0x0601c 1106 - /** Pipe B SDVO/UDI clock multiplier/divider register for G965. */ 1107 - #define DPLL_B_MD 0x06020 1108 - /** 1109 - * UDI pixel divider, controlling how many pixels are stuffed into a packet. 1110 - * 1111 - * Value is pixels minus 1. Must be set to 1 pixel for SDVO. 1112 - */ 1113 - # define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 1114 - # define DPLL_MD_UDI_DIVIDER_SHIFT 24 1115 - /** UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */ 1116 - # define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 1117 - # define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 1118 - /** 1119 - * SDVO/UDI pixel multiplier. 1120 - * 1121 - * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus 1122 - * clock rate is 10 times the DPLL clock. At low resolution/refresh rate 1123 - * modes, the bus rate would be below the limits, so SDVO allows for stuffing 1124 - * dummy bytes in the datastream at an increased clock rate, with both sides of 1125 - * the link knowing how many bytes are fill. 1126 - * 1127 - * So, for a mode with a dotclock of 65Mhz, we would want to double the clock 1128 - * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be 1129 - * set to 130Mhz, and the SDVO multiplier set to 2x in this register and 1130 - * through an SDVO command. 1131 - * 1132 - * This register field has values of multiplication factor minus 1, with 1133 - * a maximum multiplier of 5 for SDVO. 1134 - */ 1135 - # define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 1136 - # define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 1137 - /** SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. 1138 - * This best be set to the default value (3) or the CRT won't work. No, 1139 - * I don't entirely understand what this does... 
1140 - */ 1141 - # define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 1142 - # define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 1143 - /** @} */ 1144 - 1145 - #define DPLL_TEST 0x606c 1146 - # define DPLLB_TEST_SDVO_DIV_1 (0 << 22) 1147 - # define DPLLB_TEST_SDVO_DIV_2 (1 << 22) 1148 - # define DPLLB_TEST_SDVO_DIV_4 (2 << 22) 1149 - # define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) 1150 - # define DPLLB_TEST_N_BYPASS (1 << 19) 1151 - # define DPLLB_TEST_M_BYPASS (1 << 18) 1152 - # define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) 1153 - # define DPLLA_TEST_N_BYPASS (1 << 3) 1154 - # define DPLLA_TEST_M_BYPASS (1 << 2) 1155 - # define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) 1156 - 1157 - #define ADPA 0x61100 1158 - #define ADPA_DAC_ENABLE (1<<31) 1159 - #define ADPA_DAC_DISABLE 0 1160 - #define ADPA_PIPE_SELECT_MASK (1<<30) 1161 - #define ADPA_PIPE_A_SELECT 0 1162 - #define ADPA_PIPE_B_SELECT (1<<30) 1163 - #define ADPA_USE_VGA_HVPOLARITY (1<<15) 1164 - #define ADPA_SETS_HVPOLARITY 0 1165 - #define ADPA_VSYNC_CNTL_DISABLE (1<<11) 1166 - #define ADPA_VSYNC_CNTL_ENABLE 0 1167 - #define ADPA_HSYNC_CNTL_DISABLE (1<<10) 1168 - #define ADPA_HSYNC_CNTL_ENABLE 0 1169 - #define ADPA_VSYNC_ACTIVE_HIGH (1<<4) 1170 - #define ADPA_VSYNC_ACTIVE_LOW 0 1171 - #define ADPA_HSYNC_ACTIVE_HIGH (1<<3) 1172 - #define ADPA_HSYNC_ACTIVE_LOW 0 1173 - 1174 - #define FPA0 0x06040 1175 - #define FPA1 0x06044 1176 - #define FPB0 0x06048 1177 - #define FPB1 0x0604c 1178 - # define FP_N_DIV_MASK 0x003f0000 1179 - # define FP_N_DIV_SHIFT 16 1180 - # define FP_M1_DIV_MASK 0x00003f00 1181 - # define FP_M1_DIV_SHIFT 8 1182 - # define FP_M2_DIV_MASK 0x0000003f 1183 - # define FP_M2_DIV_SHIFT 0 1184 - 1185 - 1186 - #define PORT_HOTPLUG_EN 0x61110 1187 - # define SDVOB_HOTPLUG_INT_EN (1 << 26) 1188 - # define SDVOC_HOTPLUG_INT_EN (1 << 25) 1189 - # define TV_HOTPLUG_INT_EN (1 << 18) 1190 - # define CRT_HOTPLUG_INT_EN (1 << 9) 1191 - # define CRT_HOTPLUG_FORCE_DETECT (1 << 3) 1192 - 1193 - #define PORT_HOTPLUG_STAT 0x61114 1194 - # define CRT_HOTPLUG_INT_STATUS (1 << 11) 1195 - # define TV_HOTPLUG_INT_STATUS (1 << 10) 1196 - # define CRT_HOTPLUG_MONITOR_MASK (3 << 8) 1197 - # define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) 1198 - # define CRT_HOTPLUG_MONITOR_MONO (2 << 8) 1199 - # define CRT_HOTPLUG_MONITOR_NONE (0 << 8) 1200 - # define SDVOC_HOTPLUG_INT_STATUS (1 << 7) 1201 - # define SDVOB_HOTPLUG_INT_STATUS (1 << 6) 1202 - 1203 - #define SDVOB 0x61140 1204 - #define SDVOC 0x61160 1205 - #define SDVO_ENABLE (1 << 31) 1206 - #define SDVO_PIPE_B_SELECT (1 << 30) 1207 - #define SDVO_STALL_SELECT (1 << 29) 1208 - #define SDVO_INTERRUPT_ENABLE (1 << 26) 1209 - /** 1210 - * 915G/GM SDVO pixel multiplier. 1211 - * 1212 - * Programmed value is multiplier - 1, up to 5x. 1213 - * 1214 - * \sa DPLL_MD_UDI_MULTIPLIER_MASK 1215 - */ 1216 - #define SDVO_PORT_MULTIPLY_MASK (7 << 23) 1217 - #define SDVO_PORT_MULTIPLY_SHIFT 23 1218 - #define SDVO_PHASE_SELECT_MASK (15 << 19) 1219 - #define SDVO_PHASE_SELECT_DEFAULT (6 << 19) 1220 - #define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) 1221 - #define SDVOC_GANG_MODE (1 << 16) 1222 - #define SDVO_BORDER_ENABLE (1 << 7) 1223 - #define SDVOB_PCIE_CONCURRENCY (1 << 3) 1224 - #define SDVO_DETECTED (1 << 2) 1225 - /* Bits to be preserved when writing */ 1226 - #define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14)) 1227 - #define SDVOC_PRESERVE_MASK (1 << 17) 1228 - 1229 - /** @defgroup LVDS 1230 - * @{ 1231 - */ 1232 - /** 1233 - * This register controls the LVDS output enable, pipe selection, and data 1234 - * format selection. 
1235 - * 1236 - * All of the clock/data pairs are force powered down by power sequencing. 1237 - */ 1238 - #define LVDS 0x61180 1239 - /** 1240 - * Enables the LVDS port. This bit must be set before DPLLs are enabled, as 1241 - * the DPLL semantics change when the LVDS is assigned to that pipe. 1242 - */ 1243 - # define LVDS_PORT_EN (1 << 31) 1244 - /** Selects pipe B for LVDS data. Must be set on pre-965. */ 1245 - # define LVDS_PIPEB_SELECT (1 << 30) 1246 - 1247 - /** 1248 - * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per 1249 - * pixel. 1250 - */ 1251 - # define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) 1252 - # define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) 1253 - # define LVDS_A0A2_CLKA_POWER_UP (3 << 8) 1254 - /** 1255 - * Controls the A3 data pair, which contains the additional LSBs for 24 bit 1256 - * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be 1257 - * on. 1258 - */ 1259 - # define LVDS_A3_POWER_MASK (3 << 6) 1260 - # define LVDS_A3_POWER_DOWN (0 << 6) 1261 - # define LVDS_A3_POWER_UP (3 << 6) 1262 - /** 1263 - * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP 1264 - * is set. 1265 - */ 1266 - # define LVDS_CLKB_POWER_MASK (3 << 4) 1267 - # define LVDS_CLKB_POWER_DOWN (0 << 4) 1268 - # define LVDS_CLKB_POWER_UP (3 << 4) 1269 - 1270 - /** 1271 - * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 1272 - * setting for whether we are in dual-channel mode. The B3 pair will 1273 - * additionally only be powered up when LVDS_A3_POWER_UP is set. 1274 - */ 1275 - # define LVDS_B0B3_POWER_MASK (3 << 2) 1276 - # define LVDS_B0B3_POWER_DOWN (0 << 2) 1277 - # define LVDS_B0B3_POWER_UP (3 << 2) 1278 - 1279 - #define PIPEACONF 0x70008 1280 - #define PIPEACONF_ENABLE (1<<31) 1281 - #define PIPEACONF_DISABLE 0 1282 - #define PIPEACONF_DOUBLE_WIDE (1<<30) 1283 - #define I965_PIPECONF_ACTIVE (1<<30) 1284 - #define PIPEACONF_SINGLE_WIDE 0 1285 - #define PIPEACONF_PIPE_UNLOCKED 0 1286 - #define PIPEACONF_PIPE_LOCKED (1<<25) 1287 - #define PIPEACONF_PALETTE 0 1288 - #define PIPEACONF_GAMMA (1<<24) 1289 - #define PIPECONF_FORCE_BORDER (1<<25) 1290 - #define PIPECONF_PROGRESSIVE (0 << 21) 1291 - #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) 1292 - #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) 1293 - 1294 - #define DSPARB 0x70030 1295 - #define DSPARB_CSTART_MASK (0x7f << 7) 1296 - #define DSPARB_CSTART_SHIFT 7 1297 - #define DSPARB_BSTART_MASK (0x7f) 1298 - #define DSPARB_BSTART_SHIFT 0 1299 - 1300 - #define PIPEBCONF 0x71008 1301 - #define PIPEBCONF_ENABLE (1<<31) 1302 - #define PIPEBCONF_DISABLE 0 1303 - #define PIPEBCONF_DOUBLE_WIDE (1<<30) 1304 - #define PIPEBCONF_DISABLE 0 1305 - #define PIPEBCONF_GAMMA (1<<24) 1306 - #define PIPEBCONF_PALETTE 0 1307 - 1308 - #define PIPEBGCMAXRED 0x71010 1309 - #define PIPEBGCMAXGREEN 0x71014 1310 - #define PIPEBGCMAXBLUE 0x71018 1311 - #define PIPEBSTAT 0x71024 1312 - #define PIPEBFRAMEHIGH 0x71040 1313 - #define PIPEBFRAMEPIXEL 0x71044 1314 - 1315 - #define DSPACNTR 0x70180 1316 - #define DSPBCNTR 0x71180 1317 - #define DISPLAY_PLANE_ENABLE (1<<31) 1318 - #define DISPLAY_PLANE_DISABLE 0 1319 - #define DISPPLANE_GAMMA_ENABLE (1<<30) 1320 - #define DISPPLANE_GAMMA_DISABLE 0 1321 - #define DISPPLANE_PIXFORMAT_MASK (0xf<<26) 1322 - #define DISPPLANE_8BPP (0x2<<26) 1323 - #define DISPPLANE_15_16BPP (0x4<<26) 1324 - #define DISPPLANE_16BPP (0x5<<26) 1325 - #define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) 1326 - #define DISPPLANE_32BPP (0x7<<26) 1327 - #define 
DISPPLANE_STEREO_ENABLE (1<<25) 1328 - #define DISPPLANE_STEREO_DISABLE 0 1329 - #define DISPPLANE_SEL_PIPE_MASK (1<<24) 1330 - #define DISPPLANE_SEL_PIPE_A 0 1331 - #define DISPPLANE_SEL_PIPE_B (1<<24) 1332 - #define DISPPLANE_SRC_KEY_ENABLE (1<<22) 1333 - #define DISPPLANE_SRC_KEY_DISABLE 0 1334 - #define DISPPLANE_LINE_DOUBLE (1<<20) 1335 - #define DISPPLANE_NO_LINE_DOUBLE 0 1336 - #define DISPPLANE_STEREO_POLARITY_FIRST 0 1337 - #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 1338 - /* plane B only */ 1339 - #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) 1340 - #define DISPPLANE_ALPHA_TRANS_DISABLE 0 1341 - #define DISPPLANE_SPRITE_ABOVE_DISPLAYA 0 1342 - #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) 1343 - 1344 - #define DSPABASE 0x70184 1345 - #define DSPASTRIDE 0x70188 1346 - 1347 - #define DSPBBASE 0x71184 1348 - #define DSPBADDR DSPBBASE 1349 - #define DSPBSTRIDE 0x71188 1350 - 1351 - #define DSPAKEYVAL 0x70194 1352 - #define DSPAKEYMASK 0x70198 1353 - 1354 - #define DSPAPOS 0x7018C /* reserved */ 1355 - #define DSPASIZE 0x70190 1356 - #define DSPBPOS 0x7118C 1357 - #define DSPBSIZE 0x71190 1358 - 1359 - #define DSPASURF 0x7019C 1360 - #define DSPATILEOFF 0x701A4 1361 - 1362 - #define DSPBSURF 0x7119C 1363 - #define DSPBTILEOFF 0x711A4 1364 - 1365 - #define VGACNTRL 0x71400 1366 - # define VGA_DISP_DISABLE (1 << 31) 1367 - # define VGA_2X_MODE (1 << 30) 1368 - # define VGA_PIPE_B_SELECT (1 << 29) 1369 - 1370 - /* 1371 - * Some BIOS scratch area registers. The 845 (and 830?) store the amount 1372 - * of video memory available to the BIOS in SWF1. 1373 - */ 1374 - 1375 - #define SWF0 0x71410 1376 - 1377 - /* 1378 - * 855 scratch registers. 1379 - */ 1380 - #define SWF10 0x70410 1381 - 1382 - #define SWF30 0x72414 1383 - 1384 - /* 1385 - * Overlay registers. These are overlay registers accessed via MMIO. 1386 - * Those loaded via the overlay register page are defined in i830_video.c. 1387 - */ 1388 - #define OVADD 0x30000 1389 - 1390 - #define DOVSTA 0x30008 1391 - #define OC_BUF (0x3<<20) 1392 - 1393 - #define OGAMC5 0x30010 1394 - #define OGAMC4 0x30014 1395 - #define OGAMC3 0x30018 1396 - #define OGAMC2 0x3001c 1397 - #define OGAMC1 0x30020 1398 - #define OGAMC0 0x30024 1399 - /* 1400 - * Palette registers 1401 - */ 1402 - #define PALETTE_A 0x0a000 1403 - #define PALETTE_B 0x0a800 1404 1405 #define IS_I830(dev) ((dev)->pci_device == 0x3577) 1406 #define IS_845G(dev) ((dev)->pci_device == 0x2562) ··· 639 640 #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) 641 642 - #define IS_IGD_GM(dev) ((dev)->pci_device == 0x2A42) 643 644 #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ 645 (dev)->pci_device == 0x2E12 || \ ··· 653 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 654 655 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 656 - IS_I945GM(dev) || IS_I965GM(dev) || IS_IGD_GM(dev)) 657 658 - #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_IGD_GM(dev) || IS_G4X(dev)) 659 660 #define PRIMARY_RINGBUFFER_SIZE (128*1024) 661
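The register definitions above are not lost; the rewritten i915_drv.h below pulls them back in through the new i915_reg.h include. As a worked example of the SDVO/UDI multiplier arithmetic described in the DPLL_MD comments (a 65 MHz dotclock is doubled so a 130 MHz DPLL clock yields a 1.30 GHz bus rate), here is a minimal sketch, not part of the patch, assuming the DPLL_MD_UDI_MULTIPLIER_* names keep their values after the move:

static u32 sdvo_udi_multiplier_bits(int dotclock_khz)
{
	int multiplier = 1;

	/* The bus clock runs at 10x the DPLL clock and must sit between
	 * 1 and 2 GHz, so low dotclocks are multiplied up (max 5x for SDVO).
	 * E.g. 65,000 kHz * 10 = 650 MHz is too low; 2x gives 1.30 GHz.
	 */
	while (dotclock_khz * multiplier * 10 < 1000000 && multiplier < 5)
		multiplier++;

	/* The register field stores the multiplication factor minus 1. */
	return ((multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT) &
	       DPLL_MD_UDI_MULTIPLIER_MASK;
}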
··· 30 #ifndef _I915_DRV_H_ 31 #define _I915_DRV_H_ 32 33 + #include "i915_reg.h" 34 + 35 /* General customization: 36 */ 37 ··· 37 38 #define DRIVER_NAME "i915" 39 #define DRIVER_DESC "Intel Graphics" 40 + #define DRIVER_DATE "20080730" 41 + 42 + enum pipe { 43 + PIPE_A = 0, 44 + PIPE_B, 45 + }; 46 47 /* Interface history: 48 * ··· 53 #define DRIVER_MINOR 6 54 #define DRIVER_PATCHLEVEL 0 55 56 + #define WATCH_COHERENCY 0 57 + #define WATCH_BUF 0 58 + #define WATCH_EXEC 0 59 + #define WATCH_LRU 0 60 + #define WATCH_RELOC 0 61 + #define WATCH_INACTIVE 0 62 + #define WATCH_PWRITE 0 63 + 64 typedef struct _drm_i915_ring_buffer { 65 int tail_mask; 66 unsigned long Size; 67 u8 *virtual_start; 68 int head; 69 int tail; 70 int space; 71 drm_local_map_t map; 72 + struct drm_gem_object *ring_obj; 73 } drm_i915_ring_buffer_t; 74 75 struct mem_block { ··· 76 typedef struct _drm_i915_vbl_swap { 77 struct list_head head; 78 drm_drawable_t drw_id; 79 + unsigned int plane; 80 unsigned int sequence; 81 } drm_i915_vbl_swap_t; 82 83 + struct opregion_header; 84 + struct opregion_acpi; 85 + struct opregion_swsci; 86 + struct opregion_asle; 87 + 88 + struct intel_opregion { 89 + struct opregion_header *header; 90 + struct opregion_acpi *acpi; 91 + struct opregion_swsci *swsci; 92 + struct opregion_asle *asle; 93 + int enabled; 94 + }; 95 + 96 typedef struct drm_i915_private { 97 + struct drm_device *dev; 98 + 99 + void __iomem *regs; 100 drm_local_map_t *sarea; 101 102 drm_i915_sarea_t *sarea_priv; 103 drm_i915_ring_buffer_t ring; ··· 90 drm_dma_handle_t *status_page_dmah; 91 void *hw_status_page; 92 dma_addr_t dma_status_page; 93 + uint32_t counter; 94 unsigned int status_gfx_addr; 95 drm_local_map_t hws_map; 96 + struct drm_gem_object *hws_obj; 97 98 unsigned int cpp; 99 int back_offset; 100 int front_offset; 101 int current_page; 102 int page_flipping; 103 104 wait_queue_head_t irq_queue; 105 atomic_t irq_received; 106 + /** Protects user_irq_refcount and irq_mask_reg */ 107 + spinlock_t user_irq_lock; 108 + /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). 
*/ 109 + int user_irq_refcount; 110 + /** Cached value of IMR to avoid reads in updating the bitfield */ 111 + u32 irq_mask_reg; 112 113 int tex_lru_log_granularity; 114 int allow_batchbuffer; ··· 114 spinlock_t swaps_lock; 115 drm_i915_vbl_swap_t vbl_swaps; 116 unsigned int swaps_pending; 117 + 118 + struct intel_opregion opregion; 119 120 /* Register state */ 121 u8 saveLBB; ··· 139 u32 saveDSPASTRIDE; 140 u32 saveDSPASIZE; 141 u32 saveDSPAPOS; 142 + u32 saveDSPAADDR; 143 u32 saveDSPASURF; 144 u32 saveDSPATILEOFF; 145 u32 savePFIT_PGM_RATIOS; ··· 160 u32 saveDSPBSTRIDE; 161 u32 saveDSPBSIZE; 162 u32 saveDSPBPOS; 163 + u32 saveDSPBADDR; 164 u32 saveDSPBSURF; 165 u32 saveDSPBTILEOFF; 166 + u32 saveVGA0; 167 + u32 saveVGA1; 168 + u32 saveVGA_PD; 169 u32 saveVGACNTRL; 170 u32 saveADPA; 171 u32 saveLVDS; 172 + u32 savePP_ON_DELAYS; 173 + u32 savePP_OFF_DELAYS; 174 u32 saveDVOA; 175 u32 saveDVOB; 176 u32 saveDVOC; 177 u32 savePP_ON; 178 u32 savePP_OFF; 179 u32 savePP_CONTROL; 180 + u32 savePP_DIVISOR; 181 u32 savePFIT_CONTROL; 182 u32 save_palette_a[256]; 183 u32 save_palette_b[256]; ··· 190 u32 saveIMR; 191 u32 saveCACHE_MODE_0; 192 u32 saveD_STATE; 193 + u32 saveCG_2D_DIS; 194 u32 saveMI_ARB_STATE; 195 u32 saveSWF0[16]; 196 u32 saveSWF1[16]; ··· 203 u8 saveDACMASK; 204 u8 saveDACDATA[256*3]; /* 256 3-byte colors */ 205 u8 saveCR[37]; 206 + 207 + struct { 208 + struct drm_mm gtt_space; 209 + 210 + /** 211 + * List of objects currently involved in rendering from the 212 + * ringbuffer. 213 + * 214 + * A reference is held on the buffer while on this list. 215 + */ 216 + struct list_head active_list; 217 + 218 + /** 219 + * List of objects which are not in the ringbuffer but which 220 + * still have a write_domain which needs to be flushed before 221 + * unbinding. 222 + * 223 + * A reference is held on the buffer while on this list. 224 + */ 225 + struct list_head flushing_list; 226 + 227 + /** 228 + * LRU list of objects which are not in the ringbuffer and 229 + * are ready to unbind, but are still in the GTT. 230 + * 231 + * A reference is not held on the buffer while on this list, 232 + * as merely being GTT-bound shouldn't prevent its being 233 + * freed, and we'll pull it off the list in the free path. 234 + */ 235 + struct list_head inactive_list; 236 + 237 + /** 238 + * List of breadcrumbs associated with GPU requests currently 239 + * outstanding. 240 + */ 241 + struct list_head request_list; 242 + 243 + /** 244 + * We leave the user IRQ off as much as possible, 245 + * but this means that requests will finish and never 246 + * be retired once the system goes idle. Set a timer to 247 + * fire periodically while the ring is running. When it 248 + * fires, go retire requests. 249 + */ 250 + struct delayed_work retire_work; 251 + 252 + /** Work task for vblank-related ring access */ 253 + struct work_struct vblank_work; 254 + 255 + uint32_t next_gem_seqno; 256 + 257 + /** 258 + * Waiting sequence number, if any 259 + */ 260 + uint32_t waiting_gem_seqno; 261 + 262 + /** 263 + * Last seq seen at irq time 264 + */ 265 + uint32_t irq_gem_seqno; 266 + 267 + /** 268 + * Flag if the X Server, and thus DRM, is not currently in 269 + * control of the device. 270 + * 271 + * This is set between LeaveVT and EnterVT. It needs to be 272 + * replaced with a semaphore. It also needs to be 273 + * transitioned away from for kernel modesetting. 274 + */ 275 + int suspended; 276 + 277 + /** 278 + * Flag if the hardware appears to be wedged. 
279 + * 280 + * This is set when attempts to idle the device timeout. 281 + * It prevents command submission from occuring and makes 282 + * every pending request fail 283 + */ 284 + int wedged; 285 + 286 + /** Bit 6 swizzling required for X tiling */ 287 + uint32_t bit_6_swizzle_x; 288 + /** Bit 6 swizzling required for Y tiling */ 289 + uint32_t bit_6_swizzle_y; 290 + } mm; 291 } drm_i915_private_t; 292 + 293 + /** driver private structure attached to each drm_gem_object */ 294 + struct drm_i915_gem_object { 295 + struct drm_gem_object *obj; 296 + 297 + /** Current space allocated to this object in the GTT, if any. */ 298 + struct drm_mm_node *gtt_space; 299 + 300 + /** This object's place on the active/flushing/inactive lists */ 301 + struct list_head list; 302 + 303 + /** 304 + * This is set if the object is on the active or flushing lists 305 + * (has pending rendering), and is not set if it's on inactive (ready 306 + * to be unbound). 307 + */ 308 + int active; 309 + 310 + /** 311 + * This is set if the object has been written to since last bound 312 + * to the GTT 313 + */ 314 + int dirty; 315 + 316 + /** AGP memory structure for our GTT binding. */ 317 + DRM_AGP_MEM *agp_mem; 318 + 319 + struct page **page_list; 320 + 321 + /** 322 + * Current offset of the object in GTT space. 323 + * 324 + * This is the same as gtt_space->start 325 + */ 326 + uint32_t gtt_offset; 327 + 328 + /** Boolean whether this object has a valid gtt offset. */ 329 + int gtt_bound; 330 + 331 + /** How many users have pinned this object in GTT space */ 332 + int pin_count; 333 + 334 + /** Breadcrumb of last rendering to the buffer. */ 335 + uint32_t last_rendering_seqno; 336 + 337 + /** Current tiling mode for the object. */ 338 + uint32_t tiling_mode; 339 + 340 + /** AGP mapping type (AGP_USER_MEMORY or AGP_USER_CACHED_MEMORY */ 341 + uint32_t agp_type; 342 + 343 + /** 344 + * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when 345 + * GEM_DOMAIN_CPU is not in the object's read domain. 346 + */ 347 + uint8_t *page_cpu_valid; 348 + }; 349 + 350 + /** 351 + * Request queue structure. 352 + * 353 + * The request queue allows us to note sequence numbers that have been emitted 354 + * and may be associated with active buffers to be retired. 355 + * 356 + * By keeping this list, we can avoid having to do questionable 357 + * sequence-number comparisons on buffer last_rendering_seqnos, and associate 358 + * an emission time with seqnos for tracking how far ahead of the GPU we are. 359 + */ 360 + struct drm_i915_gem_request { 361 + /** GEM sequence number associated with this request. */ 362 + uint32_t seqno; 363 + 364 + /** Time at which this request was emitted, in jiffies. */ 365 + unsigned long emitted_jiffies; 366 + 367 + /** Cache domains that were flushed at the start of the request. 
*/ 368 + uint32_t flush_domains; 369 + 370 + struct list_head list; 371 + }; 372 + 373 + struct drm_i915_file_private { 374 + struct { 375 + uint32_t last_gem_seqno; 376 + uint32_t last_gem_throttle_seqno; 377 + } mm; 378 + }; 379 380 extern struct drm_ioctl_desc i915_ioctls[]; 381 extern int i915_max_ioctl; ··· 212 extern void i915_kernel_lost_context(struct drm_device * dev); 213 extern int i915_driver_load(struct drm_device *, unsigned long flags); 214 extern int i915_driver_unload(struct drm_device *); 215 + extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); 216 extern void i915_driver_lastclose(struct drm_device * dev); 217 extern void i915_driver_preclose(struct drm_device *dev, 218 struct drm_file *file_priv); 219 + extern void i915_driver_postclose(struct drm_device *dev, 220 + struct drm_file *file_priv); 221 extern int i915_driver_device_is_agp(struct drm_device * dev); 222 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 223 unsigned long arg); 224 + extern int i915_emit_box(struct drm_device *dev, 225 + struct drm_clip_rect __user *boxes, 226 + int i, int DR1, int DR4); 227 228 /* i915_irq.c */ 229 extern int i915_irq_emit(struct drm_device *dev, void *data, 230 struct drm_file *file_priv); 231 extern int i915_irq_wait(struct drm_device *dev, void *data, 232 struct drm_file *file_priv); 233 + void i915_user_irq_get(struct drm_device *dev); 234 + void i915_user_irq_put(struct drm_device *dev); 235 236 + extern void i915_gem_vblank_work_handler(struct work_struct *work); 237 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 238 extern void i915_driver_irq_preinstall(struct drm_device * dev); 239 + extern int i915_driver_irq_postinstall(struct drm_device *dev); 240 extern void i915_driver_irq_uninstall(struct drm_device * dev); 241 extern int i915_vblank_pipe_set(struct drm_device *dev, void *data, 242 struct drm_file *file_priv); 243 extern int i915_vblank_pipe_get(struct drm_device *dev, void *data, 244 struct drm_file *file_priv); 245 + extern int i915_enable_vblank(struct drm_device *dev, int crtc); 246 + extern void i915_disable_vblank(struct drm_device *dev, int crtc); 247 + extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); 248 extern int i915_vblank_swap(struct drm_device *dev, void *data, 249 struct drm_file *file_priv); 250 + extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); 251 252 /* i915_mem.c */ 253 extern int i915_mem_alloc(struct drm_device *dev, void *data, ··· 250 extern void i915_mem_takedown(struct mem_block **heap); 251 extern void i915_mem_release(struct drm_device * dev, 252 struct drm_file *file_priv, struct mem_block *heap); 253 + /* i915_gem.c */ 254 + int i915_gem_init_ioctl(struct drm_device *dev, void *data, 255 + struct drm_file *file_priv); 256 + int i915_gem_create_ioctl(struct drm_device *dev, void *data, 257 + struct drm_file *file_priv); 258 + int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 259 + struct drm_file *file_priv); 260 + int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 261 + struct drm_file *file_priv); 262 + int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 263 + struct drm_file *file_priv); 264 + int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 265 + struct drm_file *file_priv); 266 + int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 267 + struct drm_file *file_priv); 268 + int i915_gem_execbuffer(struct drm_device *dev, void *data, 269 + struct drm_file *file_priv); 270 + 
int i915_gem_pin_ioctl(struct drm_device *dev, void *data, 271 + struct drm_file *file_priv); 272 + int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 273 + struct drm_file *file_priv); 274 + int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 275 + struct drm_file *file_priv); 276 + int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 277 + struct drm_file *file_priv); 278 + int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 279 + struct drm_file *file_priv); 280 + int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 281 + struct drm_file *file_priv); 282 + int i915_gem_set_tiling(struct drm_device *dev, void *data, 283 + struct drm_file *file_priv); 284 + int i915_gem_get_tiling(struct drm_device *dev, void *data, 285 + struct drm_file *file_priv); 286 + void i915_gem_load(struct drm_device *dev); 287 + int i915_gem_proc_init(struct drm_minor *minor); 288 + void i915_gem_proc_cleanup(struct drm_minor *minor); 289 + int i915_gem_init_object(struct drm_gem_object *obj); 290 + void i915_gem_free_object(struct drm_gem_object *obj); 291 + int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 292 + void i915_gem_object_unpin(struct drm_gem_object *obj); 293 + void i915_gem_lastclose(struct drm_device *dev); 294 + uint32_t i915_get_gem_seqno(struct drm_device *dev); 295 + void i915_gem_retire_requests(struct drm_device *dev); 296 + void i915_gem_retire_work_handler(struct work_struct *work); 297 + void i915_gem_clflush_object(struct drm_gem_object *obj); 298 299 + /* i915_gem_tiling.c */ 300 + void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 301 + 302 + /* i915_gem_debug.c */ 303 + void i915_gem_dump_object(struct drm_gem_object *obj, int len, 304 + const char *where, uint32_t mark); 305 + #if WATCH_INACTIVE 306 + void i915_verify_inactive(struct drm_device *dev, char *file, int line); 307 + #else 308 + #define i915_verify_inactive(dev, file, line) 309 + #endif 310 + void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); 311 + void i915_gem_dump_object(struct drm_gem_object *obj, int len, 312 + const char *where, uint32_t mark); 313 + void i915_dump_lru(struct drm_device *dev, const char *where); 314 + 315 + /* i915_suspend.c */ 316 + extern int i915_save_state(struct drm_device *dev); 317 + extern int i915_restore_state(struct drm_device *dev); 318 + 319 + /* i915_suspend.c */ 320 + extern int i915_save_state(struct drm_device *dev); 321 + extern int i915_restore_state(struct drm_device *dev); 322 + 323 + /* i915_opregion.c */ 324 + extern int intel_opregion_init(struct drm_device *dev); 325 + extern void intel_opregion_free(struct drm_device *dev); 326 + extern void opregion_asle_intr(struct drm_device *dev); 327 + extern void opregion_enable_asle(struct drm_device *dev); 328 + 329 + /** 330 + * Lock test for when it's just for synchronization of ring access. 331 + * 332 + * In that case, we don't need to do it when GEM is initialized as nobody else 333 + * has access to the ring. 
334 + */ 335 + #define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \ 336 + if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \ 337 + LOCK_TEST_WITH_RETURN(dev, file_priv); \ 338 + } while (0) 339 + 340 + #define I915_READ(reg) readl(dev_priv->regs + (reg)) 341 + #define I915_WRITE(reg, val) writel(val, dev_priv->regs + (reg)) 342 + #define I915_READ16(reg) readw(dev_priv->regs + (reg)) 343 + #define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg)) 344 + #define I915_READ8(reg) readb(dev_priv->regs + (reg)) 345 + #define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg)) 346 347 #define I915_VERBOSE 0 348 ··· 284 if (I915_VERBOSE) DRM_DEBUG("ADVANCE_LP_RING %x\n", outring); \ 285 dev_priv->ring.tail = outring; \ 286 dev_priv->ring.space -= outcount * 4; \ 287 + I915_WRITE(PRB0_TAIL, outring); \ 288 } while(0) 289 290 + /** 291 + * Reads a dword out of the status page, which is written to from the command 292 + * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or 293 + * MI_STORE_DATA_IMM. 294 * 295 + * The following dwords have a reserved meaning: 296 + * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes. 297 + * 0x04: ring 0 head pointer 298 + * 0x05: ring 1 head pointer (915-class) 299 + * 0x06: ring 2 head pointer (915-class) 300 + * 0x10-0x1b: Context status DWords (GM45) 301 + * 0x1f: Last written status offset. (GM45) 302 + * 303 + * The area from dword 0x20 to 0x3ff is available for driver usage. 304 */ 305 #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 306 + #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5) 307 + #define I915_GEM_HWS_INDEX 0x20 308 309 + extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 310 311 #define IS_I830(dev) ((dev)->pci_device == 0x3577) 312 #define IS_845G(dev) ((dev)->pci_device == 0x2562) ··· 1119 1120 #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02) 1121 1122 + #define IS_GM45(dev) ((dev)->pci_device == 0x2A42) 1123 1124 #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \ 1125 (dev)->pci_device == 0x2E12 || \ ··· 1133 IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev)) 1134 1135 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \ 1136 + IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) 1137 1138 + #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) 1139 1140 #define PRIMARY_RINGBUFFER_SIZE (128*1024) 1141
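A short usage sketch for the status-page helpers defined above, not part of the patch: dword 5 holds the legacy breadcrumb and dword 0x20 (I915_GEM_HWS_INDEX) is the first driver-owned slot, which the new GEM code below uses for its sequence number.

static void dump_hw_status(drm_i915_private_t *dev_priv)
{
	u32 breadcrumb = READ_BREADCRUMB(dev_priv);		  /* dword 5 */
	u32 gem_seqno = READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); /* dword 0x20 */

	DRM_DEBUG("breadcrumb %u, gem seqno %u\n", breadcrumb, gem_seqno);
}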
+2558
drivers/gpu/drm/i915/i915_gem.c
···
··· 1 + /* 2 + * Copyright © 2008 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Eric Anholt <eric@anholt.net> 25 + * 26 + */ 27 + 28 + #include "drmP.h" 29 + #include "drm.h" 30 + #include "i915_drm.h" 31 + #include "i915_drv.h" 32 + #include <linux/swap.h> 33 + 34 + static int 35 + i915_gem_object_set_domain(struct drm_gem_object *obj, 36 + uint32_t read_domains, 37 + uint32_t write_domain); 38 + static int 39 + i915_gem_object_set_domain_range(struct drm_gem_object *obj, 40 + uint64_t offset, 41 + uint64_t size, 42 + uint32_t read_domains, 43 + uint32_t write_domain); 44 + static int 45 + i915_gem_set_domain(struct drm_gem_object *obj, 46 + struct drm_file *file_priv, 47 + uint32_t read_domains, 48 + uint32_t write_domain); 49 + static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 50 + static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 51 + static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 52 + 53 + static void 54 + i915_gem_cleanup_ringbuffer(struct drm_device *dev); 55 + 56 + int 57 + i915_gem_init_ioctl(struct drm_device *dev, void *data, 58 + struct drm_file *file_priv) 59 + { 60 + drm_i915_private_t *dev_priv = dev->dev_private; 61 + struct drm_i915_gem_init *args = data; 62 + 63 + mutex_lock(&dev->struct_mutex); 64 + 65 + if (args->gtt_start >= args->gtt_end || 66 + (args->gtt_start & (PAGE_SIZE - 1)) != 0 || 67 + (args->gtt_end & (PAGE_SIZE - 1)) != 0) { 68 + mutex_unlock(&dev->struct_mutex); 69 + return -EINVAL; 70 + } 71 + 72 + drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start, 73 + args->gtt_end - args->gtt_start); 74 + 75 + dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start); 76 + 77 + mutex_unlock(&dev->struct_mutex); 78 + 79 + return 0; 80 + } 81 + 82 + 83 + /** 84 + * Creates a new mm object and returns a handle to it. 
85 + */ 86 + int 87 + i915_gem_create_ioctl(struct drm_device *dev, void *data, 88 + struct drm_file *file_priv) 89 + { 90 + struct drm_i915_gem_create *args = data; 91 + struct drm_gem_object *obj; 92 + int handle, ret; 93 + 94 + args->size = roundup(args->size, PAGE_SIZE); 95 + 96 + /* Allocate the new object */ 97 + obj = drm_gem_object_alloc(dev, args->size); 98 + if (obj == NULL) 99 + return -ENOMEM; 100 + 101 + ret = drm_gem_handle_create(file_priv, obj, &handle); 102 + mutex_lock(&dev->struct_mutex); 103 + drm_gem_object_handle_unreference(obj); 104 + mutex_unlock(&dev->struct_mutex); 105 + 106 + if (ret) 107 + return ret; 108 + 109 + args->handle = handle; 110 + 111 + return 0; 112 + } 113 + 114 + /** 115 + * Reads data from the object referenced by handle. 116 + * 117 + * On error, the contents of *data are undefined. 118 + */ 119 + int 120 + i915_gem_pread_ioctl(struct drm_device *dev, void *data, 121 + struct drm_file *file_priv) 122 + { 123 + struct drm_i915_gem_pread *args = data; 124 + struct drm_gem_object *obj; 125 + struct drm_i915_gem_object *obj_priv; 126 + ssize_t read; 127 + loff_t offset; 128 + int ret; 129 + 130 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 131 + if (obj == NULL) 132 + return -EBADF; 133 + obj_priv = obj->driver_private; 134 + 135 + /* Bounds check source. 136 + * 137 + * XXX: This could use review for overflow issues... 138 + */ 139 + if (args->offset > obj->size || args->size > obj->size || 140 + args->offset + args->size > obj->size) { 141 + drm_gem_object_unreference(obj); 142 + return -EINVAL; 143 + } 144 + 145 + mutex_lock(&dev->struct_mutex); 146 + 147 + ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, 148 + I915_GEM_DOMAIN_CPU, 0); 149 + if (ret != 0) { 150 + drm_gem_object_unreference(obj); 151 + mutex_unlock(&dev->struct_mutex); 152 + return ret; 153 + } 154 + 155 + offset = args->offset; 156 + 157 + read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr, 158 + args->size, &offset); 159 + if (read != args->size) { 160 + drm_gem_object_unreference(obj); 161 + mutex_unlock(&dev->struct_mutex); 162 + if (read < 0) 163 + return read; 164 + else 165 + return -EINVAL; 166 + } 167 + 168 + drm_gem_object_unreference(obj); 169 + mutex_unlock(&dev->struct_mutex); 170 + 171 + return 0; 172 + } 173 + 174 + static int 175 + i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 176 + struct drm_i915_gem_pwrite *args, 177 + struct drm_file *file_priv) 178 + { 179 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 180 + ssize_t remain; 181 + loff_t offset; 182 + char __user *user_data; 183 + char __iomem *vaddr; 184 + char *vaddr_atomic; 185 + int i, o, l; 186 + int ret = 0; 187 + unsigned long pfn; 188 + unsigned long unwritten; 189 + 190 + user_data = (char __user *) (uintptr_t) args->data_ptr; 191 + remain = args->size; 192 + if (!access_ok(VERIFY_READ, user_data, remain)) 193 + return -EFAULT; 194 + 195 + 196 + mutex_lock(&dev->struct_mutex); 197 + ret = i915_gem_object_pin(obj, 0); 198 + if (ret) { 199 + mutex_unlock(&dev->struct_mutex); 200 + return ret; 201 + } 202 + ret = i915_gem_set_domain(obj, file_priv, 203 + I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT); 204 + if (ret) 205 + goto fail; 206 + 207 + obj_priv = obj->driver_private; 208 + offset = obj_priv->gtt_offset + args->offset; 209 + obj_priv->dirty = 1; 210 + 211 + while (remain > 0) { 212 + /* Operation in this page 213 + * 214 + * i = page number 215 + * o = offset within page 216 + * l = bytes to copy 217 + */ 218 
+ i = offset >> PAGE_SHIFT; 219 + o = offset & (PAGE_SIZE-1); 220 + l = remain; 221 + if ((o + l) > PAGE_SIZE) 222 + l = PAGE_SIZE - o; 223 + 224 + pfn = (dev->agp->base >> PAGE_SHIFT) + i; 225 + 226 + #ifdef CONFIG_HIGHMEM 227 + /* This is a workaround for the low performance of iounmap 228 + * (approximate 10% cpu cost on normal 3D workloads). 229 + * kmap_atomic on HIGHMEM kernels happens to let us map card 230 + * memory without taking IPIs. When the vmap rework lands 231 + * we should be able to dump this hack. 232 + */ 233 + vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0); 234 + #if WATCH_PWRITE 235 + DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n", 236 + i, o, l, pfn, vaddr_atomic); 237 + #endif 238 + unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o, 239 + user_data, l); 240 + kunmap_atomic(vaddr_atomic, KM_USER0); 241 + 242 + if (unwritten) 243 + #endif /* CONFIG_HIGHMEM */ 244 + { 245 + vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE); 246 + #if WATCH_PWRITE 247 + DRM_INFO("pwrite slow i %d o %d l %d " 248 + "pfn %ld vaddr %p\n", 249 + i, o, l, pfn, vaddr); 250 + #endif 251 + if (vaddr == NULL) { 252 + ret = -EFAULT; 253 + goto fail; 254 + } 255 + unwritten = __copy_from_user(vaddr + o, user_data, l); 256 + #if WATCH_PWRITE 257 + DRM_INFO("unwritten %ld\n", unwritten); 258 + #endif 259 + iounmap(vaddr); 260 + if (unwritten) { 261 + ret = -EFAULT; 262 + goto fail; 263 + } 264 + } 265 + 266 + remain -= l; 267 + user_data += l; 268 + offset += l; 269 + } 270 + #if WATCH_PWRITE && 1 271 + i915_gem_clflush_object(obj); 272 + i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0); 273 + i915_gem_clflush_object(obj); 274 + #endif 275 + 276 + fail: 277 + i915_gem_object_unpin(obj); 278 + mutex_unlock(&dev->struct_mutex); 279 + 280 + return ret; 281 + } 282 + 283 + static int 284 + i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 285 + struct drm_i915_gem_pwrite *args, 286 + struct drm_file *file_priv) 287 + { 288 + int ret; 289 + loff_t offset; 290 + ssize_t written; 291 + 292 + mutex_lock(&dev->struct_mutex); 293 + 294 + ret = i915_gem_set_domain(obj, file_priv, 295 + I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU); 296 + if (ret) { 297 + mutex_unlock(&dev->struct_mutex); 298 + return ret; 299 + } 300 + 301 + offset = args->offset; 302 + 303 + written = vfs_write(obj->filp, 304 + (char __user *)(uintptr_t) args->data_ptr, 305 + args->size, &offset); 306 + if (written != args->size) { 307 + mutex_unlock(&dev->struct_mutex); 308 + if (written < 0) 309 + return written; 310 + else 311 + return -EINVAL; 312 + } 313 + 314 + mutex_unlock(&dev->struct_mutex); 315 + 316 + return 0; 317 + } 318 + 319 + /** 320 + * Writes data to the object referenced by handle. 321 + * 322 + * On error, the contents of the buffer that were to be modified are undefined. 323 + */ 324 + int 325 + i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 326 + struct drm_file *file_priv) 327 + { 328 + struct drm_i915_gem_pwrite *args = data; 329 + struct drm_gem_object *obj; 330 + struct drm_i915_gem_object *obj_priv; 331 + int ret = 0; 332 + 333 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 334 + if (obj == NULL) 335 + return -EBADF; 336 + obj_priv = obj->driver_private; 337 + 338 + /* Bounds check destination. 339 + * 340 + * XXX: This could use review for overflow issues... 
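Aside, not part of the patch: the per-page split in i915_gem_gtt_pwrite() above is plain offset arithmetic. A write of 0x100 bytes at GTT offset 0x1f80, for example, becomes two chunks: page 1 at in-page offset 0xf80 for 0x80 bytes, then page 2 at offset 0 for the remaining 0x80 bytes. A minimal standalone sketch of the same split:

static void split_into_pages(unsigned long offset, unsigned long remain)
{
	while (remain > 0) {
		unsigned long page = offset >> PAGE_SHIFT;
		unsigned long off_in_page = offset & (PAGE_SIZE - 1);
		unsigned long len = min(remain, PAGE_SIZE - off_in_page);

		DRM_DEBUG("page %lu off %lu len %lu\n", page, off_in_page, len);

		remain -= len;
		offset += len;
	}
}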
341 + */ 342 + if (args->offset > obj->size || args->size > obj->size || 343 + args->offset + args->size > obj->size) { 344 + drm_gem_object_unreference(obj); 345 + return -EINVAL; 346 + } 347 + 348 + /* We can only do the GTT pwrite on untiled buffers, as otherwise 349 + * it would end up going through the fenced access, and we'll get 350 + * different detiling behavior between reading and writing. 351 + * pread/pwrite currently are reading and writing from the CPU 352 + * perspective, requiring manual detiling by the client. 353 + */ 354 + if (obj_priv->tiling_mode == I915_TILING_NONE && 355 + dev->gtt_total != 0) 356 + ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); 357 + else 358 + ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); 359 + 360 + #if WATCH_PWRITE 361 + if (ret) 362 + DRM_INFO("pwrite failed %d\n", ret); 363 + #endif 364 + 365 + drm_gem_object_unreference(obj); 366 + 367 + return ret; 368 + } 369 + 370 + /** 371 + * Called when user space prepares to use an object 372 + */ 373 + int 374 + i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 375 + struct drm_file *file_priv) 376 + { 377 + struct drm_i915_gem_set_domain *args = data; 378 + struct drm_gem_object *obj; 379 + int ret; 380 + 381 + if (!(dev->driver->driver_features & DRIVER_GEM)) 382 + return -ENODEV; 383 + 384 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 385 + if (obj == NULL) 386 + return -EBADF; 387 + 388 + mutex_lock(&dev->struct_mutex); 389 + #if WATCH_BUF 390 + DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", 391 + obj, obj->size, args->read_domains, args->write_domain); 392 + #endif 393 + ret = i915_gem_set_domain(obj, file_priv, 394 + args->read_domains, args->write_domain); 395 + drm_gem_object_unreference(obj); 396 + mutex_unlock(&dev->struct_mutex); 397 + return ret; 398 + } 399 + 400 + /** 401 + * Called when user space has done writes to this buffer 402 + */ 403 + int 404 + i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 405 + struct drm_file *file_priv) 406 + { 407 + struct drm_i915_gem_sw_finish *args = data; 408 + struct drm_gem_object *obj; 409 + struct drm_i915_gem_object *obj_priv; 410 + int ret = 0; 411 + 412 + if (!(dev->driver->driver_features & DRIVER_GEM)) 413 + return -ENODEV; 414 + 415 + mutex_lock(&dev->struct_mutex); 416 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 417 + if (obj == NULL) { 418 + mutex_unlock(&dev->struct_mutex); 419 + return -EBADF; 420 + } 421 + 422 + #if WATCH_BUF 423 + DRM_INFO("%s: sw_finish %d (%p %d)\n", 424 + __func__, args->handle, obj, obj->size); 425 + #endif 426 + obj_priv = obj->driver_private; 427 + 428 + /* Pinned buffers may be scanout, so flush the cache */ 429 + if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { 430 + i915_gem_clflush_object(obj); 431 + drm_agp_chipset_flush(dev); 432 + } 433 + drm_gem_object_unreference(obj); 434 + mutex_unlock(&dev->struct_mutex); 435 + return ret; 436 + } 437 + 438 + /** 439 + * Maps the contents of an object, returning the address it is mapped 440 + * into. 441 + * 442 + * While the mapping holds a reference on the contents of the object, it doesn't 443 + * imply a ref on the object itself. 
444 + */ 445 + int 446 + i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 447 + struct drm_file *file_priv) 448 + { 449 + struct drm_i915_gem_mmap *args = data; 450 + struct drm_gem_object *obj; 451 + loff_t offset; 452 + unsigned long addr; 453 + 454 + if (!(dev->driver->driver_features & DRIVER_GEM)) 455 + return -ENODEV; 456 + 457 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 458 + if (obj == NULL) 459 + return -EBADF; 460 + 461 + offset = args->offset; 462 + 463 + down_write(&current->mm->mmap_sem); 464 + addr = do_mmap(obj->filp, 0, args->size, 465 + PROT_READ | PROT_WRITE, MAP_SHARED, 466 + args->offset); 467 + up_write(&current->mm->mmap_sem); 468 + mutex_lock(&dev->struct_mutex); 469 + drm_gem_object_unreference(obj); 470 + mutex_unlock(&dev->struct_mutex); 471 + if (IS_ERR((void *)addr)) 472 + return addr; 473 + 474 + args->addr_ptr = (uint64_t) addr; 475 + 476 + return 0; 477 + } 478 + 479 + static void 480 + i915_gem_object_free_page_list(struct drm_gem_object *obj) 481 + { 482 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 483 + int page_count = obj->size / PAGE_SIZE; 484 + int i; 485 + 486 + if (obj_priv->page_list == NULL) 487 + return; 488 + 489 + 490 + for (i = 0; i < page_count; i++) 491 + if (obj_priv->page_list[i] != NULL) { 492 + if (obj_priv->dirty) 493 + set_page_dirty(obj_priv->page_list[i]); 494 + mark_page_accessed(obj_priv->page_list[i]); 495 + page_cache_release(obj_priv->page_list[i]); 496 + } 497 + obj_priv->dirty = 0; 498 + 499 + drm_free(obj_priv->page_list, 500 + page_count * sizeof(struct page *), 501 + DRM_MEM_DRIVER); 502 + obj_priv->page_list = NULL; 503 + } 504 + 505 + static void 506 + i915_gem_object_move_to_active(struct drm_gem_object *obj) 507 + { 508 + struct drm_device *dev = obj->dev; 509 + drm_i915_private_t *dev_priv = dev->dev_private; 510 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 511 + 512 + /* Add a reference if we're newly entering the active list. */ 513 + if (!obj_priv->active) { 514 + drm_gem_object_reference(obj); 515 + obj_priv->active = 1; 516 + } 517 + /* Move from whatever list we were on to the tail of execution. */ 518 + list_move_tail(&obj_priv->list, 519 + &dev_priv->mm.active_list); 520 + } 521 + 522 + 523 + static void 524 + i915_gem_object_move_to_inactive(struct drm_gem_object *obj) 525 + { 526 + struct drm_device *dev = obj->dev; 527 + drm_i915_private_t *dev_priv = dev->dev_private; 528 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 529 + 530 + i915_verify_inactive(dev, __FILE__, __LINE__); 531 + if (obj_priv->pin_count != 0) 532 + list_del_init(&obj_priv->list); 533 + else 534 + list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 535 + 536 + if (obj_priv->active) { 537 + obj_priv->active = 0; 538 + drm_gem_object_unreference(obj); 539 + } 540 + i915_verify_inactive(dev, __FILE__, __LINE__); 541 + } 542 + 543 + /** 544 + * Creates a new sequence number, emitting a write of it to the status page 545 + * plus an interrupt, which will trigger i915_user_interrupt_handler. 546 + * 547 + * Must be called with struct_lock held. 548 + * 549 + * Returned sequence numbers are nonzero on success. 
550 + */ 551 + static uint32_t 552 + i915_add_request(struct drm_device *dev, uint32_t flush_domains) 553 + { 554 + drm_i915_private_t *dev_priv = dev->dev_private; 555 + struct drm_i915_gem_request *request; 556 + uint32_t seqno; 557 + int was_empty; 558 + RING_LOCALS; 559 + 560 + request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); 561 + if (request == NULL) 562 + return 0; 563 + 564 + /* Grab the seqno we're going to make this request be, and bump the 565 + * next (skipping 0 so it can be the reserved no-seqno value). 566 + */ 567 + seqno = dev_priv->mm.next_gem_seqno; 568 + dev_priv->mm.next_gem_seqno++; 569 + if (dev_priv->mm.next_gem_seqno == 0) 570 + dev_priv->mm.next_gem_seqno++; 571 + 572 + BEGIN_LP_RING(4); 573 + OUT_RING(MI_STORE_DWORD_INDEX); 574 + OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 575 + OUT_RING(seqno); 576 + 577 + OUT_RING(MI_USER_INTERRUPT); 578 + ADVANCE_LP_RING(); 579 + 580 + DRM_DEBUG("%d\n", seqno); 581 + 582 + request->seqno = seqno; 583 + request->emitted_jiffies = jiffies; 584 + request->flush_domains = flush_domains; 585 + was_empty = list_empty(&dev_priv->mm.request_list); 586 + list_add_tail(&request->list, &dev_priv->mm.request_list); 587 + 588 + if (was_empty && !dev_priv->mm.suspended) 589 + schedule_delayed_work(&dev_priv->mm.retire_work, HZ); 590 + return seqno; 591 + } 592 + 593 + /** 594 + * Command execution barrier 595 + * 596 + * Ensures that all commands in the ring are finished 597 + * before signalling the CPU 598 + */ 599 + static uint32_t 600 + i915_retire_commands(struct drm_device *dev) 601 + { 602 + drm_i915_private_t *dev_priv = dev->dev_private; 603 + uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 604 + uint32_t flush_domains = 0; 605 + RING_LOCALS; 606 + 607 + /* The sampler always gets flushed on i965 (sigh) */ 608 + if (IS_I965G(dev)) 609 + flush_domains |= I915_GEM_DOMAIN_SAMPLER; 610 + BEGIN_LP_RING(2); 611 + OUT_RING(cmd); 612 + OUT_RING(0); /* noop */ 613 + ADVANCE_LP_RING(); 614 + return flush_domains; 615 + } 616 + 617 + /** 618 + * Moves buffers associated only with the given active seqno from the active 619 + * to inactive list, potentially freeing them. 620 + */ 621 + static void 622 + i915_gem_retire_request(struct drm_device *dev, 623 + struct drm_i915_gem_request *request) 624 + { 625 + drm_i915_private_t *dev_priv = dev->dev_private; 626 + 627 + /* Move any buffers on the active list that are no longer referenced 628 + * by the ringbuffer to the flushing/inactive lists as appropriate. 629 + */ 630 + while (!list_empty(&dev_priv->mm.active_list)) { 631 + struct drm_gem_object *obj; 632 + struct drm_i915_gem_object *obj_priv; 633 + 634 + obj_priv = list_first_entry(&dev_priv->mm.active_list, 635 + struct drm_i915_gem_object, 636 + list); 637 + obj = obj_priv->obj; 638 + 639 + /* If the seqno being retired doesn't match the oldest in the 640 + * list, then the oldest in the list must still be newer than 641 + * this seqno. 
642 + */ 643 + if (obj_priv->last_rendering_seqno != request->seqno) 644 + return; 645 + #if WATCH_LRU 646 + DRM_INFO("%s: retire %d moves to inactive list %p\n", 647 + __func__, request->seqno, obj); 648 + #endif 649 + 650 + if (obj->write_domain != 0) { 651 + list_move_tail(&obj_priv->list, 652 + &dev_priv->mm.flushing_list); 653 + } else { 654 + i915_gem_object_move_to_inactive(obj); 655 + } 656 + } 657 + 658 + if (request->flush_domains != 0) { 659 + struct drm_i915_gem_object *obj_priv, *next; 660 + 661 + /* Clear the write domain and activity from any buffers 662 + * that are just waiting for a flush matching the one retired. 663 + */ 664 + list_for_each_entry_safe(obj_priv, next, 665 + &dev_priv->mm.flushing_list, list) { 666 + struct drm_gem_object *obj = obj_priv->obj; 667 + 668 + if (obj->write_domain & request->flush_domains) { 669 + obj->write_domain = 0; 670 + i915_gem_object_move_to_inactive(obj); 671 + } 672 + } 673 + 674 + } 675 + } 676 + 677 + /** 678 + * Returns true if seq1 is later than seq2. 679 + */ 680 + static int 681 + i915_seqno_passed(uint32_t seq1, uint32_t seq2) 682 + { 683 + return (int32_t)(seq1 - seq2) >= 0; 684 + } 685 + 686 + uint32_t 687 + i915_get_gem_seqno(struct drm_device *dev) 688 + { 689 + drm_i915_private_t *dev_priv = dev->dev_private; 690 + 691 + return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); 692 + } 693 + 694 + /** 695 + * This function clears the request list as sequence numbers are passed. 696 + */ 697 + void 698 + i915_gem_retire_requests(struct drm_device *dev) 699 + { 700 + drm_i915_private_t *dev_priv = dev->dev_private; 701 + uint32_t seqno; 702 + 703 + seqno = i915_get_gem_seqno(dev); 704 + 705 + while (!list_empty(&dev_priv->mm.request_list)) { 706 + struct drm_i915_gem_request *request; 707 + uint32_t retiring_seqno; 708 + 709 + request = list_first_entry(&dev_priv->mm.request_list, 710 + struct drm_i915_gem_request, 711 + list); 712 + retiring_seqno = request->seqno; 713 + 714 + if (i915_seqno_passed(seqno, retiring_seqno) || 715 + dev_priv->mm.wedged) { 716 + i915_gem_retire_request(dev, request); 717 + 718 + list_del(&request->list); 719 + drm_free(request, sizeof(*request), DRM_MEM_DRIVER); 720 + } else 721 + break; 722 + } 723 + } 724 + 725 + void 726 + i915_gem_retire_work_handler(struct work_struct *work) 727 + { 728 + drm_i915_private_t *dev_priv; 729 + struct drm_device *dev; 730 + 731 + dev_priv = container_of(work, drm_i915_private_t, 732 + mm.retire_work.work); 733 + dev = dev_priv->dev; 734 + 735 + mutex_lock(&dev->struct_mutex); 736 + i915_gem_retire_requests(dev); 737 + if (!dev_priv->mm.suspended && 738 + !list_empty(&dev_priv->mm.request_list)) 739 + schedule_delayed_work(&dev_priv->mm.retire_work, HZ); 740 + mutex_unlock(&dev->struct_mutex); 741 + } 742 + 743 + /** 744 + * Waits for a sequence number to be signaled, and cleans up the 745 + * request and object lists appropriately for that event. 
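Aside, not part of the patch: the signed subtraction in i915_seqno_passed() above is what keeps the 32-bit sequence counter safe across wrap-around. With seq1 = 0x00000002 and seq2 = 0xfffffffe the unsigned difference is 4, which is non-negative as an int32_t, so seqno 2 correctly counts as later than 0xfffffffe, while the reverse comparison comes out negative. A tiny self-check illustrating this:

static void seqno_wrap_example(void)
{
	uint32_t newer = 0x00000002, older = 0xfffffffe;

	BUG_ON(!i915_seqno_passed(newer, older));	/* 2 - 0xfffffffe == 4 */
	BUG_ON(i915_seqno_passed(older, newer));	/* 0xfffffffe - 2 is -4 as int32_t */
}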
746 + */ 747 + static int 748 + i915_wait_request(struct drm_device *dev, uint32_t seqno) 749 + { 750 + drm_i915_private_t *dev_priv = dev->dev_private; 751 + int ret = 0; 752 + 753 + BUG_ON(seqno == 0); 754 + 755 + if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 756 + dev_priv->mm.waiting_gem_seqno = seqno; 757 + i915_user_irq_get(dev); 758 + ret = wait_event_interruptible(dev_priv->irq_queue, 759 + i915_seqno_passed(i915_get_gem_seqno(dev), 760 + seqno) || 761 + dev_priv->mm.wedged); 762 + i915_user_irq_put(dev); 763 + dev_priv->mm.waiting_gem_seqno = 0; 764 + } 765 + if (dev_priv->mm.wedged) 766 + ret = -EIO; 767 + 768 + if (ret && ret != -ERESTARTSYS) 769 + DRM_ERROR("%s returns %d (awaiting %d at %d)\n", 770 + __func__, ret, seqno, i915_get_gem_seqno(dev)); 771 + 772 + /* Directly dispatch request retiring. While we have the work queue 773 + * to handle this, the waiter on a request often wants an associated 774 + * buffer to have made it to the inactive list, and we would need 775 + * a separate wait queue to handle that. 776 + */ 777 + if (ret == 0) 778 + i915_gem_retire_requests(dev); 779 + 780 + return ret; 781 + } 782 + 783 + static void 784 + i915_gem_flush(struct drm_device *dev, 785 + uint32_t invalidate_domains, 786 + uint32_t flush_domains) 787 + { 788 + drm_i915_private_t *dev_priv = dev->dev_private; 789 + uint32_t cmd; 790 + RING_LOCALS; 791 + 792 + #if WATCH_EXEC 793 + DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, 794 + invalidate_domains, flush_domains); 795 + #endif 796 + 797 + if (flush_domains & I915_GEM_DOMAIN_CPU) 798 + drm_agp_chipset_flush(dev); 799 + 800 + if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU | 801 + I915_GEM_DOMAIN_GTT)) { 802 + /* 803 + * read/write caches: 804 + * 805 + * I915_GEM_DOMAIN_RENDER is always invalidated, but is 806 + * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is 807 + * also flushed at 2d versus 3d pipeline switches. 808 + * 809 + * read-only caches: 810 + * 811 + * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if 812 + * MI_READ_FLUSH is set, and is always flushed on 965. 813 + * 814 + * I915_GEM_DOMAIN_COMMAND may not exist? 815 + * 816 + * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is 817 + * invalidated when MI_EXE_FLUSH is set. 818 + * 819 + * I915_GEM_DOMAIN_VERTEX, which exists on 965, is 820 + * invalidated with every MI_FLUSH. 821 + * 822 + * TLBs: 823 + * 824 + * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND 825 + * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and 826 + * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER 827 + * are flushed at any MI_FLUSH. 828 + */ 829 + 830 + cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 831 + if ((invalidate_domains|flush_domains) & 832 + I915_GEM_DOMAIN_RENDER) 833 + cmd &= ~MI_NO_WRITE_FLUSH; 834 + if (!IS_I965G(dev)) { 835 + /* 836 + * On the 965, the sampler cache always gets flushed 837 + * and this bit is reserved. 838 + */ 839 + if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) 840 + cmd |= MI_READ_FLUSH; 841 + } 842 + if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) 843 + cmd |= MI_EXE_FLUSH; 844 + 845 + #if WATCH_EXEC 846 + DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); 847 + #endif 848 + BEGIN_LP_RING(2); 849 + OUT_RING(cmd); 850 + OUT_RING(0); /* noop */ 851 + ADVANCE_LP_RING(); 852 + } 853 + } 854 + 855 + /** 856 + * Ensures that all rendering to the object has completed and the object is 857 + * safe to unbind from the GTT or access from the CPU. 
858 + */ 859 + static int 860 + i915_gem_object_wait_rendering(struct drm_gem_object *obj) 861 + { 862 + struct drm_device *dev = obj->dev; 863 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 864 + int ret; 865 + 866 + /* If there are writes queued to the buffer, flush and 867 + * create a new seqno to wait for. 868 + */ 869 + if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { 870 + uint32_t write_domain = obj->write_domain; 871 + #if WATCH_BUF 872 + DRM_INFO("%s: flushing object %p from write domain %08x\n", 873 + __func__, obj, write_domain); 874 + #endif 875 + i915_gem_flush(dev, 0, write_domain); 876 + 877 + i915_gem_object_move_to_active(obj); 878 + obj_priv->last_rendering_seqno = i915_add_request(dev, 879 + write_domain); 880 + BUG_ON(obj_priv->last_rendering_seqno == 0); 881 + #if WATCH_LRU 882 + DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj); 883 + #endif 884 + } 885 + 886 + /* If there is rendering queued on the buffer being evicted, wait for 887 + * it. 888 + */ 889 + if (obj_priv->active) { 890 + #if WATCH_BUF 891 + DRM_INFO("%s: object %p wait for seqno %08x\n", 892 + __func__, obj, obj_priv->last_rendering_seqno); 893 + #endif 894 + ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); 895 + if (ret != 0) 896 + return ret; 897 + } 898 + 899 + return 0; 900 + } 901 + 902 + /** 903 + * Unbinds an object from the GTT aperture. 904 + */ 905 + static int 906 + i915_gem_object_unbind(struct drm_gem_object *obj) 907 + { 908 + struct drm_device *dev = obj->dev; 909 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 910 + int ret = 0; 911 + 912 + #if WATCH_BUF 913 + DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj); 914 + DRM_INFO("gtt_space %p\n", obj_priv->gtt_space); 915 + #endif 916 + if (obj_priv->gtt_space == NULL) 917 + return 0; 918 + 919 + if (obj_priv->pin_count != 0) { 920 + DRM_ERROR("Attempting to unbind pinned buffer\n"); 921 + return -EINVAL; 922 + } 923 + 924 + /* Wait for any rendering to complete 925 + */ 926 + ret = i915_gem_object_wait_rendering(obj); 927 + if (ret) { 928 + DRM_ERROR("wait_rendering failed: %d\n", ret); 929 + return ret; 930 + } 931 + 932 + /* Move the object to the CPU domain to ensure that 933 + * any possible CPU writes while it's not in the GTT 934 + * are flushed when we go to remap it. This will 935 + * also ensure that all pending GPU writes are finished 936 + * before we unbind. 937 + */ 938 + ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, 939 + I915_GEM_DOMAIN_CPU); 940 + if (ret) { 941 + DRM_ERROR("set_domain failed: %d\n", ret); 942 + return ret; 943 + } 944 + 945 + if (obj_priv->agp_mem != NULL) { 946 + drm_unbind_agp(obj_priv->agp_mem); 947 + drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); 948 + obj_priv->agp_mem = NULL; 949 + } 950 + 951 + BUG_ON(obj_priv->active); 952 + 953 + i915_gem_object_free_page_list(obj); 954 + 955 + if (obj_priv->gtt_space) { 956 + atomic_dec(&dev->gtt_count); 957 + atomic_sub(obj->size, &dev->gtt_memory); 958 + 959 + drm_mm_put_block(obj_priv->gtt_space); 960 + obj_priv->gtt_space = NULL; 961 + } 962 + 963 + /* Remove ourselves from the LRU list if present. 
*/ 964 + if (!list_empty(&obj_priv->list)) 965 + list_del_init(&obj_priv->list); 966 + 967 + return 0; 968 + } 969 + 970 + static int 971 + i915_gem_evict_something(struct drm_device *dev) 972 + { 973 + drm_i915_private_t *dev_priv = dev->dev_private; 974 + struct drm_gem_object *obj; 975 + struct drm_i915_gem_object *obj_priv; 976 + int ret = 0; 977 + 978 + for (;;) { 979 + /* If there's an inactive buffer available now, grab it 980 + * and be done. 981 + */ 982 + if (!list_empty(&dev_priv->mm.inactive_list)) { 983 + obj_priv = list_first_entry(&dev_priv->mm.inactive_list, 984 + struct drm_i915_gem_object, 985 + list); 986 + obj = obj_priv->obj; 987 + BUG_ON(obj_priv->pin_count != 0); 988 + #if WATCH_LRU 989 + DRM_INFO("%s: evicting %p\n", __func__, obj); 990 + #endif 991 + BUG_ON(obj_priv->active); 992 + 993 + /* Wait on the rendering and unbind the buffer. */ 994 + ret = i915_gem_object_unbind(obj); 995 + break; 996 + } 997 + 998 + /* If we didn't get anything, but the ring is still processing 999 + * things, wait for one of those things to finish and hopefully 1000 + * leave us a buffer to evict. 1001 + */ 1002 + if (!list_empty(&dev_priv->mm.request_list)) { 1003 + struct drm_i915_gem_request *request; 1004 + 1005 + request = list_first_entry(&dev_priv->mm.request_list, 1006 + struct drm_i915_gem_request, 1007 + list); 1008 + 1009 + ret = i915_wait_request(dev, request->seqno); 1010 + if (ret) 1011 + break; 1012 + 1013 + /* if waiting caused an object to become inactive, 1014 + * then loop around and wait for it. Otherwise, we 1015 + * assume that waiting freed and unbound something, 1016 + * so there should now be some space in the GTT 1017 + */ 1018 + if (!list_empty(&dev_priv->mm.inactive_list)) 1019 + continue; 1020 + break; 1021 + } 1022 + 1023 + /* If we didn't have anything on the request list but there 1024 + * are buffers awaiting a flush, emit one and try again. 1025 + * When we wait on it, those buffers waiting for that flush 1026 + * will get moved to inactive. 1027 + */ 1028 + if (!list_empty(&dev_priv->mm.flushing_list)) { 1029 + obj_priv = list_first_entry(&dev_priv->mm.flushing_list, 1030 + struct drm_i915_gem_object, 1031 + list); 1032 + obj = obj_priv->obj; 1033 + 1034 + i915_gem_flush(dev, 1035 + obj->write_domain, 1036 + obj->write_domain); 1037 + i915_add_request(dev, obj->write_domain); 1038 + 1039 + obj = NULL; 1040 + continue; 1041 + } 1042 + 1043 + DRM_ERROR("inactive empty %d request empty %d " 1044 + "flushing empty %d\n", 1045 + list_empty(&dev_priv->mm.inactive_list), 1046 + list_empty(&dev_priv->mm.request_list), 1047 + list_empty(&dev_priv->mm.flushing_list)); 1048 + /* If we didn't do any of the above, there's nothing to be done 1049 + * and we just can't fit it in. 1050 + */ 1051 + return -ENOMEM; 1052 + } 1053 + return ret; 1054 + } 1055 + 1056 + static int 1057 + i915_gem_object_get_page_list(struct drm_gem_object *obj) 1058 + { 1059 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1060 + int page_count, i; 1061 + struct address_space *mapping; 1062 + struct inode *inode; 1063 + struct page *page; 1064 + int ret; 1065 + 1066 + if (obj_priv->page_list) 1067 + return 0; 1068 + 1069 + /* Get the list of pages out of our struct file. They'll be pinned 1070 + * at this point until we release them. 
1071 + */ 1072 + page_count = obj->size / PAGE_SIZE; 1073 + BUG_ON(obj_priv->page_list != NULL); 1074 + obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), 1075 + DRM_MEM_DRIVER); 1076 + if (obj_priv->page_list == NULL) { 1077 + DRM_ERROR("Failed to allocate page list\n"); 1078 + return -ENOMEM; 1079 + } 1080 + 1081 + inode = obj->filp->f_path.dentry->d_inode; 1082 + mapping = inode->i_mapping; 1083 + for (i = 0; i < page_count; i++) { 1084 + page = read_mapping_page(mapping, i, NULL); 1085 + if (IS_ERR(page)) { 1086 + ret = PTR_ERR(page); 1087 + DRM_ERROR("read_mapping_page failed: %d\n", ret); 1088 + i915_gem_object_free_page_list(obj); 1089 + return ret; 1090 + } 1091 + obj_priv->page_list[i] = page; 1092 + } 1093 + return 0; 1094 + } 1095 + 1096 + /** 1097 + * Finds free space in the GTT aperture and binds the object there. 1098 + */ 1099 + static int 1100 + i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) 1101 + { 1102 + struct drm_device *dev = obj->dev; 1103 + drm_i915_private_t *dev_priv = dev->dev_private; 1104 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1105 + struct drm_mm_node *free_space; 1106 + int page_count, ret; 1107 + 1108 + if (alignment == 0) 1109 + alignment = PAGE_SIZE; 1110 + if (alignment & (PAGE_SIZE - 1)) { 1111 + DRM_ERROR("Invalid object alignment requested %u\n", alignment); 1112 + return -EINVAL; 1113 + } 1114 + 1115 + search_free: 1116 + free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, 1117 + obj->size, alignment, 0); 1118 + if (free_space != NULL) { 1119 + obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, 1120 + alignment); 1121 + if (obj_priv->gtt_space != NULL) { 1122 + obj_priv->gtt_space->private = obj; 1123 + obj_priv->gtt_offset = obj_priv->gtt_space->start; 1124 + } 1125 + } 1126 + if (obj_priv->gtt_space == NULL) { 1127 + /* If the gtt is empty and we're still having trouble 1128 + * fitting our object in, we're out of memory. 1129 + */ 1130 + #if WATCH_LRU 1131 + DRM_INFO("%s: GTT full, evicting something\n", __func__); 1132 + #endif 1133 + if (list_empty(&dev_priv->mm.inactive_list) && 1134 + list_empty(&dev_priv->mm.flushing_list) && 1135 + list_empty(&dev_priv->mm.active_list)) { 1136 + DRM_ERROR("GTT full, but LRU list empty\n"); 1137 + return -ENOMEM; 1138 + } 1139 + 1140 + ret = i915_gem_evict_something(dev); 1141 + if (ret != 0) { 1142 + DRM_ERROR("Failed to evict a buffer %d\n", ret); 1143 + return ret; 1144 + } 1145 + goto search_free; 1146 + } 1147 + 1148 + #if WATCH_BUF 1149 + DRM_INFO("Binding object of size %d at 0x%08x\n", 1150 + obj->size, obj_priv->gtt_offset); 1151 + #endif 1152 + ret = i915_gem_object_get_page_list(obj); 1153 + if (ret) { 1154 + drm_mm_put_block(obj_priv->gtt_space); 1155 + obj_priv->gtt_space = NULL; 1156 + return ret; 1157 + } 1158 + 1159 + page_count = obj->size / PAGE_SIZE; 1160 + /* Create an AGP memory structure pointing at our pages, and bind it 1161 + * into the GTT. 1162 + */ 1163 + obj_priv->agp_mem = drm_agp_bind_pages(dev, 1164 + obj_priv->page_list, 1165 + page_count, 1166 + obj_priv->gtt_offset, 1167 + obj_priv->agp_type); 1168 + if (obj_priv->agp_mem == NULL) { 1169 + i915_gem_object_free_page_list(obj); 1170 + drm_mm_put_block(obj_priv->gtt_space); 1171 + obj_priv->gtt_space = NULL; 1172 + return -ENOMEM; 1173 + } 1174 + atomic_inc(&dev->gtt_count); 1175 + atomic_add(obj->size, &dev->gtt_memory); 1176 + 1177 + /* Assert that the object is not currently in any GPU domain.
As it 1178 + * wasn't in the GTT, there shouldn't be any way it could have been in 1179 + * a GPU cache 1180 + */ 1181 + BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 1182 + BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 1183 + 1184 + return 0; 1185 + } 1186 + 1187 + void 1188 + i915_gem_clflush_object(struct drm_gem_object *obj) 1189 + { 1190 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1191 + 1192 + /* If we don't have a page list set up, then we're not pinned 1193 + * to GPU, and we can ignore the cache flush because it'll happen 1194 + * again at bind time. 1195 + */ 1196 + if (obj_priv->page_list == NULL) 1197 + return; 1198 + 1199 + drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 1200 + } 1201 + 1202 + /* 1203 + * Set the next domain for the specified object. This 1204 + * may not actually perform the necessary flushing/invalidating though, 1205 + * as that may want to be batched with other set_domain operations 1206 + * 1207 + * This is (we hope) the only really tricky part of gem. The goal 1208 + * is fairly simple -- track which caches hold bits of the object 1209 + * and make sure they remain coherent. A few concrete examples may 1210 + * help to explain how it works. For shorthand, we use the notation 1211 + * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the 1212 + * pair of read and write domain masks. 1213 + * 1214 + * Case 1: the batch buffer 1215 + * 1216 + * 1. Allocated 1217 + * 2. Written by CPU 1218 + * 3. Mapped to GTT 1219 + * 4. Read by GPU 1220 + * 5. Unmapped from GTT 1221 + * 6. Freed 1222 + * 1223 + * Let's take these a step at a time 1224 + * 1225 + * 1. Allocated 1226 + * Pages allocated from the kernel may still have 1227 + * cache contents, so we set them to (CPU, CPU) always. 1228 + * 2. Written by CPU (using pwrite) 1229 + * The pwrite function calls set_domain (CPU, CPU) and 1230 + * this function does nothing (as nothing changes) 1231 + * 3. Mapped by GTT 1232 + * This function asserts that the object is not 1233 + * currently in any GPU-based read or write domains 1234 + * 4. Read by GPU 1235 + * i915_gem_execbuffer calls set_domain (COMMAND, 0). 1236 + * As write_domain is zero, this function adds in the 1237 + * current read domains (CPU+COMMAND, 0). 1238 + * flush_domains is set to CPU. 1239 + * invalidate_domains is set to COMMAND 1240 + * clflush is run to get data out of the CPU caches 1241 + * then i915_dev_set_domain calls i915_gem_flush to 1242 + * emit an MI_FLUSH and drm_agp_chipset_flush 1243 + * 5. Unmapped from GTT 1244 + * i915_gem_object_unbind calls set_domain (CPU, CPU) 1245 + * flush_domains and invalidate_domains end up both zero 1246 + * so no flushing/invalidating happens 1247 + * 6. Freed 1248 + * yay, done 1249 + * 1250 + * Case 2: The shared render buffer 1251 + * 1252 + * 1. Allocated 1253 + * 2. Mapped to GTT 1254 + * 3. Read/written by GPU 1255 + * 4. set_domain to (CPU,CPU) 1256 + * 5. Read/written by CPU 1257 + * 6. Read/written by GPU 1258 + * 1259 + * 1. Allocated 1260 + * Same as last example, (CPU, CPU) 1261 + * 2. Mapped to GTT 1262 + * Nothing changes (assertions find that it is not in the GPU) 1263 + * 3. Read/written by GPU 1264 + * execbuffer calls set_domain (RENDER, RENDER) 1265 + * flush_domains gets CPU 1266 + * invalidate_domains gets GPU 1267 + * clflush (obj) 1268 + * MI_FLUSH and drm_agp_chipset_flush 1269 + * 4.
set_domain (CPU, CPU) 1270 + * flush_domains gets GPU 1271 + * invalidate_domains gets CPU 1272 + * wait_rendering (obj) to make sure all drawing is complete. 1273 + * This will include an MI_FLUSH to get the data from GPU 1274 + * to memory 1275 + * clflush (obj) to invalidate the CPU cache 1276 + * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) 1277 + * 5. Read/written by CPU 1278 + * cache lines are loaded and dirtied 1279 + * 6. Read written by GPU 1280 + * Same as last GPU access 1281 + * 1282 + * Case 3: The constant buffer 1283 + * 1284 + * 1. Allocated 1285 + * 2. Written by CPU 1286 + * 3. Read by GPU 1287 + * 4. Updated (written) by CPU again 1288 + * 5. Read by GPU 1289 + * 1290 + * 1. Allocated 1291 + * (CPU, CPU) 1292 + * 2. Written by CPU 1293 + * (CPU, CPU) 1294 + * 3. Read by GPU 1295 + * (CPU+RENDER, 0) 1296 + * flush_domains = CPU 1297 + * invalidate_domains = RENDER 1298 + * clflush (obj) 1299 + * MI_FLUSH 1300 + * drm_agp_chipset_flush 1301 + * 4. Updated (written) by CPU again 1302 + * (CPU, CPU) 1303 + * flush_domains = 0 (no previous write domain) 1304 + * invalidate_domains = 0 (no new read domains) 1305 + * 5. Read by GPU 1306 + * (CPU+RENDER, 0) 1307 + * flush_domains = CPU 1308 + * invalidate_domains = RENDER 1309 + * clflush (obj) 1310 + * MI_FLUSH 1311 + * drm_agp_chipset_flush 1312 + */ 1313 + static int 1314 + i915_gem_object_set_domain(struct drm_gem_object *obj, 1315 + uint32_t read_domains, 1316 + uint32_t write_domain) 1317 + { 1318 + struct drm_device *dev = obj->dev; 1319 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1320 + uint32_t invalidate_domains = 0; 1321 + uint32_t flush_domains = 0; 1322 + int ret; 1323 + 1324 + #if WATCH_BUF 1325 + DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", 1326 + __func__, obj, 1327 + obj->read_domains, read_domains, 1328 + obj->write_domain, write_domain); 1329 + #endif 1330 + /* 1331 + * If the object isn't moving to a new write domain, 1332 + * let the object stay in multiple read domains 1333 + */ 1334 + if (write_domain == 0) 1335 + read_domains |= obj->read_domains; 1336 + else 1337 + obj_priv->dirty = 1; 1338 + 1339 + /* 1340 + * Flush the current write domain if 1341 + * the new read domains don't match. Invalidate 1342 + * any read domains which differ from the old 1343 + * write domain 1344 + */ 1345 + if (obj->write_domain && obj->write_domain != read_domains) { 1346 + flush_domains |= obj->write_domain; 1347 + invalidate_domains |= read_domains & ~obj->write_domain; 1348 + } 1349 + /* 1350 + * Invalidate any read caches which may have 1351 + * stale data. That is, any new read domains. 
1352 + */ 1353 + invalidate_domains |= read_domains & ~obj->read_domains; 1354 + if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { 1355 + #if WATCH_BUF 1356 + DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", 1357 + __func__, flush_domains, invalidate_domains); 1358 + #endif 1359 + /* 1360 + * If we're invalidating the CPU cache and flushing a GPU cache, 1361 + * then pause for rendering so that the GPU caches will be 1362 + * flushed before the cpu cache is invalidated 1363 + */ 1364 + if ((invalidate_domains & I915_GEM_DOMAIN_CPU) && 1365 + (flush_domains & ~(I915_GEM_DOMAIN_CPU | 1366 + I915_GEM_DOMAIN_GTT))) { 1367 + ret = i915_gem_object_wait_rendering(obj); 1368 + if (ret) 1369 + return ret; 1370 + } 1371 + i915_gem_clflush_object(obj); 1372 + } 1373 + 1374 + if ((write_domain | flush_domains) != 0) 1375 + obj->write_domain = write_domain; 1376 + 1377 + /* If we're invalidating the CPU domain, clear the per-page CPU 1378 + * domain list as well. 1379 + */ 1380 + if (obj_priv->page_cpu_valid != NULL && 1381 + (write_domain != 0 || 1382 + read_domains & I915_GEM_DOMAIN_CPU)) { 1383 + drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, 1384 + DRM_MEM_DRIVER); 1385 + obj_priv->page_cpu_valid = NULL; 1386 + } 1387 + obj->read_domains = read_domains; 1388 + 1389 + dev->invalidate_domains |= invalidate_domains; 1390 + dev->flush_domains |= flush_domains; 1391 + #if WATCH_BUF 1392 + DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n", 1393 + __func__, 1394 + obj->read_domains, obj->write_domain, 1395 + dev->invalidate_domains, dev->flush_domains); 1396 + #endif 1397 + return 0; 1398 + } 1399 + 1400 + /** 1401 + * Set the read/write domain on a range of the object. 1402 + * 1403 + * Currently only implemented for CPU reads, otherwise drops to normal 1404 + * i915_gem_object_set_domain(). 1405 + */ 1406 + static int 1407 + i915_gem_object_set_domain_range(struct drm_gem_object *obj, 1408 + uint64_t offset, 1409 + uint64_t size, 1410 + uint32_t read_domains, 1411 + uint32_t write_domain) 1412 + { 1413 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1414 + int ret, i; 1415 + 1416 + if (obj->read_domains & I915_GEM_DOMAIN_CPU) 1417 + return 0; 1418 + 1419 + if (read_domains != I915_GEM_DOMAIN_CPU || 1420 + write_domain != 0) 1421 + return i915_gem_object_set_domain(obj, 1422 + read_domains, write_domain); 1423 + 1424 + /* Wait on any GPU rendering to the object to be flushed. */ 1425 + if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) { 1426 + ret = i915_gem_object_wait_rendering(obj); 1427 + if (ret) 1428 + return ret; 1429 + } 1430 + 1431 + if (obj_priv->page_cpu_valid == NULL) { 1432 + obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, 1433 + DRM_MEM_DRIVER); 1434 + } 1435 + 1436 + /* Flush the cache on any pages that are still invalid from the CPU's 1437 + * perspective. 1438 + */ 1439 + for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { 1440 + if (obj_priv->page_cpu_valid[i]) 1441 + continue; 1442 + 1443 + drm_clflush_pages(obj_priv->page_list + i, 1); 1444 + 1445 + obj_priv->page_cpu_valid[i] = 1; 1446 + } 1447 + 1448 + return 0; 1449 + } 1450 + 1451 + /** 1452 + * Once all of the objects have been set in the proper domain, 1453 + * perform the necessary flush and invalidate operations. 1454 + * 1455 + * Returns the write domains flushed, for use in flush tracking.
1456 + */ 1457 + static uint32_t 1458 + i915_gem_dev_set_domain(struct drm_device *dev) 1459 + { 1460 + uint32_t flush_domains = dev->flush_domains; 1461 + 1462 + /* 1463 + * Now that all the buffers are synced to the proper domains, 1464 + * flush and invalidate the collected domains 1465 + */ 1466 + if (dev->invalidate_domains | dev->flush_domains) { 1467 + #if WATCH_EXEC 1468 + DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", 1469 + __func__, 1470 + dev->invalidate_domains, 1471 + dev->flush_domains); 1472 + #endif 1473 + i915_gem_flush(dev, 1474 + dev->invalidate_domains, 1475 + dev->flush_domains); 1476 + dev->invalidate_domains = 0; 1477 + dev->flush_domains = 0; 1478 + } 1479 + 1480 + return flush_domains; 1481 + } 1482 + 1483 + /** 1484 + * Pin an object to the GTT and evaluate the relocations landing in it. 1485 + */ 1486 + static int 1487 + i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, 1488 + struct drm_file *file_priv, 1489 + struct drm_i915_gem_exec_object *entry) 1490 + { 1491 + struct drm_device *dev = obj->dev; 1492 + struct drm_i915_gem_relocation_entry reloc; 1493 + struct drm_i915_gem_relocation_entry __user *relocs; 1494 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1495 + int i, ret; 1496 + uint32_t last_reloc_offset = -1; 1497 + void __iomem *reloc_page = NULL; 1498 + 1499 + /* Choose the GTT offset for our buffer and put it there. */ 1500 + ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); 1501 + if (ret) 1502 + return ret; 1503 + 1504 + entry->offset = obj_priv->gtt_offset; 1505 + 1506 + relocs = (struct drm_i915_gem_relocation_entry __user *) 1507 + (uintptr_t) entry->relocs_ptr; 1508 + /* Apply the relocations, using the GTT aperture to avoid cache 1509 + * flushing requirements. 1510 + */ 1511 + for (i = 0; i < entry->relocation_count; i++) { 1512 + struct drm_gem_object *target_obj; 1513 + struct drm_i915_gem_object *target_obj_priv; 1514 + uint32_t reloc_val, reloc_offset; 1515 + uint32_t __iomem *reloc_entry; 1516 + 1517 + ret = copy_from_user(&reloc, relocs + i, sizeof(reloc)); 1518 + if (ret != 0) { 1519 + i915_gem_object_unpin(obj); 1520 + return ret; 1521 + } 1522 + 1523 + target_obj = drm_gem_object_lookup(obj->dev, file_priv, 1524 + reloc.target_handle); 1525 + if (target_obj == NULL) { 1526 + i915_gem_object_unpin(obj); 1527 + return -EBADF; 1528 + } 1529 + target_obj_priv = target_obj->driver_private; 1530 + 1531 + /* The target buffer should have appeared before us in the 1532 + * exec_object list, so it should have a GTT space bound by now. 
1533 + */ 1534 + if (target_obj_priv->gtt_space == NULL) { 1535 + DRM_ERROR("No GTT space found for object %d\n", 1536 + reloc.target_handle); 1537 + drm_gem_object_unreference(target_obj); 1538 + i915_gem_object_unpin(obj); 1539 + return -EINVAL; 1540 + } 1541 + 1542 + if (reloc.offset > obj->size - 4) { 1543 + DRM_ERROR("Relocation beyond object bounds: " 1544 + "obj %p target %d offset %d size %d.\n", 1545 + obj, reloc.target_handle, 1546 + (int) reloc.offset, (int) obj->size); 1547 + drm_gem_object_unreference(target_obj); 1548 + i915_gem_object_unpin(obj); 1549 + return -EINVAL; 1550 + } 1551 + if (reloc.offset & 3) { 1552 + DRM_ERROR("Relocation not 4-byte aligned: " 1553 + "obj %p target %d offset %d.\n", 1554 + obj, reloc.target_handle, 1555 + (int) reloc.offset); 1556 + drm_gem_object_unreference(target_obj); 1557 + i915_gem_object_unpin(obj); 1558 + return -EINVAL; 1559 + } 1560 + 1561 + if (reloc.write_domain && target_obj->pending_write_domain && 1562 + reloc.write_domain != target_obj->pending_write_domain) { 1563 + DRM_ERROR("Write domain conflict: " 1564 + "obj %p target %d offset %d " 1565 + "new %08x old %08x\n", 1566 + obj, reloc.target_handle, 1567 + (int) reloc.offset, 1568 + reloc.write_domain, 1569 + target_obj->pending_write_domain); 1570 + drm_gem_object_unreference(target_obj); 1571 + i915_gem_object_unpin(obj); 1572 + return -EINVAL; 1573 + } 1574 + 1575 + #if WATCH_RELOC 1576 + DRM_INFO("%s: obj %p offset %08x target %d " 1577 + "read %08x write %08x gtt %08x " 1578 + "presumed %08x delta %08x\n", 1579 + __func__, 1580 + obj, 1581 + (int) reloc.offset, 1582 + (int) reloc.target_handle, 1583 + (int) reloc.read_domains, 1584 + (int) reloc.write_domain, 1585 + (int) target_obj_priv->gtt_offset, 1586 + (int) reloc.presumed_offset, 1587 + reloc.delta); 1588 + #endif 1589 + 1590 + target_obj->pending_read_domains |= reloc.read_domains; 1591 + target_obj->pending_write_domain |= reloc.write_domain; 1592 + 1593 + /* If the relocation already has the right value in it, no 1594 + * more work needs to be done. 1595 + */ 1596 + if (target_obj_priv->gtt_offset == reloc.presumed_offset) { 1597 + drm_gem_object_unreference(target_obj); 1598 + continue; 1599 + } 1600 + 1601 + /* Now that we're going to actually write some data in, 1602 + * make sure that any rendering using this buffer's contents 1603 + * is completed. 1604 + */ 1605 + i915_gem_object_wait_rendering(obj); 1606 + 1607 + /* As we're writing through the gtt, flush 1608 + * any CPU writes before we write the relocations 1609 + */ 1610 + if (obj->write_domain & I915_GEM_DOMAIN_CPU) { 1611 + i915_gem_clflush_object(obj); 1612 + drm_agp_chipset_flush(dev); 1613 + obj->write_domain = 0; 1614 + } 1615 + 1616 + /* Map the page containing the relocation we're going to 1617 + * perform. 
1618 + */ 1619 + reloc_offset = obj_priv->gtt_offset + reloc.offset; 1620 + if (reloc_page == NULL || 1621 + (last_reloc_offset & ~(PAGE_SIZE - 1)) != 1622 + (reloc_offset & ~(PAGE_SIZE - 1))) { 1623 + if (reloc_page != NULL) 1624 + iounmap(reloc_page); 1625 + 1626 + reloc_page = ioremap_wc(dev->agp->base + 1627 + (reloc_offset & 1628 + ~(PAGE_SIZE - 1)), 1629 + PAGE_SIZE); 1630 + last_reloc_offset = reloc_offset; 1631 + if (reloc_page == NULL) { 1632 + drm_gem_object_unreference(target_obj); 1633 + i915_gem_object_unpin(obj); 1634 + return -ENOMEM; 1635 + } 1636 + } 1637 + 1638 + reloc_entry = (uint32_t __iomem *)(reloc_page + 1639 + (reloc_offset & (PAGE_SIZE - 1))); 1640 + reloc_val = target_obj_priv->gtt_offset + reloc.delta; 1641 + 1642 + #if WATCH_BUF 1643 + DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", 1644 + obj, (unsigned int) reloc.offset, 1645 + readl(reloc_entry), reloc_val); 1646 + #endif 1647 + writel(reloc_val, reloc_entry); 1648 + 1649 + /* Write the updated presumed offset for this entry back out 1650 + * to the user. 1651 + */ 1652 + reloc.presumed_offset = target_obj_priv->gtt_offset; 1653 + ret = copy_to_user(relocs + i, &reloc, sizeof(reloc)); 1654 + if (ret != 0) { 1655 + drm_gem_object_unreference(target_obj); 1656 + i915_gem_object_unpin(obj); 1657 + return ret; 1658 + } 1659 + 1660 + drm_gem_object_unreference(target_obj); 1661 + } 1662 + 1663 + if (reloc_page != NULL) 1664 + iounmap(reloc_page); 1665 + 1666 + #if WATCH_BUF 1667 + if (0) 1668 + i915_gem_dump_object(obj, 128, __func__, ~0); 1669 + #endif 1670 + return 0; 1671 + } 1672 + 1673 + /** Dispatch a batchbuffer to the ring 1674 + */ 1675 + static int 1676 + i915_dispatch_gem_execbuffer(struct drm_device *dev, 1677 + struct drm_i915_gem_execbuffer *exec, 1678 + uint64_t exec_offset) 1679 + { 1680 + drm_i915_private_t *dev_priv = dev->dev_private; 1681 + struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *) 1682 + (uintptr_t) exec->cliprects_ptr; 1683 + int nbox = exec->num_cliprects; 1684 + int i = 0, count; 1685 + uint32_t exec_start, exec_len; 1686 + RING_LOCALS; 1687 + 1688 + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 1689 + exec_len = (uint32_t) exec->batch_len; 1690 + 1691 + if ((exec_start | exec_len) & 0x7) { 1692 + DRM_ERROR("alignment\n"); 1693 + return -EINVAL; 1694 + } 1695 + 1696 + if (!exec_start) 1697 + return -EINVAL; 1698 + 1699 + count = nbox ? nbox : 1; 1700 + 1701 + for (i = 0; i < count; i++) { 1702 + if (i < nbox) { 1703 + int ret = i915_emit_box(dev, boxes, i, 1704 + exec->DR1, exec->DR4); 1705 + if (ret) 1706 + return ret; 1707 + } 1708 + 1709 + if (IS_I830(dev) || IS_845G(dev)) { 1710 + BEGIN_LP_RING(4); 1711 + OUT_RING(MI_BATCH_BUFFER); 1712 + OUT_RING(exec_start | MI_BATCH_NON_SECURE); 1713 + OUT_RING(exec_start + exec_len - 4); 1714 + OUT_RING(0); 1715 + ADVANCE_LP_RING(); 1716 + } else { 1717 + BEGIN_LP_RING(2); 1718 + if (IS_I965G(dev)) { 1719 + OUT_RING(MI_BATCH_BUFFER_START | 1720 + (2 << 6) | 1721 + MI_BATCH_NON_SECURE_I965); 1722 + OUT_RING(exec_start); 1723 + } else { 1724 + OUT_RING(MI_BATCH_BUFFER_START | 1725 + (2 << 6)); 1726 + OUT_RING(exec_start | MI_BATCH_NON_SECURE); 1727 + } 1728 + ADVANCE_LP_RING(); 1729 + } 1730 + } 1731 + 1732 + /* XXX breadcrumb */ 1733 + return 0; 1734 + } 1735 + 1736 + /* Throttle our rendering by waiting until the ring has completed our requests 1737 + * emitted over 20 msec ago. 
1738 + * 1739 + * This should get us reasonable parallelism between CPU and GPU but also 1740 + * relatively low latency when blocking on a particular request to finish. 1741 + */ 1742 + static int 1743 + i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) 1744 + { 1745 + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 1746 + int ret = 0; 1747 + uint32_t seqno; 1748 + 1749 + mutex_lock(&dev->struct_mutex); 1750 + seqno = i915_file_priv->mm.last_gem_throttle_seqno; 1751 + i915_file_priv->mm.last_gem_throttle_seqno = 1752 + i915_file_priv->mm.last_gem_seqno; 1753 + if (seqno) 1754 + ret = i915_wait_request(dev, seqno); 1755 + mutex_unlock(&dev->struct_mutex); 1756 + return ret; 1757 + } 1758 + 1759 + int 1760 + i915_gem_execbuffer(struct drm_device *dev, void *data, 1761 + struct drm_file *file_priv) 1762 + { 1763 + drm_i915_private_t *dev_priv = dev->dev_private; 1764 + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 1765 + struct drm_i915_gem_execbuffer *args = data; 1766 + struct drm_i915_gem_exec_object *exec_list = NULL; 1767 + struct drm_gem_object **object_list = NULL; 1768 + struct drm_gem_object *batch_obj; 1769 + int ret, i, pinned = 0; 1770 + uint64_t exec_offset; 1771 + uint32_t seqno, flush_domains; 1772 + 1773 + #if WATCH_EXEC 1774 + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 1775 + (int) args->buffers_ptr, args->buffer_count, args->batch_len); 1776 + #endif 1777 + 1778 + if (args->buffer_count < 1) { 1779 + DRM_ERROR("execbuf with %d buffers\n", args->buffer_count); 1780 + return -EINVAL; 1781 + } 1782 + /* Copy in the exec list from userland */ 1783 + exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count, 1784 + DRM_MEM_DRIVER); 1785 + object_list = drm_calloc(sizeof(*object_list), args->buffer_count, 1786 + DRM_MEM_DRIVER); 1787 + if (exec_list == NULL || object_list == NULL) { 1788 + DRM_ERROR("Failed to allocate exec or object list " 1789 + "for %d buffers\n", 1790 + args->buffer_count); 1791 + ret = -ENOMEM; 1792 + goto pre_mutex_err; 1793 + } 1794 + ret = copy_from_user(exec_list, 1795 + (struct drm_i915_relocation_entry __user *) 1796 + (uintptr_t) args->buffers_ptr, 1797 + sizeof(*exec_list) * args->buffer_count); 1798 + if (ret != 0) { 1799 + DRM_ERROR("copy %d exec entries failed %d\n", 1800 + args->buffer_count, ret); 1801 + goto pre_mutex_err; 1802 + } 1803 + 1804 + mutex_lock(&dev->struct_mutex); 1805 + 1806 + i915_verify_inactive(dev, __FILE__, __LINE__); 1807 + 1808 + if (dev_priv->mm.wedged) { 1809 + DRM_ERROR("Execbuf while wedged\n"); 1810 + mutex_unlock(&dev->struct_mutex); 1811 + return -EIO; 1812 + } 1813 + 1814 + if (dev_priv->mm.suspended) { 1815 + DRM_ERROR("Execbuf while VT-switched.\n"); 1816 + mutex_unlock(&dev->struct_mutex); 1817 + return -EBUSY; 1818 + } 1819 + 1820 + /* Zero the global flush/invalidate flags.
These 1821 + * will be modified as each object is bound to the 1822 + * gtt 1823 + */ 1824 + dev->invalidate_domains = 0; 1825 + dev->flush_domains = 0; 1826 + 1827 + /* Look up object handles and perform the relocations */ 1828 + for (i = 0; i < args->buffer_count; i++) { 1829 + object_list[i] = drm_gem_object_lookup(dev, file_priv, 1830 + exec_list[i].handle); 1831 + if (object_list[i] == NULL) { 1832 + DRM_ERROR("Invalid object handle %d at index %d\n", 1833 + exec_list[i].handle, i); 1834 + ret = -EBADF; 1835 + goto err; 1836 + } 1837 + 1838 + object_list[i]->pending_read_domains = 0; 1839 + object_list[i]->pending_write_domain = 0; 1840 + ret = i915_gem_object_pin_and_relocate(object_list[i], 1841 + file_priv, 1842 + &exec_list[i]); 1843 + if (ret) { 1844 + DRM_ERROR("object bind and relocate failed %d\n", ret); 1845 + goto err; 1846 + } 1847 + pinned = i + 1; 1848 + } 1849 + 1850 + /* Set the pending read domains for the batch buffer to COMMAND */ 1851 + batch_obj = object_list[args->buffer_count-1]; 1852 + batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND; 1853 + batch_obj->pending_write_domain = 0; 1854 + 1855 + i915_verify_inactive(dev, __FILE__, __LINE__); 1856 + 1857 + for (i = 0; i < args->buffer_count; i++) { 1858 + struct drm_gem_object *obj = object_list[i]; 1859 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1860 + 1861 + if (obj_priv->gtt_space == NULL) { 1862 + /* We evicted the buffer in the process of validating 1863 + * our set of buffers in. We could try to recover by 1864 + * kicking them everything out and trying again from 1865 + * the start. 1866 + */ 1867 + ret = -ENOMEM; 1868 + goto err; 1869 + } 1870 + 1871 + /* make sure all previous memory operations have passed */ 1872 + ret = i915_gem_object_set_domain(obj, 1873 + obj->pending_read_domains, 1874 + obj->pending_write_domain); 1875 + if (ret) 1876 + goto err; 1877 + } 1878 + 1879 + i915_verify_inactive(dev, __FILE__, __LINE__); 1880 + 1881 + /* Flush/invalidate caches and chipset buffer */ 1882 + flush_domains = i915_gem_dev_set_domain(dev); 1883 + 1884 + i915_verify_inactive(dev, __FILE__, __LINE__); 1885 + 1886 + #if WATCH_COHERENCY 1887 + for (i = 0; i < args->buffer_count; i++) { 1888 + i915_gem_object_check_coherency(object_list[i], 1889 + exec_list[i].handle); 1890 + } 1891 + #endif 1892 + 1893 + exec_offset = exec_list[args->buffer_count - 1].offset; 1894 + 1895 + #if WATCH_EXEC 1896 + i915_gem_dump_object(object_list[args->buffer_count - 1], 1897 + args->batch_len, 1898 + __func__, 1899 + ~0); 1900 + #endif 1901 + 1902 + (void)i915_add_request(dev, flush_domains); 1903 + 1904 + /* Exec the batchbuffer */ 1905 + ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); 1906 + if (ret) { 1907 + DRM_ERROR("dispatch failed %d\n", ret); 1908 + goto err; 1909 + } 1910 + 1911 + /* 1912 + * Ensure that the commands in the batch buffer are 1913 + * finished before the interrupt fires 1914 + */ 1915 + flush_domains = i915_retire_commands(dev); 1916 + 1917 + i915_verify_inactive(dev, __FILE__, __LINE__); 1918 + 1919 + /* 1920 + * Get a seqno representing the execution of the current buffer, 1921 + * which we can wait on. We would like to mitigate these interrupts, 1922 + * likely by only creating seqnos occasionally (so that we have 1923 + * *some* interrupts representing completion of buffers that we can 1924 + * wait on when trying to clear up gtt space). 
1925 + */ 1926 + seqno = i915_add_request(dev, flush_domains); 1927 + BUG_ON(seqno == 0); 1928 + i915_file_priv->mm.last_gem_seqno = seqno; 1929 + for (i = 0; i < args->buffer_count; i++) { 1930 + struct drm_gem_object *obj = object_list[i]; 1931 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1932 + 1933 + i915_gem_object_move_to_active(obj); 1934 + obj_priv->last_rendering_seqno = seqno; 1935 + #if WATCH_LRU 1936 + DRM_INFO("%s: move to exec list %p\n", __func__, obj); 1937 + #endif 1938 + } 1939 + #if WATCH_LRU 1940 + i915_dump_lru(dev, __func__); 1941 + #endif 1942 + 1943 + i915_verify_inactive(dev, __FILE__, __LINE__); 1944 + 1945 + /* Copy the new buffer offsets back to the user's exec list. */ 1946 + ret = copy_to_user((struct drm_i915_relocation_entry __user *) 1947 + (uintptr_t) args->buffers_ptr, 1948 + exec_list, 1949 + sizeof(*exec_list) * args->buffer_count); 1950 + if (ret) 1951 + DRM_ERROR("failed to copy %d exec entries " 1952 + "back to user (%d)\n", 1953 + args->buffer_count, ret); 1954 + err: 1955 + if (object_list != NULL) { 1956 + for (i = 0; i < pinned; i++) 1957 + i915_gem_object_unpin(object_list[i]); 1958 + 1959 + for (i = 0; i < args->buffer_count; i++) 1960 + drm_gem_object_unreference(object_list[i]); 1961 + } 1962 + mutex_unlock(&dev->struct_mutex); 1963 + 1964 + pre_mutex_err: 1965 + drm_free(object_list, sizeof(*object_list) * args->buffer_count, 1966 + DRM_MEM_DRIVER); 1967 + drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, 1968 + DRM_MEM_DRIVER); 1969 + 1970 + return ret; 1971 + } 1972 + 1973 + int 1974 + i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) 1975 + { 1976 + struct drm_device *dev = obj->dev; 1977 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1978 + int ret; 1979 + 1980 + i915_verify_inactive(dev, __FILE__, __LINE__); 1981 + if (obj_priv->gtt_space == NULL) { 1982 + ret = i915_gem_object_bind_to_gtt(obj, alignment); 1983 + if (ret != 0) { 1984 + DRM_ERROR("Failure to bind: %d", ret); 1985 + return ret; 1986 + } 1987 + } 1988 + obj_priv->pin_count++; 1989 + 1990 + /* If the object is not active and not pending a flush, 1991 + * remove it from the inactive list 1992 + */ 1993 + if (obj_priv->pin_count == 1) { 1994 + atomic_inc(&dev->pin_count); 1995 + atomic_add(obj->size, &dev->pin_memory); 1996 + if (!obj_priv->active && 1997 + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 1998 + I915_GEM_DOMAIN_GTT)) == 0 && 1999 + !list_empty(&obj_priv->list)) 2000 + list_del_init(&obj_priv->list); 2001 + } 2002 + i915_verify_inactive(dev, __FILE__, __LINE__); 2003 + 2004 + return 0; 2005 + } 2006 + 2007 + void 2008 + i915_gem_object_unpin(struct drm_gem_object *obj) 2009 + { 2010 + struct drm_device *dev = obj->dev; 2011 + drm_i915_private_t *dev_priv = dev->dev_private; 2012 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 2013 + 2014 + i915_verify_inactive(dev, __FILE__, __LINE__); 2015 + obj_priv->pin_count--; 2016 + BUG_ON(obj_priv->pin_count < 0); 2017 + BUG_ON(obj_priv->gtt_space == NULL); 2018 + 2019 + /* If the object is no longer pinned, and is 2020 + * neither active nor being flushed, then stick it on 2021 + * the inactive list 2022 + */ 2023 + if (obj_priv->pin_count == 0) { 2024 + if (!obj_priv->active && 2025 + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 2026 + I915_GEM_DOMAIN_GTT)) == 0) 2027 + list_move_tail(&obj_priv->list, 2028 + &dev_priv->mm.inactive_list); 2029 + atomic_dec(&dev->pin_count); 2030 + atomic_sub(obj->size, &dev->pin_memory); 2031 + } 2032 + 
i915_verify_inactive(dev, __FILE__, __LINE__); 2033 + } 2034 + 2035 + int 2036 + i915_gem_pin_ioctl(struct drm_device *dev, void *data, 2037 + struct drm_file *file_priv) 2038 + { 2039 + struct drm_i915_gem_pin *args = data; 2040 + struct drm_gem_object *obj; 2041 + struct drm_i915_gem_object *obj_priv; 2042 + int ret; 2043 + 2044 + mutex_lock(&dev->struct_mutex); 2045 + 2046 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 2047 + if (obj == NULL) { 2048 + DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", 2049 + args->handle); 2050 + mutex_unlock(&dev->struct_mutex); 2051 + return -EBADF; 2052 + } 2053 + obj_priv = obj->driver_private; 2054 + 2055 + ret = i915_gem_object_pin(obj, args->alignment); 2056 + if (ret != 0) { 2057 + drm_gem_object_unreference(obj); 2058 + mutex_unlock(&dev->struct_mutex); 2059 + return ret; 2060 + } 2061 + 2062 + /* XXX - flush the CPU caches for pinned objects 2063 + * as the X server doesn't manage domains yet 2064 + */ 2065 + if (obj->write_domain & I915_GEM_DOMAIN_CPU) { 2066 + i915_gem_clflush_object(obj); 2067 + drm_agp_chipset_flush(dev); 2068 + obj->write_domain = 0; 2069 + } 2070 + args->offset = obj_priv->gtt_offset; 2071 + drm_gem_object_unreference(obj); 2072 + mutex_unlock(&dev->struct_mutex); 2073 + 2074 + return 0; 2075 + } 2076 + 2077 + int 2078 + i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 2079 + struct drm_file *file_priv) 2080 + { 2081 + struct drm_i915_gem_pin *args = data; 2082 + struct drm_gem_object *obj; 2083 + 2084 + mutex_lock(&dev->struct_mutex); 2085 + 2086 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 2087 + if (obj == NULL) { 2088 + DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", 2089 + args->handle); 2090 + mutex_unlock(&dev->struct_mutex); 2091 + return -EBADF; 2092 + } 2093 + 2094 + i915_gem_object_unpin(obj); 2095 + 2096 + drm_gem_object_unreference(obj); 2097 + mutex_unlock(&dev->struct_mutex); 2098 + return 0; 2099 + } 2100 + 2101 + int 2102 + i915_gem_busy_ioctl(struct drm_device *dev, void *data, 2103 + struct drm_file *file_priv) 2104 + { 2105 + struct drm_i915_gem_busy *args = data; 2106 + struct drm_gem_object *obj; 2107 + struct drm_i915_gem_object *obj_priv; 2108 + 2109 + mutex_lock(&dev->struct_mutex); 2110 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 2111 + if (obj == NULL) { 2112 + DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", 2113 + args->handle); 2114 + mutex_unlock(&dev->struct_mutex); 2115 + return -EBADF; 2116 + } 2117 + 2118 + obj_priv = obj->driver_private; 2119 + args->busy = obj_priv->active; 2120 + 2121 + drm_gem_object_unreference(obj); 2122 + mutex_unlock(&dev->struct_mutex); 2123 + return 0; 2124 + } 2125 + 2126 + int 2127 + i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 2128 + struct drm_file *file_priv) 2129 + { 2130 + return i915_gem_ring_throttle(dev, file_priv); 2131 + } 2132 + 2133 + int i915_gem_init_object(struct drm_gem_object *obj) 2134 + { 2135 + struct drm_i915_gem_object *obj_priv; 2136 + 2137 + obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER); 2138 + if (obj_priv == NULL) 2139 + return -ENOMEM; 2140 + 2141 + /* 2142 + * We've just allocated pages from the kernel, 2143 + * so they've just been written by the CPU with 2144 + * zeros. They'll need to be clflushed before we 2145 + * use them with the GPU. 
2146 + */ 2147 + obj->write_domain = I915_GEM_DOMAIN_CPU; 2148 + obj->read_domains = I915_GEM_DOMAIN_CPU; 2149 + 2150 + obj_priv->agp_type = AGP_USER_MEMORY; 2151 + 2152 + obj->driver_private = obj_priv; 2153 + obj_priv->obj = obj; 2154 + INIT_LIST_HEAD(&obj_priv->list); 2155 + return 0; 2156 + } 2157 + 2158 + void i915_gem_free_object(struct drm_gem_object *obj) 2159 + { 2160 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 2161 + 2162 + while (obj_priv->pin_count > 0) 2163 + i915_gem_object_unpin(obj); 2164 + 2165 + i915_gem_object_unbind(obj); 2166 + 2167 + drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); 2168 + drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 2169 + } 2170 + 2171 + static int 2172 + i915_gem_set_domain(struct drm_gem_object *obj, 2173 + struct drm_file *file_priv, 2174 + uint32_t read_domains, 2175 + uint32_t write_domain) 2176 + { 2177 + struct drm_device *dev = obj->dev; 2178 + int ret; 2179 + uint32_t flush_domains; 2180 + 2181 + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 2182 + 2183 + ret = i915_gem_object_set_domain(obj, read_domains, write_domain); 2184 + if (ret) 2185 + return ret; 2186 + flush_domains = i915_gem_dev_set_domain(obj->dev); 2187 + 2188 + if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) 2189 + (void) i915_add_request(dev, flush_domains); 2190 + 2191 + return 0; 2192 + } 2193 + 2194 + /** Unbinds all objects that are on the given buffer list. */ 2195 + static int 2196 + i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) 2197 + { 2198 + struct drm_gem_object *obj; 2199 + struct drm_i915_gem_object *obj_priv; 2200 + int ret; 2201 + 2202 + while (!list_empty(head)) { 2203 + obj_priv = list_first_entry(head, 2204 + struct drm_i915_gem_object, 2205 + list); 2206 + obj = obj_priv->obj; 2207 + 2208 + if (obj_priv->pin_count != 0) { 2209 + DRM_ERROR("Pinned object in unbind list\n"); 2210 + mutex_unlock(&dev->struct_mutex); 2211 + return -EINVAL; 2212 + } 2213 + 2214 + ret = i915_gem_object_unbind(obj); 2215 + if (ret != 0) { 2216 + DRM_ERROR("Error unbinding object in LeaveVT: %d\n", 2217 + ret); 2218 + mutex_unlock(&dev->struct_mutex); 2219 + return ret; 2220 + } 2221 + } 2222 + 2223 + 2224 + return 0; 2225 + } 2226 + 2227 + static int 2228 + i915_gem_idle(struct drm_device *dev) 2229 + { 2230 + drm_i915_private_t *dev_priv = dev->dev_private; 2231 + uint32_t seqno, cur_seqno, last_seqno; 2232 + int stuck, ret; 2233 + 2234 + mutex_lock(&dev->struct_mutex); 2235 + 2236 + if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) { 2237 + mutex_unlock(&dev->struct_mutex); 2238 + return 0; 2239 + } 2240 + 2241 + /* Hack! Don't let anybody do execbuf while we don't control the chip. 2242 + * We need to replace this with a semaphore, or something. 
2243 + */ 2244 + dev_priv->mm.suspended = 1; 2245 + 2246 + /* Cancel the retire work handler, wait for it to finish if running 2247 + */ 2248 + mutex_unlock(&dev->struct_mutex); 2249 + cancel_delayed_work_sync(&dev_priv->mm.retire_work); 2250 + mutex_lock(&dev->struct_mutex); 2251 + 2252 + i915_kernel_lost_context(dev); 2253 + 2254 + /* Flush the GPU along with all non-CPU write domains 2255 + */ 2256 + i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), 2257 + ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 2258 + seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU | 2259 + I915_GEM_DOMAIN_GTT)); 2260 + 2261 + if (seqno == 0) { 2262 + mutex_unlock(&dev->struct_mutex); 2263 + return -ENOMEM; 2264 + } 2265 + 2266 + dev_priv->mm.waiting_gem_seqno = seqno; 2267 + last_seqno = 0; 2268 + stuck = 0; 2269 + for (;;) { 2270 + cur_seqno = i915_get_gem_seqno(dev); 2271 + if (i915_seqno_passed(cur_seqno, seqno)) 2272 + break; 2273 + if (last_seqno == cur_seqno) { 2274 + if (stuck++ > 100) { 2275 + DRM_ERROR("hardware wedged\n"); 2276 + dev_priv->mm.wedged = 1; 2277 + DRM_WAKEUP(&dev_priv->irq_queue); 2278 + break; 2279 + } 2280 + } 2281 + msleep(10); 2282 + last_seqno = cur_seqno; 2283 + } 2284 + dev_priv->mm.waiting_gem_seqno = 0; 2285 + 2286 + i915_gem_retire_requests(dev); 2287 + 2288 + /* Active and flushing should now be empty as we've 2289 + * waited for a sequence higher than any pending execbuffer 2290 + */ 2291 + BUG_ON(!list_empty(&dev_priv->mm.active_list)); 2292 + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 2293 + 2294 + /* Request should now be empty as we've also waited 2295 + * for the last request in the list 2296 + */ 2297 + BUG_ON(!list_empty(&dev_priv->mm.request_list)); 2298 + 2299 + /* Move all buffers out of the GTT. */ 2300 + ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); 2301 + if (ret) { 2302 + mutex_unlock(&dev->struct_mutex); 2303 + return ret; 2304 + } 2305 + 2306 + BUG_ON(!list_empty(&dev_priv->mm.active_list)); 2307 + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 2308 + BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 2309 + BUG_ON(!list_empty(&dev_priv->mm.request_list)); 2310 + 2311 + i915_gem_cleanup_ringbuffer(dev); 2312 + mutex_unlock(&dev->struct_mutex); 2313 + 2314 + return 0; 2315 + } 2316 + 2317 + static int 2318 + i915_gem_init_hws(struct drm_device *dev) 2319 + { 2320 + drm_i915_private_t *dev_priv = dev->dev_private; 2321 + struct drm_gem_object *obj; 2322 + struct drm_i915_gem_object *obj_priv; 2323 + int ret; 2324 + 2325 + /* If we need a physical address for the status page, it's already 2326 + * initialized at driver load time. 
2327 + */ 2328 + if (!I915_NEED_GFX_HWS(dev)) 2329 + return 0; 2330 + 2331 + obj = drm_gem_object_alloc(dev, 4096); 2332 + if (obj == NULL) { 2333 + DRM_ERROR("Failed to allocate status page\n"); 2334 + return -ENOMEM; 2335 + } 2336 + obj_priv = obj->driver_private; 2337 + obj_priv->agp_type = AGP_USER_CACHED_MEMORY; 2338 + 2339 + ret = i915_gem_object_pin(obj, 4096); 2340 + if (ret != 0) { 2341 + drm_gem_object_unreference(obj); 2342 + return ret; 2343 + } 2344 + 2345 + dev_priv->status_gfx_addr = obj_priv->gtt_offset; 2346 + 2347 + dev_priv->hw_status_page = kmap(obj_priv->page_list[0]); 2348 + if (dev_priv->hw_status_page == NULL) { 2349 + DRM_ERROR("Failed to map status page.\n"); 2350 + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 2351 + drm_gem_object_unreference(obj); 2352 + return -EINVAL; 2353 + } 2354 + dev_priv->hws_obj = obj; 2355 + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 2356 + I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 2357 + I915_READ(HWS_PGA); /* posting read */ 2358 + DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 2359 + 2360 + return 0; 2361 + } 2362 + 2363 + static int 2364 + i915_gem_init_ringbuffer(struct drm_device *dev) 2365 + { 2366 + drm_i915_private_t *dev_priv = dev->dev_private; 2367 + struct drm_gem_object *obj; 2368 + struct drm_i915_gem_object *obj_priv; 2369 + int ret; 2370 + u32 head; 2371 + 2372 + ret = i915_gem_init_hws(dev); 2373 + if (ret != 0) 2374 + return ret; 2375 + 2376 + obj = drm_gem_object_alloc(dev, 128 * 1024); 2377 + if (obj == NULL) { 2378 + DRM_ERROR("Failed to allocate ringbuffer\n"); 2379 + return -ENOMEM; 2380 + } 2381 + obj_priv = obj->driver_private; 2382 + 2383 + ret = i915_gem_object_pin(obj, 4096); 2384 + if (ret != 0) { 2385 + drm_gem_object_unreference(obj); 2386 + return ret; 2387 + } 2388 + 2389 + /* Set up the kernel mapping for the ring. */ 2390 + dev_priv->ring.Size = obj->size; 2391 + dev_priv->ring.tail_mask = obj->size - 1; 2392 + 2393 + dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset; 2394 + dev_priv->ring.map.size = obj->size; 2395 + dev_priv->ring.map.type = 0; 2396 + dev_priv->ring.map.flags = 0; 2397 + dev_priv->ring.map.mtrr = 0; 2398 + 2399 + drm_core_ioremap_wc(&dev_priv->ring.map, dev); 2400 + if (dev_priv->ring.map.handle == NULL) { 2401 + DRM_ERROR("Failed to map ringbuffer.\n"); 2402 + memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); 2403 + drm_gem_object_unreference(obj); 2404 + return -EINVAL; 2405 + } 2406 + dev_priv->ring.ring_obj = obj; 2407 + dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 2408 + 2409 + /* Stop the ring if it's running. */ 2410 + I915_WRITE(PRB0_CTL, 0); 2411 + I915_WRITE(PRB0_TAIL, 0); 2412 + I915_WRITE(PRB0_HEAD, 0); 2413 + 2414 + /* Initialize the ring. 
*/ 2415 + I915_WRITE(PRB0_START, obj_priv->gtt_offset); 2416 + head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 2417 + 2418 + /* G45 ring initialization fails to reset head to zero */ 2419 + if (head != 0) { 2420 + DRM_ERROR("Ring head not reset to zero " 2421 + "ctl %08x head %08x tail %08x start %08x\n", 2422 + I915_READ(PRB0_CTL), 2423 + I915_READ(PRB0_HEAD), 2424 + I915_READ(PRB0_TAIL), 2425 + I915_READ(PRB0_START)); 2426 + I915_WRITE(PRB0_HEAD, 0); 2427 + 2428 + DRM_ERROR("Ring head forced to zero " 2429 + "ctl %08x head %08x tail %08x start %08x\n", 2430 + I915_READ(PRB0_CTL), 2431 + I915_READ(PRB0_HEAD), 2432 + I915_READ(PRB0_TAIL), 2433 + I915_READ(PRB0_START)); 2434 + } 2435 + 2436 + I915_WRITE(PRB0_CTL, 2437 + ((obj->size - 4096) & RING_NR_PAGES) | 2438 + RING_NO_REPORT | 2439 + RING_VALID); 2440 + 2441 + head = I915_READ(PRB0_HEAD) & HEAD_ADDR; 2442 + 2443 + /* If the head is still not zero, the ring is dead */ 2444 + if (head != 0) { 2445 + DRM_ERROR("Ring initialization failed " 2446 + "ctl %08x head %08x tail %08x start %08x\n", 2447 + I915_READ(PRB0_CTL), 2448 + I915_READ(PRB0_HEAD), 2449 + I915_READ(PRB0_TAIL), 2450 + I915_READ(PRB0_START)); 2451 + return -EIO; 2452 + } 2453 + 2454 + /* Update our cache of the ring state */ 2455 + i915_kernel_lost_context(dev); 2456 + 2457 + return 0; 2458 + } 2459 + 2460 + static void 2461 + i915_gem_cleanup_ringbuffer(struct drm_device *dev) 2462 + { 2463 + drm_i915_private_t *dev_priv = dev->dev_private; 2464 + 2465 + if (dev_priv->ring.ring_obj == NULL) 2466 + return; 2467 + 2468 + drm_core_ioremapfree(&dev_priv->ring.map, dev); 2469 + 2470 + i915_gem_object_unpin(dev_priv->ring.ring_obj); 2471 + drm_gem_object_unreference(dev_priv->ring.ring_obj); 2472 + dev_priv->ring.ring_obj = NULL; 2473 + memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); 2474 + 2475 + if (dev_priv->hws_obj != NULL) { 2476 + struct drm_gem_object *obj = dev_priv->hws_obj; 2477 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 2478 + 2479 + kunmap(obj_priv->page_list[0]); 2480 + i915_gem_object_unpin(obj); 2481 + drm_gem_object_unreference(obj); 2482 + dev_priv->hws_obj = NULL; 2483 + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 2484 + dev_priv->hw_status_page = NULL; 2485 + 2486 + /* Write high address into HWS_PGA when disabling. 
*/ 2487 + I915_WRITE(HWS_PGA, 0x1ffff000); 2488 + } 2489 + } 2490 + 2491 + int 2492 + i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 2493 + struct drm_file *file_priv) 2494 + { 2495 + drm_i915_private_t *dev_priv = dev->dev_private; 2496 + int ret; 2497 + 2498 + if (dev_priv->mm.wedged) { 2499 + DRM_ERROR("Reenabling wedged hardware, good luck\n"); 2500 + dev_priv->mm.wedged = 0; 2501 + } 2502 + 2503 + ret = i915_gem_init_ringbuffer(dev); 2504 + if (ret != 0) 2505 + return ret; 2506 + 2507 + mutex_lock(&dev->struct_mutex); 2508 + BUG_ON(!list_empty(&dev_priv->mm.active_list)); 2509 + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 2510 + BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 2511 + BUG_ON(!list_empty(&dev_priv->mm.request_list)); 2512 + dev_priv->mm.suspended = 0; 2513 + mutex_unlock(&dev->struct_mutex); 2514 + 2515 + drm_irq_install(dev); 2516 + 2517 + return 0; 2518 + } 2519 + 2520 + int 2521 + i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 2522 + struct drm_file *file_priv) 2523 + { 2524 + int ret; 2525 + 2526 + ret = i915_gem_idle(dev); 2527 + drm_irq_uninstall(dev); 2528 + 2529 + return ret; 2530 + } 2531 + 2532 + void 2533 + i915_gem_lastclose(struct drm_device *dev) 2534 + { 2535 + int ret; 2536 + 2537 + ret = i915_gem_idle(dev); 2538 + if (ret) 2539 + DRM_ERROR("failed to idle hardware: %d\n", ret); 2540 + } 2541 + 2542 + void 2543 + i915_gem_load(struct drm_device *dev) 2544 + { 2545 + drm_i915_private_t *dev_priv = dev->dev_private; 2546 + 2547 + INIT_LIST_HEAD(&dev_priv->mm.active_list); 2548 + INIT_LIST_HEAD(&dev_priv->mm.flushing_list); 2549 + INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 2550 + INIT_LIST_HEAD(&dev_priv->mm.request_list); 2551 + INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 2552 + i915_gem_retire_work_handler); 2553 + INIT_WORK(&dev_priv->mm.vblank_work, 2554 + i915_gem_vblank_work_handler); 2555 + dev_priv->mm.next_gem_seqno = 1; 2556 + 2557 + i915_gem_detect_bit_6_swizzle(dev); 2558 + }
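The i915_gem.c additions above lean on two ideas that are easy to check in isolation. First, request retirement only works across 32-bit wraparound because i915_seqno_passed() compares sequence numbers by signed subtraction, which stays correct as long as outstanding requests are within 2^31 of each other. A minimal standalone sketch of that comparison (plain userspace C with illustrative values, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* Same comparison as i915_seqno_passed(): "seq1 is at or after seq2". */
    static int seqno_passed(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) >= 0;
    }

    int main(void)
    {
            assert(seqno_passed(7, 7));           /* equal counts as passed */
            assert(seqno_passed(8, 7));           /* later */
            assert(!seqno_passed(7, 8));          /* not yet reached */
            assert(seqno_passed(3, 0xfffffff0u)); /* still correct across wraparound */
            return 0;
    }

Second, the long set_domain comment (Cases 1-3) reduces to two mask computations: flush the old write domain whenever the new readers differ from it, and invalidate any read domains that were not already valid. The sketch below isolates just that arithmetic with made-up domain bits; the real i915_gem_object_set_domain() additionally emits clflush/MI_FLUSH and tracks per-page CPU validity:

    #include <assert.h>
    #include <stdint.h>

    /* Illustrative domain bits only; the real I915_GEM_DOMAIN_* values differ. */
    #define DOM_CPU    0x1
    #define DOM_RENDER 0x2

    static void set_domain_masks(uint32_t old_read, uint32_t old_write,
                                 uint32_t new_read, uint32_t new_write,
                                 uint32_t *invalidate, uint32_t *flush)
    {
            if (new_write == 0)
                    new_read |= old_read;   /* keep existing readers valid */

            /* Flush the old write domain if the new readers differ from it,
             * and invalidate whatever read caches were not already valid. */
            if (old_write && old_write != new_read) {
                    *flush |= old_write;
                    *invalidate |= new_read & ~old_write;
            }
            *invalidate |= new_read & ~old_read;
    }

    int main(void)
    {
            uint32_t inv, flush;

            /* "Case 3" step 3: CPU-written buffer read by the render engine. */
            inv = flush = 0;
            set_domain_masks(DOM_CPU, DOM_CPU, DOM_RENDER, 0, &inv, &flush);
            assert(flush == DOM_CPU && inv == DOM_RENDER);

            /* "Case 2" step 4: GPU-rendered buffer handed back to the CPU. */
            inv = flush = 0;
            set_domain_masks(DOM_RENDER, DOM_RENDER, DOM_CPU, DOM_CPU, &inv, &flush);
            assert(flush == DOM_RENDER && inv == DOM_CPU);
            return 0;
    }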
+201
drivers/gpu/drm/i915/i915_gem_debug.c
···
··· 1 + /* 2 + * Copyright © 2008 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Keith Packard <keithp@keithp.com> 25 + * 26 + */ 27 + 28 + #include "drmP.h" 29 + #include "drm.h" 30 + #include "i915_drm.h" 31 + #include "i915_drv.h" 32 + 33 + #if WATCH_INACTIVE 34 + void 35 + i915_verify_inactive(struct drm_device *dev, char *file, int line) 36 + { 37 + drm_i915_private_t *dev_priv = dev->dev_private; 38 + struct drm_gem_object *obj; 39 + struct drm_i915_gem_object *obj_priv; 40 + 41 + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { 42 + obj = obj_priv->obj; 43 + if (obj_priv->pin_count || obj_priv->active || 44 + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 45 + I915_GEM_DOMAIN_GTT))) 46 + DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", 47 + obj, 48 + obj_priv->pin_count, obj_priv->active, 49 + obj->write_domain, file, line); 50 + } 51 + } 52 + #endif /* WATCH_INACTIVE */ 53 + 54 + 55 + #if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE 56 + static void 57 + i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, 58 + uint32_t bias, uint32_t mark) 59 + { 60 + uint32_t *mem = kmap_atomic(page, KM_USER0); 61 + int i; 62 + for (i = start; i < end; i += 4) 63 + DRM_INFO("%08x: %08x%s\n", 64 + (int) (bias + i), mem[i / 4], 65 + (bias + i == mark) ? 
" ********" : ""); 66 + kunmap_atomic(mem, KM_USER0); 67 + /* give syslog time to catch up */ 68 + msleep(1); 69 + } 70 + 71 + void 72 + i915_gem_dump_object(struct drm_gem_object *obj, int len, 73 + const char *where, uint32_t mark) 74 + { 75 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 76 + int page; 77 + 78 + DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); 79 + for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { 80 + int page_len, chunk, chunk_len; 81 + 82 + page_len = len - page * PAGE_SIZE; 83 + if (page_len > PAGE_SIZE) 84 + page_len = PAGE_SIZE; 85 + 86 + for (chunk = 0; chunk < page_len; chunk += 128) { 87 + chunk_len = page_len - chunk; 88 + if (chunk_len > 128) 89 + chunk_len = 128; 90 + i915_gem_dump_page(obj_priv->page_list[page], 91 + chunk, chunk + chunk_len, 92 + obj_priv->gtt_offset + 93 + page * PAGE_SIZE, 94 + mark); 95 + } 96 + } 97 + } 98 + #endif 99 + 100 + #if WATCH_LRU 101 + void 102 + i915_dump_lru(struct drm_device *dev, const char *where) 103 + { 104 + drm_i915_private_t *dev_priv = dev->dev_private; 105 + struct drm_i915_gem_object *obj_priv; 106 + 107 + DRM_INFO("active list %s {\n", where); 108 + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, 109 + list) 110 + { 111 + DRM_INFO(" %p: %08x\n", obj_priv, 112 + obj_priv->last_rendering_seqno); 113 + } 114 + DRM_INFO("}\n"); 115 + DRM_INFO("flushing list %s {\n", where); 116 + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, 117 + list) 118 + { 119 + DRM_INFO(" %p: %08x\n", obj_priv, 120 + obj_priv->last_rendering_seqno); 121 + } 122 + DRM_INFO("}\n"); 123 + DRM_INFO("inactive %s {\n", where); 124 + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { 125 + DRM_INFO(" %p: %08x\n", obj_priv, 126 + obj_priv->last_rendering_seqno); 127 + } 128 + DRM_INFO("}\n"); 129 + } 130 + #endif 131 + 132 + 133 + #if WATCH_COHERENCY 134 + void 135 + i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) 136 + { 137 + struct drm_device *dev = obj->dev; 138 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 139 + int page; 140 + uint32_t *gtt_mapping; 141 + uint32_t *backing_map = NULL; 142 + int bad_count = 0; 143 + 144 + DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", 145 + __func__, obj, obj_priv->gtt_offset, handle, 146 + obj->size / 1024); 147 + 148 + gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, 149 + obj->size); 150 + if (gtt_mapping == NULL) { 151 + DRM_ERROR("failed to map GTT space\n"); 152 + return; 153 + } 154 + 155 + for (page = 0; page < obj->size / PAGE_SIZE; page++) { 156 + int i; 157 + 158 + backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); 159 + 160 + if (backing_map == NULL) { 161 + DRM_ERROR("failed to map backing page\n"); 162 + goto out; 163 + } 164 + 165 + for (i = 0; i < PAGE_SIZE / 4; i++) { 166 + uint32_t cpuval = backing_map[i]; 167 + uint32_t gttval = readl(gtt_mapping + 168 + page * 1024 + i); 169 + 170 + if (cpuval != gttval) { 171 + DRM_INFO("incoherent CPU vs GPU at 0x%08x: " 172 + "0x%08x vs 0x%08x\n", 173 + (int)(obj_priv->gtt_offset + 174 + page * PAGE_SIZE + i * 4), 175 + cpuval, gttval); 176 + if (bad_count++ >= 8) { 177 + DRM_INFO("...\n"); 178 + goto out; 179 + } 180 + } 181 + } 182 + kunmap_atomic(backing_map, KM_USER0); 183 + backing_map = NULL; 184 + } 185 + 186 + out: 187 + if (backing_map != NULL) 188 + kunmap_atomic(backing_map, KM_USER0); 189 + iounmap(gtt_mapping); 190 + 191 + /* give syslog time to catch up */ 192 + 
msleep(1); 193 + 194 + /* Directly flush the object, since we just loaded values with the CPU 195 + * from the backing pages and we don't want to disturb the cache 196 + * management that we're trying to observe. 197 + */ 198 + 199 + i915_gem_clflush_object(obj); 200 + } 201 + #endif
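Not part of the merged diff: a user-space sketch of the index arithmetic that i915_gem_object_check_coherency() relies on. With 4 KiB pages there are 1024 32-bit words per page, so word i of page p sits at byte offset gtt_offset + p * PAGE_SIZE + i * 4 and at dword index p * 1024 + i into the ioremap()ed aperture mapping; these are the two views the check compares. The constants below are purely illustrative.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t gtt_offset = 0x00100000;	/* hypothetical object offset in the aperture */
	int page = 3, i = 7;			/* arbitrary dword within the object */

	/* address reported by the DRM_INFO() in the coherency check */
	printf("byte offset: 0x%08x\n", gtt_offset + page * 4096 + i * 4);
	/* index into the uint32_t *gtt_mapping returned by ioremap() */
	printf("dword index: %d\n", page * 1024 + i);
	return 0;
}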
+292
drivers/gpu/drm/i915/i915_gem_proc.c
···
··· 1 + /* 2 + * Copyright © 2008 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Eric Anholt <eric@anholt.net> 25 + * Keith Packard <keithp@keithp.com> 26 + * 27 + */ 28 + 29 + #include "drmP.h" 30 + #include "drm.h" 31 + #include "i915_drm.h" 32 + #include "i915_drv.h" 33 + 34 + static int i915_gem_active_info(char *buf, char **start, off_t offset, 35 + int request, int *eof, void *data) 36 + { 37 + struct drm_minor *minor = (struct drm_minor *) data; 38 + struct drm_device *dev = minor->dev; 39 + drm_i915_private_t *dev_priv = dev->dev_private; 40 + struct drm_i915_gem_object *obj_priv; 41 + int len = 0; 42 + 43 + if (offset > DRM_PROC_LIMIT) { 44 + *eof = 1; 45 + return 0; 46 + } 47 + 48 + *start = &buf[offset]; 49 + *eof = 0; 50 + DRM_PROC_PRINT("Active:\n"); 51 + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, 52 + list) 53 + { 54 + struct drm_gem_object *obj = obj_priv->obj; 55 + if (obj->name) { 56 + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", 57 + obj, obj->name, 58 + obj->read_domains, obj->write_domain, 59 + obj_priv->last_rendering_seqno); 60 + } else { 61 + DRM_PROC_PRINT(" %p: %08x %08x %d\n", 62 + obj, 63 + obj->read_domains, obj->write_domain, 64 + obj_priv->last_rendering_seqno); 65 + } 66 + } 67 + if (len > request + offset) 68 + return request; 69 + *eof = 1; 70 + return len - offset; 71 + } 72 + 73 + static int i915_gem_flushing_info(char *buf, char **start, off_t offset, 74 + int request, int *eof, void *data) 75 + { 76 + struct drm_minor *minor = (struct drm_minor *) data; 77 + struct drm_device *dev = minor->dev; 78 + drm_i915_private_t *dev_priv = dev->dev_private; 79 + struct drm_i915_gem_object *obj_priv; 80 + int len = 0; 81 + 82 + if (offset > DRM_PROC_LIMIT) { 83 + *eof = 1; 84 + return 0; 85 + } 86 + 87 + *start = &buf[offset]; 88 + *eof = 0; 89 + DRM_PROC_PRINT("Flushing:\n"); 90 + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, 91 + list) 92 + { 93 + struct drm_gem_object *obj = obj_priv->obj; 94 + if (obj->name) { 95 + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", 96 + obj, obj->name, 97 + obj->read_domains, obj->write_domain, 98 + obj_priv->last_rendering_seqno); 99 + } else { 100 + DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, 101 + obj->read_domains, obj->write_domain, 102 + obj_priv->last_rendering_seqno); 103 + } 104 + } 105 + if (len > request + offset) 106 + return request; 107 + *eof 
= 1; 108 + return len - offset; 109 + } 110 + 111 + static int i915_gem_inactive_info(char *buf, char **start, off_t offset, 112 + int request, int *eof, void *data) 113 + { 114 + struct drm_minor *minor = (struct drm_minor *) data; 115 + struct drm_device *dev = minor->dev; 116 + drm_i915_private_t *dev_priv = dev->dev_private; 117 + struct drm_i915_gem_object *obj_priv; 118 + int len = 0; 119 + 120 + if (offset > DRM_PROC_LIMIT) { 121 + *eof = 1; 122 + return 0; 123 + } 124 + 125 + *start = &buf[offset]; 126 + *eof = 0; 127 + DRM_PROC_PRINT("Inactive:\n"); 128 + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, 129 + list) 130 + { 131 + struct drm_gem_object *obj = obj_priv->obj; 132 + if (obj->name) { 133 + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", 134 + obj, obj->name, 135 + obj->read_domains, obj->write_domain, 136 + obj_priv->last_rendering_seqno); 137 + } else { 138 + DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, 139 + obj->read_domains, obj->write_domain, 140 + obj_priv->last_rendering_seqno); 141 + } 142 + } 143 + if (len > request + offset) 144 + return request; 145 + *eof = 1; 146 + return len - offset; 147 + } 148 + 149 + static int i915_gem_request_info(char *buf, char **start, off_t offset, 150 + int request, int *eof, void *data) 151 + { 152 + struct drm_minor *minor = (struct drm_minor *) data; 153 + struct drm_device *dev = minor->dev; 154 + drm_i915_private_t *dev_priv = dev->dev_private; 155 + struct drm_i915_gem_request *gem_request; 156 + int len = 0; 157 + 158 + if (offset > DRM_PROC_LIMIT) { 159 + *eof = 1; 160 + return 0; 161 + } 162 + 163 + *start = &buf[offset]; 164 + *eof = 0; 165 + DRM_PROC_PRINT("Request:\n"); 166 + list_for_each_entry(gem_request, &dev_priv->mm.request_list, 167 + list) 168 + { 169 + DRM_PROC_PRINT(" %d @ %d %08x\n", 170 + gem_request->seqno, 171 + (int) (jiffies - gem_request->emitted_jiffies), 172 + gem_request->flush_domains); 173 + } 174 + if (len > request + offset) 175 + return request; 176 + *eof = 1; 177 + return len - offset; 178 + } 179 + 180 + static int i915_gem_seqno_info(char *buf, char **start, off_t offset, 181 + int request, int *eof, void *data) 182 + { 183 + struct drm_minor *minor = (struct drm_minor *) data; 184 + struct drm_device *dev = minor->dev; 185 + drm_i915_private_t *dev_priv = dev->dev_private; 186 + int len = 0; 187 + 188 + if (offset > DRM_PROC_LIMIT) { 189 + *eof = 1; 190 + return 0; 191 + } 192 + 193 + *start = &buf[offset]; 194 + *eof = 0; 195 + DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev)); 196 + DRM_PROC_PRINT("Waiter sequence: %d\n", 197 + dev_priv->mm.waiting_gem_seqno); 198 + DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); 199 + if (len > request + offset) 200 + return request; 201 + *eof = 1; 202 + return len - offset; 203 + } 204 + 205 + 206 + static int i915_interrupt_info(char *buf, char **start, off_t offset, 207 + int request, int *eof, void *data) 208 + { 209 + struct drm_minor *minor = (struct drm_minor *) data; 210 + struct drm_device *dev = minor->dev; 211 + drm_i915_private_t *dev_priv = dev->dev_private; 212 + int len = 0; 213 + 214 + if (offset > DRM_PROC_LIMIT) { 215 + *eof = 1; 216 + return 0; 217 + } 218 + 219 + *start = &buf[offset]; 220 + *eof = 0; 221 + DRM_PROC_PRINT("Interrupt enable: %08x\n", 222 + I915_READ(IER)); 223 + DRM_PROC_PRINT("Interrupt identity: %08x\n", 224 + I915_READ(IIR)); 225 + DRM_PROC_PRINT("Interrupt mask: %08x\n", 226 + I915_READ(IMR)); 227 + DRM_PROC_PRINT("Pipe A stat: %08x\n", 228 + I915_READ(PIPEASTAT)); 229 + 
DRM_PROC_PRINT("Pipe B stat: %08x\n", 230 + I915_READ(PIPEBSTAT)); 231 + DRM_PROC_PRINT("Interrupts received: %d\n", 232 + atomic_read(&dev_priv->irq_received)); 233 + DRM_PROC_PRINT("Current sequence: %d\n", 234 + i915_get_gem_seqno(dev)); 235 + DRM_PROC_PRINT("Waiter sequence: %d\n", 236 + dev_priv->mm.waiting_gem_seqno); 237 + DRM_PROC_PRINT("IRQ sequence: %d\n", 238 + dev_priv->mm.irq_gem_seqno); 239 + if (len > request + offset) 240 + return request; 241 + *eof = 1; 242 + return len - offset; 243 + } 244 + 245 + static struct drm_proc_list { 246 + /** file name */ 247 + const char *name; 248 + /** proc callback*/ 249 + int (*f) (char *, char **, off_t, int, int *, void *); 250 + } i915_gem_proc_list[] = { 251 + {"i915_gem_active", i915_gem_active_info}, 252 + {"i915_gem_flushing", i915_gem_flushing_info}, 253 + {"i915_gem_inactive", i915_gem_inactive_info}, 254 + {"i915_gem_request", i915_gem_request_info}, 255 + {"i915_gem_seqno", i915_gem_seqno_info}, 256 + {"i915_gem_interrupt", i915_interrupt_info}, 257 + }; 258 + 259 + #define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) 260 + 261 + int i915_gem_proc_init(struct drm_minor *minor) 262 + { 263 + struct proc_dir_entry *ent; 264 + int i, j; 265 + 266 + for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) { 267 + ent = create_proc_entry(i915_gem_proc_list[i].name, 268 + S_IFREG | S_IRUGO, minor->dev_root); 269 + if (!ent) { 270 + DRM_ERROR("Cannot create /proc/dri/.../%s\n", 271 + i915_gem_proc_list[i].name); 272 + for (j = 0; j < i; j++) 273 + remove_proc_entry(i915_gem_proc_list[i].name, 274 + minor->dev_root); 275 + return -1; 276 + } 277 + ent->read_proc = i915_gem_proc_list[i].f; 278 + ent->data = minor; 279 + } 280 + return 0; 281 + } 282 + 283 + void i915_gem_proc_cleanup(struct drm_minor *minor) 284 + { 285 + int i; 286 + 287 + if (!minor->dev_root) 288 + return; 289 + 290 + for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) 291 + remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root); 292 + }
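Not part of the merged diff: the new entries are created under the per-minor DRM proc directory, so they can be read like any other text file once the driver is loaded. A minimal reader; the minor number 0 in the path is an assumption and depends on which DRM node the i915 device was bound to.

#include <stdio.h>

int main(void)
{
	char line[256];
	/* path follows the "/proc/dri/.../<name>" pattern used in the error message above */
	FILE *f = fopen("/proc/dri/0/i915_gem_active", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}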
+257
drivers/gpu/drm/i915/i915_gem_tiling.c
···
··· 1 + /* 2 + * Copyright © 2008 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Eric Anholt <eric@anholt.net> 25 + * 26 + */ 27 + 28 + #include "drmP.h" 29 + #include "drm.h" 30 + #include "i915_drm.h" 31 + #include "i915_drv.h" 32 + 33 + /** @file i915_gem_tiling.c 34 + * 35 + * Support for managing tiling state of buffer objects. 36 + * 37 + * The idea behind tiling is to increase cache hit rates by rearranging 38 + * pixel data so that a group of pixel accesses are in the same cacheline. 39 + * Performance improvement from doing this on the back/depth buffer are on 40 + * the order of 30%. 41 + * 42 + * Intel architectures make this somewhat more complicated, though, by 43 + * adjustments made to addressing of data when the memory is in interleaved 44 + * mode (matched pairs of DIMMS) to improve memory bandwidth. 45 + * For interleaved memory, the CPU sends every sequential 64 bytes 46 + * to an alternate memory channel so it can get the bandwidth from both. 47 + * 48 + * The GPU also rearranges its accesses for increased bandwidth to interleaved 49 + * memory, and it matches what the CPU does for non-tiled. However, when tiled 50 + * it does it a little differently, since one walks addresses not just in the 51 + * X direction but also Y. So, along with alternating channels when bit 52 + * 6 of the address flips, it also alternates when other bits flip -- Bits 9 53 + * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines) 54 + * are common to both the 915 and 965-class hardware. 55 + * 56 + * The CPU also sometimes XORs in higher bits as well, to improve 57 + * bandwidth doing strided access like we do so frequently in graphics. This 58 + * is called "Channel XOR Randomization" in the MCH documentation. The result 59 + * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address 60 + * decode. 61 + * 62 + * All of this bit 6 XORing has an effect on our memory management, 63 + * as we need to make sure that the 3d driver can correctly address object 64 + * contents. 65 + * 66 + * If we don't have interleaved memory, all tiling is safe and no swizzling is 67 + * required. 68 + * 69 + * When bit 17 is XORed in, we simply refuse to tile at all. 
Bit 70 + * 17 is not just a page offset, so as we page an objet out and back in, 71 + * individual pages in it will have different bit 17 addresses, resulting in 72 + * each 64 bytes being swapped with its neighbor! 73 + * 74 + * Otherwise, if interleaved, we have to tell the 3d driver what the address 75 + * swizzling it needs to do is, since it's writing with the CPU to the pages 76 + * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the 77 + * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling 78 + * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order 79 + * to match what the GPU expects. 80 + */ 81 + 82 + /** 83 + * Detects bit 6 swizzling of address lookup between IGD access and CPU 84 + * access through main memory. 85 + */ 86 + void 87 + i915_gem_detect_bit_6_swizzle(struct drm_device *dev) 88 + { 89 + drm_i915_private_t *dev_priv = dev->dev_private; 90 + uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 91 + uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 92 + 93 + if (!IS_I9XX(dev)) { 94 + /* As far as we know, the 865 doesn't have these bit 6 95 + * swizzling issues. 96 + */ 97 + swizzle_x = I915_BIT_6_SWIZZLE_NONE; 98 + swizzle_y = I915_BIT_6_SWIZZLE_NONE; 99 + } else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) || 100 + IS_GM45(dev)) { 101 + uint32_t dcc; 102 + 103 + /* On 915-945 and GM965, channel interleave by the CPU is 104 + * determined by DCC. The CPU will alternate based on bit 6 105 + * in interleaved mode, and the GPU will then also alternate 106 + * on bit 6, 9, and 10 for X, but the CPU may also optionally 107 + * alternate based on bit 17 (XOR not disabled and XOR 108 + * bit == 17). 109 + */ 110 + dcc = I915_READ(DCC); 111 + switch (dcc & DCC_ADDRESSING_MODE_MASK) { 112 + case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: 113 + case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: 114 + swizzle_x = I915_BIT_6_SWIZZLE_NONE; 115 + swizzle_y = I915_BIT_6_SWIZZLE_NONE; 116 + break; 117 + case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: 118 + if (IS_I915G(dev) || IS_I915GM(dev) || 119 + dcc & DCC_CHANNEL_XOR_DISABLE) { 120 + swizzle_x = I915_BIT_6_SWIZZLE_9_10; 121 + swizzle_y = I915_BIT_6_SWIZZLE_9; 122 + } else if (IS_I965GM(dev) || IS_GM45(dev)) { 123 + /* GM965 only does bit 11-based channel 124 + * randomization 125 + */ 126 + swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; 127 + swizzle_y = I915_BIT_6_SWIZZLE_9_11; 128 + } else { 129 + /* Bit 17 or perhaps other swizzling */ 130 + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 131 + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 132 + } 133 + break; 134 + } 135 + if (dcc == 0xffffffff) { 136 + DRM_ERROR("Couldn't read from MCHBAR. " 137 + "Disabling tiling.\n"); 138 + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 139 + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 140 + } 141 + } else { 142 + /* The 965, G33, and newer, have a very flexible memory 143 + * configuration. It will enable dual-channel mode 144 + * (interleaving) on as much memory as it can, and the GPU 145 + * will additionally sometimes enable different bit 6 146 + * swizzling for tiled objects from the CPU. 
147 + * 148 + * Here's what I found on the G965: 149 + * slot fill memory size swizzling 150 + * 0A 0B 1A 1B 1-ch 2-ch 151 + * 512 0 0 0 512 0 O 152 + * 512 0 512 0 16 1008 X 153 + * 512 0 0 512 16 1008 X 154 + * 0 512 0 512 16 1008 X 155 + * 1024 1024 1024 0 2048 1024 O 156 + * 157 + * We could probably detect this based on either the DRB 158 + * matching, which was the case for the swizzling required in 159 + * the table above, or from the 1-ch value being less than 160 + * the minimum size of a rank. 161 + */ 162 + if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { 163 + swizzle_x = I915_BIT_6_SWIZZLE_NONE; 164 + swizzle_y = I915_BIT_6_SWIZZLE_NONE; 165 + } else { 166 + swizzle_x = I915_BIT_6_SWIZZLE_9_10; 167 + swizzle_y = I915_BIT_6_SWIZZLE_9; 168 + } 169 + } 170 + 171 + dev_priv->mm.bit_6_swizzle_x = swizzle_x; 172 + dev_priv->mm.bit_6_swizzle_y = swizzle_y; 173 + } 174 + 175 + /** 176 + * Sets the tiling mode of an object, returning the required swizzling of 177 + * bit 6 of addresses in the object. 178 + */ 179 + int 180 + i915_gem_set_tiling(struct drm_device *dev, void *data, 181 + struct drm_file *file_priv) 182 + { 183 + struct drm_i915_gem_set_tiling *args = data; 184 + drm_i915_private_t *dev_priv = dev->dev_private; 185 + struct drm_gem_object *obj; 186 + struct drm_i915_gem_object *obj_priv; 187 + 188 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 189 + if (obj == NULL) 190 + return -EINVAL; 191 + obj_priv = obj->driver_private; 192 + 193 + mutex_lock(&dev->struct_mutex); 194 + 195 + if (args->tiling_mode == I915_TILING_NONE) { 196 + obj_priv->tiling_mode = I915_TILING_NONE; 197 + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 198 + } else { 199 + if (args->tiling_mode == I915_TILING_X) 200 + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 201 + else 202 + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; 203 + /* If we can't handle the swizzling, make it untiled. */ 204 + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { 205 + args->tiling_mode = I915_TILING_NONE; 206 + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 207 + } 208 + } 209 + obj_priv->tiling_mode = args->tiling_mode; 210 + 211 + mutex_unlock(&dev->struct_mutex); 212 + 213 + drm_gem_object_unreference(obj); 214 + 215 + return 0; 216 + } 217 + 218 + /** 219 + * Returns the current tiling mode and required bit 6 swizzling for the object. 220 + */ 221 + int 222 + i915_gem_get_tiling(struct drm_device *dev, void *data, 223 + struct drm_file *file_priv) 224 + { 225 + struct drm_i915_gem_get_tiling *args = data; 226 + drm_i915_private_t *dev_priv = dev->dev_private; 227 + struct drm_gem_object *obj; 228 + struct drm_i915_gem_object *obj_priv; 229 + 230 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 231 + if (obj == NULL) 232 + return -EINVAL; 233 + obj_priv = obj->driver_private; 234 + 235 + mutex_lock(&dev->struct_mutex); 236 + 237 + args->tiling_mode = obj_priv->tiling_mode; 238 + switch (obj_priv->tiling_mode) { 239 + case I915_TILING_X: 240 + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 241 + break; 242 + case I915_TILING_Y: 243 + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; 244 + break; 245 + case I915_TILING_NONE: 246 + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 247 + break; 248 + default: 249 + DRM_ERROR("unknown tiling mode\n"); 250 + } 251 + 252 + mutex_unlock(&dev->struct_mutex); 253 + 254 + drm_gem_object_unreference(obj); 255 + 256 + return 0; 257 + }
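Not part of the merged diff: a sketch of what the swizzle mode returned by the get_tiling ioctl means for a user-space driver. For I915_BIT_6_SWIZZLE_9_10, the common X-tiling case described in the comment above, the CPU must XOR bits 9 and 10 of an object-relative byte offset into bit 6 before accessing the object through its own mapping, so that it sees the data where the channel-interleaved GPU view put it. The helper name is hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Apply the I915_BIT_6_SWIZZLE_9_10 rule to a linear byte offset. */
static uint32_t swizzle_9_10(uint32_t offset)
{
	uint32_t bit9  = (offset >> 9) & 1;
	uint32_t bit10 = (offset >> 10) & 1;

	/* bit 6 flips whenever bits 9 and 10 of the offset differ */
	return offset ^ ((bit9 ^ bit10) << 6);
}

int main(void)
{
	/* 0x200 has bit 9 set and bit 10 clear, so bit 6 is flipped: 0x200 -> 0x240 */
	printf("0x200 -> 0x%03x\n", swizzle_9_10(0x200));
	return 0;
}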
+378 -136
drivers/gpu/drm/i915/i915_irq.c
··· 31 #include "i915_drm.h" 32 #include "i915_drv.h" 33 34 - #define USER_INT_FLAG (1<<1) 35 - #define VSYNC_PIPEB_FLAG (1<<5) 36 - #define VSYNC_PIPEA_FLAG (1<<7) 37 - 38 #define MAX_NOPID ((u32)~0) 39 40 /** 41 * Emit blits for scheduled buffer swaps. ··· 128 unsigned long irqflags; 129 struct list_head *list, *tmp, hits, *hit; 130 int nhits, nrects, slice[2], upper[2], lower[2], i; 131 - unsigned counter[2] = { atomic_read(&dev->vbl_received), 132 - atomic_read(&dev->vbl_received2) }; 133 struct drm_drawable_info *drw; 134 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; 135 u32 cpp = dev_priv->cpp; ··· 150 src_pitch >>= 2; 151 } 152 153 DRM_DEBUG("\n"); 154 155 INIT_LIST_HEAD(&hits); ··· 165 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { 166 drm_i915_vbl_swap_t *vbl_swap = 167 list_entry(list, drm_i915_vbl_swap_t, head); 168 169 - if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23)) 170 continue; 171 172 list_del(list); 173 dev_priv->swaps_pending--; 174 175 spin_unlock(&dev_priv->swaps_lock); 176 spin_lock(&dev->drw_lock); ··· 265 drm_i915_vbl_swap_t *swap_hit = 266 list_entry(hit, drm_i915_vbl_swap_t, head); 267 struct drm_clip_rect *rect; 268 - int num_rects, pipe; 269 unsigned short top, bottom; 270 271 drw = drm_get_drawable_info(dev, swap_hit->drw_id); ··· 274 continue; 275 276 rect = drw->rects; 277 - pipe = swap_hit->pipe; 278 - top = upper[pipe]; 279 - bottom = lower[pipe]; 280 281 for (num_rects = drw->num_rects; num_rects--; rect++) { 282 int y1 = max(rect->y1, top); ··· 313 } 314 } 315 316 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 317 { 318 struct drm_device *dev = (struct drm_device *) arg; 319 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 320 - u16 temp; 321 u32 pipea_stats, pipeb_stats; 322 323 - pipea_stats = I915_READ(I915REG_PIPEASTAT); 324 - pipeb_stats = I915_READ(I915REG_PIPEBSTAT); 325 326 - temp = I915_READ16(I915REG_INT_IDENTITY_R); 327 328 - temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG); 329 - 330 - DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp); 331 - 332 - if (temp == 0) 333 return IRQ_NONE; 334 335 - I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 336 - (void) I915_READ16(I915REG_INT_IDENTITY_R); 337 - DRM_READMEMORYBARRIER(); 338 339 - dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 340 341 - if (temp & USER_INT_FLAG) 342 DRM_WAKEUP(&dev_priv->irq_queue); 343 344 - if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) { 345 - int vblank_pipe = dev_priv->vblank_pipe; 346 347 - if ((vblank_pipe & 348 - (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) 349 - == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) { 350 - if (temp & VSYNC_PIPEA_FLAG) 351 - atomic_inc(&dev->vbl_received); 352 - if (temp & VSYNC_PIPEB_FLAG) 353 - atomic_inc(&dev->vbl_received2); 354 - } else if (((temp & VSYNC_PIPEA_FLAG) && 355 - (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) || 356 - ((temp & VSYNC_PIPEB_FLAG) && 357 - (vblank_pipe & DRM_I915_VBLANK_PIPE_B))) 358 - atomic_inc(&dev->vbl_received); 359 - 360 - DRM_WAKEUP(&dev->vbl_queue); 361 - drm_vbl_send_signals(dev); 362 - 363 - if (dev_priv->swaps_pending > 0) 364 drm_locked_tasklet(dev, i915_vblank_tasklet); 365 - I915_WRITE(I915REG_PIPEASTAT, 366 - pipea_stats|I915_VBLANK_INTERRUPT_ENABLE| 367 - I915_VBLANK_CLEAR); 368 - I915_WRITE(I915REG_PIPEBSTAT, 369 - pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE| 370 - I915_VBLANK_CLEAR); 371 } 372 373 return IRQ_HANDLED; ··· 460 461 DRM_DEBUG("\n"); 462 463 - dev_priv->sarea_priv->last_enqueue = 
++dev_priv->counter; 464 - 465 if (dev_priv->counter > 0x7FFFFFFFUL) 466 - dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1; 467 468 BEGIN_LP_RING(6); 469 - OUT_RING(CMD_STORE_DWORD_IDX); 470 - OUT_RING(20); 471 OUT_RING(dev_priv->counter); 472 OUT_RING(0); 473 OUT_RING(0); 474 - OUT_RING(GFX_OP_USER_INTERRUPT); 475 ADVANCE_LP_RING(); 476 477 return dev_priv->counter; 478 } 479 480 static int i915_wait_irq(struct drm_device * dev, int irq_nr) ··· 507 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, 508 READ_BREADCRUMB(dev_priv)); 509 510 - if (READ_BREADCRUMB(dev_priv) >= irq_nr) 511 return 0; 512 513 - dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 514 515 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, 516 READ_BREADCRUMB(dev_priv) >= irq_nr); 517 518 if (ret == -EBUSY) { 519 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 520 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 521 } 522 523 - dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 524 - return ret; 525 - } 526 - 527 - static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence, 528 - atomic_t *counter) 529 - { 530 - drm_i915_private_t *dev_priv = dev->dev_private; 531 - unsigned int cur_vblank; 532 - int ret = 0; 533 - 534 - if (!dev_priv) { 535 - DRM_ERROR("called with no initialization\n"); 536 - return -EINVAL; 537 - } 538 - 539 - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 540 - (((cur_vblank = atomic_read(counter)) 541 - - *sequence) <= (1<<23))); 542 - 543 - *sequence = cur_vblank; 544 545 return ret; 546 - } 547 - 548 - 549 - int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence) 550 - { 551 - return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received); 552 - } 553 - 554 - int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) 555 - { 556 - return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2); 557 } 558 559 /* Needs the lock as it touches the ring. 
··· 544 drm_i915_irq_emit_t *emit = data; 545 int result; 546 547 - LOCK_TEST_WITH_RETURN(dev, file_priv); 548 549 if (!dev_priv) { 550 DRM_ERROR("called with no initialization\n"); 551 return -EINVAL; 552 } 553 - 554 result = i915_emit_irq(dev); 555 556 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { 557 DRM_ERROR("copy_to_user\n"); ··· 578 return i915_wait_irq(dev, irqwait->irq_seq); 579 } 580 581 - static void i915_enable_interrupt (struct drm_device *dev) 582 { 583 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 584 - u16 flag; 585 586 - flag = 0; 587 - if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A) 588 - flag |= VSYNC_PIPEA_FLAG; 589 - if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B) 590 - flag |= VSYNC_PIPEB_FLAG; 591 592 - I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag); 593 } 594 595 /* Set the vblank monitor pipe ··· 654 struct drm_file *file_priv) 655 { 656 drm_i915_private_t *dev_priv = dev->dev_private; 657 - drm_i915_vblank_pipe_t *pipe = data; 658 659 if (!dev_priv) { 660 DRM_ERROR("called with no initialization\n"); 661 return -EINVAL; 662 } 663 - 664 - if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) { 665 - DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe); 666 - return -EINVAL; 667 - } 668 - 669 - dev_priv->vblank_pipe = pipe->pipe; 670 - 671 - i915_enable_interrupt (dev); 672 673 return 0; 674 } ··· 668 { 669 drm_i915_private_t *dev_priv = dev->dev_private; 670 drm_i915_vblank_pipe_t *pipe = data; 671 - u16 flag; 672 673 if (!dev_priv) { 674 DRM_ERROR("called with no initialization\n"); 675 return -EINVAL; 676 } 677 678 - flag = I915_READ(I915REG_INT_ENABLE_R); 679 - pipe->pipe = 0; 680 - if (flag & VSYNC_PIPEA_FLAG) 681 - pipe->pipe |= DRM_I915_VBLANK_PIPE_A; 682 - if (flag & VSYNC_PIPEB_FLAG) 683 - pipe->pipe |= DRM_I915_VBLANK_PIPE_B; 684 685 return 0; 686 } ··· 688 drm_i915_private_t *dev_priv = dev->dev_private; 689 drm_i915_vblank_swap_t *swap = data; 690 drm_i915_vbl_swap_t *vbl_swap; 691 - unsigned int pipe, seqtype, curseq; 692 unsigned long irqflags; 693 struct list_head *list; 694 695 - if (!dev_priv) { 696 DRM_ERROR("%s called with no initialization\n", __func__); 697 return -EINVAL; 698 } ··· 709 return -EINVAL; 710 } 711 712 - pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 713 714 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 715 ··· 729 730 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 731 732 - curseq = atomic_read(pipe ? 
&dev->vbl_received2 : &dev->vbl_received); 733 734 if (seqtype == _DRM_VBLANK_RELATIVE) 735 swap->sequence += curseq; ··· 746 swap->sequence = curseq + 1; 747 } else { 748 DRM_DEBUG("Missed target sequence\n"); 749 return -EINVAL; 750 } 751 } ··· 757 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); 758 759 if (vbl_swap->drw_id == swap->drawable && 760 - vbl_swap->pipe == pipe && 761 vbl_swap->sequence == swap->sequence) { 762 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 763 DRM_DEBUG("Already scheduled\n"); ··· 769 770 if (dev_priv->swaps_pending >= 100) { 771 DRM_DEBUG("Too many swaps queued\n"); 772 return -EBUSY; 773 } 774 ··· 777 778 if (!vbl_swap) { 779 DRM_ERROR("Failed to allocate memory to queue swap\n"); 780 return -ENOMEM; 781 } 782 783 DRM_DEBUG("\n"); 784 785 vbl_swap->drw_id = swap->drawable; 786 - vbl_swap->pipe = pipe; 787 vbl_swap->sequence = swap->sequence; 788 789 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); ··· 803 { 804 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 805 806 - I915_WRITE16(I915REG_HWSTAM, 0xfffe); 807 - I915_WRITE16(I915REG_INT_MASK_R, 0x0); 808 - I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); 809 } 810 811 - void i915_driver_irq_postinstall(struct drm_device * dev) 812 { 813 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 814 815 spin_lock_init(&dev_priv->swaps_lock); 816 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); 817 dev_priv->swaps_pending = 0; 818 819 - if (!dev_priv->vblank_pipe) 820 - dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A; 821 - i915_enable_interrupt(dev); 822 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 823 } 824 825 void i915_driver_irq_uninstall(struct drm_device * dev) 826 { 827 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 828 - u16 temp; 829 830 if (!dev_priv) 831 return; 832 833 - I915_WRITE16(I915REG_HWSTAM, 0xffff); 834 - I915_WRITE16(I915REG_INT_MASK_R, 0xffff); 835 - I915_WRITE16(I915REG_INT_ENABLE_R, 0x0); 836 837 - temp = I915_READ16(I915REG_INT_IDENTITY_R); 838 - I915_WRITE16(I915REG_INT_IDENTITY_R, temp); 839 }
··· 31 #include "i915_drm.h" 32 #include "i915_drv.h" 33 34 #define MAX_NOPID ((u32)~0) 35 + 36 + /** These are the interrupts used by the driver */ 37 + #define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT | \ 38 + I915_ASLE_INTERRUPT | \ 39 + I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \ 40 + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) 41 + 42 + void 43 + i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 44 + { 45 + if ((dev_priv->irq_mask_reg & mask) != 0) { 46 + dev_priv->irq_mask_reg &= ~mask; 47 + I915_WRITE(IMR, dev_priv->irq_mask_reg); 48 + (void) I915_READ(IMR); 49 + } 50 + } 51 + 52 + static inline void 53 + i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask) 54 + { 55 + if ((dev_priv->irq_mask_reg & mask) != mask) { 56 + dev_priv->irq_mask_reg |= mask; 57 + I915_WRITE(IMR, dev_priv->irq_mask_reg); 58 + (void) I915_READ(IMR); 59 + } 60 + } 61 + 62 + /** 63 + * i915_get_pipe - return the the pipe associated with a given plane 64 + * @dev: DRM device 65 + * @plane: plane to look for 66 + * 67 + * The Intel Mesa & 2D drivers call the vblank routines with a plane number 68 + * rather than a pipe number, since they may not always be equal. This routine 69 + * maps the given @plane back to a pipe number. 70 + */ 71 + static int 72 + i915_get_pipe(struct drm_device *dev, int plane) 73 + { 74 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 75 + u32 dspcntr; 76 + 77 + dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR); 78 + 79 + return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0; 80 + } 81 + 82 + /** 83 + * i915_get_plane - return the the plane associated with a given pipe 84 + * @dev: DRM device 85 + * @pipe: pipe to look for 86 + * 87 + * The Intel Mesa & 2D drivers call the vblank routines with a plane number 88 + * rather than a plane number, since they may not always be equal. This routine 89 + * maps the given @pipe back to a plane number. 90 + */ 91 + static int 92 + i915_get_plane(struct drm_device *dev, int pipe) 93 + { 94 + if (i915_get_pipe(dev, 0) == pipe) 95 + return 0; 96 + return 1; 97 + } 98 + 99 + /** 100 + * i915_pipe_enabled - check if a pipe is enabled 101 + * @dev: DRM device 102 + * @pipe: pipe to check 103 + * 104 + * Reading certain registers when the pipe is disabled can hang the chip. 105 + * Use this routine to make sure the PLL is running and the pipe is active 106 + * before reading such registers if unsure. 107 + */ 108 + static int 109 + i915_pipe_enabled(struct drm_device *dev, int pipe) 110 + { 111 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 112 + unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF; 113 + 114 + if (I915_READ(pipeconf) & PIPEACONF_ENABLE) 115 + return 1; 116 + 117 + return 0; 118 + } 119 120 /** 121 * Emit blits for scheduled buffer swaps. 
··· 48 unsigned long irqflags; 49 struct list_head *list, *tmp, hits, *hit; 50 int nhits, nrects, slice[2], upper[2], lower[2], i; 51 + unsigned counter[2]; 52 struct drm_drawable_info *drw; 53 drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv; 54 u32 cpp = dev_priv->cpp; ··· 71 src_pitch >>= 2; 72 } 73 74 + counter[0] = drm_vblank_count(dev, 0); 75 + counter[1] = drm_vblank_count(dev, 1); 76 + 77 DRM_DEBUG("\n"); 78 79 INIT_LIST_HEAD(&hits); ··· 83 list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) { 84 drm_i915_vbl_swap_t *vbl_swap = 85 list_entry(list, drm_i915_vbl_swap_t, head); 86 + int pipe = i915_get_pipe(dev, vbl_swap->plane); 87 88 + if ((counter[pipe] - vbl_swap->sequence) > (1<<23)) 89 continue; 90 91 list_del(list); 92 dev_priv->swaps_pending--; 93 + drm_vblank_put(dev, pipe); 94 95 spin_unlock(&dev_priv->swaps_lock); 96 spin_lock(&dev->drw_lock); ··· 181 drm_i915_vbl_swap_t *swap_hit = 182 list_entry(hit, drm_i915_vbl_swap_t, head); 183 struct drm_clip_rect *rect; 184 + int num_rects, plane; 185 unsigned short top, bottom; 186 187 drw = drm_get_drawable_info(dev, swap_hit->drw_id); ··· 190 continue; 191 192 rect = drw->rects; 193 + plane = swap_hit->plane; 194 + top = upper[plane]; 195 + bottom = lower[plane]; 196 197 for (num_rects = drw->num_rects; num_rects--; rect++) { 198 int y1 = max(rect->y1, top); ··· 229 } 230 } 231 232 + u32 i915_get_vblank_counter(struct drm_device *dev, int plane) 233 + { 234 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 235 + unsigned long high_frame; 236 + unsigned long low_frame; 237 + u32 high1, high2, low, count; 238 + int pipe; 239 + 240 + pipe = i915_get_pipe(dev, plane); 241 + high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH; 242 + low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL; 243 + 244 + if (!i915_pipe_enabled(dev, pipe)) { 245 + DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe); 246 + return 0; 247 + } 248 + 249 + /* 250 + * High & low register fields aren't synchronized, so make sure 251 + * we get a low value that's stable across two reads of the high 252 + * register. 
253 + */ 254 + do { 255 + high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> 256 + PIPE_FRAME_HIGH_SHIFT); 257 + low = ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >> 258 + PIPE_FRAME_LOW_SHIFT); 259 + high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >> 260 + PIPE_FRAME_HIGH_SHIFT); 261 + } while (high1 != high2); 262 + 263 + count = (high1 << 8) | low; 264 + 265 + return count; 266 + } 267 + 268 + void 269 + i915_gem_vblank_work_handler(struct work_struct *work) 270 + { 271 + drm_i915_private_t *dev_priv; 272 + struct drm_device *dev; 273 + 274 + dev_priv = container_of(work, drm_i915_private_t, 275 + mm.vblank_work); 276 + dev = dev_priv->dev; 277 + 278 + mutex_lock(&dev->struct_mutex); 279 + i915_vblank_tasklet(dev); 280 + mutex_unlock(&dev->struct_mutex); 281 + } 282 + 283 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 284 { 285 struct drm_device *dev = (struct drm_device *) arg; 286 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 287 + u32 iir; 288 u32 pipea_stats, pipeb_stats; 289 + int vblank = 0; 290 291 + atomic_inc(&dev_priv->irq_received); 292 293 + if (dev->pdev->msi_enabled) 294 + I915_WRITE(IMR, ~0); 295 + iir = I915_READ(IIR); 296 297 + if (iir == 0) { 298 + if (dev->pdev->msi_enabled) { 299 + I915_WRITE(IMR, dev_priv->irq_mask_reg); 300 + (void) I915_READ(IMR); 301 + } 302 return IRQ_NONE; 303 + } 304 305 + /* 306 + * Clear the PIPE(A|B)STAT regs before the IIR otherwise 307 + * we may get extra interrupts. 308 + */ 309 + if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) { 310 + pipea_stats = I915_READ(PIPEASTAT); 311 + if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)) 312 + pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | 313 + PIPE_VBLANK_INTERRUPT_ENABLE); 314 + else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| 315 + PIPE_VBLANK_INTERRUPT_STATUS)) { 316 + vblank++; 317 + drm_handle_vblank(dev, i915_get_plane(dev, 0)); 318 + } 319 320 + I915_WRITE(PIPEASTAT, pipea_stats); 321 + } 322 + if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) { 323 + pipeb_stats = I915_READ(PIPEBSTAT); 324 + /* Ack the event */ 325 + I915_WRITE(PIPEBSTAT, pipeb_stats); 326 327 + /* The vblank interrupt gets enabled even if we didn't ask for 328 + it, so make sure it's shut down again */ 329 + if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)) 330 + pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | 331 + PIPE_VBLANK_INTERRUPT_ENABLE); 332 + else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS| 333 + PIPE_VBLANK_INTERRUPT_STATUS)) { 334 + vblank++; 335 + drm_handle_vblank(dev, i915_get_plane(dev, 1)); 336 + } 337 + 338 + if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) 339 + opregion_asle_intr(dev); 340 + I915_WRITE(PIPEBSTAT, pipeb_stats); 341 + } 342 + 343 + I915_WRITE(IIR, iir); 344 + if (dev->pdev->msi_enabled) 345 + I915_WRITE(IMR, dev_priv->irq_mask_reg); 346 + (void) I915_READ(IIR); /* Flush posted writes */ 347 + 348 + if (dev_priv->sarea_priv) 349 + dev_priv->sarea_priv->last_dispatch = 350 + READ_BREADCRUMB(dev_priv); 351 + 352 + if (iir & I915_USER_INTERRUPT) { 353 + dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); 354 DRM_WAKEUP(&dev_priv->irq_queue); 355 + } 356 357 + if (iir & I915_ASLE_INTERRUPT) 358 + opregion_asle_intr(dev); 359 360 + if (vblank && dev_priv->swaps_pending > 0) { 361 + if (dev_priv->ring.ring_obj == NULL) 362 drm_locked_tasklet(dev, i915_vblank_tasklet); 363 + else 364 + schedule_work(&dev_priv->mm.vblank_work); 365 } 366 367 return IRQ_HANDLED; ··· 298 299 DRM_DEBUG("\n"); 300 301 + 
dev_priv->counter++; 302 if (dev_priv->counter > 0x7FFFFFFFUL) 303 + dev_priv->counter = 1; 304 + if (dev_priv->sarea_priv) 305 + dev_priv->sarea_priv->last_enqueue = dev_priv->counter; 306 307 BEGIN_LP_RING(6); 308 + OUT_RING(MI_STORE_DWORD_INDEX); 309 + OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT); 310 OUT_RING(dev_priv->counter); 311 OUT_RING(0); 312 OUT_RING(0); 313 + OUT_RING(MI_USER_INTERRUPT); 314 ADVANCE_LP_RING(); 315 316 return dev_priv->counter; 317 + } 318 + 319 + void i915_user_irq_get(struct drm_device *dev) 320 + { 321 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 322 + 323 + spin_lock(&dev_priv->user_irq_lock); 324 + if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) 325 + i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 326 + spin_unlock(&dev_priv->user_irq_lock); 327 + } 328 + 329 + void i915_user_irq_put(struct drm_device *dev) 330 + { 331 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 332 + 333 + spin_lock(&dev_priv->user_irq_lock); 334 + BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 335 + if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) 336 + i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 337 + spin_unlock(&dev_priv->user_irq_lock); 338 } 339 340 static int i915_wait_irq(struct drm_device * dev, int irq_nr) ··· 323 DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr, 324 READ_BREADCRUMB(dev_priv)); 325 326 + if (READ_BREADCRUMB(dev_priv) >= irq_nr) { 327 + if (dev_priv->sarea_priv) { 328 + dev_priv->sarea_priv->last_dispatch = 329 + READ_BREADCRUMB(dev_priv); 330 + } 331 return 0; 332 + } 333 334 + if (dev_priv->sarea_priv) 335 + dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; 336 337 + i915_user_irq_get(dev); 338 DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ, 339 READ_BREADCRUMB(dev_priv) >= irq_nr); 340 + i915_user_irq_put(dev); 341 342 if (ret == -EBUSY) { 343 DRM_ERROR("EBUSY -- rec: %d emitted: %d\n", 344 READ_BREADCRUMB(dev_priv), (int)dev_priv->counter); 345 } 346 347 + if (dev_priv->sarea_priv) 348 + dev_priv->sarea_priv->last_dispatch = 349 + READ_BREADCRUMB(dev_priv); 350 351 return ret; 352 } 353 354 /* Needs the lock as it touches the ring. 
··· 381 drm_i915_irq_emit_t *emit = data; 382 int result; 383 384 + RING_LOCK_TEST_WITH_RETURN(dev, file_priv); 385 386 if (!dev_priv) { 387 DRM_ERROR("called with no initialization\n"); 388 return -EINVAL; 389 } 390 + mutex_lock(&dev->struct_mutex); 391 result = i915_emit_irq(dev); 392 + mutex_unlock(&dev->struct_mutex); 393 394 if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) { 395 DRM_ERROR("copy_to_user\n"); ··· 414 return i915_wait_irq(dev, irqwait->irq_seq); 415 } 416 417 + int i915_enable_vblank(struct drm_device *dev, int plane) 418 { 419 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 420 + int pipe = i915_get_pipe(dev, plane); 421 + u32 pipestat_reg = 0; 422 + u32 pipestat; 423 424 + switch (pipe) { 425 + case 0: 426 + pipestat_reg = PIPEASTAT; 427 + i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT); 428 + break; 429 + case 1: 430 + pipestat_reg = PIPEBSTAT; 431 + i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 432 + break; 433 + default: 434 + DRM_ERROR("tried to enable vblank on non-existent pipe %d\n", 435 + pipe); 436 + break; 437 + } 438 439 + if (pipestat_reg) { 440 + pipestat = I915_READ(pipestat_reg); 441 + if (IS_I965G(dev)) 442 + pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE; 443 + else 444 + pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE; 445 + /* Clear any stale interrupt status */ 446 + pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | 447 + PIPE_VBLANK_INTERRUPT_STATUS); 448 + I915_WRITE(pipestat_reg, pipestat); 449 + } 450 + 451 + return 0; 452 + } 453 + 454 + void i915_disable_vblank(struct drm_device *dev, int plane) 455 + { 456 + drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 457 + int pipe = i915_get_pipe(dev, plane); 458 + u32 pipestat_reg = 0; 459 + u32 pipestat; 460 + 461 + switch (pipe) { 462 + case 0: 463 + pipestat_reg = PIPEASTAT; 464 + i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT); 465 + break; 466 + case 1: 467 + pipestat_reg = PIPEBSTAT; 468 + i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 469 + break; 470 + default: 471 + DRM_ERROR("tried to disable vblank on non-existent pipe %d\n", 472 + pipe); 473 + break; 474 + } 475 + 476 + if (pipestat_reg) { 477 + pipestat = I915_READ(pipestat_reg); 478 + pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE | 479 + PIPE_VBLANK_INTERRUPT_ENABLE); 480 + /* Clear any stale interrupt status */ 481 + pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS | 482 + PIPE_VBLANK_INTERRUPT_STATUS); 483 + I915_WRITE(pipestat_reg, pipestat); 484 + } 485 } 486 487 /* Set the vblank monitor pipe ··· 434 struct drm_file *file_priv) 435 { 436 drm_i915_private_t *dev_priv = dev->dev_private; 437 438 if (!dev_priv) { 439 DRM_ERROR("called with no initialization\n"); 440 return -EINVAL; 441 } 442 443 return 0; 444 } ··· 458 { 459 drm_i915_private_t *dev_priv = dev->dev_private; 460 drm_i915_vblank_pipe_t *pipe = data; 461 462 if (!dev_priv) { 463 DRM_ERROR("called with no initialization\n"); 464 return -EINVAL; 465 } 466 467 + pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 468 469 return 0; 470 } ··· 484 drm_i915_private_t *dev_priv = dev->dev_private; 485 drm_i915_vblank_swap_t *swap = data; 486 drm_i915_vbl_swap_t *vbl_swap; 487 + unsigned int pipe, seqtype, curseq, plane; 488 unsigned long irqflags; 489 struct list_head *list; 490 + int ret; 491 492 + if (!dev_priv || !dev_priv->sarea_priv) { 493 DRM_ERROR("%s called with no initialization\n", __func__); 494 return -EINVAL; 495 } ··· 504 return -EINVAL; 
505 } 506 507 + plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0; 508 + pipe = i915_get_pipe(dev, plane); 509 510 seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE); 511 ··· 523 524 spin_unlock_irqrestore(&dev->drw_lock, irqflags); 525 526 + /* 527 + * We take the ref here and put it when the swap actually completes 528 + * in the tasklet. 529 + */ 530 + ret = drm_vblank_get(dev, pipe); 531 + if (ret) 532 + return ret; 533 + curseq = drm_vblank_count(dev, pipe); 534 535 if (seqtype == _DRM_VBLANK_RELATIVE) 536 swap->sequence += curseq; ··· 533 swap->sequence = curseq + 1; 534 } else { 535 DRM_DEBUG("Missed target sequence\n"); 536 + drm_vblank_put(dev, pipe); 537 return -EINVAL; 538 } 539 } ··· 543 vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head); 544 545 if (vbl_swap->drw_id == swap->drawable && 546 + vbl_swap->plane == plane && 547 vbl_swap->sequence == swap->sequence) { 548 spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags); 549 DRM_DEBUG("Already scheduled\n"); ··· 555 556 if (dev_priv->swaps_pending >= 100) { 557 DRM_DEBUG("Too many swaps queued\n"); 558 + drm_vblank_put(dev, pipe); 559 return -EBUSY; 560 } 561 ··· 562 563 if (!vbl_swap) { 564 DRM_ERROR("Failed to allocate memory to queue swap\n"); 565 + drm_vblank_put(dev, pipe); 566 return -ENOMEM; 567 } 568 569 DRM_DEBUG("\n"); 570 571 vbl_swap->drw_id = swap->drawable; 572 + vbl_swap->plane = plane; 573 vbl_swap->sequence = swap->sequence; 574 575 spin_lock_irqsave(&dev_priv->swaps_lock, irqflags); ··· 587 { 588 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 589 590 + I915_WRITE(HWSTAM, 0xeffe); 591 + I915_WRITE(IMR, 0xffffffff); 592 + I915_WRITE(IER, 0x0); 593 } 594 595 + int i915_driver_irq_postinstall(struct drm_device *dev) 596 { 597 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 598 + int ret, num_pipes = 2; 599 600 spin_lock_init(&dev_priv->swaps_lock); 601 INIT_LIST_HEAD(&dev_priv->vbl_swaps.head); 602 dev_priv->swaps_pending = 0; 603 604 + /* Set initial unmasked IRQs to just the selected vblank pipes. */ 605 + dev_priv->irq_mask_reg = ~0; 606 + 607 + ret = drm_vblank_init(dev, num_pipes); 608 + if (ret) 609 + return ret; 610 + 611 + dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 612 + dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; 613 + dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; 614 + 615 + dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 616 + 617 + dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK; 618 + 619 + I915_WRITE(IMR, dev_priv->irq_mask_reg); 620 + I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK); 621 + (void) I915_READ(IER); 622 + 623 + opregion_enable_asle(dev); 624 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue); 625 + 626 + return 0; 627 } 628 629 void i915_driver_irq_uninstall(struct drm_device * dev) 630 { 631 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 632 + u32 temp; 633 634 if (!dev_priv) 635 return; 636 637 + dev_priv->vblank_pipe = 0; 638 639 + I915_WRITE(HWSTAM, 0xffffffff); 640 + I915_WRITE(IMR, 0xffffffff); 641 + I915_WRITE(IER, 0x0); 642 + 643 + temp = I915_READ(PIPEASTAT); 644 + I915_WRITE(PIPEASTAT, temp); 645 + temp = I915_READ(PIPEBSTAT); 646 + I915_WRITE(PIPEBSTAT, temp); 647 + temp = I915_READ(IIR); 648 + I915_WRITE(IIR, temp); 649 }
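Not part of the merged diff: the frame counter read in i915_get_vblank_counter() above is the usual split-register pattern. The high and low halves of the counter latch independently, so the high half is read twice and the loop retries until both reads agree, guaranteeing the low half belongs to the same frame. A self-contained sketch with simulated registers standing in for the PIPE*FRAMEHIGH/PIPE*FRAMEPIXEL reads:

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the hardware frame counter registers. */
static uint32_t frame_high = 0x12;
static uint32_t frame_low  = 0x34;

static uint32_t read_high(void) { return frame_high; }
static uint32_t read_low(void)  { return frame_low; }

static uint32_t read_frame_count(void)
{
	uint32_t high1, high2, low;

	do {
		high1 = read_high();	/* upper bits of the frame counter */
		low   = read_low();	/* lower 8 bits */
		high2 = read_high();	/* re-read; retry if it changed in between */
	} while (high1 != high2);

	return (high1 << 8) | low;	/* 24-bit count, matching dev->max_vblank_count */
}

int main(void)
{
	printf("frame count = 0x%06x\n", read_frame_count());
	return 0;
}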
+371
drivers/gpu/drm/i915/i915_opregion.c
···
··· 1 + /* 2 + * Copyright 2008 Intel Corporation <hong.liu@intel.com> 3 + * Copyright 2008 Red Hat <mjg@redhat.com> 4 + * 5 + * Permission is hereby granted, free of charge, to any person obtaining 6 + * a copy of this software and associated documentation files (the 7 + * "Software"), to deal in the Software without restriction, including 8 + * without limitation the rights to use, copy, modify, merge, publish, 9 + * distribute, sub license, and/or sell copies of the Software, and to 10 + * permit persons to whom the Software is furnished to do so, subject to 11 + * the following conditions: 12 + * 13 + * The above copyright notice and this permission notice (including the 14 + * next paragraph) shall be included in all copies or substantial 15 + * portions of the Software. 16 + * 17 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 18 + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 19 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 20 + * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE 21 + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 22 + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 23 + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 24 + * SOFTWARE. 25 + * 26 + */ 27 + 28 + #include <linux/acpi.h> 29 + 30 + #include "drmP.h" 31 + #include "i915_drm.h" 32 + #include "i915_drv.h" 33 + 34 + #define PCI_ASLE 0xe4 35 + #define PCI_LBPC 0xf4 36 + #define PCI_ASLS 0xfc 37 + 38 + #define OPREGION_SZ (8*1024) 39 + #define OPREGION_HEADER_OFFSET 0 40 + #define OPREGION_ACPI_OFFSET 0x100 41 + #define OPREGION_SWSCI_OFFSET 0x200 42 + #define OPREGION_ASLE_OFFSET 0x300 43 + #define OPREGION_VBT_OFFSET 0x1000 44 + 45 + #define OPREGION_SIGNATURE "IntelGraphicsMem" 46 + #define MBOX_ACPI (1<<0) 47 + #define MBOX_SWSCI (1<<1) 48 + #define MBOX_ASLE (1<<2) 49 + 50 + struct opregion_header { 51 + u8 signature[16]; 52 + u32 size; 53 + u32 opregion_ver; 54 + u8 bios_ver[32]; 55 + u8 vbios_ver[16]; 56 + u8 driver_ver[16]; 57 + u32 mboxes; 58 + u8 reserved[164]; 59 + } __attribute__((packed)); 60 + 61 + /* OpRegion mailbox #1: public ACPI methods */ 62 + struct opregion_acpi { 63 + u32 drdy; /* driver readiness */ 64 + u32 csts; /* notification status */ 65 + u32 cevt; /* current event */ 66 + u8 rsvd1[20]; 67 + u32 didl[8]; /* supported display devices ID list */ 68 + u32 cpdl[8]; /* currently presented display list */ 69 + u32 cadl[8]; /* currently active display list */ 70 + u32 nadl[8]; /* next active devices list */ 71 + u32 aslp; /* ASL sleep time-out */ 72 + u32 tidx; /* toggle table index */ 73 + u32 chpd; /* current hotplug enable indicator */ 74 + u32 clid; /* current lid state*/ 75 + u32 cdck; /* current docking state */ 76 + u32 sxsw; /* Sx state resume */ 77 + u32 evts; /* ASL supported events */ 78 + u32 cnot; /* current OS notification */ 79 + u32 nrdy; /* driver status */ 80 + u8 rsvd2[60]; 81 + } __attribute__((packed)); 82 + 83 + /* OpRegion mailbox #2: SWSCI */ 84 + struct opregion_swsci { 85 + u32 scic; /* SWSCI command|status|data */ 86 + u32 parm; /* command parameters */ 87 + u32 dslp; /* driver sleep time-out */ 88 + u8 rsvd[244]; 89 + } __attribute__((packed)); 90 + 91 + /* OpRegion mailbox #3: ASLE */ 92 + struct opregion_asle { 93 + u32 ardy; /* driver readiness */ 94 + u32 aslc; /* ASLE interrupt command */ 95 + u32 tche; /* technology enabled indicator */ 96 + u32 alsi; /* current ALS illuminance reading */ 97 + u32 bclp; /* backlight brightness 
to set */ 98 + u32 pfit; /* panel fitting state */ 99 + u32 cblv; /* current brightness level */ 100 + u16 bclm[20]; /* backlight level duty cycle mapping table */ 101 + u32 cpfm; /* current panel fitting mode */ 102 + u32 epfm; /* enabled panel fitting modes */ 103 + u8 plut[74]; /* panel LUT and identifier */ 104 + u32 pfmb; /* PWM freq and min brightness */ 105 + u8 rsvd[102]; 106 + } __attribute__((packed)); 107 + 108 + /* ASLE irq request bits */ 109 + #define ASLE_SET_ALS_ILLUM (1 << 0) 110 + #define ASLE_SET_BACKLIGHT (1 << 1) 111 + #define ASLE_SET_PFIT (1 << 2) 112 + #define ASLE_SET_PWM_FREQ (1 << 3) 113 + #define ASLE_REQ_MSK 0xf 114 + 115 + /* response bits of ASLE irq request */ 116 + #define ASLE_ALS_ILLUM_FAIL (2<<10) 117 + #define ASLE_BACKLIGHT_FAIL (2<<12) 118 + #define ASLE_PFIT_FAIL (2<<14) 119 + #define ASLE_PWM_FREQ_FAIL (2<<16) 120 + 121 + /* ASLE backlight brightness to set */ 122 + #define ASLE_BCLP_VALID (1<<31) 123 + #define ASLE_BCLP_MSK (~(1<<31)) 124 + 125 + /* ASLE panel fitting request */ 126 + #define ASLE_PFIT_VALID (1<<31) 127 + #define ASLE_PFIT_CENTER (1<<0) 128 + #define ASLE_PFIT_STRETCH_TEXT (1<<1) 129 + #define ASLE_PFIT_STRETCH_GFX (1<<2) 130 + 131 + /* PWM frequency and minimum brightness */ 132 + #define ASLE_PFMB_BRIGHTNESS_MASK (0xff) 133 + #define ASLE_PFMB_BRIGHTNESS_VALID (1<<8) 134 + #define ASLE_PFMB_PWM_MASK (0x7ffffe00) 135 + #define ASLE_PFMB_PWM_VALID (1<<31) 136 + 137 + #define ASLE_CBLV_VALID (1<<31) 138 + 139 + static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) 140 + { 141 + struct drm_i915_private *dev_priv = dev->dev_private; 142 + struct opregion_asle *asle = dev_priv->opregion.asle; 143 + u32 blc_pwm_ctl, blc_pwm_ctl2; 144 + 145 + if (!(bclp & ASLE_BCLP_VALID)) 146 + return ASLE_BACKLIGHT_FAIL; 147 + 148 + bclp &= ASLE_BCLP_MSK; 149 + if (bclp < 0 || bclp > 255) 150 + return ASLE_BACKLIGHT_FAIL; 151 + 152 + blc_pwm_ctl = I915_READ(BLC_PWM_CTL); 153 + blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK; 154 + blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2); 155 + 156 + if (blc_pwm_ctl2 & BLM_COMBINATION_MODE) 157 + pci_write_config_dword(dev->pdev, PCI_LBPC, bclp); 158 + else 159 + I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1)); 160 + 161 + asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID; 162 + 163 + return 0; 164 + } 165 + 166 + static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) 167 + { 168 + /* alsi is the current ALS reading in lux. 0 indicates below sensor 169 + range, 0xffff indicates above sensor range. 1-0xfffe are valid */ 170 + return 0; 171 + } 172 + 173 + static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) 174 + { 175 + struct drm_i915_private *dev_priv = dev->dev_private; 176 + if (pfmb & ASLE_PFMB_PWM_VALID) { 177 + u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); 178 + u32 pwm = pfmb & ASLE_PFMB_PWM_MASK; 179 + blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK; 180 + pwm = pwm >> 9; 181 + /* FIXME - what do we do with the PWM? 
*/ 182 + } 183 + return 0; 184 + } 185 + 186 + static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) 187 + { 188 + /* Panel fitting is currently controlled by the X code, so this is a 189 + noop until modesetting support works fully */ 190 + if (!(pfit & ASLE_PFIT_VALID)) 191 + return ASLE_PFIT_FAIL; 192 + return 0; 193 + } 194 + 195 + void opregion_asle_intr(struct drm_device *dev) 196 + { 197 + struct drm_i915_private *dev_priv = dev->dev_private; 198 + struct opregion_asle *asle = dev_priv->opregion.asle; 199 + u32 asle_stat = 0; 200 + u32 asle_req; 201 + 202 + if (!asle) 203 + return; 204 + 205 + asle_req = asle->aslc & ASLE_REQ_MSK; 206 + 207 + if (!asle_req) { 208 + DRM_DEBUG("non asle set request??\n"); 209 + return; 210 + } 211 + 212 + if (asle_req & ASLE_SET_ALS_ILLUM) 213 + asle_stat |= asle_set_als_illum(dev, asle->alsi); 214 + 215 + if (asle_req & ASLE_SET_BACKLIGHT) 216 + asle_stat |= asle_set_backlight(dev, asle->bclp); 217 + 218 + if (asle_req & ASLE_SET_PFIT) 219 + asle_stat |= asle_set_pfit(dev, asle->pfit); 220 + 221 + if (asle_req & ASLE_SET_PWM_FREQ) 222 + asle_stat |= asle_set_pwm_freq(dev, asle->pfmb); 223 + 224 + asle->aslc = asle_stat; 225 + } 226 + 227 + #define ASLE_ALS_EN (1<<0) 228 + #define ASLE_BLC_EN (1<<1) 229 + #define ASLE_PFIT_EN (1<<2) 230 + #define ASLE_PFMB_EN (1<<3) 231 + 232 + void opregion_enable_asle(struct drm_device *dev) 233 + { 234 + struct drm_i915_private *dev_priv = dev->dev_private; 235 + struct opregion_asle *asle = dev_priv->opregion.asle; 236 + 237 + if (asle) { 238 + u32 pipeb_stats = I915_READ(PIPEBSTAT); 239 + if (IS_MOBILE(dev)) { 240 + /* Many devices trigger events with a write to the 241 + legacy backlight controller, so we need to ensure 242 + that it's able to generate interrupts */ 243 + I915_WRITE(PIPEBSTAT, pipeb_stats |= 244 + I915_LEGACY_BLC_EVENT_ENABLE); 245 + i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT | 246 + I915_DISPLAY_PIPE_B_EVENT_INTERRUPT); 247 + } else 248 + i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT); 249 + 250 + asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | 251 + ASLE_PFMB_EN; 252 + asle->ardy = 1; 253 + } 254 + } 255 + 256 + #define ACPI_EV_DISPLAY_SWITCH (1<<0) 257 + #define ACPI_EV_LID (1<<1) 258 + #define ACPI_EV_DOCK (1<<2) 259 + 260 + static struct intel_opregion *system_opregion; 261 + 262 + int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, 263 + void *data) 264 + { 265 + /* The only video events relevant to opregion are 0x80. These indicate 266 + either a docking event, lid switch or display switch request. In 267 + Linux, these are handled by the dock, button and video drivers. 
268 + We might want to fix the video driver to be opregion-aware in 269 + future, but right now we just indicate to the firmware that the 270 + request has been handled */ 271 + 272 + struct opregion_acpi *acpi; 273 + 274 + if (!system_opregion) 275 + return NOTIFY_DONE; 276 + 277 + acpi = system_opregion->acpi; 278 + acpi->csts = 0; 279 + 280 + return NOTIFY_OK; 281 + } 282 + 283 + static struct notifier_block intel_opregion_notifier = { 284 + .notifier_call = intel_opregion_video_event, 285 + }; 286 + 287 + int intel_opregion_init(struct drm_device *dev) 288 + { 289 + struct drm_i915_private *dev_priv = dev->dev_private; 290 + struct intel_opregion *opregion = &dev_priv->opregion; 291 + void *base; 292 + u32 asls, mboxes; 293 + int err = 0; 294 + 295 + pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); 296 + DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls); 297 + if (asls == 0) { 298 + DRM_DEBUG("ACPI OpRegion not supported!\n"); 299 + return -ENOTSUPP; 300 + } 301 + 302 + base = ioremap(asls, OPREGION_SZ); 303 + if (!base) 304 + return -ENOMEM; 305 + 306 + opregion->header = base; 307 + if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) { 308 + DRM_DEBUG("opregion signature mismatch\n"); 309 + err = -EINVAL; 310 + goto err_out; 311 + } 312 + 313 + mboxes = opregion->header->mboxes; 314 + if (mboxes & MBOX_ACPI) { 315 + DRM_DEBUG("Public ACPI methods supported\n"); 316 + opregion->acpi = base + OPREGION_ACPI_OFFSET; 317 + } else { 318 + DRM_DEBUG("Public ACPI methods not supported\n"); 319 + err = -ENOTSUPP; 320 + goto err_out; 321 + } 322 + opregion->enabled = 1; 323 + 324 + if (mboxes & MBOX_SWSCI) { 325 + DRM_DEBUG("SWSCI supported\n"); 326 + opregion->swsci = base + OPREGION_SWSCI_OFFSET; 327 + } 328 + if (mboxes & MBOX_ASLE) { 329 + DRM_DEBUG("ASLE supported\n"); 330 + opregion->asle = base + OPREGION_ASLE_OFFSET; 331 + } 332 + 333 + /* Notify BIOS we are ready to handle ACPI video ext notifs. 334 + * Right now, all the events are handled by the ACPI video module. 335 + * We don't actually need to do anything with them. */ 336 + opregion->acpi->csts = 0; 337 + opregion->acpi->drdy = 1; 338 + 339 + system_opregion = opregion; 340 + register_acpi_notifier(&intel_opregion_notifier); 341 + 342 + return 0; 343 + 344 + err_out: 345 + iounmap(opregion->header); 346 + opregion->header = NULL; 347 + return err; 348 + } 349 + 350 + void intel_opregion_free(struct drm_device *dev) 351 + { 352 + struct drm_i915_private *dev_priv = dev->dev_private; 353 + struct intel_opregion *opregion = &dev_priv->opregion; 354 + 355 + if (!opregion->enabled) 356 + return; 357 + 358 + opregion->acpi->drdy = 0; 359 + 360 + system_opregion = NULL; 361 + unregister_acpi_notifier(&intel_opregion_notifier); 362 + 363 + /* just clear all opregion memory pointers now */ 364 + iounmap(opregion->header); 365 + opregion->header = NULL; 366 + opregion->acpi = NULL; 367 + opregion->swsci = NULL; 368 + opregion->asle = NULL; 369 + 370 + opregion->enabled = 0; 371 + }
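Not part of the merged diff: the backlight arithmetic in asle_set_backlight() in plain numbers. The firmware requests a level bclp in 0..255; the driver scales it to a 16-bit duty cycle with bclp * 0x101 - 1 and reports the resulting percentage back through CBLV (together with the valid bit) as bclp * 0x64 / 0xff. A worked example:

#include <stdio.h>

int main(void)
{
	unsigned int bclp = 128;		/* example request from the firmware, 0..255 */
	unsigned int duty = bclp * 0x101 - 1;	/* 128 * 257 - 1 = 0x807f, 16-bit duty cycle */
	unsigned int pct  = bclp * 0x64 / 0xff;	/* 12800 / 255 = 50, reported via CBLV */

	printf("duty cycle = 0x%04x, CBLV level = %u%%\n", duty, pct);
	return 0;
}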
+1417
drivers/gpu/drm/i915/i915_reg.h
···
··· 1 + /* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. 2 + * All Rights Reserved. 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the 6 + * "Software"), to deal in the Software without restriction, including 7 + * without limitation the rights to use, copy, modify, merge, publish, 8 + * distribute, sub license, and/or sell copies of the Software, and to 9 + * permit persons to whom the Software is furnished to do so, subject to 10 + * the following conditions: 11 + * 12 + * The above copyright notice and this permission notice (including the 13 + * next paragraph) shall be included in all copies or substantial portions 14 + * of the Software. 15 + * 16 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 17 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 19 + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 20 + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 21 + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 22 + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 + */ 24 + 25 + #ifndef _I915_REG_H_ 26 + #define _I915_REG_H_ 27 + 28 + /* 29 + * The Bridge device's PCI config space has information about the 30 + * fb aperture size and the amount of pre-reserved memory. 31 + */ 32 + #define INTEL_GMCH_CTRL 0x52 33 + #define INTEL_GMCH_ENABLED 0x4 34 + #define INTEL_GMCH_MEM_MASK 0x1 35 + #define INTEL_GMCH_MEM_64M 0x1 36 + #define INTEL_GMCH_MEM_128M 0 37 + 38 + #define INTEL_855_GMCH_GMS_MASK (0x7 << 4) 39 + #define INTEL_855_GMCH_GMS_DISABLED (0x0 << 4) 40 + #define INTEL_855_GMCH_GMS_STOLEN_1M (0x1 << 4) 41 + #define INTEL_855_GMCH_GMS_STOLEN_4M (0x2 << 4) 42 + #define INTEL_855_GMCH_GMS_STOLEN_8M (0x3 << 4) 43 + #define INTEL_855_GMCH_GMS_STOLEN_16M (0x4 << 4) 44 + #define INTEL_855_GMCH_GMS_STOLEN_32M (0x5 << 4) 45 + 46 + #define INTEL_915G_GMCH_GMS_STOLEN_48M (0x6 << 4) 47 + #define INTEL_915G_GMCH_GMS_STOLEN_64M (0x7 << 4) 48 + 49 + /* PCI config space */ 50 + 51 + #define HPLLCC 0xc0 /* 855 only */ 52 + #define GC_CLOCK_CONTROL_MASK (3 << 0) 53 + #define GC_CLOCK_133_200 (0 << 0) 54 + #define GC_CLOCK_100_200 (1 << 0) 55 + #define GC_CLOCK_100_133 (2 << 0) 56 + #define GC_CLOCK_166_250 (3 << 0) 57 + #define GCFGC 0xf0 /* 915+ only */ 58 + #define GC_LOW_FREQUENCY_ENABLE (1 << 7) 59 + #define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) 60 + #define GC_DISPLAY_CLOCK_333_MHZ (4 << 4) 61 + #define GC_DISPLAY_CLOCK_MASK (7 << 4) 62 + #define LBB 0xf4 63 + 64 + /* VGA stuff */ 65 + 66 + #define VGA_ST01_MDA 0x3ba 67 + #define VGA_ST01_CGA 0x3da 68 + 69 + #define VGA_MSR_WRITE 0x3c2 70 + #define VGA_MSR_READ 0x3cc 71 + #define VGA_MSR_MEM_EN (1<<1) 72 + #define VGA_MSR_CGA_MODE (1<<0) 73 + 74 + #define VGA_SR_INDEX 0x3c4 75 + #define VGA_SR_DATA 0x3c5 76 + 77 + #define VGA_AR_INDEX 0x3c0 78 + #define VGA_AR_VID_EN (1<<5) 79 + #define VGA_AR_DATA_WRITE 0x3c0 80 + #define VGA_AR_DATA_READ 0x3c1 81 + 82 + #define VGA_GR_INDEX 0x3ce 83 + #define VGA_GR_DATA 0x3cf 84 + /* GR05 */ 85 + #define VGA_GR_MEM_READ_MODE_SHIFT 3 86 + #define VGA_GR_MEM_READ_MODE_PLANE 1 87 + /* GR06 */ 88 + #define VGA_GR_MEM_MODE_MASK 0xc 89 + #define VGA_GR_MEM_MODE_SHIFT 2 90 + #define VGA_GR_MEM_A0000_AFFFF 0 91 + #define VGA_GR_MEM_A0000_BFFFF 1 92 + #define VGA_GR_MEM_B0000_B7FFF 2 93 + #define 
VGA_GR_MEM_B0000_BFFFF 3 94 + 95 + #define VGA_DACMASK 0x3c6 96 + #define VGA_DACRX 0x3c7 97 + #define VGA_DACWX 0x3c8 98 + #define VGA_DACDATA 0x3c9 99 + 100 + #define VGA_CR_INDEX_MDA 0x3b4 101 + #define VGA_CR_DATA_MDA 0x3b5 102 + #define VGA_CR_INDEX_CGA 0x3d4 103 + #define VGA_CR_DATA_CGA 0x3d5 104 + 105 + /* 106 + * Memory interface instructions used by the kernel 107 + */ 108 + #define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags)) 109 + 110 + #define MI_NOOP MI_INSTR(0, 0) 111 + #define MI_USER_INTERRUPT MI_INSTR(0x02, 0) 112 + #define MI_WAIT_FOR_EVENT MI_INSTR(0x03, 0) 113 + #define MI_WAIT_FOR_PLANE_B_FLIP (1<<6) 114 + #define MI_WAIT_FOR_PLANE_A_FLIP (1<<2) 115 + #define MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1) 116 + #define MI_FLUSH MI_INSTR(0x04, 0) 117 + #define MI_READ_FLUSH (1 << 0) 118 + #define MI_EXE_FLUSH (1 << 1) 119 + #define MI_NO_WRITE_FLUSH (1 << 2) 120 + #define MI_SCENE_COUNT (1 << 3) /* just increment scene count */ 121 + #define MI_END_SCENE (1 << 4) /* flush binner and incr scene count */ 122 + #define MI_BATCH_BUFFER_END MI_INSTR(0x0a, 0) 123 + #define MI_REPORT_HEAD MI_INSTR(0x07, 0) 124 + #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0) 125 + #define MI_STORE_DWORD_IMM MI_INSTR(0x20, 1) 126 + #define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */ 127 + #define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1) 128 + #define MI_STORE_DWORD_INDEX_SHIFT 2 129 + #define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1) 130 + #define MI_BATCH_BUFFER MI_INSTR(0x30, 1) 131 + #define MI_BATCH_NON_SECURE (1) 132 + #define MI_BATCH_NON_SECURE_I965 (1<<8) 133 + #define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0) 134 + 135 + /* 136 + * 3D instructions used by the kernel 137 + */ 138 + #define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags)) 139 + 140 + #define GFX_OP_RASTER_RULES ((0x3<<29)|(0x7<<24)) 141 + #define GFX_OP_SCISSOR ((0x3<<29)|(0x1c<<24)|(0x10<<19)) 142 + #define SC_UPDATE_SCISSOR (0x1<<1) 143 + #define SC_ENABLE_MASK (0x1<<0) 144 + #define SC_ENABLE (0x1<<0) 145 + #define GFX_OP_LOAD_INDIRECT ((0x3<<29)|(0x1d<<24)|(0x7<<16)) 146 + #define GFX_OP_SCISSOR_INFO ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1)) 147 + #define SCI_YMIN_MASK (0xffff<<16) 148 + #define SCI_XMIN_MASK (0xffff<<0) 149 + #define SCI_YMAX_MASK (0xffff<<16) 150 + #define SCI_XMAX_MASK (0xffff<<0) 151 + #define GFX_OP_SCISSOR_ENABLE ((0x3<<29)|(0x1c<<24)|(0x10<<19)) 152 + #define GFX_OP_SCISSOR_RECT ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1) 153 + #define GFX_OP_COLOR_FACTOR ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0) 154 + #define GFX_OP_STIPPLE ((0x3<<29)|(0x1d<<24)|(0x83<<16)) 155 + #define GFX_OP_MAP_INFO ((0x3<<29)|(0x1d<<24)|0x4) 156 + #define GFX_OP_DESTBUFFER_VARS ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0) 157 + #define GFX_OP_DESTBUFFER_INFO ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1) 158 + #define GFX_OP_DRAWRECT_INFO ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3)) 159 + #define GFX_OP_DRAWRECT_INFO_I965 ((0x7900<<16)|0x2) 160 + #define SRC_COPY_BLT_CMD ((2<<29)|(0x43<<22)|4) 161 + #define XY_SRC_COPY_BLT_CMD ((2<<29)|(0x53<<22)|6) 162 + #define XY_MONO_SRC_COPY_IMM_BLT ((2<<29)|(0x71<<22)|5) 163 + #define XY_SRC_COPY_BLT_WRITE_ALPHA (1<<21) 164 + #define XY_SRC_COPY_BLT_WRITE_RGB (1<<20) 165 + #define BLT_DEPTH_8 (0<<24) 166 + #define BLT_DEPTH_16_565 (1<<24) 167 + #define BLT_DEPTH_16_1555 (2<<24) 168 + #define BLT_DEPTH_32 (3<<24) 169 + #define BLT_ROP_GXCOPY (0xcc<<16) 170 + #define XY_SRC_COPY_BLT_SRC_TILED (1<<15) /* 965+ only */ 171 + #define XY_SRC_COPY_BLT_DST_TILED (1<<11) /* 965+ only */ 172 + #define 
CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2) 173 + #define ASYNC_FLIP (1<<22) 174 + #define DISPLAY_PLANE_A (0<<20) 175 + #define DISPLAY_PLANE_B (1<<20) 176 + 177 + /* 178 + * Instruction and interrupt control regs 179 + */ 180 + 181 + #define PRB0_TAIL 0x02030 182 + #define PRB0_HEAD 0x02034 183 + #define PRB0_START 0x02038 184 + #define PRB0_CTL 0x0203c 185 + #define TAIL_ADDR 0x001FFFF8 186 + #define HEAD_WRAP_COUNT 0xFFE00000 187 + #define HEAD_WRAP_ONE 0x00200000 188 + #define HEAD_ADDR 0x001FFFFC 189 + #define RING_NR_PAGES 0x001FF000 190 + #define RING_REPORT_MASK 0x00000006 191 + #define RING_REPORT_64K 0x00000002 192 + #define RING_REPORT_128K 0x00000004 193 + #define RING_NO_REPORT 0x00000000 194 + #define RING_VALID_MASK 0x00000001 195 + #define RING_VALID 0x00000001 196 + #define RING_INVALID 0x00000000 197 + #define PRB1_TAIL 0x02040 /* 915+ only */ 198 + #define PRB1_HEAD 0x02044 /* 915+ only */ 199 + #define PRB1_START 0x02048 /* 915+ only */ 200 + #define PRB1_CTL 0x0204c /* 915+ only */ 201 + #define ACTHD_I965 0x02074 202 + #define HWS_PGA 0x02080 203 + #define HWS_ADDRESS_MASK 0xfffff000 204 + #define HWS_START_ADDRESS_SHIFT 4 205 + #define IPEIR 0x02088 206 + #define NOPID 0x02094 207 + #define HWSTAM 0x02098 208 + #define SCPD0 0x0209c /* 915+ only */ 209 + #define IER 0x020a0 210 + #define IIR 0x020a4 211 + #define IMR 0x020a8 212 + #define ISR 0x020ac 213 + #define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18) 214 + #define I915_DISPLAY_PORT_INTERRUPT (1<<17) 215 + #define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15) 216 + #define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1<<14) 217 + #define I915_HWB_OOM_INTERRUPT (1<<13) 218 + #define I915_SYNC_STATUS_INTERRUPT (1<<12) 219 + #define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1<<11) 220 + #define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1<<10) 221 + #define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1<<9) 222 + #define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1<<8) 223 + #define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1<<7) 224 + #define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1<<6) 225 + #define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1<<5) 226 + #define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1<<4) 227 + #define I915_DEBUG_INTERRUPT (1<<2) 228 + #define I915_USER_INTERRUPT (1<<1) 229 + #define I915_ASLE_INTERRUPT (1<<0) 230 + #define EIR 0x020b0 231 + #define EMR 0x020b4 232 + #define ESR 0x020b8 233 + #define INSTPM 0x020c0 234 + #define ACTHD 0x020c8 235 + #define FW_BLC 0x020d8 236 + #define FW_BLC_SELF 0x020e0 /* 915+ only */ 237 + #define MI_ARB_STATE 0x020e4 /* 915+ only */ 238 + #define CACHE_MODE_0 0x02120 /* 915+ only */ 239 + #define CM0_MASK_SHIFT 16 240 + #define CM0_IZ_OPT_DISABLE (1<<6) 241 + #define CM0_ZR_OPT_DISABLE (1<<5) 242 + #define CM0_DEPTH_EVICT_DISABLE (1<<4) 243 + #define CM0_COLOR_EVICT_DISABLE (1<<3) 244 + #define CM0_DEPTH_WRITE_DISABLE (1<<1) 245 + #define CM0_RC_OP_FLUSH_DISABLE (1<<0) 246 + #define GFX_FLSH_CNTL 0x02170 /* 915+ only */ 247 + 248 + /* 249 + * Framebuffer compression (915+ only) 250 + */ 251 + 252 + #define FBC_CFB_BASE 0x03200 /* 4k page aligned */ 253 + #define FBC_LL_BASE 0x03204 /* 4k page aligned */ 254 + #define FBC_CONTROL 0x03208 255 + #define FBC_CTL_EN (1<<31) 256 + #define FBC_CTL_PERIODIC (1<<30) 257 + #define FBC_CTL_INTERVAL_SHIFT (16) 258 + #define FBC_CTL_UNCOMPRESSIBLE (1<<14) 259 + #define FBC_CTL_STRIDE_SHIFT (5) 260 + #define FBC_CTL_FENCENO (1<<0) 261 + #define FBC_COMMAND 0x0320c 262 + #define FBC_CMD_COMPRESS (1<<0) 263 + #define 
FBC_STATUS 0x03210 264 + #define FBC_STAT_COMPRESSING (1<<31) 265 + #define FBC_STAT_COMPRESSED (1<<30) 266 + #define FBC_STAT_MODIFIED (1<<29) 267 + #define FBC_STAT_CURRENT_LINE (1<<0) 268 + #define FBC_CONTROL2 0x03214 269 + #define FBC_CTL_FENCE_DBL (0<<4) 270 + #define FBC_CTL_IDLE_IMM (0<<2) 271 + #define FBC_CTL_IDLE_FULL (1<<2) 272 + #define FBC_CTL_IDLE_LINE (2<<2) 273 + #define FBC_CTL_IDLE_DEBUG (3<<2) 274 + #define FBC_CTL_CPU_FENCE (1<<1) 275 + #define FBC_CTL_PLANEA (0<<0) 276 + #define FBC_CTL_PLANEB (1<<0) 277 + #define FBC_FENCE_OFF 0x0321b 278 + 279 + #define FBC_LL_SIZE (1536) 280 + 281 + /* 282 + * GPIO regs 283 + */ 284 + #define GPIOA 0x5010 285 + #define GPIOB 0x5014 286 + #define GPIOC 0x5018 287 + #define GPIOD 0x501c 288 + #define GPIOE 0x5020 289 + #define GPIOF 0x5024 290 + #define GPIOG 0x5028 291 + #define GPIOH 0x502c 292 + # define GPIO_CLOCK_DIR_MASK (1 << 0) 293 + # define GPIO_CLOCK_DIR_IN (0 << 1) 294 + # define GPIO_CLOCK_DIR_OUT (1 << 1) 295 + # define GPIO_CLOCK_VAL_MASK (1 << 2) 296 + # define GPIO_CLOCK_VAL_OUT (1 << 3) 297 + # define GPIO_CLOCK_VAL_IN (1 << 4) 298 + # define GPIO_CLOCK_PULLUP_DISABLE (1 << 5) 299 + # define GPIO_DATA_DIR_MASK (1 << 8) 300 + # define GPIO_DATA_DIR_IN (0 << 9) 301 + # define GPIO_DATA_DIR_OUT (1 << 9) 302 + # define GPIO_DATA_VAL_MASK (1 << 10) 303 + # define GPIO_DATA_VAL_OUT (1 << 11) 304 + # define GPIO_DATA_VAL_IN (1 << 12) 305 + # define GPIO_DATA_PULLUP_DISABLE (1 << 13) 306 + 307 + /* 308 + * Clock control & power management 309 + */ 310 + 311 + #define VGA0 0x6000 312 + #define VGA1 0x6004 313 + #define VGA_PD 0x6010 314 + #define VGA0_PD_P2_DIV_4 (1 << 7) 315 + #define VGA0_PD_P1_DIV_2 (1 << 5) 316 + #define VGA0_PD_P1_SHIFT 0 317 + #define VGA0_PD_P1_MASK (0x1f << 0) 318 + #define VGA1_PD_P2_DIV_4 (1 << 15) 319 + #define VGA1_PD_P1_DIV_2 (1 << 13) 320 + #define VGA1_PD_P1_SHIFT 8 321 + #define VGA1_PD_P1_MASK (0x1f << 8) 322 + #define DPLL_A 0x06014 323 + #define DPLL_B 0x06018 324 + #define DPLL_VCO_ENABLE (1 << 31) 325 + #define DPLL_DVO_HIGH_SPEED (1 << 30) 326 + #define DPLL_SYNCLOCK_ENABLE (1 << 29) 327 + #define DPLL_VGA_MODE_DIS (1 << 28) 328 + #define DPLLB_MODE_DAC_SERIAL (1 << 26) /* i915 */ 329 + #define DPLLB_MODE_LVDS (2 << 26) /* i915 */ 330 + #define DPLL_MODE_MASK (3 << 26) 331 + #define DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */ 332 + #define DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */ 333 + #define DPLLB_LVDS_P2_CLOCK_DIV_14 (0 << 24) /* i915 */ 334 + #define DPLLB_LVDS_P2_CLOCK_DIV_7 (1 << 24) /* i915 */ 335 + #define DPLL_P2_CLOCK_DIV_MASK 0x03000000 /* i915 */ 336 + #define DPLL_FPA01_P1_POST_DIV_MASK 0x00ff0000 /* i915 */ 337 + 338 + #define I915_FIFO_UNDERRUN_STATUS (1UL<<31) 339 + #define I915_CRC_ERROR_ENABLE (1UL<<29) 340 + #define I915_CRC_DONE_ENABLE (1UL<<28) 341 + #define I915_GMBUS_EVENT_ENABLE (1UL<<27) 342 + #define I915_VSYNC_INTERRUPT_ENABLE (1UL<<25) 343 + #define I915_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) 344 + #define I915_DPST_EVENT_ENABLE (1UL<<23) 345 + #define I915_LEGACY_BLC_EVENT_ENABLE (1UL<<22) 346 + #define I915_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) 347 + #define I915_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) 348 + #define I915_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ 349 + #define I915_VBLANK_INTERRUPT_ENABLE (1UL<<17) 350 + #define I915_OVERLAY_UPDATED_ENABLE (1UL<<16) 351 + #define I915_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) 352 + #define I915_CRC_DONE_INTERRUPT_STATUS (1UL<<12) 353 + #define I915_GMBUS_INTERRUPT_STATUS (1UL<<11) 
354 + #define I915_VSYNC_INTERRUPT_STATUS (1UL<<9) 355 + #define I915_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) 356 + #define I915_DPST_EVENT_STATUS (1UL<<7) 357 + #define I915_LEGACY_BLC_EVENT_STATUS (1UL<<6) 358 + #define I915_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) 359 + #define I915_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) 360 + #define I915_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ 361 + #define I915_VBLANK_INTERRUPT_STATUS (1UL<<1) 362 + #define I915_OVERLAY_UPDATED_STATUS (1UL<<0) 363 + 364 + #define SRX_INDEX 0x3c4 365 + #define SRX_DATA 0x3c5 366 + #define SR01 1 367 + #define SR01_SCREEN_OFF (1<<5) 368 + 369 + #define PPCR 0x61204 370 + #define PPCR_ON (1<<0) 371 + 372 + #define DVOB 0x61140 373 + #define DVOB_ON (1<<31) 374 + #define DVOC 0x61160 375 + #define DVOC_ON (1<<31) 376 + #define LVDS 0x61180 377 + #define LVDS_ON (1<<31) 378 + 379 + #define ADPA 0x61100 380 + #define ADPA_DPMS_MASK (~(3<<10)) 381 + #define ADPA_DPMS_ON (0<<10) 382 + #define ADPA_DPMS_SUSPEND (1<<10) 383 + #define ADPA_DPMS_STANDBY (2<<10) 384 + #define ADPA_DPMS_OFF (3<<10) 385 + 386 + #define RING_TAIL 0x00 387 + #define TAIL_ADDR 0x001FFFF8 388 + #define RING_HEAD 0x04 389 + #define HEAD_WRAP_COUNT 0xFFE00000 390 + #define HEAD_WRAP_ONE 0x00200000 391 + #define HEAD_ADDR 0x001FFFFC 392 + #define RING_START 0x08 393 + #define START_ADDR 0xFFFFF000 394 + #define RING_LEN 0x0C 395 + #define RING_NR_PAGES 0x001FF000 396 + #define RING_REPORT_MASK 0x00000006 397 + #define RING_REPORT_64K 0x00000002 398 + #define RING_REPORT_128K 0x00000004 399 + #define RING_NO_REPORT 0x00000000 400 + #define RING_VALID_MASK 0x00000001 401 + #define RING_VALID 0x00000001 402 + #define RING_INVALID 0x00000000 403 + 404 + /* Scratch pad debug 0 reg: 405 + */ 406 + #define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000 407 + /* 408 + * The i830 generation, in LVDS mode, defines P1 as the bit number set within 409 + * this field (only one bit may be set). 410 + */ 411 + #define DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS 0x003f0000 412 + #define DPLL_FPA01_P1_POST_DIV_SHIFT 16 413 + /* i830, required in DVO non-gang */ 414 + #define PLL_P2_DIVIDE_BY_4 (1 << 23) 415 + #define PLL_P1_DIVIDE_BY_TWO (1 << 21) /* i830 */ 416 + #define PLL_REF_INPUT_DREFCLK (0 << 13) 417 + #define PLL_REF_INPUT_TVCLKINA (1 << 13) /* i830 */ 418 + #define PLL_REF_INPUT_TVCLKINBC (2 << 13) /* SDVO TVCLKIN */ 419 + #define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) 420 + #define PLL_REF_INPUT_MASK (3 << 13) 421 + #define PLL_LOAD_PULSE_PHASE_SHIFT 9 422 + /* 423 + * Parallel to Serial Load Pulse phase selection. 424 + * Selects the phase for the 10X DPLL clock for the PCIe 425 + * digital display port. The range is 4 to 13; 10 or more 426 + * is just a flip delay. The default is 6 427 + */ 428 + #define PLL_LOAD_PULSE_PHASE_MASK (0xf << PLL_LOAD_PULSE_PHASE_SHIFT) 429 + #define DISPLAY_RATE_SELECT_FPA1 (1 << 8) 430 + /* 431 + * SDVO multiplier for 945G/GM. Not used on 965. 432 + */ 433 + #define SDVO_MULTIPLIER_MASK 0x000000ff 434 + #define SDVO_MULTIPLIER_SHIFT_HIRES 4 435 + #define SDVO_MULTIPLIER_SHIFT_VGA 0 436 + #define DPLL_A_MD 0x0601c /* 965+ only */ 437 + /* 438 + * UDI pixel divider, controlling how many pixels are stuffed into a packet. 439 + * 440 + * Value is pixels minus 1. Must be set to 1 pixel for SDVO. 441 + */ 442 + #define DPLL_MD_UDI_DIVIDER_MASK 0x3f000000 443 + #define DPLL_MD_UDI_DIVIDER_SHIFT 24 444 + /* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. 
*/ 445 + #define DPLL_MD_VGA_UDI_DIVIDER_MASK 0x003f0000 446 + #define DPLL_MD_VGA_UDI_DIVIDER_SHIFT 16 447 + /* 448 + * SDVO/UDI pixel multiplier. 449 + * 450 + * SDVO requires that the bus clock rate be between 1 and 2 Ghz, and the bus 451 + * clock rate is 10 times the DPLL clock. At low resolution/refresh rate 452 + * modes, the bus rate would be below the limits, so SDVO allows for stuffing 453 + * dummy bytes in the datastream at an increased clock rate, with both sides of 454 + * the link knowing how many bytes are fill. 455 + * 456 + * So, for a mode with a dotclock of 65Mhz, we would want to double the clock 457 + * rate to 130Mhz to get a bus rate of 1.30Ghz. The DPLL clock rate would be 458 + * set to 130Mhz, and the SDVO multiplier set to 2x in this register and 459 + * through an SDVO command. 460 + * 461 + * This register field has values of multiplication factor minus 1, with 462 + * a maximum multiplier of 5 for SDVO. 463 + */ 464 + #define DPLL_MD_UDI_MULTIPLIER_MASK 0x00003f00 465 + #define DPLL_MD_UDI_MULTIPLIER_SHIFT 8 466 + /* 467 + * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK. 468 + * This best be set to the default value (3) or the CRT won't work. No, 469 + * I don't entirely understand what this does... 470 + */ 471 + #define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f 472 + #define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0 473 + #define DPLL_B_MD 0x06020 /* 965+ only */ 474 + #define FPA0 0x06040 475 + #define FPA1 0x06044 476 + #define FPB0 0x06048 477 + #define FPB1 0x0604c 478 + #define FP_N_DIV_MASK 0x003f0000 479 + #define FP_N_DIV_SHIFT 16 480 + #define FP_M1_DIV_MASK 0x00003f00 481 + #define FP_M1_DIV_SHIFT 8 482 + #define FP_M2_DIV_MASK 0x0000003f 483 + #define FP_M2_DIV_SHIFT 0 484 + #define DPLL_TEST 0x606c 485 + #define DPLLB_TEST_SDVO_DIV_1 (0 << 22) 486 + #define DPLLB_TEST_SDVO_DIV_2 (1 << 22) 487 + #define DPLLB_TEST_SDVO_DIV_4 (2 << 22) 488 + #define DPLLB_TEST_SDVO_DIV_MASK (3 << 22) 489 + #define DPLLB_TEST_N_BYPASS (1 << 19) 490 + #define DPLLB_TEST_M_BYPASS (1 << 18) 491 + #define DPLLB_INPUT_BUFFER_ENABLE (1 << 16) 492 + #define DPLLA_TEST_N_BYPASS (1 << 3) 493 + #define DPLLA_TEST_M_BYPASS (1 << 2) 494 + #define DPLLA_INPUT_BUFFER_ENABLE (1 << 0) 495 + #define D_STATE 0x6104 496 + #define CG_2D_DIS 0x6200 497 + #define CG_3D_DIS 0x6204 498 + 499 + /* 500 + * Palette regs 501 + */ 502 + 503 + #define PALETTE_A 0x0a000 504 + #define PALETTE_B 0x0a800 505 + 506 + /* MCH MMIO space */ 507 + 508 + /* 509 + * MCHBAR mirror. 510 + * 511 + * This mirrors the MCHBAR MMIO space whose location is determined by 512 + * device 0 function 0's pci config register 0x44 or 0x48 and matches it in 513 + * every way. It is not accessible from the CP register read instructions. 
514 + * 515 + */ 516 + #define MCHBAR_MIRROR_BASE 0x10000 517 + 518 + /** 915-945 and GM965 MCH register controlling DRAM channel access */ 519 + #define DCC 0x10200 520 + #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) 521 + #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) 522 + #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) 523 + #define DCC_ADDRESSING_MODE_MASK (3 << 0) 524 + #define DCC_CHANNEL_XOR_DISABLE (1 << 10) 525 + 526 + /** 965 MCH register controlling DRAM channel configuration */ 527 + #define C0DRB3 0x10206 528 + #define C1DRB3 0x10606 529 + 530 + /* 531 + * Overlay regs 532 + */ 533 + 534 + #define OVADD 0x30000 535 + #define DOVSTA 0x30008 536 + #define OC_BUF (0x3<<20) 537 + #define OGAMC5 0x30010 538 + #define OGAMC4 0x30014 539 + #define OGAMC3 0x30018 540 + #define OGAMC2 0x3001c 541 + #define OGAMC1 0x30020 542 + #define OGAMC0 0x30024 543 + 544 + /* 545 + * Display engine regs 546 + */ 547 + 548 + /* Pipe A timing regs */ 549 + #define HTOTAL_A 0x60000 550 + #define HBLANK_A 0x60004 551 + #define HSYNC_A 0x60008 552 + #define VTOTAL_A 0x6000c 553 + #define VBLANK_A 0x60010 554 + #define VSYNC_A 0x60014 555 + #define PIPEASRC 0x6001c 556 + #define BCLRPAT_A 0x60020 557 + 558 + /* Pipe B timing regs */ 559 + #define HTOTAL_B 0x61000 560 + #define HBLANK_B 0x61004 561 + #define HSYNC_B 0x61008 562 + #define VTOTAL_B 0x6100c 563 + #define VBLANK_B 0x61010 564 + #define VSYNC_B 0x61014 565 + #define PIPEBSRC 0x6101c 566 + #define BCLRPAT_B 0x61020 567 + 568 + /* VGA port control */ 569 + #define ADPA 0x61100 570 + #define ADPA_DAC_ENABLE (1<<31) 571 + #define ADPA_DAC_DISABLE 0 572 + #define ADPA_PIPE_SELECT_MASK (1<<30) 573 + #define ADPA_PIPE_A_SELECT 0 574 + #define ADPA_PIPE_B_SELECT (1<<30) 575 + #define ADPA_USE_VGA_HVPOLARITY (1<<15) 576 + #define ADPA_SETS_HVPOLARITY 0 577 + #define ADPA_VSYNC_CNTL_DISABLE (1<<11) 578 + #define ADPA_VSYNC_CNTL_ENABLE 0 579 + #define ADPA_HSYNC_CNTL_DISABLE (1<<10) 580 + #define ADPA_HSYNC_CNTL_ENABLE 0 581 + #define ADPA_VSYNC_ACTIVE_HIGH (1<<4) 582 + #define ADPA_VSYNC_ACTIVE_LOW 0 583 + #define ADPA_HSYNC_ACTIVE_HIGH (1<<3) 584 + #define ADPA_HSYNC_ACTIVE_LOW 0 585 + #define ADPA_DPMS_MASK (~(3<<10)) 586 + #define ADPA_DPMS_ON (0<<10) 587 + #define ADPA_DPMS_SUSPEND (1<<10) 588 + #define ADPA_DPMS_STANDBY (2<<10) 589 + #define ADPA_DPMS_OFF (3<<10) 590 + 591 + /* Hotplug control (945+ only) */ 592 + #define PORT_HOTPLUG_EN 0x61110 593 + #define SDVOB_HOTPLUG_INT_EN (1 << 26) 594 + #define SDVOC_HOTPLUG_INT_EN (1 << 25) 595 + #define TV_HOTPLUG_INT_EN (1 << 18) 596 + #define CRT_HOTPLUG_INT_EN (1 << 9) 597 + #define CRT_HOTPLUG_FORCE_DETECT (1 << 3) 598 + 599 + #define PORT_HOTPLUG_STAT 0x61114 600 + #define CRT_HOTPLUG_INT_STATUS (1 << 11) 601 + #define TV_HOTPLUG_INT_STATUS (1 << 10) 602 + #define CRT_HOTPLUG_MONITOR_MASK (3 << 8) 603 + #define CRT_HOTPLUG_MONITOR_COLOR (3 << 8) 604 + #define CRT_HOTPLUG_MONITOR_MONO (2 << 8) 605 + #define CRT_HOTPLUG_MONITOR_NONE (0 << 8) 606 + #define SDVOC_HOTPLUG_INT_STATUS (1 << 7) 607 + #define SDVOB_HOTPLUG_INT_STATUS (1 << 6) 608 + 609 + /* SDVO port control */ 610 + #define SDVOB 0x61140 611 + #define SDVOC 0x61160 612 + #define SDVO_ENABLE (1 << 31) 613 + #define SDVO_PIPE_B_SELECT (1 << 30) 614 + #define SDVO_STALL_SELECT (1 << 29) 615 + #define SDVO_INTERRUPT_ENABLE (1 << 26) 616 + /** 617 + * 915G/GM SDVO pixel multiplier. 618 + * 619 + * Programmed value is multiplier - 1, up to 5x. 
620 + * 621 + * \sa DPLL_MD_UDI_MULTIPLIER_MASK 622 + */ 623 + #define SDVO_PORT_MULTIPLY_MASK (7 << 23) 624 + #define SDVO_PORT_MULTIPLY_SHIFT 23 625 + #define SDVO_PHASE_SELECT_MASK (15 << 19) 626 + #define SDVO_PHASE_SELECT_DEFAULT (6 << 19) 627 + #define SDVO_CLOCK_OUTPUT_INVERT (1 << 18) 628 + #define SDVOC_GANG_MODE (1 << 16) 629 + #define SDVO_BORDER_ENABLE (1 << 7) 630 + #define SDVOB_PCIE_CONCURRENCY (1 << 3) 631 + #define SDVO_DETECTED (1 << 2) 632 + /* Bits to be preserved when writing */ 633 + #define SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26)) 634 + #define SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26)) 635 + 636 + /* DVO port control */ 637 + #define DVOA 0x61120 638 + #define DVOB 0x61140 639 + #define DVOC 0x61160 640 + #define DVO_ENABLE (1 << 31) 641 + #define DVO_PIPE_B_SELECT (1 << 30) 642 + #define DVO_PIPE_STALL_UNUSED (0 << 28) 643 + #define DVO_PIPE_STALL (1 << 28) 644 + #define DVO_PIPE_STALL_TV (2 << 28) 645 + #define DVO_PIPE_STALL_MASK (3 << 28) 646 + #define DVO_USE_VGA_SYNC (1 << 15) 647 + #define DVO_DATA_ORDER_I740 (0 << 14) 648 + #define DVO_DATA_ORDER_FP (1 << 14) 649 + #define DVO_VSYNC_DISABLE (1 << 11) 650 + #define DVO_HSYNC_DISABLE (1 << 10) 651 + #define DVO_VSYNC_TRISTATE (1 << 9) 652 + #define DVO_HSYNC_TRISTATE (1 << 8) 653 + #define DVO_BORDER_ENABLE (1 << 7) 654 + #define DVO_DATA_ORDER_GBRG (1 << 6) 655 + #define DVO_DATA_ORDER_RGGB (0 << 6) 656 + #define DVO_DATA_ORDER_GBRG_ERRATA (0 << 6) 657 + #define DVO_DATA_ORDER_RGGB_ERRATA (1 << 6) 658 + #define DVO_VSYNC_ACTIVE_HIGH (1 << 4) 659 + #define DVO_HSYNC_ACTIVE_HIGH (1 << 3) 660 + #define DVO_BLANK_ACTIVE_HIGH (1 << 2) 661 + #define DVO_OUTPUT_CSTATE_PIXELS (1 << 1) /* SDG only */ 662 + #define DVO_OUTPUT_SOURCE_SIZE_PIXELS (1 << 0) /* SDG only */ 663 + #define DVO_PRESERVE_MASK (0x7<<24) 664 + #define DVOA_SRCDIM 0x61124 665 + #define DVOB_SRCDIM 0x61144 666 + #define DVOC_SRCDIM 0x61164 667 + #define DVO_SRCDIM_HORIZONTAL_SHIFT 12 668 + #define DVO_SRCDIM_VERTICAL_SHIFT 0 669 + 670 + /* LVDS port control */ 671 + #define LVDS 0x61180 672 + /* 673 + * Enables the LVDS port. This bit must be set before DPLLs are enabled, as 674 + * the DPLL semantics change when the LVDS is assigned to that pipe. 675 + */ 676 + #define LVDS_PORT_EN (1 << 31) 677 + /* Selects pipe B for LVDS data. Must be set on pre-965. */ 678 + #define LVDS_PIPEB_SELECT (1 << 30) 679 + /* 680 + * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per 681 + * pixel. 682 + */ 683 + #define LVDS_A0A2_CLKA_POWER_MASK (3 << 8) 684 + #define LVDS_A0A2_CLKA_POWER_DOWN (0 << 8) 685 + #define LVDS_A0A2_CLKA_POWER_UP (3 << 8) 686 + /* 687 + * Controls the A3 data pair, which contains the additional LSBs for 24 bit 688 + * mode. Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be 689 + * on. 690 + */ 691 + #define LVDS_A3_POWER_MASK (3 << 6) 692 + #define LVDS_A3_POWER_DOWN (0 << 6) 693 + #define LVDS_A3_POWER_UP (3 << 6) 694 + /* 695 + * Controls the CLKB pair. This should only be set when LVDS_B0B3_POWER_UP 696 + * is set. 697 + */ 698 + #define LVDS_CLKB_POWER_MASK (3 << 4) 699 + #define LVDS_CLKB_POWER_DOWN (0 << 4) 700 + #define LVDS_CLKB_POWER_UP (3 << 4) 701 + /* 702 + * Controls the B0-B3 data pairs. This must be set to match the DPLL p2 703 + * setting for whether we are in dual-channel mode. The B3 pair will 704 + * additionally only be powered up when LVDS_A3_POWER_UP is set. 
705 + */ 706 + #define LVDS_B0B3_POWER_MASK (3 << 2) 707 + #define LVDS_B0B3_POWER_DOWN (0 << 2) 708 + #define LVDS_B0B3_POWER_UP (3 << 2) 709 + 710 + /* Panel power sequencing */ 711 + #define PP_STATUS 0x61200 712 + #define PP_ON (1 << 31) 713 + /* 714 + * Indicates that all dependencies of the panel are on: 715 + * 716 + * - PLL enabled 717 + * - pipe enabled 718 + * - LVDS/DVOB/DVOC on 719 + */ 720 + #define PP_READY (1 << 30) 721 + #define PP_SEQUENCE_NONE (0 << 28) 722 + #define PP_SEQUENCE_ON (1 << 28) 723 + #define PP_SEQUENCE_OFF (2 << 28) 724 + #define PP_SEQUENCE_MASK 0x30000000 725 + #define PP_CONTROL 0x61204 726 + #define POWER_TARGET_ON (1 << 0) 727 + #define PP_ON_DELAYS 0x61208 728 + #define PP_OFF_DELAYS 0x6120c 729 + #define PP_DIVISOR 0x61210 730 + 731 + /* Panel fitting */ 732 + #define PFIT_CONTROL 0x61230 733 + #define PFIT_ENABLE (1 << 31) 734 + #define PFIT_PIPE_MASK (3 << 29) 735 + #define PFIT_PIPE_SHIFT 29 736 + #define VERT_INTERP_DISABLE (0 << 10) 737 + #define VERT_INTERP_BILINEAR (1 << 10) 738 + #define VERT_INTERP_MASK (3 << 10) 739 + #define VERT_AUTO_SCALE (1 << 9) 740 + #define HORIZ_INTERP_DISABLE (0 << 6) 741 + #define HORIZ_INTERP_BILINEAR (1 << 6) 742 + #define HORIZ_INTERP_MASK (3 << 6) 743 + #define HORIZ_AUTO_SCALE (1 << 5) 744 + #define PANEL_8TO6_DITHER_ENABLE (1 << 3) 745 + #define PFIT_PGM_RATIOS 0x61234 746 + #define PFIT_VERT_SCALE_MASK 0xfff00000 747 + #define PFIT_HORIZ_SCALE_MASK 0x0000fff0 748 + #define PFIT_AUTO_RATIOS 0x61238 749 + 750 + /* Backlight control */ 751 + #define BLC_PWM_CTL 0x61254 752 + #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) 753 + #define BLC_PWM_CTL2 0x61250 /* 965+ only */ 754 + #define BLM_COMBINATION_MODE (1 << 30) 755 + /* 756 + * This is the most significant 15 bits of the number of backlight cycles in a 757 + * complete cycle of the modulated backlight control. 758 + * 759 + * The actual value is this field multiplied by two. 760 + */ 761 + #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) 762 + #define BLM_LEGACY_MODE (1 << 16) 763 + /* 764 + * This is the number of cycles out of the backlight modulation cycle for which 765 + * the backlight is on. 766 + * 767 + * This field must be no greater than the number of cycles in the complete 768 + * backlight modulation cycle. 769 + */ 770 + #define BACKLIGHT_DUTY_CYCLE_SHIFT (0) 771 + #define BACKLIGHT_DUTY_CYCLE_MASK (0xffff) 772 + 773 + /* TV port control */ 774 + #define TV_CTL 0x68000 775 + /** Enables the TV encoder */ 776 + # define TV_ENC_ENABLE (1 << 31) 777 + /** Sources the TV encoder input from pipe B instead of A. 
*/ 778 + # define TV_ENC_PIPEB_SELECT (1 << 30) 779 + /** Outputs composite video (DAC A only) */ 780 + # define TV_ENC_OUTPUT_COMPOSITE (0 << 28) 781 + /** Outputs SVideo video (DAC B/C) */ 782 + # define TV_ENC_OUTPUT_SVIDEO (1 << 28) 783 + /** Outputs Component video (DAC A/B/C) */ 784 + # define TV_ENC_OUTPUT_COMPONENT (2 << 28) 785 + /** Outputs Composite and SVideo (DAC A/B/C) */ 786 + # define TV_ENC_OUTPUT_SVIDEO_COMPOSITE (3 << 28) 787 + # define TV_TRILEVEL_SYNC (1 << 21) 788 + /** Enables slow sync generation (945GM only) */ 789 + # define TV_SLOW_SYNC (1 << 20) 790 + /** Selects 4x oversampling for 480i and 576p */ 791 + # define TV_OVERSAMPLE_4X (0 << 18) 792 + /** Selects 2x oversampling for 720p and 1080i */ 793 + # define TV_OVERSAMPLE_2X (1 << 18) 794 + /** Selects no oversampling for 1080p */ 795 + # define TV_OVERSAMPLE_NONE (2 << 18) 796 + /** Selects 8x oversampling */ 797 + # define TV_OVERSAMPLE_8X (3 << 18) 798 + /** Selects progressive mode rather than interlaced */ 799 + # define TV_PROGRESSIVE (1 << 17) 800 + /** Sets the colorburst to PAL mode. Required for non-M PAL modes. */ 801 + # define TV_PAL_BURST (1 << 16) 802 + /** Field for setting delay of Y compared to C */ 803 + # define TV_YC_SKEW_MASK (7 << 12) 804 + /** Enables a fix for 480p/576p standard definition modes on the 915GM only */ 805 + # define TV_ENC_SDP_FIX (1 << 11) 806 + /** 807 + * Enables a fix for the 915GM only. 808 + * 809 + * Not sure what it does. 810 + */ 811 + # define TV_ENC_C0_FIX (1 << 10) 812 + /** Bits that must be preserved by software */ 813 + # define TV_CTL_SAVE ((3 << 8) | (3 << 6)) 814 + # define TV_FUSE_STATE_MASK (3 << 4) 815 + /** Read-only state that reports all features enabled */ 816 + # define TV_FUSE_STATE_ENABLED (0 << 4) 817 + /** Read-only state that reports that Macrovision is disabled in hardware*/ 818 + # define TV_FUSE_STATE_NO_MACROVISION (1 << 4) 819 + /** Read-only state that reports that TV-out is disabled in hardware. */ 820 + # define TV_FUSE_STATE_DISABLED (2 << 4) 821 + /** Normal operation */ 822 + # define TV_TEST_MODE_NORMAL (0 << 0) 823 + /** Encoder test pattern 1 - combo pattern */ 824 + # define TV_TEST_MODE_PATTERN_1 (1 << 0) 825 + /** Encoder test pattern 2 - full screen vertical 75% color bars */ 826 + # define TV_TEST_MODE_PATTERN_2 (2 << 0) 827 + /** Encoder test pattern 3 - full screen horizontal 75% color bars */ 828 + # define TV_TEST_MODE_PATTERN_3 (3 << 0) 829 + /** Encoder test pattern 4 - random noise */ 830 + # define TV_TEST_MODE_PATTERN_4 (4 << 0) 831 + /** Encoder test pattern 5 - linear color ramps */ 832 + # define TV_TEST_MODE_PATTERN_5 (5 << 0) 833 + /** 834 + * This test mode forces the DACs to 50% of full output. 835 + * 836 + * This is used for load detection in combination with TVDAC_SENSE_MASK 837 + */ 838 + # define TV_TEST_MODE_MONITOR_DETECT (7 << 0) 839 + # define TV_TEST_MODE_MASK (7 << 0) 840 + 841 + #define TV_DAC 0x68004 842 + /** 843 + * Reports that DAC state change logic has reported change (RO). 
844 + * 845 + * This gets cleared when TV_DAC_STATE_EN is cleared 846 + */ 847 + # define TVDAC_STATE_CHG (1 << 31) 848 + # define TVDAC_SENSE_MASK (7 << 28) 849 + /** Reports that DAC A voltage is above the detect threshold */ 850 + # define TVDAC_A_SENSE (1 << 30) 851 + /** Reports that DAC B voltage is above the detect threshold */ 852 + # define TVDAC_B_SENSE (1 << 29) 853 + /** Reports that DAC C voltage is above the detect threshold */ 854 + # define TVDAC_C_SENSE (1 << 28) 855 + /** 856 + * Enables DAC state detection logic, for load-based TV detection. 857 + * 858 + * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set 859 + * to off, for load detection to work. 860 + */ 861 + # define TVDAC_STATE_CHG_EN (1 << 27) 862 + /** Sets the DAC A sense value to high */ 863 + # define TVDAC_A_SENSE_CTL (1 << 26) 864 + /** Sets the DAC B sense value to high */ 865 + # define TVDAC_B_SENSE_CTL (1 << 25) 866 + /** Sets the DAC C sense value to high */ 867 + # define TVDAC_C_SENSE_CTL (1 << 24) 868 + /** Overrides the ENC_ENABLE and DAC voltage levels */ 869 + # define DAC_CTL_OVERRIDE (1 << 7) 870 + /** Sets the slew rate. Must be preserved in software */ 871 + # define ENC_TVDAC_SLEW_FAST (1 << 6) 872 + # define DAC_A_1_3_V (0 << 4) 873 + # define DAC_A_1_1_V (1 << 4) 874 + # define DAC_A_0_7_V (2 << 4) 875 + # define DAC_A_OFF (3 << 4) 876 + # define DAC_B_1_3_V (0 << 2) 877 + # define DAC_B_1_1_V (1 << 2) 878 + # define DAC_B_0_7_V (2 << 2) 879 + # define DAC_B_OFF (3 << 2) 880 + # define DAC_C_1_3_V (0 << 0) 881 + # define DAC_C_1_1_V (1 << 0) 882 + # define DAC_C_0_7_V (2 << 0) 883 + # define DAC_C_OFF (3 << 0) 884 + 885 + /** 886 + * CSC coefficients are stored in a floating point format with 9 bits of 887 + * mantissa and 2 or 3 bits of exponent. The exponent is represented as 2**-n, 888 + * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with 889 + * -1 (0x3) being the only legal negative value. 890 + */ 891 + #define TV_CSC_Y 0x68010 892 + # define TV_RY_MASK 0x07ff0000 893 + # define TV_RY_SHIFT 16 894 + # define TV_GY_MASK 0x00000fff 895 + # define TV_GY_SHIFT 0 896 + 897 + #define TV_CSC_Y2 0x68014 898 + # define TV_BY_MASK 0x07ff0000 899 + # define TV_BY_SHIFT 16 900 + /** 901 + * Y attenuation for component video. 902 + * 903 + * Stored in 1.9 fixed point. 904 + */ 905 + # define TV_AY_MASK 0x000003ff 906 + # define TV_AY_SHIFT 0 907 + 908 + #define TV_CSC_U 0x68018 909 + # define TV_RU_MASK 0x07ff0000 910 + # define TV_RU_SHIFT 16 911 + # define TV_GU_MASK 0x000007ff 912 + # define TV_GU_SHIFT 0 913 + 914 + #define TV_CSC_U2 0x6801c 915 + # define TV_BU_MASK 0x07ff0000 916 + # define TV_BU_SHIFT 16 917 + /** 918 + * U attenuation for component video. 919 + * 920 + * Stored in 1.9 fixed point. 921 + */ 922 + # define TV_AU_MASK 0x000003ff 923 + # define TV_AU_SHIFT 0 924 + 925 + #define TV_CSC_V 0x68020 926 + # define TV_RV_MASK 0x0fff0000 927 + # define TV_RV_SHIFT 16 928 + # define TV_GV_MASK 0x000007ff 929 + # define TV_GV_SHIFT 0 930 + 931 + #define TV_CSC_V2 0x68024 932 + # define TV_BV_MASK 0x07ff0000 933 + # define TV_BV_SHIFT 16 934 + /** 935 + * V attenuation for component video. 936 + * 937 + * Stored in 1.9 fixed point. 
938 + */ 939 + # define TV_AV_MASK 0x000007ff 940 + # define TV_AV_SHIFT 0 941 + 942 + #define TV_CLR_KNOBS 0x68028 943 + /** 2s-complement brightness adjustment */ 944 + # define TV_BRIGHTNESS_MASK 0xff000000 945 + # define TV_BRIGHTNESS_SHIFT 24 946 + /** Contrast adjustment, as a 2.6 unsigned floating point number */ 947 + # define TV_CONTRAST_MASK 0x00ff0000 948 + # define TV_CONTRAST_SHIFT 16 949 + /** Saturation adjustment, as a 2.6 unsigned floating point number */ 950 + # define TV_SATURATION_MASK 0x0000ff00 951 + # define TV_SATURATION_SHIFT 8 952 + /** Hue adjustment, as an integer phase angle in degrees */ 953 + # define TV_HUE_MASK 0x000000ff 954 + # define TV_HUE_SHIFT 0 955 + 956 + #define TV_CLR_LEVEL 0x6802c 957 + /** Controls the DAC level for black */ 958 + # define TV_BLACK_LEVEL_MASK 0x01ff0000 959 + # define TV_BLACK_LEVEL_SHIFT 16 960 + /** Controls the DAC level for blanking */ 961 + # define TV_BLANK_LEVEL_MASK 0x000001ff 962 + # define TV_BLANK_LEVEL_SHIFT 0 963 + 964 + #define TV_H_CTL_1 0x68030 965 + /** Number of pixels in the hsync. */ 966 + # define TV_HSYNC_END_MASK 0x1fff0000 967 + # define TV_HSYNC_END_SHIFT 16 968 + /** Total number of pixels minus one in the line (display and blanking). */ 969 + # define TV_HTOTAL_MASK 0x00001fff 970 + # define TV_HTOTAL_SHIFT 0 971 + 972 + #define TV_H_CTL_2 0x68034 973 + /** Enables the colorburst (needed for non-component color) */ 974 + # define TV_BURST_ENA (1 << 31) 975 + /** Offset of the colorburst from the start of hsync, in pixels minus one. */ 976 + # define TV_HBURST_START_SHIFT 16 977 + # define TV_HBURST_START_MASK 0x1fff0000 978 + /** Length of the colorburst */ 979 + # define TV_HBURST_LEN_SHIFT 0 980 + # define TV_HBURST_LEN_MASK 0x0001fff 981 + 982 + #define TV_H_CTL_3 0x68038 983 + /** End of hblank, measured in pixels minus one from start of hsync */ 984 + # define TV_HBLANK_END_SHIFT 16 985 + # define TV_HBLANK_END_MASK 0x1fff0000 986 + /** Start of hblank, measured in pixels minus one from start of hsync */ 987 + # define TV_HBLANK_START_SHIFT 0 988 + # define TV_HBLANK_START_MASK 0x0001fff 989 + 990 + #define TV_V_CTL_1 0x6803c 991 + /** XXX */ 992 + # define TV_NBR_END_SHIFT 16 993 + # define TV_NBR_END_MASK 0x07ff0000 994 + /** XXX */ 995 + # define TV_VI_END_F1_SHIFT 8 996 + # define TV_VI_END_F1_MASK 0x00003f00 997 + /** XXX */ 998 + # define TV_VI_END_F2_SHIFT 0 999 + # define TV_VI_END_F2_MASK 0x0000003f 1000 + 1001 + #define TV_V_CTL_2 0x68040 1002 + /** Length of vsync, in half lines */ 1003 + # define TV_VSYNC_LEN_MASK 0x07ff0000 1004 + # define TV_VSYNC_LEN_SHIFT 16 1005 + /** Offset of the start of vsync in field 1, measured in one less than the 1006 + * number of half lines. 1007 + */ 1008 + # define TV_VSYNC_START_F1_MASK 0x00007f00 1009 + # define TV_VSYNC_START_F1_SHIFT 8 1010 + /** 1011 + * Offset of the start of vsync in field 2, measured in one less than the 1012 + * number of half lines. 1013 + */ 1014 + # define TV_VSYNC_START_F2_MASK 0x0000007f 1015 + # define TV_VSYNC_START_F2_SHIFT 0 1016 + 1017 + #define TV_V_CTL_3 0x68044 1018 + /** Enables generation of the equalization signal */ 1019 + # define TV_EQUAL_ENA (1 << 31) 1020 + /** Length of vsync, in half lines */ 1021 + # define TV_VEQ_LEN_MASK 0x007f0000 1022 + # define TV_VEQ_LEN_SHIFT 16 1023 + /** Offset of the start of equalization in field 1, measured in one less than 1024 + * the number of half lines. 
1025 + */ 1026 + # define TV_VEQ_START_F1_MASK 0x0007f00 1027 + # define TV_VEQ_START_F1_SHIFT 8 1028 + /** 1029 + * Offset of the start of equalization in field 2, measured in one less than 1030 + * the number of half lines. 1031 + */ 1032 + # define TV_VEQ_START_F2_MASK 0x000007f 1033 + # define TV_VEQ_START_F2_SHIFT 0 1034 + 1035 + #define TV_V_CTL_4 0x68048 1036 + /** 1037 + * Offset to start of vertical colorburst, measured in one less than the 1038 + * number of lines from vertical start. 1039 + */ 1040 + # define TV_VBURST_START_F1_MASK 0x003f0000 1041 + # define TV_VBURST_START_F1_SHIFT 16 1042 + /** 1043 + * Offset to the end of vertical colorburst, measured in one less than the 1044 + * number of lines from the start of NBR. 1045 + */ 1046 + # define TV_VBURST_END_F1_MASK 0x000000ff 1047 + # define TV_VBURST_END_F1_SHIFT 0 1048 + 1049 + #define TV_V_CTL_5 0x6804c 1050 + /** 1051 + * Offset to start of vertical colorburst, measured in one less than the 1052 + * number of lines from vertical start. 1053 + */ 1054 + # define TV_VBURST_START_F2_MASK 0x003f0000 1055 + # define TV_VBURST_START_F2_SHIFT 16 1056 + /** 1057 + * Offset to the end of vertical colorburst, measured in one less than the 1058 + * number of lines from the start of NBR. 1059 + */ 1060 + # define TV_VBURST_END_F2_MASK 0x000000ff 1061 + # define TV_VBURST_END_F2_SHIFT 0 1062 + 1063 + #define TV_V_CTL_6 0x68050 1064 + /** 1065 + * Offset to start of vertical colorburst, measured in one less than the 1066 + * number of lines from vertical start. 1067 + */ 1068 + # define TV_VBURST_START_F3_MASK 0x003f0000 1069 + # define TV_VBURST_START_F3_SHIFT 16 1070 + /** 1071 + * Offset to the end of vertical colorburst, measured in one less than the 1072 + * number of lines from the start of NBR. 1073 + */ 1074 + # define TV_VBURST_END_F3_MASK 0x000000ff 1075 + # define TV_VBURST_END_F3_SHIFT 0 1076 + 1077 + #define TV_V_CTL_7 0x68054 1078 + /** 1079 + * Offset to start of vertical colorburst, measured in one less than the 1080 + * number of lines from vertical start. 1081 + */ 1082 + # define TV_VBURST_START_F4_MASK 0x003f0000 1083 + # define TV_VBURST_START_F4_SHIFT 16 1084 + /** 1085 + * Offset to the end of vertical colorburst, measured in one less than the 1086 + * number of lines from the start of NBR. 
1087 + */ 1088 + # define TV_VBURST_END_F4_MASK 0x000000ff 1089 + # define TV_VBURST_END_F4_SHIFT 0 1090 + 1091 + #define TV_SC_CTL_1 0x68060 1092 + /** Turns on the first subcarrier phase generation DDA */ 1093 + # define TV_SC_DDA1_EN (1 << 31) 1094 + /** Turns on the first subcarrier phase generation DDA */ 1095 + # define TV_SC_DDA2_EN (1 << 30) 1096 + /** Turns on the first subcarrier phase generation DDA */ 1097 + # define TV_SC_DDA3_EN (1 << 29) 1098 + /** Sets the subcarrier DDA to reset frequency every other field */ 1099 + # define TV_SC_RESET_EVERY_2 (0 << 24) 1100 + /** Sets the subcarrier DDA to reset frequency every fourth field */ 1101 + # define TV_SC_RESET_EVERY_4 (1 << 24) 1102 + /** Sets the subcarrier DDA to reset frequency every eighth field */ 1103 + # define TV_SC_RESET_EVERY_8 (2 << 24) 1104 + /** Sets the subcarrier DDA to never reset the frequency */ 1105 + # define TV_SC_RESET_NEVER (3 << 24) 1106 + /** Sets the peak amplitude of the colorburst.*/ 1107 + # define TV_BURST_LEVEL_MASK 0x00ff0000 1108 + # define TV_BURST_LEVEL_SHIFT 16 1109 + /** Sets the increment of the first subcarrier phase generation DDA */ 1110 + # define TV_SCDDA1_INC_MASK 0x00000fff 1111 + # define TV_SCDDA1_INC_SHIFT 0 1112 + 1113 + #define TV_SC_CTL_2 0x68064 1114 + /** Sets the rollover for the second subcarrier phase generation DDA */ 1115 + # define TV_SCDDA2_SIZE_MASK 0x7fff0000 1116 + # define TV_SCDDA2_SIZE_SHIFT 16 1117 + /** Sets the increent of the second subcarrier phase generation DDA */ 1118 + # define TV_SCDDA2_INC_MASK 0x00007fff 1119 + # define TV_SCDDA2_INC_SHIFT 0 1120 + 1121 + #define TV_SC_CTL_3 0x68068 1122 + /** Sets the rollover for the third subcarrier phase generation DDA */ 1123 + # define TV_SCDDA3_SIZE_MASK 0x7fff0000 1124 + # define TV_SCDDA3_SIZE_SHIFT 16 1125 + /** Sets the increent of the third subcarrier phase generation DDA */ 1126 + # define TV_SCDDA3_INC_MASK 0x00007fff 1127 + # define TV_SCDDA3_INC_SHIFT 0 1128 + 1129 + #define TV_WIN_POS 0x68070 1130 + /** X coordinate of the display from the start of horizontal active */ 1131 + # define TV_XPOS_MASK 0x1fff0000 1132 + # define TV_XPOS_SHIFT 16 1133 + /** Y coordinate of the display from the start of vertical active (NBR) */ 1134 + # define TV_YPOS_MASK 0x00000fff 1135 + # define TV_YPOS_SHIFT 0 1136 + 1137 + #define TV_WIN_SIZE 0x68074 1138 + /** Horizontal size of the display window, measured in pixels*/ 1139 + # define TV_XSIZE_MASK 0x1fff0000 1140 + # define TV_XSIZE_SHIFT 16 1141 + /** 1142 + * Vertical size of the display window, measured in pixels. 1143 + * 1144 + * Must be even for interlaced modes. 1145 + */ 1146 + # define TV_YSIZE_MASK 0x00000fff 1147 + # define TV_YSIZE_SHIFT 0 1148 + 1149 + #define TV_FILTER_CTL_1 0x68080 1150 + /** 1151 + * Enables automatic scaling calculation. 1152 + * 1153 + * If set, the rest of the registers are ignored, and the calculated values can 1154 + * be read back from the register. 1155 + */ 1156 + # define TV_AUTO_SCALE (1 << 31) 1157 + /** 1158 + * Disables the vertical filter. 
1159 + * 1160 + * This is required on modes more than 1024 pixels wide */ 1161 + # define TV_V_FILTER_BYPASS (1 << 29) 1162 + /** Enables adaptive vertical filtering */ 1163 + # define TV_VADAPT (1 << 28) 1164 + # define TV_VADAPT_MODE_MASK (3 << 26) 1165 + /** Selects the least adaptive vertical filtering mode */ 1166 + # define TV_VADAPT_MODE_LEAST (0 << 26) 1167 + /** Selects the moderately adaptive vertical filtering mode */ 1168 + # define TV_VADAPT_MODE_MODERATE (1 << 26) 1169 + /** Selects the most adaptive vertical filtering mode */ 1170 + # define TV_VADAPT_MODE_MOST (3 << 26) 1171 + /** 1172 + * Sets the horizontal scaling factor. 1173 + * 1174 + * This should be the fractional part of the horizontal scaling factor divided 1175 + * by the oversampling rate. TV_HSCALE should be less than 1, and set to: 1176 + * 1177 + * (src width - 1) / ((oversample * dest width) - 1) 1178 + */ 1179 + # define TV_HSCALE_FRAC_MASK 0x00003fff 1180 + # define TV_HSCALE_FRAC_SHIFT 0 1181 + 1182 + #define TV_FILTER_CTL_2 0x68084 1183 + /** 1184 + * Sets the integer part of the 3.15 fixed-point vertical scaling factor. 1185 + * 1186 + * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1) 1187 + */ 1188 + # define TV_VSCALE_INT_MASK 0x00038000 1189 + # define TV_VSCALE_INT_SHIFT 15 1190 + /** 1191 + * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. 1192 + * 1193 + * \sa TV_VSCALE_INT_MASK 1194 + */ 1195 + # define TV_VSCALE_FRAC_MASK 0x00007fff 1196 + # define TV_VSCALE_FRAC_SHIFT 0 1197 + 1198 + #define TV_FILTER_CTL_3 0x68088 1199 + /** 1200 + * Sets the integer part of the 3.15 fixed-point vertical scaling factor. 1201 + * 1202 + * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1)) 1203 + * 1204 + * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. 1205 + */ 1206 + # define TV_VSCALE_IP_INT_MASK 0x00038000 1207 + # define TV_VSCALE_IP_INT_SHIFT 15 1208 + /** 1209 + * Sets the fractional part of the 3.15 fixed-point vertical scaling factor. 1210 + * 1211 + * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes. 1212 + * 1213 + * \sa TV_VSCALE_IP_INT_MASK 1214 + */ 1215 + # define TV_VSCALE_IP_FRAC_MASK 0x00007fff 1216 + # define TV_VSCALE_IP_FRAC_SHIFT 0 1217 + 1218 + #define TV_CC_CONTROL 0x68090 1219 + # define TV_CC_ENABLE (1 << 31) 1220 + /** 1221 + * Specifies which field to send the CC data in. 1222 + * 1223 + * CC data is usually sent in field 0. 1224 + */ 1225 + # define TV_CC_FID_MASK (1 << 27) 1226 + # define TV_CC_FID_SHIFT 27 1227 + /** Sets the horizontal position of the CC data. Usually 135. */ 1228 + # define TV_CC_HOFF_MASK 0x03ff0000 1229 + # define TV_CC_HOFF_SHIFT 16 1230 + /** Sets the vertical position of the CC data. Usually 21 */ 1231 + # define TV_CC_LINE_MASK 0x0000003f 1232 + # define TV_CC_LINE_SHIFT 0 1233 + 1234 + #define TV_CC_DATA 0x68094 1235 + # define TV_CC_RDY (1 << 31) 1236 + /** Second word of CC data to be transmitted. */ 1237 + # define TV_CC_DATA_2_MASK 0x007f0000 1238 + # define TV_CC_DATA_2_SHIFT 16 1239 + /** First word of CC data to be transmitted. 
*/ 1240 + # define TV_CC_DATA_1_MASK 0x0000007f 1241 + # define TV_CC_DATA_1_SHIFT 0 1242 + 1243 + #define TV_H_LUMA_0 0x68100 1244 + #define TV_H_LUMA_59 0x681ec 1245 + #define TV_H_CHROMA_0 0x68200 1246 + #define TV_H_CHROMA_59 0x682ec 1247 + #define TV_V_LUMA_0 0x68300 1248 + #define TV_V_LUMA_42 0x683a8 1249 + #define TV_V_CHROMA_0 0x68400 1250 + #define TV_V_CHROMA_42 0x684a8 1251 + 1252 + /* Display & cursor control */ 1253 + 1254 + /* Pipe A */ 1255 + #define PIPEADSL 0x70000 1256 + #define PIPEACONF 0x70008 1257 + #define PIPEACONF_ENABLE (1<<31) 1258 + #define PIPEACONF_DISABLE 0 1259 + #define PIPEACONF_DOUBLE_WIDE (1<<30) 1260 + #define I965_PIPECONF_ACTIVE (1<<30) 1261 + #define PIPEACONF_SINGLE_WIDE 0 1262 + #define PIPEACONF_PIPE_UNLOCKED 0 1263 + #define PIPEACONF_PIPE_LOCKED (1<<25) 1264 + #define PIPEACONF_PALETTE 0 1265 + #define PIPEACONF_GAMMA (1<<24) 1266 + #define PIPECONF_FORCE_BORDER (1<<25) 1267 + #define PIPECONF_PROGRESSIVE (0 << 21) 1268 + #define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) 1269 + #define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) 1270 + #define PIPEASTAT 0x70024 1271 + #define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31) 1272 + #define PIPE_CRC_ERROR_ENABLE (1UL<<29) 1273 + #define PIPE_CRC_DONE_ENABLE (1UL<<28) 1274 + #define PIPE_GMBUS_EVENT_ENABLE (1UL<<27) 1275 + #define PIPE_HOTPLUG_INTERRUPT_ENABLE (1UL<<26) 1276 + #define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25) 1277 + #define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24) 1278 + #define PIPE_DPST_EVENT_ENABLE (1UL<<23) 1279 + #define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22) 1280 + #define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21) 1281 + #define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20) 1282 + #define PIPE_HOTPLUG_TV_INTERRUPT_ENABLE (1UL<<18) /* pre-965 */ 1283 + #define PIPE_START_VBLANK_INTERRUPT_ENABLE (1UL<<18) /* 965 or later */ 1284 + #define PIPE_VBLANK_INTERRUPT_ENABLE (1UL<<17) 1285 + #define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16) 1286 + #define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13) 1287 + #define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12) 1288 + #define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11) 1289 + #define PIPE_HOTPLUG_INTERRUPT_STATUS (1UL<<10) 1290 + #define PIPE_VSYNC_INTERRUPT_STATUS (1UL<<9) 1291 + #define PIPE_DISPLAY_LINE_COMPARE_STATUS (1UL<<8) 1292 + #define PIPE_DPST_EVENT_STATUS (1UL<<7) 1293 + #define PIPE_LEGACY_BLC_EVENT_STATUS (1UL<<6) 1294 + #define PIPE_ODD_FIELD_INTERRUPT_STATUS (1UL<<5) 1295 + #define PIPE_EVEN_FIELD_INTERRUPT_STATUS (1UL<<4) 1296 + #define PIPE_HOTPLUG_TV_INTERRUPT_STATUS (1UL<<2) /* pre-965 */ 1297 + #define PIPE_START_VBLANK_INTERRUPT_STATUS (1UL<<2) /* 965 or later */ 1298 + #define PIPE_VBLANK_INTERRUPT_STATUS (1UL<<1) 1299 + #define PIPE_OVERLAY_UPDATED_STATUS (1UL<<0) 1300 + 1301 + #define DSPARB 0x70030 1302 + #define DSPARB_CSTART_MASK (0x7f << 7) 1303 + #define DSPARB_CSTART_SHIFT 7 1304 + #define DSPARB_BSTART_MASK (0x7f) 1305 + #define DSPARB_BSTART_SHIFT 0 1306 + /* 1307 + * The two pipe frame counter registers are not synchronized, so 1308 + * reading a stable value is somewhat tricky. 
The following code 1309 + * should work: 1310 + * 1311 + * do { 1312 + * high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> 1313 + * PIPE_FRAME_HIGH_SHIFT; 1314 + * low1 = ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >> 1315 + * PIPE_FRAME_LOW_SHIFT); 1316 + * high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >> 1317 + * PIPE_FRAME_HIGH_SHIFT); 1318 + * } while (high1 != high2); 1319 + * frame = (high1 << 8) | low1; 1320 + */ 1321 + #define PIPEAFRAMEHIGH 0x70040 1322 + #define PIPE_FRAME_HIGH_MASK 0x0000ffff 1323 + #define PIPE_FRAME_HIGH_SHIFT 0 1324 + #define PIPEAFRAMEPIXEL 0x70044 1325 + #define PIPE_FRAME_LOW_MASK 0xff000000 1326 + #define PIPE_FRAME_LOW_SHIFT 24 1327 + #define PIPE_PIXEL_MASK 0x00ffffff 1328 + #define PIPE_PIXEL_SHIFT 0 1329 + 1330 + /* Cursor A & B regs */ 1331 + #define CURACNTR 0x70080 1332 + #define CURSOR_MODE_DISABLE 0x00 1333 + #define CURSOR_MODE_64_32B_AX 0x07 1334 + #define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) 1335 + #define MCURSOR_GAMMA_ENABLE (1 << 26) 1336 + #define CURABASE 0x70084 1337 + #define CURAPOS 0x70088 1338 + #define CURSOR_POS_MASK 0x007FF 1339 + #define CURSOR_POS_SIGN 0x8000 1340 + #define CURSOR_X_SHIFT 0 1341 + #define CURSOR_Y_SHIFT 16 1342 + #define CURBCNTR 0x700c0 1343 + #define CURBBASE 0x700c4 1344 + #define CURBPOS 0x700c8 1345 + 1346 + /* Display A control */ 1347 + #define DSPACNTR 0x70180 1348 + #define DISPLAY_PLANE_ENABLE (1<<31) 1349 + #define DISPLAY_PLANE_DISABLE 0 1350 + #define DISPPLANE_GAMMA_ENABLE (1<<30) 1351 + #define DISPPLANE_GAMMA_DISABLE 0 1352 + #define DISPPLANE_PIXFORMAT_MASK (0xf<<26) 1353 + #define DISPPLANE_8BPP (0x2<<26) 1354 + #define DISPPLANE_15_16BPP (0x4<<26) 1355 + #define DISPPLANE_16BPP (0x5<<26) 1356 + #define DISPPLANE_32BPP_NO_ALPHA (0x6<<26) 1357 + #define DISPPLANE_32BPP (0x7<<26) 1358 + #define DISPPLANE_STEREO_ENABLE (1<<25) 1359 + #define DISPPLANE_STEREO_DISABLE 0 1360 + #define DISPPLANE_SEL_PIPE_MASK (1<<24) 1361 + #define DISPPLANE_SEL_PIPE_A 0 1362 + #define DISPPLANE_SEL_PIPE_B (1<<24) 1363 + #define DISPPLANE_SRC_KEY_ENABLE (1<<22) 1364 + #define DISPPLANE_SRC_KEY_DISABLE 0 1365 + #define DISPPLANE_LINE_DOUBLE (1<<20) 1366 + #define DISPPLANE_NO_LINE_DOUBLE 0 1367 + #define DISPPLANE_STEREO_POLARITY_FIRST 0 1368 + #define DISPPLANE_STEREO_POLARITY_SECOND (1<<18) 1369 + #define DSPAADDR 0x70184 1370 + #define DSPASTRIDE 0x70188 1371 + #define DSPAPOS 0x7018C /* reserved */ 1372 + #define DSPASIZE 0x70190 1373 + #define DSPASURF 0x7019C /* 965+ only */ 1374 + #define DSPATILEOFF 0x701A4 /* 965+ only */ 1375 + 1376 + /* VBIOS flags */ 1377 + #define SWF00 0x71410 1378 + #define SWF01 0x71414 1379 + #define SWF02 0x71418 1380 + #define SWF03 0x7141c 1381 + #define SWF04 0x71420 1382 + #define SWF05 0x71424 1383 + #define SWF06 0x71428 1384 + #define SWF10 0x70410 1385 + #define SWF11 0x70414 1386 + #define SWF14 0x71420 1387 + #define SWF30 0x72414 1388 + #define SWF31 0x72418 1389 + #define SWF32 0x7241c 1390 + 1391 + /* Pipe B */ 1392 + #define PIPEBDSL 0x71000 1393 + #define PIPEBCONF 0x71008 1394 + #define PIPEBSTAT 0x71024 1395 + #define PIPEBFRAMEHIGH 0x71040 1396 + #define PIPEBFRAMEPIXEL 0x71044 1397 + 1398 + /* Display B control */ 1399 + #define DSPBCNTR 0x71180 1400 + #define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15) 1401 + #define DISPPLANE_ALPHA_TRANS_DISABLE 0 1402 + #define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 1403 + #define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) 1404 + #define DSPBADDR 0x71184 1405 + #define DSPBSTRIDE 0x71188 1406 + #define 
DSPBPOS 0x7118C 1407 + #define DSPBSIZE 0x71190 1408 + #define DSPBSURF 0x7119C 1409 + #define DSPBTILEOFF 0x711A4 1410 + 1411 + /* VBIOS regs */ 1412 + #define VGACNTRL 0x71400 1413 + # define VGA_DISP_DISABLE (1 << 31) 1414 + # define VGA_2X_MODE (1 << 30) 1415 + # define VGA_PIPE_B_SELECT (1 << 29) 1416 + 1417 + #endif /* _I915_REG_H_ */
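The comment above PIPEAFRAMEHIGH/PIPEAFRAMEPIXEL sketches how to read the two unsynchronized frame-counter registers, but its first INREG() line is missing a closing parenthesis. A cleaned-up version of the same loop, keeping the comment's INREG() placeholder for whatever MMIO read helper the caller has (the driver itself would use I915_READ), might look like this:

/* Re-read the high word until it is stable across the low/pixel read. */
static u32 read_pipe_a_frame_count(void)
{
	u32 high1, high2, low;

	do {
		high1 = (INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
		low = (INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
			PIPE_FRAME_LOW_SHIFT;
		high2 = (INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
	} while (high1 != high2);

	return (high1 << 8) | low;	/* 16-bit high word, 8-bit low word */
}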
+509
drivers/gpu/drm/i915/i915_suspend.c
···
··· 1 + /* 2 + * 3 + * Copyright 2008 (c) Intel Corporation 4 + * Jesse Barnes <jbarnes@virtuousgeek.org> 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the 8 + * "Software"), to deal in the Software without restriction, including 9 + * without limitation the rights to use, copy, modify, merge, publish, 10 + * distribute, sub license, and/or sell copies of the Software, and to 11 + * permit persons to whom the Software is furnished to do so, subject to 12 + * the following conditions: 13 + * 14 + * The above copyright notice and this permission notice (including the 15 + * next paragraph) shall be included in all copies or substantial portions 16 + * of the Software. 17 + * 18 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 19 + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 20 + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 21 + * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR 22 + * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 23 + * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 24 + * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 25 + */ 26 + 27 + #include "drmP.h" 28 + #include "drm.h" 29 + #include "i915_drm.h" 30 + #include "i915_drv.h" 31 + 32 + static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe) 33 + { 34 + struct drm_i915_private *dev_priv = dev->dev_private; 35 + 36 + if (pipe == PIPE_A) 37 + return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE); 38 + else 39 + return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE); 40 + } 41 + 42 + static void i915_save_palette(struct drm_device *dev, enum pipe pipe) 43 + { 44 + struct drm_i915_private *dev_priv = dev->dev_private; 45 + unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B); 46 + u32 *array; 47 + int i; 48 + 49 + if (!i915_pipe_enabled(dev, pipe)) 50 + return; 51 + 52 + if (pipe == PIPE_A) 53 + array = dev_priv->save_palette_a; 54 + else 55 + array = dev_priv->save_palette_b; 56 + 57 + for(i = 0; i < 256; i++) 58 + array[i] = I915_READ(reg + (i << 2)); 59 + } 60 + 61 + static void i915_restore_palette(struct drm_device *dev, enum pipe pipe) 62 + { 63 + struct drm_i915_private *dev_priv = dev->dev_private; 64 + unsigned long reg = (pipe == PIPE_A ? 
PALETTE_A : PALETTE_B); 65 + u32 *array; 66 + int i; 67 + 68 + if (!i915_pipe_enabled(dev, pipe)) 69 + return; 70 + 71 + if (pipe == PIPE_A) 72 + array = dev_priv->save_palette_a; 73 + else 74 + array = dev_priv->save_palette_b; 75 + 76 + for(i = 0; i < 256; i++) 77 + I915_WRITE(reg + (i << 2), array[i]); 78 + } 79 + 80 + static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg) 81 + { 82 + struct drm_i915_private *dev_priv = dev->dev_private; 83 + 84 + I915_WRITE8(index_port, reg); 85 + return I915_READ8(data_port); 86 + } 87 + 88 + static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable) 89 + { 90 + struct drm_i915_private *dev_priv = dev->dev_private; 91 + 92 + I915_READ8(st01); 93 + I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); 94 + return I915_READ8(VGA_AR_DATA_READ); 95 + } 96 + 97 + static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable) 98 + { 99 + struct drm_i915_private *dev_priv = dev->dev_private; 100 + 101 + I915_READ8(st01); 102 + I915_WRITE8(VGA_AR_INDEX, palette_enable | reg); 103 + I915_WRITE8(VGA_AR_DATA_WRITE, val); 104 + } 105 + 106 + static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val) 107 + { 108 + struct drm_i915_private *dev_priv = dev->dev_private; 109 + 110 + I915_WRITE8(index_port, reg); 111 + I915_WRITE8(data_port, val); 112 + } 113 + 114 + static void i915_save_vga(struct drm_device *dev) 115 + { 116 + struct drm_i915_private *dev_priv = dev->dev_private; 117 + int i; 118 + u16 cr_index, cr_data, st01; 119 + 120 + /* VGA color palette registers */ 121 + dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK); 122 + /* DACCRX automatically increments during read */ 123 + I915_WRITE8(VGA_DACRX, 0); 124 + /* Read 3 bytes of color data from each index */ 125 + for (i = 0; i < 256 * 3; i++) 126 + dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA); 127 + 128 + /* MSR bits */ 129 + dev_priv->saveMSR = I915_READ8(VGA_MSR_READ); 130 + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { 131 + cr_index = VGA_CR_INDEX_CGA; 132 + cr_data = VGA_CR_DATA_CGA; 133 + st01 = VGA_ST01_CGA; 134 + } else { 135 + cr_index = VGA_CR_INDEX_MDA; 136 + cr_data = VGA_CR_DATA_MDA; 137 + st01 = VGA_ST01_MDA; 138 + } 139 + 140 + /* CRT controller regs */ 141 + i915_write_indexed(dev, cr_index, cr_data, 0x11, 142 + i915_read_indexed(dev, cr_index, cr_data, 0x11) & 143 + (~0x80)); 144 + for (i = 0; i <= 0x24; i++) 145 + dev_priv->saveCR[i] = 146 + i915_read_indexed(dev, cr_index, cr_data, i); 147 + /* Make sure we don't turn off CR group 0 writes */ 148 + dev_priv->saveCR[0x11] &= ~0x80; 149 + 150 + /* Attribute controller registers */ 151 + I915_READ8(st01); 152 + dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX); 153 + for (i = 0; i <= 0x14; i++) 154 + dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0); 155 + I915_READ8(st01); 156 + I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX); 157 + I915_READ8(st01); 158 + 159 + /* Graphics controller registers */ 160 + for (i = 0; i < 9; i++) 161 + dev_priv->saveGR[i] = 162 + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i); 163 + 164 + dev_priv->saveGR[0x10] = 165 + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10); 166 + dev_priv->saveGR[0x11] = 167 + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11); 168 + dev_priv->saveGR[0x18] = 169 + i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18); 170 + 171 + /* Sequencer registers */ 172 + for (i = 0; i < 8; i++) 173 + dev_priv->saveSR[i] = 174 + 
i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i); 175 + } 176 + 177 + static void i915_restore_vga(struct drm_device *dev) 178 + { 179 + struct drm_i915_private *dev_priv = dev->dev_private; 180 + int i; 181 + u16 cr_index, cr_data, st01; 182 + 183 + /* MSR bits */ 184 + I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR); 185 + if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) { 186 + cr_index = VGA_CR_INDEX_CGA; 187 + cr_data = VGA_CR_DATA_CGA; 188 + st01 = VGA_ST01_CGA; 189 + } else { 190 + cr_index = VGA_CR_INDEX_MDA; 191 + cr_data = VGA_CR_DATA_MDA; 192 + st01 = VGA_ST01_MDA; 193 + } 194 + 195 + /* Sequencer registers, don't write SR07 */ 196 + for (i = 0; i < 7; i++) 197 + i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i, 198 + dev_priv->saveSR[i]); 199 + 200 + /* CRT controller regs */ 201 + /* Enable CR group 0 writes */ 202 + i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]); 203 + for (i = 0; i <= 0x24; i++) 204 + i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]); 205 + 206 + /* Graphics controller regs */ 207 + for (i = 0; i < 9; i++) 208 + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i, 209 + dev_priv->saveGR[i]); 210 + 211 + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10, 212 + dev_priv->saveGR[0x10]); 213 + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11, 214 + dev_priv->saveGR[0x11]); 215 + i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18, 216 + dev_priv->saveGR[0x18]); 217 + 218 + /* Attribute controller registers */ 219 + I915_READ8(st01); /* switch back to index mode */ 220 + for (i = 0; i <= 0x14; i++) 221 + i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0); 222 + I915_READ8(st01); /* switch back to index mode */ 223 + I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20); 224 + I915_READ8(st01); 225 + 226 + /* VGA color palette registers */ 227 + I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK); 228 + /* DACCRX automatically increments during read */ 229 + I915_WRITE8(VGA_DACWX, 0); 230 + /* Read 3 bytes of color data from each index */ 231 + for (i = 0; i < 256 * 3; i++) 232 + I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]); 233 + 234 + } 235 + 236 + int i915_save_state(struct drm_device *dev) 237 + { 238 + struct drm_i915_private *dev_priv = dev->dev_private; 239 + int i; 240 + 241 + pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB); 242 + 243 + /* Display arbitration control */ 244 + dev_priv->saveDSPARB = I915_READ(DSPARB); 245 + 246 + /* Pipe & plane A info */ 247 + dev_priv->savePIPEACONF = I915_READ(PIPEACONF); 248 + dev_priv->savePIPEASRC = I915_READ(PIPEASRC); 249 + dev_priv->saveFPA0 = I915_READ(FPA0); 250 + dev_priv->saveFPA1 = I915_READ(FPA1); 251 + dev_priv->saveDPLL_A = I915_READ(DPLL_A); 252 + if (IS_I965G(dev)) 253 + dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD); 254 + dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A); 255 + dev_priv->saveHBLANK_A = I915_READ(HBLANK_A); 256 + dev_priv->saveHSYNC_A = I915_READ(HSYNC_A); 257 + dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A); 258 + dev_priv->saveVBLANK_A = I915_READ(VBLANK_A); 259 + dev_priv->saveVSYNC_A = I915_READ(VSYNC_A); 260 + dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 261 + 262 + dev_priv->saveDSPACNTR = I915_READ(DSPACNTR); 263 + dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE); 264 + dev_priv->saveDSPASIZE = I915_READ(DSPASIZE); 265 + dev_priv->saveDSPAPOS = I915_READ(DSPAPOS); 266 + dev_priv->saveDSPAADDR = I915_READ(DSPAADDR); 267 + if (IS_I965G(dev)) { 268 + dev_priv->saveDSPASURF = I915_READ(DSPASURF); 269 + 
dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF); 270 + } 271 + i915_save_palette(dev, PIPE_A); 272 + dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT); 273 + 274 + /* Pipe & plane B info */ 275 + dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF); 276 + dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC); 277 + dev_priv->saveFPB0 = I915_READ(FPB0); 278 + dev_priv->saveFPB1 = I915_READ(FPB1); 279 + dev_priv->saveDPLL_B = I915_READ(DPLL_B); 280 + if (IS_I965G(dev)) 281 + dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD); 282 + dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B); 283 + dev_priv->saveHBLANK_B = I915_READ(HBLANK_B); 284 + dev_priv->saveHSYNC_B = I915_READ(HSYNC_B); 285 + dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B); 286 + dev_priv->saveVBLANK_B = I915_READ(VBLANK_B); 287 + dev_priv->saveVSYNC_B = I915_READ(VSYNC_B); 288 + dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A); 289 + 290 + dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR); 291 + dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE); 292 + dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE); 293 + dev_priv->saveDSPBPOS = I915_READ(DSPBPOS); 294 + dev_priv->saveDSPBADDR = I915_READ(DSPBADDR); 295 + if (IS_I965GM(dev) || IS_GM45(dev)) { 296 + dev_priv->saveDSPBSURF = I915_READ(DSPBSURF); 297 + dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF); 298 + } 299 + i915_save_palette(dev, PIPE_B); 300 + dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); 301 + 302 + /* CRT state */ 303 + dev_priv->saveADPA = I915_READ(ADPA); 304 + 305 + /* LVDS state */ 306 + dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); 307 + dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS); 308 + dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 309 + if (IS_I965G(dev)) 310 + dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2); 311 + if (IS_MOBILE(dev) && !IS_I830(dev)) 312 + dev_priv->saveLVDS = I915_READ(LVDS); 313 + if (!IS_I830(dev) && !IS_845G(dev)) 314 + dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL); 315 + dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS); 316 + dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS); 317 + dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); 318 + 319 + /* FIXME: save TV & SDVO state */ 320 + 321 + /* FBC state */ 322 + dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE); 323 + dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE); 324 + dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2); 325 + dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL); 326 + 327 + /* Interrupt state */ 328 + dev_priv->saveIIR = I915_READ(IIR); 329 + dev_priv->saveIER = I915_READ(IER); 330 + dev_priv->saveIMR = I915_READ(IMR); 331 + 332 + /* VGA state */ 333 + dev_priv->saveVGA0 = I915_READ(VGA0); 334 + dev_priv->saveVGA1 = I915_READ(VGA1); 335 + dev_priv->saveVGA_PD = I915_READ(VGA_PD); 336 + dev_priv->saveVGACNTRL = I915_READ(VGACNTRL); 337 + 338 + /* Clock gating state */ 339 + dev_priv->saveD_STATE = I915_READ(D_STATE); 340 + dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS); 341 + 342 + /* Cache mode state */ 343 + dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0); 344 + 345 + /* Memory Arbitration state */ 346 + dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE); 347 + 348 + /* Scratch space */ 349 + for (i = 0; i < 16; i++) { 350 + dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2)); 351 + dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2)); 352 + } 353 + for (i = 0; i < 3; i++) 354 + dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2)); 355 + 356 + i915_save_vga(dev); 357 + 358 + return 0; 359 + } 360 + 361 + int i915_restore_state(struct drm_device *dev) 
362 + { 363 + struct drm_i915_private *dev_priv = dev->dev_private; 364 + int i; 365 + 366 + pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB); 367 + 368 + I915_WRITE(DSPARB, dev_priv->saveDSPARB); 369 + 370 + /* Pipe & plane A info */ 371 + /* Prime the clock */ 372 + if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) { 373 + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A & 374 + ~DPLL_VCO_ENABLE); 375 + DRM_UDELAY(150); 376 + } 377 + I915_WRITE(FPA0, dev_priv->saveFPA0); 378 + I915_WRITE(FPA1, dev_priv->saveFPA1); 379 + /* Actually enable it */ 380 + I915_WRITE(DPLL_A, dev_priv->saveDPLL_A); 381 + DRM_UDELAY(150); 382 + if (IS_I965G(dev)) 383 + I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD); 384 + DRM_UDELAY(150); 385 + 386 + /* Restore mode */ 387 + I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A); 388 + I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A); 389 + I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A); 390 + I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A); 391 + I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A); 392 + I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A); 393 + I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A); 394 + 395 + /* Restore plane info */ 396 + I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE); 397 + I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS); 398 + I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC); 399 + I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR); 400 + I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE); 401 + if (IS_I965G(dev)) { 402 + I915_WRITE(DSPASURF, dev_priv->saveDSPASURF); 403 + I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF); 404 + } 405 + 406 + I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF); 407 + 408 + i915_restore_palette(dev, PIPE_A); 409 + /* Enable the plane */ 410 + I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR); 411 + I915_WRITE(DSPAADDR, I915_READ(DSPAADDR)); 412 + 413 + /* Pipe & plane B info */ 414 + if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) { 415 + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B & 416 + ~DPLL_VCO_ENABLE); 417 + DRM_UDELAY(150); 418 + } 419 + I915_WRITE(FPB0, dev_priv->saveFPB0); 420 + I915_WRITE(FPB1, dev_priv->saveFPB1); 421 + /* Actually enable it */ 422 + I915_WRITE(DPLL_B, dev_priv->saveDPLL_B); 423 + DRM_UDELAY(150); 424 + if (IS_I965G(dev)) 425 + I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD); 426 + DRM_UDELAY(150); 427 + 428 + /* Restore mode */ 429 + I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B); 430 + I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B); 431 + I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B); 432 + I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B); 433 + I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B); 434 + I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B); 435 + I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B); 436 + 437 + /* Restore plane info */ 438 + I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE); 439 + I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS); 440 + I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC); 441 + I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR); 442 + I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE); 443 + if (IS_I965G(dev)) { 444 + I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF); 445 + I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF); 446 + } 447 + 448 + I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF); 449 + 450 + i915_restore_palette(dev, PIPE_B); 451 + /* Enable the plane */ 452 + I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); 453 + I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); 454 + 455 + /* CRT state */ 456 + I915_WRITE(ADPA, dev_priv->saveADPA); 457 + 458 + /* LVDS state */ 459 + if (IS_I965G(dev)) 460 + I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2); 461 + if 
(IS_MOBILE(dev) && !IS_I830(dev)) 462 + I915_WRITE(LVDS, dev_priv->saveLVDS); 463 + if (!IS_I830(dev) && !IS_845G(dev)) 464 + I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL); 465 + 466 + I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS); 467 + I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 468 + I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS); 469 + I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS); 470 + I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); 471 + I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); 472 + 473 + /* FIXME: restore TV & SDVO state */ 474 + 475 + /* FBC info */ 476 + I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE); 477 + I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE); 478 + I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2); 479 + I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL); 480 + 481 + /* VGA state */ 482 + I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL); 483 + I915_WRITE(VGA0, dev_priv->saveVGA0); 484 + I915_WRITE(VGA1, dev_priv->saveVGA1); 485 + I915_WRITE(VGA_PD, dev_priv->saveVGA_PD); 486 + DRM_UDELAY(150); 487 + 488 + /* Clock gating state */ 489 + I915_WRITE (D_STATE, dev_priv->saveD_STATE); 490 + I915_WRITE (CG_2D_DIS, dev_priv->saveCG_2D_DIS); 491 + 492 + /* Cache mode state */ 493 + I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000); 494 + 495 + /* Memory arbitration state */ 496 + I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000); 497 + 498 + for (i = 0; i < 16; i++) { 499 + I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]); 500 + I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]); 501 + } 502 + for (i = 0; i < 3; i++) 503 + I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]); 504 + 505 + i915_restore_vga(dev); 506 + 507 + return 0; 508 + } 509 +
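i915_save_state() and i915_restore_state() are the whole public surface of this new file. A minimal sketch of how they would be wired into the driver's suspend/resume hooks follows, mirroring the .suspend/.resume hooks this same series adds to radeon_drv.c further down; the wrapper names here are illustrative, not functions defined by this patch, and the PCI power transitions are elided.

	/* Illustrative wrappers only: snapshot display state before powering
	 * down, replay it after powering back up. */
	static int i915_suspend_hook(struct drm_device *dev, pm_message_t state)
	{
		i915_save_state(dev);	/* pipes, planes, palettes, VGA, interrupts */
		/* ... pci_save_state() and the D-state transition would follow ... */
		return 0;
	}

	static int i915_resume_hook(struct drm_device *dev)
	{
		/* ... restore PCI state and power first ... */
		i915_restore_state(dev);	/* replays saved registers, DPLLs primed first */
		return 0;
	}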
+15 -14
drivers/gpu/drm/mga/mga_drv.c
··· 45 static struct drm_driver driver = { 46 .driver_features = 47 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | 48 - DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 49 - DRIVER_IRQ_VBL, 50 .dev_priv_size = sizeof(drm_mga_buf_priv_t), 51 .load = mga_driver_load, 52 .unload = mga_driver_unload, 53 .lastclose = mga_driver_lastclose, 54 .dma_quiescent = mga_driver_dma_quiescent, 55 .device_is_agp = mga_driver_device_is_agp, 56 - .vblank_wait = mga_driver_vblank_wait, 57 .irq_preinstall = mga_driver_irq_preinstall, 58 .irq_postinstall = mga_driver_irq_postinstall, 59 .irq_uninstall = mga_driver_irq_uninstall, ··· 65 .ioctls = mga_ioctls, 66 .dma_ioctl = mga_dma_buffers, 67 .fops = { 68 - .owner = THIS_MODULE, 69 - .open = drm_open, 70 - .release = drm_release, 71 - .ioctl = drm_ioctl, 72 - .mmap = drm_mmap, 73 - .poll = drm_poll, 74 - .fasync = drm_fasync, 75 #ifdef CONFIG_COMPAT 76 - .compat_ioctl = mga_compat_ioctl, 77 #endif 78 - }, 79 .pci_driver = { 80 - .name = DRIVER_NAME, 81 - .id_table = pciidlist, 82 }, 83 84 .name = DRIVER_NAME,
··· 45 static struct drm_driver driver = { 46 .driver_features = 47 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | 48 + DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 49 .dev_priv_size = sizeof(drm_mga_buf_priv_t), 50 .load = mga_driver_load, 51 .unload = mga_driver_unload, 52 .lastclose = mga_driver_lastclose, 53 .dma_quiescent = mga_driver_dma_quiescent, 54 .device_is_agp = mga_driver_device_is_agp, 55 + .get_vblank_counter = mga_get_vblank_counter, 56 + .enable_vblank = mga_enable_vblank, 57 + .disable_vblank = mga_disable_vblank, 58 .irq_preinstall = mga_driver_irq_preinstall, 59 .irq_postinstall = mga_driver_irq_postinstall, 60 .irq_uninstall = mga_driver_irq_uninstall, ··· 64 .ioctls = mga_ioctls, 65 .dma_ioctl = mga_dma_buffers, 66 .fops = { 67 + .owner = THIS_MODULE, 68 + .open = drm_open, 69 + .release = drm_release, 70 + .ioctl = drm_ioctl, 71 + .mmap = drm_mmap, 72 + .poll = drm_poll, 73 + .fasync = drm_fasync, 74 #ifdef CONFIG_COMPAT 75 + .compat_ioctl = mga_compat_ioctl, 76 #endif 77 + }, 78 .pci_driver = { 79 + .name = DRIVER_NAME, 80 + .id_table = pciidlist, 81 }, 82 83 .name = DRIVER_NAME,
+5 -1
drivers/gpu/drm/mga/mga_drv.h
··· 120 u32 clear_cmd; 121 u32 maccess; 122 123 wait_queue_head_t fence_queue; 124 atomic_t last_fence_retired; 125 u32 next_fence_to_post; ··· 182 extern int mga_warp_init(drm_mga_private_t * dev_priv); 183 184 /* mga_irq.c */ 185 extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); 186 extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 187 extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); 188 extern void mga_driver_irq_preinstall(struct drm_device * dev); 189 - extern void mga_driver_irq_postinstall(struct drm_device * dev); 190 extern void mga_driver_irq_uninstall(struct drm_device * dev); 191 extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, 192 unsigned long arg);
··· 120 u32 clear_cmd; 121 u32 maccess; 122 123 + atomic_t vbl_received; /**< Number of vblanks received. */ 124 wait_queue_head_t fence_queue; 125 atomic_t last_fence_retired; 126 u32 next_fence_to_post; ··· 181 extern int mga_warp_init(drm_mga_private_t * dev_priv); 182 183 /* mga_irq.c */ 184 + extern int mga_enable_vblank(struct drm_device *dev, int crtc); 185 + extern void mga_disable_vblank(struct drm_device *dev, int crtc); 186 + extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc); 187 extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence); 188 extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 189 extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS); 190 extern void mga_driver_irq_preinstall(struct drm_device * dev); 191 + extern int mga_driver_irq_postinstall(struct drm_device *dev); 192 extern void mga_driver_irq_uninstall(struct drm_device * dev); 193 extern long mga_compat_ioctl(struct file *filp, unsigned int cmd, 194 unsigned long arg);
+54 -22
drivers/gpu/drm/mga/mga_irq.c
··· 1 /* mga_irq.c -- IRQ handling for radeon -*- linux-c -*- 2 - * 3 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 4 * 5 * The Weather Channel (TM) funded Tungsten Graphics to develop the ··· 36 #include "mga_drm.h" 37 #include "mga_drv.h" 38 39 irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) 40 { 41 struct drm_device *dev = (struct drm_device *) arg; ··· 60 /* VBLANK interrupt */ 61 if (status & MGA_VLINEPEN) { 62 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); 63 - atomic_inc(&dev->vbl_received); 64 - DRM_WAKEUP(&dev->vbl_queue); 65 - drm_vbl_send_signals(dev); 66 handled = 1; 67 } 68 ··· 69 if (status & MGA_SOFTRAPEN) { 70 const u32 prim_start = MGA_READ(MGA_PRIMADDRESS); 71 const u32 prim_end = MGA_READ(MGA_PRIMEND); 72 73 MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR); 74 ··· 85 handled = 1; 86 } 87 88 - if (handled) { 89 return IRQ_HANDLED; 90 - } 91 return IRQ_NONE; 92 } 93 94 - int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 95 { 96 - unsigned int cur_vblank; 97 - int ret = 0; 98 99 - /* Assume that the user has missed the current sequence number 100 - * by about a day rather than she wants to wait for years 101 - * using vertical blanks... 102 */ 103 - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 104 - (((cur_vblank = atomic_read(&dev->vbl_received)) 105 - - *sequence) <= (1 << 23))); 106 - 107 - *sequence = cur_vblank; 108 - 109 - return ret; 110 } 111 112 int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) ··· 149 MGA_WRITE(MGA_ICLEAR, ~0); 150 } 151 152 - void mga_driver_irq_postinstall(struct drm_device * dev) 153 { 154 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 155 156 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); 157 158 - /* Turn on vertical blank interrupt and soft trap interrupt. */ 159 - MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); 160 } 161 162 void mga_driver_irq_uninstall(struct drm_device * dev)
··· 1 /* mga_irq.c -- IRQ handling for radeon -*- linux-c -*- 2 + */ 3 + /* 4 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved. 5 * 6 * The Weather Channel (TM) funded Tungsten Graphics to develop the ··· 35 #include "mga_drm.h" 36 #include "mga_drv.h" 37 38 + u32 mga_get_vblank_counter(struct drm_device *dev, int crtc) 39 + { 40 + const drm_mga_private_t *const dev_priv = 41 + (drm_mga_private_t *) dev->dev_private; 42 + 43 + if (crtc != 0) 44 + return 0; 45 + 46 + return atomic_read(&dev_priv->vbl_received); 47 + } 48 + 49 + 50 irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS) 51 { 52 struct drm_device *dev = (struct drm_device *) arg; ··· 47 /* VBLANK interrupt */ 48 if (status & MGA_VLINEPEN) { 49 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR); 50 + atomic_inc(&dev_priv->vbl_received); 51 + drm_handle_vblank(dev, 0); 52 handled = 1; 53 } 54 ··· 57 if (status & MGA_SOFTRAPEN) { 58 const u32 prim_start = MGA_READ(MGA_PRIMADDRESS); 59 const u32 prim_end = MGA_READ(MGA_PRIMEND); 60 + 61 62 MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR); 63 ··· 72 handled = 1; 73 } 74 75 + if (handled) 76 return IRQ_HANDLED; 77 return IRQ_NONE; 78 } 79 80 + int mga_enable_vblank(struct drm_device *dev, int crtc) 81 { 82 + drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 83 84 + if (crtc != 0) { 85 + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 86 + crtc); 87 + return 0; 88 + } 89 + 90 + MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); 91 + return 0; 92 + } 93 + 94 + 95 + void mga_disable_vblank(struct drm_device *dev, int crtc) 96 + { 97 + if (crtc != 0) { 98 + DRM_ERROR("tried to disable vblank on non-existent crtc %d\n", 99 + crtc); 100 + } 101 + 102 + /* Do *NOT* disable the vertical refresh interrupt. MGA doesn't have 103 + * a nice hardware counter that tracks the number of refreshes when 104 + * the interrupt is disabled, and the kernel doesn't know the refresh 105 + * rate to calculate an estimate. 106 */ 107 + /* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */ 108 } 109 110 int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence) ··· 125 MGA_WRITE(MGA_ICLEAR, ~0); 126 } 127 128 + int mga_driver_irq_postinstall(struct drm_device *dev) 129 { 130 drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private; 131 + int ret; 132 + 133 + ret = drm_vblank_init(dev, 1); 134 + if (ret) 135 + return ret; 136 137 DRM_INIT_WAITQUEUE(&dev_priv->fence_queue); 138 139 + /* Turn on soft trap interrupt. Vertical blank interrupts are enabled 140 + * in mga_enable_vblank. 141 + */ 142 + MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN); 143 + return 0; 144 } 145 146 void mga_driver_irq_uninstall(struct drm_device * dev)
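The deleted mga_driver_vblank_wait() leaned on unsigned wrap-around arithmetic: the condition (cur_vblank - *sequence) <= (1 << 23) treats a requested sequence slightly ahead of the counter as "not reached yet", while a requested sequence far behind the counter (on the order of a day at typical refresh rates, as the removed comment put it) is treated as already satisfied. A self-contained sketch of that comparison, in plain user-space C purely for reference:

	#include <stdint.h>

	/* Returns 1 once the 32-bit vblank counter has reached 'wanted'.
	 * Because the subtraction is unsigned, a counter that has wrapped
	 * past zero still compares correctly as long as the caller is not
	 * more than ~2^23 frames behind. */
	static int vblank_reached(uint32_t counter, uint32_t wanted)
	{
		return (counter - wanted) <= (1u << 23);
	}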
+1 -1
drivers/gpu/drm/mga/mga_state.c
··· 1022 1023 switch (param->param) { 1024 case MGA_PARAM_IRQ_NR: 1025 - value = dev->irq; 1026 break; 1027 case MGA_PARAM_CARD_TYPE: 1028 value = dev_priv->chipset;
··· 1022 1023 switch (param->param) { 1024 case MGA_PARAM_IRQ_NR: 1025 + value = drm_dev_to_irq(dev); 1026 break; 1027 case MGA_PARAM_CARD_TYPE: 1028 value = dev_priv->chipset;
+15 -14
drivers/gpu/drm/r128/r128_drv.c
··· 43 static struct drm_driver driver = { 44 .driver_features = 45 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 46 - DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | 47 - DRIVER_IRQ_VBL, 48 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 49 .preclose = r128_driver_preclose, 50 .lastclose = r128_driver_lastclose, 51 - .vblank_wait = r128_driver_vblank_wait, 52 .irq_preinstall = r128_driver_irq_preinstall, 53 .irq_postinstall = r128_driver_irq_postinstall, 54 .irq_uninstall = r128_driver_irq_uninstall, ··· 60 .ioctls = r128_ioctls, 61 .dma_ioctl = r128_cce_buffers, 62 .fops = { 63 - .owner = THIS_MODULE, 64 - .open = drm_open, 65 - .release = drm_release, 66 - .ioctl = drm_ioctl, 67 - .mmap = drm_mmap, 68 - .poll = drm_poll, 69 - .fasync = drm_fasync, 70 #ifdef CONFIG_COMPAT 71 - .compat_ioctl = r128_compat_ioctl, 72 #endif 73 }, 74 - 75 .pci_driver = { 76 - .name = DRIVER_NAME, 77 - .id_table = pciidlist, 78 }, 79 80 .name = DRIVER_NAME, ··· 87 static int __init r128_init(void) 88 { 89 driver.num_ioctls = r128_max_ioctl; 90 return drm_init(&driver); 91 } 92
··· 43 static struct drm_driver driver = { 44 .driver_features = 45 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 46 + DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, 47 .dev_priv_size = sizeof(drm_r128_buf_priv_t), 48 .preclose = r128_driver_preclose, 49 .lastclose = r128_driver_lastclose, 50 + .get_vblank_counter = r128_get_vblank_counter, 51 + .enable_vblank = r128_enable_vblank, 52 + .disable_vblank = r128_disable_vblank, 53 .irq_preinstall = r128_driver_irq_preinstall, 54 .irq_postinstall = r128_driver_irq_postinstall, 55 .irq_uninstall = r128_driver_irq_uninstall, ··· 59 .ioctls = r128_ioctls, 60 .dma_ioctl = r128_cce_buffers, 61 .fops = { 62 + .owner = THIS_MODULE, 63 + .open = drm_open, 64 + .release = drm_release, 65 + .ioctl = drm_ioctl, 66 + .mmap = drm_mmap, 67 + .poll = drm_poll, 68 + .fasync = drm_fasync, 69 #ifdef CONFIG_COMPAT 70 + .compat_ioctl = r128_compat_ioctl, 71 #endif 72 }, 73 .pci_driver = { 74 + .name = DRIVER_NAME, 75 + .id_table = pciidlist, 76 }, 77 78 .name = DRIVER_NAME, ··· 87 static int __init r128_init(void) 88 { 89 driver.num_ioctls = r128_max_ioctl; 90 + 91 return drm_init(&driver); 92 } 93
+7 -4
drivers/gpu/drm/r128/r128_drv.h
··· 29 * Rickard E. (Rik) Faith <faith@valinux.com> 30 * Kevin E. Martin <martin@valinux.com> 31 * Gareth Hughes <gareth@valinux.com> 32 - * Michel Dänzer <daenzerm@student.ethz.ch> 33 */ 34 35 #ifndef __R128_DRV_H__ ··· 97 u32 crtc_offset; 98 u32 crtc_offset_cntl; 99 100 u32 color_fmt; 101 unsigned int front_offset; 102 unsigned int front_pitch; ··· 151 extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); 152 extern int r128_do_cleanup_cce(struct drm_device * dev); 153 154 - extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 155 - 156 extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); 157 extern void r128_driver_irq_preinstall(struct drm_device * dev); 158 - extern void r128_driver_irq_postinstall(struct drm_device * dev); 159 extern void r128_driver_irq_uninstall(struct drm_device * dev); 160 extern void r128_driver_lastclose(struct drm_device * dev); 161 extern void r128_driver_preclose(struct drm_device * dev,
··· 29 * Rickard E. (Rik) Faith <faith@valinux.com> 30 * Kevin E. Martin <martin@valinux.com> 31 * Gareth Hughes <gareth@valinux.com> 32 + * Michel Dänzer <daenzerm@student.ethz.ch> 33 */ 34 35 #ifndef __R128_DRV_H__ ··· 97 u32 crtc_offset; 98 u32 crtc_offset_cntl; 99 100 + atomic_t vbl_received; 101 + 102 u32 color_fmt; 103 unsigned int front_offset; 104 unsigned int front_pitch; ··· 149 extern int r128_do_cce_idle(drm_r128_private_t * dev_priv); 150 extern int r128_do_cleanup_cce(struct drm_device * dev); 151 152 + extern int r128_enable_vblank(struct drm_device *dev, int crtc); 153 + extern void r128_disable_vblank(struct drm_device *dev, int crtc); 154 + extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc); 155 extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS); 156 extern void r128_driver_irq_preinstall(struct drm_device * dev); 157 + extern int r128_driver_irq_postinstall(struct drm_device *dev); 158 extern void r128_driver_irq_uninstall(struct drm_device * dev); 159 extern void r128_driver_lastclose(struct drm_device * dev); 160 extern void r128_driver_preclose(struct drm_device * dev,

+36 -21
drivers/gpu/drm/r128/r128_irq.c
··· 35 #include "r128_drm.h" 36 #include "r128_drv.h" 37 38 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) 39 { 40 struct drm_device *dev = (struct drm_device *) arg; ··· 56 /* VBLANK interrupt */ 57 if (status & R128_CRTC_VBLANK_INT) { 58 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); 59 - atomic_inc(&dev->vbl_received); 60 - DRM_WAKEUP(&dev->vbl_queue); 61 - drm_vbl_send_signals(dev); 62 return IRQ_HANDLED; 63 } 64 return IRQ_NONE; 65 } 66 67 - int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 68 { 69 - unsigned int cur_vblank; 70 - int ret = 0; 71 72 - /* Assume that the user has missed the current sequence number 73 - * by about a day rather than she wants to wait for years 74 - * using vertical blanks... 75 */ 76 - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 77 - (((cur_vblank = atomic_read(&dev->vbl_received)) 78 - - *sequence) <= (1 << 23))); 79 - 80 - *sequence = cur_vblank; 81 - 82 - return ret; 83 } 84 85 void r128_driver_irq_preinstall(struct drm_device * dev) ··· 100 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); 101 } 102 103 - void r128_driver_irq_postinstall(struct drm_device * dev) 104 { 105 - drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private; 106 - 107 - /* Turn on VBL interrupt */ 108 - R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN); 109 } 110 111 void r128_driver_irq_uninstall(struct drm_device * dev)
··· 35 #include "r128_drm.h" 36 #include "r128_drv.h" 37 38 + u32 r128_get_vblank_counter(struct drm_device *dev, int crtc) 39 + { 40 + const drm_r128_private_t *dev_priv = dev->dev_private; 41 + 42 + if (crtc != 0) 43 + return 0; 44 + 45 + return atomic_read(&dev_priv->vbl_received); 46 + } 47 + 48 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS) 49 { 50 struct drm_device *dev = (struct drm_device *) arg; ··· 46 /* VBLANK interrupt */ 47 if (status & R128_CRTC_VBLANK_INT) { 48 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); 49 + atomic_inc(&dev_priv->vbl_received); 50 + drm_handle_vblank(dev, 0); 51 return IRQ_HANDLED; 52 } 53 return IRQ_NONE; 54 } 55 56 + int r128_enable_vblank(struct drm_device *dev, int crtc) 57 { 58 + drm_r128_private_t *dev_priv = dev->dev_private; 59 60 + if (crtc != 0) { 61 + DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); 62 + return -EINVAL; 63 + } 64 + 65 + R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN); 66 + return 0; 67 + } 68 + 69 + void r128_disable_vblank(struct drm_device *dev, int crtc) 70 + { 71 + if (crtc != 0) 72 + DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); 73 + 74 + /* 75 + * FIXME: implement proper interrupt disable by using the vblank 76 + * counter register (if available) 77 + * 78 + * R128_WRITE(R128_GEN_INT_CNTL, 79 + * R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN); 80 */ 81 } 82 83 void r128_driver_irq_preinstall(struct drm_device * dev) ··· 82 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK); 83 } 84 85 + int r128_driver_irq_postinstall(struct drm_device *dev) 86 { 87 + return drm_vblank_init(dev, 1); 88 } 89 90 void r128_driver_irq_uninstall(struct drm_device * dev)
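r128 now follows the same split as mga: the counter returned by r128_get_vblank_counter() is advanced purely by the interrupt handler, r128_enable_vblank() arms the interrupt, and postinstall simply registers one CRTC with drm_vblank_init(). A rough, hypothetical model of how a wait composes these hooks is sketched below; the real DRM core sleeps on a wait queue and reference-counts enables rather than busy-waiting, so this is an illustration of the contract, not the core's implementation.

	/* Hypothetical usage model only, not the DRM core's code. */
	static u32 wait_for_vblank_model(struct drm_device *dev, u32 wanted)
	{
		u32 seq;

		r128_enable_vblank(dev, 0);		/* arm R128_CRTC_VBLANK_INT_EN */
		do {
			seq = r128_get_vblank_counter(dev, 0);
		} while ((int)(seq - wanted) < 0);	/* wrap-safe "not there yet" */
		r128_disable_vblank(dev, 0);		/* currently a no-op, see FIXME above */

		return seq;
	}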
+1 -1
drivers/gpu/drm/r128/r128_state.c
··· 1629 1630 switch (param->param) { 1631 case R128_PARAM_IRQ_NR: 1632 - value = dev->irq; 1633 break; 1634 default: 1635 return -EINVAL;
··· 1629 1630 switch (param->param) { 1631 case R128_PARAM_IRQ_NR: 1632 + value = drm_dev_to_irq(dev); 1633 break; 1634 default: 1635 return -EINVAL;
+38 -15
drivers/gpu/drm/radeon/radeon_cp.c
··· 71 72 static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) 73 { 74 - if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) 75 return RS690_READ_MCIND(dev_priv, addr); 76 else 77 return RS480_READ_MCIND(dev_priv, addr); ··· 83 84 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 85 return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); 86 - else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) 87 return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); 88 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 89 return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); ··· 96 { 97 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 98 R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); 99 - else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) 100 RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); 101 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 102 R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); ··· 109 { 110 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 111 R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); 112 - else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) 113 RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); 114 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 115 R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); ··· 126 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) { 127 R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo); 128 R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi); 129 - } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { 130 RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo); 131 RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi); 132 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { 133 R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); 134 R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); 135 - } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480) { 136 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); 137 - RADEON_WRITE(RS480_AGP_BASE_2, 0); 138 } else { 139 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); 140 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) ··· 353 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || 354 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || 355 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || 356 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { 357 DRM_INFO("Loading R300 Microcode\n"); 358 for (i = 0; i < 256; i++) { ··· 363 R300_cp_microcode[i][0]); 364 } 365 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || 366 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) { 367 DRM_INFO("Loading R400 Microcode\n"); 368 for (i = 0; i < 256; i++) { ··· 372 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, 373 R420_cp_microcode[i][0]); 374 } 375 - } else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) { 376 - DRM_INFO("Loading RS690 Microcode\n"); 377 for (i = 0; i < 256; i++) { 378 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, 379 RS690_cp_microcode[i][1]); ··· 635 dev_priv->ring.size_l2qw); 636 #endif 637 638 - /* Start with assuming that writeback doesn't work */ 639 - dev_priv->writeback_works = 0; 640 641 /* Initialize the scratch register pointer. 
This will cause 642 * the scratch register values to be written out to memory ··· 653 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); 654 655 /* Turn on bus mastering */ 656 - tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 657 - RADEON_WRITE(RADEON_BUS_CNTL, tmp); 658 659 dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; 660 RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); ··· 690 static void radeon_test_writeback(drm_radeon_private_t * dev_priv) 691 { 692 u32 tmp; 693 694 /* Writeback doesn't seem to work everywhere, test it here and possibly 695 * enable it if it appears to work ··· 739 dev_priv->gart_size); 740 741 temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); 742 - if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) 743 IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN | 744 RS690_BLOCK_GFX_D3_EN)); 745 else ··· 833 u32 tmp; 834 835 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 836 (dev_priv->flags & RADEON_IS_IGPGART)) { 837 radeon_set_igpgart(dev_priv, on); 838 return; ··· 1308 radeon_cp_init_ring_buffer(dev, dev_priv); 1309 1310 radeon_do_engine_reset(dev); 1311 - radeon_enable_interrupt(dev); 1312 1313 DRM_DEBUG("radeon_do_resume_cp() complete\n"); 1314 ··· 1730 case CHIP_R300: 1731 case CHIP_R350: 1732 case CHIP_R420: 1733 case CHIP_RV410: 1734 case CHIP_RV515: 1735 case CHIP_R520:
··· 71 72 static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr) 73 { 74 + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 75 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) 76 return RS690_READ_MCIND(dev_priv, addr); 77 else 78 return RS480_READ_MCIND(dev_priv, addr); ··· 82 83 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 84 return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION); 85 + else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 86 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) 87 return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION); 88 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 89 return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION); ··· 94 { 95 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 96 R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc); 97 + else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 98 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) 99 RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc); 100 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 101 R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc); ··· 106 { 107 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) 108 R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc); 109 + else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 110 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) 111 RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc); 112 else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) 113 R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc); ··· 122 if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) { 123 R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo); 124 R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi); 125 + } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 126 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { 127 RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo); 128 RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi); 129 } else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) { 130 R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo); 131 R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi); 132 + } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || 133 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { 134 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); 135 + RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi); 136 } else { 137 RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo); 138 if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200) ··· 347 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) || 348 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) || 349 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || 350 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || 351 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) { 352 DRM_INFO("Loading R300 Microcode\n"); 353 for (i = 0; i < 256; i++) { ··· 356 R300_cp_microcode[i][0]); 357 } 358 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) || 359 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) || 360 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) { 361 DRM_INFO("Loading R400 Microcode\n"); 362 for (i = 0; i < 256; i++) { ··· 364 RADEON_WRITE(RADEON_CP_ME_RAM_DATAL, 365 R420_cp_microcode[i][0]); 366 } 367 + } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 368 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { 369 + DRM_INFO("Loading RS690/RS740 Microcode\n"); 370 for 
(i = 0; i < 256; i++) { 371 RADEON_WRITE(RADEON_CP_ME_RAM_DATAH, 372 RS690_cp_microcode[i][1]); ··· 626 dev_priv->ring.size_l2qw); 627 #endif 628 629 630 /* Initialize the scratch register pointer. This will cause 631 * the scratch register values to be written out to memory ··· 646 RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7); 647 648 /* Turn on bus mastering */ 649 + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) || 650 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 651 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) { 652 + /* rs400, rs690/rs740 */ 653 + tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS400_BUS_MASTER_DIS; 654 + RADEON_WRITE(RADEON_BUS_CNTL, tmp); 655 + } else if (!(((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) || 656 + ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R423))) { 657 + /* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */ 658 + tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS; 659 + RADEON_WRITE(RADEON_BUS_CNTL, tmp); 660 + } /* PCIE cards appears to not need this */ 661 662 dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0; 663 RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame); ··· 673 static void radeon_test_writeback(drm_radeon_private_t * dev_priv) 674 { 675 u32 tmp; 676 + 677 + /* Start with assuming that writeback doesn't work */ 678 + dev_priv->writeback_works = 0; 679 680 /* Writeback doesn't seem to work everywhere, test it here and possibly 681 * enable it if it appears to work ··· 719 dev_priv->gart_size); 720 721 temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL); 722 + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 723 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) 724 IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN | 725 RS690_BLOCK_GFX_D3_EN)); 726 else ··· 812 u32 tmp; 813 814 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || 815 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) || 816 (dev_priv->flags & RADEON_IS_IGPGART)) { 817 radeon_set_igpgart(dev_priv, on); 818 return; ··· 1286 radeon_cp_init_ring_buffer(dev, dev_priv); 1287 1288 radeon_do_engine_reset(dev); 1289 + radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); 1290 1291 DRM_DEBUG("radeon_do_resume_cp() complete\n"); 1292 ··· 1708 case CHIP_R300: 1709 case CHIP_R350: 1710 case CHIP_R420: 1711 + case CHIP_R423: 1712 case CHIP_RV410: 1713 case CHIP_RV515: 1714 case CHIP_R520:
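The bus-mastering enable in radeon_cp_init_ring_buffer() now splits three ways: the RS400/RS690/RS740 IGPs clear RS400_BUS_MASTER_DIS, families below R423 other than RV380 clear the classic RADEON_BUS_MASTER_DIS, and the remaining (PCIE) parts are left untouched. The hypothetical helper below is only a restatement of that decision to make the three cases explicit, using the same identifiers as the code above.

	/* Hypothetical restatement of the bus-master enable logic; 'family'
	 * is dev_priv->flags & RADEON_FAMILY_MASK as in the code above. */
	static void radeon_enable_bus_mastering(drm_radeon_private_t *dev_priv)
	{
		u32 family = dev_priv->flags & RADEON_FAMILY_MASK;
		u32 tmp;

		if (family == CHIP_RS400 || family == CHIP_RS690 ||
		    family == CHIP_RS740) {
			/* IGPs use a different disable bit (bit 14) */
			tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS400_BUS_MASTER_DIS;
			RADEON_WRITE(RADEON_BUS_CNTL, tmp);
		} else if (family != CHIP_RV380 && family < CHIP_R423) {
			/* older AGP/PCI parts use the classic bit 6 */
			tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
			RADEON_WRITE(RADEON_BUS_CNTL, tmp);
		}
		/* PCIE parts have no explicit disable bit to clear */
	}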
+28 -4
drivers/gpu/drm/radeon/radeon_drv.c
··· 52 "r300")); 53 } 54 55 static struct pci_device_id pciidlist[] = { 56 radeon_PCI_IDS 57 }; ··· 81 static struct drm_driver driver = { 82 .driver_features = 83 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 84 - DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED | 85 - DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2, 86 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 87 .load = radeon_driver_load, 88 .firstopen = radeon_driver_firstopen, ··· 90 .postclose = radeon_driver_postclose, 91 .lastclose = radeon_driver_lastclose, 92 .unload = radeon_driver_unload, 93 - .vblank_wait = radeon_driver_vblank_wait, 94 - .vblank_wait2 = radeon_driver_vblank_wait2, 95 .dri_library_name = dri_library_name, 96 .irq_preinstall = radeon_driver_irq_preinstall, 97 .irq_postinstall = radeon_driver_irq_postinstall,
··· 52 "r300")); 53 } 54 55 + static int radeon_suspend(struct drm_device *dev, pm_message_t state) 56 + { 57 + drm_radeon_private_t *dev_priv = dev->dev_private; 58 + 59 + /* Disable *all* interrupts */ 60 + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) 61 + RADEON_WRITE(R500_DxMODE_INT_MASK, 0); 62 + RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); 63 + return 0; 64 + } 65 + 66 + static int radeon_resume(struct drm_device *dev) 67 + { 68 + drm_radeon_private_t *dev_priv = dev->dev_private; 69 + 70 + /* Restore interrupt registers */ 71 + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) 72 + RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); 73 + RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); 74 + return 0; 75 + } 76 + 77 static struct pci_device_id pciidlist[] = { 78 radeon_PCI_IDS 79 }; ··· 59 static struct drm_driver driver = { 60 .driver_features = 61 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG | 62 + DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED, 63 .dev_priv_size = sizeof(drm_radeon_buf_priv_t), 64 .load = radeon_driver_load, 65 .firstopen = radeon_driver_firstopen, ··· 69 .postclose = radeon_driver_postclose, 70 .lastclose = radeon_driver_lastclose, 71 .unload = radeon_driver_unload, 72 + .suspend = radeon_suspend, 73 + .resume = radeon_resume, 74 + .get_vblank_counter = radeon_get_vblank_counter, 75 + .enable_vblank = radeon_enable_vblank, 76 + .disable_vblank = radeon_disable_vblank, 77 .dri_library_name = dri_library_name, 78 .irq_preinstall = radeon_driver_irq_preinstall, 79 .irq_postinstall = radeon_driver_irq_postinstall,
+46 -11
drivers/gpu/drm/radeon/radeon_drv.h
··· 122 CHIP_RV350, 123 CHIP_RV380, 124 CHIP_R420, 125 CHIP_RV410, 126 CHIP_RS480, 127 CHIP_RS690, 128 CHIP_RV515, 129 CHIP_R520, 130 CHIP_RV530, ··· 381 struct mem_block *heap); 382 383 /* radeon_irq.c */ 384 extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); 385 extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); 386 387 extern void radeon_do_release(struct drm_device * dev); 388 - extern int radeon_driver_vblank_wait(struct drm_device * dev, 389 - unsigned int *sequence); 390 - extern int radeon_driver_vblank_wait2(struct drm_device * dev, 391 - unsigned int *sequence); 392 extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); 393 extern void radeon_driver_irq_preinstall(struct drm_device * dev); 394 - extern void radeon_driver_irq_postinstall(struct drm_device * dev); 395 extern void radeon_driver_irq_uninstall(struct drm_device * dev); 396 extern void radeon_enable_interrupt(struct drm_device *dev); 397 extern int radeon_vblank_crtc_get(struct drm_device *dev); ··· 400 extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); 401 extern int radeon_driver_unload(struct drm_device *dev); 402 extern int radeon_driver_firstopen(struct drm_device *dev); 403 - extern void radeon_driver_preclose(struct drm_device * dev, struct drm_file *file_priv); 404 - extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp); 405 extern void radeon_driver_lastclose(struct drm_device * dev); 406 - extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv); 407 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, 408 unsigned long arg); 409 410 /* r300_cmdbuf.c */ 411 extern void r300_init_reg_flags(struct drm_device *dev); 412 413 - extern int r300_do_cp_cmdbuf(struct drm_device * dev, 414 struct drm_file *file_priv, 415 - drm_radeon_kcmd_buffer_t * cmdbuf); 416 417 /* Flags for stats.boxes 418 */ ··· 440 # define RADEON_SCISSOR_1_ENABLE (1 << 29) 441 # define RADEON_SCISSOR_2_ENABLE (1 << 30) 442 443 #define RADEON_BUS_CNTL 0x0030 444 # define RADEON_BUS_MASTER_DIS (1 << 6) 445 446 #define RADEON_CLOCK_CNTL_DATA 0x000c 447 # define RADEON_PLL_WR_EN (1 << 7) ··· 652 # define RADEON_SW_INT_TEST (1 << 25) 653 # define RADEON_SW_INT_TEST_ACK (1 << 25) 654 # define RADEON_SW_INT_FIRE (1 << 26) 655 656 #define RADEON_HOST_PATH_CNTL 0x0130 657 # define RADEON_HDP_SOFT_RESET (1 << 26) ··· 937 938 #define RADEON_AIC_CNTL 0x01d0 939 # define RADEON_PCIGART_TRANSLATE_EN (1 << 0) 940 #define RADEON_AIC_STAT 0x01d4 941 #define RADEON_AIC_PT_BASE 0x01d8 942 #define RADEON_AIC_LO_ADDR 0x01dc ··· 1147 1148 #define R200_VAP_PVS_CNTL_1 0x22D0 1149 1150 #define R500_D1CRTC_STATUS 0x609c 1151 #define R500_D2CRTC_STATUS 0x689c 1152 #define R500_CRTC_V_BLANK (1<<0) ··· 1234 1235 #define IGP_WRITE_MCIND(addr, val) \ 1236 do { \ 1237 - if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) \ 1238 RS690_WRITE_MCIND(addr, val); \ 1239 else \ 1240 RS480_WRITE_MCIND(addr, val); \
··· 122 CHIP_RV350, 123 CHIP_RV380, 124 CHIP_R420, 125 + CHIP_R423, 126 CHIP_RV410, 127 + CHIP_RS400, 128 CHIP_RS480, 129 CHIP_RS690, 130 + CHIP_RS740, 131 CHIP_RV515, 132 CHIP_R520, 133 CHIP_RV530, ··· 378 struct mem_block *heap); 379 380 /* radeon_irq.c */ 381 + extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state); 382 extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv); 383 extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv); 384 385 extern void radeon_do_release(struct drm_device * dev); 386 + extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc); 387 + extern int radeon_enable_vblank(struct drm_device *dev, int crtc); 388 + extern void radeon_disable_vblank(struct drm_device *dev, int crtc); 389 extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS); 390 extern void radeon_driver_irq_preinstall(struct drm_device * dev); 391 + extern int radeon_driver_irq_postinstall(struct drm_device *dev); 392 extern void radeon_driver_irq_uninstall(struct drm_device * dev); 393 extern void radeon_enable_interrupt(struct drm_device *dev); 394 extern int radeon_vblank_crtc_get(struct drm_device *dev); ··· 397 extern int radeon_driver_load(struct drm_device *dev, unsigned long flags); 398 extern int radeon_driver_unload(struct drm_device *dev); 399 extern int radeon_driver_firstopen(struct drm_device *dev); 400 + extern void radeon_driver_preclose(struct drm_device *dev, 401 + struct drm_file *file_priv); 402 + extern void radeon_driver_postclose(struct drm_device *dev, 403 + struct drm_file *file_priv); 404 extern void radeon_driver_lastclose(struct drm_device * dev); 405 + extern int radeon_driver_open(struct drm_device *dev, 406 + struct drm_file *file_priv); 407 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd, 408 unsigned long arg); 409 410 /* r300_cmdbuf.c */ 411 extern void r300_init_reg_flags(struct drm_device *dev); 412 413 + extern int r300_do_cp_cmdbuf(struct drm_device *dev, 414 struct drm_file *file_priv, 415 + drm_radeon_kcmd_buffer_t *cmdbuf); 416 417 /* Flags for stats.boxes 418 */ ··· 434 # define RADEON_SCISSOR_1_ENABLE (1 << 29) 435 # define RADEON_SCISSOR_2_ENABLE (1 << 30) 436 437 + /* 438 + * PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx) 439 + * don't have an explicit bus mastering disable bit. It's handled 440 + * by the PCI D-states. PMI_BM_DIS disables D-state bus master 441 + * handling, not bus mastering itself. 
442 + */ 443 #define RADEON_BUS_CNTL 0x0030 444 + /* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */ 445 # define RADEON_BUS_MASTER_DIS (1 << 6) 446 + /* rs400, rs690/rs740 */ 447 + # define RS400_BUS_MASTER_DIS (1 << 14) 448 + # define RS400_MSI_REARM (1 << 20) 449 + /* see RS480_MSI_REARM in AIC_CNTL for rs480 */ 450 + 451 + #define RADEON_BUS_CNTL1 0x0034 452 + # define RADEON_PMI_BM_DIS (1 << 2) 453 + # define RADEON_PMI_INT_DIS (1 << 3) 454 + 455 + #define RV370_BUS_CNTL 0x004c 456 + # define RV370_PMI_BM_DIS (1 << 5) 457 + # define RV370_PMI_INT_DIS (1 << 6) 458 + 459 + #define RADEON_MSI_REARM_EN 0x0160 460 + /* rv370/rv380, rv410, r423/r430/r480, r5xx */ 461 + # define RV370_MSI_REARM_EN (1 << 0) 462 463 #define RADEON_CLOCK_CNTL_DATA 0x000c 464 # define RADEON_PLL_WR_EN (1 << 7) ··· 623 # define RADEON_SW_INT_TEST (1 << 25) 624 # define RADEON_SW_INT_TEST_ACK (1 << 25) 625 # define RADEON_SW_INT_FIRE (1 << 26) 626 + # define R500_DISPLAY_INT_STATUS (1 << 0) 627 628 #define RADEON_HOST_PATH_CNTL 0x0130 629 # define RADEON_HDP_SOFT_RESET (1 << 26) ··· 907 908 #define RADEON_AIC_CNTL 0x01d0 909 # define RADEON_PCIGART_TRANSLATE_EN (1 << 0) 910 + # define RS480_MSI_REARM (1 << 3) 911 #define RADEON_AIC_STAT 0x01d4 912 #define RADEON_AIC_PT_BASE 0x01d8 913 #define RADEON_AIC_LO_ADDR 0x01dc ··· 1116 1117 #define R200_VAP_PVS_CNTL_1 0x22D0 1118 1119 + #define RADEON_CRTC_CRNT_FRAME 0x0214 1120 + #define RADEON_CRTC2_CRNT_FRAME 0x0314 1121 + 1122 #define R500_D1CRTC_STATUS 0x609c 1123 #define R500_D2CRTC_STATUS 0x689c 1124 #define R500_CRTC_V_BLANK (1<<0) ··· 1200 1201 #define IGP_WRITE_MCIND(addr, val) \ 1202 do { \ 1203 + if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) || \ 1204 + ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) \ 1205 RS690_WRITE_MCIND(addr, val); \ 1206 else \ 1207 RS480_WRITE_MCIND(addr, val); \
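IGP_READ_MCIND and IGP_WRITE_MCIND route memory-controller register accesses through either the RS690/RS740 or the RS480 indirect-index path depending on family. A hedged sketch of the read-modify-write pattern radeon_cp.c uses with them; RS480_MC_MISC_CNTL is taken from the code above, while HYPOTHETICAL_MC_BIT is a placeholder rather than a real register bit, and note that IGP_WRITE_MCIND is a macro that picks up dev_priv from the enclosing scope.

	/* Illustrative read-modify-write through the IGP indirection macros. */
	static void igp_mc_rmw_example(drm_radeon_private_t *dev_priv)
	{
		u32 temp;

		temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
		temp |= HYPOTHETICAL_MC_BIT;	/* placeholder flag */
		IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, temp);
	}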
+168 -100
drivers/gpu/drm/radeon/radeon_irq.c
··· 27 * 28 * Authors: 29 * Keith Whitwell <keith@tungstengraphics.com> 30 - * Michel Dänzer <michel@daenzer.net> 31 */ 32 33 #include "drmP.h" ··· 35 #include "radeon_drm.h" 36 #include "radeon_drv.h" 37 38 - static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv, 39 - u32 mask) 40 { 41 - u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask; 42 if (irqs) 43 RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs); 44 return irqs; 45 } 46 ··· 184 drm_radeon_private_t *dev_priv = 185 (drm_radeon_private_t *) dev->dev_private; 186 u32 stat; 187 188 /* Only consider the bits we're interested in - others could be used 189 * outside the DRM 190 */ 191 - stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | 192 - RADEON_CRTC_VBLANK_STAT | 193 - RADEON_CRTC2_VBLANK_STAT)); 194 if (!stat) 195 return IRQ_NONE; 196 197 stat &= dev_priv->irq_enable_reg; 198 199 /* SW interrupt */ 200 - if (stat & RADEON_SW_INT_TEST) { 201 DRM_WAKEUP(&dev_priv->swi_queue); 202 - } 203 204 /* VBLANK interrupt */ 205 - if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) { 206 - int vblank_crtc = dev_priv->vblank_crtc; 207 - 208 - if ((vblank_crtc & 209 - (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) == 210 - (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) { 211 - if (stat & RADEON_CRTC_VBLANK_STAT) 212 - atomic_inc(&dev->vbl_received); 213 - if (stat & RADEON_CRTC2_VBLANK_STAT) 214 - atomic_inc(&dev->vbl_received2); 215 - } else if (((stat & RADEON_CRTC_VBLANK_STAT) && 216 - (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) || 217 - ((stat & RADEON_CRTC2_VBLANK_STAT) && 218 - (vblank_crtc & DRM_RADEON_VBLANK_CRTC2))) 219 - atomic_inc(&dev->vbl_received); 220 - 221 - DRM_WAKEUP(&dev->vbl_queue); 222 - drm_vbl_send_signals(dev); 223 } 224 - 225 return IRQ_HANDLED; 226 } 227 ··· 249 return ret; 250 } 251 252 - static int radeon_driver_vblank_do_wait(struct drm_device * dev, 253 - unsigned int *sequence, int crtc) 254 { 255 - drm_radeon_private_t *dev_priv = 256 - (drm_radeon_private_t *) dev->dev_private; 257 - unsigned int cur_vblank; 258 - int ret = 0; 259 - int ack = 0; 260 - atomic_t *counter; 261 if (!dev_priv) { 262 DRM_ERROR("called with no initialization\n"); 263 return -EINVAL; 264 } 265 266 - if (crtc == DRM_RADEON_VBLANK_CRTC1) { 267 - counter = &dev->vbl_received; 268 - ack |= RADEON_CRTC_VBLANK_STAT; 269 - } else if (crtc == DRM_RADEON_VBLANK_CRTC2) { 270 - counter = &dev->vbl_received2; 271 - ack |= RADEON_CRTC2_VBLANK_STAT; 272 - } else 273 return -EINVAL; 274 275 - radeon_acknowledge_irqs(dev_priv, ack); 276 - 277 - dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE; 278 - 279 - /* Assume that the user has missed the current sequence number 280 - * by about a day rather than she wants to wait for years 281 - * using vertical blanks... 282 - */ 283 - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 284 - (((cur_vblank = atomic_read(counter)) 285 - - *sequence) <= (1 << 23))); 286 - 287 - *sequence = cur_vblank; 288 - 289 - return ret; 290 - } 291 - 292 - int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence) 293 - { 294 - return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1); 295 - } 296 - 297 - int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence) 298 - { 299 - return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2); 300 } 301 302 /* Needs the lock as it touches the ring. 
··· 316 return radeon_wait_irq(dev, irqwait->irq_seq); 317 } 318 319 - void radeon_enable_interrupt(struct drm_device *dev) 320 - { 321 - drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; 322 - 323 - dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE; 324 - if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1) 325 - dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK; 326 - 327 - if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2) 328 - dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK; 329 - 330 - RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); 331 - dev_priv->irq_enabled = 1; 332 - } 333 - 334 /* drm_dma.h hooks 335 */ 336 void radeon_driver_irq_preinstall(struct drm_device * dev) 337 { 338 drm_radeon_private_t *dev_priv = 339 (drm_radeon_private_t *) dev->dev_private; 340 341 /* Disable *all* interrupts */ 342 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); 343 344 /* Clear bits if they're already high */ 345 - radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK | 346 - RADEON_CRTC_VBLANK_STAT | 347 - RADEON_CRTC2_VBLANK_STAT)); 348 } 349 350 - void radeon_driver_irq_postinstall(struct drm_device * dev) 351 { 352 drm_radeon_private_t *dev_priv = 353 (drm_radeon_private_t *) dev->dev_private; 354 355 atomic_set(&dev_priv->swi_emitted, 0); 356 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); 357 358 - radeon_enable_interrupt(dev); 359 } 360 361 void radeon_driver_irq_uninstall(struct drm_device * dev) ··· 362 363 dev_priv->irq_enabled = 0; 364 365 /* Disable *all* interrupts */ 366 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); 367 } ··· 372 int radeon_vblank_crtc_get(struct drm_device *dev) 373 { 374 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; 375 - u32 flag; 376 - u32 value; 377 378 - flag = RADEON_READ(RADEON_GEN_INT_CNTL); 379 - value = 0; 380 - 381 - if (flag & RADEON_CRTC_VBLANK_MASK) 382 - value |= DRM_RADEON_VBLANK_CRTC1; 383 - 384 - if (flag & RADEON_CRTC2_VBLANK_MASK) 385 - value |= DRM_RADEON_VBLANK_CRTC2; 386 - return value; 387 } 388 389 int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value) ··· 384 return -EINVAL; 385 } 386 dev_priv->vblank_crtc = (unsigned int)value; 387 - radeon_enable_interrupt(dev); 388 return 0; 389 }
··· 27 * 28 * Authors: 29 * Keith Whitwell <keith@tungstengraphics.com> 30 + * Michel D�zer <michel@daenzer.net> 31 */ 32 33 #include "drmP.h" ··· 35 #include "radeon_drm.h" 36 #include "radeon_drv.h" 37 38 + void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state) 39 { 40 + drm_radeon_private_t *dev_priv = dev->dev_private; 41 + 42 + if (state) 43 + dev_priv->irq_enable_reg |= mask; 44 + else 45 + dev_priv->irq_enable_reg &= ~mask; 46 + 47 + RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg); 48 + } 49 + 50 + static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state) 51 + { 52 + drm_radeon_private_t *dev_priv = dev->dev_private; 53 + 54 + if (state) 55 + dev_priv->r500_disp_irq_reg |= mask; 56 + else 57 + dev_priv->r500_disp_irq_reg &= ~mask; 58 + 59 + RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg); 60 + } 61 + 62 + int radeon_enable_vblank(struct drm_device *dev, int crtc) 63 + { 64 + drm_radeon_private_t *dev_priv = dev->dev_private; 65 + 66 + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { 67 + switch (crtc) { 68 + case 0: 69 + r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1); 70 + break; 71 + case 1: 72 + r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1); 73 + break; 74 + default: 75 + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 76 + crtc); 77 + return EINVAL; 78 + } 79 + } else { 80 + switch (crtc) { 81 + case 0: 82 + radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1); 83 + break; 84 + case 1: 85 + radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1); 86 + break; 87 + default: 88 + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 89 + crtc); 90 + return EINVAL; 91 + } 92 + } 93 + 94 + return 0; 95 + } 96 + 97 + void radeon_disable_vblank(struct drm_device *dev, int crtc) 98 + { 99 + drm_radeon_private_t *dev_priv = dev->dev_private; 100 + 101 + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { 102 + switch (crtc) { 103 + case 0: 104 + r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0); 105 + break; 106 + case 1: 107 + r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0); 108 + break; 109 + default: 110 + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 111 + crtc); 112 + break; 113 + } 114 + } else { 115 + switch (crtc) { 116 + case 0: 117 + radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0); 118 + break; 119 + case 1: 120 + radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0); 121 + break; 122 + default: 123 + DRM_ERROR("tried to enable vblank on non-existent crtc %d\n", 124 + crtc); 125 + break; 126 + } 127 + } 128 + } 129 + 130 + static inline u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int) 131 + { 132 + u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS); 133 + u32 irq_mask = RADEON_SW_INT_TEST; 134 + 135 + *r500_disp_int = 0; 136 + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { 137 + /* vbl interrupts in a different place */ 138 + 139 + if (irqs & R500_DISPLAY_INT_STATUS) { 140 + /* if a display interrupt */ 141 + u32 disp_irq; 142 + 143 + disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS); 144 + 145 + *r500_disp_int = disp_irq; 146 + if (disp_irq & R500_D1_VBLANK_INTERRUPT) 147 + RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK); 148 + if (disp_irq & R500_D2_VBLANK_INTERRUPT) 149 + RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK); 150 + } 151 + irq_mask |= R500_DISPLAY_INT_STATUS; 152 + } else 153 + irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT; 154 + 155 + irqs &= 
irq_mask; 156 + 157 if (irqs) 158 RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs); 159 + 160 return irqs; 161 } 162 ··· 68 drm_radeon_private_t *dev_priv = 69 (drm_radeon_private_t *) dev->dev_private; 70 u32 stat; 71 + u32 r500_disp_int; 72 73 /* Only consider the bits we're interested in - others could be used 74 * outside the DRM 75 */ 76 + stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int); 77 if (!stat) 78 return IRQ_NONE; 79 80 stat &= dev_priv->irq_enable_reg; 81 82 /* SW interrupt */ 83 + if (stat & RADEON_SW_INT_TEST) 84 DRM_WAKEUP(&dev_priv->swi_queue); 85 86 /* VBLANK interrupt */ 87 + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { 88 + if (r500_disp_int & R500_D1_VBLANK_INTERRUPT) 89 + drm_handle_vblank(dev, 0); 90 + if (r500_disp_int & R500_D2_VBLANK_INTERRUPT) 91 + drm_handle_vblank(dev, 1); 92 + } else { 93 + if (stat & RADEON_CRTC_VBLANK_STAT) 94 + drm_handle_vblank(dev, 0); 95 + if (stat & RADEON_CRTC2_VBLANK_STAT) 96 + drm_handle_vblank(dev, 1); 97 } 98 return IRQ_HANDLED; 99 } 100 ··· 144 return ret; 145 } 146 147 + u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc) 148 { 149 + drm_radeon_private_t *dev_priv = dev->dev_private; 150 + 151 if (!dev_priv) { 152 DRM_ERROR("called with no initialization\n"); 153 return -EINVAL; 154 } 155 156 + if (crtc < 0 || crtc > 1) { 157 + DRM_ERROR("Invalid crtc %d\n", crtc); 158 return -EINVAL; 159 + } 160 161 + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) { 162 + if (crtc == 0) 163 + return RADEON_READ(R500_D1CRTC_FRAME_COUNT); 164 + else 165 + return RADEON_READ(R500_D2CRTC_FRAME_COUNT); 166 + } else { 167 + if (crtc == 0) 168 + return RADEON_READ(RADEON_CRTC_CRNT_FRAME); 169 + else 170 + return RADEON_READ(RADEON_CRTC2_CRNT_FRAME); 171 + } 172 } 173 174 /* Needs the lock as it touches the ring. ··· 234 return radeon_wait_irq(dev, irqwait->irq_seq); 235 } 236 237 /* drm_dma.h hooks 238 */ 239 void radeon_driver_irq_preinstall(struct drm_device * dev) 240 { 241 drm_radeon_private_t *dev_priv = 242 (drm_radeon_private_t *) dev->dev_private; 243 + u32 dummy; 244 245 /* Disable *all* interrupts */ 246 + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) 247 + RADEON_WRITE(R500_DxMODE_INT_MASK, 0); 248 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); 249 250 /* Clear bits if they're already high */ 251 + radeon_acknowledge_irqs(dev_priv, &dummy); 252 } 253 254 + int radeon_driver_irq_postinstall(struct drm_device *dev) 255 { 256 drm_radeon_private_t *dev_priv = 257 (drm_radeon_private_t *) dev->dev_private; 258 + int ret; 259 260 atomic_set(&dev_priv->swi_emitted, 0); 261 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue); 262 263 + ret = drm_vblank_init(dev, 2); 264 + if (ret) 265 + return ret; 266 + 267 + dev->max_vblank_count = 0x001fffff; 268 + 269 + radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1); 270 + 271 + return 0; 272 } 273 274 void radeon_driver_irq_uninstall(struct drm_device * dev) ··· 285 286 dev_priv->irq_enabled = 0; 287 288 + if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) 289 + RADEON_WRITE(R500_DxMODE_INT_MASK, 0); 290 /* Disable *all* interrupts */ 291 RADEON_WRITE(RADEON_GEN_INT_CNTL, 0); 292 } ··· 293 int radeon_vblank_crtc_get(struct drm_device *dev) 294 { 295 drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private; 296 297 + return dev_priv->vblank_crtc; 298 } 299 300 int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value) ··· 315 return -EINVAL; 316 } 317 dev_priv->vblank_crtc = (unsigned int)value; 318 return 0; 319 }
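The radeon_irq.c rewrite above converts the driver from the old per-device vblank wait to per-CRTC hooks: radeon_enable_vblank()/radeon_disable_vblank() gate the interrupt per head (with the r500+ display-interrupt block handled separately), radeon_get_vblank_counter() reads the hardware frame counter, and the handler forwards events through drm_handle_vblank(). The matching radeon_drv.c hunk is not reproduced in this excerpt; the wiring sketched below only illustrates how the new hooks plug into struct drm_driver, following the same pattern as the via_drv.c change further down:

	/* Illustrative wiring only -- not the actual radeon_drv.c hunk. */
	.get_vblank_counter = radeon_get_vblank_counter,
	.enable_vblank = radeon_enable_vblank,
	.disable_vblank = radeon_disable_vblank,
	.irq_preinstall = radeon_driver_irq_preinstall,
	.irq_postinstall = radeon_driver_irq_postinstall,	/* now returns int */
	.irq_uninstall = radeon_driver_irq_uninstall,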
+1 -1
drivers/gpu/drm/radeon/radeon_state.c
··· 2997 value = GET_SCRATCH(2); 2998 break; 2999 case RADEON_PARAM_IRQ_NR: 3000 - value = dev->irq; 3001 break; 3002 case RADEON_PARAM_GART_BASE: 3003 value = dev_priv->gart_vm_start;
··· 2997 value = GET_SCRATCH(2); 2998 break; 2999 case RADEON_PARAM_IRQ_NR: 3000 + value = drm_dev_to_irq(dev); 3001 break; 3002 case RADEON_PARAM_GART_BASE: 3003 value = dev_priv->gart_vm_start;
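The one-liner above is part of removing the cached IRQ number from struct drm_device: the parameter query now asks the PCI layer directly. For context, the helper it calls is the two-line inline added in the drmP.h hunk further down, reproduced here rather than invented:

	static inline int drm_dev_to_irq(struct drm_device *dev)
	{
		return dev->pdev->irq;
	}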
+5 -5
drivers/gpu/drm/sis/sis_mm.c
··· 41 #define AGP_TYPE 1 42 43 44 - #if defined(CONFIG_FB_SIS) 45 /* fb management via fb device */ 46 47 #define SIS_MM_ALIGN_SHIFT 0 ··· 57 if (req.size == 0) 58 return NULL; 59 else 60 - return (void *)~req.offset; 61 } 62 63 static void sis_sman_mm_free(void *private, void *ref) ··· 75 return ~((unsigned long)ref); 76 } 77 78 - #else /* CONFIG_FB_SIS */ 79 80 #define SIS_MM_ALIGN_SHIFT 4 81 #define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1) 82 83 - #endif /* CONFIG_FB_SIS */ 84 85 static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 86 { ··· 89 int ret; 90 91 mutex_lock(&dev->struct_mutex); 92 - #if defined(CONFIG_FB_SIS) 93 { 94 struct drm_sman_mm sman_mm; 95 sman_mm.private = (void *)0xFFFFFFFF;
··· 41 #define AGP_TYPE 1 42 43 44 + #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE) 45 /* fb management via fb device */ 46 47 #define SIS_MM_ALIGN_SHIFT 0 ··· 57 if (req.size == 0) 58 return NULL; 59 else 60 + return (void *)(unsigned long)~req.offset; 61 } 62 63 static void sis_sman_mm_free(void *private, void *ref) ··· 75 return ~((unsigned long)ref); 76 } 77 78 + #else /* CONFIG_FB_SIS[_MODULE] */ 79 80 #define SIS_MM_ALIGN_SHIFT 4 81 #define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1) 82 83 + #endif /* CONFIG_FB_SIS[_MODULE] */ 84 85 static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv) 86 { ··· 89 int ret; 90 91 mutex_lock(&dev->struct_mutex); 92 + #if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE) 93 { 94 struct drm_sman_mm sman_mm; 95 sman_mm.private = (void *)0xFFFFFFFF;
+14 -12
drivers/gpu/drm/via/via_drv.c
··· 40 static struct drm_driver driver = { 41 .driver_features = 42 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 43 - DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL, 44 .load = via_driver_load, 45 .unload = via_driver_unload, 46 .context_dtor = via_final_context, 47 - .vblank_wait = via_driver_vblank_wait, 48 .irq_preinstall = via_driver_irq_preinstall, 49 .irq_postinstall = via_driver_irq_postinstall, 50 .irq_uninstall = via_driver_irq_uninstall, ··· 61 .get_reg_ofs = drm_core_get_reg_ofs, 62 .ioctls = via_ioctls, 63 .fops = { 64 - .owner = THIS_MODULE, 65 - .open = drm_open, 66 - .release = drm_release, 67 - .ioctl = drm_ioctl, 68 - .mmap = drm_mmap, 69 - .poll = drm_poll, 70 - .fasync = drm_fasync, 71 - }, 72 .pci_driver = { 73 - .name = DRIVER_NAME, 74 - .id_table = pciidlist, 75 }, 76 77 .name = DRIVER_NAME,
··· 40 static struct drm_driver driver = { 41 .driver_features = 42 DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | 43 + DRIVER_IRQ_SHARED, 44 .load = via_driver_load, 45 .unload = via_driver_unload, 46 .context_dtor = via_final_context, 47 + .get_vblank_counter = via_get_vblank_counter, 48 + .enable_vblank = via_enable_vblank, 49 + .disable_vblank = via_disable_vblank, 50 .irq_preinstall = via_driver_irq_preinstall, 51 .irq_postinstall = via_driver_irq_postinstall, 52 .irq_uninstall = via_driver_irq_uninstall, ··· 59 .get_reg_ofs = drm_core_get_reg_ofs, 60 .ioctls = via_ioctls, 61 .fops = { 62 + .owner = THIS_MODULE, 63 + .open = drm_open, 64 + .release = drm_release, 65 + .ioctl = drm_ioctl, 66 + .mmap = drm_mmap, 67 + .poll = drm_poll, 68 + .fasync = drm_fasync, 69 + }, 70 .pci_driver = { 71 + .name = DRIVER_NAME, 72 + .id_table = pciidlist, 73 }, 74 75 .name = DRIVER_NAME,
+10 -6
drivers/gpu/drm/via/via_drv.h
··· 75 struct timeval last_vblank; 76 int last_vblank_valid; 77 unsigned usec_per_vblank; 78 drm_via_state_t hc_state; 79 char pci_buf[VIA_PCI_BUF_SIZE]; 80 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; ··· 131 extern int via_final_context(struct drm_device * dev, int context); 132 133 extern int via_do_cleanup_map(struct drm_device * dev); 134 - extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence); 135 136 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); 137 extern void via_driver_irq_preinstall(struct drm_device * dev); 138 - extern void via_driver_irq_postinstall(struct drm_device * dev); 139 extern void via_driver_irq_uninstall(struct drm_device * dev); 140 141 extern int via_dma_cleanup(struct drm_device * dev); 142 extern void via_init_command_verifier(void); 143 extern int via_driver_dma_quiescent(struct drm_device * dev); 144 - extern void via_init_futex(drm_via_private_t * dev_priv); 145 - extern void via_cleanup_futex(drm_via_private_t * dev_priv); 146 - extern void via_release_futex(drm_via_private_t * dev_priv, int context); 147 148 - extern void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv); 149 extern void via_lastclose(struct drm_device *dev); 150 151 extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
··· 75 struct timeval last_vblank; 76 int last_vblank_valid; 77 unsigned usec_per_vblank; 78 + atomic_t vbl_received; 79 drm_via_state_t hc_state; 80 char pci_buf[VIA_PCI_BUF_SIZE]; 81 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE]; ··· 130 extern int via_final_context(struct drm_device * dev, int context); 131 132 extern int via_do_cleanup_map(struct drm_device * dev); 133 + extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc); 134 + extern int via_enable_vblank(struct drm_device *dev, int crtc); 135 + extern void via_disable_vblank(struct drm_device *dev, int crtc); 136 137 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS); 138 extern void via_driver_irq_preinstall(struct drm_device * dev); 139 + extern int via_driver_irq_postinstall(struct drm_device *dev); 140 extern void via_driver_irq_uninstall(struct drm_device * dev); 141 142 extern int via_dma_cleanup(struct drm_device * dev); 143 extern void via_init_command_verifier(void); 144 extern int via_driver_dma_quiescent(struct drm_device * dev); 145 + extern void via_init_futex(drm_via_private_t *dev_priv); 146 + extern void via_cleanup_futex(drm_via_private_t *dev_priv); 147 + extern void via_release_futex(drm_via_private_t *dev_priv, int context); 148 149 + extern void via_reclaim_buffers_locked(struct drm_device *dev, 150 + struct drm_file *file_priv); 151 extern void via_lastclose(struct drm_device *dev); 152 153 extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);
+59 -46
drivers/gpu/drm/via/via_irq.c
··· 43 #define VIA_REG_INTERRUPT 0x200 44 45 /* VIA_REG_INTERRUPT */ 46 - #define VIA_IRQ_GLOBAL (1 << 31) 47 #define VIA_IRQ_VBLANK_ENABLE (1 << 19) 48 #define VIA_IRQ_VBLANK_PENDING (1 << 3) 49 #define VIA_IRQ_HQV0_ENABLE (1 << 11) ··· 68 69 static maskarray_t via_pro_group_a_irqs[] = { 70 {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 71 - 0x00000000}, 72 {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 73 - 0x00000000}, 74 {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, 75 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 76 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, 77 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 78 }; 79 - static int via_num_pro_group_a = 80 - sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t); 81 static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3}; 82 83 static maskarray_t via_unichrome_irqs[] = { ··· 85 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, 86 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008} 87 }; 88 - static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t); 89 static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1}; 90 91 static unsigned time_diff(struct timeval *now, struct timeval *then) 92 { 93 return (now->tv_usec >= then->tv_usec) ? 94 - now->tv_usec - then->tv_usec : 95 - 1000000 - (then->tv_usec - now->tv_usec); 96 } 97 98 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) ··· 117 118 status = VIA_READ(VIA_REG_INTERRUPT); 119 if (status & VIA_IRQ_VBLANK_PENDING) { 120 - atomic_inc(&dev->vbl_received); 121 - if (!(atomic_read(&dev->vbl_received) & 0x0F)) { 122 do_gettimeofday(&cur_vblank); 123 if (dev_priv->last_vblank_valid) { 124 dev_priv->usec_per_vblank = 125 - time_diff(&cur_vblank, 126 - &dev_priv->last_vblank) >> 4; 127 } 128 dev_priv->last_vblank = cur_vblank; 129 dev_priv->last_vblank_valid = 1; 130 } 131 - if (!(atomic_read(&dev->vbl_received) & 0xFF)) { 132 DRM_DEBUG("US per vblank is: %u\n", 133 dev_priv->usec_per_vblank); 134 } 135 - DRM_WAKEUP(&dev->vbl_queue); 136 - drm_vbl_send_signals(dev); 137 handled = 1; 138 } 139 ··· 153 /* Acknowlege interrupts */ 154 VIA_WRITE(VIA_REG_INTERRUPT, status); 155 156 if (handled) 157 return IRQ_HANDLED; 158 else ··· 172 } 173 } 174 175 - int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence) 176 { 177 - drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 178 - unsigned int cur_vblank; 179 - int ret = 0; 180 181 - DRM_DEBUG("\n"); 182 - if (!dev_priv) { 183 - DRM_ERROR("called with no initialization\n"); 184 return -EINVAL; 185 } 186 187 - viadrv_acknowledge_irqs(dev_priv); 188 189 - /* Assume that the user has missed the current sequence number 190 - * by about a day rather than she wants to wait for years 191 - * using vertical blanks... 
192 - */ 193 194 - DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ, 195 - (((cur_vblank = atomic_read(&dev->vbl_received)) - 196 - *sequence) <= (1 << 23))); 197 198 - *sequence = cur_vblank; 199 - return ret; 200 } 201 202 static int ··· 250 *sequence = cur_irq_sequence; 251 return ret; 252 } 253 254 /* 255 * drm_dma.h hooks ··· 305 } 306 } 307 308 - void via_driver_irq_postinstall(struct drm_device * dev) 309 { 310 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 311 u32 status; 312 313 - DRM_DEBUG("\n"); 314 - if (dev_priv) { 315 - status = VIA_READ(VIA_REG_INTERRUPT); 316 - VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL 317 - | dev_priv->irq_enable_mask); 318 319 - /* Some magic, oh for some data sheets ! */ 320 321 - VIA_WRITE8(0x83d4, 0x11); 322 - VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); 323 324 - } 325 } 326 327 void via_driver_irq_uninstall(struct drm_device * dev) ··· 354 drm_via_irq_t *cur_irq = dev_priv->via_irqs; 355 int force_sequence; 356 357 - if (!dev->irq) 358 - return -EINVAL; 359 - 360 if (irqwait->request.irq >= dev_priv->num_irqs) { 361 DRM_ERROR("Trying to wait on unknown irq %d\n", 362 irqwait->request.irq); ··· 364 365 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { 366 case VIA_IRQ_RELATIVE: 367 - irqwait->request.sequence += atomic_read(&cur_irq->irq_received); 368 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; 369 case VIA_IRQ_ABSOLUTE: 370 break;
··· 43 #define VIA_REG_INTERRUPT 0x200 44 45 /* VIA_REG_INTERRUPT */ 46 + #define VIA_IRQ_GLOBAL (1 << 31) 47 #define VIA_IRQ_VBLANK_ENABLE (1 << 19) 48 #define VIA_IRQ_VBLANK_PENDING (1 << 3) 49 #define VIA_IRQ_HQV0_ENABLE (1 << 11) ··· 68 69 static maskarray_t via_pro_group_a_irqs[] = { 70 {VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010, 71 + 0x00000000 }, 72 {VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010, 73 + 0x00000000 }, 74 {VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0, 75 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 76 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, 77 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}, 78 }; 79 + static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs); 80 static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3}; 81 82 static maskarray_t via_unichrome_irqs[] = { ··· 86 {VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1, 87 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008} 88 }; 89 + static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs); 90 static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1}; 91 + 92 93 static unsigned time_diff(struct timeval *now, struct timeval *then) 94 { 95 return (now->tv_usec >= then->tv_usec) ? 96 + now->tv_usec - then->tv_usec : 97 + 1000000 - (then->tv_usec - now->tv_usec); 98 + } 99 + 100 + u32 via_get_vblank_counter(struct drm_device *dev, int crtc) 101 + { 102 + drm_via_private_t *dev_priv = dev->dev_private; 103 + if (crtc != 0) 104 + return 0; 105 + 106 + return atomic_read(&dev_priv->vbl_received); 107 } 108 109 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS) ··· 108 109 status = VIA_READ(VIA_REG_INTERRUPT); 110 if (status & VIA_IRQ_VBLANK_PENDING) { 111 + atomic_inc(&dev_priv->vbl_received); 112 + if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) { 113 do_gettimeofday(&cur_vblank); 114 if (dev_priv->last_vblank_valid) { 115 dev_priv->usec_per_vblank = 116 + time_diff(&cur_vblank, 117 + &dev_priv->last_vblank) >> 4; 118 } 119 dev_priv->last_vblank = cur_vblank; 120 dev_priv->last_vblank_valid = 1; 121 } 122 + if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) { 123 DRM_DEBUG("US per vblank is: %u\n", 124 dev_priv->usec_per_vblank); 125 } 126 + drm_handle_vblank(dev, 0); 127 handled = 1; 128 } 129 ··· 145 /* Acknowlege interrupts */ 146 VIA_WRITE(VIA_REG_INTERRUPT, status); 147 148 + 149 if (handled) 150 return IRQ_HANDLED; 151 else ··· 163 } 164 } 165 166 + int via_enable_vblank(struct drm_device *dev, int crtc) 167 { 168 + drm_via_private_t *dev_priv = dev->dev_private; 169 + u32 status; 170 171 + if (crtc != 0) { 172 + DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); 173 return -EINVAL; 174 } 175 176 + status = VIA_READ(VIA_REG_INTERRUPT); 177 + VIA_WRITE(VIA_REG_INTERRUPT, status & VIA_IRQ_VBLANK_ENABLE); 178 179 + VIA_WRITE8(0x83d4, 0x11); 180 + VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); 181 182 + return 0; 183 + } 184 185 + void via_disable_vblank(struct drm_device *dev, int crtc) 186 + { 187 + drm_via_private_t *dev_priv = dev->dev_private; 188 + 189 + VIA_WRITE8(0x83d4, 0x11); 190 + VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30); 191 + 192 + if (crtc != 0) 193 + DRM_ERROR("%s: bad crtc %d\n", __func__, crtc); 194 } 195 196 static int ··· 238 *sequence = cur_irq_sequence; 239 return ret; 240 } 241 + 242 243 /* 244 * drm_dma.h hooks ··· 292 } 293 } 294 295 + int via_driver_irq_postinstall(struct drm_device *dev) 296 { 297 drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private; 298 
u32 status; 299 300 + DRM_DEBUG("via_driver_irq_postinstall\n"); 301 + if (!dev_priv) 302 + return -EINVAL; 303 304 + drm_vblank_init(dev, 1); 305 + status = VIA_READ(VIA_REG_INTERRUPT); 306 + VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL 307 + | dev_priv->irq_enable_mask); 308 309 + /* Some magic, oh for some data sheets ! */ 310 + VIA_WRITE8(0x83d4, 0x11); 311 + VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30); 312 313 + return 0; 314 } 315 316 void via_driver_irq_uninstall(struct drm_device * dev) ··· 339 drm_via_irq_t *cur_irq = dev_priv->via_irqs; 340 int force_sequence; 341 342 if (irqwait->request.irq >= dev_priv->num_irqs) { 343 DRM_ERROR("Trying to wait on unknown irq %d\n", 344 irqwait->request.irq); ··· 352 353 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) { 354 case VIA_IRQ_RELATIVE: 355 + irqwait->request.sequence += 356 + atomic_read(&cur_irq->irq_received); 357 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE; 358 case VIA_IRQ_ABSOLUTE: 359 break;
+1 -2
drivers/gpu/drm/via/via_mm.c
··· 93 /* Last context, perform cleanup */ 94 if (dev->ctx_count == 1 && dev->dev_private) { 95 DRM_DEBUG("Last Context\n"); 96 - if (dev->irq) 97 - drm_irq_uninstall(dev); 98 via_cleanup_futex(dev_priv); 99 via_do_cleanup_map(dev); 100 }
··· 93 /* Last context, perform cleanup */ 94 if (dev->ctx_count == 1 && dev->dev_private) { 95 DRM_DEBUG("Last Context\n"); 96 + drm_irq_uninstall(dev); 97 via_cleanup_futex(dev_priv); 98 via_do_cleanup_map(dev); 99 }
+46 -17
include/drm/drm.h
··· 36 #ifndef _DRM_H_ 37 #define _DRM_H_ 38 39 - #if defined(__linux__) 40 #if defined(__KERNEL__) 41 #endif 42 #include <asm/ioctl.h> /* For _IO* macros */ ··· 45 #define DRM_IOC_WRITE _IOC_WRITE 46 #define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE 47 #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) 48 - #elif defined(__FreeBSD__) || defined(__NetBSD__) || defined(__OpenBSD__) 49 - #if defined(__FreeBSD__) && defined(IN_MODULE) 50 - /* Prevent name collision when including sys/ioccom.h */ 51 - #undef ioctl 52 - #include <sys/ioccom.h> 53 - #define ioctl(a,b,c) xf86ioctl(a,b,c) 54 - #else 55 - #include <sys/ioccom.h> 56 - #endif /* __FreeBSD__ && xf86ioctl */ 57 - #define DRM_IOCTL_NR(n) ((n) & 0xff) 58 - #define DRM_IOC_VOID IOC_VOID 59 - #define DRM_IOC_READ IOC_OUT 60 - #define DRM_IOC_WRITE IOC_IN 61 - #define DRM_IOC_READWRITE IOC_INOUT 62 - #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) 63 - #endif 64 65 #define DRM_MAJOR 226 66 #define DRM_MAX_MINOR 15 ··· 454 enum drm_vblank_seq_type { 455 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ 456 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ 457 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ 458 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ 459 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */ ··· 485 union drm_wait_vblank { 486 struct drm_wait_vblank_request request; 487 struct drm_wait_vblank_reply reply; 488 }; 489 490 /** ··· 570 int drm_dd_minor; 571 }; 572 573 #define DRM_IOCTL_BASE 'd' 574 #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) 575 #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) ··· 612 #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) 613 #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) 614 #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) 615 616 #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) 617 #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
··· 36 #ifndef _DRM_H_ 37 #define _DRM_H_ 38 39 #if defined(__KERNEL__) 40 #endif 41 #include <asm/ioctl.h> /* For _IO* macros */ ··· 46 #define DRM_IOC_WRITE _IOC_WRITE 47 #define DRM_IOC_READWRITE _IOC_READ|_IOC_WRITE 48 #define DRM_IOC(dir, group, nr, size) _IOC(dir, group, nr, size) 49 50 #define DRM_MAJOR 226 51 #define DRM_MAX_MINOR 15 ··· 471 enum drm_vblank_seq_type { 472 _DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */ 473 _DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */ 474 + _DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */ 475 _DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */ 476 _DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */ 477 _DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking */ ··· 501 union drm_wait_vblank { 502 struct drm_wait_vblank_request request; 503 struct drm_wait_vblank_reply reply; 504 + }; 505 + 506 + #define _DRM_PRE_MODESET 1 507 + #define _DRM_POST_MODESET 2 508 + 509 + /** 510 + * DRM_IOCTL_MODESET_CTL ioctl argument type 511 + * 512 + * \sa drmModesetCtl(). 513 + */ 514 + struct drm_modeset_ctl { 515 + uint32_t crtc; 516 + uint32_t cmd; 517 }; 518 519 /** ··· 573 int drm_dd_minor; 574 }; 575 576 + /** DRM_IOCTL_GEM_CLOSE ioctl argument type */ 577 + struct drm_gem_close { 578 + /** Handle of the object to be closed. */ 579 + uint32_t handle; 580 + uint32_t pad; 581 + }; 582 + 583 + /** DRM_IOCTL_GEM_FLINK ioctl argument type */ 584 + struct drm_gem_flink { 585 + /** Handle for the object being named */ 586 + uint32_t handle; 587 + 588 + /** Returned global name */ 589 + uint32_t name; 590 + }; 591 + 592 + /** DRM_IOCTL_GEM_OPEN ioctl argument type */ 593 + struct drm_gem_open { 594 + /** Name of object being opened */ 595 + uint32_t name; 596 + 597 + /** Returned handle for the object */ 598 + uint32_t handle; 599 + 600 + /** Returned size of the object */ 601 + uint64_t size; 602 + }; 603 + 604 #define DRM_IOCTL_BASE 'd' 605 #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) 606 #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) ··· 587 #define DRM_IOCTL_GET_CLIENT DRM_IOWR(0x05, struct drm_client) 588 #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) 589 #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) 590 + #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) 591 + #define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) 592 + #define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) 593 + #define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) 594 595 #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) 596 #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
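Besides the modeset notification ioctl (_DRM_PRE_MODESET/_DRM_POST_MODESET let the core keep vblank counts consistent across mode sets), the header now exposes the three core GEM ioctls. A user-space sketch of the name-passing pair; error handling is omitted, and the descriptor and handle names are assumptions made for the example, not identifiers from this header:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include "drm.h"

	static uint32_t share_bo_by_name(int fd_exporter, int fd_importer,
					 uint32_t bo_handle)
	{
		struct drm_gem_flink flink = { .handle = bo_handle };
		struct drm_gem_open op = { 0 };

		/* Export: turn a per-fd GEM handle into a global name. */
		ioctl(fd_exporter, DRM_IOCTL_GEM_FLINK, &flink);

		/* Import: open the same object by name on another fd;
		 * the kernel fills in a new handle and the object size. */
		op.name = flink.name;
		ioctl(fd_importer, DRM_IOCTL_GEM_OPEN, &op);

		return op.handle;
	}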
+237 -12
include/drm/drmP.h
··· 104 #define DRIVER_DMA_QUEUE 0x200 105 #define DRIVER_FB_DMA 0x400 106 #define DRIVER_IRQ_VBL2 0x800 107 108 /***********************************************************************/ 109 /** \name Begin the DRM... */ ··· 388 struct drm_minor *minor; 389 int remove_auth_on_close; 390 unsigned long lock_count; 391 struct file *filp; 392 void *driver_priv; 393 }; ··· 563 }; 564 565 /** 566 * DRM driver structure. This structure represent the common code for 567 * a family of cards. There will one drm_device for each card present 568 * in this family ··· 635 int (*kernel_context_switch) (struct drm_device *dev, int old, 636 int new); 637 void (*kernel_context_switch_unlock) (struct drm_device *dev); 638 - int (*vblank_wait) (struct drm_device *dev, unsigned int *sequence); 639 - int (*vblank_wait2) (struct drm_device *dev, unsigned int *sequence); 640 int (*dri_library_name) (struct drm_device *dev, char *buf); 641 642 /** 643 * Called by \c drm_device_is_agp. Typically used to determine if a ··· 699 700 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); 701 void (*irq_preinstall) (struct drm_device *dev); 702 - void (*irq_postinstall) (struct drm_device *dev); 703 void (*irq_uninstall) (struct drm_device *dev); 704 void (*reclaim_buffers) (struct drm_device *dev, 705 struct drm_file * file_priv); ··· 711 unsigned long (*get_reg_ofs) (struct drm_device *dev); 712 void (*set_version) (struct drm_device *dev, 713 struct drm_set_version *sv); 714 715 int major; 716 int minor; ··· 824 825 /** \name Context support */ 826 /*@{ */ 827 - int irq; /**< Interrupt used by board */ 828 int irq_enabled; /**< True if irq handler is enabled */ 829 __volatile__ long context_flag; /**< Context swapping flag */ 830 __volatile__ long interrupt_flag; /**< Interruption handler flag */ ··· 839 /** \name VBLANK IRQ support */ 840 /*@{ */ 841 842 - wait_queue_head_t vbl_queue; /**< VBLANK wait queue */ 843 - atomic_t vbl_received; 844 - atomic_t vbl_received2; /**< number of secondary VBLANK interrupts */ 845 spinlock_t vbl_lock; 846 - struct list_head vbl_sigs; /**< signal list to send on VBLANK */ 847 - struct list_head vbl_sigs2; /**< signals to send on secondary VBLANK */ 848 - unsigned int vbl_pending; 849 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */ 850 void (*locked_tasklet_func)(struct drm_device *dev); 851 ··· 881 struct pci_controller *hose; 882 #endif 883 struct drm_sg_mem *sg; /**< Scatter gather memory */ 884 void *dev_private; /**< device private data */ 885 struct drm_sigdata sigdata; /**< For block_all_signals */ 886 sigset_t sigmask; ··· 896 spinlock_t drw_lock; 897 struct idr drw_idr; 898 /*@} */ 899 }; 900 901 static __inline__ int drm_core_check_feature(struct drm_device *dev, 902 int feature) ··· 1013 extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); 1014 extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); 1015 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); 1016 extern int drm_unbind_agp(DRM_AGP_MEM * handle); 1017 1018 /* Misc. 
IOCTL support (drm_ioctl.h) */ ··· 1080 extern int drm_authmagic(struct drm_device *dev, void *data, 1081 struct drm_file *file_priv); 1082 1083 /* Locking IOCTL support (drm_lock.h) */ 1084 extern int drm_lock(struct drm_device *dev, void *data, 1085 struct drm_file *file_priv); ··· 1139 extern int drm_control(struct drm_device *dev, void *data, 1140 struct drm_file *file_priv); 1141 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); 1142 extern int drm_irq_uninstall(struct drm_device *dev); 1143 extern void drm_driver_irq_preinstall(struct drm_device *dev); 1144 extern void drm_driver_irq_postinstall(struct drm_device *dev); 1145 extern void drm_driver_irq_uninstall(struct drm_device *dev); 1146 1147 extern int drm_wait_vblank(struct drm_device *dev, void *data, 1148 - struct drm_file *file_priv); 1149 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); 1150 - extern void drm_vbl_send_signals(struct drm_device *dev); 1151 extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); 1152 1153 /* AGP/GART support (drm_agpsupport.h) */ ··· 1190 extern int drm_agp_free_memory(DRM_AGP_MEM * handle); 1191 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); 1192 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); 1193 1194 /* Stub support (drm_stub.h) */ 1195 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, ··· 1252 extern unsigned long drm_mm_tail_space(struct drm_mm *mm); 1253 extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); 1254 extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); 1255 1256 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); 1257 extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
··· 104 #define DRIVER_DMA_QUEUE 0x200 105 #define DRIVER_FB_DMA 0x400 106 #define DRIVER_IRQ_VBL2 0x800 107 + #define DRIVER_GEM 0x1000 108 109 /***********************************************************************/ 110 /** \name Begin the DRM... */ ··· 387 struct drm_minor *minor; 388 int remove_auth_on_close; 389 unsigned long lock_count; 390 + /** Mapping of mm object handles to object pointers. */ 391 + struct idr object_idr; 392 + /** Lock for synchronization of access to object_idr. */ 393 + spinlock_t table_lock; 394 struct file *filp; 395 void *driver_priv; 396 }; ··· 558 }; 559 560 /** 561 + * This structure defines the drm_mm memory object, which will be used by the 562 + * DRM for its buffer objects. 563 + */ 564 + struct drm_gem_object { 565 + /** Reference count of this object */ 566 + struct kref refcount; 567 + 568 + /** Handle count of this object. Each handle also holds a reference */ 569 + struct kref handlecount; 570 + 571 + /** Related drm device */ 572 + struct drm_device *dev; 573 + 574 + /** File representing the shmem storage */ 575 + struct file *filp; 576 + 577 + /** 578 + * Size of the object, in bytes. Immutable over the object's 579 + * lifetime. 580 + */ 581 + size_t size; 582 + 583 + /** 584 + * Global name for this object, starts at 1. 0 means unnamed. 585 + * Access is covered by the object_name_lock in the related drm_device 586 + */ 587 + int name; 588 + 589 + /** 590 + * Memory domains. These monitor which caches contain read/write data 591 + * related to the object. When transitioning from one set of domains 592 + * to another, the driver is called to ensure that caches are suitably 593 + * flushed and invalidated 594 + */ 595 + uint32_t read_domains; 596 + uint32_t write_domain; 597 + 598 + /** 599 + * While validating an exec operation, the 600 + * new read/write domain values are computed here. 601 + * They will be transferred to the above values 602 + * at the point that any cache flushing occurs 603 + */ 604 + uint32_t pending_read_domains; 605 + uint32_t pending_write_domain; 606 + 607 + void *driver_private; 608 + }; 609 + 610 + /** 611 * DRM driver structure. This structure represent the common code for 612 * a family of cards. There will one drm_device for each card present 613 * in this family ··· 580 int (*kernel_context_switch) (struct drm_device *dev, int old, 581 int new); 582 void (*kernel_context_switch_unlock) (struct drm_device *dev); 583 int (*dri_library_name) (struct drm_device *dev, char *buf); 584 + 585 + /** 586 + * get_vblank_counter - get raw hardware vblank counter 587 + * @dev: DRM device 588 + * @crtc: counter to fetch 589 + * 590 + * Driver callback for fetching a raw hardware vblank counter 591 + * for @crtc. If a device doesn't have a hardware counter, the 592 + * driver can simply return the value of drm_vblank_count and 593 + * make the enable_vblank() and disable_vblank() hooks into no-ops, 594 + * leaving interrupts enabled at all times. 595 + * 596 + * Wraparound handling and loss of events due to modesetting is dealt 597 + * with in the DRM core code. 598 + * 599 + * RETURNS 600 + * Raw vblank counter value. 601 + */ 602 + u32 (*get_vblank_counter) (struct drm_device *dev, int crtc); 603 + 604 + /** 605 + * enable_vblank - enable vblank interrupt events 606 + * @dev: DRM device 607 + * @crtc: which irq to enable 608 + * 609 + * Enable vblank interrupts for @crtc. 
If the device doesn't have 610 + * a hardware vblank counter, this routine should be a no-op, since 611 + * interrupts will have to stay on to keep the count accurate. 612 + * 613 + * RETURNS 614 + * Zero on success, appropriate errno if the given @crtc's vblank 615 + * interrupt cannot be enabled. 616 + */ 617 + int (*enable_vblank) (struct drm_device *dev, int crtc); 618 + 619 + /** 620 + * disable_vblank - disable vblank interrupt events 621 + * @dev: DRM device 622 + * @crtc: which irq to enable 623 + * 624 + * Disable vblank interrupts for @crtc. If the device doesn't have 625 + * a hardware vblank counter, this routine should be a no-op, since 626 + * interrupts will have to stay on to keep the count accurate. 627 + */ 628 + void (*disable_vblank) (struct drm_device *dev, int crtc); 629 630 /** 631 * Called by \c drm_device_is_agp. Typically used to determine if a ··· 601 602 irqreturn_t(*irq_handler) (DRM_IRQ_ARGS); 603 void (*irq_preinstall) (struct drm_device *dev); 604 + int (*irq_postinstall) (struct drm_device *dev); 605 void (*irq_uninstall) (struct drm_device *dev); 606 void (*reclaim_buffers) (struct drm_device *dev, 607 struct drm_file * file_priv); ··· 613 unsigned long (*get_reg_ofs) (struct drm_device *dev); 614 void (*set_version) (struct drm_device *dev, 615 struct drm_set_version *sv); 616 + 617 + int (*proc_init)(struct drm_minor *minor); 618 + void (*proc_cleanup)(struct drm_minor *minor); 619 + 620 + /** 621 + * Driver-specific constructor for drm_gem_objects, to set up 622 + * obj->driver_private. 623 + * 624 + * Returns 0 on success. 625 + */ 626 + int (*gem_init_object) (struct drm_gem_object *obj); 627 + void (*gem_free_object) (struct drm_gem_object *obj); 628 629 int major; 630 int minor; ··· 714 715 /** \name Context support */ 716 /*@{ */ 717 int irq_enabled; /**< True if irq handler is enabled */ 718 __volatile__ long context_flag; /**< Context swapping flag */ 719 __volatile__ long interrupt_flag; /**< Interruption handler flag */ ··· 730 /** \name VBLANK IRQ support */ 731 /*@{ */ 732 733 + /* 734 + * At load time, disabling the vblank interrupt won't be allowed since 735 + * old clients may not call the modeset ioctl and therefore misbehave. 736 + * Once the modeset ioctl *has* been called though, we can safely 737 + * disable them when unused. 
738 + */ 739 + int vblank_disable_allowed; 740 + 741 + wait_queue_head_t *vbl_queue; /**< VBLANK wait queue */ 742 + atomic_t *_vblank_count; /**< number of VBLANK interrupts (driver must alloc the right number of counters) */ 743 spinlock_t vbl_lock; 744 + struct list_head *vbl_sigs; /**< signal list to send on VBLANK */ 745 + atomic_t vbl_signal_pending; /* number of signals pending on all crtcs*/ 746 + atomic_t *vblank_refcount; /* number of users of vblank interruptsper crtc */ 747 + u32 *last_vblank; /* protected by dev->vbl_lock, used */ 748 + /* for wraparound handling */ 749 + int *vblank_enabled; /* so we don't call enable more than 750 + once per disable */ 751 + int *vblank_inmodeset; /* Display driver is setting mode */ 752 + struct timer_list vblank_disable_timer; 753 + 754 + u32 max_vblank_count; /**< size of vblank counter register */ 755 spinlock_t tasklet_lock; /**< For drm_locked_tasklet */ 756 void (*locked_tasklet_func)(struct drm_device *dev); 757 ··· 757 struct pci_controller *hose; 758 #endif 759 struct drm_sg_mem *sg; /**< Scatter gather memory */ 760 + int num_crtcs; /**< Number of CRTCs on this device */ 761 void *dev_private; /**< device private data */ 762 struct drm_sigdata sigdata; /**< For block_all_signals */ 763 sigset_t sigmask; ··· 771 spinlock_t drw_lock; 772 struct idr drw_idr; 773 /*@} */ 774 + 775 + /** \name GEM information */ 776 + /*@{ */ 777 + spinlock_t object_name_lock; 778 + struct idr object_name_idr; 779 + atomic_t object_count; 780 + atomic_t object_memory; 781 + atomic_t pin_count; 782 + atomic_t pin_memory; 783 + atomic_t gtt_count; 784 + atomic_t gtt_memory; 785 + uint32_t gtt_total; 786 + uint32_t invalidate_domains; /* domains pending invalidation */ 787 + uint32_t flush_domains; /* domains pending flush */ 788 + /*@} */ 789 + 790 }; 791 + 792 + static inline int drm_dev_to_irq(struct drm_device *dev) 793 + { 794 + return dev->pdev->irq; 795 + } 796 797 static __inline__ int drm_core_check_feature(struct drm_device *dev, 798 int feature) ··· 867 extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); 868 extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); 869 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); 870 + extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, 871 + struct page **pages, 872 + unsigned long num_pages, 873 + uint32_t gtt_offset, 874 + uint32_t type); 875 extern int drm_unbind_agp(DRM_AGP_MEM * handle); 876 877 /* Misc. 
IOCTL support (drm_ioctl.h) */ ··· 929 extern int drm_authmagic(struct drm_device *dev, void *data, 930 struct drm_file *file_priv); 931 932 + /* Cache management (drm_cache.c) */ 933 + void drm_clflush_pages(struct page *pages[], unsigned long num_pages); 934 + 935 /* Locking IOCTL support (drm_lock.h) */ 936 extern int drm_lock(struct drm_device *dev, void *data, 937 struct drm_file *file_priv); ··· 985 extern int drm_control(struct drm_device *dev, void *data, 986 struct drm_file *file_priv); 987 extern irqreturn_t drm_irq_handler(DRM_IRQ_ARGS); 988 + extern int drm_irq_install(struct drm_device *dev); 989 extern int drm_irq_uninstall(struct drm_device *dev); 990 extern void drm_driver_irq_preinstall(struct drm_device *dev); 991 extern void drm_driver_irq_postinstall(struct drm_device *dev); 992 extern void drm_driver_irq_uninstall(struct drm_device *dev); 993 994 + extern int drm_vblank_init(struct drm_device *dev, int num_crtcs); 995 extern int drm_wait_vblank(struct drm_device *dev, void *data, 996 + struct drm_file *filp); 997 extern int drm_vblank_wait(struct drm_device *dev, unsigned int *vbl_seq); 998 + extern void drm_locked_tasklet(struct drm_device *dev, 999 + void(*func)(struct drm_device *)); 1000 + extern u32 drm_vblank_count(struct drm_device *dev, int crtc); 1001 + extern void drm_handle_vblank(struct drm_device *dev, int crtc); 1002 + extern int drm_vblank_get(struct drm_device *dev, int crtc); 1003 + extern void drm_vblank_put(struct drm_device *dev, int crtc); 1004 + /* Modesetting support */ 1005 + extern int drm_modeset_ctl(struct drm_device *dev, void *data, 1006 + struct drm_file *file_priv); 1007 extern void drm_locked_tasklet(struct drm_device *dev, void(*func)(struct drm_device*)); 1008 1009 /* AGP/GART support (drm_agpsupport.h) */ ··· 1026 extern int drm_agp_free_memory(DRM_AGP_MEM * handle); 1027 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); 1028 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); 1029 + extern void drm_agp_chipset_flush(struct drm_device *dev); 1030 1031 /* Stub support (drm_stub.h) */ 1032 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, ··· 1087 extern unsigned long drm_mm_tail_space(struct drm_mm *mm); 1088 extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); 1089 extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); 1090 + 1091 + /* Graphics Execution Manager library functions (drm_gem.c) */ 1092 + int drm_gem_init(struct drm_device *dev); 1093 + void drm_gem_object_free(struct kref *kref); 1094 + struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, 1095 + size_t size); 1096 + void drm_gem_object_handle_free(struct kref *kref); 1097 + 1098 + static inline void 1099 + drm_gem_object_reference(struct drm_gem_object *obj) 1100 + { 1101 + kref_get(&obj->refcount); 1102 + } 1103 + 1104 + static inline void 1105 + drm_gem_object_unreference(struct drm_gem_object *obj) 1106 + { 1107 + if (obj == NULL) 1108 + return; 1109 + 1110 + kref_put(&obj->refcount, drm_gem_object_free); 1111 + } 1112 + 1113 + int drm_gem_handle_create(struct drm_file *file_priv, 1114 + struct drm_gem_object *obj, 1115 + int *handlep); 1116 + 1117 + static inline void 1118 + drm_gem_object_handle_reference(struct drm_gem_object *obj) 1119 + { 1120 + drm_gem_object_reference(obj); 1121 + kref_get(&obj->handlecount); 1122 + } 1123 + 1124 + static inline void 1125 + drm_gem_object_handle_unreference(struct drm_gem_object *obj) 1126 + { 1127 + if (obj == NULL) 
1128 + return; 1129 + 1130 + /* 1131 + * Must bump handle count first as this may be the last 1132 + * ref, in which case the object would disappear before we 1133 + * checked for a name 1134 + */ 1135 + kref_put(&obj->handlecount, drm_gem_object_handle_free); 1136 + drm_gem_object_unreference(obj); 1137 + } 1138 + 1139 + struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, 1140 + struct drm_file *filp, 1141 + int handle); 1142 + int drm_gem_close_ioctl(struct drm_device *dev, void *data, 1143 + struct drm_file *file_priv); 1144 + int drm_gem_flink_ioctl(struct drm_device *dev, void *data, 1145 + struct drm_file *file_priv); 1146 + int drm_gem_open_ioctl(struct drm_device *dev, void *data, 1147 + struct drm_file *file_priv); 1148 + void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); 1149 + void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); 1150 1151 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); 1152 extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
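The drmP.h changes above replace the single vbl_queue/vbl_received pair with per-CRTC, refcounted vblank state, alongside the GEM object and its kref helpers. A short sketch of the calling pattern the refcounted API expects from a driver, assuming drm_vblank_init() was called at postinstall time; the helper below is illustrative, not part of the patch:

	/* Illustrative only: take a vblank reference, wait for the counter to
	 * advance, then drop the reference so the core may disable the IRQ. */
	static int example_wait_one_vblank(struct drm_device *dev, int crtc)
	{
		u32 start;
		int ret;

		ret = drm_vblank_get(dev, crtc);	/* enables the interrupt if needed */
		if (ret)
			return ret;

		start = drm_vblank_count(dev, crtc);

		/* ... sleep until drm_vblank_count(dev, crtc) moves past start,
		 * e.g. with DRM_WAIT_ON() on the CRTC's wait queue ... */

		drm_vblank_put(dev, crtc);		/* lets the disable timer kick in */
		return 0;
	}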
+30 -24
include/drm/drm_pciids.h
··· 84 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 85 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 86 {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 87 - {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 88 - {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 89 - {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 90 - {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 91 - {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 92 - {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 93 - {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 94 - {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 95 - {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 96 - {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 97 - {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 98 - {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 99 {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 100 {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 101 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ··· 113 {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 114 {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 115 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 116 - {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ 117 - {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ 118 {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 119 {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 120 {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ··· 124 {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 125 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ 126 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ 127 - {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 128 - {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 129 - {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 130 - {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 131 - {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 132 - {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 133 - {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 134 - {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 135 - {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 136 - {0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R420|RADEON_NEW_MEMMAP}, \ 137 {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 138 {0x1002, 0x5e4a, PCI_ANY_ID, 
PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 139 {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ··· 239 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 240 {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 241 {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 242 {0, 0, 0} 243 244 #define r128_PCI_IDS \
··· 84 {0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 85 {0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 86 {0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 87 + {0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 88 + {0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 89 + {0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 90 + {0x1002, 0x554B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 91 + {0x1002, 0x554C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 92 + {0x1002, 0x554D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 93 + {0x1002, 0x554E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 94 + {0x1002, 0x554F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 95 + {0x1002, 0x5550, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 96 + {0x1002, 0x5551, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 97 + {0x1002, 0x5552, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 98 + {0x1002, 0x5554, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 99 {0x1002, 0x564A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 100 {0x1002, 0x564B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 101 {0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ ··· 113 {0x1002, 0x5964, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 114 {0x1002, 0x5965, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280}, \ 115 {0x1002, 0x5969, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV100}, \ 116 + {0x1002, 0x5a41, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ 117 + {0x1002, 0x5a42, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ 118 + {0x1002, 0x5a61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_IGPGART}, \ 119 + {0x1002, 0x5a62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS400|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \ 120 {0x1002, 0x5b60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 121 {0x1002, 0x5b62, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 122 {0x1002, 0x5b63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ ··· 122 {0x1002, 0x5b65, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \ 123 {0x1002, 0x5c61, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ 124 {0x1002, 0x5c63, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \ 125 + {0x1002, 0x5d48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 126 + {0x1002, 0x5d49, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 127 + {0x1002, 0x5d4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 128 + {0x1002, 0x5d4c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 129 + {0x1002, 0x5d4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 130 + {0x1002, 0x5d4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 131 + {0x1002, 0x5d4f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 132 + {0x1002, 0x5d50, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 133 + {0x1002, 0x5d52, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 134 + 
{0x1002, 0x5d57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \ 135 {0x1002, 0x5e48, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 136 {0x1002, 0x5e4a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ 137 {0x1002, 0x5e4b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \ ··· 237 {0x1002, 0x7835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 238 {0x1002, 0x791e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 239 {0x1002, 0x791f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS690|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 240 + {0x1002, 0x796c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 241 + {0x1002, 0x796d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 242 + {0x1002, 0x796e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 243 + {0x1002, 0x796f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS740|RADEON_IS_IGP|RADEON_NEW_MEMMAP|RADEON_IS_IGPGART}, \ 244 {0, 0, 0} 245 246 #define r128_PCI_IDS \
+333
include/drm/i915_drm.h
··· 143 #define DRM_I915_GET_VBLANK_PIPE 0x0e 144 #define DRM_I915_VBLANK_SWAP 0x0f 145 #define DRM_I915_HWS_ADDR 0x11 146 147 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 148 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) ··· 176 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 177 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 178 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 179 180 /* Allow drivers to submit batchbuffers directly to hardware, relying 181 * on the security mechanisms provided by hardware. ··· 230 #define I915_PARAM_IRQ_ACTIVE 1 231 #define I915_PARAM_ALLOW_BATCHBUFFER 2 232 #define I915_PARAM_LAST_DISPATCH 3 233 234 typedef struct drm_i915_getparam { 235 int param; ··· 298 typedef struct drm_i915_hws_addr { 299 uint64_t addr; 300 } drm_i915_hws_addr_t; 301 302 #endif /* _I915_DRM_H_ */
··· 143 #define DRM_I915_GET_VBLANK_PIPE 0x0e 144 #define DRM_I915_VBLANK_SWAP 0x0f 145 #define DRM_I915_HWS_ADDR 0x11 146 + #define DRM_I915_GEM_INIT 0x13 147 + #define DRM_I915_GEM_EXECBUFFER 0x14 148 + #define DRM_I915_GEM_PIN 0x15 149 + #define DRM_I915_GEM_UNPIN 0x16 150 + #define DRM_I915_GEM_BUSY 0x17 151 + #define DRM_I915_GEM_THROTTLE 0x18 152 + #define DRM_I915_GEM_ENTERVT 0x19 153 + #define DRM_I915_GEM_LEAVEVT 0x1a 154 + #define DRM_I915_GEM_CREATE 0x1b 155 + #define DRM_I915_GEM_PREAD 0x1c 156 + #define DRM_I915_GEM_PWRITE 0x1d 157 + #define DRM_I915_GEM_MMAP 0x1e 158 + #define DRM_I915_GEM_SET_DOMAIN 0x1f 159 + #define DRM_I915_GEM_SW_FINISH 0x20 160 + #define DRM_I915_GEM_SET_TILING 0x21 161 + #define DRM_I915_GEM_GET_TILING 0x22 162 163 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 164 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) ··· 160 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 161 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 162 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 163 + #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 164 + #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 165 + #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 166 + #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) 167 + #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) 168 + #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) 169 + #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) 170 + #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) 171 + #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) 172 + #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) 173 + #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) 174 + #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) 175 + #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 176 + #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 177 178 /* Allow drivers to submit batchbuffers directly to hardware, relying 179 * on the security mechanisms provided by hardware. ··· 200 #define I915_PARAM_IRQ_ACTIVE 1 201 #define I915_PARAM_ALLOW_BATCHBUFFER 2 202 #define I915_PARAM_LAST_DISPATCH 3 203 + #define I915_PARAM_CHIPSET_ID 4 204 + #define I915_PARAM_HAS_GEM 5 205 206 typedef struct drm_i915_getparam { 207 int param; ··· 266 typedef struct drm_i915_hws_addr { 267 uint64_t addr; 268 } drm_i915_hws_addr_t; 269 + 270 + struct drm_i915_gem_init { 271 + /** 272 + * Beginning offset in the GTT to be managed by the DRM memory 273 + * manager. 
274 + */ 275 + uint64_t gtt_start; 276 + /** 277 + * Ending offset in the GTT to be managed by the DRM memory 278 + * manager. 279 + */ 280 + uint64_t gtt_end; 281 + }; 282 + 283 + struct drm_i915_gem_create { 284 + /** 285 + * Requested size for the object. 286 + * 287 + * The (page-aligned) allocated size for the object will be returned. 288 + */ 289 + uint64_t size; 290 + /** 291 + * Returned handle for the object. 292 + * 293 + * Object handles are nonzero. 294 + */ 295 + uint32_t handle; 296 + uint32_t pad; 297 + }; 298 + 299 + struct drm_i915_gem_pread { 300 + /** Handle for the object being read. */ 301 + uint32_t handle; 302 + uint32_t pad; 303 + /** Offset into the object to read from */ 304 + uint64_t offset; 305 + /** Length of data to read */ 306 + uint64_t size; 307 + /** 308 + * Pointer to write the data into. 309 + * 310 + * This is a fixed-size type for 32/64 compatibility. 311 + */ 312 + uint64_t data_ptr; 313 + }; 314 + 315 + struct drm_i915_gem_pwrite { 316 + /** Handle for the object being written to. */ 317 + uint32_t handle; 318 + uint32_t pad; 319 + /** Offset into the object to write to */ 320 + uint64_t offset; 321 + /** Length of data to write */ 322 + uint64_t size; 323 + /** 324 + * Pointer to read the data from. 325 + * 326 + * This is a fixed-size type for 32/64 compatibility. 327 + */ 328 + uint64_t data_ptr; 329 + }; 330 + 331 + struct drm_i915_gem_mmap { 332 + /** Handle for the object being mapped. */ 333 + uint32_t handle; 334 + uint32_t pad; 335 + /** Offset in the object to map. */ 336 + uint64_t offset; 337 + /** 338 + * Length of data to map. 339 + * 340 + * The value will be page-aligned. 341 + */ 342 + uint64_t size; 343 + /** 344 + * Returned pointer the data was mapped at. 345 + * 346 + * This is a fixed-size type for 32/64 compatibility. 347 + */ 348 + uint64_t addr_ptr; 349 + }; 350 + 351 + struct drm_i915_gem_set_domain { 352 + /** Handle for the object */ 353 + uint32_t handle; 354 + 355 + /** New read domains */ 356 + uint32_t read_domains; 357 + 358 + /** New write domain */ 359 + uint32_t write_domain; 360 + }; 361 + 362 + struct drm_i915_gem_sw_finish { 363 + /** Handle for the object */ 364 + uint32_t handle; 365 + }; 366 + 367 + struct drm_i915_gem_relocation_entry { 368 + /** 369 + * Handle of the buffer being pointed to by this relocation entry. 370 + * 371 + * It's appealing to make this be an index into the mm_validate_entry 372 + * list to refer to the buffer, but this allows the driver to create 373 + * a relocation list for state buffers and not re-write it per 374 + * exec using the buffer. 375 + */ 376 + uint32_t target_handle; 377 + 378 + /** 379 + * Value to be added to the offset of the target buffer to make up 380 + * the relocation entry. 381 + */ 382 + uint32_t delta; 383 + 384 + /** Offset in the buffer the relocation entry will be written into */ 385 + uint64_t offset; 386 + 387 + /** 388 + * Offset value of the target buffer that the relocation entry was last 389 + * written as. 390 + * 391 + * If the buffer has the same offset as last time, we can skip syncing 392 + * and writing the relocation. This value is written back out by 393 + * the execbuffer ioctl when the relocation is written. 394 + */ 395 + uint64_t presumed_offset; 396 + 397 + /** 398 + * Target memory domains read by this operation. 399 + */ 400 + uint32_t read_domains; 401 + 402 + /** 403 + * Target memory domains written by this operation. 
404 + * 405 + * Note that only one domain may be written by the whole 406 + * execbuffer operation, so that where there are conflicts, 407 + * the application will get -EINVAL back. 408 + */ 409 + uint32_t write_domain; 410 + }; 411 + 412 + /** @{ 413 + * Intel memory domains 414 + * 415 + * Most of these just align with the various caches in 416 + * the system and are used to flush and invalidate as 417 + * objects end up cached in different domains. 418 + */ 419 + /** CPU cache */ 420 + #define I915_GEM_DOMAIN_CPU 0x00000001 421 + /** Render cache, used by 2D and 3D drawing */ 422 + #define I915_GEM_DOMAIN_RENDER 0x00000002 423 + /** Sampler cache, used by texture engine */ 424 + #define I915_GEM_DOMAIN_SAMPLER 0x00000004 425 + /** Command queue, used to load batch buffers */ 426 + #define I915_GEM_DOMAIN_COMMAND 0x00000008 427 + /** Instruction cache, used by shader programs */ 428 + #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 429 + /** Vertex address cache */ 430 + #define I915_GEM_DOMAIN_VERTEX 0x00000020 431 + /** GTT domain - aperture and scanout */ 432 + #define I915_GEM_DOMAIN_GTT 0x00000040 433 + /** @} */ 434 + 435 + struct drm_i915_gem_exec_object { 436 + /** 437 + * User's handle for a buffer to be bound into the GTT for this 438 + * operation. 439 + */ 440 + uint32_t handle; 441 + 442 + /** Number of relocations to be performed on this buffer */ 443 + uint32_t relocation_count; 444 + /** 445 + * Pointer to array of struct drm_i915_gem_relocation_entry containing 446 + * the relocations to be performed in this buffer. 447 + */ 448 + uint64_t relocs_ptr; 449 + 450 + /** Required alignment in graphics aperture */ 451 + uint64_t alignment; 452 + 453 + /** 454 + * Returned value of the updated offset of the object, for future 455 + * presumed_offset writes. 456 + */ 457 + uint64_t offset; 458 + }; 459 + 460 + struct drm_i915_gem_execbuffer { 461 + /** 462 + * List of buffers to be validated with their relocations to be 463 + * performend on them. 464 + * 465 + * This is a pointer to an array of struct drm_i915_gem_validate_entry. 466 + * 467 + * These buffers must be listed in an order such that all relocations 468 + * a buffer is performing refer to buffers that have already appeared 469 + * in the validate list. 470 + */ 471 + uint64_t buffers_ptr; 472 + uint32_t buffer_count; 473 + 474 + /** Offset in the batchbuffer to start execution from. */ 475 + uint32_t batch_start_offset; 476 + /** Bytes used in batchbuffer from batch_start_offset */ 477 + uint32_t batch_len; 478 + uint32_t DR1; 479 + uint32_t DR4; 480 + uint32_t num_cliprects; 481 + /** This is a struct drm_clip_rect *cliprects */ 482 + uint64_t cliprects_ptr; 483 + }; 484 + 485 + struct drm_i915_gem_pin { 486 + /** Handle of the buffer to be pinned. */ 487 + uint32_t handle; 488 + uint32_t pad; 489 + 490 + /** alignment required within the aperture */ 491 + uint64_t alignment; 492 + 493 + /** Returned GTT offset of the buffer. */ 494 + uint64_t offset; 495 + }; 496 + 497 + struct drm_i915_gem_unpin { 498 + /** Handle of the buffer to be unpinned. 
*/ 499 + uint32_t handle; 500 + uint32_t pad; 501 + }; 502 + 503 + struct drm_i915_gem_busy { 504 + /** Handle of the buffer to check for busy */ 505 + uint32_t handle; 506 + 507 + /** Return busy status (1 if busy, 0 if idle) */ 508 + uint32_t busy; 509 + }; 510 + 511 + #define I915_TILING_NONE 0 512 + #define I915_TILING_X 1 513 + #define I915_TILING_Y 2 514 + 515 + #define I915_BIT_6_SWIZZLE_NONE 0 516 + #define I915_BIT_6_SWIZZLE_9 1 517 + #define I915_BIT_6_SWIZZLE_9_10 2 518 + #define I915_BIT_6_SWIZZLE_9_11 3 519 + #define I915_BIT_6_SWIZZLE_9_10_11 4 520 + /* Not seen by userland */ 521 + #define I915_BIT_6_SWIZZLE_UNKNOWN 5 522 + 523 + struct drm_i915_gem_set_tiling { 524 + /** Handle of the buffer to have its tiling state updated */ 525 + uint32_t handle; 526 + 527 + /** 528 + * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 529 + * I915_TILING_Y). 530 + * 531 + * This value is to be set on request, and will be updated by the 532 + * kernel on successful return with the actual chosen tiling layout. 533 + * 534 + * The tiling mode may be demoted to I915_TILING_NONE when the system 535 + * has bit 6 swizzling that can't be managed correctly by GEM. 536 + * 537 + * Buffer contents become undefined when changing tiling_mode. 538 + */ 539 + uint32_t tiling_mode; 540 + 541 + /** 542 + * Stride in bytes for the object when in I915_TILING_X or 543 + * I915_TILING_Y. 544 + */ 545 + uint32_t stride; 546 + 547 + /** 548 + * Returned address bit 6 swizzling required for CPU access through 549 + * mmap mapping. 550 + */ 551 + uint32_t swizzle_mode; 552 + }; 553 + 554 + struct drm_i915_gem_get_tiling { 555 + /** Handle of the buffer to get tiling state for. */ 556 + uint32_t handle; 557 + 558 + /** 559 + * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 560 + * I915_TILING_Y). 561 + */ 562 + uint32_t tiling_mode; 563 + 564 + /** 565 + * Returned address bit 6 swizzling required for CPU access through 566 + * mmap mapping. 567 + */ 568 + uint32_t swizzle_mode; 569 + }; 570 571 #endif /* _I915_DRM_H_ */
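Taken together, these additions define the userspace-visible GEM ABI: object creation, pread/pwrite, mmap, domain management, tiling control, and the execbuffer/relocation machinery. As a rough illustration, the fragment below exercises the create, pwrite and set_domain ioctls from userspace. It assumes the header above is installed as <drm/i915_drm.h> alongside <drm/drm.h>, that /dev/dri/card0 is an i915 device whose file descriptor is permitted to issue these ioctls, and it trims error handling to perror(); treat it as a sketch rather than a reference client.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/drm.h>
#include <drm/i915_drm.h>   /* assumed install path for the header shown above */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);   /* assumed i915 device node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Allocate a 4 KiB GEM object; the kernel returns a nonzero handle. */
	struct drm_i915_gem_create create = { .size = 4096 };
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) < 0) {
		perror("GEM_CREATE");
		return 1;
	}

	/* Copy some bytes into the object through the pwrite path. */
	char data[64] = "hello gem";
	struct drm_i915_gem_pwrite pwrite = {
		.handle   = create.handle,
		.offset   = 0,
		.size     = sizeof(data),
		.data_ptr = (uint64_t)(uintptr_t)data,
	};
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite) < 0)
		perror("GEM_PWRITE");

	/* Move the object to the CPU read domain before mapping or reading it back. */
	struct drm_i915_gem_set_domain sd = {
		.handle       = create.handle,
		.read_domains = I915_GEM_DOMAIN_CPU,
		.write_domain = 0,
	};
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd) < 0)
		perror("GEM_SET_DOMAIN");

	close(fd);
	return 0;
}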
+1
mm/shmem.c
··· 2580 shmem_unacct_size(flags, size); 2581 return ERR_PTR(error); 2582 } 2583 2584 /** 2585 * shmem_zero_setup - setup a shared anonymous mapping
··· 2580 shmem_unacct_size(flags, size); 2581 return ERR_PTR(error); 2582 } 2583 + EXPORT_SYMBOL_GPL(shmem_file_setup); 2584 2585 /** 2586 * shmem_zero_setup - setup a shared anonymous mapping
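This export is what lets the new drm_gem.c (added to the DRM build above) back each GEM object with an anonymous shmem file, so object pages sit in the page cache and remain swappable instead of being permanently pinned. A rough sketch of that usage pattern follows; the struct is a trimmed-down, hypothetical stand-in for the real GEM object, and the header location and signature of shmem_file_setup() are as recalled for kernels of this era, so check them against your tree.

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/mm.h>      /* shmem_file_setup() was declared here at the time */

/* Hypothetical, trimmed-down object; the real struct drm_gem_object carries more state. */
struct demo_gem_object {
	struct file *filp;     /* shmem file providing the backing store */
	size_t size;
};

static int demo_gem_object_init(struct demo_gem_object *obj, size_t size)
{
	/* GEM object sizes are page-aligned, matching the create ioctl's contract. */
	obj->size = PAGE_ALIGN(size);

	/* Anonymous shmem file: pages are allocated lazily and can be swapped out. */
	obj->filp = shmem_file_setup("drm mm object", obj->size, 0);
	if (IS_ERR(obj->filp))
		return PTR_ERR(obj->filp);

	return 0;
}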