Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

drm: Add GEM ("graphics execution manager") to i915 driver.

GEM allows the creation of persistent buffer objects accessible by the
graphics device through new ioctls for managing execution of commands on the
device. The userland API is almost entirely driver-specific to ensure that
any driver building on this model can easily map the interface to individual
driver requirements.

GEM is used by the 2d driver for managing its internal state allocations and
will be used for pixmap storage to reduce memory consumption and enable
zero-copy GLX_EXT_texture_from_pixmap, and in the 3d driver is used to enable
GL_EXT_framebuffer_object and GL_ARB_pixel_buffer_object.

Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>

Authored by Eric Anholt, committed by Dave Airlie
commit 673a394b (parent d1d8c925)

Total diffstat: +4832 -62
+3 -2
drivers/gpu/drm/Makefile
···
 
 ccflags-y := -Iinclude/drm
 
-drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
-	drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
+drm-y := drm_auth.o drm_bufs.o drm_cache.o \
+	drm_context.o drm_dma.o drm_drawable.o \
+	drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
 	drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 	drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 	drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o
+50 -1
drivers/gpu/drm/drm_agpsupport.c
··· 33 33 34 34 #include "drmP.h" 35 35 #include <linux/module.h> 36 + #include <asm/agp.h> 36 37 37 38 #if __OS_HAS_AGP 38 39 ··· 453 452 return agp_unbind_memory(handle); 454 453 } 455 454 456 - #endif /* __OS_HAS_AGP */ 455 + /** 456 + * Binds a collection of pages into AGP memory at the given offset, returning 457 + * the AGP memory structure containing them. 458 + * 459 + * No reference is held on the pages during this time -- it is up to the 460 + * caller to handle that. 461 + */ 462 + DRM_AGP_MEM * 463 + drm_agp_bind_pages(struct drm_device *dev, 464 + struct page **pages, 465 + unsigned long num_pages, 466 + uint32_t gtt_offset) 467 + { 468 + DRM_AGP_MEM *mem; 469 + int ret, i; 470 + 471 + DRM_DEBUG("\n"); 472 + 473 + mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages, 474 + AGP_USER_MEMORY); 475 + if (mem == NULL) { 476 + DRM_ERROR("Failed to allocate memory for %ld pages\n", 477 + num_pages); 478 + return NULL; 479 + } 480 + 481 + for (i = 0; i < num_pages; i++) 482 + mem->memory[i] = phys_to_gart(page_to_phys(pages[i])); 483 + mem->page_count = num_pages; 484 + 485 + mem->is_flushed = true; 486 + ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE); 487 + if (ret != 0) { 488 + DRM_ERROR("Failed to bind AGP memory: %d\n", ret); 489 + agp_free_memory(mem); 490 + return NULL; 491 + } 492 + 493 + return mem; 494 + } 495 + EXPORT_SYMBOL(drm_agp_bind_pages); 496 + 497 + void drm_agp_chipset_flush(struct drm_device *dev) 498 + { 499 + agp_flush_chipset(dev->agp->bridge); 500 + } 501 + EXPORT_SYMBOL(drm_agp_chipset_flush); 502 + 503 + #endif /* __OS_HAS_AGP */
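For context: drm_agp_bind_pages() is the piece a GEM driver uses to insert an object's already-pinned pages into the AGP/GTT aperture, and drm_agp_chipset_flush() is the companion helper the i915 code calls after CPU cache flushes to push pending writes out to memory. The sketch below is illustrative only; example_bind_pages_to_gtt() is a hypothetical name, the obj_priv fields are the struct drm_i915_gem_object members introduced later in this patch, and error handling is abbreviated.

#include "drmP.h"
#include "i915_drv.h"

/* Hypothetical helper, modeled on the i915 GTT-binding path added by this
 * patch: hand the object's page array to the AGP layer at the GTT offset
 * that was allocated for it. */
static int example_bind_pages_to_gtt(struct drm_device *dev,
				     struct drm_i915_gem_object *obj_priv,
				     unsigned long num_pages)
{
	obj_priv->agp_mem = drm_agp_bind_pages(dev,
					       obj_priv->page_list,
					       num_pages,
					       obj_priv->gtt_offset);
	if (obj_priv->agp_mem == NULL)
		return -ENOMEM;	/* pages stay pinned; caller cleans up */

	return 0;
}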
+76
drivers/gpu/drm/drm_cache.c
··· 1 + /************************************************************************** 2 + * 3 + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA 4 + * All Rights Reserved. 5 + * 6 + * Permission is hereby granted, free of charge, to any person obtaining a 7 + * copy of this software and associated documentation files (the 8 + * "Software"), to deal in the Software without restriction, including 9 + * without limitation the rights to use, copy, modify, merge, publish, 10 + * distribute, sub license, and/or sell copies of the Software, and to 11 + * permit persons to whom the Software is furnished to do so, subject to 12 + * the following conditions: 13 + * 14 + * The above copyright notice and this permission notice (including the 15 + * next paragraph) shall be included in all copies or substantial portions 16 + * of the Software. 17 + * 18 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 19 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 20 + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL 21 + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, 22 + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 23 + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE 24 + * USE OR OTHER DEALINGS IN THE SOFTWARE. 25 + * 26 + **************************************************************************/ 27 + /* 28 + * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> 29 + */ 30 + 31 + #include "drmP.h" 32 + 33 + #if defined(CONFIG_X86) 34 + static void 35 + drm_clflush_page(struct page *page) 36 + { 37 + uint8_t *page_virtual; 38 + unsigned int i; 39 + 40 + if (unlikely(page == NULL)) 41 + return; 42 + 43 + page_virtual = kmap_atomic(page, KM_USER0); 44 + for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) 45 + clflush(page_virtual + i); 46 + kunmap_atomic(page_virtual, KM_USER0); 47 + } 48 + #endif 49 + 50 + static void 51 + drm_clflush_ipi_handler(void *null) 52 + { 53 + wbinvd(); 54 + } 55 + 56 + void 57 + drm_clflush_pages(struct page *pages[], unsigned long num_pages) 58 + { 59 + 60 + #if defined(CONFIG_X86) 61 + if (cpu_has_clflush) { 62 + unsigned long i; 63 + 64 + mb(); 65 + for (i = 0; i < num_pages; ++i) 66 + drm_clflush_page(*pages++); 67 + mb(); 68 + 69 + return; 70 + } 71 + #endif 72 + 73 + if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0) 74 + DRM_ERROR("Timed out waiting for cache flush.\n"); 75 + } 76 + EXPORT_SYMBOL(drm_clflush_pages);
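drm_clflush_pages() is the CPU side of GEM's coherency handling: after the CPU has written an object through its shmem pages, the dirty cachelines must be flushed before the GPU reads them, falling back to a wbinvd IPI on CPUs without clflush. The i915 code added later in this patch wraps it roughly as follows; this is a simplified sketch, using the obj_priv fields from the i915 changes below.

#include "drmP.h"
#include "i915_drv.h"

/* Simplified sketch of the i915 wrapper around drm_clflush_pages(). */
void i915_gem_clflush_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv = obj->driver_private;

	/* No page list means the object has no pages we have touched,
	 * so there is nothing cached to flush. */
	if (obj_priv->page_list == NULL)
		return;

	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
}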
+4
drivers/gpu/drm/drm_drv.c
···
 	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
 };
 
 #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls )
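The three ioctls registered above are the entire driver-independent userland surface: close a handle, name an object (flink), and open an object by name. As an illustration only (not part of the patch), the sketch below shows how a client could share a buffer between two DRM file descriptors; the struct drm_gem_flink and struct drm_gem_open definitions come from the include/drm/drm.h changes in this commit, which are not shown in this excerpt, and the header path depends on how libdrm installs them.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* path depends on the libdrm installation */

/* Illustrative only: publish a global name for a GEM handle on one fd and
 * open it on another, roughly as a server/client pair would. */
static int gem_share(int exporter_fd, uint32_t handle,
		     int importer_fd, uint32_t *out_handle, uint64_t *out_size)
{
	struct drm_gem_flink flink;
	struct drm_gem_open open_arg;

	memset(&flink, 0, sizeof(flink));
	flink.handle = handle;
	if (ioctl(exporter_fd, DRM_IOCTL_GEM_FLINK, &flink) < 0)
		return -1;	/* kernel fills in flink.name on success */

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = flink.name;
	if (ioctl(importer_fd, DRM_IOCTL_GEM_OPEN, &open_arg) < 0)
		return -1;	/* the new handle holds its own reference */

	*out_handle = open_arg.handle;
	*out_size = open_arg.size;
	return 0;
}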
+6
drivers/gpu/drm/drm_fops.c
···
 
 	INIT_LIST_HEAD(&priv->lhead);
 
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_open(dev, priv);
+
 	if (dev->driver->open) {
 		ret = dev->driver->open(dev, priv);
 		if (ret < 0)
···
 	    !dev->driver->reclaim_buffers_locked) {
 		dev->driver->reclaim_buffers(dev, file_priv);
 	}
+
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_release(dev, file_priv);
 
 	drm_fasync(-1, filp, 0);
 
+420
drivers/gpu/drm/drm_gem.c
··· 1 + /* 2 + * Copyright © 2008 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Eric Anholt <eric@anholt.net> 25 + * 26 + */ 27 + 28 + #include <linux/types.h> 29 + #include <linux/slab.h> 30 + #include <linux/mm.h> 31 + #include <linux/uaccess.h> 32 + #include <linux/fs.h> 33 + #include <linux/file.h> 34 + #include <linux/module.h> 35 + #include <linux/mman.h> 36 + #include <linux/pagemap.h> 37 + #include "drmP.h" 38 + 39 + /** @file drm_gem.c 40 + * 41 + * This file provides some of the base ioctls and library routines for 42 + * the graphics memory manager implemented by each device driver. 43 + * 44 + * Because various devices have different requirements in terms of 45 + * synchronization and migration strategies, implementing that is left up to 46 + * the driver, and all that the general API provides should be generic -- 47 + * allocating objects, reading/writing data with the cpu, freeing objects. 48 + * Even there, platform-dependent optimizations for reading/writing data with 49 + * the CPU mean we'll likely hook those out to driver-specific calls. However, 50 + * the DRI2 implementation wants to have at least allocate/mmap be generic. 51 + * 52 + * The goal was to have swap-backed object allocation managed through 53 + * struct file. However, file descriptors as handles to a struct file have 54 + * two major failings: 55 + * - Process limits prevent more than 1024 or so being used at a time by 56 + * default. 57 + * - Inability to allocate high fds will aggravate the X Server's select() 58 + * handling, and likely that of many GL client applications as well. 59 + * 60 + * This led to a plan of using our own integer IDs (called handles, following 61 + * DRM terminology) to mimic fds, and implement the fd syscalls we need as 62 + * ioctls. The objects themselves will still include the struct file so 63 + * that we can transition to fds if the required kernel infrastructure shows 64 + * up at a later date, and as our interface with shmfs for memory allocation. 
65 + */ 66 + 67 + /** 68 + * Initialize the GEM device fields 69 + */ 70 + 71 + int 72 + drm_gem_init(struct drm_device *dev) 73 + { 74 + spin_lock_init(&dev->object_name_lock); 75 + idr_init(&dev->object_name_idr); 76 + atomic_set(&dev->object_count, 0); 77 + atomic_set(&dev->object_memory, 0); 78 + atomic_set(&dev->pin_count, 0); 79 + atomic_set(&dev->pin_memory, 0); 80 + atomic_set(&dev->gtt_count, 0); 81 + atomic_set(&dev->gtt_memory, 0); 82 + return 0; 83 + } 84 + 85 + /** 86 + * Allocate a GEM object of the specified size with shmfs backing store 87 + */ 88 + struct drm_gem_object * 89 + drm_gem_object_alloc(struct drm_device *dev, size_t size) 90 + { 91 + struct drm_gem_object *obj; 92 + 93 + BUG_ON((size & (PAGE_SIZE - 1)) != 0); 94 + 95 + obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); 96 + 97 + obj->dev = dev; 98 + obj->filp = shmem_file_setup("drm mm object", size, 0); 99 + if (IS_ERR(obj->filp)) { 100 + kfree(obj); 101 + return NULL; 102 + } 103 + 104 + kref_init(&obj->refcount); 105 + kref_init(&obj->handlecount); 106 + obj->size = size; 107 + if (dev->driver->gem_init_object != NULL && 108 + dev->driver->gem_init_object(obj) != 0) { 109 + fput(obj->filp); 110 + kfree(obj); 111 + return NULL; 112 + } 113 + atomic_inc(&dev->object_count); 114 + atomic_add(obj->size, &dev->object_memory); 115 + return obj; 116 + } 117 + EXPORT_SYMBOL(drm_gem_object_alloc); 118 + 119 + /** 120 + * Removes the mapping from handle to filp for this object. 121 + */ 122 + static int 123 + drm_gem_handle_delete(struct drm_file *filp, int handle) 124 + { 125 + struct drm_device *dev; 126 + struct drm_gem_object *obj; 127 + 128 + /* This is gross. The idr system doesn't let us try a delete and 129 + * return an error code. It just spews if you fail at deleting. 130 + * So, we have to grab a lock around finding the object and then 131 + * doing the delete on it and dropping the refcount, or the user 132 + * could race us to double-decrement the refcount and cause a 133 + * use-after-free later. Given the frequency of our handle lookups, 134 + * we may want to use ida for number allocation and a hash table 135 + * for the pointers, anyway. 136 + */ 137 + spin_lock(&filp->table_lock); 138 + 139 + /* Check if we currently have a reference on the object */ 140 + obj = idr_find(&filp->object_idr, handle); 141 + if (obj == NULL) { 142 + spin_unlock(&filp->table_lock); 143 + return -EINVAL; 144 + } 145 + dev = obj->dev; 146 + 147 + /* Release reference and decrement refcount. */ 148 + idr_remove(&filp->object_idr, handle); 149 + spin_unlock(&filp->table_lock); 150 + 151 + mutex_lock(&dev->struct_mutex); 152 + drm_gem_object_handle_unreference(obj); 153 + mutex_unlock(&dev->struct_mutex); 154 + 155 + return 0; 156 + } 157 + 158 + /** 159 + * Create a handle for this object. This adds a handle reference 160 + * to the object, which includes a regular reference count. Callers 161 + * will likely want to dereference the object afterwards. 162 + */ 163 + int 164 + drm_gem_handle_create(struct drm_file *file_priv, 165 + struct drm_gem_object *obj, 166 + int *handlep) 167 + { 168 + int ret; 169 + 170 + /* 171 + * Get the user-visible handle using idr. 
172 + */ 173 + again: 174 + /* ensure there is space available to allocate a handle */ 175 + if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) 176 + return -ENOMEM; 177 + 178 + /* do the allocation under our spinlock */ 179 + spin_lock(&file_priv->table_lock); 180 + ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep); 181 + spin_unlock(&file_priv->table_lock); 182 + if (ret == -EAGAIN) 183 + goto again; 184 + 185 + if (ret != 0) 186 + return ret; 187 + 188 + drm_gem_object_handle_reference(obj); 189 + return 0; 190 + } 191 + EXPORT_SYMBOL(drm_gem_handle_create); 192 + 193 + /** Returns a reference to the object named by the handle. */ 194 + struct drm_gem_object * 195 + drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, 196 + int handle) 197 + { 198 + struct drm_gem_object *obj; 199 + 200 + spin_lock(&filp->table_lock); 201 + 202 + /* Check if we currently have a reference on the object */ 203 + obj = idr_find(&filp->object_idr, handle); 204 + if (obj == NULL) { 205 + spin_unlock(&filp->table_lock); 206 + return NULL; 207 + } 208 + 209 + drm_gem_object_reference(obj); 210 + 211 + spin_unlock(&filp->table_lock); 212 + 213 + return obj; 214 + } 215 + EXPORT_SYMBOL(drm_gem_object_lookup); 216 + 217 + /** 218 + * Releases the handle to an mm object. 219 + */ 220 + int 221 + drm_gem_close_ioctl(struct drm_device *dev, void *data, 222 + struct drm_file *file_priv) 223 + { 224 + struct drm_gem_close *args = data; 225 + int ret; 226 + 227 + if (!(dev->driver->driver_features & DRIVER_GEM)) 228 + return -ENODEV; 229 + 230 + ret = drm_gem_handle_delete(file_priv, args->handle); 231 + 232 + return ret; 233 + } 234 + 235 + /** 236 + * Create a global name for an object, returning the name. 237 + * 238 + * Note that the name does not hold a reference; when the object 239 + * is freed, the name goes away. 240 + */ 241 + int 242 + drm_gem_flink_ioctl(struct drm_device *dev, void *data, 243 + struct drm_file *file_priv) 244 + { 245 + struct drm_gem_flink *args = data; 246 + struct drm_gem_object *obj; 247 + int ret; 248 + 249 + if (!(dev->driver->driver_features & DRIVER_GEM)) 250 + return -ENODEV; 251 + 252 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 253 + if (obj == NULL) 254 + return -EINVAL; 255 + 256 + again: 257 + if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) 258 + return -ENOMEM; 259 + 260 + spin_lock(&dev->object_name_lock); 261 + if (obj->name) { 262 + spin_unlock(&dev->object_name_lock); 263 + return -EEXIST; 264 + } 265 + ret = idr_get_new_above(&dev->object_name_idr, obj, 1, 266 + &obj->name); 267 + spin_unlock(&dev->object_name_lock); 268 + if (ret == -EAGAIN) 269 + goto again; 270 + 271 + if (ret != 0) { 272 + mutex_lock(&dev->struct_mutex); 273 + drm_gem_object_unreference(obj); 274 + mutex_unlock(&dev->struct_mutex); 275 + return ret; 276 + } 277 + 278 + /* 279 + * Leave the reference from the lookup around as the 280 + * name table now holds one 281 + */ 282 + args->name = (uint64_t) obj->name; 283 + 284 + return 0; 285 + } 286 + 287 + /** 288 + * Open an object using the global name, returning a handle and the size. 289 + * 290 + * This handle (of course) holds a reference to the object, so the object 291 + * will not go away until the handle is deleted. 
292 + */ 293 + int 294 + drm_gem_open_ioctl(struct drm_device *dev, void *data, 295 + struct drm_file *file_priv) 296 + { 297 + struct drm_gem_open *args = data; 298 + struct drm_gem_object *obj; 299 + int ret; 300 + int handle; 301 + 302 + if (!(dev->driver->driver_features & DRIVER_GEM)) 303 + return -ENODEV; 304 + 305 + spin_lock(&dev->object_name_lock); 306 + obj = idr_find(&dev->object_name_idr, (int) args->name); 307 + if (obj) 308 + drm_gem_object_reference(obj); 309 + spin_unlock(&dev->object_name_lock); 310 + if (!obj) 311 + return -ENOENT; 312 + 313 + ret = drm_gem_handle_create(file_priv, obj, &handle); 314 + mutex_lock(&dev->struct_mutex); 315 + drm_gem_object_unreference(obj); 316 + mutex_unlock(&dev->struct_mutex); 317 + if (ret) 318 + return ret; 319 + 320 + args->handle = handle; 321 + args->size = obj->size; 322 + 323 + return 0; 324 + } 325 + 326 + /** 327 + * Called at device open time, sets up the structure for handling refcounting 328 + * of mm objects. 329 + */ 330 + void 331 + drm_gem_open(struct drm_device *dev, struct drm_file *file_private) 332 + { 333 + idr_init(&file_private->object_idr); 334 + spin_lock_init(&file_private->table_lock); 335 + } 336 + 337 + /** 338 + * Called at device close to release the file's 339 + * handle references on objects. 340 + */ 341 + static int 342 + drm_gem_object_release_handle(int id, void *ptr, void *data) 343 + { 344 + struct drm_gem_object *obj = ptr; 345 + 346 + drm_gem_object_handle_unreference(obj); 347 + 348 + return 0; 349 + } 350 + 351 + /** 352 + * Called at close time when the filp is going away. 353 + * 354 + * Releases any remaining references on objects by this filp. 355 + */ 356 + void 357 + drm_gem_release(struct drm_device *dev, struct drm_file *file_private) 358 + { 359 + mutex_lock(&dev->struct_mutex); 360 + idr_for_each(&file_private->object_idr, 361 + &drm_gem_object_release_handle, NULL); 362 + 363 + idr_destroy(&file_private->object_idr); 364 + mutex_unlock(&dev->struct_mutex); 365 + } 366 + 367 + /** 368 + * Called after the last reference to the object has been lost. 369 + * 370 + * Frees the object 371 + */ 372 + void 373 + drm_gem_object_free(struct kref *kref) 374 + { 375 + struct drm_gem_object *obj = (struct drm_gem_object *) kref; 376 + struct drm_device *dev = obj->dev; 377 + 378 + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 379 + 380 + if (dev->driver->gem_free_object != NULL) 381 + dev->driver->gem_free_object(obj); 382 + 383 + fput(obj->filp); 384 + atomic_dec(&dev->object_count); 385 + atomic_sub(obj->size, &dev->object_memory); 386 + kfree(obj); 387 + } 388 + EXPORT_SYMBOL(drm_gem_object_free); 389 + 390 + /** 391 + * Called after the last handle to the object has been closed 392 + * 393 + * Removes any name for the object. Note that this must be 394 + * called before drm_gem_object_free or we'll be touching 395 + * freed memory 396 + */ 397 + void 398 + drm_gem_object_handle_free(struct kref *kref) 399 + { 400 + struct drm_gem_object *obj = container_of(kref, 401 + struct drm_gem_object, 402 + handlecount); 403 + struct drm_device *dev = obj->dev; 404 + 405 + /* Remove any name for this object */ 406 + spin_lock(&dev->object_name_lock); 407 + if (obj->name) { 408 + idr_remove(&dev->object_name_idr, obj->name); 409 + spin_unlock(&dev->object_name_lock); 410 + /* 411 + * The object name held a reference to this object, drop 412 + * that now. 
413 + */ 414 + drm_gem_object_unreference(obj); 415 + } else 416 + spin_unlock(&dev->object_name_lock); 417 + 418 + } 419 + EXPORT_SYMBOL(drm_gem_object_handle_free); 420 +
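Taken together, drm_gem_object_alloc() and drm_gem_handle_create() are all a driver needs for its own allocation ioctl; once the handle exists, the handle's reference keeps the object alive and the allocator's reference is dropped. The sketch below is hypothetical (struct example_gem_create stands in for a per-driver argument struct with size and handle fields) and mirrors the shape of the i915 create ioctl further down.

/* Hypothetical per-driver create ioctl built on the drm_gem library. */
int example_gem_create_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct example_gem_create *args = data;	/* has 'size' and 'handle' */
	struct drm_gem_object *obj;
	int handle, ret;

	args->size = roundup(args->size, PAGE_SIZE);

	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, &handle);

	/* Drop the allocation's reference; the new handle (if any) now
	 * keeps the object alive. */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}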
+2
drivers/gpu/drm/drm_memory.c
···
 {
 	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
 }
+EXPORT_SYMBOL(drm_free_agp);
 
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
···
 {
 	return drm_agp_unbind_memory(handle);
 }
+EXPORT_SYMBOL(drm_unbind_agp);
 
 #else /* __OS_HAS_AGP */
 static inline void *agp_remap(unsigned long offset, unsigned long size,
+4 -1
drivers/gpu/drm/drm_mm.c
···
 
 	return child;
 }
+EXPORT_SYMBOL(drm_mm_get_block);
 
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
···
 			drm_free(cur, sizeof(*cur), DRM_MEM_MM);
 		}
 	}
+EXPORT_SYMBOL(drm_mm_put_block);
 
 struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 				       unsigned long size,
···
 
 	return (head->next->next == head);
 }
+EXPORT_SYMBOL(drm_mm_search_free);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
···
 
 	return drm_mm_create_tail_node(mm, start, size);
 }
-
+EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {
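These drm_mm exports are what let a GEM driver reuse the existing range allocator for carving space out of the GTT aperture. A minimal sketch of the pattern, assuming the i915 fields introduced later in this patch (mm.gtt_space, gtt_space, gtt_offset); the real binding path also fetches the object's pages and binds them through AGP.

#include "drmP.h"
#include "i915_drv.h"

/* Hypothetical helper showing the drm_mm search/get pattern used for GTT
 * allocation: find a large-enough hole, then claim a block inside it. */
static int example_alloc_gtt_space(drm_i915_private_t *dev_priv,
				   struct drm_i915_gem_object *obj_priv,
				   unsigned long size, unsigned alignment)
{
	struct drm_mm_node *free_space;

	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
					size, alignment, 0);
	if (free_space == NULL)
		return -ENOSPC;

	obj_priv->gtt_space = drm_mm_get_block(free_space, size, alignment);
	if (obj_priv->gtt_space == NULL)
		return -ENOMEM;

	/* The object's GTT offset is simply the start of its block. */
	obj_priv->gtt_offset = obj_priv->gtt_space->start;
	return 0;
}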
+122 -13
drivers/gpu/drm/drm_proc.c
··· 49 49 int request, int *eof, void *data); 50 50 static int drm_bufs_info(char *buf, char **start, off_t offset, 51 51 int request, int *eof, void *data); 52 + static int drm_gem_name_info(char *buf, char **start, off_t offset, 53 + int request, int *eof, void *data); 54 + static int drm_gem_object_info(char *buf, char **start, off_t offset, 55 + int request, int *eof, void *data); 52 56 #if DRM_DEBUG_CODE 53 57 static int drm_vma_info(char *buf, char **start, off_t offset, 54 58 int request, int *eof, void *data); ··· 64 60 static struct drm_proc_list { 65 61 const char *name; /**< file name */ 66 62 int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ 63 + u32 driver_features; /**< Required driver features for this entry */ 67 64 } drm_proc_list[] = { 68 - {"name", drm_name_info}, 69 - {"mem", drm_mem_info}, 70 - {"vm", drm_vm_info}, 71 - {"clients", drm_clients_info}, 72 - {"queues", drm_queues_info}, 73 - {"bufs", drm_bufs_info}, 65 + {"name", drm_name_info, 0}, 66 + {"mem", drm_mem_info, 0}, 67 + {"vm", drm_vm_info, 0}, 68 + {"clients", drm_clients_info, 0}, 69 + {"queues", drm_queues_info, 0}, 70 + {"bufs", drm_bufs_info, 0}, 71 + {"gem_names", drm_gem_name_info, DRIVER_GEM}, 72 + {"gem_objects", drm_gem_object_info, DRIVER_GEM}, 74 73 #if DRM_DEBUG_CODE 75 74 {"vma", drm_vma_info}, 76 75 #endif ··· 97 90 int drm_proc_init(struct drm_minor *minor, int minor_id, 98 91 struct proc_dir_entry *root) 99 92 { 93 + struct drm_device *dev = minor->dev; 100 94 struct proc_dir_entry *ent; 101 - int i, j; 95 + int i, j, ret; 102 96 char name[64]; 103 97 104 98 sprintf(name, "%d", minor_id); ··· 110 102 } 111 103 112 104 for (i = 0; i < DRM_PROC_ENTRIES; i++) { 105 + u32 features = drm_proc_list[i].driver_features; 106 + 107 + if (features != 0 && 108 + (dev->driver->driver_features & features) != features) 109 + continue; 110 + 113 111 ent = create_proc_entry(drm_proc_list[i].name, 114 112 S_IFREG | S_IRUGO, minor->dev_root); 115 113 if (!ent) { 116 114 DRM_ERROR("Cannot create /proc/dri/%s/%s\n", 117 115 name, drm_proc_list[i].name); 118 - for (j = 0; j < i; j++) 119 - remove_proc_entry(drm_proc_list[i].name, 120 - minor->dev_root); 121 - remove_proc_entry(name, root); 122 - minor->dev_root = NULL; 123 - return -1; 116 + ret = -1; 117 + goto fail; 124 118 } 125 119 ent->read_proc = drm_proc_list[i].f; 126 120 ent->data = minor; 127 121 } 128 122 123 + if (dev->driver->proc_init) { 124 + ret = dev->driver->proc_init(minor); 125 + if (ret) { 126 + DRM_ERROR("DRM: Driver failed to initialize " 127 + "/proc/dri.\n"); 128 + goto fail; 129 + } 130 + } 131 + 129 132 return 0; 133 + fail: 134 + 135 + for (j = 0; j < i; j++) 136 + remove_proc_entry(drm_proc_list[i].name, 137 + minor->dev_root); 138 + remove_proc_entry(name, root); 139 + minor->dev_root = NULL; 140 + return ret; 130 141 } 131 142 132 143 /** ··· 160 133 */ 161 134 int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) 162 135 { 136 + struct drm_device *dev = minor->dev; 163 137 int i; 164 138 char name[64]; 165 139 166 140 if (!root || !minor->dev_root) 167 141 return 0; 142 + 143 + if (dev->driver->proc_cleanup) 144 + dev->driver->proc_cleanup(minor); 168 145 169 146 for (i = 0; i < DRM_PROC_ENTRIES; i++) 170 147 remove_proc_entry(drm_proc_list[i].name, minor->dev_root); ··· 509 478 ret = drm__clients_info(buf, start, offset, request, eof, data); 510 479 mutex_unlock(&dev->struct_mutex); 511 480 return ret; 481 + } 482 + 483 + struct drm_gem_name_info_data { 484 + int len; 485 + char *buf; 
486 + int eof; 487 + }; 488 + 489 + static int drm_gem_one_name_info(int id, void *ptr, void *data) 490 + { 491 + struct drm_gem_object *obj = ptr; 492 + struct drm_gem_name_info_data *nid = data; 493 + 494 + DRM_INFO("name %d size %d\n", obj->name, obj->size); 495 + if (nid->eof) 496 + return 0; 497 + 498 + nid->len += sprintf(&nid->buf[nid->len], 499 + "%6d%9d%8d%9d\n", 500 + obj->name, obj->size, 501 + atomic_read(&obj->handlecount.refcount), 502 + atomic_read(&obj->refcount.refcount)); 503 + if (nid->len > DRM_PROC_LIMIT) { 504 + nid->eof = 1; 505 + return 0; 506 + } 507 + return 0; 508 + } 509 + 510 + static int drm_gem_name_info(char *buf, char **start, off_t offset, 511 + int request, int *eof, void *data) 512 + { 513 + struct drm_minor *minor = (struct drm_minor *) data; 514 + struct drm_device *dev = minor->dev; 515 + struct drm_gem_name_info_data nid; 516 + 517 + if (offset > DRM_PROC_LIMIT) { 518 + *eof = 1; 519 + return 0; 520 + } 521 + 522 + nid.len = sprintf(buf, " name size handles refcount\n"); 523 + nid.buf = buf; 524 + nid.eof = 0; 525 + idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid); 526 + 527 + *start = &buf[offset]; 528 + *eof = 0; 529 + if (nid.len > request + offset) 530 + return request; 531 + *eof = 1; 532 + return nid.len - offset; 533 + } 534 + 535 + static int drm_gem_object_info(char *buf, char **start, off_t offset, 536 + int request, int *eof, void *data) 537 + { 538 + struct drm_minor *minor = (struct drm_minor *) data; 539 + struct drm_device *dev = minor->dev; 540 + int len = 0; 541 + 542 + if (offset > DRM_PROC_LIMIT) { 543 + *eof = 1; 544 + return 0; 545 + } 546 + 547 + *start = &buf[offset]; 548 + *eof = 0; 549 + DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count)); 550 + DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory)); 551 + DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count)); 552 + DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory)); 553 + DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory)); 554 + DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total); 555 + if (len > request + offset) 556 + return request; 557 + *eof = 1; 558 + return len - offset; 512 559 } 513 560 514 561 #if DRM_DEBUG_CODE
+10
drivers/gpu/drm/drm_stub.c
···
 		goto error_out_unreg;
 	}
 
+	if (driver->driver_features & DRIVER_GEM) {
+		retcode = drm_gem_init(dev);
+		if (retcode) {
+			DRM_ERROR("Cannot initialize graphics execution "
+				  "manager (GEM)\n");
+			goto error_out_unreg;
+		}
+	}
+
 	return 0;
 
  error_out_unreg:
···
 int drm_put_minor(struct drm_minor **minor_p)
 {
 	struct drm_minor *minor = *minor_p;
+
 	DRM_DEBUG("release secondary minor %d\n", minor->index);
 
 	if (minor->type == DRM_MINOR_LEGACY)
+5 -1
drivers/gpu/drm/i915/Makefile
···
 
 ccflags-y := -Iinclude/drm
 i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
-	  i915_suspend.o
+	  i915_suspend.o \
+	  i915_gem.o \
+	  i915_gem_debug.o \
+	  i915_gem_proc.o \
+	  i915_gem_tiling.o
 
 i915-$(CONFIG_COMPAT) += i915_ioc32.o
 
+76 -18
drivers/gpu/drm/i915/i915_dma.c
··· 170 170 dev_priv->sarea_priv = (drm_i915_sarea_t *) 171 171 ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); 172 172 173 - dev_priv->ring.Start = init->ring_start; 174 - dev_priv->ring.End = init->ring_end; 175 - dev_priv->ring.Size = init->ring_size; 176 - dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; 173 + if (init->ring_size != 0) { 174 + if (dev_priv->ring.ring_obj != NULL) { 175 + i915_dma_cleanup(dev); 176 + DRM_ERROR("Client tried to initialize ringbuffer in " 177 + "GEM mode\n"); 178 + return -EINVAL; 179 + } 177 180 178 - dev_priv->ring.map.offset = init->ring_start; 179 - dev_priv->ring.map.size = init->ring_size; 180 - dev_priv->ring.map.type = 0; 181 - dev_priv->ring.map.flags = 0; 182 - dev_priv->ring.map.mtrr = 0; 181 + dev_priv->ring.Size = init->ring_size; 182 + dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; 183 183 184 - drm_core_ioremap(&dev_priv->ring.map, dev); 184 + dev_priv->ring.map.offset = init->ring_start; 185 + dev_priv->ring.map.size = init->ring_size; 186 + dev_priv->ring.map.type = 0; 187 + dev_priv->ring.map.flags = 0; 188 + dev_priv->ring.map.mtrr = 0; 185 189 186 - if (dev_priv->ring.map.handle == NULL) { 187 - i915_dma_cleanup(dev); 188 - DRM_ERROR("can not ioremap virtual address for" 189 - " ring buffer\n"); 190 - return -ENOMEM; 190 + drm_core_ioremap(&dev_priv->ring.map, dev); 191 + 192 + if (dev_priv->ring.map.handle == NULL) { 193 + i915_dma_cleanup(dev); 194 + DRM_ERROR("can not ioremap virtual address for" 195 + " ring buffer\n"); 196 + return -ENOMEM; 197 + } 191 198 } 192 199 193 200 dev_priv->ring.virtual_start = dev_priv->ring.map.handle; ··· 384 377 return 0; 385 378 } 386 379 387 - static int i915_emit_box(struct drm_device * dev, 388 - struct drm_clip_rect __user * boxes, 389 - int i, int DR1, int DR4) 380 + int 381 + i915_emit_box(struct drm_device *dev, 382 + struct drm_clip_rect __user *boxes, 383 + int i, int DR1, int DR4) 390 384 { 391 385 drm_i915_private_t *dev_priv = dev->dev_private; 392 386 struct drm_clip_rect box; ··· 689 681 case I915_PARAM_LAST_DISPATCH: 690 682 value = READ_BREADCRUMB(dev_priv); 691 683 break; 684 + case I915_PARAM_HAS_GEM: 685 + value = 1; 686 + break; 692 687 default: 693 688 DRM_ERROR("Unknown parameter %d\n", param->param); 694 689 return -EINVAL; ··· 795 784 memset(dev_priv, 0, sizeof(drm_i915_private_t)); 796 785 797 786 dev->dev_private = (void *)dev_priv; 787 + dev_priv->dev = dev; 798 788 799 789 /* Add register map (needed for suspend/resume) */ 800 790 base = drm_get_resource_start(dev, mmio_bar); ··· 804 792 ret = drm_addmap(dev, base, size, _DRM_REGISTERS, 805 793 _DRM_KERNEL | _DRM_DRIVER, 806 794 &dev_priv->mmio_map); 795 + 796 + i915_gem_load(dev); 807 797 808 798 /* Init HWS */ 809 799 if (!I915_NEED_GFX_HWS(dev)) { ··· 852 838 return 0; 853 839 } 854 840 841 + int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) 842 + { 843 + struct drm_i915_file_private *i915_file_priv; 844 + 845 + DRM_DEBUG("\n"); 846 + i915_file_priv = (struct drm_i915_file_private *) 847 + drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); 848 + 849 + if (!i915_file_priv) 850 + return -ENOMEM; 851 + 852 + file_priv->driver_priv = i915_file_priv; 853 + 854 + i915_file_priv->mm.last_gem_seqno = 0; 855 + i915_file_priv->mm.last_gem_throttle_seqno = 0; 856 + 857 + return 0; 858 + } 859 + 855 860 void i915_driver_lastclose(struct drm_device * dev) 856 861 { 857 862 drm_i915_private_t *dev_priv = dev->dev_private; 858 863 859 864 if (!dev_priv) 860 865 return; 866 + 867 + 
i915_gem_lastclose(dev); 861 868 862 869 if (dev_priv->agp_heap) 863 870 i915_mem_takedown(&(dev_priv->agp_heap)); ··· 890 855 { 891 856 drm_i915_private_t *dev_priv = dev->dev_private; 892 857 i915_mem_release(dev, file_priv, dev_priv->agp_heap); 858 + } 859 + 860 + void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) 861 + { 862 + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 863 + 864 + drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES); 893 865 } 894 866 895 867 struct drm_ioctl_desc i915_ioctls[] = { ··· 917 875 DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), 918 876 DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), 919 877 DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), 878 + DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH), 879 + DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), 880 + DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 881 + DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), 882 + DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), 883 + DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH), 884 + DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH), 885 + DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH), 886 + DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0), 887 + DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), 888 + DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), 889 + DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), 890 + DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), 891 + DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), 892 + DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), 893 + DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), 920 894 }; 921 895 922 896 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
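The new I915_PARAM_HAS_GEM getparam value gives userspace a cheap way to probe whether the running kernel supports GEM before trying any of the new ioctls. An illustrative userspace check (not part of the patch; I915_PARAM_HAS_GEM comes from the i915_drm.h changes in this commit, which are not shown in this excerpt):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>	/* path depends on the libdrm installation */

/* Returns nonzero if the kernel's i915 driver advertises GEM support. */
static int i915_has_gem(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_HAS_GEM;
	gp.value = &value;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) < 0)
		return 0;	/* old kernel: getparam fails or returns 0 */

	return value;
}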
+9 -2
drivers/gpu/drm/i915/i915_drv.c
···
 	/* don't use mtrr's here, the Xserver or user space app should
 	 * deal with them for intel hardware.
 	 */
-	.driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP |
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
+	.driver_features =
+	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
 	.load = i915_driver_load,
 	.unload = i915_driver_unload,
+	.open = i915_driver_open,
 	.lastclose = i915_driver_lastclose,
 	.preclose = i915_driver_preclose,
+	.postclose = i915_driver_postclose,
 	.suspend = i915_suspend,
 	.resume = i915_resume,
 	.device_is_agp = i915_driver_device_is_agp,
···
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.get_map_ofs = drm_core_get_map_ofs,
 	.get_reg_ofs = drm_core_get_reg_ofs,
+	.proc_init = i915_gem_proc_init,
+	.proc_cleanup = i915_gem_proc_cleanup,
+	.gem_init_object = i915_gem_init_object,
+	.gem_free_object = i915_gem_free_object,
 	.ioctls = i915_ioctls,
 	.fops = {
 		 .owner = THIS_MODULE,
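The gem_init_object/gem_free_object hooks wired up above are where the driver attaches and tears down its per-object state. Below is a simplified sketch of what the init hook does, modeled on i915_gem_init_object() from the i915_gem.c addition (the real free hook also unbinds the object and releases its page list); example_gem_init_object is a hypothetical name.

#include "drmP.h"
#include "i915_drv.h"

/* Hypothetical init hook, modeled on i915_gem_init_object(): allocate the
 * driver-private tracking structure and link it to the GEM object. */
static int example_gem_init_object(struct drm_gem_object *obj)
{
	struct drm_i915_gem_object *obj_priv;

	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
	if (obj_priv == NULL)
		return -ENOMEM;

	obj->driver_private = obj_priv;
	obj_priv->obj = obj;
	INIT_LIST_HEAD(&obj_priv->list);

	return 0;
}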
+249 -4
drivers/gpu/drm/i915/i915_drv.h
··· 39 39 40 40 #define DRIVER_NAME "i915" 41 41 #define DRIVER_DESC "Intel Graphics" 42 - #define DRIVER_DATE "20060119" 42 + #define DRIVER_DATE "20080730" 43 43 44 44 enum pipe { 45 45 PIPE_A = 0, ··· 60 60 #define DRIVER_MINOR 6 61 61 #define DRIVER_PATCHLEVEL 0 62 62 63 + #define WATCH_COHERENCY 0 64 + #define WATCH_BUF 0 65 + #define WATCH_EXEC 0 66 + #define WATCH_LRU 0 67 + #define WATCH_RELOC 0 68 + #define WATCH_INACTIVE 0 69 + #define WATCH_PWRITE 0 70 + 63 71 typedef struct _drm_i915_ring_buffer { 64 72 int tail_mask; 65 - unsigned long Start; 66 - unsigned long End; 67 73 unsigned long Size; 68 74 u8 *virtual_start; 69 75 int head; 70 76 int tail; 71 77 int space; 72 78 drm_local_map_t map; 79 + struct drm_gem_object *ring_obj; 73 80 } drm_i915_ring_buffer_t; 74 81 75 82 struct mem_block { ··· 108 101 }; 109 102 110 103 typedef struct drm_i915_private { 104 + struct drm_device *dev; 105 + 111 106 drm_local_map_t *sarea; 112 107 drm_local_map_t *mmio_map; 113 108 ··· 122 113 uint32_t counter; 123 114 unsigned int status_gfx_addr; 124 115 drm_local_map_t hws_map; 116 + struct drm_gem_object *hws_obj; 125 117 126 118 unsigned int cpp; 127 119 int back_offset; ··· 132 122 133 123 wait_queue_head_t irq_queue; 134 124 atomic_t irq_received; 135 - atomic_t irq_emitted; 136 125 /** Protects user_irq_refcount and irq_mask_reg */ 137 126 spinlock_t user_irq_lock; 138 127 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */ ··· 239 230 u8 saveDACMASK; 240 231 u8 saveDACDATA[256*3]; /* 256 3-byte colors */ 241 232 u8 saveCR[37]; 233 + 234 + struct { 235 + struct drm_mm gtt_space; 236 + 237 + /** 238 + * List of objects currently involved in rendering from the 239 + * ringbuffer. 240 + * 241 + * A reference is held on the buffer while on this list. 242 + */ 243 + struct list_head active_list; 244 + 245 + /** 246 + * List of objects which are not in the ringbuffer but which 247 + * still have a write_domain which needs to be flushed before 248 + * unbinding. 249 + * 250 + * A reference is held on the buffer while on this list. 251 + */ 252 + struct list_head flushing_list; 253 + 254 + /** 255 + * LRU list of objects which are not in the ringbuffer and 256 + * are ready to unbind, but are still in the GTT. 257 + * 258 + * A reference is not held on the buffer while on this list, 259 + * as merely being GTT-bound shouldn't prevent its being 260 + * freed, and we'll pull it off the list in the free path. 261 + */ 262 + struct list_head inactive_list; 263 + 264 + /** 265 + * List of breadcrumbs associated with GPU requests currently 266 + * outstanding. 267 + */ 268 + struct list_head request_list; 269 + 270 + /** 271 + * We leave the user IRQ off as much as possible, 272 + * but this means that requests will finish and never 273 + * be retired once the system goes idle. Set a timer to 274 + * fire periodically while the ring is running. When it 275 + * fires, go retire requests. 276 + */ 277 + struct delayed_work retire_work; 278 + 279 + uint32_t next_gem_seqno; 280 + 281 + /** 282 + * Waiting sequence number, if any 283 + */ 284 + uint32_t waiting_gem_seqno; 285 + 286 + /** 287 + * Last seq seen at irq time 288 + */ 289 + uint32_t irq_gem_seqno; 290 + 291 + /** 292 + * Flag if the X Server, and thus DRM, is not currently in 293 + * control of the device. 294 + * 295 + * This is set between LeaveVT and EnterVT. It needs to be 296 + * replaced with a semaphore. It also needs to be 297 + * transitioned away from for kernel modesetting. 
298 + */ 299 + int suspended; 300 + 301 + /** 302 + * Flag if the hardware appears to be wedged. 303 + * 304 + * This is set when attempts to idle the device timeout. 305 + * It prevents command submission from occuring and makes 306 + * every pending request fail 307 + */ 308 + int wedged; 309 + 310 + /** Bit 6 swizzling required for X tiling */ 311 + uint32_t bit_6_swizzle_x; 312 + /** Bit 6 swizzling required for Y tiling */ 313 + uint32_t bit_6_swizzle_y; 314 + } mm; 242 315 } drm_i915_private_t; 316 + 317 + /** driver private structure attached to each drm_gem_object */ 318 + struct drm_i915_gem_object { 319 + struct drm_gem_object *obj; 320 + 321 + /** Current space allocated to this object in the GTT, if any. */ 322 + struct drm_mm_node *gtt_space; 323 + 324 + /** This object's place on the active/flushing/inactive lists */ 325 + struct list_head list; 326 + 327 + /** 328 + * This is set if the object is on the active or flushing lists 329 + * (has pending rendering), and is not set if it's on inactive (ready 330 + * to be unbound). 331 + */ 332 + int active; 333 + 334 + /** 335 + * This is set if the object has been written to since last bound 336 + * to the GTT 337 + */ 338 + int dirty; 339 + 340 + /** AGP memory structure for our GTT binding. */ 341 + DRM_AGP_MEM *agp_mem; 342 + 343 + struct page **page_list; 344 + 345 + /** 346 + * Current offset of the object in GTT space. 347 + * 348 + * This is the same as gtt_space->start 349 + */ 350 + uint32_t gtt_offset; 351 + 352 + /** Boolean whether this object has a valid gtt offset. */ 353 + int gtt_bound; 354 + 355 + /** How many users have pinned this object in GTT space */ 356 + int pin_count; 357 + 358 + /** Breadcrumb of last rendering to the buffer. */ 359 + uint32_t last_rendering_seqno; 360 + 361 + /** Current tiling mode for the object. */ 362 + uint32_t tiling_mode; 363 + 364 + /** 365 + * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when 366 + * GEM_DOMAIN_CPU is not in the object's read domain. 367 + */ 368 + uint8_t *page_cpu_valid; 369 + }; 370 + 371 + /** 372 + * Request queue structure. 373 + * 374 + * The request queue allows us to note sequence numbers that have been emitted 375 + * and may be associated with active buffers to be retired. 376 + * 377 + * By keeping this list, we can avoid having to do questionable 378 + * sequence-number comparisons on buffer last_rendering_seqnos, and associate 379 + * an emission time with seqnos for tracking how far ahead of the GPU we are. 380 + */ 381 + struct drm_i915_gem_request { 382 + /** GEM sequence number associated with this request. */ 383 + uint32_t seqno; 384 + 385 + /** Time at which this request was emitted, in jiffies. */ 386 + unsigned long emitted_jiffies; 387 + 388 + /** Cache domains that were flushed at the start of the request. 
*/ 389 + uint32_t flush_domains; 390 + 391 + struct list_head list; 392 + }; 393 + 394 + struct drm_i915_file_private { 395 + struct { 396 + uint32_t last_gem_seqno; 397 + uint32_t last_gem_throttle_seqno; 398 + } mm; 399 + }; 243 400 244 401 extern struct drm_ioctl_desc i915_ioctls[]; 245 402 extern int i915_max_ioctl; ··· 414 239 extern void i915_kernel_lost_context(struct drm_device * dev); 415 240 extern int i915_driver_load(struct drm_device *, unsigned long flags); 416 241 extern int i915_driver_unload(struct drm_device *); 242 + extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); 417 243 extern void i915_driver_lastclose(struct drm_device * dev); 418 244 extern void i915_driver_preclose(struct drm_device *dev, 419 245 struct drm_file *file_priv); 246 + extern void i915_driver_postclose(struct drm_device *dev, 247 + struct drm_file *file_priv); 420 248 extern int i915_driver_device_is_agp(struct drm_device * dev); 421 249 extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, 422 250 unsigned long arg); 251 + extern int i915_emit_box(struct drm_device *dev, 252 + struct drm_clip_rect __user *boxes, 253 + int i, int DR1, int DR4); 423 254 424 255 /* i915_irq.c */ 425 256 extern int i915_irq_emit(struct drm_device *dev, void *data, 426 257 struct drm_file *file_priv); 427 258 extern int i915_irq_wait(struct drm_device *dev, void *data, 428 259 struct drm_file *file_priv); 260 + void i915_user_irq_get(struct drm_device *dev); 261 + void i915_user_irq_put(struct drm_device *dev); 429 262 430 263 extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); 431 264 extern void i915_driver_irq_preinstall(struct drm_device * dev); ··· 462 279 extern void i915_mem_takedown(struct mem_block **heap); 463 280 extern void i915_mem_release(struct drm_device * dev, 464 281 struct drm_file *file_priv, struct mem_block *heap); 282 + /* i915_gem.c */ 283 + int i915_gem_init_ioctl(struct drm_device *dev, void *data, 284 + struct drm_file *file_priv); 285 + int i915_gem_create_ioctl(struct drm_device *dev, void *data, 286 + struct drm_file *file_priv); 287 + int i915_gem_pread_ioctl(struct drm_device *dev, void *data, 288 + struct drm_file *file_priv); 289 + int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 290 + struct drm_file *file_priv); 291 + int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 292 + struct drm_file *file_priv); 293 + int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 294 + struct drm_file *file_priv); 295 + int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 296 + struct drm_file *file_priv); 297 + int i915_gem_execbuffer(struct drm_device *dev, void *data, 298 + struct drm_file *file_priv); 299 + int i915_gem_pin_ioctl(struct drm_device *dev, void *data, 300 + struct drm_file *file_priv); 301 + int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 302 + struct drm_file *file_priv); 303 + int i915_gem_busy_ioctl(struct drm_device *dev, void *data, 304 + struct drm_file *file_priv); 305 + int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 306 + struct drm_file *file_priv); 307 + int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 308 + struct drm_file *file_priv); 309 + int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 310 + struct drm_file *file_priv); 311 + int i915_gem_set_tiling(struct drm_device *dev, void *data, 312 + struct drm_file *file_priv); 313 + int i915_gem_get_tiling(struct drm_device *dev, void *data, 314 + struct drm_file *file_priv); 315 
+ void i915_gem_load(struct drm_device *dev); 316 + int i915_gem_proc_init(struct drm_minor *minor); 317 + void i915_gem_proc_cleanup(struct drm_minor *minor); 318 + int i915_gem_init_object(struct drm_gem_object *obj); 319 + void i915_gem_free_object(struct drm_gem_object *obj); 320 + int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 321 + void i915_gem_object_unpin(struct drm_gem_object *obj); 322 + void i915_gem_lastclose(struct drm_device *dev); 323 + uint32_t i915_get_gem_seqno(struct drm_device *dev); 324 + void i915_gem_retire_requests(struct drm_device *dev); 325 + void i915_gem_retire_work_handler(struct work_struct *work); 326 + void i915_gem_clflush_object(struct drm_gem_object *obj); 327 + 328 + /* i915_gem_tiling.c */ 329 + void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); 330 + 331 + /* i915_gem_debug.c */ 332 + void i915_gem_dump_object(struct drm_gem_object *obj, int len, 333 + const char *where, uint32_t mark); 334 + #if WATCH_INACTIVE 335 + void i915_verify_inactive(struct drm_device *dev, char *file, int line); 336 + #else 337 + #define i915_verify_inactive(dev, file, line) 338 + #endif 339 + void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); 340 + void i915_gem_dump_object(struct drm_gem_object *obj, int len, 341 + const char *where, uint32_t mark); 342 + void i915_dump_lru(struct drm_device *dev, const char *where); 465 343 466 344 /* i915_suspend.c */ 467 345 extern int i915_save_state(struct drm_device *dev); ··· 591 347 */ 592 348 #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) 593 349 #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5) 350 + #define I915_GEM_HWS_INDEX 0x10 594 351 595 352 extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); 596 353
+2497
drivers/gpu/drm/i915/i915_gem.c
··· 1 + /* 2 + * Copyright © 2008 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Eric Anholt <eric@anholt.net> 25 + * 26 + */ 27 + 28 + #include "drmP.h" 29 + #include "drm.h" 30 + #include "i915_drm.h" 31 + #include "i915_drv.h" 32 + #include <linux/swap.h> 33 + 34 + static int 35 + i915_gem_object_set_domain(struct drm_gem_object *obj, 36 + uint32_t read_domains, 37 + uint32_t write_domain); 38 + static int 39 + i915_gem_object_set_domain_range(struct drm_gem_object *obj, 40 + uint64_t offset, 41 + uint64_t size, 42 + uint32_t read_domains, 43 + uint32_t write_domain); 44 + static int 45 + i915_gem_set_domain(struct drm_gem_object *obj, 46 + struct drm_file *file_priv, 47 + uint32_t read_domains, 48 + uint32_t write_domain); 49 + static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 50 + static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 51 + static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 52 + 53 + int 54 + i915_gem_init_ioctl(struct drm_device *dev, void *data, 55 + struct drm_file *file_priv) 56 + { 57 + drm_i915_private_t *dev_priv = dev->dev_private; 58 + struct drm_i915_gem_init *args = data; 59 + 60 + mutex_lock(&dev->struct_mutex); 61 + 62 + if (args->gtt_start >= args->gtt_end || 63 + (args->gtt_start & (PAGE_SIZE - 1)) != 0 || 64 + (args->gtt_end & (PAGE_SIZE - 1)) != 0) { 65 + mutex_unlock(&dev->struct_mutex); 66 + return -EINVAL; 67 + } 68 + 69 + drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start, 70 + args->gtt_end - args->gtt_start); 71 + 72 + dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start); 73 + 74 + mutex_unlock(&dev->struct_mutex); 75 + 76 + return 0; 77 + } 78 + 79 + 80 + /** 81 + * Creates a new mm object and returns a handle to it. 
82 + */ 83 + int 84 + i915_gem_create_ioctl(struct drm_device *dev, void *data, 85 + struct drm_file *file_priv) 86 + { 87 + struct drm_i915_gem_create *args = data; 88 + struct drm_gem_object *obj; 89 + int handle, ret; 90 + 91 + args->size = roundup(args->size, PAGE_SIZE); 92 + 93 + /* Allocate the new object */ 94 + obj = drm_gem_object_alloc(dev, args->size); 95 + if (obj == NULL) 96 + return -ENOMEM; 97 + 98 + ret = drm_gem_handle_create(file_priv, obj, &handle); 99 + mutex_lock(&dev->struct_mutex); 100 + drm_gem_object_handle_unreference(obj); 101 + mutex_unlock(&dev->struct_mutex); 102 + 103 + if (ret) 104 + return ret; 105 + 106 + args->handle = handle; 107 + 108 + return 0; 109 + } 110 + 111 + /** 112 + * Reads data from the object referenced by handle. 113 + * 114 + * On error, the contents of *data are undefined. 115 + */ 116 + int 117 + i915_gem_pread_ioctl(struct drm_device *dev, void *data, 118 + struct drm_file *file_priv) 119 + { 120 + struct drm_i915_gem_pread *args = data; 121 + struct drm_gem_object *obj; 122 + struct drm_i915_gem_object *obj_priv; 123 + ssize_t read; 124 + loff_t offset; 125 + int ret; 126 + 127 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 128 + if (obj == NULL) 129 + return -EBADF; 130 + obj_priv = obj->driver_private; 131 + 132 + /* Bounds check source. 133 + * 134 + * XXX: This could use review for overflow issues... 135 + */ 136 + if (args->offset > obj->size || args->size > obj->size || 137 + args->offset + args->size > obj->size) { 138 + drm_gem_object_unreference(obj); 139 + return -EINVAL; 140 + } 141 + 142 + mutex_lock(&dev->struct_mutex); 143 + 144 + ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, 145 + I915_GEM_DOMAIN_CPU, 0); 146 + if (ret != 0) { 147 + drm_gem_object_unreference(obj); 148 + mutex_unlock(&dev->struct_mutex); 149 + } 150 + 151 + offset = args->offset; 152 + 153 + read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr, 154 + args->size, &offset); 155 + if (read != args->size) { 156 + drm_gem_object_unreference(obj); 157 + mutex_unlock(&dev->struct_mutex); 158 + if (read < 0) 159 + return read; 160 + else 161 + return -EINVAL; 162 + } 163 + 164 + drm_gem_object_unreference(obj); 165 + mutex_unlock(&dev->struct_mutex); 166 + 167 + return 0; 168 + } 169 + 170 + static int 171 + i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 172 + struct drm_i915_gem_pwrite *args, 173 + struct drm_file *file_priv) 174 + { 175 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 176 + ssize_t remain; 177 + loff_t offset; 178 + char __user *user_data; 179 + char *vaddr; 180 + int i, o, l; 181 + int ret = 0; 182 + unsigned long pfn; 183 + unsigned long unwritten; 184 + 185 + user_data = (char __user *) (uintptr_t) args->data_ptr; 186 + remain = args->size; 187 + if (!access_ok(VERIFY_READ, user_data, remain)) 188 + return -EFAULT; 189 + 190 + 191 + mutex_lock(&dev->struct_mutex); 192 + ret = i915_gem_object_pin(obj, 0); 193 + if (ret) { 194 + mutex_unlock(&dev->struct_mutex); 195 + return ret; 196 + } 197 + ret = i915_gem_set_domain(obj, file_priv, 198 + I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT); 199 + if (ret) 200 + goto fail; 201 + 202 + obj_priv = obj->driver_private; 203 + offset = obj_priv->gtt_offset + args->offset; 204 + obj_priv->dirty = 1; 205 + 206 + while (remain > 0) { 207 + /* Operation in this page 208 + * 209 + * i = page number 210 + * o = offset within page 211 + * l = bytes to copy 212 + */ 213 + i = offset >> PAGE_SHIFT; 214 + o = offset & 
(PAGE_SIZE-1); 215 + l = remain; 216 + if ((o + l) > PAGE_SIZE) 217 + l = PAGE_SIZE - o; 218 + 219 + pfn = (dev->agp->base >> PAGE_SHIFT) + i; 220 + 221 + #ifdef CONFIG_HIGHMEM 222 + /* kmap_atomic can't map IO pages on non-HIGHMEM kernels 223 + */ 224 + vaddr = kmap_atomic_pfn(pfn, KM_USER0); 225 + #if WATCH_PWRITE 226 + DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n", 227 + i, o, l, pfn, vaddr); 228 + #endif 229 + unwritten = __copy_from_user_inatomic_nocache(vaddr + o, 230 + user_data, l); 231 + kunmap_atomic(vaddr, KM_USER0); 232 + 233 + if (unwritten) 234 + #endif /* CONFIG_HIGHMEM */ 235 + { 236 + vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); 237 + #if WATCH_PWRITE 238 + DRM_INFO("pwrite slow i %d o %d l %d " 239 + "pfn %ld vaddr %p\n", 240 + i, o, l, pfn, vaddr); 241 + #endif 242 + if (vaddr == NULL) { 243 + ret = -EFAULT; 244 + goto fail; 245 + } 246 + unwritten = __copy_from_user(vaddr + o, user_data, l); 247 + #if WATCH_PWRITE 248 + DRM_INFO("unwritten %ld\n", unwritten); 249 + #endif 250 + iounmap(vaddr); 251 + if (unwritten) { 252 + ret = -EFAULT; 253 + goto fail; 254 + } 255 + } 256 + 257 + remain -= l; 258 + user_data += l; 259 + offset += l; 260 + } 261 + #if WATCH_PWRITE && 1 262 + i915_gem_clflush_object(obj); 263 + i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0); 264 + i915_gem_clflush_object(obj); 265 + #endif 266 + 267 + fail: 268 + i915_gem_object_unpin(obj); 269 + mutex_unlock(&dev->struct_mutex); 270 + 271 + return ret; 272 + } 273 + 274 + int 275 + i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 276 + struct drm_i915_gem_pwrite *args, 277 + struct drm_file *file_priv) 278 + { 279 + int ret; 280 + loff_t offset; 281 + ssize_t written; 282 + 283 + mutex_lock(&dev->struct_mutex); 284 + 285 + ret = i915_gem_set_domain(obj, file_priv, 286 + I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU); 287 + if (ret) { 288 + mutex_unlock(&dev->struct_mutex); 289 + return ret; 290 + } 291 + 292 + offset = args->offset; 293 + 294 + written = vfs_write(obj->filp, 295 + (char __user *)(uintptr_t) args->data_ptr, 296 + args->size, &offset); 297 + if (written != args->size) { 298 + mutex_unlock(&dev->struct_mutex); 299 + if (written < 0) 300 + return written; 301 + else 302 + return -EINVAL; 303 + } 304 + 305 + mutex_unlock(&dev->struct_mutex); 306 + 307 + return 0; 308 + } 309 + 310 + /** 311 + * Writes data to the object referenced by handle. 312 + * 313 + * On error, the contents of the buffer that were to be modified are undefined. 314 + */ 315 + int 316 + i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, 317 + struct drm_file *file_priv) 318 + { 319 + struct drm_i915_gem_pwrite *args = data; 320 + struct drm_gem_object *obj; 321 + struct drm_i915_gem_object *obj_priv; 322 + int ret = 0; 323 + 324 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 325 + if (obj == NULL) 326 + return -EBADF; 327 + obj_priv = obj->driver_private; 328 + 329 + /* Bounds check destination. 330 + * 331 + * XXX: This could use review for overflow issues... 332 + */ 333 + if (args->offset > obj->size || args->size > obj->size || 334 + args->offset + args->size > obj->size) { 335 + drm_gem_object_unreference(obj); 336 + return -EINVAL; 337 + } 338 + 339 + /* We can only do the GTT pwrite on untiled buffers, as otherwise 340 + * it would end up going through the fenced access, and we'll get 341 + * different detiling behavior between reading and writing. 
342 + * pread/pwrite currently are reading and writing from the CPU 343 + * perspective, requiring manual detiling by the client. 344 + */ 345 + if (obj_priv->tiling_mode == I915_TILING_NONE && 346 + dev->gtt_total != 0) 347 + ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); 348 + else 349 + ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); 350 + 351 + #if WATCH_PWRITE 352 + if (ret) 353 + DRM_INFO("pwrite failed %d\n", ret); 354 + #endif 355 + 356 + drm_gem_object_unreference(obj); 357 + 358 + return ret; 359 + } 360 + 361 + /** 362 + * Called when user space prepares to use an object 363 + */ 364 + int 365 + i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, 366 + struct drm_file *file_priv) 367 + { 368 + struct drm_i915_gem_set_domain *args = data; 369 + struct drm_gem_object *obj; 370 + int ret; 371 + 372 + if (!(dev->driver->driver_features & DRIVER_GEM)) 373 + return -ENODEV; 374 + 375 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 376 + if (obj == NULL) 377 + return -EBADF; 378 + 379 + mutex_lock(&dev->struct_mutex); 380 + #if WATCH_BUF 381 + DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", 382 + obj, obj->size, args->read_domains, args->write_domain); 383 + #endif 384 + ret = i915_gem_set_domain(obj, file_priv, 385 + args->read_domains, args->write_domain); 386 + drm_gem_object_unreference(obj); 387 + mutex_unlock(&dev->struct_mutex); 388 + return ret; 389 + } 390 + 391 + /** 392 + * Called when user space has done writes to this buffer 393 + */ 394 + int 395 + i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, 396 + struct drm_file *file_priv) 397 + { 398 + struct drm_i915_gem_sw_finish *args = data; 399 + struct drm_gem_object *obj; 400 + struct drm_i915_gem_object *obj_priv; 401 + int ret = 0; 402 + 403 + if (!(dev->driver->driver_features & DRIVER_GEM)) 404 + return -ENODEV; 405 + 406 + mutex_lock(&dev->struct_mutex); 407 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 408 + if (obj == NULL) { 409 + mutex_unlock(&dev->struct_mutex); 410 + return -EBADF; 411 + } 412 + 413 + #if WATCH_BUF 414 + DRM_INFO("%s: sw_finish %d (%p %d)\n", 415 + __func__, args->handle, obj, obj->size); 416 + #endif 417 + obj_priv = obj->driver_private; 418 + 419 + /* Pinned buffers may be scanout, so flush the cache */ 420 + if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { 421 + i915_gem_clflush_object(obj); 422 + drm_agp_chipset_flush(dev); 423 + } 424 + drm_gem_object_unreference(obj); 425 + mutex_unlock(&dev->struct_mutex); 426 + return ret; 427 + } 428 + 429 + /** 430 + * Maps the contents of an object, returning the address it is mapped 431 + * into. 432 + * 433 + * While the mapping holds a reference on the contents of the object, it doesn't 434 + * imply a ref on the object itself. 
435 + */ 436 + int 437 + i915_gem_mmap_ioctl(struct drm_device *dev, void *data, 438 + struct drm_file *file_priv) 439 + { 440 + struct drm_i915_gem_mmap *args = data; 441 + struct drm_gem_object *obj; 442 + loff_t offset; 443 + unsigned long addr; 444 + 445 + if (!(dev->driver->driver_features & DRIVER_GEM)) 446 + return -ENODEV; 447 + 448 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 449 + if (obj == NULL) 450 + return -EBADF; 451 + 452 + offset = args->offset; 453 + 454 + down_write(&current->mm->mmap_sem); 455 + addr = do_mmap(obj->filp, 0, args->size, 456 + PROT_READ | PROT_WRITE, MAP_SHARED, 457 + args->offset); 458 + up_write(&current->mm->mmap_sem); 459 + mutex_lock(&dev->struct_mutex); 460 + drm_gem_object_unreference(obj); 461 + mutex_unlock(&dev->struct_mutex); 462 + if (IS_ERR((void *)addr)) 463 + return addr; 464 + 465 + args->addr_ptr = (uint64_t) addr; 466 + 467 + return 0; 468 + } 469 + 470 + static void 471 + i915_gem_object_free_page_list(struct drm_gem_object *obj) 472 + { 473 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 474 + int page_count = obj->size / PAGE_SIZE; 475 + int i; 476 + 477 + if (obj_priv->page_list == NULL) 478 + return; 479 + 480 + 481 + for (i = 0; i < page_count; i++) 482 + if (obj_priv->page_list[i] != NULL) { 483 + if (obj_priv->dirty) 484 + set_page_dirty(obj_priv->page_list[i]); 485 + mark_page_accessed(obj_priv->page_list[i]); 486 + page_cache_release(obj_priv->page_list[i]); 487 + } 488 + obj_priv->dirty = 0; 489 + 490 + drm_free(obj_priv->page_list, 491 + page_count * sizeof(struct page *), 492 + DRM_MEM_DRIVER); 493 + obj_priv->page_list = NULL; 494 + } 495 + 496 + static void 497 + i915_gem_object_move_to_active(struct drm_gem_object *obj) 498 + { 499 + struct drm_device *dev = obj->dev; 500 + drm_i915_private_t *dev_priv = dev->dev_private; 501 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 502 + 503 + /* Add a reference if we're newly entering the active list. */ 504 + if (!obj_priv->active) { 505 + drm_gem_object_reference(obj); 506 + obj_priv->active = 1; 507 + } 508 + /* Move from whatever list we were on to the tail of execution. */ 509 + list_move_tail(&obj_priv->list, 510 + &dev_priv->mm.active_list); 511 + } 512 + 513 + 514 + static void 515 + i915_gem_object_move_to_inactive(struct drm_gem_object *obj) 516 + { 517 + struct drm_device *dev = obj->dev; 518 + drm_i915_private_t *dev_priv = dev->dev_private; 519 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 520 + 521 + i915_verify_inactive(dev, __FILE__, __LINE__); 522 + if (obj_priv->pin_count != 0) 523 + list_del_init(&obj_priv->list); 524 + else 525 + list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 526 + 527 + if (obj_priv->active) { 528 + obj_priv->active = 0; 529 + drm_gem_object_unreference(obj); 530 + } 531 + i915_verify_inactive(dev, __FILE__, __LINE__); 532 + } 533 + 534 + /** 535 + * Creates a new sequence number, emitting a write of it to the status page 536 + * plus an interrupt, which will trigger i915_user_interrupt_handler. 537 + * 538 + * Must be called with struct_lock held. 539 + * 540 + * Returned sequence numbers are nonzero on success. 
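The mmap ioctl above returns a CPU pointer to the object's shmem pages rather than a mapping of the GTT aperture. A userspace sketch is below; the DRM_IOCTL_I915_GEM_MMAP request number is assumed from this patch's UAPI header, and the field names match the args used in i915_gem_mmap_ioctl().

/* Illustrative userspace sketch (not part of this patch). */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static void *map_object(int fd, uint32_t handle, uint64_t size)
{
        struct drm_i915_gem_mmap mmap_arg;

        memset(&mmap_arg, 0, sizeof(mmap_arg));
        mmap_arg.handle = handle;
        mmap_arg.offset = 0;       /* map from the start of the object */
        mmap_arg.size = size;

        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
                return NULL;

        /* CPU pointer to the shmem pages; move the object to the CPU domain
         * (set_domain ioctl) before reading or writing through it. */
        return (void *)(uintptr_t)mmap_arg.addr_ptr;
}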
541 + */ 542 + static uint32_t 543 + i915_add_request(struct drm_device *dev, uint32_t flush_domains) 544 + { 545 + drm_i915_private_t *dev_priv = dev->dev_private; 546 + struct drm_i915_gem_request *request; 547 + uint32_t seqno; 548 + int was_empty; 549 + RING_LOCALS; 550 + 551 + request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); 552 + if (request == NULL) 553 + return 0; 554 + 555 + /* Grab the seqno we're going to make this request be, and bump the 556 + * next (skipping 0 so it can be the reserved no-seqno value). 557 + */ 558 + seqno = dev_priv->mm.next_gem_seqno; 559 + dev_priv->mm.next_gem_seqno++; 560 + if (dev_priv->mm.next_gem_seqno == 0) 561 + dev_priv->mm.next_gem_seqno++; 562 + 563 + BEGIN_LP_RING(4); 564 + OUT_RING(MI_STORE_DWORD_INDEX); 565 + OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); 566 + OUT_RING(seqno); 567 + 568 + OUT_RING(MI_USER_INTERRUPT); 569 + ADVANCE_LP_RING(); 570 + 571 + DRM_DEBUG("%d\n", seqno); 572 + 573 + request->seqno = seqno; 574 + request->emitted_jiffies = jiffies; 575 + request->flush_domains = flush_domains; 576 + was_empty = list_empty(&dev_priv->mm.request_list); 577 + list_add_tail(&request->list, &dev_priv->mm.request_list); 578 + 579 + if (was_empty) 580 + schedule_delayed_work(&dev_priv->mm.retire_work, HZ); 581 + return seqno; 582 + } 583 + 584 + /** 585 + * Command execution barrier 586 + * 587 + * Ensures that all commands in the ring are finished 588 + * before signalling the CPU 589 + */ 590 + uint32_t 591 + i915_retire_commands(struct drm_device *dev) 592 + { 593 + drm_i915_private_t *dev_priv = dev->dev_private; 594 + uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 595 + uint32_t flush_domains = 0; 596 + RING_LOCALS; 597 + 598 + /* The sampler always gets flushed on i965 (sigh) */ 599 + if (IS_I965G(dev)) 600 + flush_domains |= I915_GEM_DOMAIN_SAMPLER; 601 + BEGIN_LP_RING(2); 602 + OUT_RING(cmd); 603 + OUT_RING(0); /* noop */ 604 + ADVANCE_LP_RING(); 605 + return flush_domains; 606 + } 607 + 608 + /** 609 + * Moves buffers associated only with the given active seqno from the active 610 + * to inactive list, potentially freeing them. 611 + */ 612 + static void 613 + i915_gem_retire_request(struct drm_device *dev, 614 + struct drm_i915_gem_request *request) 615 + { 616 + drm_i915_private_t *dev_priv = dev->dev_private; 617 + 618 + /* Move any buffers on the active list that are no longer referenced 619 + * by the ringbuffer to the flushing/inactive lists as appropriate. 620 + */ 621 + while (!list_empty(&dev_priv->mm.active_list)) { 622 + struct drm_gem_object *obj; 623 + struct drm_i915_gem_object *obj_priv; 624 + 625 + obj_priv = list_first_entry(&dev_priv->mm.active_list, 626 + struct drm_i915_gem_object, 627 + list); 628 + obj = obj_priv->obj; 629 + 630 + /* If the seqno being retired doesn't match the oldest in the 631 + * list, then the oldest in the list must still be newer than 632 + * this seqno. 
633 + */ 634 + if (obj_priv->last_rendering_seqno != request->seqno) 635 + return; 636 + #if WATCH_LRU 637 + DRM_INFO("%s: retire %d moves to inactive list %p\n", 638 + __func__, request->seqno, obj); 639 + #endif 640 + 641 + if (obj->write_domain != 0) { 642 + list_move_tail(&obj_priv->list, 643 + &dev_priv->mm.flushing_list); 644 + } else { 645 + i915_gem_object_move_to_inactive(obj); 646 + } 647 + } 648 + 649 + if (request->flush_domains != 0) { 650 + struct drm_i915_gem_object *obj_priv, *next; 651 + 652 + /* Clear the write domain and activity from any buffers 653 + * that are just waiting for a flush matching the one retired. 654 + */ 655 + list_for_each_entry_safe(obj_priv, next, 656 + &dev_priv->mm.flushing_list, list) { 657 + struct drm_gem_object *obj = obj_priv->obj; 658 + 659 + if (obj->write_domain & request->flush_domains) { 660 + obj->write_domain = 0; 661 + i915_gem_object_move_to_inactive(obj); 662 + } 663 + } 664 + 665 + } 666 + } 667 + 668 + /** 669 + * Returns true if seq1 is later than seq2. 670 + */ 671 + static int 672 + i915_seqno_passed(uint32_t seq1, uint32_t seq2) 673 + { 674 + return (int32_t)(seq1 - seq2) >= 0; 675 + } 676 + 677 + uint32_t 678 + i915_get_gem_seqno(struct drm_device *dev) 679 + { 680 + drm_i915_private_t *dev_priv = dev->dev_private; 681 + 682 + return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); 683 + } 684 + 685 + /** 686 + * This function clears the request list as sequence numbers are passed. 687 + */ 688 + void 689 + i915_gem_retire_requests(struct drm_device *dev) 690 + { 691 + drm_i915_private_t *dev_priv = dev->dev_private; 692 + uint32_t seqno; 693 + 694 + seqno = i915_get_gem_seqno(dev); 695 + 696 + while (!list_empty(&dev_priv->mm.request_list)) { 697 + struct drm_i915_gem_request *request; 698 + uint32_t retiring_seqno; 699 + 700 + request = list_first_entry(&dev_priv->mm.request_list, 701 + struct drm_i915_gem_request, 702 + list); 703 + retiring_seqno = request->seqno; 704 + 705 + if (i915_seqno_passed(seqno, retiring_seqno) || 706 + dev_priv->mm.wedged) { 707 + i915_gem_retire_request(dev, request); 708 + 709 + list_del(&request->list); 710 + drm_free(request, sizeof(*request), DRM_MEM_DRIVER); 711 + } else 712 + break; 713 + } 714 + } 715 + 716 + void 717 + i915_gem_retire_work_handler(struct work_struct *work) 718 + { 719 + drm_i915_private_t *dev_priv; 720 + struct drm_device *dev; 721 + 722 + dev_priv = container_of(work, drm_i915_private_t, 723 + mm.retire_work.work); 724 + dev = dev_priv->dev; 725 + 726 + mutex_lock(&dev->struct_mutex); 727 + i915_gem_retire_requests(dev); 728 + if (!list_empty(&dev_priv->mm.request_list)) 729 + schedule_delayed_work(&dev_priv->mm.retire_work, HZ); 730 + mutex_unlock(&dev->struct_mutex); 731 + } 732 + 733 + /** 734 + * Waits for a sequence number to be signaled, and cleans up the 735 + * request and object lists appropriately for that event. 
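The signed-difference comparison in i915_seqno_passed() is what keeps retirement working across 32-bit sequence-number wraparound: a straight ">=" would break once the counter wraps, while the signed difference stays correct as long as the two values are less than 2^31 apart. A small standalone demonstration (illustrative only, not part of the patch):

#include <assert.h>
#include <stdint.h>

static int seqno_passed(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) >= 0;
}

int main(void)
{
        assert(seqno_passed(100, 99));            /* ordinary case */
        assert(!seqno_passed(99, 100));
        /* Around wraparound: 3 is "later" than 0xfffffffe even though it is
         * numerically smaller, because the counter has wrapped in between. */
        assert(seqno_passed(3, 0xfffffffeu));
        assert(!seqno_passed(0xfffffffeu, 3));
        return 0;
}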
736 + */ 737 + int 738 + i915_wait_request(struct drm_device *dev, uint32_t seqno) 739 + { 740 + drm_i915_private_t *dev_priv = dev->dev_private; 741 + int ret = 0; 742 + 743 + BUG_ON(seqno == 0); 744 + 745 + if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 746 + dev_priv->mm.waiting_gem_seqno = seqno; 747 + i915_user_irq_get(dev); 748 + ret = wait_event_interruptible(dev_priv->irq_queue, 749 + i915_seqno_passed(i915_get_gem_seqno(dev), 750 + seqno) || 751 + dev_priv->mm.wedged); 752 + i915_user_irq_put(dev); 753 + dev_priv->mm.waiting_gem_seqno = 0; 754 + } 755 + if (dev_priv->mm.wedged) 756 + ret = -EIO; 757 + 758 + if (ret && ret != -ERESTARTSYS) 759 + DRM_ERROR("%s returns %d (awaiting %d at %d)\n", 760 + __func__, ret, seqno, i915_get_gem_seqno(dev)); 761 + 762 + /* Directly dispatch request retiring. While we have the work queue 763 + * to handle this, the waiter on a request often wants an associated 764 + * buffer to have made it to the inactive list, and we would need 765 + * a separate wait queue to handle that. 766 + */ 767 + if (ret == 0) 768 + i915_gem_retire_requests(dev); 769 + 770 + return ret; 771 + } 772 + 773 + static void 774 + i915_gem_flush(struct drm_device *dev, 775 + uint32_t invalidate_domains, 776 + uint32_t flush_domains) 777 + { 778 + drm_i915_private_t *dev_priv = dev->dev_private; 779 + uint32_t cmd; 780 + RING_LOCALS; 781 + 782 + #if WATCH_EXEC 783 + DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, 784 + invalidate_domains, flush_domains); 785 + #endif 786 + 787 + if (flush_domains & I915_GEM_DOMAIN_CPU) 788 + drm_agp_chipset_flush(dev); 789 + 790 + if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU | 791 + I915_GEM_DOMAIN_GTT)) { 792 + /* 793 + * read/write caches: 794 + * 795 + * I915_GEM_DOMAIN_RENDER is always invalidated, but is 796 + * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is 797 + * also flushed at 2d versus 3d pipeline switches. 798 + * 799 + * read-only caches: 800 + * 801 + * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if 802 + * MI_READ_FLUSH is set, and is always flushed on 965. 803 + * 804 + * I915_GEM_DOMAIN_COMMAND may not exist? 805 + * 806 + * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is 807 + * invalidated when MI_EXE_FLUSH is set. 808 + * 809 + * I915_GEM_DOMAIN_VERTEX, which exists on 965, is 810 + * invalidated with every MI_FLUSH. 811 + * 812 + * TLBs: 813 + * 814 + * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND 815 + * and I915_GEM_DOMAIN_CPU in are invalidated at PTE write and 816 + * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER 817 + * are flushed at any MI_FLUSH. 818 + */ 819 + 820 + cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; 821 + if ((invalidate_domains|flush_domains) & 822 + I915_GEM_DOMAIN_RENDER) 823 + cmd &= ~MI_NO_WRITE_FLUSH; 824 + if (!IS_I965G(dev)) { 825 + /* 826 + * On the 965, the sampler cache always gets flushed 827 + * and this bit is reserved. 828 + */ 829 + if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) 830 + cmd |= MI_READ_FLUSH; 831 + } 832 + if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) 833 + cmd |= MI_EXE_FLUSH; 834 + 835 + #if WATCH_EXEC 836 + DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); 837 + #endif 838 + BEGIN_LP_RING(2); 839 + OUT_RING(cmd); 840 + OUT_RING(0); /* noop */ 841 + ADVANCE_LP_RING(); 842 + } 843 + } 844 + 845 + /** 846 + * Ensures that all rendering to the object has completed and the object is 847 + * safe to unbind from the GTT or access from the CPU. 
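The long comment above describes which caches each flush variant touches; i915_gem_flush() then folds that into a single command dword. The fragment below restates that derivation as a pure function for pre-965 parts, reusing the domain and MI_* names from the function above (a sketch assuming the driver's register definitions, not standalone code and not part of the patch):

static uint32_t flush_cmd_pre_965(uint32_t invalidate_domains,
                                  uint32_t flush_domains)
{
        uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;

        /* Touching the render cache means the write flush may not be skipped. */
        if ((invalidate_domains | flush_domains) & I915_GEM_DOMAIN_RENDER)
                cmd &= ~MI_NO_WRITE_FLUSH;
        /* Invalidating the sampler (texture) cache needs the read flush bit. */
        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                cmd |= MI_READ_FLUSH;
        /* Invalidating the instruction/state cache needs the exe flush bit. */
        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;

        return cmd;
}

/* Examples:
 *   flush_cmd_pre_965(I915_GEM_DOMAIN_SAMPLER, I915_GEM_DOMAIN_RENDER)
 *     -> MI_FLUSH | MI_READ_FLUSH                   (write flush enabled)
 *   flush_cmd_pre_965(I915_GEM_DOMAIN_INSTRUCTION, 0)
 *     -> MI_FLUSH | MI_NO_WRITE_FLUSH | MI_EXE_FLUSH
 * CPU-domain flushes never reach the ring; they only need
 * drm_agp_chipset_flush(), as in the outer check of i915_gem_flush().
 */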
848 + */ 849 + static int 850 + i915_gem_object_wait_rendering(struct drm_gem_object *obj) 851 + { 852 + struct drm_device *dev = obj->dev; 853 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 854 + int ret; 855 + 856 + /* If there are writes queued to the buffer, flush and 857 + * create a new seqno to wait for. 858 + */ 859 + if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { 860 + uint32_t write_domain = obj->write_domain; 861 + #if WATCH_BUF 862 + DRM_INFO("%s: flushing object %p from write domain %08x\n", 863 + __func__, obj, write_domain); 864 + #endif 865 + i915_gem_flush(dev, 0, write_domain); 866 + 867 + i915_gem_object_move_to_active(obj); 868 + obj_priv->last_rendering_seqno = i915_add_request(dev, 869 + write_domain); 870 + BUG_ON(obj_priv->last_rendering_seqno == 0); 871 + #if WATCH_LRU 872 + DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj); 873 + #endif 874 + } 875 + 876 + /* If there is rendering queued on the buffer being evicted, wait for 877 + * it. 878 + */ 879 + if (obj_priv->active) { 880 + #if WATCH_BUF 881 + DRM_INFO("%s: object %p wait for seqno %08x\n", 882 + __func__, obj, obj_priv->last_rendering_seqno); 883 + #endif 884 + ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); 885 + if (ret != 0) 886 + return ret; 887 + } 888 + 889 + return 0; 890 + } 891 + 892 + /** 893 + * Unbinds an object from the GTT aperture. 894 + */ 895 + static int 896 + i915_gem_object_unbind(struct drm_gem_object *obj) 897 + { 898 + struct drm_device *dev = obj->dev; 899 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 900 + int ret = 0; 901 + 902 + #if WATCH_BUF 903 + DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj); 904 + DRM_INFO("gtt_space %p\n", obj_priv->gtt_space); 905 + #endif 906 + if (obj_priv->gtt_space == NULL) 907 + return 0; 908 + 909 + if (obj_priv->pin_count != 0) { 910 + DRM_ERROR("Attempting to unbind pinned buffer\n"); 911 + return -EINVAL; 912 + } 913 + 914 + /* Wait for any rendering to complete 915 + */ 916 + ret = i915_gem_object_wait_rendering(obj); 917 + if (ret) { 918 + DRM_ERROR("wait_rendering failed: %d\n", ret); 919 + return ret; 920 + } 921 + 922 + /* Move the object to the CPU domain to ensure that 923 + * any possible CPU writes while it's not in the GTT 924 + * are flushed when we go to remap it. This will 925 + * also ensure that all pending GPU writes are finished 926 + * before we unbind. 927 + */ 928 + ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, 929 + I915_GEM_DOMAIN_CPU); 930 + if (ret) { 931 + DRM_ERROR("set_domain failed: %d\n", ret); 932 + return ret; 933 + } 934 + 935 + if (obj_priv->agp_mem != NULL) { 936 + drm_unbind_agp(obj_priv->agp_mem); 937 + drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); 938 + obj_priv->agp_mem = NULL; 939 + } 940 + 941 + BUG_ON(obj_priv->active); 942 + 943 + i915_gem_object_free_page_list(obj); 944 + 945 + if (obj_priv->gtt_space) { 946 + atomic_dec(&dev->gtt_count); 947 + atomic_sub(obj->size, &dev->gtt_memory); 948 + 949 + drm_mm_put_block(obj_priv->gtt_space); 950 + obj_priv->gtt_space = NULL; 951 + } 952 + 953 + /* Remove ourselves from the LRU list if present. 
*/ 954 + if (!list_empty(&obj_priv->list)) 955 + list_del_init(&obj_priv->list); 956 + 957 + return 0; 958 + } 959 + 960 + static int 961 + i915_gem_evict_something(struct drm_device *dev) 962 + { 963 + drm_i915_private_t *dev_priv = dev->dev_private; 964 + struct drm_gem_object *obj; 965 + struct drm_i915_gem_object *obj_priv; 966 + int ret = 0; 967 + 968 + for (;;) { 969 + /* If there's an inactive buffer available now, grab it 970 + * and be done. 971 + */ 972 + if (!list_empty(&dev_priv->mm.inactive_list)) { 973 + obj_priv = list_first_entry(&dev_priv->mm.inactive_list, 974 + struct drm_i915_gem_object, 975 + list); 976 + obj = obj_priv->obj; 977 + BUG_ON(obj_priv->pin_count != 0); 978 + #if WATCH_LRU 979 + DRM_INFO("%s: evicting %p\n", __func__, obj); 980 + #endif 981 + BUG_ON(obj_priv->active); 982 + 983 + /* Wait on the rendering and unbind the buffer. */ 984 + ret = i915_gem_object_unbind(obj); 985 + break; 986 + } 987 + 988 + /* If we didn't get anything, but the ring is still processing 989 + * things, wait for one of those things to finish and hopefully 990 + * leave us a buffer to evict. 991 + */ 992 + if (!list_empty(&dev_priv->mm.request_list)) { 993 + struct drm_i915_gem_request *request; 994 + 995 + request = list_first_entry(&dev_priv->mm.request_list, 996 + struct drm_i915_gem_request, 997 + list); 998 + 999 + ret = i915_wait_request(dev, request->seqno); 1000 + if (ret) 1001 + break; 1002 + 1003 + /* if waiting caused an object to become inactive, 1004 + * then loop around and wait for it. Otherwise, we 1005 + * assume that waiting freed and unbound something, 1006 + * so there should now be some space in the GTT 1007 + */ 1008 + if (!list_empty(&dev_priv->mm.inactive_list)) 1009 + continue; 1010 + break; 1011 + } 1012 + 1013 + /* If we didn't have anything on the request list but there 1014 + * are buffers awaiting a flush, emit one and try again. 1015 + * When we wait on it, those buffers waiting for that flush 1016 + * will get moved to inactive. 1017 + */ 1018 + if (!list_empty(&dev_priv->mm.flushing_list)) { 1019 + obj_priv = list_first_entry(&dev_priv->mm.flushing_list, 1020 + struct drm_i915_gem_object, 1021 + list); 1022 + obj = obj_priv->obj; 1023 + 1024 + i915_gem_flush(dev, 1025 + obj->write_domain, 1026 + obj->write_domain); 1027 + i915_add_request(dev, obj->write_domain); 1028 + 1029 + obj = NULL; 1030 + continue; 1031 + } 1032 + 1033 + DRM_ERROR("inactive empty %d request empty %d " 1034 + "flushing empty %d\n", 1035 + list_empty(&dev_priv->mm.inactive_list), 1036 + list_empty(&dev_priv->mm.request_list), 1037 + list_empty(&dev_priv->mm.flushing_list)); 1038 + /* If we didn't do any of the above, there's nothing to be done 1039 + * and we just can't fit it in. 1040 + */ 1041 + return -ENOMEM; 1042 + } 1043 + return ret; 1044 + } 1045 + 1046 + static int 1047 + i915_gem_object_get_page_list(struct drm_gem_object *obj) 1048 + { 1049 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1050 + int page_count, i; 1051 + struct address_space *mapping; 1052 + struct inode *inode; 1053 + struct page *page; 1054 + int ret; 1055 + 1056 + if (obj_priv->page_list) 1057 + return 0; 1058 + 1059 + /* Get the list of pages out of our struct file. They'll be pinned 1060 + * at this point until we release them. 
1061 + */ 1062 + page_count = obj->size / PAGE_SIZE; 1063 + BUG_ON(obj_priv->page_list != NULL); 1064 + obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), 1065 + DRM_MEM_DRIVER); 1066 + if (obj_priv->page_list == NULL) { 1067 + DRM_ERROR("Failed to allocate page list\n"); 1068 + return -ENOMEM; 1069 + } 1070 + 1071 + inode = obj->filp->f_path.dentry->d_inode; 1072 + mapping = inode->i_mapping; 1073 + for (i = 0; i < page_count; i++) { 1074 + page = read_mapping_page(mapping, i, NULL); 1075 + if (IS_ERR(page)) { 1076 + ret = PTR_ERR(page); 1077 + DRM_ERROR("read_mapping_page failed: %d\n", ret); 1078 + i915_gem_object_free_page_list(obj); 1079 + return ret; 1080 + } 1081 + obj_priv->page_list[i] = page; 1082 + } 1083 + return 0; 1084 + } 1085 + 1086 + /** 1087 + * Finds free space in the GTT aperture and binds the object there. 1088 + */ 1089 + static int 1090 + i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) 1091 + { 1092 + struct drm_device *dev = obj->dev; 1093 + drm_i915_private_t *dev_priv = dev->dev_private; 1094 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1095 + struct drm_mm_node *free_space; 1096 + int page_count, ret; 1097 + 1098 + if (alignment == 0) 1099 + alignment = PAGE_SIZE; 1100 + if (alignment & (PAGE_SIZE - 1)) { 1101 + DRM_ERROR("Invalid object alignment requested %u\n", alignment); 1102 + return -EINVAL; 1103 + } 1104 + 1105 + search_free: 1106 + free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, 1107 + obj->size, alignment, 0); 1108 + if (free_space != NULL) { 1109 + obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, 1110 + alignment); 1111 + if (obj_priv->gtt_space != NULL) { 1112 + obj_priv->gtt_space->private = obj; 1113 + obj_priv->gtt_offset = obj_priv->gtt_space->start; 1114 + } 1115 + } 1116 + if (obj_priv->gtt_space == NULL) { 1117 + /* If the gtt is empty and we're still having trouble 1118 + * fitting our object in, we're out of memory. 1119 + */ 1120 + #if WATCH_LRU 1121 + DRM_INFO("%s: GTT full, evicting something\n", __func__); 1122 + #endif 1123 + if (list_empty(&dev_priv->mm.inactive_list) && 1124 + list_empty(&dev_priv->mm.flushing_list) && 1125 + list_empty(&dev_priv->mm.active_list)) { 1126 + DRM_ERROR("GTT full, but LRU list empty\n"); 1127 + return -ENOMEM; 1128 + } 1129 + 1130 + ret = i915_gem_evict_something(dev); 1131 + if (ret != 0) { 1132 + DRM_ERROR("Failed to evict a buffer %d\n", ret); 1133 + return ret; 1134 + } 1135 + goto search_free; 1136 + } 1137 + 1138 + #if WATCH_BUF 1139 + DRM_INFO("Binding object of size %d at 0x%08x\n", 1140 + obj->size, obj_priv->gtt_offset); 1141 + #endif 1142 + ret = i915_gem_object_get_page_list(obj); 1143 + if (ret) { 1144 + drm_mm_put_block(obj_priv->gtt_space); 1145 + obj_priv->gtt_space = NULL; 1146 + return ret; 1147 + } 1148 + 1149 + page_count = obj->size / PAGE_SIZE; 1150 + /* Create an AGP memory structure pointing at our pages, and bind it 1151 + * into the GTT. 1152 + */ 1153 + obj_priv->agp_mem = drm_agp_bind_pages(dev, 1154 + obj_priv->page_list, 1155 + page_count, 1156 + obj_priv->gtt_offset); 1157 + if (obj_priv->agp_mem == NULL) { 1158 + i915_gem_object_free_page_list(obj); 1159 + drm_mm_put_block(obj_priv->gtt_space); 1160 + obj_priv->gtt_space = NULL; 1161 + return -ENOMEM; 1162 + } 1163 + atomic_inc(&dev->gtt_count); 1164 + atomic_add(obj->size, &dev->gtt_memory); 1165 + 1166 + /* Assert that the object is not currently in any GPU domain.
As it 1167 + * wasn't in the GTT, there shouldn't be any way it could have been in 1168 + * a GPU cache 1169 + */ 1170 + BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 1171 + BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 1172 + 1173 + return 0; 1174 + } 1175 + 1176 + void 1177 + i915_gem_clflush_object(struct drm_gem_object *obj) 1178 + { 1179 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1180 + 1181 + /* If we don't have a page list set up, then we're not pinned 1182 + * to GPU, and we can ignore the cache flush because it'll happen 1183 + * again at bind time. 1184 + */ 1185 + if (obj_priv->page_list == NULL) 1186 + return; 1187 + 1188 + drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 1189 + } 1190 + 1191 + /* 1192 + * Set the next domain for the specified object. This 1193 + * may not actually perform the necessary flushing/invalidating though, 1194 + * as that may want to be batched with other set_domain operations 1195 + * 1196 + * This is (we hope) the only really tricky part of gem. The goal 1197 + * is fairly simple -- track which caches hold bits of the object 1198 + * and make sure they remain coherent. A few concrete examples may 1199 + * help to explain how it works. For shorthand, we use the notation 1200 + * (read_domains, write_domain), e.g. (CPU, CPU) to indicate 1201 + * a pair of read and write domain masks. 1202 + * 1203 + * Case 1: the batch buffer 1204 + * 1205 + * 1. Allocated 1206 + * 2. Written by CPU 1207 + * 3. Mapped to GTT 1208 + * 4. Read by GPU 1209 + * 5. Unmapped from GTT 1210 + * 6. Freed 1211 + * 1212 + * Let's take these a step at a time 1213 + * 1214 + * 1. Allocated 1215 + * Pages allocated from the kernel may still have 1216 + * cache contents, so we set them to (CPU, CPU) always. 1217 + * 2. Written by CPU (using pwrite) 1218 + * The pwrite function calls set_domain (CPU, CPU) and 1219 + * this function does nothing (as nothing changes) 1220 + * 3. Mapped to GTT 1221 + * This function asserts that the object is not 1222 + * currently in any GPU-based read or write domains 1223 + * 4. Read by GPU 1224 + * i915_gem_execbuffer calls set_domain (COMMAND, 0). 1225 + * As write_domain is zero, this function adds in the 1226 + * current read domains (CPU+COMMAND, 0). 1227 + * flush_domains is set to CPU. 1228 + * invalidate_domains is set to COMMAND 1229 + * clflush is run to get data out of the CPU caches 1230 + * then i915_gem_dev_set_domain calls i915_gem_flush to 1231 + * emit an MI_FLUSH and drm_agp_chipset_flush 1232 + * 5. Unmapped from GTT 1233 + * i915_gem_object_unbind calls set_domain (CPU, CPU) 1234 + * flush_domains and invalidate_domains end up both zero 1235 + * so no flushing/invalidating happens 1236 + * 6. Freed 1237 + * yay, done 1238 + * 1239 + * Case 2: The shared render buffer 1240 + * 1241 + * 1. Allocated 1242 + * 2. Mapped to GTT 1243 + * 3. Read/written by GPU 1244 + * 4. set_domain to (CPU,CPU) 1245 + * 5. Read/written by CPU 1246 + * 6. Read/written by GPU 1247 + * 1248 + * 1. Allocated 1249 + * Same as last example, (CPU, CPU) 1250 + * 2. Mapped to GTT 1251 + * Nothing changes (assertions find that it is not in the GPU) 1252 + * 3. Read/written by GPU 1253 + * execbuffer calls set_domain (RENDER, RENDER) 1254 + * flush_domains gets CPU 1255 + * invalidate_domains gets GPU 1256 + * clflush (obj) 1257 + * MI_FLUSH and drm_agp_chipset_flush 1258 + * 4.
set_domain (CPU, CPU) 1259 + * flush_domains gets GPU 1260 + * invalidate_domains gets CPU 1261 + * wait_rendering (obj) to make sure all drawing is complete. 1262 + * This will include an MI_FLUSH to get the data from GPU 1263 + * to memory 1264 + * clflush (obj) to invalidate the CPU cache 1265 + * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) 1266 + * 5. Read/written by CPU 1267 + * cache lines are loaded and dirtied 1268 + * 6. Read written by GPU 1269 + * Same as last GPU access 1270 + * 1271 + * Case 3: The constant buffer 1272 + * 1273 + * 1. Allocated 1274 + * 2. Written by CPU 1275 + * 3. Read by GPU 1276 + * 4. Updated (written) by CPU again 1277 + * 5. Read by GPU 1278 + * 1279 + * 1. Allocated 1280 + * (CPU, CPU) 1281 + * 2. Written by CPU 1282 + * (CPU, CPU) 1283 + * 3. Read by GPU 1284 + * (CPU+RENDER, 0) 1285 + * flush_domains = CPU 1286 + * invalidate_domains = RENDER 1287 + * clflush (obj) 1288 + * MI_FLUSH 1289 + * drm_agp_chipset_flush 1290 + * 4. Updated (written) by CPU again 1291 + * (CPU, CPU) 1292 + * flush_domains = 0 (no previous write domain) 1293 + * invalidate_domains = 0 (no new read domains) 1294 + * 5. Read by GPU 1295 + * (CPU+RENDER, 0) 1296 + * flush_domains = CPU 1297 + * invalidate_domains = RENDER 1298 + * clflush (obj) 1299 + * MI_FLUSH 1300 + * drm_agp_chipset_flush 1301 + */ 1302 + static int 1303 + i915_gem_object_set_domain(struct drm_gem_object *obj, 1304 + uint32_t read_domains, 1305 + uint32_t write_domain) 1306 + { 1307 + struct drm_device *dev = obj->dev; 1308 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1309 + uint32_t invalidate_domains = 0; 1310 + uint32_t flush_domains = 0; 1311 + int ret; 1312 + 1313 + #if WATCH_BUF 1314 + DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", 1315 + __func__, obj, 1316 + obj->read_domains, read_domains, 1317 + obj->write_domain, write_domain); 1318 + #endif 1319 + /* 1320 + * If the object isn't moving to a new write domain, 1321 + * let the object stay in multiple read domains 1322 + */ 1323 + if (write_domain == 0) 1324 + read_domains |= obj->read_domains; 1325 + else 1326 + obj_priv->dirty = 1; 1327 + 1328 + /* 1329 + * Flush the current write domain if 1330 + * the new read domains don't match. Invalidate 1331 + * any read domains which differ from the old 1332 + * write domain 1333 + */ 1334 + if (obj->write_domain && obj->write_domain != read_domains) { 1335 + flush_domains |= obj->write_domain; 1336 + invalidate_domains |= read_domains & ~obj->write_domain; 1337 + } 1338 + /* 1339 + * Invalidate any read caches which may have 1340 + * stale data. That is, any new read domains. 
1341 + */ 1342 + invalidate_domains |= read_domains & ~obj->read_domains; 1343 + if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { 1344 + #if WATCH_BUF 1345 + DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", 1346 + __func__, flush_domains, invalidate_domains); 1347 + #endif 1348 + /* 1349 + * If we're invaliding the CPU cache and flushing a GPU cache, 1350 + * then pause for rendering so that the GPU caches will be 1351 + * flushed before the cpu cache is invalidated 1352 + */ 1353 + if ((invalidate_domains & I915_GEM_DOMAIN_CPU) && 1354 + (flush_domains & ~(I915_GEM_DOMAIN_CPU | 1355 + I915_GEM_DOMAIN_GTT))) { 1356 + ret = i915_gem_object_wait_rendering(obj); 1357 + if (ret) 1358 + return ret; 1359 + } 1360 + i915_gem_clflush_object(obj); 1361 + } 1362 + 1363 + if ((write_domain | flush_domains) != 0) 1364 + obj->write_domain = write_domain; 1365 + 1366 + /* If we're invalidating the CPU domain, clear the per-page CPU 1367 + * domain list as well. 1368 + */ 1369 + if (obj_priv->page_cpu_valid != NULL && 1370 + (write_domain != 0 || 1371 + read_domains & I915_GEM_DOMAIN_CPU)) { 1372 + drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, 1373 + DRM_MEM_DRIVER); 1374 + obj_priv->page_cpu_valid = NULL; 1375 + } 1376 + obj->read_domains = read_domains; 1377 + 1378 + dev->invalidate_domains |= invalidate_domains; 1379 + dev->flush_domains |= flush_domains; 1380 + #if WATCH_BUF 1381 + DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n", 1382 + __func__, 1383 + obj->read_domains, obj->write_domain, 1384 + dev->invalidate_domains, dev->flush_domains); 1385 + #endif 1386 + return 0; 1387 + } 1388 + 1389 + /** 1390 + * Set the read/write domain on a range of the object. 1391 + * 1392 + * Currently only implemented for CPU reads, otherwise drops to normal 1393 + * i915_gem_object_set_domain(). 1394 + */ 1395 + static int 1396 + i915_gem_object_set_domain_range(struct drm_gem_object *obj, 1397 + uint64_t offset, 1398 + uint64_t size, 1399 + uint32_t read_domains, 1400 + uint32_t write_domain) 1401 + { 1402 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1403 + int ret, i; 1404 + 1405 + if (obj->read_domains & I915_GEM_DOMAIN_CPU) 1406 + return 0; 1407 + 1408 + if (read_domains != I915_GEM_DOMAIN_CPU || 1409 + write_domain != 0) 1410 + return i915_gem_object_set_domain(obj, 1411 + read_domains, write_domain); 1412 + 1413 + /* Wait on any GPU rendering to the object to be flushed. */ 1414 + if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) { 1415 + ret = i915_gem_object_wait_rendering(obj); 1416 + if (ret) 1417 + return ret; 1418 + } 1419 + 1420 + if (obj_priv->page_cpu_valid == NULL) { 1421 + obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, 1422 + DRM_MEM_DRIVER); 1423 + } 1424 + 1425 + /* Flush the cache on any pages that are still invalid from the CPU's 1426 + * perspective. 1427 + */ 1428 + for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { 1429 + if (obj_priv->page_cpu_valid[i]) 1430 + continue; 1431 + 1432 + drm_clflush_pages(obj_priv->page_list + i, 1); 1433 + 1434 + obj_priv->page_cpu_valid[i] = 1; 1435 + } 1436 + 1437 + return 0; 1438 + } 1439 + 1440 + /** 1441 + * Once all of the objects have been set in the proper domain, 1442 + * perform the necessary flush and invalidate operations. 1443 + * 1444 + * Returns the write domains flushed, for use in flush tracking. 
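Walking one transition through i915_gem_object_set_domain() makes Case 1 of the comment above concrete. A worked example for the batch-buffer step "Read by GPU", using the same notation:

/* Worked example (annotation only, not part of this patch):
 *
 *   before:  obj->read_domains = CPU,  obj->write_domain = CPU
 *   call:    set_domain(obj, read_domains = COMMAND, write_domain = 0)
 *
 *   write_domain == 0, so the new read set keeps the old one:
 *       read_domains = COMMAND | CPU
 *   the old write domain (CPU) differs from the new read set, so:
 *       flush_domains      |= CPU                      (old write domain)
 *       invalidate_domains |= (COMMAND | CPU) & ~CPU = COMMAND
 *   plus any read domains the object did not already have:
 *       invalidate_domains |= (COMMAND | CPU) & ~CPU = COMMAND
 *
 *   Since the CPU domain is involved, the object is clflushed here; the
 *   accumulated masks are applied later by i915_gem_dev_set_domain() as a
 *   single MI_FLUSH plus drm_agp_chipset_flush(), matching Case 1 step 4.
 */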
1445 + */ 1446 + static uint32_t 1447 + i915_gem_dev_set_domain(struct drm_device *dev) 1448 + { 1449 + uint32_t flush_domains = dev->flush_domains; 1450 + 1451 + /* 1452 + * Now that all the buffers are synced to the proper domains, 1453 + * flush and invalidate the collected domains 1454 + */ 1455 + if (dev->invalidate_domains | dev->flush_domains) { 1456 + #if WATCH_EXEC 1457 + DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", 1458 + __func__, 1459 + dev->invalidate_domains, 1460 + dev->flush_domains); 1461 + #endif 1462 + i915_gem_flush(dev, 1463 + dev->invalidate_domains, 1464 + dev->flush_domains); 1465 + dev->invalidate_domains = 0; 1466 + dev->flush_domains = 0; 1467 + } 1468 + 1469 + return flush_domains; 1470 + } 1471 + 1472 + /** 1473 + * Pin an object to the GTT and evaluate the relocations landing in it. 1474 + */ 1475 + static int 1476 + i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, 1477 + struct drm_file *file_priv, 1478 + struct drm_i915_gem_exec_object *entry) 1479 + { 1480 + struct drm_device *dev = obj->dev; 1481 + struct drm_i915_gem_relocation_entry reloc; 1482 + struct drm_i915_gem_relocation_entry __user *relocs; 1483 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1484 + int i, ret; 1485 + uint32_t last_reloc_offset = -1; 1486 + void *reloc_page = NULL; 1487 + 1488 + /* Choose the GTT offset for our buffer and put it there. */ 1489 + ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); 1490 + if (ret) 1491 + return ret; 1492 + 1493 + entry->offset = obj_priv->gtt_offset; 1494 + 1495 + relocs = (struct drm_i915_gem_relocation_entry __user *) 1496 + (uintptr_t) entry->relocs_ptr; 1497 + /* Apply the relocations, using the GTT aperture to avoid cache 1498 + * flushing requirements. 1499 + */ 1500 + for (i = 0; i < entry->relocation_count; i++) { 1501 + struct drm_gem_object *target_obj; 1502 + struct drm_i915_gem_object *target_obj_priv; 1503 + uint32_t reloc_val, reloc_offset, *reloc_entry; 1504 + int ret; 1505 + 1506 + ret = copy_from_user(&reloc, relocs + i, sizeof(reloc)); 1507 + if (ret != 0) { 1508 + i915_gem_object_unpin(obj); 1509 + return ret; 1510 + } 1511 + 1512 + target_obj = drm_gem_object_lookup(obj->dev, file_priv, 1513 + reloc.target_handle); 1514 + if (target_obj == NULL) { 1515 + i915_gem_object_unpin(obj); 1516 + return -EBADF; 1517 + } 1518 + target_obj_priv = target_obj->driver_private; 1519 + 1520 + /* The target buffer should have appeared before us in the 1521 + * exec_object list, so it should have a GTT space bound by now. 
1522 + */ 1523 + if (target_obj_priv->gtt_space == NULL) { 1524 + DRM_ERROR("No GTT space found for object %d\n", 1525 + reloc.target_handle); 1526 + drm_gem_object_unreference(target_obj); 1527 + i915_gem_object_unpin(obj); 1528 + return -EINVAL; 1529 + } 1530 + 1531 + if (reloc.offset > obj->size - 4) { 1532 + DRM_ERROR("Relocation beyond object bounds: " 1533 + "obj %p target %d offset %d size %d.\n", 1534 + obj, reloc.target_handle, 1535 + (int) reloc.offset, (int) obj->size); 1536 + drm_gem_object_unreference(target_obj); 1537 + i915_gem_object_unpin(obj); 1538 + return -EINVAL; 1539 + } 1540 + if (reloc.offset & 3) { 1541 + DRM_ERROR("Relocation not 4-byte aligned: " 1542 + "obj %p target %d offset %d.\n", 1543 + obj, reloc.target_handle, 1544 + (int) reloc.offset); 1545 + drm_gem_object_unreference(target_obj); 1546 + i915_gem_object_unpin(obj); 1547 + return -EINVAL; 1548 + } 1549 + 1550 + if (reloc.write_domain && target_obj->pending_write_domain && 1551 + reloc.write_domain != target_obj->pending_write_domain) { 1552 + DRM_ERROR("Write domain conflict: " 1553 + "obj %p target %d offset %d " 1554 + "new %08x old %08x\n", 1555 + obj, reloc.target_handle, 1556 + (int) reloc.offset, 1557 + reloc.write_domain, 1558 + target_obj->pending_write_domain); 1559 + drm_gem_object_unreference(target_obj); 1560 + i915_gem_object_unpin(obj); 1561 + return -EINVAL; 1562 + } 1563 + 1564 + #if WATCH_RELOC 1565 + DRM_INFO("%s: obj %p offset %08x target %d " 1566 + "read %08x write %08x gtt %08x " 1567 + "presumed %08x delta %08x\n", 1568 + __func__, 1569 + obj, 1570 + (int) reloc.offset, 1571 + (int) reloc.target_handle, 1572 + (int) reloc.read_domains, 1573 + (int) reloc.write_domain, 1574 + (int) target_obj_priv->gtt_offset, 1575 + (int) reloc.presumed_offset, 1576 + reloc.delta); 1577 + #endif 1578 + 1579 + target_obj->pending_read_domains |= reloc.read_domains; 1580 + target_obj->pending_write_domain |= reloc.write_domain; 1581 + 1582 + /* If the relocation already has the right value in it, no 1583 + * more work needs to be done. 1584 + */ 1585 + if (target_obj_priv->gtt_offset == reloc.presumed_offset) { 1586 + drm_gem_object_unreference(target_obj); 1587 + continue; 1588 + } 1589 + 1590 + /* Now that we're going to actually write some data in, 1591 + * make sure that any rendering using this buffer's contents 1592 + * is completed. 1593 + */ 1594 + i915_gem_object_wait_rendering(obj); 1595 + 1596 + /* As we're writing through the gtt, flush 1597 + * any CPU writes before we write the relocations 1598 + */ 1599 + if (obj->write_domain & I915_GEM_DOMAIN_CPU) { 1600 + i915_gem_clflush_object(obj); 1601 + drm_agp_chipset_flush(dev); 1602 + obj->write_domain = 0; 1603 + } 1604 + 1605 + /* Map the page containing the relocation we're going to 1606 + * perform. 
1607 + */ 1608 + reloc_offset = obj_priv->gtt_offset + reloc.offset; 1609 + if (reloc_page == NULL || 1610 + (last_reloc_offset & ~(PAGE_SIZE - 1)) != 1611 + (reloc_offset & ~(PAGE_SIZE - 1))) { 1612 + if (reloc_page != NULL) 1613 + iounmap(reloc_page); 1614 + 1615 + reloc_page = ioremap(dev->agp->base + 1616 + (reloc_offset & ~(PAGE_SIZE - 1)), 1617 + PAGE_SIZE); 1618 + last_reloc_offset = reloc_offset; 1619 + if (reloc_page == NULL) { 1620 + drm_gem_object_unreference(target_obj); 1621 + i915_gem_object_unpin(obj); 1622 + return -ENOMEM; 1623 + } 1624 + } 1625 + 1626 + reloc_entry = (uint32_t *)((char *)reloc_page + 1627 + (reloc_offset & (PAGE_SIZE - 1))); 1628 + reloc_val = target_obj_priv->gtt_offset + reloc.delta; 1629 + 1630 + #if WATCH_BUF 1631 + DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", 1632 + obj, (unsigned int) reloc.offset, 1633 + readl(reloc_entry), reloc_val); 1634 + #endif 1635 + writel(reloc_val, reloc_entry); 1636 + 1637 + /* Write the updated presumed offset for this entry back out 1638 + * to the user. 1639 + */ 1640 + reloc.presumed_offset = target_obj_priv->gtt_offset; 1641 + ret = copy_to_user(relocs + i, &reloc, sizeof(reloc)); 1642 + if (ret != 0) { 1643 + drm_gem_object_unreference(target_obj); 1644 + i915_gem_object_unpin(obj); 1645 + return ret; 1646 + } 1647 + 1648 + drm_gem_object_unreference(target_obj); 1649 + } 1650 + 1651 + if (reloc_page != NULL) 1652 + iounmap(reloc_page); 1653 + 1654 + #if WATCH_BUF 1655 + if (0) 1656 + i915_gem_dump_object(obj, 128, __func__, ~0); 1657 + #endif 1658 + return 0; 1659 + } 1660 + 1661 + /** Dispatch a batchbuffer to the ring 1662 + */ 1663 + static int 1664 + i915_dispatch_gem_execbuffer(struct drm_device *dev, 1665 + struct drm_i915_gem_execbuffer *exec, 1666 + uint64_t exec_offset) 1667 + { 1668 + drm_i915_private_t *dev_priv = dev->dev_private; 1669 + struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *) 1670 + (uintptr_t) exec->cliprects_ptr; 1671 + int nbox = exec->num_cliprects; 1672 + int i = 0, count; 1673 + uint32_t exec_start, exec_len; 1674 + RING_LOCALS; 1675 + 1676 + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 1677 + exec_len = (uint32_t) exec->batch_len; 1678 + 1679 + if ((exec_start | exec_len) & 0x7) { 1680 + DRM_ERROR("alignment\n"); 1681 + return -EINVAL; 1682 + } 1683 + 1684 + if (!exec_start) 1685 + return -EINVAL; 1686 + 1687 + count = nbox ? nbox : 1; 1688 + 1689 + for (i = 0; i < count; i++) { 1690 + if (i < nbox) { 1691 + int ret = i915_emit_box(dev, boxes, i, 1692 + exec->DR1, exec->DR4); 1693 + if (ret) 1694 + return ret; 1695 + } 1696 + 1697 + if (IS_I830(dev) || IS_845G(dev)) { 1698 + BEGIN_LP_RING(4); 1699 + OUT_RING(MI_BATCH_BUFFER); 1700 + OUT_RING(exec_start | MI_BATCH_NON_SECURE); 1701 + OUT_RING(exec_start + exec_len - 4); 1702 + OUT_RING(0); 1703 + ADVANCE_LP_RING(); 1704 + } else { 1705 + BEGIN_LP_RING(2); 1706 + if (IS_I965G(dev)) { 1707 + OUT_RING(MI_BATCH_BUFFER_START | 1708 + (2 << 6) | 1709 + MI_BATCH_NON_SECURE_I965); 1710 + OUT_RING(exec_start); 1711 + } else { 1712 + OUT_RING(MI_BATCH_BUFFER_START | 1713 + (2 << 6)); 1714 + OUT_RING(exec_start | MI_BATCH_NON_SECURE); 1715 + } 1716 + ADVANCE_LP_RING(); 1717 + } 1718 + } 1719 + 1720 + /* XXX breadcrumb */ 1721 + return 0; 1722 + } 1723 + 1724 + /* Throttle our rendering by waiting until the ring has completed our requests 1725 + * emitted over 20 msec ago. 
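Each relocation tells the kernel where, inside the buffer being pinned, a GTT address of another object was written, so it can patch the real offset in (or skip the write entirely when presumed_offset already matches). The sketch below shows userspace filling one entry, using the field names consumed by i915_gem_object_pin_and_relocate() above; it is illustrative only and assumes the struct definition from this patch's UAPI header.

#include <stdint.h>
#include "i915_drm.h"

static void emit_reloc(uint32_t *batch, unsigned int dword,
                       struct drm_i915_gem_relocation_entry *reloc,
                       uint32_t target_handle, uint32_t delta)
{
        /* Describe the slot so the kernel can patch the real address in. */
        reloc->target_handle = target_handle;     /* object being pointed at */
        reloc->offset = dword * 4;                /* byte offset of the pointer */
        reloc->delta = delta;                     /* constant added to its base */
        reloc->read_domains = I915_GEM_DOMAIN_RENDER;
        reloc->write_domain = I915_GEM_DOMAIN_RENDER;
        reloc->presumed_offset = 0;               /* last known GTT offset, if any */

        /* Write our best guess; if presumed_offset turns out to match the
         * object's real GTT offset, the kernel leaves this dword untouched. */
        batch[dword] = (uint32_t)(reloc->presumed_offset + reloc->delta);
}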
1726 + * 1727 + * This should get us reasonable parallelism between CPU and GPU but also 1728 + * relatively low latency when blocking on a particular request to finish. 1729 + */ 1730 + static int 1731 + i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) 1732 + { 1733 + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 1734 + int ret = 0; 1735 + uint32_t seqno; 1736 + 1737 + mutex_lock(&dev->struct_mutex); 1738 + seqno = i915_file_priv->mm.last_gem_throttle_seqno; 1739 + i915_file_priv->mm.last_gem_throttle_seqno = 1740 + i915_file_priv->mm.last_gem_seqno; 1741 + if (seqno) 1742 + ret = i915_wait_request(dev, seqno); 1743 + mutex_unlock(&dev->struct_mutex); 1744 + return ret; 1745 + } 1746 + 1747 + int 1748 + i915_gem_execbuffer(struct drm_device *dev, void *data, 1749 + struct drm_file *file_priv) 1750 + { 1751 + drm_i915_private_t *dev_priv = dev->dev_private; 1752 + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 1753 + struct drm_i915_gem_execbuffer *args = data; 1754 + struct drm_i915_gem_exec_object *exec_list = NULL; 1755 + struct drm_gem_object **object_list = NULL; 1756 + struct drm_gem_object *batch_obj; 1757 + int ret, i, pinned = 0; 1758 + uint64_t exec_offset; 1759 + uint32_t seqno, flush_domains; 1760 + 1761 + #if WATCH_EXEC 1762 + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", 1763 + (int) args->buffers_ptr, args->buffer_count, args->batch_len); 1764 + #endif 1765 + 1766 + /* Copy in the exec list from userland */ 1767 + exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count, 1768 + DRM_MEM_DRIVER); 1769 + object_list = drm_calloc(sizeof(*object_list), args->buffer_count, 1770 + DRM_MEM_DRIVER); 1771 + if (exec_list == NULL || object_list == NULL) { 1772 + DRM_ERROR("Failed to allocate exec or object list " 1773 + "for %d buffers\n", 1774 + args->buffer_count); 1775 + ret = -ENOMEM; 1776 + goto pre_mutex_err; 1777 + } 1778 + ret = copy_from_user(exec_list, 1779 + (struct drm_i915_relocation_entry __user *) 1780 + (uintptr_t) args->buffers_ptr, 1781 + sizeof(*exec_list) * args->buffer_count); 1782 + if (ret != 0) { 1783 + DRM_ERROR("copy %d exec entries failed %d\n", 1784 + args->buffer_count, ret); 1785 + goto pre_mutex_err; 1786 + } 1787 + 1788 + mutex_lock(&dev->struct_mutex); 1789 + 1790 + i915_verify_inactive(dev, __FILE__, __LINE__); 1791 + 1792 + if (dev_priv->mm.wedged) { 1793 + DRM_ERROR("Execbuf while wedged\n"); 1794 + mutex_unlock(&dev->struct_mutex); 1795 + return -EIO; 1796 + } 1797 + 1798 + if (dev_priv->mm.suspended) { 1799 + DRM_ERROR("Execbuf while VT-switched.\n"); 1800 + mutex_unlock(&dev->struct_mutex); 1801 + return -EBUSY; 1802 + } 1803 + 1804 + /* Zero the global flush/invalidate flags.
These 1805 + * will be modified as each object is bound to the 1806 + * gtt 1807 + */ 1808 + dev->invalidate_domains = 0; 1809 + dev->flush_domains = 0; 1810 + 1811 + /* Look up object handles and perform the relocations */ 1812 + for (i = 0; i < args->buffer_count; i++) { 1813 + object_list[i] = drm_gem_object_lookup(dev, file_priv, 1814 + exec_list[i].handle); 1815 + if (object_list[i] == NULL) { 1816 + DRM_ERROR("Invalid object handle %d at index %d\n", 1817 + exec_list[i].handle, i); 1818 + ret = -EBADF; 1819 + goto err; 1820 + } 1821 + 1822 + object_list[i]->pending_read_domains = 0; 1823 + object_list[i]->pending_write_domain = 0; 1824 + ret = i915_gem_object_pin_and_relocate(object_list[i], 1825 + file_priv, 1826 + &exec_list[i]); 1827 + if (ret) { 1828 + DRM_ERROR("object bind and relocate failed %d\n", ret); 1829 + goto err; 1830 + } 1831 + pinned = i + 1; 1832 + } 1833 + 1834 + /* Set the pending read domains for the batch buffer to COMMAND */ 1835 + batch_obj = object_list[args->buffer_count-1]; 1836 + batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND; 1837 + batch_obj->pending_write_domain = 0; 1838 + 1839 + i915_verify_inactive(dev, __FILE__, __LINE__); 1840 + 1841 + for (i = 0; i < args->buffer_count; i++) { 1842 + struct drm_gem_object *obj = object_list[i]; 1843 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1844 + 1845 + if (obj_priv->gtt_space == NULL) { 1846 + /* We evicted the buffer in the process of validating 1847 + * our set of buffers in. We could try to recover by 1848 + * kicking them everything out and trying again from 1849 + * the start. 1850 + */ 1851 + ret = -ENOMEM; 1852 + goto err; 1853 + } 1854 + 1855 + /* make sure all previous memory operations have passed */ 1856 + ret = i915_gem_object_set_domain(obj, 1857 + obj->pending_read_domains, 1858 + obj->pending_write_domain); 1859 + if (ret) 1860 + goto err; 1861 + } 1862 + 1863 + i915_verify_inactive(dev, __FILE__, __LINE__); 1864 + 1865 + /* Flush/invalidate caches and chipset buffer */ 1866 + flush_domains = i915_gem_dev_set_domain(dev); 1867 + 1868 + i915_verify_inactive(dev, __FILE__, __LINE__); 1869 + 1870 + #if WATCH_COHERENCY 1871 + for (i = 0; i < args->buffer_count; i++) { 1872 + i915_gem_object_check_coherency(object_list[i], 1873 + exec_list[i].handle); 1874 + } 1875 + #endif 1876 + 1877 + exec_offset = exec_list[args->buffer_count - 1].offset; 1878 + 1879 + #if WATCH_EXEC 1880 + i915_gem_dump_object(object_list[args->buffer_count - 1], 1881 + args->batch_len, 1882 + __func__, 1883 + ~0); 1884 + #endif 1885 + 1886 + (void)i915_add_request(dev, flush_domains); 1887 + 1888 + /* Exec the batchbuffer */ 1889 + ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); 1890 + if (ret) { 1891 + DRM_ERROR("dispatch failed %d\n", ret); 1892 + goto err; 1893 + } 1894 + 1895 + /* 1896 + * Ensure that the commands in the batch buffer are 1897 + * finished before the interrupt fires 1898 + */ 1899 + flush_domains = i915_retire_commands(dev); 1900 + 1901 + i915_verify_inactive(dev, __FILE__, __LINE__); 1902 + 1903 + /* 1904 + * Get a seqno representing the execution of the current buffer, 1905 + * which we can wait on. We would like to mitigate these interrupts, 1906 + * likely by only creating seqnos occasionally (so that we have 1907 + * *some* interrupts representing completion of buffers that we can 1908 + * wait on when trying to clear up gtt space). 
1909 + */ 1910 + seqno = i915_add_request(dev, flush_domains); 1911 + BUG_ON(seqno == 0); 1912 + i915_file_priv->mm.last_gem_seqno = seqno; 1913 + for (i = 0; i < args->buffer_count; i++) { 1914 + struct drm_gem_object *obj = object_list[i]; 1915 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1916 + 1917 + i915_gem_object_move_to_active(obj); 1918 + obj_priv->last_rendering_seqno = seqno; 1919 + #if WATCH_LRU 1920 + DRM_INFO("%s: move to exec list %p\n", __func__, obj); 1921 + #endif 1922 + } 1923 + #if WATCH_LRU 1924 + i915_dump_lru(dev, __func__); 1925 + #endif 1926 + 1927 + i915_verify_inactive(dev, __FILE__, __LINE__); 1928 + 1929 + /* Copy the new buffer offsets back to the user's exec list. */ 1930 + ret = copy_to_user((struct drm_i915_relocation_entry __user *) 1931 + (uintptr_t) args->buffers_ptr, 1932 + exec_list, 1933 + sizeof(*exec_list) * args->buffer_count); 1934 + if (ret) 1935 + DRM_ERROR("failed to copy %d exec entries " 1936 + "back to user (%d)\n", 1937 + args->buffer_count, ret); 1938 + err: 1939 + if (object_list != NULL) { 1940 + for (i = 0; i < pinned; i++) 1941 + i915_gem_object_unpin(object_list[i]); 1942 + 1943 + for (i = 0; i < args->buffer_count; i++) 1944 + drm_gem_object_unreference(object_list[i]); 1945 + } 1946 + mutex_unlock(&dev->struct_mutex); 1947 + 1948 + pre_mutex_err: 1949 + drm_free(object_list, sizeof(*object_list) * args->buffer_count, 1950 + DRM_MEM_DRIVER); 1951 + drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, 1952 + DRM_MEM_DRIVER); 1953 + 1954 + return ret; 1955 + } 1956 + 1957 + int 1958 + i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) 1959 + { 1960 + struct drm_device *dev = obj->dev; 1961 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1962 + int ret; 1963 + 1964 + i915_verify_inactive(dev, __FILE__, __LINE__); 1965 + if (obj_priv->gtt_space == NULL) { 1966 + ret = i915_gem_object_bind_to_gtt(obj, alignment); 1967 + if (ret != 0) { 1968 + DRM_ERROR("Failure to bind: %d", ret); 1969 + return ret; 1970 + } 1971 + } 1972 + obj_priv->pin_count++; 1973 + 1974 + /* If the object is not active and not pending a flush, 1975 + * remove it from the inactive list 1976 + */ 1977 + if (obj_priv->pin_count == 1) { 1978 + atomic_inc(&dev->pin_count); 1979 + atomic_add(obj->size, &dev->pin_memory); 1980 + if (!obj_priv->active && 1981 + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 1982 + I915_GEM_DOMAIN_GTT)) == 0 && 1983 + !list_empty(&obj_priv->list)) 1984 + list_del_init(&obj_priv->list); 1985 + } 1986 + i915_verify_inactive(dev, __FILE__, __LINE__); 1987 + 1988 + return 0; 1989 + } 1990 + 1991 + void 1992 + i915_gem_object_unpin(struct drm_gem_object *obj) 1993 + { 1994 + struct drm_device *dev = obj->dev; 1995 + drm_i915_private_t *dev_priv = dev->dev_private; 1996 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1997 + 1998 + i915_verify_inactive(dev, __FILE__, __LINE__); 1999 + obj_priv->pin_count--; 2000 + BUG_ON(obj_priv->pin_count < 0); 2001 + BUG_ON(obj_priv->gtt_space == NULL); 2002 + 2003 + /* If the object is no longer pinned, and is 2004 + * neither active nor being flushed, then stick it on 2005 + * the inactive list 2006 + */ 2007 + if (obj_priv->pin_count == 0) { 2008 + if (!obj_priv->active && 2009 + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 2010 + I915_GEM_DOMAIN_GTT)) == 0) 2011 + list_move_tail(&obj_priv->list, 2012 + &dev_priv->mm.inactive_list); 2013 + atomic_dec(&dev->pin_count); 2014 + atomic_sub(obj->size, &dev->pin_memory); 2015 + } 2016 + 
i915_verify_inactive(dev, __FILE__, __LINE__); 2017 + } 2018 + 2019 + int 2020 + i915_gem_pin_ioctl(struct drm_device *dev, void *data, 2021 + struct drm_file *file_priv) 2022 + { 2023 + struct drm_i915_gem_pin *args = data; 2024 + struct drm_gem_object *obj; 2025 + struct drm_i915_gem_object *obj_priv; 2026 + int ret; 2027 + 2028 + mutex_lock(&dev->struct_mutex); 2029 + 2030 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 2031 + if (obj == NULL) { 2032 + DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", 2033 + args->handle); 2034 + mutex_unlock(&dev->struct_mutex); 2035 + return -EBADF; 2036 + } 2037 + obj_priv = obj->driver_private; 2038 + 2039 + ret = i915_gem_object_pin(obj, args->alignment); 2040 + if (ret != 0) { 2041 + drm_gem_object_unreference(obj); 2042 + mutex_unlock(&dev->struct_mutex); 2043 + return ret; 2044 + } 2045 + 2046 + /* XXX - flush the CPU caches for pinned objects 2047 + * as the X server doesn't manage domains yet 2048 + */ 2049 + if (obj->write_domain & I915_GEM_DOMAIN_CPU) { 2050 + i915_gem_clflush_object(obj); 2051 + drm_agp_chipset_flush(dev); 2052 + obj->write_domain = 0; 2053 + } 2054 + args->offset = obj_priv->gtt_offset; 2055 + drm_gem_object_unreference(obj); 2056 + mutex_unlock(&dev->struct_mutex); 2057 + 2058 + return 0; 2059 + } 2060 + 2061 + int 2062 + i915_gem_unpin_ioctl(struct drm_device *dev, void *data, 2063 + struct drm_file *file_priv) 2064 + { 2065 + struct drm_i915_gem_pin *args = data; 2066 + struct drm_gem_object *obj; 2067 + 2068 + mutex_lock(&dev->struct_mutex); 2069 + 2070 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 2071 + if (obj == NULL) { 2072 + DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", 2073 + args->handle); 2074 + mutex_unlock(&dev->struct_mutex); 2075 + return -EBADF; 2076 + } 2077 + 2078 + i915_gem_object_unpin(obj); 2079 + 2080 + drm_gem_object_unreference(obj); 2081 + mutex_unlock(&dev->struct_mutex); 2082 + return 0; 2083 + } 2084 + 2085 + int 2086 + i915_gem_busy_ioctl(struct drm_device *dev, void *data, 2087 + struct drm_file *file_priv) 2088 + { 2089 + struct drm_i915_gem_busy *args = data; 2090 + struct drm_gem_object *obj; 2091 + struct drm_i915_gem_object *obj_priv; 2092 + 2093 + mutex_lock(&dev->struct_mutex); 2094 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 2095 + if (obj == NULL) { 2096 + DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", 2097 + args->handle); 2098 + mutex_unlock(&dev->struct_mutex); 2099 + return -EBADF; 2100 + } 2101 + 2102 + obj_priv = obj->driver_private; 2103 + args->busy = obj_priv->active; 2104 + 2105 + drm_gem_object_unreference(obj); 2106 + mutex_unlock(&dev->struct_mutex); 2107 + return 0; 2108 + } 2109 + 2110 + int 2111 + i915_gem_throttle_ioctl(struct drm_device *dev, void *data, 2112 + struct drm_file *file_priv) 2113 + { 2114 + return i915_gem_ring_throttle(dev, file_priv); 2115 + } 2116 + 2117 + int i915_gem_init_object(struct drm_gem_object *obj) 2118 + { 2119 + struct drm_i915_gem_object *obj_priv; 2120 + 2121 + obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER); 2122 + if (obj_priv == NULL) 2123 + return -ENOMEM; 2124 + 2125 + /* 2126 + * We've just allocated pages from the kernel, 2127 + * so they've just been written by the CPU with 2128 + * zeros. They'll need to be clflushed before we 2129 + * use them with the GPU. 
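The pin ioctls above exist mainly so a display server can hold a buffer at a fixed GTT offset (for example for scanout) while it manages cache domains by hand. A userspace sketch follows; it assumes the DRM_IOCTL_I915_GEM_PIN request number from this patch's UAPI header and uses the field names from i915_gem_pin_ioctl() above.

/* Illustrative userspace sketch (not part of this patch). */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include "i915_drm.h"

static int pin_scanout(int fd, uint32_t handle, uint64_t *gtt_offset)
{
        struct drm_i915_gem_pin pin;

        memset(&pin, 0, sizeof(pin));
        pin.handle = handle;
        pin.alignment = 4096;     /* must be a multiple of the page size */

        if (ioctl(fd, DRM_IOCTL_I915_GEM_PIN, &pin))
                return -1;

        *gtt_offset = pin.offset; /* stable GTT address to program for scanout */
        return 0;
}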
2130 + */ 2131 + obj->write_domain = I915_GEM_DOMAIN_CPU; 2132 + obj->read_domains = I915_GEM_DOMAIN_CPU; 2133 + 2134 + obj->driver_private = obj_priv; 2135 + obj_priv->obj = obj; 2136 + INIT_LIST_HEAD(&obj_priv->list); 2137 + return 0; 2138 + } 2139 + 2140 + void i915_gem_free_object(struct drm_gem_object *obj) 2141 + { 2142 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 2143 + 2144 + while (obj_priv->pin_count > 0) 2145 + i915_gem_object_unpin(obj); 2146 + 2147 + i915_gem_object_unbind(obj); 2148 + 2149 + drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); 2150 + drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 2151 + } 2152 + 2153 + static int 2154 + i915_gem_set_domain(struct drm_gem_object *obj, 2155 + struct drm_file *file_priv, 2156 + uint32_t read_domains, 2157 + uint32_t write_domain) 2158 + { 2159 + struct drm_device *dev = obj->dev; 2160 + int ret; 2161 + uint32_t flush_domains; 2162 + 2163 + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 2164 + 2165 + ret = i915_gem_object_set_domain(obj, read_domains, write_domain); 2166 + if (ret) 2167 + return ret; 2168 + flush_domains = i915_gem_dev_set_domain(obj->dev); 2169 + 2170 + if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) 2171 + (void) i915_add_request(dev, flush_domains); 2172 + 2173 + return 0; 2174 + } 2175 + 2176 + /** Unbinds all objects that are on the given buffer list. */ 2177 + static int 2178 + i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) 2179 + { 2180 + struct drm_gem_object *obj; 2181 + struct drm_i915_gem_object *obj_priv; 2182 + int ret; 2183 + 2184 + while (!list_empty(head)) { 2185 + obj_priv = list_first_entry(head, 2186 + struct drm_i915_gem_object, 2187 + list); 2188 + obj = obj_priv->obj; 2189 + 2190 + if (obj_priv->pin_count != 0) { 2191 + DRM_ERROR("Pinned object in unbind list\n"); 2192 + mutex_unlock(&dev->struct_mutex); 2193 + return -EINVAL; 2194 + } 2195 + 2196 + ret = i915_gem_object_unbind(obj); 2197 + if (ret != 0) { 2198 + DRM_ERROR("Error unbinding object in LeaveVT: %d\n", 2199 + ret); 2200 + mutex_unlock(&dev->struct_mutex); 2201 + return ret; 2202 + } 2203 + } 2204 + 2205 + 2206 + return 0; 2207 + } 2208 + 2209 + static int 2210 + i915_gem_idle(struct drm_device *dev) 2211 + { 2212 + drm_i915_private_t *dev_priv = dev->dev_private; 2213 + uint32_t seqno, cur_seqno, last_seqno; 2214 + int stuck, ret; 2215 + 2216 + if (dev_priv->mm.suspended) 2217 + return 0; 2218 + 2219 + /* Hack! Don't let anybody do execbuf while we don't control the chip. 2220 + * We need to replace this with a semaphore, or something. 
2221 + */ 2222 + dev_priv->mm.suspended = 1; 2223 + 2224 + i915_kernel_lost_context(dev); 2225 + 2226 + /* Flush the GPU along with all non-CPU write domains 2227 + */ 2228 + i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), 2229 + ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 2230 + seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU | 2231 + I915_GEM_DOMAIN_GTT)); 2232 + 2233 + if (seqno == 0) { 2234 + mutex_unlock(&dev->struct_mutex); 2235 + return -ENOMEM; 2236 + } 2237 + 2238 + dev_priv->mm.waiting_gem_seqno = seqno; 2239 + last_seqno = 0; 2240 + stuck = 0; 2241 + for (;;) { 2242 + cur_seqno = i915_get_gem_seqno(dev); 2243 + if (i915_seqno_passed(cur_seqno, seqno)) 2244 + break; 2245 + if (last_seqno == cur_seqno) { 2246 + if (stuck++ > 100) { 2247 + DRM_ERROR("hardware wedged\n"); 2248 + dev_priv->mm.wedged = 1; 2249 + DRM_WAKEUP(&dev_priv->irq_queue); 2250 + break; 2251 + } 2252 + } 2253 + msleep(10); 2254 + last_seqno = cur_seqno; 2255 + } 2256 + dev_priv->mm.waiting_gem_seqno = 0; 2257 + 2258 + i915_gem_retire_requests(dev); 2259 + 2260 + /* Active and flushing should now be empty as we've 2261 + * waited for a sequence higher than any pending execbuffer 2262 + */ 2263 + BUG_ON(!list_empty(&dev_priv->mm.active_list)); 2264 + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 2265 + 2266 + /* Request should now be empty as we've also waited 2267 + * for the last request in the list 2268 + */ 2269 + BUG_ON(!list_empty(&dev_priv->mm.request_list)); 2270 + 2271 + /* Move all buffers out of the GTT. */ 2272 + ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); 2273 + if (ret) 2274 + return ret; 2275 + 2276 + BUG_ON(!list_empty(&dev_priv->mm.active_list)); 2277 + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 2278 + BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 2279 + BUG_ON(!list_empty(&dev_priv->mm.request_list)); 2280 + return 0; 2281 + } 2282 + 2283 + static int 2284 + i915_gem_init_hws(struct drm_device *dev) 2285 + { 2286 + drm_i915_private_t *dev_priv = dev->dev_private; 2287 + struct drm_gem_object *obj; 2288 + struct drm_i915_gem_object *obj_priv; 2289 + int ret; 2290 + 2291 + /* If we need a physical address for the status page, it's already 2292 + * initialized at driver load time. 
2293 + */ 2294 + if (!I915_NEED_GFX_HWS(dev)) 2295 + return 0; 2296 + 2297 + obj = drm_gem_object_alloc(dev, 4096); 2298 + if (obj == NULL) { 2299 + DRM_ERROR("Failed to allocate status page\n"); 2300 + return -ENOMEM; 2301 + } 2302 + obj_priv = obj->driver_private; 2303 + 2304 + ret = i915_gem_object_pin(obj, 4096); 2305 + if (ret != 0) { 2306 + drm_gem_object_unreference(obj); 2307 + return ret; 2308 + } 2309 + 2310 + dev_priv->status_gfx_addr = obj_priv->gtt_offset; 2311 + dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset; 2312 + dev_priv->hws_map.size = 4096; 2313 + dev_priv->hws_map.type = 0; 2314 + dev_priv->hws_map.flags = 0; 2315 + dev_priv->hws_map.mtrr = 0; 2316 + 2317 + drm_core_ioremap(&dev_priv->hws_map, dev); 2318 + if (dev_priv->hws_map.handle == NULL) { 2319 + DRM_ERROR("Failed to map status page.\n"); 2320 + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 2321 + drm_gem_object_unreference(obj); 2322 + return -EINVAL; 2323 + } 2324 + dev_priv->hws_obj = obj; 2325 + dev_priv->hw_status_page = dev_priv->hws_map.handle; 2326 + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); 2327 + I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); 2328 + DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); 2329 + 2330 + return 0; 2331 + } 2332 + 2333 + static int 2334 + i915_gem_init_ringbuffer(struct drm_device *dev) 2335 + { 2336 + drm_i915_private_t *dev_priv = dev->dev_private; 2337 + struct drm_gem_object *obj; 2338 + struct drm_i915_gem_object *obj_priv; 2339 + int ret; 2340 + 2341 + ret = i915_gem_init_hws(dev); 2342 + if (ret != 0) 2343 + return ret; 2344 + 2345 + obj = drm_gem_object_alloc(dev, 128 * 1024); 2346 + if (obj == NULL) { 2347 + DRM_ERROR("Failed to allocate ringbuffer\n"); 2348 + return -ENOMEM; 2349 + } 2350 + obj_priv = obj->driver_private; 2351 + 2352 + ret = i915_gem_object_pin(obj, 4096); 2353 + if (ret != 0) { 2354 + drm_gem_object_unreference(obj); 2355 + return ret; 2356 + } 2357 + 2358 + /* Set up the kernel mapping for the ring. */ 2359 + dev_priv->ring.Size = obj->size; 2360 + dev_priv->ring.tail_mask = obj->size - 1; 2361 + 2362 + dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset; 2363 + dev_priv->ring.map.size = obj->size; 2364 + dev_priv->ring.map.type = 0; 2365 + dev_priv->ring.map.flags = 0; 2366 + dev_priv->ring.map.mtrr = 0; 2367 + 2368 + drm_core_ioremap(&dev_priv->ring.map, dev); 2369 + if (dev_priv->ring.map.handle == NULL) { 2370 + DRM_ERROR("Failed to map ringbuffer.\n"); 2371 + memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); 2372 + drm_gem_object_unreference(obj); 2373 + return -EINVAL; 2374 + } 2375 + dev_priv->ring.ring_obj = obj; 2376 + dev_priv->ring.virtual_start = dev_priv->ring.map.handle; 2377 + 2378 + /* Stop the ring if it's running. */ 2379 + I915_WRITE(PRB0_CTL, 0); 2380 + I915_WRITE(PRB0_HEAD, 0); 2381 + I915_WRITE(PRB0_TAIL, 0); 2382 + I915_WRITE(PRB0_START, 0); 2383 + 2384 + /* Initialize the ring. 
*/ 2385 + I915_WRITE(PRB0_START, obj_priv->gtt_offset); 2386 + I915_WRITE(PRB0_CTL, 2387 + ((obj->size - 4096) & RING_NR_PAGES) | 2388 + RING_NO_REPORT | 2389 + RING_VALID); 2390 + 2391 + /* Update our cache of the ring state */ 2392 + i915_kernel_lost_context(dev); 2393 + 2394 + return 0; 2395 + } 2396 + 2397 + static void 2398 + i915_gem_cleanup_ringbuffer(struct drm_device *dev) 2399 + { 2400 + drm_i915_private_t *dev_priv = dev->dev_private; 2401 + 2402 + if (dev_priv->ring.ring_obj == NULL) 2403 + return; 2404 + 2405 + drm_core_ioremapfree(&dev_priv->ring.map, dev); 2406 + 2407 + i915_gem_object_unpin(dev_priv->ring.ring_obj); 2408 + drm_gem_object_unreference(dev_priv->ring.ring_obj); 2409 + dev_priv->ring.ring_obj = NULL; 2410 + memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); 2411 + 2412 + if (dev_priv->hws_obj != NULL) { 2413 + i915_gem_object_unpin(dev_priv->hws_obj); 2414 + drm_gem_object_unreference(dev_priv->hws_obj); 2415 + dev_priv->hws_obj = NULL; 2416 + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 2417 + 2418 + /* Write high address into HWS_PGA when disabling. */ 2419 + I915_WRITE(HWS_PGA, 0x1ffff000); 2420 + } 2421 + } 2422 + 2423 + int 2424 + i915_gem_entervt_ioctl(struct drm_device *dev, void *data, 2425 + struct drm_file *file_priv) 2426 + { 2427 + drm_i915_private_t *dev_priv = dev->dev_private; 2428 + int ret; 2429 + 2430 + if (dev_priv->mm.wedged) { 2431 + DRM_ERROR("Reenabling wedged hardware, good luck\n"); 2432 + dev_priv->mm.wedged = 0; 2433 + } 2434 + 2435 + ret = i915_gem_init_ringbuffer(dev); 2436 + if (ret != 0) 2437 + return ret; 2438 + 2439 + mutex_lock(&dev->struct_mutex); 2440 + BUG_ON(!list_empty(&dev_priv->mm.active_list)); 2441 + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); 2442 + BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); 2443 + BUG_ON(!list_empty(&dev_priv->mm.request_list)); 2444 + dev_priv->mm.suspended = 0; 2445 + mutex_unlock(&dev->struct_mutex); 2446 + return 0; 2447 + } 2448 + 2449 + int 2450 + i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 2451 + struct drm_file *file_priv) 2452 + { 2453 + int ret; 2454 + 2455 + mutex_lock(&dev->struct_mutex); 2456 + ret = i915_gem_idle(dev); 2457 + if (ret == 0) 2458 + i915_gem_cleanup_ringbuffer(dev); 2459 + mutex_unlock(&dev->struct_mutex); 2460 + 2461 + return 0; 2462 + } 2463 + 2464 + void 2465 + i915_gem_lastclose(struct drm_device *dev) 2466 + { 2467 + int ret; 2468 + drm_i915_private_t *dev_priv = dev->dev_private; 2469 + 2470 + mutex_lock(&dev->struct_mutex); 2471 + 2472 + if (dev_priv->ring.ring_obj != NULL) { 2473 + ret = i915_gem_idle(dev); 2474 + if (ret) 2475 + DRM_ERROR("failed to idle hardware: %d\n", ret); 2476 + 2477 + i915_gem_cleanup_ringbuffer(dev); 2478 + } 2479 + 2480 + mutex_unlock(&dev->struct_mutex); 2481 + } 2482 + 2483 + void 2484 + i915_gem_load(struct drm_device *dev) 2485 + { 2486 + drm_i915_private_t *dev_priv = dev->dev_private; 2487 + 2488 + INIT_LIST_HEAD(&dev_priv->mm.active_list); 2489 + INIT_LIST_HEAD(&dev_priv->mm.flushing_list); 2490 + INIT_LIST_HEAD(&dev_priv->mm.inactive_list); 2491 + INIT_LIST_HEAD(&dev_priv->mm.request_list); 2492 + INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 2493 + i915_gem_retire_work_handler); 2494 + dev_priv->mm.next_gem_seqno = 1; 2495 + 2496 + i915_gem_detect_bit_6_swizzle(dev); 2497 + }
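The entervt/leavevt ioctls above let the userspace driver hand ring-buffer ownership back and forth across VT switches: entervt re-creates the status page and ring and clears mm.suspended, while leavevt idles the GPU and tears the ring down. A minimal sketch of the corresponding userspace calls, using the ioctl numbers defined later in include/drm/i915_drm.h and assuming an already-open master DRM fd (helper names are illustrative, not part of the patch):

    /* Hypothetical DDX-side VT-switch hooks; `fd` is an open, master DRM fd. */
    #include <sys/ioctl.h>
    #include "i915_drm.h"

    static int gem_enter_vt(int fd)
    {
        /* Kernel re-creates the HWS/ring objects and resumes GEM. */
        return ioctl(fd, DRM_IOCTL_I915_GEM_ENTERVT, 0);
    }

    static int gem_leave_vt(int fd)
    {
        /* Kernel idles the GPU, evicts buffers and frees the ring. */
        return ioctl(fd, DRM_IOCTL_I915_GEM_LEAVEVT, 0);
    }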
+201
drivers/gpu/drm/i915/i915_gem_debug.c
··· 1 + /* 2 + * Copyright © 2008 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Keith Packard <keithp@keithp.com> 25 + * 26 + */ 27 + 28 + #include "drmP.h" 29 + #include "drm.h" 30 + #include "i915_drm.h" 31 + #include "i915_drv.h" 32 + 33 + #if WATCH_INACTIVE 34 + void 35 + i915_verify_inactive(struct drm_device *dev, char *file, int line) 36 + { 37 + drm_i915_private_t *dev_priv = dev->dev_private; 38 + struct drm_gem_object *obj; 39 + struct drm_i915_gem_object *obj_priv; 40 + 41 + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { 42 + obj = obj_priv->obj; 43 + if (obj_priv->pin_count || obj_priv->active || 44 + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 45 + I915_GEM_DOMAIN_GTT))) 46 + DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", 47 + obj, 48 + obj_priv->pin_count, obj_priv->active, 49 + obj->write_domain, file, line); 50 + } 51 + } 52 + #endif /* WATCH_INACTIVE */ 53 + 54 + 55 + #if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE 56 + static void 57 + i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, 58 + uint32_t bias, uint32_t mark) 59 + { 60 + uint32_t *mem = kmap_atomic(page, KM_USER0); 61 + int i; 62 + for (i = start; i < end; i += 4) 63 + DRM_INFO("%08x: %08x%s\n", 64 + (int) (bias + i), mem[i / 4], 65 + (bias + i == mark) ? 
" ********" : ""); 66 + kunmap_atomic(mem, KM_USER0); 67 + /* give syslog time to catch up */ 68 + msleep(1); 69 + } 70 + 71 + void 72 + i915_gem_dump_object(struct drm_gem_object *obj, int len, 73 + const char *where, uint32_t mark) 74 + { 75 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 76 + int page; 77 + 78 + DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); 79 + for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { 80 + int page_len, chunk, chunk_len; 81 + 82 + page_len = len - page * PAGE_SIZE; 83 + if (page_len > PAGE_SIZE) 84 + page_len = PAGE_SIZE; 85 + 86 + for (chunk = 0; chunk < page_len; chunk += 128) { 87 + chunk_len = page_len - chunk; 88 + if (chunk_len > 128) 89 + chunk_len = 128; 90 + i915_gem_dump_page(obj_priv->page_list[page], 91 + chunk, chunk + chunk_len, 92 + obj_priv->gtt_offset + 93 + page * PAGE_SIZE, 94 + mark); 95 + } 96 + } 97 + } 98 + #endif 99 + 100 + #if WATCH_LRU 101 + void 102 + i915_dump_lru(struct drm_device *dev, const char *where) 103 + { 104 + drm_i915_private_t *dev_priv = dev->dev_private; 105 + struct drm_i915_gem_object *obj_priv; 106 + 107 + DRM_INFO("active list %s {\n", where); 108 + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, 109 + list) 110 + { 111 + DRM_INFO(" %p: %08x\n", obj_priv, 112 + obj_priv->last_rendering_seqno); 113 + } 114 + DRM_INFO("}\n"); 115 + DRM_INFO("flushing list %s {\n", where); 116 + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, 117 + list) 118 + { 119 + DRM_INFO(" %p: %08x\n", obj_priv, 120 + obj_priv->last_rendering_seqno); 121 + } 122 + DRM_INFO("}\n"); 123 + DRM_INFO("inactive %s {\n", where); 124 + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { 125 + DRM_INFO(" %p: %08x\n", obj_priv, 126 + obj_priv->last_rendering_seqno); 127 + } 128 + DRM_INFO("}\n"); 129 + } 130 + #endif 131 + 132 + 133 + #if WATCH_COHERENCY 134 + void 135 + i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) 136 + { 137 + struct drm_device *dev = obj->dev; 138 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 139 + int page; 140 + uint32_t *gtt_mapping; 141 + uint32_t *backing_map = NULL; 142 + int bad_count = 0; 143 + 144 + DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", 145 + __func__, obj, obj_priv->gtt_offset, handle, 146 + obj->size / 1024); 147 + 148 + gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, 149 + obj->size); 150 + if (gtt_mapping == NULL) { 151 + DRM_ERROR("failed to map GTT space\n"); 152 + return; 153 + } 154 + 155 + for (page = 0; page < obj->size / PAGE_SIZE; page++) { 156 + int i; 157 + 158 + backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); 159 + 160 + if (backing_map == NULL) { 161 + DRM_ERROR("failed to map backing page\n"); 162 + goto out; 163 + } 164 + 165 + for (i = 0; i < PAGE_SIZE / 4; i++) { 166 + uint32_t cpuval = backing_map[i]; 167 + uint32_t gttval = readl(gtt_mapping + 168 + page * 1024 + i); 169 + 170 + if (cpuval != gttval) { 171 + DRM_INFO("incoherent CPU vs GPU at 0x%08x: " 172 + "0x%08x vs 0x%08x\n", 173 + (int)(obj_priv->gtt_offset + 174 + page * PAGE_SIZE + i * 4), 175 + cpuval, gttval); 176 + if (bad_count++ >= 8) { 177 + DRM_INFO("...\n"); 178 + goto out; 179 + } 180 + } 181 + } 182 + kunmap_atomic(backing_map, KM_USER0); 183 + backing_map = NULL; 184 + } 185 + 186 + out: 187 + if (backing_map != NULL) 188 + kunmap_atomic(backing_map, KM_USER0); 189 + iounmap(gtt_mapping); 190 + 191 + /* give syslog time to catch up */ 192 + 
msleep(1); 193 + 194 + /* Directly flush the object, since we just loaded values with the CPU 195 + * from the backing pages and we don't want to disturb the cache 196 + * management that we're trying to observe. 197 + */ 198 + 199 + i915_gem_clflush_object(obj); 200 + } 201 + #endif
+292
drivers/gpu/drm/i915/i915_gem_proc.c
··· 1 + /* 2 + * Copyright © 2008 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Eric Anholt <eric@anholt.net> 25 + * Keith Packard <keithp@keithp.com> 26 + * 27 + */ 28 + 29 + #include "drmP.h" 30 + #include "drm.h" 31 + #include "i915_drm.h" 32 + #include "i915_drv.h" 33 + 34 + static int i915_gem_active_info(char *buf, char **start, off_t offset, 35 + int request, int *eof, void *data) 36 + { 37 + struct drm_minor *minor = (struct drm_minor *) data; 38 + struct drm_device *dev = minor->dev; 39 + drm_i915_private_t *dev_priv = dev->dev_private; 40 + struct drm_i915_gem_object *obj_priv; 41 + int len = 0; 42 + 43 + if (offset > DRM_PROC_LIMIT) { 44 + *eof = 1; 45 + return 0; 46 + } 47 + 48 + *start = &buf[offset]; 49 + *eof = 0; 50 + DRM_PROC_PRINT("Active:\n"); 51 + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, 52 + list) 53 + { 54 + struct drm_gem_object *obj = obj_priv->obj; 55 + if (obj->name) { 56 + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", 57 + obj, obj->name, 58 + obj->read_domains, obj->write_domain, 59 + obj_priv->last_rendering_seqno); 60 + } else { 61 + DRM_PROC_PRINT(" %p: %08x %08x %d\n", 62 + obj, 63 + obj->read_domains, obj->write_domain, 64 + obj_priv->last_rendering_seqno); 65 + } 66 + } 67 + if (len > request + offset) 68 + return request; 69 + *eof = 1; 70 + return len - offset; 71 + } 72 + 73 + static int i915_gem_flushing_info(char *buf, char **start, off_t offset, 74 + int request, int *eof, void *data) 75 + { 76 + struct drm_minor *minor = (struct drm_minor *) data; 77 + struct drm_device *dev = minor->dev; 78 + drm_i915_private_t *dev_priv = dev->dev_private; 79 + struct drm_i915_gem_object *obj_priv; 80 + int len = 0; 81 + 82 + if (offset > DRM_PROC_LIMIT) { 83 + *eof = 1; 84 + return 0; 85 + } 86 + 87 + *start = &buf[offset]; 88 + *eof = 0; 89 + DRM_PROC_PRINT("Flushing:\n"); 90 + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, 91 + list) 92 + { 93 + struct drm_gem_object *obj = obj_priv->obj; 94 + if (obj->name) { 95 + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", 96 + obj, obj->name, 97 + obj->read_domains, obj->write_domain, 98 + obj_priv->last_rendering_seqno); 99 + } else { 100 + DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, 101 + obj->read_domains, obj->write_domain, 102 + obj_priv->last_rendering_seqno); 103 + } 104 + } 105 + if (len > request + offset) 106 + return request; 107 + *eof 
= 1; 108 + return len - offset; 109 + } 110 + 111 + static int i915_gem_inactive_info(char *buf, char **start, off_t offset, 112 + int request, int *eof, void *data) 113 + { 114 + struct drm_minor *minor = (struct drm_minor *) data; 115 + struct drm_device *dev = minor->dev; 116 + drm_i915_private_t *dev_priv = dev->dev_private; 117 + struct drm_i915_gem_object *obj_priv; 118 + int len = 0; 119 + 120 + if (offset > DRM_PROC_LIMIT) { 121 + *eof = 1; 122 + return 0; 123 + } 124 + 125 + *start = &buf[offset]; 126 + *eof = 0; 127 + DRM_PROC_PRINT("Inactive:\n"); 128 + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, 129 + list) 130 + { 131 + struct drm_gem_object *obj = obj_priv->obj; 132 + if (obj->name) { 133 + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", 134 + obj, obj->name, 135 + obj->read_domains, obj->write_domain, 136 + obj_priv->last_rendering_seqno); 137 + } else { 138 + DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, 139 + obj->read_domains, obj->write_domain, 140 + obj_priv->last_rendering_seqno); 141 + } 142 + } 143 + if (len > request + offset) 144 + return request; 145 + *eof = 1; 146 + return len - offset; 147 + } 148 + 149 + static int i915_gem_request_info(char *buf, char **start, off_t offset, 150 + int request, int *eof, void *data) 151 + { 152 + struct drm_minor *minor = (struct drm_minor *) data; 153 + struct drm_device *dev = minor->dev; 154 + drm_i915_private_t *dev_priv = dev->dev_private; 155 + struct drm_i915_gem_request *gem_request; 156 + int len = 0; 157 + 158 + if (offset > DRM_PROC_LIMIT) { 159 + *eof = 1; 160 + return 0; 161 + } 162 + 163 + *start = &buf[offset]; 164 + *eof = 0; 165 + DRM_PROC_PRINT("Request:\n"); 166 + list_for_each_entry(gem_request, &dev_priv->mm.request_list, 167 + list) 168 + { 169 + DRM_PROC_PRINT(" %d @ %d %08x\n", 170 + gem_request->seqno, 171 + (int) (jiffies - gem_request->emitted_jiffies), 172 + gem_request->flush_domains); 173 + } 174 + if (len > request + offset) 175 + return request; 176 + *eof = 1; 177 + return len - offset; 178 + } 179 + 180 + static int i915_gem_seqno_info(char *buf, char **start, off_t offset, 181 + int request, int *eof, void *data) 182 + { 183 + struct drm_minor *minor = (struct drm_minor *) data; 184 + struct drm_device *dev = minor->dev; 185 + drm_i915_private_t *dev_priv = dev->dev_private; 186 + int len = 0; 187 + 188 + if (offset > DRM_PROC_LIMIT) { 189 + *eof = 1; 190 + return 0; 191 + } 192 + 193 + *start = &buf[offset]; 194 + *eof = 0; 195 + DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev)); 196 + DRM_PROC_PRINT("Waiter sequence: %d\n", 197 + dev_priv->mm.waiting_gem_seqno); 198 + DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); 199 + if (len > request + offset) 200 + return request; 201 + *eof = 1; 202 + return len - offset; 203 + } 204 + 205 + 206 + static int i915_interrupt_info(char *buf, char **start, off_t offset, 207 + int request, int *eof, void *data) 208 + { 209 + struct drm_minor *minor = (struct drm_minor *) data; 210 + struct drm_device *dev = minor->dev; 211 + drm_i915_private_t *dev_priv = dev->dev_private; 212 + int len = 0; 213 + 214 + if (offset > DRM_PROC_LIMIT) { 215 + *eof = 1; 216 + return 0; 217 + } 218 + 219 + *start = &buf[offset]; 220 + *eof = 0; 221 + DRM_PROC_PRINT("Interrupt enable: %08x\n", 222 + I915_READ(IER)); 223 + DRM_PROC_PRINT("Interrupt identity: %08x\n", 224 + I915_READ(IIR)); 225 + DRM_PROC_PRINT("Interrupt mask: %08x\n", 226 + I915_READ(IMR)); 227 + DRM_PROC_PRINT("Pipe A stat: %08x\n", 228 + I915_READ(PIPEASTAT)); 229 + 
DRM_PROC_PRINT("Pipe B stat: %08x\n", 230 + I915_READ(PIPEBSTAT)); 231 + DRM_PROC_PRINT("Interrupts received: %d\n", 232 + atomic_read(&dev_priv->irq_received)); 233 + DRM_PROC_PRINT("Current sequence: %d\n", 234 + i915_get_gem_seqno(dev)); 235 + DRM_PROC_PRINT("Waiter sequence: %d\n", 236 + dev_priv->mm.waiting_gem_seqno); 237 + DRM_PROC_PRINT("IRQ sequence: %d\n", 238 + dev_priv->mm.irq_gem_seqno); 239 + if (len > request + offset) 240 + return request; 241 + *eof = 1; 242 + return len - offset; 243 + } 244 + 245 + static struct drm_proc_list { 246 + /** file name */ 247 + const char *name; 248 + /** proc callback*/ 249 + int (*f) (char *, char **, off_t, int, int *, void *); 250 + } i915_gem_proc_list[] = { 251 + {"i915_gem_active", i915_gem_active_info}, 252 + {"i915_gem_flushing", i915_gem_flushing_info}, 253 + {"i915_gem_inactive", i915_gem_inactive_info}, 254 + {"i915_gem_request", i915_gem_request_info}, 255 + {"i915_gem_seqno", i915_gem_seqno_info}, 256 + {"i915_gem_interrupt", i915_interrupt_info}, 257 + }; 258 + 259 + #define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) 260 + 261 + int i915_gem_proc_init(struct drm_minor *minor) 262 + { 263 + struct proc_dir_entry *ent; 264 + int i, j; 265 + 266 + for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) { 267 + ent = create_proc_entry(i915_gem_proc_list[i].name, 268 + S_IFREG | S_IRUGO, minor->dev_root); 269 + if (!ent) { 270 + DRM_ERROR("Cannot create /proc/dri/.../%s\n", 271 + i915_gem_proc_list[i].name); 272 + for (j = 0; j < i; j++) 273 + remove_proc_entry(i915_gem_proc_list[i].name, 274 + minor->dev_root); 275 + return -1; 276 + } 277 + ent->read_proc = i915_gem_proc_list[i].f; 278 + ent->data = minor; 279 + } 280 + return 0; 281 + } 282 + 283 + void i915_gem_proc_cleanup(struct drm_minor *minor) 284 + { 285 + int i; 286 + 287 + if (!minor->dev_root) 288 + return; 289 + 290 + for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) 291 + remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root); 292 + }
+256
drivers/gpu/drm/i915/i915_gem_tiling.c
··· 1 + /* 2 + * Copyright © 2008 Intel Corporation 3 + * 4 + * Permission is hereby granted, free of charge, to any person obtaining a 5 + * copy of this software and associated documentation files (the "Software"), 6 + * to deal in the Software without restriction, including without limitation 7 + * the rights to use, copy, modify, merge, publish, distribute, sublicense, 8 + * and/or sell copies of the Software, and to permit persons to whom the 9 + * Software is furnished to do so, subject to the following conditions: 10 + * 11 + * The above copyright notice and this permission notice (including the next 12 + * paragraph) shall be included in all copies or substantial portions of the 13 + * Software. 14 + * 15 + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 18 + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING 20 + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS 21 + * IN THE SOFTWARE. 22 + * 23 + * Authors: 24 + * Eric Anholt <eric@anholt.net> 25 + * 26 + */ 27 + 28 + #include "drmP.h" 29 + #include "drm.h" 30 + #include "i915_drm.h" 31 + #include "i915_drv.h" 32 + 33 + /** @file i915_gem_tiling.c 34 + * 35 + * Support for managing tiling state of buffer objects. 36 + * 37 + * The idea behind tiling is to increase cache hit rates by rearranging 38 + * pixel data so that a group of pixel accesses are in the same cacheline. 39 + * Performance improvement from doing this on the back/depth buffer are on 40 + * the order of 30%. 41 + * 42 + * Intel architectures make this somewhat more complicated, though, by 43 + * adjustments made to addressing of data when the memory is in interleaved 44 + * mode (matched pairs of DIMMS) to improve memory bandwidth. 45 + * For interleaved memory, the CPU sends every sequential 64 bytes 46 + * to an alternate memory channel so it can get the bandwidth from both. 47 + * 48 + * The GPU also rearranges its accesses for increased bandwidth to interleaved 49 + * memory, and it matches what the CPU does for non-tiled. However, when tiled 50 + * it does it a little differently, since one walks addresses not just in the 51 + * X direction but also Y. So, along with alternating channels when bit 52 + * 6 of the address flips, it also alternates when other bits flip -- Bits 9 53 + * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines) 54 + * are common to both the 915 and 965-class hardware. 55 + * 56 + * The CPU also sometimes XORs in higher bits as well, to improve 57 + * bandwidth doing strided access like we do so frequently in graphics. This 58 + * is called "Channel XOR Randomization" in the MCH documentation. The result 59 + * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address 60 + * decode. 61 + * 62 + * All of this bit 6 XORing has an effect on our memory management, 63 + * as we need to make sure that the 3d driver can correctly address object 64 + * contents. 65 + * 66 + * If we don't have interleaved memory, all tiling is safe and no swizzling is 67 + * required. 68 + * 69 + * When bit 17 is XORed in, we simply refuse to tile at all. 
Bit 70 + * 17 is not just a page offset, so as we page an object out and back in, 71 + * individual pages in it will have different bit 17 addresses, resulting in 72 + * each 64 bytes being swapped with its neighbor! 73 + * 74 + * Otherwise, if interleaved, we have to tell the 3d driver what the address 75 + * swizzling it needs to do is, since it's writing with the CPU to the pages 76 + * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the 77 + * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling 78 + * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order 79 + * to match what the GPU expects. 80 + */ 81 + 82 + /** 83 + * Detects bit 6 swizzling of address lookup between IGD access and CPU 84 + * access through main memory. 85 + */ 86 + void 87 + i915_gem_detect_bit_6_swizzle(struct drm_device *dev) 88 + { 89 + drm_i915_private_t *dev_priv = dev->dev_private; 90 + uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 91 + uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 92 + 93 + if (!IS_I9XX(dev)) { 94 + /* As far as we know, the 865 doesn't have these bit 6 95 + * swizzling issues. 96 + */ 97 + swizzle_x = I915_BIT_6_SWIZZLE_NONE; 98 + swizzle_y = I915_BIT_6_SWIZZLE_NONE; 99 + } else if (!IS_I965G(dev) || IS_I965GM(dev)) { 100 + uint32_t dcc; 101 + 102 + /* On 915-945 and GM965, channel interleave by the CPU is 103 + * determined by DCC. The CPU will alternate based on bit 6 104 + * in interleaved mode, and the GPU will then also alternate 105 + * on bit 6, 9, and 10 for X, but the CPU may also optionally 106 + * alternate based on bit 17 (XOR not disabled and XOR 107 + * bit == 17). 108 + */ 109 + dcc = I915_READ(DCC); 110 + switch (dcc & DCC_ADDRESSING_MODE_MASK) { 111 + case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: 112 + case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: 113 + swizzle_x = I915_BIT_6_SWIZZLE_NONE; 114 + swizzle_y = I915_BIT_6_SWIZZLE_NONE; 115 + break; 116 + case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: 117 + if (IS_I915G(dev) || IS_I915GM(dev) || 118 + dcc & DCC_CHANNEL_XOR_DISABLE) { 119 + swizzle_x = I915_BIT_6_SWIZZLE_9_10; 120 + swizzle_y = I915_BIT_6_SWIZZLE_9; 121 + } else if (IS_I965GM(dev)) { 122 + /* GM965 only does bit 11-based channel 123 + * randomization 124 + */ 125 + swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; 126 + swizzle_y = I915_BIT_6_SWIZZLE_9_11; 127 + } else { 128 + /* Bit 17 or perhaps other swizzling */ 129 + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 130 + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 131 + } 132 + break; 133 + } 134 + if (dcc == 0xffffffff) { 135 + DRM_ERROR("Couldn't read from MCHBAR. " 136 + "Disabling tiling.\n"); 137 + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 138 + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 139 + } 140 + } else { 141 + /* The 965, G33, and newer, have a very flexible memory 142 + * configuration. It will enable dual-channel mode 143 + * (interleaving) on as much memory as it can, and the GPU 144 + * will additionally sometimes enable different bit 6 145 + * swizzling for tiled objects from the CPU.
146 + * 147 + * Here's what I found on the G965: 148 + * slot fill memory size swizzling 149 + * 0A 0B 1A 1B 1-ch 2-ch 150 + * 512 0 0 0 512 0 O 151 + * 512 0 512 0 16 1008 X 152 + * 512 0 0 512 16 1008 X 153 + * 0 512 0 512 16 1008 X 154 + * 1024 1024 1024 0 2048 1024 O 155 + * 156 + * We could probably detect this based on either the DRB 157 + * matching, which was the case for the swizzling required in 158 + * the table above, or from the 1-ch value being less than 159 + * the minimum size of a rank. 160 + */ 161 + if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { 162 + swizzle_x = I915_BIT_6_SWIZZLE_NONE; 163 + swizzle_y = I915_BIT_6_SWIZZLE_NONE; 164 + } else { 165 + swizzle_x = I915_BIT_6_SWIZZLE_9_10; 166 + swizzle_y = I915_BIT_6_SWIZZLE_9; 167 + } 168 + } 169 + 170 + dev_priv->mm.bit_6_swizzle_x = swizzle_x; 171 + dev_priv->mm.bit_6_swizzle_y = swizzle_y; 172 + } 173 + 174 + /** 175 + * Sets the tiling mode of an object, returning the required swizzling of 176 + * bit 6 of addresses in the object. 177 + */ 178 + int 179 + i915_gem_set_tiling(struct drm_device *dev, void *data, 180 + struct drm_file *file_priv) 181 + { 182 + struct drm_i915_gem_set_tiling *args = data; 183 + drm_i915_private_t *dev_priv = dev->dev_private; 184 + struct drm_gem_object *obj; 185 + struct drm_i915_gem_object *obj_priv; 186 + 187 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 188 + if (obj == NULL) 189 + return -EINVAL; 190 + obj_priv = obj->driver_private; 191 + 192 + mutex_lock(&dev->struct_mutex); 193 + 194 + if (args->tiling_mode == I915_TILING_NONE) { 195 + obj_priv->tiling_mode = I915_TILING_NONE; 196 + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 197 + } else { 198 + if (args->tiling_mode == I915_TILING_X) 199 + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 200 + else 201 + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; 202 + /* If we can't handle the swizzling, make it untiled. */ 203 + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { 204 + args->tiling_mode = I915_TILING_NONE; 205 + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 206 + } 207 + } 208 + obj_priv->tiling_mode = args->tiling_mode; 209 + 210 + mutex_unlock(&dev->struct_mutex); 211 + 212 + drm_gem_object_unreference(obj); 213 + 214 + return 0; 215 + } 216 + 217 + /** 218 + * Returns the current tiling mode and required bit 6 swizzling for the object. 219 + */ 220 + int 221 + i915_gem_get_tiling(struct drm_device *dev, void *data, 222 + struct drm_file *file_priv) 223 + { 224 + struct drm_i915_gem_get_tiling *args = data; 225 + drm_i915_private_t *dev_priv = dev->dev_private; 226 + struct drm_gem_object *obj; 227 + struct drm_i915_gem_object *obj_priv; 228 + 229 + obj = drm_gem_object_lookup(dev, file_priv, args->handle); 230 + if (obj == NULL) 231 + return -EINVAL; 232 + obj_priv = obj->driver_private; 233 + 234 + mutex_lock(&dev->struct_mutex); 235 + 236 + args->tiling_mode = obj_priv->tiling_mode; 237 + switch (obj_priv->tiling_mode) { 238 + case I915_TILING_X: 239 + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; 240 + break; 241 + case I915_TILING_Y: 242 + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; 243 + break; 244 + case I915_TILING_NONE: 245 + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 246 + break; 247 + default: 248 + DRM_ERROR("unknown tiling mode\n"); 249 + } 250 + 251 + mutex_unlock(&dev->struct_mutex); 252 + 253 + drm_gem_object_unreference(obj); 254 + 255 + return 0; 256 + }
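For CPU access to a tiled object, the swizzle_mode reported by the get_tiling ioctl amounts to XORing the named address bits into bit 6 of every byte offset, as the comment at the top of this file explains. A sketch of that transform for userspace copies (the helper is invented here for illustration):

    #include <stdint.h>
    #include "i915_drm.h"

    /* Fold the reported bit-6 swizzle into a linear byte offset. */
    static uint64_t apply_bit6_swizzle(uint64_t offset, uint32_t swizzle_mode)
    {
        uint64_t bit6;

        switch (swizzle_mode) {
        case I915_BIT_6_SWIZZLE_9:
            bit6 = (offset >> 9) & 1;
            break;
        case I915_BIT_6_SWIZZLE_9_10:
            bit6 = ((offset >> 9) ^ (offset >> 10)) & 1;
            break;
        case I915_BIT_6_SWIZZLE_9_11:
            bit6 = ((offset >> 9) ^ (offset >> 11)) & 1;
            break;
        case I915_BIT_6_SWIZZLE_9_10_11:
            bit6 = ((offset >> 9) ^ (offset >> 10) ^ (offset >> 11)) & 1;
            break;
        default:    /* NONE, or UNKNOWN (tiling refused) */
            return offset;
        }
        return offset ^ (bit6 << 6);
    }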
+12 -7
drivers/gpu/drm/i915/i915_irq.c
··· 407 407 I915_WRITE(PIPEBSTAT, pipeb_stats); 408 408 } 409 409 410 - if (iir & I915_ASLE_INTERRUPT) 411 - opregion_asle_intr(dev); 410 + I915_WRITE(IIR, iir); 411 + if (dev->pdev->msi_enabled) 412 + I915_WRITE(IMR, dev_priv->irq_mask_reg); 413 + (void) I915_READ(IIR); /* Flush posted writes */ 412 414 413 415 dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); 414 416 415 - if (dev->pdev->msi_enabled) 416 - I915_WRITE(IMR, dev_priv->irq_mask_reg); 417 - I915_WRITE(IIR, iir); 418 - (void) I915_READ(IIR); 417 + if (iir & I915_USER_INTERRUPT) { 418 + dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); 419 + DRM_WAKEUP(&dev_priv->irq_queue); 420 + } 421 + 422 + if (iir & I915_ASLE_INTERRUPT) 423 + opregion_asle_intr(dev); 419 424 420 425 if (vblank && dev_priv->swaps_pending > 0) 421 426 drm_locked_tasklet(dev, i915_vblank_tasklet); ··· 454 449 return dev_priv->counter; 455 450 } 456 451 457 - static void i915_user_irq_get(struct drm_device *dev) 452 + void i915_user_irq_get(struct drm_device *dev) 458 453 { 459 454 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 460 455
+24 -13
drivers/gpu/drm/i915/i915_reg.h
··· 25 25 #ifndef _I915_REG_H_ 26 26 #define _I915_REG_H_ 27 27 28 - /* MCH MMIO space */ 29 - /** 915-945 and GM965 MCH register controlling DRAM channel access */ 30 - #define DCC 0x200 31 - #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) 32 - #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) 33 - #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) 34 - #define DCC_ADDRESSING_MODE_MASK (3 << 0) 35 - #define DCC_CHANNEL_XOR_DISABLE (1 << 10) 36 - 37 - /** 965 MCH register controlling DRAM channel configuration */ 38 - #define CHDECMISC 0x111 39 - #define CHDECMISC_FLEXMEMORY (1 << 1) 40 - 41 28 /* 42 29 * The Bridge device's PCI config space has information about the 43 30 * fb aperture size and the amount of pre-reserved memory. ··· 502 515 503 516 #define PALETTE_A 0x0a000 504 517 #define PALETTE_B 0x0a800 518 + 519 + /* MCH MMIO space */ 520 + 521 + /* 522 + * MCHBAR mirror. 523 + * 524 + * This mirrors the MCHBAR MMIO space whose location is determined by 525 + * device 0 function 0's pci config register 0x44 or 0x48 and matches it in 526 + * every way. It is not accessible from the CP register read instructions. 527 + * 528 + */ 529 + #define MCHBAR_MIRROR_BASE 0x10000 530 + 531 + /** 915-945 and GM965 MCH register controlling DRAM channel access */ 532 + #define DCC 0x10200 533 + #define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) 534 + #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) 535 + #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) 536 + #define DCC_ADDRESSING_MODE_MASK (3 << 0) 537 + #define DCC_CHANNEL_XOR_DISABLE (1 << 10) 538 + 539 + /** 965 MCH register controlling DRAM channel configuration */ 540 + #define C0DRB3 0x10206 541 + #define C1DRB3 0x10606 505 542 506 543 /* 507 544 * Overlay regs
+31
include/drm/drm.h
··· 570 570 int drm_dd_minor; 571 571 }; 572 572 573 + /** DRM_IOCTL_GEM_CLOSE ioctl argument type */ 574 + struct drm_gem_close { 575 + /** Handle of the object to be closed. */ 576 + uint32_t handle; 577 + uint32_t pad; 578 + }; 579 + 580 + /** DRM_IOCTL_GEM_FLINK ioctl argument type */ 581 + struct drm_gem_flink { 582 + /** Handle for the object being named */ 583 + uint32_t handle; 584 + 585 + /** Returned global name */ 586 + uint32_t name; 587 + }; 588 + 589 + /** DRM_IOCTL_GEM_OPEN ioctl argument type */ 590 + struct drm_gem_open { 591 + /** Name of object being opened */ 592 + uint32_t name; 593 + 594 + /** Returned handle for the object */ 595 + uint32_t handle; 596 + 597 + /** Returned size of the object */ 598 + uint64_t size; 599 + }; 600 + 573 601 #define DRM_IOCTL_BASE 'd' 574 602 #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) 575 603 #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) ··· 613 585 #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) 614 586 #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) 615 587 #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) 588 + #define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) 589 + #define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) 590 + #define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) 616 591 617 592 #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) 618 593 #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth)
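The three core ioctls above give every GEM object a per-fd handle and, via flink, an optional global name that another process can open. A sketch of that handshake from userspace (helper names are illustrative; both processes are assumed to have the device open already):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include "drm.h"

    /* Exporter: publish a global name for a local handle. */
    static int gem_flink(int fd, uint32_t handle, uint32_t *name)
    {
        struct drm_gem_flink flink = { .handle = handle };

        if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink))
            return -1;
        *name = flink.name;
        return 0;
    }

    /* Importer: turn the global name back into a local handle. */
    static int gem_open_name(int fd, uint32_t name, uint32_t *handle, uint64_t *size)
    {
        struct drm_gem_open op = { .name = name };

        if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &op))
            return -1;
        *handle = op.handle;
        *size = op.size;
        return 0;
    }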
+151
include/drm/drmP.h
··· 104 104 #define DRIVER_DMA_QUEUE 0x200 105 105 #define DRIVER_FB_DMA 0x400 106 106 #define DRIVER_IRQ_VBL2 0x800 107 + #define DRIVER_GEM 0x1000 107 108 108 109 /***********************************************************************/ 109 110 /** \name Begin the DRM... */ ··· 388 387 struct drm_minor *minor; 389 388 int remove_auth_on_close; 390 389 unsigned long lock_count; 390 + /** Mapping of mm object handles to object pointers. */ 391 + struct idr object_idr; 392 + /** Lock for synchronization of access to object_idr. */ 393 + spinlock_t table_lock; 391 394 struct file *filp; 392 395 void *driver_priv; 393 396 }; ··· 563 558 }; 564 559 565 560 /** 561 + * This structure defines the drm_mm memory object, which will be used by the 562 + * DRM for its buffer objects. 563 + */ 564 + struct drm_gem_object { 565 + /** Reference count of this object */ 566 + struct kref refcount; 567 + 568 + /** Handle count of this object. Each handle also holds a reference */ 569 + struct kref handlecount; 570 + 571 + /** Related drm device */ 572 + struct drm_device *dev; 573 + 574 + /** File representing the shmem storage */ 575 + struct file *filp; 576 + 577 + /** 578 + * Size of the object, in bytes. Immutable over the object's 579 + * lifetime. 580 + */ 581 + size_t size; 582 + 583 + /** 584 + * Global name for this object, starts at 1. 0 means unnamed. 585 + * Access is covered by the object_name_lock in the related drm_device 586 + */ 587 + int name; 588 + 589 + /** 590 + * Memory domains. These monitor which caches contain read/write data 591 + * related to the object. When transitioning from one set of domains 592 + * to another, the driver is called to ensure that caches are suitably 593 + * flushed and invalidated 594 + */ 595 + uint32_t read_domains; 596 + uint32_t write_domain; 597 + 598 + /** 599 + * While validating an exec operation, the 600 + * new read/write domain values are computed here. 601 + * They will be transferred to the above values 602 + * at the point that any cache flushing occurs 603 + */ 604 + uint32_t pending_read_domains; 605 + uint32_t pending_write_domain; 606 + 607 + void *driver_private; 608 + }; 609 + 610 + /** 566 611 * DRM driver structure. This structure represent the common code for 567 612 * a family of cards. There will one drm_device for each card present 568 613 * in this family ··· 711 656 unsigned long (*get_reg_ofs) (struct drm_device *dev); 712 657 void (*set_version) (struct drm_device *dev, 713 658 struct drm_set_version *sv); 659 + 660 + int (*proc_init)(struct drm_minor *minor); 661 + void (*proc_cleanup)(struct drm_minor *minor); 662 + 663 + /** 664 + * Driver-specific constructor for drm_gem_objects, to set up 665 + * obj->driver_private. 666 + * 667 + * Returns 0 on success. 
668 + */ 669 + int (*gem_init_object) (struct drm_gem_object *obj); 670 + void (*gem_free_object) (struct drm_gem_object *obj); 714 671 715 672 int major; 716 673 int minor; ··· 897 830 spinlock_t drw_lock; 898 831 struct idr drw_idr; 899 832 /*@} */ 833 + 834 + /** \name GEM information */ 835 + /*@{ */ 836 + spinlock_t object_name_lock; 837 + struct idr object_name_idr; 838 + atomic_t object_count; 839 + atomic_t object_memory; 840 + atomic_t pin_count; 841 + atomic_t pin_memory; 842 + atomic_t gtt_count; 843 + atomic_t gtt_memory; 844 + uint32_t gtt_total; 845 + uint32_t invalidate_domains; /* domains pending invalidation */ 846 + uint32_t flush_domains; /* domains pending flush */ 847 + /*@} */ 848 + 900 849 }; 901 850 902 851 static __inline__ int drm_core_check_feature(struct drm_device *dev, ··· 1009 926 extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); 1010 927 extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); 1011 928 extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); 929 + extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, 930 + struct page **pages, 931 + unsigned long num_pages, 932 + uint32_t gtt_offset); 1012 933 extern int drm_unbind_agp(DRM_AGP_MEM * handle); 1013 934 1014 935 /* Misc. IOCTL support (drm_ioctl.h) */ ··· 1074 987 struct drm_file *file_priv); 1075 988 extern int drm_authmagic(struct drm_device *dev, void *data, 1076 989 struct drm_file *file_priv); 990 + 991 + /* Cache management (drm_cache.c) */ 992 + void drm_clflush_pages(struct page *pages[], unsigned long num_pages); 1077 993 1078 994 /* Locking IOCTL support (drm_lock.h) */ 1079 995 extern int drm_lock(struct drm_device *dev, void *data, ··· 1184 1094 extern int drm_agp_free_memory(DRM_AGP_MEM * handle); 1185 1095 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); 1186 1096 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); 1097 + extern void drm_agp_chipset_flush(struct drm_device *dev); 1187 1098 1188 1099 /* Stub support (drm_stub.h) */ 1189 1100 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, ··· 1246 1155 extern unsigned long drm_mm_tail_space(struct drm_mm *mm); 1247 1156 extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); 1248 1157 extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); 1158 + 1159 + /* Graphics Execution Manager library functions (drm_gem.c) */ 1160 + int drm_gem_init(struct drm_device *dev); 1161 + void drm_gem_object_free(struct kref *kref); 1162 + struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, 1163 + size_t size); 1164 + void drm_gem_object_handle_free(struct kref *kref); 1165 + 1166 + static inline void 1167 + drm_gem_object_reference(struct drm_gem_object *obj) 1168 + { 1169 + kref_get(&obj->refcount); 1170 + } 1171 + 1172 + static inline void 1173 + drm_gem_object_unreference(struct drm_gem_object *obj) 1174 + { 1175 + if (obj == NULL) 1176 + return; 1177 + 1178 + kref_put(&obj->refcount, drm_gem_object_free); 1179 + } 1180 + 1181 + int drm_gem_handle_create(struct drm_file *file_priv, 1182 + struct drm_gem_object *obj, 1183 + int *handlep); 1184 + 1185 + static inline void 1186 + drm_gem_object_handle_reference(struct drm_gem_object *obj) 1187 + { 1188 + drm_gem_object_reference(obj); 1189 + kref_get(&obj->handlecount); 1190 + } 1191 + 1192 + static inline void 1193 + drm_gem_object_handle_unreference(struct drm_gem_object *obj) 1194 + { 1195 + if (obj == NULL) 1196 + return; 1197 + 1198 + 
/* 1199 + * Must bump handle count first as this may be the last 1200 + * ref, in which case the object would disappear before we 1201 + * checked for a name 1202 + */ 1203 + kref_put(&obj->handlecount, drm_gem_object_handle_free); 1204 + drm_gem_object_unreference(obj); 1205 + } 1206 + 1207 + struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, 1208 + struct drm_file *filp, 1209 + int handle); 1210 + int drm_gem_close_ioctl(struct drm_device *dev, void *data, 1211 + struct drm_file *file_priv); 1212 + int drm_gem_flink_ioctl(struct drm_device *dev, void *data, 1213 + struct drm_file *file_priv); 1214 + int drm_gem_open_ioctl(struct drm_device *dev, void *data, 1215 + struct drm_file *file_priv); 1216 + void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); 1217 + void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); 1249 1218 1250 1219 extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); 1251 1220 extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev);
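Driver code is expected to use these helpers in a lookup/use/release pattern: drm_gem_object_lookup returns the object with a reference already held for the caller, who drops it with drm_gem_object_unreference when done, exactly as i915_gem_set_tiling does above. A condensed sketch of that pattern (illustrative, not code from the patch):

    #include "drmP.h"

    static int example_touch_object(struct drm_device *dev,
                                    struct drm_file *file_priv, int handle)
    {
        struct drm_gem_object *obj;

        /* Lookup takes a reference on our behalf. */
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (obj == NULL)
            return -EINVAL;

        mutex_lock(&dev->struct_mutex);
        /* ... inspect or update obj / obj->driver_private here ... */
        mutex_unlock(&dev->struct_mutex);

        drm_gem_object_unreference(obj);
        return 0;
    }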
+332
include/drm/i915_drm.h
··· 143 143 #define DRM_I915_GET_VBLANK_PIPE 0x0e 144 144 #define DRM_I915_VBLANK_SWAP 0x0f 145 145 #define DRM_I915_HWS_ADDR 0x11 146 + #define DRM_I915_GEM_INIT 0x13 147 + #define DRM_I915_GEM_EXECBUFFER 0x14 148 + #define DRM_I915_GEM_PIN 0x15 149 + #define DRM_I915_GEM_UNPIN 0x16 150 + #define DRM_I915_GEM_BUSY 0x17 151 + #define DRM_I915_GEM_THROTTLE 0x18 152 + #define DRM_I915_GEM_ENTERVT 0x19 153 + #define DRM_I915_GEM_LEAVEVT 0x1a 154 + #define DRM_I915_GEM_CREATE 0x1b 155 + #define DRM_I915_GEM_PREAD 0x1c 156 + #define DRM_I915_GEM_PWRITE 0x1d 157 + #define DRM_I915_GEM_MMAP 0x1e 158 + #define DRM_I915_GEM_SET_DOMAIN 0x1f 159 + #define DRM_I915_GEM_SW_FINISH 0x20 160 + #define DRM_I915_GEM_SET_TILING 0x21 161 + #define DRM_I915_GEM_GET_TILING 0x22 146 162 147 163 #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) 148 164 #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) ··· 176 160 #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 177 161 #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) 178 162 #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) 163 + #define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) 164 + #define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) 165 + #define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) 166 + #define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) 167 + #define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) 168 + #define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) 169 + #define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) 170 + #define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) 171 + #define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) 172 + #define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) 173 + #define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) 174 + #define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) 175 + #define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) 176 + #define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) 179 177 180 178 /* Allow drivers to submit batchbuffers directly to hardware, relying 181 179 * on the security mechanisms provided by hardware. ··· 230 200 #define I915_PARAM_IRQ_ACTIVE 1 231 201 #define I915_PARAM_ALLOW_BATCHBUFFER 2 232 202 #define I915_PARAM_LAST_DISPATCH 3 203 + #define I915_PARAM_HAS_GEM 5 233 204 234 205 typedef struct drm_i915_getparam { 235 206 int param; ··· 297 266 typedef struct drm_i915_hws_addr { 298 267 uint64_t addr; 299 268 } drm_i915_hws_addr_t; 269 + 270 + struct drm_i915_gem_init { 271 + /** 272 + * Beginning offset in the GTT to be managed by the DRM memory 273 + * manager. 
274 + */ 275 + uint64_t gtt_start; 276 + /** 277 + * Ending offset in the GTT to be managed by the DRM memory 278 + * manager. 279 + */ 280 + uint64_t gtt_end; 281 + }; 282 + 283 + struct drm_i915_gem_create { 284 + /** 285 + * Requested size for the object. 286 + * 287 + * The (page-aligned) allocated size for the object will be returned. 288 + */ 289 + uint64_t size; 290 + /** 291 + * Returned handle for the object. 292 + * 293 + * Object handles are nonzero. 294 + */ 295 + uint32_t handle; 296 + uint32_t pad; 297 + }; 298 + 299 + struct drm_i915_gem_pread { 300 + /** Handle for the object being read. */ 301 + uint32_t handle; 302 + uint32_t pad; 303 + /** Offset into the object to read from */ 304 + uint64_t offset; 305 + /** Length of data to read */ 306 + uint64_t size; 307 + /** 308 + * Pointer to write the data into. 309 + * 310 + * This is a fixed-size type for 32/64 compatibility. 311 + */ 312 + uint64_t data_ptr; 313 + }; 314 + 315 + struct drm_i915_gem_pwrite { 316 + /** Handle for the object being written to. */ 317 + uint32_t handle; 318 + uint32_t pad; 319 + /** Offset into the object to write to */ 320 + uint64_t offset; 321 + /** Length of data to write */ 322 + uint64_t size; 323 + /** 324 + * Pointer to read the data from. 325 + * 326 + * This is a fixed-size type for 32/64 compatibility. 327 + */ 328 + uint64_t data_ptr; 329 + }; 330 + 331 + struct drm_i915_gem_mmap { 332 + /** Handle for the object being mapped. */ 333 + uint32_t handle; 334 + uint32_t pad; 335 + /** Offset in the object to map. */ 336 + uint64_t offset; 337 + /** 338 + * Length of data to map. 339 + * 340 + * The value will be page-aligned. 341 + */ 342 + uint64_t size; 343 + /** 344 + * Returned pointer the data was mapped at. 345 + * 346 + * This is a fixed-size type for 32/64 compatibility. 347 + */ 348 + uint64_t addr_ptr; 349 + }; 350 + 351 + struct drm_i915_gem_set_domain { 352 + /** Handle for the object */ 353 + uint32_t handle; 354 + 355 + /** New read domains */ 356 + uint32_t read_domains; 357 + 358 + /** New write domain */ 359 + uint32_t write_domain; 360 + }; 361 + 362 + struct drm_i915_gem_sw_finish { 363 + /** Handle for the object */ 364 + uint32_t handle; 365 + }; 366 + 367 + struct drm_i915_gem_relocation_entry { 368 + /** 369 + * Handle of the buffer being pointed to by this relocation entry. 370 + * 371 + * It's appealing to make this be an index into the mm_validate_entry 372 + * list to refer to the buffer, but this allows the driver to create 373 + * a relocation list for state buffers and not re-write it per 374 + * exec using the buffer. 375 + */ 376 + uint32_t target_handle; 377 + 378 + /** 379 + * Value to be added to the offset of the target buffer to make up 380 + * the relocation entry. 381 + */ 382 + uint32_t delta; 383 + 384 + /** Offset in the buffer the relocation entry will be written into */ 385 + uint64_t offset; 386 + 387 + /** 388 + * Offset value of the target buffer that the relocation entry was last 389 + * written as. 390 + * 391 + * If the buffer has the same offset as last time, we can skip syncing 392 + * and writing the relocation. This value is written back out by 393 + * the execbuffer ioctl when the relocation is written. 394 + */ 395 + uint64_t presumed_offset; 396 + 397 + /** 398 + * Target memory domains read by this operation. 399 + */ 400 + uint32_t read_domains; 401 + 402 + /** 403 + * Target memory domains written by this operation. 
404 + * 405 + * Note that only one domain may be written by the whole 406 + * execbuffer operation, so that where there are conflicts, 407 + * the application will get -EINVAL back. 408 + */ 409 + uint32_t write_domain; 410 + }; 411 + 412 + /** @{ 413 + * Intel memory domains 414 + * 415 + * Most of these just align with the various caches in 416 + * the system and are used to flush and invalidate as 417 + * objects end up cached in different domains. 418 + */ 419 + /** CPU cache */ 420 + #define I915_GEM_DOMAIN_CPU 0x00000001 421 + /** Render cache, used by 2D and 3D drawing */ 422 + #define I915_GEM_DOMAIN_RENDER 0x00000002 423 + /** Sampler cache, used by texture engine */ 424 + #define I915_GEM_DOMAIN_SAMPLER 0x00000004 425 + /** Command queue, used to load batch buffers */ 426 + #define I915_GEM_DOMAIN_COMMAND 0x00000008 427 + /** Instruction cache, used by shader programs */ 428 + #define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 429 + /** Vertex address cache */ 430 + #define I915_GEM_DOMAIN_VERTEX 0x00000020 431 + /** GTT domain - aperture and scanout */ 432 + #define I915_GEM_DOMAIN_GTT 0x00000040 433 + /** @} */ 434 + 435 + struct drm_i915_gem_exec_object { 436 + /** 437 + * User's handle for a buffer to be bound into the GTT for this 438 + * operation. 439 + */ 440 + uint32_t handle; 441 + 442 + /** Number of relocations to be performed on this buffer */ 443 + uint32_t relocation_count; 444 + /** 445 + * Pointer to array of struct drm_i915_gem_relocation_entry containing 446 + * the relocations to be performed in this buffer. 447 + */ 448 + uint64_t relocs_ptr; 449 + 450 + /** Required alignment in graphics aperture */ 451 + uint64_t alignment; 452 + 453 + /** 454 + * Returned value of the updated offset of the object, for future 455 + * presumed_offset writes. 456 + */ 457 + uint64_t offset; 458 + }; 459 + 460 + struct drm_i915_gem_execbuffer { 461 + /** 462 + * List of buffers to be validated with their relocations to be 463 + * performend on them. 464 + * 465 + * This is a pointer to an array of struct drm_i915_gem_validate_entry. 466 + * 467 + * These buffers must be listed in an order such that all relocations 468 + * a buffer is performing refer to buffers that have already appeared 469 + * in the validate list. 470 + */ 471 + uint64_t buffers_ptr; 472 + uint32_t buffer_count; 473 + 474 + /** Offset in the batchbuffer to start execution from. */ 475 + uint32_t batch_start_offset; 476 + /** Bytes used in batchbuffer from batch_start_offset */ 477 + uint32_t batch_len; 478 + uint32_t DR1; 479 + uint32_t DR4; 480 + uint32_t num_cliprects; 481 + /** This is a struct drm_clip_rect *cliprects */ 482 + uint64_t cliprects_ptr; 483 + }; 484 + 485 + struct drm_i915_gem_pin { 486 + /** Handle of the buffer to be pinned. */ 487 + uint32_t handle; 488 + uint32_t pad; 489 + 490 + /** alignment required within the aperture */ 491 + uint64_t alignment; 492 + 493 + /** Returned GTT offset of the buffer. */ 494 + uint64_t offset; 495 + }; 496 + 497 + struct drm_i915_gem_unpin { 498 + /** Handle of the buffer to be unpinned. 
*/ 499 + uint32_t handle; 500 + uint32_t pad; 501 + }; 502 + 503 + struct drm_i915_gem_busy { 504 + /** Handle of the buffer to check for busy */ 505 + uint32_t handle; 506 + 507 + /** Return busy status (1 if busy, 0 if idle) */ 508 + uint32_t busy; 509 + }; 510 + 511 + #define I915_TILING_NONE 0 512 + #define I915_TILING_X 1 513 + #define I915_TILING_Y 2 514 + 515 + #define I915_BIT_6_SWIZZLE_NONE 0 516 + #define I915_BIT_6_SWIZZLE_9 1 517 + #define I915_BIT_6_SWIZZLE_9_10 2 518 + #define I915_BIT_6_SWIZZLE_9_11 3 519 + #define I915_BIT_6_SWIZZLE_9_10_11 4 520 + /* Not seen by userland */ 521 + #define I915_BIT_6_SWIZZLE_UNKNOWN 5 522 + 523 + struct drm_i915_gem_set_tiling { 524 + /** Handle of the buffer to have its tiling state updated */ 525 + uint32_t handle; 526 + 527 + /** 528 + * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 529 + * I915_TILING_Y). 530 + * 531 + * This value is to be set on request, and will be updated by the 532 + * kernel on successful return with the actual chosen tiling layout. 533 + * 534 + * The tiling mode may be demoted to I915_TILING_NONE when the system 535 + * has bit 6 swizzling that can't be managed correctly by GEM. 536 + * 537 + * Buffer contents become undefined when changing tiling_mode. 538 + */ 539 + uint32_t tiling_mode; 540 + 541 + /** 542 + * Stride in bytes for the object when in I915_TILING_X or 543 + * I915_TILING_Y. 544 + */ 545 + uint32_t stride; 546 + 547 + /** 548 + * Returned address bit 6 swizzling required for CPU access through 549 + * mmap mapping. 550 + */ 551 + uint32_t swizzle_mode; 552 + }; 553 + 554 + struct drm_i915_gem_get_tiling { 555 + /** Handle of the buffer to get tiling state for. */ 556 + uint32_t handle; 557 + 558 + /** 559 + * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, 560 + * I915_TILING_Y). 561 + */ 562 + uint32_t tiling_mode; 563 + 564 + /** 565 + * Returned address bit 6 swizzling required for CPU access through 566 + * mmap mapping. 567 + */ 568 + uint32_t swizzle_mode; 569 + }; 300 570 301 571 #endif /* _I915_DRM_H_ */
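Taken together, the new i915 ioctls let a client allocate a buffer, map it, and fill it with the CPU without any driver-specific kernel mappings. A hedged userspace sketch using only the structures defined above (the helper name and the abbreviated error handling are illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include "i915_drm.h"

    /* Returns the new object's handle (nonzero) or 0 on failure. */
    static uint32_t gem_create_and_fill(int fd, const void *data, uint64_t len)
    {
        struct drm_i915_gem_create create = { .size = len };
        struct drm_i915_gem_mmap mmap_arg = { 0 };
        struct drm_i915_gem_set_domain set_domain = { 0 };
        struct drm_i915_gem_sw_finish sw_finish = { 0 };

        if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
            return 0;

        mmap_arg.handle = create.handle;
        mmap_arg.size = create.size;    /* already page-aligned by the kernel */
        if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
            return 0;

        /* Move the object into the CPU domain before touching the pages. */
        set_domain.handle = create.handle;
        set_domain.read_domains = I915_GEM_DOMAIN_CPU;
        set_domain.write_domain = I915_GEM_DOMAIN_CPU;
        ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);

        memcpy((void *)(uintptr_t)mmap_arg.addr_ptr, data, len);

        /* Signal that software access through the mapping is finished. */
        sw_finish.handle = create.handle;
        ioctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &sw_finish);

        return create.handle;
    }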