Merge branch 'drm-gem-update' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-gem-update' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6:
  drm/i915: Return error in i915_gem_set_to_gtt_domain if we're not in the GTT.
  drm/i915: Retry execbuffer pinning after clearing the GTT
  drm/i915: Move the execbuffer domain computations together
  drm/i915: Rename object_set_domain to object_set_to_gpu_domain
  drm/i915: Make a single set-to-cpu-domain path and use it wherever needed.
  drm/i915: Make a single set-to-gtt-domain path.
  drm/i915: If interrupted while setting object domains, still emit the flush.
  drm/i915: Move flushing list cleanup from flush request retire to request emit.
  drm/i915: Respect GM965/GM45 bit-17-instead-of-bit-11 option for swizzling.
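
For orientation (an editorial note, not part of any patch): the series replaces
the old catch-all i915_gem_object_set_domain()/i915_gem_set_domain() pair with
explicit per-transition helpers, and i915_gem_set_domain_ioctl() now validates
its arguments up front.  That new check can be modeled in isolation; the
snippet below is a hypothetical standalone sketch with the domain bit values
taken from the i915 uapi header (i915_drm.h), not driver code.

	#include <stdint.h>
	#include <stdio.h>

	/* GEM domain bits as defined in i915_drm.h. */
	#define I915_GEM_DOMAIN_CPU 0x00000001
	#define I915_GEM_DOMAIN_GTT 0x00000040

	/* Model of the argument validation added to i915_gem_set_domain_ioctl()
	 * in the i915_gem.c diff below: user space may only ask for the CPU or
	 * GTT domains, and a non-zero write domain must equal the read domain.
	 */
	static int check_set_domain_args(uint32_t read_domains,
					 uint32_t write_domain)
	{
		if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
			return -1;
		if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
			return -1;
		if (write_domain != 0 && read_domains != write_domain)
			return -1;
		return 0;
	}

	int main(void)
	{
		/* Valid: prepare for CPU writes through a CPU mapping. */
		printf("%d\n", check_set_domain_args(I915_GEM_DOMAIN_CPU,
						     I915_GEM_DOMAIN_CPU));
		/* Rejected: GTT write while claiming only CPU reads. */
		printf("%d\n", check_set_domain_args(I915_GEM_DOMAIN_CPU,
						     I915_GEM_DOMAIN_GTT));
		return 0;
	}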

+422 -247
+10 -5
drivers/gpu/drm/i915/i915_drv.h
···
 	 * List of objects currently involved in rendering from the
 	 * ringbuffer.
 	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
 	 * A reference is held on the buffer while on this list.
 	 */
 	struct list_head active_list;
···
 	 * still have a write_domain which needs to be flushed before
 	 * unbinding.
 	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
 	 * A reference is held on the buffer while on this list.
 	 */
 	struct list_head flushing_list;
···
 	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
 	 * A reference is not held on the buffer while on this list,
 	 * as merely being GTT-bound shouldn't prevent its being
···
 	uint32_t agp_type;
 
 	/**
-	 * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when
-	 * GEM_DOMAIN_CPU is not in the object's read domain.
+	 * If present, while GEM_DOMAIN_CPU is in the read domain this array
+	 * flags which individual pages are valid.
 	 */
 	uint8_t *page_cpu_valid;
 };
···
 
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
-
-	/** Cache domains that were flushed at the start of the request. */
-	uint32_t flush_domains;
 
 	struct list_head list;
 };
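
The comments added above pin down an invariant worth keeping in mind while
reading i915_gem.c below: last_rendering_seqno carries the covering request's
seqno while an object is on the active list, and is 0 on the flushing and
inactive lists.  A minimal, self-contained model of that bookkeeping follows;
the names and the simplified state are hypothetical, not the driver's
structures.

	#include <assert.h>
	#include <stdint.h>

	enum obj_list { LIST_INACTIVE, LIST_FLUSHING, LIST_ACTIVE };

	struct model_obj {
		enum obj_list list;
		uint32_t write_domain;
		uint32_t last_rendering_seqno;	/* non-zero only while active */
	};

	/* Rendering (or a GPU cache flush) was queued under request `seqno'. */
	static void move_to_active(struct model_obj *obj, uint32_t seqno)
	{
		assert(seqno != 0);
		obj->list = LIST_ACTIVE;
		obj->last_rendering_seqno = seqno;
	}

	/* The covering request retired but a write domain still needs flushing. */
	static void move_to_flushing(struct model_obj *obj)
	{
		assert(obj->write_domain != 0);
		obj->list = LIST_FLUSHING;
		obj->last_rendering_seqno = 0;
	}

	/* Nothing left outstanding; eligible for eviction. */
	static void move_to_inactive(struct model_obj *obj)
	{
		obj->list = LIST_INACTIVE;
		obj->last_rendering_seqno = 0;
	}

	int main(void)
	{
		struct model_obj obj = { LIST_INACTIVE, 0x2 /* render cache */, 0 };

		move_to_active(&obj, 42);	/* batch using the object emitted */
		move_to_flushing(&obj);		/* request 42 retired, write pending */
		obj.write_domain = 0;		/* flush emitted as request 43 ... */
		move_to_active(&obj, 43);	/* ... so the object is active again */
		move_to_inactive(&obj);		/* request 43 retired */
		return 0;
	}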
+405 -236
drivers/gpu/drm/i915/i915_gem.c
··· 33 34 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 35 36 - static int 37 - i915_gem_object_set_domain(struct drm_gem_object *obj, 38 - uint32_t read_domains, 39 - uint32_t write_domain); 40 - static int 41 - i915_gem_object_set_domain_range(struct drm_gem_object *obj, 42 - uint64_t offset, 43 - uint64_t size, 44 - uint32_t read_domains, 45 - uint32_t write_domain); 46 - static int 47 - i915_gem_set_domain(struct drm_gem_object *obj, 48 - struct drm_file *file_priv, 49 - uint32_t read_domains, 50 - uint32_t write_domain); 51 static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 52 static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 53 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); ··· 162 163 mutex_lock(&dev->struct_mutex); 164 165 - ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, 166 - I915_GEM_DOMAIN_CPU, 0); 167 if (ret != 0) { 168 drm_gem_object_unreference(obj); 169 mutex_unlock(&dev->struct_mutex); ··· 260 mutex_unlock(&dev->struct_mutex); 261 return ret; 262 } 263 - ret = i915_gem_set_domain(obj, file_priv, 264 - I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT); 265 if (ret) 266 goto fail; 267 ··· 319 320 mutex_lock(&dev->struct_mutex); 321 322 - ret = i915_gem_set_domain(obj, file_priv, 323 - I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU); 324 if (ret) { 325 mutex_unlock(&dev->struct_mutex); 326 return ret; ··· 395 } 396 397 /** 398 - * Called when user space prepares to use an object 399 */ 400 int 401 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ··· 404 { 405 struct drm_i915_gem_set_domain *args = data; 406 struct drm_gem_object *obj; 407 int ret; 408 409 if (!(dev->driver->driver_features & DRIVER_GEM)) 410 return -ENODEV; 411 412 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 413 if (obj == NULL) ··· 431 mutex_lock(&dev->struct_mutex); 432 #if WATCH_BUF 433 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", 434 - obj, obj->size, args->read_domains, args->write_domain); 435 #endif 436 - ret = i915_gem_set_domain(obj, file_priv, 437 - args->read_domains, args->write_domain); 438 drm_gem_object_unreference(obj); 439 mutex_unlock(&dev->struct_mutex); 440 return ret; ··· 480 obj_priv = obj->driver_private; 481 482 /* Pinned buffers may be scanout, so flush the cache */ 483 - if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { 484 - i915_gem_clflush_object(obj); 485 - drm_agp_chipset_flush(dev); 486 - } 487 drm_gem_object_unreference(obj); 488 mutex_unlock(&dev->struct_mutex); 489 return ret; ··· 556 } 557 558 static void 559 - i915_gem_object_move_to_active(struct drm_gem_object *obj) 560 { 561 struct drm_device *dev = obj->dev; 562 drm_i915_private_t *dev_priv = dev->dev_private; ··· 570 /* Move from whatever list we were on to the tail of execution. 
*/ 571 list_move_tail(&obj_priv->list, 572 &dev_priv->mm.active_list); 573 } 574 575 576 static void 577 i915_gem_object_move_to_inactive(struct drm_gem_object *obj) ··· 598 else 599 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 600 601 if (obj_priv->active) { 602 obj_priv->active = 0; 603 drm_gem_object_unreference(obj); ··· 647 648 request->seqno = seqno; 649 request->emitted_jiffies = jiffies; 650 - request->flush_domains = flush_domains; 651 was_empty = list_empty(&dev_priv->mm.request_list); 652 list_add_tail(&request->list, &dev_priv->mm.request_list); 653 654 if (was_empty && !dev_priv->mm.suspended) 655 schedule_delayed_work(&dev_priv->mm.retire_work, HZ); ··· 731 __func__, request->seqno, obj); 732 #endif 733 734 - if (obj->write_domain != 0) { 735 - list_move_tail(&obj_priv->list, 736 - &dev_priv->mm.flushing_list); 737 - } else { 738 i915_gem_object_move_to_inactive(obj); 739 - } 740 - } 741 - 742 - if (request->flush_domains != 0) { 743 - struct drm_i915_gem_object *obj_priv, *next; 744 - 745 - /* Clear the write domain and activity from any buffers 746 - * that are just waiting for a flush matching the one retired. 747 - */ 748 - list_for_each_entry_safe(obj_priv, next, 749 - &dev_priv->mm.flushing_list, list) { 750 - struct drm_gem_object *obj = obj_priv->obj; 751 - 752 - if (obj->write_domain & request->flush_domains) { 753 - obj->write_domain = 0; 754 - i915_gem_object_move_to_inactive(obj); 755 - } 756 - } 757 - 758 } 759 } 760 ··· 927 struct drm_i915_gem_object *obj_priv = obj->driver_private; 928 int ret; 929 930 - /* If there are writes queued to the buffer, flush and 931 - * create a new seqno to wait for. 932 */ 933 - if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { 934 - uint32_t write_domain = obj->write_domain; 935 - #if WATCH_BUF 936 - DRM_INFO("%s: flushing object %p from write domain %08x\n", 937 - __func__, obj, write_domain); 938 - #endif 939 - i915_gem_flush(dev, 0, write_domain); 940 - 941 - i915_gem_object_move_to_active(obj); 942 - obj_priv->last_rendering_seqno = i915_add_request(dev, 943 - write_domain); 944 - BUG_ON(obj_priv->last_rendering_seqno == 0); 945 - #if WATCH_LRU 946 - DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj); 947 - #endif 948 - } 949 950 /* If there is rendering queued on the buffer being evicted, wait for 951 * it. ··· 970 return -EINVAL; 971 } 972 973 - /* Wait for any rendering to complete 974 - */ 975 - ret = i915_gem_object_wait_rendering(obj); 976 - if (ret) { 977 - DRM_ERROR("wait_rendering failed: %d\n", ret); 978 - return ret; 979 - } 980 - 981 /* Move the object to the CPU domain to ensure that 982 * any possible CPU writes while it's not in the GTT 983 * are flushed when we go to remap it. This will 984 * also ensure that all pending GPU writes are finished 985 * before we unbind. 
986 */ 987 - ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, 988 - I915_GEM_DOMAIN_CPU); 989 if (ret) { 990 - DRM_ERROR("set_domain failed: %d\n", ret); 991 return ret; 992 } 993 ··· 1095 } 1096 1097 static int 1098 i915_gem_object_get_page_list(struct drm_gem_object *obj) 1099 { 1100 struct drm_i915_gem_object *obj_priv = obj->driver_private; ··· 1193 1194 ret = i915_gem_evict_something(dev); 1195 if (ret != 0) { 1196 - DRM_ERROR("Failed to evict a buffer %d\n", ret); 1197 return ret; 1198 } 1199 goto search_free; ··· 1252 return; 1253 1254 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 1255 } 1256 1257 /* ··· 1502 * MI_FLUSH 1503 * drm_agp_chipset_flush 1504 */ 1505 - static int 1506 - i915_gem_object_set_domain(struct drm_gem_object *obj, 1507 - uint32_t read_domains, 1508 - uint32_t write_domain) 1509 { 1510 struct drm_device *dev = obj->dev; 1511 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1512 uint32_t invalidate_domains = 0; 1513 uint32_t flush_domains = 0; 1514 - int ret; 1515 1516 #if WATCH_BUF 1517 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", ··· 1550 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", 1551 __func__, flush_domains, invalidate_domains); 1552 #endif 1553 - /* 1554 - * If we're invaliding the CPU cache and flushing a GPU cache, 1555 - * then pause for rendering so that the GPU caches will be 1556 - * flushed before the cpu cache is invalidated 1557 - */ 1558 - if ((invalidate_domains & I915_GEM_DOMAIN_CPU) && 1559 - (flush_domains & ~(I915_GEM_DOMAIN_CPU | 1560 - I915_GEM_DOMAIN_GTT))) { 1561 - ret = i915_gem_object_wait_rendering(obj); 1562 - if (ret) 1563 - return ret; 1564 - } 1565 i915_gem_clflush_object(obj); 1566 } 1567 1568 if ((write_domain | flush_domains) != 0) 1569 obj->write_domain = write_domain; 1570 - 1571 - /* If we're invalidating the CPU domain, clear the per-page CPU 1572 - * domain list as well. 1573 - */ 1574 - if (obj_priv->page_cpu_valid != NULL && 1575 - (write_domain != 0 || 1576 - read_domains & I915_GEM_DOMAIN_CPU)) { 1577 - drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, 1578 - DRM_MEM_DRIVER); 1579 - obj_priv->page_cpu_valid = NULL; 1580 - } 1581 obj->read_domains = read_domains; 1582 1583 dev->invalidate_domains |= invalidate_domains; ··· 1565 obj->read_domains, obj->write_domain, 1566 dev->invalidate_domains, dev->flush_domains); 1567 #endif 1568 - return 0; 1569 } 1570 1571 /** 1572 - * Set the read/write domain on a range of the object. 1573 * 1574 - * Currently only implemented for CPU reads, otherwise drops to normal 1575 - * i915_gem_object_set_domain(). 1576 */ 1577 static int 1578 - i915_gem_object_set_domain_range(struct drm_gem_object *obj, 1579 - uint64_t offset, 1580 - uint64_t size, 1581 - uint32_t read_domains, 1582 - uint32_t write_domain) 1583 { 1584 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1585 - int ret, i; 1586 1587 - if (obj->read_domains & I915_GEM_DOMAIN_CPU) 1588 return 0; 1589 1590 - if (read_domains != I915_GEM_DOMAIN_CPU || 1591 - write_domain != 0) 1592 - return i915_gem_object_set_domain(obj, 1593 - read_domains, write_domain); 1594 - 1595 - /* Wait on any GPU rendering to the object to be flushed. 
*/ 1596 - ret = i915_gem_object_wait_rendering(obj); 1597 - if (ret) 1598 - return ret; 1599 - 1600 if (obj_priv->page_cpu_valid == NULL) { 1601 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, 1602 DRM_MEM_DRIVER); 1603 - } 1604 1605 /* Flush the cache on any pages that are still invalid from the CPU's 1606 * perspective. 1607 */ 1608 - for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { 1609 if (obj_priv->page_cpu_valid[i]) 1610 continue; 1611 ··· 1661 obj_priv->page_cpu_valid[i] = 1; 1662 } 1663 1664 - return 0; 1665 - } 1666 - 1667 - /** 1668 - * Once all of the objects have been set in the proper domain, 1669 - * perform the necessary flush and invalidate operations. 1670 - * 1671 - * Returns the write domains flushed, for use in flush tracking. 1672 - */ 1673 - static uint32_t 1674 - i915_gem_dev_set_domain(struct drm_device *dev) 1675 - { 1676 - uint32_t flush_domains = dev->flush_domains; 1677 - 1678 - /* 1679 - * Now that all the buffers are synced to the proper domains, 1680 - * flush and invalidate the collected domains 1681 */ 1682 - if (dev->invalidate_domains | dev->flush_domains) { 1683 - #if WATCH_EXEC 1684 - DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", 1685 - __func__, 1686 - dev->invalidate_domains, 1687 - dev->flush_domains); 1688 - #endif 1689 - i915_gem_flush(dev, 1690 - dev->invalidate_domains, 1691 - dev->flush_domains); 1692 - dev->invalidate_domains = 0; 1693 - dev->flush_domains = 0; 1694 - } 1695 1696 - return flush_domains; 1697 } 1698 1699 /** ··· 1749 return -EINVAL; 1750 } 1751 1752 if (reloc.write_domain && target_obj->pending_write_domain && 1753 reloc.write_domain != target_obj->pending_write_domain) { 1754 DRM_ERROR("Write domain conflict: " ··· 1801 continue; 1802 } 1803 1804 - /* Now that we're going to actually write some data in, 1805 - * make sure that any rendering using this buffer's contents 1806 - * is completed. 1807 - */ 1808 - i915_gem_object_wait_rendering(obj); 1809 - 1810 - /* As we're writing through the gtt, flush 1811 - * any CPU writes before we write the relocations 1812 - */ 1813 - if (obj->write_domain & I915_GEM_DOMAIN_CPU) { 1814 - i915_gem_clflush_object(obj); 1815 - drm_agp_chipset_flush(dev); 1816 - obj->write_domain = 0; 1817 } 1818 1819 /* Map the page containing the relocation we're going to ··· 1947 int ret, i, pinned = 0; 1948 uint64_t exec_offset; 1949 uint32_t seqno, flush_domains; 1950 1951 #if WATCH_EXEC 1952 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", ··· 1996 return -EBUSY; 1997 } 1998 1999 - /* Zero the gloabl flush/invalidate flags. 
These 2000 - * will be modified as each object is bound to the 2001 - * gtt 2002 - */ 2003 - dev->invalidate_domains = 0; 2004 - dev->flush_domains = 0; 2005 - 2006 - /* Look up object handles and perform the relocations */ 2007 for (i = 0; i < args->buffer_count; i++) { 2008 object_list[i] = drm_gem_object_lookup(dev, file_priv, 2009 exec_list[i].handle); ··· 2006 ret = -EBADF; 2007 goto err; 2008 } 2009 2010 - object_list[i]->pending_read_domains = 0; 2011 - object_list[i]->pending_write_domain = 0; 2012 - ret = i915_gem_object_pin_and_relocate(object_list[i], 2013 - file_priv, 2014 - &exec_list[i]); 2015 - if (ret) { 2016 - DRM_ERROR("object bind and relocate failed %d\n", ret); 2017 goto err; 2018 } 2019 - pinned = i + 1; 2020 } 2021 2022 /* Set the pending read domains for the batch buffer to COMMAND */ ··· 2048 2049 i915_verify_inactive(dev, __FILE__, __LINE__); 2050 2051 for (i = 0; i < args->buffer_count; i++) { 2052 struct drm_gem_object *obj = object_list[i]; 2053 2054 - /* make sure all previous memory operations have passed */ 2055 - ret = i915_gem_object_set_domain(obj, 2056 - obj->pending_read_domains, 2057 - obj->pending_write_domain); 2058 - if (ret) 2059 - goto err; 2060 } 2061 2062 i915_verify_inactive(dev, __FILE__, __LINE__); 2063 2064 - /* Flush/invalidate caches and chipset buffer */ 2065 - flush_domains = i915_gem_dev_set_domain(dev); 2066 2067 i915_verify_inactive(dev, __FILE__, __LINE__); 2068 ··· 2097 __func__, 2098 ~0); 2099 #endif 2100 - 2101 - (void)i915_add_request(dev, flush_domains); 2102 2103 /* Exec the batchbuffer */ 2104 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); ··· 2125 i915_file_priv->mm.last_gem_seqno = seqno; 2126 for (i = 0; i < args->buffer_count; i++) { 2127 struct drm_gem_object *obj = object_list[i]; 2128 - struct drm_i915_gem_object *obj_priv = obj->driver_private; 2129 2130 - i915_gem_object_move_to_active(obj); 2131 - obj_priv->last_rendering_seqno = seqno; 2132 #if WATCH_LRU 2133 DRM_INFO("%s: move to exec list %p\n", __func__, obj); 2134 #endif ··· 2257 /* XXX - flush the CPU caches for pinned objects 2258 * as the X server doesn't manage domains yet 2259 */ 2260 - if (obj->write_domain & I915_GEM_DOMAIN_CPU) { 2261 - i915_gem_clflush_object(obj); 2262 - drm_agp_chipset_flush(dev); 2263 - obj->write_domain = 0; 2264 - } 2265 args->offset = obj_priv->gtt_offset; 2266 drm_gem_object_unreference(obj); 2267 mutex_unlock(&dev->struct_mutex); ··· 2357 2358 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); 2359 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 2360 - } 2361 - 2362 - static int 2363 - i915_gem_set_domain(struct drm_gem_object *obj, 2364 - struct drm_file *file_priv, 2365 - uint32_t read_domains, 2366 - uint32_t write_domain) 2367 - { 2368 - struct drm_device *dev = obj->dev; 2369 - int ret; 2370 - uint32_t flush_domains; 2371 - 2372 - BUG_ON(!mutex_is_locked(&dev->struct_mutex)); 2373 - 2374 - ret = i915_gem_object_set_domain(obj, read_domains, write_domain); 2375 - if (ret) 2376 - return ret; 2377 - flush_domains = i915_gem_dev_set_domain(obj->dev); 2378 - 2379 - if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) 2380 - (void) i915_add_request(dev, flush_domains); 2381 - 2382 - return 0; 2383 } 2384 2385 /** Unbinds all objects that are on the given buffer list. */
··· 33 34 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 35 36 + static void 37 + i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, 38 + uint32_t read_domains, 39 + uint32_t write_domain); 40 + static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 41 + static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 42 + static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 43 + static int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 44 + int write); 45 + static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, 46 + int write); 47 + static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, 48 + uint64_t offset, 49 + uint64_t size); 50 + static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj); 51 static int i915_gem_object_get_page_list(struct drm_gem_object *obj); 52 static void i915_gem_object_free_page_list(struct drm_gem_object *obj); 53 static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); ··· 162 163 mutex_lock(&dev->struct_mutex); 164 165 + ret = i915_gem_object_set_cpu_read_domain_range(obj, args->offset, 166 + args->size); 167 if (ret != 0) { 168 drm_gem_object_unreference(obj); 169 mutex_unlock(&dev->struct_mutex); ··· 260 mutex_unlock(&dev->struct_mutex); 261 return ret; 262 } 263 + ret = i915_gem_object_set_to_gtt_domain(obj, 1); 264 if (ret) 265 goto fail; 266 ··· 320 321 mutex_lock(&dev->struct_mutex); 322 323 + ret = i915_gem_object_set_to_cpu_domain(obj, 1); 324 if (ret) { 325 mutex_unlock(&dev->struct_mutex); 326 return ret; ··· 397 } 398 399 /** 400 + * Called when user space prepares to use an object with the CPU, either 401 + * through the mmap ioctl's mapping or a GTT mapping. 402 */ 403 int 404 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, ··· 405 { 406 struct drm_i915_gem_set_domain *args = data; 407 struct drm_gem_object *obj; 408 + uint32_t read_domains = args->read_domains; 409 + uint32_t write_domain = args->write_domain; 410 int ret; 411 412 if (!(dev->driver->driver_features & DRIVER_GEM)) 413 return -ENODEV; 414 + 415 + /* Only handle setting domains to types used by the CPU. */ 416 + if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 417 + return -EINVAL; 418 + 419 + if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 420 + return -EINVAL; 421 + 422 + /* Having something in the write domain implies it's in the read 423 + * domain, and only that read domain. Enforce that in the request. 424 + */ 425 + if (write_domain != 0 && read_domains != write_domain) 426 + return -EINVAL; 427 428 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 429 if (obj == NULL) ··· 417 mutex_lock(&dev->struct_mutex); 418 #if WATCH_BUF 419 DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", 420 + obj, obj->size, read_domains, write_domain); 421 #endif 422 + if (read_domains & I915_GEM_DOMAIN_GTT) { 423 + ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0); 424 + 425 + /* Silently promote "you're not bound, there was nothing to do" 426 + * to success, since the client was just asking us to 427 + * make sure everything was done. 
428 + */ 429 + if (ret == -EINVAL) 430 + ret = 0; 431 + } else { 432 + ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0); 433 + } 434 + 435 drm_gem_object_unreference(obj); 436 mutex_unlock(&dev->struct_mutex); 437 return ret; ··· 455 obj_priv = obj->driver_private; 456 457 /* Pinned buffers may be scanout, so flush the cache */ 458 + if (obj_priv->pin_count) 459 + i915_gem_object_flush_cpu_write_domain(obj); 460 + 461 drm_gem_object_unreference(obj); 462 mutex_unlock(&dev->struct_mutex); 463 return ret; ··· 532 } 533 534 static void 535 + i915_gem_object_move_to_active(struct drm_gem_object *obj, uint32_t seqno) 536 { 537 struct drm_device *dev = obj->dev; 538 drm_i915_private_t *dev_priv = dev->dev_private; ··· 546 /* Move from whatever list we were on to the tail of execution. */ 547 list_move_tail(&obj_priv->list, 548 &dev_priv->mm.active_list); 549 + obj_priv->last_rendering_seqno = seqno; 550 } 551 552 + static void 553 + i915_gem_object_move_to_flushing(struct drm_gem_object *obj) 554 + { 555 + struct drm_device *dev = obj->dev; 556 + drm_i915_private_t *dev_priv = dev->dev_private; 557 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 558 + 559 + BUG_ON(!obj_priv->active); 560 + list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list); 561 + obj_priv->last_rendering_seqno = 0; 562 + } 563 564 static void 565 i915_gem_object_move_to_inactive(struct drm_gem_object *obj) ··· 562 else 563 list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); 564 565 + obj_priv->last_rendering_seqno = 0; 566 if (obj_priv->active) { 567 obj_priv->active = 0; 568 drm_gem_object_unreference(obj); ··· 610 611 request->seqno = seqno; 612 request->emitted_jiffies = jiffies; 613 was_empty = list_empty(&dev_priv->mm.request_list); 614 list_add_tail(&request->list, &dev_priv->mm.request_list); 615 + 616 + /* Associate any objects on the flushing list matching the write 617 + * domain we're flushing with our flush. 618 + */ 619 + if (flush_domains != 0) { 620 + struct drm_i915_gem_object *obj_priv, *next; 621 + 622 + list_for_each_entry_safe(obj_priv, next, 623 + &dev_priv->mm.flushing_list, list) { 624 + struct drm_gem_object *obj = obj_priv->obj; 625 + 626 + if ((obj->write_domain & flush_domains) == 627 + obj->write_domain) { 628 + obj->write_domain = 0; 629 + i915_gem_object_move_to_active(obj, seqno); 630 + } 631 + } 632 + 633 + } 634 635 if (was_empty && !dev_priv->mm.suspended) 636 schedule_delayed_work(&dev_priv->mm.retire_work, HZ); ··· 676 __func__, request->seqno, obj); 677 #endif 678 679 + if (obj->write_domain != 0) 680 + i915_gem_object_move_to_flushing(obj); 681 + else 682 i915_gem_object_move_to_inactive(obj); 683 } 684 } 685 ··· 892 struct drm_i915_gem_object *obj_priv = obj->driver_private; 893 int ret; 894 895 + /* This function only exists to support waiting for existing rendering, 896 + * not for emitting required flushes. 897 */ 898 + BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0); 899 900 /* If there is rendering queued on the buffer being evicted, wait for 901 * it. ··· 950 return -EINVAL; 951 } 952 953 /* Move the object to the CPU domain to ensure that 954 * any possible CPU writes while it's not in the GTT 955 * are flushed when we go to remap it. This will 956 * also ensure that all pending GPU writes are finished 957 * before we unbind. 
958 */ 959 + ret = i915_gem_object_set_to_cpu_domain(obj, 1); 960 if (ret) { 961 + if (ret != -ERESTARTSYS) 962 + DRM_ERROR("set_domain failed: %d\n", ret); 963 return ret; 964 } 965 ··· 1083 } 1084 1085 static int 1086 + i915_gem_evict_everything(struct drm_device *dev) 1087 + { 1088 + int ret; 1089 + 1090 + for (;;) { 1091 + ret = i915_gem_evict_something(dev); 1092 + if (ret != 0) 1093 + break; 1094 + } 1095 + return ret; 1096 + } 1097 + 1098 + static int 1099 i915_gem_object_get_page_list(struct drm_gem_object *obj) 1100 { 1101 struct drm_i915_gem_object *obj_priv = obj->driver_private; ··· 1168 1169 ret = i915_gem_evict_something(dev); 1170 if (ret != 0) { 1171 + if (ret != -ERESTARTSYS) 1172 + DRM_ERROR("Failed to evict a buffer %d\n", ret); 1173 return ret; 1174 } 1175 goto search_free; ··· 1226 return; 1227 1228 drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); 1229 + } 1230 + 1231 + /** Flushes any GPU write domain for the object if it's dirty. */ 1232 + static void 1233 + i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj) 1234 + { 1235 + struct drm_device *dev = obj->dev; 1236 + uint32_t seqno; 1237 + 1238 + if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0) 1239 + return; 1240 + 1241 + /* Queue the GPU write cache flushing we need. */ 1242 + i915_gem_flush(dev, 0, obj->write_domain); 1243 + seqno = i915_add_request(dev, obj->write_domain); 1244 + obj->write_domain = 0; 1245 + i915_gem_object_move_to_active(obj, seqno); 1246 + } 1247 + 1248 + /** Flushes the GTT write domain for the object if it's dirty. */ 1249 + static void 1250 + i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj) 1251 + { 1252 + if (obj->write_domain != I915_GEM_DOMAIN_GTT) 1253 + return; 1254 + 1255 + /* No actual flushing is required for the GTT write domain. Writes 1256 + * to it immediately go to main memory as far as we know, so there's 1257 + * no chipset flush. It also doesn't land in render cache. 1258 + */ 1259 + obj->write_domain = 0; 1260 + } 1261 + 1262 + /** Flushes the CPU write domain for the object if it's dirty. */ 1263 + static void 1264 + i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj) 1265 + { 1266 + struct drm_device *dev = obj->dev; 1267 + 1268 + if (obj->write_domain != I915_GEM_DOMAIN_CPU) 1269 + return; 1270 + 1271 + i915_gem_clflush_object(obj); 1272 + drm_agp_chipset_flush(dev); 1273 + obj->write_domain = 0; 1274 + } 1275 + 1276 + /** 1277 + * Moves a single object to the GTT read, and possibly write domain. 1278 + * 1279 + * This function returns when the move is complete, including waiting on 1280 + * flushes to occur. 1281 + */ 1282 + static int 1283 + i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write) 1284 + { 1285 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1286 + int ret; 1287 + 1288 + /* Not valid to be called on unbound objects. */ 1289 + if (obj_priv->gtt_space == NULL) 1290 + return -EINVAL; 1291 + 1292 + i915_gem_object_flush_gpu_write_domain(obj); 1293 + /* Wait on any GPU rendering and flushing to occur. */ 1294 + ret = i915_gem_object_wait_rendering(obj); 1295 + if (ret != 0) 1296 + return ret; 1297 + 1298 + /* If we're writing through the GTT domain, then CPU and GPU caches 1299 + * will need to be invalidated at next use. 
1300 + */ 1301 + if (write) 1302 + obj->read_domains &= I915_GEM_DOMAIN_GTT; 1303 + 1304 + i915_gem_object_flush_cpu_write_domain(obj); 1305 + 1306 + /* It should now be out of any other write domains, and we can update 1307 + * the domain values for our changes. 1308 + */ 1309 + BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0); 1310 + obj->read_domains |= I915_GEM_DOMAIN_GTT; 1311 + if (write) { 1312 + obj->write_domain = I915_GEM_DOMAIN_GTT; 1313 + obj_priv->dirty = 1; 1314 + } 1315 + 1316 + return 0; 1317 + } 1318 + 1319 + /** 1320 + * Moves a single object to the CPU read, and possibly write domain. 1321 + * 1322 + * This function returns when the move is complete, including waiting on 1323 + * flushes to occur. 1324 + */ 1325 + static int 1326 + i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write) 1327 + { 1328 + struct drm_device *dev = obj->dev; 1329 + int ret; 1330 + 1331 + i915_gem_object_flush_gpu_write_domain(obj); 1332 + /* Wait on any GPU rendering and flushing to occur. */ 1333 + ret = i915_gem_object_wait_rendering(obj); 1334 + if (ret != 0) 1335 + return ret; 1336 + 1337 + i915_gem_object_flush_gtt_write_domain(obj); 1338 + 1339 + /* If we have a partially-valid cache of the object in the CPU, 1340 + * finish invalidating it and free the per-page flags. 1341 + */ 1342 + i915_gem_object_set_to_full_cpu_read_domain(obj); 1343 + 1344 + /* Flush the CPU cache if it's still invalid. */ 1345 + if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) { 1346 + i915_gem_clflush_object(obj); 1347 + drm_agp_chipset_flush(dev); 1348 + 1349 + obj->read_domains |= I915_GEM_DOMAIN_CPU; 1350 + } 1351 + 1352 + /* It should now be out of any other write domains, and we can update 1353 + * the domain values for our changes. 1354 + */ 1355 + BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 1356 + 1357 + /* If we're writing through the CPU, then the GPU read domains will 1358 + * need to be invalidated at next use. 1359 + */ 1360 + if (write) { 1361 + obj->read_domains &= I915_GEM_DOMAIN_CPU; 1362 + obj->write_domain = I915_GEM_DOMAIN_CPU; 1363 + } 1364 + 1365 + return 0; 1366 } 1367 1368 /* ··· 1339 * MI_FLUSH 1340 * drm_agp_chipset_flush 1341 */ 1342 + static void 1343 + i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, 1344 + uint32_t read_domains, 1345 + uint32_t write_domain) 1346 { 1347 struct drm_device *dev = obj->dev; 1348 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1349 uint32_t invalidate_domains = 0; 1350 uint32_t flush_domains = 0; 1351 + 1352 + BUG_ON(read_domains & I915_GEM_DOMAIN_CPU); 1353 + BUG_ON(write_domain == I915_GEM_DOMAIN_CPU); 1354 1355 #if WATCH_BUF 1356 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", ··· 1385 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", 1386 __func__, flush_domains, invalidate_domains); 1387 #endif 1388 i915_gem_clflush_object(obj); 1389 } 1390 1391 if ((write_domain | flush_domains) != 0) 1392 obj->write_domain = write_domain; 1393 obj->read_domains = read_domains; 1394 1395 dev->invalidate_domains |= invalidate_domains; ··· 1423 obj->read_domains, obj->write_domain, 1424 dev->invalidate_domains, dev->flush_domains); 1425 #endif 1426 } 1427 1428 /** 1429 + * Moves the object from a partially CPU read to a full one. 1430 * 1431 + * Note that this only resolves i915_gem_object_set_cpu_read_domain_range(), 1432 + * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU). 
1433 + */ 1434 + static void 1435 + i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj) 1436 + { 1437 + struct drm_device *dev = obj->dev; 1438 + struct drm_i915_gem_object *obj_priv = obj->driver_private; 1439 + 1440 + if (!obj_priv->page_cpu_valid) 1441 + return; 1442 + 1443 + /* If we're partially in the CPU read domain, finish moving it in. 1444 + */ 1445 + if (obj->read_domains & I915_GEM_DOMAIN_CPU) { 1446 + int i; 1447 + 1448 + for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) { 1449 + if (obj_priv->page_cpu_valid[i]) 1450 + continue; 1451 + drm_clflush_pages(obj_priv->page_list + i, 1); 1452 + } 1453 + drm_agp_chipset_flush(dev); 1454 + } 1455 + 1456 + /* Free the page_cpu_valid mappings which are now stale, whether 1457 + * or not we've got I915_GEM_DOMAIN_CPU. 1458 + */ 1459 + drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, 1460 + DRM_MEM_DRIVER); 1461 + obj_priv->page_cpu_valid = NULL; 1462 + } 1463 + 1464 + /** 1465 + * Set the CPU read domain on a range of the object. 1466 + * 1467 + * The object ends up with I915_GEM_DOMAIN_CPU in its read flags although it's 1468 + * not entirely valid. The page_cpu_valid member of the object flags which 1469 + * pages have been flushed, and will be respected by 1470 + * i915_gem_object_set_to_cpu_domain() if it's called on to get a valid mapping 1471 + * of the whole object. 1472 + * 1473 + * This function returns when the move is complete, including waiting on 1474 + * flushes to occur. 1475 */ 1476 static int 1477 + i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj, 1478 + uint64_t offset, uint64_t size) 1479 { 1480 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1481 + int i, ret; 1482 1483 + if (offset == 0 && size == obj->size) 1484 + return i915_gem_object_set_to_cpu_domain(obj, 0); 1485 + 1486 + i915_gem_object_flush_gpu_write_domain(obj); 1487 + /* Wait on any GPU rendering and flushing to occur. */ 1488 + ret = i915_gem_object_wait_rendering(obj); 1489 + if (ret != 0) 1490 + return ret; 1491 + i915_gem_object_flush_gtt_write_domain(obj); 1492 + 1493 + /* If we're already fully in the CPU read domain, we're done. */ 1494 + if (obj_priv->page_cpu_valid == NULL && 1495 + (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0) 1496 return 0; 1497 1498 + /* Otherwise, create/clear the per-page CPU read domain flag if we're 1499 + * newly adding I915_GEM_DOMAIN_CPU 1500 + */ 1501 if (obj_priv->page_cpu_valid == NULL) { 1502 obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, 1503 DRM_MEM_DRIVER); 1504 + if (obj_priv->page_cpu_valid == NULL) 1505 + return -ENOMEM; 1506 + } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) 1507 + memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE); 1508 1509 /* Flush the cache on any pages that are still invalid from the CPU's 1510 * perspective. 1511 */ 1512 + for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; 1513 + i++) { 1514 if (obj_priv->page_cpu_valid[i]) 1515 continue; 1516 ··· 1472 obj_priv->page_cpu_valid[i] = 1; 1473 } 1474 1475 + /* It should now be out of any other write domains, and we can update 1476 + * the domain values for our changes. 
1477 */ 1478 + BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0); 1479 1480 + obj->read_domains |= I915_GEM_DOMAIN_CPU; 1481 + 1482 + return 0; 1483 } 1484 1485 /** ··· 1585 return -EINVAL; 1586 } 1587 1588 + if (reloc.write_domain & I915_GEM_DOMAIN_CPU || 1589 + reloc.read_domains & I915_GEM_DOMAIN_CPU) { 1590 + DRM_ERROR("reloc with read/write CPU domains: " 1591 + "obj %p target %d offset %d " 1592 + "read %08x write %08x", 1593 + obj, reloc.target_handle, 1594 + (int) reloc.offset, 1595 + reloc.read_domains, 1596 + reloc.write_domain); 1597 + return -EINVAL; 1598 + } 1599 + 1600 if (reloc.write_domain && target_obj->pending_write_domain && 1601 reloc.write_domain != target_obj->pending_write_domain) { 1602 DRM_ERROR("Write domain conflict: " ··· 1625 continue; 1626 } 1627 1628 + ret = i915_gem_object_set_to_gtt_domain(obj, 1); 1629 + if (ret != 0) { 1630 + drm_gem_object_unreference(target_obj); 1631 + i915_gem_object_unpin(obj); 1632 + return -EINVAL; 1633 } 1634 1635 /* Map the page containing the relocation we're going to ··· 1779 int ret, i, pinned = 0; 1780 uint64_t exec_offset; 1781 uint32_t seqno, flush_domains; 1782 + int pin_tries; 1783 1784 #if WATCH_EXEC 1785 DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", ··· 1827 return -EBUSY; 1828 } 1829 1830 + /* Look up object handles */ 1831 for (i = 0; i < args->buffer_count; i++) { 1832 object_list[i] = drm_gem_object_lookup(dev, file_priv, 1833 exec_list[i].handle); ··· 1844 ret = -EBADF; 1845 goto err; 1846 } 1847 + } 1848 1849 + /* Pin and relocate */ 1850 + for (pin_tries = 0; ; pin_tries++) { 1851 + ret = 0; 1852 + for (i = 0; i < args->buffer_count; i++) { 1853 + object_list[i]->pending_read_domains = 0; 1854 + object_list[i]->pending_write_domain = 0; 1855 + ret = i915_gem_object_pin_and_relocate(object_list[i], 1856 + file_priv, 1857 + &exec_list[i]); 1858 + if (ret) 1859 + break; 1860 + pinned = i + 1; 1861 + } 1862 + /* success */ 1863 + if (ret == 0) 1864 + break; 1865 + 1866 + /* error other than GTT full, or we've already tried again */ 1867 + if (ret != -ENOMEM || pin_tries >= 1) { 1868 + DRM_ERROR("Failed to pin buffers %d\n", ret); 1869 goto err; 1870 } 1871 + 1872 + /* unpin all of our buffers */ 1873 + for (i = 0; i < pinned; i++) 1874 + i915_gem_object_unpin(object_list[i]); 1875 + 1876 + /* evict everyone we can from the aperture */ 1877 + ret = i915_gem_evict_everything(dev); 1878 + if (ret) 1879 + goto err; 1880 } 1881 1882 /* Set the pending read domains for the batch buffer to COMMAND */ ··· 1864 1865 i915_verify_inactive(dev, __FILE__, __LINE__); 1866 1867 + /* Zero the global flush/invalidate flags. 
These 1868 + * will be modified as new domains are computed 1869 + * for each object 1870 + */ 1871 + dev->invalidate_domains = 0; 1872 + dev->flush_domains = 0; 1873 + 1874 for (i = 0; i < args->buffer_count; i++) { 1875 struct drm_gem_object *obj = object_list[i]; 1876 1877 + /* Compute new gpu domains and update invalidate/flush */ 1878 + i915_gem_object_set_to_gpu_domain(obj, 1879 + obj->pending_read_domains, 1880 + obj->pending_write_domain); 1881 } 1882 1883 i915_verify_inactive(dev, __FILE__, __LINE__); 1884 1885 + if (dev->invalidate_domains | dev->flush_domains) { 1886 + #if WATCH_EXEC 1887 + DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", 1888 + __func__, 1889 + dev->invalidate_domains, 1890 + dev->flush_domains); 1891 + #endif 1892 + i915_gem_flush(dev, 1893 + dev->invalidate_domains, 1894 + dev->flush_domains); 1895 + if (dev->flush_domains) 1896 + (void)i915_add_request(dev, dev->flush_domains); 1897 + } 1898 1899 i915_verify_inactive(dev, __FILE__, __LINE__); 1900 ··· 1897 __func__, 1898 ~0); 1899 #endif 1900 1901 /* Exec the batchbuffer */ 1902 ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); ··· 1927 i915_file_priv->mm.last_gem_seqno = seqno; 1928 for (i = 0; i < args->buffer_count; i++) { 1929 struct drm_gem_object *obj = object_list[i]; 1930 1931 + i915_gem_object_move_to_active(obj, seqno); 1932 #if WATCH_LRU 1933 DRM_INFO("%s: move to exec list %p\n", __func__, obj); 1934 #endif ··· 2061 /* XXX - flush the CPU caches for pinned objects 2062 * as the X server doesn't manage domains yet 2063 */ 2064 + i915_gem_object_flush_cpu_write_domain(obj); 2065 args->offset = obj_priv->gtt_offset; 2066 drm_gem_object_unreference(obj); 2067 mutex_unlock(&dev->struct_mutex); ··· 2165 2166 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); 2167 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 2168 } 2169 2170 /** Unbinds all objects that are on the given buffer list. */
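
From user space, the contract that i915_gem_set_domain_ioctl() now enforces
means a client declares its intended access before touching a mapping.  A
hedged usage sketch follows: the helper name is made up, the struct and ioctl
number come from the i915 uapi header, and the include path may differ
depending on how the DRM headers are installed.  It assumes an open DRM fd and
an existing GEM handle, so it is a fragment rather than a complete program.

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/i915_drm.h>	/* struct drm_i915_gem_set_domain */

	/* Tell the kernel we are about to write the object through a GTT
	 * mapping.  Per the new validation, the write domain must also be
	 * the sole read domain.
	 */
	static int set_to_gtt_for_write(int fd, uint32_t handle)
	{
		struct drm_i915_gem_set_domain sd;

		memset(&sd, 0, sizeof(sd));
		sd.handle = handle;
		sd.read_domains = I915_GEM_DOMAIN_GTT;
		sd.write_domain = I915_GEM_DOMAIN_GTT;

		return ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
	}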
+2 -3
drivers/gpu/drm/i915/i915_gem_proc.c
···
 	list_for_each_entry(gem_request, &dev_priv->mm.request_list,
 			    list)
 	{
-		DRM_PROC_PRINT("    %d @ %d %08x\n",
+		DRM_PROC_PRINT("    %d @ %d\n",
 			       gem_request->seqno,
-			       (int) (jiffies - gem_request->emitted_jiffies),
-			       gem_request->flush_domains);
+			       (int) (jiffies - gem_request->emitted_jiffies));
 	}
 	if (len > request + offset)
 		return request;
+4 -3
drivers/gpu/drm/i915/i915_gem_tiling.c
···
 		   dcc & DCC_CHANNEL_XOR_DISABLE) {
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 			swizzle_y = I915_BIT_6_SWIZZLE_9;
-		} else if (IS_I965GM(dev) || IS_GM45(dev)) {
-			/* GM965 only does bit 11-based channel
-			 * randomization
+		} else if ((IS_I965GM(dev) || IS_GM45(dev)) &&
+			   (dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
+			/* GM965/GM45 does either bit 11 or bit 17
+			 * swizzling.
 			 */
 			swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
 			swizzle_y = I915_BIT_6_SWIZZLE_9_11;
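
The swizzle detection above now distinguishes bit-11 from bit-17 channel XOR
on GM965/GM45 using a DCC bit that the i915_reg.h hunk below defines.  As a
standalone illustration of just that decode: the bit values are copied from
the register header, the description strings are editorial, and the mapping to
actual swizzle modes stays in the driver code above.

	#include <stdint.h>
	#include <stdio.h>

	/* DCC bits as defined in i915_reg.h. */
	#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED	(2 << 0)
	#define DCC_ADDRESSING_MODE_MASK			(3 << 0)
	#define DCC_CHANNEL_XOR_DISABLE				(1 << 10)
	#define DCC_CHANNEL_XOR_BIT_17				(1 << 9)

	/* Describe what the DCC register says about channel interleave and
	 * which address bit (if any) is XORed into the channel select on
	 * GM965/GM45.
	 */
	static const char *describe_dcc(uint32_t dcc)
	{
		if ((dcc & DCC_ADDRESSING_MODE_MASK) !=
		    DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED)
			return "not dual-channel interleaved";
		if (dcc & DCC_CHANNEL_XOR_DISABLE)
			return "dual channel interleaved, channel XOR disabled";
		if (dcc & DCC_CHANNEL_XOR_BIT_17)
			return "dual channel interleaved, XOR on address bit 17";
		return "dual channel interleaved, XOR on address bit 11";
	}

	int main(void)
	{
		uint32_t dcc = DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED;

		printf("%s\n", describe_dcc(dcc));
		printf("%s\n", describe_dcc(dcc | DCC_CHANNEL_XOR_BIT_17));
		return 0;
	}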
+1
drivers/gpu/drm/i915/i915_reg.h
···
 #define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0)
 #define DCC_ADDRESSING_MODE_MASK (3 << 0)
 #define DCC_CHANNEL_XOR_DISABLE (1 << 10)
+#define DCC_CHANNEL_XOR_BIT_17 (1 << 9)
 
 /** 965 MCH register controlling DRAM channel configuration */
 #define C0DRB3 0x10206