drm/i915: Move the execbuffer domain computations together

This eliminates the dev_set_domain function and just inlines it
where it's used, with the goal of moving the manipulation and use of
invalidate_domains and flush_domains closer together. This also
avoids calling add_request unless some domain has been flushed.

Signed-off-by: Keith Packard <keithp@keithp.com>
Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>

Authored by Keith Packard, committed by Dave Airlie
646f0f6e c0d90829

+21 -44
drivers/gpu/drm/i915/i915_gem.c
···
 }
 
 /**
- * Once all of the objects have been set in the proper domain,
- * perform the necessary flush and invalidate operations.
- *
- * Returns the write domains flushed, for use in flush tracking.
- */
-static uint32_t
-i915_gem_dev_set_domain(struct drm_device *dev)
-{
-	uint32_t flush_domains = dev->flush_domains;
-
-	/*
-	 * Now that all the buffers are synced to the proper domains,
-	 * flush and invalidate the collected domains
-	 */
-	if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-			 __func__,
-			 dev->invalidate_domains,
-			 dev->flush_domains);
-#endif
-		i915_gem_flush(dev,
-			       dev->invalidate_domains,
-			       dev->flush_domains);
-		dev->invalidate_domains = 0;
-		dev->flush_domains = 0;
-	}
-
-	return flush_domains;
-}
-
-/**
  * Pin an object to the GTT and evaluate the relocations landing in it.
  */
 static int
···
 		return -EBUSY;
 	}
 
-	/* Zero the gloabl flush/invalidate flags. These
-	 * will be modified as each object is bound to the
-	 * gtt
-	 */
-	dev->invalidate_domains = 0;
-	dev->flush_domains = 0;
-
 	/* Look up object handles and perform the relocations */
 	for (i = 0; i < args->buffer_count; i++) {
 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
···
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as new domains are computed
+	 * for each object
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
 
-		/* Compute new gpu domains and update invalidate/flushing */
+		/* Compute new gpu domains and update invalidate/flush */
 		i915_gem_object_set_to_gpu_domain(obj,
 						  obj->pending_read_domains,
 						  obj->pending_write_domain);
···
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
-	/* Flush/invalidate caches and chipset buffer */
-	flush_domains = i915_gem_dev_set_domain(dev);
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			 __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev,
+			       dev->invalidate_domains,
+			       dev->flush_domains);
+		if (dev->flush_domains)
+			(void)i915_add_request(dev, dev->flush_domains);
+	}
 
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
···
 			 __func__,
 			 ~0);
 #endif
-
-	(void)i915_add_request(dev, flush_domains);
 
 	/* Exec the batchbuffer */
 	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
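For quick reading, here is a condensed sketch of the resulting flow in the
execbuffer path after this patch, drawn from the hunks above; object lookup,
relocation, pinning, and the verify/debug calls in between are elided:

	/* Accumulate the union of domain transitions across all objects. */
	dev->invalidate_domains = 0;
	dev->flush_domains = 0;
	for (i = 0; i < args->buffer_count; i++) {
		struct drm_gem_object *obj = object_list[i];

		i915_gem_object_set_to_gpu_domain(obj,
						  obj->pending_read_domains,
						  obj->pending_write_domain);
	}

	/* One combined flush/invalidate, and a request only if some
	 * write domain was actually flushed.
	 */
	if (dev->invalidate_domains | dev->flush_domains) {
		i915_gem_flush(dev,
			       dev->invalidate_domains,
			       dev->flush_domains);
		if (dev->flush_domains)
			(void)i915_add_request(dev, dev->flush_domains);
	}

With the accumulation loop, the combined flush, and the conditional
i915_add_request now adjacent, it is clear at a glance that a request is
emitted only when some write domain was actually flushed, which the old
split between the loop, i915_gem_dev_set_domain, and the later
unconditional add_request call obscured.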