drm/i915: Cut two args to set_to_gpu_domain that confused this tricky path.

While not strictly required, it helped while thinking about the following
change. This change should be behavior-invariant: it only moves the
read_domains/write_domain arguments into the object's pending_* fields,
which the sole caller already passed in.

Signed-off-by: Eric Anholt <eric@anholt.net>
Signed-off-by: Dave Airlie <airlied@redhat.com>

authored by Eric Anholt and committed by Dave Airlie 8b0e378a 683fdc5f

+16 -22
+16 -22
drivers/gpu/drm/i915/i915_gem.c
··· 34 34 35 35 #define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 36 36 37 - static void 38 - i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, 39 - uint32_t read_domains, 40 - uint32_t write_domain); 41 37 static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 42 38 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 43 39 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); ··· 2017 2021 * drm_agp_chipset_flush 2018 2022 */ 2019 2023 static void 2020 - i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, 2021 - uint32_t read_domains, 2022 - uint32_t write_domain) 2024 + i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj) 2023 2025 { 2024 2026 struct drm_device *dev = obj->dev; 2025 2027 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2026 2028 uint32_t invalidate_domains = 0; 2027 2029 uint32_t flush_domains = 0; 2028 2030 2029 - BUG_ON(read_domains & I915_GEM_DOMAIN_CPU); 2030 - BUG_ON(write_domain == I915_GEM_DOMAIN_CPU); 2031 + BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU); 2032 + BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU); 2031 2033 2032 2034 #if WATCH_BUF 2033 2035 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", 2034 2036 __func__, obj, 2035 - obj->read_domains, read_domains, 2036 - obj->write_domain, write_domain); 2037 + obj->read_domains, obj->pending_read_domains, 2038 + obj->write_domain, obj->pending_write_domain); 2037 2039 #endif 2038 2040 /* 2039 2041 * If the object isn't moving to a new write domain, 2040 2042 * let the object stay in multiple read domains 2041 2043 */ 2042 - if (write_domain == 0) 2043 - read_domains |= obj->read_domains; 2044 + if (obj->pending_write_domain == 0) 2045 + obj->pending_read_domains |= obj->read_domains; 2044 2046 else 2045 2047 obj_priv->dirty = 1; 2046 2048 ··· 2048 2054 * any read domains which differ from the old 2049 2055 * 
write domain 2050 2056 */ 2051 - if (obj->write_domain && obj->write_domain != read_domains) { 2057 + if (obj->write_domain && 2058 + obj->write_domain != obj->pending_read_domains) { 2052 2059 flush_domains |= obj->write_domain; 2053 - invalidate_domains |= read_domains & ~obj->write_domain; 2060 + invalidate_domains |= 2061 + obj->pending_read_domains & ~obj->write_domain; 2054 2062 } 2055 2063 /* 2056 2064 * Invalidate any read caches which may have 2057 2065 * stale data. That is, any new read domains. 2058 2066 */ 2059 - invalidate_domains |= read_domains & ~obj->read_domains; 2067 + invalidate_domains |= obj->pending_read_domains & ~obj->read_domains; 2060 2068 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { 2061 2069 #if WATCH_BUF 2062 2070 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", ··· 2067 2071 i915_gem_clflush_object(obj); 2068 2072 } 2069 2073 2070 - if ((write_domain | flush_domains) != 0) 2071 - obj->write_domain = write_domain; 2072 - obj->read_domains = read_domains; 2074 + if ((obj->pending_write_domain | flush_domains) != 0) 2075 + obj->write_domain = obj->pending_write_domain; 2076 + obj->read_domains = obj->pending_read_domains; 2073 2077 2074 2078 dev->invalidate_domains |= invalidate_domains; 2075 2079 dev->flush_domains |= flush_domains; ··· 2579 2583 struct drm_gem_object *obj = object_list[i]; 2580 2584 2581 2585 /* Compute new gpu domains and update invalidate/flush */ 2582 - i915_gem_object_set_to_gpu_domain(obj, 2583 - obj->pending_read_domains, 2584 - obj->pending_write_domain); 2586 + i915_gem_object_set_to_gpu_domain(obj); 2585 2587 } 2586 2588 2587 2589 i915_verify_inactive(dev, __FILE__, __LINE__);