/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include "i915_drv.h"
#include "intel_frontbuffer.h"
#include "i915_gem_clflush.h"

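/*
 * A single lock shared by every clflush fence; dma_fence_init() requires a
 * spinlock to serialise signalling.
 */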
static DEFINE_SPINLOCK(clflush_lock);

struct clflush {
	struct dma_fence dma; /* Must be first for dma_fence_free() */
	struct i915_sw_fence wait;
	struct work_struct work;
	struct drm_i915_gem_object *obj;
};
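
/*
 * Lifecycle, as implemented below: i915_gem_clflush_object() allocates a
 * struct clflush, uses the i915_sw_fence to wait upon all rendering tracked
 * by the object's reservation object, and publishes the dma_fence as the
 * new exclusive fence. Once the wait completes, the worker flushes the CPU
 * cachelines and signals the dma_fence, unblocking anything queued behind it.
 */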

static const char *i915_clflush_get_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
{
	return "clflush";
}

static void i915_clflush_release(struct dma_fence *fence)
{
	struct clflush *clflush = container_of(fence, typeof(*clflush), dma);

	i915_sw_fence_fini(&clflush->wait);

	BUILD_BUG_ON(offsetof(typeof(*clflush), dma));
	dma_fence_free(&clflush->dma);
}

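/* Identification hooks used by the dma-fence tracepoints and debug output. */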
static const struct dma_fence_ops i915_clflush_ops = {
	.get_driver_name = i915_clflush_get_driver_name,
	.get_timeline_name = i915_clflush_get_timeline_name,
	.release = i915_clflush_release,
};

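/*
 * Flush the CPU cachelines for all backing pages and tell frontbuffer
 * tracking that the object's contents changed from the CPU's point of view.
 */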
static void __i915_do_clflush(struct drm_i915_gem_object *obj)
{
	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	drm_clflush_sg(obj->mm.pages);
	intel_fb_obj_flush(obj, ORIGIN_CPU);
}

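/*
 * Worker run once every fence we waited upon has signalled. The pages are
 * pinned so they cannot be released mid-flush; note that the dma_fence is
 * signalled even if pinning fails, so waiters are never stuck behind us.
 */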
static void i915_clflush_work(struct work_struct *work)
{
	struct clflush *clflush = container_of(work, typeof(*clflush), work);
	struct drm_i915_gem_object *obj = clflush->obj;

	if (i915_gem_object_pin_pages(obj)) {
		DRM_ERROR("Failed to acquire obj->pages for clflushing\n");
		goto out;
	}

	__i915_do_clflush(obj);

	i915_gem_object_unpin_pages(obj);

out:
	i915_gem_object_put(obj);

	dma_fence_signal(&clflush->dma);
	dma_fence_put(&clflush->dma);
}

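/*
 * i915_sw_fence callback: FENCE_COMPLETE fires once every awaited fence has
 * signalled, at which point the flush work may be scheduled; FENCE_FREE
 * drops the reference held on behalf of the sw_fence.
 */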
static int __i915_sw_fence_call
i915_clflush_notify(struct i915_sw_fence *fence,
		    enum i915_sw_fence_notify state)
{
	struct clflush *clflush = container_of(fence, typeof(*clflush), wait);

	switch (state) {
	case FENCE_COMPLETE:
		schedule_work(&clflush->work);
		break;

	case FENCE_FREE:
		dma_fence_put(&clflush->dma);
		break;
	}

	return NOTIFY_DONE;
}

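/*
 * i915_gem_clflush_object - flush the CPU cache domain of a GEM object
 * @obj: the object whose backing storage may need flushing
 * @flags: I915_CLFLUSH_SYNC to flush immediately in the caller rather than
 *	   asynchronously behind a fence, I915_CLFLUSH_FORCE to flush even an
 *	   object we believe to be coherent
 *
 * Returns true if a flush was performed or queued, false if the object did
 * not need one.
 */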
bool i915_gem_clflush_object(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	struct clflush *clflush;

	/*
	 * Stolen memory is always coherent with the GPU as it is explicitly
	 * marked as wc by the system, or the system is cache-coherent.
	 * Similarly, we only access struct pages through the CPU cache, so
	 * anything not backed by physical memory we consider to be always
	 * coherent and so not to need clflushing.
	 */
	if (!i915_gem_object_has_struct_page(obj)) {
		obj->cache_dirty = false;
		return false;
	}

	/* If the GPU is snooping the contents of the CPU cache,
	 * we do not need to manually clear the CPU cache lines. However,
	 * the caches are only snooped when the render cache is
	 * flushed/invalidated. As we always have to emit invalidations
	 * and flushes when moving into and out of the RENDER domain, correct
	 * snooping behaviour occurs naturally as the result of our domain
	 * tracking.
	 */
	if (!(flags & I915_CLFLUSH_FORCE) &&
	    obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ)
		return false;

	trace_i915_gem_object_clflush(obj);

	clflush = NULL;
	if (!(flags & I915_CLFLUSH_SYNC))
		clflush = kmalloc(sizeof(*clflush), GFP_KERNEL);
	if (clflush) {
		GEM_BUG_ON(!obj->cache_dirty);

		dma_fence_init(&clflush->dma,
			       &i915_clflush_ops,
			       &clflush_lock,
			       to_i915(obj->base.dev)->mm.unordered_timeline,
			       0);
		i915_sw_fence_init(&clflush->wait, i915_clflush_notify);

		clflush->obj = i915_gem_object_get(obj);
		INIT_WORK(&clflush->work, i915_clflush_work);

		dma_fence_get(&clflush->dma);

		i915_sw_fence_await_reservation(&clflush->wait,
						obj->resv, NULL,
						true, I915_FENCE_TIMEOUT,
						I915_FENCE_GFP);

		reservation_object_lock(obj->resv, NULL);
		reservation_object_add_excl_fence(obj->resv, &clflush->dma);
		reservation_object_unlock(obj->resv);

		i915_sw_fence_commit(&clflush->wait);
	} else if (obj->mm.pages) {
		__i915_do_clflush(obj);
	} else {
		GEM_BUG_ON(obj->write_domain != I915_GEM_DOMAIN_CPU);
	}

	obj->cache_dirty = false;
	return true;
}
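
/*
 * Usage sketch (illustrative only, not part of this file): a caller that has
 * written to an object through the CPU and is about to make the contents
 * visible to a non-coherent reader might flush like so:
 *
 *	if (obj->cache_dirty)
 *		i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
 *
 * The function performs its own coherency checks, so the cache_dirty test
 * above is merely an optimisation.
 */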