// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"
#include "gt/gen8_ppgtt.h"

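/*
 * A DPT (Display Page Table) wraps a miniature GTT-style address space
 * around a single page-table object: @obj backs the PTEs, @vma is that
 * object's pinned GGTT mapping and @iomem its CPU-side view. The
 * embedded @vm must remain the first member so i915_vm_to_dpt() can
 * use container_of() safely (enforced by the BUILD_BUG_ON() below).
 */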
struct i915_dpt {
	struct i915_address_space vm;

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void __iomem *iomem;
};

#define i915_is_dpt(vm) ((vm)->is_dpt)

static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
	GEM_BUG_ON(!i915_is_dpt(vm));
	return container_of(vm, struct i915_dpt, vm);
}

#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)

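/*
 * DPT PTEs are 64-bit gen8-style entries sitting behind an I/O mapping,
 * hence the writeq() rather than a plain pointer store.
 */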
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

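/*
 * Insert a single page: encode @addr with the vm's PTE helper and write
 * the result into the slot that @offset selects within the DPT.
 */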
static void dpt_insert_page(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level level,
			    u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;

	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
		     vm->pte_encode(addr, level, flags));
}

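/*
 * Insert all pages backing @vma: the flag/level bits are encoded once
 * up front, then each DMA address is OR'ed in while streaming the PTEs
 * into consecutive slots starting at the vma's node offset.
 */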
static void dpt_insert_entries(struct i915_address_space *vm,
			       struct i915_vma *vma,
			       enum i915_cache_level level,
			       u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;
	const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	int i;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	i = vma->node.start / I915_GTT_PAGE_SIZE;
	for_each_sgt_daddr(addr, sgt_iter, vma->pages)
		gen8_set_pte(&base[i++], pte_encode | addr);
}

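/*
 * Clearing a DPT range is deliberately a no-op; stale PTEs are left in
 * place and simply overwritten by the next bind.
 */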
static void dpt_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

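/*
 * Bind @vma into the DPT: compute the PTE flags (read-only, local
 * memory), write the entries, and mark the vma bound in both the global
 * and local sense since a DPT has no aliasing PPGTT to distinguish them.
 */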
static void dpt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (vma->vm->has_read_only && i915_gem_object_is_readonly(obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(obj))
		pte_flags |= PTE_LM;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	atomic_or(I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND, &vma->flags);
}

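/* Unbinding delegates to clear_range(), which is a no-op for a DPT. */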
static void dpt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	vm->clear_range(vm, vma->node.start, vma->size);
}

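/* Final vm cleanup: drop the reference on the backing page-table object. */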
static void dpt_cleanup(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_gem_object_put(dpt->obj);
}

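/**
 * intel_dpt_pin - pin the DPT page table object into the GGTT
 * @vm: the DPT address space
 *
 * Pin the object backing the DPT into the GGTT (into the mappable
 * aperture unless the device has local memory) and keep a CPU iomap of
 * it around for PTE updates, holding a runtime PM reference while doing
 * so.
 *
 * Returns the pinned vma on success, or an ERR_PTR() on failure.
 */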
struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
{
	struct drm_i915_private *i915 = vm->i915;
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	void __iomem *iomem;
	struct i915_gem_ww_ctx ww;
	int err;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	atomic_inc(&i915->gpu_error.pending_fb_pin);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(dpt->obj, &ww);
		if (err)
			continue;

		vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
						  HAS_LMEM(i915) ? 0 : PIN_MAPPABLE);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			continue;
		}

		iomem = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);

		if (IS_ERR(iomem)) {
			err = PTR_ERR(iomem);
			continue;
		}

		dpt->vma = vma;
		dpt->iomem = iomem;

		i915_vma_get(vma);
	}

	atomic_dec(&i915->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err ? ERR_PTR(err) : vma;
}

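/**
 * intel_dpt_unpin - release the DPT iomap and vma reference
 * @vm: the DPT address space
 *
 * Undo intel_dpt_pin(): unmap the CPU iomap and drop the vma reference
 * taken while pinning.
 */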
void intel_dpt_unpin(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vma_unpin_iomap(dpt->vma);
	i915_vma_put(dpt->vma);
}

/**
 * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
 * @i915: device instance
 *
 * Restore the memory mapping during system resume for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table. The contents of these page
 * tables are not stored in the hibernation image during S4 and S3RST->S4
 * transitions, so here we reprogram the PTE entries in those tables.
 *
 * This function must be called after the mappings in GGTT have been restored
 * by calling i915_ggtt_resume().
 */
void intel_dpt_resume(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);
	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_resume_vm(fb->dpt_vm);
	}
	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

/**
 * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
 * @i915: device instance
 *
 * Suspend the memory mapping during system suspend for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table.
 *
 * This function must be called before the mappings in GGTT are suspended
 * by calling i915_ggtt_suspend().
 */
void intel_dpt_suspend(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);

	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_suspend_vm(fb->dpt_vm);
	}

	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

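/**
 * intel_dpt_create - create a DPT address space for a framebuffer
 * @fb: the framebuffer that will be mapped through the DPT
 *
 * Size the page table from the fb's (possibly remapped) view, allocate
 * the backing object in local or stolen memory, and initialise a
 * private address space whose insert/clear/bind hooks write gen8-style
 * PTEs into that object.
 *
 * Returns the new address space, or an ERR_PTR() on failure.
 */

/*
 * Sketch of the expected lifecycle, inferred purely from the API in
 * this file (the actual callers live elsewhere in the display code):
 *
 *	vm = intel_dpt_create(fb);
 *	vma = intel_dpt_pin(vm);
 *	... scan out through the DPT-mapped view ...
 *	intel_dpt_unpin(vm);
 *	intel_dpt_destroy(vm);
 */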
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
	struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
	struct drm_i915_private *i915 = to_i915(obj->dev);
	struct drm_i915_gem_object *dpt_obj;
	struct i915_address_space *vm;
	struct i915_dpt *dpt;
	size_t size;
	int ret;

	if (intel_fb_needs_pot_stride_remap(fb))
		size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
	else
		size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);

	size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);

	if (HAS_LMEM(i915))
		dpt_obj = i915_gem_object_create_lmem(i915, size, 0);
	else
		dpt_obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(dpt_obj))
		return ERR_CAST(dpt_obj);

	ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
	if (ret) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(ret);
	}

	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
	if (!dpt) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(-ENOMEM);
	}

	vm = &dpt->vm;

	vm->gt = to_gt(i915);
	vm->i915 = i915;
	vm->dma = i915->drm.dev;
	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	vm->is_dpt = true;

	i915_address_space_init(vm, VM_CLASS_DPT);

	vm->insert_page = dpt_insert_page;
	vm->clear_range = dpt_clear_range;
	vm->insert_entries = dpt_insert_entries;
	vm->cleanup = dpt_cleanup;

	vm->vma_ops.bind_vma = dpt_bind_vma;
	vm->vma_ops.unbind_vma = dpt_unbind_vma;

	vm->pte_encode = gen8_ggtt_pte_encode;

	dpt->obj = dpt_obj;

	return &dpt->vm;
}

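/**
 * intel_dpt_destroy - destroy a DPT address space
 * @vm: the DPT address space
 *
 * Close the vm; once the last reference is gone, dpt_cleanup() releases
 * the backing page-table object.
 */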
void intel_dpt_destroy(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vm_close(&dpt->vm);
}