// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "gem/i915_gem_domain.h"
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "gt/gen8_ppgtt.h"

#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
#include "intel_fb.h"

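/*
 * The display page table (DPT) is a framebuffer-private page table through
 * which the display engine accesses the framebuffer. @vm embeds the DPT
 * address space, @obj backs the page table itself, and @vma/@iomem provide
 * the GGTT mapping through which the CPU programs the PTEs. @vm must remain
 * the first member, see i915_vm_to_dpt().
 */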
struct i915_dpt {
	struct i915_address_space vm;

	struct drm_i915_gem_object *obj;
	struct i915_vma *vma;
	void __iomem *iomem;
};

#define i915_is_dpt(vm) ((vm)->is_dpt)

static inline struct i915_dpt *
i915_vm_to_dpt(struct i915_address_space *vm)
{
	BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
	GEM_BUG_ON(!i915_is_dpt(vm));
	return container_of(vm, struct i915_dpt, vm);
}

#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT)

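/* DPT entries use the 64-bit gen8 PTE layout; write one entry to @addr. */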
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}

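/* Encode and write a single PTE at @offset within the DPT. */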
static void dpt_insert_page(struct i915_address_space *vm,
			    dma_addr_t addr,
			    u64 offset,
			    enum i915_cache_level level,
			    u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;

	gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
		     vm->pte_encode(addr, level, flags));
}

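/*
 * Write PTEs for every page backing @vma_res, starting at its offset within
 * the DPT. The address-independent PTE bits are encoded once and then OR'ed
 * with each page's DMA address.
 */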
static void dpt_insert_entries(struct i915_address_space *vm,
			       struct i915_vma_resource *vma_res,
			       enum i915_cache_level level,
			       u32 flags)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	gen8_pte_t __iomem *base = dpt->iomem;
	const gen8_pte_t pte_encode = vm->pte_encode(0, level, flags);
	struct sgt_iter sgt_iter;
	dma_addr_t addr;
	int i;

	/*
	 * Note that we ignore PTE_READ_ONLY here. The caller must be careful
	 * not to allow the user to override access to a read only page.
	 */

	i = vma_res->start / I915_GTT_PAGE_SIZE;
	for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
		gen8_set_pte(&base[i++], pte_encode | addr);
}

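/*
 * Clearing a range is a no-op for a DPT: stale entries are simply left in
 * place and overwritten by the next bind.
 */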
static void dpt_clear_range(struct i915_address_space *vm,
			    u64 start, u64 length)
{
}

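/* Write the PTEs for @vma_res, honouring read-only and local-memory flags. */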
static void dpt_bind_vma(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash,
			 struct i915_vma_resource *vma_res,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	u32 pte_flags;

	if (vma_res->bound_flags)
		return;

	/* Applicable to VLV (gen8+ do not support RO in the GGTT) */
	pte_flags = 0;
	if (vm->has_read_only && vma_res->bi.readonly)
		pte_flags |= PTE_READ_ONLY;
	if (vma_res->bi.lmem)
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma_res, cache_level, pte_flags);

	vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}

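/* Unbinding only clears the range, which is a no-op for a DPT (see above). */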
static void dpt_unbind_vma(struct i915_address_space *vm,
			   struct i915_vma_resource *vma_res)
{
	vm->clear_range(vm, vma_res->start, vma_res->vma_size);
}

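/* Release the backing object when the address space is destroyed. */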
static void dpt_cleanup(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_gem_object_put(dpt->obj);
}

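/**
 * intel_dpt_pin - pin the DPT page table object into the GGTT
 * @vm: the DPT address space
 *
 * Pin the object backing @vm into the GGTT and map it for CPU access, so
 * that the DPT PTEs can be programmed through dpt->iomem. A stolen memory
 * object must be pinned in the mappable aperture.
 *
 * Returns the pinned vma, or an ERR_PTR() on failure.
 */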
struct i915_vma *intel_dpt_pin(struct i915_address_space *vm)
{
	struct drm_i915_private *i915 = vm->i915;
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);
	intel_wakeref_t wakeref;
	struct i915_vma *vma;
	void __iomem *iomem;
	struct i915_gem_ww_ctx ww;
	u64 pin_flags = 0;
	int err;

	if (i915_gem_object_is_stolen(dpt->obj))
		pin_flags |= PIN_MAPPABLE;

	wakeref = intel_runtime_pm_get(&i915->runtime_pm);
	atomic_inc(&i915->gpu_error.pending_fb_pin);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(dpt->obj, &ww);
		if (err)
			continue;

		vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0, 4096,
						  pin_flags);
		if (IS_ERR(vma)) {
			err = PTR_ERR(vma);
			continue;
		}

		iomem = i915_vma_pin_iomap(vma);
		i915_vma_unpin(vma);

		if (IS_ERR(iomem)) {
			err = PTR_ERR(iomem);
			continue;
		}

		dpt->vma = vma;
		dpt->iomem = iomem;

		i915_vma_get(vma);
	}

	atomic_dec(&i915->gpu_error.pending_fb_pin);
	intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	return err ? ERR_PTR(err) : vma;
}

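/**
 * intel_dpt_unpin - release the mapping and reference taken by intel_dpt_pin()
 * @vm: the DPT address space
 */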
void intel_dpt_unpin(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	i915_vma_unpin_iomap(dpt->vma);
	i915_vma_put(dpt->vma);
}

/**
 * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
 * @i915: device instance
 *
 * Restore the memory mapping during system resume for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table. The content of these page
 * tables is not stored in the hibernation image during S4 and S3RST->S4
 * transitions, so here we reprogram the PTE entries in those tables.
 *
 * This function must be called after the mappings in GGTT have been restored
 * by calling i915_ggtt_resume().
 */
void intel_dpt_resume(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);
	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_resume_vm(fb->dpt_vm);
	}
	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

/**
 * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
 * @i915: device instance
 *
 * Suspend the memory mapping during system suspend for all framebuffers which
 * are mapped to HW via a GGTT->DPT page table.
 *
 * This function must be called before the mappings in GGTT are suspended
 * by calling i915_ggtt_suspend().
 */
void intel_dpt_suspend(struct drm_i915_private *i915)
{
	struct drm_framebuffer *drm_fb;

	if (!HAS_DISPLAY(i915))
		return;

	mutex_lock(&i915->drm.mode_config.fb_lock);

	drm_for_each_fb(drm_fb, &i915->drm) {
		struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);

		if (fb->dpt_vm)
			i915_ggtt_suspend_vm(fb->dpt_vm);
	}

	mutex_unlock(&i915->drm.mode_config.fb_lock);
}

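/**
 * intel_dpt_create - create a DPT address space for a framebuffer
 * @fb: the framebuffer the DPT will map
 *
 * Size the page table to cover the (possibly remapped) framebuffer view,
 * allocate a backing object for it, preferring local memory, then stolen,
 * then internal memory, and initialize the address space around it.
 *
 * Returns the new address space, or an ERR_PTR() on failure.
 */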
struct i915_address_space *
intel_dpt_create(struct intel_framebuffer *fb)
{
	struct drm_gem_object *obj = &intel_fb_obj(&fb->base)->base;
	struct drm_i915_private *i915 = to_i915(obj->dev);
	struct drm_i915_gem_object *dpt_obj;
	struct i915_address_space *vm;
	struct i915_dpt *dpt;
	size_t size;
	int ret;

	if (intel_fb_needs_pot_stride_remap(fb))
		size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
	else
		size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);

	size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);

	dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
		dpt_obj = i915_gem_object_create_stolen(i915, size);
	if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
		drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
		dpt_obj = i915_gem_object_create_internal(i915, size);
	}
	if (IS_ERR(dpt_obj))
		return ERR_CAST(dpt_obj);

	ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
	if (!ret) {
		ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
		i915_gem_object_unlock(dpt_obj);
	}
	if (ret) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(ret);
	}

	dpt = kzalloc(sizeof(*dpt), GFP_KERNEL);
	if (!dpt) {
		i915_gem_object_put(dpt_obj);
		return ERR_PTR(-ENOMEM);
	}

	vm = &dpt->vm;

	vm->gt = to_gt(i915);
	vm->i915 = i915;
	vm->dma = i915->drm.dev;
	vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
	vm->is_dpt = true;

	i915_address_space_init(vm, VM_CLASS_DPT);

	vm->insert_page = dpt_insert_page;
	vm->clear_range = dpt_clear_range;
	vm->insert_entries = dpt_insert_entries;
	vm->cleanup = dpt_cleanup;

	vm->vma_ops.bind_vma = dpt_bind_vma;
	vm->vma_ops.unbind_vma = dpt_unbind_vma;

	vm->pte_encode = gen8_ggtt_pte_encode;

	dpt->obj = dpt_obj;
	dpt->obj->is_dpt = true;

	return &dpt->vm;
}

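/**
 * intel_dpt_destroy - drop the final reference on a DPT address space
 * @vm: the DPT address space
 */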
void intel_dpt_destroy(struct i915_address_space *vm)
{
	struct i915_dpt *dpt = i915_vm_to_dpt(vm);

	dpt->obj->is_dpt = false;
	i915_vm_put(&dpt->vm);
}

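/*
 * DPT usage by the planes can be disabled through chicken bits on display
 * version 13 and 14 hardware; program them to honour the enable_dpt module
 * parameter.
 */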
void intel_dpt_configure(struct intel_crtc *crtc)
{
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (DISPLAY_VER(i915) == 14) {
		enum pipe pipe = crtc->pipe;
		enum plane_id plane_id;

		for_each_plane_id_on_crtc(crtc, plane_id) {
			if (plane_id == PLANE_CURSOR)
				continue;

			intel_de_rmw(i915, PLANE_CHICKEN(pipe, plane_id),
				     PLANE_CHICKEN_DISABLE_DPT,
				     i915->params.enable_dpt ? 0 : PLANE_CHICKEN_DISABLE_DPT);
		}
	} else if (DISPLAY_VER(i915) == 13) {
		intel_de_rmw(i915, CHICKEN_MISC_2,
			     CHICKEN_MISC_DISABLE_DPT,
			     i915->params.enable_dpt ? 0 : CHICKEN_MISC_DISABLE_DPT);
	}
}