/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/i915_drm.h>
#include "i915_drv.h"

/**
 * DOC: fence register handling
 *
 * Important to avoid confusion: the "fences" in the i915 driver are not
 * execution fences used to track command completion but hardware detiler
 * objects which wrap a given range of the global GTT. Each platform has only
 * a fairly limited set of these objects.
 *
 * Fences are used to detile GTT memory mappings. They're also connected to the
 * hardware frontbuffer render tracking and hence interact with frontbuffer
 * compression. Furthermore on older platforms fences are required for tiled
 * objects used by the display engine. They can also be used by the render
 * engine - they're required for blitter commands and are optional for render
 * commands. But on gen4+ both display (with the exception of fbc) and rendering
 * have their own tiling state bits and don't need fences.
 *
 * Also note that fences only support X and Y tiling and hence can't be used for
 * the fancier new tiling formats like W, Ys and Yf.
 *
 * Finally note that because fences are such a restricted resource they're
 * dynamically associated with objects. Furthermore fence state is committed to
 * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code
 * must explicitly call i915_vma_pin_fence() to synchronize fencing status
 * for cpu access. Also note that some code wants an unfenced view, for those
 * cases the fence can be removed forcefully with i915_vma_put_fence().
 *
 * Internally these functions will synchronize with userspace access by removing
 * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
 */
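
/*
 * Illustrative sketch only, not part of the driver flow: a typical caller
 * pins a fence around fenced GTT access and drops it afterwards. This assumes
 * i915_vma_unpin_fence() and the runtime-pm helpers used elsewhere in this
 * file; error handling is elided.
 *
 *	wakeref = intel_runtime_pm_get(i915);
 *	err = i915_vma_pin_fence(vma);
 *	if (err == 0) {
 *		if (vma->fence) {
 *			... detiled access through the GTT mmap ...
 *		}
 *		i915_vma_unpin_fence(vma);
 *	}
 *	intel_runtime_pm_put(i915, wakeref);
 */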

#define pipelined 0

static void i965_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	i915_reg_t fence_reg_lo, fence_reg_hi;
	int fence_pitch_shift;
	u64 val;

	if (INTEL_GEN(fence->i915) >= 6) {
		fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
		fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
		fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;

	} else {
		fence_reg_lo = FENCE_REG_965_LO(fence->id);
		fence_reg_hi = FENCE_REG_965_HI(fence->id);
		fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
	}

	val = 0;
	if (vma) {
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
		GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
		GEM_BUG_ON(!IS_ALIGNED(stride, 128));

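		/*
		 * Register layout, as built below: the upper 32 bits hold
		 * the last page of the fenced range, the lower 32 bits the
		 * first page, the pitch in 128-byte units, the Y-tile bit
		 * and the valid bit.
		 */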
		val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
		val |= vma->node.start;
		val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
			val |= BIT(I965_FENCE_TILING_Y_SHIFT);
		val |= I965_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct drm_i915_private *dev_priv = fence->i915;

		/* To w/a incoherency with non-atomic 64-bit register updates,
		 * we split the 64-bit update into two 32-bit writes. In order
		 * for a partial fence not to be evaluated between writes, we
		 * precede the update with a write to turn off the fence
		 * register, and only enable the fence as the last step.
		 *
		 * For extra levels of paranoia, we make sure each step lands
		 * before applying the next step.
		 */
		I915_WRITE(fence_reg_lo, 0);
		POSTING_READ(fence_reg_lo);

		I915_WRITE(fence_reg_hi, upper_32_bits(val));
		I915_WRITE(fence_reg_lo, lower_32_bits(val));
		POSTING_READ(fence_reg_lo);
	}
}

static void i915_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
		bool is_y_tiled = tiling == I915_TILING_Y;
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

		if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
			stride /= 128;
		else
			stride /= 512;
		GEM_BUG_ON(!is_power_of_2(stride));

		val = vma->node.start;
		if (is_y_tiled)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I915_FENCE_SIZE_BITS(vma->fence_size);
		val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;

		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct drm_i915_private *dev_priv = fence->i915;
		i915_reg_t reg = FENCE_REG(fence->id);

		I915_WRITE(reg, val);
		POSTING_READ(reg);
	}
}

static void i830_write_fence_reg(struct drm_i915_fence_reg *fence,
				 struct i915_vma *vma)
{
	u32 val;

	val = 0;
	if (vma) {
		unsigned int stride = i915_gem_object_get_stride(vma->obj);

		GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
		GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
		GEM_BUG_ON(!is_power_of_2(vma->fence_size));
		GEM_BUG_ON(!is_power_of_2(stride / 128));
		GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

		val = vma->node.start;
		if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
			val |= BIT(I830_FENCE_TILING_Y_SHIFT);
		val |= I830_FENCE_SIZE_BITS(vma->fence_size);
		val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
		val |= I830_FENCE_REG_VALID;
	}

	if (!pipelined) {
		struct drm_i915_private *dev_priv = fence->i915;
		i915_reg_t reg = FENCE_REG(fence->id);

		I915_WRITE(reg, val);
		POSTING_READ(reg);
	}
}

static void fence_write(struct drm_i915_fence_reg *fence,
			struct i915_vma *vma)
{
	/* Previous access through the fence register is marshalled by
	 * the mb() inside the fault handlers (i915_gem_release_mmaps)
	 * and explicitly managed for internal users.
	 */

	if (IS_GEN(fence->i915, 2))
		i830_write_fence_reg(fence, vma);
	else if (IS_GEN(fence->i915, 3))
		i915_write_fence_reg(fence, vma);
	else
		i965_write_fence_reg(fence, vma);

	/* Access through the fenced region afterwards is
	 * ordered by the posting reads whilst writing the registers.
	 */

	fence->dirty = false;
}

static int fence_update(struct drm_i915_fence_reg *fence,
			struct i915_vma *vma)
{
	intel_wakeref_t wakeref;
	int ret;

	if (vma) {
		if (!i915_vma_is_map_and_fenceable(vma))
			return -EINVAL;

		if (WARN(!i915_gem_object_get_stride(vma->obj) ||
			 !i915_gem_object_get_tiling(vma->obj),
			 "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
			 i915_gem_object_get_stride(vma->obj),
			 i915_gem_object_get_tiling(vma->obj)))
			return -EINVAL;

		ret = i915_active_request_retire(&vma->last_fence,
						 &vma->obj->base.dev->struct_mutex);
		if (ret)
			return ret;
	}

	if (fence->vma) {
		struct i915_vma *old = fence->vma;

		ret = i915_active_request_retire(&old->last_fence,
						 &old->obj->base.dev->struct_mutex);
		if (ret)
			return ret;

		i915_vma_flush_writes(old);
	}

	if (fence->vma && fence->vma != vma) {
		/* Ensure that all userspace CPU access is completed before
		 * stealing the fence.
		 */
		GEM_BUG_ON(fence->vma->fence != fence);
		i915_vma_revoke_mmap(fence->vma);

		fence->vma->fence = NULL;
		fence->vma = NULL;

		list_move(&fence->link, &fence->i915->mm.fence_list);
	}

	/* We only need to update the register itself if the device is awake.
	 * If the device is currently powered down, we will defer the write
	 * to the runtime resume, see i915_gem_restore_fences().
	 */
	wakeref = intel_runtime_pm_get_if_in_use(fence->i915);
	if (wakeref) {
		fence_write(fence, vma);
		intel_runtime_pm_put(fence->i915, wakeref);
	}

	if (vma) {
		if (fence->vma != vma) {
			vma->fence = fence;
			fence->vma = vma;
		}

		list_move_tail(&fence->link, &fence->i915->mm.fence_list);
	}

	return 0;
}

/**
 * i915_vma_put_fence - force-remove fence for a VMA
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given object, which is useful
 * if the kernel wants to do untiled GTT access.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int i915_vma_put_fence(struct i915_vma *vma)
{
	struct drm_i915_fence_reg *fence = vma->fence;

	if (!fence)
		return 0;

	if (fence->pin_count)
		return -EBUSY;

	return fence_update(fence, NULL);
}

static struct drm_i915_fence_reg *fence_find(struct drm_i915_private *dev_priv)
{
	struct drm_i915_fence_reg *fence;

	list_for_each_entry(fence, &dev_priv->mm.fence_list, link) {
		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

		if (fence->pin_count)
			continue;

		return fence;
	}

	/* Wait for completion of pending flips which consume fences */
	if (intel_has_pending_fb_unpin(dev_priv))
		return ERR_PTR(-EAGAIN);

	return ERR_PTR(-EDEADLK);
}

/**
 * i915_vma_pin_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @vma,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int
i915_vma_pin_fence(struct i915_vma *vma)
{
	struct drm_i915_fence_reg *fence;
	struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
	int err;

	/* Note that we revoke fences on runtime suspend. Therefore the user
	 * must keep the device awake whilst using the fence.
	 */
	assert_rpm_wakelock_held(vma->vm->i915);

	/* Just update our place in the LRU if our fence is getting reused. */
	if (vma->fence) {
		fence = vma->fence;
		GEM_BUG_ON(fence->vma != vma);
		fence->pin_count++;
		if (!fence->dirty) {
			list_move_tail(&fence->link,
				       &fence->i915->mm.fence_list);
			return 0;
		}
	} else if (set) {
		fence = fence_find(vma->vm->i915);
		if (IS_ERR(fence))
			return PTR_ERR(fence);

		GEM_BUG_ON(fence->pin_count);
		fence->pin_count++;
	} else
		return 0;

	err = fence_update(fence, set);
	if (err)
		goto out_unpin;

	GEM_BUG_ON(fence->vma != set);
	GEM_BUG_ON(vma->fence != (set ? fence : NULL));

	if (set)
		return 0;

out_unpin:
	fence->pin_count--;
	return err;
}

/**
 * i915_reserve_fence - Reserve a fence for vGPU
 * @dev_priv: i915 device private
 *
 * This function walks the fence regs looking for a free one and removes
 * it from the fence_list. It is used to reserve a fence for vGPU to use.
 */
struct drm_i915_fence_reg *
i915_reserve_fence(struct drm_i915_private *dev_priv)
{
	struct drm_i915_fence_reg *fence;
	int count;
	int ret;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	/* Keep at least one fence available for the display engine. */
	count = 0;
	list_for_each_entry(fence, &dev_priv->mm.fence_list, link)
		count += !fence->pin_count;
	if (count <= 1)
		return ERR_PTR(-ENOSPC);

	fence = fence_find(dev_priv);
	if (IS_ERR(fence))
		return fence;

	if (fence->vma) {
		/* Force-remove fence from VMA */
		ret = fence_update(fence, NULL);
		if (ret)
			return ERR_PTR(ret);
	}

	list_del(&fence->link);
	return fence;
}

/**
 * i915_unreserve_fence - Reclaim a reserved fence
 * @fence: the fence reg
 *
 * This function adds a fence register previously reserved for vGPU back to
 * the fence_list, making it available to the driver again.
 */
void i915_unreserve_fence(struct drm_i915_fence_reg *fence)
{
	lockdep_assert_held(&fence->i915->drm.struct_mutex);

	list_add(&fence->link, &fence->i915->mm.fence_list);
}
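
/*
 * Illustrative sketch only: how vGPU-side code might reserve a fence for a
 * guest and hand it back afterwards, using the two helpers above. Anything
 * other than i915_reserve_fence()/i915_unreserve_fence() is a placeholder.
 *
 *	fence = i915_reserve_fence(dev_priv);
 *	if (IS_ERR(fence))
 *		return PTR_ERR(fence);
 *
 *	... program the reserved register directly for the guest ...
 *
 *	i915_unreserve_fence(fence);
 */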

/**
 * i915_gem_revoke_fences - revoke fence state
 * @dev_priv: i915 device private
 *
 * Removes all GTT mmappings via the fence registers. This forces any user
 * of the fence to reacquire that fence before continuing with their access.
 * One use is during GPU reset where the fence register is lost and we need to
 * revoke concurrent userspace access via GTT mmaps until the hardware has been
 * reset and the fence registers have been restored.
 */
void i915_gem_revoke_fences(struct drm_i915_private *dev_priv)
{
	int i;

	lockdep_assert_held(&dev_priv->drm.struct_mutex);

	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];

		GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

		if (fence->vma)
			i915_vma_revoke_mmap(fence->vma);
	}
}

/**
 * i915_gem_restore_fences - restore fence state
 * @dev_priv: i915 device private
 *
 * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume. Note that on runtime suspend we only cancel
 * the fences, to be reacquired by the user later.
 */
void i915_gem_restore_fences(struct drm_i915_private *dev_priv)
{
	int i;

	for (i = 0; i < dev_priv->num_fence_regs; i++) {
		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
		struct i915_vma *vma = reg->vma;

		GEM_BUG_ON(vma && vma->fence != reg);

		/*
		 * Commit delayed tiling changes if we have an object still
		 * attached to the fence, otherwise just clear the fence.
		 */
		if (vma && !i915_gem_object_is_tiled(vma->obj)) {
			GEM_BUG_ON(!reg->dirty);
			GEM_BUG_ON(i915_vma_has_userfault(vma));

			list_move(&reg->link, &dev_priv->mm.fence_list);
			vma->fence = NULL;
			vma = NULL;
		}

		fence_write(reg, vma);
		reg->vma = vma;
	}
}

/**
 * DOC: tiling swizzling details
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * Performance improvements from doing this on the back/depth buffer are on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMS) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled. However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y. So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- Bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics. This
 * is called "Channel XOR Randomization" in the MCH documentation. The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all. Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what the address
 * swizzling it needs to do is, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */
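
/*
 * Worked example (illustrative only): with I915_BIT_6_SWIZZLE_9_10 the CPU
 * view of a linear offset has bit 6 XORed with bits 9 and 10 of that offset.
 * A sketch of the address fixup a CPU-side copy would apply:
 *
 *	u32 swizzled = offset ^ (((offset >> 3) ^ (offset >> 4)) & (1 << 6));
 *
 * e.g. offset 0x200 (bit 9 set, bits 6 and 10 clear) maps to 0x240, while
 * offset 0x600 (bits 9 and 10 both set) is left untouched. For the *_11 and
 * *_17 variants, bit 11 or bit 17 is additionally XORed into bit 6.
 */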

/**
 * i915_gem_detect_bit_6_swizzle - detect bit 6 swizzling pattern
 * @dev_priv: i915 device private
 *
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
void
i915_gem_detect_bit_6_swizzle(struct drm_i915_private *dev_priv)
{
	u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
	u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

	if (INTEL_GEN(dev_priv) >= 8 || IS_VALLEYVIEW(dev_priv)) {
		/*
		 * On BDW+, swizzling is not used. We leave the CPU memory
		 * controller in charge of optimizing memory accesses without
		 * the extra address manipulation GPU side.
		 *
		 * VLV and CHV don't have GPU swizzling.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (INTEL_GEN(dev_priv) >= 6) {
		if (dev_priv->preserve_bios_swizzle) {
			if (I915_READ(DISP_ARB_CTL) &
			    DISP_TILE_SURFACE_SWIZZLING) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		} else {
			u32 dimm_c0, dimm_c1;
			dimm_c0 = I915_READ(MAD_DIMM_C0);
			dimm_c1 = I915_READ(MAD_DIMM_C1);
			dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
			/* Enable swizzling when the channels are populated
			 * with identically sized dimms. We don't need to check
			 * the 3rd channel because no cpu with gpu attached
			 * ships in that configuration. Also, swizzling only
			 * makes sense for 2 channels anyway. */
			if (dimm_c0 == dimm_c1) {
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else {
				swizzle_x = I915_BIT_6_SWIZZLE_NONE;
				swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			}
		}
	} else if (IS_GEN(dev_priv, 5)) {
		/* On Ironlake, whatever the DRAM config, the GPU always does
		 * the same swizzling setup.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
		swizzle_y = I915_BIT_6_SWIZZLE_9;
	} else if (IS_GEN(dev_priv, 2)) {
		/* As far as we know, the 865 doesn't have these bit 6
		 * swizzling issues.
		 */
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	} else if (IS_MOBILE(dev_priv) ||
		   IS_I915G(dev_priv) || IS_I945G(dev_priv)) {
		u32 dcc;

		/* On 9xx chipsets, channel interleave by the CPU is
		 * determined by DCC. For single-channel, neither the CPU
		 * nor the GPU do swizzling. For dual channel interleaved,
		 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
		 * 9 for Y tiled. The CPU's interleave is independent, and
		 * can be based on either bit 11 (haven't seen this yet) or
		 * bit 17 (common).
		 */
		dcc = I915_READ(DCC);
		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
			break;
		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
			if (dcc & DCC_CHANNEL_XOR_DISABLE) {
				/* This is the base swizzling by the GPU for
				 * tiled buffers.
				 */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
				swizzle_y = I915_BIT_6_SWIZZLE_9;
			} else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
				/* Bit 11 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
			} else {
				/* Bit 17 swizzling by the CPU in addition. */
				swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
				swizzle_y = I915_BIT_6_SWIZZLE_9_17;
			}
			break;
		}

		/* check for L-shaped memory aka modified enhanced addressing */
		if (IS_GEN(dev_priv, 4) &&
		    !(I915_READ(DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}

		if (dcc == 0xffffffff) {
			DRM_ERROR("Couldn't read from MCHBAR. "
				  "Disabling tiling.\n");
			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
		}
	} else {
		/* The 965, G33, and newer, have a very flexible memory
		 * configuration. It will enable dual-channel mode
		 * (interleaving) on as much memory as it can, and the GPU
		 * will additionally sometimes enable different bit 6
		 * swizzling for tiled objects from the CPU.
		 *
		 * Here's what I found on the G965:
		 *    slot fill         memory size  swizzling
		 * 0A   0B   1A   1B   1-ch   2-ch
		 * 512  0    0    0    512    0     O
		 * 512  0    512  0    16     1008  X
		 * 512  0    0    512  16     1008  X
		 * 0    512  0    512  16     1008  X
		 * 1024 1024 1024 0    2048   1024  O
		 *
		 * We could probably detect this based on either the DRB
		 * matching, which was the case for the swizzling required in
		 * the table above, or from the 1-ch value being less than
		 * the minimum size of a rank.
		 *
		 * Reports indicate that the swizzling actually
		 * varies depending upon page placement inside the
		 * channels, i.e. we see swizzled pages where the
		 * banks of memory are paired and unswizzled on the
		 * uneven portion, so leave that as unknown.
		 */
		if (I915_READ16(C0DRB3) == I915_READ16(C1DRB3)) {
			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
			swizzle_y = I915_BIT_6_SWIZZLE_9;
		}
	}

	if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
	    swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
		/* Userspace likes to explode if it sees unknown swizzling,
		 * so lie. We will finish the lie when reporting through
		 * the get-tiling-ioctl by reporting the physical swizzle
		 * mode as unknown instead.
		 *
		 * As we don't strictly know what the swizzling is, it may be
		 * bit17 dependent, and so we need to also prevent the pages
		 * from being moved.
		 */
		dev_priv->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
	}

	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
}

/*
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void
i915_gem_swizzle_page(struct page *page)
{
	char temp[64];
	char *vaddr;
	int i;

	vaddr = kmap(page);

	for (i = 0; i < PAGE_SIZE; i += 128) {
		memcpy(temp, &vaddr[i], 64);
		memcpy(&vaddr[i], &vaddr[i + 64], 64);
		memcpy(&vaddr[i + 64], temp, 64);
	}

	kunmap(page);
}

/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since that state has been saved with
 * i915_gem_object_save_bit_17_swizzle().
 *
 * This is called when pinning backing storage again, since the kernel is free
 * to move unpinned backing storage around (either by directly moving pages or
 * by swapping them out and back in again).
 */
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
				  struct sg_table *pages)
{
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL)
		return;

	i = 0;
	for_each_sgt_page(page, sgt_iter, pages) {
		char new_bit_17 = page_to_phys(page) >> 17;
		if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
			i915_gem_swizzle_page(page);
			set_page_dirty(page);
		}
		i++;
	}
}

/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves the bit 17 of each page frame number so that swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
 * be called before the backing storage can be unpinned.
 */
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
				    struct sg_table *pages)
{
	const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
	struct sgt_iter sgt_iter;
	struct page *page;
	int i;

	if (obj->bit_17 == NULL) {
		obj->bit_17 = kcalloc(BITS_TO_LONGS(page_count),
				      sizeof(long), GFP_KERNEL);
		if (obj->bit_17 == NULL) {
			DRM_ERROR("Failed to allocate memory for bit 17 record\n");
			return;
		}
	}

	i = 0;

	for_each_sgt_page(page, sgt_iter, pages) {
		if (page_to_phys(page) & (1 << 17))
			__set_bit(i, obj->bit_17);
		else
			__clear_bit(i, obj->bit_17);
		i++;
	}
}