drivers/gpu/drm/i915/i915_gem_fence_reg.c
/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_scatterlist.h"
#include "i915_vgpu.h"

/**
 * DOC: fence register handling
 *
 * Important to avoid confusion: "fences" in the i915 driver are not execution
 * fences used to track command completion but hardware detiler objects which
 * wrap a given range of the global GTT. Each platform has only a fairly limited
 * set of these objects.
 *
 * Fences are used to detile GTT memory mappings. They're also connected to the
 * hardware frontbuffer render tracking and hence interact with frontbuffer
 * compression. Furthermore, on older platforms fences are required for tiled
 * objects used by the display engine. They can also be used by the render
 * engine - they're required for blitter commands and are optional for render
 * commands. But on gen4+ both display (with the exception of fbc) and rendering
 * have their own tiling state bits and don't need fences.
 *
 * Also note that fences only support X and Y tiling and hence can't be used for
 * the fancier new tiling formats like W, Ys and Yf.
 *
 * Finally note that because fences are such a restricted resource they're
 * dynamically associated with objects. Furthermore, fence state is committed to
 * the hardware lazily to avoid unnecessary stalls on gen2/3. Therefore code must
 * explicitly call i915_vma_pin_fence() to synchronize fencing status
 * for cpu access. Also note that some code wants an unfenced view, for those
 * cases the fence can be removed forcefully with i915_vma_put_fence().
 *
 * Internally these functions will synchronize with userspace access by removing
 * CPU ptes into GTT mmaps (not the GTT ptes themselves) as needed.
 */

#define pipelined 0

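/*
 * Gen4+ fences are a pair of 32bit registers forming a 64bit value that
 * encodes the last page of the fenced range, the start address, the pitch
 * in 128 byte units (minus one), the Y-tiling select and the valid bit.
 * i965_write_fence_reg() packs these fields from the vma and its object's
 * tiling state, with the register offsets and pitch shift differing between
 * gen6+ and older parts.
 */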
static void i965_write_fence_reg(struct i915_fence_reg *fence,
                                 struct i915_vma *vma)
{
        i915_reg_t fence_reg_lo, fence_reg_hi;
        int fence_pitch_shift;
        u64 val;

        if (INTEL_GEN(fence->i915) >= 6) {
                fence_reg_lo = FENCE_REG_GEN6_LO(fence->id);
                fence_reg_hi = FENCE_REG_GEN6_HI(fence->id);
                fence_pitch_shift = GEN6_FENCE_PITCH_SHIFT;

        } else {
                fence_reg_lo = FENCE_REG_965_LO(fence->id);
                fence_reg_hi = FENCE_REG_965_HI(fence->id);
                fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
        }

        val = 0;
        if (vma) {
                unsigned int stride = i915_gem_object_get_stride(vma->obj);

                GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
                GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
                GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
                GEM_BUG_ON(!IS_ALIGNED(stride, 128));

                val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
                val |= vma->node.start;
                val |= (u64)((stride / 128) - 1) << fence_pitch_shift;
                if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
                        val |= BIT(I965_FENCE_TILING_Y_SHIFT);
                val |= I965_FENCE_REG_VALID;
        }

        if (!pipelined) {
                struct intel_uncore *uncore = &fence->i915->uncore;

                /*
                 * To w/a incoherency with non-atomic 64-bit register updates,
                 * we split the 64-bit update into two 32-bit writes. In order
                 * for a partial fence not to be evaluated between writes, we
                 * precede the update with a write to turn off the fence
                 * register, and only enable the fence as the last step.
                 *
                 * For extra levels of paranoia, we make sure each step lands
                 * before applying the next step.
                 */
                intel_uncore_write_fw(uncore, fence_reg_lo, 0);
                intel_uncore_posting_read_fw(uncore, fence_reg_lo);

                intel_uncore_write_fw(uncore, fence_reg_hi, upper_32_bits(val));
                intel_uncore_write_fw(uncore, fence_reg_lo, lower_32_bits(val));
                intel_uncore_posting_read_fw(uncore, fence_reg_lo);
        }
}

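/*
 * Gen3 fences are a single 32bit register encoding the start address, a
 * power-of-two size field, the log2 of the pitch (in 128 byte units for
 * Y tiling on parts with 128 byte Y tiles, otherwise in 512 byte units),
 * the Y-tiling select and the valid bit.
 */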
static void i915_write_fence_reg(struct i915_fence_reg *fence,
                                 struct i915_vma *vma)
{
        u32 val;

        val = 0;
        if (vma) {
                unsigned int tiling = i915_gem_object_get_tiling(vma->obj);
                bool is_y_tiled = tiling == I915_TILING_Y;
                unsigned int stride = i915_gem_object_get_stride(vma->obj);

                GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
                GEM_BUG_ON(vma->node.start & ~I915_FENCE_START_MASK);
                GEM_BUG_ON(!is_power_of_2(vma->fence_size));
                GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

                if (is_y_tiled && HAS_128_BYTE_Y_TILING(fence->i915))
                        stride /= 128;
                else
                        stride /= 512;
                GEM_BUG_ON(!is_power_of_2(stride));

                val = vma->node.start;
                if (is_y_tiled)
                        val |= BIT(I830_FENCE_TILING_Y_SHIFT);
                val |= I915_FENCE_SIZE_BITS(vma->fence_size);
                val |= ilog2(stride) << I830_FENCE_PITCH_SHIFT;

                val |= I830_FENCE_REG_VALID;
        }

        if (!pipelined) {
                struct intel_uncore *uncore = &fence->i915->uncore;
                i915_reg_t reg = FENCE_REG(fence->id);

                intel_uncore_write_fw(uncore, reg, val);
                intel_uncore_posting_read_fw(uncore, reg);
        }
}

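/*
 * Gen2 (i830) fences follow the same scheme but always express the pitch
 * in 128 byte units and use the i830 size encoding.
 */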
static void i830_write_fence_reg(struct i915_fence_reg *fence,
                                 struct i915_vma *vma)
{
        u32 val;

        val = 0;
        if (vma) {
                unsigned int stride = i915_gem_object_get_stride(vma->obj);

                GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
                GEM_BUG_ON(vma->node.start & ~I830_FENCE_START_MASK);
                GEM_BUG_ON(!is_power_of_2(vma->fence_size));
                GEM_BUG_ON(!is_power_of_2(stride / 128));
                GEM_BUG_ON(!IS_ALIGNED(vma->node.start, vma->fence_size));

                val = vma->node.start;
                if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y)
                        val |= BIT(I830_FENCE_TILING_Y_SHIFT);
                val |= I830_FENCE_SIZE_BITS(vma->fence_size);
                val |= ilog2(stride / 128) << I830_FENCE_PITCH_SHIFT;
                val |= I830_FENCE_REG_VALID;
        }

        if (!pipelined) {
                struct intel_uncore *uncore = &fence->i915->uncore;
                i915_reg_t reg = FENCE_REG(fence->id);

                intel_uncore_write_fw(uncore, reg, val);
                intel_uncore_posting_read_fw(uncore, reg);
        }
}

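/*
 * fence_write() dispatches to the generation specific encoder above and
 * marks the fence register as no longer dirty once the value has been
 * written out.
 */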
static void fence_write(struct i915_fence_reg *fence,
                        struct i915_vma *vma)
{
        /*
         * Previous access through the fence register is marshalled by
         * the mb() inside the fault handlers (i915_gem_release_mmaps)
         * and explicitly managed for internal users.
         */

        if (IS_GEN(fence->i915, 2))
                i830_write_fence_reg(fence, vma);
        else if (IS_GEN(fence->i915, 3))
                i915_write_fence_reg(fence, vma);
        else
                i965_write_fence_reg(fence, vma);

        /*
         * Access through the fenced region afterwards is
         * ordered by the posting reads whilst writing the registers.
         */

        fence->dirty = false;
}

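/*
 * fence_update() (re)assigns a fence register to the given vma, or clears
 * it when vma is NULL: it retires any outstanding fencing activity on both
 * the new and the previous vma, revokes the previous user's CPU mmaps, and
 * only touches the hardware register while the device is awake, otherwise
 * deferring the write to i915_gem_restore_fences().
 */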
static int fence_update(struct i915_fence_reg *fence,
                        struct i915_vma *vma)
{
        intel_wakeref_t wakeref;
        struct i915_vma *old;
        int ret;

        if (vma) {
                if (!i915_vma_is_map_and_fenceable(vma))
                        return -EINVAL;

                if (WARN(!i915_gem_object_get_stride(vma->obj) ||
                         !i915_gem_object_get_tiling(vma->obj),
                         "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
                         i915_gem_object_get_stride(vma->obj),
                         i915_gem_object_get_tiling(vma->obj)))
                        return -EINVAL;

                ret = i915_active_request_retire(&vma->last_fence,
                                             &vma->obj->base.dev->struct_mutex);
                if (ret)
                        return ret;
        }

        old = xchg(&fence->vma, NULL);
        if (old) {
                ret = i915_active_request_retire(&old->last_fence,
                                             &old->obj->base.dev->struct_mutex);
                if (ret) {
                        fence->vma = old;
                        return ret;
                }

                i915_vma_flush_writes(old);

                /*
                 * Ensure that all userspace CPU access is completed before
                 * stealing the fence.
                 */
                if (old != vma) {
                        GEM_BUG_ON(old->fence != fence);
                        i915_vma_revoke_mmap(old);
                        old->fence = NULL;
                }

                list_move(&fence->link, &fence->i915->ggtt.fence_list);
        }

        /*
         * We only need to update the register itself if the device is awake.
         * If the device is currently powered down, we will defer the write
         * to the runtime resume, see i915_gem_restore_fences().
         *
         * This only works for removing the fence register, on acquisition
         * the caller must hold the rpm wakeref. The fence register must
         * be cleared before we can use any other fences to ensure that
         * the new fences do not overlap the elided clears, confusing HW.
         */
        wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm);
        if (!wakeref) {
                GEM_BUG_ON(vma);
                return 0;
        }

        WRITE_ONCE(fence->vma, vma);
        fence_write(fence, vma);

        if (vma) {
                vma->fence = fence;
                list_move_tail(&fence->link, &fence->i915->ggtt.fence_list);
        }

        intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref);
        return 0;
}

/**
 * i915_vma_put_fence - force-remove fence for a VMA
 * @vma: vma to map linearly (not through a fence reg)
 *
 * This function force-removes any fence from the given object, which is useful
 * if the kernel wants to do untiled GTT access.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int i915_vma_put_fence(struct i915_vma *vma)
{
        struct i915_fence_reg *fence = vma->fence;

        if (!fence)
                return 0;

        if (fence->pin_count)
                return -EBUSY;

        return fence_update(fence, NULL);
}

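/*
 * fence_find() scans the LRU ordered ggtt.fence_list for an unpinned fence
 * register to steal. -EAGAIN asks the caller to retry once the pending
 * flips holding fences have completed, while -EDEADLK means every fence is
 * currently pinned.
 */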
static struct i915_fence_reg *fence_find(struct drm_i915_private *i915)
{
        struct i915_fence_reg *fence;

        list_for_each_entry(fence, &i915->ggtt.fence_list, link) {
                GEM_BUG_ON(fence->vma && fence->vma->fence != fence);

                if (fence->pin_count)
                        continue;

                return fence;
        }

        /* Wait for completion of pending flips which consume fences */
        if (intel_has_pending_fb_unpin(i915))
                return ERR_PTR(-EAGAIN);

        return ERR_PTR(-EDEADLK);
}

/**
 * i915_vma_pin_fence - set up fencing for a vma
 * @vma: vma to map through a fence reg
 *
 * When mapping objects through the GTT, userspace wants to be able to write
 * to them without having to worry about swizzling if the object is tiled.
 * This function walks the fence regs looking for a free one for @vma,
 * stealing one if it can't find any.
 *
 * It then sets up the reg based on the object's properties: address, pitch
 * and tiling format.
 *
 * For an untiled surface, this removes any existing fence.
 *
 * Returns:
 *
 * 0 on success, negative error code on failure.
 */
int i915_vma_pin_fence(struct i915_vma *vma)
{
        struct i915_fence_reg *fence;
        struct i915_vma *set = i915_gem_object_is_tiled(vma->obj) ? vma : NULL;
        int err;

        /*
         * Note that we revoke fences on runtime suspend. Therefore the user
         * must keep the device awake whilst using the fence.
         */
        assert_rpm_wakelock_held(&vma->vm->i915->runtime_pm);

        /* Just update our place in the LRU if our fence is getting reused. */
        if (vma->fence) {
                fence = vma->fence;
                GEM_BUG_ON(fence->vma != vma);
                fence->pin_count++;
                if (!fence->dirty) {
                        list_move_tail(&fence->link,
                                       &fence->i915->ggtt.fence_list);
                        return 0;
                }
        } else if (set) {
                fence = fence_find(vma->vm->i915);
                if (IS_ERR(fence))
                        return PTR_ERR(fence);

                GEM_BUG_ON(fence->pin_count);
                fence->pin_count++;
        } else {
                return 0;
        }

        err = fence_update(fence, set);
        if (err)
                goto out_unpin;

        GEM_BUG_ON(fence->vma != set);
        GEM_BUG_ON(vma->fence != (set ? fence : NULL));

        if (set)
                return 0;

out_unpin:
        fence->pin_count--;
        return err;
}

/**
 * i915_reserve_fence - Reserve a fence for vGPU
 * @i915: i915 device private
 *
 * This function walks the fence regs looking for a free one and removes it
 * from the fence_list. It is used to reserve a fence for vGPU to use.
 */
struct i915_fence_reg *i915_reserve_fence(struct drm_i915_private *i915)
{
        struct i915_fence_reg *fence;
        int count;
        int ret;

        lockdep_assert_held(&i915->drm.struct_mutex);

        /* Keep at least one fence available for the display engine. */
        count = 0;
        list_for_each_entry(fence, &i915->ggtt.fence_list, link)
                count += !fence->pin_count;
        if (count <= 1)
                return ERR_PTR(-ENOSPC);

        fence = fence_find(i915);
        if (IS_ERR(fence))
                return fence;

        if (fence->vma) {
                /* Force-remove fence from VMA */
                ret = fence_update(fence, NULL);
                if (ret)
                        return ERR_PTR(ret);
        }

        list_del(&fence->link);
        return fence;
}

/**
 * i915_unreserve_fence - Reclaim a reserved fence
 * @fence: the fence reg
 *
 * This function adds a fence register previously reserved for vGPU back to
 * the fence_list.
 */
void i915_unreserve_fence(struct i915_fence_reg *fence)
{
        lockdep_assert_held(&fence->i915->drm.struct_mutex);

        list_add(&fence->link, &fence->i915->ggtt.fence_list);
}

/**
 * i915_gem_restore_fences - restore fence state
 * @i915: i915 device private
 *
 * Restore the hw fence state to match the software tracking again, to be called
 * after a gpu reset and on resume. Note that on runtime suspend we only cancel
 * the fences, to be reacquired by the user later.
 */
void i915_gem_restore_fences(struct drm_i915_private *i915)
{
        int i;

        rcu_read_lock(); /* keep obj alive as we dereference */
        for (i = 0; i < i915->ggtt.num_fences; i++) {
                struct i915_fence_reg *reg = &i915->ggtt.fence_regs[i];
                struct i915_vma *vma = READ_ONCE(reg->vma);

                GEM_BUG_ON(vma && vma->fence != reg);

                /*
                 * Commit delayed tiling changes if we have an object still
                 * attached to the fence, otherwise just clear the fence.
                 */
                if (vma && !i915_gem_object_is_tiled(vma->obj))
                        vma = NULL;

                fence_write(reg, vma);
        }
        rcu_read_unlock();
}

/**
 * DOC: tiling swizzling details
 *
 * The idea behind tiling is to increase cache hit rates by rearranging
 * pixel data so that a group of pixel accesses are in the same cacheline.
 * The performance improvement from doing this on the back/depth buffer is on
 * the order of 30%.
 *
 * Intel architectures make this somewhat more complicated, though, by
 * adjustments made to addressing of data when the memory is in interleaved
 * mode (matched pairs of DIMMs) to improve memory bandwidth.
 * For interleaved memory, the CPU sends every sequential 64 bytes
 * to an alternate memory channel so it can get the bandwidth from both.
 *
 * The GPU also rearranges its accesses for increased bandwidth to interleaved
 * memory, and it matches what the CPU does for non-tiled.  However, when tiled
 * it does it a little differently, since one walks addresses not just in the
 * X direction but also Y.  So, along with alternating channels when bit
 * 6 of the address flips, it also alternates when other bits flip -- bits 9
 * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
 * are common to both the 915 and 965-class hardware.
 *
 * The CPU also sometimes XORs in higher bits as well, to improve
 * bandwidth doing strided access like we do so frequently in graphics.  This
 * is called "Channel XOR Randomization" in the MCH documentation.  The result
 * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
 * decode.
 *
 * All of this bit 6 XORing has an effect on our memory management,
 * as we need to make sure that the 3d driver can correctly address object
 * contents.
 *
 * If we don't have interleaved memory, all tiling is safe and no swizzling is
 * required.
 *
 * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
 * 17 is not just a page offset, so as we page an object out and back in,
 * individual pages in it will have different bit 17 addresses, resulting in
 * each 64 bytes being swapped with its neighbor!
 *
 * Otherwise, if interleaved, we have to tell the 3d driver what address
 * swizzling it needs to do, since it's writing with the CPU to the pages
 * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
 * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
 * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
 * to match what the GPU expects.
 */

/**
 * detect_bit_6_swizzle - detect bit 6 swizzling pattern
 * @i915: i915 device private
 *
 * Detects bit 6 swizzling of address lookup between IGD access and CPU
 * access through main memory.
 */
static void detect_bit_6_swizzle(struct drm_i915_private *i915)
{
        struct intel_uncore *uncore = &i915->uncore;
        u32 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
        u32 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;

        if (INTEL_GEN(i915) >= 8 || IS_VALLEYVIEW(i915)) {
                /*
                 * On BDW+, swizzling is not used. We leave the CPU memory
                 * controller in charge of optimizing memory accesses without
                 * the extra address manipulation GPU side.
                 *
                 * VLV and CHV don't have GPU swizzling.
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
        } else if (INTEL_GEN(i915) >= 6) {
                if (i915->preserve_bios_swizzle) {
                        if (intel_uncore_read(uncore, DISP_ARB_CTL) &
                            DISP_TILE_SURFACE_SWIZZLING) {
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                                swizzle_y = I915_BIT_6_SWIZZLE_9;
                        } else {
                                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                        }
                } else {
                        u32 dimm_c0, dimm_c1;

                        dimm_c0 = intel_uncore_read(uncore, MAD_DIMM_C0);
                        dimm_c1 = intel_uncore_read(uncore, MAD_DIMM_C1);
                        dimm_c0 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
                        dimm_c1 &= MAD_DIMM_A_SIZE_MASK | MAD_DIMM_B_SIZE_MASK;
                        /*
                         * Enable swizzling when the channels are populated
                         * with identically sized dimms. We don't need to check
                         * the 3rd channel because no cpu with gpu attached
                         * ships in that configuration. Also, swizzling only
                         * makes sense for 2 channels anyway.
                         */
                        if (dimm_c0 == dimm_c1) {
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                                swizzle_y = I915_BIT_6_SWIZZLE_9;
                        } else {
                                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                        }
                }
        } else if (IS_GEN(i915, 5)) {
                /*
                 * On Ironlake, whatever the DRAM config, the GPU always does
                 * the same swizzling setup.
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                swizzle_y = I915_BIT_6_SWIZZLE_9;
        } else if (IS_GEN(i915, 2)) {
                /*
                 * As far as we know, the 865 doesn't have these bit 6
                 * swizzling issues.
                 */
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
        } else if (IS_G45(i915) || IS_I965G(i915) || IS_G33(i915)) {
                /*
                 * The 965, G33, and newer have a very flexible memory
                 * configuration.  It will enable dual-channel mode
                 * (interleaving) on as much memory as it can, and the GPU
                 * will additionally sometimes enable different bit 6
                 * swizzling for tiled objects from the CPU.
                 *
                 * Here's what I found on the G965:
                 *    slot fill         memory size  swizzling
                 * 0A   0B   1A   1B    1-ch   2-ch
                 * 512  0    0    0     512    0     O
                 * 512  0    512  0     16     1008  X
                 * 512  0    0    512   16     1008  X
                 * 0    512  0    512   16     1008  X
                 * 1024 1024 1024 0     2048   1024  O
                 *
                 * We could probably detect this based on either the DRB
                 * matching, which was the case for the swizzling required in
                 * the table above, or from the 1-ch value being less than
                 * the minimum size of a rank.
                 *
                 * Reports indicate that the swizzling actually
                 * varies depending upon page placement inside the
                 * channels, i.e. we see swizzled pages where the
                 * banks of memory are paired and unswizzled on the
                 * uneven portion, so leave that as unknown.
                 */
                if (intel_uncore_read(uncore, C0DRB3) ==
                    intel_uncore_read(uncore, C1DRB3)) {
                        swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                        swizzle_y = I915_BIT_6_SWIZZLE_9;
                }
        } else {
                u32 dcc = intel_uncore_read(uncore, DCC);

                /*
                 * On 9xx chipsets, channel interleave by the CPU is
                 * determined by DCC.  For single-channel, neither the CPU
                 * nor the GPU do swizzling.  For dual channel interleaved,
                 * the GPU's interleave is bit 9 and 10 for X tiled, and bit
                 * 9 for Y tiled.  The CPU's interleave is independent, and
                 * can be based on either bit 11 (haven't seen this yet) or
                 * bit 17 (common).
                 */
                switch (dcc & DCC_ADDRESSING_MODE_MASK) {
                case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
                case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
                        swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                        swizzle_y = I915_BIT_6_SWIZZLE_NONE;
                        break;
                case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
                        if (dcc & DCC_CHANNEL_XOR_DISABLE) {
                                /*
                                 * This is the base swizzling by the GPU for
                                 * tiled buffers.
                                 */
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10;
                                swizzle_y = I915_BIT_6_SWIZZLE_9;
                        } else if ((dcc & DCC_CHANNEL_XOR_BIT_17) == 0) {
                                /* Bit 11 swizzling by the CPU in addition. */
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
                                swizzle_y = I915_BIT_6_SWIZZLE_9_11;
                        } else {
                                /* Bit 17 swizzling by the CPU in addition. */
                                swizzle_x = I915_BIT_6_SWIZZLE_9_10_17;
                                swizzle_y = I915_BIT_6_SWIZZLE_9_17;
                        }
                        break;
                }

                /* check for L-shaped memory aka modified enhanced addressing */
                if (IS_GEN(i915, 4) &&
                    !(intel_uncore_read(uncore, DCC2) & DCC2_MODIFIED_ENHANCED_DISABLE)) {
                        swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
                        swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
                }

                if (dcc == 0xffffffff) {
                        DRM_ERROR("Couldn't read from MCHBAR.  "
                                  "Disabling tiling.\n");
                        swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
                        swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
                }
        }

        if (swizzle_x == I915_BIT_6_SWIZZLE_UNKNOWN ||
            swizzle_y == I915_BIT_6_SWIZZLE_UNKNOWN) {
                /*
                 * Userspace likes to explode if it sees unknown swizzling,
                 * so lie. We will finish the lie when reporting through
                 * the get-tiling-ioctl by reporting the physical swizzle
                 * mode as unknown instead.
                 *
                 * As we don't strictly know what the swizzling is, it may be
                 * bit17 dependent, and so we need to also prevent the pages
                 * from being moved.
                 */
                i915->quirks |= QUIRK_PIN_SWIZZLED_PAGES;
                swizzle_x = I915_BIT_6_SWIZZLE_NONE;
                swizzle_y = I915_BIT_6_SWIZZLE_NONE;
        }

        i915->mm.bit_6_swizzle_x = swizzle_x;
        i915->mm.bit_6_swizzle_y = swizzle_y;
}

/*
 * Swap every 64 bytes of this page around, to account for it having a new
 * bit 17 of its physical address and therefore being interpreted differently
 * by the GPU.
 */
static void i915_gem_swizzle_page(struct page *page)
{
        char temp[64];
        char *vaddr;
        int i;

        vaddr = kmap(page);

        for (i = 0; i < PAGE_SIZE; i += 128) {
                memcpy(temp, &vaddr[i], 64);
                memcpy(&vaddr[i], &vaddr[i + 64], 64);
                memcpy(&vaddr[i + 64], temp, 64);
        }

        kunmap(page);
}

/**
 * i915_gem_object_do_bit_17_swizzle - fixup bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function fixes up the swizzling in case any page frame number for this
 * object has changed in bit 17 since that state has been saved with
 * i915_gem_object_save_bit_17_swizzle().
 *
 * This is called when pinning backing storage again, since the kernel is free
 * to move unpinned backing storage around (either by directly moving pages or
 * by swapping them out and back in again).
 */
void
i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj,
                                  struct sg_table *pages)
{
        struct sgt_iter sgt_iter;
        struct page *page;
        int i;

        if (obj->bit_17 == NULL)
                return;

        i = 0;
        for_each_sgt_page(page, sgt_iter, pages) {
                char new_bit_17 = page_to_phys(page) >> 17;

                if ((new_bit_17 & 0x1) != (test_bit(i, obj->bit_17) != 0)) {
                        i915_gem_swizzle_page(page);
                        set_page_dirty(page);
                }
                i++;
        }
}

/**
 * i915_gem_object_save_bit_17_swizzle - save bit 17 swizzling
 * @obj: i915 GEM buffer object
 * @pages: the scattergather list of physical pages
 *
 * This function saves the bit 17 of each page frame number so that swizzling
 * can be fixed up later on with i915_gem_object_do_bit_17_swizzle(). This must
 * be called before the backing storage can be unpinned.
 */
void
i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj,
                                    struct sg_table *pages)
{
        const unsigned int page_count = obj->base.size >> PAGE_SHIFT;
        struct sgt_iter sgt_iter;
        struct page *page;
        int i;

        if (obj->bit_17 == NULL) {
                obj->bit_17 = bitmap_zalloc(page_count, GFP_KERNEL);
                if (obj->bit_17 == NULL) {
                        DRM_ERROR("Failed to allocate memory for bit 17 "
                                  "record\n");
                        return;
                }
        }

        i = 0;

        for_each_sgt_page(page, sgt_iter, pages) {
                if (page_to_phys(page) & (1 << 17))
                        __set_bit(i, obj->bit_17);
                else
                        __clear_bit(i, obj->bit_17);
                i++;
        }
}

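/*
 * i915_ggtt_init_fences() sets up fence bookkeeping for the global GTT: it
 * initialises the fence and userfault lists, detects the bit 6 swizzle
 * pattern, works out how many fence registers the platform (or the vGPU
 * host) provides, and finally writes the initial state to the hardware via
 * i915_gem_restore_fences().
 */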
void i915_ggtt_init_fences(struct i915_ggtt *ggtt)
{
        struct drm_i915_private *i915 = ggtt->vm.i915;
        int num_fences;
        int i;

        INIT_LIST_HEAD(&ggtt->fence_list);
        INIT_LIST_HEAD(&ggtt->userfault_list);
        intel_wakeref_auto_init(&ggtt->userfault_wakeref, &i915->runtime_pm);

        detect_bit_6_swizzle(i915);

        if (INTEL_GEN(i915) >= 7 &&
            !(IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)))
                num_fences = 32;
        else if (INTEL_GEN(i915) >= 4 ||
                 IS_I945G(i915) || IS_I945GM(i915) ||
                 IS_G33(i915) || IS_PINEVIEW(i915))
                num_fences = 16;
        else
                num_fences = 8;

        if (intel_vgpu_active(i915))
                num_fences = intel_uncore_read(&i915->uncore,
                                               vgtif_reg(avail_rs.fence_num));

        /* Initialize fence registers to zero */
        for (i = 0; i < num_fences; i++) {
                struct i915_fence_reg *fence = &ggtt->fence_regs[i];

                fence->i915 = i915;
                fence->id = i;
                list_add_tail(&fence->link, &ggtt->fence_list);
        }
        ggtt->num_fences = num_fences;

        i915_gem_restore_fences(i915);
}