drivers/gpu/drm/i915/i915_gem_gtt.c
1 /*
2  * Copyright © 2010 Daniel Vetter
3  * Copyright © 2011-2014 Intel Corporation
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  *
24  */
25
26 #include <linux/seq_file.h>
27 #include <drm/drmP.h>
28 #include <drm/i915_drm.h>
29 #include "i915_drv.h"
30 #include "i915_trace.h"
31 #include "intel_drv.h"
32
33 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv);
34 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv);
35
36 static int sanitize_enable_ppgtt(struct drm_device *dev, int enable_ppgtt)
37 {
38         bool has_aliasing_ppgtt;
39         bool has_full_ppgtt;
40
41         has_aliasing_ppgtt = INTEL_INFO(dev)->gen >= 6;
42         has_full_ppgtt = INTEL_INFO(dev)->gen >= 7;
43         if (IS_GEN8(dev))
44                 has_full_ppgtt = false; /* XXX why? */
45
46         if (enable_ppgtt == 0 || !has_aliasing_ppgtt)
47                 return 0;
48
49         if (enable_ppgtt == 1)
50                 return 1;
51
52         if (enable_ppgtt == 2 && has_full_ppgtt)
53                 return 2;
54
55 #ifdef CONFIG_INTEL_IOMMU
56         /* Disable ppgtt on SNB if VT-d is on. */
57         if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped) {
58                 DRM_INFO("Disabling PPGTT because VT-d is on\n");
59                 return 0;
60         }
61 #endif
62
63         /* Early VLV doesn't have this */
64         if (IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
65             dev->pdev->revision < 0xb) {
66                 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
67                 return 0;
68         }
69
70         return has_aliasing_ppgtt ? 1 : 0;
71 }
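/*
 * Editor's illustrative sketch (not driver code): the sanitized value maps
 * onto the PPGTT modes used throughout this file -- 0 disables PPGTT,
 * 1 selects aliasing PPGTT and 2 selects full PPGTT. A hypothetical caller
 * would consume it as:
 *
 *	int mode = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
 *	if (mode == 2)
 *		ppgtt = i915_ppgtt_create(dev, file_priv);
 */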
72
73
74 static void ppgtt_bind_vma(struct i915_vma *vma,
75                            enum i915_cache_level cache_level,
76                            u32 flags);
77 static void ppgtt_unbind_vma(struct i915_vma *vma);
78
79 static inline gen8_gtt_pte_t gen8_pte_encode(dma_addr_t addr,
80                                              enum i915_cache_level level,
81                                              bool valid)
82 {
83         gen8_gtt_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
84         pte |= addr;
85
86         switch (level) {
87         case I915_CACHE_NONE:
88                 pte |= PPAT_UNCACHED_INDEX;
89                 break;
90         case I915_CACHE_WT:
91                 pte |= PPAT_DISPLAY_ELLC_INDEX;
92                 break;
93         default:
94                 pte |= PPAT_CACHED_INDEX;
95                 break;
96         }
97
98         return pte;
99 }
100
101 static inline gen8_ppgtt_pde_t gen8_pde_encode(struct drm_device *dev,
102                                                dma_addr_t addr,
103                                                enum i915_cache_level level)
104 {
105         gen8_ppgtt_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
106         pde |= addr;
107         if (level != I915_CACHE_NONE)
108                 pde |= PPAT_CACHED_PDE_INDEX;
109         else
110                 pde |= PPAT_UNCACHED_INDEX;
111         return pde;
112 }
113
114 static gen6_gtt_pte_t snb_pte_encode(dma_addr_t addr,
115                                      enum i915_cache_level level,
116                                      bool valid, u32 unused)
117 {
118         gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
119         pte |= GEN6_PTE_ADDR_ENCODE(addr);
120
121         switch (level) {
122         case I915_CACHE_L3_LLC:
123         case I915_CACHE_LLC:
124                 pte |= GEN6_PTE_CACHE_LLC;
125                 break;
126         case I915_CACHE_NONE:
127                 pte |= GEN6_PTE_UNCACHED;
128                 break;
129         default:
130                 WARN_ON(1);
131         }
132
133         return pte;
134 }
135
136 static gen6_gtt_pte_t ivb_pte_encode(dma_addr_t addr,
137                                      enum i915_cache_level level,
138                                      bool valid, u32 unused)
139 {
140         gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
141         pte |= GEN6_PTE_ADDR_ENCODE(addr);
142
143         switch (level) {
144         case I915_CACHE_L3_LLC:
145                 pte |= GEN7_PTE_CACHE_L3_LLC;
146                 break;
147         case I915_CACHE_LLC:
148                 pte |= GEN6_PTE_CACHE_LLC;
149                 break;
150         case I915_CACHE_NONE:
151                 pte |= GEN6_PTE_UNCACHED;
152                 break;
153         default:
154                 WARN_ON(1);
155         }
156
157         return pte;
158 }
159
160 static gen6_gtt_pte_t byt_pte_encode(dma_addr_t addr,
161                                      enum i915_cache_level level,
162                                      bool valid, u32 flags)
163 {
164         gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
165         pte |= GEN6_PTE_ADDR_ENCODE(addr);
166
167         /* Mark the page as writable.  Other platforms don't have a
168          * setting for read-only/writable, so this matches that behavior.
169          */
170         if (!(flags & PTE_READ_ONLY))
171                 pte |= BYT_PTE_WRITEABLE;
172
173         if (level != I915_CACHE_NONE)
174                 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
175
176         return pte;
177 }
178
179 static gen6_gtt_pte_t hsw_pte_encode(dma_addr_t addr,
180                                      enum i915_cache_level level,
181                                      bool valid, u32 unused)
182 {
183         gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
184         pte |= HSW_PTE_ADDR_ENCODE(addr);
185
186         if (level != I915_CACHE_NONE)
187                 pte |= HSW_WB_LLC_AGE3;
188
189         return pte;
190 }
191
192 static gen6_gtt_pte_t iris_pte_encode(dma_addr_t addr,
193                                       enum i915_cache_level level,
194                                       bool valid, u32 unused)
195 {
196         gen6_gtt_pte_t pte = valid ? GEN6_PTE_VALID : 0;
197         pte |= HSW_PTE_ADDR_ENCODE(addr);
198
199         switch (level) {
200         case I915_CACHE_NONE:
201                 break;
202         case I915_CACHE_WT:
203                 pte |= HSW_WT_ELLC_LLC_AGE3;
204                 break;
205         default:
206                 pte |= HSW_WB_ELLC_LLC_AGE3;
207                 break;
208         }
209
210         return pte;
211 }
212
213 /* Broadwell Page Directory Pointer Descriptors */
214 static int gen8_write_pdp(struct intel_engine_cs *ring, unsigned entry,
215                            uint64_t val)
216 {
217         int ret;
218
219         BUG_ON(entry >= 4);
220
221         ret = intel_ring_begin(ring, 6);
222         if (ret)
223                 return ret;
224
225         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
226         intel_ring_emit(ring, GEN8_RING_PDP_UDW(ring, entry));
227         intel_ring_emit(ring, (u32)(val >> 32));
228         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
229         intel_ring_emit(ring, GEN8_RING_PDP_LDW(ring, entry));
230         intel_ring_emit(ring, (u32)(val));
231         intel_ring_advance(ring);
232
233         return 0;
234 }
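/*
 * Editor's note (illustrative): each 64-bit PDP value is loaded as two
 * MI_LOAD_REGISTER_IMM(1) pairs -- (register, immediate) for the upper and
 * then the lower 32 bits -- which is why intel_ring_begin() reserves
 * 6 dwords above.
 */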
235
236 static int gen8_mm_switch(struct i915_hw_ppgtt *ppgtt,
237                           struct intel_engine_cs *ring)
238 {
239         int i, ret;
240
241         /* bit of a hack to find the actual last used pd */
242         int used_pd = ppgtt->num_pd_entries / GEN8_PDES_PER_PAGE;
243
244         for (i = used_pd - 1; i >= 0; i--) {
245                 dma_addr_t addr = ppgtt->pd_dma_addr[i];
246                 ret = gen8_write_pdp(ring, i, addr);
247                 if (ret)
248                         return ret;
249         }
250
251         return 0;
252 }
253
254 static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
255                                    uint64_t start,
256                                    uint64_t length,
257                                    bool use_scratch)
258 {
259         struct i915_hw_ppgtt *ppgtt =
260                 container_of(vm, struct i915_hw_ppgtt, base);
261         gen8_gtt_pte_t *pt_vaddr, scratch_pte;
262         unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
263         unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
264         unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
265         unsigned num_entries = length >> PAGE_SHIFT;
266         unsigned last_pte, i;
267
268         scratch_pte = gen8_pte_encode(ppgtt->base.scratch.addr,
269                                       I915_CACHE_LLC, use_scratch);
270
271         while (num_entries) {
272                 struct page *page_table = ppgtt->gen8_pt_pages[pdpe][pde];
273
274                 last_pte = pte + num_entries;
275                 if (last_pte > GEN8_PTES_PER_PAGE)
276                         last_pte = GEN8_PTES_PER_PAGE;
277
278                 pt_vaddr = kmap_atomic(page_table);
279
280                 for (i = pte; i < last_pte; i++) {
281                         pt_vaddr[i] = scratch_pte;
282                         num_entries--;
283                 }
284
285                 if (!HAS_LLC(ppgtt->base.dev))
286                         drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
287                 kunmap_atomic(pt_vaddr);
288
289                 pte = 0;
290                 if (++pde == GEN8_PDES_PER_PAGE) {
291                         pdpe++;
292                         pde = 0;
293                 }
294         }
295 }
296
297 static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
298                                       struct sg_table *pages,
299                                       uint64_t start,
300                                       enum i915_cache_level cache_level, u32 unused)
301 {
302         struct i915_hw_ppgtt *ppgtt =
303                 container_of(vm, struct i915_hw_ppgtt, base);
304         gen8_gtt_pte_t *pt_vaddr;
305         unsigned pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK;
306         unsigned pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;
307         unsigned pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;
308         struct sg_page_iter sg_iter;
309
310         pt_vaddr = NULL;
311
312         for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
313                 if (WARN_ON(pdpe >= GEN8_LEGACY_PDPS))
314                         break;
315
316                 if (pt_vaddr == NULL)
317                         pt_vaddr = kmap_atomic(ppgtt->gen8_pt_pages[pdpe][pde]);
318
319                 pt_vaddr[pte] =
320                         gen8_pte_encode(sg_page_iter_dma_address(&sg_iter),
321                                         cache_level, true);
322                 if (++pte == GEN8_PTES_PER_PAGE) {
323                         if (!HAS_LLC(ppgtt->base.dev))
324                                 drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
325                         kunmap_atomic(pt_vaddr);
326                         pt_vaddr = NULL;
327                         if (++pde == GEN8_PDES_PER_PAGE) {
328                                 pdpe++;
329                                 pde = 0;
330                         }
331                         pte = 0;
332                 }
333         }
334         if (pt_vaddr) {
335                 if (!HAS_LLC(ppgtt->base.dev))
336                         drm_clflush_virt_range(pt_vaddr, PAGE_SIZE);
337                 kunmap_atomic(pt_vaddr);
338         }
339 }
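/*
 * Editor's illustrative sketch (not used by the driver): how the gen8
 * clear/insert loops above decompose a GPU virtual address into the
 * PDPE/PDE/PTE indices, using the same shift/mask macros as
 * gen8_ppgtt_clear_range().
 */
static inline void __maybe_unused
gen8_va_decompose_example(uint64_t start,
			  unsigned *pdpe, unsigned *pde, unsigned *pte)
{
	*pdpe = start >> GEN8_PDPE_SHIFT & GEN8_PDPE_MASK; /* which 1GB PDP */
	*pde = start >> GEN8_PDE_SHIFT & GEN8_PDE_MASK;    /* which page table */
	*pte = start >> GEN8_PTE_SHIFT & GEN8_PTE_MASK;    /* slot within it */
}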
340
341 static void gen8_free_page_tables(struct page **pt_pages)
342 {
343         int i;
344
345         if (pt_pages == NULL)
346                 return;
347
348         for (i = 0; i < GEN8_PDES_PER_PAGE; i++)
349                 if (pt_pages[i])
350                         __free_pages(pt_pages[i], 0);
351 }
352
353 static void gen8_ppgtt_free(const struct i915_hw_ppgtt *ppgtt)
354 {
355         int i;
356
357         for (i = 0; i < ppgtt->num_pd_pages; i++) {
358                 gen8_free_page_tables(ppgtt->gen8_pt_pages[i]);
359                 kfree(ppgtt->gen8_pt_pages[i]);
360                 kfree(ppgtt->gen8_pt_dma_addr[i]);
361         }
362
363         __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
364 }
365
366 static void gen8_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
367 {
368         struct pci_dev *hwdev = ppgtt->base.dev->pdev;
369         int i, j;
370
371         for (i = 0; i < ppgtt->num_pd_pages; i++) {
372                 /* TODO: In the future we'll support sparse mappings, so this
373                  * will have to change. */
374                 if (!ppgtt->pd_dma_addr[i])
375                         continue;
376
377                 pci_unmap_page(hwdev, ppgtt->pd_dma_addr[i], PAGE_SIZE,
378                                PCI_DMA_BIDIRECTIONAL);
379
380                 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
381                         dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
382                         if (addr)
383                                 pci_unmap_page(hwdev, addr, PAGE_SIZE,
384                                                PCI_DMA_BIDIRECTIONAL);
385                 }
386         }
387 }
388
389 static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
390 {
391         struct i915_hw_ppgtt *ppgtt =
392                 container_of(vm, struct i915_hw_ppgtt, base);
393
394         gen8_ppgtt_unmap_pages(ppgtt);
395         gen8_ppgtt_free(ppgtt);
396 }
397
398 static struct page **__gen8_alloc_page_tables(void)
399 {
400         struct page **pt_pages;
401         int i;
402
403         pt_pages = kcalloc(GEN8_PDES_PER_PAGE, sizeof(struct page *), GFP_KERNEL);
404         if (!pt_pages)
405                 return ERR_PTR(-ENOMEM);
406
407         for (i = 0; i < GEN8_PDES_PER_PAGE; i++) {
408                 pt_pages[i] = alloc_page(GFP_KERNEL);
409                 if (!pt_pages[i])
410                         goto bail;
411         }
412
413         return pt_pages;
414
415 bail:
416         gen8_free_page_tables(pt_pages);
417         kfree(pt_pages);
418         return ERR_PTR(-ENOMEM);
419 }
420
421 static int gen8_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt,
422                                            const int max_pdp)
423 {
424         struct page **pt_pages[GEN8_LEGACY_PDPS];
425         int i, ret;
426
427         for (i = 0; i < max_pdp; i++) {
428                 pt_pages[i] = __gen8_alloc_page_tables();
429                 if (IS_ERR(pt_pages[i])) {
430                         ret = PTR_ERR(pt_pages[i]);
431                         goto unwind_out;
432                 }
433         }
434
435         /* NB: Avoid touching gen8_pt_pages until last to keep the allocation
436          * "atomic" for cleanup purposes.
437          */
438         for (i = 0; i < max_pdp; i++)
439                 ppgtt->gen8_pt_pages[i] = pt_pages[i];
440
441         return 0;
442
443 unwind_out:
444         while (i--) {
445                 gen8_free_page_tables(pt_pages[i]);
446                 kfree(pt_pages[i]);
447         }
448
449         return ret;
450 }
451
452 static int gen8_ppgtt_allocate_dma(struct i915_hw_ppgtt *ppgtt)
453 {
454         int i;
455
456         for (i = 0; i < ppgtt->num_pd_pages; i++) {
457                 ppgtt->gen8_pt_dma_addr[i] = kcalloc(GEN8_PDES_PER_PAGE,
458                                                      sizeof(dma_addr_t),
459                                                      GFP_KERNEL);
460                 if (!ppgtt->gen8_pt_dma_addr[i])
461                         return -ENOMEM;
462         }
463
464         return 0;
465 }
466
467 static int gen8_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt,
468                                                 const int max_pdp)
469 {
470         ppgtt->pd_pages = alloc_pages(GFP_KERNEL, get_order(max_pdp << PAGE_SHIFT));
471         if (!ppgtt->pd_pages)
472                 return -ENOMEM;
473
474         ppgtt->num_pd_pages = 1 << get_order(max_pdp << PAGE_SHIFT);
475         BUG_ON(ppgtt->num_pd_pages > GEN8_LEGACY_PDPS);
476
477         return 0;
478 }
479
480 static int gen8_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt,
481                             const int max_pdp)
482 {
483         int ret;
484
485         ret = gen8_ppgtt_allocate_page_directories(ppgtt, max_pdp);
486         if (ret)
487                 return ret;
488
489         ret = gen8_ppgtt_allocate_page_tables(ppgtt, max_pdp);
490         if (ret) {
491                 __free_pages(ppgtt->pd_pages, get_order(max_pdp << PAGE_SHIFT));
492                 return ret;
493         }
494
495         ppgtt->num_pd_entries = max_pdp * GEN8_PDES_PER_PAGE;
496
497         ret = gen8_ppgtt_allocate_dma(ppgtt);
498         if (ret)
499                 gen8_ppgtt_free(ppgtt);
500
501         return ret;
502 }
503
504 static int gen8_ppgtt_setup_page_directories(struct i915_hw_ppgtt *ppgtt,
505                                              const int pd)
506 {
507         dma_addr_t pd_addr;
508         int ret;
509
510         pd_addr = pci_map_page(ppgtt->base.dev->pdev,
511                                &ppgtt->pd_pages[pd], 0,
512                                PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
513
514         ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pd_addr);
515         if (ret)
516                 return ret;
517
518         ppgtt->pd_dma_addr[pd] = pd_addr;
519
520         return 0;
521 }
522
523 static int gen8_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt,
524                                         const int pd,
525                                         const int pt)
526 {
527         dma_addr_t pt_addr;
528         struct page *p;
529         int ret;
530
531         p = ppgtt->gen8_pt_pages[pd][pt];
532         pt_addr = pci_map_page(ppgtt->base.dev->pdev,
533                                p, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
534         ret = pci_dma_mapping_error(ppgtt->base.dev->pdev, pt_addr);
535         if (ret)
536                 return ret;
537
538         ppgtt->gen8_pt_dma_addr[pd][pt] = pt_addr;
539
540         return 0;
541 }
542
543 /**
544  * GEN8 legacy ppgtt programming is accomplished through at most 4 PDP registers,
545  * with a net effect resembling a 2-level page table in normal x86 terms. Each
546  * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 bytes = 4GB of legacy
547  * 32b address space.
548  *
549  * FIXME: split allocation into smaller pieces. For now we only ever do this
550  * once, but with full PPGTT, the multiple contiguous allocations will be bad.
551  * TODO: Do something with the size parameter
552  */
553 static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size)
554 {
555         const int max_pdp = DIV_ROUND_UP(size, 1 << 30);
556         const int min_pt_pages = GEN8_PDES_PER_PAGE * max_pdp;
557         int i, j, ret;
558
559         if (size % (1<<30))
560                 DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size);
561
562         /* 1. Do all our allocations for page directories and page tables. */
563         ret = gen8_ppgtt_alloc(ppgtt, max_pdp);
564         if (ret)
565                 return ret;
566
567         /*
568          * 2. Create DMA mappings for the page directories and page tables.
569          */
570         for (i = 0; i < max_pdp; i++) {
571                 ret = gen8_ppgtt_setup_page_directories(ppgtt, i);
572                 if (ret)
573                         goto bail;
574
575                 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
576                         ret = gen8_ppgtt_setup_page_tables(ppgtt, i, j);
577                         if (ret)
578                                 goto bail;
579                 }
580         }
581
582         /*
583          * 3. Map all the page directory entries to point to the page tables
584          * we've allocated.
585          *
586          * For now, the PPGTT helper functions all require that the PDEs are
587          * plugged in correctly. So we do that now/here. For aliasing PPGTT, we
588          * will never need to touch the PDEs again.
589          */
590         for (i = 0; i < max_pdp; i++) {
591                 gen8_ppgtt_pde_t *pd_vaddr;
592                 pd_vaddr = kmap_atomic(&ppgtt->pd_pages[i]);
593                 for (j = 0; j < GEN8_PDES_PER_PAGE; j++) {
594                         dma_addr_t addr = ppgtt->gen8_pt_dma_addr[i][j];
595                         pd_vaddr[j] = gen8_pde_encode(ppgtt->base.dev, addr,
596                                                       I915_CACHE_LLC);
597                 }
598                 if (!HAS_LLC(ppgtt->base.dev))
599                         drm_clflush_virt_range(pd_vaddr, PAGE_SIZE);
600                 kunmap_atomic(pd_vaddr);
601         }
602
603         ppgtt->switch_mm = gen8_mm_switch;
604         ppgtt->base.clear_range = gen8_ppgtt_clear_range;
605         ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
606         ppgtt->base.cleanup = gen8_ppgtt_cleanup;
607         ppgtt->base.start = 0;
608         ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE;
609
610         ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
611
612         DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n",
613                          ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp);
614         DRM_DEBUG_DRIVER("Allocated %d pages for page tables (%lld wasted)\n",
615                          ppgtt->num_pd_entries,
616                          (ppgtt->num_pd_entries - min_pt_pages) + size % (1<<30));
617         return 0;
618
619 bail:
620         gen8_ppgtt_unmap_pages(ppgtt);
621         gen8_ppgtt_free(ppgtt);
622         return ret;
623 }
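/*
 * Editor's worked example (illustrative): for a 4GB address space,
 * max_pdp = DIV_ROUND_UP(4GB, 1GB) = 4, num_pd_entries = 4 * 512 = 2048
 * PDEs, and base.total = 2048 * 512 * 4096 bytes = 4GB -- one PTE per 4KB
 * page of the legacy 32b address space described above.
 */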
624
625 static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
626 {
627         struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
628         struct i915_address_space *vm = &ppgtt->base;
629         gen6_gtt_pte_t __iomem *pd_addr;
630         gen6_gtt_pte_t scratch_pte;
631         uint32_t pd_entry;
632         int pte, pde;
633
634         scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
635
636         pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
637                 ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
638
639         seq_printf(m, "  VM %p (pd_offset %x-%x):\n", vm,
640                    ppgtt->pd_offset, ppgtt->pd_offset + ppgtt->num_pd_entries);
641         for (pde = 0; pde < ppgtt->num_pd_entries; pde++) {
642                 u32 expected;
643                 gen6_gtt_pte_t *pt_vaddr;
644                 dma_addr_t pt_addr = ppgtt->pt_dma_addr[pde];
645                 pd_entry = readl(pd_addr + pde);
646                 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
647
648                 if (pd_entry != expected)
649                         seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
650                                    pde,
651                                    pd_entry,
652                                    expected);
653                 seq_printf(m, "\tPDE: %x\n", pd_entry);
654
655                 pt_vaddr = kmap_atomic(ppgtt->pt_pages[pde]);
656                 for (pte = 0; pte < I915_PPGTT_PT_ENTRIES; pte += 4) {
657                         unsigned long va =
658                                 (pde * PAGE_SIZE * I915_PPGTT_PT_ENTRIES) +
659                                 (pte * PAGE_SIZE);
660                         int i;
661                         bool found = false;
662                         for (i = 0; i < 4; i++)
663                                 if (pt_vaddr[pte + i] != scratch_pte)
664                                         found = true;
665                         if (!found)
666                                 continue;
667
668                         seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
669                         for (i = 0; i < 4; i++) {
670                                 if (pt_vaddr[pte + i] != scratch_pte)
671                                         seq_printf(m, " %08x", pt_vaddr[pte + i]);
672                                 else
673                                         seq_puts(m, "  SCRATCH ");
674                         }
675                         seq_puts(m, "\n");
676                 }
677                 kunmap_atomic(pt_vaddr);
678         }
679 }
680
681 static void gen6_write_pdes(struct i915_hw_ppgtt *ppgtt)
682 {
683         struct drm_i915_private *dev_priv = ppgtt->base.dev->dev_private;
684         gen6_gtt_pte_t __iomem *pd_addr;
685         uint32_t pd_entry;
686         int i;
687
688         WARN_ON(ppgtt->pd_offset & 0x3f);
689         pd_addr = (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm +
690                 ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
691         for (i = 0; i < ppgtt->num_pd_entries; i++) {
692                 dma_addr_t pt_addr;
693
694                 pt_addr = ppgtt->pt_dma_addr[i];
695                 pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
696                 pd_entry |= GEN6_PDE_VALID;
697
698                 writel(pd_entry, pd_addr + i);
699         }
700         readl(pd_addr);
701 }
702
703 static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
704 {
705         BUG_ON(ppgtt->pd_offset & 0x3f);
706
707         return (ppgtt->pd_offset / 64) << 16;
708 }
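/*
 * Editor's worked example (illustrative): pd_offset is the byte offset of
 * the PDEs within the GSM (see gen6_ppgtt_init() below). RING_PP_DIR_BASE
 * wants that offset in 64-byte units placed at bits 31:16, so a PD at byte
 * offset 0x10000 encodes as (0x10000 / 64) << 16 = 0x04000000.
 */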
709
710 static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
711                          struct intel_engine_cs *ring)
712 {
713         int ret;
714
715         /* NB: TLBs must be flushed and invalidated before a switch */
716         ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
717         if (ret)
718                 return ret;
719
720         ret = intel_ring_begin(ring, 6);
721         if (ret)
722                 return ret;
723
724         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
725         intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
726         intel_ring_emit(ring, PP_DIR_DCLV_2G);
727         intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
728         intel_ring_emit(ring, get_pd_offset(ppgtt));
729         intel_ring_emit(ring, MI_NOOP);
730         intel_ring_advance(ring);
731
732         return 0;
733 }
734
735 static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
736                           struct intel_engine_cs *ring)
737 {
738         int ret;
739
740         /* NB: TLBs must be flushed and invalidated before a switch */
741         ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
742         if (ret)
743                 return ret;
744
745         ret = intel_ring_begin(ring, 6);
746         if (ret)
747                 return ret;
748
749         intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
750         intel_ring_emit(ring, RING_PP_DIR_DCLV(ring));
751         intel_ring_emit(ring, PP_DIR_DCLV_2G);
752         intel_ring_emit(ring, RING_PP_DIR_BASE(ring));
753         intel_ring_emit(ring, get_pd_offset(ppgtt));
754         intel_ring_emit(ring, MI_NOOP);
755         intel_ring_advance(ring);
756
757         /* XXX: RCS is the only one to auto invalidate the TLBs? */
758         if (ring->id != RCS) {
759                 ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
760                 if (ret)
761                         return ret;
762         }
763
764         return 0;
765 }
766
767 static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
768                           struct intel_engine_cs *ring)
769 {
770         struct drm_device *dev = ppgtt->base.dev;
771         struct drm_i915_private *dev_priv = dev->dev_private;
772
773
774         I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
775         I915_WRITE(RING_PP_DIR_BASE(ring), get_pd_offset(ppgtt));
776
777         POSTING_READ(RING_PP_DIR_DCLV(ring));
778
779         return 0;
780 }
781
782 static void gen8_ppgtt_enable(struct drm_device *dev)
783 {
784         struct drm_i915_private *dev_priv = dev->dev_private;
785         struct intel_engine_cs *ring;
786         int j;
787
788         for_each_ring(ring, dev_priv, j) {
789                 I915_WRITE(RING_MODE_GEN7(ring),
790                            _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
791         }
792 }
793
794 static void gen7_ppgtt_enable(struct drm_device *dev)
795 {
796         struct drm_i915_private *dev_priv = dev->dev_private;
797         struct intel_engine_cs *ring;
798         uint32_t ecochk, ecobits;
799         int i;
800
801         ecobits = I915_READ(GAC_ECO_BITS);
802         I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
803
804         ecochk = I915_READ(GAM_ECOCHK);
805         if (IS_HASWELL(dev)) {
806                 ecochk |= ECOCHK_PPGTT_WB_HSW;
807         } else {
808                 ecochk |= ECOCHK_PPGTT_LLC_IVB;
809                 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
810         }
811         I915_WRITE(GAM_ECOCHK, ecochk);
812
813         for_each_ring(ring, dev_priv, i) {
814                 /* GFX_MODE is per-ring on gen7+ */
815                 I915_WRITE(RING_MODE_GEN7(ring),
816                            _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
817         }
818 }
819
820 static void gen6_ppgtt_enable(struct drm_device *dev)
821 {
822         struct drm_i915_private *dev_priv = dev->dev_private;
823         uint32_t ecochk, gab_ctl, ecobits;
824
825         ecobits = I915_READ(GAC_ECO_BITS);
826         I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
827                    ECOBITS_PPGTT_CACHE64B);
828
829         gab_ctl = I915_READ(GAB_CTL);
830         I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
831
832         ecochk = I915_READ(GAM_ECOCHK);
833         I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
834
835         I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
836 }
837
838 /* PPGTT support for Sandybridge/Gen6 and later */
839 static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
840                                    uint64_t start,
841                                    uint64_t length,
842                                    bool use_scratch)
843 {
844         struct i915_hw_ppgtt *ppgtt =
845                 container_of(vm, struct i915_hw_ppgtt, base);
846         gen6_gtt_pte_t *pt_vaddr, scratch_pte;
847         unsigned first_entry = start >> PAGE_SHIFT;
848         unsigned num_entries = length >> PAGE_SHIFT;
849         unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
850         unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
851         unsigned last_pte, i;
852
853         scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, true, 0);
854
855         while (num_entries) {
856                 last_pte = first_pte + num_entries;
857                 if (last_pte > I915_PPGTT_PT_ENTRIES)
858                         last_pte = I915_PPGTT_PT_ENTRIES;
859
860                 pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
861
862                 for (i = first_pte; i < last_pte; i++)
863                         pt_vaddr[i] = scratch_pte;
864
865                 kunmap_atomic(pt_vaddr);
866
867                 num_entries -= last_pte - first_pte;
868                 first_pte = 0;
869                 act_pt++;
870         }
871 }
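/*
 * Editor's illustrative sketch (not used by the driver): how the gen6
 * routines here split a GTT offset into a page table index and a slot
 * within that table, mirroring the act_pt/first_pte math above.
 */
static inline void __maybe_unused
gen6_va_decompose_example(uint64_t start, unsigned *pt, unsigned *slot)
{
	unsigned first_entry = start >> PAGE_SHIFT;

	*pt = first_entry / I915_PPGTT_PT_ENTRIES;   /* which page table */
	*slot = first_entry % I915_PPGTT_PT_ENTRIES; /* PTE within it */
}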
872
873 static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
874                                       struct sg_table *pages,
875                                       uint64_t start,
876                                       enum i915_cache_level cache_level, u32 flags)
877 {
878         struct i915_hw_ppgtt *ppgtt =
879                 container_of(vm, struct i915_hw_ppgtt, base);
880         gen6_gtt_pte_t *pt_vaddr;
881         unsigned first_entry = start >> PAGE_SHIFT;
882         unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
883         unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
884         struct sg_page_iter sg_iter;
885
886         pt_vaddr = NULL;
887         for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
888                 if (pt_vaddr == NULL)
889                         pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
890
891                 pt_vaddr[act_pte] =
892                         vm->pte_encode(sg_page_iter_dma_address(&sg_iter),
893                                        cache_level, true, flags);
894
895                 if (++act_pte == I915_PPGTT_PT_ENTRIES) {
896                         kunmap_atomic(pt_vaddr);
897                         pt_vaddr = NULL;
898                         act_pt++;
899                         act_pte = 0;
900                 }
901         }
902         if (pt_vaddr)
903                 kunmap_atomic(pt_vaddr);
904 }
905
906 static void gen6_ppgtt_unmap_pages(struct i915_hw_ppgtt *ppgtt)
907 {
908         int i;
909
910         if (ppgtt->pt_dma_addr) {
911                 for (i = 0; i < ppgtt->num_pd_entries; i++)
912                         pci_unmap_page(ppgtt->base.dev->pdev,
913                                        ppgtt->pt_dma_addr[i],
914                                        4096, PCI_DMA_BIDIRECTIONAL);
915         }
916 }
917
918 static void gen6_ppgtt_free(struct i915_hw_ppgtt *ppgtt)
919 {
920         int i;
921
922         kfree(ppgtt->pt_dma_addr);
923         for (i = 0; i < ppgtt->num_pd_entries; i++)
924                 __free_page(ppgtt->pt_pages[i]);
925         kfree(ppgtt->pt_pages);
926 }
927
928 static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
929 {
930         struct i915_hw_ppgtt *ppgtt =
931                 container_of(vm, struct i915_hw_ppgtt, base);
932
933         drm_mm_remove_node(&ppgtt->node);
934
935         gen6_ppgtt_unmap_pages(ppgtt);
936         gen6_ppgtt_free(ppgtt);
937 }
938
939 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
940 {
941         struct drm_device *dev = ppgtt->base.dev;
942         struct drm_i915_private *dev_priv = dev->dev_private;
943         bool retried = false;
944         int ret;
945
946         /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
947          * allocator works in address space sizes, so it's multiplied by page
948          * size. We allocate at the top of the GTT to avoid fragmentation.
949          */
950         BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
951 alloc:
952         ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
953                                                   &ppgtt->node, GEN6_PD_SIZE,
954                                                   GEN6_PD_ALIGN, 0,
955                                                   0, dev_priv->gtt.base.total,
956                                                   DRM_MM_TOPDOWN);
957         if (ret == -ENOSPC && !retried) {
958                 ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
959                                                GEN6_PD_SIZE, GEN6_PD_ALIGN,
960                                                I915_CACHE_NONE,
961                                                0, dev_priv->gtt.base.total,
962                                                0);
963                 if (ret)
964                         return ret;
965
966                 retried = true;
967                 goto alloc;
968         }
969
970         if (ppgtt->node.start < dev_priv->gtt.mappable_end)
971                 DRM_DEBUG("Forced to use aperture for PDEs\n");
972
973         ppgtt->num_pd_entries = GEN6_PPGTT_PD_ENTRIES;
974         return ret;
975 }
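/*
 * Editor's worked example (illustrative): with GEN6_PPGTT_PD_ENTRIES = 512
 * PDEs, each covering I915_PPGTT_PT_ENTRIES (1024) 4KB pages, the gen6
 * PPGTT spans 512 * 1024 * 4096 bytes = 2GB, matching the PP_DIR_DCLV_2G
 * value programmed in the mm_switch functions above.
 */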
976
977 static int gen6_ppgtt_allocate_page_tables(struct i915_hw_ppgtt *ppgtt)
978 {
979         int i;
980
981         ppgtt->pt_pages = kcalloc(ppgtt->num_pd_entries, sizeof(struct page *),
982                                   GFP_KERNEL);
983
984         if (!ppgtt->pt_pages)
985                 return -ENOMEM;
986
987         for (i = 0; i < ppgtt->num_pd_entries; i++) {
988                 ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
989                 if (!ppgtt->pt_pages[i]) {
990                         gen6_ppgtt_free(ppgtt);
991                         return -ENOMEM;
992                 }
993         }
994
995         return 0;
996 }
997
998 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
999 {
1000         int ret;
1001
1002         ret = gen6_ppgtt_allocate_page_directories(ppgtt);
1003         if (ret)
1004                 return ret;
1005
1006         ret = gen6_ppgtt_allocate_page_tables(ppgtt);
1007         if (ret) {
1008                 drm_mm_remove_node(&ppgtt->node);
1009                 return ret;
1010         }
1011
1012         ppgtt->pt_dma_addr = kcalloc(ppgtt->num_pd_entries, sizeof(dma_addr_t),
1013                                      GFP_KERNEL);
1014         if (!ppgtt->pt_dma_addr) {
1015                 drm_mm_remove_node(&ppgtt->node);
1016                 gen6_ppgtt_free(ppgtt);
1017                 return -ENOMEM;
1018         }
1019
1020         return 0;
1021 }
1022
1023 static int gen6_ppgtt_setup_page_tables(struct i915_hw_ppgtt *ppgtt)
1024 {
1025         struct drm_device *dev = ppgtt->base.dev;
1026         int i;
1027
1028         for (i = 0; i < ppgtt->num_pd_entries; i++) {
1029                 dma_addr_t pt_addr;
1030
1031                 pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
1032                                        PCI_DMA_BIDIRECTIONAL);
1033
1034                 if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
1035                         gen6_ppgtt_unmap_pages(ppgtt);
1036                         return -EIO;
1037                 }
1038
1039                 ppgtt->pt_dma_addr[i] = pt_addr;
1040         }
1041
1042         return 0;
1043 }
1044
1045 static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
1046 {
1047         struct drm_device *dev = ppgtt->base.dev;
1048         struct drm_i915_private *dev_priv = dev->dev_private;
1049         int ret;
1050
1051         ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
1052         if (IS_GEN6(dev)) {
1053                 ppgtt->switch_mm = gen6_mm_switch;
1054         } else if (IS_HASWELL(dev)) {
1055                 ppgtt->switch_mm = hsw_mm_switch;
1056         } else if (IS_GEN7(dev)) {
1057                 ppgtt->switch_mm = gen7_mm_switch;
1058         } else
1059                 BUG();
1060
1061         ret = gen6_ppgtt_alloc(ppgtt);
1062         if (ret)
1063                 return ret;
1064
1065         ret = gen6_ppgtt_setup_page_tables(ppgtt);
1066         if (ret) {
1067                 gen6_ppgtt_free(ppgtt);
1068                 return ret;
1069         }
1070
1071         ppgtt->base.clear_range = gen6_ppgtt_clear_range;
1072         ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1073         ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1074         ppgtt->base.start = 0;
1075         ppgtt->base.total = ppgtt->num_pd_entries * I915_PPGTT_PT_ENTRIES * PAGE_SIZE;
1076         ppgtt->debug_dump = gen6_dump_ppgtt;
1077
1078         ppgtt->pd_offset =
1079                 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_gtt_pte_t);
1080
1081         ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true);
1082
1083         DRM_DEBUG_DRIVER("Allocated pde space (%ldM) at GTT entry: %lx\n",
1084                          ppgtt->node.size >> 20,
1085                          ppgtt->node.start / PAGE_SIZE);
1086
1087         gen6_write_pdes(ppgtt);
1088         DRM_DEBUG("Adding PPGTT at offset %x\n",
1089                   ppgtt->pd_offset << 10);
1090
1091         return 0;
1092 }
1093
1094 static int __hw_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1095 {
1096         struct drm_i915_private *dev_priv = dev->dev_private;
1097
1098         ppgtt->base.dev = dev;
1099         ppgtt->base.scratch = dev_priv->gtt.base.scratch;
1100
1101         if (INTEL_INFO(dev)->gen < 8)
1102                 return gen6_ppgtt_init(ppgtt);
1103         else if (IS_GEN8(dev) || IS_GEN9(dev))
1104                 return gen8_ppgtt_init(ppgtt, dev_priv->gtt.base.total);
1105         else
1106                 BUG();
1107 }

1108 int i915_ppgtt_init(struct drm_device *dev, struct i915_hw_ppgtt *ppgtt)
1109 {
1110         struct drm_i915_private *dev_priv = dev->dev_private;
1111         int ret = 0;
1112
1113         ret = __hw_ppgtt_init(dev, ppgtt);
1114         if (ret == 0) {
1115                 kref_init(&ppgtt->ref);
1116                 drm_mm_init(&ppgtt->base.mm, ppgtt->base.start,
1117                             ppgtt->base.total);
1118                 i915_init_vm(dev_priv, &ppgtt->base);
1119         }
1120
1121         return ret;
1122 }
1123
1124 int i915_ppgtt_init_hw(struct drm_device *dev)
1125 {
1126         struct drm_i915_private *dev_priv = dev->dev_private;
1127         struct intel_engine_cs *ring;
1128         struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1129         int i, ret = 0;
1130
1131         /* In the case of execlists, PPGTT is enabled by the context descriptor
1132          * and the PDPs are contained within the context itself.  We don't
1133          * need to do anything here. */
1134         if (i915.enable_execlists)
1135                 return 0;
1136
1137         if (!USES_PPGTT(dev))
1138                 return 0;
1139
1140         if (IS_GEN6(dev))
1141                 gen6_ppgtt_enable(dev);
1142         else if (IS_GEN7(dev))
1143                 gen7_ppgtt_enable(dev);
1144         else if (INTEL_INFO(dev)->gen >= 8)
1145                 gen8_ppgtt_enable(dev);
1146         else
1147                 WARN_ON(1);
1148
1149         if (ppgtt) {
1150                 for_each_ring(ring, dev_priv, i) {
1151                         ret = ppgtt->switch_mm(ppgtt, ring);
1152                         if (ret != 0)
1153                                 return ret;
1154                 }
1155         }
1156
1157         return ret;
1158 }

1159 struct i915_hw_ppgtt *
1160 i915_ppgtt_create(struct drm_device *dev, struct drm_i915_file_private *fpriv)
1161 {
1162         struct i915_hw_ppgtt *ppgtt;
1163         int ret;
1164
1165         ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1166         if (!ppgtt)
1167                 return ERR_PTR(-ENOMEM);
1168
1169         ret = i915_ppgtt_init(dev, ppgtt);
1170         if (ret) {
1171                 kfree(ppgtt);
1172                 return ERR_PTR(ret);
1173         }
1174
1175         ppgtt->file_priv = fpriv;
1176
1177         return ppgtt;
1178 }
1179
1180 void i915_ppgtt_release(struct kref *kref)
1181 {
1182         struct i915_hw_ppgtt *ppgtt =
1183                 container_of(kref, struct i915_hw_ppgtt, ref);
1184
1185         /* vmas should already be unbound */
1186         WARN_ON(!list_empty(&ppgtt->base.active_list));
1187         WARN_ON(!list_empty(&ppgtt->base.inactive_list));
1188
1189         list_del(&ppgtt->base.global_link);
1190         drm_mm_takedown(&ppgtt->base.mm);
1191
1192         ppgtt->base.cleanup(&ppgtt->base);
1193         kfree(ppgtt);
1194 }
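/*
 * Editor's note (assumed usage, mirroring the kref pattern above): creators
 * hold the initial reference taken by kref_init(); other users take and
 * drop references with
 *
 *	kref_get(&ppgtt->ref);
 *	kref_put(&ppgtt->ref, i915_ppgtt_release);
 *
 * and the final put tears down the address space via base.cleanup().
 */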
1195
1196 static void
1197 ppgtt_bind_vma(struct i915_vma *vma,
1198                enum i915_cache_level cache_level,
1199                u32 flags)
1200 {
1201         /* Currently applicable only to VLV */
1202         if (vma->obj->gt_ro)
1203                 flags |= PTE_READ_ONLY;
1204
1205         vma->vm->insert_entries(vma->vm, vma->obj->pages, vma->node.start,
1206                                 cache_level, flags);
1207 }
1208
1209 static void ppgtt_unbind_vma(struct i915_vma *vma)
1210 {
1211         vma->vm->clear_range(vma->vm,
1212                              vma->node.start,
1213                              vma->obj->base.size,
1214                              true);
1215 }
1216
1217 extern int intel_iommu_gfx_mapped;
1218 /* Certain Gen5 chipsets require idling the GPU before
1219  * unmapping anything from the GTT when VT-d is enabled.
1220  */
1221 static inline bool needs_idle_maps(struct drm_device *dev)
1222 {
1223 #ifdef CONFIG_INTEL_IOMMU
1224         /* Query intel_iommu to see if we need the workaround. Presumably that
1225          * was loaded first.
1226          */
1227         if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
1228                 return true;
1229 #endif
1230         return false;
1231 }
1232
1233 static bool do_idling(struct drm_i915_private *dev_priv)
1234 {
1235         bool ret = dev_priv->mm.interruptible;
1236
1237         if (unlikely(dev_priv->gtt.do_idle_maps)) {
1238                 dev_priv->mm.interruptible = false;
1239                 if (i915_gpu_idle(dev_priv->dev)) {
1240                         DRM_ERROR("Couldn't idle GPU\n");
1241                         /* Wait a bit, in hopes it avoids the hang */
1242                         udelay(10);
1243                 }
1244         }
1245
1246         return ret;
1247 }
1248
1249 static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
1250 {
1251         if (unlikely(dev_priv->gtt.do_idle_maps))
1252                 dev_priv->mm.interruptible = interruptible;
1253 }
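/*
 * Editor's note (assumed usage): do_idling()/undo_idling() are meant to
 * bracket GTT unmap operations on the affected gen5 chipsets, e.g.:
 *
 *	interruptible = do_idling(dev_priv);
 *	... unmap pages from the GTT ...
 *	undo_idling(dev_priv, interruptible);
 */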
1254
1255 void i915_check_and_clear_faults(struct drm_device *dev)
1256 {
1257         struct drm_i915_private *dev_priv = dev->dev_private;
1258         struct intel_engine_cs *ring;
1259         int i;
1260
1261         if (INTEL_INFO(dev)->gen < 6)
1262                 return;
1263
1264         for_each_ring(ring, dev_priv, i) {
1265                 u32 fault_reg;
1266                 fault_reg = I915_READ(RING_FAULT_REG(ring));
1267                 if (fault_reg & RING_FAULT_VALID) {
1268                         DRM_DEBUG_DRIVER("Unexpected fault\n"
1269                                          "\tAddr: 0x%08lx\n"
1270                                          "\tAddress space: %s\n"
1271                                          "\tSource ID: %d\n"
1272                                          "\tType: %d\n",
1273                                          fault_reg & PAGE_MASK,
1274                                          fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
1275                                          RING_FAULT_SRCID(fault_reg),
1276                                          RING_FAULT_FAULT_TYPE(fault_reg));
1277                         I915_WRITE(RING_FAULT_REG(ring),
1278                                    fault_reg & ~RING_FAULT_VALID);
1279                 }
1280         }
1281         POSTING_READ(RING_FAULT_REG(&dev_priv->ring[RCS]));
1282 }
1283
1284 static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
1285 {
1286         if (INTEL_INFO(dev_priv->dev)->gen < 6) {
1287                 intel_gtt_chipset_flush();
1288         } else {
1289                 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
1290                 POSTING_READ(GFX_FLSH_CNTL_GEN6);
1291         }
1292 }
1293
1294 void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
1295 {
1296         struct drm_i915_private *dev_priv = dev->dev_private;
1297
1298         /* Don't bother messing with faults pre GEN6 as we have little
1299          * documentation supporting that it's a good idea.
1300          */
1301         if (INTEL_INFO(dev)->gen < 6)
1302                 return;
1303
1304         i915_check_and_clear_faults(dev);
1305
1306         dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
1307                                        dev_priv->gtt.base.start,
1308                                        dev_priv->gtt.base.total,
1309                                        true);
1310
1311         i915_ggtt_flush(dev_priv);
1312 }
1313
1314 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
1315 {
1316         struct drm_i915_private *dev_priv = dev->dev_private;
1317         struct drm_i915_gem_object *obj;
1318         struct i915_address_space *vm;
1319
1320         i915_check_and_clear_faults(dev);
1321
1322         /* First fill our portion of the GTT with scratch pages */
1323         dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
1324                                        dev_priv->gtt.base.start,
1325                                        dev_priv->gtt.base.total,
1326                                        true);
1327
1328         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1329                 struct i915_vma *vma = i915_gem_obj_to_vma(obj,
1330                                                            &dev_priv->gtt.base);
1331                 if (!vma)
1332                         continue;
1333
1334                 i915_gem_clflush_object(obj, obj->pin_display);
1335                 /* The bind_vma code tries to be smart about tracking mappings.
1336                  * Unfortunately above, we've just wiped out the mappings
1337                  * without telling our object about it. So we need to fake it.
1338                  */
1339                 obj->has_global_gtt_mapping = 0;
1340                 vma->bind_vma(vma, obj->cache_level, GLOBAL_BIND);
1341         }
1342
1343
1344         if (INTEL_INFO(dev)->gen >= 8) {
1345                 if (IS_CHERRYVIEW(dev))
1346                         chv_setup_private_ppat(dev_priv);
1347                 else
1348                         bdw_setup_private_ppat(dev_priv);
1349
1350                 return;
1351         }
1352
1353         list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
1354                 /* TODO: Perhaps it shouldn't be gen6 specific */
1355                 if (i915_is_ggtt(vm)) {
1356                         if (dev_priv->mm.aliasing_ppgtt)
1357                                 gen6_write_pdes(dev_priv->mm.aliasing_ppgtt);
1358                         continue;
1359                 }
1360
1361                 gen6_write_pdes(container_of(vm, struct i915_hw_ppgtt, base));
1362         }
1363
1364         i915_ggtt_flush(dev_priv);
1365 }
1366
1367 int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
1368 {
1369         if (obj->has_dma_mapping)
1370                 return 0;
1371
1372         if (!dma_map_sg(&obj->base.dev->pdev->dev,
1373                         obj->pages->sgl, obj->pages->nents,
1374                         PCI_DMA_BIDIRECTIONAL))
1375                 return -ENOSPC;
1376
1377         return 0;
1378 }
1379
1380 static inline void gen8_set_pte(void __iomem *addr, gen8_gtt_pte_t pte)
1381 {
1382 #ifdef writeq
1383         writeq(pte, addr);
1384 #else
1385         iowrite32((u32)pte, addr);
1386         iowrite32(pte >> 32, addr + 4);
1387 #endif
1388 }
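/*
 * Editor's note (illustrative): gen8 GTT PTEs are 64 bits wide. Where the
 * kernel provides writeq() the PTE is updated with a single 64-bit MMIO
 * write; otherwise it is emitted as two 32-bit writes, low dword (which
 * carries the valid bit) first.
 */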
1389
1390 static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
1391                                      struct sg_table *st,
1392                                      uint64_t start,
1393                                      enum i915_cache_level level, u32 unused)
1394 {
1395         struct drm_i915_private *dev_priv = vm->dev->dev_private;
1396         unsigned first_entry = start >> PAGE_SHIFT;
1397         gen8_gtt_pte_t __iomem *gtt_entries =
1398                 (gen8_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
1399         int i = 0;
1400         struct sg_page_iter sg_iter;
1401         dma_addr_t addr = 0; /* shut up gcc */
1402
1403         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
1404                 addr = sg_dma_address(sg_iter.sg) +
1405                         (sg_iter.sg_pgoffset << PAGE_SHIFT);
1406                 gen8_set_pte(&gtt_entries[i],
1407                              gen8_pte_encode(addr, level, true));
1408                 i++;
1409         }
1410
1411         /*
1412          * XXX: This serves as a posting read to make sure that the PTE has
1413          * actually been updated. There is some concern that even though
1414          * registers and PTEs are within the same BAR, they may be subject to
1415          * different NUMA access patterns. Therefore, even with the way we
1416          * assume the hardware should work, we keep this posting read for paranoia.
1417          */
1418         if (i != 0)
1419                 WARN_ON(readq(&gtt_entries[i-1])
1420                         != gen8_pte_encode(addr, level, true));
1421
1422         /* This next bit makes the above posting read even more important. We
1423          * want to flush the TLBs only after we're certain all the PTE updates
1424          * have finished.
1425          */
1426         I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
1427         POSTING_READ(GFX_FLSH_CNTL_GEN6);
1428 }
1429
1430 /*
1431  * Binds an object into the global gtt with the specified cache level. The object
1432  * will be accessible to the GPU via commands whose operands reference offsets
1433  * within the global GTT as well as accessible by the GPU through the GMADR
1434  * mapped BAR (dev_priv->mm.gtt->gtt).
1435  */
1436 static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
1437                                      struct sg_table *st,
1438                                      uint64_t start,
1439                                      enum i915_cache_level level, u32 flags)
1440 {
1441         struct drm_i915_private *dev_priv = vm->dev->dev_private;
1442         unsigned first_entry = start >> PAGE_SHIFT;
1443         gen6_gtt_pte_t __iomem *gtt_entries =
1444                 (gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
1445         int i = 0;
1446         struct sg_page_iter sg_iter;
1447         dma_addr_t addr = 0;
1448
1449         for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
1450                 addr = sg_page_iter_dma_address(&sg_iter);
1451                 iowrite32(vm->pte_encode(addr, level, true, flags), &gtt_entries[i]);
1452                 i++;
1453         }
1454
1455         /* XXX: This serves as a posting read to make sure that the PTE has
1456          * actually been updated. There is some concern that even though
1457          * registers and PTEs are within the same BAR, they may be subject to
1458          * different NUMA access patterns. Therefore, even with the way we
1459          * assume the hardware should work, we keep this posting read for paranoia.
1460          */
1461         if (i != 0) {
1462                 unsigned long gtt = readl(&gtt_entries[i-1]);
1463                 WARN_ON(gtt != vm->pte_encode(addr, level, true, flags));
1464         }
1465
1466         /* This next bit makes the above posting read even more important. We
1467          * want to flush the TLBs only after we're certain all the PTE updates
1468          * have finished.
1469          */
1470         I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
1471         POSTING_READ(GFX_FLSH_CNTL_GEN6);
1472 }
1473
1474 static void gen8_ggtt_clear_range(struct i915_address_space *vm,
1475                                   uint64_t start,
1476                                   uint64_t length,
1477                                   bool use_scratch)
1478 {
1479         struct drm_i915_private *dev_priv = vm->dev->dev_private;
1480         unsigned first_entry = start >> PAGE_SHIFT;
1481         unsigned num_entries = length >> PAGE_SHIFT;
1482         gen8_gtt_pte_t scratch_pte, __iomem *gtt_base =
1483                 (gen8_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
1484         const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
1485         int i;
1486
1487         if (WARN(num_entries > max_entries,
1488                  "First entry = %d; Num entries = %d (max=%d)\n",
1489                  first_entry, num_entries, max_entries))
1490                 num_entries = max_entries;
1491
1492         scratch_pte = gen8_pte_encode(vm->scratch.addr,
1493                                       I915_CACHE_LLC,
1494                                       use_scratch);
1495         for (i = 0; i < num_entries; i++)
1496                 gen8_set_pte(&gtt_base[i], scratch_pte);
1497         readl(gtt_base);
1498 }
1499
1500 static void gen6_ggtt_clear_range(struct i915_address_space *vm,
1501                                   uint64_t start,
1502                                   uint64_t length,
1503                                   bool use_scratch)
1504 {
1505         struct drm_i915_private *dev_priv = vm->dev->dev_private;
1506         unsigned first_entry = start >> PAGE_SHIFT;
1507         unsigned num_entries = length >> PAGE_SHIFT;
1508         gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
1509                 (gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
1510         const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
1511         int i;
1512
1513         if (WARN(num_entries > max_entries,
1514                  "First entry = %d; Num entries = %d (max=%d)\n",
1515                  first_entry, num_entries, max_entries))
1516                 num_entries = max_entries;
1517
1518         scratch_pte = vm->pte_encode(vm->scratch.addr, I915_CACHE_LLC, use_scratch, 0);
1519
1520         for (i = 0; i < num_entries; i++)
1521                 iowrite32(scratch_pte, &gtt_base[i]);
1522         readl(gtt_base);
1523 }
1524
1526 static void i915_ggtt_bind_vma(struct i915_vma *vma,
1527                                enum i915_cache_level cache_level,
1528                                u32 unused)
1529 {
1530         const unsigned long entry = vma->node.start >> PAGE_SHIFT;
1531         unsigned int flags = (cache_level == I915_CACHE_NONE) ?
1532                 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
1533
1534         BUG_ON(!i915_is_ggtt(vma->vm));
1535         intel_gtt_insert_sg_entries(vma->obj->pages, entry, flags);
1536         vma->obj->has_global_gtt_mapping = 1;
1537 }
1538
1539 static void i915_ggtt_clear_range(struct i915_address_space *vm,
1540                                   uint64_t start,
1541                                   uint64_t length,
1542                                   bool unused)
1543 {
1544         unsigned first_entry = start >> PAGE_SHIFT;
1545         unsigned num_entries = length >> PAGE_SHIFT;
1546         intel_gtt_clear_range(first_entry, num_entries);
1547 }
1548
1549 static void i915_ggtt_unbind_vma(struct i915_vma *vma)
1550 {
1551         const unsigned int first = vma->node.start >> PAGE_SHIFT;
1552         const unsigned int size = vma->obj->base.size >> PAGE_SHIFT;
1553
1554         BUG_ON(!i915_is_ggtt(vma->vm));
1555         vma->obj->has_global_gtt_mapping = 0;
1556         intel_gtt_clear_range(first, size);
1557 }
1558
1559 static void ggtt_bind_vma(struct i915_vma *vma,
1560                           enum i915_cache_level cache_level,
1561                           u32 flags)
1562 {
1563         struct drm_device *dev = vma->vm->dev;
1564         struct drm_i915_private *dev_priv = dev->dev_private;
1565         struct drm_i915_gem_object *obj = vma->obj;
1566
1567         /* Currently applicable only to VLV */
1568         if (obj->gt_ro)
1569                 flags |= PTE_READ_ONLY;
1570
1571         /* If there is no aliasing PPGTT, or the caller needs a global mapping,
1572          * or we have a global mapping already but the cacheability flags have
1573          * changed, set the global PTEs.
1574          *
1575          * If there is an aliasing PPGTT it is anecdotally faster, so use that
1576          * instead if none of the above hold true.
1577          *
1578          * NB: A global mapping should only be needed for special regions like
1579          * "gtt mappable", SNB errata, or if specified via special execbuf
1580          * flags. At all other times, the GPU will use the aliasing PPGTT.
1581          */
1582         if (!dev_priv->mm.aliasing_ppgtt || flags & GLOBAL_BIND) {
1583                 if (!obj->has_global_gtt_mapping ||
1584                     (cache_level != obj->cache_level)) {
1585                         vma->vm->insert_entries(vma->vm, obj->pages,
1586                                                 vma->node.start,
1587                                                 cache_level, flags);
1588                         obj->has_global_gtt_mapping = 1;
1589                 }
1590         }
1591
1592         if (dev_priv->mm.aliasing_ppgtt &&
1593             (!obj->has_aliasing_ppgtt_mapping ||
1594              (cache_level != obj->cache_level))) {
1595                 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1596                 appgtt->base.insert_entries(&appgtt->base,
1597                                             vma->obj->pages,
1598                                             vma->node.start,
1599                                             cache_level, flags);
1600                 vma->obj->has_aliasing_ppgtt_mapping = 1;
1601         }
1602 }
1603
1604 static void ggtt_unbind_vma(struct i915_vma *vma)
1605 {
1606         struct drm_device *dev = vma->vm->dev;
1607         struct drm_i915_private *dev_priv = dev->dev_private;
1608         struct drm_i915_gem_object *obj = vma->obj;
1609
1610         if (obj->has_global_gtt_mapping) {
1611                 vma->vm->clear_range(vma->vm,
1612                                      vma->node.start,
1613                                      obj->base.size,
1614                                      true);
1615                 obj->has_global_gtt_mapping = 0;
1616         }
1617
1618         if (obj->has_aliasing_ppgtt_mapping) {
1619                 struct i915_hw_ppgtt *appgtt = dev_priv->mm.aliasing_ppgtt;
1620                 appgtt->base.clear_range(&appgtt->base,
1621                                          vma->node.start,
1622                                          obj->base.size,
1623                                          true);
1624                 obj->has_aliasing_ppgtt_mapping = 0;
1625         }
1626 }
1627
1628 void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
1629 {
1630         struct drm_device *dev = obj->base.dev;
1631         struct drm_i915_private *dev_priv = dev->dev_private;
1632         bool interruptible;
1633
1634         interruptible = do_idling(dev_priv);
1635
1636         if (!obj->has_dma_mapping)
1637                 dma_unmap_sg(&dev->pdev->dev,
1638                              obj->pages->sgl, obj->pages->nents,
1639                              PCI_DMA_BIDIRECTIONAL);
1640
1641         undo_idling(dev_priv, interruptible);
1642 }
1643
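/*
 * A hedged note on drm_mm "color": here a GGTT node's color is its cache
 * level. On machines without an LLC, objects of conflicting cacheability
 * should not share a page boundary (prefetching may straddle it), so when
 * a neighbour's color differs we shrink the candidate hole by one
 * 4096-byte guard page at that end.
 */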
1644 static void i915_gtt_color_adjust(struct drm_mm_node *node,
1645                                   unsigned long color,
1646                                   unsigned long *start,
1647                                   unsigned long *end)
1648 {
1649         if (node->color != color)
1650                 *start += 4096;
1651
1652         if (!list_empty(&node->node_list)) {
1653                 node = list_entry(node->node_list.next,
1654                                   struct drm_mm_node,
1655                                   node_list);
1656                 if (node->allocated && node->color != color)
1657                         *end -= 4096;
1658         }
1659 }
1660
1661 int i915_gem_setup_global_gtt(struct drm_device *dev,
1662                               unsigned long start,
1663                               unsigned long mappable_end,
1664                               unsigned long end)
1665 {
1666         /* Let GEM Manage all of the aperture.
1667          *
1668          * However, leave one page at the end still bound to the scratch page.
1669          * There are a number of places where the hardware apparently prefetches
1670          * past the end of the object, and we've seen multiple hangs with the
1671          * GPU head pointer stuck in a batchbuffer bound at the last page of the
1672          * aperture.  One page should be enough to keep any prefetching inside
1673          * of the aperture.
1674          */
1675         struct drm_i915_private *dev_priv = dev->dev_private;
1676         struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
1677         struct drm_mm_node *entry;
1678         struct drm_i915_gem_object *obj;
1679         unsigned long hole_start, hole_end;
1680         int ret;
1681
1682         BUG_ON(mappable_end > end);
1683
1684         /* Subtract the guard page ... */
1685         drm_mm_init(&ggtt_vm->mm, start, end - start - PAGE_SIZE);
1686         if (!HAS_LLC(dev))
1687                 dev_priv->gtt.base.mm.color_adjust = i915_gtt_color_adjust;
1688
1689         /* Mark any preallocated objects as occupied */
1690         list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
1691                 struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm);
1692
1693                 DRM_DEBUG_KMS("reserving preallocated space: %lx + %zx\n",
1694                               i915_gem_obj_ggtt_offset(obj), obj->base.size);
1695
1696                 WARN_ON(i915_gem_obj_ggtt_bound(obj));
1697                 ret = drm_mm_reserve_node(&ggtt_vm->mm, &vma->node);
1698                 if (ret) {
1699                         DRM_DEBUG_KMS("Reservation failed: %i\n", ret);
1700                         return ret;
1701                 }
1702                 obj->has_global_gtt_mapping = 1;
1703         }
1704
1705         dev_priv->gtt.base.start = start;
1706         dev_priv->gtt.base.total = end - start;
1707
1708         /* Clear any non-preallocated blocks */
1709         drm_mm_for_each_hole(entry, &ggtt_vm->mm, hole_start, hole_end) {
1710                 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
1711                               hole_start, hole_end);
1712                 ggtt_vm->clear_range(ggtt_vm, hole_start,
1713                                      hole_end - hole_start, true);
1714         }
1715
1716         /* And finally clear the reserved guard page */
1717         ggtt_vm->clear_range(ggtt_vm, end - PAGE_SIZE, PAGE_SIZE, true);
1718
1719         if (USES_PPGTT(dev) && !USES_FULL_PPGTT(dev)) {
1720                 struct i915_hw_ppgtt *ppgtt;
1721
1722                 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1723                 if (!ppgtt)
1724                         return -ENOMEM;
1725
1726                 ret = __hw_ppgtt_init(dev, ppgtt);
1727                 if (ret != 0)
1728                         return ret;
1729
1730                 dev_priv->mm.aliasing_ppgtt = ppgtt;
1731         }
1732
1733         return 0;
1734 }
1735
1736 void i915_gem_init_global_gtt(struct drm_device *dev)
1737 {
1738         struct drm_i915_private *dev_priv = dev->dev_private;
1739         unsigned long gtt_size, mappable_size;
1740
1741         gtt_size = dev_priv->gtt.base.total;
1742         mappable_size = dev_priv->gtt.mappable_end;
1743
1744         i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
1745 }
1746
1747 void i915_global_gtt_cleanup(struct drm_device *dev)
1748 {
1749         struct drm_i915_private *dev_priv = dev->dev_private;
1750         struct i915_address_space *vm = &dev_priv->gtt.base;
1751
1752         if (dev_priv->mm.aliasing_ppgtt) {
1753                 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
1754
1755                 ppgtt->base.cleanup(&ppgtt->base);
1756         }
1757
1758         if (drm_mm_initialized(&vm->mm)) {
1759                 drm_mm_takedown(&vm->mm);
1760                 list_del(&vm->global_link);
1761         }
1762
1763         vm->cleanup(vm);
1764 }
1765
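/*
 * The scratch page backs every PTE that does not currently map a real
 * object, so stray GPU accesses to unbound ranges hit a harmless dummy
 * page instead of arbitrary memory.
 */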
1766 static int setup_scratch_page(struct drm_device *dev)
1767 {
1768         struct drm_i915_private *dev_priv = dev->dev_private;
1769         struct page *page;
1770         dma_addr_t dma_addr;
1771
1772         page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
1773         if (page == NULL)
1774                 return -ENOMEM;
1775         set_pages_uc(page, 1);
1776
1777 #ifdef CONFIG_INTEL_IOMMU
1778         dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
1779                                 PCI_DMA_BIDIRECTIONAL);
1780         if (pci_dma_mapping_error(dev->pdev, dma_addr))
1781                 return -EINVAL;
1782 #else
1783         dma_addr = page_to_phys(page);
1784 #endif
1785         dev_priv->gtt.base.scratch.page = page;
1786         dev_priv->gtt.base.scratch.addr = dma_addr;
1787
1788         return 0;
1789 }
1790
1791 static void teardown_scratch_page(struct drm_device *dev)
1792 {
1793         struct drm_i915_private *dev_priv = dev->dev_private;
1794         struct page *page = dev_priv->gtt.base.scratch.page;
1795
1796         set_pages_wb(page, 1);
1797         pci_unmap_page(dev->pdev, dev_priv->gtt.base.scratch.addr,
1798                        PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
1799         __free_page(page);
1800 }
1801
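/*
 * Illustrative decode (assuming the SNB GGMS field gives the GTT size in
 * MB directly): a field value of 2 means a 2MB GTT which, at 4 bytes per
 * gen6 PTE, maps 512K pages = 2GB of GGTT address space.
 */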
1802 static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
1803 {
1804         snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
1805         snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
1806         return snb_gmch_ctl << 20;
1807 }
1808
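/*
 * Illustrative decode (assuming the BDW GGMS field is an exponent,
 * n -> 1 << n MB of GTT): n = 3 gives an 8MB GTT which, at 8 bytes per
 * gen8 PTE, maps 1M pages = 4GB of GGTT address space.
 */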
1809 static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
1810 {
1811         bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
1812         bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
1813         if (bdw_gmch_ctl)
1814                 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
1815
1816 #ifdef CONFIG_X86_32
1817          /* Limit 32b platforms to a 2GB GGTT: 4MB of PTEs / 8B per PTE * 4K pages = 2GB */
1818         if (bdw_gmch_ctl > 4)
1819                 bdw_gmch_ctl = 4;
1820 #endif
1821
1822         return bdw_gmch_ctl << 20;
1823 }
1824
1825 static inline unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
1826 {
1827         gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
1828         gmch_ctrl &= SNB_GMCH_GGMS_MASK;
1829
1830         if (gmch_ctrl)
1831                 return 1 << (20 + gmch_ctrl);
1832
1833         return 0;
1834 }
1835
1836 static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
1837 {
1838         snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
1839         snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
1840         return snb_gmch_ctl << 25; /* 32 MB units */
1841 }
1842
1843 static inline size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
1844 {
1845         bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
1846         bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
1847         return bdw_gmch_ctl << 25; /* 32 MB units */
1848 }
1849
1850 static size_t chv_get_stolen_size(u16 gmch_ctrl)
1851 {
1852         gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
1853         gmch_ctrl &= SNB_GMCH_GMS_MASK;
1854
1855         /*
1856          * 0x0  to 0x10: 32MB increments starting at 0MB
1857          * 0x11 to 0x16: 4MB increments starting at 8MB
1858          * 0x17 to 0x1d: 4MB increments starting at 36MB
1859          */
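        /* e.g. 0x13 decodes as (0x13 - 0x11 + 2) << 22 = 16MB of stolen memory */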
1860         if (gmch_ctrl < 0x11)
1861                 return gmch_ctrl << 25;
1862         else if (gmch_ctrl < 0x17)
1863                 return (gmch_ctrl - 0x11 + 2) << 22;
1864         else
1865                 return (gmch_ctrl - 0x17 + 9) << 22;
1866 }
1867
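/*
 * Gen9 splits the BDW stolen-size encoding: values below 0xf0 count
 * 32MB units, 0xf0 and above count 4MB units (e.g. 0xf1 -> 8MB).
 */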
1868 static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
1869 {
1870         gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
1871         gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
1872
1873         if (gen9_gmch_ctl < 0xf0)
1874                 return gen9_gmch_ctl << 25; /* 32 MB units */
1875         else
1876                 /* values of 0xf0 and above count 4MB units, starting at 4MB */
1877                 return (gen9_gmch_ctl - 0xf0 + 1) << 22;
1878 }
1879
1880 static int ggtt_probe_common(struct drm_device *dev,
1881                              size_t gtt_size)
1882 {
1883         struct drm_i915_private *dev_priv = dev->dev_private;
1884         phys_addr_t gtt_phys_addr;
1885         int ret;
1886
1887         /* For modern GENs the PTEs and register space are split in the BAR */
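        /* (on SNB, for instance, BAR 0 is 4MB: 2MB of registers, then 2MB of GSM) */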
1888         gtt_phys_addr = pci_resource_start(dev->pdev, 0) +
1889                 (pci_resource_len(dev->pdev, 0) / 2);
1890
1891         dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
1892         if (!dev_priv->gtt.gsm) {
1893                 DRM_ERROR("Failed to map the gtt page table\n");
1894                 return -ENOMEM;
1895         }
1896
1897         ret = setup_scratch_page(dev);
1898         if (ret) {
1899                 DRM_ERROR("Scratch setup failed\n");
1900                 /* iounmap will also get called at remove, but meh */
1901                 iounmap(dev_priv->gtt.gsm);
1902         }
1903
1904         return ret;
1905 }
1906
1907 /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
1908  * bits. When using advanced contexts each context stores its own PAT, but
1909  * writing this data shouldn't be harmful even in those cases. */
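/* GEN8_PPAT(i, x) places attribute byte x at bits [8i+7:8i] of the 64-bit
 * PPAT value, so the eight entries below define PAT indices 0-7.
 */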
1910 static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
1911 {
1912         uint64_t pat;
1913
1914         pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
1915               GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
1916               GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
1917               GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
1918               GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
1919               GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
1920               GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
1921               GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
1922
1923         /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
1924          * write would work. */
1925         I915_WRITE(GEN8_PRIVATE_PAT, pat);
1926         I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
1927 }
1928
1929 static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
1930 {
1931         uint64_t pat;
1932
1933         /*
1934          * Map WB on BDW to snooped on CHV.
1935          *
1936          * Only the snoop bit has meaning for CHV, the rest is
1937          * ignored.
1938          *
1939          * Note that the hardware enforces snooping for all page
1940          * table accesses. The snoop bit is actually ignored for
1941          * PDEs.
1942          */
1943         pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
1944               GEN8_PPAT(1, 0) |
1945               GEN8_PPAT(2, 0) |
1946               GEN8_PPAT(3, 0) |
1947               GEN8_PPAT(4, CHV_PPAT_SNOOP) |
1948               GEN8_PPAT(5, CHV_PPAT_SNOOP) |
1949               GEN8_PPAT(6, CHV_PPAT_SNOOP) |
1950               GEN8_PPAT(7, CHV_PPAT_SNOOP);
1951
1952         I915_WRITE(GEN8_PRIVATE_PAT, pat);
1953         I915_WRITE(GEN8_PRIVATE_PAT + 4, pat >> 32);
1954 }
1955
1956 static int gen8_gmch_probe(struct drm_device *dev,
1957                            size_t *gtt_total,
1958                            size_t *stolen,
1959                            phys_addr_t *mappable_base,
1960                            unsigned long *mappable_end)
1961 {
1962         struct drm_i915_private *dev_priv = dev->dev_private;
1963         unsigned int gtt_size;
1964         u16 snb_gmch_ctl;
1965         int ret;
1966
1967         /* TODO: We're not aware of mappable constraints on gen8 yet */
1968         *mappable_base = pci_resource_start(dev->pdev, 2);
1969         *mappable_end = pci_resource_len(dev->pdev, 2);
1970
1971         if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(39)))
1972                 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(39));
1973
1974         pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
1975
1976         if (INTEL_INFO(dev)->gen >= 9) {
1977                 *stolen = gen9_get_stolen_size(snb_gmch_ctl);
1978                 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
1979         } else if (IS_CHERRYVIEW(dev)) {
1980                 *stolen = chv_get_stolen_size(snb_gmch_ctl);
1981                 gtt_size = chv_get_total_gtt_size(snb_gmch_ctl);
1982         } else {
1983                 *stolen = gen8_get_stolen_size(snb_gmch_ctl);
1984                 gtt_size = gen8_get_total_gtt_size(snb_gmch_ctl);
1985         }
1986
1987         *gtt_total = (gtt_size / sizeof(gen8_gtt_pte_t)) << PAGE_SHIFT;
1988
1989         if (IS_CHERRYVIEW(dev))
1990                 chv_setup_private_ppat(dev_priv);
1991         else
1992                 bdw_setup_private_ppat(dev_priv);
1993
1994         ret = ggtt_probe_common(dev, gtt_size);
1995
1996         dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
1997         dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
1998
1999         return ret;
2000 }
2001
2002 static int gen6_gmch_probe(struct drm_device *dev,
2003                            size_t *gtt_total,
2004                            size_t *stolen,
2005                            phys_addr_t *mappable_base,
2006                            unsigned long *mappable_end)
2007 {
2008         struct drm_i915_private *dev_priv = dev->dev_private;
2009         unsigned int gtt_size;
2010         u16 snb_gmch_ctl;
2011         int ret;
2012
2013         *mappable_base = pci_resource_start(dev->pdev, 2);
2014         *mappable_end = pci_resource_len(dev->pdev, 2);
2015
2016         /* 64/512MB is the current min/max we actually know of, but this is just
2017          * a coarse sanity check.
2018          */
2019         if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
2020                 DRM_ERROR("Unknown GMADR size (%lx)\n",
2021                           dev_priv->gtt.mappable_end);
2022                 return -ENXIO;
2023         }
2024
2025         if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
2026                 pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
2027         pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
2028
2029         *stolen = gen6_get_stolen_size(snb_gmch_ctl);
2030
2031         gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
2032         *gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;
2033
2034         ret = ggtt_probe_common(dev, gtt_size);
2035
2036         dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
2037         dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
2038
2039         return ret;
2040 }
2041
2042 static void gen6_gmch_remove(struct i915_address_space *vm)
2043 {
2045         struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
2046
2047         iounmap(gtt->gsm);
2048         teardown_scratch_page(vm->dev);
2049 }
2050
2051 static int i915_gmch_probe(struct drm_device *dev,
2052                            size_t *gtt_total,
2053                            size_t *stolen,
2054                            phys_addr_t *mappable_base,
2055                            unsigned long *mappable_end)
2056 {
2057         struct drm_i915_private *dev_priv = dev->dev_private;
2058         int ret;
2059
2060         ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
2061         if (!ret) {
2062                 DRM_ERROR("failed to set up gmch\n");
2063                 return -EIO;
2064         }
2065
2066         intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
2067
2068         dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
2069         dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
2070
2071         if (unlikely(dev_priv->gtt.do_idle_maps))
2072                 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
2073
2074         return 0;
2075 }
2076
2077 static void i915_gmch_remove(struct i915_address_space *vm)
2078 {
2079         intel_gmch_remove();
2080 }
2081
2082 int i915_gem_gtt_init(struct drm_device *dev)
2083 {
2084         struct drm_i915_private *dev_priv = dev->dev_private;
2085         struct i915_gtt *gtt = &dev_priv->gtt;
2086         int ret;
2087
2088         if (INTEL_INFO(dev)->gen <= 5) {
2089                 gtt->gtt_probe = i915_gmch_probe;
2090                 gtt->base.cleanup = i915_gmch_remove;
2091         } else if (INTEL_INFO(dev)->gen < 8) {
2092                 gtt->gtt_probe = gen6_gmch_probe;
2093                 gtt->base.cleanup = gen6_gmch_remove;
2094                 if (IS_HASWELL(dev) && dev_priv->ellc_size)
2095                         gtt->base.pte_encode = iris_pte_encode;
2096                 else if (IS_HASWELL(dev))
2097                         gtt->base.pte_encode = hsw_pte_encode;
2098                 else if (IS_VALLEYVIEW(dev))
2099                         gtt->base.pte_encode = byt_pte_encode;
2100                 else if (INTEL_INFO(dev)->gen >= 7)
2101                         gtt->base.pte_encode = ivb_pte_encode;
2102                 else
2103                         gtt->base.pte_encode = snb_pte_encode;
2104         } else {
2105                 dev_priv->gtt.gtt_probe = gen8_gmch_probe;
2106                 dev_priv->gtt.base.cleanup = gen6_gmch_remove;
2107         }
2108
2109         ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
2110                              &gtt->mappable_base, &gtt->mappable_end);
2111         if (ret)
2112                 return ret;
2113
2114         gtt->base.dev = dev;
2115
2116         /* GMADR is the PCI mmio aperture into the global GTT. */
2117         DRM_INFO("Memory usable by graphics device = %zdM\n",
2118                  gtt->base.total >> 20);
2119         DRM_DEBUG_DRIVER("GMADR size = %ldM\n", gtt->mappable_end >> 20);
2120         DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
2121 #ifdef CONFIG_INTEL_IOMMU
2122         if (intel_iommu_gfx_mapped)
2123                 DRM_INFO("VT-d active for gfx access\n");
2124 #endif
2125         /*
2126          * i915.enable_ppgtt is read-only, so do an early pass to validate the
2127          * user's requested state against the hardware/driver capabilities.  We
2128          * do this now so that we can print out any log messages once rather
2129          * than every time we check intel_enable_ppgtt().
2130          */
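        /* (sanitized value: 0 = PPGTT disabled, 1 = aliasing, 2 = full) */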
2131         i915.enable_ppgtt = sanitize_enable_ppgtt(dev, i915.enable_ppgtt);
2132         DRM_DEBUG_DRIVER("ppgtt mode: %i\n", i915.enable_ppgtt);
2133
2134         return 0;
2135 }
2136
2137 static struct i915_vma *__i915_gem_vma_create(struct drm_i915_gem_object *obj,
2138                                               struct i915_address_space *vm)
2139 {
2140         struct i915_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
2141         if (vma == NULL)
2142                 return ERR_PTR(-ENOMEM);
2143
2144         INIT_LIST_HEAD(&vma->vma_link);
2145         INIT_LIST_HEAD(&vma->mm_list);
2146         INIT_LIST_HEAD(&vma->exec_list);
2147         vma->vm = vm;
2148         vma->obj = obj;
2149
2150         switch (INTEL_INFO(vm->dev)->gen) {
2151         case 9:
2152         case 8:
2153         case 7:
2154         case 6:
2155                 if (i915_is_ggtt(vm)) {
2156                         vma->unbind_vma = ggtt_unbind_vma;
2157                         vma->bind_vma = ggtt_bind_vma;
2158                 } else {
2159                         vma->unbind_vma = ppgtt_unbind_vma;
2160                         vma->bind_vma = ppgtt_bind_vma;
2161                 }
2162                 break;
2163         case 5:
2164         case 4:
2165         case 3:
2166         case 2:
2167                 BUG_ON(!i915_is_ggtt(vm));
2168                 vma->unbind_vma = i915_ggtt_unbind_vma;
2169                 vma->bind_vma = i915_ggtt_bind_vma;
2170                 break;
2171         default:
2172                 BUG();
2173         }
2174
2175         /* Keep GGTT vmas first to make debug easier */
2176         if (i915_is_ggtt(vm))
2177                 list_add(&vma->vma_link, &obj->vma_list);
2178         else {
2179                 list_add_tail(&vma->vma_link, &obj->vma_list);
2180                 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
2181         }
2182
2183         return vma;
2184 }
2185
2186 struct i915_vma *
2187 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
2188                                   struct i915_address_space *vm)
2189 {
2190         struct i915_vma *vma;
2191
2192         vma = i915_gem_obj_to_vma(obj, vm);
2193         if (!vma)
2194                 vma = __i915_gem_vma_create(obj, vm);
2195
2196         return vma;
2197 }