Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
[sfrench/cifs-2.6.git] / drivers / gpu / drm / i915 / i915_gem.c
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_gem_clflush.h"
33 #include "i915_vgpu.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 #include "intel_frontbuffer.h"
37 #include "intel_mocs.h"
38 #include "i915_gemfs.h"
39 #include <linux/dma-fence-array.h>
40 #include <linux/kthread.h>
41 #include <linux/reservation.h>
42 #include <linux/shmem_fs.h>
43 #include <linux/slab.h>
44 #include <linux/stop_machine.h>
45 #include <linux/swap.h>
46 #include <linux/pci.h>
47 #include <linux/dma-buf.h>
48
49 static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
50
51 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
52 {
53         if (obj->cache_dirty)
54                 return false;
55
56         if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
57                 return true;
58
59         return obj->pin_global; /* currently in use by HW, keep flushed */
60 }
61
62 static int
63 insert_mappable_node(struct i915_ggtt *ggtt,
64                      struct drm_mm_node *node, u32 size)
65 {
66         memset(node, 0, sizeof(*node));
67         return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
68                                            size, 0, I915_COLOR_UNEVICTABLE,
69                                            0, ggtt->mappable_end,
70                                            DRM_MM_INSERT_LOW);
71 }
72
73 static void
74 remove_mappable_node(struct drm_mm_node *node)
75 {
76         drm_mm_remove_node(node);
77 }
78
79 /* some bookkeeping */
80 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
81                                   u64 size)
82 {
83         spin_lock(&dev_priv->mm.object_stat_lock);
84         dev_priv->mm.object_count++;
85         dev_priv->mm.object_memory += size;
86         spin_unlock(&dev_priv->mm.object_stat_lock);
87 }
88
89 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
90                                      u64 size)
91 {
92         spin_lock(&dev_priv->mm.object_stat_lock);
93         dev_priv->mm.object_count--;
94         dev_priv->mm.object_memory -= size;
95         spin_unlock(&dev_priv->mm.object_stat_lock);
96 }
97
98 static int
99 i915_gem_wait_for_error(struct i915_gpu_error *error)
100 {
101         int ret;
102
103         might_sleep();
104
105         /*
106          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
107          * userspace. If it takes that long something really bad is going on and
108          * we should simply try to bail out and fail as gracefully as possible.
109          */
110         ret = wait_event_interruptible_timeout(error->reset_queue,
111                                                !i915_reset_backoff(error),
112                                                I915_RESET_TIMEOUT);
113         if (ret == 0) {
114                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
115                 return -EIO;
116         } else if (ret < 0) {
117                 return ret;
118         } else {
119                 return 0;
120         }
121 }
122
123 int i915_mutex_lock_interruptible(struct drm_device *dev)
124 {
125         struct drm_i915_private *dev_priv = to_i915(dev);
126         int ret;
127
128         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
129         if (ret)
130                 return ret;
131
132         ret = mutex_lock_interruptible(&dev->struct_mutex);
133         if (ret)
134                 return ret;
135
136         return 0;
137 }
138
139 int
140 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
141                             struct drm_file *file)
142 {
143         struct drm_i915_private *dev_priv = to_i915(dev);
144         struct i915_ggtt *ggtt = &dev_priv->ggtt;
145         struct drm_i915_gem_get_aperture *args = data;
146         struct i915_vma *vma;
147         u64 pinned;
148
149         pinned = ggtt->base.reserved;
150         mutex_lock(&dev->struct_mutex);
151         list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
152                 if (i915_vma_is_pinned(vma))
153                         pinned += vma->node.size;
154         list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
155                 if (i915_vma_is_pinned(vma))
156                         pinned += vma->node.size;
157         mutex_unlock(&dev->struct_mutex);
158
159         args->aper_size = ggtt->base.total;
160         args->aper_available_size = args->aper_size - pinned;
161
162         return 0;
163 }
164
165 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
166 {
167         struct address_space *mapping = obj->base.filp->f_mapping;
168         drm_dma_handle_t *phys;
169         struct sg_table *st;
170         struct scatterlist *sg;
171         char *vaddr;
172         int i;
173         int err;
174
175         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
176                 return -EINVAL;
177
178         /* Always aligning to the object size, allows a single allocation
179          * to handle all possible callers, and given typical object sizes,
180          * the alignment of the buddy allocation will naturally match.
181          */
182         phys = drm_pci_alloc(obj->base.dev,
183                              roundup_pow_of_two(obj->base.size),
184                              roundup_pow_of_two(obj->base.size));
185         if (!phys)
186                 return -ENOMEM;
187
188         vaddr = phys->vaddr;
189         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
190                 struct page *page;
191                 char *src;
192
193                 page = shmem_read_mapping_page(mapping, i);
194                 if (IS_ERR(page)) {
195                         err = PTR_ERR(page);
196                         goto err_phys;
197                 }
198
199                 src = kmap_atomic(page);
200                 memcpy(vaddr, src, PAGE_SIZE);
201                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
202                 kunmap_atomic(src);
203
204                 put_page(page);
205                 vaddr += PAGE_SIZE;
206         }
207
208         i915_gem_chipset_flush(to_i915(obj->base.dev));
209
210         st = kmalloc(sizeof(*st), GFP_KERNEL);
211         if (!st) {
212                 err = -ENOMEM;
213                 goto err_phys;
214         }
215
216         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
217                 kfree(st);
218                 err = -ENOMEM;
219                 goto err_phys;
220         }
221
222         sg = st->sgl;
223         sg->offset = 0;
224         sg->length = obj->base.size;
225
226         sg_dma_address(sg) = phys->busaddr;
227         sg_dma_len(sg) = obj->base.size;
228
229         obj->phys_handle = phys;
230
231         __i915_gem_object_set_pages(obj, st, sg->length);
232
233         return 0;
234
235 err_phys:
236         drm_pci_free(obj->base.dev, phys);
237
238         return err;
239 }
240
241 static void __start_cpu_write(struct drm_i915_gem_object *obj)
242 {
243         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
244         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
245         if (cpu_write_needs_clflush(obj))
246                 obj->cache_dirty = true;
247 }
248
249 static void
250 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
251                                 struct sg_table *pages,
252                                 bool needs_clflush)
253 {
254         GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
255
256         if (obj->mm.madv == I915_MADV_DONTNEED)
257                 obj->mm.dirty = false;
258
259         if (needs_clflush &&
260             (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
261             !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
262                 drm_clflush_sg(pages);
263
264         __start_cpu_write(obj);
265 }
266
267 static void
268 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
269                                struct sg_table *pages)
270 {
271         __i915_gem_object_release_shmem(obj, pages, false);
272
273         if (obj->mm.dirty) {
274                 struct address_space *mapping = obj->base.filp->f_mapping;
275                 char *vaddr = obj->phys_handle->vaddr;
276                 int i;
277
278                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
279                         struct page *page;
280                         char *dst;
281
282                         page = shmem_read_mapping_page(mapping, i);
283                         if (IS_ERR(page))
284                                 continue;
285
286                         dst = kmap_atomic(page);
287                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
288                         memcpy(dst, vaddr, PAGE_SIZE);
289                         kunmap_atomic(dst);
290
291                         set_page_dirty(page);
292                         if (obj->mm.madv == I915_MADV_WILLNEED)
293                                 mark_page_accessed(page);
294                         put_page(page);
295                         vaddr += PAGE_SIZE;
296                 }
297                 obj->mm.dirty = false;
298         }
299
300         sg_free_table(pages);
301         kfree(pages);
302
303         drm_pci_free(obj->base.dev, obj->phys_handle);
304 }
305
306 static void
307 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
308 {
309         i915_gem_object_unpin_pages(obj);
310 }
311
312 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
313         .get_pages = i915_gem_object_get_pages_phys,
314         .put_pages = i915_gem_object_put_pages_phys,
315         .release = i915_gem_object_release_phys,
316 };
317
318 static const struct drm_i915_gem_object_ops i915_gem_object_ops;
319
320 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
321 {
322         struct i915_vma *vma;
323         LIST_HEAD(still_in_list);
324         int ret;
325
326         lockdep_assert_held(&obj->base.dev->struct_mutex);
327
328         /* Closed vma are removed from the obj->vma_list - but they may
329          * still have an active binding on the object. To remove those we
330          * must wait for all rendering to complete to the object (as unbinding
331          * must anyway), and retire the requests.
332          */
333         ret = i915_gem_object_set_to_cpu_domain(obj, false);
334         if (ret)
335                 return ret;
336
337         while ((vma = list_first_entry_or_null(&obj->vma_list,
338                                                struct i915_vma,
339                                                obj_link))) {
340                 list_move_tail(&vma->obj_link, &still_in_list);
341                 ret = i915_vma_unbind(vma);
342                 if (ret)
343                         break;
344         }
345         list_splice(&still_in_list, &obj->vma_list);
346
347         return ret;
348 }
349
350 static long
351 i915_gem_object_wait_fence(struct dma_fence *fence,
352                            unsigned int flags,
353                            long timeout,
354                            struct intel_rps_client *rps_client)
355 {
356         struct drm_i915_gem_request *rq;
357
358         BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
359
360         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
361                 return timeout;
362
363         if (!dma_fence_is_i915(fence))
364                 return dma_fence_wait_timeout(fence,
365                                               flags & I915_WAIT_INTERRUPTIBLE,
366                                               timeout);
367
368         rq = to_request(fence);
369         if (i915_gem_request_completed(rq))
370                 goto out;
371
372         /* This client is about to stall waiting for the GPU. In many cases
373          * this is undesirable and limits the throughput of the system, as
374          * many clients cannot continue processing user input/output whilst
375          * blocked. RPS autotuning may take tens of milliseconds to respond
376          * to the GPU load and thus incurs additional latency for the client.
377          * We can circumvent that by promoting the GPU frequency to maximum
378          * before we wait. This makes the GPU throttle up much more quickly
379          * (good for benchmarks and user experience, e.g. window animations),
380          * but at a cost of spending more power processing the workload
381          * (bad for battery). Not all clients even want their results
382          * immediately and for them we should just let the GPU select its own
383          * frequency to maximise efficiency. To prevent a single client from
384          * forcing the clocks too high for the whole system, we only allow
385          * each client to waitboost once in a busy period.
386          */
387         if (rps_client) {
388                 if (INTEL_GEN(rq->i915) >= 6)
389                         gen6_rps_boost(rq, rps_client);
390                 else
391                         rps_client = NULL;
392         }
393
394         timeout = i915_wait_request(rq, flags, timeout);
395
396 out:
397         if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
398                 i915_gem_request_retire_upto(rq);
399
400         return timeout;
401 }
402
403 static long
404 i915_gem_object_wait_reservation(struct reservation_object *resv,
405                                  unsigned int flags,
406                                  long timeout,
407                                  struct intel_rps_client *rps_client)
408 {
409         unsigned int seq = __read_seqcount_begin(&resv->seq);
410         struct dma_fence *excl;
411         bool prune_fences = false;
412
413         if (flags & I915_WAIT_ALL) {
414                 struct dma_fence **shared;
415                 unsigned int count, i;
416                 int ret;
417
418                 ret = reservation_object_get_fences_rcu(resv,
419                                                         &excl, &count, &shared);
420                 if (ret)
421                         return ret;
422
423                 for (i = 0; i < count; i++) {
424                         timeout = i915_gem_object_wait_fence(shared[i],
425                                                              flags, timeout,
426                                                              rps_client);
427                         if (timeout < 0)
428                                 break;
429
430                         dma_fence_put(shared[i]);
431                 }
432
433                 for (; i < count; i++)
434                         dma_fence_put(shared[i]);
435                 kfree(shared);
436
437                 prune_fences = count && timeout >= 0;
438         } else {
439                 excl = reservation_object_get_excl_rcu(resv);
440         }
441
442         if (excl && timeout >= 0) {
443                 timeout = i915_gem_object_wait_fence(excl, flags, timeout,
444                                                      rps_client);
445                 prune_fences = timeout >= 0;
446         }
447
448         dma_fence_put(excl);
449
450         /* Oportunistically prune the fences iff we know they have *all* been
451          * signaled and that the reservation object has not been changed (i.e.
452          * no new fences have been added).
453          */
454         if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
455                 if (reservation_object_trylock(resv)) {
456                         if (!__read_seqcount_retry(&resv->seq, seq))
457                                 reservation_object_add_excl_fence(resv, NULL);
458                         reservation_object_unlock(resv);
459                 }
460         }
461
462         return timeout;
463 }
464
465 static void __fence_set_priority(struct dma_fence *fence, int prio)
466 {
467         struct drm_i915_gem_request *rq;
468         struct intel_engine_cs *engine;
469
470         if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
471                 return;
472
473         rq = to_request(fence);
474         engine = rq->engine;
475         if (!engine->schedule)
476                 return;
477
478         engine->schedule(rq, prio);
479 }
480
481 static void fence_set_priority(struct dma_fence *fence, int prio)
482 {
483         /* Recurse once into a fence-array */
484         if (dma_fence_is_array(fence)) {
485                 struct dma_fence_array *array = to_dma_fence_array(fence);
486                 int i;
487
488                 for (i = 0; i < array->num_fences; i++)
489                         __fence_set_priority(array->fences[i], prio);
490         } else {
491                 __fence_set_priority(fence, prio);
492         }
493 }
494
495 int
496 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
497                               unsigned int flags,
498                               int prio)
499 {
500         struct dma_fence *excl;
501
502         if (flags & I915_WAIT_ALL) {
503                 struct dma_fence **shared;
504                 unsigned int count, i;
505                 int ret;
506
507                 ret = reservation_object_get_fences_rcu(obj->resv,
508                                                         &excl, &count, &shared);
509                 if (ret)
510                         return ret;
511
512                 for (i = 0; i < count; i++) {
513                         fence_set_priority(shared[i], prio);
514                         dma_fence_put(shared[i]);
515                 }
516
517                 kfree(shared);
518         } else {
519                 excl = reservation_object_get_excl_rcu(obj->resv);
520         }
521
522         if (excl) {
523                 fence_set_priority(excl, prio);
524                 dma_fence_put(excl);
525         }
526         return 0;
527 }
528
529 /**
530  * Waits for rendering to the object to be completed
531  * @obj: i915 gem object
532  * @flags: how to wait (under a lock, for all rendering or just for writes etc)
533  * @timeout: how long to wait
534  * @rps_client: client (user process) to charge for any waitboosting
535  */
536 int
537 i915_gem_object_wait(struct drm_i915_gem_object *obj,
538                      unsigned int flags,
539                      long timeout,
540                      struct intel_rps_client *rps_client)
541 {
542         might_sleep();
543 #if IS_ENABLED(CONFIG_LOCKDEP)
544         GEM_BUG_ON(debug_locks &&
545                    !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
546                    !!(flags & I915_WAIT_LOCKED));
547 #endif
548         GEM_BUG_ON(timeout < 0);
549
550         timeout = i915_gem_object_wait_reservation(obj->resv,
551                                                    flags, timeout,
552                                                    rps_client);
553         return timeout < 0 ? timeout : 0;
554 }
555
556 static struct intel_rps_client *to_rps_client(struct drm_file *file)
557 {
558         struct drm_i915_file_private *fpriv = file->driver_priv;
559
560         return &fpriv->rps_client;
561 }
562
563 static int
564 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
565                      struct drm_i915_gem_pwrite *args,
566                      struct drm_file *file)
567 {
568         void *vaddr = obj->phys_handle->vaddr + args->offset;
569         char __user *user_data = u64_to_user_ptr(args->data_ptr);
570
571         /* We manually control the domain here and pretend that it
572          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
573          */
574         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
575         if (copy_from_user(vaddr, user_data, args->size))
576                 return -EFAULT;
577
578         drm_clflush_virt_range(vaddr, args->size);
579         i915_gem_chipset_flush(to_i915(obj->base.dev));
580
581         intel_fb_obj_flush(obj, ORIGIN_CPU);
582         return 0;
583 }
584
585 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
586 {
587         return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
588 }
589
590 void i915_gem_object_free(struct drm_i915_gem_object *obj)
591 {
592         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
593         kmem_cache_free(dev_priv->objects, obj);
594 }
595
596 static int
597 i915_gem_create(struct drm_file *file,
598                 struct drm_i915_private *dev_priv,
599                 uint64_t size,
600                 uint32_t *handle_p)
601 {
602         struct drm_i915_gem_object *obj;
603         int ret;
604         u32 handle;
605
606         size = roundup(size, PAGE_SIZE);
607         if (size == 0)
608                 return -EINVAL;
609
610         /* Allocate the new object */
611         obj = i915_gem_object_create(dev_priv, size);
612         if (IS_ERR(obj))
613                 return PTR_ERR(obj);
614
615         ret = drm_gem_handle_create(file, &obj->base, &handle);
616         /* drop reference from allocate - handle holds it now */
617         i915_gem_object_put(obj);
618         if (ret)
619                 return ret;
620
621         *handle_p = handle;
622         return 0;
623 }
624
625 int
626 i915_gem_dumb_create(struct drm_file *file,
627                      struct drm_device *dev,
628                      struct drm_mode_create_dumb *args)
629 {
630         /* have to work out size/pitch and return them */
631         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
632         args->size = args->pitch * args->height;
633         return i915_gem_create(file, to_i915(dev),
634                                args->size, &args->handle);
635 }
636
637 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
638 {
639         return !(obj->cache_level == I915_CACHE_NONE ||
640                  obj->cache_level == I915_CACHE_WT);
641 }
642
643 /**
644  * Creates a new mm object and returns a handle to it.
645  * @dev: drm device pointer
646  * @data: ioctl data blob
647  * @file: drm file pointer
648  */
649 int
650 i915_gem_create_ioctl(struct drm_device *dev, void *data,
651                       struct drm_file *file)
652 {
653         struct drm_i915_private *dev_priv = to_i915(dev);
654         struct drm_i915_gem_create *args = data;
655
656         i915_gem_flush_free_objects(dev_priv);
657
658         return i915_gem_create(file, dev_priv,
659                                args->size, &args->handle);
660 }
661
662 static inline enum fb_op_origin
663 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
664 {
665         return (domain == I915_GEM_DOMAIN_GTT ?
666                 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
667 }
668
669 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
670 {
671         /*
672          * No actual flushing is required for the GTT write domain for reads
673          * from the GTT domain. Writes to it "immediately" go to main memory
674          * as far as we know, so there's no chipset flush. It also doesn't
675          * land in the GPU render cache.
676          *
677          * However, we do have to enforce the order so that all writes through
678          * the GTT land before any writes to the device, such as updates to
679          * the GATT itself.
680          *
681          * We also have to wait a bit for the writes to land from the GTT.
682          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
683          * timing. This issue has only been observed when switching quickly
684          * between GTT writes and CPU reads from inside the kernel on recent hw,
685          * and it appears to only affect discrete GTT blocks (i.e. on LLC
686          * system agents we cannot reproduce this behaviour, until Cannonlake
687          * that was!).
688          */
689
690         wmb();
691
692         intel_runtime_pm_get(dev_priv);
693         spin_lock_irq(&dev_priv->uncore.lock);
694
695         POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
696
697         spin_unlock_irq(&dev_priv->uncore.lock);
698         intel_runtime_pm_put(dev_priv);
699 }
700
701 static void
702 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
703 {
704         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
705         struct i915_vma *vma;
706
707         if (!(obj->base.write_domain & flush_domains))
708                 return;
709
710         switch (obj->base.write_domain) {
711         case I915_GEM_DOMAIN_GTT:
712                 i915_gem_flush_ggtt_writes(dev_priv);
713
714                 intel_fb_obj_flush(obj,
715                                    fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
716
717                 for_each_ggtt_vma(vma, obj) {
718                         if (vma->iomap)
719                                 continue;
720
721                         i915_vma_unset_ggtt_write(vma);
722                 }
723                 break;
724
725         case I915_GEM_DOMAIN_CPU:
726                 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
727                 break;
728
729         case I915_GEM_DOMAIN_RENDER:
730                 if (gpu_write_needs_clflush(obj))
731                         obj->cache_dirty = true;
732                 break;
733         }
734
735         obj->base.write_domain = 0;
736 }
737
738 static inline int
739 __copy_to_user_swizzled(char __user *cpu_vaddr,
740                         const char *gpu_vaddr, int gpu_offset,
741                         int length)
742 {
743         int ret, cpu_offset = 0;
744
745         while (length > 0) {
746                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
747                 int this_length = min(cacheline_end - gpu_offset, length);
748                 int swizzled_gpu_offset = gpu_offset ^ 64;
749
750                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
751                                      gpu_vaddr + swizzled_gpu_offset,
752                                      this_length);
753                 if (ret)
754                         return ret + length;
755
756                 cpu_offset += this_length;
757                 gpu_offset += this_length;
758                 length -= this_length;
759         }
760
761         return 0;
762 }
763
764 static inline int
765 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
766                           const char __user *cpu_vaddr,
767                           int length)
768 {
769         int ret, cpu_offset = 0;
770
771         while (length > 0) {
772                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
773                 int this_length = min(cacheline_end - gpu_offset, length);
774                 int swizzled_gpu_offset = gpu_offset ^ 64;
775
776                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
777                                        cpu_vaddr + cpu_offset,
778                                        this_length);
779                 if (ret)
780                         return ret + length;
781
782                 cpu_offset += this_length;
783                 gpu_offset += this_length;
784                 length -= this_length;
785         }
786
787         return 0;
788 }
789
790 /*
791  * Pins the specified object's pages and synchronizes the object with
792  * GPU accesses. Sets needs_clflush to non-zero if the caller should
793  * flush the object from the CPU cache.
794  */
795 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
796                                     unsigned int *needs_clflush)
797 {
798         int ret;
799
800         lockdep_assert_held(&obj->base.dev->struct_mutex);
801
802         *needs_clflush = 0;
803         if (!i915_gem_object_has_struct_page(obj))
804                 return -ENODEV;
805
806         ret = i915_gem_object_wait(obj,
807                                    I915_WAIT_INTERRUPTIBLE |
808                                    I915_WAIT_LOCKED,
809                                    MAX_SCHEDULE_TIMEOUT,
810                                    NULL);
811         if (ret)
812                 return ret;
813
814         ret = i915_gem_object_pin_pages(obj);
815         if (ret)
816                 return ret;
817
818         if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
819             !static_cpu_has(X86_FEATURE_CLFLUSH)) {
820                 ret = i915_gem_object_set_to_cpu_domain(obj, false);
821                 if (ret)
822                         goto err_unpin;
823                 else
824                         goto out;
825         }
826
827         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
828
829         /* If we're not in the cpu read domain, set ourself into the gtt
830          * read domain and manually flush cachelines (if required). This
831          * optimizes for the case when the gpu will dirty the data
832          * anyway again before the next pread happens.
833          */
834         if (!obj->cache_dirty &&
835             !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
836                 *needs_clflush = CLFLUSH_BEFORE;
837
838 out:
839         /* return with the pages pinned */
840         return 0;
841
842 err_unpin:
843         i915_gem_object_unpin_pages(obj);
844         return ret;
845 }
846
847 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
848                                      unsigned int *needs_clflush)
849 {
850         int ret;
851
852         lockdep_assert_held(&obj->base.dev->struct_mutex);
853
854         *needs_clflush = 0;
855         if (!i915_gem_object_has_struct_page(obj))
856                 return -ENODEV;
857
858         ret = i915_gem_object_wait(obj,
859                                    I915_WAIT_INTERRUPTIBLE |
860                                    I915_WAIT_LOCKED |
861                                    I915_WAIT_ALL,
862                                    MAX_SCHEDULE_TIMEOUT,
863                                    NULL);
864         if (ret)
865                 return ret;
866
867         ret = i915_gem_object_pin_pages(obj);
868         if (ret)
869                 return ret;
870
871         if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
872             !static_cpu_has(X86_FEATURE_CLFLUSH)) {
873                 ret = i915_gem_object_set_to_cpu_domain(obj, true);
874                 if (ret)
875                         goto err_unpin;
876                 else
877                         goto out;
878         }
879
880         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
881
882         /* If we're not in the cpu write domain, set ourself into the
883          * gtt write domain and manually flush cachelines (as required).
884          * This optimizes for the case when the gpu will use the data
885          * right away and we therefore have to clflush anyway.
886          */
887         if (!obj->cache_dirty) {
888                 *needs_clflush |= CLFLUSH_AFTER;
889
890                 /*
891                  * Same trick applies to invalidate partially written
892                  * cachelines read before writing.
893                  */
894                 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
895                         *needs_clflush |= CLFLUSH_BEFORE;
896         }
897
898 out:
899         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
900         obj->mm.dirty = true;
901         /* return with the pages pinned */
902         return 0;
903
904 err_unpin:
905         i915_gem_object_unpin_pages(obj);
906         return ret;
907 }
908
909 static void
910 shmem_clflush_swizzled_range(char *addr, unsigned long length,
911                              bool swizzled)
912 {
913         if (unlikely(swizzled)) {
914                 unsigned long start = (unsigned long) addr;
915                 unsigned long end = (unsigned long) addr + length;
916
917                 /* For swizzling simply ensure that we always flush both
918                  * channels. Lame, but simple and it works. Swizzled
919                  * pwrite/pread is far from a hotpath - current userspace
920                  * doesn't use it at all. */
921                 start = round_down(start, 128);
922                 end = round_up(end, 128);
923
924                 drm_clflush_virt_range((void *)start, end - start);
925         } else {
926                 drm_clflush_virt_range(addr, length);
927         }
928
929 }
930
931 /* Only difference to the fast-path function is that this can handle bit17
932  * and uses non-atomic copy and kmap functions. */
933 static int
934 shmem_pread_slow(struct page *page, int offset, int length,
935                  char __user *user_data,
936                  bool page_do_bit17_swizzling, bool needs_clflush)
937 {
938         char *vaddr;
939         int ret;
940
941         vaddr = kmap(page);
942         if (needs_clflush)
943                 shmem_clflush_swizzled_range(vaddr + offset, length,
944                                              page_do_bit17_swizzling);
945
946         if (page_do_bit17_swizzling)
947                 ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
948         else
949                 ret = __copy_to_user(user_data, vaddr + offset, length);
950         kunmap(page);
951
952         return ret ? - EFAULT : 0;
953 }
954
955 static int
956 shmem_pread(struct page *page, int offset, int length, char __user *user_data,
957             bool page_do_bit17_swizzling, bool needs_clflush)
958 {
959         int ret;
960
961         ret = -ENODEV;
962         if (!page_do_bit17_swizzling) {
963                 char *vaddr = kmap_atomic(page);
964
965                 if (needs_clflush)
966                         drm_clflush_virt_range(vaddr + offset, length);
967                 ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
968                 kunmap_atomic(vaddr);
969         }
970         if (ret == 0)
971                 return 0;
972
973         return shmem_pread_slow(page, offset, length, user_data,
974                                 page_do_bit17_swizzling, needs_clflush);
975 }
976
977 static int
978 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
979                      struct drm_i915_gem_pread *args)
980 {
981         char __user *user_data;
982         u64 remain;
983         unsigned int obj_do_bit17_swizzling;
984         unsigned int needs_clflush;
985         unsigned int idx, offset;
986         int ret;
987
988         obj_do_bit17_swizzling = 0;
989         if (i915_gem_object_needs_bit17_swizzle(obj))
990                 obj_do_bit17_swizzling = BIT(17);
991
992         ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
993         if (ret)
994                 return ret;
995
996         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
997         mutex_unlock(&obj->base.dev->struct_mutex);
998         if (ret)
999                 return ret;
1000
1001         remain = args->size;
1002         user_data = u64_to_user_ptr(args->data_ptr);
1003         offset = offset_in_page(args->offset);
1004         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1005                 struct page *page = i915_gem_object_get_page(obj, idx);
1006                 int length;
1007
1008                 length = remain;
1009                 if (offset + length > PAGE_SIZE)
1010                         length = PAGE_SIZE - offset;
1011
1012                 ret = shmem_pread(page, offset, length, user_data,
1013                                   page_to_phys(page) & obj_do_bit17_swizzling,
1014                                   needs_clflush);
1015                 if (ret)
1016                         break;
1017
1018                 remain -= length;
1019                 user_data += length;
1020                 offset = 0;
1021         }
1022
1023         i915_gem_obj_finish_shmem_access(obj);
1024         return ret;
1025 }
1026
1027 static inline bool
1028 gtt_user_read(struct io_mapping *mapping,
1029               loff_t base, int offset,
1030               char __user *user_data, int length)
1031 {
1032         void __iomem *vaddr;
1033         unsigned long unwritten;
1034
1035         /* We can use the cpu mem copy function because this is X86. */
1036         vaddr = io_mapping_map_atomic_wc(mapping, base);
1037         unwritten = __copy_to_user_inatomic(user_data,
1038                                             (void __force *)vaddr + offset,
1039                                             length);
1040         io_mapping_unmap_atomic(vaddr);
1041         if (unwritten) {
1042                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1043                 unwritten = copy_to_user(user_data,
1044                                          (void __force *)vaddr + offset,
1045                                          length);
1046                 io_mapping_unmap(vaddr);
1047         }
1048         return unwritten;
1049 }
1050
1051 static int
1052 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
1053                    const struct drm_i915_gem_pread *args)
1054 {
1055         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1056         struct i915_ggtt *ggtt = &i915->ggtt;
1057         struct drm_mm_node node;
1058         struct i915_vma *vma;
1059         void __user *user_data;
1060         u64 remain, offset;
1061         int ret;
1062
1063         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1064         if (ret)
1065                 return ret;
1066
1067         intel_runtime_pm_get(i915);
1068         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1069                                        PIN_MAPPABLE |
1070                                        PIN_NONFAULT |
1071                                        PIN_NONBLOCK);
1072         if (!IS_ERR(vma)) {
1073                 node.start = i915_ggtt_offset(vma);
1074                 node.allocated = false;
1075                 ret = i915_vma_put_fence(vma);
1076                 if (ret) {
1077                         i915_vma_unpin(vma);
1078                         vma = ERR_PTR(ret);
1079                 }
1080         }
1081         if (IS_ERR(vma)) {
1082                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1083                 if (ret)
1084                         goto out_unlock;
1085                 GEM_BUG_ON(!node.allocated);
1086         }
1087
1088         ret = i915_gem_object_set_to_gtt_domain(obj, false);
1089         if (ret)
1090                 goto out_unpin;
1091
1092         mutex_unlock(&i915->drm.struct_mutex);
1093
1094         user_data = u64_to_user_ptr(args->data_ptr);
1095         remain = args->size;
1096         offset = args->offset;
1097
1098         while (remain > 0) {
1099                 /* Operation in this page
1100                  *
1101                  * page_base = page offset within aperture
1102                  * page_offset = offset within page
1103                  * page_length = bytes to copy for this page
1104                  */
1105                 u32 page_base = node.start;
1106                 unsigned page_offset = offset_in_page(offset);
1107                 unsigned page_length = PAGE_SIZE - page_offset;
1108                 page_length = remain < page_length ? remain : page_length;
1109                 if (node.allocated) {
1110                         wmb();
1111                         ggtt->base.insert_page(&ggtt->base,
1112                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1113                                                node.start, I915_CACHE_NONE, 0);
1114                         wmb();
1115                 } else {
1116                         page_base += offset & PAGE_MASK;
1117                 }
1118
1119                 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
1120                                   user_data, page_length)) {
1121                         ret = -EFAULT;
1122                         break;
1123                 }
1124
1125                 remain -= page_length;
1126                 user_data += page_length;
1127                 offset += page_length;
1128         }
1129
1130         mutex_lock(&i915->drm.struct_mutex);
1131 out_unpin:
1132         if (node.allocated) {
1133                 wmb();
1134                 ggtt->base.clear_range(&ggtt->base,
1135                                        node.start, node.size);
1136                 remove_mappable_node(&node);
1137         } else {
1138                 i915_vma_unpin(vma);
1139         }
1140 out_unlock:
1141         intel_runtime_pm_put(i915);
1142         mutex_unlock(&i915->drm.struct_mutex);
1143
1144         return ret;
1145 }
1146
1147 /**
1148  * Reads data from the object referenced by handle.
1149  * @dev: drm device pointer
1150  * @data: ioctl data blob
1151  * @file: drm file pointer
1152  *
1153  * On error, the contents of *data are undefined.
1154  */
1155 int
1156 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1157                      struct drm_file *file)
1158 {
1159         struct drm_i915_gem_pread *args = data;
1160         struct drm_i915_gem_object *obj;
1161         int ret;
1162
1163         if (args->size == 0)
1164                 return 0;
1165
1166         if (!access_ok(VERIFY_WRITE,
1167                        u64_to_user_ptr(args->data_ptr),
1168                        args->size))
1169                 return -EFAULT;
1170
1171         obj = i915_gem_object_lookup(file, args->handle);
1172         if (!obj)
1173                 return -ENOENT;
1174
1175         /* Bounds check source.  */
1176         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1177                 ret = -EINVAL;
1178                 goto out;
1179         }
1180
1181         trace_i915_gem_object_pread(obj, args->offset, args->size);
1182
1183         ret = i915_gem_object_wait(obj,
1184                                    I915_WAIT_INTERRUPTIBLE,
1185                                    MAX_SCHEDULE_TIMEOUT,
1186                                    to_rps_client(file));
1187         if (ret)
1188                 goto out;
1189
1190         ret = i915_gem_object_pin_pages(obj);
1191         if (ret)
1192                 goto out;
1193
1194         ret = i915_gem_shmem_pread(obj, args);
1195         if (ret == -EFAULT || ret == -ENODEV)
1196                 ret = i915_gem_gtt_pread(obj, args);
1197
1198         i915_gem_object_unpin_pages(obj);
1199 out:
1200         i915_gem_object_put(obj);
1201         return ret;
1202 }
1203
1204 /* This is the fast write path which cannot handle
1205  * page faults in the source data
1206  */
1207
1208 static inline bool
1209 ggtt_write(struct io_mapping *mapping,
1210            loff_t base, int offset,
1211            char __user *user_data, int length)
1212 {
1213         void __iomem *vaddr;
1214         unsigned long unwritten;
1215
1216         /* We can use the cpu mem copy function because this is X86. */
1217         vaddr = io_mapping_map_atomic_wc(mapping, base);
1218         unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
1219                                                       user_data, length);
1220         io_mapping_unmap_atomic(vaddr);
1221         if (unwritten) {
1222                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1223                 unwritten = copy_from_user((void __force *)vaddr + offset,
1224                                            user_data, length);
1225                 io_mapping_unmap(vaddr);
1226         }
1227
1228         return unwritten;
1229 }
1230
1231 /**
1232  * This is the fast pwrite path, where we copy the data directly from the
1233  * user into the GTT, uncached.
1234  * @obj: i915 GEM object
1235  * @args: pwrite arguments structure
1236  */
1237 static int
1238 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1239                          const struct drm_i915_gem_pwrite *args)
1240 {
1241         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1242         struct i915_ggtt *ggtt = &i915->ggtt;
1243         struct drm_mm_node node;
1244         struct i915_vma *vma;
1245         u64 remain, offset;
1246         void __user *user_data;
1247         int ret;
1248
1249         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1250         if (ret)
1251                 return ret;
1252
1253         if (i915_gem_object_has_struct_page(obj)) {
1254                 /*
1255                  * Avoid waking the device up if we can fallback, as
1256                  * waking/resuming is very slow (worst-case 10-100 ms
1257                  * depending on PCI sleeps and our own resume time).
1258                  * This easily dwarfs any performance advantage from
1259                  * using the cache bypass of indirect GGTT access.
1260                  */
1261                 if (!intel_runtime_pm_get_if_in_use(i915)) {
1262                         ret = -EFAULT;
1263                         goto out_unlock;
1264                 }
1265         } else {
1266                 /* No backing pages, no fallback, we must force GGTT access */
1267                 intel_runtime_pm_get(i915);
1268         }
1269
1270         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1271                                        PIN_MAPPABLE |
1272                                        PIN_NONFAULT |
1273                                        PIN_NONBLOCK);
1274         if (!IS_ERR(vma)) {
1275                 node.start = i915_ggtt_offset(vma);
1276                 node.allocated = false;
1277                 ret = i915_vma_put_fence(vma);
1278                 if (ret) {
1279                         i915_vma_unpin(vma);
1280                         vma = ERR_PTR(ret);
1281                 }
1282         }
1283         if (IS_ERR(vma)) {
1284                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1285                 if (ret)
1286                         goto out_rpm;
1287                 GEM_BUG_ON(!node.allocated);
1288         }
1289
1290         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1291         if (ret)
1292                 goto out_unpin;
1293
1294         mutex_unlock(&i915->drm.struct_mutex);
1295
1296         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1297
1298         user_data = u64_to_user_ptr(args->data_ptr);
1299         offset = args->offset;
1300         remain = args->size;
1301         while (remain) {
1302                 /* Operation in this page
1303                  *
1304                  * page_base = page offset within aperture
1305                  * page_offset = offset within page
1306                  * page_length = bytes to copy for this page
1307                  */
1308                 u32 page_base = node.start;
1309                 unsigned int page_offset = offset_in_page(offset);
1310                 unsigned int page_length = PAGE_SIZE - page_offset;
1311                 page_length = remain < page_length ? remain : page_length;
1312                 if (node.allocated) {
1313                         wmb(); /* flush the write before we modify the GGTT */
1314                         ggtt->base.insert_page(&ggtt->base,
1315                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1316                                                node.start, I915_CACHE_NONE, 0);
1317                         wmb(); /* flush modifications to the GGTT (insert_page) */
1318                 } else {
1319                         page_base += offset & PAGE_MASK;
1320                 }
1321                 /* If we get a fault while copying data, then (presumably) our
1322                  * source page isn't available.  Return the error and we'll
1323                  * retry in the slow path.
1324                  * If the object is non-shmem backed, we retry again with the
1325                  * path that handles page fault.
1326                  */
1327                 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
1328                                user_data, page_length)) {
1329                         ret = -EFAULT;
1330                         break;
1331                 }
1332
1333                 remain -= page_length;
1334                 user_data += page_length;
1335                 offset += page_length;
1336         }
1337         intel_fb_obj_flush(obj, ORIGIN_CPU);
1338
1339         mutex_lock(&i915->drm.struct_mutex);
1340 out_unpin:
1341         if (node.allocated) {
1342                 wmb();
1343                 ggtt->base.clear_range(&ggtt->base,
1344                                        node.start, node.size);
1345                 remove_mappable_node(&node);
1346         } else {
1347                 i915_vma_unpin(vma);
1348         }
1349 out_rpm:
1350         intel_runtime_pm_put(i915);
1351 out_unlock:
1352         mutex_unlock(&i915->drm.struct_mutex);
1353         return ret;
1354 }
1355
1356 static int
1357 shmem_pwrite_slow(struct page *page, int offset, int length,
1358                   char __user *user_data,
1359                   bool page_do_bit17_swizzling,
1360                   bool needs_clflush_before,
1361                   bool needs_clflush_after)
1362 {
1363         char *vaddr;
1364         int ret;
1365
1366         vaddr = kmap(page);
1367         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1368                 shmem_clflush_swizzled_range(vaddr + offset, length,
1369                                              page_do_bit17_swizzling);
1370         if (page_do_bit17_swizzling)
1371                 ret = __copy_from_user_swizzled(vaddr, offset, user_data,
1372                                                 length);
1373         else
1374                 ret = __copy_from_user(vaddr + offset, user_data, length);
1375         if (needs_clflush_after)
1376                 shmem_clflush_swizzled_range(vaddr + offset, length,
1377                                              page_do_bit17_swizzling);
1378         kunmap(page);
1379
1380         return ret ? -EFAULT : 0;
1381 }
1382
1383 /* Per-page copy function for the shmem pwrite fastpath.
1384  * Flushes invalid cachelines before writing to the target if
1385  * needs_clflush_before is set and flushes out any written cachelines after
1386  * writing if needs_clflush is set.
1387  */
1388 static int
1389 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1390              bool page_do_bit17_swizzling,
1391              bool needs_clflush_before,
1392              bool needs_clflush_after)
1393 {
1394         int ret;
1395
1396         ret = -ENODEV;
1397         if (!page_do_bit17_swizzling) {
1398                 char *vaddr = kmap_atomic(page);
1399
1400                 if (needs_clflush_before)
1401                         drm_clflush_virt_range(vaddr + offset, len);
1402                 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
1403                 if (needs_clflush_after)
1404                         drm_clflush_virt_range(vaddr + offset, len);
1405
1406                 kunmap_atomic(vaddr);
1407         }
1408         if (ret == 0)
1409                 return ret;
1410
1411         return shmem_pwrite_slow(page, offset, len, user_data,
1412                                  page_do_bit17_swizzling,
1413                                  needs_clflush_before,
1414                                  needs_clflush_after);
1415 }
1416
1417 static int
1418 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1419                       const struct drm_i915_gem_pwrite *args)
1420 {
1421         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1422         void __user *user_data;
1423         u64 remain;
1424         unsigned int obj_do_bit17_swizzling;
1425         unsigned int partial_cacheline_write;
1426         unsigned int needs_clflush;
1427         unsigned int offset, idx;
1428         int ret;
1429
1430         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1431         if (ret)
1432                 return ret;
1433
1434         ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1435         mutex_unlock(&i915->drm.struct_mutex);
1436         if (ret)
1437                 return ret;
1438
1439         obj_do_bit17_swizzling = 0;
1440         if (i915_gem_object_needs_bit17_swizzle(obj))
1441                 obj_do_bit17_swizzling = BIT(17);
1442
1443         /* If we don't overwrite a cacheline completely we need to be
1444          * careful to have up-to-date data by first clflushing. Don't
1445          * overcomplicate things and flush the entire patch.
1446          */
1447         partial_cacheline_write = 0;
1448         if (needs_clflush & CLFLUSH_BEFORE)
1449                 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1450
1451         user_data = u64_to_user_ptr(args->data_ptr);
1452         remain = args->size;
1453         offset = offset_in_page(args->offset);
1454         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1455                 struct page *page = i915_gem_object_get_page(obj, idx);
1456                 int length;
1457
1458                 length = remain;
1459                 if (offset + length > PAGE_SIZE)
1460                         length = PAGE_SIZE - offset;
1461
1462                 ret = shmem_pwrite(page, offset, length, user_data,
1463                                    page_to_phys(page) & obj_do_bit17_swizzling,
1464                                    (offset | length) & partial_cacheline_write,
1465                                    needs_clflush & CLFLUSH_AFTER);
1466                 if (ret)
1467                         break;
1468
1469                 remain -= length;
1470                 user_data += length;
1471                 offset = 0;
1472         }
1473
1474         intel_fb_obj_flush(obj, ORIGIN_CPU);
1475         i915_gem_obj_finish_shmem_access(obj);
1476         return ret;
1477 }
1478
1479 /**
1480  * Writes data to the object referenced by handle.
1481  * @dev: drm device
1482  * @data: ioctl data blob
1483  * @file: drm file
1484  *
1485  * On error, the contents of the buffer that were to be modified are undefined.
1486  */
1487 int
1488 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1489                       struct drm_file *file)
1490 {
1491         struct drm_i915_gem_pwrite *args = data;
1492         struct drm_i915_gem_object *obj;
1493         int ret;
1494
1495         if (args->size == 0)
1496                 return 0;
1497
1498         if (!access_ok(VERIFY_READ,
1499                        u64_to_user_ptr(args->data_ptr),
1500                        args->size))
1501                 return -EFAULT;
1502
1503         obj = i915_gem_object_lookup(file, args->handle);
1504         if (!obj)
1505                 return -ENOENT;
1506
1507         /* Bounds check destination. */
1508         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1509                 ret = -EINVAL;
1510                 goto err;
1511         }
1512
1513         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1514
1515         ret = -ENODEV;
1516         if (obj->ops->pwrite)
1517                 ret = obj->ops->pwrite(obj, args);
1518         if (ret != -ENODEV)
1519                 goto err;
1520
1521         ret = i915_gem_object_wait(obj,
1522                                    I915_WAIT_INTERRUPTIBLE |
1523                                    I915_WAIT_ALL,
1524                                    MAX_SCHEDULE_TIMEOUT,
1525                                    to_rps_client(file));
1526         if (ret)
1527                 goto err;
1528
1529         ret = i915_gem_object_pin_pages(obj);
1530         if (ret)
1531                 goto err;
1532
1533         ret = -EFAULT;
1534         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1535          * it would end up going through the fenced access, and we'll get
1536          * different detiling behavior between reading and writing.
1537          * pread/pwrite currently are reading and writing from the CPU
1538          * perspective, requiring manual detiling by the client.
1539          */
1540         if (!i915_gem_object_has_struct_page(obj) ||
1541             cpu_write_needs_clflush(obj))
1542                 /* Note that the gtt paths might fail with non-page-backed user
1543                  * pointers (e.g. gtt mappings when moving data between
1544                  * textures). Fallback to the shmem path in that case.
1545                  */
1546                 ret = i915_gem_gtt_pwrite_fast(obj, args);
1547
1548         if (ret == -EFAULT || ret == -ENOSPC) {
1549                 if (obj->phys_handle)
1550                         ret = i915_gem_phys_pwrite(obj, args, file);
1551                 else
1552                         ret = i915_gem_shmem_pwrite(obj, args);
1553         }
1554
1555         i915_gem_object_unpin_pages(obj);
1556 err:
1557         i915_gem_object_put(obj);
1558         return ret;
1559 }
1560
1561 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1562 {
1563         struct drm_i915_private *i915;
1564         struct list_head *list;
1565         struct i915_vma *vma;
1566
1567         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1568
1569         for_each_ggtt_vma(vma, obj) {
1570                 if (i915_vma_is_active(vma))
1571                         continue;
1572
1573                 if (!drm_mm_node_allocated(&vma->node))
1574                         continue;
1575
1576                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1577         }
1578
1579         i915 = to_i915(obj->base.dev);
1580         spin_lock(&i915->mm.obj_lock);
1581         list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1582         list_move_tail(&obj->mm.link, list);
1583         spin_unlock(&i915->mm.obj_lock);
1584 }
1585
1586 /**
1587  * Called when user space prepares to use an object with the CPU, either
1588  * through the mmap ioctl's mapping or a GTT mapping.
1589  * @dev: drm device
1590  * @data: ioctl data blob
1591  * @file: drm file
1592  */
1593 int
1594 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1595                           struct drm_file *file)
1596 {
1597         struct drm_i915_gem_set_domain *args = data;
1598         struct drm_i915_gem_object *obj;
1599         uint32_t read_domains = args->read_domains;
1600         uint32_t write_domain = args->write_domain;
1601         int err;
1602
1603         /* Only handle setting domains to types used by the CPU. */
1604         if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1605                 return -EINVAL;
1606
1607         /* Having something in the write domain implies it's in the read
1608          * domain, and only that read domain.  Enforce that in the request.
1609          */
1610         if (write_domain != 0 && read_domains != write_domain)
1611                 return -EINVAL;
1612
1613         obj = i915_gem_object_lookup(file, args->handle);
1614         if (!obj)
1615                 return -ENOENT;
1616
1617         /* Try to flush the object off the GPU without holding the lock.
1618          * We will repeat the flush holding the lock in the normal manner
1619          * to catch cases where we are gazumped.
1620          */
1621         err = i915_gem_object_wait(obj,
1622                                    I915_WAIT_INTERRUPTIBLE |
1623                                    (write_domain ? I915_WAIT_ALL : 0),
1624                                    MAX_SCHEDULE_TIMEOUT,
1625                                    to_rps_client(file));
1626         if (err)
1627                 goto out;
1628
1629         /*
1630          * Proxy objects do not control access to the backing storage, ergo
1631          * they cannot be used as a means to manipulate the cache domain
1632          * tracking for that backing storage. The proxy object is always
1633          * considered to be outside of any cache domain.
1634          */
1635         if (i915_gem_object_is_proxy(obj)) {
1636                 err = -ENXIO;
1637                 goto out;
1638         }
1639
1640         /*
1641          * Flush and acquire obj->pages so that we are coherent through
1642          * direct access in memory with previous cached writes through
1643          * shmemfs and that our cache domain tracking remains valid.
1644          * For example, if the obj->filp was moved to swap without us
1645          * being notified and releasing the pages, we would mistakenly
1646          * continue to assume that the obj remained out of the CPU cached
1647          * domain.
1648          */
1649         err = i915_gem_object_pin_pages(obj);
1650         if (err)
1651                 goto out;
1652
1653         err = i915_mutex_lock_interruptible(dev);
1654         if (err)
1655                 goto out_unpin;
1656
1657         if (read_domains & I915_GEM_DOMAIN_WC)
1658                 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
1659         else if (read_domains & I915_GEM_DOMAIN_GTT)
1660                 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
1661         else
1662                 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
1663
1664         /* And bump the LRU for this access */
1665         i915_gem_object_bump_inactive_ggtt(obj);
1666
1667         mutex_unlock(&dev->struct_mutex);
1668
1669         if (write_domain != 0)
1670                 intel_fb_obj_invalidate(obj,
1671                                         fb_write_origin(obj, write_domain));
1672
1673 out_unpin:
1674         i915_gem_object_unpin_pages(obj);
1675 out:
1676         i915_gem_object_put(obj);
1677         return err;
1678 }
1679
1680 /**
1681  * Called when user space has done writes to this buffer
1682  * @dev: drm device
1683  * @data: ioctl data blob
1684  * @file: drm file
1685  */
1686 int
1687 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1688                          struct drm_file *file)
1689 {
1690         struct drm_i915_gem_sw_finish *args = data;
1691         struct drm_i915_gem_object *obj;
1692
1693         obj = i915_gem_object_lookup(file, args->handle);
1694         if (!obj)
1695                 return -ENOENT;
1696
1697         /*
1698          * Proxy objects are barred from CPU access, so there is no
1699          * need to ban sw_finish as it is a nop.
1700          */
1701
1702         /* Pinned buffers may be scanout, so flush the cache */
1703         i915_gem_object_flush_if_display(obj);
1704         i915_gem_object_put(obj);
1705
1706         return 0;
1707 }
1708
1709 /**
1710  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1711  *                       it is mapped to.
1712  * @dev: drm device
1713  * @data: ioctl data blob
1714  * @file: drm file
1715  *
1716  * While the mapping holds a reference on the contents of the object, it doesn't
1717  * imply a ref on the object itself.
1718  *
1719  * IMPORTANT:
1720  *
1721  * DRM driver writers who look a this function as an example for how to do GEM
1722  * mmap support, please don't implement mmap support like here. The modern way
1723  * to implement DRM mmap support is with an mmap offset ioctl (like
1724  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1725  * That way debug tooling like valgrind will understand what's going on, hiding
1726  * the mmap call in a driver private ioctl will break that. The i915 driver only
1727  * does cpu mmaps this way because we didn't know better.
1728  */
1729 int
1730 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1731                     struct drm_file *file)
1732 {
1733         struct drm_i915_gem_mmap *args = data;
1734         struct drm_i915_gem_object *obj;
1735         unsigned long addr;
1736
1737         if (args->flags & ~(I915_MMAP_WC))
1738                 return -EINVAL;
1739
1740         if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1741                 return -ENODEV;
1742
1743         obj = i915_gem_object_lookup(file, args->handle);
1744         if (!obj)
1745                 return -ENOENT;
1746
1747         /* prime objects have no backing filp to GEM mmap
1748          * pages from.
1749          */
1750         if (!obj->base.filp) {
1751                 i915_gem_object_put(obj);
1752                 return -ENXIO;
1753         }
1754
1755         addr = vm_mmap(obj->base.filp, 0, args->size,
1756                        PROT_READ | PROT_WRITE, MAP_SHARED,
1757                        args->offset);
1758         if (args->flags & I915_MMAP_WC) {
1759                 struct mm_struct *mm = current->mm;
1760                 struct vm_area_struct *vma;
1761
1762                 if (down_write_killable(&mm->mmap_sem)) {
1763                         i915_gem_object_put(obj);
1764                         return -EINTR;
1765                 }
1766                 vma = find_vma(mm, addr);
1767                 if (vma)
1768                         vma->vm_page_prot =
1769                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1770                 else
1771                         addr = -ENOMEM;
1772                 up_write(&mm->mmap_sem);
1773
1774                 /* This may race, but that's ok, it only gets set */
1775                 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1776         }
1777         i915_gem_object_put(obj);
1778         if (IS_ERR((void *)addr))
1779                 return addr;
1780
1781         args->addr_ptr = (uint64_t) addr;
1782
1783         return 0;
1784 }
1785
1786 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1787 {
1788         return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
1789 }
1790
1791 /**
1792  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1793  *
1794  * A history of the GTT mmap interface:
1795  *
1796  * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to
1797  *     aligned and suitable for fencing, and still fit into the available
1798  *     mappable space left by the pinned display objects. A classic problem
1799  *     we called the page-fault-of-doom where we would ping-pong between
1800  *     two objects that could not fit inside the GTT and so the memcpy
1801  *     would page one object in at the expense of the other between every
1802  *     single byte.
1803  *
1804  * 1 - Objects can be any size, and have any compatible fencing (X Y, or none
1805  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1806  *     object is too large for the available space (or simply too large
1807  *     for the mappable aperture!), a view is created instead and faulted
1808  *     into userspace. (This view is aligned and sized appropriately for
1809  *     fenced access.)
1810  *
1811  * 2 - Recognise WC as a separate cache domain so that we can flush the
1812  *     delayed writes via GTT before performing direct access via WC.
1813  *
1814  * Restrictions:
1815  *
1816  *  * snoopable objects cannot be accessed via the GTT. It can cause machine
1817  *    hangs on some architectures, corruption on others. An attempt to service
1818  *    a GTT page fault from a snoopable object will generate a SIGBUS.
1819  *
1820  *  * the object must be able to fit into RAM (physical memory, though no
1821  *    limited to the mappable aperture).
1822  *
1823  *
1824  * Caveats:
1825  *
1826  *  * a new GTT page fault will synchronize rendering from the GPU and flush
1827  *    all data to system memory. Subsequent access will not be synchronized.
1828  *
1829  *  * all mappings are revoked on runtime device suspend.
1830  *
1831  *  * there are only 8, 16 or 32 fence registers to share between all users
1832  *    (older machines require fence register for display and blitter access
1833  *    as well). Contention of the fence registers will cause the previous users
1834  *    to be unmapped and any new access will generate new page faults.
1835  *
1836  *  * running out of memory while servicing a fault may generate a SIGBUS,
1837  *    rather than the expected SIGSEGV.
1838  */
1839 int i915_gem_mmap_gtt_version(void)
1840 {
1841         return 2;
1842 }
1843
1844 static inline struct i915_ggtt_view
1845 compute_partial_view(struct drm_i915_gem_object *obj,
1846                      pgoff_t page_offset,
1847                      unsigned int chunk)
1848 {
1849         struct i915_ggtt_view view;
1850
1851         if (i915_gem_object_is_tiled(obj))
1852                 chunk = roundup(chunk, tile_row_pages(obj));
1853
1854         view.type = I915_GGTT_VIEW_PARTIAL;
1855         view.partial.offset = rounddown(page_offset, chunk);
1856         view.partial.size =
1857                 min_t(unsigned int, chunk,
1858                       (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
1859
1860         /* If the partial covers the entire object, just create a normal VMA. */
1861         if (chunk >= obj->base.size >> PAGE_SHIFT)
1862                 view.type = I915_GGTT_VIEW_NORMAL;
1863
1864         return view;
1865 }
1866
1867 /**
1868  * i915_gem_fault - fault a page into the GTT
1869  * @vmf: fault info
1870  *
1871  * The fault handler is set up by drm_gem_mmap() when a object is GTT mapped
1872  * from userspace.  The fault handler takes care of binding the object to
1873  * the GTT (if needed), allocating and programming a fence register (again,
1874  * only if needed based on whether the old reg is still valid or the object
1875  * is tiled) and inserting a new PTE into the faulting process.
1876  *
1877  * Note that the faulting process may involve evicting existing objects
1878  * from the GTT and/or fence registers to make room.  So performance may
1879  * suffer if the GTT working set is large or there are few fence registers
1880  * left.
1881  *
1882  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1883  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1884  */
1885 int i915_gem_fault(struct vm_fault *vmf)
1886 {
1887 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
1888         struct vm_area_struct *area = vmf->vma;
1889         struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1890         struct drm_device *dev = obj->base.dev;
1891         struct drm_i915_private *dev_priv = to_i915(dev);
1892         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1893         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1894         struct i915_vma *vma;
1895         pgoff_t page_offset;
1896         unsigned int flags;
1897         int ret;
1898
1899         /* We don't use vmf->pgoff since that has the fake offset */
1900         page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
1901
1902         trace_i915_gem_object_fault(obj, page_offset, true, write);
1903
1904         /* Try to flush the object off the GPU first without holding the lock.
1905          * Upon acquiring the lock, we will perform our sanity checks and then
1906          * repeat the flush holding the lock in the normal manner to catch cases
1907          * where we are gazumped.
1908          */
1909         ret = i915_gem_object_wait(obj,
1910                                    I915_WAIT_INTERRUPTIBLE,
1911                                    MAX_SCHEDULE_TIMEOUT,
1912                                    NULL);
1913         if (ret)
1914                 goto err;
1915
1916         ret = i915_gem_object_pin_pages(obj);
1917         if (ret)
1918                 goto err;
1919
1920         intel_runtime_pm_get(dev_priv);
1921
1922         ret = i915_mutex_lock_interruptible(dev);
1923         if (ret)
1924                 goto err_rpm;
1925
1926         /* Access to snoopable pages through the GTT is incoherent. */
1927         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1928                 ret = -EFAULT;
1929                 goto err_unlock;
1930         }
1931
1932         /* If the object is smaller than a couple of partial vma, it is
1933          * not worth only creating a single partial vma - we may as well
1934          * clear enough space for the full object.
1935          */
1936         flags = PIN_MAPPABLE;
1937         if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1938                 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1939
1940         /* Now pin it into the GTT as needed */
1941         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1942         if (IS_ERR(vma)) {
1943                 /* Use a partial view if it is bigger than available space */
1944                 struct i915_ggtt_view view =
1945                         compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
1946
1947                 /* Userspace is now writing through an untracked VMA, abandon
1948                  * all hope that the hardware is able to track future writes.
1949                  */
1950                 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1951
1952                 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1953         }
1954         if (IS_ERR(vma)) {
1955                 ret = PTR_ERR(vma);
1956                 goto err_unlock;
1957         }
1958
1959         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1960         if (ret)
1961                 goto err_unpin;
1962
1963         ret = i915_vma_pin_fence(vma);
1964         if (ret)
1965                 goto err_unpin;
1966
1967         /* Finally, remap it using the new GTT offset */
1968         ret = remap_io_mapping(area,
1969                                area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
1970                                (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
1971                                min_t(u64, vma->size, area->vm_end - area->vm_start),
1972                                &ggtt->iomap);
1973         if (ret)
1974                 goto err_fence;
1975
1976         /* Mark as being mmapped into userspace for later revocation */
1977         assert_rpm_wakelock_held(dev_priv);
1978         if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
1979                 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1980         GEM_BUG_ON(!obj->userfault_count);
1981
1982         i915_vma_set_ggtt_write(vma);
1983
1984 err_fence:
1985         i915_vma_unpin_fence(vma);
1986 err_unpin:
1987         __i915_vma_unpin(vma);
1988 err_unlock:
1989         mutex_unlock(&dev->struct_mutex);
1990 err_rpm:
1991         intel_runtime_pm_put(dev_priv);
1992         i915_gem_object_unpin_pages(obj);
1993 err:
1994         switch (ret) {
1995         case -EIO:
1996                 /*
1997                  * We eat errors when the gpu is terminally wedged to avoid
1998                  * userspace unduly crashing (gl has no provisions for mmaps to
1999                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
2000                  * and so needs to be reported.
2001                  */
2002                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
2003                         ret = VM_FAULT_SIGBUS;
2004                         break;
2005                 }
2006         case -EAGAIN:
2007                 /*
2008                  * EAGAIN means the gpu is hung and we'll wait for the error
2009                  * handler to reset everything when re-faulting in
2010                  * i915_mutex_lock_interruptible.
2011                  */
2012         case 0:
2013         case -ERESTARTSYS:
2014         case -EINTR:
2015         case -EBUSY:
2016                 /*
2017                  * EBUSY is ok: this just means that another thread
2018                  * already did the job.
2019                  */
2020                 ret = VM_FAULT_NOPAGE;
2021                 break;
2022         case -ENOMEM:
2023                 ret = VM_FAULT_OOM;
2024                 break;
2025         case -ENOSPC:
2026         case -EFAULT:
2027                 ret = VM_FAULT_SIGBUS;
2028                 break;
2029         default:
2030                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
2031                 ret = VM_FAULT_SIGBUS;
2032                 break;
2033         }
2034         return ret;
2035 }
2036
2037 static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
2038 {
2039         struct i915_vma *vma;
2040
2041         GEM_BUG_ON(!obj->userfault_count);
2042
2043         obj->userfault_count = 0;
2044         list_del(&obj->userfault_link);
2045         drm_vma_node_unmap(&obj->base.vma_node,
2046                            obj->base.dev->anon_inode->i_mapping);
2047
2048         for_each_ggtt_vma(vma, obj)
2049                 i915_vma_unset_userfault(vma);
2050 }
2051
2052 /**
2053  * i915_gem_release_mmap - remove physical page mappings
2054  * @obj: obj in question
2055  *
2056  * Preserve the reservation of the mmapping with the DRM core code, but
2057  * relinquish ownership of the pages back to the system.
2058  *
2059  * It is vital that we remove the page mapping if we have mapped a tiled
2060  * object through the GTT and then lose the fence register due to
2061  * resource pressure. Similarly if the object has been moved out of the
2062  * aperture, than pages mapped into userspace must be revoked. Removing the
2063  * mapping will then trigger a page fault on the next user access, allowing
2064  * fixup by i915_gem_fault().
2065  */
2066 void
2067 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2068 {
2069         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2070
2071         /* Serialisation between user GTT access and our code depends upon
2072          * revoking the CPU's PTE whilst the mutex is held. The next user
2073          * pagefault then has to wait until we release the mutex.
2074          *
2075          * Note that RPM complicates somewhat by adding an additional
2076          * requirement that operations to the GGTT be made holding the RPM
2077          * wakeref.
2078          */
2079         lockdep_assert_held(&i915->drm.struct_mutex);
2080         intel_runtime_pm_get(i915);
2081
2082         if (!obj->userfault_count)
2083                 goto out;
2084
2085         __i915_gem_object_release_mmap(obj);
2086
2087         /* Ensure that the CPU's PTE are revoked and there are not outstanding
2088          * memory transactions from userspace before we return. The TLB
2089          * flushing implied above by changing the PTE above *should* be
2090          * sufficient, an extra barrier here just provides us with a bit
2091          * of paranoid documentation about our requirement to serialise
2092          * memory writes before touching registers / GSM.
2093          */
2094         wmb();
2095
2096 out:
2097         intel_runtime_pm_put(i915);
2098 }
2099
2100 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2101 {
2102         struct drm_i915_gem_object *obj, *on;
2103         int i;
2104
2105         /*
2106          * Only called during RPM suspend. All users of the userfault_list
2107          * must be holding an RPM wakeref to ensure that this can not
2108          * run concurrently with themselves (and use the struct_mutex for
2109          * protection between themselves).
2110          */
2111
2112         list_for_each_entry_safe(obj, on,
2113                                  &dev_priv->mm.userfault_list, userfault_link)
2114                 __i915_gem_object_release_mmap(obj);
2115
2116         /* The fence will be lost when the device powers down. If any were
2117          * in use by hardware (i.e. they are pinned), we should not be powering
2118          * down! All other fences will be reacquired by the user upon waking.
2119          */
2120         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2121                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2122
2123                 /* Ideally we want to assert that the fence register is not
2124                  * live at this point (i.e. that no piece of code will be
2125                  * trying to write through fence + GTT, as that both violates
2126                  * our tracking of activity and associated locking/barriers,
2127                  * but also is illegal given that the hw is powered down).
2128                  *
2129                  * Previously we used reg->pin_count as a "liveness" indicator.
2130                  * That is not sufficient, and we need a more fine-grained
2131                  * tool if we want to have a sanity check here.
2132                  */
2133
2134                 if (!reg->vma)
2135                         continue;
2136
2137                 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
2138                 reg->dirty = true;
2139         }
2140 }
2141
2142 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2143 {
2144         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2145         int err;
2146
2147         err = drm_gem_create_mmap_offset(&obj->base);
2148         if (likely(!err))
2149                 return 0;
2150
2151         /* Attempt to reap some mmap space from dead objects */
2152         do {
2153                 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2154                 if (err)
2155                         break;
2156
2157                 i915_gem_drain_freed_objects(dev_priv);
2158                 err = drm_gem_create_mmap_offset(&obj->base);
2159                 if (!err)
2160                         break;
2161
2162         } while (flush_delayed_work(&dev_priv->gt.retire_work));
2163
2164         return err;
2165 }
2166
2167 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2168 {
2169         drm_gem_free_mmap_offset(&obj->base);
2170 }
2171
2172 int
2173 i915_gem_mmap_gtt(struct drm_file *file,
2174                   struct drm_device *dev,
2175                   uint32_t handle,
2176                   uint64_t *offset)
2177 {
2178         struct drm_i915_gem_object *obj;
2179         int ret;
2180
2181         obj = i915_gem_object_lookup(file, handle);
2182         if (!obj)
2183                 return -ENOENT;
2184
2185         ret = i915_gem_object_create_mmap_offset(obj);
2186         if (ret == 0)
2187                 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2188
2189         i915_gem_object_put(obj);
2190         return ret;
2191 }
2192
2193 /**
2194  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2195  * @dev: DRM device
2196  * @data: GTT mapping ioctl data
2197  * @file: GEM object info
2198  *
2199  * Simply returns the fake offset to userspace so it can mmap it.
2200  * The mmap call will end up in drm_gem_mmap(), which will set things
2201  * up so we can get faults in the handler above.
2202  *
2203  * The fault handler will take care of binding the object into the GTT
2204  * (since it may have been evicted to make room for something), allocating
2205  * a fence register, and mapping the appropriate aperture address into
2206  * userspace.
2207  */
2208 int
2209 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2210                         struct drm_file *file)
2211 {
2212         struct drm_i915_gem_mmap_gtt *args = data;
2213
2214         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2215 }
2216
2217 /* Immediately discard the backing storage */
2218 static void
2219 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2220 {
2221         i915_gem_object_free_mmap_offset(obj);
2222
2223         if (obj->base.filp == NULL)
2224                 return;
2225
2226         /* Our goal here is to return as much of the memory as
2227          * is possible back to the system as we are called from OOM.
2228          * To do this we must instruct the shmfs to drop all of its
2229          * backing pages, *now*.
2230          */
2231         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2232         obj->mm.madv = __I915_MADV_PURGED;
2233         obj->mm.pages = ERR_PTR(-EFAULT);
2234 }
2235
2236 /* Try to discard unwanted pages */
2237 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2238 {
2239         struct address_space *mapping;
2240
2241         lockdep_assert_held(&obj->mm.lock);
2242         GEM_BUG_ON(i915_gem_object_has_pages(obj));
2243
2244         switch (obj->mm.madv) {
2245         case I915_MADV_DONTNEED:
2246                 i915_gem_object_truncate(obj);
2247         case __I915_MADV_PURGED:
2248                 return;
2249         }
2250
2251         if (obj->base.filp == NULL)
2252                 return;
2253
2254         mapping = obj->base.filp->f_mapping,
2255         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2256 }
2257
2258 static void
2259 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2260                               struct sg_table *pages)
2261 {
2262         struct sgt_iter sgt_iter;
2263         struct page *page;
2264
2265         __i915_gem_object_release_shmem(obj, pages, true);
2266
2267         i915_gem_gtt_finish_pages(obj, pages);
2268
2269         if (i915_gem_object_needs_bit17_swizzle(obj))
2270                 i915_gem_object_save_bit_17_swizzle(obj, pages);
2271
2272         for_each_sgt_page(page, sgt_iter, pages) {
2273                 if (obj->mm.dirty)
2274                         set_page_dirty(page);
2275
2276                 if (obj->mm.madv == I915_MADV_WILLNEED)
2277                         mark_page_accessed(page);
2278
2279                 put_page(page);
2280         }
2281         obj->mm.dirty = false;
2282
2283         sg_free_table(pages);
2284         kfree(pages);
2285 }
2286
2287 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2288 {
2289         struct radix_tree_iter iter;
2290         void __rcu **slot;
2291
2292         rcu_read_lock();
2293         radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2294                 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2295         rcu_read_unlock();
2296 }
2297
2298 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2299                                  enum i915_mm_subclass subclass)
2300 {
2301         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2302         struct sg_table *pages;
2303
2304         if (i915_gem_object_has_pinned_pages(obj))
2305                 return;
2306
2307         GEM_BUG_ON(obj->bind_count);
2308         if (!i915_gem_object_has_pages(obj))
2309                 return;
2310
2311         /* May be called by shrinker from within get_pages() (on another bo) */
2312         mutex_lock_nested(&obj->mm.lock, subclass);
2313         if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2314                 goto unlock;
2315
2316         /* ->put_pages might need to allocate memory for the bit17 swizzle
2317          * array, hence protect them from being reaped by removing them from gtt
2318          * lists early. */
2319         pages = fetch_and_zero(&obj->mm.pages);
2320         GEM_BUG_ON(!pages);
2321
2322         spin_lock(&i915->mm.obj_lock);
2323         list_del(&obj->mm.link);
2324         spin_unlock(&i915->mm.obj_lock);
2325
2326         if (obj->mm.mapping) {
2327                 void *ptr;
2328
2329                 ptr = page_mask_bits(obj->mm.mapping);
2330                 if (is_vmalloc_addr(ptr))
2331                         vunmap(ptr);
2332                 else
2333                         kunmap(kmap_to_page(ptr));
2334
2335                 obj->mm.mapping = NULL;
2336         }
2337
2338         __i915_gem_object_reset_page_iter(obj);
2339
2340         if (!IS_ERR(pages))
2341                 obj->ops->put_pages(obj, pages);
2342
2343         obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
2344
2345 unlock:
2346         mutex_unlock(&obj->mm.lock);
2347 }
2348
2349 static bool i915_sg_trim(struct sg_table *orig_st)
2350 {
2351         struct sg_table new_st;
2352         struct scatterlist *sg, *new_sg;
2353         unsigned int i;
2354
2355         if (orig_st->nents == orig_st->orig_nents)
2356                 return false;
2357
2358         if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2359                 return false;
2360
2361         new_sg = new_st.sgl;
2362         for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2363                 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2364                 /* called before being DMA mapped, no need to copy sg->dma_* */
2365                 new_sg = sg_next(new_sg);
2366         }
2367         GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
2368
2369         sg_free_table(orig_st);
2370
2371         *orig_st = new_st;
2372         return true;
2373 }
2374
2375 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2376 {
2377         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2378         const unsigned long page_count = obj->base.size / PAGE_SIZE;
2379         unsigned long i;
2380         struct address_space *mapping;
2381         struct sg_table *st;
2382         struct scatterlist *sg;
2383         struct sgt_iter sgt_iter;
2384         struct page *page;
2385         unsigned long last_pfn = 0;     /* suppress gcc warning */
2386         unsigned int max_segment = i915_sg_segment_size();
2387         unsigned int sg_page_sizes;
2388         gfp_t noreclaim;
2389         int ret;
2390
2391         /* Assert that the object is not currently in any GPU domain. As it
2392          * wasn't in the GTT, there shouldn't be any way it could have been in
2393          * a GPU cache
2394          */
2395         GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2396         GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2397
2398         st = kmalloc(sizeof(*st), GFP_KERNEL);
2399         if (st == NULL)
2400                 return -ENOMEM;
2401
2402 rebuild_st:
2403         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2404                 kfree(st);
2405                 return -ENOMEM;
2406         }
2407
2408         /* Get the list of pages out of our struct file.  They'll be pinned
2409          * at this point until we release them.
2410          *
2411          * Fail silently without starting the shrinker
2412          */
2413         mapping = obj->base.filp->f_mapping;
2414         noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2415         noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2416
2417         sg = st->sgl;
2418         st->nents = 0;
2419         sg_page_sizes = 0;
2420         for (i = 0; i < page_count; i++) {
2421                 const unsigned int shrink[] = {
2422                         I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2423                         0,
2424                 }, *s = shrink;
2425                 gfp_t gfp = noreclaim;
2426
2427                 do {
2428                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2429                         if (likely(!IS_ERR(page)))
2430                                 break;
2431
2432                         if (!*s) {
2433                                 ret = PTR_ERR(page);
2434                                 goto err_sg;
2435                         }
2436
2437                         i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
2438                         cond_resched();
2439
2440                         /* We've tried hard to allocate the memory by reaping
2441                          * our own buffer, now let the real VM do its job and
2442                          * go down in flames if truly OOM.
2443                          *
2444                          * However, since graphics tend to be disposable,
2445                          * defer the oom here by reporting the ENOMEM back
2446                          * to userspace.
2447                          */
2448                         if (!*s) {
2449                                 /* reclaim and warn, but no oom */
2450                                 gfp = mapping_gfp_mask(mapping);
2451
2452                                 /* Our bo are always dirty and so we require
2453                                  * kswapd to reclaim our pages (direct reclaim
2454                                  * does not effectively begin pageout of our
2455                                  * buffers on its own). However, direct reclaim
2456                                  * only waits for kswapd when under allocation
2457                                  * congestion. So as a result __GFP_RECLAIM is
2458                                  * unreliable and fails to actually reclaim our
2459                                  * dirty pages -- unless you try over and over
2460                                  * again with !__GFP_NORETRY. However, we still
2461                                  * want to fail this allocation rather than
2462                                  * trigger the out-of-memory killer and for
2463                                  * this we want __GFP_RETRY_MAYFAIL.
2464                                  */
2465                                 gfp |= __GFP_RETRY_MAYFAIL;
2466                         }
2467                 } while (1);
2468
2469                 if (!i ||
2470                     sg->length >= max_segment ||
2471                     page_to_pfn(page) != last_pfn + 1) {
2472                         if (i) {
2473                                 sg_page_sizes |= sg->length;
2474                                 sg = sg_next(sg);
2475                         }
2476                         st->nents++;
2477                         sg_set_page(sg, page, PAGE_SIZE, 0);
2478                 } else {
2479                         sg->length += PAGE_SIZE;
2480                 }
2481                 last_pfn = page_to_pfn(page);
2482
2483                 /* Check that the i965g/gm workaround works. */
2484                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2485         }
2486         if (sg) { /* loop terminated early; short sg table */
2487                 sg_page_sizes |= sg->length;
2488                 sg_mark_end(sg);
2489         }
2490
2491         /* Trim unused sg entries to avoid wasting memory. */
2492         i915_sg_trim(st);
2493
2494         ret = i915_gem_gtt_prepare_pages(obj, st);
2495         if (ret) {
2496                 /* DMA remapping failed? One possible cause is that
2497                  * it could not reserve enough large entries, asking
2498                  * for PAGE_SIZE chunks instead may be helpful.
2499                  */
2500                 if (max_segment > PAGE_SIZE) {
2501                         for_each_sgt_page(page, sgt_iter, st)
2502                                 put_page(page);
2503                         sg_free_table(st);
2504
2505                         max_segment = PAGE_SIZE;
2506                         goto rebuild_st;
2507                 } else {
2508                         dev_warn(&dev_priv->drm.pdev->dev,
2509                                  "Failed to DMA remap %lu pages\n",
2510                                  page_count);
2511                         goto err_pages;
2512                 }
2513         }
2514
2515         if (i915_gem_object_needs_bit17_swizzle(obj))
2516                 i915_gem_object_do_bit_17_swizzle(obj, st);
2517
2518         __i915_gem_object_set_pages(obj, st, sg_page_sizes);
2519
2520         return 0;
2521
2522 err_sg:
2523         sg_mark_end(sg);
2524 err_pages:
2525         for_each_sgt_page(page, sgt_iter, st)
2526                 put_page(page);
2527         sg_free_table(st);
2528         kfree(st);
2529
2530         /* shmemfs first checks if there is enough memory to allocate the page
2531          * and reports ENOSPC should there be insufficient, along with the usual
2532          * ENOMEM for a genuine allocation failure.
2533          *
2534          * We use ENOSPC in our driver to mean that we have run out of aperture
2535          * space and so want to translate the error from shmemfs back to our
2536          * usual understanding of ENOMEM.
2537          */
2538         if (ret == -ENOSPC)
2539                 ret = -ENOMEM;
2540
2541         return ret;
2542 }
2543
2544 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2545                                  struct sg_table *pages,
2546                                  unsigned int sg_page_sizes)
2547 {
2548         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2549         unsigned long supported = INTEL_INFO(i915)->page_sizes;
2550         int i;
2551
2552         lockdep_assert_held(&obj->mm.lock);
2553
2554         obj->mm.get_page.sg_pos = pages->sgl;
2555         obj->mm.get_page.sg_idx = 0;
2556
2557         obj->mm.pages = pages;
2558
2559         if (i915_gem_object_is_tiled(obj) &&
2560             i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2561                 GEM_BUG_ON(obj->mm.quirked);
2562                 __i915_gem_object_pin_pages(obj);
2563                 obj->mm.quirked = true;
2564         }
2565
2566         GEM_BUG_ON(!sg_page_sizes);
2567         obj->mm.page_sizes.phys = sg_page_sizes;
2568
2569         /*
2570          * Calculate the supported page-sizes which fit into the given
2571          * sg_page_sizes. This will give us the page-sizes which we may be able
2572          * to use opportunistically when later inserting into the GTT. For
2573          * example if phys=2G, then in theory we should be able to use 1G, 2M,
2574          * 64K or 4K pages, although in practice this will depend on a number of
2575          * other factors.
2576          */
2577         obj->mm.page_sizes.sg = 0;
2578         for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
2579                 if (obj->mm.page_sizes.phys & ~0u << i)
2580                         obj->mm.page_sizes.sg |= BIT(i);
2581         }
2582         GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
2583
2584         spin_lock(&i915->mm.obj_lock);
2585         list_add(&obj->mm.link, &i915->mm.unbound_list);
2586         spin_unlock(&i915->mm.obj_lock);
2587 }
2588
2589 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2590 {
2591         int err;
2592
2593         if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2594                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2595                 return -EFAULT;
2596         }
2597
2598         err = obj->ops->get_pages(obj);
2599         GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
2600
2601         return err;
2602 }
2603
2604 /* Ensure that the associated pages are gathered from the backing storage
2605  * and pinned into our object. i915_gem_object_pin_pages() may be called
2606  * multiple times before they are released by a single call to
2607  * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2608  * either as a result of memory pressure (reaping pages under the shrinker)
2609  * or as the object is itself released.
2610  */
2611 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2612 {
2613         int err;
2614
2615         err = mutex_lock_interruptible(&obj->mm.lock);
2616         if (err)
2617                 return err;
2618
2619         if (unlikely(!i915_gem_object_has_pages(obj))) {
2620                 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2621
2622                 err = ____i915_gem_object_get_pages(obj);
2623                 if (err)
2624                         goto unlock;
2625
2626                 smp_mb__before_atomic();
2627         }
2628         atomic_inc(&obj->mm.pages_pin_count);
2629
2630 unlock:
2631         mutex_unlock(&obj->mm.lock);
2632         return err;
2633 }
2634
2635 /* The 'mapping' part of i915_gem_object_pin_map() below */
2636 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2637                                  enum i915_map_type type)
2638 {
2639         unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2640         struct sg_table *sgt = obj->mm.pages;
2641         struct sgt_iter sgt_iter;
2642         struct page *page;
2643         struct page *stack_pages[32];
2644         struct page **pages = stack_pages;
2645         unsigned long i = 0;
2646         pgprot_t pgprot;
2647         void *addr;
2648
2649         /* A single page can always be kmapped */
2650         if (n_pages == 1 && type == I915_MAP_WB)
2651                 return kmap(sg_page(sgt->sgl));
2652
2653         if (n_pages > ARRAY_SIZE(stack_pages)) {
2654                 /* Too big for stack -- allocate temporary array instead */
2655                 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
2656                 if (!pages)
2657                         return NULL;
2658         }
2659
2660         for_each_sgt_page(page, sgt_iter, sgt)
2661                 pages[i++] = page;
2662
2663         /* Check that we have the expected number of pages */
2664         GEM_BUG_ON(i != n_pages);
2665
2666         switch (type) {
2667         default:
2668                 MISSING_CASE(type);
2669                 /* fallthrough to use PAGE_KERNEL anyway */
2670         case I915_MAP_WB:
2671                 pgprot = PAGE_KERNEL;
2672                 break;
2673         case I915_MAP_WC:
2674                 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2675                 break;
2676         }
2677         addr = vmap(pages, n_pages, 0, pgprot);
2678
2679         if (pages != stack_pages)
2680                 kvfree(pages);
2681
2682         return addr;
2683 }
2684
2685 /* get, pin, and map the pages of the object into kernel space */
2686 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2687                               enum i915_map_type type)
2688 {
2689         enum i915_map_type has_type;
2690         bool pinned;
2691         void *ptr;
2692         int ret;
2693
2694         if (unlikely(!i915_gem_object_has_struct_page(obj)))
2695                 return ERR_PTR(-ENXIO);
2696
2697         ret = mutex_lock_interruptible(&obj->mm.lock);
2698         if (ret)
2699                 return ERR_PTR(ret);
2700
2701         pinned = !(type & I915_MAP_OVERRIDE);
2702         type &= ~I915_MAP_OVERRIDE;
2703
2704         if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2705                 if (unlikely(!i915_gem_object_has_pages(obj))) {
2706                         GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2707
2708                         ret = ____i915_gem_object_get_pages(obj);
2709                         if (ret)
2710                                 goto err_unlock;
2711
2712                         smp_mb__before_atomic();
2713                 }
2714                 atomic_inc(&obj->mm.pages_pin_count);
2715                 pinned = false;
2716         }
2717         GEM_BUG_ON(!i915_gem_object_has_pages(obj));
2718
2719         ptr = page_unpack_bits(obj->mm.mapping, &has_type);
2720         if (ptr && has_type != type) {
2721                 if (pinned) {
2722                         ret = -EBUSY;
2723                         goto err_unpin;
2724                 }
2725
2726                 if (is_vmalloc_addr(ptr))
2727                         vunmap(ptr);
2728                 else
2729                         kunmap(kmap_to_page(ptr));
2730
2731                 ptr = obj->mm.mapping = NULL;
2732         }
2733
2734         if (!ptr) {
2735                 ptr = i915_gem_object_map(obj, type);
2736                 if (!ptr) {
2737                         ret = -ENOMEM;
2738                         goto err_unpin;
2739                 }
2740
2741                 obj->mm.mapping = page_pack_bits(ptr, type);
2742         }
2743
2744 out_unlock:
2745         mutex_unlock(&obj->mm.lock);
2746         return ptr;
2747
2748 err_unpin:
2749         atomic_dec(&obj->mm.pages_pin_count);
2750 err_unlock:
2751         ptr = ERR_PTR(ret);
2752         goto out_unlock;
2753 }
2754
2755 static int
2756 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2757                            const struct drm_i915_gem_pwrite *arg)
2758 {
2759         struct address_space *mapping = obj->base.filp->f_mapping;
2760         char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2761         u64 remain, offset;
2762         unsigned int pg;
2763
2764         /* Before we instantiate/pin the backing store for our use, we
2765          * can prepopulate the shmemfs filp efficiently using a write into
2766          * the pagecache. We avoid the penalty of instantiating all the
2767          * pages, important if the user is just writing to a few and never
2768          * uses the object on the GPU, and using a direct write into shmemfs
2769          * allows it to avoid the cost of retrieving a page (either swapin
2770          * or clearing-before-use) before it is overwritten.
2771          */
2772         if (i915_gem_object_has_pages(obj))
2773                 return -ENODEV;
2774
2775         if (obj->mm.madv != I915_MADV_WILLNEED)
2776                 return -EFAULT;
2777
2778         /* Before the pages are instantiated the object is treated as being
2779          * in the CPU domain. The pages will be clflushed as required before
2780          * use, and we can freely write into the pages directly. If userspace
2781          * races pwrite with any other operation; corruption will ensue -
2782          * that is userspace's prerogative!
2783          */
2784
2785         remain = arg->size;
2786         offset = arg->offset;
2787         pg = offset_in_page(offset);
2788
2789         do {
2790                 unsigned int len, unwritten;
2791                 struct page *page;
2792                 void *data, *vaddr;
2793                 int err;
2794
2795                 len = PAGE_SIZE - pg;
2796                 if (len > remain)
2797                         len = remain;
2798
2799                 err = pagecache_write_begin(obj->base.filp, mapping,
2800                                             offset, len, 0,
2801                                             &page, &data);
2802                 if (err < 0)
2803                         return err;
2804
2805                 vaddr = kmap(page);
2806                 unwritten = copy_from_user(vaddr + pg, user_data, len);
2807                 kunmap(page);
2808
2809                 err = pagecache_write_end(obj->base.filp, mapping,
2810                                           offset, len, len - unwritten,
2811                                           page, data);
2812                 if (err < 0)
2813                         return err;
2814
2815                 if (unwritten)
2816                         return -EFAULT;
2817
2818                 remain -= len;
2819                 user_data += len;
2820                 offset += len;
2821                 pg = 0;
2822         } while (remain);
2823
2824         return 0;
2825 }
2826
2827 static bool ban_context(const struct i915_gem_context *ctx,
2828                         unsigned int score)
2829 {
2830         return (i915_gem_context_is_bannable(ctx) &&
2831                 score >= CONTEXT_SCORE_BAN_THRESHOLD);
2832 }
2833
2834 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
2835 {
2836         unsigned int score;
2837         bool banned;
2838
2839         atomic_inc(&ctx->guilty_count);
2840
2841         score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
2842         banned = ban_context(ctx, score);
2843         DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
2844                          ctx->name, score, yesno(banned));
2845         if (!banned)
2846                 return;
2847
2848         i915_gem_context_set_banned(ctx);
2849         if (!IS_ERR_OR_NULL(ctx->file_priv)) {
2850                 atomic_inc(&ctx->file_priv->context_bans);
2851                 DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
2852                                  ctx->name, atomic_read(&ctx->file_priv->context_bans));
2853         }
2854 }
2855
2856 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
2857 {
2858         atomic_inc(&ctx->active_count);
2859 }
2860
2861 struct drm_i915_gem_request *
2862 i915_gem_find_active_request(struct intel_engine_cs *engine)
2863 {
2864         struct drm_i915_gem_request *request, *active = NULL;
2865         unsigned long flags;
2866
2867         /* We are called by the error capture and reset at a random
2868          * point in time. In particular, note that neither is crucially
2869          * ordered with an interrupt. After a hang, the GPU is dead and we
2870          * assume that no more writes can happen (we waited long enough for
2871          * all writes that were in transaction to be flushed) - adding an
2872          * extra delay for a recent interrupt is pointless. Hence, we do
2873          * not need an engine->irq_seqno_barrier() before the seqno reads.
2874          */
2875         spin_lock_irqsave(&engine->timeline->lock, flags);
2876         list_for_each_entry(request, &engine->timeline->requests, link) {
2877                 if (__i915_gem_request_completed(request,
2878                                                  request->global_seqno))
2879                         continue;
2880
2881                 GEM_BUG_ON(request->engine != engine);
2882                 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2883                                     &request->fence.flags));
2884
2885                 active = request;
2886                 break;
2887         }
2888         spin_unlock_irqrestore(&engine->timeline->lock, flags);
2889
2890         return active;
2891 }
2892
2893 static bool engine_stalled(struct intel_engine_cs *engine)
2894 {
2895         if (!engine->hangcheck.stalled)
2896                 return false;
2897
2898         /* Check for possible seqno movement after hang declaration */
2899         if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
2900                 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
2901                 return false;
2902         }
2903
2904         return true;
2905 }
2906
2907 /*
2908  * Ensure irq handler finishes, and not run again.
2909  * Also return the active request so that we only search for it once.
2910  */
2911 struct drm_i915_gem_request *
2912 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
2913 {
2914         struct drm_i915_gem_request *request = NULL;
2915
2916         /*
2917          * During the reset sequence, we must prevent the engine from
2918          * entering RC6. As the context state is undefined until we restart
2919          * the engine, if it does enter RC6 during the reset, the state
2920          * written to the powercontext is undefined and so we may lose
2921          * GPU state upon resume, i.e. fail to restart after a reset.
2922          */
2923         intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
2924
2925         /*
2926          * Prevent the signaler thread from updating the request
2927          * state (by calling dma_fence_signal) as we are processing
2928          * the reset. The write from the GPU of the seqno is
2929          * asynchronous and the signaler thread may see a different
2930          * value to us and declare the request complete, even though
2931          * the reset routine have picked that request as the active
2932          * (incomplete) request. This conflict is not handled
2933          * gracefully!
2934          */
2935         kthread_park(engine->breadcrumbs.signaler);
2936
2937         /*
2938          * Prevent request submission to the hardware until we have
2939          * completed the reset in i915_gem_reset_finish(). If a request
2940          * is completed by one engine, it may then queue a request
2941          * to a second via its execlists->tasklet *just* as we are
2942          * calling engine->init_hw() and also writing the ELSP.
2943          * Turning off the execlists->tasklet until the reset is over
2944          * prevents the race.
2945          */
2946         tasklet_kill(&engine->execlists.tasklet);
2947         tasklet_disable(&engine->execlists.tasklet);
2948
2949         /*
2950          * We're using worker to queue preemption requests from the tasklet in
2951          * GuC submission mode.
2952          * Even though tasklet was disabled, we may still have a worker queued.
2953          * Let's make sure that all workers scheduled before disabling the
2954          * tasklet are completed before continuing with the reset.
2955          */
2956         if (engine->i915->guc.preempt_wq)
2957                 flush_workqueue(engine->i915->guc.preempt_wq);
2958
2959         if (engine->irq_seqno_barrier)
2960                 engine->irq_seqno_barrier(engine);
2961
2962         request = i915_gem_find_active_request(engine);
2963         if (request && request->fence.error == -EIO)
2964                 request = ERR_PTR(-EIO); /* Previous reset failed! */
2965
2966         return request;
2967 }
2968
2969 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
2970 {
2971         struct intel_engine_cs *engine;
2972         struct drm_i915_gem_request *request;
2973         enum intel_engine_id id;
2974         int err = 0;
2975
2976         for_each_engine(engine, dev_priv, id) {
2977                 request = i915_gem_reset_prepare_engine(engine);
2978                 if (IS_ERR(request)) {
2979                         err = PTR_ERR(request);
2980                         continue;
2981                 }
2982
2983                 engine->hangcheck.active_request = request;
2984         }
2985
2986         i915_gem_revoke_fences(dev_priv);
2987
2988         return err;
2989 }
2990
2991 static void skip_request(struct drm_i915_gem_request *request)
2992 {
2993         void *vaddr = request->ring->vaddr;
2994         u32 head;
2995
2996         /* As this request likely depends on state from the lost
2997          * context, clear out all the user operations leaving the
2998          * breadcrumb at the end (so we get the fence notifications).
2999          */
3000         head = request->head;
3001         if (request->postfix < head) {
3002                 memset(vaddr + head, 0, request->ring->size - head);
3003                 head = 0;
3004         }
3005         memset(vaddr + head, 0, request->postfix - head);
3006
3007         dma_fence_set_error(&request->fence, -EIO);
3008 }
3009
3010 static void engine_skip_context(struct drm_i915_gem_request *request)
3011 {
3012         struct intel_engine_cs *engine = request->engine;
3013         struct i915_gem_context *hung_ctx = request->ctx;
3014         struct intel_timeline *timeline;
3015         unsigned long flags;
3016
3017         timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
3018
3019         spin_lock_irqsave(&engine->timeline->lock, flags);
3020         spin_lock(&timeline->lock);
3021
3022         list_for_each_entry_continue(request, &engine->timeline->requests, link)
3023                 if (request->ctx == hung_ctx)
3024                         skip_request(request);
3025
3026         list_for_each_entry(request, &timeline->requests, link)
3027                 skip_request(request);
3028
3029         spin_unlock(&timeline->lock);
3030         spin_unlock_irqrestore(&engine->timeline->lock, flags);
3031 }
3032
3033 /* Returns the request if it was guilty of the hang */
3034 static struct drm_i915_gem_request *
3035 i915_gem_reset_request(struct intel_engine_cs *engine,
3036                        struct drm_i915_gem_request *request)
3037 {
3038         /* The guilty request will get skipped on a hung engine.
3039          *
3040          * Users of client default contexts do not rely on logical
3041          * state preserved between batches so it is safe to execute
3042          * queued requests following the hang. Non default contexts
3043          * rely on preserved state, so skipping a batch loses the
3044          * evolution of the state and it needs to be considered corrupted.
3045          * Executing more queued batches on top of corrupted state is
3046          * risky. But we take the risk by trying to advance through
3047          * the queued requests in order to make the client behaviour
3048          * more predictable around resets, by not throwing away random
3049          * amount of batches it has prepared for execution. Sophisticated
3050          * clients can use gem_reset_stats_ioctl and dma fence status
3051          * (exported via sync_file info ioctl on explicit fences) to observe
3052          * when it loses the context state and should rebuild accordingly.
3053          *
3054          * The context ban, and ultimately the client ban, mechanism are safety
3055          * valves if client submission ends up resulting in nothing more than
3056          * subsequent hangs.
3057          */
3058
3059         if (engine_stalled(engine)) {
3060                 i915_gem_context_mark_guilty(request->ctx);
3061                 skip_request(request);
3062
3063                 /* If this context is now banned, skip all pending requests. */
3064                 if (i915_gem_context_is_banned(request->ctx))
3065                         engine_skip_context(request);
3066         } else {
3067                 /*
3068                  * Since this is not the hung engine, it may have advanced
3069                  * since the hang declaration. Double check by refinding
3070                  * the active request at the time of the reset.
3071                  */
3072                 request = i915_gem_find_active_request(engine);
3073                 if (request) {
3074                         i915_gem_context_mark_innocent(request->ctx);
3075                         dma_fence_set_error(&request->fence, -EAGAIN);
3076
3077                         /* Rewind the engine to replay the incomplete rq */
3078                         spin_lock_irq(&engine->timeline->lock);
3079                         request = list_prev_entry(request, link);
3080                         if (&request->link == &engine->timeline->requests)
3081                                 request = NULL;
3082                         spin_unlock_irq(&engine->timeline->lock);
3083                 }
3084         }
3085
3086         return request;
3087 }
3088
3089 void i915_gem_reset_engine(struct intel_engine_cs *engine,
3090                            struct drm_i915_gem_request *request)
3091 {
3092         /*
3093          * Make sure this write is visible before we re-enable the interrupt
3094          * handlers on another CPU, as tasklet_enable() resolves to just
3095          * a compiler barrier which is insufficient for our purpose here.
3096          */
3097         smp_store_mb(engine->irq_posted, 0);
3098
3099         if (request)
3100                 request = i915_gem_reset_request(engine, request);
3101
3102         if (request) {
3103                 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
3104                                  engine->name, request->global_seqno);
3105         }
3106
3107         /* Setup the CS to resume from the breadcrumb of the hung request */
3108         engine->reset_hw(engine, request);
3109 }
3110
3111 void i915_gem_reset(struct drm_i915_private *dev_priv)
3112 {
3113         struct intel_engine_cs *engine;
3114         enum intel_engine_id id;
3115
3116         lockdep_assert_held(&dev_priv->drm.struct_mutex);
3117
3118         i915_gem_retire_requests(dev_priv);
3119
3120         for_each_engine(engine, dev_priv, id) {
3121                 struct i915_gem_context *ctx;
3122
3123                 i915_gem_reset_engine(engine, engine->hangcheck.active_request);
3124                 ctx = fetch_and_zero(&engine->last_retired_context);
3125                 if (ctx)
3126                         engine->context_unpin(engine, ctx);
3127
3128                 /*
3129                  * Ostensibily, we always want a context loaded for powersaving,
3130                  * so if the engine is idle after the reset, send a request
3131                  * to load our scratch kernel_context.
3132                  *
3133                  * More mysteriously, if we leave the engine idle after a reset,
3134                  * the next userspace batch may hang, with what appears to be
3135                  * an incoherent read by the CS (presumably stale TLB). An
3136                  * empty request appears sufficient to paper over the glitch.
3137                  */
3138                 if (list_empty(&engine->timeline->requests)) {
3139                         struct drm_i915_gem_request *rq;
3140
3141                         rq = i915_gem_request_alloc(engine,
3142                                                     dev_priv->kernel_context);
3143                         if (!IS_ERR(rq))
3144                                 __i915_add_request(rq, false);
3145                 }
3146         }
3147
3148         i915_gem_restore_fences(dev_priv);
3149
3150         if (dev_priv->gt.awake) {
3151                 intel_sanitize_gt_powersave(dev_priv);
3152                 intel_enable_gt_powersave(dev_priv);
3153                 if (INTEL_GEN(dev_priv) >= 6)
3154                         gen6_rps_busy(dev_priv);
3155         }
3156 }
3157
3158 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
3159 {
3160         tasklet_enable(&engine->execlists.tasklet);
3161         kthread_unpark(engine->breadcrumbs.signaler);
3162
3163         intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
3164 }
3165
3166 void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
3167 {
3168         struct intel_engine_cs *engine;
3169         enum intel_engine_id id;
3170
3171         lockdep_assert_held(&dev_priv->drm.struct_mutex);
3172
3173         for_each_engine(engine, dev_priv, id) {
3174                 engine->hangcheck.active_request = NULL;
3175                 i915_gem_reset_finish_engine(engine);
3176         }
3177 }
3178
3179 static void nop_submit_request(struct drm_i915_gem_request *request)
3180 {
3181         dma_fence_set_error(&request->fence, -EIO);
3182
3183         i915_gem_request_submit(request);
3184 }
3185
3186 static void nop_complete_submit_request(struct drm_i915_gem_request *request)
3187 {
3188         unsigned long flags;
3189
3190         dma_fence_set_error(&request->fence, -EIO);
3191
3192         spin_lock_irqsave(&request->engine->timeline->lock, flags);
3193         __i915_gem_request_submit(request);
3194         intel_engine_init_global_seqno(request->engine, request->global_seqno);
3195         spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
3196 }
3197
3198 void i915_gem_set_wedged(struct drm_i915_private *i915)
3199 {
3200         struct intel_engine_cs *engine;
3201         enum intel_engine_id id;
3202
3203         /*
3204          * First, stop submission to hw, but do not yet complete requests by
3205          * rolling the global seqno forward (since this would complete requests
3206          * for which we haven't set the fence error to EIO yet).
3207          */
3208         for_each_engine(engine, i915, id)
3209                 engine->submit_request = nop_submit_request;
3210
3211         /*
3212          * Make sure no one is running the old callback before we proceed with
3213          * cancelling requests and resetting the completion tracking. Otherwise
3214          * we might submit a request to the hardware which never completes.
3215          */
3216         synchronize_rcu();
3217
3218         for_each_engine(engine, i915, id) {
3219                 /* Mark all executing requests as skipped */
3220                 engine->cancel_requests(engine);
3221
3222                 /*
3223                  * Only once we've force-cancelled all in-flight requests can we
3224                  * start to complete all requests.
3225                  */
3226                 engine->submit_request = nop_complete_submit_request;
3227         }
3228
3229         /*
3230          * Make sure no request can slip through without getting completed by
3231          * either this call here to intel_engine_init_global_seqno, or the one
3232          * in nop_complete_submit_request.
3233          */
3234         synchronize_rcu();
3235
3236         for_each_engine(engine, i915, id) {
3237                 unsigned long flags;
3238
3239                 /* Mark all pending requests as complete so that any concurrent
3240                  * (lockless) lookup doesn't try and wait upon the request as we
3241                  * reset it.
3242                  */
3243                 spin_lock_irqsave(&engine->timeline->lock, flags);
3244                 intel_engine_init_global_seqno(engine,
3245                                                intel_engine_last_submit(engine));
3246                 spin_unlock_irqrestore(&engine->timeline->lock, flags);
3247         }
3248
3249         set_bit(I915_WEDGED, &i915->gpu_error.flags);
3250         wake_up_all(&i915->gpu_error.reset_queue);
3251 }
3252
3253 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
3254 {
3255         struct i915_gem_timeline *tl;
3256         int i;
3257
3258         lockdep_assert_held(&i915->drm.struct_mutex);
3259         if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
3260                 return true;
3261
3262         /* Before unwedging, make sure that all pending operations
3263          * are flushed and errored out - we may have requests waiting upon
3264          * third party fences. We marked all inflight requests as EIO, and
3265          * every execbuf since returned EIO, for consistency we want all
3266          * the currently pending requests to also be marked as EIO, which
3267          * is done inside our nop_submit_request - and so we must wait.
3268          *
3269          * No more can be submitted until we reset the wedged bit.
3270          */
3271         list_for_each_entry(tl, &i915->gt.timelines, link) {
3272                 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3273                         struct drm_i915_gem_request *rq;
3274
3275                         rq = i915_gem_active_peek(&tl->engine[i].last_request,
3276                                                   &i915->drm.struct_mutex);
3277                         if (!rq)
3278                                 continue;
3279
3280                         /* We can't use our normal waiter as we want to
3281                          * avoid recursively trying to handle the current
3282                          * reset. The basic dma_fence_default_wait() installs
3283                          * a callback for dma_fence_signal(), which is
3284                          * triggered by our nop handler (indirectly, the
3285                          * callback enables the signaler thread which is
3286                          * woken by the nop_submit_request() advancing the seqno
3287                          * and when the seqno passes the fence, the signaler
3288                          * then signals the fence waking us up).
3289                          */
3290                         if (dma_fence_default_wait(&rq->fence, true,
3291                                                    MAX_SCHEDULE_TIMEOUT) < 0)
3292                                 return false;
3293                 }
3294         }
3295
3296         /* Undo nop_submit_request. We prevent all new i915 requests from
3297          * being queued (by disallowing execbuf whilst wedged) so having
3298          * waited for all active requests above, we know the system is idle
3299          * and do not have to worry about a thread being inside
3300          * engine->submit_request() as we swap over. So unlike installing
3301          * the nop_submit_request on reset, we can do this from normal
3302          * context and do not require stop_machine().
3303          */
3304         intel_engines_reset_default_submission(i915);
3305         i915_gem_contexts_lost(i915);
3306
3307         smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
3308         clear_bit(I915_WEDGED, &i915->gpu_error.flags);
3309
3310         return true;
3311 }
3312
3313 static void
3314 i915_gem_retire_work_handler(struct work_struct *work)
3315 {
3316         struct drm_i915_private *dev_priv =
3317                 container_of(work, typeof(*dev_priv), gt.retire_work.work);
3318         struct drm_device *dev = &dev_priv->drm;
3319
3320         /* Come back later if the device is busy... */
3321         if (mutex_trylock(&dev->struct_mutex)) {
3322                 i915_gem_retire_requests(dev_priv);
3323                 mutex_unlock(&dev->struct_mutex);
3324         }
3325
3326         /* Keep the retire handler running until we are finally idle.
3327          * We do not need to do this test under locking as in the worst-case
3328          * we queue the retire worker once too often.
3329          */
3330         if (READ_ONCE(dev_priv->gt.awake)) {
3331                 i915_queue_hangcheck(dev_priv);
3332                 queue_delayed_work(dev_priv->wq,
3333                                    &dev_priv->gt.retire_work,
3334                                    round_jiffies_up_relative(HZ));
3335         }
3336 }
3337
3338 static inline bool
3339 new_requests_since_last_retire(const struct drm_i915_private *i915)
3340 {
3341         return (READ_ONCE(i915->gt.active_requests) ||
3342                 work_pending(&i915->gt.idle_work.work));
3343 }
3344
3345 static void
3346 i915_gem_idle_work_handler(struct work_struct *work)
3347 {
3348         struct drm_i915_private *dev_priv =
3349                 container_of(work, typeof(*dev_priv), gt.idle_work.work);
3350         bool rearm_hangcheck;
3351         ktime_t end;
3352
3353         if (!READ_ONCE(dev_priv->gt.awake))
3354                 return;
3355
3356         /*
3357          * Wait for last execlists context complete, but bail out in case a
3358          * new request is submitted.
3359          */
3360         end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
3361         do {
3362                 if (new_requests_since_last_retire(dev_priv))
3363                         return;
3364
3365                 if (intel_engines_are_idle(dev_priv))
3366                         break;
3367
3368                 usleep_range(100, 500);
3369         } while (ktime_before(ktime_get(), end));
3370
3371         rearm_hangcheck =
3372                 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
3373
3374         if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
3375                 /* Currently busy, come back later */
3376                 mod_delayed_work(dev_priv->wq,
3377                                  &dev_priv->gt.idle_work,
3378                                  msecs_to_jiffies(50));
3379                 goto out_rearm;
3380         }
3381
3382         /*
3383          * New request retired after this work handler started, extend active
3384          * period until next instance of the work.
3385          */
3386         if (new_requests_since_last_retire(dev_priv))
3387                 goto out_unlock;
3388
3389         /*
3390          * Be paranoid and flush a concurrent interrupt to make sure
3391          * we don't reactivate any irq tasklets after parking.
3392          *
3393          * FIXME: Note that even though we have waited for execlists to be idle,
3394          * there may still be an in-flight interrupt even though the CSB
3395          * is now empty. synchronize_irq() makes sure that a residual interrupt
3396          * is completed before we continue, but it doesn't prevent the HW from
3397          * raising a spurious interrupt later. To complete the shield we should
3398          * coordinate disabling the CS irq with flushing the interrupts.
3399          */
3400         synchronize_irq(dev_priv->drm.irq);
3401
3402         intel_engines_park(dev_priv);
3403         i915_gem_timelines_park(dev_priv);
3404
3405         i915_pmu_gt_parked(dev_priv);
3406
3407         GEM_BUG_ON(!dev_priv->gt.awake);
3408         dev_priv->gt.awake = false;
3409         rearm_hangcheck = false;
3410
3411         if (INTEL_GEN(dev_priv) >= 6)
3412                 gen6_rps_idle(dev_priv);
3413
3414         intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
3415
3416         intel_runtime_pm_put(dev_priv);
3417 out_unlock:
3418         mutex_unlock(&dev_priv->drm.struct_mutex);
3419
3420 out_rearm:
3421         if (rearm_hangcheck) {
3422                 GEM_BUG_ON(!dev_priv->gt.awake);
3423                 i915_queue_hangcheck(dev_priv);
3424         }
3425 }
3426
3427 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
3428 {
3429         struct drm_i915_private *i915 = to_i915(gem->dev);
3430         struct drm_i915_gem_object *obj = to_intel_bo(gem);
3431         struct drm_i915_file_private *fpriv = file->driver_priv;
3432         struct i915_lut_handle *lut, *ln;
3433
3434         mutex_lock(&i915->drm.struct_mutex);
3435
3436         list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
3437                 struct i915_gem_context *ctx = lut->ctx;
3438                 struct i915_vma *vma;
3439
3440                 GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
3441                 if (ctx->file_priv != fpriv)
3442                         continue;
3443
3444                 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
3445                 GEM_BUG_ON(vma->obj != obj);
3446
3447                 /* We allow the process to have multiple handles to the same
3448                  * vma, in the same fd namespace, by virtue of flink/open.
3449                  */
3450                 GEM_BUG_ON(!vma->open_count);
3451                 if (!--vma->open_count && !i915_vma_is_ggtt(vma))
3452                         i915_vma_close(vma);
3453
3454                 list_del(&lut->obj_link);
3455                 list_del(&lut->ctx_link);
3456
3457                 kmem_cache_free(i915->luts, lut);
3458                 __i915_gem_object_release_unless_active(obj);
3459         }
3460
3461         mutex_unlock(&i915->drm.struct_mutex);
3462 }
3463
3464 static unsigned long to_wait_timeout(s64 timeout_ns)
3465 {
3466         if (timeout_ns < 0)
3467                 return MAX_SCHEDULE_TIMEOUT;
3468
3469         if (timeout_ns == 0)
3470                 return 0;
3471
3472         return nsecs_to_jiffies_timeout(timeout_ns);
3473 }
3474
3475 /**
3476  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3477  * @dev: drm device pointer
3478  * @data: ioctl data blob
3479  * @file: drm file pointer
3480  *
3481  * Returns 0 if successful, else an error is returned with the remaining time in
3482  * the timeout parameter.
3483  *  -ETIME: object is still busy after timeout
3484  *  -ERESTARTSYS: signal interrupted the wait
3485  *  -ENONENT: object doesn't exist
3486  * Also possible, but rare:
3487  *  -EAGAIN: incomplete, restart syscall
3488  *  -ENOMEM: damn
3489  *  -ENODEV: Internal IRQ fail
3490  *  -E?: The add request failed
3491  *
3492  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3493  * non-zero timeout parameter the wait ioctl will wait for the given number of
3494  * nanoseconds on an object becoming unbusy. Since the wait itself does so
3495  * without holding struct_mutex the object may become re-busied before this
3496  * function completes. A similar but shorter * race condition exists in the busy
3497  * ioctl
3498  */
3499 int
3500 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3501 {
3502         struct drm_i915_gem_wait *args = data;
3503         struct drm_i915_gem_object *obj;
3504         ktime_t start;
3505         long ret;
3506
3507         if (args->flags != 0)
3508                 return -EINVAL;
3509
3510         obj = i915_gem_object_lookup(file, args->bo_handle);
3511         if (!obj)
3512                 return -ENOENT;
3513
3514         start = ktime_get();
3515
3516         ret = i915_gem_object_wait(obj,
3517                                    I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
3518                                    to_wait_timeout(args->timeout_ns),
3519                                    to_rps_client(file));
3520
3521         if (args->timeout_ns > 0) {
3522                 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3523                 if (args->timeout_ns < 0)
3524                         args->timeout_ns = 0;
3525
3526                 /*
3527                  * Apparently ktime isn't accurate enough and occasionally has a
3528                  * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3529                  * things up to make the test happy. We allow up to 1 jiffy.
3530                  *
3531                  * This is a regression from the timespec->ktime conversion.
3532                  */
3533                 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3534                         args->timeout_ns = 0;
3535
3536                 /* Asked to wait beyond the jiffie/scheduler precision? */
3537                 if (ret == -ETIME && args->timeout_ns)
3538                         ret = -EAGAIN;
3539         }
3540
3541         i915_gem_object_put(obj);
3542         return ret;
3543 }
3544
3545 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
3546 {
3547         int ret, i;
3548
3549         for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3550                 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
3551                 if (ret)
3552                         return ret;
3553         }
3554
3555         return 0;
3556 }
3557
3558 static int wait_for_engines(struct drm_i915_private *i915)
3559 {
3560         if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
3561                 dev_err(i915->drm.dev,
3562                         "Failed to idle engines, declaring wedged!\n");
3563                 if (drm_debug & DRM_UT_DRIVER) {
3564                         struct drm_printer p = drm_debug_printer(__func__);
3565                         struct intel_engine_cs *engine;
3566                         enum intel_engine_id id;
3567
3568                         for_each_engine(engine, i915, id)
3569                                 intel_engine_dump(engine, &p,
3570                                                   "%s", engine->name);
3571                 }
3572
3573                 i915_gem_set_wedged(i915);
3574                 return -EIO;
3575         }
3576
3577         return 0;
3578 }
3579
3580 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3581 {
3582         int ret;
3583
3584         /* If the device is asleep, we have no requests outstanding */
3585         if (!READ_ONCE(i915->gt.awake))
3586                 return 0;
3587
3588         if (flags & I915_WAIT_LOCKED) {
3589                 struct i915_gem_timeline *tl;
3590
3591                 lockdep_assert_held(&i915->drm.struct_mutex);
3592
3593                 list_for_each_entry(tl, &i915->gt.timelines, link) {
3594                         ret = wait_for_timeline(tl, flags);
3595                         if (ret)
3596                                 return ret;
3597                 }
3598                 i915_gem_retire_requests(i915);
3599
3600                 ret = wait_for_engines(i915);
3601         } else {
3602                 ret = wait_for_timeline(&i915->gt.global_timeline, flags);
3603         }
3604
3605         return ret;
3606 }
3607
3608 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3609 {
3610         /*
3611          * We manually flush the CPU domain so that we can override and
3612          * force the flush for the display, and perform it asyncrhonously.
3613          */
3614         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3615         if (obj->cache_dirty)
3616                 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
3617         obj->base.write_domain = 0;
3618 }
3619
3620 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
3621 {
3622         if (!READ_ONCE(obj->pin_global))
3623                 return;
3624
3625         mutex_lock(&obj->base.dev->struct_mutex);
3626         __i915_gem_object_flush_for_display(obj);
3627         mutex_unlock(&obj->base.dev->struct_mutex);
3628 }
3629
3630 /**
3631  * Moves a single object to the WC read, and possibly write domain.
3632  * @obj: object to act on
3633  * @write: ask for write access or read only
3634  *
3635  * This function returns when the move is complete, including waiting on
3636  * flushes to occur.
3637  */
3638 int
3639 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3640 {
3641         int ret;
3642
3643         lockdep_assert_held(&obj->base.dev->struct_mutex);
3644
3645         ret = i915_gem_object_wait(obj,
3646                                    I915_WAIT_INTERRUPTIBLE |
3647                                    I915_WAIT_LOCKED |
3648                                    (write ? I915_WAIT_ALL : 0),
3649                                    MAX_SCHEDULE_TIMEOUT,
3650                                    NULL);
3651         if (ret)
3652                 return ret;
3653
3654         if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
3655                 return 0;
3656
3657         /* Flush and acquire obj->pages so that we are coherent through
3658          * direct access in memory with previous cached writes through
3659          * shmemfs and that our cache domain tracking remains valid.
3660          * For example, if the obj->filp was moved to swap without us
3661          * being notified and releasing the pages, we would mistakenly
3662          * continue to assume that the obj remained out of the CPU cached
3663          * domain.
3664          */
3665         ret = i915_gem_object_pin_pages(obj);
3666         if (ret)
3667                 return ret;
3668
3669         flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
3670
3671         /* Serialise direct access to this object with the barriers for
3672          * coherent writes from the GPU, by effectively invalidating the
3673          * WC domain upon first access.
3674          */
3675         if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
3676                 mb();
3677
3678         /* It should now be out of any other write domains, and we can update
3679          * the domain values for our changes.
3680          */
3681         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
3682         obj->base.read_domains |= I915_GEM_DOMAIN_WC;
3683         if (write) {
3684                 obj->base.read_domains = I915_GEM_DOMAIN_WC;
3685                 obj->base.write_domain = I915_GEM_DOMAIN_WC;
3686                 obj->mm.dirty = true;
3687         }
3688
3689         i915_gem_object_unpin_pages(obj);
3690         return 0;
3691 }
3692
3693 /**
3694  * Moves a single object to the GTT read, and possibly write domain.
3695  * @obj: object to act on
3696  * @write: ask for write access or read only
3697  *
3698  * This function returns when the move is complete, including waiting on
3699  * flushes to occur.
3700  */
3701 int
3702 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3703 {
3704         int ret;
3705
3706         lockdep_assert_held(&obj->base.dev->struct_mutex);
3707
3708         ret = i915_gem_object_wait(obj,
3709                                    I915_WAIT_INTERRUPTIBLE |
3710                                    I915_WAIT_LOCKED |
3711                                    (write ? I915_WAIT_ALL : 0),
3712                                    MAX_SCHEDULE_TIMEOUT,
3713                                    NULL);
3714         if (ret)
3715                 return ret;
3716
3717         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3718                 return 0;
3719
3720         /* Flush and acquire obj->pages so that we are coherent through
3721          * direct access in memory with previous cached writes through
3722          * shmemfs and that our cache domain tracking remains valid.
3723          * For example, if the obj->filp was moved to swap without us
3724          * being notified and releasing the pages, we would mistakenly
3725          * continue to assume that the obj remained out of the CPU cached
3726          * domain.
3727          */
3728         ret = i915_gem_object_pin_pages(obj);
3729         if (ret)
3730                 return ret;
3731
3732         flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
3733
3734         /* Serialise direct access to this object with the barriers for
3735          * coherent writes from the GPU, by effectively invalidating the
3736          * GTT domain upon first access.
3737          */
3738         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3739                 mb();
3740
3741         /* It should now be out of any other write domains, and we can update
3742          * the domain values for our changes.
3743          */
3744         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3745         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3746         if (write) {
3747                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3748                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3749                 obj->mm.dirty = true;
3750         }
3751
3752         i915_gem_object_unpin_pages(obj);
3753         return 0;
3754 }
3755
3756 /**
3757  * Changes the cache-level of an object across all VMA.
3758  * @obj: object to act on
3759  * @cache_level: new cache level to set for the object
3760  *
3761  * After this function returns, the object will be in the new cache-level
3762  * across all GTT and the contents of the backing storage will be coherent,
3763  * with respect to the new cache-level. In order to keep the backing storage
3764  * coherent for all users, we only allow a single cache level to be set
3765  * globally on the object and prevent it from being changed whilst the
3766  * hardware is reading from the object. That is if the object is currently
3767  * on the scanout it will be set to uncached (or equivalent display
3768  * cache coherency) and all non-MOCS GPU access will also be uncached so
3769  * that all direct access to the scanout remains coherent.
3770  */
3771 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3772                                     enum i915_cache_level cache_level)
3773 {
3774         struct i915_vma *vma;
3775         int ret;
3776
3777         lockdep_assert_held(&obj->base.dev->struct_mutex);
3778
3779         if (obj->cache_level == cache_level)
3780                 return 0;
3781
3782         /* Inspect the list of currently bound VMA and unbind any that would
3783          * be invalid given the new cache-level. This is principally to
3784          * catch the issue of the CS prefetch crossing page boundaries and
3785          * reading an invalid PTE on older architectures.
3786          */
3787 restart:
3788         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3789                 if (!drm_mm_node_allocated(&vma->node))
3790                         continue;
3791
3792                 if (i915_vma_is_pinned(vma)) {
3793                         DRM_DEBUG("can not change the cache level of pinned objects\n");
3794                         return -EBUSY;
3795                 }
3796
3797                 if (!i915_vma_is_closed(vma) &&
3798                     i915_gem_valid_gtt_space(vma, cache_level))
3799                         continue;
3800
3801                 ret = i915_vma_unbind(vma);
3802                 if (ret)
3803                         return ret;
3804
3805                 /* As unbinding may affect other elements in the
3806                  * obj->vma_list (due to side-effects from retiring
3807                  * an active vma), play safe and restart the iterator.
3808                  */
3809                 goto restart;
3810         }
3811
3812         /* We can reuse the existing drm_mm nodes but need to change the
3813          * cache-level on the PTE. We could simply unbind them all and
3814          * rebind with the correct cache-level on next use. However since
3815          * we already have a valid slot, dma mapping, pages etc, we may as
3816          * rewrite the PTE in the belief that doing so tramples upon less
3817          * state and so involves less work.
3818          */
3819         if (obj->bind_count) {
3820                 /* Before we change the PTE, the GPU must not be accessing it.
3821                  * If we wait upon the object, we know that all the bound
3822                  * VMA are no longer active.
3823                  */
3824                 ret = i915_gem_object_wait(obj,
3825                                            I915_WAIT_INTERRUPTIBLE |
3826                                            I915_WAIT_LOCKED |
3827                                            I915_WAIT_ALL,
3828                                            MAX_SCHEDULE_TIMEOUT,
3829                                            NULL);
3830                 if (ret)
3831                         return ret;
3832
3833                 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3834                     cache_level != I915_CACHE_NONE) {
3835                         /* Access to snoopable pages through the GTT is
3836                          * incoherent and on some machines causes a hard
3837                          * lockup. Relinquish the CPU mmaping to force
3838                          * userspace to refault in the pages and we can
3839                          * then double check if the GTT mapping is still
3840                          * valid for that pointer access.
3841                          */
3842                         i915_gem_release_mmap(obj);
3843
3844                         /* As we no longer need a fence for GTT access,
3845                          * we can relinquish it now (and so prevent having
3846                          * to steal a fence from someone else on the next
3847                          * fence request). Note GPU activity would have
3848                          * dropped the fence as all snoopable access is
3849                          * supposed to be linear.
3850                          */
3851                         for_each_ggtt_vma(vma, obj) {
3852                                 ret = i915_vma_put_fence(vma);
3853                                 if (ret)
3854                                         return ret;
3855                         }
3856                 } else {
3857                         /* We either have incoherent backing store and
3858                          * so no GTT access or the architecture is fully
3859                          * coherent. In such cases, existing GTT mmaps
3860                          * ignore the cache bit in the PTE and we can
3861                          * rewrite it without confusing the GPU or having
3862                          * to force userspace to fault back in its mmaps.
3863                          */
3864                 }
3865
3866                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3867                         if (!drm_mm_node_allocated(&vma->node))
3868                                 continue;
3869
3870                         ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3871                         if (ret)
3872                                 return ret;
3873                 }
3874         }
3875
3876         list_for_each_entry(vma, &obj->vma_list, obj_link)
3877                 vma->node.color = cache_level;
3878         i915_gem_object_set_cache_coherency(obj, cache_level);
3879         obj->cache_dirty = true; /* Always invalidate stale cachelines */
3880
3881         return 0;
3882 }
3883
3884 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3885                                struct drm_file *file)
3886 {
3887         struct drm_i915_gem_caching *args = data;
3888         struct drm_i915_gem_object *obj;
3889         int err = 0;
3890
3891         rcu_read_lock();
3892         obj = i915_gem_object_lookup_rcu(file, args->handle);
3893         if (!obj) {
3894                 err = -ENOENT;
3895                 goto out;
3896         }
3897
3898         switch (obj->cache_level) {
3899         case I915_CACHE_LLC:
3900         case I915_CACHE_L3_LLC:
3901                 args->caching = I915_CACHING_CACHED;
3902                 break;
3903
3904         case I915_CACHE_WT:
3905                 args->caching = I915_CACHING_DISPLAY;
3906                 break;
3907
3908         default:
3909                 args->caching = I915_CACHING_NONE;
3910                 break;
3911         }
3912 out:
3913         rcu_read_unlock();
3914         return err;
3915 }
3916
3917 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3918                                struct drm_file *file)
3919 {
3920         struct drm_i915_private *i915 = to_i915(dev);
3921         struct drm_i915_gem_caching *args = data;
3922         struct drm_i915_gem_object *obj;
3923         enum i915_cache_level level;
3924         int ret = 0;
3925
3926         switch (args->caching) {
3927         case I915_CACHING_NONE:
3928                 level = I915_CACHE_NONE;
3929                 break;
3930         case I915_CACHING_CACHED:
3931                 /*
3932                  * Due to a HW issue on BXT A stepping, GPU stores via a
3933                  * snooped mapping may leave stale data in a corresponding CPU
3934                  * cacheline, whereas normally such cachelines would get
3935                  * invalidated.
3936                  */
3937                 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3938                         return -ENODEV;
3939
3940                 level = I915_CACHE_LLC;
3941                 break;
3942         case I915_CACHING_DISPLAY:
3943                 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3944                 break;
3945         default:
3946                 return -EINVAL;
3947         }
3948
3949         obj = i915_gem_object_lookup(file, args->handle);
3950         if (!obj)
3951                 return -ENOENT;
3952
3953         /*
3954          * The caching mode of proxy object is handled by its generator, and
3955          * not allowed to be changed by userspace.
3956          */
3957         if (i915_gem_object_is_proxy(obj)) {
3958                 ret = -ENXIO;
3959                 goto out;
3960         }
3961
3962         if (obj->cache_level == level)
3963                 goto out;
3964
3965         ret = i915_gem_object_wait(obj,
3966                                    I915_WAIT_INTERRUPTIBLE,
3967                                    MAX_SCHEDULE_TIMEOUT,
3968                                    to_rps_client(file));
3969         if (ret)
3970                 goto out;
3971
3972         ret = i915_mutex_lock_interruptible(dev);
3973         if (ret)
3974                 goto out;
3975
3976         ret = i915_gem_object_set_cache_level(obj, level);
3977         mutex_unlock(&dev->struct_mutex);
3978
3979 out:
3980         i915_gem_object_put(obj);
3981         return ret;
3982 }
3983
3984 /*
3985  * Prepare buffer for display plane (scanout, cursors, etc).
3986  * Can be called from an uninterruptible phase (modesetting) and allows
3987  * any flushes to be pipelined (for pageflips).
3988  */
3989 struct i915_vma *
3990 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3991                                      u32 alignment,
3992                                      const struct i915_ggtt_view *view)
3993 {
3994         struct i915_vma *vma;
3995         int ret;
3996
3997         lockdep_assert_held(&obj->base.dev->struct_mutex);
3998
3999         /* Mark the global pin early so that we account for the
4000          * display coherency whilst setting up the cache domains.
4001          */
4002         obj->pin_global++;
4003
4004         /* The display engine is not coherent with the LLC cache on gen6.  As
4005          * a result, we make sure that the pinning that is about to occur is
4006          * done with uncached PTEs. This is lowest common denominator for all
4007          * chipsets.
4008          *
4009          * However for gen6+, we could do better by using the GFDT bit instead
4010          * of uncaching, which would allow us to flush all the LLC-cached data
4011          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
4012          */
4013         ret = i915_gem_object_set_cache_level(obj,
4014                                               HAS_WT(to_i915(obj->base.dev)) ?
4015                                               I915_CACHE_WT : I915_CACHE_NONE);
4016         if (ret) {
4017                 vma = ERR_PTR(ret);
4018                 goto err_unpin_global;
4019         }
4020
4021         /* As the user may map the buffer once pinned in the display plane
4022          * (e.g. libkms for the bootup splash), we have to ensure that we
4023          * always use map_and_fenceable for all scanout buffers. However,
4024          * it may simply be too big to fit into mappable, in which case
4025          * put it anyway and hope that userspace can cope (but always first
4026          * try to preserve the existing ABI).
4027          */
4028         vma = ERR_PTR(-ENOSPC);
4029         if (!view || view->type == I915_GGTT_VIEW_NORMAL)
4030                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
4031                                                PIN_MAPPABLE | PIN_NONBLOCK);
4032         if (IS_ERR(vma)) {
4033                 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4034                 unsigned int flags;
4035
4036                 /* Valleyview is definitely limited to scanning out the first
4037                  * 512MiB. Lets presume this behaviour was inherited from the
4038                  * g4x display engine and that all earlier gen are similarly
4039                  * limited. Testing suggests that it is a little more
4040                  * complicated than this. For example, Cherryview appears quite
4041                  * happy to scanout from anywhere within its global aperture.
4042                  */
4043                 flags = 0;
4044                 if (HAS_GMCH_DISPLAY(i915))
4045                         flags = PIN_MAPPABLE;
4046                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
4047         }
4048         if (IS_ERR(vma))
4049                 goto err_unpin_global;
4050
4051         vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
4052
4053         /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
4054         __i915_gem_object_flush_for_display(obj);
4055         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
4056
4057         /* It should now be out of any other write domains, and we can update
4058          * the domain values for our changes.
4059          */
4060         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4061
4062         return vma;
4063
4064 err_unpin_global:
4065         obj->pin_global--;
4066         return vma;
4067 }
4068
4069 void
4070 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
4071 {
4072         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
4073
4074         if (WARN_ON(vma->obj->pin_global == 0))
4075                 return;
4076
4077         if (--vma->obj->pin_global == 0)
4078                 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
4079
4080         /* Bump the LRU to try and avoid premature eviction whilst flipping  */
4081         i915_gem_object_bump_inactive_ggtt(vma->obj);
4082
4083         i915_vma_unpin(vma);
4084 }
4085
4086 /**
4087  * Moves a single object to the CPU read, and possibly write domain.
4088  * @obj: object to act on
4089  * @write: requesting write or read-only access
4090  *
4091  * This function returns when the move is complete, including waiting on
4092  * flushes to occur.
4093  */
4094 int
4095 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4096 {
4097         int ret;
4098
4099         lockdep_assert_held(&obj->base.dev->struct_mutex);
4100
4101         ret = i915_gem_object_wait(obj,
4102                                    I915_WAIT_INTERRUPTIBLE |
4103                                    I915_WAIT_LOCKED |
4104                                    (write ? I915_WAIT_ALL : 0),
4105                                    MAX_SCHEDULE_TIMEOUT,
4106                                    NULL);
4107         if (ret)
4108                 return ret;
4109
4110         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
4111
4112         /* Flush the CPU cache if it's still invalid. */
4113         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4114                 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
4115                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4116         }
4117
4118         /* It should now be out of any other write domains, and we can update
4119          * the domain values for our changes.
4120          */
4121         GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
4122
4123         /* If we're writing through the CPU, then the GPU read domains will
4124          * need to be invalidated at next use.
4125          */
4126         if (write)
4127                 __start_cpu_write(obj);
4128
4129         return 0;
4130 }
4131
4132 /* Throttle our rendering by waiting until the ring has completed our requests
4133  * emitted over 20 msec ago.
4134  *
4135  * Note that if we were to use the current jiffies each time around the loop,
4136  * we wouldn't escape the function with any frames outstanding if the time to
4137  * render a frame was over 20ms.
4138  *
4139  * This should get us reasonable parallelism between CPU and GPU but also
4140  * relatively low latency when blocking on a particular request to finish.
4141  */
4142 static int
4143 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4144 {
4145         struct drm_i915_private *dev_priv = to_i915(dev);
4146         struct drm_i915_file_private *file_priv = file->driver_priv;
4147         unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4148         struct drm_i915_gem_request *request, *target = NULL;
4149         long ret;
4150
4151         /* ABI: return -EIO if already wedged */
4152         if (i915_terminally_wedged(&dev_priv->gpu_error))
4153                 return -EIO;
4154
4155         spin_lock(&file_priv->mm.lock);
4156         list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
4157                 if (time_after_eq(request->emitted_jiffies, recent_enough))
4158                         break;
4159
4160                 if (target) {
4161                         list_del(&target->client_link);
4162                         target->file_priv = NULL;
4163                 }
4164
4165                 target = request;
4166         }
4167         if (target)
4168                 i915_gem_request_get(target);
4169         spin_unlock(&file_priv->mm.lock);
4170
4171         if (target == NULL)
4172                 return 0;
4173
4174         ret = i915_wait_request(target,
4175                                 I915_WAIT_INTERRUPTIBLE,
4176                                 MAX_SCHEDULE_TIMEOUT);
4177         i915_gem_request_put(target);
4178
4179         return ret < 0 ? ret : 0;
4180 }
4181
4182 struct i915_vma *
4183 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4184                          const struct i915_ggtt_view *view,
4185                          u64 size,
4186                          u64 alignment,
4187                          u64 flags)
4188 {
4189         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
4190         struct i915_address_space *vm = &dev_priv->ggtt.base;
4191         struct i915_vma *vma;
4192         int ret;
4193
4194         lockdep_assert_held(&obj->base.dev->struct_mutex);
4195
4196         if (!view && flags & PIN_MAPPABLE) {
4197                 /* If the required space is larger than the available
4198                  * aperture, we will not able to find a slot for the
4199                  * object and unbinding the object now will be in
4200                  * vain. Worse, doing so may cause us to ping-pong
4201                  * the object in and out of the Global GTT and
4202                  * waste a lot of cycles under the mutex.
4203                  */
4204                 if (obj->base.size > dev_priv->ggtt.mappable_end)
4205                         return ERR_PTR(-E2BIG);
4206
4207                 /* If NONBLOCK is set the caller is optimistically
4208                  * trying to cache the full object within the mappable
4209                  * aperture, and *must* have a fallback in place for
4210                  * situations where we cannot bind the object. We
4211                  * can be a little more lax here and use the fallback
4212                  * more often to avoid costly migrations of ourselves
4213                  * and other objects within the aperture.
4214                  *
4215                  * Half-the-aperture is used as a simple heuristic.
4216                  * More interesting would to do search for a free
4217                  * block prior to making the commitment to unbind.
4218                  * That caters for the self-harm case, and with a
4219                  * little more heuristics (e.g. NOFAULT, NOEVICT)
4220                  * we could try to minimise harm to others.
4221                  */
4222                 if (flags & PIN_NONBLOCK &&
4223                     obj->base.size > dev_priv->ggtt.mappable_end / 2)
4224                         return ERR_PTR(-ENOSPC);
4225         }
4226
4227         vma = i915_vma_instance(obj, vm, view);
4228         if (unlikely(IS_ERR(vma)))
4229                 return vma;
4230
4231         if (i915_vma_misplaced(vma, size, alignment, flags)) {
4232                 if (flags & PIN_NONBLOCK) {
4233                         if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
4234                                 return ERR_PTR(-ENOSPC);
4235
4236                         if (flags & PIN_MAPPABLE &&
4237                             vma->fence_size > dev_priv->ggtt.mappable_end / 2)
4238                                 return ERR_PTR(-ENOSPC);
4239                 }
4240
4241                 WARN(i915_vma_is_pinned(vma),
4242                      "bo is already pinned in ggtt with incorrect alignment:"
4243                      " offset=%08x, req.alignment=%llx,"
4244                      " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
4245                      i915_ggtt_offset(vma), alignment,
4246                      !!(flags & PIN_MAPPABLE),
4247                      i915_vma_is_map_and_fenceable(vma));
4248                 ret = i915_vma_unbind(vma);
4249                 if (ret)
4250                         return ERR_PTR(ret);
4251         }
4252
4253         ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
4254         if (ret)
4255                 return ERR_PTR(ret);
4256
4257         return vma;
4258 }
4259
4260 static __always_inline unsigned int __busy_read_flag(unsigned int id)
4261 {
4262         /* Note that we could alias engines in the execbuf API, but
4263          * that would be very unwise as it prevents userspace from
4264          * fine control over engine selection. Ahem.
4265          *
4266          * This should be something like EXEC_MAX_ENGINE instead of
4267          * I915_NUM_ENGINES.
4268          */
4269         BUILD_BUG_ON(I915_NUM_ENGINES > 16);
4270         return 0x10000 << id;
4271 }
4272
4273 static __always_inline unsigned int __busy_write_id(unsigned int id)
4274 {
4275         /* The uABI guarantees an active writer is also amongst the read
4276          * engines. This would be true if we accessed the activity tracking
4277          * under the lock, but as we perform the lookup of the object and
4278          * its activity locklessly we can not guarantee that the last_write
4279          * being active implies that we have set the same engine flag from
4280          * last_read - hence we always set both read and write busy for
4281          * last_write.
4282          */
4283         return id | __busy_read_flag(id);
4284 }
4285
4286 static __always_inline unsigned int
4287 __busy_set_if_active(const struct dma_fence *fence,
4288                      unsigned int (*flag)(unsigned int id))
4289 {
4290         struct drm_i915_gem_request *rq;
4291
4292         /* We have to check the current hw status of the fence as the uABI
4293          * guarantees forward progress. We could rely on the idle worker
4294          * to eventually flush us, but to minimise latency just ask the
4295          * hardware.
4296          *
4297          * Note we only report on the status of native fences.
4298          */
4299         if (!dma_fence_is_i915(fence))
4300                 return 0;
4301
4302         /* opencode to_request() in order to avoid const warnings */
4303         rq = container_of(fence, struct drm_i915_gem_request, fence);
4304         if (i915_gem_request_completed(rq))
4305                 return 0;
4306
4307         return flag(rq->engine->uabi_id);
4308 }
4309
4310 static __always_inline unsigned int
4311 busy_check_reader(const struct dma_fence *fence)
4312 {
4313         return __busy_set_if_active(fence, __busy_read_flag);
4314 }
4315
4316 static __always_inline unsigned int
4317 busy_check_writer(const struct dma_fence *fence)
4318 {
4319         if (!fence)
4320                 return 0;
4321
4322         return __busy_set_if_active(fence, __busy_write_id);
4323 }
4324
4325 int
4326 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4327                     struct drm_file *file)
4328 {
4329         struct drm_i915_gem_busy *args = data;
4330         struct drm_i915_gem_object *obj;
4331         struct reservation_object_list *list;
4332         unsigned int seq;
4333         int err;
4334
4335         err = -ENOENT;
4336         rcu_read_lock();
4337         obj = i915_gem_object_lookup_rcu(file, args->handle);
4338         if (!obj)
4339                 goto out;
4340
4341         /* A discrepancy here is that we do not report the status of
4342          * non-i915 fences, i.e. even though we may report the object as idle,
4343          * a call to set-domain may still stall waiting for foreign rendering.
4344          * This also means that wait-ioctl may report an object as busy,
4345          * where busy-ioctl considers it idle.
4346          *
4347          * We trade the ability to warn of foreign fences to report on which
4348          * i915 engines are active for the object.
4349          *
4350          * Alternatively, we can trade that extra information on read/write
4351          * activity with
4352          *      args->busy =
4353          *              !reservation_object_test_signaled_rcu(obj->resv, true);
4354          * to report the overall busyness. This is what the wait-ioctl does.
4355          *
4356          */
4357 retry:
4358         seq = raw_read_seqcount(&obj->resv->seq);
4359
4360         /* Translate the exclusive fence to the READ *and* WRITE engine */
4361         args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
4362
4363         /* Translate shared fences to READ set of engines */
4364         list = rcu_dereference(obj->resv->fence);
4365         if (list) {
4366                 unsigned int shared_count = list->shared_count, i;
4367
4368                 for (i = 0; i < shared_count; ++i) {
4369                         struct dma_fence *fence =
4370                                 rcu_dereference(list->shared[i]);
4371
4372                         args->busy |= busy_check_reader(fence);
4373                 }
4374         }
4375
4376         if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
4377                 goto retry;
4378
4379         err = 0;
4380 out:
4381         rcu_read_unlock();
4382         return err;
4383 }
4384
4385 int
4386 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4387                         struct drm_file *file_priv)
4388 {
4389         return i915_gem_ring_throttle(dev, file_priv);
4390 }
4391
4392 int
4393 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4394                        struct drm_file *file_priv)
4395 {
4396         struct drm_i915_private *dev_priv = to_i915(dev);
4397         struct drm_i915_gem_madvise *args = data;
4398         struct drm_i915_gem_object *obj;
4399         int err;
4400
4401         switch (args->madv) {
4402         case I915_MADV_DONTNEED:
4403         case I915_MADV_WILLNEED:
4404             break;
4405         default:
4406             return -EINVAL;
4407         }
4408
4409         obj = i915_gem_object_lookup(file_priv, args->handle);
4410         if (!obj)
4411                 return -ENOENT;
4412
4413         err = mutex_lock_interruptible(&obj->mm.lock);
4414         if (err)
4415                 goto out;
4416
4417         if (i915_gem_object_has_pages(obj) &&
4418             i915_gem_object_is_tiled(obj) &&
4419             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4420                 if (obj->mm.madv == I915_MADV_WILLNEED) {
4421                         GEM_BUG_ON(!obj->mm.quirked);
4422                         __i915_gem_object_unpin_pages(obj);
4423                         obj->mm.quirked = false;
4424                 }
4425                 if (args->madv == I915_MADV_WILLNEED) {
4426                         GEM_BUG_ON(obj->mm.quirked);
4427                         __i915_gem_object_pin_pages(obj);
4428                         obj->mm.quirked = true;
4429                 }
4430         }
4431
4432         if (obj->mm.madv != __I915_MADV_PURGED)
4433                 obj->mm.madv = args->madv;
4434
4435         /* if the object is no longer attached, discard its backing storage */
4436         if (obj->mm.madv == I915_MADV_DONTNEED &&
4437             !i915_gem_object_has_pages(obj))
4438                 i915_gem_object_truncate(obj);
4439
4440         args->retained = obj->mm.madv != __I915_MADV_PURGED;
4441         mutex_unlock(&obj->mm.lock);
4442
4443 out:
4444         i915_gem_object_put(obj);
4445         return err;
4446 }
4447
4448 static void
4449 frontbuffer_retire(struct i915_gem_active *active,
4450                    struct drm_i915_gem_request *request)
4451 {
4452         struct drm_i915_gem_object *obj =
4453                 container_of(active, typeof(*obj), frontbuffer_write);
4454
4455         intel_fb_obj_flush(obj, ORIGIN_CS);
4456 }
4457
4458 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4459                           const struct drm_i915_gem_object_ops *ops)
4460 {
4461         mutex_init(&obj->mm.lock);
4462
4463         INIT_LIST_HEAD(&obj->vma_list);
4464         INIT_LIST_HEAD(&obj->lut_list);
4465         INIT_LIST_HEAD(&obj->batch_pool_link);
4466
4467         obj->ops = ops;
4468
4469         reservation_object_init(&obj->__builtin_resv);
4470         obj->resv = &obj->__builtin_resv;
4471
4472         obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4473         init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
4474
4475         obj->mm.madv = I915_MADV_WILLNEED;
4476         INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
4477         mutex_init(&obj->mm.get_page.lock);
4478
4479         i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4480 }
4481
4482 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4483         .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
4484                  I915_GEM_OBJECT_IS_SHRINKABLE,
4485
4486         .get_pages = i915_gem_object_get_pages_gtt,
4487         .put_pages = i915_gem_object_put_pages_gtt,
4488
4489         .pwrite = i915_gem_object_pwrite_gtt,
4490 };
4491
4492 static int i915_gem_object_create_shmem(struct drm_device *dev,
4493                                         struct drm_gem_object *obj,
4494                                         size_t size)
4495 {
4496         struct drm_i915_private *i915 = to_i915(dev);
4497         unsigned long flags = VM_NORESERVE;
4498         struct file *filp;
4499
4500         drm_gem_private_object_init(dev, obj, size);
4501
4502         if (i915->mm.gemfs)
4503                 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
4504                                                  flags);
4505         else
4506                 filp = shmem_file_setup("i915", size, flags);
4507
4508         if (IS_ERR(filp))
4509                 return PTR_ERR(filp);
4510
4511         obj->filp = filp;
4512
4513         return 0;
4514 }
4515
4516 struct drm_i915_gem_object *
4517 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4518 {
4519         struct drm_i915_gem_object *obj;
4520         struct address_space *mapping;
4521         unsigned int cache_level;
4522         gfp_t mask;
4523         int ret;
4524
4525         /* There is a prevalence of the assumption that we fit the object's
4526          * page count inside a 32bit _signed_ variable. Let's document this and
4527          * catch if we ever need to fix it. In the meantime, if you do spot
4528          * such a local variable, please consider fixing!
4529          */
4530         if (size >> PAGE_SHIFT > INT_MAX)
4531                 return ERR_PTR(-E2BIG);
4532
4533         if (overflows_type(size, obj->base.size))
4534                 return ERR_PTR(-E2BIG);
4535
4536         obj = i915_gem_object_alloc(dev_priv);
4537         if (obj == NULL)
4538                 return ERR_PTR(-ENOMEM);
4539
4540         ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
4541         if (ret)
4542                 goto fail;
4543
4544         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4545         if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4546                 /* 965gm cannot relocate objects above 4GiB. */
4547                 mask &= ~__GFP_HIGHMEM;
4548                 mask |= __GFP_DMA32;
4549         }
4550
4551         mapping = obj->base.filp->f_mapping;
4552         mapping_set_gfp_mask(mapping, mask);
4553         GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4554
4555         i915_gem_object_init(obj, &i915_gem_object_ops);
4556
4557         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4558         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4559
4560         if (HAS_LLC(dev_priv))
4561                 /* On some devices, we can have the GPU use the LLC (the CPU
4562                  * cache) for about a 10% performance improvement
4563                  * compared to uncached.  Graphics requests other than
4564                  * display scanout are coherent with the CPU in
4565                  * accessing this cache.  This means in this mode we
4566                  * don't need to clflush on the CPU side, and on the
4567                  * GPU side we only need to flush internal caches to
4568                  * get data visible to the CPU.
4569                  *
4570                  * However, we maintain the display planes as UC, and so
4571                  * need to rebind when first used as such.
4572                  */
4573                 cache_level = I915_CACHE_LLC;
4574         else
4575                 cache_level = I915_CACHE_NONE;
4576
4577         i915_gem_object_set_cache_coherency(obj, cache_level);
4578
4579         trace_i915_gem_object_create(obj);
4580
4581         return obj;
4582
4583 fail:
4584         i915_gem_object_free(obj);
4585         return ERR_PTR(ret);
4586 }
4587
4588 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4589 {
4590         /* If we are the last user of the backing storage (be it shmemfs
4591          * pages or stolen etc), we know that the pages are going to be
4592          * immediately released. In this case, we can then skip copying
4593          * back the contents from the GPU.
4594          */
4595
4596         if (obj->mm.madv != I915_MADV_WILLNEED)
4597                 return false;
4598
4599         if (obj->base.filp == NULL)
4600                 return true;
4601
4602         /* At first glance, this looks racy, but then again so would be
4603          * userspace racing mmap against close. However, the first external
4604          * reference to the filp can only be obtained through the
4605          * i915_gem_mmap_ioctl() which safeguards us against the user
4606          * acquiring such a reference whilst we are in the middle of
4607          * freeing the object.
4608          */
4609         return atomic_long_read(&obj->base.filp->f_count) == 1;
4610 }
4611
4612 static void __i915_gem_free_objects(struct drm_i915_private *i915,
4613                                     struct llist_node *freed)
4614 {
4615         struct drm_i915_gem_object *obj, *on;
4616
4617         intel_runtime_pm_get(i915);
4618         llist_for_each_entry_safe(obj, on, freed, freed) {
4619                 struct i915_vma *vma, *vn;
4620
4621                 trace_i915_gem_object_destroy(obj);
4622
4623                 mutex_lock(&i915->drm.struct_mutex);
4624
4625                 GEM_BUG_ON(i915_gem_object_is_active(obj));
4626                 list_for_each_entry_safe(vma, vn,
4627                                          &obj->vma_list, obj_link) {
4628                         GEM_BUG_ON(i915_vma_is_active(vma));
4629                         vma->flags &= ~I915_VMA_PIN_MASK;
4630                         i915_vma_close(vma);
4631                 }
4632                 GEM_BUG_ON(!list_empty(&obj->vma_list));
4633                 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
4634
4635                 /* This serializes freeing with the shrinker. Since the free
4636                  * is delayed, first by RCU then by the workqueue, we want the
4637                  * shrinker to be able to free pages of unreferenced objects,
4638                  * or else we may oom whilst there are plenty of deferred
4639                  * freed objects.
4640                  */
4641                 if (i915_gem_object_has_pages(obj)) {
4642                         spin_lock(&i915->mm.obj_lock);
4643                         list_del_init(&obj->mm.link);
4644                         spin_unlock(&i915->mm.obj_lock);
4645                 }
4646
4647                 mutex_unlock(&i915->drm.struct_mutex);
4648
4649                 GEM_BUG_ON(obj->bind_count);
4650                 GEM_BUG_ON(obj->userfault_count);
4651                 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4652                 GEM_BUG_ON(!list_empty(&obj->lut_list));
4653
4654                 if (obj->ops->release)
4655                         obj->ops->release(obj);
4656
4657                 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4658                         atomic_set(&obj->mm.pages_pin_count, 0);
4659                 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4660                 GEM_BUG_ON(i915_gem_object_has_pages(obj));
4661
4662                 if (obj->base.import_attach)
4663                         drm_prime_gem_destroy(&obj->base, NULL);
4664
4665                 reservation_object_fini(&obj->__builtin_resv);
4666                 drm_gem_object_release(&obj->base);
4667                 i915_gem_info_remove_obj(i915, obj->base.size);
4668
4669                 kfree(obj->bit_17);
4670                 i915_gem_object_free(obj);
4671
4672                 if (on)
4673                         cond_resched();
4674         }
4675         intel_runtime_pm_put(i915);
4676 }
4677
4678 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4679 {
4680         struct llist_node *freed;
4681
4682         /* Free the oldest, most stale object to keep the free_list short */
4683         freed = NULL;
4684         if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
4685                 /* Only one consumer of llist_del_first() allowed */
4686                 spin_lock(&i915->mm.free_lock);
4687                 freed = llist_del_first(&i915->mm.free_list);
4688                 spin_unlock(&i915->mm.free_lock);
4689         }
4690         if (unlikely(freed)) {
4691                 freed->next = NULL;
4692                 __i915_gem_free_objects(i915, freed);
4693         }
4694 }
4695
4696 static void __i915_gem_free_work(struct work_struct *work)
4697 {
4698         struct drm_i915_private *i915 =
4699                 container_of(work, struct drm_i915_private, mm.free_work);
4700         struct llist_node *freed;
4701
4702         /* All file-owned VMA should have been released by this point through
4703          * i915_gem_close_object(), or earlier by i915_gem_context_close().
4704          * However, the object may also be bound into the global GTT (e.g.
4705          * older GPUs without per-process support, or for direct access through
4706          * the GTT either for the user or for scanout). Those VMA still need to
4707          * unbound now.
4708          */
4709
4710         spin_lock(&i915->mm.free_lock);
4711         while ((freed = llist_del_all(&i915->mm.free_list))) {
4712                 spin_unlock(&i915->mm.free_lock);
4713
4714                 __i915_gem_free_objects(i915, freed);
4715                 if (need_resched())
4716                         return;
4717
4718                 spin_lock(&i915->mm.free_lock);
4719         }
4720         spin_unlock(&i915->mm.free_lock);
4721 }
4722
4723 static void __i915_gem_free_object_rcu(struct rcu_head *head)
4724 {
4725         struct drm_i915_gem_object *obj =
4726                 container_of(head, typeof(*obj), rcu);
4727         struct drm_i915_private *i915 = to_i915(obj->base.dev);
4728
4729         /* We can't simply use call_rcu() from i915_gem_free_object()
4730          * as we need to block whilst unbinding, and the call_rcu
4731          * task may be called from softirq context. So we take a
4732          * detour through a worker.
4733          */
4734         if (llist_add(&obj->freed, &i915->mm.free_list))
4735                 schedule_work(&i915->mm.free_work);
4736 }
4737
4738 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4739 {
4740         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4741
4742         if (obj->mm.quirked)
4743                 __i915_gem_object_unpin_pages(obj);
4744
4745         if (discard_backing_storage(obj))
4746                 obj->mm.madv = I915_MADV_DONTNEED;
4747
4748         /* Before we free the object, make sure any pure RCU-only
4749          * read-side critical sections are complete, e.g.
4750          * i915_gem_busy_ioctl(). For the corresponding synchronized
4751          * lookup see i915_gem_object_lookup_rcu().
4752          */
4753         call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4754 }
4755
4756 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4757 {
4758         lockdep_assert_held(&obj->base.dev->struct_mutex);
4759
4760         if (!i915_gem_object_has_active_reference(obj) &&
4761             i915_gem_object_is_active(obj))
4762                 i915_gem_object_set_active_reference(obj);
4763         else
4764                 i915_gem_object_put(obj);
4765 }
4766
4767 static void assert_kernel_context_is_current(struct drm_i915_private *i915)
4768 {
4769         struct i915_gem_context *kernel_context = i915->kernel_context;
4770         struct intel_engine_cs *engine;
4771         enum intel_engine_id id;
4772
4773         for_each_engine(engine, i915, id) {
4774                 GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
4775                 GEM_BUG_ON(engine->last_retired_context != kernel_context);
4776         }
4777 }
4778
4779 void i915_gem_sanitize(struct drm_i915_private *i915)
4780 {
4781         if (i915_terminally_wedged(&i915->gpu_error)) {
4782                 mutex_lock(&i915->drm.struct_mutex);
4783                 i915_gem_unset_wedged(i915);
4784                 mutex_unlock(&i915->drm.struct_mutex);
4785         }
4786
4787         /*
4788          * If we inherit context state from the BIOS or earlier occupants
4789          * of the GPU, the GPU may be in an inconsistent state when we
4790          * try to take over. The only way to remove the earlier state
4791          * is by resetting. However, resetting on earlier gen is tricky as
4792          * it may impact the display and we are uncertain about the stability
4793          * of the reset, so this could be applied to even earlier gen.
4794          */
4795         if (INTEL_GEN(i915) >= 5) {
4796                 int reset = intel_gpu_reset(i915, ALL_ENGINES);
4797                 WARN_ON(reset && reset != -ENODEV);
4798         }
4799 }
4800
4801 int i915_gem_suspend(struct drm_i915_private *dev_priv)
4802 {
4803         struct drm_device *dev = &dev_priv->drm;
4804         int ret;
4805
4806         intel_runtime_pm_get(dev_priv);
4807         intel_suspend_gt_powersave(dev_priv);
4808
4809         mutex_lock(&dev->struct_mutex);
4810
4811         /* We have to flush all the executing contexts to main memory so
4812          * that they can saved in the hibernation image. To ensure the last
4813          * context image is coherent, we have to switch away from it. That
4814          * leaves the dev_priv->kernel_context still active when
4815          * we actually suspend, and its image in memory may not match the GPU
4816          * state. Fortunately, the kernel_context is disposable and we do
4817          * not rely on its state.
4818          */
4819         if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
4820                 ret = i915_gem_switch_to_kernel_context(dev_priv);
4821                 if (ret)
4822                         goto err_unlock;
4823
4824                 ret = i915_gem_wait_for_idle(dev_priv,
4825                                              I915_WAIT_INTERRUPTIBLE |
4826                                              I915_WAIT_LOCKED);
4827                 if (ret && ret != -EIO)
4828                         goto err_unlock;
4829
4830                 assert_kernel_context_is_current(dev_priv);
4831         }
4832         i915_gem_contexts_lost(dev_priv);
4833         mutex_unlock(&dev->struct_mutex);
4834
4835         intel_guc_suspend(dev_priv);
4836
4837         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4838         cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4839
4840         /* As the idle_work is rearming if it detects a race, play safe and
4841          * repeat the flush until it is definitely idle.
4842          */
4843         drain_delayed_work(&dev_priv->gt.idle_work);
4844
4845         /* Assert that we sucessfully flushed all the work and
4846          * reset the GPU back to its idle, low power state.
4847          */
4848         WARN_ON(dev_priv->gt.awake);
4849         if (WARN_ON(!intel_engines_are_idle(dev_priv)))
4850                 i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
4851
4852         /*
4853          * Neither the BIOS, ourselves or any other kernel
4854          * expects the system to be in execlists mode on startup,
4855          * so we need to reset the GPU back to legacy mode. And the only
4856          * known way to disable logical contexts is through a GPU reset.
4857          *
4858          * So in order to leave the system in a known default configuration,
4859          * always reset the GPU upon unload and suspend. Afterwards we then
4860          * clean up the GEM state tracking, flushing off the requests and
4861          * leaving the system in a known idle state.
4862          *
4863          * Note that is of the upmost importance that the GPU is idle and
4864          * all stray writes are flushed *before* we dismantle the backing
4865          * storage for the pinned objects.
4866          *
4867          * However, since we are uncertain that resetting the GPU on older
4868          * machines is a good idea, we don't - just in case it leaves the
4869          * machine in an unusable condition.
4870          */
4871         i915_gem_sanitize(dev_priv);
4872
4873         intel_runtime_pm_put(dev_priv);
4874         return 0;
4875
4876 err_unlock:
4877         mutex_unlock(&dev->struct_mutex);
4878         intel_runtime_pm_put(dev_priv);
4879         return ret;
4880 }
4881
4882 void i915_gem_resume(struct drm_i915_private *i915)
4883 {
4884         WARN_ON(i915->gt.awake);
4885
4886         mutex_lock(&i915->drm.struct_mutex);
4887         intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
4888
4889         i915_gem_restore_gtt_mappings(i915);
4890         i915_gem_restore_fences(i915);
4891
4892         /*
4893          * As we didn't flush the kernel context before suspend, we cannot
4894          * guarantee that the context image is complete. So let's just reset
4895          * it and start again.
4896          */
4897         i915->gt.resume(i915);
4898
4899         if (i915_gem_init_hw(i915))
4900                 goto err_wedged;
4901
4902         intel_guc_resume(i915);
4903
4904         /* Always reload a context for powersaving. */
4905         if (i915_gem_switch_to_kernel_context(i915))
4906                 goto err_wedged;
4907
4908 out_unlock:
4909         intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
4910         mutex_unlock(&i915->drm.struct_mutex);
4911         return;
4912
4913 err_wedged:
4914         if (!i915_terminally_wedged(&i915->gpu_error)) {
4915                 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
4916                 i915_gem_set_wedged(i915);
4917         }
4918         goto out_unlock;
4919 }
4920
4921 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4922 {
4923         if (INTEL_GEN(dev_priv) < 5 ||
4924             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4925                 return;
4926
4927         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4928                                  DISP_TILE_SURFACE_SWIZZLING);
4929
4930         if (IS_GEN5(dev_priv))
4931                 return;
4932
4933         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4934         if (IS_GEN6(dev_priv))
4935                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4936         else if (IS_GEN7(dev_priv))
4937                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4938         else if (IS_GEN8(dev_priv))
4939                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4940         else
4941                 BUG();
4942 }
4943
4944 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4945 {
4946         I915_WRITE(RING_CTL(base), 0);
4947         I915_WRITE(RING_HEAD(base), 0);
4948         I915_WRITE(RING_TAIL(base), 0);
4949         I915_WRITE(RING_START(base), 0);
4950 }
4951
4952 static void init_unused_rings(struct drm_i915_private *dev_priv)
4953 {
4954         if (IS_I830(dev_priv)) {
4955                 init_unused_ring(dev_priv, PRB1_BASE);
4956                 init_unused_ring(dev_priv, SRB0_BASE);
4957                 init_unused_ring(dev_priv, SRB1_BASE);
4958                 init_unused_ring(dev_priv, SRB2_BASE);
4959                 init_unused_ring(dev_priv, SRB3_BASE);
4960         } else if (IS_GEN2(dev_priv)) {
4961                 init_unused_ring(dev_priv, SRB0_BASE);
4962                 init_unused_ring(dev_priv, SRB1_BASE);
4963         } else if (IS_GEN3(dev_priv)) {
4964                 init_unused_ring(dev_priv, PRB1_BASE);
4965                 init_unused_ring(dev_priv, PRB2_BASE);
4966         }
4967 }
4968
4969 static int __i915_gem_restart_engines(void *data)
4970 {
4971         struct drm_i915_private *i915 = data;
4972         struct intel_engine_cs *engine;
4973         enum intel_engine_id id;
4974         int err;
4975
4976         for_each_engine(engine, i915, id) {
4977                 err = engine->init_hw(engine);
4978                 if (err)
4979                         return err;
4980         }
4981
4982         return 0;
4983 }
4984
4985 int i915_gem_init_hw(struct drm_i915_private *dev_priv)
4986 {
4987         int ret;
4988
4989         dev_priv->gt.last_init_time = ktime_get();
4990
4991         /* Double layer security blanket, see i915_gem_init() */
4992         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4993
4994         if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4995                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4996
4997         if (IS_HASWELL(dev_priv))
4998                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4999                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
5000
5001         if (HAS_PCH_NOP(dev_priv)) {
5002                 if (IS_IVYBRIDGE(dev_priv)) {
5003                         u32 temp = I915_READ(GEN7_MSG_CTL);
5004                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
5005                         I915_WRITE(GEN7_MSG_CTL, temp);
5006                 } else if (INTEL_GEN(dev_priv) >= 7) {
5007                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
5008                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
5009                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
5010                 }
5011         }
5012
5013         i915_gem_init_swizzling(dev_priv);
5014
5015         /*
5016          * At least 830 can leave some of the unused rings
5017          * "active" (ie. head != tail) after resume which
5018          * will prevent c3 entry. Makes sure all unused rings
5019          * are totally idle.
5020          */
5021         init_unused_rings(dev_priv);
5022
5023         BUG_ON(!dev_priv->kernel_context);
5024         if (i915_terminally_wedged(&dev_priv->gpu_error)) {
5025                 ret = -EIO;
5026                 goto out;
5027         }
5028
5029         ret = i915_ppgtt_init_hw(dev_priv);
5030         if (ret) {
5031                 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
5032                 goto out;
5033         }
5034
5035         /* We can't enable contexts until all firmware is loaded */
5036         ret = intel_uc_init_hw(dev_priv);
5037         if (ret)
5038                 goto out;
5039
5040         intel_mocs_init_l3cc_table(dev_priv);
5041
5042         /* Only when the HW is re-initialised, can we replay the requests */
5043         ret = __i915_gem_restart_engines(dev_priv);
5044 out:
5045         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5046         return ret;
5047 }
5048
5049 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
5050 {
5051         struct i915_gem_context *ctx;
5052         struct intel_engine_cs *engine;
5053         enum intel_engine_id id;
5054         int err;
5055
5056         /*
5057          * As we reset the gpu during very early sanitisation, the current
5058          * register state on the GPU should reflect its defaults values.
5059          * We load a context onto the hw (with restore-inhibit), then switch
5060          * over to a second context to save that default register state. We
5061          * can then prime every new context with that state so they all start
5062          * from the same default HW values.
5063          */
5064
5065         ctx = i915_gem_context_create_kernel(i915, 0);
5066         if (IS_ERR(ctx))
5067                 return PTR_ERR(ctx);
5068
5069         for_each_engine(engine, i915, id) {
5070                 struct drm_i915_gem_request *rq;
5071
5072                 rq = i915_gem_request_alloc(engine, ctx);
5073                 if (IS_ERR(rq)) {
5074                         err = PTR_ERR(rq);
5075                         goto out_ctx;
5076                 }
5077
5078                 err = 0;
5079                 if (engine->init_context)
5080                         err = engine->init_context(rq);
5081
5082                 __i915_add_request(rq, true);
5083                 if (err)
5084                         goto err_active;
5085         }
5086
5087         err = i915_gem_switch_to_kernel_context(i915);
5088         if (err)
5089                 goto err_active;
5090
5091         err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
5092         if (err)
5093                 goto err_active;
5094
5095         assert_kernel_context_is_current(i915);
5096
5097         for_each_engine(engine, i915, id) {
5098                 struct i915_vma *state;
5099
5100                 state = ctx->engine[id].state;
5101                 if (!state)
5102                         continue;
5103
5104                 /*
5105                  * As we will hold a reference to the logical state, it will
5106                  * not be torn down with the context, and importantly the
5107                  * object will hold onto its vma (making it possible for a
5108                  * stray GTT write to corrupt our defaults). Unmap the vma
5109                  * from the GTT to prevent such accidents and reclaim the
5110                  * space.
5111                  */
5112                 err = i915_vma_unbind(state);
5113                 if (err)
5114                         goto err_active;
5115
5116                 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
5117                 if (err)
5118                         goto err_active;
5119
5120                 engine->default_state = i915_gem_object_get(state->obj);
5121         }
5122
5123         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
5124                 unsigned int found = intel_engines_has_context_isolation(i915);
5125
5126                 /*
5127                  * Make sure that classes with multiple engine instances all
5128                  * share the same basic configuration.
5129                  */
5130                 for_each_engine(engine, i915, id) {
5131                         unsigned int bit = BIT(engine->uabi_class);
5132                         unsigned int expected = engine->default_state ? bit : 0;
5133
5134                         if ((found & bit) != expected) {
5135                                 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
5136                                           engine->uabi_class, engine->name);
5137                         }
5138                 }
5139         }
5140
5141 out_ctx:
5142         i915_gem_context_set_closed(ctx);
5143         i915_gem_context_put(ctx);
5144         return err;
5145
5146 err_active:
5147         /*
5148          * If we have to abandon now, we expect the engines to be idle
5149          * and ready to be torn-down. First try to flush any remaining
5150          * request, ensure we are pointing at the kernel context and
5151          * then remove it.
5152          */
5153         if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
5154                 goto out_ctx;
5155
5156         if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
5157                 goto out_ctx;
5158
5159         i915_gem_contexts_lost(i915);
5160         goto out_ctx;
5161 }
5162
5163 int i915_gem_init(struct drm_i915_private *dev_priv)
5164 {
5165         int ret;
5166
5167         /*
5168          * We need to fallback to 4K pages since gvt gtt handling doesn't
5169          * support huge page entries - we will need to check either hypervisor
5170          * mm can support huge guest page or just do emulation in gvt.
5171          */
5172         if (intel_vgpu_active(dev_priv))
5173                 mkwrite_device_info(dev_priv)->page_sizes =
5174                         I915_GTT_PAGE_SIZE_4K;
5175
5176         dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
5177
5178         if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
5179                 dev_priv->gt.resume = intel_lr_context_resume;
5180                 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
5181         } else {
5182                 dev_priv->gt.resume = intel_legacy_submission_resume;
5183                 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
5184         }
5185
5186         ret = i915_gem_init_userptr(dev_priv);
5187         if (ret)
5188                 return ret;
5189
5190         ret = intel_uc_init_wq(dev_priv);
5191         if (ret)
5192                 return ret;
5193
5194         /* This is just a security blanket to placate dragons.
5195          * On some systems, we very sporadically observe that the first TLBs
5196          * used by the CS may be stale, despite us poking the TLB reset. If
5197          * we hold the forcewake during initialisation these problems
5198          * just magically go away.
5199          */
5200         mutex_lock(&dev_priv->drm.struct_mutex);
5201         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5202
5203         ret = i915_gem_init_ggtt(dev_priv);
5204         if (ret) {
5205                 GEM_BUG_ON(ret == -EIO);
5206                 goto err_unlock;
5207         }
5208
5209         ret = i915_gem_contexts_init(dev_priv);
5210         if (ret) {
5211                 GEM_BUG_ON(ret == -EIO);
5212                 goto err_ggtt;
5213         }
5214
5215         ret = intel_engines_init(dev_priv);
5216         if (ret) {
5217                 GEM_BUG_ON(ret == -EIO);
5218                 goto err_context;
5219         }
5220
5221         intel_init_gt_powersave(dev_priv);
5222
5223         ret = intel_uc_init(dev_priv);
5224         if (ret)
5225                 goto err_pm;
5226
5227         ret = i915_gem_init_hw(dev_priv);
5228         if (ret)
5229                 goto err_uc_init;
5230
5231         /*
5232          * Despite its name intel_init_clock_gating applies both display
5233          * clock gating workarounds; GT mmio workarounds and the occasional
5234          * GT power context workaround. Worse, sometimes it includes a context
5235          * register workaround which we need to apply before we record the
5236          * default HW state for all contexts.
5237          *
5238          * FIXME: break up the workarounds and apply them at the right time!
5239          */
5240         intel_init_clock_gating(dev_priv);
5241
5242         ret = __intel_engines_record_defaults(dev_priv);
5243         if (ret)
5244                 goto err_init_hw;
5245
5246         if (i915_inject_load_failure()) {
5247                 ret = -ENODEV;
5248                 goto err_init_hw;
5249         }
5250
5251         if (i915_inject_load_failure()) {
5252                 ret = -EIO;
5253                 goto err_init_hw;
5254         }
5255
5256         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5257         mutex_unlock(&dev_priv->drm.struct_mutex);
5258
5259         return 0;
5260
5261         /*
5262          * Unwinding is complicated by that we want to handle -EIO to mean
5263          * disable GPU submission but keep KMS alive. We want to mark the
5264          * HW as irrevisibly wedged, but keep enough state around that the
5265          * driver doesn't explode during runtime.
5266          */
5267 err_init_hw:
5268         i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
5269         i915_gem_contexts_lost(dev_priv);
5270         intel_uc_fini_hw(dev_priv);
5271 err_uc_init:
5272         intel_uc_fini(dev_priv);
5273 err_pm:
5274         if (ret != -EIO) {
5275                 intel_cleanup_gt_powersave(dev_priv);
5276                 i915_gem_cleanup_engines(dev_priv);
5277         }
5278 err_context:
5279         if (ret != -EIO)
5280                 i915_gem_contexts_fini(dev_priv);
5281 err_ggtt:
5282 err_unlock:
5283         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5284         mutex_unlock(&dev_priv->drm.struct_mutex);
5285
5286         if (ret != -EIO)
5287                 i915_gem_cleanup_userptr(dev_priv);
5288
5289         if (ret == -EIO) {
5290                 /*
5291                  * Allow engine initialisation to fail by marking the GPU as
5292                  * wedged. But we only want to do this where the GPU is angry,
5293                  * for all other failure, such as an allocation failure, bail.
5294                  */
5295                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
5296                         DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
5297                         i915_gem_set_wedged(dev_priv);
5298                 }
5299                 ret = 0;
5300         }
5301
5302         i915_gem_drain_freed_objects(dev_priv);
5303         return ret;
5304 }
5305
5306 void i915_gem_init_mmio(struct drm_i915_private *i915)
5307 {
5308         i915_gem_sanitize(i915);
5309 }
5310
5311 void
5312 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
5313 {
5314         struct intel_engine_cs *engine;
5315         enum intel_engine_id id;
5316
5317         for_each_engine(engine, dev_priv, id)
5318                 dev_priv->gt.cleanup_engine(engine);
5319 }
5320
5321 void
5322 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5323 {
5324         int i;
5325
5326         if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5327             !IS_CHERRYVIEW(dev_priv))
5328                 dev_priv->num_fence_regs = 32;
5329         else if (INTEL_INFO(dev_priv)->gen >= 4 ||
5330                  IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5331                  IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
5332                 dev_priv->num_fence_regs = 16;
5333         else
5334                 dev_priv->num_fence_regs = 8;
5335
5336         if (intel_vgpu_active(dev_priv))
5337                 dev_priv->num_fence_regs =
5338                                 I915_READ(vgtif_reg(avail_rs.fence_num));
5339
5340         /* Initialize fence registers to zero */
5341         for (i = 0; i < dev_priv->num_fence_regs; i++) {
5342                 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
5343
5344                 fence->i915 = dev_priv;
5345                 fence->id = i;
5346                 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
5347         }
5348         i915_gem_restore_fences(dev_priv);
5349
5350         i915_gem_detect_bit_6_swizzle(dev_priv);
5351 }
5352
5353 static void i915_gem_init__mm(struct drm_i915_private *i915)
5354 {
5355         spin_lock_init(&i915->mm.object_stat_lock);
5356         spin_lock_init(&i915->mm.obj_lock);
5357         spin_lock_init(&i915->mm.free_lock);
5358
5359         init_llist_head(&i915->mm.free_list);
5360
5361         INIT_LIST_HEAD(&i915->mm.unbound_list);
5362         INIT_LIST_HEAD(&i915->mm.bound_list);
5363         INIT_LIST_HEAD(&i915->mm.fence_list);
5364         INIT_LIST_HEAD(&i915->mm.userfault_list);
5365
5366         INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
5367 }
5368
5369 int
5370 i915_gem_load_init(struct drm_i915_private *dev_priv)
5371 {
5372         int err = -ENOMEM;
5373
5374         dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
5375         if (!dev_priv->objects)
5376                 goto err_out;
5377
5378         dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
5379         if (!dev_priv->vmas)
5380                 goto err_objects;
5381
5382         dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
5383         if (!dev_priv->luts)
5384                 goto err_vmas;
5385
5386         dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
5387                                         SLAB_HWCACHE_ALIGN |
5388                                         SLAB_RECLAIM_ACCOUNT |
5389                                         SLAB_TYPESAFE_BY_RCU);
5390         if (!dev_priv->requests)
5391                 goto err_luts;
5392
5393         dev_priv->dependencies = KMEM_CACHE(i915_dependency,
5394                                             SLAB_HWCACHE_ALIGN |
5395                                             SLAB_RECLAIM_ACCOUNT);
5396         if (!dev_priv->dependencies)
5397                 goto err_requests;
5398
5399         dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
5400         if (!dev_priv->priorities)
5401                 goto err_dependencies;
5402
5403         mutex_lock(&dev_priv->drm.struct_mutex);
5404         INIT_LIST_HEAD(&dev_priv->gt.timelines);
5405         err = i915_gem_timeline_init__global(dev_priv);
5406         mutex_unlock(&dev_priv->drm.struct_mutex);
5407         if (err)
5408                 goto err_priorities;
5409
5410         i915_gem_init__mm(dev_priv);
5411
5412         INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
5413                           i915_gem_retire_work_handler);
5414         INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
5415                           i915_gem_idle_work_handler);
5416         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
5417         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5418
5419         atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
5420
5421         spin_lock_init(&dev_priv->fb_tracking.lock);
5422
5423         err = i915_gemfs_init(dev_priv);
5424         if (err)
5425                 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
5426
5427         return 0;
5428
5429 err_priorities:
5430         kmem_cache_destroy(dev_priv->priorities);
5431 err_dependencies:
5432         kmem_cache_destroy(dev_priv->dependencies);
5433 err_requests:
5434         kmem_cache_destroy(dev_priv->requests);
5435 err_luts:
5436         kmem_cache_destroy(dev_priv->luts);
5437 err_vmas:
5438         kmem_cache_destroy(dev_priv->vmas);
5439 err_objects:
5440         kmem_cache_destroy(dev_priv->objects);
5441 err_out:
5442         return err;
5443 }
5444
5445 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
5446 {
5447         i915_gem_drain_freed_objects(dev_priv);
5448         WARN_ON(!llist_empty(&dev_priv->mm.free_list));
5449         WARN_ON(dev_priv->mm.object_count);
5450
5451         mutex_lock(&dev_priv->drm.struct_mutex);
5452         i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
5453         WARN_ON(!list_empty(&dev_priv->gt.timelines));
5454         mutex_unlock(&dev_priv->drm.struct_mutex);
5455
5456         kmem_cache_destroy(dev_priv->priorities);
5457         kmem_cache_destroy(dev_priv->dependencies);
5458         kmem_cache_destroy(dev_priv->requests);
5459         kmem_cache_destroy(dev_priv->luts);
5460         kmem_cache_destroy(dev_priv->vmas);
5461         kmem_cache_destroy(dev_priv->objects);
5462
5463         /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
5464         rcu_barrier();
5465
5466         i915_gemfs_fini(dev_priv);
5467 }
5468
5469 int i915_gem_freeze(struct drm_i915_private *dev_priv)
5470 {
5471         /* Discard all purgeable objects, let userspace recover those as
5472          * required after resuming.
5473          */
5474         i915_gem_shrink_all(dev_priv);
5475
5476         return 0;
5477 }
5478
5479 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
5480 {
5481         struct drm_i915_gem_object *obj;
5482         struct list_head *phases[] = {
5483                 &dev_priv->mm.unbound_list,
5484                 &dev_priv->mm.bound_list,
5485                 NULL
5486         }, **p;
5487
5488         /* Called just before we write the hibernation image.
5489          *
5490          * We need to update the domain tracking to reflect that the CPU
5491          * will be accessing all the pages to create and restore from the
5492          * hibernation, and so upon restoration those pages will be in the
5493          * CPU domain.
5494          *
5495          * To make sure the hibernation image contains the latest state,
5496          * we update that state just before writing out the image.
5497          *
5498          * To try and reduce the hibernation image, we manually shrink
5499          * the objects as well, see i915_gem_freeze()
5500          */
5501
5502         i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
5503         i915_gem_drain_freed_objects(dev_priv);
5504
5505         spin_lock(&dev_priv->mm.obj_lock);
5506         for (p = phases; *p; p++) {
5507                 list_for_each_entry(obj, *p, mm.link)
5508                         __start_cpu_write(obj);
5509         }
5510         spin_unlock(&dev_priv->mm.obj_lock);
5511
5512         return 0;
5513 }
5514
5515 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5516 {
5517         struct drm_i915_file_private *file_priv = file->driver_priv;
5518         struct drm_i915_gem_request *request;
5519
5520         /* Clean up our request list when the client is going away, so that
5521          * later retire_requests won't dereference our soon-to-be-gone
5522          * file_priv.
5523          */
5524         spin_lock(&file_priv->mm.lock);
5525         list_for_each_entry(request, &file_priv->mm.request_list, client_link)
5526                 request->file_priv = NULL;
5527         spin_unlock(&file_priv->mm.lock);
5528 }
5529
5530 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
5531 {
5532         struct drm_i915_file_private *file_priv;
5533         int ret;
5534
5535         DRM_DEBUG("\n");
5536
5537         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5538         if (!file_priv)
5539                 return -ENOMEM;
5540
5541         file->driver_priv = file_priv;
5542         file_priv->dev_priv = i915;
5543         file_priv->file = file;
5544
5545         spin_lock_init(&file_priv->mm.lock);
5546         INIT_LIST_HEAD(&file_priv->mm.request_list);
5547
5548         file_priv->bsd_engine = -1;
5549
5550         ret = i915_gem_context_open(i915, file);
5551         if (ret)
5552                 kfree(file_priv);
5553
5554         return ret;
5555 }
5556
5557 /**
5558  * i915_gem_track_fb - update frontbuffer tracking
5559  * @old: current GEM buffer for the frontbuffer slots
5560  * @new: new GEM buffer for the frontbuffer slots
5561  * @frontbuffer_bits: bitmask of frontbuffer slots
5562  *
5563  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5564  * from @old and setting them in @new. Both @old and @new can be NULL.
5565  */
5566 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5567                        struct drm_i915_gem_object *new,
5568                        unsigned frontbuffer_bits)
5569 {
5570         /* Control of individual bits within the mask are guarded by
5571          * the owning plane->mutex, i.e. we can never see concurrent
5572          * manipulation of individual bits. But since the bitfield as a whole
5573          * is updated using RMW, we need to use atomics in order to update
5574          * the bits.
5575          */
5576         BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5577                      sizeof(atomic_t) * BITS_PER_BYTE);
5578
5579         if (old) {
5580                 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
5581                 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
5582         }
5583
5584         if (new) {
5585                 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
5586                 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
5587         }
5588 }
5589
5590 /* Allocate a new GEM object and fill it with the supplied data */
5591 struct drm_i915_gem_object *
5592 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
5593                                  const void *data, size_t size)
5594 {
5595         struct drm_i915_gem_object *obj;
5596         struct file *file;
5597         size_t offset;
5598         int err;
5599
5600         obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
5601         if (IS_ERR(obj))
5602                 return obj;
5603
5604         GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
5605
5606         file = obj->base.filp;
5607         offset = 0;
5608         do {
5609                 unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
5610                 struct page *page;
5611                 void *pgdata, *vaddr;
5612
5613                 err = pagecache_write_begin(file, file->f_mapping,
5614                                             offset, len, 0,
5615                                             &page, &pgdata);
5616                 if (err < 0)
5617                         goto fail;
5618
5619                 vaddr = kmap(page);
5620                 memcpy(vaddr, data, len);
5621                 kunmap(page);
5622
5623                 err = pagecache_write_end(file, file->f_mapping,
5624                                           offset, len, len,
5625                                           page, pgdata);
5626                 if (err < 0)
5627                         goto fail;
5628
5629                 size -= len;
5630                 data += len;
5631                 offset += len;
5632         } while (size);
5633
5634         return obj;
5635
5636 fail:
5637         i915_gem_object_put(obj);
5638         return ERR_PTR(err);
5639 }
5640
5641 struct scatterlist *
5642 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
5643                        unsigned int n,
5644                        unsigned int *offset)
5645 {
5646         struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
5647         struct scatterlist *sg;
5648         unsigned int idx, count;
5649
5650         might_sleep();
5651         GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
5652         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
5653
5654         /* As we iterate forward through the sg, we record each entry in a
5655          * radixtree for quick repeated (backwards) lookups. If we have seen
5656          * this index previously, we will have an entry for it.
5657          *
5658          * Initial lookup is O(N), but this is amortized to O(1) for
5659          * sequential page access (where each new request is consecutive
5660          * to the previous one). Repeated lookups are O(lg(obj->base.size)),
5661          * i.e. O(1) with a large constant!
5662          */
5663         if (n < READ_ONCE(iter->sg_idx))
5664                 goto lookup;
5665
5666         mutex_lock(&iter->lock);
5667
5668         /* We prefer to reuse the last sg so that repeated lookup of this
5669          * (or the subsequent) sg are fast - comparing against the last
5670          * sg is faster than going through the radixtree.
5671          */
5672
5673         sg = iter->sg_pos;
5674         idx = iter->sg_idx;
5675         count = __sg_page_count(sg);
5676
5677         while (idx + count <= n) {
5678                 unsigned long exception, i;
5679                 int ret;
5680
5681                 /* If we cannot allocate and insert this entry, or the
5682                  * individual pages from this range, cancel updating the
5683                  * sg_idx so that on this lookup we are forced to linearly
5684                  * scan onwards, but on future lookups we will try the
5685                  * insertion again (in which case we need to be careful of
5686                  * the error return reporting that we have already inserted
5687                  * this index).
5688                  */
5689                 ret = radix_tree_insert(&iter->radix, idx, sg);
5690                 if (ret && ret != -EEXIST)
5691                         goto scan;
5692
5693                 exception =
5694                         RADIX_TREE_EXCEPTIONAL_ENTRY |
5695                         idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
5696                 for (i = 1; i < count; i++) {
5697                         ret = radix_tree_insert(&iter->radix, idx + i,
5698                                                 (void *)exception);
5699                         if (ret && ret != -EEXIST)
5700                                 goto scan;
5701                 }
5702
5703                 idx += count;
5704                 sg = ____sg_next(sg);
5705                 count = __sg_page_count(sg);
5706         }
5707
5708 scan:
5709         iter->sg_pos = sg;
5710         iter->sg_idx = idx;
5711
5712         mutex_unlock(&iter->lock);
5713
5714         if (unlikely(n < idx)) /* insertion completed by another thread */
5715                 goto lookup;
5716
5717         /* In case we failed to insert the entry into the radixtree, we need
5718          * to look beyond the current sg.
5719          */
5720         while (idx + count <= n) {
5721                 idx += count;
5722                 sg = ____sg_next(sg);
5723                 count = __sg_page_count(sg);
5724         }
5725
5726         *offset = n - idx;
5727         return sg;
5728
5729 lookup:
5730         rcu_read_lock();
5731
5732         sg = radix_tree_lookup(&iter->radix, n);
5733         GEM_BUG_ON(!sg);
5734
5735         /* If this index is in the middle of multi-page sg entry,
5736          * the radixtree will contain an exceptional entry that points
5737          * to the start of that range. We will return the pointer to
5738          * the base page and the offset of this page within the
5739          * sg entry's range.
5740          */
5741         *offset = 0;
5742         if (unlikely(radix_tree_exception(sg))) {
5743                 unsigned long base =
5744                         (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
5745
5746                 sg = radix_tree_lookup(&iter->radix, base);
5747                 GEM_BUG_ON(!sg);
5748
5749                 *offset = n - base;
5750         }
5751
5752         rcu_read_unlock();
5753
5754         return sg;
5755 }
5756
5757 struct page *
5758 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
5759 {
5760         struct scatterlist *sg;
5761         unsigned int offset;
5762
5763         GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
5764
5765         sg = i915_gem_object_get_sg(obj, n, &offset);
5766         return nth_page(sg_page(sg), offset);
5767 }
5768
5769 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
5770 struct page *
5771 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
5772                                unsigned int n)
5773 {
5774         struct page *page;
5775
5776         page = i915_gem_object_get_page(obj, n);
5777         if (!obj->mm.dirty)
5778                 set_page_dirty(page);
5779
5780         return page;
5781 }
5782
5783 dma_addr_t
5784 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
5785                                 unsigned long n)
5786 {
5787         struct scatterlist *sg;
5788         unsigned int offset;
5789
5790         sg = i915_gem_object_get_sg(obj, n, &offset);
5791         return sg_dma_address(sg) + (offset << PAGE_SHIFT);
5792 }
5793
5794 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
5795 {
5796         struct sg_table *pages;
5797         int err;
5798
5799         if (align > obj->base.size)
5800                 return -EINVAL;
5801
5802         if (obj->ops == &i915_gem_phys_ops)
5803                 return 0;
5804
5805         if (obj->ops != &i915_gem_object_ops)
5806                 return -EINVAL;
5807
5808         err = i915_gem_object_unbind(obj);
5809         if (err)
5810                 return err;
5811
5812         mutex_lock(&obj->mm.lock);
5813
5814         if (obj->mm.madv != I915_MADV_WILLNEED) {
5815                 err = -EFAULT;
5816                 goto err_unlock;
5817         }
5818
5819         if (obj->mm.quirked) {
5820                 err = -EFAULT;
5821                 goto err_unlock;
5822         }
5823
5824         if (obj->mm.mapping) {
5825                 err = -EBUSY;
5826                 goto err_unlock;
5827         }
5828
5829         pages = fetch_and_zero(&obj->mm.pages);
5830         if (pages) {
5831                 struct drm_i915_private *i915 = to_i915(obj->base.dev);
5832
5833                 __i915_gem_object_reset_page_iter(obj);
5834
5835                 spin_lock(&i915->mm.obj_lock);
5836                 list_del(&obj->mm.link);
5837                 spin_unlock(&i915->mm.obj_lock);
5838         }
5839
5840         obj->ops = &i915_gem_phys_ops;
5841
5842         err = ____i915_gem_object_get_pages(obj);
5843         if (err)
5844                 goto err_xfer;
5845
5846         /* Perma-pin (until release) the physical set of pages */
5847         __i915_gem_object_pin_pages(obj);
5848
5849         if (!IS_ERR_OR_NULL(pages))
5850                 i915_gem_object_ops.put_pages(obj, pages);
5851         mutex_unlock(&obj->mm.lock);
5852         return 0;
5853
5854 err_xfer:
5855         obj->ops = &i915_gem_object_ops;
5856         obj->mm.pages = pages;
5857 err_unlock:
5858         mutex_unlock(&obj->mm.lock);
5859         return err;
5860 }
5861
5862 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5863 #include "selftests/scatterlist.c"
5864 #include "selftests/mock_gem_device.c"
5865 #include "selftests/huge_gem_object.c"
5866 #include "selftests/huge_pages.c"
5867 #include "selftests/i915_gem_object.c"
5868 #include "selftests/i915_gem_coherency.c"
5869 #endif