1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_gem_clflush.h"
33 #include "i915_vgpu.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 #include "intel_frontbuffer.h"
37 #include "intel_mocs.h"
38 #include "i915_gemfs.h"
39 #include <linux/dma-fence-array.h>
40 #include <linux/kthread.h>
41 #include <linux/reservation.h>
42 #include <linux/shmem_fs.h>
43 #include <linux/slab.h>
44 #include <linux/stop_machine.h>
45 #include <linux/swap.h>
46 #include <linux/pci.h>
47 #include <linux/dma-buf.h>
48
49 static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
50
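/* Do CPU writes to this object need a clflush before the GPU (or display)
 * can rely on them? Objects already tracked as cache_dirty will be flushed
 * later, non-coherent objects always need the flush, and coherent objects
 * only need it while pinned for global (e.g. scanout) use.
 */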
51 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
52 {
53         if (obj->cache_dirty)
54                 return false;
55
56         if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
57                 return true;
58
59         return obj->pin_global; /* currently in use by HW, keep flushed */
60 }
61
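/* Reserve a node of the requested size in the CPU-mappable portion of the
 * global GTT (below ggtt->mappable_end), preferring the lowest available
 * address.
 */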
62 static int
63 insert_mappable_node(struct i915_ggtt *ggtt,
64                      struct drm_mm_node *node, u32 size)
65 {
66         memset(node, 0, sizeof(*node));
67         return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
68                                            size, 0, I915_COLOR_UNEVICTABLE,
69                                            0, ggtt->mappable_end,
70                                            DRM_MM_INSERT_LOW);
71 }
72
73 static void
74 remove_mappable_node(struct drm_mm_node *node)
75 {
76         drm_mm_remove_node(node);
77 }
78
79 /* some bookkeeping */
80 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
81                                   u64 size)
82 {
83         spin_lock(&dev_priv->mm.object_stat_lock);
84         dev_priv->mm.object_count++;
85         dev_priv->mm.object_memory += size;
86         spin_unlock(&dev_priv->mm.object_stat_lock);
87 }
88
89 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
90                                      u64 size)
91 {
92         spin_lock(&dev_priv->mm.object_stat_lock);
93         dev_priv->mm.object_count--;
94         dev_priv->mm.object_memory -= size;
95         spin_unlock(&dev_priv->mm.object_stat_lock);
96 }
97
98 static int
99 i915_gem_wait_for_error(struct i915_gpu_error *error)
100 {
101         int ret;
102
103         might_sleep();
104
105         /*
106          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
107          * userspace. If it takes that long something really bad is going on and
108          * we should simply try to bail out and fail as gracefully as possible.
109          */
110         ret = wait_event_interruptible_timeout(error->reset_queue,
111                                                !i915_reset_backoff(error),
112                                                I915_RESET_TIMEOUT);
113         if (ret == 0) {
114                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
115                 return -EIO;
116         } else if (ret < 0) {
117                 return ret;
118         } else {
119                 return 0;
120         }
121 }
122
123 int i915_mutex_lock_interruptible(struct drm_device *dev)
124 {
125         struct drm_i915_private *dev_priv = to_i915(dev);
126         int ret;
127
128         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
129         if (ret)
130                 return ret;
131
132         ret = mutex_lock_interruptible(&dev->struct_mutex);
133         if (ret)
134                 return ret;
135
136         return 0;
137 }
138
139 int
140 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
141                             struct drm_file *file)
142 {
143         struct drm_i915_private *dev_priv = to_i915(dev);
144         struct i915_ggtt *ggtt = &dev_priv->ggtt;
145         struct drm_i915_gem_get_aperture *args = data;
146         struct i915_vma *vma;
147         u64 pinned;
148
149         pinned = ggtt->base.reserved;
150         mutex_lock(&dev->struct_mutex);
151         list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
152                 if (i915_vma_is_pinned(vma))
153                         pinned += vma->node.size;
154         list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
155                 if (i915_vma_is_pinned(vma))
156                         pinned += vma->node.size;
157         mutex_unlock(&dev->struct_mutex);
158
159         args->aper_size = ggtt->base.total;
160         args->aper_available_size = args->aper_size - pinned;
161
162         return 0;
163 }
164
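/* Copy the object's shmem backing pages into a single physically contiguous
 * buffer allocated with drm_pci_alloc(), and point a one-entry sg table at
 * it; the buffer is then tracked as obj->phys_handle.
 */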
165 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
166 {
167         struct address_space *mapping = obj->base.filp->f_mapping;
168         drm_dma_handle_t *phys;
169         struct sg_table *st;
170         struct scatterlist *sg;
171         char *vaddr;
172         int i;
173         int err;
174
175         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
176                 return -EINVAL;
177
178         /* Always aligning to the object size allows a single allocation
179          * to handle all possible callers, and given typical object sizes,
180          * the alignment of the buddy allocation will naturally match.
181          */
182         phys = drm_pci_alloc(obj->base.dev,
183                              roundup_pow_of_two(obj->base.size),
184                              roundup_pow_of_two(obj->base.size));
185         if (!phys)
186                 return -ENOMEM;
187
188         vaddr = phys->vaddr;
189         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
190                 struct page *page;
191                 char *src;
192
193                 page = shmem_read_mapping_page(mapping, i);
194                 if (IS_ERR(page)) {
195                         err = PTR_ERR(page);
196                         goto err_phys;
197                 }
198
199                 src = kmap_atomic(page);
200                 memcpy(vaddr, src, PAGE_SIZE);
201                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
202                 kunmap_atomic(src);
203
204                 put_page(page);
205                 vaddr += PAGE_SIZE;
206         }
207
208         i915_gem_chipset_flush(to_i915(obj->base.dev));
209
210         st = kmalloc(sizeof(*st), GFP_KERNEL);
211         if (!st) {
212                 err = -ENOMEM;
213                 goto err_phys;
214         }
215
216         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
217                 kfree(st);
218                 err = -ENOMEM;
219                 goto err_phys;
220         }
221
222         sg = st->sgl;
223         sg->offset = 0;
224         sg->length = obj->base.size;
225
226         sg_dma_address(sg) = phys->busaddr;
227         sg_dma_len(sg) = obj->base.size;
228
229         obj->phys_handle = phys;
230
231         __i915_gem_object_set_pages(obj, st, sg->length);
232
233         return 0;
234
235 err_phys:
236         drm_pci_free(obj->base.dev, phys);
237
238         return err;
239 }
240
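/* Put the object into the CPU read/write domain; if CPU writes will later
 * need a clflush, mark the cache as dirty now so the flush is not lost.
 */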
241 static void __start_cpu_write(struct drm_i915_gem_object *obj)
242 {
243         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
244         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
245         if (cpu_write_needs_clflush(obj))
246                 obj->cache_dirty = true;
247 }
248
249 static void
250 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
251                                 struct sg_table *pages,
252                                 bool needs_clflush)
253 {
254         GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
255
256         if (obj->mm.madv == I915_MADV_DONTNEED)
257                 obj->mm.dirty = false;
258
259         if (needs_clflush &&
260             (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
261             !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
262                 drm_clflush_sg(pages);
263
264         __start_cpu_write(obj);
265 }
266
267 static void
268 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
269                                struct sg_table *pages)
270 {
271         __i915_gem_object_release_shmem(obj, pages, false);
272
273         if (obj->mm.dirty) {
274                 struct address_space *mapping = obj->base.filp->f_mapping;
275                 char *vaddr = obj->phys_handle->vaddr;
276                 int i;
277
278                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
279                         struct page *page;
280                         char *dst;
281
282                         page = shmem_read_mapping_page(mapping, i);
283                         if (IS_ERR(page))
284                                 continue;
285
286                         dst = kmap_atomic(page);
287                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
288                         memcpy(dst, vaddr, PAGE_SIZE);
289                         kunmap_atomic(dst);
290
291                         set_page_dirty(page);
292                         if (obj->mm.madv == I915_MADV_WILLNEED)
293                                 mark_page_accessed(page);
294                         put_page(page);
295                         vaddr += PAGE_SIZE;
296                 }
297                 obj->mm.dirty = false;
298         }
299
300         sg_free_table(pages);
301         kfree(pages);
302
303         drm_pci_free(obj->base.dev, obj->phys_handle);
304 }
305
306 static void
307 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
308 {
309         i915_gem_object_unpin_pages(obj);
310 }
311
312 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
313         .get_pages = i915_gem_object_get_pages_phys,
314         .put_pages = i915_gem_object_put_pages_phys,
315         .release = i915_gem_object_release_phys,
316 };
317
318 static const struct drm_i915_gem_object_ops i915_gem_object_ops;
319
320 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
321 {
322         struct i915_vma *vma;
323         LIST_HEAD(still_in_list);
324         int ret;
325
326         lockdep_assert_held(&obj->base.dev->struct_mutex);
327
328         /* Closed vma are removed from the obj->vma_list - but they may
329          * still have an active binding on the object. To remove those we
330          * must wait for all rendering to the object to complete (as unbinding
331          * must anyway), and retire the requests.
332          */
333         ret = i915_gem_object_set_to_cpu_domain(obj, false);
334         if (ret)
335                 return ret;
336
337         while ((vma = list_first_entry_or_null(&obj->vma_list,
338                                                struct i915_vma,
339                                                obj_link))) {
340                 list_move_tail(&vma->obj_link, &still_in_list);
341                 ret = i915_vma_unbind(vma);
342                 if (ret)
343                         break;
344         }
345         list_splice(&still_in_list, &obj->vma_list);
346
347         return ret;
348 }
349
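/* Wait on a single fence, returning the remaining timeout (or an error).
 * i915 fences are waited on via i915_wait_request(), optionally boosting
 * the GPU frequency on behalf of the waiting client; foreign fences fall
 * back to dma_fence_wait_timeout().
 */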
350 static long
351 i915_gem_object_wait_fence(struct dma_fence *fence,
352                            unsigned int flags,
353                            long timeout,
354                            struct intel_rps_client *rps_client)
355 {
356         struct drm_i915_gem_request *rq;
357
358         BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
359
360         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
361                 return timeout;
362
363         if (!dma_fence_is_i915(fence))
364                 return dma_fence_wait_timeout(fence,
365                                               flags & I915_WAIT_INTERRUPTIBLE,
366                                               timeout);
367
368         rq = to_request(fence);
369         if (i915_gem_request_completed(rq))
370                 goto out;
371
372         /* This client is about to stall waiting for the GPU. In many cases
373          * this is undesirable and limits the throughput of the system, as
374          * many clients cannot continue processing user input/output whilst
375          * blocked. RPS autotuning may take tens of milliseconds to respond
376          * to the GPU load and thus incurs additional latency for the client.
377          * We can circumvent that by promoting the GPU frequency to maximum
378          * before we wait. This makes the GPU throttle up much more quickly
379          * (good for benchmarks and user experience, e.g. window animations),
380          * but at a cost of spending more power processing the workload
381          * (bad for battery). Not all clients even want their results
382          * immediately and for them we should just let the GPU select its own
383          * frequency to maximise efficiency. To prevent a single client from
384          * forcing the clocks too high for the whole system, we only allow
385          * each client to waitboost once in a busy period.
386          */
387         if (rps_client) {
388                 if (INTEL_GEN(rq->i915) >= 6)
389                         gen6_rps_boost(rq, rps_client);
390                 else
391                         rps_client = NULL;
392         }
393
394         timeout = i915_wait_request(rq, flags, timeout);
395
396 out:
397         if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
398                 i915_gem_request_retire_upto(rq);
399
400         return timeout;
401 }
402
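/* Wait on the fences tracked by a reservation object: all shared fences
 * plus the exclusive fence when I915_WAIT_ALL is set, otherwise just the
 * exclusive fence. If everything signalled and the object was not touched
 * in the meantime, the fences are pruned.
 */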
403 static long
404 i915_gem_object_wait_reservation(struct reservation_object *resv,
405                                  unsigned int flags,
406                                  long timeout,
407                                  struct intel_rps_client *rps_client)
408 {
409         unsigned int seq = __read_seqcount_begin(&resv->seq);
410         struct dma_fence *excl;
411         bool prune_fences = false;
412
413         if (flags & I915_WAIT_ALL) {
414                 struct dma_fence **shared;
415                 unsigned int count, i;
416                 int ret;
417
418                 ret = reservation_object_get_fences_rcu(resv,
419                                                         &excl, &count, &shared);
420                 if (ret)
421                         return ret;
422
423                 for (i = 0; i < count; i++) {
424                         timeout = i915_gem_object_wait_fence(shared[i],
425                                                              flags, timeout,
426                                                              rps_client);
427                         if (timeout < 0)
428                                 break;
429
430                         dma_fence_put(shared[i]);
431                 }
432
433                 for (; i < count; i++)
434                         dma_fence_put(shared[i]);
435                 kfree(shared);
436
437                 prune_fences = count && timeout >= 0;
438         } else {
439                 excl = reservation_object_get_excl_rcu(resv);
440         }
441
442         if (excl && timeout >= 0) {
443                 timeout = i915_gem_object_wait_fence(excl, flags, timeout,
444                                                      rps_client);
445                 prune_fences = timeout >= 0;
446         }
447
448         dma_fence_put(excl);
449
450         /* Opportunistically prune the fences iff we know they have *all* been
451          * signaled and that the reservation object has not been changed (i.e.
452          * no new fences have been added).
453          */
454         if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
455                 if (reservation_object_trylock(resv)) {
456                         if (!__read_seqcount_retry(&resv->seq, seq))
457                                 reservation_object_add_excl_fence(resv, NULL);
458                         reservation_object_unlock(resv);
459                 }
460         }
461
462         return timeout;
463 }
464
465 static void __fence_set_priority(struct dma_fence *fence, int prio)
466 {
467         struct drm_i915_gem_request *rq;
468         struct intel_engine_cs *engine;
469
470         if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
471                 return;
472
473         rq = to_request(fence);
474         engine = rq->engine;
475         if (!engine->schedule)
476                 return;
477
478         engine->schedule(rq, prio);
479 }
480
481 static void fence_set_priority(struct dma_fence *fence, int prio)
482 {
483         /* Recurse once into a fence-array */
484         if (dma_fence_is_array(fence)) {
485                 struct dma_fence_array *array = to_dma_fence_array(fence);
486                 int i;
487
488                 for (i = 0; i < array->num_fences; i++)
489                         __fence_set_priority(array->fences[i], prio);
490         } else {
491                 __fence_set_priority(fence, prio);
492         }
493 }
494
495 int
496 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
497                               unsigned int flags,
498                               int prio)
499 {
500         struct dma_fence *excl;
501
502         if (flags & I915_WAIT_ALL) {
503                 struct dma_fence **shared;
504                 unsigned int count, i;
505                 int ret;
506
507                 ret = reservation_object_get_fences_rcu(obj->resv,
508                                                         &excl, &count, &shared);
509                 if (ret)
510                         return ret;
511
512                 for (i = 0; i < count; i++) {
513                         fence_set_priority(shared[i], prio);
514                         dma_fence_put(shared[i]);
515                 }
516
517                 kfree(shared);
518         } else {
519                 excl = reservation_object_get_excl_rcu(obj->resv);
520         }
521
522         if (excl) {
523                 fence_set_priority(excl, prio);
524                 dma_fence_put(excl);
525         }
526         return 0;
527 }
528
529 /**
530  * Waits for rendering to the object to be completed
531  * @obj: i915 gem object
532  * @flags: how to wait (under a lock, for all rendering or just for writes etc)
533  * @timeout: how long to wait
534  * @rps_client: client (user process) to charge for any waitboosting
535  */
536 int
537 i915_gem_object_wait(struct drm_i915_gem_object *obj,
538                      unsigned int flags,
539                      long timeout,
540                      struct intel_rps_client *rps_client)
541 {
542         might_sleep();
543 #if IS_ENABLED(CONFIG_LOCKDEP)
544         GEM_BUG_ON(debug_locks &&
545                    !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
546                    !!(flags & I915_WAIT_LOCKED));
547 #endif
548         GEM_BUG_ON(timeout < 0);
549
550         timeout = i915_gem_object_wait_reservation(obj->resv,
551                                                    flags, timeout,
552                                                    rps_client);
553         return timeout < 0 ? timeout : 0;
554 }
555
556 static struct intel_rps_client *to_rps_client(struct drm_file *file)
557 {
558         struct drm_i915_file_private *fpriv = file->driver_priv;
559
560         return &fpriv->rps_client;
561 }
562
563 static int
564 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
565                      struct drm_i915_gem_pwrite *args,
566                      struct drm_file *file)
567 {
568         void *vaddr = obj->phys_handle->vaddr + args->offset;
569         char __user *user_data = u64_to_user_ptr(args->data_ptr);
570
571         /* We manually control the domain here and pretend that it
572          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
573          */
574         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
575         if (copy_from_user(vaddr, user_data, args->size))
576                 return -EFAULT;
577
578         drm_clflush_virt_range(vaddr, args->size);
579         i915_gem_chipset_flush(to_i915(obj->base.dev));
580
581         intel_fb_obj_flush(obj, ORIGIN_CPU);
582         return 0;
583 }
584
585 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
586 {
587         return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
588 }
589
590 void i915_gem_object_free(struct drm_i915_gem_object *obj)
591 {
592         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
593         kmem_cache_free(dev_priv->objects, obj);
594 }
595
596 static int
597 i915_gem_create(struct drm_file *file,
598                 struct drm_i915_private *dev_priv,
599                 uint64_t size,
600                 uint32_t *handle_p)
601 {
602         struct drm_i915_gem_object *obj;
603         int ret;
604         u32 handle;
605
606         size = roundup(size, PAGE_SIZE);
607         if (size == 0)
608                 return -EINVAL;
609
610         /* Allocate the new object */
611         obj = i915_gem_object_create(dev_priv, size);
612         if (IS_ERR(obj))
613                 return PTR_ERR(obj);
614
615         ret = drm_gem_handle_create(file, &obj->base, &handle);
616         /* drop reference from allocate - handle holds it now */
617         i915_gem_object_put(obj);
618         if (ret)
619                 return ret;
620
621         *handle_p = handle;
622         return 0;
623 }
624
625 int
626 i915_gem_dumb_create(struct drm_file *file,
627                      struct drm_device *dev,
628                      struct drm_mode_create_dumb *args)
629 {
630         /* have to work out size/pitch and return them */
631         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
632         args->size = args->pitch * args->height;
633         return i915_gem_create(file, to_i915(dev),
634                                args->size, &args->handle);
635 }
636
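/* GPU writes to a write-back cached object can linger in the CPU cache and
 * so may need a clflush before non-coherent (e.g. display) access; uncached
 * and write-through objects do not.
 */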
637 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
638 {
639         return !(obj->cache_level == I915_CACHE_NONE ||
640                  obj->cache_level == I915_CACHE_WT);
641 }
642
643 /**
644  * Creates a new mm object and returns a handle to it.
645  * @dev: drm device pointer
646  * @data: ioctl data blob
647  * @file: drm file pointer
648  */
649 int
650 i915_gem_create_ioctl(struct drm_device *dev, void *data,
651                       struct drm_file *file)
652 {
653         struct drm_i915_private *dev_priv = to_i915(dev);
654         struct drm_i915_gem_create *args = data;
655
656         i915_gem_flush_free_objects(dev_priv);
657
658         return i915_gem_create(file, dev_priv,
659                                args->size, &args->handle);
660 }
661
662 static inline enum fb_op_origin
663 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
664 {
665         return (domain == I915_GEM_DOMAIN_GTT ?
666                 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
667 }
668
669 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
670 {
671         /*
672          * No actual flushing is required for the GTT write domain for reads
673          * from the GTT domain. Writes to it "immediately" go to main memory
674          * as far as we know, so there's no chipset flush. It also doesn't
675          * land in the GPU render cache.
676          *
677          * However, we do have to enforce the order so that all writes through
678          * the GTT land before any writes to the device, such as updates to
679          * the GATT itself.
680          *
681          * We also have to wait a bit for the writes to land from the GTT.
682          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
683          * timing. This issue has only been observed when switching quickly
684          * between GTT writes and CPU reads from inside the kernel on recent hw,
685          * and it appears to only affect discrete GTT blocks (i.e. on LLC
686          * system agents we cannot reproduce this behaviour, until Cannonlake
687          * that was!).
688          */
689
690         wmb();
691
692         intel_runtime_pm_get(dev_priv);
693         spin_lock_irq(&dev_priv->uncore.lock);
694
695         POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
696
697         spin_unlock_irq(&dev_priv->uncore.lock);
698         intel_runtime_pm_put(dev_priv);
699 }
700
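/* Flush any outstanding writes for the object's current write domain:
 * GGTT writes via i915_gem_flush_ggtt_writes(), CPU writes via clflush,
 * and GPU render writes by marking the CPU cache dirty where needed.
 * The write domain is then cleared.
 */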
701 static void
702 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
703 {
704         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
705         struct i915_vma *vma;
706
707         if (!(obj->base.write_domain & flush_domains))
708                 return;
709
710         switch (obj->base.write_domain) {
711         case I915_GEM_DOMAIN_GTT:
712                 i915_gem_flush_ggtt_writes(dev_priv);
713
714                 intel_fb_obj_flush(obj,
715                                    fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
716
717                 for_each_ggtt_vma(vma, obj) {
718                         if (vma->iomap)
719                                 continue;
720
721                         i915_vma_unset_ggtt_write(vma);
722                 }
723                 break;
724
725         case I915_GEM_DOMAIN_CPU:
726                 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
727                 break;
728
729         case I915_GEM_DOMAIN_RENDER:
730                 if (gpu_write_needs_clflush(obj))
731                         obj->cache_dirty = true;
732                 break;
733         }
734
735         obj->base.write_domain = 0;
736 }
737
738 static inline int
739 __copy_to_user_swizzled(char __user *cpu_vaddr,
740                         const char *gpu_vaddr, int gpu_offset,
741                         int length)
742 {
743         int ret, cpu_offset = 0;
744
745         while (length > 0) {
746                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
747                 int this_length = min(cacheline_end - gpu_offset, length);
748                 int swizzled_gpu_offset = gpu_offset ^ 64;
749
750                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
751                                      gpu_vaddr + swizzled_gpu_offset,
752                                      this_length);
753                 if (ret)
754                         return ret + length;
755
756                 cpu_offset += this_length;
757                 gpu_offset += this_length;
758                 length -= this_length;
759         }
760
761         return 0;
762 }
763
764 static inline int
765 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
766                           const char __user *cpu_vaddr,
767                           int length)
768 {
769         int ret, cpu_offset = 0;
770
771         while (length > 0) {
772                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
773                 int this_length = min(cacheline_end - gpu_offset, length);
774                 int swizzled_gpu_offset = gpu_offset ^ 64;
775
776                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
777                                        cpu_vaddr + cpu_offset,
778                                        this_length);
779                 if (ret)
780                         return ret + length;
781
782                 cpu_offset += this_length;
783                 gpu_offset += this_length;
784                 length -= this_length;
785         }
786
787         return 0;
788 }
789
790 /*
791  * Pins the specified object's pages and synchronizes the object with
792  * GPU accesses. Sets needs_clflush to non-zero if the caller should
793  * flush the object from the CPU cache.
794  */
795 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
796                                     unsigned int *needs_clflush)
797 {
798         int ret;
799
800         lockdep_assert_held(&obj->base.dev->struct_mutex);
801
802         *needs_clflush = 0;
803         if (!i915_gem_object_has_struct_page(obj))
804                 return -ENODEV;
805
806         ret = i915_gem_object_wait(obj,
807                                    I915_WAIT_INTERRUPTIBLE |
808                                    I915_WAIT_LOCKED,
809                                    MAX_SCHEDULE_TIMEOUT,
810                                    NULL);
811         if (ret)
812                 return ret;
813
814         ret = i915_gem_object_pin_pages(obj);
815         if (ret)
816                 return ret;
817
818         if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
819             !static_cpu_has(X86_FEATURE_CLFLUSH)) {
820                 ret = i915_gem_object_set_to_cpu_domain(obj, false);
821                 if (ret)
822                         goto err_unpin;
823                 else
824                         goto out;
825         }
826
827         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
828
829         /* If we're not in the cpu read domain, set ourselves into the gtt
830          * read domain and manually flush cachelines (if required). This
831          * optimizes for the case when the gpu will dirty the data
832          * anyway again before the next pread happens.
833          */
834         if (!obj->cache_dirty &&
835             !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
836                 *needs_clflush = CLFLUSH_BEFORE;
837
838 out:
839         /* return with the pages pinned */
840         return 0;
841
842 err_unpin:
843         i915_gem_object_unpin_pages(obj);
844         return ret;
845 }
846
847 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
848                                      unsigned int *needs_clflush)
849 {
850         int ret;
851
852         lockdep_assert_held(&obj->base.dev->struct_mutex);
853
854         *needs_clflush = 0;
855         if (!i915_gem_object_has_struct_page(obj))
856                 return -ENODEV;
857
858         ret = i915_gem_object_wait(obj,
859                                    I915_WAIT_INTERRUPTIBLE |
860                                    I915_WAIT_LOCKED |
861                                    I915_WAIT_ALL,
862                                    MAX_SCHEDULE_TIMEOUT,
863                                    NULL);
864         if (ret)
865                 return ret;
866
867         ret = i915_gem_object_pin_pages(obj);
868         if (ret)
869                 return ret;
870
871         if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
872             !static_cpu_has(X86_FEATURE_CLFLUSH)) {
873                 ret = i915_gem_object_set_to_cpu_domain(obj, true);
874                 if (ret)
875                         goto err_unpin;
876                 else
877                         goto out;
878         }
879
880         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
881
882         /* If we're not in the cpu write domain, set ourselves into the
883          * gtt write domain and manually flush cachelines (as required).
884          * This optimizes for the case when the gpu will use the data
885          * right away and we therefore have to clflush anyway.
886          */
887         if (!obj->cache_dirty) {
888                 *needs_clflush |= CLFLUSH_AFTER;
889
890                 /*
891                  * Same trick applies to invalidate partially written
892                  * cachelines read before writing.
893                  */
894                 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
895                         *needs_clflush |= CLFLUSH_BEFORE;
896         }
897
898 out:
899         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
900         obj->mm.dirty = true;
901         /* return with the pages pinned */
902         return 0;
903
904 err_unpin:
905         i915_gem_object_unpin_pages(obj);
906         return ret;
907 }
908
909 static void
910 shmem_clflush_swizzled_range(char *addr, unsigned long length,
911                              bool swizzled)
912 {
913         if (unlikely(swizzled)) {
914                 unsigned long start = (unsigned long) addr;
915                 unsigned long end = (unsigned long) addr + length;
916
917                 /* For swizzling simply ensure that we always flush both
918                  * channels. Lame, but simple and it works. Swizzled
919                  * pwrite/pread is far from a hotpath - current userspace
920                  * doesn't use it at all. */
921                 start = round_down(start, 128);
922                 end = round_up(end, 128);
923
924                 drm_clflush_virt_range((void *)start, end - start);
925         } else {
926                 drm_clflush_virt_range(addr, length);
927         }
928
929 }
930
931 /* The only difference from the fast-path function is that this can handle
932  * bit17 swizzling and uses non-atomic copy and kmap functions. */
933 static int
934 shmem_pread_slow(struct page *page, int offset, int length,
935                  char __user *user_data,
936                  bool page_do_bit17_swizzling, bool needs_clflush)
937 {
938         char *vaddr;
939         int ret;
940
941         vaddr = kmap(page);
942         if (needs_clflush)
943                 shmem_clflush_swizzled_range(vaddr + offset, length,
944                                              page_do_bit17_swizzling);
945
946         if (page_do_bit17_swizzling)
947                 ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
948         else
949                 ret = __copy_to_user(user_data, vaddr + offset, length);
950         kunmap(page);
951
952         return ret ? - EFAULT : 0;
953 }
954
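/* Per-page copy function for the shmem pread fastpath. Uses an atomic kmap
 * and falls back to the slow path for bit17-swizzled pages or if the atomic
 * copy faults.
 */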
955 static int
956 shmem_pread(struct page *page, int offset, int length, char __user *user_data,
957             bool page_do_bit17_swizzling, bool needs_clflush)
958 {
959         int ret;
960
961         ret = -ENODEV;
962         if (!page_do_bit17_swizzling) {
963                 char *vaddr = kmap_atomic(page);
964
965                 if (needs_clflush)
966                         drm_clflush_virt_range(vaddr + offset, length);
967                 ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
968                 kunmap_atomic(vaddr);
969         }
970         if (ret == 0)
971                 return 0;
972
973         return shmem_pread_slow(page, offset, length, user_data,
974                                 page_do_bit17_swizzling, needs_clflush);
975 }
976
977 static int
978 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
979                      struct drm_i915_gem_pread *args)
980 {
981         char __user *user_data;
982         u64 remain;
983         unsigned int obj_do_bit17_swizzling;
984         unsigned int needs_clflush;
985         unsigned int idx, offset;
986         int ret;
987
988         obj_do_bit17_swizzling = 0;
989         if (i915_gem_object_needs_bit17_swizzle(obj))
990                 obj_do_bit17_swizzling = BIT(17);
991
992         ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
993         if (ret)
994                 return ret;
995
996         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
997         mutex_unlock(&obj->base.dev->struct_mutex);
998         if (ret)
999                 return ret;
1000
1001         remain = args->size;
1002         user_data = u64_to_user_ptr(args->data_ptr);
1003         offset = offset_in_page(args->offset);
1004         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1005                 struct page *page = i915_gem_object_get_page(obj, idx);
1006                 int length;
1007
1008                 length = remain;
1009                 if (offset + length > PAGE_SIZE)
1010                         length = PAGE_SIZE - offset;
1011
1012                 ret = shmem_pread(page, offset, length, user_data,
1013                                   page_to_phys(page) & obj_do_bit17_swizzling,
1014                                   needs_clflush);
1015                 if (ret)
1016                         break;
1017
1018                 remain -= length;
1019                 user_data += length;
1020                 offset = 0;
1021         }
1022
1023         i915_gem_obj_finish_shmem_access(obj);
1024         return ret;
1025 }
1026
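/* Copy from the GGTT aperture to userspace. Tries an atomic write-combining
 * mapping first and, if that copy faults, retries with a full (sleeping)
 * mapping. Returns non-zero if data could not be copied.
 */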
1027 static inline bool
1028 gtt_user_read(struct io_mapping *mapping,
1029               loff_t base, int offset,
1030               char __user *user_data, int length)
1031 {
1032         void __iomem *vaddr;
1033         unsigned long unwritten;
1034
1035         /* We can use the cpu mem copy function because this is X86. */
1036         vaddr = io_mapping_map_atomic_wc(mapping, base);
1037         unwritten = __copy_to_user_inatomic(user_data,
1038                                             (void __force *)vaddr + offset,
1039                                             length);
1040         io_mapping_unmap_atomic(vaddr);
1041         if (unwritten) {
1042                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1043                 unwritten = copy_to_user(user_data,
1044                                          (void __force *)vaddr + offset,
1045                                          length);
1046                 io_mapping_unmap(vaddr);
1047         }
1048         return unwritten;
1049 }
1050
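/* Slow pread fallback that reads through the GGTT aperture: pin the object
 * into the mappable region if possible, otherwise reserve a single mappable
 * page and rebind it for each page copied.
 */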
1051 static int
1052 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
1053                    const struct drm_i915_gem_pread *args)
1054 {
1055         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1056         struct i915_ggtt *ggtt = &i915->ggtt;
1057         struct drm_mm_node node;
1058         struct i915_vma *vma;
1059         void __user *user_data;
1060         u64 remain, offset;
1061         int ret;
1062
1063         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1064         if (ret)
1065                 return ret;
1066
1067         intel_runtime_pm_get(i915);
1068         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1069                                        PIN_MAPPABLE |
1070                                        PIN_NONFAULT |
1071                                        PIN_NONBLOCK);
1072         if (!IS_ERR(vma)) {
1073                 node.start = i915_ggtt_offset(vma);
1074                 node.allocated = false;
1075                 ret = i915_vma_put_fence(vma);
1076                 if (ret) {
1077                         i915_vma_unpin(vma);
1078                         vma = ERR_PTR(ret);
1079                 }
1080         }
1081         if (IS_ERR(vma)) {
1082                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1083                 if (ret)
1084                         goto out_unlock;
1085                 GEM_BUG_ON(!node.allocated);
1086         }
1087
1088         ret = i915_gem_object_set_to_gtt_domain(obj, false);
1089         if (ret)
1090                 goto out_unpin;
1091
1092         mutex_unlock(&i915->drm.struct_mutex);
1093
1094         user_data = u64_to_user_ptr(args->data_ptr);
1095         remain = args->size;
1096         offset = args->offset;
1097
1098         while (remain > 0) {
1099                 /* Operation in this page
1100                  *
1101                  * page_base = page offset within aperture
1102                  * page_offset = offset within page
1103                  * page_length = bytes to copy for this page
1104                  */
1105                 u32 page_base = node.start;
1106                 unsigned page_offset = offset_in_page(offset);
1107                 unsigned page_length = PAGE_SIZE - page_offset;
1108                 page_length = remain < page_length ? remain : page_length;
1109                 if (node.allocated) {
1110                         wmb();
1111                         ggtt->base.insert_page(&ggtt->base,
1112                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1113                                                node.start, I915_CACHE_NONE, 0);
1114                         wmb();
1115                 } else {
1116                         page_base += offset & PAGE_MASK;
1117                 }
1118
1119                 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
1120                                   user_data, page_length)) {
1121                         ret = -EFAULT;
1122                         break;
1123                 }
1124
1125                 remain -= page_length;
1126                 user_data += page_length;
1127                 offset += page_length;
1128         }
1129
1130         mutex_lock(&i915->drm.struct_mutex);
1131 out_unpin:
1132         if (node.allocated) {
1133                 wmb();
1134                 ggtt->base.clear_range(&ggtt->base,
1135                                        node.start, node.size);
1136                 remove_mappable_node(&node);
1137         } else {
1138                 i915_vma_unpin(vma);
1139         }
1140 out_unlock:
1141         intel_runtime_pm_put(i915);
1142         mutex_unlock(&i915->drm.struct_mutex);
1143
1144         return ret;
1145 }
1146
1147 /**
1148  * Reads data from the object referenced by handle.
1149  * @dev: drm device pointer
1150  * @data: ioctl data blob
1151  * @file: drm file pointer
1152  *
1153  * On error, the contents of *data are undefined.
1154  */
1155 int
1156 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1157                      struct drm_file *file)
1158 {
1159         struct drm_i915_gem_pread *args = data;
1160         struct drm_i915_gem_object *obj;
1161         int ret;
1162
1163         if (args->size == 0)
1164                 return 0;
1165
1166         if (!access_ok(VERIFY_WRITE,
1167                        u64_to_user_ptr(args->data_ptr),
1168                        args->size))
1169                 return -EFAULT;
1170
1171         obj = i915_gem_object_lookup(file, args->handle);
1172         if (!obj)
1173                 return -ENOENT;
1174
1175         /* Bounds check source.  */
1176         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1177                 ret = -EINVAL;
1178                 goto out;
1179         }
1180
1181         trace_i915_gem_object_pread(obj, args->offset, args->size);
1182
1183         ret = i915_gem_object_wait(obj,
1184                                    I915_WAIT_INTERRUPTIBLE,
1185                                    MAX_SCHEDULE_TIMEOUT,
1186                                    to_rps_client(file));
1187         if (ret)
1188                 goto out;
1189
1190         ret = i915_gem_object_pin_pages(obj);
1191         if (ret)
1192                 goto out;
1193
1194         ret = i915_gem_shmem_pread(obj, args);
1195         if (ret == -EFAULT || ret == -ENODEV)
1196                 ret = i915_gem_gtt_pread(obj, args);
1197
1198         i915_gem_object_unpin_pages(obj);
1199 out:
1200         i915_gem_object_put(obj);
1201         return ret;
1202 }
1203
1204 /* This is the fast write path which cannot handle
1205  * page faults in the source data
1206  */
1207
1208 static inline bool
1209 ggtt_write(struct io_mapping *mapping,
1210            loff_t base, int offset,
1211            char __user *user_data, int length)
1212 {
1213         void __iomem *vaddr;
1214         unsigned long unwritten;
1215
1216         /* We can use the cpu mem copy function because this is X86. */
1217         vaddr = io_mapping_map_atomic_wc(mapping, base);
1218         unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
1219                                                       user_data, length);
1220         io_mapping_unmap_atomic(vaddr);
1221         if (unwritten) {
1222                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1223                 unwritten = copy_from_user((void __force *)vaddr + offset,
1224                                            user_data, length);
1225                 io_mapping_unmap(vaddr);
1226         }
1227
1228         return unwritten;
1229 }
1230
1231 /**
1232  * This is the fast pwrite path, where we copy the data directly from the
1233  * user into the GTT, uncached.
1234  * @obj: i915 GEM object
1235  * @args: pwrite arguments structure
1236  */
1237 static int
1238 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1239                          const struct drm_i915_gem_pwrite *args)
1240 {
1241         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1242         struct i915_ggtt *ggtt = &i915->ggtt;
1243         struct drm_mm_node node;
1244         struct i915_vma *vma;
1245         u64 remain, offset;
1246         void __user *user_data;
1247         int ret;
1248
1249         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1250         if (ret)
1251                 return ret;
1252
1253         if (i915_gem_object_has_struct_page(obj)) {
1254                 /*
1255                  * Avoid waking the device up if we can fallback, as
1256                  * waking/resuming is very slow (worst-case 10-100 ms
1257                  * depending on PCI sleeps and our own resume time).
1258                  * This easily dwarfs any performance advantage from
1259                  * using the cache bypass of indirect GGTT access.
1260                  */
1261                 if (!intel_runtime_pm_get_if_in_use(i915)) {
1262                         ret = -EFAULT;
1263                         goto out_unlock;
1264                 }
1265         } else {
1266                 /* No backing pages, no fallback, we must force GGTT access */
1267                 intel_runtime_pm_get(i915);
1268         }
1269
1270         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1271                                        PIN_MAPPABLE |
1272                                        PIN_NONFAULT |
1273                                        PIN_NONBLOCK);
1274         if (!IS_ERR(vma)) {
1275                 node.start = i915_ggtt_offset(vma);
1276                 node.allocated = false;
1277                 ret = i915_vma_put_fence(vma);
1278                 if (ret) {
1279                         i915_vma_unpin(vma);
1280                         vma = ERR_PTR(ret);
1281                 }
1282         }
1283         if (IS_ERR(vma)) {
1284                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1285                 if (ret)
1286                         goto out_rpm;
1287                 GEM_BUG_ON(!node.allocated);
1288         }
1289
1290         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1291         if (ret)
1292                 goto out_unpin;
1293
1294         mutex_unlock(&i915->drm.struct_mutex);
1295
1296         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1297
1298         user_data = u64_to_user_ptr(args->data_ptr);
1299         offset = args->offset;
1300         remain = args->size;
1301         while (remain) {
1302                 /* Operation in this page
1303                  *
1304                  * page_base = page offset within aperture
1305                  * page_offset = offset within page
1306                  * page_length = bytes to copy for this page
1307                  */
1308                 u32 page_base = node.start;
1309                 unsigned int page_offset = offset_in_page(offset);
1310                 unsigned int page_length = PAGE_SIZE - page_offset;
1311                 page_length = remain < page_length ? remain : page_length;
1312                 if (node.allocated) {
1313                         wmb(); /* flush the write before we modify the GGTT */
1314                         ggtt->base.insert_page(&ggtt->base,
1315                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1316                                                node.start, I915_CACHE_NONE, 0);
1317                         wmb(); /* flush modifications to the GGTT (insert_page) */
1318                 } else {
1319                         page_base += offset & PAGE_MASK;
1320                 }
1321                 /* If we get a fault while copying data, then (presumably) our
1322                  * source page isn't available.  Return the error and we'll
1323                  * retry in the slow path.
1324                  * If the object is non-shmem backed, we retry with the
1325                  * path that handles page faults.
1326                  */
1327                 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
1328                                user_data, page_length)) {
1329                         ret = -EFAULT;
1330                         break;
1331                 }
1332
1333                 remain -= page_length;
1334                 user_data += page_length;
1335                 offset += page_length;
1336         }
1337         intel_fb_obj_flush(obj, ORIGIN_CPU);
1338
1339         mutex_lock(&i915->drm.struct_mutex);
1340 out_unpin:
1341         if (node.allocated) {
1342                 wmb();
1343                 ggtt->base.clear_range(&ggtt->base,
1344                                        node.start, node.size);
1345                 remove_mappable_node(&node);
1346         } else {
1347                 i915_vma_unpin(vma);
1348         }
1349 out_rpm:
1350         intel_runtime_pm_put(i915);
1351 out_unlock:
1352         mutex_unlock(&i915->drm.struct_mutex);
1353         return ret;
1354 }
1355
1356 static int
1357 shmem_pwrite_slow(struct page *page, int offset, int length,
1358                   char __user *user_data,
1359                   bool page_do_bit17_swizzling,
1360                   bool needs_clflush_before,
1361                   bool needs_clflush_after)
1362 {
1363         char *vaddr;
1364         int ret;
1365
1366         vaddr = kmap(page);
1367         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1368                 shmem_clflush_swizzled_range(vaddr + offset, length,
1369                                              page_do_bit17_swizzling);
1370         if (page_do_bit17_swizzling)
1371                 ret = __copy_from_user_swizzled(vaddr, offset, user_data,
1372                                                 length);
1373         else
1374                 ret = __copy_from_user(vaddr + offset, user_data, length);
1375         if (needs_clflush_after)
1376                 shmem_clflush_swizzled_range(vaddr + offset, length,
1377                                              page_do_bit17_swizzling);
1378         kunmap(page);
1379
1380         return ret ? -EFAULT : 0;
1381 }
1382
1383 /* Per-page copy function for the shmem pwrite fastpath.
1384  * Flushes invalid cachelines before writing to the target if
1385  * needs_clflush_before is set and flushes out any written cachelines after
1386  * writing if needs_clflush_after is set.
1387  */
1388 static int
1389 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1390              bool page_do_bit17_swizzling,
1391              bool needs_clflush_before,
1392              bool needs_clflush_after)
1393 {
1394         int ret;
1395
1396         ret = -ENODEV;
1397         if (!page_do_bit17_swizzling) {
1398                 char *vaddr = kmap_atomic(page);
1399
1400                 if (needs_clflush_before)
1401                         drm_clflush_virt_range(vaddr + offset, len);
1402                 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
1403                 if (needs_clflush_after)
1404                         drm_clflush_virt_range(vaddr + offset, len);
1405
1406                 kunmap_atomic(vaddr);
1407         }
1408         if (ret == 0)
1409                 return ret;
1410
1411         return shmem_pwrite_slow(page, offset, len, user_data,
1412                                  page_do_bit17_swizzling,
1413                                  needs_clflush_before,
1414                                  needs_clflush_after);
1415 }
1416
1417 static int
1418 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1419                       const struct drm_i915_gem_pwrite *args)
1420 {
1421         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1422         void __user *user_data;
1423         u64 remain;
1424         unsigned int obj_do_bit17_swizzling;
1425         unsigned int partial_cacheline_write;
1426         unsigned int needs_clflush;
1427         unsigned int offset, idx;
1428         int ret;
1429
1430         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1431         if (ret)
1432                 return ret;
1433
1434         ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1435         mutex_unlock(&i915->drm.struct_mutex);
1436         if (ret)
1437                 return ret;
1438
1439         obj_do_bit17_swizzling = 0;
1440         if (i915_gem_object_needs_bit17_swizzle(obj))
1441                 obj_do_bit17_swizzling = BIT(17);
1442
1443         /* If we don't overwrite a cacheline completely we need to be
1444          * careful to have up-to-date data by first clflushing. Don't
1445          * overcomplicate things and flush the entire patch.
1446          */
1447         partial_cacheline_write = 0;
1448         if (needs_clflush & CLFLUSH_BEFORE)
1449                 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1450
1451         user_data = u64_to_user_ptr(args->data_ptr);
1452         remain = args->size;
1453         offset = offset_in_page(args->offset);
1454         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1455                 struct page *page = i915_gem_object_get_page(obj, idx);
1456                 int length;
1457
1458                 length = remain;
1459                 if (offset + length > PAGE_SIZE)
1460                         length = PAGE_SIZE - offset;
1461
1462                 ret = shmem_pwrite(page, offset, length, user_data,
1463                                    page_to_phys(page) & obj_do_bit17_swizzling,
1464                                    (offset | length) & partial_cacheline_write,
1465                                    needs_clflush & CLFLUSH_AFTER);
1466                 if (ret)
1467                         break;
1468
1469                 remain -= length;
1470                 user_data += length;
1471                 offset = 0;
1472         }
1473
1474         intel_fb_obj_flush(obj, ORIGIN_CPU);
1475         i915_gem_obj_finish_shmem_access(obj);
1476         return ret;
1477 }
1478
1479 /**
1480  * Writes data to the object referenced by handle.
1481  * @dev: drm device
1482  * @data: ioctl data blob
1483  * @file: drm file
1484  *
1485  * On error, the contents of the buffer that were to be modified are undefined.
1486  */
1487 int
1488 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1489                       struct drm_file *file)
1490 {
1491         struct drm_i915_gem_pwrite *args = data;
1492         struct drm_i915_gem_object *obj;
1493         int ret;
1494
1495         if (args->size == 0)
1496                 return 0;
1497
1498         if (!access_ok(VERIFY_READ,
1499                        u64_to_user_ptr(args->data_ptr),
1500                        args->size))
1501                 return -EFAULT;
1502
1503         obj = i915_gem_object_lookup(file, args->handle);
1504         if (!obj)
1505                 return -ENOENT;
1506
1507         /* Bounds check destination. */
1508         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1509                 ret = -EINVAL;
1510                 goto err;
1511         }
1512
1513         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1514
1515         ret = -ENODEV;
1516         if (obj->ops->pwrite)
1517                 ret = obj->ops->pwrite(obj, args);
1518         if (ret != -ENODEV)
1519                 goto err;
1520
1521         ret = i915_gem_object_wait(obj,
1522                                    I915_WAIT_INTERRUPTIBLE |
1523                                    I915_WAIT_ALL,
1524                                    MAX_SCHEDULE_TIMEOUT,
1525                                    to_rps_client(file));
1526         if (ret)
1527                 goto err;
1528
1529         ret = i915_gem_object_pin_pages(obj);
1530         if (ret)
1531                 goto err;
1532
1533         ret = -EFAULT;
1534         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1535          * it would end up going through the fenced access, and we'll get
1536          * different detiling behavior between reading and writing.
1537          * pread/pwrite currently are reading and writing from the CPU
1538          * perspective, requiring manual detiling by the client.
1539          */
1540         if (!i915_gem_object_has_struct_page(obj) ||
1541             cpu_write_needs_clflush(obj))
1542                 /* Note that the gtt paths might fail with non-page-backed user
1543                  * pointers (e.g. gtt mappings when moving data between
1544                  * textures). Fall back to the shmem path in that case.
1545                  */
1546                 ret = i915_gem_gtt_pwrite_fast(obj, args);
1547
1548         if (ret == -EFAULT || ret == -ENOSPC) {
1549                 if (obj->phys_handle)
1550                         ret = i915_gem_phys_pwrite(obj, args, file);
1551                 else
1552                         ret = i915_gem_shmem_pwrite(obj, args);
1553         }
1554
1555         i915_gem_object_unpin_pages(obj);
1556 err:
1557         i915_gem_object_put(obj);
1558         return ret;
1559 }
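
/*
 * A minimal userspace sketch of driving this ioctl (illustrative only; it
 * assumes an open DRM fd "fd", an existing GEM handle "handle", and
 * libdrm's drmIoctl() wrapper):
 *
 *	char payload[4096];
 *	struct drm_i915_gem_pwrite pw = {
 *		.handle = handle,
 *		.offset = 0,
 *		.size = sizeof(payload),
 *		.data_ptr = (uintptr_t)payload,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pw))
 *		perror("DRM_IOCTL_I915_GEM_PWRITE");
 */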
1560
1561 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1562 {
1563         struct drm_i915_private *i915;
1564         struct list_head *list;
1565         struct i915_vma *vma;
1566
1567         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1568
1569         for_each_ggtt_vma(vma, obj) {
1570                 if (i915_vma_is_active(vma))
1571                         continue;
1572
1573                 if (!drm_mm_node_allocated(&vma->node))
1574                         continue;
1575
1576                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1577         }
1578
1579         i915 = to_i915(obj->base.dev);
1580         spin_lock(&i915->mm.obj_lock);
1581         list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1582         list_move_tail(&obj->mm.link, list);
1583         spin_unlock(&i915->mm.obj_lock);
1584 }
1585
1586 /**
1587  * i915_gem_set_domain_ioctl - Called when user space prepares to use an object
1588  * with the CPU, either through the mmap ioctl's mapping or a GTT mapping.
1589  * @dev: drm device
1590  * @data: ioctl data blob
1591  * @file: drm file
1592  */
1593 int
1594 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1595                           struct drm_file *file)
1596 {
1597         struct drm_i915_gem_set_domain *args = data;
1598         struct drm_i915_gem_object *obj;
1599         uint32_t read_domains = args->read_domains;
1600         uint32_t write_domain = args->write_domain;
1601         int err;
1602
1603         /* Only handle setting domains to types used by the CPU. */
1604         if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1605                 return -EINVAL;
1606
1607         /* Having something in the write domain implies it's in the read
1608          * domain, and only that read domain.  Enforce that in the request.
1609          */
1610         if (write_domain != 0 && read_domains != write_domain)
1611                 return -EINVAL;
1612
1613         obj = i915_gem_object_lookup(file, args->handle);
1614         if (!obj)
1615                 return -ENOENT;
1616
1617         /* Try to flush the object off the GPU without holding the lock.
1618          * We will repeat the flush holding the lock in the normal manner
1619          * to catch cases where we are gazumped.
1620          */
1621         err = i915_gem_object_wait(obj,
1622                                    I915_WAIT_INTERRUPTIBLE |
1623                                    (write_domain ? I915_WAIT_ALL : 0),
1624                                    MAX_SCHEDULE_TIMEOUT,
1625                                    to_rps_client(file));
1626         if (err)
1627                 goto out;
1628
1629         /*
1630          * Proxy objects do not control access to the backing storage, ergo
1631          * they cannot be used as a means to manipulate the cache domain
1632          * tracking for that backing storage. The proxy object is always
1633          * considered to be outside of any cache domain.
1634          */
1635         if (i915_gem_object_is_proxy(obj)) {
1636                 err = -ENXIO;
1637                 goto out;
1638         }
1639
1640         /*
1641          * Flush and acquire obj->pages so that we are coherent through
1642          * direct access in memory with previous cached writes through
1643          * shmemfs and that our cache domain tracking remains valid.
1644          * For example, if the obj->filp was moved to swap without us
1645          * being notified and releasing the pages, we would mistakenly
1646          * continue to assume that the obj remained out of the CPU cached
1647          * domain.
1648          */
1649         err = i915_gem_object_pin_pages(obj);
1650         if (err)
1651                 goto out;
1652
1653         err = i915_mutex_lock_interruptible(dev);
1654         if (err)
1655                 goto out_unpin;
1656
1657         if (read_domains & I915_GEM_DOMAIN_WC)
1658                 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
1659         else if (read_domains & I915_GEM_DOMAIN_GTT)
1660                 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
1661         else
1662                 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
1663
1664         /* And bump the LRU for this access */
1665         i915_gem_object_bump_inactive_ggtt(obj);
1666
1667         mutex_unlock(&dev->struct_mutex);
1668
1669         if (write_domain != 0)
1670                 intel_fb_obj_invalidate(obj,
1671                                         fb_write_origin(obj, write_domain));
1672
1673 out_unpin:
1674         i915_gem_object_unpin_pages(obj);
1675 out:
1676         i915_gem_object_put(obj);
1677         return err;
1678 }
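
/*
 * A sketch of the matching userspace call made before CPU access
 * (illustrative; "fd" and "handle" are assumed, drmIoctl() comes from
 * libdrm, and write_domain may be 0 for read-only access):
 *
 *	struct drm_i915_gem_set_domain sd = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &sd);
 */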
1679
1680 /**
1681  * i915_gem_sw_finish_ioctl - Called when user space has done writes to this buffer
1682  * @dev: drm device
1683  * @data: ioctl data blob
1684  * @file: drm file
1685  */
1686 int
1687 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1688                          struct drm_file *file)
1689 {
1690         struct drm_i915_gem_sw_finish *args = data;
1691         struct drm_i915_gem_object *obj;
1692
1693         obj = i915_gem_object_lookup(file, args->handle);
1694         if (!obj)
1695                 return -ENOENT;
1696
1697         /*
1698          * Proxy objects are barred from CPU access, so there is no
1699          * need to ban sw_finish as it is a nop.
1700          */
1701
1702         /* Pinned buffers may be scanout, so flush the cache */
1703         i915_gem_object_flush_if_display(obj);
1704         i915_gem_object_put(obj);
1705
1706         return 0;
1707 }
1708
1709 /**
1710  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1711  *                       it is mapped to.
1712  * @dev: drm device
1713  * @data: ioctl data blob
1714  * @file: drm file
1715  *
1716  * While the mapping holds a reference on the contents of the object, it doesn't
1717  * imply a ref on the object itself.
1718  *
1719  * IMPORTANT:
1720  *
1721  * DRM driver writers who look at this function as an example of how to do GEM
1722  * mmap support, please don't implement mmap support like this. The modern way
1723  * to implement DRM mmap support is with an mmap offset ioctl (like
1724  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1725  * That way debug tooling like valgrind will understand what's going on; hiding
1726  * the mmap call in a driver-private ioctl will break that. The i915 driver only
1727  * does cpu mmaps this way because we didn't know better.
1728  */
1729 int
1730 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1731                     struct drm_file *file)
1732 {
1733         struct drm_i915_gem_mmap *args = data;
1734         struct drm_i915_gem_object *obj;
1735         unsigned long addr;
1736
1737         if (args->flags & ~(I915_MMAP_WC))
1738                 return -EINVAL;
1739
1740         if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1741                 return -ENODEV;
1742
1743         obj = i915_gem_object_lookup(file, args->handle);
1744         if (!obj)
1745                 return -ENOENT;
1746
1747         /* prime objects have no backing filp to GEM mmap
1748          * pages from.
1749          */
1750         if (!obj->base.filp) {
1751                 i915_gem_object_put(obj);
1752                 return -ENXIO;
1753         }
1754
1755         addr = vm_mmap(obj->base.filp, 0, args->size,
1756                        PROT_READ | PROT_WRITE, MAP_SHARED,
1757                        args->offset);
1758         if (args->flags & I915_MMAP_WC) {
1759                 struct mm_struct *mm = current->mm;
1760                 struct vm_area_struct *vma;
1761
1762                 if (down_write_killable(&mm->mmap_sem)) {
1763                         i915_gem_object_put(obj);
1764                         return -EINTR;
1765                 }
1766                 vma = find_vma(mm, addr);
1767                 if (vma)
1768                         vma->vm_page_prot =
1769                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1770                 else
1771                         addr = -ENOMEM;
1772                 up_write(&mm->mmap_sem);
1773
1774                 /* This may race, but that's ok, it only gets set */
1775                 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1776         }
1777         i915_gem_object_put(obj);
1778         if (IS_ERR((void *)addr))
1779                 return addr;
1780
1781         args->addr_ptr = (uint64_t) addr;
1782
1783         return 0;
1784 }
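
/*
 * Userspace sketch of this (legacy) CPU mmap path, with "fd", "handle" and
 * "obj_size" assumed; setting .flags = I915_MMAP_WC requests a
 * write-combining mapping instead of the default WB one:
 *
 *	struct drm_i915_gem_mmap mm = {
 *		.handle = handle,
 *		.size = obj_size,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mm);
 *	void *ptr = (void *)(uintptr_t)mm.addr_ptr;
 */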
1785
1786 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1787 {
1788         return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
1789 }
1790
1791 /**
1792  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1793  *
1794  * A history of the GTT mmap interface:
1795  *
1796  * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
1797  *     aligned and suitable for fencing, and still fit into the available
1798  *     mappable space left by the pinned display objects. A classic problem,
1799  *     which we called the page-fault-of-doom, was that we would ping-pong between
1800  *     two objects that could not fit inside the GTT, and so the memcpy
1801  *     would page one object in at the expense of the other between every
1802  *     single byte.
1803  *
1804  * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1805  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1806  *     object is too large for the available space (or simply too large
1807  *     for the mappable aperture!), a view is created instead and faulted
1808  *     into userspace. (This view is aligned and sized appropriately for
1809  *     fenced access.)
1810  *
1811  * 2 - Recognise WC as a separate cache domain so that we can flush the
1812  *     delayed writes via GTT before performing direct access via WC.
1813  *
1814  * Restrictions:
1815  *
1816  *  * snoopable objects cannot be accessed via the GTT. It can cause machine
1817  *    hangs on some architectures, corruption on others. An attempt to service
1818  *    a GTT page fault from a snoopable object will generate a SIGBUS.
1819  *
1820  *  * the object must be able to fit into RAM (physical memory, though not
1821  *    limited to the mappable aperture).
1822  *
1823  *
1824  * Caveats:
1825  *
1826  *  * a new GTT page fault will synchronize rendering from the GPU and flush
1827  *    all data to system memory. Subsequent access will not be synchronized.
1828  *
1829  *  * all mappings are revoked on runtime device suspend.
1830  *
1831  *  * there are only 8, 16 or 32 fence registers to share between all users
1832  *    (older machines require a fence register for display and blitter access
1833  *    as well). Contention of the fence registers will cause the previous users
1834  *    to be unmapped and any new access will generate new page faults.
1835  *
1836  *  * running out of memory while servicing a fault may generate a SIGBUS,
1837  *    rather than the expected SIGSEGV.
1838  */
1839 int i915_gem_mmap_gtt_version(void)
1840 {
1841         return 2;
1842 }
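
/*
 * Userspace discovers this value through GETPARAM; a minimal sketch
 * (again assuming an open DRM fd and libdrm's drmIoctl()):
 *
 *	int gtt_version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &gtt_version,
 *	};
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */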
1843
1844 static inline struct i915_ggtt_view
1845 compute_partial_view(struct drm_i915_gem_object *obj,
1846                      pgoff_t page_offset,
1847                      unsigned int chunk)
1848 {
1849         struct i915_ggtt_view view;
1850
1851         if (i915_gem_object_is_tiled(obj))
1852                 chunk = roundup(chunk, tile_row_pages(obj));
1853
1854         view.type = I915_GGTT_VIEW_PARTIAL;
1855         view.partial.offset = rounddown(page_offset, chunk);
1856         view.partial.size =
1857                 min_t(unsigned int, chunk,
1858                       (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
1859
1860         /* If the partial covers the entire object, just create a normal VMA. */
1861         if (chunk >= obj->base.size >> PAGE_SHIFT)
1862                 view.type = I915_GGTT_VIEW_NORMAL;
1863
1864         return view;
1865 }
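
/*
 * Worked example (untiled object, so chunk is not rounded up): a fault at
 * page_offset 1000 of a 4096-page object with a 256-page chunk gives
 * view.partial.offset = rounddown(1000, 256) = 768 and
 * view.partial.size = min(256, 4096 - 768) = 256, i.e. a partial view
 * covering pages [768, 1024).
 */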
1866
1867 /**
1868  * i915_gem_fault - fault a page into the GTT
1869  * @vmf: fault info
1870  *
1871  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1872  * from userspace.  The fault handler takes care of binding the object to
1873  * the GTT (if needed), allocating and programming a fence register (again,
1874  * only if needed based on whether the old reg is still valid or the object
1875  * is tiled) and inserting a new PTE into the faulting process.
1876  *
1877  * Note that the faulting process may involve evicting existing objects
1878  * from the GTT and/or fence registers to make room.  So performance may
1879  * suffer if the GTT working set is large or there are few fence registers
1880  * left.
1881  *
1882  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1883  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1884  */
1885 int i915_gem_fault(struct vm_fault *vmf)
1886 {
1887 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
1888         struct vm_area_struct *area = vmf->vma;
1889         struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1890         struct drm_device *dev = obj->base.dev;
1891         struct drm_i915_private *dev_priv = to_i915(dev);
1892         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1893         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1894         struct i915_vma *vma;
1895         pgoff_t page_offset;
1896         unsigned int flags;
1897         int ret;
1898
1899         /* We don't use vmf->pgoff since that has the fake offset */
1900         page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
1901
1902         trace_i915_gem_object_fault(obj, page_offset, true, write);
1903
1904         /* Try to flush the object off the GPU first without holding the lock.
1905          * Upon acquiring the lock, we will perform our sanity checks and then
1906          * repeat the flush holding the lock in the normal manner to catch cases
1907          * where we are gazumped.
1908          */
1909         ret = i915_gem_object_wait(obj,
1910                                    I915_WAIT_INTERRUPTIBLE,
1911                                    MAX_SCHEDULE_TIMEOUT,
1912                                    NULL);
1913         if (ret)
1914                 goto err;
1915
1916         ret = i915_gem_object_pin_pages(obj);
1917         if (ret)
1918                 goto err;
1919
1920         intel_runtime_pm_get(dev_priv);
1921
1922         ret = i915_mutex_lock_interruptible(dev);
1923         if (ret)
1924                 goto err_rpm;
1925
1926         /* Access to snoopable pages through the GTT is incoherent. */
1927         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1928                 ret = -EFAULT;
1929                 goto err_unlock;
1930         }
1931
1932         /* If the object is smaller than a couple of partial vma, it is
1933          * not worth only creating a single partial vma - we may as well
1934          * clear enough space for the full object.
1935          */
1936         flags = PIN_MAPPABLE;
1937         if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1938                 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1939
1940         /* Now pin it into the GTT as needed */
1941         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1942         if (IS_ERR(vma)) {
1943                 /* Use a partial view if it is bigger than available space */
1944                 struct i915_ggtt_view view =
1945                         compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
1946
1947                 /* Userspace is now writing through an untracked VMA, abandon
1948                  * all hope that the hardware is able to track future writes.
1949                  */
1950                 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1951
1952                 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1953         }
1954         if (IS_ERR(vma)) {
1955                 ret = PTR_ERR(vma);
1956                 goto err_unlock;
1957         }
1958
1959         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1960         if (ret)
1961                 goto err_unpin;
1962
1963         ret = i915_vma_pin_fence(vma);
1964         if (ret)
1965                 goto err_unpin;
1966
1967         /* Finally, remap it using the new GTT offset */
1968         ret = remap_io_mapping(area,
1969                                area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
1970                                (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
1971                                min_t(u64, vma->size, area->vm_end - area->vm_start),
1972                                &ggtt->iomap);
1973         if (ret)
1974                 goto err_fence;
1975
1976         /* Mark as being mmapped into userspace for later revocation */
1977         assert_rpm_wakelock_held(dev_priv);
1978         if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
1979                 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1980         GEM_BUG_ON(!obj->userfault_count);
1981
1982         i915_vma_set_ggtt_write(vma);
1983
1984 err_fence:
1985         i915_vma_unpin_fence(vma);
1986 err_unpin:
1987         __i915_vma_unpin(vma);
1988 err_unlock:
1989         mutex_unlock(&dev->struct_mutex);
1990 err_rpm:
1991         intel_runtime_pm_put(dev_priv);
1992         i915_gem_object_unpin_pages(obj);
1993 err:
1994         switch (ret) {
1995         case -EIO:
1996                 /*
1997                  * We eat errors when the gpu is terminally wedged to avoid
1998                  * userspace unduly crashing (gl has no provisions for mmaps to
1999                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
2000                  * and so needs to be reported.
2001                  */
2002                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
2003                         ret = VM_FAULT_SIGBUS;
2004                         break;
2005                 }
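                /* fall through */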
2006         case -EAGAIN:
2007                 /*
2008                  * EAGAIN means the gpu is hung and we'll wait for the error
2009                  * handler to reset everything when re-faulting in
2010                  * i915_mutex_lock_interruptible.
2011                  */
2012         case 0:
2013         case -ERESTARTSYS:
2014         case -EINTR:
2015         case -EBUSY:
2016                 /*
2017                  * EBUSY is ok: this just means that another thread
2018                  * already did the job.
2019                  */
2020                 ret = VM_FAULT_NOPAGE;
2021                 break;
2022         case -ENOMEM:
2023                 ret = VM_FAULT_OOM;
2024                 break;
2025         case -ENOSPC:
2026         case -EFAULT:
2027                 ret = VM_FAULT_SIGBUS;
2028                 break;
2029         default:
2030                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
2031                 ret = VM_FAULT_SIGBUS;
2032                 break;
2033         }
2034         return ret;
2035 }
2036
2037 static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
2038 {
2039         struct i915_vma *vma;
2040
2041         GEM_BUG_ON(!obj->userfault_count);
2042
2043         obj->userfault_count = 0;
2044         list_del(&obj->userfault_link);
2045         drm_vma_node_unmap(&obj->base.vma_node,
2046                            obj->base.dev->anon_inode->i_mapping);
2047
2048         for_each_ggtt_vma(vma, obj)
2049                 i915_vma_unset_userfault(vma);
2050 }
2051
2052 /**
2053  * i915_gem_release_mmap - remove physical page mappings
2054  * @obj: obj in question
2055  *
2056  * Preserve the reservation of the mmapping with the DRM core code, but
2057  * relinquish ownership of the pages back to the system.
2058  *
2059  * It is vital that we remove the page mapping if we have mapped a tiled
2060  * object through the GTT and then lose the fence register due to
2061  * resource pressure. Similarly if the object has been moved out of the
2062  * aperture, than pages mapped into userspace must be revoked. Removing the
2063  * aperture, then pages mapped into userspace must be revoked. Removing the
2064  * fixup by i915_gem_fault().
2065  */
2066 void
2067 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2068 {
2069         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2070
2071         /* Serialisation between user GTT access and our code depends upon
2072          * revoking the CPU's PTE whilst the mutex is held. The next user
2073          * pagefault then has to wait until we release the mutex.
2074          *
2075          * Note that RPM complicates somewhat by adding an additional
2076          * requirement that operations to the GGTT be made holding the RPM
2077          * wakeref.
2078          */
2079         lockdep_assert_held(&i915->drm.struct_mutex);
2080         intel_runtime_pm_get(i915);
2081
2082         if (!obj->userfault_count)
2083                 goto out;
2084
2085         __i915_gem_object_release_mmap(obj);
2086
2087         /* Ensure that the CPU's PTEs are revoked and there are no outstanding
2088          * memory transactions from userspace before we return. The TLB
2089          * flushing implied by changing the PTE above *should* be
2090          * sufficient; an extra barrier here just provides us with a bit
2091          * of paranoid documentation about our requirement to serialise
2092          * memory writes before touching registers / GSM.
2093          */
2094         wmb();
2095
2096 out:
2097         intel_runtime_pm_put(i915);
2098 }
2099
2100 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2101 {
2102         struct drm_i915_gem_object *obj, *on;
2103         int i;
2104
2105         /*
2106          * Only called during RPM suspend. All users of the userfault_list
2107          * must be holding an RPM wakeref to ensure that this can not
2108          * run concurrently with themselves (and use the struct_mutex for
2109          * protection between themselves).
2110          */
2111
2112         list_for_each_entry_safe(obj, on,
2113                                  &dev_priv->mm.userfault_list, userfault_link)
2114                 __i915_gem_object_release_mmap(obj);
2115
2116         /* The fence will be lost when the device powers down. If any were
2117          * in use by hardware (i.e. they are pinned), we should not be powering
2118          * down! All other fences will be reacquired by the user upon waking.
2119          */
2120         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2121                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2122
2123                 /* Ideally we want to assert that the fence register is not
2124                  * live at this point (i.e. that no piece of code will be
2125                  * trying to write through fence + GTT, as that both violates
2126                  * our tracking of activity and associated locking/barriers
2127                  * and is also illegal given that the hw is powered down).
2128                  *
2129                  * Previously we used reg->pin_count as a "liveness" indicator.
2130                  * That is not sufficient, and we need a more fine-grained
2131                  * tool if we want to have a sanity check here.
2132                  */
2133
2134                 if (!reg->vma)
2135                         continue;
2136
2137                 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
2138                 reg->dirty = true;
2139         }
2140 }
2141
2142 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2143 {
2144         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2145         int err;
2146
2147         err = drm_gem_create_mmap_offset(&obj->base);
2148         if (likely(!err))
2149                 return 0;
2150
2151         /* Attempt to reap some mmap space from dead objects */
2152         do {
2153                 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2154                 if (err)
2155                         break;
2156
2157                 i915_gem_drain_freed_objects(dev_priv);
2158                 err = drm_gem_create_mmap_offset(&obj->base);
2159                 if (!err)
2160                         break;
2161
2162         } while (flush_delayed_work(&dev_priv->gt.retire_work));
2163
2164         return err;
2165 }
2166
2167 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2168 {
2169         drm_gem_free_mmap_offset(&obj->base);
2170 }
2171
2172 int
2173 i915_gem_mmap_gtt(struct drm_file *file,
2174                   struct drm_device *dev,
2175                   uint32_t handle,
2176                   uint64_t *offset)
2177 {
2178         struct drm_i915_gem_object *obj;
2179         int ret;
2180
2181         obj = i915_gem_object_lookup(file, handle);
2182         if (!obj)
2183                 return -ENOENT;
2184
2185         ret = i915_gem_object_create_mmap_offset(obj);
2186         if (ret == 0)
2187                 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2188
2189         i915_gem_object_put(obj);
2190         return ret;
2191 }
2192
2193 /**
2194  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2195  * @dev: DRM device
2196  * @data: GTT mapping ioctl data
2197  * @file: GEM object info
2198  *
2199  * Simply returns the fake offset to userspace so it can mmap it.
2200  * The mmap call will end up in drm_gem_mmap(), which will set things
2201  * up so we can get faults in the handler above.
2202  *
2203  * The fault handler will take care of binding the object into the GTT
2204  * (since it may have been evicted to make room for something), allocating
2205  * a fence register, and mapping the appropriate aperture address into
2206  * userspace.
2207  */
2208 int
2209 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2210                         struct drm_file *file)
2211 {
2212         struct drm_i915_gem_mmap_gtt *args = data;
2213
2214         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2215 }
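
/*
 * The fake offset returned here is consumed by a regular mmap() on the DRM
 * fd; a userspace sketch, with "fd", "handle" and "size" assumed:
 *
 *	struct drm_i915_gem_mmap_gtt mg = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mg);
 *	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, mg.offset);
 */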
2216
2217 /* Immediately discard the backing storage */
2218 static void
2219 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2220 {
2221         i915_gem_object_free_mmap_offset(obj);
2222
2223         if (obj->base.filp == NULL)
2224                 return;
2225
2226         /* Our goal here is to return as much of the memory as
2227          * possible back to the system, as we are called from the OOM path.
2228          * To do this we must instruct the shmfs to drop all of its
2229          * backing pages, *now*.
2230          */
2231         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2232         obj->mm.madv = __I915_MADV_PURGED;
2233         obj->mm.pages = ERR_PTR(-EFAULT);
2234 }
2235
2236 /* Try to discard unwanted pages */
2237 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2238 {
2239         struct address_space *mapping;
2240
2241         lockdep_assert_held(&obj->mm.lock);
2242         GEM_BUG_ON(i915_gem_object_has_pages(obj));
2243
2244         switch (obj->mm.madv) {
2245         case I915_MADV_DONTNEED:
2246                 i915_gem_object_truncate(obj);
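                /* fall through */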
2247         case __I915_MADV_PURGED:
2248                 return;
2249         }
2250
2251         if (obj->base.filp == NULL)
2252                 return;
2253
2254         mapping = obj->base.filp->f_mapping;
2255         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2256 }
2257
2258 static void
2259 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2260                               struct sg_table *pages)
2261 {
2262         struct sgt_iter sgt_iter;
2263         struct page *page;
2264
2265         __i915_gem_object_release_shmem(obj, pages, true);
2266
2267         i915_gem_gtt_finish_pages(obj, pages);
2268
2269         if (i915_gem_object_needs_bit17_swizzle(obj))
2270                 i915_gem_object_save_bit_17_swizzle(obj, pages);
2271
2272         for_each_sgt_page(page, sgt_iter, pages) {
2273                 if (obj->mm.dirty)
2274                         set_page_dirty(page);
2275
2276                 if (obj->mm.madv == I915_MADV_WILLNEED)
2277                         mark_page_accessed(page);
2278
2279                 put_page(page);
2280         }
2281         obj->mm.dirty = false;
2282
2283         sg_free_table(pages);
2284         kfree(pages);
2285 }
2286
2287 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2288 {
2289         struct radix_tree_iter iter;
2290         void __rcu **slot;
2291
2292         rcu_read_lock();
2293         radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2294                 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2295         rcu_read_unlock();
2296 }
2297
2298 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2299                                  enum i915_mm_subclass subclass)
2300 {
2301         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2302         struct sg_table *pages;
2303
2304         if (i915_gem_object_has_pinned_pages(obj))
2305                 return;
2306
2307         GEM_BUG_ON(obj->bind_count);
2308         if (!i915_gem_object_has_pages(obj))
2309                 return;
2310
2311         /* May be called by shrinker from within get_pages() (on another bo) */
2312         mutex_lock_nested(&obj->mm.lock, subclass);
2313         if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2314                 goto unlock;
2315
2316         /* ->put_pages might need to allocate memory for the bit17 swizzle
2317          * array, hence protect them from being reaped by removing them from gtt
2318          * lists early. */
2319         pages = fetch_and_zero(&obj->mm.pages);
2320         GEM_BUG_ON(!pages);
2321
2322         spin_lock(&i915->mm.obj_lock);
2323         list_del(&obj->mm.link);
2324         spin_unlock(&i915->mm.obj_lock);
2325
2326         if (obj->mm.mapping) {
2327                 void *ptr;
2328
2329                 ptr = page_mask_bits(obj->mm.mapping);
2330                 if (is_vmalloc_addr(ptr))
2331                         vunmap(ptr);
2332                 else
2333                         kunmap(kmap_to_page(ptr));
2334
2335                 obj->mm.mapping = NULL;
2336         }
2337
2338         __i915_gem_object_reset_page_iter(obj);
2339
2340         if (!IS_ERR(pages))
2341                 obj->ops->put_pages(obj, pages);
2342
2343         obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
2344
2345 unlock:
2346         mutex_unlock(&obj->mm.lock);
2347 }
2348
2349 static bool i915_sg_trim(struct sg_table *orig_st)
2350 {
2351         struct sg_table new_st;
2352         struct scatterlist *sg, *new_sg;
2353         unsigned int i;
2354
2355         if (orig_st->nents == orig_st->orig_nents)
2356                 return false;
2357
2358         if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2359                 return false;
2360
2361         new_sg = new_st.sgl;
2362         for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2363                 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2364                 /* called before being DMA mapped, no need to copy sg->dma_* */
2365                 new_sg = sg_next(new_sg);
2366         }
2367         GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
2368
2369         sg_free_table(orig_st);
2370
2371         *orig_st = new_st;
2372         return true;
2373 }
2374
2375 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2376 {
2377         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2378         const unsigned long page_count = obj->base.size / PAGE_SIZE;
2379         unsigned long i;
2380         struct address_space *mapping;
2381         struct sg_table *st;
2382         struct scatterlist *sg;
2383         struct sgt_iter sgt_iter;
2384         struct page *page;
2385         unsigned long last_pfn = 0;     /* suppress gcc warning */
2386         unsigned int max_segment = i915_sg_segment_size();
2387         unsigned int sg_page_sizes;
2388         gfp_t noreclaim;
2389         int ret;
2390
2391         /* Assert that the object is not currently in any GPU domain. As it
2392          * wasn't in the GTT, there shouldn't be any way it could have been in
2393          * a GPU cache
2394          */
2395         GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2396         GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2397
2398         st = kmalloc(sizeof(*st), GFP_KERNEL);
2399         if (st == NULL)
2400                 return -ENOMEM;
2401
2402 rebuild_st:
2403         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2404                 kfree(st);
2405                 return -ENOMEM;
2406         }
2407
2408         /* Get the list of pages out of our struct file.  They'll be pinned
2409          * at this point until we release them.
2410          *
2411          * Fail silently without starting the shrinker
2412          */
2413         mapping = obj->base.filp->f_mapping;
2414         noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2415         noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2416
2417         sg = st->sgl;
2418         st->nents = 0;
2419         sg_page_sizes = 0;
2420         for (i = 0; i < page_count; i++) {
2421                 const unsigned int shrink[] = {
2422                         I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2423                         0,
2424                 }, *s = shrink;
2425                 gfp_t gfp = noreclaim;
2426
2427                 do {
2428                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2429                         if (likely(!IS_ERR(page)))
2430                                 break;
2431
2432                         if (!*s) {
2433                                 ret = PTR_ERR(page);
2434                                 goto err_sg;
2435                         }
2436
2437                         i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
2438                         cond_resched();
2439
2440                         /* We've tried hard to allocate the memory by reaping
2441                          * our own buffer; now let the real VM do its job and
2442                          * go down in flames if truly OOM.
2443                          *
2444                          * However, since graphics tend to be disposable,
2445                          * defer the oom here by reporting the ENOMEM back
2446                          * to userspace.
2447                          */
2448                         if (!*s) {
2449                                 /* reclaim and warn, but no oom */
2450                                 gfp = mapping_gfp_mask(mapping);
2451
2452                                 /* Our bo are always dirty and so we require
2453                                  * kswapd to reclaim our pages (direct reclaim
2454                                  * does not effectively begin pageout of our
2455                                  * buffers on its own). However, direct reclaim
2456                                  * only waits for kswapd when under allocation
2457                                  * congestion. So as a result __GFP_RECLAIM is
2458                                  * unreliable and fails to actually reclaim our
2459                                  * dirty pages -- unless you try over and over
2460                                  * again with !__GFP_NORETRY. However, we still
2461                                  * want to fail this allocation rather than
2462                                  * trigger the out-of-memory killer and for
2463                                  * this we want __GFP_RETRY_MAYFAIL.
2464                                  */
2465                                 gfp |= __GFP_RETRY_MAYFAIL;
2466                         }
2467                 } while (1);
2468
2469                 if (!i ||
2470                     sg->length >= max_segment ||
2471                     page_to_pfn(page) != last_pfn + 1) {
2472                         if (i) {
2473                                 sg_page_sizes |= sg->length;
2474                                 sg = sg_next(sg);
2475                         }
2476                         st->nents++;
2477                         sg_set_page(sg, page, PAGE_SIZE, 0);
2478                 } else {
2479                         sg->length += PAGE_SIZE;
2480                 }
2481                 last_pfn = page_to_pfn(page);
2482
2483                 /* Check that the i965g/gm workaround works. */
2484                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2485         }
2486         if (sg) { /* loop terminated early; short sg table */
2487                 sg_page_sizes |= sg->length;
2488                 sg_mark_end(sg);
2489         }
2490
2491         /* Trim unused sg entries to avoid wasting memory. */
2492         i915_sg_trim(st);
2493
2494         ret = i915_gem_gtt_prepare_pages(obj, st);
2495         if (ret) {
2496                 /* DMA remapping failed? One possible cause is that
2497                  * it could not reserve enough large entries; asking
2498                  * for PAGE_SIZE chunks instead may be helpful.
2499                  */
2500                 if (max_segment > PAGE_SIZE) {
2501                         for_each_sgt_page(page, sgt_iter, st)
2502                                 put_page(page);
2503                         sg_free_table(st);
2504
2505                         max_segment = PAGE_SIZE;
2506                         goto rebuild_st;
2507                 } else {
2508                         dev_warn(&dev_priv->drm.pdev->dev,
2509                                  "Failed to DMA remap %lu pages\n",
2510                                  page_count);
2511                         goto err_pages;
2512                 }
2513         }
2514
2515         if (i915_gem_object_needs_bit17_swizzle(obj))
2516                 i915_gem_object_do_bit_17_swizzle(obj, st);
2517
2518         __i915_gem_object_set_pages(obj, st, sg_page_sizes);
2519
2520         return 0;
2521
2522 err_sg:
2523         sg_mark_end(sg);
2524 err_pages:
2525         for_each_sgt_page(page, sgt_iter, st)
2526                 put_page(page);
2527         sg_free_table(st);
2528         kfree(st);
2529
2530         /* shmemfs first checks if there is enough memory to allocate the page
2531          * and reports ENOSPC should there be insufficient, along with the usual
2532          * ENOMEM for a genuine allocation failure.
2533          *
2534          * We use ENOSPC in our driver to mean that we have run out of aperture
2535          * space and so want to translate the error from shmemfs back to our
2536          * usual understanding of ENOMEM.
2537          */
2538         if (ret == -ENOSPC)
2539                 ret = -ENOMEM;
2540
2541         return ret;
2542 }
2543
2544 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2545                                  struct sg_table *pages,
2546                                  unsigned int sg_page_sizes)
2547 {
2548         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2549         unsigned long supported = INTEL_INFO(i915)->page_sizes;
2550         int i;
2551
2552         lockdep_assert_held(&obj->mm.lock);
2553
2554         obj->mm.get_page.sg_pos = pages->sgl;
2555         obj->mm.get_page.sg_idx = 0;
2556
2557         obj->mm.pages = pages;
2558
2559         if (i915_gem_object_is_tiled(obj) &&
2560             i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2561                 GEM_BUG_ON(obj->mm.quirked);
2562                 __i915_gem_object_pin_pages(obj);
2563                 obj->mm.quirked = true;
2564         }
2565
2566         GEM_BUG_ON(!sg_page_sizes);
2567         obj->mm.page_sizes.phys = sg_page_sizes;
2568
2569         /*
2570          * Calculate the supported page-sizes which fit into the given
2571          * sg_page_sizes. This will give us the page-sizes which we may be able
2572          * to use opportunistically when later inserting into the GTT. For
2573          * example if phys=2G, then in theory we should be able to use 1G, 2M,
2574          * 64K or 4K pages, although in practice this will depend on a number of
2575          * other factors.
2576          */
2577         obj->mm.page_sizes.sg = 0;
2578         for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
2579                 if (obj->mm.page_sizes.phys & ~0u << i)
2580                         obj->mm.page_sizes.sg |= BIT(i);
2581         }
2582         GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
2583
2584         spin_lock(&i915->mm.obj_lock);
2585         list_add(&obj->mm.link, &i915->mm.unbound_list);
2586         spin_unlock(&i915->mm.obj_lock);
2587 }
2588
2589 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2590 {
2591         int err;
2592
2593         if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2594                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2595                 return -EFAULT;
2596         }
2597
2598         err = obj->ops->get_pages(obj);
2599         GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
2600
2601         return err;
2602 }
2603
2604 /* Ensure that the associated pages are gathered from the backing storage
2605  * and pinned into our object. i915_gem_object_pin_pages() may be called
2606  * multiple times before they are released by a single call to
2607  * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2608  * either as a result of memory pressure (reaping pages under the shrinker)
2609  * or as the object is itself released.
2610  */
2611 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2612 {
2613         int err;
2614
2615         err = mutex_lock_interruptible(&obj->mm.lock);
2616         if (err)
2617                 return err;
2618
2619         if (unlikely(!i915_gem_object_has_pages(obj))) {
2620                 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2621
2622                 err = ____i915_gem_object_get_pages(obj);
2623                 if (err)
2624                         goto unlock;
2625
2626                 smp_mb__before_atomic();
2627         }
2628         atomic_inc(&obj->mm.pages_pin_count);
2629
2630 unlock:
2631         mutex_unlock(&obj->mm.lock);
2632         return err;
2633 }
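
/*
 * Callers normally use the i915_gem_object_pin_pages() fast path and drop
 * their reference with i915_gem_object_unpin_pages() once done; a minimal
 * sketch of the usual pairing:
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... operate on the now-resident backing pages ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */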
2634
2635 /* The 'mapping' part of i915_gem_object_pin_map() below */
2636 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2637                                  enum i915_map_type type)
2638 {
2639         unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2640         struct sg_table *sgt = obj->mm.pages;
2641         struct sgt_iter sgt_iter;
2642         struct page *page;
2643         struct page *stack_pages[32];
2644         struct page **pages = stack_pages;
2645         unsigned long i = 0;
2646         pgprot_t pgprot;
2647         void *addr;
2648
2649         /* A single page can always be kmapped */
2650         if (n_pages == 1 && type == I915_MAP_WB)
2651                 return kmap(sg_page(sgt->sgl));
2652
2653         if (n_pages > ARRAY_SIZE(stack_pages)) {
2654                 /* Too big for stack -- allocate temporary array instead */
2655                 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
2656                 if (!pages)
2657                         return NULL;
2658         }
2659
2660         for_each_sgt_page(page, sgt_iter, sgt)
2661                 pages[i++] = page;
2662
2663         /* Check that we have the expected number of pages */
2664         GEM_BUG_ON(i != n_pages);
2665
2666         switch (type) {
2667         default:
2668                 MISSING_CASE(type);
2669                 /* fallthrough to use PAGE_KERNEL anyway */
2670         case I915_MAP_WB:
2671                 pgprot = PAGE_KERNEL;
2672                 break;
2673         case I915_MAP_WC:
2674                 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2675                 break;
2676         }
2677         addr = vmap(pages, n_pages, 0, pgprot);
2678
2679         if (pages != stack_pages)
2680                 kvfree(pages);
2681
2682         return addr;
2683 }
2684
2685 /* get, pin, and map the pages of the object into kernel space */
2686 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2687                               enum i915_map_type type)
2688 {
2689         enum i915_map_type has_type;
2690         bool pinned;
2691         void *ptr;
2692         int ret;
2693
2694         if (unlikely(!i915_gem_object_has_struct_page(obj)))
2695                 return ERR_PTR(-ENXIO);
2696
2697         ret = mutex_lock_interruptible(&obj->mm.lock);
2698         if (ret)
2699                 return ERR_PTR(ret);
2700
2701         pinned = !(type & I915_MAP_OVERRIDE);
2702         type &= ~I915_MAP_OVERRIDE;
2703
2704         if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2705                 if (unlikely(!i915_gem_object_has_pages(obj))) {
2706                         GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2707
2708                         ret = ____i915_gem_object_get_pages(obj);
2709                         if (ret)
2710                                 goto err_unlock;
2711
2712                         smp_mb__before_atomic();
2713                 }
2714                 atomic_inc(&obj->mm.pages_pin_count);
2715                 pinned = false;
2716         }
2717         GEM_BUG_ON(!i915_gem_object_has_pages(obj));
2718
2719         ptr = page_unpack_bits(obj->mm.mapping, &has_type);
2720         if (ptr && has_type != type) {
2721                 if (pinned) {
2722                         ret = -EBUSY;
2723                         goto err_unpin;
2724                 }
2725
2726                 if (is_vmalloc_addr(ptr))
2727                         vunmap(ptr);
2728                 else
2729                         kunmap(kmap_to_page(ptr));
2730
2731                 ptr = obj->mm.mapping = NULL;
2732         }
2733
2734         if (!ptr) {
2735                 ptr = i915_gem_object_map(obj, type);
2736                 if (!ptr) {
2737                         ret = -ENOMEM;
2738                         goto err_unpin;
2739                 }
2740
2741                 obj->mm.mapping = page_pack_bits(ptr, type);
2742         }
2743
2744 out_unlock:
2745         mutex_unlock(&obj->mm.lock);
2746         return ptr;
2747
2748 err_unpin:
2749         atomic_dec(&obj->mm.pages_pin_count);
2750 err_unlock:
2751         ptr = ERR_PTR(ret);
2752         goto out_unlock;
2753 }
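
/*
 * A minimal sketch of the usual pairing for this helper; the pin must be
 * dropped with i915_gem_object_unpin_map() once the CPU access is done:
 *
 *	void *vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);
 *
 *	i915_gem_object_unpin_map(obj);
 */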
2754
2755 static int
2756 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2757                            const struct drm_i915_gem_pwrite *arg)
2758 {
2759         struct address_space *mapping = obj->base.filp->f_mapping;
2760         char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2761         u64 remain, offset;
2762         unsigned int pg;
2763
2764         /* Before we instantiate/pin the backing store for our use, we
2765          * can prepopulate the shmemfs filp efficiently using a write into
2766          * the pagecache. We avoid the penalty of instantiating all the
2767          * pages, important if the user is just writing to a few and never
2768          * pages, which is important if the user is just writing to a few and
2769          * never uses the object on the GPU, and a direct write into shmemfs
2770          * avoids the cost of retrieving a page (either swapin
2771          */
2772         if (i915_gem_object_has_pages(obj))
2773                 return -ENODEV;
2774
2775         if (obj->mm.madv != I915_MADV_WILLNEED)
2776                 return -EFAULT;
2777
2778         /* Before the pages are instantiated the object is treated as being
2779          * in the CPU domain. The pages will be clflushed as required before
2780          * use, and we can freely write into the pages directly. If userspace
2781          * races pwrite with any other operation; corruption will ensue -
2782          * races pwrite with any other operation, corruption will ensue -
2783          */
2784
2785         remain = arg->size;
2786         offset = arg->offset;
2787         pg = offset_in_page(offset);
2788
2789         do {
2790                 unsigned int len, unwritten;
2791                 struct page *page;
2792                 void *data, *vaddr;
2793                 int err;
2794
2795                 len = PAGE_SIZE - pg;
2796                 if (len > remain)
2797                         len = remain;
2798
2799                 err = pagecache_write_begin(obj->base.filp, mapping,
2800                                             offset, len, 0,
2801                                             &page, &data);
2802                 if (err < 0)
2803                         return err;
2804
2805                 vaddr = kmap(page);
2806                 unwritten = copy_from_user(vaddr + pg, user_data, len);
2807                 kunmap(page);
2808
2809                 err = pagecache_write_end(obj->base.filp, mapping,
2810                                           offset, len, len - unwritten,
2811                                           page, data);
2812                 if (err < 0)
2813                         return err;
2814
2815                 if (unwritten)
2816                         return -EFAULT;
2817
2818                 remain -= len;
2819                 user_data += len;
2820                 offset += len;
2821                 pg = 0;
2822         } while (remain);
2823
2824         return 0;
2825 }
2826
2827 static bool ban_context(const struct i915_gem_context *ctx,
2828                         unsigned int score)
2829 {
2830         return (i915_gem_context_is_bannable(ctx) &&
2831                 score >= CONTEXT_SCORE_BAN_THRESHOLD);
2832 }
2833
2834 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
2835 {
2836         unsigned int score;
2837         bool banned;
2838
2839         atomic_inc(&ctx->guilty_count);
2840
2841         score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
2842         banned = ban_context(ctx, score);
2843         DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
2844                          ctx->name, score, yesno(banned));
2845         if (!banned)
2846                 return;
2847
2848         i915_gem_context_set_banned(ctx);
2849         if (!IS_ERR_OR_NULL(ctx->file_priv)) {
2850                 atomic_inc(&ctx->file_priv->context_bans);
2851                 DRM_DEBUG_DRIVER("client %s has had %d contexts banned\n",
2852                                  ctx->name, atomic_read(&ctx->file_priv->context_bans));
2853         }
2854 }
2855
2856 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
2857 {
2858         atomic_inc(&ctx->active_count);
2859 }
2860
2861 struct drm_i915_gem_request *
2862 i915_gem_find_active_request(struct intel_engine_cs *engine)
2863 {
2864         struct drm_i915_gem_request *request, *active = NULL;
2865         unsigned long flags;
2866
2867         /* We are called by the error capture and reset at a random
2868          * point in time. In particular, note that neither is crucially
2869          * ordered with an interrupt. After a hang, the GPU is dead and we
2870          * assume that no more writes can happen (we waited long enough for
2871          * all writes that were in transaction to be flushed) - adding an
2872          * all writes that were in flight to be flushed) - adding an
2873          * not need an engine->irq_seqno_barrier() before the seqno reads.
2874          */
2875         spin_lock_irqsave(&engine->timeline->lock, flags);
2876         list_for_each_entry(request, &engine->timeline->requests, link) {
2877                 if (__i915_gem_request_completed(request,
2878                                                  request->global_seqno))
2879                         continue;
2880
2881                 GEM_BUG_ON(request->engine != engine);
2882                 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2883                                     &request->fence.flags));
2884
2885                 active = request;
2886                 break;
2887         }
2888         spin_unlock_irqrestore(&engine->timeline->lock, flags);
2889
2890         return active;
2891 }
2892
2893 static bool engine_stalled(struct intel_engine_cs *engine)
2894 {
2895         if (!engine->hangcheck.stalled)
2896                 return false;
2897
2898         /* Check for possible seqno movement after hang declaration */
2899         if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
2900                 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
2901                 return false;
2902         }
2903
2904         return true;
2905 }
2906
2907 /*
2908  * Ensure the irq handler finishes, and is not run again.
2909  * Also return the active request so that we only search for it once.
2910  */
2911 struct drm_i915_gem_request *
2912 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
2913 {
2914         struct drm_i915_gem_request *request = NULL;
2915
2916         /*
2917          * During the reset sequence, we must prevent the engine from
2918          * entering RC6. As the context state is undefined until we restart
2919          * the engine, if it does enter RC6 during the reset, the state
2920          * written to the powercontext is undefined and so we may lose
2921          * GPU state upon resume, i.e. fail to restart after a reset.
2922          */
2923         intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
2924
2925         /*
2926          * Prevent the signaler thread from updating the request
2927          * state (by calling dma_fence_signal) as we are processing
2928          * the reset. The write from the GPU of the seqno is
2929          * asynchronous and the signaler thread may see a different
2930          * value to us and declare the request complete, even though
2931          * the reset routine has picked that request as the active
2932          * (incomplete) request. This conflict is not handled
2933          * gracefully!
2934          */
2935         kthread_park(engine->breadcrumbs.signaler);
2936
2937         /*
2938          * Prevent request submission to the hardware until we have
2939          * completed the reset in i915_gem_reset_finish(). If a request
2940          * is completed by one engine, it may then queue a request
2941          * to a second via its execlists->tasklet *just* as we are
2942          * calling engine->init_hw() and also writing the ELSP.
2943          * Turning off the execlists->tasklet until the reset is over
2944          * prevents the race.
2945          */
2946         tasklet_kill(&engine->execlists.tasklet);
2947         tasklet_disable(&engine->execlists.tasklet);
2948
2949         /*
2950          * We're using a worker to queue preemption requests from the tasklet in
2951          * GuC submission mode.
2952          * Even though the tasklet was disabled, we may still have a worker queued.
2953          * Let's make sure that all workers scheduled before disabling the
2954          * tasklet are completed before continuing with the reset.
2955          */
2956         if (engine->i915->guc.preempt_wq)
2957                 flush_workqueue(engine->i915->guc.preempt_wq);
2958
2959         if (engine->irq_seqno_barrier)
2960                 engine->irq_seqno_barrier(engine);
2961
2962         request = i915_gem_find_active_request(engine);
2963         if (request && request->fence.error == -EIO)
2964                 request = ERR_PTR(-EIO); /* Previous reset failed! */
2965
2966         return request;
2967 }
2968
2969 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
2970 {
2971         struct intel_engine_cs *engine;
2972         struct drm_i915_gem_request *request;
2973         enum intel_engine_id id;
2974         int err = 0;
2975
2976         for_each_engine(engine, dev_priv, id) {
2977                 request = i915_gem_reset_prepare_engine(engine);
2978                 if (IS_ERR(request)) {
2979                         err = PTR_ERR(request);
2980                         continue;
2981                 }
2982
2983                 engine->hangcheck.active_request = request;
2984         }
2985
2986         i915_gem_revoke_fences(dev_priv);
2987
2988         return err;
2989 }
2990
2991 static void skip_request(struct drm_i915_gem_request *request)
2992 {
2993         void *vaddr = request->ring->vaddr;
2994         u32 head;
2995
2996         /* As this request likely depends on state from the lost
2997          * context, clear out all the user operations leaving the
2998          * breadcrumb at the end (so we get the fence notifications).
2999          */
3000         head = request->head;
3001         if (request->postfix < head) {
3002                 memset(vaddr + head, 0, request->ring->size - head);
3003                 head = 0;
3004         }
3005         memset(vaddr + head, 0, request->postfix - head);
3006
3007         dma_fence_set_error(&request->fence, -EIO);
3008 }
3009
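/*
 * Cancel every remaining request that belongs to the hung context: both the
 * requests already on the engine timeline after the guilty one and those
 * still queued on the context's own timeline.
 */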
3010 static void engine_skip_context(struct drm_i915_gem_request *request)
3011 {
3012         struct intel_engine_cs *engine = request->engine;
3013         struct i915_gem_context *hung_ctx = request->ctx;
3014         struct intel_timeline *timeline;
3015         unsigned long flags;
3016
3017         timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
3018
3019         spin_lock_irqsave(&engine->timeline->lock, flags);
3020         spin_lock(&timeline->lock);
3021
3022         list_for_each_entry_continue(request, &engine->timeline->requests, link)
3023                 if (request->ctx == hung_ctx)
3024                         skip_request(request);
3025
3026         list_for_each_entry(request, &timeline->requests, link)
3027                 skip_request(request);
3028
3029         spin_unlock(&timeline->lock);
3030         spin_unlock_irqrestore(&engine->timeline->lock, flags);
3031 }
3032
3033 /* Returns the request if it was guilty of the hang */
3034 static struct drm_i915_gem_request *
3035 i915_gem_reset_request(struct intel_engine_cs *engine,
3036                        struct drm_i915_gem_request *request)
3037 {
3038         /* The guilty request will get skipped on a hung engine.
3039          *
3040          * Users of client default contexts do not rely on logical
3041          * state preserved between batches so it is safe to execute
3042          * queued requests following the hang. Non-default contexts
3043          * rely on preserved state, so skipping a batch loses the
3044          * evolution of the state and it needs to be considered corrupted.
3045          * Executing more queued batches on top of corrupted state is
3046          * risky. But we take the risk by trying to advance through
3047          * the queued requests in order to make the client behaviour
3048          * more predictable around resets, by not throwing away a random
3049          * number of batches it has prepared for execution. Sophisticated
3050          * clients can use gem_reset_stats_ioctl and dma fence status
3051          * (exported via sync_file info ioctl on explicit fences) to observe
3052          * when they lose the context state and should rebuild accordingly.
3053          *
3054          * The context ban, and ultimately the client ban, mechanisms are
3055          * safety valves if client submission ends up resulting in nothing
3056          * more than subsequent hangs.
3057          */
3058
3059         if (engine_stalled(engine)) {
3060                 i915_gem_context_mark_guilty(request->ctx);
3061                 skip_request(request);
3062
3063                 /* If this context is now banned, skip all pending requests. */
3064                 if (i915_gem_context_is_banned(request->ctx))
3065                         engine_skip_context(request);
3066         } else {
3067                 /*
3068                  * Since this is not the hung engine, it may have advanced
3069                  * since the hang declaration. Double check by refinding
3070                  * the active request at the time of the reset.
3071                  */
3072                 request = i915_gem_find_active_request(engine);
3073                 if (request) {
3074                         i915_gem_context_mark_innocent(request->ctx);
3075                         dma_fence_set_error(&request->fence, -EAGAIN);
3076
3077                         /* Rewind the engine to replay the incomplete rq */
3078                         spin_lock_irq(&engine->timeline->lock);
3079                         request = list_prev_entry(request, link);
3080                         if (&request->link == &engine->timeline->requests)
3081                                 request = NULL;
3082                         spin_unlock_irq(&engine->timeline->lock);
3083                 }
3084         }
3085
3086         return request;
3087 }
3088
3089 void i915_gem_reset_engine(struct intel_engine_cs *engine,
3090                            struct drm_i915_gem_request *request)
3091 {
3092         /*
3093          * Make sure this write is visible before we re-enable the interrupt
3094          * handlers on another CPU, as tasklet_enable() resolves to just
3095          * a compiler barrier which is insufficient for our purpose here.
3096          */
3097         smp_store_mb(engine->irq_posted, 0);
3098
3099         if (request)
3100                 request = i915_gem_reset_request(engine, request);
3101
3102         if (request) {
3103                 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
3104                                  engine->name, request->global_seqno);
3105         }
3106
3107         /* Set up the CS to resume from the breadcrumb of the hung request */
3108         engine->reset_hw(engine, request);
3109 }
3110
3111 void i915_gem_reset(struct drm_i915_private *dev_priv)
3112 {
3113         struct intel_engine_cs *engine;
3114         enum intel_engine_id id;
3115
3116         lockdep_assert_held(&dev_priv->drm.struct_mutex);
3117
3118         i915_gem_retire_requests(dev_priv);
3119
3120         for_each_engine(engine, dev_priv, id) {
3121                 struct i915_gem_context *ctx;
3122
3123                 i915_gem_reset_engine(engine, engine->hangcheck.active_request);
3124                 ctx = fetch_and_zero(&engine->last_retired_context);
3125                 if (ctx)
3126                         engine->context_unpin(engine, ctx);
3127
3128                 /*
3129                  * Ostensibly, we always want a context loaded for powersaving,
3130                  * so if the engine is idle after the reset, send a request
3131                  * to load our scratch kernel_context.
3132                  *
3133                  * More mysteriously, if we leave the engine idle after a reset,
3134                  * the next userspace batch may hang, with what appears to be
3135                  * an incoherent read by the CS (presumably stale TLB). An
3136                  * empty request appears sufficient to paper over the glitch.
3137                  */
3138                 if (list_empty(&engine->timeline->requests)) {
3139                         struct drm_i915_gem_request *rq;
3140
3141                         rq = i915_gem_request_alloc(engine,
3142                                                     dev_priv->kernel_context);
3143                         if (!IS_ERR(rq))
3144                                 __i915_add_request(rq, false);
3145                 }
3146         }
3147
3148         i915_gem_restore_fences(dev_priv);
3149
3150         if (dev_priv->gt.awake) {
3151                 intel_sanitize_gt_powersave(dev_priv);
3152                 intel_enable_gt_powersave(dev_priv);
3153                 if (INTEL_GEN(dev_priv) >= 6)
3154                         gen6_rps_busy(dev_priv);
3155         }
3156 }
3157
3158 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
3159 {
3160         tasklet_enable(&engine->execlists.tasklet);
3161         kthread_unpark(engine->breadcrumbs.signaler);
3162
3163         intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
3164 }
3165
3166 void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
3167 {
3168         struct intel_engine_cs *engine;
3169         enum intel_engine_id id;
3170
3171         lockdep_assert_held(&dev_priv->drm.struct_mutex);
3172
3173         for_each_engine(engine, dev_priv, id) {
3174                 engine->hangcheck.active_request = NULL;
3175                 i915_gem_reset_finish_engine(engine);
3176         }
3177 }
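/*
 * A sketch of how the helpers above fit together during a GPU reset (the
 * hardware reset itself is driven by the caller, e.g. the error handling
 * code outside this file):
 *
 *	err = i915_gem_reset_prepare(i915);  // park signalers, find hung requests
 *	... perform the actual engine/device reset ...
 *	i915_gem_reset(i915);                // skip/replay requests, restore fences
 *	i915_gem_reset_finish(i915);         // re-enable tasklets and signalers
 */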
3178
3179 static void nop_submit_request(struct drm_i915_gem_request *request)
3180 {
3181         dma_fence_set_error(&request->fence, -EIO);
3182
3183         i915_gem_request_submit(request);
3184 }
3185
3186 static void nop_complete_submit_request(struct drm_i915_gem_request *request)
3187 {
3188         unsigned long flags;
3189
3190         dma_fence_set_error(&request->fence, -EIO);
3191
3192         spin_lock_irqsave(&request->engine->timeline->lock, flags);
3193         __i915_gem_request_submit(request);
3194         intel_engine_init_global_seqno(request->engine, request->global_seqno);
3195         spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
3196 }
3197
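/*
 * i915_gem_set_wedged() - declare the GPU unrecoverable.
 *
 * Replaces request submission with nop handlers that complete every request
 * with -EIO, then advances each engine's global seqno past its last
 * submission so that nothing is left waiting. Execbuf is refused while the
 * I915_WEDGED bit is set, until i915_gem_unset_wedged() clears it.
 */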
3198 void i915_gem_set_wedged(struct drm_i915_private *i915)
3199 {
3200         struct intel_engine_cs *engine;
3201         enum intel_engine_id id;
3202
3203         /*
3204          * First, stop submission to hw, but do not yet complete requests by
3205          * rolling the global seqno forward (since this would complete requests
3206          * for which we haven't set the fence error to EIO yet).
3207          */
3208         for_each_engine(engine, i915, id)
3209                 engine->submit_request = nop_submit_request;
3210
3211         /*
3212          * Make sure no one is running the old callback before we proceed with
3213          * cancelling requests and resetting the completion tracking. Otherwise
3214          * we might submit a request to the hardware which never completes.
3215          */
3216         synchronize_rcu();
3217
3218         for_each_engine(engine, i915, id) {
3219                 /* Mark all executing requests as skipped */
3220                 engine->cancel_requests(engine);
3221
3222                 /*
3223                  * Only once we've force-cancelled all in-flight requests can we
3224                  * start to complete all requests.
3225                  */
3226                 engine->submit_request = nop_complete_submit_request;
3227         }
3228
3229         /*
3230          * Make sure no request can slip through without getting completed by
3231          * either this call here to intel_engine_init_global_seqno, or the one
3232          * in nop_complete_submit_request.
3233          */
3234         synchronize_rcu();
3235
3236         for_each_engine(engine, i915, id) {
3237                 unsigned long flags;
3238
3239                 /* Mark all pending requests as complete so that any concurrent
3240                  * (lockless) lookup doesn't try and wait upon the request as we
3241                  * reset it.
3242                  */
3243                 spin_lock_irqsave(&engine->timeline->lock, flags);
3244                 intel_engine_init_global_seqno(engine,
3245                                                intel_engine_last_submit(engine));
3246                 spin_unlock_irqrestore(&engine->timeline->lock, flags);
3247         }
3248
3249         set_bit(I915_WEDGED, &i915->gpu_error.flags);
3250         wake_up_all(&i915->gpu_error.reset_queue);
3251 }
3252
3253 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
3254 {
3255         struct i915_gem_timeline *tl;
3256         int i;
3257
3258         lockdep_assert_held(&i915->drm.struct_mutex);
3259         if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
3260                 return true;
3261
3262         /* Before unwedging, make sure that all pending operations
3263          * are flushed and errored out - we may have requests waiting upon
3264          * third party fences. We marked all inflight requests as EIO, and
3265          * every execbuf since has returned EIO; for consistency we want all
3266          * the currently pending requests to also be marked as EIO, which
3267          * is done inside our nop_submit_request - and so we must wait.
3268          *
3269          * No more can be submitted until we reset the wedged bit.
3270          */
3271         list_for_each_entry(tl, &i915->gt.timelines, link) {
3272                 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3273                         struct drm_i915_gem_request *rq;
3274
3275                         rq = i915_gem_active_peek(&tl->engine[i].last_request,
3276                                                   &i915->drm.struct_mutex);
3277                         if (!rq)
3278                                 continue;
3279
3280                         /* We can't use our normal waiter as we want to
3281                          * avoid recursively trying to handle the current
3282                          * reset. The basic dma_fence_default_wait() installs
3283                          * a callback for dma_fence_signal(), which is
3284                          * triggered by our nop handler (indirectly, the
3285                          * callback enables the signaler thread which is
3286                          * woken by the nop_submit_request() advancing the seqno
3287                          * and when the seqno passes the fence, the signaler
3288                          * then signals the fence waking us up).
3289                          */
3290                         if (dma_fence_default_wait(&rq->fence, true,
3291                                                    MAX_SCHEDULE_TIMEOUT) < 0)
3292                                 return false;
3293                 }
3294         }
3295
3296         /* Undo nop_submit_request. We prevent all new i915 requests from
3297          * being queued (by disallowing execbuf whilst wedged) so having
3298          * waited for all active requests above, we know the system is idle
3299          * and do not have to worry about a thread being inside
3300          * engine->submit_request() as we swap over. So unlike installing
3301          * the nop_submit_request on reset, we can do this from normal
3302          * context and do not require stop_machine().
3303          */
3304         intel_engines_reset_default_submission(i915);
3305         i915_gem_contexts_lost(i915);
3306
3307         smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
3308         clear_bit(I915_WEDGED, &i915->gpu_error.flags);
3309
3310         return true;
3311 }
3312
3313 static void
3314 i915_gem_retire_work_handler(struct work_struct *work)
3315 {
3316         struct drm_i915_private *dev_priv =
3317                 container_of(work, typeof(*dev_priv), gt.retire_work.work);
3318         struct drm_device *dev = &dev_priv->drm;
3319
3320         /* Come back later if the device is busy... */
3321         if (mutex_trylock(&dev->struct_mutex)) {
3322                 i915_gem_retire_requests(dev_priv);
3323                 mutex_unlock(&dev->struct_mutex);
3324         }
3325
3326         /*
3327          * Keep the retire handler running until we are finally idle.
3328          * We do not need to do this test under locking as in the worst-case
3329          * we queue the retire worker once too often.
3330          */
3331         if (READ_ONCE(dev_priv->gt.awake))
3332                 queue_delayed_work(dev_priv->wq,
3333                                    &dev_priv->gt.retire_work,
3334                                    round_jiffies_up_relative(HZ));
3335 }
3336
3337 static inline bool
3338 new_requests_since_last_retire(const struct drm_i915_private *i915)
3339 {
3340         return (READ_ONCE(i915->gt.active_requests) ||
3341                 work_pending(&i915->gt.idle_work.work));
3342 }
3343
3344 static void
3345 i915_gem_idle_work_handler(struct work_struct *work)
3346 {
3347         struct drm_i915_private *dev_priv =
3348                 container_of(work, typeof(*dev_priv), gt.idle_work.work);
3349         bool rearm_hangcheck;
3350         ktime_t end;
3351
3352         if (!READ_ONCE(dev_priv->gt.awake))
3353                 return;
3354
3355         /*
3356          * Wait for the last execlists context to complete, but bail out in
3357          * case a new request is submitted.
3358          */
3359         end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
3360         do {
3361                 if (new_requests_since_last_retire(dev_priv))
3362                         return;
3363
3364                 if (intel_engines_are_idle(dev_priv))
3365                         break;
3366
3367                 usleep_range(100, 500);
3368         } while (ktime_before(ktime_get(), end));
3369
3370         rearm_hangcheck =
3371                 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
3372
3373         if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
3374                 /* Currently busy, come back later */
3375                 mod_delayed_work(dev_priv->wq,
3376                                  &dev_priv->gt.idle_work,
3377                                  msecs_to_jiffies(50));
3378                 goto out_rearm;
3379         }
3380
3381         /*
3382          * New request retired after this work handler started, extend active
3383          * period until next instance of the work.
3384          */
3385         if (new_requests_since_last_retire(dev_priv))
3386                 goto out_unlock;
3387
3388         /*
3389          * Be paranoid and flush a concurrent interrupt to make sure
3390          * we don't reactivate any irq tasklets after parking.
3391          *
3392          * FIXME: Note that even though we have waited for execlists to be idle,
3393          * there may still be an in-flight interrupt even though the CSB
3394          * is now empty. synchronize_irq() makes sure that a residual interrupt
3395          * is completed before we continue, but it doesn't prevent the HW from
3396          * raising a spurious interrupt later. To complete the shield we should
3397          * coordinate disabling the CS irq with flushing the interrupts.
3398          */
3399         synchronize_irq(dev_priv->drm.irq);
3400
3401         intel_engines_park(dev_priv);
3402         i915_gem_timelines_park(dev_priv);
3403
3404         i915_pmu_gt_parked(dev_priv);
3405
3406         GEM_BUG_ON(!dev_priv->gt.awake);
3407         dev_priv->gt.awake = false;
3408         rearm_hangcheck = false;
3409
3410         if (INTEL_GEN(dev_priv) >= 6)
3411                 gen6_rps_idle(dev_priv);
3412
3413         intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
3414
3415         intel_runtime_pm_put(dev_priv);
3416 out_unlock:
3417         mutex_unlock(&dev_priv->drm.struct_mutex);
3418
3419 out_rearm:
3420         if (rearm_hangcheck) {
3421                 GEM_BUG_ON(!dev_priv->gt.awake);
3422                 i915_queue_hangcheck(dev_priv);
3423         }
3424 }
3425
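/*
 * i915_gem_close_object() - GEM handle-close hook for this object/file pair.
 *
 * Drops the per-context lookup entries the closing file held for this
 * object, releasing each vma's open-count (and the vma itself once unused),
 * and finally drops the object unless it is still active on the GPU.
 */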
3426 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
3427 {
3428         struct drm_i915_private *i915 = to_i915(gem->dev);
3429         struct drm_i915_gem_object *obj = to_intel_bo(gem);
3430         struct drm_i915_file_private *fpriv = file->driver_priv;
3431         struct i915_lut_handle *lut, *ln;
3432
3433         mutex_lock(&i915->drm.struct_mutex);
3434
3435         list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
3436                 struct i915_gem_context *ctx = lut->ctx;
3437                 struct i915_vma *vma;
3438
3439                 GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
3440                 if (ctx->file_priv != fpriv)
3441                         continue;
3442
3443                 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
3444                 GEM_BUG_ON(vma->obj != obj);
3445
3446                 /* We allow the process to have multiple handles to the same
3447                  * vma, in the same fd namespace, by virtue of flink/open.
3448                  */
3449                 GEM_BUG_ON(!vma->open_count);
3450                 if (!--vma->open_count && !i915_vma_is_ggtt(vma))
3451                         i915_vma_close(vma);
3452
3453                 list_del(&lut->obj_link);
3454                 list_del(&lut->ctx_link);
3455
3456                 kmem_cache_free(i915->luts, lut);
3457                 __i915_gem_object_release_unless_active(obj);
3458         }
3459
3460         mutex_unlock(&i915->drm.struct_mutex);
3461 }
3462
3463 static unsigned long to_wait_timeout(s64 timeout_ns)
3464 {
3465         if (timeout_ns < 0)
3466                 return MAX_SCHEDULE_TIMEOUT;
3467
3468         if (timeout_ns == 0)
3469                 return 0;
3470
3471         return nsecs_to_jiffies_timeout(timeout_ns);
3472 }
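/*
 * Timeout semantics implemented by to_wait_timeout() and the wait ioctl:
 *   timeout_ns < 0  -> wait indefinitely (MAX_SCHEDULE_TIMEOUT)
 *   timeout_ns == 0 -> do not block, just report whether the object is busy
 *   timeout_ns > 0  -> bounded wait; the remaining time is written back to
 *                      args->timeout_ns on return
 */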
3473
3474 /**
3475  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3476  * @dev: drm device pointer
3477  * @data: ioctl data blob
3478  * @file: drm file pointer
3479  *
3480  * Returns 0 if successful, else an error is returned with the remaining time in
3481  * the timeout parameter.
3482  *  -ETIME: object is still busy after timeout
3483  *  -ERESTARTSYS: signal interrupted the wait
3484  *  -ENOENT: object doesn't exist
3485  * Also possible, but rare:
3486  *  -EAGAIN: incomplete, restart syscall
3487  *  -ENOMEM: out of memory
3488  *  -ENODEV: Internal IRQ fail
3489  *  -E?: The add request failed
3490  *
3491  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3492  * non-zero timeout parameter the wait ioctl will wait for the given number of
3493  * nanoseconds on an object becoming unbusy. Since the wait itself does so
3494  * without holding struct_mutex the object may become re-busied before this
3495  * function completes. A similar but shorter race condition exists in the
3496  * busy ioctl.
3497  */
3498 int
3499 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3500 {
3501         struct drm_i915_gem_wait *args = data;
3502         struct drm_i915_gem_object *obj;
3503         ktime_t start;
3504         long ret;
3505
3506         if (args->flags != 0)
3507                 return -EINVAL;
3508
3509         obj = i915_gem_object_lookup(file, args->bo_handle);
3510         if (!obj)
3511                 return -ENOENT;
3512
3513         start = ktime_get();
3514
3515         ret = i915_gem_object_wait(obj,
3516                                    I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
3517                                    to_wait_timeout(args->timeout_ns),
3518                                    to_rps_client(file));
3519
3520         if (args->timeout_ns > 0) {
3521                 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3522                 if (args->timeout_ns < 0)
3523                         args->timeout_ns = 0;
3524
3525                 /*
3526                  * Apparently ktime isn't accurate enough and occasionally has a
3527                  * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3528                  * things up to make the test happy. We allow up to 1 jiffy.
3529                  *
3530                  * This is a regression from the timespec->ktime conversion.
3531                  */
3532                 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3533                         args->timeout_ns = 0;
3534
3535                 /* Asked to wait beyond the jiffie/scheduler precision? */
3536                 if (ret == -ETIME && args->timeout_ns)
3537                         ret = -EAGAIN;
3538         }
3539
3540         i915_gem_object_put(obj);
3541         return ret;
3542 }
3543
3544 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
3545 {
3546         int ret, i;
3547
3548         for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3549                 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
3550                 if (ret)
3551                         return ret;
3552         }
3553
3554         return 0;
3555 }
3556
3557 static int wait_for_engines(struct drm_i915_private *i915)
3558 {
3559         if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
3560                 dev_err(i915->drm.dev,
3561                         "Failed to idle engines, declaring wedged!\n");
3562                 if (drm_debug & DRM_UT_DRIVER) {
3563                         struct drm_printer p = drm_debug_printer(__func__);
3564                         struct intel_engine_cs *engine;
3565                         enum intel_engine_id id;
3566
3567                         for_each_engine(engine, i915, id)
3568                                 intel_engine_dump(engine, &p,
3569                                                   "%s", engine->name);
3570                 }
3571
3572                 i915_gem_set_wedged(i915);
3573                 return -EIO;
3574         }
3575
3576         return 0;
3577 }
3578
3579 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
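/*
 * i915_gem_wait_for_idle() - wait for all outstanding requests to complete.
 *
 * With I915_WAIT_LOCKED (struct_mutex held) every timeline is drained,
 * completed requests are retired and the engines are then checked for
 * idleness, wedging the device if they fail to settle; without the flag
 * only the global timeline is waited upon.
 */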
3580 {
3581         int ret;
3582
3583         /* If the device is asleep, we have no requests outstanding */
3584         if (!READ_ONCE(i915->gt.awake))
3585                 return 0;
3586
3587         if (flags & I915_WAIT_LOCKED) {
3588                 struct i915_gem_timeline *tl;
3589
3590                 lockdep_assert_held(&i915->drm.struct_mutex);
3591
3592                 list_for_each_entry(tl, &i915->gt.timelines, link) {
3593                         ret = wait_for_timeline(tl, flags);
3594                         if (ret)
3595                                 return ret;
3596                 }
3597                 i915_gem_retire_requests(i915);
3598
3599                 ret = wait_for_engines(i915);
3600         } else {
3601                 ret = wait_for_timeline(&i915->gt.global_timeline, flags);
3602         }
3603
3604         return ret;
3605 }
3606
3607 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3608 {
3609         /*
3610          * We manually flush the CPU domain so that we can override and
3611          * force the flush for the display, and perform it asynchronously.
3612          */
3613         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3614         if (obj->cache_dirty)
3615                 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
3616         obj->base.write_domain = 0;
3617 }
3618
3619 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
3620 {
3621         if (!READ_ONCE(obj->pin_global))
3622                 return;
3623
3624         mutex_lock(&obj->base.dev->struct_mutex);
3625         __i915_gem_object_flush_for_display(obj);
3626         mutex_unlock(&obj->base.dev->struct_mutex);
3627 }
3628
3629 /**
3630  * Moves a single object to the WC read, and possibly write domain.
3631  * @obj: object to act on
3632  * @write: ask for write access or read only
3633  *
3634  * This function returns when the move is complete, including waiting on
3635  * flushes to occur.
3636  */
3637 int
3638 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3639 {
3640         int ret;
3641
3642         lockdep_assert_held(&obj->base.dev->struct_mutex);
3643
3644         ret = i915_gem_object_wait(obj,
3645                                    I915_WAIT_INTERRUPTIBLE |
3646                                    I915_WAIT_LOCKED |
3647                                    (write ? I915_WAIT_ALL : 0),
3648                                    MAX_SCHEDULE_TIMEOUT,
3649                                    NULL);
3650         if (ret)
3651                 return ret;
3652
3653         if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
3654                 return 0;
3655
3656         /* Flush and acquire obj->pages so that we are coherent through
3657          * direct access in memory with previous cached writes through
3658          * shmemfs and that our cache domain tracking remains valid.
3659          * For example, if the obj->filp was moved to swap without us
3660          * being notified and releasing the pages, we would mistakenly
3661          * continue to assume that the obj remained out of the CPU cached
3662          * domain.
3663          */
3664         ret = i915_gem_object_pin_pages(obj);
3665         if (ret)
3666                 return ret;
3667
3668         flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
3669
3670         /* Serialise direct access to this object with the barriers for
3671          * coherent writes from the GPU, by effectively invalidating the
3672          * WC domain upon first access.
3673          */
3674         if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
3675                 mb();
3676
3677         /* It should now be out of any other write domains, and we can update
3678          * the domain values for our changes.
3679          */
3680         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
3681         obj->base.read_domains |= I915_GEM_DOMAIN_WC;
3682         if (write) {
3683                 obj->base.read_domains = I915_GEM_DOMAIN_WC;
3684                 obj->base.write_domain = I915_GEM_DOMAIN_WC;
3685                 obj->mm.dirty = true;
3686         }
3687
3688         i915_gem_object_unpin_pages(obj);
3689         return 0;
3690 }
3691
3692 /**
3693  * Moves a single object to the GTT read, and possibly write domain.
3694  * @obj: object to act on
3695  * @write: ask for write access or read only
3696  *
3697  * This function returns when the move is complete, including waiting on
3698  * flushes to occur.
3699  */
3700 int
3701 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3702 {
3703         int ret;
3704
3705         lockdep_assert_held(&obj->base.dev->struct_mutex);
3706
3707         ret = i915_gem_object_wait(obj,
3708                                    I915_WAIT_INTERRUPTIBLE |
3709                                    I915_WAIT_LOCKED |
3710                                    (write ? I915_WAIT_ALL : 0),
3711                                    MAX_SCHEDULE_TIMEOUT,
3712                                    NULL);
3713         if (ret)
3714                 return ret;
3715
3716         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3717                 return 0;
3718
3719         /* Flush and acquire obj->pages so that we are coherent through
3720          * direct access in memory with previous cached writes through
3721          * shmemfs and that our cache domain tracking remains valid.
3722          * For example, if the obj->filp was moved to swap without us
3723          * being notified and releasing the pages, we would mistakenly
3724          * continue to assume that the obj remained out of the CPU cached
3725          * domain.
3726          */
3727         ret = i915_gem_object_pin_pages(obj);
3728         if (ret)
3729                 return ret;
3730
3731         flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
3732
3733         /* Serialise direct access to this object with the barriers for
3734          * coherent writes from the GPU, by effectively invalidating the
3735          * GTT domain upon first access.
3736          */
3737         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3738                 mb();
3739
3740         /* It should now be out of any other write domains, and we can update
3741          * the domain values for our changes.
3742          */
3743         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3744         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3745         if (write) {
3746                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3747                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3748                 obj->mm.dirty = true;
3749         }
3750
3751         i915_gem_object_unpin_pages(obj);
3752         return 0;
3753 }
3754
3755 /**
3756  * Changes the cache-level of an object across all VMA.
3757  * @obj: object to act on
3758  * @cache_level: new cache level to set for the object
3759  *
3760  * After this function returns, the object will be in the new cache-level
3761  * across all GTT and the contents of the backing storage will be coherent,
3762  * with respect to the new cache-level. In order to keep the backing storage
3763  * coherent for all users, we only allow a single cache level to be set
3764  * globally on the object and prevent it from being changed whilst the
3765          * hardware is reading from the object. That is, if the object is currently
3766  * on the scanout it will be set to uncached (or equivalent display
3767  * cache coherency) and all non-MOCS GPU access will also be uncached so
3768  * that all direct access to the scanout remains coherent.
3769  */
3770 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3771                                     enum i915_cache_level cache_level)
3772 {
3773         struct i915_vma *vma;
3774         int ret;
3775
3776         lockdep_assert_held(&obj->base.dev->struct_mutex);
3777
3778         if (obj->cache_level == cache_level)
3779                 return 0;
3780
3781         /* Inspect the list of currently bound VMA and unbind any that would
3782          * be invalid given the new cache-level. This is principally to
3783          * catch the issue of the CS prefetch crossing page boundaries and
3784          * reading an invalid PTE on older architectures.
3785          */
3786 restart:
3787         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3788                 if (!drm_mm_node_allocated(&vma->node))
3789                         continue;
3790
3791                 if (i915_vma_is_pinned(vma)) {
3792                         DRM_DEBUG("cannot change the cache level of pinned objects\n");
3793                         return -EBUSY;
3794                 }
3795
3796                 if (!i915_vma_is_closed(vma) &&
3797                     i915_gem_valid_gtt_space(vma, cache_level))
3798                         continue;
3799
3800                 ret = i915_vma_unbind(vma);
3801                 if (ret)
3802                         return ret;
3803
3804                 /* As unbinding may affect other elements in the
3805                  * obj->vma_list (due to side-effects from retiring
3806                  * an active vma), play safe and restart the iterator.
3807                  */
3808                 goto restart;
3809         }
3810
3811         /* We can reuse the existing drm_mm nodes but need to change the
3812          * cache-level on the PTE. We could simply unbind them all and
3813          * rebind with the correct cache-level on next use. However since
3814          * we already have a valid slot, dma mapping, pages etc, we may as
3815          * well rewrite the PTE in the belief that doing so tramples upon less
3816          * state and so involves less work.
3817          */
3818         if (obj->bind_count) {
3819                 /* Before we change the PTE, the GPU must not be accessing it.
3820                  * If we wait upon the object, we know that all the bound
3821                  * VMA are no longer active.
3822                  */
3823                 ret = i915_gem_object_wait(obj,
3824                                            I915_WAIT_INTERRUPTIBLE |
3825                                            I915_WAIT_LOCKED |
3826                                            I915_WAIT_ALL,
3827                                            MAX_SCHEDULE_TIMEOUT,
3828                                            NULL);
3829                 if (ret)
3830                         return ret;
3831
3832                 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3833                     cache_level != I915_CACHE_NONE) {
3834                         /* Access to snoopable pages through the GTT is
3835                          * incoherent and on some machines causes a hard
3836          * lockup. Relinquish the CPU mmapping to force
3837                          * userspace to refault in the pages and we can
3838                          * then double check if the GTT mapping is still
3839                          * valid for that pointer access.
3840                          */
3841                         i915_gem_release_mmap(obj);
3842
3843                         /* As we no longer need a fence for GTT access,
3844                          * we can relinquish it now (and so prevent having
3845                          * to steal a fence from someone else on the next
3846                          * fence request). Note GPU activity would have
3847                          * dropped the fence as all snoopable access is
3848                          * supposed to be linear.
3849                          */
3850                         for_each_ggtt_vma(vma, obj) {
3851                                 ret = i915_vma_put_fence(vma);
3852                                 if (ret)
3853                                         return ret;
3854                         }
3855                 } else {
3856                         /* We either have incoherent backing store and
3857                          * so no GTT access or the architecture is fully
3858                          * coherent. In such cases, existing GTT mmaps
3859                          * ignore the cache bit in the PTE and we can
3860                          * rewrite it without confusing the GPU or having
3861                          * to force userspace to fault back in its mmaps.
3862                          */
3863                 }
3864
3865                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3866                         if (!drm_mm_node_allocated(&vma->node))
3867                                 continue;
3868
3869                         ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3870                         if (ret)
3871                                 return ret;
3872                 }
3873         }
3874
3875         list_for_each_entry(vma, &obj->vma_list, obj_link)
3876                 vma->node.color = cache_level;
3877         i915_gem_object_set_cache_coherency(obj, cache_level);
3878         obj->cache_dirty = true; /* Always invalidate stale cachelines */
3879
3880         return 0;
3881 }
3882
3883 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3884                                struct drm_file *file)
3885 {
3886         struct drm_i915_gem_caching *args = data;
3887         struct drm_i915_gem_object *obj;
3888         int err = 0;
3889
3890         rcu_read_lock();
3891         obj = i915_gem_object_lookup_rcu(file, args->handle);
3892         if (!obj) {
3893                 err = -ENOENT;
3894                 goto out;
3895         }
3896
3897         switch (obj->cache_level) {
3898         case I915_CACHE_LLC:
3899         case I915_CACHE_L3_LLC:
3900                 args->caching = I915_CACHING_CACHED;
3901                 break;
3902
3903         case I915_CACHE_WT:
3904                 args->caching = I915_CACHING_DISPLAY;
3905                 break;
3906
3907         default:
3908                 args->caching = I915_CACHING_NONE;
3909                 break;
3910         }
3911 out:
3912         rcu_read_unlock();
3913         return err;
3914 }
3915
3916 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3917                                struct drm_file *file)
3918 {
3919         struct drm_i915_private *i915 = to_i915(dev);
3920         struct drm_i915_gem_caching *args = data;
3921         struct drm_i915_gem_object *obj;
3922         enum i915_cache_level level;
3923         int ret = 0;
3924
3925         switch (args->caching) {
3926         case I915_CACHING_NONE:
3927                 level = I915_CACHE_NONE;
3928                 break;
3929         case I915_CACHING_CACHED:
3930                 /*
3931                  * Due to a HW issue on BXT A stepping, GPU stores via a
3932                  * snooped mapping may leave stale data in a corresponding CPU
3933                  * cacheline, whereas normally such cachelines would get
3934                  * invalidated.
3935                  */
3936                 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3937                         return -ENODEV;
3938
3939                 level = I915_CACHE_LLC;
3940                 break;
3941         case I915_CACHING_DISPLAY:
3942                 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3943                 break;
3944         default:
3945                 return -EINVAL;
3946         }
3947
3948         obj = i915_gem_object_lookup(file, args->handle);
3949         if (!obj)
3950                 return -ENOENT;
3951
3952         /*
3953          * The caching mode of a proxy object is handled by its generator, and
3954          * not allowed to be changed by userspace.
3955          */
3956         if (i915_gem_object_is_proxy(obj)) {
3957                 ret = -ENXIO;
3958                 goto out;
3959         }
3960
3961         if (obj->cache_level == level)
3962                 goto out;
3963
3964         ret = i915_gem_object_wait(obj,
3965                                    I915_WAIT_INTERRUPTIBLE,
3966                                    MAX_SCHEDULE_TIMEOUT,
3967                                    to_rps_client(file));
3968         if (ret)
3969                 goto out;
3970
3971         ret = i915_mutex_lock_interruptible(dev);
3972         if (ret)
3973                 goto out;
3974
3975         ret = i915_gem_object_set_cache_level(obj, level);
3976         mutex_unlock(&dev->struct_mutex);
3977
3978 out:
3979         i915_gem_object_put(obj);
3980         return ret;
3981 }
3982
3983 /*
3984  * Prepare buffer for display plane (scanout, cursors, etc).
3985  * Can be called from an uninterruptible phase (modesetting) and allows
3986  * any flushes to be pipelined (for pageflips).
3987  */
3988 struct i915_vma *
3989 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3990                                      u32 alignment,
3991                                      const struct i915_ggtt_view *view)
3992 {
3993         struct i915_vma *vma;
3994         int ret;
3995
3996         lockdep_assert_held(&obj->base.dev->struct_mutex);
3997
3998         /* Mark the global pin early so that we account for the
3999          * display coherency whilst setting up the cache domains.
4000          */
4001         obj->pin_global++;
4002
4003         /* The display engine is not coherent with the LLC cache on gen6.  As
4004          * a result, we make sure that the pinning that is about to occur is
4005          * done with uncached PTEs. This is the lowest common denominator for all
4006          * chipsets.
4007          *
4008          * However for gen6+, we could do better by using the GFDT bit instead
4009          * of uncaching, which would allow us to flush all the LLC-cached data
4010          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
4011          */
4012         ret = i915_gem_object_set_cache_level(obj,
4013                                               HAS_WT(to_i915(obj->base.dev)) ?
4014                                               I915_CACHE_WT : I915_CACHE_NONE);
4015         if (ret) {
4016                 vma = ERR_PTR(ret);
4017                 goto err_unpin_global;
4018         }
4019
4020         /* As the user may map the buffer once pinned in the display plane
4021          * (e.g. libkms for the bootup splash), we have to ensure that we
4022          * always use map_and_fenceable for all scanout buffers. However,
4023          * it may simply be too big to fit into mappable, in which case
4024          * put it anyway and hope that userspace can cope (but always first
4025          * try to preserve the existing ABI).
4026          */
4027         vma = ERR_PTR(-ENOSPC);
4028         if (!view || view->type == I915_GGTT_VIEW_NORMAL)
4029                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
4030                                                PIN_MAPPABLE | PIN_NONBLOCK);
4031         if (IS_ERR(vma)) {
4032                 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4033                 unsigned int flags;
4034
4035                 /* Valleyview is definitely limited to scanning out the first
4036                  * 512MiB. Let's presume this behaviour was inherited from the
4037                  * g4x display engine and that all earlier gen are similarly
4038                  * limited. Testing suggests that it is a little more
4039                  * complicated than this. For example, Cherryview appears quite
4040                  * happy to scanout from anywhere within its global aperture.
4041                  */
4042                 flags = 0;
4043                 if (HAS_GMCH_DISPLAY(i915))
4044                         flags = PIN_MAPPABLE;
4045                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
4046         }
4047         if (IS_ERR(vma))
4048                 goto err_unpin_global;
4049
4050         vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
4051
4052         /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
4053         __i915_gem_object_flush_for_display(obj);
4054         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
4055
4056         /* It should now be out of any other write domains, and we can update
4057          * the domain values for our changes.
4058          */
4059         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4060
4061         return vma;
4062
4063 err_unpin_global:
4064         obj->pin_global--;
4065         return vma;
4066 }
4067
4068 void
4069 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
4070 {
4071         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
4072
4073         if (WARN_ON(vma->obj->pin_global == 0))
4074                 return;
4075
4076         if (--vma->obj->pin_global == 0)
4077                 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
4078
4079         /* Bump the LRU to try to avoid premature eviction whilst flipping */
4080         i915_gem_object_bump_inactive_ggtt(vma->obj);
4081
4082         i915_vma_unpin(vma);
4083 }
4084
4085 /**
4086  * Moves a single object to the CPU read, and possibly write domain.
4087  * @obj: object to act on
4088  * @write: requesting write or read-only access
4089  *
4090  * This function returns when the move is complete, including waiting on
4091  * flushes to occur.
4092  */
4093 int
4094 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4095 {
4096         int ret;
4097
4098         lockdep_assert_held(&obj->base.dev->struct_mutex);
4099
4100         ret = i915_gem_object_wait(obj,
4101                                    I915_WAIT_INTERRUPTIBLE |
4102                                    I915_WAIT_LOCKED |
4103                                    (write ? I915_WAIT_ALL : 0),
4104                                    MAX_SCHEDULE_TIMEOUT,
4105                                    NULL);
4106         if (ret)
4107                 return ret;
4108
4109         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
4110
4111         /* Flush the CPU cache if it's still invalid. */
4112         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4113                 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
4114                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4115         }
4116
4117         /* It should now be out of any other write domains, and we can update
4118          * the domain values for our changes.
4119          */
4120         GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
4121
4122         /* If we're writing through the CPU, then the GPU read domains will
4123          * need to be invalidated at next use.
4124          */
4125         if (write)
4126                 __start_cpu_write(obj);
4127
4128         return 0;
4129 }
4130
4131 /* Throttle our rendering by waiting until the ring has completed our requests
4132  * emitted over 20 msec ago.
4133  *
4134  * Note that if we were to use the current jiffies each time around the loop,
4135  * we wouldn't escape the function with any frames outstanding if the time to
4136  * render a frame was over 20ms.
4137  *
4138  * This should get us reasonable parallelism between CPU and GPU but also
4139  * relatively low latency when blocking on a particular request to finish.
4140  */
4141 static int
4142 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4143 {
4144         struct drm_i915_private *dev_priv = to_i915(dev);
4145         struct drm_i915_file_private *file_priv = file->driver_priv;
4146         unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4147         struct drm_i915_gem_request *request, *target = NULL;
4148         long ret;
4149
4150         /* ABI: return -EIO if already wedged */
4151         if (i915_terminally_wedged(&dev_priv->gpu_error))
4152                 return -EIO;
4153
4154         spin_lock(&file_priv->mm.lock);
4155         list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
4156                 if (time_after_eq(request->emitted_jiffies, recent_enough))
4157                         break;
4158
4159                 if (target) {
4160                         list_del(&target->client_link);
4161                         target->file_priv = NULL;
4162                 }
4163
4164                 target = request;
4165         }
4166         if (target)
4167                 i915_gem_request_get(target);
4168         spin_unlock(&file_priv->mm.lock);
4169
4170         if (target == NULL)
4171                 return 0;
4172
4173         ret = i915_wait_request(target,
4174                                 I915_WAIT_INTERRUPTIBLE,
4175                                 MAX_SCHEDULE_TIMEOUT);
4176         i915_gem_request_put(target);
4177
4178         return ret < 0 ? ret : 0;
4179 }
4180
4181 struct i915_vma *
4182 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4183                          const struct i915_ggtt_view *view,
4184                          u64 size,
4185                          u64 alignment,
4186                          u64 flags)
4187 {
4188         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
4189         struct i915_address_space *vm = &dev_priv->ggtt.base;
4190         struct i915_vma *vma;
4191         int ret;
4192
4193         lockdep_assert_held(&obj->base.dev->struct_mutex);
4194
4195         if (!view && flags & PIN_MAPPABLE) {
4196                 /* If the required space is larger than the available
4197                  * aperture, we will not be able to find a slot for the
4198                  * object and unbinding the object now will be in
4199                  * vain. Worse, doing so may cause us to ping-pong
4200                  * the object in and out of the Global GTT and
4201                  * waste a lot of cycles under the mutex.
4202                  */
4203                 if (obj->base.size > dev_priv->ggtt.mappable_end)
4204                         return ERR_PTR(-E2BIG);
4205
4206                 /* If NONBLOCK is set the caller is optimistically
4207                  * trying to cache the full object within the mappable
4208                  * aperture, and *must* have a fallback in place for
4209                  * situations where we cannot bind the object. We
4210                  * can be a little more lax here and use the fallback
4211                  * more often to avoid costly migrations of ourselves
4212                  * and other objects within the aperture.
4213                  *
4214                  * Half-the-aperture is used as a simple heuristic.
4215                  * More interesting would be to search for a free
4216                  * block prior to making the commitment to unbind.
4217                  * That caters for the self-harm case, and with a
4218                  * little more heuristics (e.g. NOFAULT, NOEVICT)
4219                  * we could try to minimise harm to others.
4220                  */
4221                 if (flags & PIN_NONBLOCK &&
4222                     obj->base.size > dev_priv->ggtt.mappable_end / 2)
4223                         return ERR_PTR(-ENOSPC);
4224         }
4225
4226         vma = i915_vma_instance(obj, vm, view);
4227         if (unlikely(IS_ERR(vma)))
4228                 return vma;
4229
4230         if (i915_vma_misplaced(vma, size, alignment, flags)) {
4231                 if (flags & PIN_NONBLOCK) {
4232                         if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
4233                                 return ERR_PTR(-ENOSPC);
4234
4235                         if (flags & PIN_MAPPABLE &&
4236                             vma->fence_size > dev_priv->ggtt.mappable_end / 2)
4237                                 return ERR_PTR(-ENOSPC);
4238                 }
4239
4240                 WARN(i915_vma_is_pinned(vma),
4241                      "bo is already pinned in ggtt with incorrect alignment:"
4242                      " offset=%08x, req.alignment=%llx,"
4243                      " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
4244                      i915_ggtt_offset(vma), alignment,
4245                      !!(flags & PIN_MAPPABLE),
4246                      i915_vma_is_map_and_fenceable(vma));
4247                 ret = i915_vma_unbind(vma);
4248                 if (ret)
4249                         return ERR_PTR(ret);
4250         }
4251
4252         ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
4253         if (ret)
4254                 return ERR_PTR(ret);
4255
4256         return vma;
4257 }
4258
4259 static __always_inline unsigned int __busy_read_flag(unsigned int id)
4260 {
4261         /* Note that we could alias engines in the execbuf API, but
4262          * that would be very unwise as it prevents userspace from
4263          * exercising fine control over engine selection. Ahem.
4264          *
4265          * This should be something like EXEC_MAX_ENGINE instead of
4266          * I915_NUM_ENGINES.
4267          */
4268         BUILD_BUG_ON(I915_NUM_ENGINES > 16);
4269         return 0x10000 << id;
4270 }
4271
4272 static __always_inline unsigned int __busy_write_id(unsigned int id)
4273 {
4274         /* The uABI guarantees an active writer is also amongst the read
4275          * engines. This would be true if we accessed the activity tracking
4276          * under the lock, but as we perform the lookup of the object and
4277          * its activity locklessly we can not guarantee that the last_write
4278          * being active implies that we have set the same engine flag from
4279          * last_read - hence we always set both read and write busy for
4280          * last_write.
4281          */
4282         return id | __busy_read_flag(id);
4283 }
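/*
 * A minimal illustration of the resulting encoding (derived from the two
 * helpers above): read activity occupies the high 16 bits, the writer's
 * uabi id the low 16 bits. For example, a write on the engine with
 * uabi_id 1 plus a read on the engine with uabi_id 2 reports
 *
 *	__busy_write_id(1) | __busy_read_flag(2) == 0x20001 | 0x40000 == 0x60001
 */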
4284
4285 static __always_inline unsigned int
4286 __busy_set_if_active(const struct dma_fence *fence,
4287                      unsigned int (*flag)(unsigned int id))
4288 {
4289         struct drm_i915_gem_request *rq;
4290
4291         /* We have to check the current hw status of the fence as the uABI
4292          * guarantees forward progress. We could rely on the idle worker
4293          * to eventually flush us, but to minimise latency just ask the
4294          * hardware.
4295          *
4296          * Note we only report on the status of native fences.
4297          */
4298         if (!dma_fence_is_i915(fence))
4299                 return 0;
4300
4301         /* opencode to_request() in order to avoid const warnings */
4302         rq = container_of(fence, struct drm_i915_gem_request, fence);
4303         if (i915_gem_request_completed(rq))
4304                 return 0;
4305
4306         return flag(rq->engine->uabi_id);
4307 }
4308
4309 static __always_inline unsigned int
4310 busy_check_reader(const struct dma_fence *fence)
4311 {
4312         return __busy_set_if_active(fence, __busy_read_flag);
4313 }
4314
4315 static __always_inline unsigned int
4316 busy_check_writer(const struct dma_fence *fence)
4317 {
4318         if (!fence)
4319                 return 0;
4320
4321         return __busy_set_if_active(fence, __busy_write_id);
4322 }
4323
4324 int
4325 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4326                     struct drm_file *file)
4327 {
4328         struct drm_i915_gem_busy *args = data;
4329         struct drm_i915_gem_object *obj;
4330         struct reservation_object_list *list;
4331         unsigned int seq;
4332         int err;
4333
4334         err = -ENOENT;
4335         rcu_read_lock();
4336         obj = i915_gem_object_lookup_rcu(file, args->handle);
4337         if (!obj)
4338                 goto out;
4339
4340         /* A discrepancy here is that we do not report the status of
4341          * non-i915 fences, i.e. even though we may report the object as idle,
4342          * a call to set-domain may still stall waiting for foreign rendering.
4343          * This also means that wait-ioctl may report an object as busy,
4344          * where busy-ioctl considers it idle.
4345          *
4346          * We trade the ability to warn of foreign fences to report on which
4347          * i915 engines are active for the object.
4348          *
4349          * Alternatively, we can trade that extra information on read/write
4350          * activity with
4351          *      args->busy =
4352          *              !reservation_object_test_signaled_rcu(obj->resv, true);
4353          * to report the overall busyness. This is what the wait-ioctl does.
4354          *
4355          */
4356 retry:
4357         seq = raw_read_seqcount(&obj->resv->seq);
4358
4359         /* Translate the exclusive fence to the READ *and* WRITE engine */
4360         args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
4361
4362         /* Translate shared fences to READ set of engines */
4363         list = rcu_dereference(obj->resv->fence);
4364         if (list) {
4365                 unsigned int shared_count = list->shared_count, i;
4366
4367                 for (i = 0; i < shared_count; ++i) {
4368                         struct dma_fence *fence =
4369                                 rcu_dereference(list->shared[i]);
4370
4371                         args->busy |= busy_check_reader(fence);
4372                 }
4373         }
4374
4375         if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
4376                 goto retry;
4377
4378         err = 0;
4379 out:
4380         rcu_read_unlock();
4381         return err;
4382 }
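/*
 * A hedged sketch of how userspace might decode the value reported above
 * (the variable names are assumptions, not part of this file):
 *
 *	u32 busy = args.busy;
 *	u32 write_engine = busy & 0xffff;   (uabi id of the active writer, if any)
 *	u32 read_engines = busy >> 16;      (bit N set when engine uabi id N reads)
 */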
4383
4384 int
4385 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4386                         struct drm_file *file_priv)
4387 {
4388         return i915_gem_ring_throttle(dev, file_priv);
4389 }
4390
4391 int
4392 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4393                        struct drm_file *file_priv)
4394 {
4395         struct drm_i915_private *dev_priv = to_i915(dev);
4396         struct drm_i915_gem_madvise *args = data;
4397         struct drm_i915_gem_object *obj;
4398         int err;
4399
4400         switch (args->madv) {
4401         case I915_MADV_DONTNEED:
4402         case I915_MADV_WILLNEED:
4403                 break;
4404         default:
4405                 return -EINVAL;
4406         }
4407
4408         obj = i915_gem_object_lookup(file_priv, args->handle);
4409         if (!obj)
4410                 return -ENOENT;
4411
4412         err = mutex_lock_interruptible(&obj->mm.lock);
4413         if (err)
4414                 goto out;
4415
4416         if (i915_gem_object_has_pages(obj) &&
4417             i915_gem_object_is_tiled(obj) &&
4418             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4419                 if (obj->mm.madv == I915_MADV_WILLNEED) {
4420                         GEM_BUG_ON(!obj->mm.quirked);
4421                         __i915_gem_object_unpin_pages(obj);
4422                         obj->mm.quirked = false;
4423                 }
4424                 if (args->madv == I915_MADV_WILLNEED) {
4425                         GEM_BUG_ON(obj->mm.quirked);
4426                         __i915_gem_object_pin_pages(obj);
4427                         obj->mm.quirked = true;
4428                 }
4429         }
4430
4431         if (obj->mm.madv != __I915_MADV_PURGED)
4432                 obj->mm.madv = args->madv;
4433
4434         /* if the object is no longer attached, discard its backing storage */
4435         if (obj->mm.madv == I915_MADV_DONTNEED &&
4436             !i915_gem_object_has_pages(obj))
4437                 i915_gem_object_truncate(obj);
4438
4439         args->retained = obj->mm.madv != __I915_MADV_PURGED;
4440         mutex_unlock(&obj->mm.lock);
4441
4442 out:
4443         i915_gem_object_put(obj);
4444         return err;
4445 }
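/*
 * A usage sketch under stated assumptions (the drmIoctl() call and the
 * repopulate step are illustrative; only the fields touched by the ioctl
 * above are taken from this file):
 *
 *	struct drm_i915_gem_madvise arg = { .handle = handle };
 *
 *	arg.madv = I915_MADV_DONTNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);	(contents may now be purged)
 *
 *	arg.madv = I915_MADV_WILLNEED;
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &arg);
 *	if (!arg.retained)
 *		repopulate(handle);			(backing store was purged)
 */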
4446
4447 static void
4448 frontbuffer_retire(struct i915_gem_active *active,
4449                    struct drm_i915_gem_request *request)
4450 {
4451         struct drm_i915_gem_object *obj =
4452                 container_of(active, typeof(*obj), frontbuffer_write);
4453
4454         intel_fb_obj_flush(obj, ORIGIN_CS);
4455 }
4456
4457 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4458                           const struct drm_i915_gem_object_ops *ops)
4459 {
4460         mutex_init(&obj->mm.lock);
4461
4462         INIT_LIST_HEAD(&obj->vma_list);
4463         INIT_LIST_HEAD(&obj->lut_list);
4464         INIT_LIST_HEAD(&obj->batch_pool_link);
4465
4466         obj->ops = ops;
4467
4468         reservation_object_init(&obj->__builtin_resv);
4469         obj->resv = &obj->__builtin_resv;
4470
4471         obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4472         init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
4473
4474         obj->mm.madv = I915_MADV_WILLNEED;
4475         INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
4476         mutex_init(&obj->mm.get_page.lock);
4477
4478         i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4479 }
4480
4481 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4482         .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
4483                  I915_GEM_OBJECT_IS_SHRINKABLE,
4484
4485         .get_pages = i915_gem_object_get_pages_gtt,
4486         .put_pages = i915_gem_object_put_pages_gtt,
4487
4488         .pwrite = i915_gem_object_pwrite_gtt,
4489 };
4490
4491 static int i915_gem_object_create_shmem(struct drm_device *dev,
4492                                         struct drm_gem_object *obj,
4493                                         size_t size)
4494 {
4495         struct drm_i915_private *i915 = to_i915(dev);
4496         unsigned long flags = VM_NORESERVE;
4497         struct file *filp;
4498
4499         drm_gem_private_object_init(dev, obj, size);
4500
4501         if (i915->mm.gemfs)
4502                 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
4503                                                  flags);
4504         else
4505                 filp = shmem_file_setup("i915", size, flags);
4506
4507         if (IS_ERR(filp))
4508                 return PTR_ERR(filp);
4509
4510         obj->filp = filp;
4511
4512         return 0;
4513 }
4514
4515 struct drm_i915_gem_object *
4516 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4517 {
4518         struct drm_i915_gem_object *obj;
4519         struct address_space *mapping;
4520         unsigned int cache_level;
4521         gfp_t mask;
4522         int ret;
4523
4524         /* There is a prevalence of the assumption that we fit the object's
4525          * page count inside a 32bit _signed_ variable. Let's document this and
4526          * catch if we ever need to fix it. In the meantime, if you do spot
4527          * such a local variable, please consider fixing!
4528          */
4529         if (size >> PAGE_SHIFT > INT_MAX)
4530                 return ERR_PTR(-E2BIG);
4531
4532         if (overflows_type(size, obj->base.size))
4533                 return ERR_PTR(-E2BIG);
4534
4535         obj = i915_gem_object_alloc(dev_priv);
4536         if (obj == NULL)
4537                 return ERR_PTR(-ENOMEM);
4538
4539         ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
4540         if (ret)
4541                 goto fail;
4542
4543         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4544         if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4545                 /* 965gm cannot relocate objects above 4GiB. */
4546                 mask &= ~__GFP_HIGHMEM;
4547                 mask |= __GFP_DMA32;
4548         }
4549
4550         mapping = obj->base.filp->f_mapping;
4551         mapping_set_gfp_mask(mapping, mask);
4552         GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4553
4554         i915_gem_object_init(obj, &i915_gem_object_ops);
4555
4556         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4557         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4558
4559         if (HAS_LLC(dev_priv))
4560                 /* On some devices, we can have the GPU use the LLC (the CPU
4561                  * cache) for about a 10% performance improvement
4562                  * compared to uncached.  Graphics requests other than
4563                  * display scanout are coherent with the CPU in
4564                  * accessing this cache.  This means in this mode we
4565                  * don't need to clflush on the CPU side, and on the
4566                  * GPU side we only need to flush internal caches to
4567                  * get data visible to the CPU.
4568                  *
4569                  * However, we maintain the display planes as UC, and so
4570                  * need to rebind when first used as such.
4571                  */
4572                 cache_level = I915_CACHE_LLC;
4573         else
4574                 cache_level = I915_CACHE_NONE;
4575
4576         i915_gem_object_set_cache_coherency(obj, cache_level);
4577
4578         trace_i915_gem_object_create(obj);
4579
4580         return obj;
4581
4582 fail:
4583         i915_gem_object_free(obj);
4584         return ERR_PTR(ret);
4585 }
4586
4587 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4588 {
4589         /* If we are the last user of the backing storage (be it shmemfs
4590          * pages or stolen etc), we know that the pages are going to be
4591          * immediately released. In this case, we can then skip copying
4592          * back the contents from the GPU.
4593          */
4594
4595         if (obj->mm.madv != I915_MADV_WILLNEED)
4596                 return false;
4597
4598         if (obj->base.filp == NULL)
4599                 return true;
4600
4601         /* At first glance, this looks racy, but then again so would be
4602          * userspace racing mmap against close. However, the first external
4603          * reference to the filp can only be obtained through the
4604          * i915_gem_mmap_ioctl() which safeguards us against the user
4605          * acquiring such a reference whilst we are in the middle of
4606          * freeing the object.
4607          */
4608         return atomic_long_read(&obj->base.filp->f_count) == 1;
4609 }
4610
4611 static void __i915_gem_free_objects(struct drm_i915_private *i915,
4612                                     struct llist_node *freed)
4613 {
4614         struct drm_i915_gem_object *obj, *on;
4615
4616         intel_runtime_pm_get(i915);
4617         llist_for_each_entry_safe(obj, on, freed, freed) {
4618                 struct i915_vma *vma, *vn;
4619
4620                 trace_i915_gem_object_destroy(obj);
4621
4622                 mutex_lock(&i915->drm.struct_mutex);
4623
4624                 GEM_BUG_ON(i915_gem_object_is_active(obj));
4625                 list_for_each_entry_safe(vma, vn,
4626                                          &obj->vma_list, obj_link) {
4627                         GEM_BUG_ON(i915_vma_is_active(vma));
4628                         vma->flags &= ~I915_VMA_PIN_MASK;
4629                         i915_vma_close(vma);
4630                 }
4631                 GEM_BUG_ON(!list_empty(&obj->vma_list));
4632                 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
4633
4634                 /* This serializes freeing with the shrinker. Since the free
4635                  * is delayed, first by RCU then by the workqueue, we want the
4636                  * shrinker to be able to free pages of unreferenced objects,
4637                  * or else we may oom whilst there are plenty of deferred
4638                  * freed objects.
4639                  */
4640                 if (i915_gem_object_has_pages(obj)) {
4641                         spin_lock(&i915->mm.obj_lock);
4642                         list_del_init(&obj->mm.link);
4643                         spin_unlock(&i915->mm.obj_lock);
4644                 }
4645
4646                 mutex_unlock(&i915->drm.struct_mutex);
4647
4648                 GEM_BUG_ON(obj->bind_count);
4649                 GEM_BUG_ON(obj->userfault_count);
4650                 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4651                 GEM_BUG_ON(!list_empty(&obj->lut_list));
4652
4653                 if (obj->ops->release)
4654                         obj->ops->release(obj);
4655
4656                 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4657                         atomic_set(&obj->mm.pages_pin_count, 0);
4658                 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4659                 GEM_BUG_ON(i915_gem_object_has_pages(obj));
4660
4661                 if (obj->base.import_attach)
4662                         drm_prime_gem_destroy(&obj->base, NULL);
4663
4664                 reservation_object_fini(&obj->__builtin_resv);
4665                 drm_gem_object_release(&obj->base);
4666                 i915_gem_info_remove_obj(i915, obj->base.size);
4667
4668                 kfree(obj->bit_17);
4669                 i915_gem_object_free(obj);
4670
4671                 if (on)
4672                         cond_resched();
4673         }
4674         intel_runtime_pm_put(i915);
4675 }
4676
4677 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4678 {
4679         struct llist_node *freed;
4680
4681         /* Free the oldest, most stale object to keep the free_list short */
4682         freed = NULL;
4683         if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
4684                 /* Only one consumer of llist_del_first() allowed */
4685                 spin_lock(&i915->mm.free_lock);
4686                 freed = llist_del_first(&i915->mm.free_list);
4687                 spin_unlock(&i915->mm.free_lock);
4688         }
4689         if (unlikely(freed)) {
4690                 freed->next = NULL;
4691                 __i915_gem_free_objects(i915, freed);
4692         }
4693 }
4694
4695 static void __i915_gem_free_work(struct work_struct *work)
4696 {
4697         struct drm_i915_private *i915 =
4698                 container_of(work, struct drm_i915_private, mm.free_work);
4699         struct llist_node *freed;
4700
4701         /* All file-owned VMA should have been released by this point through
4702          * i915_gem_close_object(), or earlier by i915_gem_context_close().
4703          * However, the object may also be bound into the global GTT (e.g.
4704          * older GPUs without per-process support, or for direct access through
4705          * the GTT either for the user or for scanout). Those VMA still need to
4706          * be unbound now.
4707          */
4708
4709         spin_lock(&i915->mm.free_lock);
4710         while ((freed = llist_del_all(&i915->mm.free_list))) {
4711                 spin_unlock(&i915->mm.free_lock);
4712
4713                 __i915_gem_free_objects(i915, freed);
4714                 if (need_resched())
4715                         return;
4716
4717                 spin_lock(&i915->mm.free_lock);
4718         }
4719         spin_unlock(&i915->mm.free_lock);
4720 }
4721
4722 static void __i915_gem_free_object_rcu(struct rcu_head *head)
4723 {
4724         struct drm_i915_gem_object *obj =
4725                 container_of(head, typeof(*obj), rcu);
4726         struct drm_i915_private *i915 = to_i915(obj->base.dev);
4727
4728         /* We can't simply use call_rcu() from i915_gem_free_object()
4729          * as we need to block whilst unbinding, and the call_rcu
4730          * task may be called from softirq context. So we take a
4731          * detour through a worker.
4732          */
4733         if (llist_add(&obj->freed, &i915->mm.free_list))
4734                 schedule_work(&i915->mm.free_work);
4735 }
4736
4737 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4738 {
4739         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4740
4741         if (obj->mm.quirked)
4742                 __i915_gem_object_unpin_pages(obj);
4743
4744         if (discard_backing_storage(obj))
4745                 obj->mm.madv = I915_MADV_DONTNEED;
4746
4747         /* Before we free the object, make sure any pure RCU-only
4748          * read-side critical sections are complete, e.g.
4749          * i915_gem_busy_ioctl(). For the corresponding synchronized
4750          * lookup see i915_gem_object_lookup_rcu().
4751          */
4752         call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4753 }
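/*
 * Summary of the deferred free path above: i915_gem_free_object() defers the
 * release via call_rcu(); after the grace period __i915_gem_free_object_rcu()
 * adds the object to i915->mm.free_list and schedules mm.free_work;
 * __i915_gem_free_work() (or an opportunistic i915_gem_flush_free_objects())
 * finally unbinds and releases the object in __i915_gem_free_objects() under
 * struct_mutex.
 */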
4754
4755 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4756 {
4757         lockdep_assert_held(&obj->base.dev->struct_mutex);
4758
4759         if (!i915_gem_object_has_active_reference(obj) &&
4760             i915_gem_object_is_active(obj))
4761                 i915_gem_object_set_active_reference(obj);
4762         else
4763                 i915_gem_object_put(obj);
4764 }
4765
4766 static void assert_kernel_context_is_current(struct drm_i915_private *i915)
4767 {
4768         struct i915_gem_context *kernel_context = i915->kernel_context;
4769         struct intel_engine_cs *engine;
4770         enum intel_engine_id id;
4771
4772         for_each_engine(engine, i915, id) {
4773                 GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
4774                 GEM_BUG_ON(engine->last_retired_context != kernel_context);
4775         }
4776 }
4777
4778 void i915_gem_sanitize(struct drm_i915_private *i915)
4779 {
4780         if (i915_terminally_wedged(&i915->gpu_error)) {
4781                 mutex_lock(&i915->drm.struct_mutex);
4782                 i915_gem_unset_wedged(i915);
4783                 mutex_unlock(&i915->drm.struct_mutex);
4784         }
4785
4786         /*
4787          * If we inherit context state from the BIOS or earlier occupants
4788          * of the GPU, the GPU may be in an inconsistent state when we
4789          * try to take over. The only way to remove the earlier state
4790          * is by resetting. However, resetting on earlier gen is tricky as
4791          * it may impact the display and we are uncertain about the
4792          * stability of the reset, so for now this is only applied to gen5+.
4793          */
4794         if (INTEL_GEN(i915) >= 5) {
4795                 int reset = intel_gpu_reset(i915, ALL_ENGINES);
4796                 WARN_ON(reset && reset != -ENODEV);
4797         }
4798 }
4799
4800 int i915_gem_suspend(struct drm_i915_private *dev_priv)
4801 {
4802         struct drm_device *dev = &dev_priv->drm;
4803         int ret;
4804
4805         intel_runtime_pm_get(dev_priv);
4806         intel_suspend_gt_powersave(dev_priv);
4807
4808         mutex_lock(&dev->struct_mutex);
4809
4810         /* We have to flush all the executing contexts to main memory so
4811          * that they can be saved in the hibernation image. To ensure the last
4812          * context image is coherent, we have to switch away from it. That
4813          * leaves the dev_priv->kernel_context still active when
4814          * we actually suspend, and its image in memory may not match the GPU
4815          * state. Fortunately, the kernel_context is disposable and we do
4816          * not rely on its state.
4817          */
4818         if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
4819                 ret = i915_gem_switch_to_kernel_context(dev_priv);
4820                 if (ret)
4821                         goto err_unlock;
4822
4823                 ret = i915_gem_wait_for_idle(dev_priv,
4824                                              I915_WAIT_INTERRUPTIBLE |
4825                                              I915_WAIT_LOCKED);
4826                 if (ret && ret != -EIO)
4827                         goto err_unlock;
4828
4829                 assert_kernel_context_is_current(dev_priv);
4830         }
4831         i915_gem_contexts_lost(dev_priv);
4832         mutex_unlock(&dev->struct_mutex);
4833
4834         intel_guc_suspend(dev_priv);
4835
4836         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4837         cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4838
4839         /* As the idle_work rearms itself if it detects a race, play safe and
4840          * repeat the flush until it is definitely idle.
4841          */
4842         drain_delayed_work(&dev_priv->gt.idle_work);
4843
4844         /* Assert that we successfully flushed all the work and
4845          * reset the GPU back to its idle, low power state.
4846          */
4847         WARN_ON(dev_priv->gt.awake);
4848         if (WARN_ON(!intel_engines_are_idle(dev_priv)))
4849                 i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
4850
4851         /*
4852          * Neither the BIOS, ourselves nor any other kernel
4853          * expects the system to be in execlists mode on startup,
4854          * so we need to reset the GPU back to legacy mode. And the only
4855          * known way to disable logical contexts is through a GPU reset.
4856          *
4857          * So in order to leave the system in a known default configuration,
4858          * always reset the GPU upon unload and suspend. Afterwards we then
4859          * clean up the GEM state tracking, flushing off the requests and
4860          * leaving the system in a known idle state.
4861          *
4862          * Note that it is of the utmost importance that the GPU is idle and
4863          * all stray writes are flushed *before* we dismantle the backing
4864          * storage for the pinned objects.
4865          *
4866          * However, since we are uncertain that resetting the GPU on older
4867          * machines is a good idea, we don't - just in case it leaves the
4868          * machine in an unusable condition.
4869          */
4870         i915_gem_sanitize(dev_priv);
4871
4872         intel_runtime_pm_put(dev_priv);
4873         return 0;
4874
4875 err_unlock:
4876         mutex_unlock(&dev->struct_mutex);
4877         intel_runtime_pm_put(dev_priv);
4878         return ret;
4879 }
4880
4881 void i915_gem_resume(struct drm_i915_private *i915)
4882 {
4883         WARN_ON(i915->gt.awake);
4884
4885         mutex_lock(&i915->drm.struct_mutex);
4886         intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
4887
4888         i915_gem_restore_gtt_mappings(i915);
4889         i915_gem_restore_fences(i915);
4890
4891         /*
4892          * As we didn't flush the kernel context before suspend, we cannot
4893          * guarantee that the context image is complete. So let's just reset
4894          * it and start again.
4895          */
4896         i915->gt.resume(i915);
4897
4898         if (i915_gem_init_hw(i915))
4899                 goto err_wedged;
4900
4901         intel_guc_resume(i915);
4902
4903         /* Always reload a context for powersaving. */
4904         if (i915_gem_switch_to_kernel_context(i915))
4905                 goto err_wedged;
4906
4907 out_unlock:
4908         intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
4909         mutex_unlock(&i915->drm.struct_mutex);
4910         return;
4911
4912 err_wedged:
4913         if (!i915_terminally_wedged(&i915->gpu_error)) {
4914                 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
4915                 i915_gem_set_wedged(i915);
4916         }
4917         goto out_unlock;
4918 }
4919
4920 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4921 {
4922         if (INTEL_GEN(dev_priv) < 5 ||
4923             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4924                 return;
4925
4926         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4927                                  DISP_TILE_SURFACE_SWIZZLING);
4928
4929         if (IS_GEN5(dev_priv))
4930                 return;
4931
4932         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4933         if (IS_GEN6(dev_priv))
4934                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4935         else if (IS_GEN7(dev_priv))
4936                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4937         else if (IS_GEN8(dev_priv))
4938                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4939         else
4940                 BUG();
4941 }
4942
4943 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4944 {
4945         I915_WRITE(RING_CTL(base), 0);
4946         I915_WRITE(RING_HEAD(base), 0);
4947         I915_WRITE(RING_TAIL(base), 0);
4948         I915_WRITE(RING_START(base), 0);
4949 }
4950
4951 static void init_unused_rings(struct drm_i915_private *dev_priv)
4952 {
4953         if (IS_I830(dev_priv)) {
4954                 init_unused_ring(dev_priv, PRB1_BASE);
4955                 init_unused_ring(dev_priv, SRB0_BASE);
4956                 init_unused_ring(dev_priv, SRB1_BASE);
4957                 init_unused_ring(dev_priv, SRB2_BASE);
4958                 init_unused_ring(dev_priv, SRB3_BASE);
4959         } else if (IS_GEN2(dev_priv)) {
4960                 init_unused_ring(dev_priv, SRB0_BASE);
4961                 init_unused_ring(dev_priv, SRB1_BASE);
4962         } else if (IS_GEN3(dev_priv)) {
4963                 init_unused_ring(dev_priv, PRB1_BASE);
4964                 init_unused_ring(dev_priv, PRB2_BASE);
4965         }
4966 }
4967
4968 static int __i915_gem_restart_engines(void *data)
4969 {
4970         struct drm_i915_private *i915 = data;
4971         struct intel_engine_cs *engine;
4972         enum intel_engine_id id;
4973         int err;
4974
4975         for_each_engine(engine, i915, id) {
4976                 err = engine->init_hw(engine);
4977                 if (err)
4978                         return err;
4979         }
4980
4981         return 0;
4982 }
4983
4984 int i915_gem_init_hw(struct drm_i915_private *dev_priv)
4985 {
4986         int ret;
4987
4988         dev_priv->gt.last_init_time = ktime_get();
4989
4990         /* Double layer security blanket, see i915_gem_init() */
4991         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4992
4993         if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4994                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4995
4996         if (IS_HASWELL(dev_priv))
4997                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
4998                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
4999
5000         if (HAS_PCH_NOP(dev_priv)) {
5001                 if (IS_IVYBRIDGE(dev_priv)) {
5002                         u32 temp = I915_READ(GEN7_MSG_CTL);
5003                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
5004                         I915_WRITE(GEN7_MSG_CTL, temp);
5005                 } else if (INTEL_GEN(dev_priv) >= 7) {
5006                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
5007                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
5008                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
5009                 }
5010         }
5011
5012         i915_gem_init_swizzling(dev_priv);
5013
5014         /*
5015          * At least 830 can leave some of the unused rings
5016          * "active" (ie. head != tail) after resume which
5017          * will prevent c3 entry. Make sure all unused rings
5018          * are totally idle.
5019          */
5020         init_unused_rings(dev_priv);
5021
5022         BUG_ON(!dev_priv->kernel_context);
5023         if (i915_terminally_wedged(&dev_priv->gpu_error)) {
5024                 ret = -EIO;
5025                 goto out;
5026         }
5027
5028         ret = i915_ppgtt_init_hw(dev_priv);
5029         if (ret) {
5030                 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
5031                 goto out;
5032         }
5033
5034         /* We can't enable contexts until all firmware is loaded */
5035         ret = intel_uc_init_hw(dev_priv);
5036         if (ret)
5037                 goto out;
5038
5039         intel_mocs_init_l3cc_table(dev_priv);
5040
5041         /* Only when the HW is re-initialised can we replay the requests */
5042         ret = __i915_gem_restart_engines(dev_priv);
5043 out:
5044         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5045         return ret;
5046 }
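/*
 * A descriptive note on the ordering above: forcewake is held throughout,
 * then swizzling and platform workarounds are applied, the unused rings are
 * quiesced, PPGTT is enabled, firmware is loaded via intel_uc_init_hw()
 * (contexts cannot be enabled before that), the MOCS l3cc table is
 * programmed, and only then are the engines restarted so that pending
 * requests can be replayed against fully initialised hardware.
 */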
5047
5048 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
5049 {
5050         struct i915_gem_context *ctx;
5051         struct intel_engine_cs *engine;
5052         enum intel_engine_id id;
5053         int err;
5054
5055         /*
5056          * As we reset the gpu during very early sanitisation, the current
5057          * register state on the GPU should reflect its default values.
5058          * We load a context onto the hw (with restore-inhibit), then switch
5059          * over to a second context to save that default register state. We
5060          * can then prime every new context with that state so they all start
5061          * from the same default HW values.
5062          */
5063
5064         ctx = i915_gem_context_create_kernel(i915, 0);
5065         if (IS_ERR(ctx))
5066                 return PTR_ERR(ctx);
5067
5068         for_each_engine(engine, i915, id) {
5069                 struct drm_i915_gem_request *rq;
5070
5071                 rq = i915_gem_request_alloc(engine, ctx);
5072                 if (IS_ERR(rq)) {
5073                         err = PTR_ERR(rq);
5074                         goto out_ctx;
5075                 }
5076
5077                 err = 0;
5078                 if (engine->init_context)
5079                         err = engine->init_context(rq);
5080
5081                 __i915_add_request(rq, true);
5082                 if (err)
5083                         goto err_active;
5084         }
5085
5086         err = i915_gem_switch_to_kernel_context(i915);
5087         if (err)
5088                 goto err_active;
5089
5090         err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
5091         if (err)
5092                 goto err_active;
5093
5094         assert_kernel_context_is_current(i915);
5095
5096         for_each_engine(engine, i915, id) {
5097                 struct i915_vma *state;
5098
5099                 state = ctx->engine[id].state;
5100                 if (!state)
5101                         continue;
5102
5103                 /*
5104                  * As we will hold a reference to the logical state, it will
5105                  * not be torn down with the context, and importantly the
5106                  * object will hold onto its vma (making it possible for a
5107                  * stray GTT write to corrupt our defaults). Unmap the vma
5108                  * from the GTT to prevent such accidents and reclaim the
5109                  * space.
5110                  */
5111                 err = i915_vma_unbind(state);
5112                 if (err)
5113                         goto err_active;
5114
5115                 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
5116                 if (err)
5117                         goto err_active;
5118
5119                 engine->default_state = i915_gem_object_get(state->obj);
5120         }
5121
5122         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
5123                 unsigned int found = intel_engines_has_context_isolation(i915);
5124
5125                 /*
5126                  * Make sure that classes with multiple engine instances all
5127                  * share the same basic configuration.
5128                  */
5129                 for_each_engine(engine, i915, id) {
5130                         unsigned int bit = BIT(engine->uabi_class);
5131                         unsigned int expected = engine->default_state ? bit : 0;
5132
5133                         if ((found & bit) != expected) {
5134                                 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
5135                                           engine->uabi_class, engine->name);
5136                         }
5137                 }
5138         }
5139
5140 out_ctx:
5141         i915_gem_context_set_closed(ctx);
5142         i915_gem_context_put(ctx);
5143         return err;
5144
5145 err_active:
5146         /*
5147          * If we have to abandon now, we expect the engines to be idle
5148          * and ready to be torn-down. First try to flush any remaining
5149          * request, ensure we are pointing at the kernel context and
5150          * then remove it.
5151          */
5152         if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
5153                 goto out_ctx;
5154
5155         if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
5156                 goto out_ctx;
5157
5158         i915_gem_contexts_lost(i915);
5159         goto out_ctx;
5160 }
5161
5162 int i915_gem_init(struct drm_i915_private *dev_priv)
5163 {
5164         int ret;
5165
5166         /*
5167          * We need to fall back to 4K pages since gvt gtt handling doesn't
5168          * support huge page entries - we will need to check whether the
5169          * hypervisor mm can support huge guest pages or just do emulation in gvt.
5170          */
5171         if (intel_vgpu_active(dev_priv))
5172                 mkwrite_device_info(dev_priv)->page_sizes =
5173                         I915_GTT_PAGE_SIZE_4K;
5174
5175         dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
5176
5177         if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
5178                 dev_priv->gt.resume = intel_lr_context_resume;
5179                 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
5180         } else {
5181                 dev_priv->gt.resume = intel_legacy_submission_resume;
5182                 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
5183         }
5184
5185         ret = i915_gem_init_userptr(dev_priv);
5186         if (ret)
5187                 return ret;
5188
5189         ret = intel_uc_init_wq(dev_priv);
5190         if (ret)
5191                 return ret;
5192
5193         /* This is just a security blanket to placate dragons.
5194          * On some systems, we very sporadically observe that the first TLBs
5195          * used by the CS may be stale, despite us poking the TLB reset. If
5196          * we hold the forcewake during initialisation these problems
5197          * just magically go away.
5198          */
5199         mutex_lock(&dev_priv->drm.struct_mutex);
5200         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5201
5202         ret = i915_gem_init_ggtt(dev_priv);
5203         if (ret) {
5204                 GEM_BUG_ON(ret == -EIO);
5205                 goto err_unlock;
5206         }
5207
5208         ret = i915_gem_contexts_init(dev_priv);
5209         if (ret) {
5210                 GEM_BUG_ON(ret == -EIO);
5211                 goto err_ggtt;
5212         }
5213
5214         ret = intel_engines_init(dev_priv);
5215         if (ret) {
5216                 GEM_BUG_ON(ret == -EIO);
5217                 goto err_context;
5218         }
5219
5220         intel_init_gt_powersave(dev_priv);
5221
5222         ret = intel_uc_init(dev_priv);
5223         if (ret)
5224                 goto err_pm;
5225
5226         ret = i915_gem_init_hw(dev_priv);
5227         if (ret)
5228                 goto err_uc_init;
5229
5230         /*
5231          * Despite its name intel_init_clock_gating applies the display
5232          * clock gating workarounds, GT mmio workarounds and the occasional
5233          * GT power context workaround. Worse, sometimes it includes a context
5234          * register workaround which we need to apply before we record the
5235          * default HW state for all contexts.
5236          *
5237          * FIXME: break up the workarounds and apply them at the right time!
5238          */
5239         intel_init_clock_gating(dev_priv);
5240
5241         ret = __intel_engines_record_defaults(dev_priv);
5242         if (ret)
5243                 goto err_init_hw;
5244
5245         if (i915_inject_load_failure()) {
5246                 ret = -ENODEV;
5247                 goto err_init_hw;
5248         }
5249
5250         if (i915_inject_load_failure()) {
5251                 ret = -EIO;
5252                 goto err_init_hw;
5253         }
5254
5255         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5256         mutex_unlock(&dev_priv->drm.struct_mutex);
5257
5258         return 0;
5259
5260         /*
5261          * Unwinding is complicated by that we want to handle -EIO to mean
5262          * disable GPU submission but keep KMS alive. We want to mark the
5263          * HW as irreversibly wedged, but keep enough state around that the
5264          * driver doesn't explode during runtime.
5265          */
5266 err_init_hw:
5267         i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
5268         i915_gem_contexts_lost(dev_priv);
5269         intel_uc_fini_hw(dev_priv);
5270 err_uc_init:
5271         intel_uc_fini(dev_priv);
5272 err_pm:
5273         if (ret != -EIO) {
5274                 intel_cleanup_gt_powersave(dev_priv);
5275                 i915_gem_cleanup_engines(dev_priv);
5276         }
5277 err_context:
5278         if (ret != -EIO)
5279                 i915_gem_contexts_fini(dev_priv);
5280 err_ggtt:
5281 err_unlock:
5282         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5283         mutex_unlock(&dev_priv->drm.struct_mutex);
5284
5285         intel_uc_fini_wq(dev_priv);
5286
5287         if (ret != -EIO)
5288                 i915_gem_cleanup_userptr(dev_priv);
5289
5290         if (ret == -EIO) {
5291                 /*
5292                  * Allow engine initialisation to fail by marking the GPU as
5293                  * wedged. But we only want to do this when the GPU is angry;
5294                  * for any other failure, such as an allocation failure, bail.
5295                  */
5296                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
5297                         DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
5298                         i915_gem_set_wedged(dev_priv);
5299                 }
5300                 ret = 0;
5301         }
5302
5303         i915_gem_drain_freed_objects(dev_priv);
5304         return ret;
5305 }
5306
5307 void i915_gem_init_mmio(struct drm_i915_private *i915)
5308 {
5309         i915_gem_sanitize(i915);
5310 }
5311
5312 void
5313 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
5314 {
5315         struct intel_engine_cs *engine;
5316         enum intel_engine_id id;
5317
5318         for_each_engine(engine, dev_priv, id)
5319                 dev_priv->gt.cleanup_engine(engine);
5320 }
5321
5322 void
5323 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5324 {
5325         int i;
5326
5327         if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5328             !IS_CHERRYVIEW(dev_priv))
5329                 dev_priv->num_fence_regs = 32;
5330         else if (INTEL_INFO(dev_priv)->gen >= 4 ||
5331                  IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5332                  IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
5333                 dev_priv->num_fence_regs = 16;
5334         else
5335                 dev_priv->num_fence_regs = 8;
5336
5337         if (intel_vgpu_active(dev_priv))
5338                 dev_priv->num_fence_regs =
5339                                 I915_READ(vgtif_reg(avail_rs.fence_num));
5340
5341         /* Initialize fence registers to zero */
5342         for (i = 0; i < dev_priv->num_fence_regs; i++) {
5343                 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
5344
5345                 fence->i915 = dev_priv;
5346                 fence->id = i;
5347                 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
5348         }
5349         i915_gem_restore_fences(dev_priv);
5350
5351         i915_gem_detect_bit_6_swizzle(dev_priv);
5352 }
5353
5354 static void i915_gem_init__mm(struct drm_i915_private *i915)
5355 {
5356         spin_lock_init(&i915->mm.object_stat_lock);
5357         spin_lock_init(&i915->mm.obj_lock);
5358         spin_lock_init(&i915->mm.free_lock);
5359
5360         init_llist_head(&i915->mm.free_list);
5361
5362         INIT_LIST_HEAD(&i915->mm.unbound_list);
5363         INIT_LIST_HEAD(&i915->mm.bound_list);
5364         INIT_LIST_HEAD(&i915->mm.fence_list);
5365         INIT_LIST_HEAD(&i915->mm.userfault_list);
5366
5367         INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
5368 }
5369
5370 int
5371 i915_gem_load_init(struct drm_i915_private *dev_priv)
5372 {
5373         int err = -ENOMEM;
5374
5375         dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
5376         if (!dev_priv->objects)
5377                 goto err_out;
5378
5379         dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
5380         if (!dev_priv->vmas)
5381                 goto err_objects;
5382
5383         dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
5384         if (!dev_priv->luts)
5385                 goto err_vmas;
5386
5387         dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
5388                                         SLAB_HWCACHE_ALIGN |
5389                                         SLAB_RECLAIM_ACCOUNT |
5390                                         SLAB_TYPESAFE_BY_RCU);
5391         if (!dev_priv->requests)
5392                 goto err_luts;
5393
5394         dev_priv->dependencies = KMEM_CACHE(i915_dependency,
5395                                             SLAB_HWCACHE_ALIGN |
5396                                             SLAB_RECLAIM_ACCOUNT);
5397         if (!dev_priv->dependencies)
5398                 goto err_requests;
5399
5400         dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
5401         if (!dev_priv->priorities)
5402                 goto err_dependencies;
5403
5404         mutex_lock(&dev_priv->drm.struct_mutex);
5405         INIT_LIST_HEAD(&dev_priv->gt.timelines);
5406         err = i915_gem_timeline_init__global(dev_priv);
5407         mutex_unlock(&dev_priv->drm.struct_mutex);
5408         if (err)
5409                 goto err_priorities;
5410
5411         i915_gem_init__mm(dev_priv);
5412
5413         INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
5414                           i915_gem_retire_work_handler);
5415         INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
5416                           i915_gem_idle_work_handler);
5417         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
5418         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5419
5420         atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
5421
5422         spin_lock_init(&dev_priv->fb_tracking.lock);
5423
5424         err = i915_gemfs_init(dev_priv);
5425         if (err)
5426                 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);
5427
5428         return 0;
5429
5430 err_priorities:
5431         kmem_cache_destroy(dev_priv->priorities);
5432 err_dependencies:
5433         kmem_cache_destroy(dev_priv->dependencies);
5434 err_requests:
5435         kmem_cache_destroy(dev_priv->requests);
5436 err_luts:
5437         kmem_cache_destroy(dev_priv->luts);
5438 err_vmas:
5439         kmem_cache_destroy(dev_priv->vmas);
5440 err_objects:
5441         kmem_cache_destroy(dev_priv->objects);
5442 err_out:
5443         return err;
5444 }
5445
5446 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
5447 {
5448         i915_gem_drain_freed_objects(dev_priv);
5449         WARN_ON(!llist_empty(&dev_priv->mm.free_list));
5450         WARN_ON(dev_priv->mm.object_count);
5451
5452         mutex_lock(&dev_priv->drm.struct_mutex);
5453         i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
5454         WARN_ON(!list_empty(&dev_priv->gt.timelines));
5455         mutex_unlock(&dev_priv->drm.struct_mutex);
5456
5457         kmem_cache_destroy(dev_priv->priorities);
5458         kmem_cache_destroy(dev_priv->dependencies);
5459         kmem_cache_destroy(dev_priv->requests);
5460         kmem_cache_destroy(dev_priv->luts);
5461         kmem_cache_destroy(dev_priv->vmas);
5462         kmem_cache_destroy(dev_priv->objects);
5463
5464         /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
5465         rcu_barrier();
5466
5467         i915_gemfs_fini(dev_priv);
5468 }
5469
5470 int i915_gem_freeze(struct drm_i915_private *dev_priv)
5471 {
5472         /* Discard all purgeable objects, let userspace recover those as
5473          * required after resuming.
5474          */
5475         i915_gem_shrink_all(dev_priv);
5476
5477         return 0;
5478 }
5479
5480 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
5481 {
5482         struct drm_i915_gem_object *obj;
5483         struct list_head *phases[] = {
5484                 &dev_priv->mm.unbound_list,
5485                 &dev_priv->mm.bound_list,
5486                 NULL
5487         }, **p;
5488
5489         /* Called just before we write the hibernation image.
5490          *
5491          * We need to update the domain tracking to reflect that the CPU
5492          * will be accessing all the pages to create and restore from the
5493          * hibernation, and so upon restoration those pages will be in the
5494          * CPU domain.
5495          *
5496          * To make sure the hibernation image contains the latest state,
5497          * we update that state just before writing out the image.
5498          *
5499          * To try and reduce the hibernation image, we manually shrink
5500          * the objects as well, see i915_gem_freeze()
5501          */
5502
5503         i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
5504         i915_gem_drain_freed_objects(dev_priv);
5505
5506         spin_lock(&dev_priv->mm.obj_lock);
5507         for (p = phases; *p; p++) {
5508                 list_for_each_entry(obj, *p, mm.link)
5509                         __start_cpu_write(obj);
5510         }
5511         spin_unlock(&dev_priv->mm.obj_lock);
5512
5513         return 0;
5514 }
5515
5516 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5517 {
5518         struct drm_i915_file_private *file_priv = file->driver_priv;
5519         struct drm_i915_gem_request *request;
5520
5521         /* Clean up our request list when the client is going away, so that
5522          * later retire_requests won't dereference our soon-to-be-gone
5523          * file_priv.
5524          */
5525         spin_lock(&file_priv->mm.lock);
5526         list_for_each_entry(request, &file_priv->mm.request_list, client_link)
5527                 request->file_priv = NULL;
5528         spin_unlock(&file_priv->mm.lock);
5529 }
5530
5531 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
5532 {
5533         struct drm_i915_file_private *file_priv;
5534         int ret;
5535
5536         DRM_DEBUG("\n");
5537
5538         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5539         if (!file_priv)
5540                 return -ENOMEM;
5541
5542         file->driver_priv = file_priv;
5543         file_priv->dev_priv = i915;
5544         file_priv->file = file;
5545
5546         spin_lock_init(&file_priv->mm.lock);
5547         INIT_LIST_HEAD(&file_priv->mm.request_list);
5548
5549         file_priv->bsd_engine = -1;
5550
5551         ret = i915_gem_context_open(i915, file);
5552         if (ret)
5553                 kfree(file_priv);
5554
5555         return ret;
5556 }
5557
5558 /**
5559  * i915_gem_track_fb - update frontbuffer tracking
5560  * @old: current GEM buffer for the frontbuffer slots
5561  * @new: new GEM buffer for the frontbuffer slots
5562  * @frontbuffer_bits: bitmask of frontbuffer slots
5563  *
5564  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5565  * from @old and setting them in @new. Both @old and @new can be NULL.
5566  */
5567 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5568                        struct drm_i915_gem_object *new,
5569                        unsigned frontbuffer_bits)
5570 {
5571         /* Control of individual bits within the mask is guarded by
5572          * the owning plane->mutex, i.e. we can never see concurrent
5573          * manipulation of individual bits. But since the bitfield as a whole
5574          * is updated using RMW, we need to use atomics in order to update
5575          * the bits.
5576          */
5577         BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5578                      sizeof(atomic_t) * BITS_PER_BYTE);
5579
5580         if (old) {
5581                 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
5582                 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
5583         }
5584
5585         if (new) {
5586                 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
5587                 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
5588         }
5589 }
5590
5591 /* Allocate a new GEM object and fill it with the supplied data */
5592 struct drm_i915_gem_object *
5593 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
5594                                  const void *data, size_t size)
5595 {
5596         struct drm_i915_gem_object *obj;
5597         struct file *file;
5598         size_t offset;
5599         int err;
5600
5601         obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
5602         if (IS_ERR(obj))
5603                 return obj;
5604
5605         GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
5606
5607         file = obj->base.filp;
5608         offset = 0;
5609         do {
5610                 unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
5611                 struct page *page;
5612                 void *pgdata, *vaddr;
5613
5614                 err = pagecache_write_begin(file, file->f_mapping,
5615                                             offset, len, 0,
5616                                             &page, &pgdata);
5617                 if (err < 0)
5618                         goto fail;
5619
5620                 vaddr = kmap(page);
5621                 memcpy(vaddr, data, len);
5622                 kunmap(page);
5623
5624                 err = pagecache_write_end(file, file->f_mapping,
5625                                           offset, len, len,
5626                                           page, pgdata);
5627                 if (err < 0)
5628                         goto fail;
5629
5630                 size -= len;
5631                 data += len;
5632                 offset += len;
5633         } while (size);
5634
5635         return obj;
5636
5637 fail:
5638         i915_gem_object_put(obj);
5639         return ERR_PTR(err);
5640 }
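/*
 * A minimal usage sketch (the caller and blob names are assumptions):
 *
 *	obj = i915_gem_object_create_from_data(i915, blob, blob_len);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 * The size is rounded up to a whole page and the object is left in the CPU
 * write domain, as asserted by the GEM_BUG_ON() above.
 */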
5641
5642 struct scatterlist *
5643 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
5644                        unsigned int n,
5645                        unsigned int *offset)
5646 {
5647         struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
5648         struct scatterlist *sg;
5649         unsigned int idx, count;
5650
5651         might_sleep();
5652         GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
5653         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
5654
5655         /* As we iterate forward through the sg, we record each entry in a
5656          * radixtree for quick repeated (backwards) lookups. If we have seen
5657          * this index previously, we will have an entry for it.
5658          *
5659          * Initial lookup is O(N), but this is amortized to O(1) for
5660          * sequential page access (where each new request is consecutive
5661          * to the previous one). Repeated lookups are O(lg(obj->base.size)),
5662          * i.e. O(1) with a large constant!
5663          */
5664         if (n < READ_ONCE(iter->sg_idx))
5665                 goto lookup;
5666
5667         mutex_lock(&iter->lock);
5668
5669         /* We prefer to reuse the last sg so that repeated lookups of this
5670          * (or the subsequent) sg are fast - comparing against the last
5671          * sg is faster than going through the radixtree.
5672          */
5673
5674         sg = iter->sg_pos;
5675         idx = iter->sg_idx;
5676         count = __sg_page_count(sg);
5677
5678         while (idx + count <= n) {
5679                 unsigned long exception, i;
5680                 int ret;
5681
5682                 /* If we cannot allocate and insert this entry, or the
5683                  * individual pages from this range, cancel updating the
5684                  * sg_idx so that on this lookup we are forced to linearly
5685                  * scan onwards, but on future lookups we will try the
5686                  * insertion again (in which case we need to be careful of
5687                  * the error return reporting that we have already inserted
5688                  * this index).
5689                  */
5690                 ret = radix_tree_insert(&iter->radix, idx, sg);
5691                 if (ret && ret != -EEXIST)
5692                         goto scan;
5693
5694                 exception =
5695                         RADIX_TREE_EXCEPTIONAL_ENTRY |
5696                         idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
5697                 for (i = 1; i < count; i++) {
5698                         ret = radix_tree_insert(&iter->radix, idx + i,
5699                                                 (void *)exception);
5700                         if (ret && ret != -EEXIST)
5701                                 goto scan;
5702                 }
5703
5704                 idx += count;
5705                 sg = ____sg_next(sg);
5706                 count = __sg_page_count(sg);
5707         }
5708
5709 scan:
5710         iter->sg_pos = sg;
5711         iter->sg_idx = idx;
5712
5713         mutex_unlock(&iter->lock);
5714
5715         if (unlikely(n < idx)) /* insertion completed by another thread */
5716                 goto lookup;
5717
5718         /* In case we failed to insert the entry into the radixtree, we need
5719          * to look beyond the current sg.
5720          */
5721         while (idx + count <= n) {
5722                 idx += count;
5723                 sg = ____sg_next(sg);
5724                 count = __sg_page_count(sg);
5725         }
5726
5727         *offset = n - idx;
5728         return sg;
5729
5730 lookup:
5731         rcu_read_lock();
5732
5733         sg = radix_tree_lookup(&iter->radix, n);
5734         GEM_BUG_ON(!sg);
5735
5736         /* If this index is in the middle of a multi-page sg entry,
5737          * the radixtree will contain an exceptional entry that points
5738          * to the start of that range. We return the base sg entry
5739          * together with the offset of this page within that
5740          * entry's range.
5741          */
5742         *offset = 0;
5743         if (unlikely(radix_tree_exception(sg))) {
5744                 unsigned long base =
5745                         (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
5746
5747                 sg = radix_tree_lookup(&iter->radix, base);
5748                 GEM_BUG_ON(!sg);
5749
5750                 *offset = n - base;
5751         }
5752
5753         rcu_read_unlock();
5754
5755         return sg;
5756 }
5757
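/* Return the struct page backing page index @n. The object must be
 * struct-page backed and its pages must be pinned.
 */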
5758 struct page *
5759 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
5760 {
5761         struct scatterlist *sg;
5762         unsigned int offset;
5763
5764         GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
5765
5766         sg = i915_gem_object_get_sg(obj, n, &offset);
5767         return nth_page(sg_page(sg), offset);
5768 }
5769
5770 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
5771 struct page *
5772 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
5773                                unsigned int n)
5774 {
5775         struct page *page;
5776
5777         page = i915_gem_object_get_page(obj, n);
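        /* If the whole object is already tracked as dirty, every page is
         * flagged dirty when the pages are released, so only an otherwise
         * clean object needs this page dirtied individually.
         */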
5778         if (!obj->mm.dirty)
5779                 set_page_dirty(page);
5780
5781         return page;
5782 }
5783
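/* Return the DMA address of page @n: the DMA address of its scatterlist
 * entry plus the page's offset within that entry. The caller must hold a
 * pin on the object's pages.
 */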
5784 dma_addr_t
5785 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
5786                                 unsigned long n)
5787 {
5788         struct scatterlist *sg;
5789         unsigned int offset;
5790
5791         sg = i915_gem_object_get_sg(obj, n, &offset);
5792         return sg_dma_address(sg) + (offset << PAGE_SHIFT);
5793 }
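
/*
 * Illustrative sketch only, not part of the driver: a hypothetical helper
 * (the name is made up) showing how a caller that has pinned the object's
 * pages might walk its backing store. Sequential indices hit the cached
 * "last sg" fast path in i915_gem_object_get_sg(), so the walk is
 * amortized O(1) per page rather than O(N) per lookup.
 */
static void example_walk_dma_addresses(struct drm_i915_gem_object *obj)
{
        unsigned long n, npages = obj->base.size >> PAGE_SHIFT;

        /* The caller is assumed to hold a pages pin, e.g. taken with
         * i915_gem_object_pin_pages().
         */
        for (n = 0; n < npages; n++) {
                dma_addr_t addr = i915_gem_object_get_dma_address(obj, n);

                DRM_DEBUG_DRIVER("page %lu -> %pad\n", n, &addr);
        }
}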
5794
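/* Replace the object's default shmem backing store with a physically
 * contiguous allocation managed by i915_gem_phys_ops. The object must
 * currently use i915_gem_object_ops, be marked WILLNEED, not be quirked
 * and have no outstanding vmap; on success the new physical pages remain
 * pinned until the object is released.
 */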
5795 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
5796 {
5797         struct sg_table *pages;
5798         int err;
5799
5800         if (align > obj->base.size)
5801                 return -EINVAL;
5802
5803         if (obj->ops == &i915_gem_phys_ops)
5804                 return 0;
5805
5806         if (obj->ops != &i915_gem_object_ops)
5807                 return -EINVAL;
5808
5809         err = i915_gem_object_unbind(obj);
5810         if (err)
5811                 return err;
5812
5813         mutex_lock(&obj->mm.lock);
5814
5815         if (obj->mm.madv != I915_MADV_WILLNEED) {
5816                 err = -EFAULT;
5817                 goto err_unlock;
5818         }
5819
5820         if (obj->mm.quirked) {
5821                 err = -EFAULT;
5822                 goto err_unlock;
5823         }
5824
5825         if (obj->mm.mapping) {
5826                 err = -EBUSY;
5827                 goto err_unlock;
5828         }
5829
5830         pages = fetch_and_zero(&obj->mm.pages);
5831         if (pages) {
5832                 struct drm_i915_private *i915 = to_i915(obj->base.dev);
5833
5834                 __i915_gem_object_reset_page_iter(obj);
5835
5836                 spin_lock(&i915->mm.obj_lock);
5837                 list_del(&obj->mm.link);
5838                 spin_unlock(&i915->mm.obj_lock);
5839         }
5840
5841         obj->ops = &i915_gem_phys_ops;
5842
5843         err = ____i915_gem_object_get_pages(obj);
5844         if (err)
5845                 goto err_xfer;
5846
5847         /* Perma-pin (until release) the physical set of pages */
5848         __i915_gem_object_pin_pages(obj);
5849
5850         if (!IS_ERR_OR_NULL(pages))
5851                 i915_gem_object_ops.put_pages(obj, pages);
5852         mutex_unlock(&obj->mm.lock);
5853         return 0;
5854
5855 err_xfer:
5856         obj->ops = &i915_gem_object_ops;
5857         obj->mm.pages = pages;
5858 err_unlock:
5859         mutex_unlock(&obj->mm.lock);
5860         return err;
5861 }
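
/*
 * Illustrative sketch only, not part of the driver: a hypothetical caller
 * of i915_gem_object_attach_phys(). The alignment argument and the error
 * handling shown here are examples rather than values taken from i915.
 */
static int example_attach_phys(struct drm_i915_gem_object *obj)
{
        int err;

        err = i915_gem_object_attach_phys(obj, PAGE_SIZE);
        if (err) /* e.g. -EBUSY while a vmap of the object still exists */
                return err;

        /* obj->ops is now &i915_gem_phys_ops and its pages stay pinned */
        return 0;
}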
5862
5863 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5864 #include "selftests/scatterlist.c"
5865 #include "selftests/mock_gem_device.c"
5866 #include "selftests/huge_gem_object.c"
5867 #include "selftests/huge_pages.c"
5868 #include "selftests/i915_gem_object.c"
5869 #include "selftests/i915_gem_coherency.c"
5870 #endif