drivers/gpu/drm/i915/i915_gem.c
1 /*
2  * Copyright © 2008-2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Eric Anholt <eric@anholt.net>
25  *
26  */
27
28 #include <drm/drmP.h>
29 #include <drm/drm_vma_manager.h>
30 #include <drm/i915_drm.h>
31 #include "i915_drv.h"
32 #include "i915_gem_clflush.h"
33 #include "i915_vgpu.h"
34 #include "i915_trace.h"
35 #include "intel_drv.h"
36 #include "intel_frontbuffer.h"
37 #include "intel_mocs.h"
38 #include "i915_gemfs.h"
39 #include <linux/dma-fence-array.h>
40 #include <linux/kthread.h>
41 #include <linux/reservation.h>
42 #include <linux/shmem_fs.h>
43 #include <linux/slab.h>
44 #include <linux/stop_machine.h>
45 #include <linux/swap.h>
46 #include <linux/pci.h>
47 #include <linux/dma-buf.h>
48
49 static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
50
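/*
 * Report whether CPU writes to this object must be followed by a clflush
 * before the GPU or display can rely on seeing them: anything already
 * tracked as cache_dirty will be flushed later, write-coherent objects need
 * nothing, and objects pinned for global hardware use are kept flushed.
 */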
51 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
52 {
53         if (obj->cache_dirty)
54                 return false;
55
56         if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
57                 return true;
58
59         return obj->pin_global; /* currently in use by HW, keep flushed */
60 }
61
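/*
 * Reserve a node in the CPU-mappable range of the GGTT. The pread/pwrite
 * slow paths below use this as a temporary window when an object cannot be
 * pinned into the mappable aperture directly; callers pair it with
 * remove_mappable_node().
 */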
62 static int
63 insert_mappable_node(struct i915_ggtt *ggtt,
64                      struct drm_mm_node *node, u32 size)
65 {
66         memset(node, 0, sizeof(*node));
67         return drm_mm_insert_node_in_range(&ggtt->base.mm, node,
68                                            size, 0, I915_COLOR_UNEVICTABLE,
69                                            0, ggtt->mappable_end,
70                                            DRM_MM_INSERT_LOW);
71 }
72
73 static void
74 remove_mappable_node(struct drm_mm_node *node)
75 {
76         drm_mm_remove_node(node);
77 }
78
79 /* some bookkeeping */
80 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
81                                   u64 size)
82 {
83         spin_lock(&dev_priv->mm.object_stat_lock);
84         dev_priv->mm.object_count++;
85         dev_priv->mm.object_memory += size;
86         spin_unlock(&dev_priv->mm.object_stat_lock);
87 }
88
89 static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
90                                      u64 size)
91 {
92         spin_lock(&dev_priv->mm.object_stat_lock);
93         dev_priv->mm.object_count--;
94         dev_priv->mm.object_memory -= size;
95         spin_unlock(&dev_priv->mm.object_stat_lock);
96 }
97
98 static int
99 i915_gem_wait_for_error(struct i915_gpu_error *error)
100 {
101         int ret;
102
103         might_sleep();
104
105         /*
106          * Only wait 10 seconds for the gpu reset to complete to avoid hanging
107          * userspace. If it takes that long something really bad is going on and
108          * we should simply try to bail out and fail as gracefully as possible.
109          */
110         ret = wait_event_interruptible_timeout(error->reset_queue,
111                                                !i915_reset_backoff(error),
112                                                I915_RESET_TIMEOUT);
113         if (ret == 0) {
114                 DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
115                 return -EIO;
116         } else if (ret < 0) {
117                 return ret;
118         } else {
119                 return 0;
120         }
121 }
122
123 int i915_mutex_lock_interruptible(struct drm_device *dev)
124 {
125         struct drm_i915_private *dev_priv = to_i915(dev);
126         int ret;
127
128         ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
129         if (ret)
130                 return ret;
131
132         ret = mutex_lock_interruptible(&dev->struct_mutex);
133         if (ret)
134                 return ret;
135
136         return 0;
137 }
138
139 int
140 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
141                             struct drm_file *file)
142 {
143         struct drm_i915_private *dev_priv = to_i915(dev);
144         struct i915_ggtt *ggtt = &dev_priv->ggtt;
145         struct drm_i915_gem_get_aperture *args = data;
146         struct i915_vma *vma;
147         u64 pinned;
148
149         pinned = ggtt->base.reserved;
150         mutex_lock(&dev->struct_mutex);
151         list_for_each_entry(vma, &ggtt->base.active_list, vm_link)
152                 if (i915_vma_is_pinned(vma))
153                         pinned += vma->node.size;
154         list_for_each_entry(vma, &ggtt->base.inactive_list, vm_link)
155                 if (i915_vma_is_pinned(vma))
156                         pinned += vma->node.size;
157         mutex_unlock(&dev->struct_mutex);
158
159         args->aper_size = ggtt->base.total;
160         args->aper_available_size = args->aper_size - pinned;
161
162         return 0;
163 }
164
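/*
 * Back a "phys" object: copy the current shmem pages into one contiguous
 * DMA allocation and describe it with a single-entry sg_table, so that the
 * object can be accessed through a single physical address.
 */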
165 static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
166 {
167         struct address_space *mapping = obj->base.filp->f_mapping;
168         drm_dma_handle_t *phys;
169         struct sg_table *st;
170         struct scatterlist *sg;
171         char *vaddr;
172         int i;
173         int err;
174
175         if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
176                 return -EINVAL;
177
178         /* Always aligning to the object size allows a single allocation
179          * to handle all possible callers, and given typical object sizes,
180          * the alignment of the buddy allocation will naturally match.
181          */
182         phys = drm_pci_alloc(obj->base.dev,
183                              roundup_pow_of_two(obj->base.size),
184                              roundup_pow_of_two(obj->base.size));
185         if (!phys)
186                 return -ENOMEM;
187
188         vaddr = phys->vaddr;
189         for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
190                 struct page *page;
191                 char *src;
192
193                 page = shmem_read_mapping_page(mapping, i);
194                 if (IS_ERR(page)) {
195                         err = PTR_ERR(page);
196                         goto err_phys;
197                 }
198
199                 src = kmap_atomic(page);
200                 memcpy(vaddr, src, PAGE_SIZE);
201                 drm_clflush_virt_range(vaddr, PAGE_SIZE);
202                 kunmap_atomic(src);
203
204                 put_page(page);
205                 vaddr += PAGE_SIZE;
206         }
207
208         i915_gem_chipset_flush(to_i915(obj->base.dev));
209
210         st = kmalloc(sizeof(*st), GFP_KERNEL);
211         if (!st) {
212                 err = -ENOMEM;
213                 goto err_phys;
214         }
215
216         if (sg_alloc_table(st, 1, GFP_KERNEL)) {
217                 kfree(st);
218                 err = -ENOMEM;
219                 goto err_phys;
220         }
221
222         sg = st->sgl;
223         sg->offset = 0;
224         sg->length = obj->base.size;
225
226         sg_dma_address(sg) = phys->busaddr;
227         sg_dma_len(sg) = obj->base.size;
228
229         obj->phys_handle = phys;
230
231         __i915_gem_object_set_pages(obj, st, sg->length);
232
233         return 0;
234
235 err_phys:
236         drm_pci_free(obj->base.dev, phys);
237
238         return err;
239 }
240
241 static void __start_cpu_write(struct drm_i915_gem_object *obj)
242 {
243         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
244         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
245         if (cpu_write_needs_clflush(obj))
246                 obj->cache_dirty = true;
247 }
248
249 static void
250 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
251                                 struct sg_table *pages,
252                                 bool needs_clflush)
253 {
254         GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
255
256         if (obj->mm.madv == I915_MADV_DONTNEED)
257                 obj->mm.dirty = false;
258
259         if (needs_clflush &&
260             (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
261             !(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
262                 drm_clflush_sg(pages);
263
264         __start_cpu_write(obj);
265 }
266
267 static void
268 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
269                                struct sg_table *pages)
270 {
271         __i915_gem_object_release_shmem(obj, pages, false);
272
273         if (obj->mm.dirty) {
274                 struct address_space *mapping = obj->base.filp->f_mapping;
275                 char *vaddr = obj->phys_handle->vaddr;
276                 int i;
277
278                 for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
279                         struct page *page;
280                         char *dst;
281
282                         page = shmem_read_mapping_page(mapping, i);
283                         if (IS_ERR(page))
284                                 continue;
285
286                         dst = kmap_atomic(page);
287                         drm_clflush_virt_range(vaddr, PAGE_SIZE);
288                         memcpy(dst, vaddr, PAGE_SIZE);
289                         kunmap_atomic(dst);
290
291                         set_page_dirty(page);
292                         if (obj->mm.madv == I915_MADV_WILLNEED)
293                                 mark_page_accessed(page);
294                         put_page(page);
295                         vaddr += PAGE_SIZE;
296                 }
297                 obj->mm.dirty = false;
298         }
299
300         sg_free_table(pages);
301         kfree(pages);
302
303         drm_pci_free(obj->base.dev, obj->phys_handle);
304 }
305
306 static void
307 i915_gem_object_release_phys(struct drm_i915_gem_object *obj)
308 {
309         i915_gem_object_unpin_pages(obj);
310 }
311
312 static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
313         .get_pages = i915_gem_object_get_pages_phys,
314         .put_pages = i915_gem_object_put_pages_phys,
315         .release = i915_gem_object_release_phys,
316 };
317
318 static const struct drm_i915_gem_object_ops i915_gem_object_ops;
319
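/*
 * Unbind every vma from the object, after waiting for rendering to the
 * object to complete. Must be called with struct_mutex held; returns the
 * first unbind error, if any.
 */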
320 int i915_gem_object_unbind(struct drm_i915_gem_object *obj)
321 {
322         struct i915_vma *vma;
323         LIST_HEAD(still_in_list);
324         int ret;
325
326         lockdep_assert_held(&obj->base.dev->struct_mutex);
327
328         /* Closed vmas are removed from the obj->vma_list - but they may
329          * still have an active binding on the object. To remove those we
330          * must wait for all rendering to the object to complete (as unbinding
331          * must do anyway), and then retire the requests.
332          */
333         ret = i915_gem_object_set_to_cpu_domain(obj, false);
334         if (ret)
335                 return ret;
336
337         while ((vma = list_first_entry_or_null(&obj->vma_list,
338                                                struct i915_vma,
339                                                obj_link))) {
340                 list_move_tail(&vma->obj_link, &still_in_list);
341                 ret = i915_vma_unbind(vma);
342                 if (ret)
343                         break;
344         }
345         list_splice(&still_in_list, &obj->vma_list);
346
347         return ret;
348 }
349
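/*
 * Wait upon a single fence, returning the remaining timeout in jiffies.
 * Foreign fences are handed to dma_fence_wait_timeout(); i915 requests may
 * additionally waitboost the GPU on behalf of rps_client before waiting.
 */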
350 static long
351 i915_gem_object_wait_fence(struct dma_fence *fence,
352                            unsigned int flags,
353                            long timeout,
354                            struct intel_rps_client *rps_client)
355 {
356         struct drm_i915_gem_request *rq;
357
358         BUILD_BUG_ON(I915_WAIT_INTERRUPTIBLE != 0x1);
359
360         if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
361                 return timeout;
362
363         if (!dma_fence_is_i915(fence))
364                 return dma_fence_wait_timeout(fence,
365                                               flags & I915_WAIT_INTERRUPTIBLE,
366                                               timeout);
367
368         rq = to_request(fence);
369         if (i915_gem_request_completed(rq))
370                 goto out;
371
372         /* This client is about to stall waiting for the GPU. In many cases
373          * this is undesirable and limits the throughput of the system, as
374          * many clients cannot continue processing user input/output whilst
375          * blocked. RPS autotuning may take tens of milliseconds to respond
376          * to the GPU load and thus incurs additional latency for the client.
377          * We can circumvent that by promoting the GPU frequency to maximum
378          * before we wait. This makes the GPU throttle up much more quickly
379          * (good for benchmarks and user experience, e.g. window animations),
380          * but at a cost of spending more power processing the workload
381          * (bad for battery). Not all clients even want their results
382          * immediately and for them we should just let the GPU select its own
383          * frequency to maximise efficiency. To prevent a single client from
384          * forcing the clocks too high for the whole system, we only allow
385          * each client to waitboost once in a busy period.
386          */
387         if (rps_client) {
388                 if (INTEL_GEN(rq->i915) >= 6)
389                         gen6_rps_boost(rq, rps_client);
390                 else
391                         rps_client = NULL;
392         }
393
394         timeout = i915_wait_request(rq, flags, timeout);
395
396 out:
397         if (flags & I915_WAIT_LOCKED && i915_gem_request_completed(rq))
398                 i915_gem_request_retire_upto(rq);
399
400         return timeout;
401 }
402
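/*
 * Wait upon the fences tracked in a reservation object: always the
 * exclusive fence, plus every shared fence if I915_WAIT_ALL is given. Once
 * everything has signaled (and nothing new was added meanwhile), the fences
 * are opportunistically pruned from the reservation object.
 */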
403 static long
404 i915_gem_object_wait_reservation(struct reservation_object *resv,
405                                  unsigned int flags,
406                                  long timeout,
407                                  struct intel_rps_client *rps_client)
408 {
409         unsigned int seq = __read_seqcount_begin(&resv->seq);
410         struct dma_fence *excl;
411         bool prune_fences = false;
412
413         if (flags & I915_WAIT_ALL) {
414                 struct dma_fence **shared;
415                 unsigned int count, i;
416                 int ret;
417
418                 ret = reservation_object_get_fences_rcu(resv,
419                                                         &excl, &count, &shared);
420                 if (ret)
421                         return ret;
422
423                 for (i = 0; i < count; i++) {
424                         timeout = i915_gem_object_wait_fence(shared[i],
425                                                              flags, timeout,
426                                                              rps_client);
427                         if (timeout < 0)
428                                 break;
429
430                         dma_fence_put(shared[i]);
431                 }
432
433                 for (; i < count; i++)
434                         dma_fence_put(shared[i]);
435                 kfree(shared);
436
437                 prune_fences = count && timeout >= 0;
438         } else {
439                 excl = reservation_object_get_excl_rcu(resv);
440         }
441
442         if (excl && timeout >= 0) {
443                 timeout = i915_gem_object_wait_fence(excl, flags, timeout,
444                                                      rps_client);
445                 prune_fences = timeout >= 0;
446         }
447
448         dma_fence_put(excl);
449
450         /* Opportunistically prune the fences iff we know they have *all* been
451          * signaled and that the reservation object has not been changed (i.e.
452          * no new fences have been added).
453          */
454         if (prune_fences && !__read_seqcount_retry(&resv->seq, seq)) {
455                 if (reservation_object_trylock(resv)) {
456                         if (!__read_seqcount_retry(&resv->seq, seq))
457                                 reservation_object_add_excl_fence(resv, NULL);
458                         reservation_object_unlock(resv);
459                 }
460         }
461
462         return timeout;
463 }
464
465 static void __fence_set_priority(struct dma_fence *fence, int prio)
466 {
467         struct drm_i915_gem_request *rq;
468         struct intel_engine_cs *engine;
469
470         if (dma_fence_is_signaled(fence) || !dma_fence_is_i915(fence))
471                 return;
472
473         rq = to_request(fence);
474         engine = rq->engine;
475         if (!engine->schedule)
476                 return;
477
478         engine->schedule(rq, prio);
479 }
480
481 static void fence_set_priority(struct dma_fence *fence, int prio)
482 {
483         /* Recurse once into a fence-array */
484         if (dma_fence_is_array(fence)) {
485                 struct dma_fence_array *array = to_dma_fence_array(fence);
486                 int i;
487
488                 for (i = 0; i < array->num_fences; i++)
489                         __fence_set_priority(array->fences[i], prio);
490         } else {
491                 __fence_set_priority(fence, prio);
492         }
493 }
494
495 int
496 i915_gem_object_wait_priority(struct drm_i915_gem_object *obj,
497                               unsigned int flags,
498                               int prio)
499 {
500         struct dma_fence *excl;
501
502         if (flags & I915_WAIT_ALL) {
503                 struct dma_fence **shared;
504                 unsigned int count, i;
505                 int ret;
506
507                 ret = reservation_object_get_fences_rcu(obj->resv,
508                                                         &excl, &count, &shared);
509                 if (ret)
510                         return ret;
511
512                 for (i = 0; i < count; i++) {
513                         fence_set_priority(shared[i], prio);
514                         dma_fence_put(shared[i]);
515                 }
516
517                 kfree(shared);
518         } else {
519                 excl = reservation_object_get_excl_rcu(obj->resv);
520         }
521
522         if (excl) {
523                 fence_set_priority(excl, prio);
524                 dma_fence_put(excl);
525         }
526         return 0;
527 }
528
529 /**
530  * Waits for rendering to the object to be completed
531  * @obj: i915 gem object
532  * @flags: how to wait (under a lock, for all rendering or just for writes etc)
533  * @timeout: how long to wait
534  * @rps_client: client (user process) to charge for any waitboosting
535  */
536 int
537 i915_gem_object_wait(struct drm_i915_gem_object *obj,
538                      unsigned int flags,
539                      long timeout,
540                      struct intel_rps_client *rps_client)
541 {
542         might_sleep();
543 #if IS_ENABLED(CONFIG_LOCKDEP)
544         GEM_BUG_ON(debug_locks &&
545                    !!lockdep_is_held(&obj->base.dev->struct_mutex) !=
546                    !!(flags & I915_WAIT_LOCKED));
547 #endif
548         GEM_BUG_ON(timeout < 0);
549
550         timeout = i915_gem_object_wait_reservation(obj->resv,
551                                                    flags, timeout,
552                                                    rps_client);
553         return timeout < 0 ? timeout : 0;
554 }
555
556 static struct intel_rps_client *to_rps_client(struct drm_file *file)
557 {
558         struct drm_i915_file_private *fpriv = file->driver_priv;
559
560         return &fpriv->rps_client;
561 }
562
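/*
 * pwrite fast path for phys objects: copy the user data straight into the
 * contiguous backing storage and clflush it, keeping the contents coherent
 * for subsequent GTT-domain access (see the comment below).
 */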
563 static int
564 i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
565                      struct drm_i915_gem_pwrite *args,
566                      struct drm_file *file)
567 {
568         void *vaddr = obj->phys_handle->vaddr + args->offset;
569         char __user *user_data = u64_to_user_ptr(args->data_ptr);
570
571         /* We manually control the domain here and pretend that it
572          * remains coherent i.e. in the GTT domain, like shmem_pwrite.
573          */
574         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
575         if (copy_from_user(vaddr, user_data, args->size))
576                 return -EFAULT;
577
578         drm_clflush_virt_range(vaddr, args->size);
579         i915_gem_chipset_flush(to_i915(obj->base.dev));
580
581         intel_fb_obj_flush(obj, ORIGIN_CPU);
582         return 0;
583 }
584
585 void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
586 {
587         return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
588 }
589
590 void i915_gem_object_free(struct drm_i915_gem_object *obj)
591 {
592         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
593         kmem_cache_free(dev_priv->objects, obj);
594 }
595
596 static int
597 i915_gem_create(struct drm_file *file,
598                 struct drm_i915_private *dev_priv,
599                 uint64_t size,
600                 uint32_t *handle_p)
601 {
602         struct drm_i915_gem_object *obj;
603         int ret;
604         u32 handle;
605
606         size = roundup(size, PAGE_SIZE);
607         if (size == 0)
608                 return -EINVAL;
609
610         /* Allocate the new object */
611         obj = i915_gem_object_create(dev_priv, size);
612         if (IS_ERR(obj))
613                 return PTR_ERR(obj);
614
615         ret = drm_gem_handle_create(file, &obj->base, &handle);
616         /* drop reference from allocate - handle holds it now */
617         i915_gem_object_put(obj);
618         if (ret)
619                 return ret;
620
621         *handle_p = handle;
622         return 0;
623 }
624
625 int
626 i915_gem_dumb_create(struct drm_file *file,
627                      struct drm_device *dev,
628                      struct drm_mode_create_dumb *args)
629 {
630         /* have to work out size/pitch and return them */
631         args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
632         args->size = args->pitch * args->height;
633         return i915_gem_create(file, to_i915(dev),
634                                args->size, &args->handle);
635 }
636
637 static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
638 {
639         return !(obj->cache_level == I915_CACHE_NONE ||
640                  obj->cache_level == I915_CACHE_WT);
641 }
642
643 /**
644  * Creates a new mm object and returns a handle to it.
645  * @dev: drm device pointer
646  * @data: ioctl data blob
647  * @file: drm file pointer
648  */
649 int
650 i915_gem_create_ioctl(struct drm_device *dev, void *data,
651                       struct drm_file *file)
652 {
653         struct drm_i915_private *dev_priv = to_i915(dev);
654         struct drm_i915_gem_create *args = data;
655
656         i915_gem_flush_free_objects(dev_priv);
657
658         return i915_gem_create(file, dev_priv,
659                                args->size, &args->handle);
660 }
661
662 static inline enum fb_op_origin
663 fb_write_origin(struct drm_i915_gem_object *obj, unsigned int domain)
664 {
665         return (domain == I915_GEM_DOMAIN_GTT ?
666                 obj->frontbuffer_ggtt_origin : ORIGIN_CPU);
667 }
668
669 void i915_gem_flush_ggtt_writes(struct drm_i915_private *dev_priv)
670 {
671         /*
672          * No actual flushing is required for the GTT write domain for reads
673          * from the GTT domain. Writes to it "immediately" go to main memory
674          * as far as we know, so there's no chipset flush. It also doesn't
675          * land in the GPU render cache.
676          *
677          * However, we do have to enforce the order so that all writes through
678          * the GTT land before any writes to the device, such as updates to
679          * the GATT itself.
680          *
681          * We also have to wait a bit for the writes to land from the GTT.
682          * An uncached read (i.e. mmio) seems to be ideal for the round-trip
683          * timing. This issue has only been observed when switching quickly
684          * between GTT writes and CPU reads from inside the kernel on recent hw,
685          * and it appears to only affect discrete GTT blocks (i.e. on LLC
686          * system agents we could not reproduce this behaviour - until
687          * Cannonlake, that is!).
688          */
689
690         wmb();
691
692         intel_runtime_pm_get(dev_priv);
693         spin_lock_irq(&dev_priv->uncore.lock);
694
695         POSTING_READ_FW(RING_HEAD(RENDER_RING_BASE));
696
697         spin_unlock_irq(&dev_priv->uncore.lock);
698         intel_runtime_pm_put(dev_priv);
699 }
700
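/*
 * Flush the object's pending writes for the domains selected by
 * flush_domains: GTT writes are flushed through the chipset (and GGTT
 * write tracking cleared), CPU writes are clflushed, and render writes to
 * cached objects simply mark the object's cachelines dirty for later.
 */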
701 static void
702 flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
703 {
704         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
705         struct i915_vma *vma;
706
707         if (!(obj->base.write_domain & flush_domains))
708                 return;
709
710         switch (obj->base.write_domain) {
711         case I915_GEM_DOMAIN_GTT:
712                 i915_gem_flush_ggtt_writes(dev_priv);
713
714                 intel_fb_obj_flush(obj,
715                                    fb_write_origin(obj, I915_GEM_DOMAIN_GTT));
716
717                 for_each_ggtt_vma(vma, obj) {
718                         if (vma->iomap)
719                                 continue;
720
721                         i915_vma_unset_ggtt_write(vma);
722                 }
723                 break;
724
725         case I915_GEM_DOMAIN_CPU:
726                 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
727                 break;
728
729         case I915_GEM_DOMAIN_RENDER:
730                 if (gpu_write_needs_clflush(obj))
731                         obj->cache_dirty = true;
732                 break;
733         }
734
735         obj->base.write_domain = 0;
736 }
737
738 static inline int
739 __copy_to_user_swizzled(char __user *cpu_vaddr,
740                         const char *gpu_vaddr, int gpu_offset,
741                         int length)
742 {
743         int ret, cpu_offset = 0;
744
745         while (length > 0) {
746                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
747                 int this_length = min(cacheline_end - gpu_offset, length);
748                 int swizzled_gpu_offset = gpu_offset ^ 64;
749
750                 ret = __copy_to_user(cpu_vaddr + cpu_offset,
751                                      gpu_vaddr + swizzled_gpu_offset,
752                                      this_length);
753                 if (ret)
754                         return ret + length;
755
756                 cpu_offset += this_length;
757                 gpu_offset += this_length;
758                 length -= this_length;
759         }
760
761         return 0;
762 }
763
764 static inline int
765 __copy_from_user_swizzled(char *gpu_vaddr, int gpu_offset,
766                           const char __user *cpu_vaddr,
767                           int length)
768 {
769         int ret, cpu_offset = 0;
770
771         while (length > 0) {
772                 int cacheline_end = ALIGN(gpu_offset + 1, 64);
773                 int this_length = min(cacheline_end - gpu_offset, length);
774                 int swizzled_gpu_offset = gpu_offset ^ 64;
775
776                 ret = __copy_from_user(gpu_vaddr + swizzled_gpu_offset,
777                                        cpu_vaddr + cpu_offset,
778                                        this_length);
779                 if (ret)
780                         return ret + length;
781
782                 cpu_offset += this_length;
783                 gpu_offset += this_length;
784                 length -= this_length;
785         }
786
787         return 0;
788 }
789
790 /*
791  * Pins the specified object's pages and synchronizes the object with
792  * GPU accesses. Sets needs_clflush to non-zero if the caller should
793  * flush the object from the CPU cache.
794  */
795 int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
796                                     unsigned int *needs_clflush)
797 {
798         int ret;
799
800         lockdep_assert_held(&obj->base.dev->struct_mutex);
801
802         *needs_clflush = 0;
803         if (!i915_gem_object_has_struct_page(obj))
804                 return -ENODEV;
805
806         ret = i915_gem_object_wait(obj,
807                                    I915_WAIT_INTERRUPTIBLE |
808                                    I915_WAIT_LOCKED,
809                                    MAX_SCHEDULE_TIMEOUT,
810                                    NULL);
811         if (ret)
812                 return ret;
813
814         ret = i915_gem_object_pin_pages(obj);
815         if (ret)
816                 return ret;
817
818         if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ ||
819             !static_cpu_has(X86_FEATURE_CLFLUSH)) {
820                 ret = i915_gem_object_set_to_cpu_domain(obj, false);
821                 if (ret)
822                         goto err_unpin;
823                 else
824                         goto out;
825         }
826
827         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
828
829         /* If we're not in the cpu read domain, set ourselves into the gtt
830          * read domain and manually flush cachelines (if required). This
831          * optimizes for the case when the gpu will dirty the data
832          * anyway again before the next pread happens.
833          */
834         if (!obj->cache_dirty &&
835             !(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
836                 *needs_clflush = CLFLUSH_BEFORE;
837
838 out:
839         /* return with the pages pinned */
840         return 0;
841
842 err_unpin:
843         i915_gem_object_unpin_pages(obj);
844         return ret;
845 }
846
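/*
 * Write-side counterpart of i915_gem_obj_prepare_shmem_read(): pin the
 * pages, wait for all GPU access (reads and writes) and report in
 * needs_clflush whether the caller must flush cachelines before and/or
 * after writing.
 */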
847 int i915_gem_obj_prepare_shmem_write(struct drm_i915_gem_object *obj,
848                                      unsigned int *needs_clflush)
849 {
850         int ret;
851
852         lockdep_assert_held(&obj->base.dev->struct_mutex);
853
854         *needs_clflush = 0;
855         if (!i915_gem_object_has_struct_page(obj))
856                 return -ENODEV;
857
858         ret = i915_gem_object_wait(obj,
859                                    I915_WAIT_INTERRUPTIBLE |
860                                    I915_WAIT_LOCKED |
861                                    I915_WAIT_ALL,
862                                    MAX_SCHEDULE_TIMEOUT,
863                                    NULL);
864         if (ret)
865                 return ret;
866
867         ret = i915_gem_object_pin_pages(obj);
868         if (ret)
869                 return ret;
870
871         if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE ||
872             !static_cpu_has(X86_FEATURE_CLFLUSH)) {
873                 ret = i915_gem_object_set_to_cpu_domain(obj, true);
874                 if (ret)
875                         goto err_unpin;
876                 else
877                         goto out;
878         }
879
880         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
881
882         /* If we're not in the cpu write domain, set ourselves into the
883          * gtt write domain and manually flush cachelines (as required).
884          * This optimizes for the case when the gpu will use the data
885          * right away and we therefore have to clflush anyway.
886          */
887         if (!obj->cache_dirty) {
888                 *needs_clflush |= CLFLUSH_AFTER;
889
890                 /*
891                  * Same trick applies to invalidate partially written
892                  * cachelines read before writing.
893                  */
894                 if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU))
895                         *needs_clflush |= CLFLUSH_BEFORE;
896         }
897
898 out:
899         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
900         obj->mm.dirty = true;
901         /* return with the pages pinned */
902         return 0;
903
904 err_unpin:
905         i915_gem_object_unpin_pages(obj);
906         return ret;
907 }
908
909 static void
910 shmem_clflush_swizzled_range(char *addr, unsigned long length,
911                              bool swizzled)
912 {
913         if (unlikely(swizzled)) {
914                 unsigned long start = (unsigned long) addr;
915                 unsigned long end = (unsigned long) addr + length;
916
917                 /* For swizzling simply ensure that we always flush both
918                  * channels. Lame, but simple and it works. Swizzled
919                  * pwrite/pread is far from a hotpath - current userspace
920                  * doesn't use it at all. */
921                 start = round_down(start, 128);
922                 end = round_up(end, 128);
923
924                 drm_clflush_virt_range((void *)start, end - start);
925         } else {
926                 drm_clflush_virt_range(addr, length);
927         }
928
929 }
930
931 /* The only difference from the fast-path function is that this one can
932  * handle bit17 swizzling and uses non-atomic copy and kmap functions. */
933 static int
934 shmem_pread_slow(struct page *page, int offset, int length,
935                  char __user *user_data,
936                  bool page_do_bit17_swizzling, bool needs_clflush)
937 {
938         char *vaddr;
939         int ret;
940
941         vaddr = kmap(page);
942         if (needs_clflush)
943                 shmem_clflush_swizzled_range(vaddr + offset, length,
944                                              page_do_bit17_swizzling);
945
946         if (page_do_bit17_swizzling)
947                 ret = __copy_to_user_swizzled(user_data, vaddr, offset, length);
948         else
949                 ret = __copy_to_user(user_data, vaddr + offset, length);
950         kunmap(page);
951
952         return ret ? -EFAULT : 0;
953 }
954
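/*
 * Per-page copy function for the shmem pread fastpath: try an atomic kmap
 * copy first and fall back to shmem_pread_slow() if that faults or if the
 * page needs bit17 swizzling.
 */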
955 static int
956 shmem_pread(struct page *page, int offset, int length, char __user *user_data,
957             bool page_do_bit17_swizzling, bool needs_clflush)
958 {
959         int ret;
960
961         ret = -ENODEV;
962         if (!page_do_bit17_swizzling) {
963                 char *vaddr = kmap_atomic(page);
964
965                 if (needs_clflush)
966                         drm_clflush_virt_range(vaddr + offset, length);
967                 ret = __copy_to_user_inatomic(user_data, vaddr + offset, length);
968                 kunmap_atomic(vaddr);
969         }
970         if (ret == 0)
971                 return 0;
972
973         return shmem_pread_slow(page, offset, length, user_data,
974                                 page_do_bit17_swizzling, needs_clflush);
975 }
976
977 static int
978 i915_gem_shmem_pread(struct drm_i915_gem_object *obj,
979                      struct drm_i915_gem_pread *args)
980 {
981         char __user *user_data;
982         u64 remain;
983         unsigned int obj_do_bit17_swizzling;
984         unsigned int needs_clflush;
985         unsigned int idx, offset;
986         int ret;
987
988         obj_do_bit17_swizzling = 0;
989         if (i915_gem_object_needs_bit17_swizzle(obj))
990                 obj_do_bit17_swizzling = BIT(17);
991
992         ret = mutex_lock_interruptible(&obj->base.dev->struct_mutex);
993         if (ret)
994                 return ret;
995
996         ret = i915_gem_obj_prepare_shmem_read(obj, &needs_clflush);
997         mutex_unlock(&obj->base.dev->struct_mutex);
998         if (ret)
999                 return ret;
1000
1001         remain = args->size;
1002         user_data = u64_to_user_ptr(args->data_ptr);
1003         offset = offset_in_page(args->offset);
1004         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1005                 struct page *page = i915_gem_object_get_page(obj, idx);
1006                 int length;
1007
1008                 length = remain;
1009                 if (offset + length > PAGE_SIZE)
1010                         length = PAGE_SIZE - offset;
1011
1012                 ret = shmem_pread(page, offset, length, user_data,
1013                                   page_to_phys(page) & obj_do_bit17_swizzling,
1014                                   needs_clflush);
1015                 if (ret)
1016                         break;
1017
1018                 remain -= length;
1019                 user_data += length;
1020                 offset = 0;
1021         }
1022
1023         i915_gem_obj_finish_shmem_access(obj);
1024         return ret;
1025 }
1026
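/*
 * Copy from a GGTT (write-combined) mapping into user memory: try the
 * atomic mapping first and, if the user buffer faults, retry with a full
 * mapping that can sleep. Returns non-zero if any bytes were left uncopied.
 */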
1027 static inline bool
1028 gtt_user_read(struct io_mapping *mapping,
1029               loff_t base, int offset,
1030               char __user *user_data, int length)
1031 {
1032         void __iomem *vaddr;
1033         unsigned long unwritten;
1034
1035         /* We can use the cpu mem copy function because this is X86. */
1036         vaddr = io_mapping_map_atomic_wc(mapping, base);
1037         unwritten = __copy_to_user_inatomic(user_data,
1038                                             (void __force *)vaddr + offset,
1039                                             length);
1040         io_mapping_unmap_atomic(vaddr);
1041         if (unwritten) {
1042                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1043                 unwritten = copy_to_user(user_data,
1044                                          (void __force *)vaddr + offset,
1045                                          length);
1046                 io_mapping_unmap(vaddr);
1047         }
1048         return unwritten;
1049 }
1050
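/*
 * pread slow path via the GGTT: pin the object into the mappable aperture
 * (or feed it page by page through a temporary GGTT node) and read it back
 * through the uncached aperture mapping. Used when the shmem path cannot
 * service the read, e.g. for objects without struct pages.
 */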
1051 static int
1052 i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
1053                    const struct drm_i915_gem_pread *args)
1054 {
1055         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1056         struct i915_ggtt *ggtt = &i915->ggtt;
1057         struct drm_mm_node node;
1058         struct i915_vma *vma;
1059         void __user *user_data;
1060         u64 remain, offset;
1061         int ret;
1062
1063         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1064         if (ret)
1065                 return ret;
1066
1067         intel_runtime_pm_get(i915);
1068         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1069                                        PIN_MAPPABLE |
1070                                        PIN_NONFAULT |
1071                                        PIN_NONBLOCK);
1072         if (!IS_ERR(vma)) {
1073                 node.start = i915_ggtt_offset(vma);
1074                 node.allocated = false;
1075                 ret = i915_vma_put_fence(vma);
1076                 if (ret) {
1077                         i915_vma_unpin(vma);
1078                         vma = ERR_PTR(ret);
1079                 }
1080         }
1081         if (IS_ERR(vma)) {
1082                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1083                 if (ret)
1084                         goto out_unlock;
1085                 GEM_BUG_ON(!node.allocated);
1086         }
1087
1088         ret = i915_gem_object_set_to_gtt_domain(obj, false);
1089         if (ret)
1090                 goto out_unpin;
1091
1092         mutex_unlock(&i915->drm.struct_mutex);
1093
1094         user_data = u64_to_user_ptr(args->data_ptr);
1095         remain = args->size;
1096         offset = args->offset;
1097
1098         while (remain > 0) {
1099                 /* Operation in this page
1100                  *
1101                  * page_base = page offset within aperture
1102                  * page_offset = offset within page
1103                  * page_length = bytes to copy for this page
1104                  */
1105                 u32 page_base = node.start;
1106                 unsigned page_offset = offset_in_page(offset);
1107                 unsigned page_length = PAGE_SIZE - page_offset;
1108                 page_length = remain < page_length ? remain : page_length;
1109                 if (node.allocated) {
1110                         wmb();
1111                         ggtt->base.insert_page(&ggtt->base,
1112                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1113                                                node.start, I915_CACHE_NONE, 0);
1114                         wmb();
1115                 } else {
1116                         page_base += offset & PAGE_MASK;
1117                 }
1118
1119                 if (gtt_user_read(&ggtt->iomap, page_base, page_offset,
1120                                   user_data, page_length)) {
1121                         ret = -EFAULT;
1122                         break;
1123                 }
1124
1125                 remain -= page_length;
1126                 user_data += page_length;
1127                 offset += page_length;
1128         }
1129
1130         mutex_lock(&i915->drm.struct_mutex);
1131 out_unpin:
1132         if (node.allocated) {
1133                 wmb();
1134                 ggtt->base.clear_range(&ggtt->base,
1135                                        node.start, node.size);
1136                 remove_mappable_node(&node);
1137         } else {
1138                 i915_vma_unpin(vma);
1139         }
1140 out_unlock:
1141         intel_runtime_pm_put(i915);
1142         mutex_unlock(&i915->drm.struct_mutex);
1143
1144         return ret;
1145 }
1146
1147 /**
1148  * Reads data from the object referenced by handle.
1149  * @dev: drm device pointer
1150  * @data: ioctl data blob
1151  * @file: drm file pointer
1152  *
1153  * On error, the contents of *data are undefined.
1154  */
1155 int
1156 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
1157                      struct drm_file *file)
1158 {
1159         struct drm_i915_gem_pread *args = data;
1160         struct drm_i915_gem_object *obj;
1161         int ret;
1162
1163         if (args->size == 0)
1164                 return 0;
1165
1166         if (!access_ok(VERIFY_WRITE,
1167                        u64_to_user_ptr(args->data_ptr),
1168                        args->size))
1169                 return -EFAULT;
1170
1171         obj = i915_gem_object_lookup(file, args->handle);
1172         if (!obj)
1173                 return -ENOENT;
1174
1175         /* Bounds check source.  */
1176         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1177                 ret = -EINVAL;
1178                 goto out;
1179         }
1180
1181         trace_i915_gem_object_pread(obj, args->offset, args->size);
1182
1183         ret = i915_gem_object_wait(obj,
1184                                    I915_WAIT_INTERRUPTIBLE,
1185                                    MAX_SCHEDULE_TIMEOUT,
1186                                    to_rps_client(file));
1187         if (ret)
1188                 goto out;
1189
1190         ret = i915_gem_object_pin_pages(obj);
1191         if (ret)
1192                 goto out;
1193
1194         ret = i915_gem_shmem_pread(obj, args);
1195         if (ret == -EFAULT || ret == -ENODEV)
1196                 ret = i915_gem_gtt_pread(obj, args);
1197
1198         i915_gem_object_unpin_pages(obj);
1199 out:
1200         i915_gem_object_put(obj);
1201         return ret;
1202 }
1203
1204 /* This is the fast write path which cannot handle
1205  * page faults in the source data
1206  */
1207
1208 static inline bool
1209 ggtt_write(struct io_mapping *mapping,
1210            loff_t base, int offset,
1211            char __user *user_data, int length)
1212 {
1213         void __iomem *vaddr;
1214         unsigned long unwritten;
1215
1216         /* We can use the cpu mem copy function because this is X86. */
1217         vaddr = io_mapping_map_atomic_wc(mapping, base);
1218         unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
1219                                                       user_data, length);
1220         io_mapping_unmap_atomic(vaddr);
1221         if (unwritten) {
1222                 vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
1223                 unwritten = copy_from_user((void __force *)vaddr + offset,
1224                                            user_data, length);
1225                 io_mapping_unmap(vaddr);
1226         }
1227
1228         return unwritten;
1229 }
1230
1231 /**
1232  * This is the fast pwrite path, where we copy the data directly from the
1233  * user into the GTT, uncached.
1234  * @obj: i915 GEM object
1235  * @args: pwrite arguments structure
1236  */
1237 static int
1238 i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
1239                          const struct drm_i915_gem_pwrite *args)
1240 {
1241         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1242         struct i915_ggtt *ggtt = &i915->ggtt;
1243         struct drm_mm_node node;
1244         struct i915_vma *vma;
1245         u64 remain, offset;
1246         void __user *user_data;
1247         int ret;
1248
1249         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1250         if (ret)
1251                 return ret;
1252
1253         if (i915_gem_object_has_struct_page(obj)) {
1254                 /*
1255                  * Avoid waking the device up if we can fallback, as
1256                  * waking/resuming is very slow (worst-case 10-100 ms
1257                  * depending on PCI sleeps and our own resume time).
1258                  * This easily dwarfs any performance advantage from
1259                  * using the cache bypass of indirect GGTT access.
1260                  */
1261                 if (!intel_runtime_pm_get_if_in_use(i915)) {
1262                         ret = -EFAULT;
1263                         goto out_unlock;
1264                 }
1265         } else {
1266                 /* No backing pages, no fallback, we must force GGTT access */
1267                 intel_runtime_pm_get(i915);
1268         }
1269
1270         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
1271                                        PIN_MAPPABLE |
1272                                        PIN_NONFAULT |
1273                                        PIN_NONBLOCK);
1274         if (!IS_ERR(vma)) {
1275                 node.start = i915_ggtt_offset(vma);
1276                 node.allocated = false;
1277                 ret = i915_vma_put_fence(vma);
1278                 if (ret) {
1279                         i915_vma_unpin(vma);
1280                         vma = ERR_PTR(ret);
1281                 }
1282         }
1283         if (IS_ERR(vma)) {
1284                 ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
1285                 if (ret)
1286                         goto out_rpm;
1287                 GEM_BUG_ON(!node.allocated);
1288         }
1289
1290         ret = i915_gem_object_set_to_gtt_domain(obj, true);
1291         if (ret)
1292                 goto out_unpin;
1293
1294         mutex_unlock(&i915->drm.struct_mutex);
1295
1296         intel_fb_obj_invalidate(obj, ORIGIN_CPU);
1297
1298         user_data = u64_to_user_ptr(args->data_ptr);
1299         offset = args->offset;
1300         remain = args->size;
1301         while (remain) {
1302                 /* Operation in this page
1303                  *
1304                  * page_base = page offset within aperture
1305                  * page_offset = offset within page
1306                  * page_length = bytes to copy for this page
1307                  */
1308                 u32 page_base = node.start;
1309                 unsigned int page_offset = offset_in_page(offset);
1310                 unsigned int page_length = PAGE_SIZE - page_offset;
1311                 page_length = remain < page_length ? remain : page_length;
1312                 if (node.allocated) {
1313                         wmb(); /* flush the write before we modify the GGTT */
1314                         ggtt->base.insert_page(&ggtt->base,
1315                                                i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
1316                                                node.start, I915_CACHE_NONE, 0);
1317                         wmb(); /* flush modifications to the GGTT (insert_page) */
1318                 } else {
1319                         page_base += offset & PAGE_MASK;
1320                 }
1321                 /* If we get a fault while copying data, then (presumably) our
1322                  * source page isn't available.  Return the error and we'll
1323                  * retry in the slow path.
1324                  * If the object is non-shmem backed, we retry with the
1325                  * path that handles page faults.
1326                  */
1327                 if (ggtt_write(&ggtt->iomap, page_base, page_offset,
1328                                user_data, page_length)) {
1329                         ret = -EFAULT;
1330                         break;
1331                 }
1332
1333                 remain -= page_length;
1334                 user_data += page_length;
1335                 offset += page_length;
1336         }
1337         intel_fb_obj_flush(obj, ORIGIN_CPU);
1338
1339         mutex_lock(&i915->drm.struct_mutex);
1340 out_unpin:
1341         if (node.allocated) {
1342                 wmb();
1343                 ggtt->base.clear_range(&ggtt->base,
1344                                        node.start, node.size);
1345                 remove_mappable_node(&node);
1346         } else {
1347                 i915_vma_unpin(vma);
1348         }
1349 out_rpm:
1350         intel_runtime_pm_put(i915);
1351 out_unlock:
1352         mutex_unlock(&i915->drm.struct_mutex);
1353         return ret;
1354 }
1355
1356 static int
1357 shmem_pwrite_slow(struct page *page, int offset, int length,
1358                   char __user *user_data,
1359                   bool page_do_bit17_swizzling,
1360                   bool needs_clflush_before,
1361                   bool needs_clflush_after)
1362 {
1363         char *vaddr;
1364         int ret;
1365
1366         vaddr = kmap(page);
1367         if (unlikely(needs_clflush_before || page_do_bit17_swizzling))
1368                 shmem_clflush_swizzled_range(vaddr + offset, length,
1369                                              page_do_bit17_swizzling);
1370         if (page_do_bit17_swizzling)
1371                 ret = __copy_from_user_swizzled(vaddr, offset, user_data,
1372                                                 length);
1373         else
1374                 ret = __copy_from_user(vaddr + offset, user_data, length);
1375         if (needs_clflush_after)
1376                 shmem_clflush_swizzled_range(vaddr + offset, length,
1377                                              page_do_bit17_swizzling);
1378         kunmap(page);
1379
1380         return ret ? -EFAULT : 0;
1381 }
1382
1383 /* Per-page copy function for the shmem pwrite fastpath.
1384  * Flushes invalid cachelines before writing to the target if
1385  * needs_clflush_before is set and flushes out any written cachelines after
1386  * writing if needs_clflush_after is set.
1387  */
1388 static int
1389 shmem_pwrite(struct page *page, int offset, int len, char __user *user_data,
1390              bool page_do_bit17_swizzling,
1391              bool needs_clflush_before,
1392              bool needs_clflush_after)
1393 {
1394         int ret;
1395
1396         ret = -ENODEV;
1397         if (!page_do_bit17_swizzling) {
1398                 char *vaddr = kmap_atomic(page);
1399
1400                 if (needs_clflush_before)
1401                         drm_clflush_virt_range(vaddr + offset, len);
1402                 ret = __copy_from_user_inatomic(vaddr + offset, user_data, len);
1403                 if (needs_clflush_after)
1404                         drm_clflush_virt_range(vaddr + offset, len);
1405
1406                 kunmap_atomic(vaddr);
1407         }
1408         if (ret == 0)
1409                 return ret;
1410
1411         return shmem_pwrite_slow(page, offset, len, user_data,
1412                                  page_do_bit17_swizzling,
1413                                  needs_clflush_before,
1414                                  needs_clflush_after);
1415 }
1416
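/*
 * pwrite via the shmem backing pages: copy the user data page by page,
 * clflushing around partially written cachelines as reported by
 * i915_gem_obj_prepare_shmem_write().
 */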
1417 static int
1418 i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
1419                       const struct drm_i915_gem_pwrite *args)
1420 {
1421         struct drm_i915_private *i915 = to_i915(obj->base.dev);
1422         void __user *user_data;
1423         u64 remain;
1424         unsigned int obj_do_bit17_swizzling;
1425         unsigned int partial_cacheline_write;
1426         unsigned int needs_clflush;
1427         unsigned int offset, idx;
1428         int ret;
1429
1430         ret = mutex_lock_interruptible(&i915->drm.struct_mutex);
1431         if (ret)
1432                 return ret;
1433
1434         ret = i915_gem_obj_prepare_shmem_write(obj, &needs_clflush);
1435         mutex_unlock(&i915->drm.struct_mutex);
1436         if (ret)
1437                 return ret;
1438
1439         obj_do_bit17_swizzling = 0;
1440         if (i915_gem_object_needs_bit17_swizzle(obj))
1441                 obj_do_bit17_swizzling = BIT(17);
1442
1443         /* If we don't overwrite a cacheline completely we need to be
1444          * careful to have up-to-date data by first clflushing. Don't
1445          * overcomplicate things and flush the entire page.
1446          */
1447         partial_cacheline_write = 0;
1448         if (needs_clflush & CLFLUSH_BEFORE)
1449                 partial_cacheline_write = boot_cpu_data.x86_clflush_size - 1;
1450
1451         user_data = u64_to_user_ptr(args->data_ptr);
1452         remain = args->size;
1453         offset = offset_in_page(args->offset);
1454         for (idx = args->offset >> PAGE_SHIFT; remain; idx++) {
1455                 struct page *page = i915_gem_object_get_page(obj, idx);
1456                 int length;
1457
1458                 length = remain;
1459                 if (offset + length > PAGE_SIZE)
1460                         length = PAGE_SIZE - offset;
1461
1462                 ret = shmem_pwrite(page, offset, length, user_data,
1463                                    page_to_phys(page) & obj_do_bit17_swizzling,
1464                                    (offset | length) & partial_cacheline_write,
1465                                    needs_clflush & CLFLUSH_AFTER);
1466                 if (ret)
1467                         break;
1468
1469                 remain -= length;
1470                 user_data += length;
1471                 offset = 0;
1472         }
1473
1474         intel_fb_obj_flush(obj, ORIGIN_CPU);
1475         i915_gem_obj_finish_shmem_access(obj);
1476         return ret;
1477 }
1478
1479 /**
1480  * Writes data to the object referenced by handle.
1481  * @dev: drm device
1482  * @data: ioctl data blob
1483  * @file: drm file
1484  *
1485  * On error, the contents of the buffer that were to be modified are undefined.
1486  */
1487 int
1488 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
1489                       struct drm_file *file)
1490 {
1491         struct drm_i915_gem_pwrite *args = data;
1492         struct drm_i915_gem_object *obj;
1493         int ret;
1494
1495         if (args->size == 0)
1496                 return 0;
1497
1498         if (!access_ok(VERIFY_READ,
1499                        u64_to_user_ptr(args->data_ptr),
1500                        args->size))
1501                 return -EFAULT;
1502
1503         obj = i915_gem_object_lookup(file, args->handle);
1504         if (!obj)
1505                 return -ENOENT;
1506
1507         /* Bounds check destination. */
1508         if (range_overflows_t(u64, args->offset, args->size, obj->base.size)) {
1509                 ret = -EINVAL;
1510                 goto err;
1511         }
1512
1513         trace_i915_gem_object_pwrite(obj, args->offset, args->size);
1514
1515         ret = -ENODEV;
1516         if (obj->ops->pwrite)
1517                 ret = obj->ops->pwrite(obj, args);
1518         if (ret != -ENODEV)
1519                 goto err;
1520
1521         ret = i915_gem_object_wait(obj,
1522                                    I915_WAIT_INTERRUPTIBLE |
1523                                    I915_WAIT_ALL,
1524                                    MAX_SCHEDULE_TIMEOUT,
1525                                    to_rps_client(file));
1526         if (ret)
1527                 goto err;
1528
1529         ret = i915_gem_object_pin_pages(obj);
1530         if (ret)
1531                 goto err;
1532
1533         ret = -EFAULT;
1534         /* We can only do the GTT pwrite on untiled buffers, as otherwise
1535          * it would end up going through the fenced access, and we'll get
1536          * different detiling behavior between reading and writing.
1537          * pread/pwrite currently are reading and writing from the CPU
1538          * perspective, requiring manual detiling by the client.
1539          */
1540         if (!i915_gem_object_has_struct_page(obj) ||
1541             cpu_write_needs_clflush(obj))
1542                 /* Note that the gtt paths might fail with non-page-backed user
1543                  * pointers (e.g. gtt mappings when moving data between
1544                  * textures). Fall back to the shmem path in that case.
1545                  */
1546                 ret = i915_gem_gtt_pwrite_fast(obj, args);
1547
1548         if (ret == -EFAULT || ret == -ENOSPC) {
1549                 if (obj->phys_handle)
1550                         ret = i915_gem_phys_pwrite(obj, args, file);
1551                 else
1552                         ret = i915_gem_shmem_pwrite(obj, args);
1553         }
1554
1555         i915_gem_object_unpin_pages(obj);
1556 err:
1557         i915_gem_object_put(obj);
1558         return ret;
1559 }
1560
1561 static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
1562 {
1563         struct drm_i915_private *i915;
1564         struct list_head *list;
1565         struct i915_vma *vma;
1566
1567         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
1568
1569         for_each_ggtt_vma(vma, obj) {
1570                 if (i915_vma_is_active(vma))
1571                         continue;
1572
1573                 if (!drm_mm_node_allocated(&vma->node))
1574                         continue;
1575
1576                 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
1577         }
1578
1579         i915 = to_i915(obj->base.dev);
1580         spin_lock(&i915->mm.obj_lock);
1581         list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
1582         list_move_tail(&obj->mm.link, list);
1583         spin_unlock(&i915->mm.obj_lock);
1584 }
1585
1586 /**
1587  * Called when user space prepares to use an object with the CPU, either
1588  * through the mmap ioctl's mapping or a GTT mapping.
1589  * @dev: drm device
1590  * @data: ioctl data blob
1591  * @file: drm file
1592  */
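/*
 * Illustrative userspace sketch (assumptions: open DRM fd "fd", GEM handle
 * "handle"; error handling omitted): moving an object into the CPU domain
 * for both reads and writes before touching a CPU mmap might look like:
 *
 *	struct drm_i915_gem_set_domain set_domain = {
 *		.handle = handle,
 *		.read_domains = I915_GEM_DOMAIN_CPU,
 *		.write_domain = I915_GEM_DOMAIN_CPU,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
 *
 * Note that a non-zero write_domain must match read_domains, as enforced
 * below.
 */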
1593 int
1594 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
1595                           struct drm_file *file)
1596 {
1597         struct drm_i915_gem_set_domain *args = data;
1598         struct drm_i915_gem_object *obj;
1599         uint32_t read_domains = args->read_domains;
1600         uint32_t write_domain = args->write_domain;
1601         int err;
1602
1603         /* Only handle setting domains to types used by the CPU. */
1604         if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
1605                 return -EINVAL;
1606
1607         /* Having something in the write domain implies it's in the read
1608          * domain, and only that read domain.  Enforce that in the request.
1609          */
1610         if (write_domain != 0 && read_domains != write_domain)
1611                 return -EINVAL;
1612
1613         obj = i915_gem_object_lookup(file, args->handle);
1614         if (!obj)
1615                 return -ENOENT;
1616
1617         /* Try to flush the object off the GPU without holding the lock.
1618          * We will repeat the flush holding the lock in the normal manner
1619          * to catch cases where we are gazumped.
1620          */
1621         err = i915_gem_object_wait(obj,
1622                                    I915_WAIT_INTERRUPTIBLE |
1623                                    (write_domain ? I915_WAIT_ALL : 0),
1624                                    MAX_SCHEDULE_TIMEOUT,
1625                                    to_rps_client(file));
1626         if (err)
1627                 goto out;
1628
1629         /*
1630          * Proxy objects do not control access to the backing storage, ergo
1631          * they cannot be used as a means to manipulate the cache domain
1632          * tracking for that backing storage. The proxy object is always
1633          * considered to be outside of any cache domain.
1634          */
1635         if (i915_gem_object_is_proxy(obj)) {
1636                 err = -ENXIO;
1637                 goto out;
1638         }
1639
1640         /*
1641          * Flush and acquire obj->pages so that we are coherent through
1642          * direct access in memory with previous cached writes through
1643          * shmemfs and that our cache domain tracking remains valid.
1644          * For example, if the obj->filp was moved to swap without us
1645          * being notified and releasing the pages, we would mistakenly
1646          * continue to assume that the obj remained out of the CPU cached
1647          * domain.
1648          */
1649         err = i915_gem_object_pin_pages(obj);
1650         if (err)
1651                 goto out;
1652
1653         err = i915_mutex_lock_interruptible(dev);
1654         if (err)
1655                 goto out_unpin;
1656
1657         if (read_domains & I915_GEM_DOMAIN_WC)
1658                 err = i915_gem_object_set_to_wc_domain(obj, write_domain);
1659         else if (read_domains & I915_GEM_DOMAIN_GTT)
1660                 err = i915_gem_object_set_to_gtt_domain(obj, write_domain);
1661         else
1662                 err = i915_gem_object_set_to_cpu_domain(obj, write_domain);
1663
1664         /* And bump the LRU for this access */
1665         i915_gem_object_bump_inactive_ggtt(obj);
1666
1667         mutex_unlock(&dev->struct_mutex);
1668
1669         if (write_domain != 0)
1670                 intel_fb_obj_invalidate(obj,
1671                                         fb_write_origin(obj, write_domain));
1672
1673 out_unpin:
1674         i915_gem_object_unpin_pages(obj);
1675 out:
1676         i915_gem_object_put(obj);
1677         return err;
1678 }
1679
1680 /**
1681  * Called when user space has done writes to this buffer
1682  * @dev: drm device
1683  * @data: ioctl data blob
1684  * @file: drm file
1685  */
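/*
 * Illustrative userspace sketch (assumptions: "fd" and "handle" as above;
 * error handling omitted): signalling the end of CPU writes so that any
 * scanout cache flushing can happen might look like:
 *
 *	struct drm_i915_gem_sw_finish sw_finish = {
 *		.handle = handle,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_SW_FINISH, &sw_finish);
 */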
1686 int
1687 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
1688                          struct drm_file *file)
1689 {
1690         struct drm_i915_gem_sw_finish *args = data;
1691         struct drm_i915_gem_object *obj;
1692
1693         obj = i915_gem_object_lookup(file, args->handle);
1694         if (!obj)
1695                 return -ENOENT;
1696
1697         /*
1698          * Proxy objects are barred from CPU access, so there is no
1699          * need to ban sw_finish as it is a nop.
1700          */
1701
1702         /* Pinned buffers may be scanout, so flush the cache */
1703         i915_gem_object_flush_if_display(obj);
1704         i915_gem_object_put(obj);
1705
1706         return 0;
1707 }
1708
1709 /**
1710  * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
1711  *                       it is mapped to.
1712  * @dev: drm device
1713  * @data: ioctl data blob
1714  * @file: drm file
1715  *
1716  * While the mapping holds a reference on the contents of the object, it doesn't
1717  * imply a ref on the object itself.
1718  *
1719  * IMPORTANT:
1720  *
1721  * DRM driver writers who look at this function as an example for how to do GEM
1722  * mmap support, please don't implement mmap support like here. The modern way
1723  * to implement DRM mmap support is with an mmap offset ioctl (like
1724  * i915_gem_mmap_gtt) and then using the mmap syscall on the DRM fd directly.
1725  * That way debug tooling like valgrind will understand what's going on; hiding
1726  * the mmap call in a driver private ioctl will break that. The i915 driver only
1727  * does cpu mmaps this way because we didn't know better.
1728  */
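/*
 * Illustrative userspace sketch of this legacy interface (assumptions: "fd",
 * "handle" and the object size "obj_size"; error handling omitted) -- new
 * code should prefer the mmap-offset scheme described above:
 *
 *	struct drm_i915_gem_mmap mmap_arg = {
 *		.handle = handle,
 *		.size = obj_size,
 *		.flags = 0,	/* or I915_MMAP_WC on PAT-capable CPUs */
 *	};
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
 *	ptr = (void *)(uintptr_t)mmap_arg.addr_ptr;
 */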
1729 int
1730 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
1731                     struct drm_file *file)
1732 {
1733         struct drm_i915_gem_mmap *args = data;
1734         struct drm_i915_gem_object *obj;
1735         unsigned long addr;
1736
1737         if (args->flags & ~(I915_MMAP_WC))
1738                 return -EINVAL;
1739
1740         if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
1741                 return -ENODEV;
1742
1743         obj = i915_gem_object_lookup(file, args->handle);
1744         if (!obj)
1745                 return -ENOENT;
1746
1747         /* prime objects have no backing filp to GEM mmap
1748          * pages from.
1749          */
1750         if (!obj->base.filp) {
1751                 i915_gem_object_put(obj);
1752                 return -ENXIO;
1753         }
1754
1755         addr = vm_mmap(obj->base.filp, 0, args->size,
1756                        PROT_READ | PROT_WRITE, MAP_SHARED,
1757                        args->offset);
1758         if (args->flags & I915_MMAP_WC) {
1759                 struct mm_struct *mm = current->mm;
1760                 struct vm_area_struct *vma;
1761
1762                 if (down_write_killable(&mm->mmap_sem)) {
1763                         i915_gem_object_put(obj);
1764                         return -EINTR;
1765                 }
1766                 vma = find_vma(mm, addr);
1767                 if (vma)
1768                         vma->vm_page_prot =
1769                                 pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
1770                 else
1771                         addr = -ENOMEM;
1772                 up_write(&mm->mmap_sem);
1773
1774                 /* This may race, but that's ok, it only gets set */
1775                 WRITE_ONCE(obj->frontbuffer_ggtt_origin, ORIGIN_CPU);
1776         }
1777         i915_gem_object_put(obj);
1778         if (IS_ERR((void *)addr))
1779                 return addr;
1780
1781         args->addr_ptr = (uint64_t) addr;
1782
1783         return 0;
1784 }
1785
1786 static unsigned int tile_row_pages(struct drm_i915_gem_object *obj)
1787 {
1788         return i915_gem_object_get_tile_row_size(obj) >> PAGE_SHIFT;
1789 }
1790
1791 /**
1792  * i915_gem_mmap_gtt_version - report the current feature set for GTT mmaps
1793  *
1794  * A history of the GTT mmap interface:
1795  *
1796  * 0 - Everything had to fit into the GTT. Both parties of a memcpy had to be
1797  *     aligned and suitable for fencing, and still fit into the available
1798  *     mappable space left by the pinned display objects. A classic problem
1799  *     we called the page-fault-of-doom where we would ping-pong between
1800  *     two objects that could not fit inside the GTT and so the memcpy
1801  *     would page one object in at the expense of the other between every
1802  *     single byte.
1803  *
1804  * 1 - Objects can be any size, and have any compatible fencing (X, Y, or none
1805  *     as set via i915_gem_set_tiling() [DRM_I915_GEM_SET_TILING]). If the
1806  *     object is too large for the available space (or simply too large
1807  *     for the mappable aperture!), a view is created instead and faulted
1808  *     into userspace. (This view is aligned and sized appropriately for
1809  *     fenced access.)
1810  *
1811  * 2 - Recognise WC as a separate cache domain so that we can flush the
1812  *     delayed writes via GTT before performing direct access via WC.
1813  *
1814  * Restrictions:
1815  *
1816  *  * snoopable objects cannot be accessed via the GTT. It can cause machine
1817  *    hangs on some architectures, corruption on others. An attempt to service
1818  *    a GTT page fault from a snoopable object will generate a SIGBUS.
1819  *
1820  *  * the object must be able to fit into RAM (physical memory, though not
1821  *    limited to the mappable aperture).
1822  *
1823  *
1824  * Caveats:
1825  *
1826  *  * a new GTT page fault will synchronize rendering from the GPU and flush
1827  *    all data to system memory. Subsequent access will not be synchronized.
1828  *
1829  *  * all mappings are revoked on runtime device suspend.
1830  *
1831  *  * there are only 8, 16 or 32 fence registers to share between all users
1832  *    (older machines require a fence register for display and blitter access
1833  *    as well). Contention of the fence registers will cause the previous users
1834  *    to be unmapped and any new access will generate new page faults.
1835  *
1836  *  * running out of memory while servicing a fault may generate a SIGBUS,
1837  *    rather than the expected SIGSEGV.
1838  */
1839 int i915_gem_mmap_gtt_version(void)
1840 {
1841         return 2;
1842 }
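/*
 * Illustrative userspace sketch (assuming an open DRM fd "fd"; error
 * handling omitted): the value returned above is discovered through
 * I915_PARAM_MMAP_GTT_VERSION, e.g.:
 *
 *	int version = 0;
 *	struct drm_i915_getparam gp = {
 *		.param = I915_PARAM_MMAP_GTT_VERSION,
 *		.value = &version,
 *	};
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
 */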
1843
1844 static inline struct i915_ggtt_view
1845 compute_partial_view(struct drm_i915_gem_object *obj,
1846                      pgoff_t page_offset,
1847                      unsigned int chunk)
1848 {
1849         struct i915_ggtt_view view;
1850
1851         if (i915_gem_object_is_tiled(obj))
1852                 chunk = roundup(chunk, tile_row_pages(obj));
1853
1854         view.type = I915_GGTT_VIEW_PARTIAL;
1855         view.partial.offset = rounddown(page_offset, chunk);
1856         view.partial.size =
1857                 min_t(unsigned int, chunk,
1858                       (obj->base.size >> PAGE_SHIFT) - view.partial.offset);
1859
1860         /* If the partial covers the entire object, just create a normal VMA. */
1861         if (chunk >= obj->base.size >> PAGE_SHIFT)
1862                 view.type = I915_GGTT_VIEW_NORMAL;
1863
1864         return view;
1865 }
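/*
 * Worked example (illustrative arithmetic, assuming 4KiB pages): for a 16MiB
 * untiled object (4096 pages), a fault at page_offset 3000 with a chunk of
 * MIN_CHUNK_PAGES (256 pages) gives partial.offset = rounddown(3000, 256) =
 * 2816 and partial.size = min(256, 4096 - 2816) = 256. As the chunk does not
 * cover the whole object, the view remains I915_GGTT_VIEW_PARTIAL.
 */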
1866
1867 /**
1868  * i915_gem_fault - fault a page into the GTT
1869  * @vmf: fault info
1870  *
1871  * The fault handler is set up by drm_gem_mmap() when an object is GTT mapped
1872  * from userspace.  The fault handler takes care of binding the object to
1873  * the GTT (if needed), allocating and programming a fence register (again,
1874  * only if needed based on whether the old reg is still valid or the object
1875  * is tiled) and inserting a new PTE into the faulting process.
1876  *
1877  * Note that the faulting process may involve evicting existing objects
1878  * from the GTT and/or fence registers to make room.  So performance may
1879  * suffer if the GTT working set is large or there are few fence registers
1880  * left.
1881  *
1882  * The current feature set supported by i915_gem_fault() and thus GTT mmaps
1883  * is exposed via I915_PARAM_MMAP_GTT_VERSION (see i915_gem_mmap_gtt_version).
1884  */
1885 int i915_gem_fault(struct vm_fault *vmf)
1886 {
1887 #define MIN_CHUNK_PAGES ((1 << 20) >> PAGE_SHIFT) /* 1 MiB */
1888         struct vm_area_struct *area = vmf->vma;
1889         struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data);
1890         struct drm_device *dev = obj->base.dev;
1891         struct drm_i915_private *dev_priv = to_i915(dev);
1892         struct i915_ggtt *ggtt = &dev_priv->ggtt;
1893         bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
1894         struct i915_vma *vma;
1895         pgoff_t page_offset;
1896         unsigned int flags;
1897         int ret;
1898
1899         /* We don't use vmf->pgoff since that has the fake offset */
1900         page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
1901
1902         trace_i915_gem_object_fault(obj, page_offset, true, write);
1903
1904         /* Try to flush the object off the GPU first without holding the lock.
1905          * Upon acquiring the lock, we will perform our sanity checks and then
1906          * repeat the flush holding the lock in the normal manner to catch cases
1907          * where we are gazumped.
1908          */
1909         ret = i915_gem_object_wait(obj,
1910                                    I915_WAIT_INTERRUPTIBLE,
1911                                    MAX_SCHEDULE_TIMEOUT,
1912                                    NULL);
1913         if (ret)
1914                 goto err;
1915
1916         ret = i915_gem_object_pin_pages(obj);
1917         if (ret)
1918                 goto err;
1919
1920         intel_runtime_pm_get(dev_priv);
1921
1922         ret = i915_mutex_lock_interruptible(dev);
1923         if (ret)
1924                 goto err_rpm;
1925
1926         /* Access to snoopable pages through the GTT is incoherent. */
1927         if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
1928                 ret = -EFAULT;
1929                 goto err_unlock;
1930         }
1931
1932         /* If the object is smaller than a couple of partial vmas, it is
1933          * not worth creating only a single partial vma - we may as well
1934          * clear enough space for the full object.
1935          */
1936         flags = PIN_MAPPABLE;
1937         if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
1938                 flags |= PIN_NONBLOCK | PIN_NONFAULT;
1939
1940         /* Now pin it into the GTT as needed */
1941         vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
1942         if (IS_ERR(vma)) {
1943                 /* Use a partial view if it is bigger than available space */
1944                 struct i915_ggtt_view view =
1945                         compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
1946
1947                 /* Userspace is now writing through an untracked VMA, abandon
1948                  * all hope that the hardware is able to track future writes.
1949                  */
1950                 obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
1951
1952                 vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
1953         }
1954         if (IS_ERR(vma)) {
1955                 ret = PTR_ERR(vma);
1956                 goto err_unlock;
1957         }
1958
1959         ret = i915_gem_object_set_to_gtt_domain(obj, write);
1960         if (ret)
1961                 goto err_unpin;
1962
1963         ret = i915_vma_pin_fence(vma);
1964         if (ret)
1965                 goto err_unpin;
1966
1967         /* Finally, remap it using the new GTT offset */
1968         ret = remap_io_mapping(area,
1969                                area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
1970                                (ggtt->gmadr.start + vma->node.start) >> PAGE_SHIFT,
1971                                min_t(u64, vma->size, area->vm_end - area->vm_start),
1972                                &ggtt->iomap);
1973         if (ret)
1974                 goto err_fence;
1975
1976         /* Mark as being mmapped into userspace for later revocation */
1977         assert_rpm_wakelock_held(dev_priv);
1978         if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
1979                 list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
1980         GEM_BUG_ON(!obj->userfault_count);
1981
1982         i915_vma_set_ggtt_write(vma);
1983
1984 err_fence:
1985         i915_vma_unpin_fence(vma);
1986 err_unpin:
1987         __i915_vma_unpin(vma);
1988 err_unlock:
1989         mutex_unlock(&dev->struct_mutex);
1990 err_rpm:
1991         intel_runtime_pm_put(dev_priv);
1992         i915_gem_object_unpin_pages(obj);
1993 err:
1994         switch (ret) {
1995         case -EIO:
1996                 /*
1997                  * We eat errors when the gpu is terminally wedged to avoid
1998                  * userspace unduly crashing (gl has no provisions for mmaps to
1999                  * fail). But any other -EIO isn't ours (e.g. swap in failure)
2000                  * and so needs to be reported.
2001                  */
2002                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
2003                         ret = VM_FAULT_SIGBUS;
2004                         break;
2005                 }
2006         case -EAGAIN:
2007                 /*
2008                  * EAGAIN means the gpu is hung and we'll wait for the error
2009                  * handler to reset everything when re-faulting in
2010                  * i915_mutex_lock_interruptible.
2011                  */
2012         case 0:
2013         case -ERESTARTSYS:
2014         case -EINTR:
2015         case -EBUSY:
2016                 /*
2017                  * EBUSY is ok: this just means that another thread
2018                  * already did the job.
2019                  */
2020                 ret = VM_FAULT_NOPAGE;
2021                 break;
2022         case -ENOMEM:
2023                 ret = VM_FAULT_OOM;
2024                 break;
2025         case -ENOSPC:
2026         case -EFAULT:
2027                 ret = VM_FAULT_SIGBUS;
2028                 break;
2029         default:
2030                 WARN_ONCE(ret, "unhandled error in i915_gem_fault: %i\n", ret);
2031                 ret = VM_FAULT_SIGBUS;
2032                 break;
2033         }
2034         return ret;
2035 }
2036
2037 static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
2038 {
2039         struct i915_vma *vma;
2040
2041         GEM_BUG_ON(!obj->userfault_count);
2042
2043         obj->userfault_count = 0;
2044         list_del(&obj->userfault_link);
2045         drm_vma_node_unmap(&obj->base.vma_node,
2046                            obj->base.dev->anon_inode->i_mapping);
2047
2048         for_each_ggtt_vma(vma, obj)
2049                 i915_vma_unset_userfault(vma);
2050 }
2051
2052 /**
2053  * i915_gem_release_mmap - remove physical page mappings
2054  * @obj: obj in question
2055  *
2056  * Preserve the reservation of the mmapping with the DRM core code, but
2057  * relinquish ownership of the pages back to the system.
2058  *
2059  * It is vital that we remove the page mapping if we have mapped a tiled
2060  * object through the GTT and then lose the fence register due to
2061  * resource pressure. Similarly if the object has been moved out of the
2062  * aperture, then pages mapped into userspace must be revoked. Removing the
2063  * mapping will then trigger a page fault on the next user access, allowing
2064  * fixup by i915_gem_fault().
2065  */
2066 void
2067 i915_gem_release_mmap(struct drm_i915_gem_object *obj)
2068 {
2069         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2070
2071         /* Serialisation between user GTT access and our code depends upon
2072          * revoking the CPU's PTE whilst the mutex is held. The next user
2073          * pagefault then has to wait until we release the mutex.
2074          *
2075          * Note that RPM complicates somewhat by adding an additional
2076          * requirement that operations to the GGTT be made holding the RPM
2077          * wakeref.
2078          */
2079         lockdep_assert_held(&i915->drm.struct_mutex);
2080         intel_runtime_pm_get(i915);
2081
2082         if (!obj->userfault_count)
2083                 goto out;
2084
2085         __i915_gem_object_release_mmap(obj);
2086
2087         /* Ensure that the CPU's PTEs are revoked and there are no outstanding
2088          * memory transactions from userspace before we return. The TLB
2089          * flushing implied by changing the PTE above *should* be
2090          * sufficient, an extra barrier here just provides us with a bit
2091          * of paranoid documentation about our requirement to serialise
2092          * memory writes before touching registers / GSM.
2093          */
2094         wmb();
2095
2096 out:
2097         intel_runtime_pm_put(i915);
2098 }
2099
2100 void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
2101 {
2102         struct drm_i915_gem_object *obj, *on;
2103         int i;
2104
2105         /*
2106          * Only called during RPM suspend. All users of the userfault_list
2107          * must be holding an RPM wakeref to ensure that this cannot
2108          * run concurrently with themselves (and use the struct_mutex for
2109          * protection between themselves).
2110          */
2111
2112         list_for_each_entry_safe(obj, on,
2113                                  &dev_priv->mm.userfault_list, userfault_link)
2114                 __i915_gem_object_release_mmap(obj);
2115
2116         /* The fence will be lost when the device powers down. If any were
2117          * in use by hardware (i.e. they are pinned), we should not be powering
2118          * down! All other fences will be reacquired by the user upon waking.
2119          */
2120         for (i = 0; i < dev_priv->num_fence_regs; i++) {
2121                 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
2122
2123                 /* Ideally we want to assert that the fence register is not
2124                  * live at this point (i.e. that no piece of code will be
2125                  * trying to write through fence + GTT, as that both violates
2126                  * our tracking of activity and associated locking/barriers,
2127                  * but also is illegal given that the hw is powered down).
2128                  *
2129                  * Previously we used reg->pin_count as a "liveness" indicator.
2130                  * That is not sufficient, and we need a more fine-grained
2131                  * tool if we want to have a sanity check here.
2132                  */
2133
2134                 if (!reg->vma)
2135                         continue;
2136
2137                 GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
2138                 reg->dirty = true;
2139         }
2140 }
2141
2142 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
2143 {
2144         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2145         int err;
2146
2147         err = drm_gem_create_mmap_offset(&obj->base);
2148         if (likely(!err))
2149                 return 0;
2150
2151         /* Attempt to reap some mmap space from dead objects */
2152         do {
2153                 err = i915_gem_wait_for_idle(dev_priv, I915_WAIT_INTERRUPTIBLE);
2154                 if (err)
2155                         break;
2156
2157                 i915_gem_drain_freed_objects(dev_priv);
2158                 err = drm_gem_create_mmap_offset(&obj->base);
2159                 if (!err)
2160                         break;
2161
2162         } while (flush_delayed_work(&dev_priv->gt.retire_work));
2163
2164         return err;
2165 }
2166
2167 static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
2168 {
2169         drm_gem_free_mmap_offset(&obj->base);
2170 }
2171
2172 int
2173 i915_gem_mmap_gtt(struct drm_file *file,
2174                   struct drm_device *dev,
2175                   uint32_t handle,
2176                   uint64_t *offset)
2177 {
2178         struct drm_i915_gem_object *obj;
2179         int ret;
2180
2181         obj = i915_gem_object_lookup(file, handle);
2182         if (!obj)
2183                 return -ENOENT;
2184
2185         ret = i915_gem_object_create_mmap_offset(obj);
2186         if (ret == 0)
2187                 *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
2188
2189         i915_gem_object_put(obj);
2190         return ret;
2191 }
2192
2193 /**
2194  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
2195  * @dev: DRM device
2196  * @data: GTT mapping ioctl data
2197  * @file: GEM object info
2198  *
2199  * Simply returns the fake offset to userspace so it can mmap it.
2200  * The mmap call will end up in drm_gem_mmap(), which will set things
2201  * up so we can get faults in the handler above.
2202  *
2203  * The fault handler will take care of binding the object into the GTT
2204  * (since it may have been evicted to make room for something), allocating
2205  * a fence register, and mapping the appropriate aperture address into
2206  * userspace.
2207  */
2208 int
2209 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
2210                         struct drm_file *file)
2211 {
2212         struct drm_i915_gem_mmap_gtt *args = data;
2213
2214         return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
2215 }
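/*
 * Illustrative userspace sketch (assumptions: "fd", "handle", "obj_size";
 * error handling omitted): the returned fake offset is then handed to a
 * regular mmap on the DRM fd, e.g.:
 *
 *	struct drm_i915_gem_mmap_gtt mmap_gtt = {
 *		.handle = handle,
 *	};
 *	void *ptr;
 *
 *	drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_gtt);
 *	ptr = mmap(NULL, obj_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   fd, mmap_gtt.offset);
 */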
2216
2217 /* Immediately discard the backing storage */
2218 static void
2219 i915_gem_object_truncate(struct drm_i915_gem_object *obj)
2220 {
2221         i915_gem_object_free_mmap_offset(obj);
2222
2223         if (obj->base.filp == NULL)
2224                 return;
2225
2226         /* Our goal here is to return as much of the memory as
2227          * possible back to the system as we are called from OOM.
2228          * To do this we must instruct the shmfs to drop all of its
2229          * backing pages, *now*.
2230          */
2231         shmem_truncate_range(file_inode(obj->base.filp), 0, (loff_t)-1);
2232         obj->mm.madv = __I915_MADV_PURGED;
2233         obj->mm.pages = ERR_PTR(-EFAULT);
2234 }
2235
2236 /* Try to discard unwanted pages */
2237 void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
2238 {
2239         struct address_space *mapping;
2240
2241         lockdep_assert_held(&obj->mm.lock);
2242         GEM_BUG_ON(i915_gem_object_has_pages(obj));
2243
2244         switch (obj->mm.madv) {
2245         case I915_MADV_DONTNEED:
2246                 i915_gem_object_truncate(obj);
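                /* fall through - truncation has marked the object purged */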
2247         case __I915_MADV_PURGED:
2248                 return;
2249         }
2250
2251         if (obj->base.filp == NULL)
2252                 return;
2253
2254         mapping = obj->base.filp->f_mapping;
2255         invalidate_mapping_pages(mapping, 0, (loff_t)-1);
2256 }
2257
2258 static void
2259 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
2260                               struct sg_table *pages)
2261 {
2262         struct sgt_iter sgt_iter;
2263         struct page *page;
2264
2265         __i915_gem_object_release_shmem(obj, pages, true);
2266
2267         i915_gem_gtt_finish_pages(obj, pages);
2268
2269         if (i915_gem_object_needs_bit17_swizzle(obj))
2270                 i915_gem_object_save_bit_17_swizzle(obj, pages);
2271
2272         for_each_sgt_page(page, sgt_iter, pages) {
2273                 if (obj->mm.dirty)
2274                         set_page_dirty(page);
2275
2276                 if (obj->mm.madv == I915_MADV_WILLNEED)
2277                         mark_page_accessed(page);
2278
2279                 put_page(page);
2280         }
2281         obj->mm.dirty = false;
2282
2283         sg_free_table(pages);
2284         kfree(pages);
2285 }
2286
2287 static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
2288 {
2289         struct radix_tree_iter iter;
2290         void __rcu **slot;
2291
2292         rcu_read_lock();
2293         radix_tree_for_each_slot(slot, &obj->mm.get_page.radix, &iter, 0)
2294                 radix_tree_delete(&obj->mm.get_page.radix, iter.index);
2295         rcu_read_unlock();
2296 }
2297
2298 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
2299                                  enum i915_mm_subclass subclass)
2300 {
2301         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2302         struct sg_table *pages;
2303
2304         if (i915_gem_object_has_pinned_pages(obj))
2305                 return;
2306
2307         GEM_BUG_ON(obj->bind_count);
2308         if (!i915_gem_object_has_pages(obj))
2309                 return;
2310
2311         /* May be called by shrinker from within get_pages() (on another bo) */
2312         mutex_lock_nested(&obj->mm.lock, subclass);
2313         if (unlikely(atomic_read(&obj->mm.pages_pin_count)))
2314                 goto unlock;
2315
2316         /* ->put_pages might need to allocate memory for the bit17 swizzle
2317          * array, hence protect them from being reaped by removing them from gtt
2318          * lists early. */
2319         pages = fetch_and_zero(&obj->mm.pages);
2320         GEM_BUG_ON(!pages);
2321
2322         spin_lock(&i915->mm.obj_lock);
2323         list_del(&obj->mm.link);
2324         spin_unlock(&i915->mm.obj_lock);
2325
2326         if (obj->mm.mapping) {
2327                 void *ptr;
2328
2329                 ptr = page_mask_bits(obj->mm.mapping);
2330                 if (is_vmalloc_addr(ptr))
2331                         vunmap(ptr);
2332                 else
2333                         kunmap(kmap_to_page(ptr));
2334
2335                 obj->mm.mapping = NULL;
2336         }
2337
2338         __i915_gem_object_reset_page_iter(obj);
2339
2340         if (!IS_ERR(pages))
2341                 obj->ops->put_pages(obj, pages);
2342
2343         obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
2344
2345 unlock:
2346         mutex_unlock(&obj->mm.lock);
2347 }
2348
2349 static bool i915_sg_trim(struct sg_table *orig_st)
2350 {
2351         struct sg_table new_st;
2352         struct scatterlist *sg, *new_sg;
2353         unsigned int i;
2354
2355         if (orig_st->nents == orig_st->orig_nents)
2356                 return false;
2357
2358         if (sg_alloc_table(&new_st, orig_st->nents, GFP_KERNEL | __GFP_NOWARN))
2359                 return false;
2360
2361         new_sg = new_st.sgl;
2362         for_each_sg(orig_st->sgl, sg, orig_st->nents, i) {
2363                 sg_set_page(new_sg, sg_page(sg), sg->length, 0);
2364                 /* called before being DMA mapped, no need to copy sg->dma_* */
2365                 new_sg = sg_next(new_sg);
2366         }
2367         GEM_BUG_ON(new_sg); /* Should walk exactly nents and hit the end */
2368
2369         sg_free_table(orig_st);
2370
2371         *orig_st = new_st;
2372         return true;
2373 }
2374
2375 static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2376 {
2377         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2378         const unsigned long page_count = obj->base.size / PAGE_SIZE;
2379         unsigned long i;
2380         struct address_space *mapping;
2381         struct sg_table *st;
2382         struct scatterlist *sg;
2383         struct sgt_iter sgt_iter;
2384         struct page *page;
2385         unsigned long last_pfn = 0;     /* suppress gcc warning */
2386         unsigned int max_segment = i915_sg_segment_size();
2387         unsigned int sg_page_sizes;
2388         gfp_t noreclaim;
2389         int ret;
2390
2391         /* Assert that the object is not currently in any GPU domain. As it
2392          * wasn't in the GTT, there shouldn't be any way it could have been in
2393          * a GPU cache
2394          */
2395         GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
2396         GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
2397
2398         st = kmalloc(sizeof(*st), GFP_KERNEL);
2399         if (st == NULL)
2400                 return -ENOMEM;
2401
2402 rebuild_st:
2403         if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
2404                 kfree(st);
2405                 return -ENOMEM;
2406         }
2407
2408         /* Get the list of pages out of our struct file.  They'll be pinned
2409          * at this point until we release them.
2410          *
2411          * Fail silently without starting the shrinker
2412          */
2413         mapping = obj->base.filp->f_mapping;
2414         noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
2415         noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2416
2417         sg = st->sgl;
2418         st->nents = 0;
2419         sg_page_sizes = 0;
2420         for (i = 0; i < page_count; i++) {
2421                 const unsigned int shrink[] = {
2422                         I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2423                         0,
2424                 }, *s = shrink;
2425                 gfp_t gfp = noreclaim;
2426
2427                 do {
2428                         page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2429                         if (likely(!IS_ERR(page)))
2430                                 break;
2431
2432                         if (!*s) {
2433                                 ret = PTR_ERR(page);
2434                                 goto err_sg;
2435                         }
2436
2437                         i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
2438                         cond_resched();
2439
2440                         /* We've tried hard to allocate the memory by reaping
2441                          * our own buffer; now let the real VM do its job and
2442                          * go down in flames if truly OOM.
2443                          *
2444                          * However, since graphics tend to be disposable,
2445                          * defer the oom here by reporting the ENOMEM back
2446                          * to userspace.
2447                          */
2448                         if (!*s) {
2449                                 /* reclaim and warn, but no oom */
2450                                 gfp = mapping_gfp_mask(mapping);
2451
2452                                 /* Our bo are always dirty and so we require
2453                                  * kswapd to reclaim our pages (direct reclaim
2454                                  * does not effectively begin pageout of our
2455                                  * buffers on its own). However, direct reclaim
2456                                  * only waits for kswapd when under allocation
2457                                  * congestion. So as a result __GFP_RECLAIM is
2458                                  * unreliable and fails to actually reclaim our
2459                                  * dirty pages -- unless you try over and over
2460                                  * again with !__GFP_NORETRY. However, we still
2461                                  * want to fail this allocation rather than
2462                                  * trigger the out-of-memory killer and for
2463                                  * this we want __GFP_RETRY_MAYFAIL.
2464                                  */
2465                                 gfp |= __GFP_RETRY_MAYFAIL;
2466                         }
2467                 } while (1);
2468
2469                 if (!i ||
2470                     sg->length >= max_segment ||
2471                     page_to_pfn(page) != last_pfn + 1) {
2472                         if (i) {
2473                                 sg_page_sizes |= sg->length;
2474                                 sg = sg_next(sg);
2475                         }
2476                         st->nents++;
2477                         sg_set_page(sg, page, PAGE_SIZE, 0);
2478                 } else {
2479                         sg->length += PAGE_SIZE;
2480                 }
2481                 last_pfn = page_to_pfn(page);
2482
2483                 /* Check that the i965g/gm workaround works. */
2484                 WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
2485         }
2486         if (sg) { /* loop terminated early; short sg table */
2487                 sg_page_sizes |= sg->length;
2488                 sg_mark_end(sg);
2489         }
2490
2491         /* Trim unused sg entries to avoid wasting memory. */
2492         i915_sg_trim(st);
2493
2494         ret = i915_gem_gtt_prepare_pages(obj, st);
2495         if (ret) {
2496                 /* DMA remapping failed? One possible cause is that
2497                  * it could not reserve enough large entries, asking
2498                  * for PAGE_SIZE chunks instead may be helpful.
2499                  */
2500                 if (max_segment > PAGE_SIZE) {
2501                         for_each_sgt_page(page, sgt_iter, st)
2502                                 put_page(page);
2503                         sg_free_table(st);
2504
2505                         max_segment = PAGE_SIZE;
2506                         goto rebuild_st;
2507                 } else {
2508                         dev_warn(&dev_priv->drm.pdev->dev,
2509                                  "Failed to DMA remap %lu pages\n",
2510                                  page_count);
2511                         goto err_pages;
2512                 }
2513         }
2514
2515         if (i915_gem_object_needs_bit17_swizzle(obj))
2516                 i915_gem_object_do_bit_17_swizzle(obj, st);
2517
2518         __i915_gem_object_set_pages(obj, st, sg_page_sizes);
2519
2520         return 0;
2521
2522 err_sg:
2523         sg_mark_end(sg);
2524 err_pages:
2525         for_each_sgt_page(page, sgt_iter, st)
2526                 put_page(page);
2527         sg_free_table(st);
2528         kfree(st);
2529
2530         /* shmemfs first checks if there is enough memory to allocate the page
2531          * and reports ENOSPC should there be insufficient, along with the usual
2532          * ENOMEM for a genuine allocation failure.
2533          *
2534          * We use ENOSPC in our driver to mean that we have run out of aperture
2535          * space and so want to translate the error from shmemfs back to our
2536          * usual understanding of ENOMEM.
2537          */
2538         if (ret == -ENOSPC)
2539                 ret = -ENOMEM;
2540
2541         return ret;
2542 }
2543
2544 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
2545                                  struct sg_table *pages,
2546                                  unsigned int sg_page_sizes)
2547 {
2548         struct drm_i915_private *i915 = to_i915(obj->base.dev);
2549         unsigned long supported = INTEL_INFO(i915)->page_sizes;
2550         int i;
2551
2552         lockdep_assert_held(&obj->mm.lock);
2553
2554         obj->mm.get_page.sg_pos = pages->sgl;
2555         obj->mm.get_page.sg_idx = 0;
2556
2557         obj->mm.pages = pages;
2558
2559         if (i915_gem_object_is_tiled(obj) &&
2560             i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
2561                 GEM_BUG_ON(obj->mm.quirked);
2562                 __i915_gem_object_pin_pages(obj);
2563                 obj->mm.quirked = true;
2564         }
2565
2566         GEM_BUG_ON(!sg_page_sizes);
2567         obj->mm.page_sizes.phys = sg_page_sizes;
2568
2569         /*
2570          * Calculate the supported page-sizes which fit into the given
2571          * sg_page_sizes. This will give us the page-sizes which we may be able
2572          * to use opportunistically when later inserting into the GTT. For
2573          * example if phys=2G, then in theory we should be able to use 1G, 2M,
2574          * 64K or 4K pages, although in practice this will depend on a number of
2575          * other factors.
2576          */
2577         obj->mm.page_sizes.sg = 0;
2578         for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
2579                 if (obj->mm.page_sizes.phys & ~0u << i)
2580                         obj->mm.page_sizes.sg |= BIT(i);
2581         }
2582         GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
2583
2584         spin_lock(&i915->mm.obj_lock);
2585         list_add(&obj->mm.link, &i915->mm.unbound_list);
2586         spin_unlock(&i915->mm.obj_lock);
2587 }
2588
2589 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2590 {
2591         int err;
2592
2593         if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
2594                 DRM_DEBUG("Attempting to obtain a purgeable object\n");
2595                 return -EFAULT;
2596         }
2597
2598         err = obj->ops->get_pages(obj);
2599         GEM_BUG_ON(!err && !i915_gem_object_has_pages(obj));
2600
2601         return err;
2602 }
2603
2604 /* Ensure that the associated pages are gathered from the backing storage
2605  * and pinned into our object. i915_gem_object_pin_pages() may be called
2606  * multiple times before they are released by a single call to
2607  * i915_gem_object_unpin_pages() - once the pages are no longer referenced
2608  * either as a result of memory pressure (reaping pages under the shrinker)
2609  * or as the object is itself released.
2610  */
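/*
 * A minimal in-kernel usage sketch of the pin/unpin pairing described above
 * (illustrative only, error handling trimmed):
 *
 *	err = i915_gem_object_pin_pages(obj);
 *	if (err)
 *		return err;
 *
 *	... access the backing store, e.g. via i915_gem_object_get_page() ...
 *
 *	i915_gem_object_unpin_pages(obj);
 */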
2611 int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
2612 {
2613         int err;
2614
2615         err = mutex_lock_interruptible(&obj->mm.lock);
2616         if (err)
2617                 return err;
2618
2619         if (unlikely(!i915_gem_object_has_pages(obj))) {
2620                 GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2621
2622                 err = ____i915_gem_object_get_pages(obj);
2623                 if (err)
2624                         goto unlock;
2625
2626                 smp_mb__before_atomic();
2627         }
2628         atomic_inc(&obj->mm.pages_pin_count);
2629
2630 unlock:
2631         mutex_unlock(&obj->mm.lock);
2632         return err;
2633 }
2634
2635 /* The 'mapping' part of i915_gem_object_pin_map() below */
2636 static void *i915_gem_object_map(const struct drm_i915_gem_object *obj,
2637                                  enum i915_map_type type)
2638 {
2639         unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
2640         struct sg_table *sgt = obj->mm.pages;
2641         struct sgt_iter sgt_iter;
2642         struct page *page;
2643         struct page *stack_pages[32];
2644         struct page **pages = stack_pages;
2645         unsigned long i = 0;
2646         pgprot_t pgprot;
2647         void *addr;
2648
2649         /* A single page can always be kmapped */
2650         if (n_pages == 1 && type == I915_MAP_WB)
2651                 return kmap(sg_page(sgt->sgl));
2652
2653         if (n_pages > ARRAY_SIZE(stack_pages)) {
2654                 /* Too big for stack -- allocate temporary array instead */
2655                 pages = kvmalloc_array(n_pages, sizeof(*pages), GFP_KERNEL);
2656                 if (!pages)
2657                         return NULL;
2658         }
2659
2660         for_each_sgt_page(page, sgt_iter, sgt)
2661                 pages[i++] = page;
2662
2663         /* Check that we have the expected number of pages */
2664         GEM_BUG_ON(i != n_pages);
2665
2666         switch (type) {
2667         default:
2668                 MISSING_CASE(type);
2669                 /* fallthrough to use PAGE_KERNEL anyway */
2670         case I915_MAP_WB:
2671                 pgprot = PAGE_KERNEL;
2672                 break;
2673         case I915_MAP_WC:
2674                 pgprot = pgprot_writecombine(PAGE_KERNEL_IO);
2675                 break;
2676         }
2677         addr = vmap(pages, n_pages, 0, pgprot);
2678
2679         if (pages != stack_pages)
2680                 kvfree(pages);
2681
2682         return addr;
2683 }
2684
2685 /* get, pin, and map the pages of the object into kernel space */
2686 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
2687                               enum i915_map_type type)
2688 {
2689         enum i915_map_type has_type;
2690         bool pinned;
2691         void *ptr;
2692         int ret;
2693
2694         if (unlikely(!i915_gem_object_has_struct_page(obj)))
2695                 return ERR_PTR(-ENXIO);
2696
2697         ret = mutex_lock_interruptible(&obj->mm.lock);
2698         if (ret)
2699                 return ERR_PTR(ret);
2700
2701         pinned = !(type & I915_MAP_OVERRIDE);
2702         type &= ~I915_MAP_OVERRIDE;
2703
2704         if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
2705                 if (unlikely(!i915_gem_object_has_pages(obj))) {
2706                         GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
2707
2708                         ret = ____i915_gem_object_get_pages(obj);
2709                         if (ret)
2710                                 goto err_unlock;
2711
2712                         smp_mb__before_atomic();
2713                 }
2714                 atomic_inc(&obj->mm.pages_pin_count);
2715                 pinned = false;
2716         }
2717         GEM_BUG_ON(!i915_gem_object_has_pages(obj));
2718
2719         ptr = page_unpack_bits(obj->mm.mapping, &has_type);
2720         if (ptr && has_type != type) {
2721                 if (pinned) {
2722                         ret = -EBUSY;
2723                         goto err_unpin;
2724                 }
2725
2726                 if (is_vmalloc_addr(ptr))
2727                         vunmap(ptr);
2728                 else
2729                         kunmap(kmap_to_page(ptr));
2730
2731                 ptr = obj->mm.mapping = NULL;
2732         }
2733
2734         if (!ptr) {
2735                 ptr = i915_gem_object_map(obj, type);
2736                 if (!ptr) {
2737                         ret = -ENOMEM;
2738                         goto err_unpin;
2739                 }
2740
2741                 obj->mm.mapping = page_pack_bits(ptr, type);
2742         }
2743
2744 out_unlock:
2745         mutex_unlock(&obj->mm.lock);
2746         return ptr;
2747
2748 err_unpin:
2749         atomic_dec(&obj->mm.pages_pin_count);
2750 err_unlock:
2751         ptr = ERR_PTR(ret);
2752         goto out_unlock;
2753 }
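/*
 * Illustrative in-kernel usage of the pin_map interface above (a sketch,
 * not a recipe; "data" and "len" are placeholders, real callers add their
 * own error handling and may prefer I915_MAP_WC for uncached buffers):
 *
 *	void *vaddr;
 *
 *	vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
 *	if (IS_ERR(vaddr))
 *		return PTR_ERR(vaddr);
 *
 *	memcpy(vaddr, data, len);
 *
 *	i915_gem_object_unpin_map(obj);
 */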
2754
2755 static int
2756 i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
2757                            const struct drm_i915_gem_pwrite *arg)
2758 {
2759         struct address_space *mapping = obj->base.filp->f_mapping;
2760         char __user *user_data = u64_to_user_ptr(arg->data_ptr);
2761         u64 remain, offset;
2762         unsigned int pg;
2763
2764         /* Before we instantiate/pin the backing store for our use, we
2765          * can prepopulate the shmemfs filp efficiently using a write into
2766          * the pagecache. We avoid the penalty of instantiating all the
2767          * pages, important if the user is just writing to a few and never
2768          * uses the object on the GPU, and using a direct write into shmemfs
2769          * allows it to avoid the cost of retrieving a page (either swapin
2770          * or clearing-before-use) before it is overwritten.
2771          */
2772         if (i915_gem_object_has_pages(obj))
2773                 return -ENODEV;
2774
2775         if (obj->mm.madv != I915_MADV_WILLNEED)
2776                 return -EFAULT;
2777
2778         /* Before the pages are instantiated the object is treated as being
2779          * in the CPU domain. The pages will be clflushed as required before
2780          * use, and we can freely write into the pages directly. If userspace
2781          * races pwrite with any other operation; corruption will ensue -
2782          * that is userspace's prerogative!
2783          */
2784
2785         remain = arg->size;
2786         offset = arg->offset;
2787         pg = offset_in_page(offset);
2788
2789         do {
2790                 unsigned int len, unwritten;
2791                 struct page *page;
2792                 void *data, *vaddr;
2793                 int err;
2794
2795                 len = PAGE_SIZE - pg;
2796                 if (len > remain)
2797                         len = remain;
2798
2799                 err = pagecache_write_begin(obj->base.filp, mapping,
2800                                             offset, len, 0,
2801                                             &page, &data);
2802                 if (err < 0)
2803                         return err;
2804
2805                 vaddr = kmap(page);
2806                 unwritten = copy_from_user(vaddr + pg, user_data, len);
2807                 kunmap(page);
2808
2809                 err = pagecache_write_end(obj->base.filp, mapping,
2810                                           offset, len, len - unwritten,
2811                                           page, data);
2812                 if (err < 0)
2813                         return err;
2814
2815                 if (unwritten)
2816                         return -EFAULT;
2817
2818                 remain -= len;
2819                 user_data += len;
2820                 offset += len;
2821                 pg = 0;
2822         } while (remain);
2823
2824         return 0;
2825 }
2826
2827 static bool ban_context(const struct i915_gem_context *ctx,
2828                         unsigned int score)
2829 {
2830         return (i915_gem_context_is_bannable(ctx) &&
2831                 score >= CONTEXT_SCORE_BAN_THRESHOLD);
2832 }
2833
2834 static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
2835 {
2836         unsigned int score;
2837         bool banned;
2838
2839         atomic_inc(&ctx->guilty_count);
2840
2841         score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
2842         banned = ban_context(ctx, score);
2843         DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
2844                          ctx->name, score, yesno(banned));
2845         if (!banned)
2846                 return;
2847
2848         i915_gem_context_set_banned(ctx);
2849         if (!IS_ERR_OR_NULL(ctx->file_priv)) {
2850                 atomic_inc(&ctx->file_priv->context_bans);
2851                 DRM_DEBUG_DRIVER("client %s has had %d contexts banned\n",
2852                                  ctx->name, atomic_read(&ctx->file_priv->context_bans));
2853         }
2854 }
2855
2856 static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
2857 {
2858         atomic_inc(&ctx->active_count);
2859 }
2860
2861 struct drm_i915_gem_request *
2862 i915_gem_find_active_request(struct intel_engine_cs *engine)
2863 {
2864         struct drm_i915_gem_request *request, *active = NULL;
2865         unsigned long flags;
2866
2867         /* We are called by the error capture and reset at a random
2868          * point in time. In particular, note that neither is crucially
2869          * ordered with an interrupt. After a hang, the GPU is dead and we
2870          * assume that no more writes can happen (we waited long enough for
2871          * all writes that were in transaction to be flushed) - adding an
2872          * extra delay for a recent interrupt is pointless. Hence, we do
2873          * not need an engine->irq_seqno_barrier() before the seqno reads.
2874          */
2875         spin_lock_irqsave(&engine->timeline->lock, flags);
2876         list_for_each_entry(request, &engine->timeline->requests, link) {
2877                 if (__i915_gem_request_completed(request,
2878                                                  request->global_seqno))
2879                         continue;
2880
2881                 GEM_BUG_ON(request->engine != engine);
2882                 GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
2883                                     &request->fence.flags));
2884
2885                 active = request;
2886                 break;
2887         }
2888         spin_unlock_irqrestore(&engine->timeline->lock, flags);
2889
2890         return active;
2891 }
2892
2893 static bool engine_stalled(struct intel_engine_cs *engine)
2894 {
2895         if (!engine->hangcheck.stalled)
2896                 return false;
2897
2898         /* Check for possible seqno movement after hang declaration */
2899         if (engine->hangcheck.seqno != intel_engine_get_seqno(engine)) {
2900                 DRM_DEBUG_DRIVER("%s pardoned\n", engine->name);
2901                 return false;
2902         }
2903
2904         return true;
2905 }
2906
2907 /*
2908  * Ensure irq handler finishes, and not run again.
2909  * Also return the active request so that we only search for it once.
2910  */
2911 struct drm_i915_gem_request *
2912 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
2913 {
2914         struct drm_i915_gem_request *request = NULL;
2915
2916         /*
2917          * During the reset sequence, we must prevent the engine from
2918          * entering RC6. As the context state is undefined until we restart
2919          * the engine, if it does enter RC6 during the reset, the state
2920          * written to the powercontext is undefined and so we may lose
2921          * GPU state upon resume, i.e. fail to restart after a reset.
2922          */
2923         intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
2924
2925         /*
2926          * Prevent the signaler thread from updating the request
2927          * state (by calling dma_fence_signal) as we are processing
2928          * the reset. The write from the GPU of the seqno is
2929          * asynchronous and the signaler thread may see a different
2930          * value to us and declare the request complete, even though
2931          * the reset routine have picked that request as the active
2932          * (incomplete) request. This conflict is not handled
2933          * gracefully!
2934          */
2935         kthread_park(engine->breadcrumbs.signaler);
2936
2937         /*
2938          * Prevent request submission to the hardware until we have
2939          * completed the reset in i915_gem_reset_finish(). If a request
2940          * is completed by one engine, it may then queue a request
2941          * to a second via its execlists->tasklet *just* as we are
2942          * calling engine->init_hw() and also writing the ELSP.
2943          * Turning off the execlists->tasklet until the reset is over
2944          * prevents the race.
2945          */
2946         tasklet_kill(&engine->execlists.tasklet);
2947         tasklet_disable(&engine->execlists.tasklet);
2948
2949         /*
2950          * We're using a worker to queue preemption requests from the tasklet in
2951          * GuC submission mode.
2952          * Even though the tasklet was disabled, we may still have a worker queued.
2953          * Let's make sure that all workers scheduled before disabling the
2954          * tasklet are completed before continuing with the reset.
2955          */
2956         if (engine->i915->guc.preempt_wq)
2957                 flush_workqueue(engine->i915->guc.preempt_wq);
2958
2959         if (engine->irq_seqno_barrier)
2960                 engine->irq_seqno_barrier(engine);
2961
2962         request = i915_gem_find_active_request(engine);
2963         if (request && request->fence.error == -EIO)
2964                 request = ERR_PTR(-EIO); /* Previous reset failed! */
2965
2966         return request;
2967 }
2968
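/*
 * Roughly: for every engine, quiesce the engine via
 * i915_gem_reset_prepare_engine() and remember the request it was
 * executing in engine->hangcheck.active_request, then revoke the fence
 * registers. Returns -EIO if a previous reset already failed on any
 * engine (the active request carries a -EIO fence error).
 */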
2969 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv)
2970 {
2971         struct intel_engine_cs *engine;
2972         struct drm_i915_gem_request *request;
2973         enum intel_engine_id id;
2974         int err = 0;
2975
2976         for_each_engine(engine, dev_priv, id) {
2977                 request = i915_gem_reset_prepare_engine(engine);
2978                 if (IS_ERR(request)) {
2979                         err = PTR_ERR(request);
2980                         continue;
2981                 }
2982
2983                 engine->hangcheck.active_request = request;
2984         }
2985
2986         i915_gem_revoke_fences(dev_priv);
2987
2988         return err;
2989 }
2990
2991 static void skip_request(struct drm_i915_gem_request *request)
2992 {
2993         void *vaddr = request->ring->vaddr;
2994         u32 head;
2995
2996         /* As this request likely depends on state from the lost
2997          * context, clear out all the user operations leaving the
2998          * breadcrumb at the end (so we get the fence notifications).
2999          */
3000         head = request->head;
3001         if (request->postfix < head) {
3002                 memset(vaddr + head, 0, request->ring->size - head);
3003                 head = 0;
3004         }
3005         memset(vaddr + head, 0, request->postfix - head);
3006
3007         dma_fence_set_error(&request->fence, -EIO);
3008 }
3009
3010 static void engine_skip_context(struct drm_i915_gem_request *request)
3011 {
3012         struct intel_engine_cs *engine = request->engine;
3013         struct i915_gem_context *hung_ctx = request->ctx;
3014         struct intel_timeline *timeline;
3015         unsigned long flags;
3016
3017         timeline = i915_gem_context_lookup_timeline(hung_ctx, engine);
3018
3019         spin_lock_irqsave(&engine->timeline->lock, flags);
3020         spin_lock(&timeline->lock);
3021
3022         list_for_each_entry_continue(request, &engine->timeline->requests, link)
3023                 if (request->ctx == hung_ctx)
3024                         skip_request(request);
3025
3026         list_for_each_entry(request, &timeline->requests, link)
3027                 skip_request(request);
3028
3029         spin_unlock(&timeline->lock);
3030         spin_unlock_irqrestore(&engine->timeline->lock, flags);
3031 }
3032
3033 /* Returns the request if it was guilty of the hang */
3034 static struct drm_i915_gem_request *
3035 i915_gem_reset_request(struct intel_engine_cs *engine,
3036                        struct drm_i915_gem_request *request)
3037 {
3038         /* The guilty request will get skipped on a hung engine.
3039          *
3040          * Users of client default contexts do not rely on logical
3041          * state preserved between batches so it is safe to execute
3042          * queued requests following the hang. Non default contexts
3043          * rely on preserved state, so skipping a batch loses the
3044          * evolution of the state and it needs to be considered corrupted.
3045          * Executing more queued batches on top of corrupted state is
3046          * risky. But we take the risk by trying to advance through
3047          * the queued requests in order to make the client behaviour
3048          * more predictable around resets, by not throwing away a random
3049          * number of batches it has prepared for execution. Sophisticated
3050          * clients can use gem_reset_stats_ioctl and dma fence status
3051          * (exported via sync_file info ioctl on explicit fences) to observe
3052          * when they lose the context state and should rebuild accordingly.
3053          *
3054          * The context ban, and ultimately the client ban, mechanisms are
3055          * safety valves if client submission ends up resulting in nothing
3056          * more than subsequent hangs.
3057          */
3058
3059         if (engine_stalled(engine)) {
3060                 i915_gem_context_mark_guilty(request->ctx);
3061                 skip_request(request);
3062
3063                 /* If this context is now banned, skip all pending requests. */
3064                 if (i915_gem_context_is_banned(request->ctx))
3065                         engine_skip_context(request);
3066         } else {
3067                 /*
3068                  * Since this is not the hung engine, it may have advanced
3069                  * since the hang declaration. Double check by refinding
3070                  * the active request at the time of the reset.
3071                  */
3072                 request = i915_gem_find_active_request(engine);
3073                 if (request) {
3074                         i915_gem_context_mark_innocent(request->ctx);
3075                         dma_fence_set_error(&request->fence, -EAGAIN);
3076
3077                         /* Rewind the engine to replay the incomplete rq */
3078                         spin_lock_irq(&engine->timeline->lock);
3079                         request = list_prev_entry(request, link);
3080                         if (&request->link == &engine->timeline->requests)
3081                                 request = NULL;
3082                         spin_unlock_irq(&engine->timeline->lock);
3083                 }
3084         }
3085
3086         return request;
3087 }
3088
3089 void i915_gem_reset_engine(struct intel_engine_cs *engine,
3090                            struct drm_i915_gem_request *request)
3091 {
3092         /*
3093          * Make sure this write is visible before we re-enable the interrupt
3094          * handlers on another CPU, as tasklet_enable() resolves to just
3095          * a compiler barrier which is insufficient for our purpose here.
3096          */
3097         smp_store_mb(engine->irq_posted, 0);
3098
3099         if (request)
3100                 request = i915_gem_reset_request(engine, request);
3101
3102         if (request) {
3103                 DRM_DEBUG_DRIVER("resetting %s to restart from tail of request 0x%x\n",
3104                                  engine->name, request->global_seqno);
3105         }
3106
3107         /* Setup the CS to resume from the breadcrumb of the hung request */
3108         engine->reset_hw(engine, request);
3109 }
3110
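/*
 * Device-wide reset bookkeeping, in outline: retire what has already
 * completed, then per engine replay or cancel the request that was found
 * active in i915_gem_reset_prepare(), drop the stale last_retired_context,
 * and submit an empty kernel_context request if the engine is left idle
 * (see the comment below). Finally restore the fence registers and
 * re-enable GT powersaving if the GPU was awake.
 */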
3111 void i915_gem_reset(struct drm_i915_private *dev_priv)
3112 {
3113         struct intel_engine_cs *engine;
3114         enum intel_engine_id id;
3115
3116         lockdep_assert_held(&dev_priv->drm.struct_mutex);
3117
3118         i915_gem_retire_requests(dev_priv);
3119
3120         for_each_engine(engine, dev_priv, id) {
3121                 struct i915_gem_context *ctx;
3122
3123                 i915_gem_reset_engine(engine, engine->hangcheck.active_request);
3124                 ctx = fetch_and_zero(&engine->last_retired_context);
3125                 if (ctx)
3126                         engine->context_unpin(engine, ctx);
3127
3128                 /*
3129                  * Ostensibly, we always want a context loaded for powersaving,
3130                  * so if the engine is idle after the reset, send a request
3131                  * to load our scratch kernel_context.
3132                  *
3133                  * More mysteriously, if we leave the engine idle after a reset,
3134                  * the next userspace batch may hang, with what appears to be
3135                  * an incoherent read by the CS (presumably stale TLB). An
3136                  * empty request appears sufficient to paper over the glitch.
3137                  */
3138                 if (list_empty(&engine->timeline->requests)) {
3139                         struct drm_i915_gem_request *rq;
3140
3141                         rq = i915_gem_request_alloc(engine,
3142                                                     dev_priv->kernel_context);
3143                         if (!IS_ERR(rq))
3144                                 __i915_add_request(rq, false);
3145                 }
3146         }
3147
3148         i915_gem_restore_fences(dev_priv);
3149
3150         if (dev_priv->gt.awake) {
3151                 intel_sanitize_gt_powersave(dev_priv);
3152                 intel_enable_gt_powersave(dev_priv);
3153                 if (INTEL_GEN(dev_priv) >= 6)
3154                         gen6_rps_busy(dev_priv);
3155         }
3156 }
3157
3158 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
3159 {
3160         tasklet_enable(&engine->execlists.tasklet);
3161         kthread_unpark(engine->breadcrumbs.signaler);
3162
3163         intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
3164 }
3165
3166 void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
3167 {
3168         struct intel_engine_cs *engine;
3169         enum intel_engine_id id;
3170
3171         lockdep_assert_held(&dev_priv->drm.struct_mutex);
3172
3173         for_each_engine(engine, dev_priv, id) {
3174                 engine->hangcheck.active_request = NULL;
3175                 i915_gem_reset_finish_engine(engine);
3176         }
3177 }
3178
3179 static void nop_submit_request(struct drm_i915_gem_request *request)
3180 {
3181         dma_fence_set_error(&request->fence, -EIO);
3182
3183         i915_gem_request_submit(request);
3184 }
3185
3186 static void nop_complete_submit_request(struct drm_i915_gem_request *request)
3187 {
3188         unsigned long flags;
3189
3190         dma_fence_set_error(&request->fence, -EIO);
3191
3192         spin_lock_irqsave(&request->engine->timeline->lock, flags);
3193         __i915_gem_request_submit(request);
3194         intel_engine_init_global_seqno(request->engine, request->global_seqno);
3195         spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
3196 }
3197
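/*
 * Declare the GPU wedged: a sketch of the ordering used here is
 *
 *	1. point every engine->submit_request at a nop and wait (RCU) for
 *	   any submitter still running the old callback to finish;
 *	2. cancel all in-flight requests, then switch to a nop that also
 *	   advances the global seqno so new requests complete immediately;
 *	3. fast-forward each timeline to its last submitted seqno so that
 *	   concurrent lockless waiters see everything as signaled.
 *
 * Only after all that is I915_WEDGED set and the reset queue woken.
 */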
3198 void i915_gem_set_wedged(struct drm_i915_private *i915)
3199 {
3200         struct intel_engine_cs *engine;
3201         enum intel_engine_id id;
3202
3203         /*
3204          * First, stop submission to hw, but do not yet complete requests by
3205          * rolling the global seqno forward (since this would complete requests
3206          * for which we haven't set the fence error to EIO yet).
3207          */
3208         for_each_engine(engine, i915, id) {
3209                 i915_gem_reset_prepare_engine(engine);
3210                 engine->submit_request = nop_submit_request;
3211         }
3212
3213         /*
3214          * Make sure no one is running the old callback before we proceed with
3215          * cancelling requests and resetting the completion tracking. Otherwise
3216          * we might submit a request to the hardware which never completes.
3217          */
3218         synchronize_rcu();
3219
3220         for_each_engine(engine, i915, id) {
3221                 /* Mark all executing requests as skipped */
3222                 engine->cancel_requests(engine);
3223
3224                 /*
3225                  * Only once we've force-cancelled all in-flight requests can we
3226                  * start to complete all requests.
3227                  */
3228                 engine->submit_request = nop_complete_submit_request;
3229         }
3230
3231         /*
3232          * Make sure no request can slip through without getting completed by
3233          * either this call here to intel_engine_init_global_seqno, or the one
3234          * in nop_complete_submit_request.
3235          */
3236         synchronize_rcu();
3237
3238         for_each_engine(engine, i915, id) {
3239                 unsigned long flags;
3240
3241                 /* Mark all pending requests as complete so that any concurrent
3242                  * (lockless) lookup doesn't try and wait upon the request as we
3243                  * reset it.
3244                  */
3245                 spin_lock_irqsave(&engine->timeline->lock, flags);
3246                 intel_engine_init_global_seqno(engine,
3247                                                intel_engine_last_submit(engine));
3248                 spin_unlock_irqrestore(&engine->timeline->lock, flags);
3249
3250                 i915_gem_reset_finish_engine(engine);
3251         }
3252
3253         set_bit(I915_WEDGED, &i915->gpu_error.flags);
3254         wake_up_all(&i915->gpu_error.reset_queue);
3255 }
3256
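/*
 * Attempt to recover from a previously wedged state. Returns true if the
 * device is usable again (or was never wedged); returns false if the wait
 * for the outstanding nop'ed requests to complete (they may still depend
 * on third party fences) was aborted, in which case the GPU stays wedged.
 */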
3257 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
3258 {
3259         struct i915_gem_timeline *tl;
3260         int i;
3261
3262         lockdep_assert_held(&i915->drm.struct_mutex);
3263         if (!test_bit(I915_WEDGED, &i915->gpu_error.flags))
3264                 return true;
3265
3266         /* Before unwedging, make sure that all pending operations
3267          * are flushed and errored out - we may have requests waiting upon
3268          * third party fences. We marked all in-flight requests as EIO, and
3269          * every execbuf since has returned EIO; for consistency we want all
3270          * the currently pending requests to also be marked as EIO, which
3271          * is done inside our nop_submit_request - and so we must wait.
3272          *
3273          * No more can be submitted until we reset the wedged bit.
3274          */
3275         list_for_each_entry(tl, &i915->gt.timelines, link) {
3276                 for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3277                         struct drm_i915_gem_request *rq;
3278
3279                         rq = i915_gem_active_peek(&tl->engine[i].last_request,
3280                                                   &i915->drm.struct_mutex);
3281                         if (!rq)
3282                                 continue;
3283
3284                         /* We can't use our normal waiter as we want to
3285                          * avoid recursively trying to handle the current
3286                          * reset. The basic dma_fence_default_wait() installs
3287                          * a callback for dma_fence_signal(), which is
3288                          * triggered by our nop handler (indirectly, the
3289                          * callback enables the signaler thread which is
3290                          * woken by the nop_submit_request() advancing the seqno
3291                          * and when the seqno passes the fence, the signaler
3292                          * then signals the fence waking us up).
3293                          */
3294                         if (dma_fence_default_wait(&rq->fence, true,
3295                                                    MAX_SCHEDULE_TIMEOUT) < 0)
3296                                 return false;
3297                 }
3298         }
3299
3300         /* Undo nop_submit_request. We prevent all new i915 requests from
3301          * being queued (by disallowing execbuf whilst wedged) so having
3302          * waited for all active requests above, we know the system is idle
3303          * and do not have to worry about a thread being inside
3304          * engine->submit_request() as we swap over. So unlike installing
3305          * the nop_submit_request on reset, we can do this from normal
3306          * context and do not require stop_machine().
3307          */
3308         intel_engines_reset_default_submission(i915);
3309         i915_gem_contexts_lost(i915);
3310
3311         smp_mb__before_atomic(); /* complete takeover before enabling execbuf */
3312         clear_bit(I915_WEDGED, &i915->gpu_error.flags);
3313
3314         return true;
3315 }
3316
3317 static void
3318 i915_gem_retire_work_handler(struct work_struct *work)
3319 {
3320         struct drm_i915_private *dev_priv =
3321                 container_of(work, typeof(*dev_priv), gt.retire_work.work);
3322         struct drm_device *dev = &dev_priv->drm;
3323
3324         /* Come back later if the device is busy... */
3325         if (mutex_trylock(&dev->struct_mutex)) {
3326                 i915_gem_retire_requests(dev_priv);
3327                 mutex_unlock(&dev->struct_mutex);
3328         }
3329
3330         /*
3331          * Keep the retire handler running until we are finally idle.
3332          * We do not need to do this test under locking as in the worst-case
3333          * we queue the retire worker once too often.
3334          */
3335         if (READ_ONCE(dev_priv->gt.awake))
3336                 queue_delayed_work(dev_priv->wq,
3337                                    &dev_priv->gt.retire_work,
3338                                    round_jiffies_up_relative(HZ));
3339 }
3340
3341 static inline bool
3342 new_requests_since_last_retire(const struct drm_i915_private *i915)
3343 {
3344         return (READ_ONCE(i915->gt.active_requests) ||
3345                 work_pending(&i915->gt.idle_work.work));
3346 }
3347
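/*
 * The idle worker runs a while after the last request was submitted. Once
 * the engines have settled it parks the GT: it flushes any residual
 * interrupt, parks the engines and timelines, drops the GT power and
 * runtime-pm references and leaves hangcheck cancelled until new work
 * arrives.
 */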
3348 static void
3349 i915_gem_idle_work_handler(struct work_struct *work)
3350 {
3351         struct drm_i915_private *dev_priv =
3352                 container_of(work, typeof(*dev_priv), gt.idle_work.work);
3353         bool rearm_hangcheck;
3354         ktime_t end;
3355
3356         if (!READ_ONCE(dev_priv->gt.awake))
3357                 return;
3358
3359         /*
3360          * Wait for last execlists context complete, but bail out in case a
3361          * new request is submitted.
3362          */
3363         end = ktime_add_ms(ktime_get(), I915_IDLE_ENGINES_TIMEOUT);
3364         do {
3365                 if (new_requests_since_last_retire(dev_priv))
3366                         return;
3367
3368                 if (intel_engines_are_idle(dev_priv))
3369                         break;
3370
3371                 usleep_range(100, 500);
3372         } while (ktime_before(ktime_get(), end));
3373
3374         rearm_hangcheck =
3375                 cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
3376
3377         if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
3378                 /* Currently busy, come back later */
3379                 mod_delayed_work(dev_priv->wq,
3380                                  &dev_priv->gt.idle_work,
3381                                  msecs_to_jiffies(50));
3382                 goto out_rearm;
3383         }
3384
3385         /*
3386          * New request retired after this work handler started, extend active
3387          * period until next instance of the work.
3388          */
3389         if (new_requests_since_last_retire(dev_priv))
3390                 goto out_unlock;
3391
3392         /*
3393          * Be paranoid and flush a concurrent interrupt to make sure
3394          * we don't reactivate any irq tasklets after parking.
3395          *
3396          * FIXME: Note that even though we have waited for execlists to be idle,
3397          * there may still be an in-flight interrupt even though the CSB
3398          * is now empty. synchronize_irq() makes sure that a residual interrupt
3399          * is completed before we continue, but it doesn't prevent the HW from
3400          * raising a spurious interrupt later. To complete the shield we should
3401          * coordinate disabling the CS irq with flushing the interrupts.
3402          */
3403         synchronize_irq(dev_priv->drm.irq);
3404
3405         intel_engines_park(dev_priv);
3406         i915_gem_timelines_park(dev_priv);
3407
3408         i915_pmu_gt_parked(dev_priv);
3409
3410         GEM_BUG_ON(!dev_priv->gt.awake);
3411         dev_priv->gt.awake = false;
3412         rearm_hangcheck = false;
3413
3414         if (INTEL_GEN(dev_priv) >= 6)
3415                 gen6_rps_idle(dev_priv);
3416
3417         intel_display_power_put(dev_priv, POWER_DOMAIN_GT_IRQ);
3418
3419         intel_runtime_pm_put(dev_priv);
3420 out_unlock:
3421         mutex_unlock(&dev_priv->drm.struct_mutex);
3422
3423 out_rearm:
3424         if (rearm_hangcheck) {
3425                 GEM_BUG_ON(!dev_priv->gt.awake);
3426                 i915_queue_hangcheck(dev_priv);
3427         }
3428 }
3429
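/*
 * GEM close hook: called when @file drops a handle to @gem. Walk the
 * object's per-context lookup entries, and for every context belonging to
 * this file remove the handle->vma shortcut and drop the vma open count,
 * closing non-GGTT vmas once their last open handle is gone.
 */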
3430 void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
3431 {
3432         struct drm_i915_private *i915 = to_i915(gem->dev);
3433         struct drm_i915_gem_object *obj = to_intel_bo(gem);
3434         struct drm_i915_file_private *fpriv = file->driver_priv;
3435         struct i915_lut_handle *lut, *ln;
3436
3437         mutex_lock(&i915->drm.struct_mutex);
3438
3439         list_for_each_entry_safe(lut, ln, &obj->lut_list, obj_link) {
3440                 struct i915_gem_context *ctx = lut->ctx;
3441                 struct i915_vma *vma;
3442
3443                 GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
3444                 if (ctx->file_priv != fpriv)
3445                         continue;
3446
3447                 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
3448                 GEM_BUG_ON(vma->obj != obj);
3449
3450                 /* We allow the process to have multiple handles to the same
3451                  * vma, in the same fd namespace, by virtue of flink/open.
3452                  */
3453                 GEM_BUG_ON(!vma->open_count);
3454                 if (!--vma->open_count && !i915_vma_is_ggtt(vma))
3455                         i915_vma_close(vma);
3456
3457                 list_del(&lut->obj_link);
3458                 list_del(&lut->ctx_link);
3459
3460                 kmem_cache_free(i915->luts, lut);
3461                 __i915_gem_object_release_unless_active(obj);
3462         }
3463
3464         mutex_unlock(&i915->drm.struct_mutex);
3465 }
3466
3467 static unsigned long to_wait_timeout(s64 timeout_ns)
3468 {
3469         if (timeout_ns < 0)
3470                 return MAX_SCHEDULE_TIMEOUT;
3471
3472         if (timeout_ns == 0)
3473                 return 0;
3474
3475         return nsecs_to_jiffies_timeout(timeout_ns);
3476 }
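/*
 * For example (illustrative values only): timeout_ns == -1 maps to
 * MAX_SCHEDULE_TIMEOUT (wait indefinitely), timeout_ns == 0 merely polls
 * (busy-ioctl semantics), and a positive value such as 16000000 (16ms)
 * is rounded up to the matching number of jiffies.
 */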
3477
3478 /**
3479  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
3480  * @dev: drm device pointer
3481  * @data: ioctl data blob
3482  * @file: drm file pointer
3483  *
3484  * Returns 0 if successful, else an error is returned with the remaining time in
3485  * the timeout parameter.
3486  *  -ETIME: object is still busy after timeout
3487  *  -ERESTARTSYS: signal interrupted the wait
3488  *  -ENOENT: object doesn't exist
3489  * Also possible, but rare:
3490  *  -EAGAIN: incomplete, restart syscall
3491  *  -ENOMEM: damn
3492  *  -ENODEV: Internal IRQ fail
3493  *  -E?: The add request failed
3494  *
3495  * The wait ioctl with a timeout of 0 reimplements the busy ioctl. With any
3496  * non-zero timeout parameter the wait ioctl will wait for the given number of
3497  * nanoseconds on an object becoming unbusy. Since the wait itself does so
3498  * without holding struct_mutex the object may become re-busied before this
3499  * function completes. A similar but shorter * race condition exists in the busy
3500  * function completes. A similar but shorter race condition exists in the busy
3501  * ioctl.
3502 int
3503 i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
3504 {
3505         struct drm_i915_gem_wait *args = data;
3506         struct drm_i915_gem_object *obj;
3507         ktime_t start;
3508         long ret;
3509
3510         if (args->flags != 0)
3511                 return -EINVAL;
3512
3513         obj = i915_gem_object_lookup(file, args->bo_handle);
3514         if (!obj)
3515                 return -ENOENT;
3516
3517         start = ktime_get();
3518
3519         ret = i915_gem_object_wait(obj,
3520                                    I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
3521                                    to_wait_timeout(args->timeout_ns),
3522                                    to_rps_client(file));
3523
3524         if (args->timeout_ns > 0) {
3525                 args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
3526                 if (args->timeout_ns < 0)
3527                         args->timeout_ns = 0;
3528
3529                 /*
3530                  * Apparently ktime isn't accurate enough and occasionally has a
3531                  * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
3532                  * things up to make the test happy. We allow up to 1 jiffy.
3533                  *
3534                  * This is a regression from the timespec->ktime conversion.
3535                  */
3536                 if (ret == -ETIME && !nsecs_to_jiffies(args->timeout_ns))
3537                         args->timeout_ns = 0;
3538
3539                 /* Asked to wait beyond the jiffie/scheduler precision? */
3540                 if (ret == -ETIME && args->timeout_ns)
3541                         ret = -EAGAIN;
3542         }
3543
3544         i915_gem_object_put(obj);
3545         return ret;
3546 }
3547
3548 static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
3549 {
3550         int ret, i;
3551
3552         for (i = 0; i < ARRAY_SIZE(tl->engine); i++) {
3553                 ret = i915_gem_active_wait(&tl->engine[i].last_request, flags);
3554                 if (ret)
3555                         return ret;
3556         }
3557
3558         return 0;
3559 }
3560
3561 static int wait_for_engines(struct drm_i915_private *i915)
3562 {
3563         if (wait_for(intel_engines_are_idle(i915), I915_IDLE_ENGINES_TIMEOUT)) {
3564                 dev_err(i915->drm.dev,
3565                         "Failed to idle engines, declaring wedged!\n");
3566                 if (drm_debug & DRM_UT_DRIVER) {
3567                         struct drm_printer p = drm_debug_printer(__func__);
3568                         struct intel_engine_cs *engine;
3569                         enum intel_engine_id id;
3570
3571                         for_each_engine(engine, i915, id)
3572                                 intel_engine_dump(engine, &p,
3573                                                   "%s", engine->name);
3574                 }
3575
3576                 i915_gem_set_wedged(i915);
3577                 return -EIO;
3578         }
3579
3580         return 0;
3581 }
3582
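/*
 * Wait until the GPU has retired everything outstanding. With
 * I915_WAIT_LOCKED (struct_mutex held) this waits on every timeline,
 * retires the completed requests and then insists the engines report idle,
 * wedging the GPU if they fail to do so within I915_IDLE_ENGINES_TIMEOUT;
 * without it, only the global timeline is waited upon.
 */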
3583 int i915_gem_wait_for_idle(struct drm_i915_private *i915, unsigned int flags)
3584 {
3585         int ret;
3586
3587         /* If the device is asleep, we have no requests outstanding */
3588         if (!READ_ONCE(i915->gt.awake))
3589                 return 0;
3590
3591         if (flags & I915_WAIT_LOCKED) {
3592                 struct i915_gem_timeline *tl;
3593
3594                 lockdep_assert_held(&i915->drm.struct_mutex);
3595
3596                 list_for_each_entry(tl, &i915->gt.timelines, link) {
3597                         ret = wait_for_timeline(tl, flags);
3598                         if (ret)
3599                                 return ret;
3600                 }
3601                 i915_gem_retire_requests(i915);
3602
3603                 ret = wait_for_engines(i915);
3604         } else {
3605                 ret = wait_for_timeline(&i915->gt.global_timeline, flags);
3606         }
3607
3608         return ret;
3609 }
3610
3611 static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
3612 {
3613         /*
3614          * We manually flush the CPU domain so that we can override and
3615          * force the flush for the display, and perform it asynchronously.
3616          */
3617         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
3618         if (obj->cache_dirty)
3619                 i915_gem_clflush_object(obj, I915_CLFLUSH_FORCE);
3620         obj->base.write_domain = 0;
3621 }
3622
3623 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
3624 {
3625         if (!READ_ONCE(obj->pin_global))
3626                 return;
3627
3628         mutex_lock(&obj->base.dev->struct_mutex);
3629         __i915_gem_object_flush_for_display(obj);
3630         mutex_unlock(&obj->base.dev->struct_mutex);
3631 }
3632
3633 /**
3634  * Moves a single object to the WC read, and possibly write domain.
3635  * @obj: object to act on
3636  * @write: ask for write access or read only
3637  *
3638  * This function returns when the move is complete, including waiting on
3639  * flushes to occur.
3640  */
3641 int
3642 i915_gem_object_set_to_wc_domain(struct drm_i915_gem_object *obj, bool write)
3643 {
3644         int ret;
3645
3646         lockdep_assert_held(&obj->base.dev->struct_mutex);
3647
3648         ret = i915_gem_object_wait(obj,
3649                                    I915_WAIT_INTERRUPTIBLE |
3650                                    I915_WAIT_LOCKED |
3651                                    (write ? I915_WAIT_ALL : 0),
3652                                    MAX_SCHEDULE_TIMEOUT,
3653                                    NULL);
3654         if (ret)
3655                 return ret;
3656
3657         if (obj->base.write_domain == I915_GEM_DOMAIN_WC)
3658                 return 0;
3659
3660         /* Flush and acquire obj->pages so that we are coherent through
3661          * direct access in memory with previous cached writes through
3662          * shmemfs and that our cache domain tracking remains valid.
3663          * For example, if the obj->filp was moved to swap without us
3664          * being notified and releasing the pages, we would mistakenly
3665          * continue to assume that the obj remained out of the CPU cached
3666          * domain.
3667          */
3668         ret = i915_gem_object_pin_pages(obj);
3669         if (ret)
3670                 return ret;
3671
3672         flush_write_domain(obj, ~I915_GEM_DOMAIN_WC);
3673
3674         /* Serialise direct access to this object with the barriers for
3675          * coherent writes from the GPU, by effectively invalidating the
3676          * WC domain upon first access.
3677          */
3678         if ((obj->base.read_domains & I915_GEM_DOMAIN_WC) == 0)
3679                 mb();
3680
3681         /* It should now be out of any other write domains, and we can update
3682          * the domain values for our changes.
3683          */
3684         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_WC) != 0);
3685         obj->base.read_domains |= I915_GEM_DOMAIN_WC;
3686         if (write) {
3687                 obj->base.read_domains = I915_GEM_DOMAIN_WC;
3688                 obj->base.write_domain = I915_GEM_DOMAIN_WC;
3689                 obj->mm.dirty = true;
3690         }
3691
3692         i915_gem_object_unpin_pages(obj);
3693         return 0;
3694 }
3695
3696 /**
3697  * Moves a single object to the GTT read, and possibly write domain.
3698  * @obj: object to act on
3699  * @write: ask for write access or read only
3700  *
3701  * This function returns when the move is complete, including waiting on
3702  * flushes to occur.
3703  */
3704 int
3705 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
3706 {
3707         int ret;
3708
3709         lockdep_assert_held(&obj->base.dev->struct_mutex);
3710
3711         ret = i915_gem_object_wait(obj,
3712                                    I915_WAIT_INTERRUPTIBLE |
3713                                    I915_WAIT_LOCKED |
3714                                    (write ? I915_WAIT_ALL : 0),
3715                                    MAX_SCHEDULE_TIMEOUT,
3716                                    NULL);
3717         if (ret)
3718                 return ret;
3719
3720         if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
3721                 return 0;
3722
3723         /* Flush and acquire obj->pages so that we are coherent through
3724          * direct access in memory with previous cached writes through
3725          * shmemfs and that our cache domain tracking remains valid.
3726          * For example, if the obj->filp was moved to swap without us
3727          * being notified and releasing the pages, we would mistakenly
3728          * continue to assume that the obj remained out of the CPU cached
3729          * domain.
3730          */
3731         ret = i915_gem_object_pin_pages(obj);
3732         if (ret)
3733                 return ret;
3734
3735         flush_write_domain(obj, ~I915_GEM_DOMAIN_GTT);
3736
3737         /* Serialise direct access to this object with the barriers for
3738          * coherent writes from the GPU, by effectively invalidating the
3739          * GTT domain upon first access.
3740          */
3741         if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
3742                 mb();
3743
3744         /* It should now be out of any other write domains, and we can update
3745          * the domain values for our changes.
3746          */
3747         GEM_BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
3748         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
3749         if (write) {
3750                 obj->base.read_domains = I915_GEM_DOMAIN_GTT;
3751                 obj->base.write_domain = I915_GEM_DOMAIN_GTT;
3752                 obj->mm.dirty = true;
3753         }
3754
3755         i915_gem_object_unpin_pages(obj);
3756         return 0;
3757 }
3758
3759 /**
3760  * Changes the cache-level of an object across all VMA.
3761  * @obj: object to act on
3762  * @cache_level: new cache level to set for the object
3763  *
3764  * After this function returns, the object will be in the new cache-level
3765  * across all GTT and the contents of the backing storage will be coherent,
3766  * with respect to the new cache-level. In order to keep the backing storage
3767  * coherent for all users, we only allow a single cache level to be set
3768  * globally on the object and prevent it from being changed whilst the
3769  * hardware is reading from the object. That is, if the object is currently
3770  * on the scanout it will be set to uncached (or equivalent display
3771  * cache coherency) and all non-MOCS GPU access will also be uncached so
3772  * that all direct access to the scanout remains coherent.
3773  */
3774 int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
3775                                     enum i915_cache_level cache_level)
3776 {
3777         struct i915_vma *vma;
3778         int ret;
3779
3780         lockdep_assert_held(&obj->base.dev->struct_mutex);
3781
3782         if (obj->cache_level == cache_level)
3783                 return 0;
3784
3785         /* Inspect the list of currently bound VMA and unbind any that would
3786          * be invalid given the new cache-level. This is principally to
3787          * catch the issue of the CS prefetch crossing page boundaries and
3788          * reading an invalid PTE on older architectures.
3789          */
3790 restart:
3791         list_for_each_entry(vma, &obj->vma_list, obj_link) {
3792                 if (!drm_mm_node_allocated(&vma->node))
3793                         continue;
3794
3795                 if (i915_vma_is_pinned(vma)) {
3796                         DRM_DEBUG("can not change the cache level of pinned objects\n");
3797                         return -EBUSY;
3798                 }
3799
3800                 if (!i915_vma_is_closed(vma) &&
3801                     i915_gem_valid_gtt_space(vma, cache_level))
3802                         continue;
3803
3804                 ret = i915_vma_unbind(vma);
3805                 if (ret)
3806                         return ret;
3807
3808                 /* As unbinding may affect other elements in the
3809                  * obj->vma_list (due to side-effects from retiring
3810                  * an active vma), play safe and restart the iterator.
3811                  */
3812                 goto restart;
3813         }
3814
3815         /* We can reuse the existing drm_mm nodes but need to change the
3816          * cache-level on the PTE. We could simply unbind them all and
3817          * rebind with the correct cache-level on next use. However since
3818          * we already have a valid slot, dma mapping, pages etc, we may as
3819          * well rewrite the PTE in the belief that doing so tramples upon less
3820          * state and so involves less work.
3821          */
3822         if (obj->bind_count) {
3823                 /* Before we change the PTE, the GPU must not be accessing it.
3824                  * If we wait upon the object, we know that all the bound
3825                  * VMA are no longer active.
3826                  */
3827                 ret = i915_gem_object_wait(obj,
3828                                            I915_WAIT_INTERRUPTIBLE |
3829                                            I915_WAIT_LOCKED |
3830                                            I915_WAIT_ALL,
3831                                            MAX_SCHEDULE_TIMEOUT,
3832                                            NULL);
3833                 if (ret)
3834                         return ret;
3835
3836                 if (!HAS_LLC(to_i915(obj->base.dev)) &&
3837                     cache_level != I915_CACHE_NONE) {
3838                         /* Access to snoopable pages through the GTT is
3839                          * incoherent and on some machines causes a hard
3840          * lockup. Relinquish the CPU mmapping to force
3841                          * userspace to refault in the pages and we can
3842                          * then double check if the GTT mapping is still
3843                          * valid for that pointer access.
3844                          */
3845                         i915_gem_release_mmap(obj);
3846
3847                         /* As we no longer need a fence for GTT access,
3848                          * we can relinquish it now (and so prevent having
3849                          * to steal a fence from someone else on the next
3850                          * fence request). Note GPU activity would have
3851                          * dropped the fence as all snoopable access is
3852                          * supposed to be linear.
3853                          */
3854                         for_each_ggtt_vma(vma, obj) {
3855                                 ret = i915_vma_put_fence(vma);
3856                                 if (ret)
3857                                         return ret;
3858                         }
3859                 } else {
3860                         /* We either have incoherent backing store and
3861                          * so no GTT access or the architecture is fully
3862                          * coherent. In such cases, existing GTT mmaps
3863                          * ignore the cache bit in the PTE and we can
3864                          * rewrite it without confusing the GPU or having
3865                          * to force userspace to fault back in its mmaps.
3866                          */
3867                 }
3868
3869                 list_for_each_entry(vma, &obj->vma_list, obj_link) {
3870                         if (!drm_mm_node_allocated(&vma->node))
3871                                 continue;
3872
3873                         ret = i915_vma_bind(vma, cache_level, PIN_UPDATE);
3874                         if (ret)
3875                                 return ret;
3876                 }
3877         }
3878
3879         list_for_each_entry(vma, &obj->vma_list, obj_link)
3880                 vma->node.color = cache_level;
3881         i915_gem_object_set_cache_coherency(obj, cache_level);
3882         obj->cache_dirty = true; /* Always invalidate stale cachelines */
3883
3884         return 0;
3885 }
3886
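/*
 * DRM_IOCTL_I915_GEM_GET_CACHING: report the object's cache level back to
 * userspace as a coarse I915_CACHING_* value - LLC/L3_LLC read back as
 * CACHED, WT as DISPLAY and anything else as NONE.
 */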
3887 int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
3888                                struct drm_file *file)
3889 {
3890         struct drm_i915_gem_caching *args = data;
3891         struct drm_i915_gem_object *obj;
3892         int err = 0;
3893
3894         rcu_read_lock();
3895         obj = i915_gem_object_lookup_rcu(file, args->handle);
3896         if (!obj) {
3897                 err = -ENOENT;
3898                 goto out;
3899         }
3900
3901         switch (obj->cache_level) {
3902         case I915_CACHE_LLC:
3903         case I915_CACHE_L3_LLC:
3904                 args->caching = I915_CACHING_CACHED;
3905                 break;
3906
3907         case I915_CACHE_WT:
3908                 args->caching = I915_CACHING_DISPLAY;
3909                 break;
3910
3911         default:
3912                 args->caching = I915_CACHING_NONE;
3913                 break;
3914         }
3915 out:
3916         rcu_read_unlock();
3917         return err;
3918 }
3919
3920 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
3921                                struct drm_file *file)
3922 {
3923         struct drm_i915_private *i915 = to_i915(dev);
3924         struct drm_i915_gem_caching *args = data;
3925         struct drm_i915_gem_object *obj;
3926         enum i915_cache_level level;
3927         int ret = 0;
3928
3929         switch (args->caching) {
3930         case I915_CACHING_NONE:
3931                 level = I915_CACHE_NONE;
3932                 break;
3933         case I915_CACHING_CACHED:
3934                 /*
3935                  * Due to a HW issue on BXT A stepping, GPU stores via a
3936                  * snooped mapping may leave stale data in a corresponding CPU
3937                  * cacheline, whereas normally such cachelines would get
3938                  * invalidated.
3939                  */
3940                 if (!HAS_LLC(i915) && !HAS_SNOOP(i915))
3941                         return -ENODEV;
3942
3943                 level = I915_CACHE_LLC;
3944                 break;
3945         case I915_CACHING_DISPLAY:
3946                 level = HAS_WT(i915) ? I915_CACHE_WT : I915_CACHE_NONE;
3947                 break;
3948         default:
3949                 return -EINVAL;
3950         }
3951
3952         obj = i915_gem_object_lookup(file, args->handle);
3953         if (!obj)
3954                 return -ENOENT;
3955
3956         /*
3957          * The caching mode of proxy object is handled by its generator, and
3958          * not allowed to be changed by userspace.
3959          */
3960         if (i915_gem_object_is_proxy(obj)) {
3961                 ret = -ENXIO;
3962                 goto out;
3963         }
3964
3965         if (obj->cache_level == level)
3966                 goto out;
3967
3968         ret = i915_gem_object_wait(obj,
3969                                    I915_WAIT_INTERRUPTIBLE,
3970                                    MAX_SCHEDULE_TIMEOUT,
3971                                    to_rps_client(file));
3972         if (ret)
3973                 goto out;
3974
3975         ret = i915_mutex_lock_interruptible(dev);
3976         if (ret)
3977                 goto out;
3978
3979         ret = i915_gem_object_set_cache_level(obj, level);
3980         mutex_unlock(&dev->struct_mutex);
3981
3982 out:
3983         i915_gem_object_put(obj);
3984         return ret;
3985 }
3986
3987 /*
3988  * Prepare buffer for display plane (scanout, cursors, etc).
3989  * Can be called from an uninterruptible phase (modesetting) and allows
3990  * any flushes to be pipelined (for pageflips).
3991  */
3992 struct i915_vma *
3993 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
3994                                      u32 alignment,
3995                                      const struct i915_ggtt_view *view)
3996 {
3997         struct i915_vma *vma;
3998         int ret;
3999
4000         lockdep_assert_held(&obj->base.dev->struct_mutex);
4001
4002         /* Mark the global pin early so that we account for the
4003          * display coherency whilst setting up the cache domains.
4004          */
4005         obj->pin_global++;
4006
4007         /* The display engine is not coherent with the LLC cache on gen6.  As
4008          * a result, we make sure that the pinning that is about to occur is
4009          * done with uncached PTEs. This is the lowest common denominator for all
4010          * chipsets.
4011          *
4012          * However for gen6+, we could do better by using the GFDT bit instead
4013          * of uncaching, which would allow us to flush all the LLC-cached data
4014          * with that bit in the PTE to main memory with just one PIPE_CONTROL.
4015          */
4016         ret = i915_gem_object_set_cache_level(obj,
4017                                               HAS_WT(to_i915(obj->base.dev)) ?
4018                                               I915_CACHE_WT : I915_CACHE_NONE);
4019         if (ret) {
4020                 vma = ERR_PTR(ret);
4021                 goto err_unpin_global;
4022         }
4023
4024         /* As the user may map the buffer once pinned in the display plane
4025          * (e.g. libkms for the bootup splash), we have to ensure that we
4026          * always use map_and_fenceable for all scanout buffers. However,
4027          * it may simply be too big to fit into the mappable aperture, in which case
4028          * put it anyway and hope that userspace can cope (but always first
4029          * try to preserve the existing ABI).
4030          */
4031         vma = ERR_PTR(-ENOSPC);
4032         if (!view || view->type == I915_GGTT_VIEW_NORMAL)
4033                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment,
4034                                                PIN_MAPPABLE | PIN_NONBLOCK);
4035         if (IS_ERR(vma)) {
4036                 struct drm_i915_private *i915 = to_i915(obj->base.dev);
4037                 unsigned int flags;
4038
4039                 /* Valleyview is definitely limited to scanning out the first
4040                  * 512MiB. Let's presume this behaviour was inherited from the
4041                  * g4x display engine and that all earlier gen are similarly
4042                  * limited. Testing suggests that it is a little more
4043                  * complicated than this. For example, Cherryview appears quite
4044                  * happy to scan out from anywhere within its global aperture.
4045                  */
4046                 flags = 0;
4047                 if (HAS_GMCH_DISPLAY(i915))
4048                         flags = PIN_MAPPABLE;
4049                 vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
4050         }
4051         if (IS_ERR(vma))
4052                 goto err_unpin_global;
4053
4054         vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
4055
4056         /* Treat this as an end-of-frame, like intel_user_framebuffer_dirty() */
4057         __i915_gem_object_flush_for_display(obj);
4058         intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
4059
4060         /* It should now be out of any other write domains, and we can update
4061          * the domain values for our changes.
4062          */
4063         obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
4064
4065         return vma;
4066
4067 err_unpin_global:
4068         obj->pin_global--;
4069         return vma;
4070 }
4071
4072 void
4073 i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
4074 {
4075         lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
4076
4077         if (WARN_ON(vma->obj->pin_global == 0))
4078                 return;
4079
4080         if (--vma->obj->pin_global == 0)
4081                 vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
4082
4083         /* Bump the LRU to try and avoid premature eviction whilst flipping  */
4084         i915_gem_object_bump_inactive_ggtt(vma->obj);
4085
4086         i915_vma_unpin(vma);
4087 }
4088
4089 /**
4090  * Moves a single object to the CPU read, and possibly write domain.
4091  * @obj: object to act on
4092  * @write: requesting write or read-only access
4093  *
4094  * This function returns when the move is complete, including waiting on
4095  * flushes to occur.
4096  */
4097 int
4098 i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
4099 {
4100         int ret;
4101
4102         lockdep_assert_held(&obj->base.dev->struct_mutex);
4103
4104         ret = i915_gem_object_wait(obj,
4105                                    I915_WAIT_INTERRUPTIBLE |
4106                                    I915_WAIT_LOCKED |
4107                                    (write ? I915_WAIT_ALL : 0),
4108                                    MAX_SCHEDULE_TIMEOUT,
4109                                    NULL);
4110         if (ret)
4111                 return ret;
4112
4113         flush_write_domain(obj, ~I915_GEM_DOMAIN_CPU);
4114
4115         /* Flush the CPU cache if it's still invalid. */
4116         if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
4117                 i915_gem_clflush_object(obj, I915_CLFLUSH_SYNC);
4118                 obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
4119         }
4120
4121         /* It should now be out of any other write domains, and we can update
4122          * the domain values for our changes.
4123          */
4124         GEM_BUG_ON(obj->base.write_domain & ~I915_GEM_DOMAIN_CPU);
4125
4126         /* If we're writing through the CPU, then the GPU read domains will
4127          * need to be invalidated at next use.
4128          */
4129         if (write)
4130                 __start_cpu_write(obj);
4131
4132         return 0;
4133 }
4134
4135 /* Throttle our rendering by waiting until the ring has completed our requests
4136  * emitted over 20 msec ago.
4137  *
4138  * Note that if we were to use the current jiffies each time around the loop,
4139  * we wouldn't escape the function with any frames outstanding if the time to
4140  * render a frame was over 20ms.
4141  *
4142  * This should get us reasonable parallelism between CPU and GPU but also
4143  * relatively low latency when blocking on a particular request to finish.
4144  */
4145 static int
4146 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
4147 {
4148         struct drm_i915_private *dev_priv = to_i915(dev);
4149         struct drm_i915_file_private *file_priv = file->driver_priv;
4150         unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
4151         struct drm_i915_gem_request *request, *target = NULL;
4152         long ret;
4153
4154         /* ABI: return -EIO if already wedged */
4155         if (i915_terminally_wedged(&dev_priv->gpu_error))
4156                 return -EIO;
4157
4158         spin_lock(&file_priv->mm.lock);
4159         list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
4160                 if (time_after_eq(request->emitted_jiffies, recent_enough))
4161                         break;
4162
4163                 if (target) {
4164                         list_del(&target->client_link);
4165                         target->file_priv = NULL;
4166                 }
4167
4168                 target = request;
4169         }
4170         if (target)
4171                 i915_gem_request_get(target);
4172         spin_unlock(&file_priv->mm.lock);
4173
4174         if (target == NULL)
4175                 return 0;
4176
4177         ret = i915_wait_request(target,
4178                                 I915_WAIT_INTERRUPTIBLE,
4179                                 MAX_SCHEDULE_TIMEOUT);
4180         i915_gem_request_put(target);
4181
4182         return ret < 0 ? ret : 0;
4183 }
4184
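/**
 * i915_gem_object_ggtt_pin - pin an object into the global GTT
 * @obj: object to pin
 * @view: the GGTT view to use (NULL for the normal view)
 * @size: minimum size of the resulting binding, in bytes
 * @alignment: required alignment of the binding
 * @flags: PIN_* placement flags
 *
 * Looks up (or creates) the vma for @obj in the global GTT, unbinds it
 * first if it is misplaced for the requested @size/@alignment/@flags, and
 * pins it with PIN_GLOBAL. Returns the pinned vma or an ERR_PTR(). A rough
 * usage sketch (error handling elided, struct_mutex held):
 *
 *	vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *	...
 *	i915_vma_unpin(vma);
 */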
4185 struct i915_vma *
4186 i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
4187                          const struct i915_ggtt_view *view,
4188                          u64 size,
4189                          u64 alignment,
4190                          u64 flags)
4191 {
4192         struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
4193         struct i915_address_space *vm = &dev_priv->ggtt.base;
4194         struct i915_vma *vma;
4195         int ret;
4196
4197         lockdep_assert_held(&obj->base.dev->struct_mutex);
4198
4199         if (!view && flags & PIN_MAPPABLE) {
4200                 /* If the required space is larger than the available
4201                  * aperture, we will not be able to find a slot for the
4202                  * object and unbinding the object now will be in
4203                  * vain. Worse, doing so may cause us to ping-pong
4204                  * the object in and out of the Global GTT and
4205                  * waste a lot of cycles under the mutex.
4206                  */
4207                 if (obj->base.size > dev_priv->ggtt.mappable_end)
4208                         return ERR_PTR(-E2BIG);
4209
4210                 /* If NONBLOCK is set the caller is optimistically
4211                  * trying to cache the full object within the mappable
4212                  * aperture, and *must* have a fallback in place for
4213                  * situations where we cannot bind the object. We
4214                  * can be a little more lax here and use the fallback
4215                  * more often to avoid costly migrations of ourselves
4216                  * and other objects within the aperture.
4217                  *
4218                  * Half-the-aperture is used as a simple heuristic.
4219                  * More interesting would to do search for a free
4220                  * More interesting would be to search for a free
4221                  * That caters for the self-harm case, and with a
4222                  * little more heuristics (e.g. NOFAULT, NOEVICT)
4223                  * we could try to minimise harm to others.
4224                  */
4225                 if (flags & PIN_NONBLOCK &&
4226                     obj->base.size > dev_priv->ggtt.mappable_end / 2)
4227                         return ERR_PTR(-ENOSPC);
4228         }
4229
4230         vma = i915_vma_instance(obj, vm, view);
4231         if (unlikely(IS_ERR(vma)))
4232                 return vma;
4233
4234         if (i915_vma_misplaced(vma, size, alignment, flags)) {
4235                 if (flags & PIN_NONBLOCK) {
4236                         if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
4237                                 return ERR_PTR(-ENOSPC);
4238
4239                         if (flags & PIN_MAPPABLE &&
4240                             vma->fence_size > dev_priv->ggtt.mappable_end / 2)
4241                                 return ERR_PTR(-ENOSPC);
4242                 }
4243
4244                 WARN(i915_vma_is_pinned(vma),
4245                      "bo is already pinned in ggtt with incorrect alignment:"
4246                      " offset=%08x, req.alignment=%llx,"
4247                      " req.map_and_fenceable=%d, vma->map_and_fenceable=%d\n",
4248                      i915_ggtt_offset(vma), alignment,
4249                      !!(flags & PIN_MAPPABLE),
4250                      i915_vma_is_map_and_fenceable(vma));
4251                 ret = i915_vma_unbind(vma);
4252                 if (ret)
4253                         return ERR_PTR(ret);
4254         }
4255
4256         ret = i915_vma_pin(vma, size, alignment, flags | PIN_GLOBAL);
4257         if (ret)
4258                 return ERR_PTR(ret);
4259
4260         return vma;
4261 }
4262
4263 static __always_inline unsigned int __busy_read_flag(unsigned int id)
4264 {
4265         /* Note that we could alias engines in the execbuf API, but
4266          * that would be very unwise as it prevents userspace from
4267          * fine control over engine selection. Ahem.
4268          *
4269          * This should be something like EXEC_MAX_ENGINE instead of
4270          * I915_NUM_ENGINES.
4271          */
4272         BUILD_BUG_ON(I915_NUM_ENGINES > 16);
4273         return 0x10000 << id;
4274 }
4275
4276 static __always_inline unsigned int __busy_write_id(unsigned int id)
4277 {
4278         /* The uABI guarantees an active writer is also amongst the read
4279          * engines. This would be true if we accessed the activity tracking
4280          * under the lock, but as we perform the lookup of the object and
4281          * its activity locklessly we can not guarantee that the last_write
4282          * its activity locklessly we cannot guarantee that the last_write
4283          * last_read - hence we always set both read and write busy for
4284          * last_write.
4285          */
4286         return id | __busy_read_flag(id);
4287 }
4288
4289 static __always_inline unsigned int
4290 __busy_set_if_active(const struct dma_fence *fence,
4291                      unsigned int (*flag)(unsigned int id))
4292 {
4293         struct drm_i915_gem_request *rq;
4294
4295         /* We have to check the current hw status of the fence as the uABI
4296          * guarantees forward progress. We could rely on the idle worker
4297          * to eventually flush us, but to minimise latency just ask the
4298          * hardware.
4299          *
4300          * Note we only report on the status of native fences.
4301          */
4302         if (!dma_fence_is_i915(fence))
4303                 return 0;
4304
4305         /* opencode to_request() in order to avoid const warnings */
4306         rq = container_of(fence, struct drm_i915_gem_request, fence);
4307         if (i915_gem_request_completed(rq))
4308                 return 0;
4309
4310         return flag(rq->engine->uabi_id);
4311 }
4312
4313 static __always_inline unsigned int
4314 busy_check_reader(const struct dma_fence *fence)
4315 {
4316         return __busy_set_if_active(fence, __busy_read_flag);
4317 }
4318
4319 static __always_inline unsigned int
4320 busy_check_writer(const struct dma_fence *fence)
4321 {
4322         if (!fence)
4323                 return 0;
4324
4325         return __busy_set_if_active(fence, __busy_write_id);
4326 }
4327
4328 int
4329 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
4330                     struct drm_file *file)
4331 {
4332         struct drm_i915_gem_busy *args = data;
4333         struct drm_i915_gem_object *obj;
4334         struct reservation_object_list *list;
4335         unsigned int seq;
4336         int err;
4337
4338         err = -ENOENT;
4339         rcu_read_lock();
4340         obj = i915_gem_object_lookup_rcu(file, args->handle);
4341         if (!obj)
4342                 goto out;
4343
4344         /* A discrepancy here is that we do not report the status of
4345          * non-i915 fences, i.e. even though we may report the object as idle,
4346          * a call to set-domain may still stall waiting for foreign rendering.
4347          * This also means that wait-ioctl may report an object as busy,
4348          * where busy-ioctl considers it idle.
4349          *
4350          * We trade the ability to warn of foreign fences to report on which
4351          * i915 engines are active for the object.
4352          *
4353          * Alternatively, we can trade that extra information on read/write
4354          * activity with
4355          *      args->busy =
4356          *              !reservation_object_test_signaled_rcu(obj->resv, true);
4357          * to report the overall busyness. This is what the wait-ioctl does.
4358          *
4359          */
4360 retry:
4361         seq = raw_read_seqcount(&obj->resv->seq);
4362
4363         /* Translate the exclusive fence to the READ *and* WRITE engine */
4364         args->busy = busy_check_writer(rcu_dereference(obj->resv->fence_excl));
4365
4366         /* Translate shared fences to READ set of engines */
4367         list = rcu_dereference(obj->resv->fence);
4368         if (list) {
4369                 unsigned int shared_count = list->shared_count, i;
4370
4371                 for (i = 0; i < shared_count; ++i) {
4372                         struct dma_fence *fence =
4373                                 rcu_dereference(list->shared[i]);
4374
4375                         args->busy |= busy_check_reader(fence);
4376                 }
4377         }
4378
4379         if (args->busy && read_seqcount_retry(&obj->resv->seq, seq))
4380                 goto retry;
4381
4382         err = 0;
4383 out:
4384         rcu_read_unlock();
4385         return err;
4386 }
4387
4388 int
4389 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
4390                         struct drm_file *file_priv)
4391 {
4392         return i915_gem_ring_throttle(dev, file_priv);
4393 }
4394
4395 int
4396 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
4397                        struct drm_file *file_priv)
4398 {
4399         struct drm_i915_private *dev_priv = to_i915(dev);
4400         struct drm_i915_gem_madvise *args = data;
4401         struct drm_i915_gem_object *obj;
4402         int err;
4403
4404         switch (args->madv) {
4405         case I915_MADV_DONTNEED:
4406         case I915_MADV_WILLNEED:
4407             break;
4408         default:
4409             return -EINVAL;
4410         }
4411
4412         obj = i915_gem_object_lookup(file_priv, args->handle);
4413         if (!obj)
4414                 return -ENOENT;
4415
4416         err = mutex_lock_interruptible(&obj->mm.lock);
4417         if (err)
4418                 goto out;
4419
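        /*
         * On platforms that need QUIRK_PIN_SWIZZLED_PAGES, the pages of a
         * tiled object are kept pinned while it is WILLNEED so that their
         * bit-17 swizzle state cannot change beneath us; transfer that pin
         * to match the new madvise value.
         */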
4420         if (i915_gem_object_has_pages(obj) &&
4421             i915_gem_object_is_tiled(obj) &&
4422             dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
4423                 if (obj->mm.madv == I915_MADV_WILLNEED) {
4424                         GEM_BUG_ON(!obj->mm.quirked);
4425                         __i915_gem_object_unpin_pages(obj);
4426                         obj->mm.quirked = false;
4427                 }
4428                 if (args->madv == I915_MADV_WILLNEED) {
4429                         GEM_BUG_ON(obj->mm.quirked);
4430                         __i915_gem_object_pin_pages(obj);
4431                         obj->mm.quirked = true;
4432                 }
4433         }
4434
4435         if (obj->mm.madv != __I915_MADV_PURGED)
4436                 obj->mm.madv = args->madv;
4437
4438         /* if the object is no longer attached, discard its backing storage */
4439         if (obj->mm.madv == I915_MADV_DONTNEED &&
4440             !i915_gem_object_has_pages(obj))
4441                 i915_gem_object_truncate(obj);
4442
4443         args->retained = obj->mm.madv != __I915_MADV_PURGED;
4444         mutex_unlock(&obj->mm.lock);
4445
4446 out:
4447         i915_gem_object_put(obj);
4448         return err;
4449 }
4450
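/*
 * Called when the last GPU write targeting a frontbuffer object retires;
 * flush the frontbuffer tracking so that consumers such as FBC/PSR know the
 * CS rendering has completed.
 */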
4451 static void
4452 frontbuffer_retire(struct i915_gem_active *active,
4453                    struct drm_i915_gem_request *request)
4454 {
4455         struct drm_i915_gem_object *obj =
4456                 container_of(active, typeof(*obj), frontbuffer_write);
4457
4458         intel_fb_obj_flush(obj, ORIGIN_CS);
4459 }
4460
4461 void i915_gem_object_init(struct drm_i915_gem_object *obj,
4462                           const struct drm_i915_gem_object_ops *ops)
4463 {
4464         mutex_init(&obj->mm.lock);
4465
4466         INIT_LIST_HEAD(&obj->vma_list);
4467         INIT_LIST_HEAD(&obj->lut_list);
4468         INIT_LIST_HEAD(&obj->batch_pool_link);
4469
4470         obj->ops = ops;
4471
4472         reservation_object_init(&obj->__builtin_resv);
4473         obj->resv = &obj->__builtin_resv;
4474
4475         obj->frontbuffer_ggtt_origin = ORIGIN_GTT;
4476         init_request_active(&obj->frontbuffer_write, frontbuffer_retire);
4477
4478         obj->mm.madv = I915_MADV_WILLNEED;
4479         INIT_RADIX_TREE(&obj->mm.get_page.radix, GFP_KERNEL | __GFP_NOWARN);
4480         mutex_init(&obj->mm.get_page.lock);
4481
4482         i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
4483 }
4484
4485 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
4486         .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
4487                  I915_GEM_OBJECT_IS_SHRINKABLE,
4488
4489         .get_pages = i915_gem_object_get_pages_gtt,
4490         .put_pages = i915_gem_object_put_pages_gtt,
4491
4492         .pwrite = i915_gem_object_pwrite_gtt,
4493 };
4494
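/*
 * Back the GEM object with a shmemfs file, preferring the driver's private
 * gemfs mount (which may enable transparent huge pages) when it is available.
 */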
4495 static int i915_gem_object_create_shmem(struct drm_device *dev,
4496                                         struct drm_gem_object *obj,
4497                                         size_t size)
4498 {
4499         struct drm_i915_private *i915 = to_i915(dev);
4500         unsigned long flags = VM_NORESERVE;
4501         struct file *filp;
4502
4503         drm_gem_private_object_init(dev, obj, size);
4504
4505         if (i915->mm.gemfs)
4506                 filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
4507                                                  flags);
4508         else
4509                 filp = shmem_file_setup("i915", size, flags);
4510
4511         if (IS_ERR(filp))
4512                 return PTR_ERR(filp);
4513
4514         obj->filp = filp;
4515
4516         return 0;
4517 }
4518
4519 struct drm_i915_gem_object *
4520 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4521 {
4522         struct drm_i915_gem_object *obj;
4523         struct address_space *mapping;
4524         unsigned int cache_level;
4525         gfp_t mask;
4526         int ret;
4527
4528         /* There is a prevalence of the assumption that we fit the object's
4529          * page count inside a 32bit _signed_ variable. Let's document this and
4530          * catch if we ever need to fix it. In the meantime, if you do spot
4531          * such a local variable, please consider fixing!
4532          */
4533         if (size >> PAGE_SHIFT > INT_MAX)
4534                 return ERR_PTR(-E2BIG);
4535
4536         if (overflows_type(size, obj->base.size))
4537                 return ERR_PTR(-E2BIG);
4538
4539         obj = i915_gem_object_alloc(dev_priv);
4540         if (obj == NULL)
4541                 return ERR_PTR(-ENOMEM);
4542
4543         ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
4544         if (ret)
4545                 goto fail;
4546
4547         mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
4548         if (IS_I965GM(dev_priv) || IS_I965G(dev_priv)) {
4549                 /* 965gm cannot relocate objects above 4GiB. */
4550                 mask &= ~__GFP_HIGHMEM;
4551                 mask |= __GFP_DMA32;
4552         }
4553
4554         mapping = obj->base.filp->f_mapping;
4555         mapping_set_gfp_mask(mapping, mask);
4556         GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4557
4558         i915_gem_object_init(obj, &i915_gem_object_ops);
4559
4560         obj->base.write_domain = I915_GEM_DOMAIN_CPU;
4561         obj->base.read_domains = I915_GEM_DOMAIN_CPU;
4562
4563         if (HAS_LLC(dev_priv))
4564                 /* On some devices, we can have the GPU use the LLC (the CPU
4565                  * cache) for about a 10% performance improvement
4566                  * compared to uncached.  Graphics requests other than
4567                  * display scanout are coherent with the CPU in
4568                  * accessing this cache.  This means in this mode we
4569                  * don't need to clflush on the CPU side, and on the
4570                  * GPU side we only need to flush internal caches to
4571                  * get data visible to the CPU.
4572                  *
4573                  * However, we maintain the display planes as UC, and so
4574                  * need to rebind when first used as such.
4575                  */
4576                 cache_level = I915_CACHE_LLC;
4577         else
4578                 cache_level = I915_CACHE_NONE;
4579
4580         i915_gem_object_set_cache_coherency(obj, cache_level);
4581
4582         trace_i915_gem_object_create(obj);
4583
4584         return obj;
4585
4586 fail:
4587         i915_gem_object_free(obj);
4588         return ERR_PTR(ret);
4589 }
4590
4591 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
4592 {
4593         /* If we are the last user of the backing storage (be it shmemfs
4594          * pages or stolen etc), we know that the pages are going to be
4595          * immediately released. In this case, we can then skip copying
4596          * back the contents from the GPU.
4597          */
4598
4599         if (obj->mm.madv != I915_MADV_WILLNEED)
4600                 return false;
4601
4602         if (obj->base.filp == NULL)
4603                 return true;
4604
4605         /* At first glance, this looks racy, but then again so would be
4606          * userspace racing mmap against close. However, the first external
4607          * reference to the filp can only be obtained through the
4608          * i915_gem_mmap_ioctl() which safeguards us against the user
4609          * acquiring such a reference whilst we are in the middle of
4610          * freeing the object.
4611          */
4612         return atomic_long_read(&obj->base.filp->f_count) == 1;
4613 }
4614
4615 static void __i915_gem_free_objects(struct drm_i915_private *i915,
4616                                     struct llist_node *freed)
4617 {
4618         struct drm_i915_gem_object *obj, *on;
4619
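        /*
         * Unbinding the objects may require touching the device (e.g. to
         * clear their GGTT entries and fence registers), so hold a runtime
         * pm wakeref for the duration of the loop.
         */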
4620         intel_runtime_pm_get(i915);
4621         llist_for_each_entry_safe(obj, on, freed, freed) {
4622                 struct i915_vma *vma, *vn;
4623
4624                 trace_i915_gem_object_destroy(obj);
4625
4626                 mutex_lock(&i915->drm.struct_mutex);
4627
4628                 GEM_BUG_ON(i915_gem_object_is_active(obj));
4629                 list_for_each_entry_safe(vma, vn,
4630                                          &obj->vma_list, obj_link) {
4631                         GEM_BUG_ON(i915_vma_is_active(vma));
4632                         vma->flags &= ~I915_VMA_PIN_MASK;
4633                         i915_vma_close(vma);
4634                 }
4635                 GEM_BUG_ON(!list_empty(&obj->vma_list));
4636                 GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
4637
4638                 /* This serializes freeing with the shrinker. Since the free
4639                  * is delayed, first by RCU then by the workqueue, we want the
4640                  * shrinker to be able to free pages of unreferenced objects,
4641                  * or else we may oom whilst there are plenty of deferred
4642                  * freed objects.
4643                  */
4644                 if (i915_gem_object_has_pages(obj)) {
4645                         spin_lock(&i915->mm.obj_lock);
4646                         list_del_init(&obj->mm.link);
4647                         spin_unlock(&i915->mm.obj_lock);
4648                 }
4649
4650                 mutex_unlock(&i915->drm.struct_mutex);
4651
4652                 GEM_BUG_ON(obj->bind_count);
4653                 GEM_BUG_ON(obj->userfault_count);
4654                 GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
4655                 GEM_BUG_ON(!list_empty(&obj->lut_list));
4656
4657                 if (obj->ops->release)
4658                         obj->ops->release(obj);
4659
4660                 if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
4661                         atomic_set(&obj->mm.pages_pin_count, 0);
4662                 __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
4663                 GEM_BUG_ON(i915_gem_object_has_pages(obj));
4664
4665                 if (obj->base.import_attach)
4666                         drm_prime_gem_destroy(&obj->base, NULL);
4667
4668                 reservation_object_fini(&obj->__builtin_resv);
4669                 drm_gem_object_release(&obj->base);
4670                 i915_gem_info_remove_obj(i915, obj->base.size);
4671
4672                 kfree(obj->bit_17);
4673                 i915_gem_object_free(obj);
4674
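                /* More objects to free? Give the scheduler a chance to run. */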
4675                 if (on)
4676                         cond_resched();
4677         }
4678         intel_runtime_pm_put(i915);
4679 }
4680
4681 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
4682 {
4683         struct llist_node *freed;
4684
4685         /* Free the oldest, most stale object to keep the free_list short */
4686         freed = NULL;
4687         if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
4688                 /* Only one consumer of llist_del_first() allowed */
4689                 spin_lock(&i915->mm.free_lock);
4690                 freed = llist_del_first(&i915->mm.free_list);
4691                 spin_unlock(&i915->mm.free_lock);
4692         }
4693         if (unlikely(freed)) {
4694                 freed->next = NULL;
4695                 __i915_gem_free_objects(i915, freed);
4696         }
4697 }
4698
4699 static void __i915_gem_free_work(struct work_struct *work)
4700 {
4701         struct drm_i915_private *i915 =
4702                 container_of(work, struct drm_i915_private, mm.free_work);
4703         struct llist_node *freed;
4704
4705         /* All file-owned VMA should have been released by this point through
4706          * i915_gem_close_object(), or earlier by i915_gem_context_close().
4707          * However, the object may also be bound into the global GTT (e.g.
4708          * older GPUs without per-process support, or for direct access through
4709          * the GTT either for the user or for scanout). Those VMA still need to
4710          * unbound now.
4711          */
4712
4713         spin_lock(&i915->mm.free_lock);
4714         while ((freed = llist_del_all(&i915->mm.free_list))) {
4715                 spin_unlock(&i915->mm.free_lock);
4716
4717                 __i915_gem_free_objects(i915, freed);
4718                 if (need_resched())
4719                         return;
4720
4721                 spin_lock(&i915->mm.free_lock);
4722         }
4723         spin_unlock(&i915->mm.free_lock);
4724 }
4725
4726 static void __i915_gem_free_object_rcu(struct rcu_head *head)
4727 {
4728         struct drm_i915_gem_object *obj =
4729                 container_of(head, typeof(*obj), rcu);
4730         struct drm_i915_private *i915 = to_i915(obj->base.dev);
4731
4732         /* We can't simply use call_rcu() from i915_gem_free_object()
4733          * as we need to block whilst unbinding, and the call_rcu
4734          * task may be called from softirq context. So we take a
4735          * detour through a worker.
4736          */
4737         if (llist_add(&obj->freed, &i915->mm.free_list))
4738                 schedule_work(&i915->mm.free_work);
4739 }
4740
4741 void i915_gem_free_object(struct drm_gem_object *gem_obj)
4742 {
4743         struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
4744
4745         if (obj->mm.quirked)
4746                 __i915_gem_object_unpin_pages(obj);
4747
4748         if (discard_backing_storage(obj))
4749                 obj->mm.madv = I915_MADV_DONTNEED;
4750
4751         /* Before we free the object, make sure any pure RCU-only
4752          * read-side critical sections are complete, e.g.
4753          * i915_gem_busy_ioctl(). For the corresponding synchronized
4754          * lookup see i915_gem_object_lookup_rcu().
4755          */
4756         call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
4757 }
4758
4759 void __i915_gem_object_release_unless_active(struct drm_i915_gem_object *obj)
4760 {
4761         lockdep_assert_held(&obj->base.dev->struct_mutex);
4762
4763         if (!i915_gem_object_has_active_reference(obj) &&
4764             i915_gem_object_is_active(obj))
4765                 i915_gem_object_set_active_reference(obj);
4766         else
4767                 i915_gem_object_put(obj);
4768 }
4769
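/*
 * Check that every engine has retired all of its requests and that the last
 * context it ran was the kernel context, i.e. the GPU has been parked on the
 * kernel context.
 */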
4770 static void assert_kernel_context_is_current(struct drm_i915_private *i915)
4771 {
4772         struct i915_gem_context *kernel_context = i915->kernel_context;
4773         struct intel_engine_cs *engine;
4774         enum intel_engine_id id;
4775
4776         for_each_engine(engine, i915, id) {
4777                 GEM_BUG_ON(__i915_gem_active_peek(&engine->timeline->last_request));
4778                 GEM_BUG_ON(engine->last_retired_context != kernel_context);
4779         }
4780 }
4781
4782 void i915_gem_sanitize(struct drm_i915_private *i915)
4783 {
4784         if (i915_terminally_wedged(&i915->gpu_error)) {
4785                 mutex_lock(&i915->drm.struct_mutex);
4786                 i915_gem_unset_wedged(i915);
4787                 mutex_unlock(&i915->drm.struct_mutex);
4788         }
4789
4790         /*
4791          * If we inherit context state from the BIOS or earlier occupants
4792          * of the GPU, the GPU may be in an inconsistent state when we
4793          * try to take over. The only way to remove the earlier state
4794          * is by resetting. However, resetting on earlier gen is tricky as
4795          * it may impact the display and we are uncertain about the stability
4796          * of the reset there, otherwise this could be applied to even earlier gens.
4797          */
4798         if (INTEL_GEN(i915) >= 5) {
4799                 int reset = intel_gpu_reset(i915, ALL_ENGINES);
4800                 WARN_ON(reset && reset != -ENODEV);
4801         }
4802 }
4803
4804 int i915_gem_suspend(struct drm_i915_private *dev_priv)
4805 {
4806         struct drm_device *dev = &dev_priv->drm;
4807         int ret;
4808
4809         intel_runtime_pm_get(dev_priv);
4810         intel_suspend_gt_powersave(dev_priv);
4811
4812         mutex_lock(&dev->struct_mutex);
4813
4814         /* We have to flush all the executing contexts to main memory so
4815          * that they can be saved in the hibernation image. To ensure the last
4816          * context image is coherent, we have to switch away from it. That
4817          * leaves the dev_priv->kernel_context still active when
4818          * we actually suspend, and its image in memory may not match the GPU
4819          * state. Fortunately, the kernel_context is disposable and we do
4820          * not rely on its state.
4821          */
4822         if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
4823                 ret = i915_gem_switch_to_kernel_context(dev_priv);
4824                 if (ret)
4825                         goto err_unlock;
4826
4827                 ret = i915_gem_wait_for_idle(dev_priv,
4828                                              I915_WAIT_INTERRUPTIBLE |
4829                                              I915_WAIT_LOCKED);
4830                 if (ret && ret != -EIO)
4831                         goto err_unlock;
4832
4833                 assert_kernel_context_is_current(dev_priv);
4834         }
4835         i915_gem_contexts_lost(dev_priv);
4836         mutex_unlock(&dev->struct_mutex);
4837
4838         intel_guc_suspend(dev_priv);
4839
4840         cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
4841         cancel_delayed_work_sync(&dev_priv->gt.retire_work);
4842
4843         /* As the idle_work will rearm itself if it detects a race, play safe and
4844          * repeat the flush until it is definitely idle.
4845          */
4846         drain_delayed_work(&dev_priv->gt.idle_work);
4847
4848         /* Assert that we successfully flushed all the work and
4849          * reset the GPU back to its idle, low power state.
4850          */
4851         WARN_ON(dev_priv->gt.awake);
4852         if (WARN_ON(!intel_engines_are_idle(dev_priv)))
4853                 i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
4854
4855         /*
4856          * Neither the BIOS, ourselves nor any other kernel
4857          * expects the system to be in execlists mode on startup,
4858          * so we need to reset the GPU back to legacy mode. And the only
4859          * known way to disable logical contexts is through a GPU reset.
4860          *
4861          * So in order to leave the system in a known default configuration,
4862          * always reset the GPU upon unload and suspend. Afterwards we then
4863          * clean up the GEM state tracking, flushing off the requests and
4864          * leaving the system in a known idle state.
4865          *
4866          * Note that it is of the utmost importance that the GPU is idle and
4867          * all stray writes are flushed *before* we dismantle the backing
4868          * storage for the pinned objects.
4869          *
4870          * However, since we are uncertain that resetting the GPU on older
4871          * machines is a good idea, we don't - just in case it leaves the
4872          * machine in an unusable condition.
4873          */
4874         i915_gem_sanitize(dev_priv);
4875
4876         intel_runtime_pm_put(dev_priv);
4877         return 0;
4878
4879 err_unlock:
4880         mutex_unlock(&dev->struct_mutex);
4881         intel_runtime_pm_put(dev_priv);
4882         return ret;
4883 }
4884
4885 void i915_gem_resume(struct drm_i915_private *i915)
4886 {
4887         WARN_ON(i915->gt.awake);
4888
4889         mutex_lock(&i915->drm.struct_mutex);
4890         intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
4891
4892         i915_gem_restore_gtt_mappings(i915);
4893         i915_gem_restore_fences(i915);
4894
4895         /*
4896          * As we didn't flush the kernel context before suspend, we cannot
4897          * guarantee that the context image is complete. So let's just reset
4898          * it and start again.
4899          */
4900         i915->gt.resume(i915);
4901
4902         if (i915_gem_init_hw(i915))
4903                 goto err_wedged;
4904
4905         intel_guc_resume(i915);
4906
4907         /* Always reload a context for powersaving. */
4908         if (i915_gem_switch_to_kernel_context(i915))
4909                 goto err_wedged;
4910
4911 out_unlock:
4912         intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
4913         mutex_unlock(&i915->drm.struct_mutex);
4914         return;
4915
4916 err_wedged:
4917         if (!i915_terminally_wedged(&i915->gpu_error)) {
4918                 DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
4919                 i915_gem_set_wedged(i915);
4920         }
4921         goto out_unlock;
4922 }
4923
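/*
 * If bit-6 swizzling is in use, enable tiled surface swizzling in the display
 * arbiter and program the gen-specific memory arbiter swizzle mode.
 */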
4924 void i915_gem_init_swizzling(struct drm_i915_private *dev_priv)
4925 {
4926         if (INTEL_GEN(dev_priv) < 5 ||
4927             dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
4928                 return;
4929
4930         I915_WRITE(DISP_ARB_CTL, I915_READ(DISP_ARB_CTL) |
4931                                  DISP_TILE_SURFACE_SWIZZLING);
4932
4933         if (IS_GEN5(dev_priv))
4934                 return;
4935
4936         I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_SWZCTL);
4937         if (IS_GEN6(dev_priv))
4938                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
4939         else if (IS_GEN7(dev_priv))
4940                 I915_WRITE(ARB_MODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
4941         else if (IS_GEN8(dev_priv))
4942                 I915_WRITE(GAMTARBMODE, _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
4943         else
4944                 BUG();
4945 }
4946
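/* Zero the ring registers so an unused ring reads back as idle (head == tail). */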
4947 static void init_unused_ring(struct drm_i915_private *dev_priv, u32 base)
4948 {
4949         I915_WRITE(RING_CTL(base), 0);
4950         I915_WRITE(RING_HEAD(base), 0);
4951         I915_WRITE(RING_TAIL(base), 0);
4952         I915_WRITE(RING_START(base), 0);
4953 }
4954
4955 static void init_unused_rings(struct drm_i915_private *dev_priv)
4956 {
4957         if (IS_I830(dev_priv)) {
4958                 init_unused_ring(dev_priv, PRB1_BASE);
4959                 init_unused_ring(dev_priv, SRB0_BASE);
4960                 init_unused_ring(dev_priv, SRB1_BASE);
4961                 init_unused_ring(dev_priv, SRB2_BASE);
4962                 init_unused_ring(dev_priv, SRB3_BASE);
4963         } else if (IS_GEN2(dev_priv)) {
4964                 init_unused_ring(dev_priv, SRB0_BASE);
4965                 init_unused_ring(dev_priv, SRB1_BASE);
4966         } else if (IS_GEN3(dev_priv)) {
4967                 init_unused_ring(dev_priv, PRB1_BASE);
4968                 init_unused_ring(dev_priv, PRB2_BASE);
4969         }
4970 }
4971
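/* Re-run hardware initialisation for every engine, stopping on the first error. */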
4972 static int __i915_gem_restart_engines(void *data)
4973 {
4974         struct drm_i915_private *i915 = data;
4975         struct intel_engine_cs *engine;
4976         enum intel_engine_id id;
4977         int err;
4978
4979         for_each_engine(engine, i915, id) {
4980                 err = engine->init_hw(engine);
4981                 if (err)
4982                         return err;
4983         }
4984
4985         return 0;
4986 }
4987
4988 int i915_gem_init_hw(struct drm_i915_private *dev_priv)
4989 {
4990         int ret;
4991
4992         dev_priv->gt.last_init_time = ktime_get();
4993
4994         /* Double layer security blanket, see i915_gem_init() */
4995         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
4996
4997         if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
4998                 I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
4999
5000         if (IS_HASWELL(dev_priv))
5001                 I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
5002                            LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
5003
5004         if (HAS_PCH_NOP(dev_priv)) {
5005                 if (IS_IVYBRIDGE(dev_priv)) {
5006                         u32 temp = I915_READ(GEN7_MSG_CTL);
5007                         temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
5008                         I915_WRITE(GEN7_MSG_CTL, temp);
5009                 } else if (INTEL_GEN(dev_priv) >= 7) {
5010                         u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
5011                         temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
5012                         I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
5013                 }
5014         }
5015
5016         i915_gem_init_swizzling(dev_priv);
5017
5018         /*
5019          * At least 830 can leave some of the unused rings
5020          * "active" (i.e. head != tail) after resume, which
5021          * will prevent C3 entry. Make sure all unused rings
5022          * are totally idle.
5023          */
5024         init_unused_rings(dev_priv);
5025
5026         BUG_ON(!dev_priv->kernel_context);
5027         if (i915_terminally_wedged(&dev_priv->gpu_error)) {
5028                 ret = -EIO;
5029                 goto out;
5030         }
5031
5032         ret = i915_ppgtt_init_hw(dev_priv);
5033         if (ret) {
5034                 DRM_ERROR("PPGTT enable HW failed %d\n", ret);
5035                 goto out;
5036         }
5037
5038         /* We can't enable contexts until all firmware is loaded */
5039         ret = intel_uc_init_hw(dev_priv);
5040         if (ret)
5041                 goto out;
5042
5043         intel_mocs_init_l3cc_table(dev_priv);
5044
5045         /* Only when the HW is re-initialised, can we replay the requests */
5046         ret = __i915_gem_restart_engines(dev_priv);
5047 out:
5048         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5049         return ret;
5050 }
5051
5052 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
5053 {
5054         struct i915_gem_context *ctx;
5055         struct intel_engine_cs *engine;
5056         enum intel_engine_id id;
5057         int err;
5058
5059         /*
5060          * As we reset the GPU during very early sanitisation, the current
5061          * register state on the GPU should reflect its default values.
5062          * We load a context onto the hw (with restore-inhibit), then switch
5063          * over to a second context to save that default register state. We
5064          * can then prime every new context with that state so they all start
5065          * from the same default HW values.
5066          */
5067
5068         ctx = i915_gem_context_create_kernel(i915, 0);
5069         if (IS_ERR(ctx))
5070                 return PTR_ERR(ctx);
5071
5072         for_each_engine(engine, i915, id) {
5073                 struct drm_i915_gem_request *rq;
5074
5075                 rq = i915_gem_request_alloc(engine, ctx);
5076                 if (IS_ERR(rq)) {
5077                         err = PTR_ERR(rq);
5078                         goto out_ctx;
5079                 }
5080
5081                 err = 0;
5082                 if (engine->init_context)
5083                         err = engine->init_context(rq);
5084
5085                 __i915_add_request(rq, true);
5086                 if (err)
5087                         goto err_active;
5088         }
5089
5090         err = i915_gem_switch_to_kernel_context(i915);
5091         if (err)
5092                 goto err_active;
5093
5094         err = i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED);
5095         if (err)
5096                 goto err_active;
5097
5098         assert_kernel_context_is_current(i915);
5099
5100         for_each_engine(engine, i915, id) {
5101                 struct i915_vma *state;
5102
5103                 state = ctx->engine[id].state;
5104                 if (!state)
5105                         continue;
5106
5107                 /*
5108                  * As we will hold a reference to the logical state, it will
5109                  * not be torn down with the context, and importantly the
5110                  * object will hold onto its vma (making it possible for a
5111                  * stray GTT write to corrupt our defaults). Unmap the vma
5112                  * from the GTT to prevent such accidents and reclaim the
5113                  * space.
5114                  */
5115                 err = i915_vma_unbind(state);
5116                 if (err)
5117                         goto err_active;
5118
5119                 err = i915_gem_object_set_to_cpu_domain(state->obj, false);
5120                 if (err)
5121                         goto err_active;
5122
5123                 engine->default_state = i915_gem_object_get(state->obj);
5124         }
5125
5126         if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
5127                 unsigned int found = intel_engines_has_context_isolation(i915);
5128
5129                 /*
5130                  * Make sure that classes with multiple engine instances all
5131                  * share the same basic configuration.
5132                  */
5133                 for_each_engine(engine, i915, id) {
5134                         unsigned int bit = BIT(engine->uabi_class);
5135                         unsigned int expected = engine->default_state ? bit : 0;
5136
5137                         if ((found & bit) != expected) {
5138                                 DRM_ERROR("mismatching default context state for class %d on engine %s\n",
5139                                           engine->uabi_class, engine->name);
5140                         }
5141                 }
5142         }
5143
5144 out_ctx:
5145         i915_gem_context_set_closed(ctx);
5146         i915_gem_context_put(ctx);
5147         return err;
5148
5149 err_active:
5150         /*
5151          * If we have to abandon now, we expect the engines to be idle
5152          * and ready to be torn-down. First try to flush any remaining
5153          * request, ensure we are pointing at the kernel context and
5154          * then remove it.
5155          */
5156         if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
5157                 goto out_ctx;
5158
5159         if (WARN_ON(i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED)))
5160                 goto out_ctx;
5161
5162         i915_gem_contexts_lost(i915);
5163         goto out_ctx;
5164 }
5165
5166 int i915_gem_init(struct drm_i915_private *dev_priv)
5167 {
5168         int ret;
5169
5170         /*
5171          * We need to fall back to 4K pages since GVT GTT handling doesn't
5172          * support huge page entries - we will need to check whether the
5173          * hypervisor mm can support huge guest pages or just do the emulation in GVT.
5174          */
5175         if (intel_vgpu_active(dev_priv))
5176                 mkwrite_device_info(dev_priv)->page_sizes =
5177                         I915_GTT_PAGE_SIZE_4K;
5178
5179         dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
5180
5181         if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
5182                 dev_priv->gt.resume = intel_lr_context_resume;
5183                 dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
5184         } else {
5185                 dev_priv->gt.resume = intel_legacy_submission_resume;
5186                 dev_priv->gt.cleanup_engine = intel_engine_cleanup;
5187         }
5188
5189         ret = i915_gem_init_userptr(dev_priv);
5190         if (ret)
5191                 return ret;
5192
5193         ret = intel_uc_init_wq(dev_priv);
5194         if (ret)
5195                 return ret;
5196
5197         /* This is just a security blanket to placate dragons.
5198          * On some systems, we very sporadically observe that the first TLBs
5199          * used by the CS may be stale, despite us poking the TLB reset. If
5200          * we hold the forcewake during initialisation these problems
5201          * just magically go away.
5202          */
5203         mutex_lock(&dev_priv->drm.struct_mutex);
5204         intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
5205
5206         ret = i915_gem_init_ggtt(dev_priv);
5207         if (ret) {
5208                 GEM_BUG_ON(ret == -EIO);
5209                 goto err_unlock;
5210         }
5211
5212         ret = i915_gem_contexts_init(dev_priv);
5213         if (ret) {
5214                 GEM_BUG_ON(ret == -EIO);
5215                 goto err_ggtt;
5216         }
5217
5218         ret = intel_engines_init(dev_priv);
5219         if (ret) {
5220                 GEM_BUG_ON(ret == -EIO);
5221                 goto err_context;
5222         }
5223
5224         intel_init_gt_powersave(dev_priv);
5225
5226         ret = intel_uc_init(dev_priv);
5227         if (ret)
5228                 goto err_pm;
5229
5230         ret = i915_gem_init_hw(dev_priv);
5231         if (ret)
5232                 goto err_uc_init;
5233
5234         /*
5235          * Despite its name, intel_init_clock_gating applies display
5236          * clock gating workarounds, GT mmio workarounds and the occasional
5237          * GT power context workaround. Worse, sometimes it includes a context
5238          * register workaround which we need to apply before we record the
5239          * default HW state for all contexts.
5240          *
5241          * FIXME: break up the workarounds and apply them at the right time!
5242          */
5243         intel_init_clock_gating(dev_priv);
5244
5245         ret = __intel_engines_record_defaults(dev_priv);
5246         if (ret)
5247                 goto err_init_hw;
5248
5249         if (i915_inject_load_failure()) {
5250                 ret = -ENODEV;
5251                 goto err_init_hw;
5252         }
5253
5254         if (i915_inject_load_failure()) {
5255                 ret = -EIO;
5256                 goto err_init_hw;
5257         }
5258
5259         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5260         mutex_unlock(&dev_priv->drm.struct_mutex);
5261
5262         return 0;
5263
5264         /*
5265          * Unwinding is complicated by the fact that we want to handle -EIO
5266          * to mean disable GPU submission but keep KMS alive. We want to mark
5267          * the HW as irreversibly wedged, but keep enough state around that the
5268          * driver doesn't explode during runtime.
5269          */
5270 err_init_hw:
5271         i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED);
5272         i915_gem_contexts_lost(dev_priv);
5273         intel_uc_fini_hw(dev_priv);
5274 err_uc_init:
5275         intel_uc_fini(dev_priv);
5276 err_pm:
5277         if (ret != -EIO) {
5278                 intel_cleanup_gt_powersave(dev_priv);
5279                 i915_gem_cleanup_engines(dev_priv);
5280         }
5281 err_context:
5282         if (ret != -EIO)
5283                 i915_gem_contexts_fini(dev_priv);
5284 err_ggtt:
5285 err_unlock:
5286         intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
5287         mutex_unlock(&dev_priv->drm.struct_mutex);
5288
5289         intel_uc_fini_wq(dev_priv);
5290
5291         if (ret != -EIO)
5292                 i915_gem_cleanup_userptr(dev_priv);
5293
5294         if (ret == -EIO) {
5295                 /*
5296                  * Allow engine initialisation to fail by marking the GPU as
5297                  * wedged. But we only want to do this where the GPU is angry;
5298                  * for all other failures, such as an allocation failure, bail.
5299                  */
5300                 if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
5301                         DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
5302                         i915_gem_set_wedged(dev_priv);
5303                 }
5304                 ret = 0;
5305         }
5306
5307         i915_gem_drain_freed_objects(dev_priv);
5308         return ret;
5309 }
5310
5311 void i915_gem_init_mmio(struct drm_i915_private *i915)
5312 {
5313         i915_gem_sanitize(i915);
5314 }
5315
5316 void
5317 i915_gem_cleanup_engines(struct drm_i915_private *dev_priv)
5318 {
5319         struct intel_engine_cs *engine;
5320         enum intel_engine_id id;
5321
5322         for_each_engine(engine, dev_priv, id)
5323                 dev_priv->gt.cleanup_engine(engine);
5324 }
5325
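/*
 * Determine how many fence registers the platform provides (8, 16 or 32
 * depending on generation, or the number granted by the host when running as
 * a vGPU guest), then reset them all to a known state.
 */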
5326 void
5327 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
5328 {
5329         int i;
5330
5331         if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
5332             !IS_CHERRYVIEW(dev_priv))
5333                 dev_priv->num_fence_regs = 32;
5334         else if (INTEL_INFO(dev_priv)->gen >= 4 ||
5335                  IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
5336                  IS_G33(dev_priv) || IS_PINEVIEW(dev_priv))
5337                 dev_priv->num_fence_regs = 16;
5338         else
5339                 dev_priv->num_fence_regs = 8;
5340
5341         if (intel_vgpu_active(dev_priv))
5342                 dev_priv->num_fence_regs =
5343                                 I915_READ(vgtif_reg(avail_rs.fence_num));
5344
5345         /* Initialize fence registers to zero */
5346         for (i = 0; i < dev_priv->num_fence_regs; i++) {
5347                 struct drm_i915_fence_reg *fence = &dev_priv->fence_regs[i];
5348
5349                 fence->i915 = dev_priv;
5350                 fence->id = i;
5351                 list_add_tail(&fence->link, &dev_priv->mm.fence_list);
5352         }
5353         i915_gem_restore_fences(dev_priv);
5354
5355         i915_gem_detect_bit_6_swizzle(dev_priv);
5356 }
5357
5358 static void i915_gem_init__mm(struct drm_i915_private *i915)
5359 {
5360         spin_lock_init(&i915->mm.object_stat_lock);
5361         spin_lock_init(&i915->mm.obj_lock);
5362         spin_lock_init(&i915->mm.free_lock);
5363
5364         init_llist_head(&i915->mm.free_list);
5365
5366         INIT_LIST_HEAD(&i915->mm.unbound_list);
5367         INIT_LIST_HEAD(&i915->mm.bound_list);
5368         INIT_LIST_HEAD(&i915->mm.fence_list);
5369         INIT_LIST_HEAD(&i915->mm.userfault_list);
5370
5371         INIT_WORK(&i915->mm.free_work, __i915_gem_free_work);
5372 }
5373
5374 int
5375 i915_gem_load_init(struct drm_i915_private *dev_priv)
5376 {
5377         int err = -ENOMEM;
5378
5379         dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
5380         if (!dev_priv->objects)
5381                 goto err_out;
5382
5383         dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
5384         if (!dev_priv->vmas)
5385                 goto err_objects;
5386
5387         dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
5388         if (!dev_priv->luts)
5389                 goto err_vmas;
5390
5391         dev_priv->requests = KMEM_CACHE(drm_i915_gem_request,
5392                                         SLAB_HWCACHE_ALIGN |
5393                                         SLAB_RECLAIM_ACCOUNT |
5394                                         SLAB_TYPESAFE_BY_RCU);
5395         if (!dev_priv->requests)
5396                 goto err_luts;
5397
5398         dev_priv->dependencies = KMEM_CACHE(i915_dependency,
5399                                             SLAB_HWCACHE_ALIGN |
5400                                             SLAB_RECLAIM_ACCOUNT);
5401         if (!dev_priv->dependencies)
5402                 goto err_requests;
5403
5404         dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
5405         if (!dev_priv->priorities)
5406                 goto err_dependencies;
5407
5408         mutex_lock(&dev_priv->drm.struct_mutex);
5409         INIT_LIST_HEAD(&dev_priv->gt.timelines);
5410         err = i915_gem_timeline_init__global(dev_priv);
5411         mutex_unlock(&dev_priv->drm.struct_mutex);
5412         if (err)
5413                 goto err_priorities;
5414
5415         i915_gem_init__mm(dev_priv);
5416
5417         INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
5418                           i915_gem_retire_work_handler);
5419         INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
5420                           i915_gem_idle_work_handler);
5421         init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
5422         init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
5423
5424         atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
5425
5426         spin_lock_init(&dev_priv->fb_tracking.lock);
5427
5428         err = i915_gemfs_init(dev_priv);
5429         if (err)
5430                 DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
5431
5432         return 0;
5433
5434 err_priorities:
5435         kmem_cache_destroy(dev_priv->priorities);
5436 err_dependencies:
5437         kmem_cache_destroy(dev_priv->dependencies);
5438 err_requests:
5439         kmem_cache_destroy(dev_priv->requests);
5440 err_luts:
5441         kmem_cache_destroy(dev_priv->luts);
5442 err_vmas:
5443         kmem_cache_destroy(dev_priv->vmas);
5444 err_objects:
5445         kmem_cache_destroy(dev_priv->objects);
5446 err_out:
5447         return err;
5448 }
5449
5450 void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
5451 {
5452         i915_gem_drain_freed_objects(dev_priv);
5453         WARN_ON(!llist_empty(&dev_priv->mm.free_list));
5454         WARN_ON(dev_priv->mm.object_count);
5455
5456         mutex_lock(&dev_priv->drm.struct_mutex);
5457         i915_gem_timeline_fini(&dev_priv->gt.global_timeline);
5458         WARN_ON(!list_empty(&dev_priv->gt.timelines));
5459         mutex_unlock(&dev_priv->drm.struct_mutex);
5460
5461         kmem_cache_destroy(dev_priv->priorities);
5462         kmem_cache_destroy(dev_priv->dependencies);
5463         kmem_cache_destroy(dev_priv->requests);
5464         kmem_cache_destroy(dev_priv->luts);
5465         kmem_cache_destroy(dev_priv->vmas);
5466         kmem_cache_destroy(dev_priv->objects);
5467
5468         /* And ensure that our TYPESAFE_BY_RCU slabs are truly destroyed */
5469         rcu_barrier();
5470
5471         i915_gemfs_fini(dev_priv);
5472 }
5473
5474 int i915_gem_freeze(struct drm_i915_private *dev_priv)
5475 {
5476         /* Discard all purgeable objects, let userspace recover those as
5477          * required after resuming.
5478          */
5479         i915_gem_shrink_all(dev_priv);
5480
5481         return 0;
5482 }
5483
5484 int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
5485 {
5486         struct drm_i915_gem_object *obj;
5487         struct list_head *phases[] = {
5488                 &dev_priv->mm.unbound_list,
5489                 &dev_priv->mm.bound_list,
5490                 NULL
5491         }, **p;
5492
5493         /* Called just before we write the hibernation image.
5494          *
5495          * We need to update the domain tracking to reflect that the CPU
5496          * will be accessing all the pages to create and restore from the
5497          * hibernation, and so upon restoration those pages will be in the
5498          * CPU domain.
5499          *
5500          * To make sure the hibernation image contains the latest state,
5501          * we update that state just before writing out the image.
5502          *
5503          * To try and reduce the hibernation image, we manually shrink
5504          * the objects as well, see i915_gem_freeze()
5505          */
5506
5507         i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
5508         i915_gem_drain_freed_objects(dev_priv);
5509
5510         spin_lock(&dev_priv->mm.obj_lock);
5511         for (p = phases; *p; p++) {
5512                 list_for_each_entry(obj, *p, mm.link)
5513                         __start_cpu_write(obj);
5514         }
5515         spin_unlock(&dev_priv->mm.obj_lock);
5516
5517         return 0;
5518 }
5519
5520 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
5521 {
5522         struct drm_i915_file_private *file_priv = file->driver_priv;
5523         struct drm_i915_gem_request *request;
5524
5525         /* Clean up our request list when the client is going away, so that
5526          * later retire_requests won't dereference our soon-to-be-gone
5527          * file_priv.
5528          */
5529         spin_lock(&file_priv->mm.lock);
5530         list_for_each_entry(request, &file_priv->mm.request_list, client_link)
5531                 request->file_priv = NULL;
5532         spin_unlock(&file_priv->mm.lock);
5533 }
5534
5535 int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
5536 {
5537         struct drm_i915_file_private *file_priv;
5538         int ret;
5539
5540         DRM_DEBUG("\n");
5541
5542         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
5543         if (!file_priv)
5544                 return -ENOMEM;
5545
5546         file->driver_priv = file_priv;
5547         file_priv->dev_priv = i915;
5548         file_priv->file = file;
5549
5550         spin_lock_init(&file_priv->mm.lock);
5551         INIT_LIST_HEAD(&file_priv->mm.request_list);
5552
5553         file_priv->bsd_engine = -1;
5554
5555         ret = i915_gem_context_open(i915, file);
5556         if (ret)
5557                 kfree(file_priv);
5558
5559         return ret;
5560 }
5561
5562 /**
5563  * i915_gem_track_fb - update frontbuffer tracking
5564  * @old: current GEM buffer for the frontbuffer slots
5565  * @new: new GEM buffer for the frontbuffer slots
5566  * @frontbuffer_bits: bitmask of frontbuffer slots
5567  *
5568  * This updates the frontbuffer tracking bits @frontbuffer_bits by clearing them
5569  * from @old and setting them in @new. Both @old and @new can be NULL.
5570  */
5571 void i915_gem_track_fb(struct drm_i915_gem_object *old,
5572                        struct drm_i915_gem_object *new,
5573                        unsigned frontbuffer_bits)
5574 {
5575         /* Control of individual bits within the mask is guarded by
5576          * the owning plane->mutex, i.e. we can never see concurrent
5577          * manipulation of individual bits. But since the bitfield as a whole
5578          * is updated using RMW, we need to use atomics in order to update
5579          * the bits.
5580          */
5581         BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
5582                      sizeof(atomic_t) * BITS_PER_BYTE);
5583
5584         if (old) {
5585                 WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
5586                 atomic_andnot(frontbuffer_bits, &old->frontbuffer_bits);
5587         }
5588
5589         if (new) {
5590                 WARN_ON(atomic_read(&new->frontbuffer_bits) & frontbuffer_bits);
5591                 atomic_or(frontbuffer_bits, &new->frontbuffer_bits);
5592         }
5593 }
5594
5595 /* Allocate a new GEM object and fill it with the supplied data */
5596 struct drm_i915_gem_object *
5597 i915_gem_object_create_from_data(struct drm_i915_private *dev_priv,
5598                                  const void *data, size_t size)
5599 {
5600         struct drm_i915_gem_object *obj;
5601         struct file *file;
5602         size_t offset;
5603         int err;
5604
5605         obj = i915_gem_object_create(dev_priv, round_up(size, PAGE_SIZE));
5606         if (IS_ERR(obj))
5607                 return obj;
5608
5609         GEM_BUG_ON(obj->base.write_domain != I915_GEM_DOMAIN_CPU);
5610
5611         file = obj->base.filp;
5612         offset = 0;
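        /*
         * Copy the data into the object's shmemfs backing store one page at a
         * time via the pagecache, leaving the pages populated and up to date.
         */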
5613         do {
5614                 unsigned int len = min_t(typeof(size), size, PAGE_SIZE);
5615                 struct page *page;
5616                 void *pgdata, *vaddr;
5617
5618                 err = pagecache_write_begin(file, file->f_mapping,
5619                                             offset, len, 0,
5620                                             &page, &pgdata);
5621                 if (err < 0)
5622                         goto fail;
5623
5624                 vaddr = kmap(page);
5625                 memcpy(vaddr, data, len);
5626                 kunmap(page);
5627
5628                 err = pagecache_write_end(file, file->f_mapping,
5629                                           offset, len, len,
5630                                           page, pgdata);
5631                 if (err < 0)
5632                         goto fail;
5633
5634                 size -= len;
5635                 data += len;
5636                 offset += len;
5637         } while (size);
5638
5639         return obj;
5640
5641 fail:
5642         i915_gem_object_put(obj);
5643         return ERR_PTR(err);
5644 }
5645
5646 struct scatterlist *
5647 i915_gem_object_get_sg(struct drm_i915_gem_object *obj,
5648                        unsigned int n,
5649                        unsigned int *offset)
5650 {
5651         struct i915_gem_object_page_iter *iter = &obj->mm.get_page;
5652         struct scatterlist *sg;
5653         unsigned int idx, count;
5654
5655         might_sleep();
5656         GEM_BUG_ON(n >= obj->base.size >> PAGE_SHIFT);
5657         GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
5658
5659         /* As we iterate forward through the sg, we record each entry in a
5660          * radixtree for quick repeated (backwards) lookups. If we have seen
5661          * this index previously, we will have an entry for it.
5662          *
5663          * Initial lookup is O(N), but this is amortized to O(1) for
5664          * sequential page access (where each new request is consecutive
5665          * to the previous one). Repeated lookups are O(lg(obj->base.size)),
5666          * i.e. O(1) with a large constant!
5667          */
5668         if (n < READ_ONCE(iter->sg_idx))
5669                 goto lookup;
5670
5671         mutex_lock(&iter->lock);
5672
5673         /* We prefer to reuse the last sg so that repeated lookup of this
5674          * (or the subsequent) sg are fast - comparing against the last
5675          * sg is faster than going through the radixtree.
5676          */
5677
5678         sg = iter->sg_pos;
5679         idx = iter->sg_idx;
5680         count = __sg_page_count(sg);
5681
5682         while (idx + count <= n) {
5683                 unsigned long exception, i;
5684                 int ret;
5685
5686                 /* If we cannot allocate and insert this entry, or the
5687                  * individual pages from this range, cancel updating the
5688                  * sg_idx so that on this lookup we are forced to linearly
5689                  * scan onwards, but on future lookups we will try the
5690                  * insertion again (in which case we need to be careful of
5691                  * the error return reporting that we have already inserted
5692                  * this index).
5693                  */
5694                 ret = radix_tree_insert(&iter->radix, idx, sg);
5695                 if (ret && ret != -EEXIST)
5696                         goto scan;
5697
5698                 exception =
5699                         RADIX_TREE_EXCEPTIONAL_ENTRY |
5700                         idx << RADIX_TREE_EXCEPTIONAL_SHIFT;
5701                 for (i = 1; i < count; i++) {
5702                         ret = radix_tree_insert(&iter->radix, idx + i,
5703                                                 (void *)exception);
5704                         if (ret && ret != -EEXIST)
5705                                 goto scan;
5706                 }
5707
5708                 idx += count;
5709                 sg = ____sg_next(sg);
5710                 count = __sg_page_count(sg);
5711         }
5712
5713 scan:
5714         iter->sg_pos = sg;
5715         iter->sg_idx = idx;
5716
5717         mutex_unlock(&iter->lock);
5718
5719         if (unlikely(n < idx)) /* insertion completed by another thread */
5720                 goto lookup;
5721
5722         /* In case we failed to insert the entry into the radixtree, we need
5723          * to look beyond the current sg.
5724          */
5725         while (idx + count <= n) {
5726                 idx += count;
5727                 sg = ____sg_next(sg);
5728                 count = __sg_page_count(sg);
5729         }
5730
5731         *offset = n - idx;
5732         return sg;
5733
5734 lookup:
5735         rcu_read_lock();
5736
5737         sg = radix_tree_lookup(&iter->radix, n);
5738         GEM_BUG_ON(!sg);
5739
5740         /* If this index is in the middle of a multi-page sg entry,
5741          * the radixtree will contain an exceptional entry that points
5742          * back to the start of that range. We then return the sg entry
5743          * containing the base page, along with the offset of this page
5744          * within that sg entry's range.
5745          */
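        /* e.g. a 4-page sg entry cached at index 8 stores the sg pointer at
         * index 8 and exceptional entries encoding "8" at indices 9-11, so a
         * lookup of n == 10 decodes base == 8 and returns *offset == 2.
         */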
5746         *offset = 0;
5747         if (unlikely(radix_tree_exception(sg))) {
5748                 unsigned long base =
5749                         (unsigned long)sg >> RADIX_TREE_EXCEPTIONAL_SHIFT;
5750
5751                 sg = radix_tree_lookup(&iter->radix, base);
5752                 GEM_BUG_ON(!sg);
5753
5754                 *offset = n - base;
5755         }
5756
5757         rcu_read_unlock();
5758
5759         return sg;
5760 }
5761
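/* Return the struct page backing the n'th page of the object. Only valid
 * for objects with struct-page backing whose pages are already pinned.
 */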
5762 struct page *
5763 i915_gem_object_get_page(struct drm_i915_gem_object *obj, unsigned int n)
5764 {
5765         struct scatterlist *sg;
5766         unsigned int offset;
5767
5768         GEM_BUG_ON(!i915_gem_object_has_struct_page(obj));
5769
5770         sg = i915_gem_object_get_sg(obj, n, &offset);
5771         return nth_page(sg_page(sg), offset);
5772 }
5773
5774 /* Like i915_gem_object_get_page(), but mark the returned page dirty */
5775 struct page *
5776 i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj,
5777                                unsigned int n)
5778 {
5779         struct page *page;
5780
5781         page = i915_gem_object_get_page(obj, n);
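        /* obj->mm.dirty means every backing page will be marked dirty when
         * the pages are released, so the per-page update is only needed
         * while that object-level flag is not yet set.
         */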
5782         if (!obj->mm.dirty)
5783                 set_page_dirty(page);
5784
5785         return page;
5786 }
5787
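/* Return the DMA (device) address of the n'th page of the object, computed
 * from the dma-mapped sg entry covering that page. The pages must already
 * be pinned.
 */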
5788 dma_addr_t
5789 i915_gem_object_get_dma_address(struct drm_i915_gem_object *obj,
5790                                 unsigned long n)
5791 {
5792         struct scatterlist *sg;
5793         unsigned int offset;
5794
5795         sg = i915_gem_object_get_sg(obj, n, &offset);
5796         return sg_dma_address(sg) + (offset << PAGE_SHIFT);
5797 }
5798
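/* Swap the object's backing store over to a contiguous physical allocation
 * (i915_gem_phys_ops), typically for hardware that is programmed with a
 * physical address rather than a GGTT offset. The object is first unbound
 * from the GTT and must still be marked WILLNEED, not quirked and not
 * vmapped; its existing shmemfs pages are released only once the new
 * backing store has been populated, and the physical pages are then kept
 * pinned until the object itself is released.
 */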
5799 int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
5800 {
5801         struct sg_table *pages;
5802         int err;
5803
5804         if (align > obj->base.size)
5805                 return -EINVAL;
5806
5807         if (obj->ops == &i915_gem_phys_ops)
5808                 return 0;
5809
5810         if (obj->ops != &i915_gem_object_ops)
5811                 return -EINVAL;
5812
5813         err = i915_gem_object_unbind(obj);
5814         if (err)
5815                 return err;
5816
5817         mutex_lock(&obj->mm.lock);
5818
5819         if (obj->mm.madv != I915_MADV_WILLNEED) {
5820                 err = -EFAULT;
5821                 goto err_unlock;
5822         }
5823
5824         if (obj->mm.quirked) {
5825                 err = -EFAULT;
5826                 goto err_unlock;
5827         }
5828
5829         if (obj->mm.mapping) {
5830                 err = -EBUSY;
5831                 goto err_unlock;
5832         }
5833
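        /* Detach the current (shmemfs) page set from the object; it is only
         * released back through the old put_pages() once the physical copy
         * below has been populated, or restored on error.
         */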
5834         pages = fetch_and_zero(&obj->mm.pages);
5835         if (pages) {
5836                 struct drm_i915_private *i915 = to_i915(obj->base.dev);
5837
5838                 __i915_gem_object_reset_page_iter(obj);
5839
5840                 spin_lock(&i915->mm.obj_lock);
5841                 list_del(&obj->mm.link);
5842                 spin_unlock(&i915->mm.obj_lock);
5843         }
5844
5845         obj->ops = &i915_gem_phys_ops;
5846
5847         err = ____i915_gem_object_get_pages(obj);
5848         if (err)
5849                 goto err_xfer;
5850
5851         /* Perma-pin (until release) the physical set of pages */
5852         __i915_gem_object_pin_pages(obj);
5853
5854         if (!IS_ERR_OR_NULL(pages))
5855                 i915_gem_object_ops.put_pages(obj, pages);
5856         mutex_unlock(&obj->mm.lock);
5857         return 0;
5858
5859 err_xfer:
5860         obj->ops = &i915_gem_object_ops;
5861         obj->mm.pages = pages;
5862 err_unlock:
5863         mutex_unlock(&obj->mm.lock);
5864         return err;
5865 }
5866
5867 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
5868 #include "selftests/scatterlist.c"
5869 #include "selftests/mock_gem_device.c"
5870 #include "selftests/huge_gem_object.c"
5871 #include "selftests/huge_pages.c"
5872 #include "selftests/i915_gem_object.c"
5873 #include "selftests/i915_gem_coherency.c"
5874 #endif