// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/drmP.h>
#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10

/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
        struct vmw_buffer_object *backup = res->backup;
        struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

        lockdep_assert_held(&backup->base.resv->lock.base);
        res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
                res->func->prio;

        while (*new) {
                struct vmw_resource *this =
                        container_of(*new, struct vmw_resource, mob_node);

                parent = *new;
                new = (res->backup_offset < this->backup_offset) ?
                        &((*new)->rb_left) : &((*new)->rb_right);
        }

        rb_link_node(&res->mob_node, parent, new);
        rb_insert_color(&res->mob_node, &backup->res_tree);

        vmw_bo_prio_add(backup, res->used_prio);
}

/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
        struct vmw_buffer_object *backup = res->backup;

        lockdep_assert_held(&backup->base.resv->lock.base);
        if (vmw_resource_mob_attached(res)) {
                rb_erase(&res->mob_node, &backup->res_tree);
                RB_CLEAR_NODE(&res->mob_node);
                vmw_bo_prio_del(backup, res->used_prio);
        }
}
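
/*
 * Usage note (illustrative sketch, not driver code): both helpers above
 * require the backup buffer object to be reserved, matching their lockdep
 * assertions:
 *
 *        ttm_bo_reserve(&res->backup->base, false, false, NULL);
 *        vmw_resource_mob_attach(res);
 *        ...
 *        vmw_resource_mob_detach(res);
 *        ttm_bo_unreserve(&res->backup->base);
 */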

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
        return kref_get_unless_zero(&res->kref) ? res : NULL;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        if (res->id != -1)
                idr_remove(idr, res->id);
        res->id = -1;
        spin_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;
        int id;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);
        if (res->backup) {
                struct ttm_buffer_object *bo = &res->backup->base;

                ttm_bo_reserve(bo, false, false, NULL);
                if (vmw_resource_mob_attached(res) &&
                    res->func->unbind != NULL) {
                        struct ttm_validate_buffer val_buf;

                        val_buf.bo = bo;
                        val_buf.num_shared = 0;
                        res->func->unbind(res, false, &val_buf);
                }
                res->backup_dirty = false;
                vmw_resource_mob_detach(res);
                if (res->dirty)
                        res->func->dirty_free(res);
                if (res->coherent)
                        vmw_bo_dirty_release(res->backup);
                ttm_bo_unreserve(bo);
                vmw_bo_unreference(&res->backup);
        }

        if (likely(res->hw_destroy != NULL)) {
                mutex_lock(&dev_priv->binding_mutex);
                vmw_binding_res_list_kill(&res->binding_head);
                mutex_unlock(&dev_priv->binding_mutex);
                res->hw_destroy(res);
        }

        id = res->id;
        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        spin_lock(&dev_priv->resource_lock);
        if (id != -1)
                idr_remove(idr, id);
        spin_unlock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;

        *p_res = NULL;
        kref_put(&res->kref, vmw_resource_release);
}
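
/*
 * Refcounting sketch (illustrative): holders take a reference while using
 * a resource and drop it when done; the _unless_doomed variant is for
 * lookups that may race with the final kref_put():
 *
 *        struct vmw_resource *tmp = vmw_resource_reference(res);
 *        ...
 *        vmw_resource_unreference(&tmp);
 */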

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;
        struct idr *idr = &dev_priv->res_idr[res->func->res_type];

        BUG_ON(res->id != -1);

        idr_preload(GFP_KERNEL);
        spin_lock(&dev_priv->resource_lock);

        ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
        if (ret >= 0)
                res->id = ret;

        spin_unlock(&dev_priv->resource_lock);
        idr_preload_end();
        return ret < 0 ? ret : 0;
}
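
/*
 * Note: the idr_preload()/GFP_NOWAIT pairing above exists because the idr
 * is protected by a spinlock; any sleeping allocation is done up front by
 * idr_preload() so that idr_alloc() never sleeps under the lock.
 */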

/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 *
 * Returns 0 on success and a negative error code on failure.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
                      bool delay_id,
                      void (*res_free) (struct vmw_resource *res),
                      const struct vmw_res_func *func)
{
        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->dev_priv = dev_priv;
        res->func = func;
        RB_CLEAR_NODE(&res->mob_node);
        INIT_LIST_HEAD(&res->lru_head);
        INIT_LIST_HEAD(&res->binding_head);
        res->id = -1;
        res->backup = NULL;
        res->backup_offset = 0;
        res->backup_dirty = false;
        res->res_dirty = false;
        res->coherent = false;
        res->used_prio = 3;
        res->dirty = NULL;
        if (delay_id)
                return 0;
        else
                return vmw_resource_alloc_id(res);
}
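
/*
 * Initialization sketch (illustrative; my_res_free and my_res_func are
 * hypothetical stand-ins for a resource type's destructor and function
 * table):
 *
 *        ret = vmw_resource_init(dev_priv, res, true, my_res_free,
 *                                &my_res_func);
 *        if (unlikely(ret != 0))
 *                return ret;
 */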

/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
                                    struct ttm_object_file *tfile,
                                    uint32_t handle,
                                    const struct vmw_user_resource_conv
                                    *converter,
                                    struct vmw_resource **p_res)
{
        struct ttm_base_object *base;
        struct vmw_resource *res;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;

        res = converter->base_obj_to_res(base);
        kref_get(&res->kref);

        *p_res = res;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}
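
/*
 * Lookup sketch (illustrative; user_surface_converter is one of the
 * converters callers in this driver actually pass):
 *
 *        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *                                              user_surface_converter,
 *                                              &res);
 *        if (unlikely(ret != 0))
 *                return ret;
 *        ... use res ...
 *        vmw_resource_unreference(&res);
 */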

/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks, without taking a
 * reference
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * Like vmw_user_resource_lookup_handle(), but the returned resource is not
 * refcounted; as in the error path below, the caller is expected to end the
 * lookup with a call to ttm_base_object_noref_release(). Returns the
 * resource on success, ERR_PTR(-ESRCH) if the handle can't be found, and
 * ERR_PTR(-EINVAL) if it is associated with an incorrect resource type.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
                                      struct ttm_object_file *tfile,
                                      uint32_t handle,
                                      const struct vmw_user_resource_conv
                                      *converter)
{
        struct ttm_base_object *base;

        base = ttm_base_object_noref_lookup(tfile, handle);
        if (!base)
                return ERR_PTR(-ESRCH);

        if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
                ttm_base_object_noref_release();
                return ERR_PTR(-EINVAL);
        }

        return converter->base_obj_to_res(base);
}
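
/*
 * Noref lookup sketch (illustrative): the returned pointer must only be
 * used inside the noref critical section, which the caller ends
 * explicitly:
 *
 *        res = vmw_user_resource_noref_lookup_handle(dev_priv, tfile,
 *                                                    handle, converter);
 *        if (IS_ERR(res))
 *                return PTR_ERR(res);
 *        ... use res without taking a reference ...
 *        ttm_base_object_noref_release();
 */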

/**
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed to by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t handle,
                           struct vmw_surface **out_surf,
                           struct vmw_buffer_object **out_buf)
{
        struct vmw_resource *res;
        int ret;

        BUG_ON(*out_surf || *out_buf);

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
                                              user_surface_converter,
                                              &res);
        if (!ret) {
                *out_surf = vmw_res_to_srf(res);
                return 0;
        }

        *out_surf = NULL;
        ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
        return ret;
}

/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
                                  bool interruptible)
{
        unsigned long size =
                (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
        struct vmw_buffer_object *backup;
        int ret;

        if (likely(res->backup)) {
                BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
                return 0;
        }

        backup = kzalloc(sizeof(*backup), GFP_KERNEL);
        if (unlikely(!backup))
                return -ENOMEM;

        ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
                              res->func->backup_placement,
                              interruptible,
                              &vmw_bo_bo_free);
        if (unlikely(ret != 0))
                goto out_no_bo;

        res->backup = backup;

out_no_bo:
        return ret;
}
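
/*
 * Note: the size computation in vmw_resource_buf_alloc() above is an
 * open-coded PAGE_ALIGN(res->backup_size); it rounds the backup size up
 * to a whole number of pages.
 */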

/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:            The resource to make visible to the device.
 * @val_buf:        Information about a buffer possibly
 *                  containing backup data if a bind operation is needed.
 * @dirtying:       Whether a pending GPU operation will dirty the resource.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
                                    struct ttm_validate_buffer *val_buf,
                                    bool dirtying)
{
        int ret = 0;
        const struct vmw_res_func *func = res->func;

        if (unlikely(res->id == -1)) {
                ret = func->create(res);
                if (unlikely(ret != 0))
                        return ret;
        }

        if (func->bind &&
            ((func->needs_backup && !vmw_resource_mob_attached(res) &&
              val_buf->bo != NULL) ||
             (!func->needs_backup && val_buf->bo != NULL))) {
                ret = func->bind(res, val_buf);
                if (unlikely(ret != 0))
                        goto out_bind_failed;
                if (func->needs_backup)
                        vmw_resource_mob_attach(res);
        }

        /*
         * Handle the case where the backup mob is marked coherent but
         * the resource isn't.
         */
        if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
            !res->coherent) {
                if (res->backup->dirty && !res->dirty) {
                        ret = func->dirty_alloc(res);
                        if (ret)
                                return ret;
                } else if (!res->backup->dirty && res->dirty) {
                        func->dirty_free(res);
                }
        }

        /*
         * Transfer the dirty regions to the resource and update
         * the resource.
         */
        if (res->dirty) {
                if (dirtying && !res->res_dirty) {
                        pgoff_t start = res->backup_offset >> PAGE_SHIFT;
                        pgoff_t end = __KERNEL_DIV_ROUND_UP
                                (res->backup_offset + res->backup_size,
                                 PAGE_SIZE);

                        vmw_bo_dirty_unmap(res->backup, start, end);
                }

                vmw_bo_dirty_transfer_to_res(res);
                return func->dirty_sync(res);
        }

        return 0;

out_bind_failed:
        func->destroy(res);

        return ret;
}

/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched backup buffers. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
                            bool dirty_set,
                            bool dirty,
                            bool switch_backup,
                            struct vmw_buffer_object *new_backup,
                            unsigned long new_backup_offset)
{
        struct vmw_private *dev_priv = res->dev_priv;

        if (!list_empty(&res->lru_head))
                return;

        if (switch_backup && new_backup != res->backup) {
                if (res->backup) {
                        vmw_resource_mob_detach(res);
                        if (res->coherent)
                                vmw_bo_dirty_release(res->backup);
                        vmw_bo_unreference(&res->backup);
                }

                if (new_backup) {
                        res->backup = vmw_bo_reference(new_backup);

                        /*
                         * The validation code should already have added a
                         * dirty tracker here.
                         */
                        WARN_ON(res->coherent && !new_backup->dirty);

                        vmw_resource_mob_attach(res);
                } else {
                        res->backup = NULL;
                }
        } else if (switch_backup && res->coherent) {
                vmw_bo_dirty_release(res->backup);
        }

        if (switch_backup)
                res->backup_offset = new_backup_offset;

        if (dirty_set)
                res->res_dirty = dirty;

        if (!res->func->may_evict || res->id == -1 || res->pin_count)
                return;

        spin_lock(&dev_priv->resource_lock);
        list_add_tail(&res->lru_head,
                      &res->dev_priv->res_lru[res->func->res_type]);
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
                          struct vmw_resource *res,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
{
        struct ttm_operation_ctx ctx = { true, false };
        struct list_head val_list;
        bool backup_dirty = false;
        int ret;

        if (unlikely(res->backup == NULL)) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0))
                        return ret;
        }

        INIT_LIST_HEAD(&val_list);
        ttm_bo_get(&res->backup->base);
        val_buf->bo = &res->backup->base;
        val_buf->num_shared = 0;
        list_add_tail(&val_buf->head, &val_list);
        ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
                                     true);
        if (unlikely(ret != 0))
                goto out_no_reserve;

        if (res->func->needs_backup && !vmw_resource_mob_attached(res))
                return 0;

        backup_dirty = res->backup_dirty;
        ret = ttm_bo_validate(&res->backup->base,
                              res->func->backup_placement,
                              &ctx);

        if (unlikely(ret != 0))
                goto out_no_validate;

        return 0;

out_no_validate:
        ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
        if (backup_dirty)
                vmw_bo_unreference(&res->backup);

        return ret;
}

/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps during reservation should be
 *                  performed while interruptible.
 * @no_backup:      Whether to skip backup buffer allocation even if the
 *                  resource type needs one.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
                         bool no_backup)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        spin_lock(&dev_priv->resource_lock);
        list_del_init(&res->lru_head);
        spin_unlock(&dev_priv->resource_lock);

        if (res->func->needs_backup && res->backup == NULL &&
            !no_backup) {
                ret = vmw_resource_buf_alloc(res, interruptible);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Failed to allocate a backup buffer "
                                  "of size %lu bytes\n",
                                  (unsigned long) res->backup_size);
                        return ret;
                }
        }

        return 0;
}
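
/*
 * Reserve/unreserve pairing sketch (illustrative; the flag arguments are
 * simplified to the no-change case):
 *
 *        ret = vmw_resource_reserve(res, true, false);
 *        if (unlikely(ret != 0))
 *                return ret;
 *        ... validate and submit commands ...
 *        vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */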

/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
                                 struct ttm_validate_buffer *val_buf)
{
        struct list_head val_list;

        if (likely(val_buf->bo == NULL))
                return;

        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
        ttm_eu_backoff_reservation(ticket, &val_list);
        ttm_bo_put(val_buf->bo);
        val_buf->bo = NULL;
}

/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
                                 struct vmw_resource *res, bool interruptible)
{
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
        int ret;

        BUG_ON(!func->may_evict);

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;

        if (unlikely(func->unbind != NULL &&
                     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
                ret = func->unbind(res, res->res_dirty, &val_buf);
                if (unlikely(ret != 0))
                        goto out_no_unbind;
                vmw_resource_mob_detach(res);
        }
        ret = func->destroy(res);
        res->backup_dirty = true;
        res->res_dirty = false;
out_no_unbind:
        vmw_resource_backoff_reservation(ticket, &val_buf);

        return ret;
}

/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
                          bool dirtying)
{
        int ret;
        struct vmw_resource *evict_res;
        struct vmw_private *dev_priv = res->dev_priv;
        struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
        struct ttm_validate_buffer val_buf;
        unsigned err_count = 0;

        if (!res->func->create)
                return 0;

        val_buf.bo = NULL;
        val_buf.num_shared = 0;
        if (res->backup)
                val_buf.bo = &res->backup->base;
        do {
                ret = vmw_resource_do_validate(res, &val_buf, dirtying);
                if (likely(ret != -EBUSY))
                        break;

                spin_lock(&dev_priv->resource_lock);
                if (list_empty(lru_list) || !res->func->may_evict) {
                        DRM_ERROR("Out of device resources "
                                  "for %s.\n", res->func->type_name);
                        ret = -EBUSY;
                        spin_unlock(&dev_priv->resource_lock);
                        break;
                }

                evict_res = vmw_resource_reference
                        (list_first_entry(lru_list, struct vmw_resource,
                                          lru_head));
                list_del_init(&evict_res->lru_head);

                spin_unlock(&dev_priv->resource_lock);

                /* Trylock backup buffers with a NULL ticket. */
                ret = vmw_resource_do_evict(NULL, evict_res, intr);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (ret == -ERESTARTSYS ||
                            ++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                goto out_no_validate;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

        if (unlikely(ret != 0))
                goto out_no_validate;
        else if (!res->func->needs_backup && res->backup) {
                WARN_ON_ONCE(vmw_resource_mob_attached(res));
                vmw_bo_unreference(&res->backup);
        }

        return 0;

out_no_validate:
        return ret;
}
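
/*
 * Note on the loop above: eviction candidates are taken off the LRU list
 * under the resource spinlock and evicted with trylock semantics (NULL
 * ticket); a candidate that fails to evict is put back on the list, and
 * validation gives up after VMW_RES_EVICT_ERR_COUNT consecutive failures
 * or on -ERESTARTSYS.
 */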

/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
        struct ttm_validate_buffer val_buf = {
                .bo = &vbo->base,
                .num_shared = 0
        };

        lockdep_assert_held(&vbo->base.resv->lock.base);
        while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
                struct rb_node *node = vbo->res_tree.rb_node;
                struct vmw_resource *res =
                        container_of(node, struct vmw_resource, mob_node);

                if (!WARN_ON_ONCE(!res->func->unbind))
                        (void) res->func->unbind(res, res->res_dirty, &val_buf);

                res->backup_dirty = true;
                res->res_dirty = false;
                vmw_resource_mob_detach(res);
        }

        (void) ttm_bo_wait(&vbo->base, false, false);
}

/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist.  This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
        struct vmw_resource *dx_query_ctx;
        struct vmw_private *dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDXReadbackAllQuery body;
        } *cmd;

        /* No query bound, so do nothing */
        if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
                return 0;

        dx_query_ctx = dx_query_mob->dx_query_ctx;
        dev_priv     = dx_query_ctx->dev_priv;

        cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
        if (unlikely(cmd == NULL))
                return -ENOMEM;

        cmd->header.id   = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
        cmd->header.size = sizeof(cmd->body);
        cmd->body.cid    = dx_query_ctx->id;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));

        /* Triggers a rebind the next time the affected context is bound */
        dx_query_mob->dx_query_ctx = NULL;

        return 0;
}

/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
                           struct ttm_mem_reg *mem)
{
        struct vmw_buffer_object *dx_query_mob;
        struct ttm_bo_device *bdev = bo->bdev;
        struct vmw_private *dev_priv;

        dev_priv = container_of(bdev, struct vmw_private, bdev);

        mutex_lock(&dev_priv->binding_mutex);

        dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
        if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
                mutex_unlock(&dev_priv->binding_mutex);
                return;
        }

        /* If BO is being moved from MOB to system memory */
        if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
                struct vmw_fence_obj *fence;

                (void) vmw_query_readback_all(dx_query_mob);
                mutex_unlock(&dev_priv->binding_mutex);

                /* Create a fence and attach the BO to it */
                (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                vmw_bo_fence_single(bo, fence);

                if (fence != NULL)
                        vmw_fence_obj_unreference(&fence);

                (void) ttm_bo_wait(bo, false, false);
        } else {
                mutex_unlock(&dev_priv->binding_mutex);
        }
}

/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res:            The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
        return res->func->needs_backup;
}

/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
                                    enum vmw_res_type type)
{
        struct list_head *lru_list = &dev_priv->res_lru[type];
        struct vmw_resource *evict_res;
        unsigned err_count = 0;
        int ret;
        struct ww_acquire_ctx ticket;

        do {
                spin_lock(&dev_priv->resource_lock);

                if (list_empty(lru_list))
                        goto out_unlock;

                evict_res = vmw_resource_reference(
                        list_first_entry(lru_list, struct vmw_resource,
                                         lru_head));
                list_del_init(&evict_res->lru_head);
                spin_unlock(&dev_priv->resource_lock);

                /* Wait-lock backup buffers with a ticket. */
                ret = vmw_resource_do_evict(&ticket, evict_res, false);
                if (unlikely(ret != 0)) {
                        spin_lock(&dev_priv->resource_lock);
                        list_add_tail(&evict_res->lru_head, lru_list);
                        spin_unlock(&dev_priv->resource_lock);
                        if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
                                vmw_resource_unreference(&evict_res);
                                return;
                        }
                }

                vmw_resource_unreference(&evict_res);
        } while (1);

out_unlock:
        spin_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing or starvation, or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
        enum vmw_res_type type;

        mutex_lock(&dev_priv->cmdbuf_mutex);

        for (type = 0; type < vmw_res_max; ++type)
                vmw_resource_evict_type(dev_priv, type);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
}

/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 * @interruptible: Whether to sleep interruptibly while pinning.
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
        struct ttm_operation_ctx ctx = { interruptible, false };
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        ttm_write_lock(&dev_priv->reservation_sem, interruptible);
        mutex_lock(&dev_priv->cmdbuf_mutex);
        ret = vmw_resource_reserve(res, interruptible, false);
        if (ret)
                goto out_no_reserve;

        if (res->pin_count == 0) {
                struct vmw_buffer_object *vbo = NULL;

                if (res->backup) {
                        vbo = res->backup;

                        ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
                        if (!vbo->pin_count) {
                                ret = ttm_bo_validate
                                        (&vbo->base,
                                         res->func->backup_placement,
                                         &ctx);
                                if (ret) {
                                        ttm_bo_unreserve(&vbo->base);
                                        goto out_no_validate;
                                }
                        }

                        /* Do we really need to pin the MOB as well? */
                        vmw_bo_pin_reserved(vbo, true);
                }
                ret = vmw_resource_validate(res, interruptible, true);
                if (vbo)
                        ttm_bo_unreserve(&vbo->base);
                if (ret)
                        goto out_no_validate;
        }
        res->pin_count++;

out_no_validate:
        vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_write_unlock(&dev_priv->reservation_sem);

        return ret;
}

/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        int ret;

        (void) ttm_read_lock(&dev_priv->reservation_sem, false);
        mutex_lock(&dev_priv->cmdbuf_mutex);

        ret = vmw_resource_reserve(res, false, true);
        WARN_ON(ret);

        WARN_ON(res->pin_count == 0);
        if (--res->pin_count == 0 && res->backup) {
                struct vmw_buffer_object *vbo = res->backup;

                (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
                vmw_bo_pin_reserved(vbo, false);
                ttm_bo_unreserve(&vbo->base);
        }

        vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

        mutex_unlock(&dev_priv->cmdbuf_mutex);
        ttm_read_unlock(&dev_priv->reservation_sem);
}
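
/*
 * Pin/unpin usage sketch (illustrative): a caller that needs a resource
 * to stay resident with a stable id brackets its use like this:
 *
 *        ret = vmw_resource_pin(res, true);
 *        if (ret)
 *                return ret;
 *        ... the resource cannot be evicted here ...
 *        vmw_resource_unpin(res);
 */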

/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
        return res->func->res_type;
}

/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
                               pgoff_t end)
{
        if (res->dirty)
                res->func->dirty_range_add(res, start << PAGE_SHIFT,
                                           end << PAGE_SHIFT);
}
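
/*
 * Note: @start and @end above are page offsets, while dirty_range_add()
 * takes byte offsets, hence the PAGE_SHIFT conversions.
 */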

/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
                        pgoff_t end, pgoff_t *num_prefault)
{
        struct rb_node *cur = vbo->res_tree.rb_node;
        struct vmw_resource *found = NULL;
        unsigned long res_start = start << PAGE_SHIFT;
        unsigned long res_end = end << PAGE_SHIFT;
        unsigned long last_cleaned = 0;

        /*
         * Find the resource with lowest backup_offset that intersects the
         * range.
         */
        while (cur) {
                struct vmw_resource *cur_res =
                        container_of(cur, struct vmw_resource, mob_node);

                if (cur_res->backup_offset >= res_end) {
                        cur = cur->rb_left;
                } else if (cur_res->backup_offset + cur_res->backup_size <=
                           res_start) {
                        cur = cur->rb_right;
                } else {
                        found = cur_res;
                        cur = cur->rb_left;
                        /* Continue to look for resources with lower offsets */
                }
        }

        /*
         * In order of increasing backup_offset, clean dirty resources
         * intersecting the range.
         */
        while (found) {
                if (found->res_dirty) {
                        int ret;

                        if (!found->func->clean)
                                return -EINVAL;

                        ret = found->func->clean(found);
                        if (ret)
                                return ret;

                        found->res_dirty = false;
                }
                last_cleaned = found->backup_offset + found->backup_size;
                cur = rb_next(&found->mob_node);
                if (!cur)
                        break;

                found = container_of(cur, struct vmw_resource, mob_node);
                if (found->backup_offset >= res_end)
                        break;
        }

        /*
         * Set the number of pages allowed prefaulting and fence the buffer
         * object.
         */
        *num_prefault = 1;
        if (last_cleaned > res_start) {
                struct ttm_buffer_object *bo = &vbo->base;

                *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
                                                      PAGE_SIZE);
                vmw_bo_fence_single(bo, NULL);
                if (bo->moving)
                        dma_fence_put(bo->moving);
                bo->moving = dma_fence_get
                        (reservation_object_get_excl(bo->resv));
        }

        return 0;
}