// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include <drm/vmwgfx_drm.h>
#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_resource_priv.h"
#include "vmwgfx_binding.h"

#define VMW_RES_EVICT_ERR_COUNT 10
/**
 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
 * @res: The resource
 */
void vmw_resource_mob_attach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;
	struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;

	lockdep_assert_held(&backup->base.resv->lock.base);
	res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
		res->func->prio;

	while (*new) {
		struct vmw_resource *this =
			container_of(*new, struct vmw_resource, mob_node);

		parent = *new;
		new = (res->backup_offset < this->backup_offset) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&res->mob_node, parent, new);
	rb_insert_color(&res->mob_node, &backup->res_tree);

	vmw_bo_prio_add(backup, res->used_prio);
}
/**
 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
 * @res: The resource
 */
void vmw_resource_mob_detach(struct vmw_resource *res)
{
	struct vmw_buffer_object *backup = res->backup;

	lockdep_assert_held(&backup->base.resv->lock.base);
	if (vmw_resource_mob_attached(res)) {
		rb_erase(&res->mob_node, &backup->res_tree);
		RB_CLEAR_NODE(&res->mob_node);
		vmw_bo_prio_del(backup, res->used_prio);
	}
}
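
/*
 * Illustrative sketch (not driver code): attach and detach run with the
 * backing buffer reserved, so the rb-tree and priority bookkeeping above
 * are serialized by the buffer reservation, e.g.:
 *
 *	ttm_bo_reserve(&backup->base, true, false, NULL);
 *	res->backup = vmw_bo_reference(backup);
 *	vmw_resource_mob_attach(res);
 *	ttm_bo_unreserve(&backup->base);
 */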
struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

struct vmw_resource *
vmw_resource_reference_unless_doomed(struct vmw_resource *res)
{
	return kref_get_unless_zero(&res->kref) ? res : NULL;
}
/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1
 */
void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(idr, res->id);
	res->id = -1;
	spin_unlock(&dev_priv->resource_lock);
}
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);
	if (res->backup) {
		struct ttm_buffer_object *bo = &res->backup->base;

		ttm_bo_reserve(bo, false, false, NULL);
		if (vmw_resource_mob_attached(res) &&
		    res->func->unbind != NULL) {
			struct ttm_validate_buffer val_buf;

			val_buf.bo = bo;
			val_buf.num_shared = 0;
			res->func->unbind(res, false, &val_buf);
		}
		res->backup_dirty = false;
		vmw_resource_mob_detach(res);
		if (res->dirty)
			res->func->dirty_free(res);
		if (res->coherent)
			vmw_bo_dirty_release(res->backup);
		ttm_bo_unreserve(bo);
		vmw_bo_unreference(&res->backup);
	}

	if (likely(res->hw_destroy != NULL)) {
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_res_list_kill(&res->binding_head);
		mutex_unlock(&dev_priv->binding_mutex);
		res->hw_destroy(res);
	}

	id = res->id;
	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	spin_lock(&dev_priv->resource_lock);
	if (id != -1)
		idr_remove(idr, id);
	spin_unlock(&dev_priv->resource_lock);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;

	*p_res = NULL;
	kref_put(&res->kref, vmw_resource_release);
}
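
/*
 * Refcounting sketch (illustrative): these helpers wrap kref_get() and
 * kref_put(), so a temporary reference looks like:
 *
 *	struct vmw_resource *tmp = vmw_resource_reference(res);
 *	...
 *	vmw_resource_unreference(&tmp);		(tmp becomes NULL)
 *
 * vmw_resource_reference_unless_doomed() is the variant to use when the
 * resource may already be on its way into vmw_resource_release().
 */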
/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource from the resource manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
int vmw_resource_alloc_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct idr *idr = &dev_priv->res_idr[res->func->res_type];

	BUG_ON(res->id != -1);

	idr_preload(GFP_KERNEL);
	spin_lock(&dev_priv->resource_lock);

	ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
	if (ret >= 0)
		res->id = ret;

	spin_unlock(&dev_priv->resource_lock);
	idr_preload_end();
	return ret < 0 ? ret : 0;
}
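
/*
 * The idr_preload()/idr_alloc(GFP_NOWAIT)/idr_preload_end() sequence above
 * is the standard pattern for id allocation under a spinlock: memory is
 * preallocated while sleeping is still allowed, and the allocation itself
 * then runs atomically. A minimal generic sketch (hypothetical my_idr and
 * my_lock, not part of this driver):
 *
 *	idr_preload(GFP_KERNEL);
 *	spin_lock(&my_lock);
 *	id = idr_alloc(&my_idr, ptr, 1, 0, GFP_NOWAIT);
 *	spin_unlock(&my_lock);
 *	idr_preload_end();
 */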
/**
 * vmw_resource_init - initialize a struct vmw_resource
 *
 * @dev_priv:       Pointer to a device private struct.
 * @res:            The struct vmw_resource to initialize.
 * @delay_id:       Boolean whether to defer device id allocation until
 *                  the first validation.
 * @res_free:       Resource destructor.
 * @func:           Resource function table.
 */
int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
		      bool delay_id,
		      void (*res_free) (struct vmw_resource *res),
		      const struct vmw_res_func *func)
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->dev_priv = dev_priv;
	res->func = func;
	RB_CLEAR_NODE(&res->mob_node);
	INIT_LIST_HEAD(&res->lru_head);
	INIT_LIST_HEAD(&res->binding_head);
	res->id = -1;
	res->backup = NULL;
	res->backup_offset = 0;
	res->backup_dirty = false;
	res->res_dirty = false;
	res->coherent = false;
	res->used_prio = 3;
	res->dirty = NULL;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(res);
}
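
/*
 * Usage sketch (hypothetical resource type; my_res, my_res_free,
 * my_res_func and my_hw_destroy are assumptions, not driver code): a
 * resource implementation embeds struct vmw_resource and initializes it
 * here, deferring the device id until first validation:
 *
 *	ret = vmw_resource_init(dev_priv, &my_res->res, true,
 *				my_res_free, &my_res_func);
 *	if (ret)
 *		return ret;
 *	my_res->res.hw_destroy = my_hw_destroy;
 */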
/**
 * vmw_user_resource_lookup_handle - lookup a struct resource from a
 * TTM user-space handle and perform basic type checks
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 * @p_res:        On successful return the location pointed to will contain
 *                a pointer to a refcounted struct vmw_resource.
 *
 * If the handle can't be found or is associated with an incorrect resource
 * type, -EINVAL will be returned.
 */
int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
				    struct ttm_object_file *tfile,
				    uint32_t handle,
				    const struct vmw_user_resource_conv
				    *converter,
				    struct vmw_resource **p_res)
{
	struct ttm_base_object *base;
	struct vmw_resource *res;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(ttm_base_object_type(base) != converter->object_type))
		goto out_bad_resource;

	res = converter->base_obj_to_res(base);
	kref_get(&res->kref);

	*p_res = res;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}
/**
 * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
 * TTM user-space handle without reference counting
 *
 * @dev_priv:     Pointer to a device private struct
 * @tfile:        Pointer to a struct ttm_object_file identifying the caller
 * @handle:       The TTM user-space handle
 * @converter:    Pointer to an object describing the resource type
 *
 * If the handle can't be found, ERR_PTR(-ESRCH) is returned; if it is
 * associated with an incorrect resource type, ERR_PTR(-EINVAL) is returned.
 * The returned resource pointer is not refcounted.
 */
struct vmw_resource *
vmw_user_resource_noref_lookup_handle(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t handle,
				      const struct vmw_user_resource_conv
				      *converter)
{
	struct ttm_base_object *base;

	base = ttm_base_object_noref_lookup(tfile, handle);
	if (!base)
		return ERR_PTR(-ESRCH);

	if (unlikely(ttm_base_object_type(base) != converter->object_type)) {
		ttm_base_object_noref_release();
		return ERR_PTR(-EINVAL);
	}

	return converter->base_obj_to_res(base);
}
/*
 * Helper function that looks up either a surface or a buffer object.
 *
 * The pointers pointed at by @out_surf and @out_buf need to be NULL.
 */
int vmw_user_lookup_handle(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t handle,
			   struct vmw_surface **out_surf,
			   struct vmw_buffer_object **out_buf)
{
	struct vmw_resource *res;
	int ret;

	BUG_ON(*out_surf || *out_buf);

	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
					      user_surface_converter,
					      &res);
	if (!ret) {
		*out_surf = vmw_res_to_srf(res);
		return 0;
	}

	*out_surf = NULL;
	ret = vmw_user_bo_lookup(tfile, handle, out_buf, NULL);
	return ret;
}
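
/*
 * Usage sketch (hypothetical caller): both out pointers must be NULL on
 * entry, and exactly one is filled in on success:
 *
 *	struct vmw_surface *surf = NULL;
 *	struct vmw_buffer_object *buf = NULL;
 *
 *	ret = vmw_user_lookup_handle(dev_priv, tfile, handle, &surf, &buf);
 *	if (ret)
 *		return ret;
 *	if (surf)
 *		... the handle named a surface ...
 *	else
 *		... the handle named a buffer object ...
 */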
/**
 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
 *
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 */
static int vmw_resource_buf_alloc(struct vmw_resource *res,
				  bool interruptible)
{
	unsigned long size =
		(res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
	struct vmw_buffer_object *backup;
	int ret;

	if (likely(res->backup)) {
		BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
		return 0;
	}

	backup = kzalloc(sizeof(*backup), GFP_KERNEL);
	if (unlikely(!backup))
		return -ENOMEM;

	ret = vmw_bo_init(res->dev_priv, backup, res->backup_size,
			  res->func->backup_placement,
			  interruptible,
			  &vmw_bo_bo_free);
	if (unlikely(ret != 0))
		goto out_no_bo;

	res->backup = backup;

out_no_bo:
	return ret;
}
/**
 * vmw_resource_do_validate - Make a resource up-to-date and visible
 *                            to the device.
 *
 * @res:        The resource to make visible to the device.
 * @val_buf:    Information about a buffer possibly
 *              containing backup data if a bind operation is needed.
 * @dirtying:   Whether the pending GPU operation will dirty the resource.
 *
 * On hardware resource shortage, this function returns -EBUSY and
 * should be retried once resources have been freed up.
 */
static int vmw_resource_do_validate(struct vmw_resource *res,
				    struct ttm_validate_buffer *val_buf,
				    bool dirtying)
{
	int ret = 0;
	const struct vmw_res_func *func = res->func;

	if (unlikely(res->id == -1)) {
		ret = func->create(res);
		if (unlikely(ret != 0))
			return ret;
	}

	if (func->bind &&
	    ((func->needs_backup && !vmw_resource_mob_attached(res) &&
	      val_buf->bo != NULL) ||
	     (!func->needs_backup && val_buf->bo != NULL))) {
		ret = func->bind(res, val_buf);
		if (unlikely(ret != 0))
			goto out_bind_failed;
		if (func->needs_backup)
			vmw_resource_mob_attach(res);
	}

	/*
	 * Handle the case where the backup mob is marked coherent but
	 * the resource isn't.
	 */
	if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
	    !res->coherent) {
		if (res->backup->dirty && !res->dirty) {
			ret = func->dirty_alloc(res);
			if (ret)
				return ret;
		} else if (!res->backup->dirty && res->dirty) {
			func->dirty_free(res);
		}
	}

	/*
	 * Transfer the dirty regions to the resource and update
	 * the resource.
	 */
	if (res->dirty) {
		if (dirtying && !res->res_dirty) {
			pgoff_t start = res->backup_offset >> PAGE_SHIFT;
			pgoff_t end = __KERNEL_DIV_ROUND_UP
				(res->backup_offset + res->backup_size,
				 PAGE_SIZE);

			vmw_bo_dirty_unmap(res->backup, start, end);
		}

		vmw_bo_dirty_transfer_to_res(res);
		return func->dirty_sync(res);
	}

	return 0;

out_bind_failed:
	func->destroy(res);

	return ret;
}
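
/*
 * Worked example for the page bounds above (illustrative numbers): with
 * 4 KiB pages, backup_offset == 0x1800 and backup_size == 0x2000 give
 * start == 0x1800 >> PAGE_SHIFT == 1 and
 * end == __KERNEL_DIV_ROUND_UP(0x1800 + 0x2000, PAGE_SIZE) == 4, so pages
 * 1..3 of the backup mob are handed to vmw_bo_dirty_unmap().
 */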
/**
 * vmw_resource_unreserve - Unreserve a resource previously reserved for
 * command submission.
 *
 * @res:               Pointer to the struct vmw_resource to unreserve.
 * @dirty_set:         Change dirty status of the resource.
 * @dirty:             When changing dirty status indicates the new status.
 * @switch_backup:     Backup buffer has been switched.
 * @new_backup:        Pointer to new backup buffer if command submission
 *                     switched. May be NULL.
 * @new_backup_offset: New backup offset if @switch_backup is true.
 *
 * Currently unreserving a resource means putting it back on the device's
 * resource lru list, so that it can be evicted if necessary.
 */
void vmw_resource_unreserve(struct vmw_resource *res,
			    bool dirty_set,
			    bool dirty,
			    bool switch_backup,
			    struct vmw_buffer_object *new_backup,
			    unsigned long new_backup_offset)
{
	struct vmw_private *dev_priv = res->dev_priv;

	if (!list_empty(&res->lru_head))
		return;

	if (switch_backup && new_backup != res->backup) {
		if (res->backup) {
			vmw_resource_mob_detach(res);
			if (res->coherent)
				vmw_bo_dirty_release(res->backup);
			vmw_bo_unreference(&res->backup);
		}

		if (new_backup) {
			res->backup = vmw_bo_reference(new_backup);

			/*
			 * The validation code should already have added a
			 * dirty tracker here.
			 */
			WARN_ON(res->coherent && !new_backup->dirty);

			vmw_resource_mob_attach(res);
		} else {
			res->backup = NULL;
		}
	} else if (switch_backup && res->coherent) {
		vmw_bo_dirty_release(res->backup);
	}

	if (switch_backup)
		res->backup_offset = new_backup_offset;

	if (dirty_set)
		res->res_dirty = dirty;

	if (!res->func->may_evict || res->id == -1 || res->pin_count)
		return;

	spin_lock(&dev_priv->resource_lock);
	list_add_tail(&res->lru_head,
		      &res->dev_priv->res_lru[res->func->res_type]);
	spin_unlock(&dev_priv->resource_lock);
}
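
/*
 * Pairing sketch (illustrative): command submission brackets resource use
 * with vmw_resource_reserve() / vmw_resource_unreserve(). A submission
 * that kept the same backup buffer and wants to mark the resource dirty
 * would end with:
 *
 *	vmw_resource_unreserve(res, true, true, false, NULL, 0);
 *
 * which is also the point where the resource returns to the eviction LRU,
 * unless it is pinned or has no device id.
 */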
/**
 * vmw_resource_check_buffer - Check whether a backup buffer is needed
 *                             for a resource and in that case, allocate
 *                             one, reserve and validate it.
 *
 * @ticket:         The ww acquire context to use, or NULL if trylocking.
 * @res:            The resource for which to allocate a backup buffer.
 * @interruptible:  Whether any sleeps during allocation should be
 *                  performed while interruptible.
 * @val_buf:        On successful return contains data about the
 *                  reserved and validated backup buffer.
 */
static int
vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
			  struct vmw_resource *res,
			  bool interruptible,
			  struct ttm_validate_buffer *val_buf)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct list_head val_list;
	bool backup_dirty = false;
	int ret;

	if (unlikely(res->backup == NULL)) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0))
			return ret;
	}

	INIT_LIST_HEAD(&val_list);
	ttm_bo_get(&res->backup->base);
	val_buf->bo = &res->backup->base;
	val_buf->num_shared = 0;
	list_add_tail(&val_buf->head, &val_list);
	ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL,
				     true);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	if (res->func->needs_backup && !vmw_resource_mob_attached(res))
		return 0;

	backup_dirty = res->backup_dirty;
	ret = ttm_bo_validate(&res->backup->base,
			      res->func->backup_placement,
			      &ctx);

	if (unlikely(ret != 0))
		goto out_no_validate;

	return 0;

out_no_validate:
	ttm_eu_backoff_reservation(ticket, &val_list);
out_no_reserve:
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
	if (backup_dirty)
		vmw_bo_unreference(&res->backup);

	return ret;
}
/**
 * vmw_resource_reserve - Reserve a resource for command submission
 *
 * @res:            The resource to reserve.
 * @interruptible:  Whether any sleeps should be performed interruptible.
 * @no_backup:      Whether to skip backup buffer allocation.
 *
 * This function takes the resource off the LRU list and makes sure
 * a backup buffer is present for guest-backed resources. However,
 * the buffer may not be bound to the resource at this point.
 */
int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
			 bool no_backup)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	spin_lock(&dev_priv->resource_lock);
	list_del_init(&res->lru_head);
	spin_unlock(&dev_priv->resource_lock);

	if (res->func->needs_backup && res->backup == NULL &&
	    !no_backup) {
		ret = vmw_resource_buf_alloc(res, interruptible);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed to allocate a backup buffer "
				  "of size %lu bytes\n",
				  (unsigned long) res->backup_size);
			return ret;
		}
	}

	return 0;
}
/**
 * vmw_resource_backoff_reservation - Unreserve and unreference a
 *                                    backup buffer
 *
 * @ticket:         The ww acquire ctx used for reservation.
 * @val_buf:        Backup buffer information.
 */
static void
vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
				 struct ttm_validate_buffer *val_buf)
{
	struct list_head val_list;

	if (likely(val_buf->bo == NULL))
		return;

	INIT_LIST_HEAD(&val_list);
	list_add_tail(&val_buf->head, &val_list);
	ttm_eu_backoff_reservation(ticket, &val_list);
	ttm_bo_put(val_buf->bo);
	val_buf->bo = NULL;
}
/**
 * vmw_resource_do_evict - Evict a resource, and transfer its data
 *                         to a backup buffer.
 *
 * @ticket:         The ww acquire ticket to use, or NULL if trylocking.
 * @res:            The resource to evict.
 * @interruptible:  Whether to wait interruptible.
 */
static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
				 struct vmw_resource *res, bool interruptible)
{
	struct ttm_validate_buffer val_buf;
	const struct vmw_res_func *func = res->func;
	int ret;

	BUG_ON(!func->may_evict);

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(func->unbind != NULL &&
		     (!func->needs_backup || vmw_resource_mob_attached(res)))) {
		ret = func->unbind(res, res->res_dirty, &val_buf);
		if (unlikely(ret != 0))
			goto out_no_unbind;
		vmw_resource_mob_detach(res);
	}
	ret = func->destroy(res);
	res->backup_dirty = true;
	res->res_dirty = false;
out_no_unbind:
	vmw_resource_backoff_reservation(ticket, &val_buf);

	return ret;
}
/**
 * vmw_resource_validate - Make a resource up-to-date and visible
 *                         to the device.
 * @res: The resource to make visible to the device.
 * @intr: Perform waits interruptible if possible.
 * @dirtying: Pending GPU operation will dirty the resource
 *
 * On successful return, any backup DMA buffer pointed to by @res->backup will
 * be reserved and validated.
 * On hardware resource shortage, this function will repeatedly evict
 * resources of the same type until the validation succeeds.
 *
 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code
 * on failure.
 */
int vmw_resource_validate(struct vmw_resource *res, bool intr,
			  bool dirtying)
{
	int ret;
	struct vmw_resource *evict_res;
	struct vmw_private *dev_priv = res->dev_priv;
	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
	struct ttm_validate_buffer val_buf;
	unsigned err_count = 0;

	if (!res->func->create)
		return 0;

	val_buf.bo = NULL;
	val_buf.num_shared = 0;
	if (res->backup)
		val_buf.bo = &res->backup->base;
	do {
		ret = vmw_resource_do_validate(res, &val_buf, dirtying);
		if (likely(ret != -EBUSY))
			break;

		spin_lock(&dev_priv->resource_lock);
		if (list_empty(lru_list) || !res->func->may_evict) {
			DRM_ERROR("Out of device resources "
				  "for %s.\n", res->func->type_name);
			ret = -EBUSY;
			spin_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_res = vmw_resource_reference
			(list_first_entry(lru_list, struct vmw_resource,
					  lru_head));
		list_del_init(&evict_res->lru_head);

		spin_unlock(&dev_priv->resource_lock);

		/* Trylock backup buffers with a NULL ticket. */
		ret = vmw_resource_do_evict(NULL, evict_res, intr);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (ret == -ERESTARTSYS ||
			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				goto out_no_validate;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

	if (unlikely(ret != 0))
		goto out_no_validate;
	else if (!res->func->needs_backup && res->backup) {
		WARN_ON_ONCE(vmw_resource_mob_attached(res));
		vmw_bo_unreference(&res->backup);
	}

	return 0;

out_no_validate:
	return ret;
}
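
/*
 * Caller sketch (illustrative): -EBUSY is handled internally by the
 * eviction loop above, so callers mainly distinguish success from
 * interruption:
 *
 *	ret = vmw_resource_validate(res, true, false);
 *	if (ret == -ERESTARTSYS)
 *		return ret;	(restart the syscall)
 */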
/**
 * vmw_resource_unbind_list
 *
 * @vbo: Pointer to the current backing MOB.
 *
 * Evicts the Guest Backed hardware resource if the backup
 * buffer is being moved out of MOB memory.
 * Note that this function will not race with the resource
 * validation code, since resource validation and eviction
 * both require the backup buffer to be reserved.
 */
void vmw_resource_unbind_list(struct vmw_buffer_object *vbo)
{
	struct ttm_validate_buffer val_buf = {
		.bo = &vbo->base,
		.num_shared = 0
	};

	lockdep_assert_held(&vbo->base.resv->lock.base);
	while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
		struct rb_node *node = vbo->res_tree.rb_node;
		struct vmw_resource *res =
			container_of(node, struct vmw_resource, mob_node);

		if (!WARN_ON_ONCE(!res->func->unbind))
			(void) res->func->unbind(res, res->res_dirty, &val_buf);

		res->backup_dirty = true;
		res->res_dirty = false;
		vmw_resource_mob_detach(res);
	}

	(void) ttm_bo_wait(&vbo->base, false, false);
}
/**
 * vmw_query_readback_all - Read back cached query states
 *
 * @dx_query_mob: Buffer containing the DX query MOB
 *
 * Read back cached states from the device if they exist. This function
 * assumes binding_mutex is held.
 */
int vmw_query_readback_all(struct vmw_buffer_object *dx_query_mob)
{
	struct vmw_resource *dx_query_ctx;
	struct vmw_private *dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackAllQuery body;
	} *cmd;

	/* No query bound, so do nothing */
	if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
		return 0;

	dx_query_ctx = dx_query_mob->dx_query_ctx;
	dev_priv = dx_query_ctx->dev_priv;

	cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), dx_query_ctx->id);
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = dx_query_ctx->id;

	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	/* Triggers a rebind the next time affected context is bound */
	dx_query_mob->dx_query_ctx = NULL;

	return 0;
}
/**
 * vmw_query_move_notify - Read back cached query states
 *
 * @bo: The TTM buffer object about to move.
 * @mem: The memory region @bo is moving to.
 *
 * Called before the query MOB is swapped out to read back cached query
 * states from the device.
 */
void vmw_query_move_notify(struct ttm_buffer_object *bo,
			   struct ttm_mem_reg *mem)
{
	struct vmw_buffer_object *dx_query_mob;
	struct ttm_bo_device *bdev = bo->bdev;
	struct vmw_private *dev_priv;

	dev_priv = container_of(bdev, struct vmw_private, bdev);

	mutex_lock(&dev_priv->binding_mutex);

	dx_query_mob = container_of(bo, struct vmw_buffer_object, base);
	if (mem == NULL || !dx_query_mob || !dx_query_mob->dx_query_ctx) {
		mutex_unlock(&dev_priv->binding_mutex);
		return;
	}

	/* If BO is being moved from MOB to system memory */
	if (mem->mem_type == TTM_PL_SYSTEM && bo->mem.mem_type == VMW_PL_MOB) {
		struct vmw_fence_obj *fence;

		(void) vmw_query_readback_all(dx_query_mob);
		mutex_unlock(&dev_priv->binding_mutex);

		/* Create a fence and attach the BO to it */
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
		vmw_bo_fence_single(bo, fence);

		if (fence != NULL)
			vmw_fence_obj_unreference(&fence);

		(void) ttm_bo_wait(bo, false, false);
	} else
		mutex_unlock(&dev_priv->binding_mutex);
}
/**
 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
 *
 * @res: The resource being queried.
 */
bool vmw_resource_needs_backup(const struct vmw_resource *res)
{
	return res->func->needs_backup;
}
/**
 * vmw_resource_evict_type - Evict all resources of a specific type
 *
 * @dev_priv:       Pointer to a device private struct
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * try to evict all evictable resources of a specific type.
 */
static void vmw_resource_evict_type(struct vmw_private *dev_priv,
				    enum vmw_res_type type)
{
	struct list_head *lru_list = &dev_priv->res_lru[type];
	struct vmw_resource *evict_res;
	unsigned err_count = 0;
	int ret;
	struct ww_acquire_ctx ticket;

	do {
		spin_lock(&dev_priv->resource_lock);

		if (list_empty(lru_list))
			goto out_unlock;

		evict_res = vmw_resource_reference(
			list_first_entry(lru_list, struct vmw_resource,
					 lru_head));
		list_del_init(&evict_res->lru_head);
		spin_unlock(&dev_priv->resource_lock);

		/* Wait lock backup buffers with a ticket. */
		ret = vmw_resource_do_evict(&ticket, evict_res, false);
		if (unlikely(ret != 0)) {
			spin_lock(&dev_priv->resource_lock);
			list_add_tail(&evict_res->lru_head, lru_list);
			spin_unlock(&dev_priv->resource_lock);
			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
				vmw_resource_unreference(&evict_res);
				return;
			}
		}

		vmw_resource_unreference(&evict_res);
	} while (1);

out_unlock:
	spin_unlock(&dev_priv->resource_lock);
}
/**
 * vmw_resource_evict_all - Evict all evictable resources
 *
 * @dev_priv:       Pointer to a device private struct
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
 * evict all evictable resources. In particular this means that all
 * guest-backed resources that are registered with the device are
 * evicted and the OTable becomes clean.
 */
void vmw_resource_evict_all(struct vmw_private *dev_priv)
{
	enum vmw_res_type type;

	mutex_lock(&dev_priv->cmdbuf_mutex);

	for (type = 0; type < vmw_res_max; ++type)
		vmw_resource_evict_type(dev_priv, type);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
}
/**
 * vmw_resource_pin - Add a pin reference on a resource
 *
 * @res: The resource to add a pin reference on
 *
 * This function adds a pin reference, and if needed validates the resource.
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 * This function returns 0 on success and a negative error code on failure.
 */
int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
{
	struct ttm_operation_ctx ctx = { interruptible, false };
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	ttm_write_lock(&dev_priv->reservation_sem, interruptible);
	mutex_lock(&dev_priv->cmdbuf_mutex);
	ret = vmw_resource_reserve(res, interruptible, false);
	if (ret)
		goto out_no_reserve;

	if (res->pin_count == 0) {
		struct vmw_buffer_object *vbo = NULL;

		if (res->backup) {
			vbo = res->backup;

			ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
			if (!vbo->pin_count) {
				ret = ttm_bo_validate
					(&vbo->base,
					 res->func->backup_placement,
					 &ctx);
				if (ret) {
					ttm_bo_unreserve(&vbo->base);
					goto out_no_validate;
				}
			}

			/* Do we really need to pin the MOB as well? */
			vmw_bo_pin_reserved(vbo, true);
		}
		ret = vmw_resource_validate(res, interruptible, true);
		if (vbo)
			ttm_bo_unreserve(&vbo->base);
		if (ret)
			goto out_no_validate;
	}
	res->pin_count++;

out_no_validate:
	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_write_unlock(&dev_priv->reservation_sem);

	return ret;
}
/**
 * vmw_resource_unpin - Remove a pin reference from a resource
 *
 * @res: The resource to remove a pin reference from
 *
 * Having a pin reference means that the resource can never be evicted, and
 * its id will never change as long as there is a pin reference.
 */
void vmw_resource_unpin(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;

	(void) ttm_read_lock(&dev_priv->reservation_sem, false);
	mutex_lock(&dev_priv->cmdbuf_mutex);

	ret = vmw_resource_reserve(res, false, true);
	WARN_ON(ret);

	WARN_ON(res->pin_count == 0);
	if (--res->pin_count == 0 && res->backup) {
		struct vmw_buffer_object *vbo = res->backup;

		(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
		vmw_bo_pin_reserved(vbo, false);
		ttm_bo_unreserve(&vbo->base);
	}

	vmw_resource_unreserve(res, false, false, false, NULL, 0UL);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	ttm_read_unlock(&dev_priv->reservation_sem);
}
/**
 * vmw_res_type - Return the resource type
 *
 * @res: Pointer to the resource
 */
enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
{
	return res->func->res_type;
}
/**
 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
 * sequential range of touched backing store memory.
 * @res: The resource.
 * @start: The first page touched.
 * @end: The last page touched + 1.
 */
void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
			       pgoff_t end)
{
	if (res->dirty)
		res->func->dirty_range_add(res, start << PAGE_SHIFT,
					   end << PAGE_SHIFT);
}
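
/*
 * Example (illustrative numbers): with 4 KiB pages, a touched page range
 * [2, 4) is reported to the dirty tracker as the byte range
 * [0x2000, 0x4000), i.e. start << PAGE_SHIFT to end << PAGE_SHIFT.
 */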
/**
 * vmw_resources_clean - Clean resources intersecting a mob range
 * @vbo: The mob buffer object
 * @start: The mob page offset starting the range
 * @end: The mob page offset ending the range
 * @num_prefault: Returns how many pages including the first have been
 * cleaned and are ok to prefault
 */
int vmw_resources_clean(struct vmw_buffer_object *vbo, pgoff_t start,
			pgoff_t end, pgoff_t *num_prefault)
{
	struct rb_node *cur = vbo->res_tree.rb_node;
	struct vmw_resource *found = NULL;
	unsigned long res_start = start << PAGE_SHIFT;
	unsigned long res_end = end << PAGE_SHIFT;
	unsigned long last_cleaned = 0;

	/*
	 * Find the resource with lowest backup_offset that intersects the
	 * range.
	 */
	while (cur) {
		struct vmw_resource *cur_res =
			container_of(cur, struct vmw_resource, mob_node);

		if (cur_res->backup_offset >= res_end) {
			cur = cur->rb_left;
		} else if (cur_res->backup_offset + cur_res->backup_size <=
			   res_start) {
			cur = cur->rb_right;
		} else {
			found = cur_res;
			cur = cur->rb_left;
			/* Continue to look for resources with lower offsets */
		}
	}

	/*
	 * In order of increasing backup_offset, clean dirty resources
	 * intersecting the range.
	 */
	while (found) {
		if (found->res_dirty) {
			int ret;

			if (!found->func->clean)
				return -EINVAL;

			ret = found->func->clean(found);
			if (ret)
				return ret;

			found->res_dirty = false;
		}
		last_cleaned = found->backup_offset + found->backup_size;
		cur = rb_next(&found->mob_node);
		if (!cur)
			break;

		found = container_of(cur, struct vmw_resource, mob_node);
		if (found->backup_offset >= res_end)
			break;
	}

	/*
	 * Set the number of pages allowed for prefaulting and fence the
	 * buffer object.
	 */
	*num_prefault = 1;
	if (last_cleaned > res_start) {
		struct ttm_buffer_object *bo = &vbo->base;

		*num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
						      PAGE_SIZE);
		vmw_bo_fence_single(bo, NULL);
		if (bo->moving)
			dma_fence_put(bo->moving);
		bo->moving = dma_fence_get
			(reservation_object_get_excl(bo->resv));
	}

	return 0;
}
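
/*
 * Prefault arithmetic example (illustrative numbers): with 4 KiB pages,
 * res_start == 0x3000 and last_cleaned == 0x5800 yield
 * *num_prefault == __KERNEL_DIV_ROUND_UP(0x2800, PAGE_SIZE) == 3, so the
 * first three pages of the faulting range are safe to prefault.
 */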