1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2 /**************************************************************************
4 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 #include <drm/ttm/ttm_placement.h>
30 #include "vmwgfx_binding.h"
31 #include "vmwgfx_bo.h"
32 #include "vmwgfx_drv.h"
33 #include "vmwgfx_resource_priv.h"
35 #define VMW_RES_EVICT_ERR_COUNT 10
38 * vmw_resource_mob_attach - Mark a resource as attached to its backing mob
41 void vmw_resource_mob_attach(struct vmw_resource *res)
43 struct vmw_bo *backup = res->backup;
44 struct rb_node **new = &backup->res_tree.rb_node, *parent = NULL;
46 dma_resv_assert_held(res->backup->base.base.resv);
47 res->used_prio = (res->res_dirty) ? res->func->dirty_prio :
51 struct vmw_resource *this =
52 container_of(*new, struct vmw_resource, mob_node);
55 new = (res->backup_offset < this->backup_offset) ?
56 &((*new)->rb_left) : &((*new)->rb_right);
59 rb_link_node(&res->mob_node, parent, new);
60 rb_insert_color(&res->mob_node, &backup->res_tree);
62 vmw_bo_prio_add(backup, res->used_prio);
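/*
 * Resources sharing a backup MOB live in the MOB's res_tree, an rb-tree
 * ordered by backup_offset so they can later be walked in offset order
 * (see vmw_resources_clean()). Attach and detach require the MOB to be
 * reserved; an illustrative caller pattern:
 *
 *	ret = ttm_bo_reserve(&res->backup->base, false, false, NULL);
 *	if (!ret) {
 *		vmw_resource_mob_attach(res);
 *		ttm_bo_unreserve(&res->backup->base);
 *	}
 */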
66 * vmw_resource_mob_detach - Mark a resource as detached from its backing mob
69 void vmw_resource_mob_detach(struct vmw_resource *res)
71 struct vmw_bo *backup = res->backup;
73 dma_resv_assert_held(backup->base.base.resv);
74 if (vmw_resource_mob_attached(res)) {
75 rb_erase(&res->mob_node, &backup->res_tree);
76 RB_CLEAR_NODE(&res->mob_node);
77 vmw_bo_prio_del(backup, res->used_prio);
81 struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
88 vmw_resource_reference_unless_doomed(struct vmw_resource *res)
90 return kref_get_unless_zero(&res->kref) ? res : NULL;
94 * vmw_resource_release_id - release a resource id to the id manager.
96 * @res: Pointer to the resource.
98 * Release the resource id to the resource id manager and set it to -1
100 void vmw_resource_release_id(struct vmw_resource *res)
102 struct vmw_private *dev_priv = res->dev_priv;
103 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
105 spin_lock(&dev_priv->resource_lock);
107 idr_remove(idr, res->id);
109 spin_unlock(&dev_priv->resource_lock);
112 static void vmw_resource_release(struct kref *kref)
114 struct vmw_resource *res =
115 container_of(kref, struct vmw_resource, kref);
116 struct vmw_private *dev_priv = res->dev_priv;
119 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
121 spin_lock(&dev_priv->resource_lock);
122 list_del_init(&res->lru_head);
123 spin_unlock(&dev_priv->resource_lock);
125 struct ttm_buffer_object *bo = &res->backup->base;
127 ret = ttm_bo_reserve(bo, false, false, NULL);
129 if (vmw_resource_mob_attached(res) &&
130 res->func->unbind != NULL) {
131 struct ttm_validate_buffer val_buf;
134 val_buf.num_shared = 0;
135 res->func->unbind(res, false, &val_buf);
137 res->backup_dirty = false;
138 vmw_resource_mob_detach(res);
140 res->func->dirty_free(res);
142 vmw_bo_dirty_release(res->backup);
143 ttm_bo_unreserve(bo);
144 vmw_bo_unreference(&res->backup);
147 if (likely(res->hw_destroy != NULL)) {
148 mutex_lock(&dev_priv->binding_mutex);
149 vmw_binding_res_list_kill(&res->binding_head);
150 mutex_unlock(&dev_priv->binding_mutex);
151 res->hw_destroy(res);
155 if (res->res_free != NULL)
160 spin_lock(&dev_priv->resource_lock);
163 spin_unlock(&dev_priv->resource_lock);
166 void vmw_resource_unreference(struct vmw_resource **p_res)
168 struct vmw_resource *res = *p_res;
171 kref_put(&res->kref, vmw_resource_release);
176 * vmw_resource_alloc_id - allocate a resource id from the id manager.
178 * @res: Pointer to the resource.
180 * Allocate the lowest free resource id from the resource id manager, and set
181 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
183 int vmw_resource_alloc_id(struct vmw_resource *res)
185 struct vmw_private *dev_priv = res->dev_priv;
187 struct idr *idr = &dev_priv->res_idr[res->func->res_type];
189 BUG_ON(res->id != -1);
191 idr_preload(GFP_KERNEL);
192 spin_lock(&dev_priv->resource_lock);
194 ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
198 spin_unlock(&dev_priv->resource_lock);
200 return ret < 0 ? ret : 0;
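/*
 * The idr_preload()/GFP_NOWAIT pairing lets the allocation itself happen
 * under the resource_lock spinlock without sleeping: idr_preload() fills a
 * per-cpu cache beforehand so idr_alloc() can succeed atomically.
 */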
204 * vmw_resource_init - initialize a struct vmw_resource
206 * @dev_priv: Pointer to a device private struct.
207 * @res: The struct vmw_resource to initialize.
208 * @delay_id: Boolean whether to defer device id allocation until
209 * the first validation.
210 * @res_free: Resource destructor.
211 * @func: Resource function table.
213 int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
215 void (*res_free) (struct vmw_resource *res),
216 const struct vmw_res_func *func)
218 kref_init(&res->kref);
219 res->hw_destroy = NULL;
220 res->res_free = res_free;
221 res->dev_priv = dev_priv;
223 RB_CLEAR_NODE(&res->mob_node);
224 INIT_LIST_HEAD(&res->lru_head);
225 INIT_LIST_HEAD(&res->binding_head);
228 res->backup_offset = 0;
229 res->backup_dirty = false;
230 res->res_dirty = false;
231 res->coherent = false;
237 return vmw_resource_alloc_id(res);
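/*
 * A minimal initialization sketch, assuming a hypothetical my_res_free()
 * destructor and my_res_func function table; passing true for @delay_id
 * defers device id allocation to the first validation:
 *
 *	ret = vmw_resource_init(dev_priv, res, true, my_res_free,
 *				&my_res_func);
 *	if (ret)
 *		return ret;
 */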
242 * vmw_user_resource_lookup_handle - look up a struct vmw_resource from a
243 * TTM user-space handle and perform basic type checks
245 * @dev_priv: Pointer to a device private struct
246 * @tfile: Pointer to a struct ttm_object_file identifying the caller
247 * @handle: The TTM user-space handle
248 * @converter: Pointer to an object describing the resource type
249 * @p_res: On successful return the location pointed to will contain
250 * a pointer to a refcounted struct vmw_resource.
252 * If the handle can't be found or is associated with an incorrect resource
253 * type, -EINVAL will be returned.
255 int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
256 struct ttm_object_file *tfile,
258 const struct vmw_user_resource_conv
260 struct vmw_resource **p_res)
262 struct ttm_base_object *base;
263 struct vmw_resource *res;
266 base = ttm_base_object_lookup(tfile, handle);
267 if (unlikely(base == NULL))
270 if (unlikely(ttm_base_object_type(base) != converter->object_type))
271 goto out_bad_resource;
273 res = converter->base_obj_to_res(base);
274 kref_get(&res->kref);
280 ttm_base_object_unref(&base);
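/*
 * On success the caller owns a reference on *p_res and must drop it with
 * vmw_resource_unreference(). Illustrative use (error handling trimmed):
 *
 *	ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
 *					      converter, &res);
 *	if (ret)
 *		return ret;
 *	... use res ...
 *	vmw_resource_unreference(&res);
 */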
286 * Helper function that looks up either a surface or a buffer object.
288 * The pointers pointed at by out_surf and out_buf need to be NULL.
290 int vmw_user_lookup_handle(struct vmw_private *dev_priv,
291 struct drm_file *filp,
293 struct vmw_surface **out_surf,
294 struct vmw_bo **out_buf)
296 struct ttm_object_file *tfile = vmw_fpriv(filp)->tfile;
297 struct vmw_resource *res;
300 BUG_ON(*out_surf || *out_buf);
302 ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
303 user_surface_converter,
306 *out_surf = vmw_res_to_srf(res);
311 ret = vmw_user_bo_lookup(filp, handle, out_buf);
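/*
 * The handle is first tried as a surface; only if that lookup fails is it
 * treated as a plain buffer object, so at most one of *out_surf and
 * *out_buf is set on return.
 */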
316 * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
318 * @res: The resource for which to allocate a backup buffer.
319 * @interruptible: Whether any sleeps during allocation should be
320 * performed while interruptible.
322 static int vmw_resource_buf_alloc(struct vmw_resource *res,
325 unsigned long size = PFN_ALIGN(res->backup_size);
326 struct vmw_bo *backup;
329 if (likely(res->backup)) {
330 BUG_ON(res->backup->base.base.size < size);
334 ret = vmw_bo_create(res->dev_priv, res->backup_size,
335 res->func->domain, res->func->busy_domain,
336 interruptible, false, &backup);
337 if (unlikely(ret != 0))
340 res->backup = backup;
347 * vmw_resource_do_validate - Make a resource up-to-date and visible
350 * @res: The resource to make visible to the device.
351 * @val_buf: Information about a buffer possibly
352 * containing backup data if a bind operation is needed.
353 * @dirtying: Transfer dirty regions.
355 * On hardware resource shortage, this function returns -EBUSY and
356 * should be retried once resources have been freed up.
358 static int vmw_resource_do_validate(struct vmw_resource *res,
359 struct ttm_validate_buffer *val_buf,
363 const struct vmw_res_func *func = res->func;
365 if (unlikely(res->id == -1)) {
366 ret = func->create(res);
367 if (unlikely(ret != 0))
372 ((func->needs_backup && !vmw_resource_mob_attached(res) &&
373 val_buf->bo != NULL) ||
374 (!func->needs_backup && val_buf->bo != NULL))) {
375 ret = func->bind(res, val_buf);
376 if (unlikely(ret != 0))
377 goto out_bind_failed;
378 if (func->needs_backup)
379 vmw_resource_mob_attach(res);
383 * Handle the case where the backup mob is marked coherent but
384 * the resource isn't.
386 if (func->dirty_alloc && vmw_resource_mob_attached(res) &&
388 if (res->backup->dirty && !res->dirty) {
389 ret = func->dirty_alloc(res);
392 } else if (!res->backup->dirty && res->dirty) {
393 func->dirty_free(res);
398 * Transfer the dirty regions to the resource and update
402 if (dirtying && !res->res_dirty) {
403 pgoff_t start = res->backup_offset >> PAGE_SHIFT;
404 pgoff_t end = __KERNEL_DIV_ROUND_UP
405 (res->backup_offset + res->backup_size,
408 vmw_bo_dirty_unmap(res->backup, start, end);
411 vmw_bo_dirty_transfer_to_res(res);
412 return func->dirty_sync(res);
424 * vmw_resource_unreserve - Unreserve a resource previously reserved for
425 * command submission.
427 * @res: Pointer to the struct vmw_resource to unreserve.
428 * @dirty_set: Change dirty status of the resource.
429 * @dirty: When changing dirty status indicates the new status.
430 * @switch_backup: Backup buffer has been switched.
431 * @new_backup: Pointer to new backup buffer if command submission
432 * switched. May be NULL.
433 * @new_backup_offset: New backup offset if @switch_backup is true.
435 * Currently unreserving a resource means putting it back on the device's
436 * resource lru list, so that it can be evicted if necessary.
438 void vmw_resource_unreserve(struct vmw_resource *res,
442 struct vmw_bo *new_backup,
443 unsigned long new_backup_offset)
445 struct vmw_private *dev_priv = res->dev_priv;
447 if (!list_empty(&res->lru_head))
450 if (switch_backup && new_backup != res->backup) {
452 vmw_resource_mob_detach(res);
454 vmw_bo_dirty_release(res->backup);
455 vmw_bo_unreference(&res->backup);
459 res->backup = vmw_bo_reference(new_backup);
462 * The validation code should already have added a
463 * dirty tracker here.
465 WARN_ON(res->coherent && !new_backup->dirty);
467 vmw_resource_mob_attach(res);
471 } else if (switch_backup && res->coherent) {
472 vmw_bo_dirty_release(res->backup);
476 res->backup_offset = new_backup_offset;
479 res->res_dirty = dirty;
481 if (!res->func->may_evict || res->id == -1 || res->pin_count)
484 spin_lock(&dev_priv->resource_lock);
485 list_add_tail(&res->lru_head,
486 &res->dev_priv->res_lru[res->func->res_type]);
487 spin_unlock(&dev_priv->resource_lock);
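/*
 * Reserve/unreserve bracket a command-submission critical section; a
 * minimal sketch with no backup switch (illustrative only, error handling
 * trimmed):
 *
 *	ret = vmw_resource_reserve(res, true, false);
 *	... submit commands referencing the resource ...
 *	vmw_resource_unreserve(res, false, false, false, NULL, 0);
 */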
491 * vmw_resource_check_buffer - Check whether a backup buffer is needed
492 * for a resource and in that case, allocate
493 * one, reserve and validate it.
495 * @ticket: The ww acquire context to use, or NULL if trylocking.
496 * @res: The resource for which to allocate a backup buffer.
497 * @interruptible: Whether any sleeps during allocation should be
498 * performed while interruptible.
499 * @val_buf: On successful return contains data about the
500 * reserved and validated backup buffer.
503 vmw_resource_check_buffer(struct ww_acquire_ctx *ticket,
504 struct vmw_resource *res,
506 struct ttm_validate_buffer *val_buf)
508 struct ttm_operation_ctx ctx = { true, false };
509 struct list_head val_list;
510 bool backup_dirty = false;
513 if (unlikely(res->backup == NULL)) {
514 ret = vmw_resource_buf_alloc(res, interruptible);
515 if (unlikely(ret != 0))
519 INIT_LIST_HEAD(&val_list);
520 ttm_bo_get(&res->backup->base);
521 val_buf->bo = &res->backup->base;
522 val_buf->num_shared = 0;
523 list_add_tail(&val_buf->head, &val_list);
524 ret = ttm_eu_reserve_buffers(ticket, &val_list, interruptible, NULL);
525 if (unlikely(ret != 0))
528 if (res->func->needs_backup && !vmw_resource_mob_attached(res))
531 backup_dirty = res->backup_dirty;
532 vmw_bo_placement_set(res->backup, res->func->domain,
533 res->func->busy_domain);
534 ret = ttm_bo_validate(&res->backup->base,
535 &res->backup->placement,
538 if (unlikely(ret != 0))
539 goto out_no_validate;
544 ttm_eu_backoff_reservation(ticket, &val_list);
546 ttm_bo_put(val_buf->bo);
549 vmw_bo_unreference(&res->backup);
555 * vmw_resource_reserve - Reserve a resource for command submission
557 * @res: The resource to reserve.
559 * This function takes the resource off the LRU list and makes sure
560 * a backup buffer is present for guest-backed resources. However,
561 * the buffer may not be bound to the resource at this point.
564 int vmw_resource_reserve(struct vmw_resource *res, bool interruptible,
567 struct vmw_private *dev_priv = res->dev_priv;
570 spin_lock(&dev_priv->resource_lock);
571 list_del_init(&res->lru_head);
572 spin_unlock(&dev_priv->resource_lock);
574 if (res->func->needs_backup && res->backup == NULL &&
576 ret = vmw_resource_buf_alloc(res, interruptible);
577 if (unlikely(ret != 0)) {
578 DRM_ERROR("Failed to allocate a backup buffer "
579 "of size %lu. bytes\n",
580 (unsigned long) res->backup_size);
589 * vmw_resource_backoff_reservation - Unreserve and unreference a backup buffer.
592 * @ticket: The ww acquire ctx used for reservation.
593 * @val_buf: Backup buffer information.
596 vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
597 struct ttm_validate_buffer *val_buf)
599 struct list_head val_list;
601 if (likely(val_buf->bo == NULL))
604 INIT_LIST_HEAD(&val_list);
605 list_add_tail(&val_buf->head, &val_list);
606 ttm_eu_backoff_reservation(ticket, &val_list);
607 ttm_bo_put(val_buf->bo);
612 * vmw_resource_do_evict - Evict a resource, and transfer its data
613 * to a backup buffer.
615 * @ticket: The ww acquire ticket to use, or NULL if trylocking.
616 * @res: The resource to evict.
617 * @interruptible: Whether to wait interruptible.
619 static int vmw_resource_do_evict(struct ww_acquire_ctx *ticket,
620 struct vmw_resource *res, bool interruptible)
622 struct ttm_validate_buffer val_buf;
623 const struct vmw_res_func *func = res->func;
626 BUG_ON(!func->may_evict);
629 val_buf.num_shared = 0;
630 ret = vmw_resource_check_buffer(ticket, res, interruptible, &val_buf);
631 if (unlikely(ret != 0))
634 if (unlikely(func->unbind != NULL &&
635 (!func->needs_backup || vmw_resource_mob_attached(res)))) {
636 ret = func->unbind(res, res->res_dirty, &val_buf);
637 if (unlikely(ret != 0))
639 vmw_resource_mob_detach(res);
641 ret = func->destroy(res);
642 res->backup_dirty = true;
643 res->res_dirty = false;
645 vmw_resource_backoff_reservation(ticket, &val_buf);
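/*
 * After a successful eviction the resource data lives only in the backup
 * buffer: unbind() (when present) transfers it out of the device and
 * destroy() releases the hardware resource, so a later validation has to
 * recreate and rebind it.
 */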
652 * vmw_resource_validate - Make a resource up-to-date and visible
654 * @res: The resource to make visible to the device.
655 * @intr: Perform waits interruptible if possible.
656 * @dirtying: Pending GPU operation will dirty the resource
658 * On successful return, any backup DMA buffer pointed to by @res->backup will
659 * be reserved and validated.
660 * On hardware resource shortage, this function will repeatedly evict
661 * resources of the same type until the validation succeeds.
663 * Return: Zero on success, -ERESTARTSYS if interrupted, negative error code on failure.
666 int vmw_resource_validate(struct vmw_resource *res, bool intr,
670 struct vmw_resource *evict_res;
671 struct vmw_private *dev_priv = res->dev_priv;
672 struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
673 struct ttm_validate_buffer val_buf;
674 unsigned err_count = 0;
676 if (!res->func->create)
680 val_buf.num_shared = 0;
682 val_buf.bo = &res->backup->base;
684 ret = vmw_resource_do_validate(res, &val_buf, dirtying);
685 if (likely(ret != -EBUSY))
688 spin_lock(&dev_priv->resource_lock);
689 if (list_empty(lru_list) || !res->func->may_evict) {
690 DRM_ERROR("Out of device device resources "
691 "for %s.\n", res->func->type_name);
693 spin_unlock(&dev_priv->resource_lock);
697 evict_res = vmw_resource_reference
698 (list_first_entry(lru_list, struct vmw_resource,
700 list_del_init(&evict_res->lru_head);
702 spin_unlock(&dev_priv->resource_lock);
704 /* Trylock backup buffers with a NULL ticket. */
705 ret = vmw_resource_do_evict(NULL, evict_res, intr);
706 if (unlikely(ret != 0)) {
707 spin_lock(&dev_priv->resource_lock);
708 list_add_tail(&evict_res->lru_head, lru_list);
709 spin_unlock(&dev_priv->resource_lock);
710 if (ret == -ERESTARTSYS ||
711 ++err_count > VMW_RES_EVICT_ERR_COUNT) {
712 vmw_resource_unreference(&evict_res);
713 goto out_no_validate;
717 vmw_resource_unreference(&evict_res);
720 if (unlikely(ret != 0))
721 goto out_no_validate;
722 else if (!res->func->needs_backup && res->backup) {
723 WARN_ON_ONCE(vmw_resource_mob_attached(res));
724 vmw_bo_unreference(&res->backup);
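/*
 * The -EBUSY loop above evicts least-recently-used resources of the same
 * type one at a time and retries; after more than VMW_RES_EVICT_ERR_COUNT
 * failed evictions (or an -ERESTARTSYS) it gives up rather than spin
 * forever.
 */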
735 * vmw_resource_unbind_list
737 * @vbo: Pointer to the current backing MOB.
739 * Evicts the Guest Backed hardware resource if the backup
740 * buffer is being moved out of MOB memory.
741 * Note that this function will not race with the resource
742 * validation code, since resource validation and eviction
743 * both require the backup buffer to be reserved.
745 void vmw_resource_unbind_list(struct vmw_bo *vbo)
747 struct ttm_validate_buffer val_buf = {
752 dma_resv_assert_held(vbo->base.base.resv);
753 while (!RB_EMPTY_ROOT(&vbo->res_tree)) {
754 struct rb_node *node = vbo->res_tree.rb_node;
755 struct vmw_resource *res =
756 container_of(node, struct vmw_resource, mob_node);
758 if (!WARN_ON_ONCE(!res->func->unbind))
759 (void) res->func->unbind(res, res->res_dirty, &val_buf);
761 res->backup_dirty = true;
762 res->res_dirty = false;
763 vmw_resource_mob_detach(res);
766 (void) ttm_bo_wait(&vbo->base, false, false);
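/*
 * Holding the MOB reservation here is what makes the loop safe: validation
 * attaches and binds resources only with the same reservation held, so the
 * res_tree cannot change underneath us while entries are unbound.
 */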
771 * vmw_query_readback_all - Read back cached query states
773 * @dx_query_mob: Buffer containing the DX query MOB
775 * Read back cached states from the device if they exist. This function
776 * assumes binding_mutex is held.
778 int vmw_query_readback_all(struct vmw_bo *dx_query_mob)
780 struct vmw_resource *dx_query_ctx;
781 struct vmw_private *dev_priv;
783 SVGA3dCmdHeader header;
784 SVGA3dCmdDXReadbackAllQuery body;
788 /* No query bound, so do nothing */
789 if (!dx_query_mob || !dx_query_mob->dx_query_ctx)
792 dx_query_ctx = dx_query_mob->dx_query_ctx;
793 dev_priv = dx_query_ctx->dev_priv;
795 cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), dx_query_ctx->id);
796 if (unlikely(cmd == NULL))
799 cmd->header.id = SVGA_3D_CMD_DX_READBACK_ALL_QUERY;
800 cmd->header.size = sizeof(cmd->body);
801 cmd->body.cid = dx_query_ctx->id;
803 vmw_cmd_commit(dev_priv, sizeof(*cmd));
805 /* Triggers a rebind the next time the affected context is bound */
806 dx_query_mob->dx_query_ctx = NULL;
814 * vmw_query_move_notify - Read back cached query states
816 * @bo: The TTM buffer object about to move.
817 * @old_mem: The memory region @bo is moving from.
818 * @new_mem: The memory region @bo is moving to.
820 * Called before the query MOB is swapped out to read back cached query
821 * states from the device.
823 void vmw_query_move_notify(struct ttm_buffer_object *bo,
824 struct ttm_resource *old_mem,
825 struct ttm_resource *new_mem)
827 struct vmw_bo *dx_query_mob;
828 struct ttm_device *bdev = bo->bdev;
829 struct vmw_private *dev_priv;
831 dev_priv = container_of(bdev, struct vmw_private, bdev);
833 mutex_lock(&dev_priv->binding_mutex);
835 /* If BO is being moved from MOB to system memory */
836 if (new_mem->mem_type == TTM_PL_SYSTEM &&
837 old_mem->mem_type == VMW_PL_MOB) {
838 struct vmw_fence_obj *fence;
840 dx_query_mob = container_of(bo, struct vmw_bo, base);
841 if (!dx_query_mob || !dx_query_mob->dx_query_ctx) {
842 mutex_unlock(&dev_priv->binding_mutex);
846 (void) vmw_query_readback_all(dx_query_mob);
847 mutex_unlock(&dev_priv->binding_mutex);
849 /* Create a fence and attach the BO to it */
850 (void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
851 vmw_bo_fence_single(bo, fence);
854 vmw_fence_obj_unreference(&fence);
856 (void) ttm_bo_wait(bo, false, false);
858 mutex_unlock(&dev_priv->binding_mutex);
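/*
 * The readback command is fenced and the BO waited on before the move
 * proceeds, so the cached query results reach the MOB before its pages are
 * handed back to the system.
 */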
862 * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
864 * @res: The resource being queried.
866 bool vmw_resource_needs_backup(const struct vmw_resource *res)
868 return res->func->needs_backup;
872 * vmw_resource_evict_type - Evict all resources of a specific type
874 * @dev_priv: Pointer to a device private struct
875 * @type: The resource type to evict
877 * To avoid thrashing starvation or as part of the hibernation sequence,
878 * try to evict all evictable resources of a specific type.
880 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
881 enum vmw_res_type type)
883 struct list_head *lru_list = &dev_priv->res_lru[type];
884 struct vmw_resource *evict_res;
885 unsigned err_count = 0;
887 struct ww_acquire_ctx ticket;
890 spin_lock(&dev_priv->resource_lock);
892 if (list_empty(lru_list))
895 evict_res = vmw_resource_reference(
896 list_first_entry(lru_list, struct vmw_resource,
898 list_del_init(&evict_res->lru_head);
899 spin_unlock(&dev_priv->resource_lock);
901 /* Wait-lock backup buffers with a ticket. */
902 ret = vmw_resource_do_evict(&ticket, evict_res, false);
903 if (unlikely(ret != 0)) {
904 spin_lock(&dev_priv->resource_lock);
905 list_add_tail(&evict_res->lru_head, lru_list);
906 spin_unlock(&dev_priv->resource_lock);
907 if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
908 vmw_resource_unreference(&evict_res);
913 vmw_resource_unreference(&evict_res);
917 spin_unlock(&dev_priv->resource_lock);
921 * vmw_resource_evict_all - Evict all evictable resources
923 * @dev_priv: Pointer to a device private struct
925 * To avoid thrashing starvation or as part of the hibernation sequence,
926 * evict all evictable resources. In particular this means that all
927 * guest-backed resources that are registered with the device are
928 * evicted and the OTable becomes clean.
930 void vmw_resource_evict_all(struct vmw_private *dev_priv)
932 enum vmw_res_type type;
934 mutex_lock(&dev_priv->cmdbuf_mutex);
936 for (type = 0; type < vmw_res_max; ++type)
937 vmw_resource_evict_type(dev_priv, type);
939 mutex_unlock(&dev_priv->cmdbuf_mutex);
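/*
 * A minimal sketch of how full eviction might be driven around the
 * hibernation sequence, assuming a hypothetical suspend hook:
 *
 *	static int my_driver_suspend(struct vmw_private *dev_priv)
 *	{
 *		vmw_resource_evict_all(dev_priv);
 *		return 0;
 *	}
 */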
943 * vmw_resource_pin - Add a pin reference on a resource
945 * @res: The resource to add a pin reference on
947 * This function adds a pin reference, and if needed validates the resource.
948 * Having a pin reference means that the resource can never be evicted, and
949 * its id will never change as long as there is a pin reference.
950 * This function returns 0 on success and a negative error code on failure.
952 int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
954 struct ttm_operation_ctx ctx = { interruptible, false };
955 struct vmw_private *dev_priv = res->dev_priv;
958 mutex_lock(&dev_priv->cmdbuf_mutex);
959 ret = vmw_resource_reserve(res, interruptible, false);
963 if (res->pin_count == 0) {
964 struct vmw_bo *vbo = NULL;
969 ret = ttm_bo_reserve(&vbo->base, interruptible, false, NULL);
971 goto out_no_validate;
972 if (!vbo->base.pin_count) {
973 vmw_bo_placement_set(vbo,
975 res->func->busy_domain);
976 ret = ttm_bo_validate
981 ttm_bo_unreserve(&vbo->base);
982 goto out_no_validate;
986 /* Do we really need to pin the MOB as well? */
987 vmw_bo_pin_reserved(vbo, true);
989 ret = vmw_resource_validate(res, interruptible, true);
991 ttm_bo_unreserve(&vbo->base);
993 goto out_no_validate;
998 vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1000 mutex_unlock(&dev_priv->cmdbuf_mutex);
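/*
 * Pin/unpin usage sketch (illustrative only); while pinned the resource
 * keeps its id and cannot be evicted:
 *
 *	ret = vmw_resource_pin(res, true);
 *	if (ret)
 *		return ret;
 *	... use res->id ...
 *	vmw_resource_unpin(res);
 */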
1006 * vmw_resource_unpin - Remove a pin reference from a resource
1008 * @res: The resource to remove a pin reference from
1010 * Having a pin reference means that the resource can never be evicted, and
1011 * its id will never change as long as there is a pin reference.
1013 void vmw_resource_unpin(struct vmw_resource *res)
1015 struct vmw_private *dev_priv = res->dev_priv;
1018 mutex_lock(&dev_priv->cmdbuf_mutex);
1020 ret = vmw_resource_reserve(res, false, true);
1023 WARN_ON(res->pin_count == 0);
1024 if (--res->pin_count == 0 && res->backup) {
1025 struct vmw_bo *vbo = res->backup;
1027 (void) ttm_bo_reserve(&vbo->base, false, false, NULL);
1028 vmw_bo_pin_reserved(vbo, false);
1029 ttm_bo_unreserve(&vbo->base);
1032 vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
1034 mutex_unlock(&dev_priv->cmdbuf_mutex);
1038 * vmw_res_type - Return the resource type
1040 * @res: Pointer to the resource
1042 enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
1044 return res->func->res_type;
1048 * vmw_resource_dirty_update - Update a resource's dirty tracker with a
1049 * sequential range of touched backing store memory.
1050 * @res: The resource.
1051 * @start: The first page touched.
1052 * @end: The last page touched + 1.
1054 void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start,
1058 res->func->dirty_range_add(res, start << PAGE_SHIFT, end << PAGE_SHIFT);
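/*
 * @start and @end are backing-store page offsets; they are converted to
 * byte offsets here because the per-resource dirty trackers work in bytes.
 */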
1063 * vmw_resources_clean - Clean resources intersecting a mob range
1064 * @vbo: The mob buffer object
1065 * @start: The mob page offset starting the range
1066 * @end: The mob page offset ending the range
1067 * @num_prefault: Returns how many pages including the first have been
1068 * cleaned and are ok to prefault
1070 int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start,
1071 pgoff_t end, pgoff_t *num_prefault)
1073 struct rb_node *cur = vbo->res_tree.rb_node;
1074 struct vmw_resource *found = NULL;
1075 unsigned long res_start = start << PAGE_SHIFT;
1076 unsigned long res_end = end << PAGE_SHIFT;
1077 unsigned long last_cleaned = 0;
1080 * Find the resource with lowest backup_offset that intersects the
1084 struct vmw_resource *cur_res =
1085 container_of(cur, struct vmw_resource, mob_node);
1087 if (cur_res->backup_offset >= res_end) {
1089 } else if (cur_res->backup_offset + cur_res->backup_size <=
1091 cur = cur->rb_right;
1095 /* Continue to look for resources with lower offsets */
1100 * In order of increasing backup_offset, clean dirty resources
1101 * intersecting the range.
1104 if (found->res_dirty) {
1107 if (!found->func->clean)
1110 ret = found->func->clean(found);
1114 found->res_dirty = false;
1116 last_cleaned = found->backup_offset + found->backup_size;
1117 cur = rb_next(&found->mob_node);
1121 found = container_of(cur, struct vmw_resource, mob_node);
1122 if (found->backup_offset >= res_end)
1127 * Set the number of pages allowed for prefaulting and fence the buffer object
1130 if (last_cleaned > res_start) {
1131 struct ttm_buffer_object *bo = &vbo->base;
1133 *num_prefault = __KERNEL_DIV_ROUND_UP(last_cleaned - res_start,
1135 vmw_bo_fence_single(bo, NULL);
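/*
 * When anything was cleaned, *num_prefault is set to the number of pages
 * from @start up to the last cleaned byte, which are now safe to prefault;
 * otherwise the caller's value is left untouched.
 */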