// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/ttm/ttm_placement.h>

#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
	struct vmw_ctx_binding_state *cbs;
	struct vmw_cmdbuf_res_manager *man;
	struct vmw_resource *cotables[SVGA_COTABLE_MAX];
	spinlock_t cotable_lock;
	struct vmw_bo *dx_query_mob;
};

static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base);

static int vmw_gb_context_create(struct vmw_resource *res);
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
static int vmw_dx_context_create(struct vmw_resource *res);
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_dx_context_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_context_conv = {
	.object_type = VMW_RES_CONTEXT,
	.base_obj_to_res = vmw_user_context_base_to_res,
	.res_free = vmw_user_context_free
};

const struct vmw_user_resource_conv *user_context_converter =
	&user_context_conv;

static const struct vmw_res_func vmw_legacy_context_func = {
	.res_type = vmw_res_context,
	.needs_guest_memory = false,
	.may_evict = false,
	.type_name = "legacy contexts",
	.domain = VMW_BO_DOMAIN_SYS,
	.busy_domain = VMW_BO_DOMAIN_SYS,
	.create = NULL,
	.destroy = NULL,
	.bind = NULL,
	.unbind = NULL
};

static const struct vmw_res_func vmw_gb_context_func = {
	.res_type = vmw_res_context,
	.needs_guest_memory = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "guest backed contexts",
	.domain = VMW_BO_DOMAIN_MOB,
	.busy_domain = VMW_BO_DOMAIN_MOB,
	.create = vmw_gb_context_create,
	.destroy = vmw_gb_context_destroy,
	.bind = vmw_gb_context_bind,
	.unbind = vmw_gb_context_unbind
};

static const struct vmw_res_func vmw_dx_context_func = {
	.res_type = vmw_res_dx_context,
	.needs_guest_memory = true,
	.may_evict = true,
	.prio = 3,
	.dirty_prio = 3,
	.type_name = "dx contexts",
	.domain = VMW_BO_DOMAIN_MOB,
	.busy_domain = VMW_BO_DOMAIN_MOB,
	.create = vmw_dx_context_create,
	.destroy = vmw_dx_context_destroy,
	.bind = vmw_dx_context_bind,
	.unbind = vmw_dx_context_unbind
};

/*
 * Context management:
 */
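
/*
 * vmw_context_cotables_unref - Drop the context's references on its cotables.
 *
 * Each cotable pointer is detached from the context under @cotable_lock so
 * that a concurrent scrub never sees a half-torn-down slot; the unreference
 * itself is done outside the lock.
 */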
static void vmw_context_cotables_unref(struct vmw_private *dev_priv,
				       struct vmw_user_context *uctx)
{
	struct vmw_resource *res;
	int i;
	u32 cotable_max = has_sm5_context(dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	for (i = 0; i < cotable_max; ++i) {
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[i];
		uctx->cotables[i] = NULL;
		spin_unlock(&uctx->cotable_lock);

		if (res)
			vmw_resource_unreference(&res);
	}
}
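
/*
 * vmw_hw_context_destroy - res->hw_destroy callback for all context types.
 *
 * Guest-backed and DX contexts are torn down through their res_func destroy
 * hooks after their bindings have been killed; legacy contexts are destroyed
 * with an inline SVGA_3D_CMD_CONTEXT_DESTROY command.
 */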
static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	if (res->func->destroy == vmw_gb_context_destroy ||
	    res->func->destroy == vmw_dx_context_destroy) {
		mutex_lock(&dev_priv->cmdbuf_mutex);
		vmw_cmdbuf_res_man_destroy(uctx->man);
		mutex_lock(&dev_priv->binding_mutex);
		vmw_binding_state_kill(uctx->cbs);
		(void) res->func->destroy(res);
		mutex_unlock(&dev_priv->binding_mutex);
		if (dev_priv->pinned_bo != NULL &&
		    !dev_priv->query_cid_valid)
			__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
		mutex_unlock(&dev_priv->cmdbuf_mutex);
		vmw_context_cotables_unref(dev_priv, uctx);
		return;
	}

	vmw_execbuf_release_pinned_bo(dev_priv);
	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return;

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_dec(dev_priv);
}
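
/*
 * vmw_gb_context_init - Initialize a guest-backed or DX context resource,
 * allocating its command buffer resource manager, binding state and, for
 * DX contexts, its cotables. On error the resource is freed through
 * @res_free, or kfree() if no destructor was supplied.
 */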
static int vmw_gb_context_init(struct vmw_private *dev_priv,
			       bool dx,
			       struct vmw_resource *res,
			       void (*res_free)(struct vmw_resource *res))
{
	int ret, i;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	res->guest_memory_size = (dx ? sizeof(SVGADXContextMobFormat) :
				  sizeof(SVGAGBContextData));
	ret = vmw_resource_init(dev_priv, res, true,
				res_free,
				dx ? &vmw_dx_context_func :
				&vmw_gb_context_func);
	if (unlikely(ret != 0))
		goto out_err;

	if (dev_priv->has_mob) {
		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
		if (IS_ERR(uctx->man)) {
			ret = PTR_ERR(uctx->man);
			uctx->man = NULL;
			goto out_err;
		}
	}

	uctx->cbs = vmw_binding_state_alloc(dev_priv);
	if (IS_ERR(uctx->cbs)) {
		ret = PTR_ERR(uctx->cbs);
		goto out_err;
	}

	spin_lock_init(&uctx->cotable_lock);

	if (dx) {
		u32 cotable_max = has_sm5_context(dev_priv) ?
			SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
		for (i = 0; i < cotable_max; ++i) {
			uctx->cotables[i] = vmw_cotable_alloc(dev_priv,
							      &uctx->res, i);
			if (IS_ERR(uctx->cotables[i])) {
				ret = PTR_ERR(uctx->cotables[i]);
				goto out_cotables;
			}
		}
	}

	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_cotables:
	vmw_context_cotables_unref(dev_priv, uctx);
out_err:
	if (res_free)
		res_free(res);
	else
		kfree(res);
	return ret;
}
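
/*
 * vmw_context_init - Initialize a context resource, dispatching to
 * vmw_gb_context_init() on MOB-capable hardware and otherwise defining
 * a legacy context directly in the command stream.
 */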
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free)(struct vmw_resource *res),
			    bool dx)
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	if (dev_priv->has_mob)
		return vmw_gb_context_init(dev_priv, dx, res, res_free);

	ret = vmw_resource_init(dev_priv, res, false,
				res_free, &vmw_legacy_context_func);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_HB_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = SVGA_3D_CMD_CONTEXT_DEFINE;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;

	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);
	res->hw_destroy = vmw_hw_context_destroy;
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}
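
/*
 * vmw_gb_context_create - Allocate a device id for the context and issue
 * SVGA_3D_CMD_DEFINE_GB_CONTEXT. Idempotent: returns immediately if the
 * resource already has an id.
 */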
static int vmw_gb_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineGBContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_GB_CONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
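
/*
 * vmw_gb_context_bind - Bind the context's backup MOB to the device,
 * telling it whether the MOB already holds valid context contents.
 */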
static int vmw_gb_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validContents = res->guest_memory_dirty;
	res->guest_memory_dirty = false;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}
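
/*
 * vmw_gb_context_unbind - Scrub the context's bindings and detach its
 * backup MOB, optionally reading the context state back into the MOB
 * first, then fence the backup buffer.
 */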
static int vmw_gb_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdReadbackGBContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBindGBContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_binding_state_scrub(uctx->cbs);

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_BIND_GB_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
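
/*
 * vmw_gb_context_destroy - Issue SVGA_3D_CMD_DESTROY_GB_CONTEXT and
 * release the context's device id, invalidating the pending query cid
 * if it belonged to this context.
 */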
static int vmw_gb_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyGBContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}
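
/*
 * DX context resource callbacks. These mirror the guest-backed context
 * callbacks above, but use the DX command variants and additionally
 * manage the context's cotables and DX query MOB.
 */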
static int vmw_dx_context_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineContext body;
	} *cmd;

	if (likely(res->id != -1))
		return 0;

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a context id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= VMWGFX_NUM_DXCONTEXT)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	cmd->header.id = SVGA_3D_CMD_DX_DEFINE_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	vmw_fifo_resource_inc(dev_priv);

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
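
/*
 * vmw_dx_context_bind - Bind the DX context's backup MOB to the device.
 */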
static int vmw_dx_context_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd;
	struct ttm_buffer_object *bo = val_buf->bo;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	cmd->body.mobid = bo->resource->start;
	cmd->body.validContents = res->guest_memory_dirty;
	res->guest_memory_dirty = false;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	return 0;
}

/**
 * vmw_dx_context_scrub_cotables - Scrub all bindings and
 * cotables from a context
 *
 * @ctx: Pointer to the context resource
 * @readback: Whether to save the cotable contents on scrubbing.
 *
 * COtables must be unbound before their context, but unbinding requires
 * the backup buffer being reserved, whereas scrubbing does not.
 * This function scrubs all cotables of a context, potentially reading back
 * the contents into their backup buffers. However, scrubbing cotables
 * also makes the device context invalid, so scrub all bindings first so
 * that doesn't have to be done later with an invalid context.
 */
void vmw_dx_context_scrub_cotables(struct vmw_resource *ctx,
				   bool readback)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;
	int i;

	vmw_binding_state_scrub(uctx->cbs);
	for (i = 0; i < cotable_max; ++i) {
		struct vmw_resource *res;

		/* Avoid racing with ongoing cotable destruction. */
		spin_lock(&uctx->cotable_lock);
		res = uctx->cotables[vmw_cotable_scrub_order[i]];
		if (res)
			res = vmw_resource_reference_unless_doomed(res);
		spin_unlock(&uctx->cotable_lock);

		if (res) {
			WARN_ON(vmw_cotable_scrub(res, readback));
			vmw_resource_unreference(&res);
		}
	}
}
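
/*
 * vmw_dx_context_unbind - Scrub cotables and bindings, read back pending
 * query state if requested, then detach the backup MOB and fence it.
 */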
static int vmw_dx_context_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct ttm_buffer_object *bo = val_buf->bo;
	struct vmw_fence_obj *fence;
	struct vmw_user_context *uctx =
		container_of(res, struct vmw_user_context, res);

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXReadbackContext body;
	} *cmd1;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindContext body;
	} *cmd2;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(bo->resource->mem_type != VMW_PL_MOB);

	mutex_lock(&dev_priv->binding_mutex);
	vmw_dx_context_scrub_cotables(res, readback);

	if (uctx->dx_query_mob && uctx->dx_query_mob->dx_query_ctx &&
	    readback) {
		WARN_ON(uctx->dx_query_mob->dx_query_ctx != res);
		if (vmw_query_readback_all(uctx->dx_query_mob))
			DRM_ERROR("Failed to read back query states\n");
	}

	submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);

	cmd = VMW_CMD_RESERVE(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		mutex_unlock(&dev_priv->binding_mutex);
		return -ENOMEM;
	}

	cmd2 = (void *) cmd;
	if (readback) {
		cmd1 = (void *) cmd;
		cmd1->header.id = SVGA_3D_CMD_DX_READBACK_CONTEXT;
		cmd1->header.size = sizeof(cmd1->body);
		cmd1->body.cid = res->id;
		cmd2 = (void *) (&cmd1[1]);
	}
	cmd2->header.id = SVGA_3D_CMD_DX_BIND_CONTEXT;
	cmd2->header.size = sizeof(cmd2->body);
	cmd2->body.cid = res->id;
	cmd2->body.mobid = SVGA3D_INVALID_ID;

	vmw_cmd_commit(dev_priv, submit_size);
	mutex_unlock(&dev_priv->binding_mutex);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_bo_fence_single(bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
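
/*
 * vmw_dx_context_destroy - Issue SVGA_3D_CMD_DX_DESTROY_CONTEXT and
 * release the context's device id.
 */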
static int vmw_dx_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyContext body;
	} *cmd;

	if (likely(res->id == -1))
		return 0;

	cmd = VMW_CMD_RESERVE(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_DESTROY_CONTEXT;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = res->id;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));
	if (dev_priv->query_cid == res->id)
		dev_priv->query_cid_valid = false;
	vmw_resource_release_id(res);
	vmw_fifo_resource_dec(dev_priv);

	return 0;
}

/*
 * User-space context management:
 */

static struct vmw_resource *
vmw_user_context_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_context, base)->res);
}
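
/*
 * vmw_user_context_free - res_free callback; drops the binding state and
 * the DX query MOB association before freeing the user-context wrapper.
 */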
static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);

	if (ctx->cbs)
		vmw_binding_state_free(ctx->cbs);

	(void) vmw_context_bind_dx_query(res, NULL);

	ttm_base_object_kfree(ctx, base);
}

/*
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
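
/*
 * vmw_context_destroy_ioctl - Drop the reference user space holds on the
 * context's base object; final teardown happens in the release callback.
 */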
int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->cid);
}
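
/*
 * vmw_context_define - Common worker for the context define ioctls.
 * Allocates a user context, initializes the resource and publishes it
 * as a ttm base object so user space can reference it by handle.
 */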
static int vmw_context_define(struct drm_device *dev, void *data,
			      struct drm_file *file_priv, bool dx)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (!has_sm4_context(dev_priv) && dx) {
		VMW_DEBUG_USER("DX contexts not supported by device.\n");
		return -EINVAL;
	}

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (unlikely(!ctx)) {
		ret = -ENOMEM;
		goto out_ret;
	}

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	/*
	 * From here on, the destructor takes over resource freeing.
	 */

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
	if (unlikely(ret != 0))
		goto out_ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = ctx->base.handle;
out_err:
	vmw_resource_unreference(&res);
out_ret:
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	return vmw_context_define(dev, data, file_priv, false);
}

int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
				      struct drm_file *file_priv)
{
	union drm_vmw_extended_context_arg *arg = (typeof(arg)) data;
	struct drm_vmw_context_arg *rep = &arg->rep;

	switch (arg->req) {
	case drm_vmw_context_legacy:
		return vmw_context_define(dev, rep, file_priv, false);
	case drm_vmw_context_dx:
		return vmw_context_define(dev, rep, file_priv, true);
	default:
		break;
	}

	return -EINVAL;
}

/**
 * vmw_context_binding_list - Return a list of context bindings
 *
 * @ctx: The context resource
 *
 * Returns the current list of bindings of the given context. Note that
 * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
{
	struct vmw_user_context *uctx =
		container_of(ctx, struct vmw_user_context, res);

	return vmw_binding_state_list(uctx->cbs);
}

struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->man;
}

struct vmw_resource *vmw_context_cotable(struct vmw_resource *ctx,
					 SVGACOTableType cotable_type)
{
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	if (cotable_type >= cotable_max)
		return ERR_PTR(-EINVAL);

	return container_of(ctx, struct vmw_user_context, res)->
		cotables[cotable_type];
}

/**
 * vmw_context_binding_state -
 * Return a pointer to a context binding state structure
 *
 * @ctx: The context resource
 *
 * Returns the current state of bindings of the given context. Note that
 * this state becomes stale as soon as the dev_priv::binding_mutex is unlocked.
 */
struct vmw_ctx_binding_state *
vmw_context_binding_state(struct vmw_resource *ctx)
{
	return container_of(ctx, struct vmw_user_context, res)->cbs;
}

/**
 * vmw_context_bind_dx_query -
 * Sets query MOB for the context. If @mob is NULL, then this function will
 * remove the association between the MOB and the context. This function
 * assumes the binding_mutex is held.
 *
 * @ctx_res: The context resource
 * @mob: a reference to the query MOB
 *
 * Returns -EINVAL if a MOB has already been set and does not match the one
 * specified in the parameter. 0 otherwise.
 */
int vmw_context_bind_dx_query(struct vmw_resource *ctx_res,
			      struct vmw_bo *mob)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	if (mob == NULL) {
		if (uctx->dx_query_mob) {
			uctx->dx_query_mob->dx_query_ctx = NULL;
			vmw_bo_unreference(&uctx->dx_query_mob);
			uctx->dx_query_mob = NULL;
		}

		return 0;
	}

	/* Can only have one MOB per context for queries */
	if (uctx->dx_query_mob && uctx->dx_query_mob != mob)
		return -EINVAL;

	mob->dx_query_ctx = ctx_res;

	if (!uctx->dx_query_mob)
		uctx->dx_query_mob = vmw_bo_reference(mob);

	return 0;
}

/**
 * vmw_context_get_dx_query_mob - Returns non-counted reference to DX query mob
 *
 * @ctx_res: The context resource
 */
struct vmw_bo *
vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res)
{
	struct vmw_user_context *uctx =
		container_of(ctx_res, struct vmw_user_context, res);

	return uctx->dx_query_mob;
}