// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009 - 2023 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_binding.h"
#include "vmwgfx_bo.h"
#include "vmwgfx_drv.h"
#include "vmwgfx_mksstat.h"
#include "vmwgfx_so.h"

#include <drm/ttm/ttm_bo.h>
#include <drm/ttm/ttm_placement.h>

#include <linux/sync_file.h>
#include <linux/hashtable.h>

/*
 * Helper macro to get dx_ctx_node if available, otherwise print an error
 * message. This is for use in command verifier functions where, if dx_ctx_node
 * is not set, the command is invalid.
 */
#define VMW_GET_CTX_NODE(__sw_context)					\
({									\
	__sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({	\
		VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
		__sw_context->dx_ctx_node;				\
	});								\
})

#define VMW_DECLARE_CMD_VAR(__var, __type)				\
	struct {							\
		SVGA3dCmdHeader header;					\
		__type body;						\
	} __var

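/*
 * VMW_DECLARE_CMD_VAR pairs the generic SVGA3dCmdHeader with a typed
 * command body in an anonymous struct. The verifiers below use it like
 * this (see e.g. vmw_cmd_dx_bind_query()):
 *
 *	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
 *
 *	cmd = container_of(header, typeof(*cmd), header);
 *	// ... validate cmd->body.mobid ...
 */
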
/**
 * struct vmw_relocation - Buffer object relocation
 *
 * @head: List head for the command submission context's relocation list
 * @vbo: Non ref-counted pointer to buffer object
 * @mob_loc: Pointer to location for mob id to be modified
 * @location: Pointer to location for guest pointer to be modified
 */
struct vmw_relocation {
	struct list_head head;
	struct vmw_bo *vbo;
	union {
		SVGAMobId *mob_loc;
		SVGAGuestPtr *location;
	};
};

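/*
 * The union reflects the two relocation flavours: vmw_translate_mob_ptr()
 * records @mob_loc, the location of a MOB id, while vmw_translate_guest_ptr()
 * records @location, the location of a guest pointer. In both cases the
 * actual fixup is deferred until the buffer object has been validated.
 */
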
/**
 * enum vmw_resource_relocation_type - Relocation type for resources
 *
 * @vmw_res_rel_normal: Traditional relocation. The resource id in the
 * command stream is replaced with the actual id after validation.
 * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
 * with a NOP.
 * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
 * validation is -1, the command is replaced with a NOP. Otherwise no action.
 * @vmw_res_rel_max: Last value in the enum - used for error checking
 */
enum vmw_resource_relocation_type {
	vmw_res_rel_normal,
	vmw_res_rel_nop,
	vmw_res_rel_cond_nop,
	vmw_res_rel_max
};

/**
 * struct vmw_resource_relocation - Relocation info for resources
 *
 * @head: List head for the software context's relocation list.
 * @res: Non-ref-counted pointer to the resource.
 * @offset: Offset of single byte entries into the command buffer where the id
 * that needs fixup is located.
 * @rel_type: Type of relocation.
 */
struct vmw_resource_relocation {
	struct list_head head;
	const struct vmw_resource *res;
	unsigned long offset;
	enum vmw_resource_relocation_type rel_type:3;
};

/**
 * struct vmw_ctx_validation_info - Extra validation metadata for contexts
 *
 * @head: List head of context list
 * @ctx: The context resource
 * @cur: The context's persistent binding state
 * @staged: The binding state changes of this command buffer
 */
struct vmw_ctx_validation_info {
	struct list_head head;
	struct vmw_resource *ctx;
	struct vmw_ctx_binding_state *cur;
	struct vmw_ctx_binding_state *staged;
};

/**
 * struct vmw_cmd_entry - Describe a command for the verifier
 *
 * @func: Call-back to handle the command.
 * @user_allow: Whether allowed from the execbuf ioctl.
 * @gb_disable: Whether disabled if guest-backed objects are available.
 * @gb_enable: Whether enabled if guest-backed objects are available.
 * @cmd_name: Name of the command.
 */
struct vmw_cmd_entry {
	int (*func) (struct vmw_private *, struct vmw_sw_context *,
		     SVGA3dCmdHeader *);
	bool user_allow;
	bool gb_disable;
	bool gb_enable;
	const char *cmd_name;
};

#define VMW_CMD_DEF(_cmd, _func, _user_allow, _gb_disable, _gb_enable)	\
	[(_cmd) - SVGA_3D_CMD_BASE] = {(_func), (_user_allow),		\
				       (_gb_disable), (_gb_enable), #_cmd}

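/*
 * A sketch of how the dispatch table further down in this file consumes
 * VMW_CMD_DEF; the entry shown here is illustrative:
 *
 *	static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
 *		VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
 *			    true, false, false),
 *		...
 *	};
 *
 * Indexing by (_cmd) - SVGA_3D_CMD_BASE keeps the table dense, and #_cmd
 * stringifies the command id for use as @cmd_name in debug output.
 */
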
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx);
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p);

/**
 * vmw_ptr_diff - Compute the offset from a to b in bytes
 *
 * @a: A starting pointer.
 * @b: A pointer offset in the same address space.
 *
 * Returns: The offset in bytes between the two pointers.
 */
static size_t vmw_ptr_diff(void *a, void *b)
{
	return (unsigned long) b - (unsigned long) a;
}

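/*
 * The typical caller is relocation bookkeeping: the byte offset of an id
 * within the command buffer is computed as, for example,
 *
 *	vmw_ptr_diff(sw_context->buf_start, id_loc)
 *
 * and stored in struct vmw_resource_relocation::offset for later patching
 * by vmw_resource_relocations_apply().
 */
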
/**
 * vmw_execbuf_bindings_commit - Commit modified binding state
 *
 * @sw_context: The command submission context
 * @backoff: Whether this is part of the error path and binding state changes
 * should be ignored
 */
static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
					bool backoff)
{
	struct vmw_ctx_validation_info *entry;

	list_for_each_entry(entry, &sw_context->ctx_list, head) {
		if (!backoff)
			vmw_binding_state_commit(entry->cur, entry->staged);

		if (entry->staged != sw_context->staged_bindings)
			vmw_binding_state_free(entry->staged);
		else
			sw_context->staged_bindings_inuse = false;
	}

	/* List entries are freed with the validation context */
	INIT_LIST_HEAD(&sw_context->ctx_list);
}

/**
 * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
 *
 * @sw_context: The command submission context
 */
static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
{
	if (sw_context->dx_query_mob)
		vmw_context_bind_dx_query(sw_context->dx_query_ctx,
					  sw_context->dx_query_mob);
}

/**
 * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
 * the validate list.
 *
 * @dev_priv: Pointer to the device private struct.
 * @sw_context: The command submission context
 * @res: Pointer to the resource
 * @node: The validation node holding the context resource metadata
 */
static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   struct vmw_ctx_validation_info *node)
{
	int ret;

	ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
	if (unlikely(ret != 0))
		goto out_err;

	if (!sw_context->staged_bindings) {
		sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(sw_context->staged_bindings)) {
			ret = PTR_ERR(sw_context->staged_bindings);
			sw_context->staged_bindings = NULL;
			goto out_err;
		}
	}

	if (sw_context->staged_bindings_inuse) {
		node->staged = vmw_binding_state_alloc(dev_priv);
		if (IS_ERR(node->staged)) {
			ret = PTR_ERR(node->staged);
			node->staged = NULL;
			goto out_err;
		}
	} else {
		node->staged = sw_context->staged_bindings;
		sw_context->staged_bindings_inuse = true;
	}

	node->ctx = res;
	node->cur = vmw_context_binding_state(res);
	list_add_tail(&node->head, &sw_context->ctx_list);

	return 0;

out_err:
	return ret;
}

/**
 * vmw_execbuf_res_size - calculate extra size for the resource validation node
 *
 * @dev_priv: Pointer to the device private struct.
 * @res_type: The resource type.
 *
 * Guest-backed contexts and DX contexts require extra size to store execbuf
 * private information in the validation node. Typically the binding manager
 * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
					 enum vmw_res_type res_type)
{
	return (res_type == vmw_res_dx_context ||
		(res_type == vmw_res_context && dev_priv->has_mob)) ?
		sizeof(struct vmw_ctx_validation_info) : 0;
}

/**
 * vmw_execbuf_rcache_update - Update a resource-node cache entry
 *
 * @rcache: Pointer to the entry to update.
 * @res: Pointer to the resource.
 * @private: Pointer to the execbuf-private space in the resource validation
 * node.
 */
static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
				      struct vmw_resource *res,
				      void *private)
{
	rcache->res = res;
	rcache->private = private;
	rcache->valid = 1;
	rcache->valid_handle = 0;
}

enum vmw_val_add_flags {
	vmw_val_add_flag_none  =      0,
	vmw_val_add_flag_noctx = 1 << 0,
};

/**
 * vmw_execbuf_res_val_add - Add a resource to the validation list.
 *
 * @sw_context: Pointer to the software context.
 * @res: Unreferenced rcu-protected pointer to the resource.
 * @dirty: Whether to change dirty status.
 * @flags: specifies whether to use the context or not
 *
 * Returns: 0 on success. Negative error code on failure. Typical error codes
 * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
 */
static int vmw_execbuf_res_val_add(struct vmw_sw_context *sw_context,
				   struct vmw_resource *res,
				   u32 dirty,
				   u32 flags)
{
	struct vmw_private *dev_priv = res->dev_priv;
	int ret;
	enum vmw_res_type res_type = vmw_res_type(res);
	struct vmw_res_cache_entry *rcache;
	struct vmw_ctx_validation_info *ctx_info;
	bool first_usage;
	unsigned int priv_size;

	rcache = &sw_context->res_cache[res_type];
	if (likely(rcache->valid && rcache->res == res)) {
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
		return 0;
	}

	if ((flags & vmw_val_add_flag_noctx) != 0) {
		ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
						  (void **)&ctx_info, NULL);
		if (ret)
			return ret;
	} else {
		priv_size = vmw_execbuf_res_size(dev_priv, res_type);
		ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
						  dirty, (void **)&ctx_info,
						  &first_usage);
		if (ret)
			return ret;

		if (priv_size && first_usage) {
			ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
						      ctx_info);
			if (ret) {
				VMW_DEBUG_USER("Failed first usage context setup.\n");
				return ret;
			}
		}
	}

	vmw_execbuf_rcache_update(rcache, res, ctx_info);
	return 0;
}

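/*
 * Note on the fast path above: command streams tend to reference the same
 * resource many times in a row, so a hit in the res_cache only needs to
 * propagate the dirty state; the validation-list insertion and the
 * first-usage context setup are skipped entirely.
 */
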
/**
 * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
 * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
 *
 * Returns 0 if success, negative error code otherwise.
 */
static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
				struct vmw_resource *view)
{
	int ret;

	/*
	 * First add the resource the view is pointing to, otherwise it may be
	 * swapped out when the view is validated.
	 */
	ret = vmw_execbuf_res_val_add(sw_context, vmw_view_srf(view),
				      vmw_view_dirtying(view), vmw_val_add_flag_noctx);
	if (ret)
		return ret;

	return vmw_execbuf_res_val_add(sw_context, view, VMW_RES_DIRTY_NONE,
				       vmw_val_add_flag_noctx);
}

/**
 * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
 * to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
 * The view is represented by a view id and the DX context it's created on, or
 * scheduled for creation on. If there is no DX context set, the function will
 * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
 */
static struct vmw_resource *
vmw_view_id_val_add(struct vmw_sw_context *sw_context,
		    enum vmw_view_type view_type, u32 id)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return ERR_PTR(-EINVAL);

	view = vmw_view_lookup(sw_context->man, view_type, id);
	if (IS_ERR(view))
		return view;

	ret = vmw_view_res_val_add(sw_context, view);
	if (ret)
		return ERR_PTR(ret);

	return view;
}

/**
 * vmw_resource_context_res_add - Put resources previously bound to a context on
 * the validation list
 *
 * @dev_priv: Pointer to a device private structure
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
 * This function puts all resources that were previously bound to @ctx on the
 * resource validation list. This is part of the context state reemission.
 */
static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					struct vmw_resource *ctx)
{
	struct list_head *binding_list;
	struct vmw_ctx_bindinfo *entry;
	int ret = 0;
	struct vmw_resource *res;
	u32 i;
	u32 cotable_max = has_sm5_context(ctx->dev_priv) ?
		SVGA_COTABLE_MAX : SVGA_COTABLE_DX10_MAX;

	/* Add all cotables to the validation list. */
	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		for (i = 0; i < cotable_max; ++i) {
			res = vmw_context_cotable(ctx, i);
			if (IS_ERR(res))
				continue;

			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_SET,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	/* Add all resources bound to the context to the validation list */
	mutex_lock(&dev_priv->binding_mutex);
	binding_list = vmw_context_binding_list(ctx);

	list_for_each_entry(entry, binding_list, ctx_list) {
		if (vmw_res_type(entry->res) == vmw_res_view)
			ret = vmw_view_res_val_add(sw_context, entry->res);
		else
			ret = vmw_execbuf_res_val_add(sw_context, entry->res,
						      vmw_binding_dirtying(entry->bt),
						      vmw_val_add_flag_noctx);
		if (unlikely(ret != 0))
			break;
	}

	if (has_sm4_context(dev_priv) &&
	    vmw_res_type(ctx) == vmw_res_dx_context) {
		struct vmw_bo *dx_query_mob;

		dx_query_mob = vmw_context_get_dx_query_mob(ctx);
		if (dx_query_mob) {
			vmw_bo_placement_set(dx_query_mob,
					     VMW_BO_DOMAIN_MOB,
					     VMW_BO_DOMAIN_MOB);
			ret = vmw_validation_add_bo(sw_context->ctx,
						    dx_query_mob);
		}
	}

	mutex_unlock(&dev_priv->binding_mutex);
	return ret;
}

/**
 * vmw_resource_relocation_add - Add a relocation to the relocation list
 *
 * @sw_context: Pointer to the software context.
 * @res: The resource.
 * @offset: Offset into the command buffer currently being parsed where the id
 * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
				       const struct vmw_resource *res,
				       unsigned long offset,
				       enum vmw_resource_relocation_type
				       rel_type)
{
	struct vmw_resource_relocation *rel;

	rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
	if (unlikely(!rel)) {
		VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
		return -ENOMEM;
	}

	rel->res = res;
	rel->offset = offset;
	rel->rel_type = rel_type;
	list_add_tail(&rel->head, &sw_context->res_relocations);

	return 0;
}

/**
 * vmw_resource_relocations_free - Free all relocations on a list
 *
 * @list: Pointer to the head of the relocation list
 */
static void vmw_resource_relocations_free(struct list_head *list)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(list);
}

/**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
 * @cb: Pointer to the start of the command buffer being patched. This need
 * not be the same buffer as the one being parsed when the relocation list was
 * built, but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
static void vmw_resource_relocations_apply(uint32_t *cb,
					   struct list_head *list)
{
	struct vmw_resource_relocation *rel;

	/* Validate the struct vmw_resource_relocation member size */
	BUILD_BUG_ON(SVGA_CB_MAX_SIZE >= (1 << 29));
	BUILD_BUG_ON(vmw_res_rel_max >= (1 << 3));

	list_for_each_entry(rel, list, head) {
		u32 *addr = (u32 *)((unsigned long) cb + rel->offset);
		switch (rel->rel_type) {
		case vmw_res_rel_normal:
			*addr = rel->res->id;
			break;
		case vmw_res_rel_nop:
			*addr = SVGA_3D_CMD_NOP;
			break;
		default:
			if (rel->res->id == -1)
				*addr = SVGA_3D_CMD_NOP;
			break;
		}
	}
}

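/*
 * Illustration (values are made up): for a vmw_res_rel_normal entry
 * recorded at byte offset 8, the loop above amounts to
 *
 *	*(u32 *)((unsigned long) cb + 8) = rel->res->id;
 *
 * i.e. the user-space handle parsed earlier is overwritten with the device
 * id that validation assigned to the resource.
 */
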
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}

/**
 * vmw_resources_reserve - Reserve all resources on the sw_context's resource
 * list.
 *
 * @sw_context: Pointer to the software context.
 *
 * Note that since vmware's command submission currently is protected by the
 * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
 * only a single thread at once will attempt this.
 */
static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
{
	int ret;

	ret = vmw_validation_res_reserve(sw_context->ctx, true);
	if (ret)
		return ret;

	if (sw_context->dx_query_mob) {
		struct vmw_bo *expected_dx_query_mob;

		expected_dx_query_mob =
			vmw_context_get_dx_query_mob(sw_context->dx_query_ctx);
		if (expected_dx_query_mob &&
		    expected_dx_query_mob != sw_context->dx_query_mob) {
			ret = -EINVAL;
		}
	}

	return ret;
}

/**
 * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
 * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
 * @dirty: Whether to change dirty status.
 * @converter: User-space visible type specific information.
 * @id_loc: Pointer to the location in the command buffer currently being parsed
 * from where the user-space resource id handle is located.
 * @p_res: Pointer to pointer to resource validation node. Populated on
 * exit.
 */
static int
vmw_cmd_res_check(struct vmw_private *dev_priv,
		  struct vmw_sw_context *sw_context,
		  enum vmw_res_type res_type,
		  u32 dirty,
		  const struct vmw_user_resource_conv *converter,
		  uint32_t *id_loc,
		  struct vmw_resource **p_res)
{
	struct vmw_res_cache_entry *rcache = &sw_context->res_cache[res_type];
	struct vmw_resource *res;
	int ret = 0;
	bool needs_unref = false;

	if (p_res)
		*p_res = NULL;

	if (*id_loc == SVGA3D_INVALID_ID) {
		if (res_type == vmw_res_context) {
			VMW_DEBUG_USER("Illegal context invalid id.\n");
			return -EINVAL;
		}
		return 0;
	}

	if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
		res = rcache->res;
		if (dirty)
			vmw_validation_res_set_dirty(sw_context->ctx,
						     rcache->private, dirty);
	} else {
		unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);

		ret = vmw_validation_preload_res(sw_context->ctx, size);
		if (ret)
			return ret;

		ret = vmw_user_resource_lookup_handle
			(dev_priv, sw_context->fp->tfile, *id_loc, converter, &res);
		if (ret != 0) {
			VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
				       (unsigned int) *id_loc);
			return ret;
		}
		needs_unref = true;

		ret = vmw_execbuf_res_val_add(sw_context, res, dirty, vmw_val_add_flag_none);
		if (unlikely(ret != 0))
			goto res_check_done;

		if (rcache->valid && rcache->res == res) {
			rcache->valid_handle = true;
			rcache->handle = *id_loc;
		}
	}

	ret = vmw_resource_relocation_add(sw_context, res,
					  vmw_ptr_diff(sw_context->buf_start,
						       id_loc),
					  vmw_res_rel_normal);
	if (p_res)
		*p_res = res;

res_check_done:
	if (needs_unref)
		vmw_resource_unreference(&res);

	return ret;
}

/**
 * vmw_rebind_all_dx_query - Rebind DX query associated with the context
 *
 * @ctx_res: context the query belongs to
 *
 * This function assumes binding_mutex is held.
 */
static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
{
	struct vmw_private *dev_priv = ctx_res->dev_priv;
	struct vmw_bo *dx_query_mob;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);

	dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);

	if (!dx_query_mob || dx_query_mob->dx_query_ctx)
		return 0;

	cmd = VMW_CMD_CTX_RESERVE(dev_priv, sizeof(*cmd), ctx_res->id);
	if (cmd == NULL)
		return -ENOMEM;

	cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.cid = ctx_res->id;
	cmd->body.mobid = dx_query_mob->base.resource->start;
	vmw_cmd_commit(dev_priv, sizeof(*cmd));

	vmw_context_bind_dx_query(ctx_res, dx_query_mob);

	return 0;
}

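/*
 * The dx_query_ctx check above relies on the binding having been scrubbed
 * on eviction: a NULL dx_query_ctx means the MOB is no longer known to the
 * device, and SVGA_3D_CMD_DX_BIND_ALL_QUERY re-associates every query of
 * the context with the MOB in a single command.
 */
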
/**
 * vmw_rebind_contexts - Rebind all resources previously bound to referenced
 * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
 * Rebind context binding points that have been scrubbed because of eviction.
 */
static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
{
	struct vmw_ctx_validation_info *val;
	int ret;

	list_for_each_entry(val, &sw_context->ctx_list, head) {
		ret = vmw_binding_rebind_all(val->cur);
		if (unlikely(ret != 0)) {
			if (ret != -ERESTARTSYS)
				VMW_DEBUG_USER("Failed to rebind context.\n");
			return ret;
		}

		ret = vmw_rebind_all_dx_query(val->ctx);
		if (ret != 0) {
			VMW_DEBUG_USER("Failed to rebind queries.\n");
			return ret;
		}
	}

	return 0;
}

/**
 * vmw_view_bindings_add - Add an array of view bindings to a context binding
 * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
 * @binding_type: Binding type for the bindings.
 * @shader_slot: The shader slot to use for the bindings.
 * @view_ids: Array of view ids to be bound.
 * @num_views: Number of view ids in @view_ids.
 * @first_slot: The binding slot to be used for the first view id in @view_ids.
 */
static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
				 enum vmw_view_type view_type,
				 enum vmw_ctx_binding_type binding_type,
				 uint32 shader_slot,
				 uint32 view_ids[], u32 num_views,
				 u32 first_slot)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 i;

	if (!ctx_node)
		return -EINVAL;

	for (i = 0; i < num_views; ++i) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_resource *view = NULL;

		if (view_ids[i] != SVGA3D_INVALID_ID) {
			view = vmw_view_id_val_add(sw_context, view_type,
						   view_ids[i]);
			if (IS_ERR(view)) {
				VMW_DEBUG_USER("View not found.\n");
				return PTR_ERR(view);
			}
		}
		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = view;
		binding.bi.bt = binding_type;
		binding.shader_slot = shader_slot;
		binding.slot = first_slot + i;
		vmw_binding_add(ctx_node->staged, &binding.bi,
				shader_slot, binding.slot);
	}

	return 0;
}

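/*
 * Note that an SVGA3D_INVALID_ID entry in @view_ids leaves @view NULL, so
 * vmw_binding_add() is called with a NULL resource: the slot is staged as
 * an unbind rather than skipped.
 */
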
/**
 * vmw_cmd_cid_check - Check a command header for valid context information.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @header: A command header with an embedded user-space context handle.
 *
 * Convenience function: Call vmw_cmd_res_check with the user-space context
 * handle embedded in @header.
 */
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body, NULL);
}

/**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
 *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The resource pointed to by @res needs to be present in the command submission
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
 * Return: a pointer to the private metadata of the resource, or NULL if it
 * wasn't found
 */
static struct vmw_ctx_validation_info *
vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
			  struct vmw_resource *res)
{
	struct vmw_res_cache_entry *rcache =
		&sw_context->res_cache[vmw_res_type(res)];

	if (rcache->valid && rcache->res == res)
		return rcache->private;

	WARN_ON_ONCE(true);
	return NULL;
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_RT_MAX) {
		VMW_DEBUG_USER("Illegal render target type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_SET, user_surface_converter,
				&cmd->body.target.sid, &res);
	if (unlikely(ret))
		return ret;

	if (dev_priv->has_mob) {
		struct vmw_ctx_bindinfo_view binding;
		struct vmw_ctx_validation_info *node;

		node = vmw_execbuf_info_from_res(sw_context, ctx);
		if (!node)
			return -EINVAL;

		binding.bi.ctx = ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_rt;
		binding.slot = cmd->body.type;
		vmw_binding_add(node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (ret)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest, NULL);
}

static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dstSid, NULL);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.src.sid, NULL);
	if (unlikely(ret != 0))
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.dest.sid, NULL);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.srcImage.sid, NULL);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
 *
 * @dev_priv: The device private structure.
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
 * This function checks whether @new_query_bo is suitable for holding query
 * results, and if another buffer currently is pinned for query results. If so,
 * the function prepares the state of @sw_context for switching pinned buffers
 * after successful submission of the current command batch.
 */
static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
				       struct vmw_bo *new_query_bo,
				       struct vmw_sw_context *sw_context)
{
	struct vmw_res_cache_entry *ctx_entry =
		&sw_context->res_cache[vmw_res_context];
	int ret;

	BUG_ON(!ctx_entry->valid);
	sw_context->last_query_ctx = ctx_entry->res;

	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {

		if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) {
			VMW_DEBUG_USER("Query buffer too large.\n");
			return -EINVAL;
		}

		if (unlikely(sw_context->cur_query_bo != NULL)) {
			sw_context->needs_post_query_barrier = true;
			vmw_bo_placement_set_default_accelerated(sw_context->cur_query_bo);
			ret = vmw_validation_add_bo(sw_context->ctx,
						    sw_context->cur_query_bo);
			if (unlikely(ret != 0))
				return ret;
		}
		sw_context->cur_query_bo = new_query_bo;

		vmw_bo_placement_set_default_accelerated(dev_priv->dummy_query_bo);
		ret = vmw_validation_add_bo(sw_context->ctx,
					    dev_priv->dummy_query_bo);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

/**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
 * @dev_priv: The device private structure.
 * @sw_context: The software context used for this command submission batch.
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
 * object following that query wait has signaled, we are sure that all preceding
 * queries have finished, and the old query buffer can be unpinned. However,
 * since both the new query buffer and the old one are fenced with that fence,
 * we can do an asynchronous unpin now, and be sure that the old query buffer
 * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
 */
static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context)
{
	/*
	 * The validate list should still hold references to all
	 * contexts here.
	 */
	if (sw_context->needs_post_query_barrier) {
		struct vmw_res_cache_entry *ctx_entry =
			&sw_context->res_cache[vmw_res_context];
		struct vmw_resource *ctx;
		int ret;

		BUG_ON(!ctx_entry->valid);
		ctx = ctx_entry->res;

		ret = vmw_cmd_emit_dummy_query(dev_priv, ctx->id);

		if (unlikely(ret != 0))
			VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
	}

	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
		if (dev_priv->pinned_bo) {
			vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
			vmw_bo_unreference(&dev_priv->pinned_bo);
		}

		if (!sw_context->needs_post_query_barrier) {
			vmw_bo_pin_reserved(sw_context->cur_query_bo, true);

			/*
			 * We pin also the dummy_query_bo buffer so that we
			 * don't need to validate it when emitting dummy queries
			 * in context destroy paths.
			 */
			if (!dev_priv->dummy_query_bo_pinned) {
				vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
						    true);
				dev_priv->dummy_query_bo_pinned = true;
			}

			BUG_ON(sw_context->last_query_ctx == NULL);
			dev_priv->query_cid = sw_context->last_query_ctx->id;
			dev_priv->query_cid_valid = true;
			dev_priv->pinned_bo =
				vmw_bo_reference(sw_context->cur_query_bo);
		}
	}
}

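/*
 * Lifecycle sketch for the two helpers above: a query verifier calls
 * vmw_query_bo_switch_prepare() while the command stream is parsed; after
 * the batch has been submitted, vmw_query_bo_switch_commit() runs, and the
 * fence emitted for the batch covers both the old and the new query
 * buffer, which is what makes the asynchronous unpin of the old buffer
 * safe.
 */
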
/**
 * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
 * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
 * during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate. The
 * former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGAMobId *id,
				 struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo;
	uint32_t handle = *id;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use MOB buffer.\n");
		return PTR_ERR(vmw_bo);
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_MOB, VMW_BO_DOMAIN_MOB);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	ttm_bo_put(&vmw_bo->base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->mob_loc = id;
	reloc->vbo = vmw_bo;

	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
 * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
 * @vmw_bo_p: Points to a location that, on successful return will carry a
 * non-reference-counted pointer to the DMA buffer identified by the user-space
 * handle in @ptr.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
 *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
 * vmw_clear_validations.
 */
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_bo **vmw_bo_p)
{
	struct vmw_bo *vmw_bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	int ret;

	vmw_validation_preload_bo(sw_context->ctx);
	ret = vmw_user_bo_lookup(sw_context->filp, handle, &vmw_bo);
	if (ret != 0) {
		drm_dbg(&dev_priv->drm, "Could not find or use GMR region.\n");
		return PTR_ERR(vmw_bo);
	}
	vmw_bo_placement_set(vmw_bo, VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(sw_context->ctx, vmw_bo);
	ttm_bo_put(&vmw_bo->base);
	if (unlikely(ret != 0))
		return ret;

	reloc = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*reloc));
	if (!reloc)
		return -ENOMEM;

	reloc->location = ptr;
	reloc->vbo = vmw_bo;
	*vmw_bo_p = vmw_bo;
	list_add_tail(&reloc->head, &sw_context->bo_relocations);

	return 0;
}

/**
 * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * This function adds the new query into the query COTABLE.
 */
static int vmw_cmd_dx_define_query(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineQuery);
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *cotable_res;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type < SVGA3D_QUERYTYPE_MIN ||
	    cmd->body.type >= SVGA3D_QUERYTYPE_MAX)
		return -EINVAL;

	cotable_res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXQUERY);
	ret = vmw_cotable_notify(cotable_res, cmd->body.queryId);

	return ret;
}

/**
 * vmw_cmd_dx_bind_query - validate SVGA_3D_CMD_DX_BIND_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 *
 * The query bind operation will eventually associate the query ID with its
 * backing MOB. In this function, we take the user mode MOB ID and use
 * vmw_translate_mob_ptr() to translate it to its kernel mode equivalent.
 */
static int vmw_cmd_dx_bind_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindQuery);
	struct vmw_bo *vmw_bo;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	/*
	 * Look up the buffer pointed to by q.mobid, put it on the relocation
	 * list so its kernel mode MOB ID can be filled in later
	 */
	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);

	if (ret != 0)
		return ret;

	sw_context->dx_query_mob = vmw_bo;
	sw_context->dx_query_ctx = sw_context->dx_ctx_node->ctx;

	return 0;
}

/**
 * vmw_cmd_begin_gb_query - validate SVGA_3D_CMD_BEGIN_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_gb_query(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginGBQuery) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_begin_query - validate SVGA_3D_CMD_BEGIN_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_begin_query(struct vmw_private *dev_priv,
			       struct vmw_sw_context *sw_context,
			       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBeginQuery) =
		container_of(header, typeof(*cmd), header);

	if (unlikely(dev_priv->has_mob)) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdBeginGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_BEGIN_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_begin_gb_query(dev_priv, sw_context, header);
	}

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				 VMW_RES_DIRTY_SET, user_context_converter,
				 &cmd->body.cid, NULL);
}

/**
 * vmw_cmd_end_gb_query - validate SVGA_3D_CMD_END_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_gb_query(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_end_query - validate SVGA_3D_CMD_END_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdEndQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdEndGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_END_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_end_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_query_bo_switch_prepare(dev_priv, vmw_bo, sw_context);

	return ret;
}

/**
 * vmw_cmd_wait_gb_query - validate SVGA_3D_CMD_WAIT_GB_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_gb_query(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForGBQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, &cmd->body.mobid,
				    &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

/**
 * vmw_cmd_wait_query - validate SVGA_3D_CMD_WAIT_QUERY command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context used for this command submission.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdWaitForQuery);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);
	if (dev_priv->has_mob) {
		VMW_DECLARE_CMD_VAR(gb_cmd, SVGA3dCmdWaitForGBQuery);

		BUG_ON(sizeof(gb_cmd) != sizeof(*cmd));

		gb_cmd.header.id = SVGA_3D_CMD_WAIT_FOR_GB_QUERY;
		gb_cmd.header.size = cmd->header.size;
		gb_cmd.body.cid = cmd->body.cid;
		gb_cmd.body.type = cmd->body.type;
		gb_cmd.body.mobid = cmd->body.guestResult.gmrId;
		gb_cmd.body.offset = cmd->body.guestResult.offset;

		memcpy(cmd, &gb_cmd, sizeof(*cmd));
		return vmw_cmd_wait_gb_query(dev_priv, sw_context, header);
	}

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guestResult, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	return 0;
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_bo *vmw_bo = NULL;
	struct vmw_surface *srf = NULL;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceDMA);
	int ret;
	SVGA3dCmdSurfaceDMASuffix *suffix;
	uint32_t bo_size;
	bool dirty;

	cmd = container_of(header, typeof(*cmd), header);
	suffix = (SVGA3dCmdSurfaceDMASuffix *)((unsigned long) &cmd->body +
					       header->size - sizeof(*suffix));

	/* Make sure device and verifier stays in sync. */
	if (unlikely(suffix->suffixSize != sizeof(*suffix))) {
		VMW_DEBUG_USER("Invalid DMA suffix size.\n");
		return -EINVAL;
	}

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->body.guest.ptr, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* Make sure DMA doesn't cross BO boundaries. */
	bo_size = vmw_bo->base.base.size;
	if (unlikely(cmd->body.guest.ptr.offset > bo_size)) {
		VMW_DEBUG_USER("Invalid DMA offset.\n");
		return -EINVAL;
	}

	bo_size -= cmd->body.guest.ptr.offset;
	if (unlikely(suffix->maximumOffset > bo_size))
		suffix->maximumOffset = bo_size;

	dirty = (cmd->body.transfer == SVGA3D_WRITE_HOST_VRAM) ?
		VMW_RES_DIRTY_SET : 0;
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				dirty, user_surface_converter,
				&cmd->body.host.sid, NULL);
	if (unlikely(ret != 0)) {
		if (unlikely(ret != -ERESTARTSYS))
			VMW_DEBUG_USER("could not find surface for DMA.\n");
		return ret;
	}

	srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);

	vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base, header);

	return 0;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDrawPrimitives);
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, typeof(*cmd), header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		VMW_DEBUG_USER("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&decl->array.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		VMW_DEBUG_USER("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&range->indexArray.surfaceId, NULL);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetTextureState);
	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
	  ((unsigned long) header + sizeof(*cmd));
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		if (cur_state->stage >= SVGA3D_NUM_TEXTURE_UNITS) {
			VMW_DEBUG_USER("Illegal texture/sampler unit %u.\n",
				       (unsigned int) cur_state->stage);
			return -EINVAL;
		}

		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cur_state->value, &res);
		if (unlikely(ret != 0))
			return ret;

		if (dev_priv->has_mob) {
			struct vmw_ctx_bindinfo_tex binding;
			struct vmw_ctx_validation_info *node;

			node = vmw_execbuf_info_from_res(sw_context, ctx);
			if (!node)
				return -EINVAL;

			binding.bi.ctx = ctx;
			binding.bi.res = res;
			binding.bi.bt = vmw_ctx_binding_tex;
			binding.texture_stage = cur_state->stage;
			vmw_binding_add(node->staged, &binding.bi, 0,
					binding.texture_stage);
		}
	}

	return 0;
}

static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      void *buf)
{
	struct vmw_bo *vmw_bo;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd = buf;

	return vmw_translate_guest_ptr(dev_priv, sw_context, &cmd->body.ptr,
				       &vmw_bo);
}

/**
 * vmw_cmd_res_switch_backup - Utility function to handle backup buffer
 * switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res: Pointer to the resource.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving.
 */
static int vmw_cmd_res_switch_backup(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     struct vmw_resource *res, uint32_t *buf_id,
				     unsigned long backup_offset)
{
	struct vmw_bo *vbo;
	void *info;
	int ret;

	info = vmw_execbuf_info_from_res(sw_context, res);
	if (!info)
		return -EINVAL;

	ret = vmw_translate_mob_ptr(dev_priv, sw_context, buf_id, &vbo);
	if (ret)
		return ret;

	vmw_validation_res_switch_backup(sw_context->ctx, info, vbo,
					 backup_offset);
	return 0;
}

/**
 * vmw_cmd_switch_backup - Utility function to handle backup buffer switching
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @res_type: The resource type.
 * @converter: Information about user-space binding for this resource type.
 * @res_id: Pointer to the user-space resource handle in the command stream.
 * @buf_id: Pointer to the user-space backup buffer handle in the command
 * stream.
 * @backup_offset: Offset of backup into MOB.
 *
 * This function prepares for registering a switch of backup buffers in the
 * resource metadata just prior to unreserving. It's basically a wrapper around
 * vmw_cmd_res_switch_backup with a different interface.
 */
static int vmw_cmd_switch_backup(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 enum vmw_res_type res_type,
				 const struct vmw_user_resource_conv
				 *converter, uint32_t *res_id, uint32_t *buf_id,
				 unsigned long backup_offset)
{
	struct vmw_resource *res;
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, res_type,
				VMW_RES_DIRTY_NONE, converter, res_id, &res);
	if (ret)
		return ret;

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res, buf_id,
					 backup_offset);
}

/**
 * vmw_cmd_bind_gb_surface - Validate SVGA_3D_CMD_BIND_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_surface(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_surface,
				     user_surface_converter, &cmd->body.sid,
				     &cmd->body.mobid, 0);
}

/**
 * vmw_cmd_update_gb_image - Validate SVGA_3D_CMD_UPDATE_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_image(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_update_gb_surface - Validate SVGA_3D_CMD_UPDATE_GB_SURFACE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_update_gb_surface(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdUpdateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_image - Validate SVGA_3D_CMD_READBACK_GB_IMAGE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_image(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_readback_gb_surface - Validate SVGA_3D_CMD_READBACK_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_readback_gb_surface(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdReadbackGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_image - Validate SVGA_3D_CMD_INVALIDATE_GB_IMAGE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_image(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBImage) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.image.sid, NULL);
}

/**
 * vmw_cmd_invalidate_gb_surface - Validate SVGA_3D_CMD_INVALIDATE_GB_SURFACE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdInvalidateGBSurface) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_CLEAR, user_surface_converter,
				 &cmd->body.sid, NULL);
}

/**
 * vmw_cmd_shader_define - Validate SVGA_3D_CMD_SHADER_DEFINE command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDefineShader);
	int ret;
	size_t size;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	size = cmd->header.size - sizeof(cmd->body);
	ret = vmw_compat_shader_add(dev_priv, vmw_context_res_man(ctx),
				    cmd->body.shid, cmd + 1, cmd->body.type,
				    size, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_shader_destroy - Validate SVGA_3D_CMD_SHADER_DESTROY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDestroyShader);
	int ret;
	struct vmw_resource *ctx;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (unlikely(!dev_priv->has_mob))
		return 0;

	ret = vmw_shader_remove(vmw_context_res_man(ctx), cmd->body.shid,
				cmd->body.type, &sw_context->staged_cmd_res);
	if (unlikely(ret != 0))
		return ret;

	return vmw_resource_relocation_add(sw_context, NULL,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_nop);
}

/**
 * vmw_cmd_set_shader - Validate SVGA_3D_CMD_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShader);
	struct vmw_ctx_bindinfo_shader binding;
	struct vmw_resource *ctx, *res = NULL;
	struct vmw_ctx_validation_info *ctx_info;
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= SVGA3D_SHADERTYPE_PREDX_MAX) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, &ctx);
	if (unlikely(ret != 0))
		return ret;

	if (!dev_priv->has_mob)
		return 0;

	if (cmd->body.shid != SVGA3D_INVALID_ID) {
		/*
		 * This is the compat shader path - Per device guest-backed
		 * shaders, but user-space thinks it's per context host-
		 * backed shaders.
		 */
		res = vmw_shader_lookup(vmw_context_res_man(ctx),
					cmd->body.shid, cmd->body.type);
		if (!IS_ERR(res)) {
			ret = vmw_execbuf_res_val_add(sw_context, res,
						      VMW_RES_DIRTY_NONE,
						      vmw_val_add_flag_noctx);
			if (unlikely(ret != 0))
				return ret;

			ret = vmw_resource_relocation_add
				(sw_context, res,
				 vmw_ptr_diff(sw_context->buf_start,
					      &cmd->body.shid),
				 vmw_res_rel_normal);
			if (unlikely(ret != 0))
				return ret;
		}
	}

	if (IS_ERR_OR_NULL(res)) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
					VMW_RES_DIRTY_NONE,
					user_shader_converter, &cmd->body.shid,
					&res);
		if (unlikely(ret != 0))
			return ret;
	}

	ctx_info = vmw_execbuf_info_from_res(sw_context, ctx);
	if (!ctx_info)
		return -EINVAL;

	binding.bi.ctx = ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	vmw_binding_add(ctx_info->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

 * vmw_cmd_set_shader_const - Validate SVGA_3D_CMD_SET_SHADER_CONST command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetShaderConst);
	int ret;

	cmd = container_of(header, typeof(*cmd), header);

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
				VMW_RES_DIRTY_SET, user_context_converter,
				&cmd->body.cid, NULL);
	if (unlikely(ret != 0))
		return ret;

	if (dev_priv->has_mob)
		header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;

	return 0;
}

/**
 * vmw_cmd_bind_gb_shader - Validate SVGA_3D_CMD_BIND_GB_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_bind_gb_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBindGBShader) =
		container_of(header, typeof(*cmd), header);

	return vmw_cmd_switch_backup(dev_priv, sw_context, vmw_res_shader,
				     user_shader_converter, &cmd->body.shid,
				     &cmd->body.mobid, cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_set_single_constant_buffer - Validate
 * SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_single_constant_buffer(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetSingleConstantBuffer);
	SVGA3dShaderType max_shader_num = has_sm5_context(dev_priv) ?
		SVGA3D_NUM_SHADERTYPE : SVGA3D_NUM_SHADERTYPE_DX10;

	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_cb binding;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_cb;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;
	binding.offset = cmd->body.offsetInBytes;
	binding.size = cmd->body.sizeInBytes;
	binding.slot = cmd->body.slot;

	if (binding.shader_slot >= max_shader_num ||
	    binding.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer shader %u slot %u.\n",
			       (unsigned int) cmd->body.type,
			       (unsigned int) binding.slot);
		return -EINVAL;
	}

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot,
			binding.slot);

	return 0;
}

/**
 * vmw_cmd_dx_set_constant_buffer_offset - Validate
 * SVGA_3D_CMD_DX_SET_VS/PS/GS/HS/DS/CS_CONSTANT_BUFFER_OFFSET command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int
vmw_cmd_dx_set_constant_buffer_offset(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetConstantBufferOffset);

	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	u32 shader_slot;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (cmd->body.slot >= SVGA3D_DX_MAX_CONSTBUFFERS) {
		VMW_DEBUG_USER("Illegal const buffer slot %u.\n",
			       (unsigned int) cmd->body.slot);
		return -EINVAL;
	}

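	/*
	 * The per-stage variants of this command are consecutive in the
	 * command id space, so the shader slot falls out of a simple
	 * subtraction from the VS variant.
	 */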
	shader_slot = cmd->header.id - SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET;
	vmw_binding_cb_offset_update(ctx_node->staged, shader_slot,
				     cmd->body.slot, cmd->body.offsetInBytes);

	return 0;
}

/**
 * vmw_cmd_dx_set_shader_res - Validate SVGA_3D_CMD_DX_SET_SHADER_RESOURCES
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader_res(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShaderResources) =
		container_of(header, typeof(*cmd), header);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
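	/*
	 * The view ids trail the fixed command body, so their count is
	 * implied by the command header size. The range check below sums in
	 * u64 so a large startView cannot wrap the 32-bit comparison.
	 */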
	u32 num_sr_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dShaderResourceViewId);

	if ((u64) cmd->body.startView + (u64) num_sr_view >
	    (u64) SVGA3D_DX_MAX_SRVIEWS ||
	    cmd->body.type >= max_allowed) {
		VMW_DEBUG_USER("Invalid shader binding.\n");
		return -EINVAL;
	}

	return vmw_view_bindings_add(sw_context, vmw_view_sr,
				     vmw_ctx_binding_sr,
				     cmd->body.type - SVGA3D_SHADERTYPE_MIN,
				     (void *) &cmd[1], num_sr_view,
				     cmd->body.startView);
}

/**
 * vmw_cmd_dx_set_shader - Validate SVGA_3D_CMD_DX_SET_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_shader(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetShader);
	SVGA3dShaderType max_allowed = has_sm5_context(dev_priv) ?
		SVGA3D_SHADERTYPE_MAX : SVGA3D_SHADERTYPE_DX10_MAX;
	struct vmw_resource *res = NULL;
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_shader binding;
	int ret = 0;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);

	if (cmd->body.type >= max_allowed ||
	    cmd->body.type < SVGA3D_SHADERTYPE_MIN) {
		VMW_DEBUG_USER("Illegal shader type %u.\n",
			       (unsigned int) cmd->body.type);
		return -EINVAL;
	}

	if (cmd->body.shaderId != SVGA3D_INVALID_ID) {
		res = vmw_shader_lookup(sw_context->man, cmd->body.shaderId, 0);
		if (IS_ERR(res)) {
			VMW_DEBUG_USER("Could not find shader for binding.\n");
			return PTR_ERR(res);
		}

		ret = vmw_execbuf_res_val_add(sw_context, res,
					      VMW_RES_DIRTY_NONE,
					      vmw_val_add_flag_noctx);
		if (ret)
			return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_dx_shader;
	binding.shader_slot = cmd->body.type - SVGA3D_SHADERTYPE_MIN;

	vmw_binding_add(ctx_node->staged, &binding.bi, binding.shader_slot, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_vertex_buffers - Validates SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_vb binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetVertexBuffers body;
		SVGA3dVertexBuffer buf[];
	} *cmd;
	int i, ret, num;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dVertexBuffer);
	if ((u64)num + (u64)cmd->body.startBuffer >
	    (u64)SVGA3D_DX_MAX_VERTEXBUFFERS) {
		VMW_DEBUG_USER("Invalid number of vertex buffers.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_NONE,
					user_surface_converter,
					&cmd->buf[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.bt = vmw_ctx_binding_vb;
		binding.bi.res = res;
		binding.offset = cmd->buf[i].offset;
		binding.stride = cmd->buf[i].stride;
		binding.slot = i + cmd->body.startBuffer;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

/**
 * vmw_cmd_dx_set_index_buffer - Validate
 * SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_ib binding;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetIndexBuffer);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.sid, &res);
	if (unlikely(ret != 0))
		return ret;

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_ib;
	binding.offset = cmd->body.offset;
	binding.format = cmd->body.format;

	vmw_binding_add(ctx_node->staged, &binding.bi, 0, 0);

	return 0;
}

/**
 * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_rendertargets(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXSetRenderTargets) =
		container_of(header, typeof(*cmd), header);
	u32 num_rt_view = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dRenderTargetViewId);
	int ret;

	if (num_rt_view > SVGA3D_DX_MAX_RENDER_TARGETS) {
		VMW_DEBUG_USER("Invalid DX Rendertarget binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ds, vmw_ctx_binding_ds,
				    0, &cmd->body.depthStencilViewId, 1, 0);
	if (ret)
		return ret;

	return vmw_view_bindings_add(sw_context, vmw_view_rt,
				     vmw_ctx_binding_dx_rt, 0, (void *)&cmd[1],
				     num_rt_view, 0);
}

/**
 * vmw_cmd_dx_clear_rendertarget_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearRenderTargetView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_rt,
				  cmd->body.renderTargetViewId);

	return PTR_ERR_OR_ZERO(ret);
}

/**
 * vmw_cmd_dx_clear_depthstencil_view - Validate
 * SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_clear_depthstencil_view(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXClearDepthStencilView) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ds,
				  cmd->body.depthStencilViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_dx_view_define(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *srf;
	struct vmw_resource *res;
	enum vmw_view_type view_type;
	int ret;
	/*
	 * This is based on the fact that all affected define commands have the
	 * same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
		uint32 sid;
	} *cmd;

	if (!ctx_node)
		return -EINVAL;

	view_type = vmw_view_cmd_to_type(header->id);
	if (view_type == vmw_view_max)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	if (unlikely(cmd->sid == SVGA3D_INVALID_ID)) {
		VMW_DEBUG_USER("Invalid surface id.\n");
		return -EINVAL;
	}
	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->sid, &srf);
	if (unlikely(ret != 0))
		return ret;

	res = vmw_context_cotable(ctx_node->ctx, vmw_view_cotables[view_type]);
	ret = vmw_cotable_notify(res, cmd->defined_id);
	if (unlikely(ret != 0))
		return ret;

	return vmw_view_add(sw_context->man, ctx_node->ctx, srf, view_type,
			    cmd->defined_id, header,
			    header->size + sizeof(*header),
			    &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_set_so_targets - Validate SVGA_3D_CMD_DX_SET_SOTARGETS command.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_ctx_bindinfo_so_target binding;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetSOTargets body;
		SVGA3dSoTarget targets[];
	} *cmd;
	int i, num, ret;

	if (!ctx_node)
		return -EINVAL;

	cmd = container_of(header, typeof(*cmd), header);
	num = (cmd->header.size - sizeof(cmd->body)) / sizeof(SVGA3dSoTarget);

	if (num > SVGA3D_DX_MAX_SOTARGETS) {
		VMW_DEBUG_USER("Invalid DX SO binding.\n");
		return -EINVAL;
	}

	for (i = 0; i < num; i++) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
					VMW_RES_DIRTY_SET,
					user_surface_converter,
					&cmd->targets[i].sid, &res);
		if (unlikely(ret != 0))
			return ret;

		binding.bi.ctx = ctx_node->ctx;
		binding.bi.res = res;
		binding.bi.bt = vmw_ctx_binding_so_target;
		binding.offset = cmd->targets[i].offset;
		binding.size = cmd->targets[i].sizeInBytes;
		binding.slot = i;

		vmw_binding_add(ctx_node->staged, &binding.bi, 0, binding.slot);
	}

	return 0;
}

static int vmw_cmd_dx_so_define(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	/*
	 * This is based on the fact that all affected define commands have
	 * the same initial command body layout.
	 */
	struct {
		SVGA3dCmdHeader header;
		uint32 defined_id;
	} *cmd;
	enum vmw_so_type so_type;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	so_type = vmw_so_cmd_to_type(header->id);
	res = vmw_context_cotable(ctx_node->ctx, vmw_so_cotables[so_type]);
	if (IS_ERR(res))
		return PTR_ERR(res);
	cmd = container_of(header, typeof(*cmd), header);
	ret = vmw_cotable_notify(res, cmd->defined_id);

	return ret;
}

/**
 * vmw_cmd_dx_check_subresource - Validate SVGA_3D_CMD_DX_[X]_SUBRESOURCE
 * command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_check_subresource(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		union {
			SVGA3dCmdDXReadbackSubResource r_body;
			SVGA3dCmdDXInvalidateSubResource i_body;
			SVGA3dCmdDXUpdateSubResource u_body;
			SVGA3dSurfaceId sid;
		};
	} *cmd;

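	/*
	 * All three sub-resource commands keep the surface id at the same
	 * offset; the BUILD_BUG_ONs below pin that layout assumption at
	 * compile time so a single handler can serve them all.
	 */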
	BUILD_BUG_ON(offsetof(typeof(*cmd), r_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), i_body.sid) !=
		     offsetof(typeof(*cmd), sid));
	BUILD_BUG_ON(offsetof(typeof(*cmd), u_body.sid) !=
		     offsetof(typeof(*cmd), sid));

	cmd = container_of(header, typeof(*cmd), header);
	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->sid, NULL);
}

static int vmw_cmd_dx_cid_check(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

	if (!ctx_node)
		return -EINVAL;

	return 0;
}

/**
 * vmw_cmd_dx_view_remove - validate a view remove command and schedule the view
 * resource for removal.
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 *
 * Check that the view exists, and if it was not created using this command
 * batch, conditionally make this command a NOP.
 */
static int vmw_cmd_dx_view_remove(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct {
		SVGA3dCmdHeader header;
		union vmw_view_destroy body;
	} *cmd = container_of(header, typeof(*cmd), header);
	enum vmw_view_type view_type = vmw_view_cmd_to_type(header->id);
	struct vmw_resource *view;
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_view_remove(sw_context->man, cmd->body.view_id, view_type,
			      &sw_context->staged_cmd_res, &view);
	if (ret || !view)
		return ret;

	/*
	 * If the view wasn't created during this command batch, it might
	 * have been removed due to a context swapout, so add a
	 * relocation to conditionally make this command a NOP to avoid
	 * device errors.
	 */
	return vmw_resource_relocation_add(sw_context, view,
					   vmw_ptr_diff(sw_context->buf_start,
							&cmd->header.id),
					   vmw_res_rel_cond_nop);
}

/**
 * vmw_cmd_dx_define_shader - Validate SVGA_3D_CMD_DX_DEFINE_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_define_shader(struct vmw_private *dev_priv,
				    struct vmw_sw_context *sw_context,
				    SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDefineShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_DXSHADER);
	ret = vmw_cotable_notify(res, cmd->body.shaderId);
	if (ret)
		return ret;

	return vmw_dx_shader_add(sw_context->man, ctx_node->ctx,
				 cmd->body.shaderId, cmd->body.type,
				 &sw_context->staged_cmd_res);
}

/**
 * vmw_cmd_dx_destroy_shader - Validate SVGA_3D_CMD_DX_DESTROY_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_destroy_shader(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXDestroyShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node)
		return -EINVAL;

	ret = vmw_shader_remove(sw_context->man, cmd->body.shaderId, 0,
				&sw_context->staged_cmd_res);

	return ret;
}

/**
 * vmw_cmd_dx_bind_shader - Validate SVGA_3D_CMD_DX_BIND_SHADER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_bind_shader(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;
	struct vmw_resource *res;
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindShader) =
		container_of(header, typeof(*cmd), header);
	int ret;

	if (cmd->body.cid != SVGA3D_INVALID_ID) {
		ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
					VMW_RES_DIRTY_SET,
					user_context_converter, &cmd->body.cid,
					&ctx);
		if (ret)
			return ret;
	} else {
		struct vmw_ctx_validation_info *ctx_node =
			VMW_GET_CTX_NODE(sw_context);

		if (!ctx_node)
			return -EINVAL;

		ctx = ctx_node->ctx;
	}

	res = vmw_shader_lookup(vmw_context_res_man(ctx), cmd->body.shid, 0);
	if (IS_ERR(res)) {
		VMW_DEBUG_USER("Could not find shader to bind.\n");
		return PTR_ERR(res);
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		VMW_DEBUG_USER("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

/**
 * vmw_cmd_dx_genmips - Validate SVGA_3D_CMD_DX_GENMIPS command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
		container_of(header, typeof(*cmd), header);
	struct vmw_resource *view;
	struct vmw_res_cache_entry *rcache;

	view = vmw_view_id_val_add(sw_context, vmw_view_sr,
				   cmd->body.shaderResourceViewId);
	if (IS_ERR(view))
		return PTR_ERR(view);

	/*
	 * Normally the shader-resource view is not gpu-dirtying, but for
	 * this particular command it is...
	 * So mark the last looked-up surface, which is the surface
	 * the view points to, gpu-dirty.
	 */
	rcache = &sw_context->res_cache[vmw_res_surface];
	vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
				     VMW_RES_DIRTY_SET);

	return 0;
}

/**
 * vmw_cmd_dx_transfer_from_buffer - Validate
 * SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_dx_transfer_from_buffer(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXTransferFromBuffer) =
		container_of(header, typeof(*cmd), header);
	int ret;

	ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				VMW_RES_DIRTY_NONE, user_surface_converter,
				&cmd->body.srcSid, NULL);
	if (ret != 0)
		return ret;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.destSid, NULL);
}

/**
 * vmw_cmd_intra_surface_copy - Validate SVGA_3D_CMD_INTRA_SURFACE_COPY command
 *
 * @dev_priv: Pointer to a device private struct.
 * @sw_context: The software context being used for this batch.
 * @header: Pointer to the command header in the command stream.
 */
static int vmw_cmd_intra_surface_copy(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdIntraSurfaceCopy) =
		container_of(header, typeof(*cmd), header);

	if (!(dev_priv->capabilities2 & SVGA_CAP2_INTRA_SURFACE_COPY))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_SET, user_surface_converter,
				 &cmd->body.surface.sid, NULL);
}

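/*
 * The vmw_cmd_sm5* helpers below gate commands that are only legal on a
 * device exposing SM5: they reject the command outright when SM5 is absent
 * and otherwise defer to the generic DX handlers.
 */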
static int vmw_cmd_sm5(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return 0;
}

static int vmw_cmd_sm5_view_define(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_define(dev_priv, sw_context, header);
}

static int vmw_cmd_sm5_view_remove(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_dx_view_remove(dev_priv, sw_context, header);
}

static int vmw_cmd_clear_uav_uint(struct vmw_private *dev_priv,
				  struct vmw_sw_context *sw_context,
				  SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewUint body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_clear_uav_float(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXClearUAViewFloat body;
	} *cmd = container_of(header, typeof(*cmd), header);
	struct vmw_resource *ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	ret = vmw_view_id_val_add(sw_context, vmw_view_ua,
				  cmd->body.uaViewId);

	return PTR_ERR_OR_ZERO(ret);
}

static int vmw_cmd_set_uav(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > vmw_max_num_uavs(dev_priv)) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

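	/* Track the UAV splice index as part of the staged binding state. */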
	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 0,
				  cmd->body.uavSpliceIndex);

	return 0;
}

static int vmw_cmd_set_cs_uav(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetCSUAViews body;
	} *cmd = container_of(header, typeof(*cmd), header);
	u32 num_uav = (cmd->header.size - sizeof(cmd->body)) /
		sizeof(SVGA3dUAViewId);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (num_uav > vmw_max_num_uavs(dev_priv)) {
		VMW_DEBUG_USER("Invalid UAV binding.\n");
		return -EINVAL;
	}

	ret = vmw_view_bindings_add(sw_context, vmw_view_ua,
				    vmw_ctx_binding_cs_uav, 0, (void *)&cmd[1],
				    num_uav, 0);
	if (ret)
		return ret;

	vmw_binding_add_uav_index(sw_context->dx_ctx_node->staged, 1,
				  cmd->body.startIndex);

	return 0;
}

static int vmw_cmd_dx_define_streamoutput(struct vmw_private *dev_priv,
					  struct vmw_sw_context *sw_context,
					  SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDefineStreamOutputWithMob body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_context_cotable(ctx_node->ctx, SVGA_COTABLE_STREAMOUTPUT);
	ret = vmw_cotable_notify(res, cmd->body.soid);
	if (ret)
		return ret;

	return vmw_dx_streamoutput_add(sw_context->man, ctx_node->ctx,
				       cmd->body.soid,
				       &sw_context->staged_cmd_res);
}

static int vmw_cmd_dx_destroy_streamoutput(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDestroyStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	/*
	 * When device does not support SM5 then streamoutput with mob command is
	 * not available to user-space. Simply return in this case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * With SM5 capable device if lookup fails then user-space probably used
	 * old streamoutput define command. Return without an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	return vmw_dx_streamoutput_remove(sw_context->man, cmd->body.soid,
					  &sw_context->staged_cmd_res);
}

static int vmw_cmd_dx_bind_streamoutput(struct vmw_private *dev_priv,
					struct vmw_sw_context *sw_context,
					SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXBindStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res)) {
		DRM_ERROR("Could not find streamoutput to bind.\n");
		return PTR_ERR(res);
	}

	vmw_dx_streamoutput_set_size(res, cmd->body.sizeInBytes);

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	return vmw_cmd_res_switch_backup(dev_priv, sw_context, res,
					 &cmd->body.mobid,
					 cmd->body.offsetInBytes);
}

static int vmw_cmd_dx_set_streamoutput(struct vmw_private *dev_priv,
				       struct vmw_sw_context *sw_context,
				       SVGA3dCmdHeader *header)
{
	struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
	struct vmw_resource *res;
	struct vmw_ctx_bindinfo_so binding;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXSetStreamOutput body;
	} *cmd = container_of(header, typeof(*cmd), header);
	int ret;

	if (!ctx_node) {
		DRM_ERROR("DX Context not set.\n");
		return -EINVAL;
	}

	if (cmd->body.soid == SVGA3D_INVALID_ID)
		return 0;

	/*
	 * When device does not support SM5 then streamoutput with mob command is
	 * not available to user-space. Simply return in this case.
	 */
	if (!has_sm5_context(dev_priv))
		return 0;

	/*
	 * With SM5 capable device if lookup fails then user-space probably used
	 * old streamoutput define command. Return without an error.
	 */
	res = vmw_dx_streamoutput_lookup(vmw_context_res_man(ctx_node->ctx),
					 cmd->body.soid);
	if (IS_ERR(res))
		return 0;

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_NONE,
				      vmw_val_add_flag_noctx);
	if (ret) {
		DRM_ERROR("Error creating resource validation node.\n");
		return ret;
	}

	binding.bi.ctx = ctx_node->ctx;
	binding.bi.res = res;
	binding.bi.bt = vmw_ctx_binding_so;
	binding.slot = 0; /* Only one SO set to context at a time. */

	vmw_binding_add(sw_context->dx_ctx_node->staged, &binding.bi, 0,
			binding.slot);

	return 0;
}

static int vmw_cmd_indexed_instanced_indirect(struct vmw_private *dev_priv,
					      struct vmw_sw_context *sw_context,
					      SVGA3dCmdHeader *header)
{
	struct vmw_draw_indexed_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawIndexedInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

static int vmw_cmd_instanced_indirect(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_draw_instanced_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDrawInstancedIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

static int vmw_cmd_dispatch_indirect(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_dispatch_indirect_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDXDispatchIndirect body;
	} *cmd = container_of(header, typeof(*cmd), header);

	if (!has_sm5_context(dev_priv))
		return -EINVAL;

	return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
				 VMW_RES_DIRTY_NONE, user_surface_converter,
				 &cmd->body.argsBufferSid, NULL);
}

static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context,
				void *buf, uint32_t *size)
{
	uint32_t size_remaining = *size;
	uint32_t cmd_id;

	cmd_id = ((uint32_t *)buf)[0];
	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
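	/*
	 * Both blit directions use the same fifo command layout, so the
	 * GMRFB-to-screen struct also sizes the reverse blit below.
	 */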
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		VMW_DEBUG_USER("Unsupported SVGA command: %u.\n", cmd_id);
		return -EINVAL;
	}

	if (*size > size_remaining) {
		VMW_DEBUG_USER("Invalid SVGA command (size mismatch): %u.\n",
			       cmd_id);
		return -EINVAL;
	}

	if (unlikely(!sw_context->kernel)) {
		VMW_DEBUG_USER("Kernel only SVGA command: %u.\n", cmd_id);
		return -EPERM;
	}

	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);

	return 0;
}

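/*
 * Per-command dispatch table. The three booleans following each handler
 * are, in order: user_allow (the command may come from an unprivileged
 * client), gb_disable (the command is rejected when guest-backed objects
 * are enabled) and gb_enable (the command requires guest-backed objects).
 */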
static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check, true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_begin_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok,
		    true, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE_V2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_GENERATE_MIPMAPS, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_ACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEACTIVATE_SURFACE, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD1, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD2, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD12, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD13, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD14, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD15, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD16, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_DEAD17, &vmw_cmd_invalid, false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_OTABLE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_MOB, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_REDEFINE_GB_MOB64, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE, &vmw_cmd_bind_gb_surface,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_COND_BIND_GB_SURFACE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_IMAGE, &vmw_cmd_update_gb_image,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SURFACE,
		    &vmw_cmd_update_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE,
		    &vmw_cmd_readback_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_SURFACE,
		    &vmw_cmd_readback_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
		    &vmw_cmd_invalidate_gb_image, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_SURFACE,
		    &vmw_cmd_invalidate_gb_surface, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SHADER, &vmw_cmd_bind_gb_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SHADER, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_OTABLE_BASE64, &vmw_cmd_invalid,
		    false, false, false),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_GB_QUERY, &vmw_cmd_begin_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_END_GB_QUERY, &vmw_cmd_end_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_GB_QUERY, &vmw_cmd_wait_gb_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_NOP_ERROR, &vmw_cmd_ok,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_ENABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DISABLE_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_MAP_MOB_INTO_GART, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UNMAP_GART_RANGE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DESTROY_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_UPDATE_GB_SCREENTARGET, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE, &vmw_cmd_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_SCREEN_DMA, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_GB_MOB_FENCE, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DEFINE_GB_SURFACE_V2, &vmw_cmd_invalid,
		    false, false, true),

	/* SM commands */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_CONTEXT, &vmw_cmd_invalid,
		    false, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SINGLE_CONSTANT_BUFFER,
		    &vmw_cmd_dx_set_single_constant_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER_RESOURCES,
		    &vmw_cmd_dx_set_shader_res, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SHADER, &vmw_cmd_dx_set_shader,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SAMPLERS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_AUTO, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VERTEX_BUFFERS,
		    &vmw_cmd_dx_set_vertex_buffers, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INDEX_BUFFER,
		    &vmw_cmd_dx_set_index_buffer, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RENDERTARGETS,
		    &vmw_cmd_dx_set_rendertargets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_BLEND_STATE, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SCISSORRECTS, &vmw_cmd_dx_cid_check,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_clear_rendertarget_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_clear_depthstencil_view, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY, &vmw_cmd_invalid,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_GENMIPS, &vmw_cmd_dx_genmips,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_UPDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_INVALIDATE_SUBRESOURCE,
		    &vmw_cmd_dx_check_subresource, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADERRESOURCE_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RENDERTARGET_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_VIEW,
		    &vmw_cmd_dx_view_remove, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_ELEMENTLAYOUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_ELEMENTLAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_BLEND_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_BLEND_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_DEPTHSTENCIL_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_RASTERIZER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SAMPLER_STATE,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SAMPLER_STATE,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_SHADER,
		    &vmw_cmd_dx_define_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_SHADER,
		    &vmw_cmd_dx_destroy_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_SHADER,
		    &vmw_cmd_dx_bind_shader, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT,
		    &vmw_cmd_dx_so_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_STREAMOUTPUT,
		    &vmw_cmd_dx_destroy_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_STREAMOUTPUT,
		    &vmw_cmd_dx_set_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_SOTARGETS,
		    &vmw_cmd_dx_set_so_targets, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_INPUT_LAYOUT,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_TOPOLOGY,
		    &vmw_cmd_dx_cid_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BUFFER_COPY,
		    &vmw_cmd_buffer_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_PRED_COPY_REGION,
		    &vmw_cmd_pred_copy_check, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_TRANSFER_FROM_BUFFER,
		    &vmw_cmd_dx_transfer_from_buffer,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_GS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_HS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_DS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_CONSTANT_BUFFER_OFFSET,
		    &vmw_cmd_dx_set_constant_buffer_offset,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_INTRA_SURFACE_COPY, &vmw_cmd_intra_surface_copy,
		    true, false, true),

	/*
	 * SM5 commands
	 */
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_UA_VIEW, &vmw_cmd_sm5_view_define,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_UA_VIEW, &vmw_cmd_sm5_view_remove,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_UINT, &vmw_cmd_clear_uav_uint,
		    true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_CLEAR_UA_VIEW_FLOAT,
		    &vmw_cmd_clear_uav_float, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_COPY_STRUCTURE_COUNT, &vmw_cmd_invalid, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_UA_VIEWS, &vmw_cmd_set_uav, true, false,
		    true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INDEXED_INSTANCED_INDIRECT,
		    &vmw_cmd_indexed_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DRAW_INSTANCED_INDIRECT,
		    &vmw_cmd_instanced_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH, &vmw_cmd_sm5, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DISPATCH_INDIRECT,
		    &vmw_cmd_dispatch_indirect, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_CS_UA_VIEWS, &vmw_cmd_set_cs_uav, true,
		    false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_DEPTHSTENCIL_VIEW_V2,
		    &vmw_cmd_sm5_view_define, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_STREAMOUTPUT_WITH_MOB,
		    &vmw_cmd_dx_define_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_STREAMOUTPUT,
		    &vmw_cmd_dx_bind_streamoutput, true, false, true),
	VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_RASTERIZER_STATE_V2,
		    &vmw_cmd_dx_so_define, true, false, true),
};

bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd)
{
	u32 cmd_id = ((u32 *) buf)[0];

	if (cmd_id >= SVGA_CMD_MAX) {
		SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
		const struct vmw_cmd_entry *entry;

		*size = header->size + sizeof(SVGA3dCmdHeader);
		cmd_id = header->id;
		if (cmd_id >= SVGA_3D_CMD_MAX)
			return false;

		cmd_id -= SVGA_3D_CMD_BASE;
		entry = &vmw_cmd_entries[cmd_id];
		*cmd = entry->cmd_name;
		return true;
	}

	switch (cmd_id) {
	case SVGA_CMD_UPDATE:
		*cmd = "SVGA_CMD_UPDATE";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdUpdate);
		break;
	case SVGA_CMD_DEFINE_GMRFB:
		*cmd = "SVGA_CMD_DEFINE_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdDefineGMRFB);
		break;
	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
		*cmd = "SVGA_CMD_BLIT_GMRFB_TO_SCREEN";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
		*cmd = "SVGA_CMD_BLIT_SCREEN_TO_GMRFB";
		*size = sizeof(u32) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
		break;
	default:
		*cmd = "UNKNOWN";
		*size = 0;
		return false;
	}

	return true;
}

static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context, void *buf,
			 uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;
	const struct vmw_cmd_entry *entry;
	bool gb = dev_priv->capabilities & SVGA_CAP_GBOBJECTS;

	cmd_id = ((uint32_t *)buf)[0];
	/* Handle any non-3D commands */
	if (unlikely(cmd_id < SVGA_CMD_MAX))
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);

	cmd_id = header->id;
	*size = header->size + sizeof(SVGA3dCmdHeader);

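	/*
	 * 3D command ids start at SVGA_3D_CMD_BASE; rebase the id so it can
	 * index the dispatch table directly.
	 */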
	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_invalid;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_invalid;

	entry = &vmw_cmd_entries[cmd_id];
	if (unlikely(!entry->func))
		goto out_invalid;

	if (unlikely(!entry->user_allow && !sw_context->kernel))
		goto out_privileged;

	if (unlikely(entry->gb_disable && gb))
		goto out_old;

	if (unlikely(entry->gb_enable && !gb))
		goto out_new;

	ret = entry->func(dev_priv, sw_context, header);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("SVGA3D command: %d failed with error %d\n",
			       cmd_id + SVGA_3D_CMD_BASE, ret);
		return ret;
	}

	return 0;
out_invalid:
	VMW_DEBUG_USER("Invalid SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_privileged:
	VMW_DEBUG_USER("Privileged SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EPERM;
out_old:
	VMW_DEBUG_USER("Deprecated (disallowed) SVGA3D command: %d\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
out_new:
	VMW_DEBUG_USER("SVGA3D command: %d not supported by virtual device.\n",
		       cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}

static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context, void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	sw_context->buf_start = buf;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
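		/*
		 * vmw_cmd_check() wrote the verified command's total size
		 * back into size, which is what advances the cursor here.
		 */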
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		VMW_DEBUG_USER("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	/* Memory is validation context memory, so no need to free it */
	INIT_LIST_HEAD(&sw_context->bo_relocations);
}

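/*
 * Patch the guest addresses in the command stream now that every buffer
 * object has been given its final placement by validation.
 */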
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	struct vmw_relocation *reloc;
	struct ttm_buffer_object *bo;

	list_for_each_entry(reloc, &sw_context->bo_relocations, head) {
		bo = &reloc->vbo->base;
		switch (bo->resource->mem_type) {
		case TTM_PL_VRAM:
			reloc->location->offset += bo->resource->start << PAGE_SHIFT;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
			break;
		case VMW_PL_GMR:
			reloc->location->gmrId = bo->resource->start;
			break;
		case VMW_PL_MOB:
			*reloc->mob_loc = bo->resource->start;
			break;
		default:
			BUG();
		}
	}
	vmw_free_relocations(sw_context);
}

static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

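	/*
	 * Grow by roughly 1.5x, page aligned, so repeated small increases
	 * don't trigger a reallocation every time.
	 */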
	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	vfree(sw_context->cmd_bounce);
	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		VMW_DEBUG_USER("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/*
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and return NULL.
 * It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates a
 * userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_cmd_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence, p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence, p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false, sequence,
					 false, VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return ret;
}

/**
3853 * vmw_execbuf_copy_fence_user - copy fence object information to user-space.
3855 * @dev_priv: Pointer to a vmw_private struct.
3856 * @vmw_fp: Pointer to the struct vmw_fpriv representing the calling file.
3857 * @ret: Return value from fence object creation.
3858 * @user_fence_rep: User space address of a struct drm_vmw_fence_rep to which
3859 * the information should be copied.
3860 * @fence: Pointer to the fenc object.
3861 * @fence_handle: User-space fence handle.
3862 * @out_fence_fd: exported file descriptor for the fence. -1 if not used
3864 * This function copies fence information to user-space. If copying fails, the
3865 * user-space struct drm_vmw_fence_rep::error member is hopefully left
3866 * untouched, and if it's preloaded with an -EFAULT by user-space, the error
3867 * will hopefully be detected.
3869 * Also if copying fails, user-space will be unable to signal the fence object
3870 * so we wait for it immediately, and then unreference the user-space reference.
int
vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
			    struct vmw_fpriv *vmw_fp, int ret,
			    struct drm_vmw_fence_rep __user *user_fence_rep,
			    struct vmw_fence_obj *fence, uint32_t fence_handle,
			    int32_t out_fence_fd)
{
	struct drm_vmw_fence_rep fence_rep;

	if (user_fence_rep == NULL)
		return 0;

	memset(&fence_rep, 0, sizeof(fence_rep));

	fence_rep.error = ret;
	fence_rep.fd = out_fence_fd;
	if (ret == 0) {
		BUG_ON(fence == NULL);

		fence_rep.handle = fence_handle;
		fence_rep.seqno = fence->base.seqno;
		vmw_update_seqno(dev_priv);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;
	}

	/*
	 * copy_to_user errors will be detected by user space not seeing
	 * fence_rep::error filled in. Typically user-space would have pre-set
	 * that member to -EFAULT.
	 */
	ret = copy_to_user(user_fence_rep, &fence_rep,
			   sizeof(fence_rep));

	/*
	 * User-space lost the fence object. We need to sync and unreference
	 * the handle.
	 */
	if (unlikely(ret != 0) && (fence_rep.error == 0)) {
		ttm_ref_object_base_unref(vmw_fp->tfile, fence_handle);
		VMW_DEBUG_USER("Fence copy error. Syncing.\n");
		(void) vmw_fence_obj_wait(fence, false, false,
					  VMW_FENCE_WAIT_TIMEOUT);
	}

	return ret ? -EFAULT : 0;
}
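
/*
 * User-space counterpart (hypothetical sketch, not part of this driver):
 * to make a lost fence_rep detectable as described above, a client pre-sets
 * the error member before invoking the execbuf ioctl:
 *
 *	struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *
 *	arg.fence_rep = (unsigned long) &rep;
 *	... submit via DRM_VMW_EXECBUF ...
 *	if (rep.error == -EFAULT)
 *		the kernel never wrote the struct back, and any created
 *		fence has already been waited on and unreferenced here.
 */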

/**
 * vmw_execbuf_submit_fifo - Patch a command batch and submit it using the fifo.
 *
 * @dev_priv: Pointer to a device private structure.
 * @kernel_commands: Pointer to the unpatched command batch.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command batch pointed to
 * by @kernel_commands will have been modified.
 */
static int vmw_execbuf_submit_fifo(struct vmw_private *dev_priv,
				   void *kernel_commands, u32 command_size,
				   struct vmw_sw_context *sw_context)
{
	void *cmd;

	if (sw_context->dx_ctx_node)
		cmd = VMW_CMD_CTX_RESERVE(dev_priv, command_size,
					  sw_context->dx_ctx_node->ctx->id);
	else
		cmd = VMW_CMD_RESERVE(dev_priv, command_size);

	if (!cmd)
		return -ENOMEM;

	vmw_apply_relocations(sw_context);
	memcpy(cmd, kernel_commands, command_size);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmd_commit(dev_priv, command_size);

	return 0;
}
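
/*
 * Note on patching order: buffer-object relocations are recorded as pointers
 * into the source batch (struct vmw_relocation), so vmw_apply_relocations()
 * must run before the memcpy() above, while resource relocations are
 * recorded as byte offsets (struct vmw_resource_relocation) and are
 * therefore applied to the reserved fifo memory after the copy.
 */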

/**
 * vmw_execbuf_submit_cmdbuf - Patch a command batch and submit it using the
 * command buffer manager.
 *
 * @dev_priv: Pointer to a device private structure.
 * @header: Opaque handle to the command buffer allocation.
 * @command_size: Size of the unpatched command batch.
 * @sw_context: Structure holding the relocation lists.
 *
 * Side effects: If this function returns 0, then the command buffer
 * represented by @header will have been modified.
 */
static int vmw_execbuf_submit_cmdbuf(struct vmw_private *dev_priv,
				     struct vmw_cmdbuf_header *header,
				     u32 command_size,
				     struct vmw_sw_context *sw_context)
{
	u32 id = ((sw_context->dx_ctx_node) ? sw_context->dx_ctx_node->ctx->id :
		  SVGA3D_INVALID_ID);
	void *cmd = vmw_cmdbuf_reserve(dev_priv->cman, command_size, id, false,
				       header);

	vmw_apply_relocations(sw_context);
	vmw_resource_relocations_apply(cmd, &sw_context->res_relocations);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_cmdbuf_commit(dev_priv->cman, command_size, header, false);

	return 0;
}

/**
 * vmw_execbuf_cmdbuf - Prepare, if possible, a user-space command batch for
 * submission using a command buffer.
 *
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the commands to be submitted.
 * @kernel_commands: Kernel pointer to an already copied command batch, or
 * NULL. If command buffers cannot be used, the function returns this value
 * unchanged, and *@header is set to NULL.
 * @command_size: Size of the unpatched command batch.
 * @header: Out parameter returning the opaque pointer to the command buffer.
 *
 * This function checks whether we can use the command buffer manager for
 * submission and if so, creates a command buffer of suitable size and copies
 * the user data into that buffer.
 *
 * On successful return, the function returns a pointer to the data in the
 * command buffer and *@header is set to non-NULL.
 *
 * If an error is encountered, the function will return an error pointer. If
 * it is interrupted by a signal while sleeping, it will return -ERESTARTSYS
 * cast to an error pointer.
 */
static void *vmw_execbuf_cmdbuf(struct vmw_private *dev_priv,
				void __user *user_commands,
				void *kernel_commands, u32 command_size,
				struct vmw_cmdbuf_header **header)
{
	size_t cmdbuf_size;
	int ret;

	*header = NULL;
	if (command_size > SVGA_CB_MAX_SIZE) {
		VMW_DEBUG_USER("Command buffer is too large.\n");
		return ERR_PTR(-EINVAL);
	}

	if (!dev_priv->cman || kernel_commands)
		return kernel_commands;

	/* If possible, add a little space for fencing. */
	cmdbuf_size = command_size + 512;
	cmdbuf_size = min_t(size_t, cmdbuf_size, SVGA_CB_MAX_SIZE);
	kernel_commands = vmw_cmdbuf_alloc(dev_priv->cman, cmdbuf_size, true,
					   header);
	if (IS_ERR(kernel_commands))
		return kernel_commands;

	ret = copy_from_user(kernel_commands, user_commands, command_size);
	if (ret) {
		VMW_DEBUG_USER("Failed copying commands.\n");
		vmw_cmdbuf_header_free(*header);
		*header = NULL;
		return ERR_PTR(-EFAULT);
	}

	return kernel_commands;
}
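
/*
 * Illustrative caller contract (user_cmds, kernel_cmds and size are
 * placeholders): vmw_execbuf_process() distinguishes the three possible
 * outcomes like this:
 *
 *	cmd = vmw_execbuf_cmdbuf(dev_priv, user_cmds, kernel_cmds, size,
 *				 &header);
 *	if (IS_ERR(cmd))
 *		return PTR_ERR(cmd);	(hard error, nothing to free)
 *	if (header)
 *		submit cmd via vmw_execbuf_submit_cmdbuf()
 *	else
 *		cmd == kernel_cmds, possibly NULL; fall back to the
 *		bounce-buffer and fifo path.
 */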

static int vmw_execbuf_tie_context(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   uint32_t handle)
{
	struct vmw_resource *res;
	int ret;
	unsigned int size;

	if (handle == SVGA3D_INVALID_ID)
		return 0;

	size = vmw_execbuf_res_size(dev_priv, vmw_res_dx_context);
	ret = vmw_validation_preload_res(sw_context->ctx, size);
	if (ret)
		return ret;

	ret = vmw_user_resource_lookup_handle
		(dev_priv, sw_context->fp->tfile, handle,
		 user_context_converter, &res);
	if (ret != 0) {
		VMW_DEBUG_USER("Could not find or use DX context 0x%08x.\n",
			       (unsigned int) handle);
		return ret;
	}

	ret = vmw_execbuf_res_val_add(sw_context, res, VMW_RES_DIRTY_SET,
				      vmw_val_add_flag_none);
	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&res);
		return ret;
	}

	sw_context->dx_ctx_node = vmw_execbuf_info_from_res(sw_context, res);
	sw_context->man = vmw_context_res_man(res);

	vmw_resource_unreference(&res);
	return 0;
}
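
/**
 * vmw_execbuf_process - Validate, patch, submit and fence a command batch.
 *
 * @file_priv: Pointer to the calling struct drm_file.
 * @dev_priv: Pointer to a device private structure.
 * @user_commands: User-space pointer to the command batch, or NULL if
 * @kernel_commands is already populated.
 * @kernel_commands: Kernel pointer to an already copied command batch, or
 * NULL to copy from @user_commands.
 * @command_size: Size of the command batch in bytes.
 * @throttle_us: Historical throttling argument; no longer supported.
 * @dx_context_handle: Handle of the DX context to bind the batch to, or
 * SVGA3D_INVALID_ID for none.
 * @user_fence_rep: User-space address of a struct drm_vmw_fence_rep to
 * receive fence information, or NULL.
 * @out_fence: If non-NULL, the created fence object is handed out here
 * instead of being unreferenced.
 * @flags: DRM_VMW_EXECBUF_FLAG_* flags.
 *
 * Return: 0 on success, negative error code on failure.
 */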
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands, void *kernel_commands,
			uint32_t command_size, uint64_t throttle_us,
			uint32_t dx_context_handle,
			struct drm_vmw_fence_rep __user *user_fence_rep,
			struct vmw_fence_obj **out_fence, uint32_t flags)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_cmdbuf_header *header;
	uint32_t handle = 0;
	int ret;
	int32_t out_fence_fd = -1;
	struct sync_file *sync_file = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, sw_context, 1);

	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {
		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
		if (out_fence_fd < 0) {
			VMW_DEBUG_USER("Failed to get a fence fd.\n");
			return out_fence_fd;
		}
	}
4109 VMW_DEBUG_USER("Throttling is no longer supported.\n");
4112 kernel_commands = vmw_execbuf_cmdbuf(dev_priv, user_commands,
4113 kernel_commands, command_size,
4115 if (IS_ERR(kernel_commands)) {
4116 ret = PTR_ERR(kernel_commands);
4117 goto out_free_fence_fd;
4120 ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
4123 goto out_free_header;

	sw_context->kernel = false;
	if (kernel_commands == NULL) {
		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce, user_commands,
				     command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			VMW_DEBUG_USER("Failed copying commands.\n");
			goto out_unlock;
		}

		kernel_commands = sw_context->cmd_bounce;
	} else if (!header) {
		sw_context->kernel = true;
	}

	sw_context->filp = file_priv;
	sw_context->fp = vmw_fpriv(file_priv);
	INIT_LIST_HEAD(&sw_context->ctx_list);
	sw_context->cur_query_bo = dev_priv->pinned_bo;
	sw_context->last_query_ctx = NULL;
	sw_context->needs_post_query_barrier = false;
	sw_context->dx_ctx_node = NULL;
	sw_context->dx_query_mob = NULL;
	sw_context->dx_query_ctx = NULL;
	memset(sw_context->res_cache, 0, sizeof(sw_context->res_cache));
	INIT_LIST_HEAD(&sw_context->res_relocations);
	INIT_LIST_HEAD(&sw_context->bo_relocations);

	if (sw_context->staged_bindings)
		vmw_binding_state_reset(sw_context->staged_bindings);

	INIT_LIST_HEAD(&sw_context->staged_cmd_res);
	sw_context->ctx = &val_ctx;
	ret = vmw_execbuf_tie_context(dev_priv, sw_context, dx_context_handle);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_resources_reserve(sw_context);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_reserve(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err_nores;

	ret = vmw_validation_bo_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validation_res_validate(&val_ctx, true);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_validation_drop_ht(&val_ctx);

	ret = mutex_lock_interruptible(&dev_priv->binding_mutex);
	if (unlikely(ret != 0)) {
		ret = -ERESTARTSYS;
		goto out_err;
	}

	if (dev_priv->has_mob) {
		ret = vmw_rebind_contexts(sw_context);
		if (unlikely(ret != 0))
			goto out_unlock_binding;
	}

	if (!header) {
		ret = vmw_execbuf_submit_fifo(dev_priv, kernel_commands,
					      command_size, sw_context);
	} else {
		ret = vmw_execbuf_submit_cmdbuf(dev_priv, header, command_size,
						sw_context);
		header = NULL;
	}
	mutex_unlock(&dev_priv->binding_mutex);
	if (ret)
		goto out_err;

	vmw_query_bo_switch_commit(dev_priv, sw_context);
	ret = vmw_execbuf_fence_commands(file_priv, dev_priv, &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */
	if (ret != 0)
		VMW_DEBUG_USER("Fence submission error. Syncing.\n");

	vmw_execbuf_bindings_commit(sw_context, false);
	vmw_bind_dx_query_mob(sw_context);
	vmw_validation_res_unreserve(&val_ctx, false);

	vmw_validation_bo_fence(sw_context->ctx, fence);

	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, fence);

	/*
	 * If anything fails here, give up trying to export the fence and do a
	 * sync since the user mode will not be able to sync the fence itself.
	 * This ensures we are still functionally correct.
	 */
	if (flags & DRM_VMW_EXECBUF_FLAG_EXPORT_FENCE_FD) {

		sync_file = sync_file_create(&fence->base);
		if (!sync_file) {
			VMW_DEBUG_USER("Sync file create failed for fence\n");
			put_unused_fd(out_fence_fd);
			out_fence_fd = -1;

			(void) vmw_fence_obj_wait(fence, false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	ret = vmw_execbuf_copy_fence_user(dev_priv, vmw_fpriv(file_priv), ret,
					  user_fence_rep, fence, handle,
					  out_fence_fd);

	if (sync_file) {
		if (ret) {
			/* usercopy of fence failed, put the file object */
			fput(sync_file->file);
			put_unused_fd(out_fence_fd);
		} else {
			/* Link the fence with the FD created earlier */
			fd_install(out_fence_fd, sync_file->file);
		}
	}

	/* Don't unreference when handing fence out */
	if (unlikely(out_fence != NULL)) {
		*out_fence = fence;
		fence = NULL;
	} else if (likely(fence != NULL)) {
		vmw_fence_obj_unreference(&fence);
	}

	vmw_cmdbuf_res_commit(&sw_context->staged_cmd_res);
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);

	return ret;

out_unlock_binding:
	mutex_unlock(&dev_priv->binding_mutex);
out_err:
	vmw_validation_bo_backoff(&val_ctx);
out_err_nores:
	vmw_execbuf_bindings_commit(sw_context, true);
	vmw_validation_res_unreserve(&val_ctx, true);
	vmw_resource_relocations_free(&sw_context->res_relocations);
	vmw_free_relocations(sw_context);
	if (unlikely(dev_priv->pinned_bo != NULL && !dev_priv->query_cid_valid))
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
out_unlock:
	vmw_cmdbuf_res_revert(&sw_context->staged_cmd_res);
	vmw_validation_drop_ht(&val_ctx);
	WARN_ON(!list_empty(&sw_context->ctx_list));
	mutex_unlock(&dev_priv->cmdbuf_mutex);

	/*
	 * Unreference resources outside of the cmdbuf_mutex to avoid deadlocks
	 * in resource destruction paths.
	 */
	vmw_validation_unref_lists(&val_ctx);
out_free_header:
	if (header)
		vmw_cmdbuf_header_free(header);
out_free_fence_fd:
	if (out_fence_fd >= 0)
		put_unused_fd(out_fence_fd);

	return ret;
}

/**
 * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
 *
 * @dev_priv: The device private structure.
 *
 * This function is called to idle the fifo and unpin the query buffer if the
 * normal way to do this hits an error, which should typically be extremely
 * rare.
 */
static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
{
	VMW_DEBUG_USER("Can't unpin query buffer. Trying to recover.\n");

	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
}

/**
 * __vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query
 * bo.
 *
 * @dev_priv: The device private structure.
 * @fence: If non-NULL should point to a struct vmw_fence_obj issued _after_ a
 * query barrier that flushes all queries touching the current buffer pointed
 * to by @dev_priv->pinned_bo
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 *
 * The @dev_priv->cmdbuf_mutex needs to be held by the current thread before
 * calling this function.
 */
void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
				     struct vmw_fence_obj *fence)
{
	int ret = 0;
	struct vmw_fence_obj *lfence = NULL;
	DECLARE_VAL_CONTEXT(val_ctx, NULL, 0);

	if (dev_priv->pinned_bo == NULL)
		goto out_unlock;

	vmw_bo_placement_set(dev_priv->pinned_bo,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(&val_ctx, dev_priv->pinned_bo);
	if (ret)
		goto out_no_reserve;

	vmw_bo_placement_set(dev_priv->dummy_query_bo,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM,
			     VMW_BO_DOMAIN_GMR | VMW_BO_DOMAIN_VRAM);
	ret = vmw_validation_add_bo(&val_ctx, dev_priv->dummy_query_bo);
	if (ret)
		goto out_no_reserve;

	ret = vmw_validation_bo_reserve(&val_ctx, false);
	if (ret)
		goto out_no_reserve;

	if (dev_priv->query_cid_valid) {
		BUG_ON(fence != NULL);
		ret = vmw_cmd_emit_dummy_query(dev_priv, dev_priv->query_cid);
		if (ret)
			goto out_no_emit;
		dev_priv->query_cid_valid = false;
	}

	vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
	if (dev_priv->dummy_query_bo_pinned) {
		vmw_bo_pin_reserved(dev_priv->dummy_query_bo, false);
		dev_priv->dummy_query_bo_pinned = false;
	}
	if (fence == NULL) {
		(void) vmw_execbuf_fence_commands(NULL, dev_priv, &lfence,
						  NULL);
		fence = lfence;
	}
	vmw_validation_bo_fence(&val_ctx, fence);
	if (lfence != NULL)
		vmw_fence_obj_unreference(&lfence);

	vmw_validation_unref_lists(&val_ctx);
	vmw_bo_unreference(&dev_priv->pinned_bo);

out_unlock:
	return;
out_no_emit:
	vmw_validation_bo_backoff(&val_ctx);
out_no_reserve:
	vmw_validation_unref_lists(&val_ctx);
	vmw_execbuf_unpin_panic(dev_priv);
	vmw_bo_unreference(&dev_priv->pinned_bo);
}

/**
 * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned query bo.
 *
 * @dev_priv: The device private structure.
 *
 * This function should be used to unpin the pinned query bo, or as a query
 * barrier when we need to make sure that all queries have finished before the
 * next fifo command. (For example on hardware context destructions where the
 * hardware may otherwise leak unfinished queries.)
 *
 * This function does not return any failure codes, but makes attempts to do
 * safe unpinning in case of errors.
 *
 * The function will synchronize on the previous query barrier, and will thus
 * not finish until that barrier has executed.
 */
void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv)
{
	mutex_lock(&dev_priv->cmdbuf_mutex);
	if (dev_priv->query_cid_valid)
		__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
	mutex_unlock(&dev_priv->cmdbuf_mutex);
}

int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = data;
	int ret;
	struct dma_fence *in_fence = NULL;

	MKS_STAT_TIME_DECL(MKSSTAT_KERN_EXECBUF);
	MKS_STAT_TIME_PUSH(MKSSTAT_KERN_EXECBUF);

	/*
	 * Extend the ioctl argument while maintaining backwards compatibility:
	 * We take different code paths depending on the value of arg->version.
	 *
	 * Note: The ioctl argument is extended and zeropadded by core DRM.
	 */
	if (unlikely(arg->version > DRM_VMW_EXECBUF_VERSION ||
		     arg->version == 0)) {
		VMW_DEBUG_USER("Incorrect execbuf version.\n");
		ret = -EINVAL;
		goto mksstats_out;
	}

	switch (arg->version) {
	case 1:
		/* For v1 core DRM have extended + zeropadded the data */
		arg->context_handle = (uint32_t) -1;
		break;
	case 2:
	default:
		/* For v2 and later core DRM would have correctly copied it */
		break;
	}

	/* If imported a fence FD from elsewhere, then wait on it */
	if (arg->flags & DRM_VMW_EXECBUF_FLAG_IMPORT_FENCE_FD) {
		in_fence = sync_file_get_fence(arg->imported_fence_fd);

		if (!in_fence) {
			VMW_DEBUG_USER("Cannot get imported fence\n");
			ret = -EINVAL;
			goto mksstats_out;
		}

		ret = dma_fence_wait(in_fence, true);
		if (ret)
			goto out;
	}

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long) arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  arg->context_handle,
				  (void __user *)(unsigned long) arg->fence_rep,
				  NULL, arg->flags);

	if (unlikely(ret != 0))
		goto out;

	vmw_kms_cursor_post_execbuf(dev_priv);

out:
	if (in_fence)
		dma_fence_put(in_fence);

mksstats_out:
	MKS_STAT_TIME_POP(MKSSTAT_KERN_EXECBUF);
	return ret;
}
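
/*
 * Hypothetical user-space sketch (not part of this file): submitting a batch
 * through this ioctl with libdrm. drm_fd, cmds and cmd_size are placeholders;
 * error handling is omitted. The pre-set -EFAULT in rep.error lets the client
 * detect a lost fence_rep as described for vmw_execbuf_copy_fence_user().
 *
 *	#include <xf86drm.h>
 *	#include "vmwgfx_drm.h"
 *
 *	static int submit(int drm_fd, void *cmds, uint32_t cmd_size)
 *	{
 *		struct drm_vmw_fence_rep rep = { .error = -EFAULT };
 *		struct drm_vmw_execbuf_arg arg = {
 *			.commands = (unsigned long) cmds,
 *			.command_size = cmd_size,
 *			.version = DRM_VMW_EXECBUF_VERSION,
 *			.context_handle = (uint32_t) -1,
 *			.fence_rep = (unsigned long) &rep,
 *		};
 *
 *		return drmCommandWrite(drm_fd, DRM_VMW_EXECBUF, &arg,
 *				       sizeof(arg));
 *	}
 */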