1 /*
2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Ke Yu
25  *    Kevin Tian <kevin.tian@intel.com>
26  *    Zhiyuan Lv <zhiyuan.lv@intel.com>
27  *
28  * Contributors:
29  *    Min He <min.he@intel.com>
30  *    Ping Gao <ping.a.gao@intel.com>
31  *    Tina Zhang <tina.zhang@intel.com>
32  *    Yulei Zhang <yulei.zhang@intel.com>
33  *    Zhi Wang <zhi.a.wang@intel.com>
34  *
35  */
36
37 #include <linux/slab.h>
38 #include "i915_drv.h"
39 #include "gvt.h"
40 #include "i915_pvinfo.h"
41 #include "trace.h"
42
43 #define INVALID_OP    (~0U)
44
45 #define OP_LEN_MI           9
46 #define OP_LEN_2D           10
47 #define OP_LEN_3D_MEDIA     16
48 #define OP_LEN_MFX_VC       16
49 #define OP_LEN_VEBOX        16
50
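/*
 * Bits 31:29 of a command DWord encode its command type; CMD_TYPE() is used
 * to index the per-ring decode tables in ring_decode_info[] below.
 */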
51 #define CMD_TYPE(cmd)   (((cmd) >> 29) & 7)
52
53 struct sub_op_bits {
54         int hi;
55         int low;
56 };
57 struct decode_info {
58         char *name;
59         int op_len;
60         int nr_sub_op;
61         struct sub_op_bits *sub_op;
62 };
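/*
 * A decode_info describes how opcodes of one command type are laid out:
 * op_len is the number of opcode bits taken from the top of the command
 * DWord (see get_opcode()), and sub_op lists the bit ranges printed as
 * sub-opcodes by print_opcode().
 */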
63
64 #define   MAX_CMD_BUDGET                        0x7fffffff
65 #define   MI_WAIT_FOR_PLANE_C_FLIP_PENDING      (1<<15)
66 #define   MI_WAIT_FOR_PLANE_B_FLIP_PENDING      (1<<9)
67 #define   MI_WAIT_FOR_PLANE_A_FLIP_PENDING      (1<<1)
68
69 #define   MI_WAIT_FOR_SPRITE_C_FLIP_PENDING      (1<<20)
70 #define   MI_WAIT_FOR_SPRITE_B_FLIP_PENDING      (1<<10)
71 #define   MI_WAIT_FOR_SPRITE_A_FLIP_PENDING      (1<<2)
72
73 /* Render Command Map */
74
75 /* MI_* command Opcode (28:23) */
76 #define OP_MI_NOOP                          0x0
77 #define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
78 #define OP_MI_USER_INTERRUPT                0x2
79 #define OP_MI_WAIT_FOR_EVENT                0x3
80 #define OP_MI_FLUSH                         0x4
81 #define OP_MI_ARB_CHECK                     0x5
82 #define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
83 #define OP_MI_REPORT_HEAD                   0x7
84 #define OP_MI_ARB_ON_OFF                    0x8
85 #define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
86 #define OP_MI_BATCH_BUFFER_END              0xA
87 #define OP_MI_SUSPEND_FLUSH                 0xB
88 #define OP_MI_PREDICATE                     0xC  /* IVB+ */
89 #define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
90 #define OP_MI_SET_APPID                     0xE  /* IVB+ */
91 #define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
92 #define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
93 #define OP_MI_DISPLAY_FLIP                  0x14
94 #define OP_MI_SEMAPHORE_MBOX                0x16
95 #define OP_MI_SET_CONTEXT                   0x18
96 #define OP_MI_MATH                          0x1A
97 #define OP_MI_URB_CLEAR                     0x19
98 #define OP_MI_SEMAPHORE_SIGNAL              0x1B  /* BDW+ */
99 #define OP_MI_SEMAPHORE_WAIT                0x1C  /* BDW+ */
100
101 #define OP_MI_STORE_DATA_IMM                0x20
102 #define OP_MI_STORE_DATA_INDEX              0x21
103 #define OP_MI_LOAD_REGISTER_IMM             0x22
104 #define OP_MI_UPDATE_GTT                    0x23
105 #define OP_MI_STORE_REGISTER_MEM            0x24
106 #define OP_MI_FLUSH_DW                      0x26
107 #define OP_MI_CLFLUSH                       0x27
108 #define OP_MI_REPORT_PERF_COUNT             0x28
109 #define OP_MI_LOAD_REGISTER_MEM             0x29  /* HSW+ */
110 #define OP_MI_LOAD_REGISTER_REG             0x2A  /* HSW+ */
111 #define OP_MI_RS_STORE_DATA_IMM             0x2B  /* HSW+ */
112 #define OP_MI_LOAD_URB_MEM                  0x2C  /* HSW+ */
113 #define OP_MI_STORE_URM_MEM                 0x2D  /* HSW+ */
114 #define OP_MI_2E                            0x2E  /* BDW+ */
115 #define OP_MI_2F                            0x2F  /* BDW+ */
116 #define OP_MI_BATCH_BUFFER_START            0x31
117
118 /* Bit definition for dword 0 */
119 #define _CMDBIT_BB_START_IN_PPGTT       (1UL << 8)
120
121 #define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36
122
123 #define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
124 #define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
125 #define BATCH_BUFFER_ADR_SPACE_BIT(x)   (((x) >> 8) & 1U)
126 #define BATCH_BUFFER_2ND_LEVEL_BIT(x)   ((x) >> 22 & 1U)
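/*
 * MI_BATCH_BUFFER_START fields used by the scanner: bits 31:2 of the low
 * address DWord and bits 15:0 of the high DWord form the batch buffer GMA
 * (see get_gma_bb_from_cmd()), while bit 8 of DWord 0 selects PPGTT vs.
 * GGTT addressing and bit 22 marks a second-level batch buffer.
 */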
127
128 /* 2D command: Opcode (28:22) */
129 #define OP_2D(x)    ((2<<7) | (x))
130
131 #define OP_XY_SETUP_BLT                             OP_2D(0x1)
132 #define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
133 #define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
134 #define OP_XY_PIXEL_BLT                             OP_2D(0x24)
135 #define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
136 #define OP_XY_TEXT_BLT                              OP_2D(0x26)
137 #define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
138 #define OP_XY_COLOR_BLT                             OP_2D(0x50)
139 #define OP_XY_PAT_BLT                               OP_2D(0x51)
140 #define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
141 #define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
142 #define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
143 #define OP_XY_FULL_BLT                              OP_2D(0x55)
144 #define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
145 #define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
146 #define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
147 #define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
148 #define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
149 #define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
150 #define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
151 #define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
152 #define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
153 #define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
154 #define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)
155
156 /* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
157 #define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
158         ((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
159
160 #define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)
161
162 #define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
163 #define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
164 #define OP_3D_MEDIA_0_1_4                       OP_3D_MEDIA(0x0, 0x1, 0x04)
165
166 #define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)
167
168 #define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)
169
170 #define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
171 #define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
172 #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
173 #define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
174 #define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
175
176 #define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
177 #define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
178 #define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
179 #define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)
180
181 #define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
182 #define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
183 #define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
184 #define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
185 #define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
186 #define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
187 #define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
188 #define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
189 #define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
190 #define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
191 #define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
192 #define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
193 #define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
194 #define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
195 #define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
196 #define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
197 #define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
198 #define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
199 #define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
200 #define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
201 #define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
202 #define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
203 #define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
204 #define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
205 #define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
206 #define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
207 #define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
208 #define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
209 #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
210 #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
211 #define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
212 #define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
213 #define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
214 #define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
215 #define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
216 #define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
217 #define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
218 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
219 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
220 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
221 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
222 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
223 #define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
224 #define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
225 #define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
226 #define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
227 #define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
228 #define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
229 #define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
230 #define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
231 #define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
232 #define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
233 #define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
234 #define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
235 #define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
236 #define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
237 #define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
238 #define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
239 #define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
240 #define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
241 #define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
242 #define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
243 #define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
244 #define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
245 #define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
246 #define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
247
248 #define OP_3DSTATE_VF_INSTANCING                OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
249 #define OP_3DSTATE_VF_SGVS                      OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
250 #define OP_3DSTATE_VF_TOPOLOGY                  OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
251 #define OP_3DSTATE_WM_CHROMAKEY                 OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
252 #define OP_3DSTATE_PS_BLEND                     OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
253 #define OP_3DSTATE_WM_DEPTH_STENCIL             OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
254 #define OP_3DSTATE_PS_EXTRA                     OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
255 #define OP_3DSTATE_RASTER                       OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
256 #define OP_3DSTATE_SBE_SWIZ                     OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
257 #define OP_3DSTATE_WM_HZ_OP                     OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
258 #define OP_3DSTATE_COMPONENT_PACKING            OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
259
260 #define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
261 #define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
262 #define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
263 #define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
264 #define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
265 #define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
266 #define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
267 #define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
268 #define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
269 #define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
270 #define OP_3DSTATE_MULTISAMPLE_BDW              OP_3D_MEDIA(0x3, 0x0, 0x0D)
271 #define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
272 #define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
273 #define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
274 #define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
275 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
276 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
277 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
278 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
279 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
280 #define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
281 #define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
282 #define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
283 #define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
284 #define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
285 #define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
286 #define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
287 #define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)
288
289 /* VCCP Command Parser */
290
291 /*
292  * The MFX and VEB command definitions below are from the vaapi intel-driver project (BSD License):
293  * git://anongit.freedesktop.org/vaapi/intel-driver
294  * src/i965_defines.h
295  *
296  */
297
298 #define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
299         (3 << 13 | \
300          (pipeline) << 11 | \
301          (op) << 8 | \
302          (sub_opa) << 5 | \
303          (sub_opb))
304
305 #define OP_MFX_PIPE_MODE_SELECT                    OP_MFX(2, 0, 0, 0)  /* ALL */
306 #define OP_MFX_SURFACE_STATE                       OP_MFX(2, 0, 0, 1)  /* ALL */
307 #define OP_MFX_PIPE_BUF_ADDR_STATE                 OP_MFX(2, 0, 0, 2)  /* ALL */
308 #define OP_MFX_IND_OBJ_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 3)  /* ALL */
309 #define OP_MFX_BSP_BUF_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 4)  /* ALL */
310 #define OP_2_0_0_5                                 OP_MFX(2, 0, 0, 5)  /* ALL */
311 #define OP_MFX_STATE_POINTER                       OP_MFX(2, 0, 0, 6)  /* ALL */
312 #define OP_MFX_QM_STATE                            OP_MFX(2, 0, 0, 7)  /* IVB+ */
313 #define OP_MFX_FQM_STATE                           OP_MFX(2, 0, 0, 8)  /* IVB+ */
314 #define OP_MFX_PAK_INSERT_OBJECT                   OP_MFX(2, 0, 2, 8)  /* IVB+ */
315 #define OP_MFX_STITCH_OBJECT                       OP_MFX(2, 0, 2, 0xA)  /* IVB+ */
316
317 #define OP_MFD_IT_OBJECT                           OP_MFX(2, 0, 1, 9) /* ALL */
318
319 #define OP_MFX_WAIT                                OP_MFX(1, 0, 0, 0) /* IVB+ */
320 #define OP_MFX_AVC_IMG_STATE                       OP_MFX(2, 1, 0, 0) /* ALL */
321 #define OP_MFX_AVC_QM_STATE                        OP_MFX(2, 1, 0, 1) /* ALL */
322 #define OP_MFX_AVC_DIRECTMODE_STATE                OP_MFX(2, 1, 0, 2) /* ALL */
323 #define OP_MFX_AVC_SLICE_STATE                     OP_MFX(2, 1, 0, 3) /* ALL */
324 #define OP_MFX_AVC_REF_IDX_STATE                   OP_MFX(2, 1, 0, 4) /* ALL */
325 #define OP_MFX_AVC_WEIGHTOFFSET_STATE              OP_MFX(2, 1, 0, 5) /* ALL */
326 #define OP_MFD_AVC_PICID_STATE                     OP_MFX(2, 1, 1, 5) /* HSW+ */
327 #define OP_MFD_AVC_DPB_STATE                       OP_MFX(2, 1, 1, 6) /* IVB+ */
328 #define OP_MFD_AVC_SLICEADDR                       OP_MFX(2, 1, 1, 7) /* IVB+ */
329 #define OP_MFD_AVC_BSD_OBJECT                      OP_MFX(2, 1, 1, 8) /* ALL */
330 #define OP_MFC_AVC_PAK_OBJECT                      OP_MFX(2, 1, 2, 9) /* ALL */
331
332 #define OP_MFX_VC1_PRED_PIPE_STATE                 OP_MFX(2, 2, 0, 1) /* ALL */
333 #define OP_MFX_VC1_DIRECTMODE_STATE                OP_MFX(2, 2, 0, 2) /* ALL */
334 #define OP_MFD_VC1_SHORT_PIC_STATE                 OP_MFX(2, 2, 1, 0) /* IVB+ */
335 #define OP_MFD_VC1_LONG_PIC_STATE                  OP_MFX(2, 2, 1, 1) /* IVB+ */
336 #define OP_MFD_VC1_BSD_OBJECT                      OP_MFX(2, 2, 1, 8) /* ALL */
337
338 #define OP_MFX_MPEG2_PIC_STATE                     OP_MFX(2, 3, 0, 0) /* ALL */
339 #define OP_MFX_MPEG2_QM_STATE                      OP_MFX(2, 3, 0, 1) /* ALL */
340 #define OP_MFD_MPEG2_BSD_OBJECT                    OP_MFX(2, 3, 1, 8) /* ALL */
341 #define OP_MFC_MPEG2_SLICEGROUP_STATE              OP_MFX(2, 3, 2, 3) /* ALL */
342 #define OP_MFC_MPEG2_PAK_OBJECT                    OP_MFX(2, 3, 2, 9) /* ALL */
343
344 #define OP_MFX_2_6_0_0                             OP_MFX(2, 6, 0, 0) /* IVB+ */
345 #define OP_MFX_2_6_0_8                             OP_MFX(2, 6, 0, 8) /* IVB+ */
346 #define OP_MFX_2_6_0_9                             OP_MFX(2, 6, 0, 9) /* IVB+ */
347
348 #define OP_MFX_JPEG_PIC_STATE                      OP_MFX(2, 7, 0, 0)
349 #define OP_MFX_JPEG_HUFF_TABLE_STATE               OP_MFX(2, 7, 0, 2)
350 #define OP_MFD_JPEG_BSD_OBJECT                     OP_MFX(2, 7, 1, 8)
351
352 #define OP_VEB(pipeline, op, sub_opa, sub_opb) \
353         (3 << 13 | \
354          (pipeline) << 11 | \
355          (op) << 8 | \
356          (sub_opa) << 5 | \
357          (sub_opb))
358
359 #define OP_VEB_SURFACE_STATE                       OP_VEB(2, 4, 0, 0)
360 #define OP_VEB_STATE                               OP_VEB(2, 4, 0, 2)
361 #define OP_VEB_DNDI_IECP_STATE                     OP_VEB(2, 4, 0, 3)
362
363 struct parser_exec_state;
364
365 typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
366
367 #define GVT_CMD_HASH_BITS   7
368
369 /* which DWords need address fix */
370 #define ADDR_FIX_1(x1)                  (1 << (x1))
371 #define ADDR_FIX_2(x1, x2)              (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
372 #define ADDR_FIX_3(x1, x2, x3)          (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
373 #define ADDR_FIX_4(x1, x2, x3, x4)      (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
374 #define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
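/*
 * Example (hypothetical table entry): a command carrying a single graphics
 * memory address that starts at DWord 1 would set addr_bitmap to
 * ADDR_FIX_1(1); the address still occupies only one bit here even when it
 * spans two DWords on devices with 64-bit command addresses.
 */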
375
376 struct cmd_info {
377         char *name;
378         u32 opcode;
379
380 #define F_LEN_MASK      (1U<<0)
381 #define F_LEN_CONST  1U
382 #define F_LEN_VAR    0U
383
384 /*
385  * command has its own ip advance logic,
386  * e.g. MI_BATCH_BUFFER_START and MI_BATCH_BUFFER_END
387  */
388 #define F_IP_ADVANCE_CUSTOM (1<<1)
389
390 #define F_POST_HANDLE   (1<<2)
391         u32 flag;
392
393 #define R_RCS   (1 << RCS)
394 #define R_VCS1  (1 << VCS)
395 #define R_VCS2  (1 << VCS2)
396 #define R_VCS   (R_VCS1 | R_VCS2)
397 #define R_BCS   (1 << BCS)
398 #define R_VECS  (1 << VECS)
399 #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
400         /* rings that support this cmd: BLT/RCS/VCS/VECS */
401         uint16_t rings;
402
403         /* devices that support this cmd: SNB/IVB/HSW/... */
404         uint16_t devices;
405
406         /* Bitmap of which DWords are addresses that need fixing up.
407          * A 0 bit marks a 32-bit non-address operand in the command;
408          * a 1 bit marks an address operand, which may be 32-bit or
409          * 64-bit depending on the architecture (defined by
410          * "gmadr_bytes_in_cmd" in the intel_gvt device info).
411          * No matter the address length, each address only takes
412          * one bit in the bitmap.
413          */
414         uint16_t addr_bitmap;
415
416         /* flag == F_LEN_CONST : command length in DWords
417          * flag == F_LEN_VAR : number of low bits of DWord 0 that
418          * encode the command length (biased by 2), see get_cmd_length()
419          */
420         uint8_t len;
421
422         parser_cmd_handler handler;
423 };
424
425 struct cmd_entry {
426         struct hlist_node hlist;
427         struct cmd_info *info;
428 };
429
430 enum {
431         RING_BUFFER_INSTRUCTION,
432         BATCH_BUFFER_INSTRUCTION,
433         BATCH_BUFFER_2ND_LEVEL,
434 };
435
436 enum {
437         GTT_BUFFER,
438         PPGTT_BUFFER
439 };
440
441 struct parser_exec_state {
442         struct intel_vgpu *vgpu;
443         int ring_id;
444
445         int buf_type;
446
447         /* batch buffer address type */
448         int buf_addr_type;
449
450         /* graphics memory address of ring buffer start */
451         unsigned long ring_start;
452         unsigned long ring_size;
453         unsigned long ring_head;
454         unsigned long ring_tail;
455
456         /* instruction graphics memory address */
457         unsigned long ip_gma;
458
459         /* mapped va of ip_gma */
460         void *ip_va;
461         void *rb_va;
462
463         void *ret_bb_va;
464         /* next instruction when returning from batch buffer to ring buffer */
465         unsigned long ret_ip_gma_ring;
466
467         /* next instruction when returning from 2nd-level batch buffer to batch buffer */
468         unsigned long ret_ip_gma_bb;
469
470         /* batch buffer address type (GTT or PPGTT),
471          * restored when returning from a 2nd-level batch buffer
472          */
473         int saved_buf_addr_type;
474
475         struct cmd_info *info;
476
477         struct intel_vgpu_workload *workload;
478 };
479
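/* number of DWords an address occupies in a command on this device:
 * 1 when gmadr_bytes_in_cmd is 4, 2 when it is 8
 */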
480 #define gmadr_dw_number(s)      \
481         (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
482
483 static unsigned long bypass_scan_mask = 0;
484
485 /* ring ALL, type = 0 */
486 static struct sub_op_bits sub_op_mi[] = {
487         {31, 29},
488         {28, 23},
489 };
490
491 static struct decode_info decode_info_mi = {
492         "MI",
493         OP_LEN_MI,
494         ARRAY_SIZE(sub_op_mi),
495         sub_op_mi,
496 };
497
498 /* ring BCS, command type 2 */
499 static struct sub_op_bits sub_op_2d[] = {
500         {31, 29},
501         {28, 22},
502 };
503
504 static struct decode_info decode_info_2d = {
505         "2D",
506         OP_LEN_2D,
507         ARRAY_SIZE(sub_op_2d),
508         sub_op_2d,
509 };
510
511 /* ring RCS, command type 3 */
512 static struct sub_op_bits sub_op_3d_media[] = {
513         {31, 29},
514         {28, 27},
515         {26, 24},
516         {23, 16},
517 };
518
519 static struct decode_info decode_info_3d_media = {
520         "3D_Media",
521         OP_LEN_3D_MEDIA,
522         ARRAY_SIZE(sub_op_3d_media),
523         sub_op_3d_media,
524 };
525
526 /* ring VCS, command type 3 */
527 static struct sub_op_bits sub_op_mfx_vc[] = {
528         {31, 29},
529         {28, 27},
530         {26, 24},
531         {23, 21},
532         {20, 16},
533 };
534
535 static struct decode_info decode_info_mfx_vc = {
536         "MFX_VC",
537         OP_LEN_MFX_VC,
538         ARRAY_SIZE(sub_op_mfx_vc),
539         sub_op_mfx_vc,
540 };
541
542 /* ring VECS, command type 3 */
543 static struct sub_op_bits sub_op_vebox[] = {
544         {31, 29},
545         {28, 27},
546         {26, 24},
547         {23, 21},
548         {20, 16},
549 };
550
551 static struct decode_info decode_info_vebox = {
552         "VEBOX",
553         OP_LEN_VEBOX,
554         ARRAY_SIZE(sub_op_vebox),
555         sub_op_vebox,
556 };
557
558 static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
559         [RCS] = {
560                 &decode_info_mi,
561                 NULL,
562                 NULL,
563                 &decode_info_3d_media,
564                 NULL,
565                 NULL,
566                 NULL,
567                 NULL,
568         },
569
570         [VCS] = {
571                 &decode_info_mi,
572                 NULL,
573                 NULL,
574                 &decode_info_mfx_vc,
575                 NULL,
576                 NULL,
577                 NULL,
578                 NULL,
579         },
580
581         [BCS] = {
582                 &decode_info_mi,
583                 NULL,
584                 &decode_info_2d,
585                 NULL,
586                 NULL,
587                 NULL,
588                 NULL,
589                 NULL,
590         },
591
592         [VECS] = {
593                 &decode_info_mi,
594                 NULL,
595                 NULL,
596                 &decode_info_vebox,
597                 NULL,
598                 NULL,
599                 NULL,
600                 NULL,
601         },
602
603         [VCS2] = {
604                 &decode_info_mi,
605                 NULL,
606                 NULL,
607                 &decode_info_mfx_vc,
608                 NULL,
609                 NULL,
610                 NULL,
611                 NULL,
612         },
613 };
614
615 static inline u32 get_opcode(u32 cmd, int ring_id)
616 {
617         struct decode_info *d_info;
618
619         d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
620         if (d_info == NULL)
621                 return INVALID_OP;
622
623         return cmd >> (32 - d_info->op_len);
624 }
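/*
 * For example, an MI command (op_len 9) yields cmd >> 23, i.e. bits 31:23:
 * the command type in 31:29 plus the MI opcode in 28:23, which matches the
 * OP_MI_* values above since the MI type bits are zero.
 */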
625
626 static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
627                 unsigned int opcode, int ring_id)
628 {
629         struct cmd_entry *e;
630
631         hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
632                 if ((opcode == e->info->opcode) &&
633                                 (e->info->rings & (1 << ring_id)))
634                         return e->info;
635         }
636         return NULL;
637 }
638
639 static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
640                 u32 cmd, int ring_id)
641 {
642         u32 opcode;
643
644         opcode = get_opcode(cmd, ring_id);
645         if (opcode == INVALID_OP)
646                 return NULL;
647
648         return find_cmd_entry(gvt, opcode, ring_id);
649 }
650
651 static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
652 {
653         return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
654 }
655
656 static inline void print_opcode(u32 cmd, int ring_id)
657 {
658         struct decode_info *d_info;
659         int i;
660
661         d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
662         if (d_info == NULL)
663                 return;
664
665         gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
666                         cmd >> (32 - d_info->op_len), d_info->name);
667
668         for (i = 0; i < d_info->nr_sub_op; i++)
669                 pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
670                                         d_info->sub_op[i].low));
671
672         pr_err("\n");
673 }
674
675 static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
676 {
677         return s->ip_va + (index << 2);
678 }
679
680 static inline u32 cmd_val(struct parser_exec_state *s, int index)
681 {
682         return *cmd_ptr(s, index);
683 }
684
685 static void parser_exec_state_dump(struct parser_exec_state *s)
686 {
687         int cnt = 0;
688         int i;
689
690         gvt_dbg_cmd("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
691                         " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
692                         s->ring_id, s->ring_start, s->ring_start + s->ring_size,
693                         s->ring_head, s->ring_tail);
694
695         gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
696                         s->buf_type == RING_BUFFER_INSTRUCTION ?
697                         "RING_BUFFER" : "BATCH_BUFFER",
698                         s->buf_addr_type == GTT_BUFFER ?
699                         "GTT" : "PPGTT", s->ip_gma);
700
701         if (s->ip_va == NULL) {
702                 gvt_dbg_cmd(" ip_va(NULL)");
703                 return;
704         }
705
706         gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
707                         s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
708                         cmd_val(s, 2), cmd_val(s, 3));
709
710         print_opcode(cmd_val(s, 0), s->ring_id);
711
712         /* dump the whole page to the kernel log */
713         pr_err("    ip_va=%p: %08x %08x %08x %08x\n",
714                         s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
715                         cmd_val(s, 2), cmd_val(s, 3));
716
717         s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
718
719         while (cnt < 1024) {
720                 pr_err("ip_va=%p: ", s->ip_va);
721                 for (i = 0; i < 8; i++)
722                         pr_err("%08x ", cmd_val(s, i));
723                 pr_err("\n");
724
725                 s->ip_va += 8 * sizeof(u32);
726                 cnt += 8;
727         }
728 }
729
730 static inline void update_ip_va(struct parser_exec_state *s)
731 {
732         unsigned long len = 0;
733
734         if (WARN_ON(s->ring_head == s->ring_tail))
735                 return;
736
737         if (s->buf_type == RING_BUFFER_INSTRUCTION) {
738                 unsigned long ring_top = s->ring_start + s->ring_size;
739
740                 if (s->ring_head > s->ring_tail) {
741                         if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
742                                 len = (s->ip_gma - s->ring_head);
743                         else if (s->ip_gma >= s->ring_start &&
744                                         s->ip_gma <= s->ring_tail)
745                                 len = (ring_top - s->ring_head) +
746                                         (s->ip_gma - s->ring_start);
747                 } else
748                         len = (s->ip_gma - s->ring_head);
749
750                 s->ip_va = s->rb_va + len;
751         } else {/* shadow batch buffer */
752                 s->ip_va = s->ret_bb_va;
753         }
754 }
755
756 static inline int ip_gma_set(struct parser_exec_state *s,
757                 unsigned long ip_gma)
758 {
759         WARN_ON(!IS_ALIGNED(ip_gma, 4));
760
761         s->ip_gma = ip_gma;
762         update_ip_va(s);
763         return 0;
764 }
765
766 static inline int ip_gma_advance(struct parser_exec_state *s,
767                 unsigned int dw_len)
768 {
769         s->ip_gma += (dw_len << 2);
770
771         if (s->buf_type == RING_BUFFER_INSTRUCTION) {
772                 if (s->ip_gma >= s->ring_start + s->ring_size)
773                         s->ip_gma -= s->ring_size;
774                 update_ip_va(s);
775         } else {
776                 s->ip_va += (dw_len << 2);
777         }
778
779         return 0;
780 }
781
782 static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
783 {
784         if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
785                 return info->len;
786
787         /* F_LEN_VAR: the low info->len bits of DWord 0 hold (length - 2) */
788         return (cmd & ((1U << info->len) - 1)) + 2;
789 }
790
791 static inline int cmd_length(struct parser_exec_state *s)
792 {
793         return get_cmd_length(s->info, cmd_val(s, 0));
794 }
795
796 /* do not remove this; some platforms may need a clflush here */
797 #define patch_value(s, addr, val) do { \
798         *addr = val; \
799 } while (0)
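/*
 * patch_value() rewrites a DWord of the buffer being scanned (via
 * cmd_ptr()/s->ip_va); the handlers below use it e.g. to NOOP out
 * MI_DISPLAY_FLIP / MI_WAIT_FOR_EVENT or to redirect DERRMR/FORCEWAKE_MT
 * writes to the PVINFO page.
 */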
800
801 static bool is_shadowed_mmio(unsigned int offset)
802 {
803         bool ret = false;
804
805         if ((offset == 0x2168) || /* BB current head register UDW */
806             (offset == 0x2140) || /* BB current head register */
807             (offset == 0x211c) || /* second BB head register UDW */
808             (offset == 0x2114)) { /* second BB head register */
809                 ret = true;
810         }
811         return ret;
812 }
813
814 static inline bool is_force_nonpriv_mmio(unsigned int offset)
815 {
816         return (offset >= 0x24d0 && offset < 0x2500);
817 }
818
819 static int force_nonpriv_reg_handler(struct parser_exec_state *s,
820                                      unsigned int offset, unsigned int index)
821 {
822         struct intel_gvt *gvt = s->vgpu->gvt;
823         unsigned int data = cmd_val(s, index + 1);
824
825         if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
826                 gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
827                         offset, data);
828                 return -EINVAL;
829         }
830         return 0;
831 }
832
833 static int cmd_reg_handler(struct parser_exec_state *s,
834         unsigned int offset, unsigned int index, char *cmd)
835 {
836         struct intel_vgpu *vgpu = s->vgpu;
837         struct intel_gvt *gvt = vgpu->gvt;
838
839         if (offset + 4 > gvt->device_info.mmio_size) {
840                 gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
841                                 cmd, offset);
842                 return -EINVAL;
843         }
844
845         if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
846                 gvt_vgpu_err("%s access to non-render register (%x)\n",
847                                 cmd, offset);
848                 return 0;
849         }
850
851         if (is_shadowed_mmio(offset)) {
852                 gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
853                 return 0;
854         }
855
856         if (is_force_nonpriv_mmio(offset) &&
857             force_nonpriv_reg_handler(s, offset, index))
858                 return -EINVAL;
859
860         if (offset == i915_mmio_reg_offset(DERRMR) ||
861                 offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
862                 /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
863                 patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
864         }
865
866         /* TODO: Update the global mask if this MMIO is a masked-MMIO */
867         intel_gvt_mmio_set_cmd_accessed(gvt, offset);
868         return 0;
869 }
870
871 #define cmd_reg(s, i) \
872         (cmd_val(s, i) & GENMASK(22, 2))
873
874 #define cmd_reg_inhibit(s, i) \
875         (cmd_val(s, i) & GENMASK(22, 18))
876
877 #define cmd_gma(s, i) \
878         (cmd_val(s, i) & GENMASK(31, 2))
879
880 #define cmd_gma_hi(s, i) \
881         (cmd_val(s, i) & GENMASK(15, 0))
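/*
 * LRI/LRM/SRM operand helpers: cmd_reg() masks out the MMIO register offset
 * (bits 22:2) of an operand DWord, cmd_gma() the low 32 bits (31:2) of a
 * graphics memory address and cmd_gma_hi() its upper 16 bits;
 * cmd_reg_inhibit() is non-zero when any of bits 22:18 are set.
 */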
882
883 static int cmd_handler_lri(struct parser_exec_state *s)
884 {
885         int i, ret = 0;
886         int cmd_len = cmd_length(s);
887         struct intel_gvt *gvt = s->vgpu->gvt;
888
889         for (i = 1; i < cmd_len; i += 2) {
890                 if (IS_BROADWELL(gvt->dev_priv) &&
891                                 (s->ring_id != RCS)) {
892                         if (s->ring_id == BCS &&
893                                         cmd_reg(s, i) ==
894                                         i915_mmio_reg_offset(DERRMR))
895                                 ret |= 0;
896                         else
897                                 ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
898                 }
899                 if (ret)
900                         break;
901                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
902         }
903         return ret;
904 }
905
906 static int cmd_handler_lrr(struct parser_exec_state *s)
907 {
908         int i, ret = 0;
909         int cmd_len = cmd_length(s);
910
911         for (i = 1; i < cmd_len; i += 2) {
912                 if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
913                         ret |= ((cmd_reg_inhibit(s, i) ||
914                                         (cmd_reg_inhibit(s, i + 1)))) ?
915                                 -EINVAL : 0;
916                 if (ret)
917                         break;
918                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
919                 ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
920         }
921         return ret;
922 }
923
924 static inline int cmd_address_audit(struct parser_exec_state *s,
925                 unsigned long guest_gma, int op_size, bool index_mode);
926
927 static int cmd_handler_lrm(struct parser_exec_state *s)
928 {
929         struct intel_gvt *gvt = s->vgpu->gvt;
930         int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
931         unsigned long gma;
932         int i, ret = 0;
933         int cmd_len = cmd_length(s);
934
935         for (i = 1; i < cmd_len;) {
936                 if (IS_BROADWELL(gvt->dev_priv))
937                         ret |= (cmd_reg_inhibit(s, i)) ? -EINVAL : 0;
938                 if (ret)
939                         break;
940                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
941                 if (cmd_val(s, 0) & (1 << 22)) {
942                         gma = cmd_gma(s, i + 1);
943                         if (gmadr_bytes == 8)
944                                 gma |= (cmd_gma_hi(s, i + 2)) << 32;
945                         ret |= cmd_address_audit(s, gma, sizeof(u32), false);
946                 }
947                 i += gmadr_dw_number(s) + 1;
948         }
949         return ret;
950 }
951
952 static int cmd_handler_srm(struct parser_exec_state *s)
953 {
954         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
955         unsigned long gma;
956         int i, ret = 0;
957         int cmd_len = cmd_length(s);
958
959         for (i = 1; i < cmd_len;) {
960                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
961                 if (cmd_val(s, 0) & (1 << 22)) {
962                         gma = cmd_gma(s, i + 1);
963                         if (gmadr_bytes == 8)
964                                 gma |= (cmd_gma_hi(s, i + 2)) << 32;
965                         ret |= cmd_address_audit(s, gma, sizeof(u32), false);
966                 }
967                 i += gmadr_dw_number(s) + 1;
968         }
969         return ret;
970 }
971
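/*
 * Per-ring interrupt events that the command handlers queue into
 * workload->pending_events when a scanned command requests an interrupt
 * (PIPE_CONTROL notify, MI_FLUSH_DW, MI_USER_INTERRUPT).
 */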
972 struct cmd_interrupt_event {
973         int pipe_control_notify;
974         int mi_flush_dw;
975         int mi_user_interrupt;
976 };
977
978 static struct cmd_interrupt_event cmd_interrupt_events[] = {
979         [RCS] = {
980                 .pipe_control_notify = RCS_PIPE_CONTROL,
981                 .mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
982                 .mi_user_interrupt = RCS_MI_USER_INTERRUPT,
983         },
984         [BCS] = {
985                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
986                 .mi_flush_dw = BCS_MI_FLUSH_DW,
987                 .mi_user_interrupt = BCS_MI_USER_INTERRUPT,
988         },
989         [VCS] = {
990                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
991                 .mi_flush_dw = VCS_MI_FLUSH_DW,
992                 .mi_user_interrupt = VCS_MI_USER_INTERRUPT,
993         },
994         [VCS2] = {
995                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
996                 .mi_flush_dw = VCS2_MI_FLUSH_DW,
997                 .mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
998         },
999         [VECS] = {
1000                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1001                 .mi_flush_dw = VECS_MI_FLUSH_DW,
1002                 .mi_user_interrupt = VECS_MI_USER_INTERRUPT,
1003         },
1004 };
1005
1006 static int cmd_handler_pipe_control(struct parser_exec_state *s)
1007 {
1008         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1009         unsigned long gma;
1010         bool index_mode = false;
1011         unsigned int post_sync;
1012         int ret = 0;
1013
1014         post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
1015
1016         /* LRI post sync */
1017         if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
1018                 ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
1019         /* post sync */
1020         else if (post_sync) {
1021                 if (post_sync == 2)
1022                         ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
1023                 else if (post_sync == 3)
1024                         ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
1025                 else if (post_sync == 1) {
1026                         /* check ggtt */
1027                         if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
1028                                 gma = cmd_val(s, 2) & GENMASK(31, 3);
1029                                 if (gmadr_bytes == 8)
1030                                         gma |= (cmd_gma_hi(s, 3)) << 32;
1031                                 /* Store Data Index */
1032                                 if (cmd_val(s, 1) & (1 << 21))
1033                                         index_mode = true;
1034                                 ret |= cmd_address_audit(s, gma, sizeof(u64),
1035                                                 index_mode);
1036                         }
1037                 }
1038         }
1039
1040         if (ret)
1041                 return ret;
1042
1043         if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
1044                 set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
1045                                 s->workload->pending_events);
1046         return 0;
1047 }
1048
1049 static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
1050 {
1051         set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
1052                         s->workload->pending_events);
1053         return 0;
1054 }
1055
1056 static int cmd_advance_default(struct parser_exec_state *s)
1057 {
1058         return ip_gma_advance(s, cmd_length(s));
1059 }
1060
1061 static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
1062 {
1063         int ret;
1064
1065         if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1066                 s->buf_type = BATCH_BUFFER_INSTRUCTION;
1067                 ret = ip_gma_set(s, s->ret_ip_gma_bb);
1068                 s->buf_addr_type = s->saved_buf_addr_type;
1069         } else {
1070                 s->buf_type = RING_BUFFER_INSTRUCTION;
1071                 s->buf_addr_type = GTT_BUFFER;
1072                 if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
1073                         s->ret_ip_gma_ring -= s->ring_size;
1074                 ret = ip_gma_set(s, s->ret_ip_gma_ring);
1075         }
1076         return ret;
1077 }
1078
1079 struct mi_display_flip_command_info {
1080         int pipe;
1081         int plane;
1082         int event;
1083         i915_reg_t stride_reg;
1084         i915_reg_t ctrl_reg;
1085         i915_reg_t surf_reg;
1086         u64 stride_val;
1087         u64 tile_val;
1088         u64 surf_val;
1089         bool async_flip;
1090 };
1091
1092 struct plane_code_mapping {
1093         int pipe;
1094         int plane;
1095         int event;
1096 };
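/*
 * On BDW, MI_DISPLAY_FLIP encodes the target pipe/plane in bits 21:19 of
 * DWord 0; gen8_decode_mi_display_flip() maps that code to the pipe, plane
 * and flip-done event via gen8_plane_code[] below.
 */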
1097
1098 static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
1099                 struct mi_display_flip_command_info *info)
1100 {
1101         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1102         struct plane_code_mapping gen8_plane_code[] = {
1103                 [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
1104                 [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
1105                 [2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
1106                 [3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
1107                 [4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
1108                 [5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
1109         };
1110         u32 dword0, dword1, dword2;
1111         u32 v;
1112
1113         dword0 = cmd_val(s, 0);
1114         dword1 = cmd_val(s, 1);
1115         dword2 = cmd_val(s, 2);
1116
1117         v = (dword0 & GENMASK(21, 19)) >> 19;
1118         if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
1119                 return -EINVAL;
1120
1121         info->pipe = gen8_plane_code[v].pipe;
1122         info->plane = gen8_plane_code[v].plane;
1123         info->event = gen8_plane_code[v].event;
1124         info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
1125         info->tile_val = (dword1 & 0x1);
1126         info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
1127         info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
1128
1129         if (info->plane == PLANE_A) {
1130                 info->ctrl_reg = DSPCNTR(info->pipe);
1131                 info->stride_reg = DSPSTRIDE(info->pipe);
1132                 info->surf_reg = DSPSURF(info->pipe);
1133         } else if (info->plane == PLANE_B) {
1134                 info->ctrl_reg = SPRCTL(info->pipe);
1135                 info->stride_reg = SPRSTRIDE(info->pipe);
1136                 info->surf_reg = SPRSURF(info->pipe);
1137         } else {
1138                 WARN_ON(1);
1139                 return -EINVAL;
1140         }
1141         return 0;
1142 }
1143
1144 static int skl_decode_mi_display_flip(struct parser_exec_state *s,
1145                 struct mi_display_flip_command_info *info)
1146 {
1147         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1148         struct intel_vgpu *vgpu = s->vgpu;
1149         u32 dword0 = cmd_val(s, 0);
1150         u32 dword1 = cmd_val(s, 1);
1151         u32 dword2 = cmd_val(s, 2);
1152         u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
1153
1154         info->plane = PRIMARY_PLANE;
1155
1156         switch (plane) {
1157         case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
1158                 info->pipe = PIPE_A;
1159                 info->event = PRIMARY_A_FLIP_DONE;
1160                 break;
1161         case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
1162                 info->pipe = PIPE_B;
1163                 info->event = PRIMARY_B_FLIP_DONE;
1164                 break;
1165         case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
1166                 info->pipe = PIPE_C;
1167                 info->event = PRIMARY_C_FLIP_DONE;
1168                 break;
1169
1170         case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
1171                 info->pipe = PIPE_A;
1172                 info->event = SPRITE_A_FLIP_DONE;
1173                 info->plane = SPRITE_PLANE;
1174                 break;
1175         case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
1176                 info->pipe = PIPE_B;
1177                 info->event = SPRITE_B_FLIP_DONE;
1178                 info->plane = SPRITE_PLANE;
1179                 break;
1180         case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
1181                 info->pipe = PIPE_C;
1182                 info->event = SPRITE_C_FLIP_DONE;
1183                 info->plane = SPRITE_PLANE;
1184                 break;
1185
1186         default:
1187                 gvt_vgpu_err("unknown plane code %d\n", plane);
1188                 return -EINVAL;
1189         }
1190
1191         info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
1192         info->tile_val = (dword1 & GENMASK(2, 0));
1193         info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
1194         info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
1195
1196         info->ctrl_reg = DSPCNTR(info->pipe);
1197         info->stride_reg = DSPSTRIDE(info->pipe);
1198         info->surf_reg = DSPSURF(info->pipe);
1199
1200         return 0;
1201 }
1202
1203 static int gen8_check_mi_display_flip(struct parser_exec_state *s,
1204                 struct mi_display_flip_command_info *info)
1205 {
1206         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1207         u32 stride, tile;
1208
1209         if (!info->async_flip)
1210                 return 0;
1211
1212         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
1213                 stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
1214                 tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
1215                                 GENMASK(12, 10)) >> 10;
1216         } else {
1217                 stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
1218                                 GENMASK(15, 6)) >> 6;
1219                 tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
1220         }
1221
1222         if (stride != info->stride_val)
1223                 gvt_dbg_cmd("cannot change stride during async flip\n");
1224
1225         if (tile != info->tile_val)
1226                 gvt_dbg_cmd("cannot change tile during async flip\n");
1227
1228         return 0;
1229 }
1230
1231 static int gen8_update_plane_mmio_from_mi_display_flip(
1232                 struct parser_exec_state *s,
1233                 struct mi_display_flip_command_info *info)
1234 {
1235         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1236         struct intel_vgpu *vgpu = s->vgpu;
1237
1238         set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
1239                       info->surf_val << 12);
1240         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
1241                 set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
1242                               info->stride_val);
1243                 set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
1244                               info->tile_val << 10);
1245         } else {
1246                 set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
1247                               info->stride_val << 6);
1248                 set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
1249                               info->tile_val << 10);
1250         }
1251
1252         vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
1253         intel_vgpu_trigger_virtual_event(vgpu, info->event);
1254         return 0;
1255 }
1256
1257 static int decode_mi_display_flip(struct parser_exec_state *s,
1258                 struct mi_display_flip_command_info *info)
1259 {
1260         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1261
1262         if (IS_BROADWELL(dev_priv))
1263                 return gen8_decode_mi_display_flip(s, info);
1264         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
1265                 return skl_decode_mi_display_flip(s, info);
1266
1267         return -ENODEV;
1268 }
1269
1270 static int check_mi_display_flip(struct parser_exec_state *s,
1271                 struct mi_display_flip_command_info *info)
1272 {
1273         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1274
1275         if (IS_BROADWELL(dev_priv)
1276                 || IS_SKYLAKE(dev_priv)
1277                 || IS_KABYLAKE(dev_priv))
1278                 return gen8_check_mi_display_flip(s, info);
1279         return -ENODEV;
1280 }
1281
1282 static int update_plane_mmio_from_mi_display_flip(
1283                 struct parser_exec_state *s,
1284                 struct mi_display_flip_command_info *info)
1285 {
1286         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1287
1288         if (IS_BROADWELL(dev_priv)
1289                 || IS_SKYLAKE(dev_priv)
1290                 || IS_KABYLAKE(dev_priv))
1291                 return gen8_update_plane_mmio_from_mi_display_flip(s, info);
1292         return -ENODEV;
1293 }
1294
1295 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
1296 {
1297         struct mi_display_flip_command_info info;
1298         struct intel_vgpu *vgpu = s->vgpu;
1299         int ret;
1300         int i;
1301         int len = cmd_length(s);
1302
1303         ret = decode_mi_display_flip(s, &info);
1304         if (ret) {
1305                 gvt_vgpu_err("fail to decode MI display flip command\n");
1306                 return ret;
1307         }
1308
1309         ret = check_mi_display_flip(s, &info);
1310         if (ret) {
1311                 gvt_vgpu_err("invalid MI display flip command\n");
1312                 return ret;
1313         }
1314
1315         ret = update_plane_mmio_from_mi_display_flip(s, &info);
1316         if (ret) {
1317                 gvt_vgpu_err("fail to update plane mmio\n");
1318                 return ret;
1319         }
1320
1321         for (i = 0; i < len; i++)
1322                 patch_value(s, cmd_ptr(s, i), MI_NOOP);
1323         return 0;
1324 }
1325
1326 static bool is_wait_for_flip_pending(u32 cmd)
1327 {
1328         return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
1329                         MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
1330                         MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
1331                         MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
1332                         MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
1333                         MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
1334 }
1335
1336 static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
1337 {
1338         u32 cmd = cmd_val(s, 0);
1339
1340         if (!is_wait_for_flip_pending(cmd))
1341                 return 0;
1342
1343         patch_value(s, cmd_ptr(s, 0), MI_NOOP);
1344         return 0;
1345 }
1346
1347 static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
1348 {
1349         unsigned long addr;
1350         unsigned long gma_high, gma_low;
1351         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1352
1353         if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
1354                 return INTEL_GVT_INVALID_ADDR;
1355
1356         gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
1357         if (gmadr_bytes == 4) {
1358                 addr = gma_low;
1359         } else {
1360                 gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
1361                 addr = (((unsigned long)gma_high) << 32) | gma_low;
1362         }
1363         return addr;
1364 }
1365
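/*
 * Audit a guest graphics address used by a command: reject operands larger
 * than the maximum surface size, addresses outside the vGPU GGTT range, or,
 * in index mode, an index beyond one page of qwords. Dump the command on
 * failure.
 */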
1366 static inline int cmd_address_audit(struct parser_exec_state *s,
1367                 unsigned long guest_gma, int op_size, bool index_mode)
1368 {
1369         struct intel_vgpu *vgpu = s->vgpu;
1370         u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
1371         int i;
1372         int ret;
1373
1374         if (op_size > max_surface_size) {
1375                 gvt_vgpu_err("command address audit fail name %s\n",
1376                         s->info->name);
1377                 return -EINVAL;
1378         }
1379
1380         if (index_mode) {
1381                 if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
1382                         ret = -EINVAL;
1383                         goto err;
1384                 }
1385         } else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
1386                 ret = -EINVAL;
1387                 goto err;
1388         }
1389
1390         return 0;
1391
1392 err:
1393         gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
1394                         s->info->name, guest_gma, op_size);
1395
1396         pr_err("cmd dump: ");
1397         for (i = 0; i < cmd_length(s); i++) {
1398                 if (!(i % 4))
1399                         pr_err("\n%08x ", cmd_val(s, i));
1400                 else
1401                         pr_err("%08x ", cmd_val(s, i));
1402         }
1403         pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
1404                         vgpu->id,
1405                         vgpu_aperture_gmadr_base(vgpu),
1406                         vgpu_aperture_gmadr_end(vgpu),
1407                         vgpu_hidden_gmadr_base(vgpu),
1408                         vgpu_hidden_gmadr_end(vgpu));
1409         return ret;
1410 }
1411
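/*
 * MI_STORE_DATA_IMM: if the destination uses the global GTT, audit the
 * store address and the size of the immediate data being written.
 */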
1412 static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
1413 {
1414         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1415         int op_size = (cmd_length(s) - 3) * sizeof(u32);
1416         int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
1417         unsigned long gma, gma_low, gma_high;
1418         int ret = 0;
1419
1420         /* check ppgtt */
1421         if (!(cmd_val(s, 0) & (1 << 22)))
1422                 return 0;
1423
1424         gma = cmd_val(s, 2) & GENMASK(31, 2);
1425
1426         if (gmadr_bytes == 8) {
1427                 gma_low = cmd_val(s, 1) & GENMASK(31, 2);
1428                 gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1429                 gma = (gma_high << 32) | gma_low;
1430                 core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
1431         }
1432         ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
1433         return ret;
1434 }
1435
1436 static inline int unexpected_cmd(struct parser_exec_state *s)
1437 {
1438         struct intel_vgpu *vgpu = s->vgpu;
1439
1440         gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
1441
1442         return -EINVAL;
1443 }
1444
1445 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
1446 {
1447         return unexpected_cmd(s);
1448 }
1449
1450 static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
1451 {
1452         return unexpected_cmd(s);
1453 }
1454
1455 static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
1456 {
1457         return unexpected_cmd(s);
1458 }
1459
1460 static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
1461 {
1462         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1463         int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
1464                         sizeof(u32);
1465         unsigned long gma, gma_high;
1466         int ret = 0;
1467
1468         if (!(cmd_val(s, 0) & (1 << 22)))
1469                 return ret;
1470
1471         gma = cmd_val(s, 1) & GENMASK(31, 2);
1472         if (gmadr_bytes == 8) {
1473                 gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1474                 gma = (gma_high << 32) | gma;
1475         }
1476         ret = cmd_address_audit(s, gma, op_size, false);
1477         return ret;
1478 }
1479
1480 static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
1481 {
1482         return unexpected_cmd(s);
1483 }
1484
1485 static int cmd_handler_mi_clflush(struct parser_exec_state *s)
1486 {
1487         return unexpected_cmd(s);
1488 }
1489
1490 static int cmd_handler_mi_conditional_batch_buffer_end(
1491                 struct parser_exec_state *s)
1492 {
1493         return unexpected_cmd(s);
1494 }
1495
1496 static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
1497 {
1498         return unexpected_cmd(s);
1499 }
1500
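/*
 * MI_FLUSH_DW: audit the post-sync write address (or Store Data Index) and
 * record a pending flush interrupt when the notify bit is set.
 */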
1501 static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
1502 {
1503         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1504         unsigned long gma;
1505         bool index_mode = false;
1506         int ret = 0;
1507
1508         /* Check post-sync and ppgtt bit */
1509         if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
1510                 gma = cmd_val(s, 1) & GENMASK(31, 3);
1511                 if (gmadr_bytes == 8)
1512                         gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
1513                 /* Store Data Index */
1514                 if (cmd_val(s, 0) & (1 << 21))
1515                         index_mode = true;
1516                 ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
1517         }
1518         /* Check notify bit */
1519         if ((cmd_val(s, 0) & (1 << 8)))
1520                 set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
1521                                 s->workload->pending_events);
1522         return ret;
1523 }
1524
1525 static void addr_type_update_snb(struct parser_exec_state *s)
1526 {
1527         if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
1528                         (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
1529                 s->buf_addr_type = PPGTT_BUFFER;
1530         }
1531 }
1532
1533
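/*
 * Copy guest graphics memory in [gma, end_gma) to a host virtual address,
 * walking it page by page through gma->gpa translation. Returns the number
 * of bytes copied, or -EFAULT on an invalid guest address.
 */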
1534 static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
1535                 unsigned long gma, unsigned long end_gma, void *va)
1536 {
1537         unsigned long copy_len, offset;
1538         unsigned long len = 0;
1539         unsigned long gpa;
1540
1541         while (gma != end_gma) {
1542                 gpa = intel_vgpu_gma_to_gpa(mm, gma);
1543                 if (gpa == INTEL_GVT_INVALID_ADDR) {
1544                         gvt_vgpu_err("invalid gma address: %lx\n", gma);
1545                         return -EFAULT;
1546                 }
1547
1548                 offset = gma & (GTT_PAGE_SIZE - 1);
1549
1550                 copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
1551                         GTT_PAGE_SIZE - offset : end_gma - gma;
1552
1553                 intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
1554
1555                 len += copy_len;
1556                 gma += copy_len;
1557         }
1558         return len;
1559 }
1560
1561
1562 /*
1563  * Check whether a batch buffer needs to be scanned. Currently
1564  * the only criterion is privilege.
1565  */
1566 static int batch_buffer_needs_scan(struct parser_exec_state *s)
1567 {
1568         struct intel_gvt *gvt = s->vgpu->gvt;
1569
1570         if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
1571                 || IS_KABYLAKE(gvt->dev_priv)) {
1572                 /* BDW+ decides privilege based on address space */
1573                 if (cmd_val(s, 0) & (1 << 8))
1574                         return 0;
1575         }
1576         return 1;
1577 }
1578
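/*
 * Walk the guest batch buffer command by command until MI_BATCH_BUFFER_END
 * (or a chained MI_BATCH_BUFFER_START) is found and return its size in bytes.
 */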
1579 static int find_bb_size(struct parser_exec_state *s)
1580 {
1581         unsigned long gma = 0;
1582         struct cmd_info *info;
1583         int bb_size = 0;
1584         uint32_t cmd_len = 0;
1585         bool met_bb_end = false;
1586         struct intel_vgpu *vgpu = s->vgpu;
1587         u32 cmd;
1588
1589         /* get the start gm address of the batch buffer */
1590         gma = get_gma_bb_from_cmd(s, 1);
1591         cmd = cmd_val(s, 0);
1592
1593         info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1594         if (info == NULL) {
1595                 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1596                                 cmd, get_opcode(cmd, s->ring_id));
1597                 return -EINVAL;
1598         }
1599         do {
1600                 copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
1601                                 gma, gma + 4, &cmd);
1602                 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1603                 if (info == NULL) {
1604                         gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1605                                 cmd, get_opcode(cmd, s->ring_id));
1606                         return -EINVAL;
1607                 }
1608
1609                 if (info->opcode == OP_MI_BATCH_BUFFER_END) {
1610                         met_bb_end = true;
1611                 } else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
1612                         if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0) {
1613                                 /* chained batch buffer */
1614                                 met_bb_end = true;
1615                         }
1616                 }
1617                 cmd_len = get_cmd_length(info, cmd) << 2;
1618                 bb_size += cmd_len;
1619                 gma += cmd_len;
1620
1621         } while (!met_bb_end);
1622
1623         return bb_size;
1624 }
1625
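/*
 * Allocate a shadow batch buffer object, copy the guest batch buffer into
 * it and redirect the parser so that scanning continues in the shadow copy.
 */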
1626 static int perform_bb_shadow(struct parser_exec_state *s)
1627 {
1628         struct intel_shadow_bb_entry *entry_obj;
1629         struct intel_vgpu *vgpu = s->vgpu;
1630         unsigned long gma = 0;
1631         int bb_size;
1632         void *dst = NULL;
1633         int ret = 0;
1634
1635         /* get the start gm address of the batch buffer */
1636         gma = get_gma_bb_from_cmd(s, 1);
1637
1638         /* get the size of the batch buffer */
1639         bb_size = find_bb_size(s);
1640         if (bb_size < 0)
1641                 return -EINVAL;
1642
1643         /* allocate shadow batch buffer */
1644         entry_obj = kmalloc(sizeof(*entry_obj), GFP_KERNEL);
1645         if (entry_obj == NULL)
1646                 return -ENOMEM;
1647
1648         entry_obj->obj =
1649                 i915_gem_object_create(s->vgpu->gvt->dev_priv,
1650                                        roundup(bb_size, PAGE_SIZE));
1651         if (IS_ERR(entry_obj->obj)) {
1652                 ret = PTR_ERR(entry_obj->obj);
1653                 goto free_entry;
1654         }
1655         entry_obj->len = bb_size;
1656         INIT_LIST_HEAD(&entry_obj->list);
1657
1658         dst = i915_gem_object_pin_map(entry_obj->obj, I915_MAP_WB);
1659         if (IS_ERR(dst)) {
1660                 ret = PTR_ERR(dst);
1661                 goto put_obj;
1662         }
1663
1664         ret = i915_gem_object_set_to_cpu_domain(entry_obj->obj, false);
1665         if (ret) {
1666                 gvt_vgpu_err("failed to set shadow batch to CPU\n");
1667                 goto unmap_src;
1668         }
1669
1670         entry_obj->va = dst;
1671         entry_obj->bb_start_cmd_va = s->ip_va;
1672
1673         /* copy batch buffer to shadow batch buffer */
1674         ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
1675                               gma, gma + bb_size,
1676                               dst);
1677         if (ret < 0) {
1678                 gvt_vgpu_err("fail to copy guest batch buffer\n");
1679                 goto unmap_src;
1680         }
1681
1682         list_add(&entry_obj->list, &s->workload->shadow_bb);
1683         /*
1684          * ip_va saves the virtual address of the shadow batch buffer, while
1685          * ip_gma saves the graphics address of the original batch buffer.
1686          * As the shadow batch buffer is just a copy of the original one,
1687          * it is correct to use the shadow batch buffer's va and the original
1688          * batch buffer's gma in pair. After all, we don't want to pin the shadow
1689          * buffer here (too early).
1690          */
1691         s->ip_va = dst;
1692         s->ip_gma = gma;
1693
1694         return 0;
1695
1696 unmap_src:
1697         i915_gem_object_unpin_map(entry_obj->obj);
1698 put_obj:
1699         i915_gem_object_put(entry_obj->obj);
1700 free_entry:
1701         kfree(entry_obj);
1702         return ret;
1703 }
1704
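/*
 * MI_BATCH_BUFFER_START: validate batch buffer nesting, update the parser's
 * buffer/address type and return addresses, then shadow and scan the target
 * batch buffer when it needs scanning, or emulate a batch buffer end
 * otherwise.
 */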
1705 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1706 {
1707         bool second_level;
1708         int ret = 0;
1709         struct intel_vgpu *vgpu = s->vgpu;
1710
1711         if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1712                 gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1713                 return -EINVAL;
1714         }
1715
1716         second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
1717         if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
1718                 gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
1719                 return -EINVAL;
1720         }
1721
1722         s->saved_buf_addr_type = s->buf_addr_type;
1723         addr_type_update_snb(s);
1724         if (s->buf_type == RING_BUFFER_INSTRUCTION) {
1725                 s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
1726                 s->buf_type = BATCH_BUFFER_INSTRUCTION;
1727         } else if (second_level) {
1728                 s->buf_type = BATCH_BUFFER_2ND_LEVEL;
1729                 s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
1730                 s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
1731         }
1732
1733         if (batch_buffer_needs_scan(s)) {
1734                 ret = perform_bb_shadow(s);
1735                 if (ret < 0)
1736                         gvt_vgpu_err("invalid shadow batch buffer\n");
1737         } else {
1738                 /* emulate a batch buffer end so the return is handled correctly */
1739                 ret = cmd_handler_mi_batch_buffer_end(s);
1740                 if (ret < 0)
1741                         return ret;
1742         }
1743
1744         return ret;
1745 }
1746
1747 static struct cmd_info cmd_info[] = {
1748         {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1749
1750         {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
1751                 0, 1, NULL},
1752
1753         {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
1754                 0, 1, cmd_handler_mi_user_interrupt},
1755
1756         {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
1757                 D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
1758
1759         {"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1760
1761         {"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1762                 NULL},
1763
1764         {"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1765                 NULL},
1766
1767         {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1768                 NULL},
1769
1770         {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1771                 NULL},
1772
1773         {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
1774                 D_ALL, 0, 1, NULL},
1775
1776         {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
1777                 F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1778                 cmd_handler_mi_batch_buffer_end},
1779
1780         {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
1781                 0, 1, NULL},
1782
1783         {"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1784                 NULL},
1785
1786         {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
1787                 D_ALL, 0, 1, NULL},
1788
1789         {"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1790                 NULL},
1791
1792         {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1793                 NULL},
1794
1795         {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
1796                 R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
1797
1798         {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
1799                 0, 8, NULL},
1800
1801         {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
1802
1803         {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1804
1805         {"MI_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
1806                 D_BDW_PLUS, 0, 8, NULL},
1807
1808         {"MI_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1809                 ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
1810
1811         {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1812                 ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
1813
1814         {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
1815                 0, 8, cmd_handler_mi_store_data_index},
1816
1817         {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
1818                 D_ALL, 0, 8, cmd_handler_lri},
1819
1820         {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
1821                 cmd_handler_mi_update_gtt},
1822
1823         {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
1824                 D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
1825
1826         {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
1827                 cmd_handler_mi_flush_dw},
1828
1829         {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
1830                 10, cmd_handler_mi_clflush},
1831
1832         {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
1833                 D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
1834
1835         {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
1836                 D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
1837
1838         {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
1839                 D_ALL, 0, 8, cmd_handler_lrr},
1840
1841         {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
1842                 D_ALL, 0, 8, NULL},
1843
1844         {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
1845                 ADDR_FIX_1(2), 8, NULL},
1846
1847         {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
1848                 ADDR_FIX_1(2), 8, NULL},
1849
1850         {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
1851                 8, cmd_handler_mi_op_2e},
1852
1853         {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
1854                 8, cmd_handler_mi_op_2f},
1855
1856         {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
1857                 F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
1858                 cmd_handler_mi_batch_buffer_start},
1859
1860         {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
1861                 F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
1862                 cmd_handler_mi_conditional_batch_buffer_end},
1863
1864         {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
1865                 R_RCS | R_BCS, D_ALL, 0, 2, NULL},
1866
1867         {"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1868                 ADDR_FIX_2(4, 7), 8, NULL},
1869
1870         {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1871                 0, 8, NULL},
1872
1873         {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
1874                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1875
1876         {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1877
1878         {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
1879                 0, 8, NULL},
1880
1881         {"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1882                 ADDR_FIX_1(3), 8, NULL},
1883
1884         {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
1885                 D_ALL, 0, 8, NULL},
1886
1887         {"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
1888                 ADDR_FIX_1(4), 8, NULL},
1889
1890         {"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1891                 ADDR_FIX_2(4, 5), 8, NULL},
1892
1893         {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1894                 ADDR_FIX_1(4), 8, NULL},
1895
1896         {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
1897                 ADDR_FIX_2(4, 7), 8, NULL},
1898
1899         {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
1900                 D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1901
1902         {"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1903
1904         {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
1905                 D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
1906
1907         {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
1908                 R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1909
1910         {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
1911                 OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
1912                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1913
1914         {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
1915                 D_ALL, ADDR_FIX_1(4), 8, NULL},
1916
1917         {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
1918                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1919
1920         {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
1921                 D_ALL, ADDR_FIX_1(4), 8, NULL},
1922
1923         {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
1924                 D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1925
1926         {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
1927                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1928
1929         {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
1930                 OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
1931                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1932
1933         {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
1934                 ADDR_FIX_2(4, 5), 8, NULL},
1935
1936         {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
1937                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1938
1939         {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
1940                 OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
1941                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1942
1943         {"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
1944                 OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
1945                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1946
1947         {"3DSTATE_BLEND_STATE_POINTERS",
1948                 OP_3DSTATE_BLEND_STATE_POINTERS,
1949                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1950
1951         {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
1952                 OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
1953                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1954
1955         {"3DSTATE_BINDING_TABLE_POINTERS_VS",
1956                 OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
1957                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1958
1959         {"3DSTATE_BINDING_TABLE_POINTERS_HS",
1960                 OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
1961                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1962
1963         {"3DSTATE_BINDING_TABLE_POINTERS_DS",
1964                 OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
1965                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1966
1967         {"3DSTATE_BINDING_TABLE_POINTERS_GS",
1968                 OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
1969                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1970
1971         {"3DSTATE_BINDING_TABLE_POINTERS_PS",
1972                 OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
1973                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1974
1975         {"3DSTATE_SAMPLER_STATE_POINTERS_VS",
1976                 OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
1977                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1978
1979         {"3DSTATE_SAMPLER_STATE_POINTERS_HS",
1980                 OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
1981                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1982
1983         {"3DSTATE_SAMPLER_STATE_POINTERS_DS",
1984                 OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
1985                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1986
1987         {"3DSTATE_SAMPLER_STATE_POINTERS_GS",
1988                 OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
1989                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1990
1991         {"3DSTATE_SAMPLER_STATE_POINTERS_PS",
1992                 OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
1993                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1994
1995         {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
1996                 0, 8, NULL},
1997
1998         {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
1999                 0, 8, NULL},
2000
2001         {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
2002                 0, 8, NULL},
2003
2004         {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
2005                 0, 8, NULL},
2006
2007         {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
2008                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2009
2010         {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
2011                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2012
2013         {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
2014                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2015
2016         {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
2017                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2018
2019         {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
2020                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2021
2022         {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
2023                 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2024
2025         {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
2026                 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2027
2028         {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
2029                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2030
2031         {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
2032                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2033
2034         {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
2035                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2036
2037         {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
2038                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2039
2040         {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
2041                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2042
2043         {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
2044                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2045
2046         {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
2047                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2048
2049         {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
2050                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2051
2052         {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
2053                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2054
2055         {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
2056                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2057
2058         {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
2059                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2060
2061         {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
2062                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2063
2064         {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
2065                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2066
2067         {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
2068                 D_BDW_PLUS, 0, 8, NULL},
2069
2070         {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2071                 NULL},
2072
2073         {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
2074                 D_BDW_PLUS, 0, 8, NULL},
2075
2076         {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
2077                 D_BDW_PLUS, 0, 8, NULL},
2078
2079         {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2080                 8, NULL},
2081
2082         {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
2083                 R_RCS, D_BDW_PLUS, 0, 8, NULL},
2084
2085         {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2086                 8, NULL},
2087
2088         {"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2089                 NULL},
2090
2091         {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2092                 NULL},
2093
2094         {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2095                 NULL},
2096
2097         {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
2098                 D_BDW_PLUS, 0, 8, NULL},
2099
2100         {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
2101                 R_RCS, D_ALL, 0, 8, NULL},
2102
2103         {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
2104                 D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
2105
2106         {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
2107                 R_RCS, D_ALL, 0, 1, NULL},
2108
2109         {"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2110
2111         {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
2112                 R_RCS, D_ALL, 0, 8, NULL},
2113
2114         {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
2115                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2116
2117         {"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2118
2119         {"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2120
2121         {"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2122
2123         {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
2124                 D_BDW_PLUS, 0, 8, NULL},
2125
2126         {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
2127                 D_BDW_PLUS, 0, 8, NULL},
2128
2129         {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
2130                 D_ALL, 0, 8, NULL},
2131
2132         {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
2133                 D_BDW_PLUS, 0, 8, NULL},
2134
2135         {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
2136                 D_BDW_PLUS, 0, 8, NULL},
2137
2138         {"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2139
2140         {"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2141
2142         {"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2143
2144         {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
2145                 D_ALL, 0, 8, NULL},
2146
2147         {"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2148
2149         {"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2150
2151         {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
2152                 R_RCS, D_ALL, 0, 8, NULL},
2153
2154         {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
2155                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2156
2157         {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
2158                 0, 8, NULL},
2159
2160         {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
2161                 D_ALL, ADDR_FIX_1(2), 8, NULL},
2162
2163         {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
2164                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2165
2166         {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
2167                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2168
2169         {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
2170                 D_ALL, 0, 8, NULL},
2171
2172         {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
2173                 D_ALL, 0, 8, NULL},
2174
2175         {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
2176                 D_ALL, 0, 8, NULL},
2177
2178         {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
2179                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2180
2181         {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
2182                 D_BDW_PLUS, 0, 8, NULL},
2183
2184         {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
2185                 D_ALL, ADDR_FIX_1(2), 8, NULL},
2186
2187         {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
2188                 R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
2189
2190         {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
2191                 R_RCS, D_ALL, 0, 8, NULL},
2192
2193         {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2194                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2195
2196         {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
2197                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2198
2199         {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
2200                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2201
2202         {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2203                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2204
2205         {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2206                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2207
2208         {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
2209                 R_RCS, D_ALL, 0, 8, NULL},
2210
2211         {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
2212                 D_ALL, 0, 9, NULL},
2213
2214         {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2215                 ADDR_FIX_2(2, 4), 8, NULL},
2216
2217         {"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2218                 OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
2219                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2220
2221         {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
2222                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2223
2224         {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2225                 OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
2226                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2227
2228         {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
2229                 D_BDW_PLUS, 0, 8, NULL},
2230
2231         {"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
2232                 ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
2233
2234         {"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2235
2236         {"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
2237                 1, NULL},
2238
2239         {"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
2240                 ADDR_FIX_1(1), 8, NULL},
2241
2242         {"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2243
2244         {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2245                 ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
2246
2247         {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
2248                 ADDR_FIX_1(1), 8, NULL},
2249
2250         {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2251
2252         {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2253
2254         {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2255                 0, 8, NULL},
2256
2257         {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
2258                 D_SKL_PLUS, 0, 8, NULL},
2259
2260         {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
2261                 F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2262
2263         {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
2264                 0, 16, NULL},
2265
2266         {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
2267                 0, 16, NULL},
2268
2269         {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2270
2271         {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
2272                 0, 16, NULL},
2273
2274         {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
2275                 0, 16, NULL},
2276
2277         {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2278                 0, 16, NULL},
2279
2280         {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2281                 0, 8, NULL},
2282
2283         {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
2284                 NULL},
2285
2286         {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
2287                 F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2288
2289         {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
2290                 R_VCS, D_ALL, 0, 12, NULL},
2291
2292         {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
2293                 R_VCS, D_ALL, 0, 12, NULL},
2294
2295         {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
2296                 R_VCS, D_BDW_PLUS, 0, 12, NULL},
2297
2298         {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
2299                 F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2300
2301         {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
2302                 F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
2303
2304         {"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2305
2306         {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
2307                 R_VCS, D_ALL, 0, 12, NULL},
2308
2309         {"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
2310                 R_VCS, D_ALL, 0, 12, NULL},
2311
2312         {"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
2313                 R_VCS, D_ALL, 0, 12, NULL},
2314
2315         {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
2316                 R_VCS, D_ALL, 0, 12, NULL},
2317
2318         {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
2319                 R_VCS, D_ALL, 0, 12, NULL},
2320
2321         {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
2322                 R_VCS, D_ALL, 0, 12, NULL},
2323
2324         {"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
2325                 R_VCS, D_ALL, 0, 6, NULL},
2326
2327         {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
2328                 R_VCS, D_ALL, 0, 12, NULL},
2329
2330         {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
2331                 R_VCS, D_ALL, 0, 12, NULL},
2332
2333         {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
2334                 R_VCS, D_ALL, 0, 12, NULL},
2335
2336         {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
2337                 R_VCS, D_ALL, 0, 12, NULL},
2338
2339         {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
2340                 R_VCS, D_ALL, 0, 12, NULL},
2341
2342         {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
2343                 R_VCS, D_ALL, 0, 12, NULL},
2344
2345         {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
2346                 R_VCS, D_ALL, 0, 12, NULL},
2347         {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
2348                 R_VCS, D_ALL, 0, 12, NULL},
2349
2350         {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
2351                 R_VCS, D_ALL, 0, 12, NULL},
2352
2353         {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
2354                 R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
2355
2356         {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
2357                 R_VCS, D_ALL, 0, 12, NULL},
2358
2359         {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
2360                 R_VCS, D_ALL, 0, 12, NULL},
2361
2362         {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
2363                 R_VCS, D_ALL, 0, 12, NULL},
2364
2365         {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
2366                 R_VCS, D_ALL, 0, 12, NULL},
2367
2368         {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
2369                 R_VCS, D_ALL, 0, 12, NULL},
2370
2371         {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
2372                 R_VCS, D_ALL, 0, 12, NULL},
2373
2374         {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
2375                 R_VCS, D_ALL, 0, 12, NULL},
2376
2377         {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
2378                 R_VCS, D_ALL, 0, 12, NULL},
2379
2380         {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
2381                 R_VCS, D_ALL, 0, 12, NULL},
2382
2383         {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
2384                 R_VCS, D_ALL, 0, 12, NULL},
2385
2386         {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
2387                 R_VCS, D_ALL, 0, 12, NULL},
2388
2389         {"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
2390                 0, 16, NULL},
2391
2392         {"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2393
2394         {"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2395
2396         {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
2397                 R_VCS, D_ALL, 0, 12, NULL},
2398
2399         {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
2400                 R_VCS, D_ALL, 0, 12, NULL},
2401
2402         {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
2403                 R_VCS, D_ALL, 0, 12, NULL},
2404
2405         {"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
2406
2407         {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
2408                 0, 12, NULL},
2409
2410         {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
2411                 0, 20, NULL},
2412 };
2413
2414 static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
2415 {
2416         hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
2417 }
2418
2419 /* call the cmd handler, and advance ip */
2420 static int cmd_parser_exec(struct parser_exec_state *s)
2421 {
2422         struct intel_vgpu *vgpu = s->vgpu;
2423         struct cmd_info *info;
2424         u32 cmd;
2425         int ret = 0;
2426
2427         cmd = cmd_val(s, 0);
2428
2429         info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
2430         if (info == NULL) {
2431                 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
2432                                 cmd, get_opcode(cmd, s->ring_id));
2433                 return -EINVAL;
2434         }
2435
2436         s->info = info;
2437
2438         trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
2439                           cmd_length(s), s->buf_type);
2440
2441         if (info->handler) {
2442                 ret = info->handler(s);
2443                 if (ret < 0) {
2444                         gvt_vgpu_err("%s handler error\n", info->name);
2445                         return ret;
2446                 }
2447         }
2448
2449         if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2450                 ret = cmd_advance_default(s);
2451                 if (ret) {
2452                         gvt_vgpu_err("%s IP advance error\n", info->name);
2453                         return ret;
2454                 }
2455         }
2456         return 0;
2457 }
2458
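/*
 * Return true if gma lies outside the [gma_head, gma_tail] window, taking
 * ring buffer wrap-around into account.
 */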
2459 static inline bool gma_out_of_range(unsigned long gma,
2460                 unsigned long gma_head, unsigned int gma_tail)
2461 {
2462         if (gma_tail >= gma_head)
2463                 return (gma < gma_head) || (gma > gma_tail);
2464         else
2465                 return (gma > gma_tail) && (gma < gma_head);
2466 }
2467
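/*
 * Scan commands from the current parser position until gma_tail is reached,
 * checking that the instruction pointer stays inside the ring buffer.
 */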
2468 static int command_scan(struct parser_exec_state *s,
2469                 unsigned long rb_head, unsigned long rb_tail,
2470                 unsigned long rb_start, unsigned long rb_len)
2471 {
2472
2473         unsigned long gma_head, gma_tail, gma_bottom;
2474         int ret = 0;
2475         struct intel_vgpu *vgpu = s->vgpu;
2476
2477         gma_head = rb_start + rb_head;
2478         gma_tail = rb_start + rb_tail;
2479         gma_bottom = rb_start +  rb_len;
2480
2481         while (s->ip_gma != gma_tail) {
2482                 if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2483                         if (s->ip_gma < rb_start ||
2484                                 s->ip_gma >= gma_bottom) {
2485                                 gvt_vgpu_err("ip_gma %lx out of ring scope "
2486                                         "(base: 0x%lx, bottom: 0x%lx)\n",
2487                                         s->ip_gma, rb_start,
2488                                         gma_bottom);
2489                                 parser_exec_state_dump(s);
2490                                 return -EINVAL;
2491                         }
2492                         if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2493                                 gvt_vgpu_err("ip_gma %lx out of range, "
2494                                         "base 0x%lx head 0x%lx tail 0x%lx\n",
2495                                         s->ip_gma, rb_start,
2496                                         rb_head, rb_tail);
2497                                 parser_exec_state_dump(s);
2498                                 break;
2499                         }
2500                 }
2501                 ret = cmd_parser_exec(s);
2502                 if (ret) {
2503                         gvt_vgpu_err("cmd parser error\n");
2504                         parser_exec_state_dump(s);
2505                         break;
2506                 }
2507         }
2508
2509         return ret;
2510 }
2511
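/*
 * Build a parser execution state for the workload ring buffer and scan the
 * commands between head and tail, unless scanning is bypassed for this ring
 * or the ring is empty.
 */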
2512 static int scan_workload(struct intel_vgpu_workload *workload)
2513 {
2514         unsigned long gma_head, gma_tail, gma_bottom;
2515         struct parser_exec_state s;
2516         int ret = 0;
2517
2518         /* ring base is page aligned */
2519         if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
2520                 return -EINVAL;
2521
2522         gma_head = workload->rb_start + workload->rb_head;
2523         gma_tail = workload->rb_start + workload->rb_tail;
2524         gma_bottom = workload->rb_start +  _RING_CTL_BUF_SIZE(workload->rb_ctl);
2525
2526         s.buf_type = RING_BUFFER_INSTRUCTION;
2527         s.buf_addr_type = GTT_BUFFER;
2528         s.vgpu = workload->vgpu;
2529         s.ring_id = workload->ring_id;
2530         s.ring_start = workload->rb_start;
2531         s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2532         s.ring_head = gma_head;
2533         s.ring_tail = gma_tail;
2534         s.rb_va = workload->shadow_ring_buffer_va;
2535         s.workload = workload;
2536
2537         if ((bypass_scan_mask & (1 << workload->ring_id)) ||
2538                 gma_head == gma_tail)
2539                 return 0;
2540
2541         if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
2542                 ret = -EINVAL;
2543                 goto out;
2544         }
2545
2546         ret = ip_gma_set(&s, gma_head);
2547         if (ret)
2548                 goto out;
2549
2550         ret = command_scan(&s, workload->rb_head, workload->rb_tail,
2551                 workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
2552
2553 out:
2554         return ret;
2555 }
2556
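/*
 * Scan the shadowed indirect (workaround) context batch of a workload,
 * treating it as a small ring of commands.
 */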
2557 static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2558 {
2559
2560         unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
2561         struct parser_exec_state s;
2562         int ret = 0;
2563         struct intel_vgpu_workload *workload = container_of(wa_ctx,
2564                                 struct intel_vgpu_workload,
2565                                 wa_ctx);
2566
2567         /* ring base is page aligned */
2568         if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
2569                 return -EINVAL;
2570
2571         ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
2572         ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
2573                         PAGE_SIZE);
2574         gma_head = wa_ctx->indirect_ctx.guest_gma;
2575         gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
2576         gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
2577
2578         s.buf_type = RING_BUFFER_INSTRUCTION;
2579         s.buf_addr_type = GTT_BUFFER;
2580         s.vgpu = workload->vgpu;
2581         s.ring_id = workload->ring_id;
2582         s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2583         s.ring_size = ring_size;
2584         s.ring_head = gma_head;
2585         s.ring_tail = gma_tail;
2586         s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2587         s.workload = workload;
2588
2589         if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
2590                 ret = -EINVAL;
2591                 goto out;
2592         }
2593
2594         ret = ip_gma_set(&s, gma_head);
2595         if (ret)
2596                 goto out;
2597
2598         ret = command_scan(&s, 0, ring_tail,
2599                 wa_ctx->indirect_ctx.guest_gma, ring_size);
2600 out:
2601         return ret;
2602 }
2603
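/*
 * Copy the guest ring buffer contents between head and tail (handling
 * wrap-around) into the vGPU's reserved shadow ring buffer, growing the
 * reserved buffer if needed.
 */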
2604 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2605 {
2606         struct intel_vgpu *vgpu = workload->vgpu;
2607         unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
2608         void *shadow_ring_buffer_va;
2609         int ring_id = workload->ring_id;
2610         int ret;
2611
2612         guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2613
2614         /* calculate workload ring buffer size */
2615         workload->rb_len = (workload->rb_tail + guest_rb_size -
2616                         workload->rb_head) % guest_rb_size;
2617
2618         gma_head = workload->rb_start + workload->rb_head;
2619         gma_tail = workload->rb_start + workload->rb_tail;
2620         gma_top = workload->rb_start + guest_rb_size;
2621
2622         if (workload->rb_len > vgpu->reserve_ring_buffer_size[ring_id]) {
2623                 void *va = vgpu->reserve_ring_buffer_va[ring_id];
2624                 /* realloc the new ring buffer if needed */
2625                 vgpu->reserve_ring_buffer_va[ring_id] =
2626                         krealloc(va, workload->rb_len, GFP_KERNEL);
2627                 if (!vgpu->reserve_ring_buffer_va[ring_id]) {
2628                         gvt_vgpu_err("fail to alloc reserve ring buffer\n");
2629                         return -ENOMEM;
2630                 }
2631                 vgpu->reserve_ring_buffer_size[ring_id] = workload->rb_len;
2632         }
2633
2634         shadow_ring_buffer_va = vgpu->reserve_ring_buffer_va[ring_id];
2635
2636         /* get shadow ring buffer va */
2637         workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
2638
2639         /* head > tail --> copy head <-> top */
2640         if (gma_head > gma_tail) {
2641                 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2642                                       gma_head, gma_top, shadow_ring_buffer_va);
2643                 if (ret < 0) {
2644                         gvt_vgpu_err("fail to copy guest ring buffer\n");
2645                         return ret;
2646                 }
2647                 shadow_ring_buffer_va += ret;
2648                 gma_head = workload->rb_start;
2649         }
2650
2651         /* copy head or start <-> tail */
2652         ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
2653                                 shadow_ring_buffer_va);
2654         if (ret < 0) {
2655                 gvt_vgpu_err("fail to copy guest ring buffer\n");
2656                 return ret;
2657         }
2658         return 0;
2659 }
2660
2661 int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
2662 {
2663         int ret;
2664         struct intel_vgpu *vgpu = workload->vgpu;
2665
2666         ret = shadow_workload_ring_buffer(workload);
2667         if (ret) {
2668                 gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2669                 return ret;
2670         }
2671
2672         ret = scan_workload(workload);
2673         if (ret) {
2674                 gvt_vgpu_err("scan workload error\n");
2675                 return ret;
2676         }
2677         return 0;
2678 }
2679
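/*
 * Create a shadow copy of the guest indirect (workaround) context image in
 * a new GEM object and record its CPU mapping for later scanning.
 */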
2680 static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2681 {
2682         int ctx_size = wa_ctx->indirect_ctx.size;
2683         unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2684         struct intel_vgpu_workload *workload = container_of(wa_ctx,
2685                                         struct intel_vgpu_workload,
2686                                         wa_ctx);
2687         struct intel_vgpu *vgpu = workload->vgpu;
2688         struct drm_i915_gem_object *obj;
2689         int ret = 0;
2690         void *map;
2691
2692         obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
2693                                      roundup(ctx_size + CACHELINE_BYTES,
2694                                              PAGE_SIZE));
2695         if (IS_ERR(obj))
2696                 return PTR_ERR(obj);
2697
2698         /* get the va of the shadow indirect ctx */
2699         map = i915_gem_object_pin_map(obj, I915_MAP_WB);
2700         if (IS_ERR(map)) {
2701                 gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
2702                 ret = PTR_ERR(map);
2703                 goto put_obj;
2704         }
2705
2706         ret = i915_gem_object_set_to_cpu_domain(obj, false);
2707         if (ret) {
2708                 gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
2709                 goto unmap_src;
2710         }
2711
2712         ret = copy_gma_to_hva(workload->vgpu,
2713                                 workload->vgpu->gtt.ggtt_mm,
2714                                 guest_gma, guest_gma + ctx_size,
2715                                 map);
2716         if (ret < 0) {
2717                 gvt_vgpu_err("fail to copy guest indirect ctx\n");
2718                 goto unmap_src;
2719         }
2720
2721         wa_ctx->indirect_ctx.obj = obj;
2722         wa_ctx->indirect_ctx.shadow_va = map;
2723         return 0;
2724
2725 unmap_src:
2726         i915_gem_object_unpin_map(obj);
2727 put_obj:
2728         i915_gem_object_put(obj);
2729         return ret;
2730 }
2731
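/*
 * Append a MI_BATCH_BUFFER_START at the end of the shadowed indirect context
 * so that execution chains into the guest per-context buffer, provided the
 * per-context buffer is marked valid.
 */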
2732 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2733 {
2734         uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
2735         unsigned char *bb_start_sva;
2736
2737         if (!wa_ctx->per_ctx.valid)
2738                 return 0;
2739
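        /* MI_BATCH_BUFFER_START (opcode 0x31 in bits 28:23), DWord length 1 */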
2740         per_ctx_start[0] = 0x18800001;
2741         per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
2742
2743         bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
2744                                 wa_ctx->indirect_ctx.size;
2745
2746         memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
2747
2748         return 0;
2749 }
2750
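/**
 * intel_gvt_scan_and_shadow_wa_ctx - shadow and scan the workaround context
 * @wa_ctx: the workaround context of a workload
 *
 * Shadow the guest indirect context, chain in the per-context buffer and run
 * the command parser over the result. A workload without an indirect context
 * is left untouched.
 *
 * Returns:
 * Zero on success, negative error code on failure.
 */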
2751 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2752 {
2753         int ret;
2754         struct intel_vgpu_workload *workload = container_of(wa_ctx,
2755                                         struct intel_vgpu_workload,
2756                                         wa_ctx);
2757         struct intel_vgpu *vgpu = workload->vgpu;
2758
2759         if (wa_ctx->indirect_ctx.size == 0)
2760                 return 0;
2761
2762         ret = shadow_indirect_ctx(wa_ctx);
2763         if (ret) {
2764                 gvt_vgpu_err("fail to shadow indirect ctx\n");
2765                 return ret;
2766         }
2767
2768         combine_wa_ctx(wa_ctx);
2769
2770         ret = scan_wa_ctx(wa_ctx);
2771         if (ret) {
2772                 gvt_vgpu_err("scan wa ctx error\n");
2773                 return ret;
2774         }
2775
2776         return 0;
2777 }
2778
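/*
 * Look up a command opcode on any of the rings in the given ring mask; used
 * by init_cmd_table() below to detect opcodes registered more than once.
 */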
2779 static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
2780                 unsigned int opcode, unsigned long rings)
2781 {
2782         struct cmd_info *info = NULL;
2783         unsigned int ring;
2784
2785         for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
2786                 info = find_cmd_entry(gvt, opcode, ring);
2787                 if (info)
2788                         break;
2789         }
2790         return info;
2791 }
2792
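/*
 * Build the per-device command hash table from the static cmd_info[] array,
 * keeping only entries that match this device's generation and rejecting
 * duplicated opcodes.
 */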
2793 static int init_cmd_table(struct intel_gvt *gvt)
2794 {
2795         int i;
2796         struct cmd_entry *e;
2797         struct cmd_info *info;
2798         unsigned int gen_type;
2799
2800         gen_type = intel_gvt_get_device_type(gvt);
2801
2802         for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
2803                 if (!(cmd_info[i].devices & gen_type))
2804                         continue;
2805
2806                 e = kzalloc(sizeof(*e), GFP_KERNEL);
2807                 if (!e)
2808                         return -ENOMEM;
2809
2810                 e->info = &cmd_info[i];
2811                 info = find_cmd_entry_any_ring(gvt,
2812                                 e->info->opcode, e->info->rings);
2813                 if (info) {
2814                         gvt_err("%s %s duplicated\n", e->info->name,
2815                                         info->name);
                             kfree(e); /* don't leak the new entry on a duplicate opcode */
2816                         return -EEXIST;
2817                 }
2818
2819                 INIT_HLIST_NODE(&e->hlist);
2820                 add_cmd_entry(gvt, e);
2821                 gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
2822                                 e->info->name, e->info->opcode, e->info->flag,
2823                                 e->info->devices, e->info->rings);
2824         }
2825         return 0;
2826 }
2827
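/* Free every hashed command entry and reset the table to its empty state. */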
2828 static void clean_cmd_table(struct intel_gvt *gvt)
2829 {
2830         struct hlist_node *tmp;
2831         struct cmd_entry *e;
2832         int i;
2833
2834         hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
2835                 kfree(e);
2836
2837         hash_init(gvt->cmd_table);
2838 }
2839
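/**
 * intel_gvt_clean_cmd_parser - clean up the GVT command parser
 * @gvt: the GVT device
 *
 * Release the command table built by intel_gvt_init_cmd_parser().
 */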
2840 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
2841 {
2842         clean_cmd_table(gvt);
2843 }
2844
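/**
 * intel_gvt_init_cmd_parser - initialize the GVT command parser
 * @gvt: the GVT device
 *
 * Build the command table; any partially built state is torn down again if
 * initialization fails.
 *
 * Returns:
 * Zero on success, negative error code otherwise.
 */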
2845 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
2846 {
2847         int ret;
2848
2849         ret = init_cmd_table(gvt);
2850         if (ret) {
2851                 intel_gvt_clean_cmd_parser(gvt);
2852                 return ret;
2853         }
2854         return 0;
2855 }
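
/*
 * Usage sketch (illustrative only, not part of this file): the parser is
 * expected to be set up once per GVT device and torn down with it, roughly
 * as in the GVT device init/teardown paths:
 *
 *	if (intel_gvt_init_cmd_parser(gvt))
 *		goto err;
 *	...
 *	intel_gvt_clean_cmd_parser(gvt);
 */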