1 /*
2  * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21  * SOFTWARE.
22  *
23  * Authors:
24  *    Ke Yu
25  *    Kevin Tian <kevin.tian@intel.com>
26  *    Zhiyuan Lv <zhiyuan.lv@intel.com>
27  *
28  * Contributors:
29  *    Min He <min.he@intel.com>
30  *    Ping Gao <ping.a.gao@intel.com>
31  *    Tina Zhang <tina.zhang@intel.com>
32  *    Yulei Zhang <yulei.zhang@intel.com>
33  *    Zhi Wang <zhi.a.wang@intel.com>
34  *
35  */
36
37 #include <linux/slab.h>
38 #include "i915_drv.h"
39 #include "gvt.h"
40 #include "i915_pvinfo.h"
41 #include "trace.h"
42
43 #define INVALID_OP    (~0U)
44
45 #define OP_LEN_MI           9
46 #define OP_LEN_2D           10
47 #define OP_LEN_3D_MEDIA     16
48 #define OP_LEN_MFX_VC       16
49 #define OP_LEN_VEBOX        16
50
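/* the command type is encoded in bits 31:29 of the first command DWord */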
51 #define CMD_TYPE(cmd)   (((cmd) >> 29) & 7)
52
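/*
 * Per-command-type decode description: op_len is the number of opcode
 * bits counted down from bit 31, and each sub_op entry gives the [hi:low]
 * bit range of one sub-opcode field within the first command DWord.
 */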
53 struct sub_op_bits {
54         int hi;
55         int low;
56 };
57 struct decode_info {
58         char *name;
59         int op_len;
60         int nr_sub_op;
61         struct sub_op_bits *sub_op;
62 };
63
64 #define   MAX_CMD_BUDGET                        0x7fffffff
65 #define   MI_WAIT_FOR_PLANE_C_FLIP_PENDING      (1<<15)
66 #define   MI_WAIT_FOR_PLANE_B_FLIP_PENDING      (1<<9)
67 #define   MI_WAIT_FOR_PLANE_A_FLIP_PENDING      (1<<1)
68
69 #define   MI_WAIT_FOR_SPRITE_C_FLIP_PENDING      (1<<20)
70 #define   MI_WAIT_FOR_SPRITE_B_FLIP_PENDING      (1<<10)
71 #define   MI_WAIT_FOR_SPRITE_A_FLIP_PENDING      (1<<2)
72
73 /* Render Command Map */
74
75 /* MI_* command Opcode (28:23) */
76 #define OP_MI_NOOP                          0x0
77 #define OP_MI_SET_PREDICATE                 0x1  /* HSW+ */
78 #define OP_MI_USER_INTERRUPT                0x2
79 #define OP_MI_WAIT_FOR_EVENT                0x3
80 #define OP_MI_FLUSH                         0x4
81 #define OP_MI_ARB_CHECK                     0x5
82 #define OP_MI_RS_CONTROL                    0x6  /* HSW+ */
83 #define OP_MI_REPORT_HEAD                   0x7
84 #define OP_MI_ARB_ON_OFF                    0x8
85 #define OP_MI_URB_ATOMIC_ALLOC              0x9  /* HSW+ */
86 #define OP_MI_BATCH_BUFFER_END              0xA
87 #define OP_MI_SUSPEND_FLUSH                 0xB
88 #define OP_MI_PREDICATE                     0xC  /* IVB+ */
89 #define OP_MI_TOPOLOGY_FILTER               0xD  /* IVB+ */
90 #define OP_MI_SET_APPID                     0xE  /* IVB+ */
91 #define OP_MI_RS_CONTEXT                    0xF  /* HSW+ */
92 #define OP_MI_LOAD_SCAN_LINES_INCL          0x12 /* HSW+ */
93 #define OP_MI_DISPLAY_FLIP                  0x14
94 #define OP_MI_SEMAPHORE_MBOX                0x16
95 #define OP_MI_SET_CONTEXT                   0x18
96 #define OP_MI_MATH                          0x1A
97 #define OP_MI_URB_CLEAR                     0x19
98 #define OP_MI_SEMAPHORE_SIGNAL              0x1B  /* BDW+ */
99 #define OP_MI_SEMAPHORE_WAIT                0x1C  /* BDW+ */
100
101 #define OP_MI_STORE_DATA_IMM                0x20
102 #define OP_MI_STORE_DATA_INDEX              0x21
103 #define OP_MI_LOAD_REGISTER_IMM             0x22
104 #define OP_MI_UPDATE_GTT                    0x23
105 #define OP_MI_STORE_REGISTER_MEM            0x24
106 #define OP_MI_FLUSH_DW                      0x26
107 #define OP_MI_CLFLUSH                       0x27
108 #define OP_MI_REPORT_PERF_COUNT             0x28
109 #define OP_MI_LOAD_REGISTER_MEM             0x29  /* HSW+ */
110 #define OP_MI_LOAD_REGISTER_REG             0x2A  /* HSW+ */
111 #define OP_MI_RS_STORE_DATA_IMM             0x2B  /* HSW+ */
112 #define OP_MI_LOAD_URB_MEM                  0x2C  /* HSW+ */
113 #define OP_MI_STORE_URM_MEM                 0x2D  /* HSW+ */
114 #define OP_MI_2E                            0x2E  /* BDW+ */
115 #define OP_MI_2F                            0x2F  /* BDW+ */
116 #define OP_MI_BATCH_BUFFER_START            0x31
117
118 /* Bit definition for dword 0 */
119 #define _CMDBIT_BB_START_IN_PPGTT       (1UL << 8)
120
121 #define OP_MI_CONDITIONAL_BATCH_BUFFER_END  0x36
122
123 #define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
124 #define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
125 #define BATCH_BUFFER_ADR_SPACE_BIT(x)   (((x) >> 8) & 1U)
126 #define BATCH_BUFFER_2ND_LEVEL_BIT(x)   ((x) >> 22 & 1U)
127
128 /* 2D command: Opcode (28:22) */
129 #define OP_2D(x)    ((2<<7) | x)
130
131 #define OP_XY_SETUP_BLT                             OP_2D(0x1)
132 #define OP_XY_SETUP_CLIP_BLT                        OP_2D(0x3)
133 #define OP_XY_SETUP_MONO_PATTERN_SL_BLT             OP_2D(0x11)
134 #define OP_XY_PIXEL_BLT                             OP_2D(0x24)
135 #define OP_XY_SCANLINES_BLT                         OP_2D(0x25)
136 #define OP_XY_TEXT_BLT                              OP_2D(0x26)
137 #define OP_XY_TEXT_IMMEDIATE_BLT                    OP_2D(0x31)
138 #define OP_XY_COLOR_BLT                             OP_2D(0x50)
139 #define OP_XY_PAT_BLT                               OP_2D(0x51)
140 #define OP_XY_MONO_PAT_BLT                          OP_2D(0x52)
141 #define OP_XY_SRC_COPY_BLT                          OP_2D(0x53)
142 #define OP_XY_MONO_SRC_COPY_BLT                     OP_2D(0x54)
143 #define OP_XY_FULL_BLT                              OP_2D(0x55)
144 #define OP_XY_FULL_MONO_SRC_BLT                     OP_2D(0x56)
145 #define OP_XY_FULL_MONO_PATTERN_BLT                 OP_2D(0x57)
146 #define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT        OP_2D(0x58)
147 #define OP_XY_MONO_PAT_FIXED_BLT                    OP_2D(0x59)
148 #define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT           OP_2D(0x71)
149 #define OP_XY_PAT_BLT_IMMEDIATE                     OP_2D(0x72)
150 #define OP_XY_SRC_COPY_CHROMA_BLT                   OP_2D(0x73)
151 #define OP_XY_FULL_IMMEDIATE_PATTERN_BLT            OP_2D(0x74)
152 #define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT   OP_2D(0x75)
153 #define OP_XY_PAT_CHROMA_BLT                        OP_2D(0x76)
154 #define OP_XY_PAT_CHROMA_BLT_IMMEDIATE              OP_2D(0x77)
155
156 /* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
157 #define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
158         ((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
159
160 #define OP_STATE_PREFETCH                       OP_3D_MEDIA(0x0, 0x0, 0x03)
161
162 #define OP_STATE_BASE_ADDRESS                   OP_3D_MEDIA(0x0, 0x1, 0x01)
163 #define OP_STATE_SIP                            OP_3D_MEDIA(0x0, 0x1, 0x02)
164 #define OP_3D_MEDIA_0_1_4                       OP_3D_MEDIA(0x0, 0x1, 0x04)
165
166 #define OP_3DSTATE_VF_STATISTICS_GM45           OP_3D_MEDIA(0x1, 0x0, 0x0B)
167
168 #define OP_PIPELINE_SELECT                      OP_3D_MEDIA(0x1, 0x1, 0x04)
169
170 #define OP_MEDIA_VFE_STATE                      OP_3D_MEDIA(0x2, 0x0, 0x0)
171 #define OP_MEDIA_CURBE_LOAD                     OP_3D_MEDIA(0x2, 0x0, 0x1)
172 #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD      OP_3D_MEDIA(0x2, 0x0, 0x2)
173 #define OP_MEDIA_GATEWAY_STATE                  OP_3D_MEDIA(0x2, 0x0, 0x3)
174 #define OP_MEDIA_STATE_FLUSH                    OP_3D_MEDIA(0x2, 0x0, 0x4)
175
176 #define OP_MEDIA_OBJECT                         OP_3D_MEDIA(0x2, 0x1, 0x0)
177 #define OP_MEDIA_OBJECT_PRT                     OP_3D_MEDIA(0x2, 0x1, 0x2)
178 #define OP_MEDIA_OBJECT_WALKER                  OP_3D_MEDIA(0x2, 0x1, 0x3)
179 #define OP_GPGPU_WALKER                         OP_3D_MEDIA(0x2, 0x1, 0x5)
180
181 #define OP_3DSTATE_CLEAR_PARAMS                 OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
182 #define OP_3DSTATE_DEPTH_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
183 #define OP_3DSTATE_STENCIL_BUFFER               OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
184 #define OP_3DSTATE_HIER_DEPTH_BUFFER            OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
185 #define OP_3DSTATE_VERTEX_BUFFERS               OP_3D_MEDIA(0x3, 0x0, 0x08)
186 #define OP_3DSTATE_VERTEX_ELEMENTS              OP_3D_MEDIA(0x3, 0x0, 0x09)
187 #define OP_3DSTATE_INDEX_BUFFER                 OP_3D_MEDIA(0x3, 0x0, 0x0A)
188 #define OP_3DSTATE_VF_STATISTICS                OP_3D_MEDIA(0x3, 0x0, 0x0B)
189 #define OP_3DSTATE_VF                           OP_3D_MEDIA(0x3, 0x0, 0x0C)  /* HSW+ */
190 #define OP_3DSTATE_CC_STATE_POINTERS            OP_3D_MEDIA(0x3, 0x0, 0x0E)
191 #define OP_3DSTATE_SCISSOR_STATE_POINTERS       OP_3D_MEDIA(0x3, 0x0, 0x0F)
192 #define OP_3DSTATE_VS                           OP_3D_MEDIA(0x3, 0x0, 0x10)
193 #define OP_3DSTATE_GS                           OP_3D_MEDIA(0x3, 0x0, 0x11)
194 #define OP_3DSTATE_CLIP                         OP_3D_MEDIA(0x3, 0x0, 0x12)
195 #define OP_3DSTATE_SF                           OP_3D_MEDIA(0x3, 0x0, 0x13)
196 #define OP_3DSTATE_WM                           OP_3D_MEDIA(0x3, 0x0, 0x14)
197 #define OP_3DSTATE_CONSTANT_VS                  OP_3D_MEDIA(0x3, 0x0, 0x15)
198 #define OP_3DSTATE_CONSTANT_GS                  OP_3D_MEDIA(0x3, 0x0, 0x16)
199 #define OP_3DSTATE_CONSTANT_PS                  OP_3D_MEDIA(0x3, 0x0, 0x17)
200 #define OP_3DSTATE_SAMPLE_MASK                  OP_3D_MEDIA(0x3, 0x0, 0x18)
201 #define OP_3DSTATE_CONSTANT_HS                  OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
202 #define OP_3DSTATE_CONSTANT_DS                  OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
203 #define OP_3DSTATE_HS                           OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
204 #define OP_3DSTATE_TE                           OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
205 #define OP_3DSTATE_DS                           OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
206 #define OP_3DSTATE_STREAMOUT                    OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
207 #define OP_3DSTATE_SBE                          OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
208 #define OP_3DSTATE_PS                           OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
209 #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
210 #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC   OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
211 #define OP_3DSTATE_BLEND_STATE_POINTERS         OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
212 #define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
213 #define OP_3DSTATE_BINDING_TABLE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
214 #define OP_3DSTATE_BINDING_TABLE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
215 #define OP_3DSTATE_BINDING_TABLE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
216 #define OP_3DSTATE_BINDING_TABLE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
217 #define OP_3DSTATE_BINDING_TABLE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
218 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS    OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
219 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS    OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
220 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS    OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
221 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS    OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
222 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS    OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
223 #define OP_3DSTATE_URB_VS                       OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
224 #define OP_3DSTATE_URB_HS                       OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
225 #define OP_3DSTATE_URB_DS                       OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
226 #define OP_3DSTATE_URB_GS                       OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
227 #define OP_3DSTATE_GATHER_CONSTANT_VS           OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
228 #define OP_3DSTATE_GATHER_CONSTANT_GS           OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
229 #define OP_3DSTATE_GATHER_CONSTANT_HS           OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
230 #define OP_3DSTATE_GATHER_CONSTANT_DS           OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
231 #define OP_3DSTATE_GATHER_CONSTANT_PS           OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
232 #define OP_3DSTATE_DX9_CONSTANTF_VS             OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
233 #define OP_3DSTATE_DX9_CONSTANTF_PS             OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
234 #define OP_3DSTATE_DX9_CONSTANTI_VS             OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
235 #define OP_3DSTATE_DX9_CONSTANTI_PS             OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
236 #define OP_3DSTATE_DX9_CONSTANTB_VS             OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
237 #define OP_3DSTATE_DX9_CONSTANTB_PS             OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
238 #define OP_3DSTATE_DX9_LOCAL_VALID_VS           OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
239 #define OP_3DSTATE_DX9_LOCAL_VALID_PS           OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
240 #define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS       OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
241 #define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS       OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
242 #define OP_3DSTATE_BINDING_TABLE_EDIT_VS        OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
243 #define OP_3DSTATE_BINDING_TABLE_EDIT_GS        OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
244 #define OP_3DSTATE_BINDING_TABLE_EDIT_HS        OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
245 #define OP_3DSTATE_BINDING_TABLE_EDIT_DS        OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
246 #define OP_3DSTATE_BINDING_TABLE_EDIT_PS        OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
247
248 #define OP_3DSTATE_VF_INSTANCING                OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
249 #define OP_3DSTATE_VF_SGVS                      OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
250 #define OP_3DSTATE_VF_TOPOLOGY                  OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
251 #define OP_3DSTATE_WM_CHROMAKEY                 OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
252 #define OP_3DSTATE_PS_BLEND                     OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
253 #define OP_3DSTATE_WM_DEPTH_STENCIL             OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
254 #define OP_3DSTATE_PS_EXTRA                     OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
255 #define OP_3DSTATE_RASTER                       OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
256 #define OP_3DSTATE_SBE_SWIZ                     OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
257 #define OP_3DSTATE_WM_HZ_OP                     OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
258 #define OP_3DSTATE_COMPONENT_PACKING            OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
259
260 #define OP_3DSTATE_DRAWING_RECTANGLE            OP_3D_MEDIA(0x3, 0x1, 0x00)
261 #define OP_3DSTATE_SAMPLER_PALETTE_LOAD0        OP_3D_MEDIA(0x3, 0x1, 0x02)
262 #define OP_3DSTATE_CHROMA_KEY                   OP_3D_MEDIA(0x3, 0x1, 0x04)
263 #define OP_SNB_3DSTATE_DEPTH_BUFFER             OP_3D_MEDIA(0x3, 0x1, 0x05)
264 #define OP_3DSTATE_POLY_STIPPLE_OFFSET          OP_3D_MEDIA(0x3, 0x1, 0x06)
265 #define OP_3DSTATE_POLY_STIPPLE_PATTERN         OP_3D_MEDIA(0x3, 0x1, 0x07)
266 #define OP_3DSTATE_LINE_STIPPLE                 OP_3D_MEDIA(0x3, 0x1, 0x08)
267 #define OP_3DSTATE_AA_LINE_PARAMS               OP_3D_MEDIA(0x3, 0x1, 0x0A)
268 #define OP_3DSTATE_GS_SVB_INDEX                 OP_3D_MEDIA(0x3, 0x1, 0x0B)
269 #define OP_3DSTATE_SAMPLER_PALETTE_LOAD1        OP_3D_MEDIA(0x3, 0x1, 0x0C)
270 #define OP_3DSTATE_MULTISAMPLE_BDW              OP_3D_MEDIA(0x3, 0x0, 0x0D)
271 #define OP_SNB_3DSTATE_STENCIL_BUFFER           OP_3D_MEDIA(0x3, 0x1, 0x0E)
272 #define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER        OP_3D_MEDIA(0x3, 0x1, 0x0F)
273 #define OP_SNB_3DSTATE_CLEAR_PARAMS             OP_3D_MEDIA(0x3, 0x1, 0x10)
274 #define OP_3DSTATE_MONOFILTER_SIZE              OP_3D_MEDIA(0x3, 0x1, 0x11)
275 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS       OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
276 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS       OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
277 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS       OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
278 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS       OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
279 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS       OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
280 #define OP_3DSTATE_SO_DECL_LIST                 OP_3D_MEDIA(0x3, 0x1, 0x17)
281 #define OP_3DSTATE_SO_BUFFER                    OP_3D_MEDIA(0x3, 0x1, 0x18)
282 #define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC     OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
283 #define OP_3DSTATE_GATHER_POOL_ALLOC            OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
284 #define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
285 #define OP_3DSTATE_SAMPLE_PATTERN               OP_3D_MEDIA(0x3, 0x1, 0x1C)
286 #define OP_PIPE_CONTROL                         OP_3D_MEDIA(0x3, 0x2, 0x00)
287 #define OP_3DPRIMITIVE                          OP_3D_MEDIA(0x3, 0x3, 0x00)
288
289 /* VCCP Command Parser */
290
291 /*
292  * The MFX and VEB command definitions below are from the vaapi intel-driver project (BSD License)
293  * git://anongit.freedesktop.org/vaapi/intel-driver
294  * src/i965_defines.h
295  *
296  */
297
298 #define OP_MFX(pipeline, op, sub_opa, sub_opb)     \
299         (3 << 13 | \
300          (pipeline) << 11 | \
301          (op) << 8 | \
302          (sub_opa) << 5 | \
303          (sub_opb))
304
305 #define OP_MFX_PIPE_MODE_SELECT                    OP_MFX(2, 0, 0, 0)  /* ALL */
306 #define OP_MFX_SURFACE_STATE                       OP_MFX(2, 0, 0, 1)  /* ALL */
307 #define OP_MFX_PIPE_BUF_ADDR_STATE                 OP_MFX(2, 0, 0, 2)  /* ALL */
308 #define OP_MFX_IND_OBJ_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 3)  /* ALL */
309 #define OP_MFX_BSP_BUF_BASE_ADDR_STATE             OP_MFX(2, 0, 0, 4)  /* ALL */
310 #define OP_2_0_0_5                                 OP_MFX(2, 0, 0, 5)  /* ALL */
311 #define OP_MFX_STATE_POINTER                       OP_MFX(2, 0, 0, 6)  /* ALL */
312 #define OP_MFX_QM_STATE                            OP_MFX(2, 0, 0, 7)  /* IVB+ */
313 #define OP_MFX_FQM_STATE                           OP_MFX(2, 0, 0, 8)  /* IVB+ */
314 #define OP_MFX_PAK_INSERT_OBJECT                   OP_MFX(2, 0, 2, 8)  /* IVB+ */
315 #define OP_MFX_STITCH_OBJECT                       OP_MFX(2, 0, 2, 0xA)  /* IVB+ */
316
317 #define OP_MFD_IT_OBJECT                           OP_MFX(2, 0, 1, 9) /* ALL */
318
319 #define OP_MFX_WAIT                                OP_MFX(1, 0, 0, 0) /* IVB+ */
320 #define OP_MFX_AVC_IMG_STATE                       OP_MFX(2, 1, 0, 0) /* ALL */
321 #define OP_MFX_AVC_QM_STATE                        OP_MFX(2, 1, 0, 1) /* ALL */
322 #define OP_MFX_AVC_DIRECTMODE_STATE                OP_MFX(2, 1, 0, 2) /* ALL */
323 #define OP_MFX_AVC_SLICE_STATE                     OP_MFX(2, 1, 0, 3) /* ALL */
324 #define OP_MFX_AVC_REF_IDX_STATE                   OP_MFX(2, 1, 0, 4) /* ALL */
325 #define OP_MFX_AVC_WEIGHTOFFSET_STATE              OP_MFX(2, 1, 0, 5) /* ALL */
326 #define OP_MFD_AVC_PICID_STATE                     OP_MFX(2, 1, 1, 5) /* HSW+ */
327 #define OP_MFD_AVC_DPB_STATE                       OP_MFX(2, 1, 1, 6) /* IVB+ */
328 #define OP_MFD_AVC_SLICEADDR                       OP_MFX(2, 1, 1, 7) /* IVB+ */
329 #define OP_MFD_AVC_BSD_OBJECT                      OP_MFX(2, 1, 1, 8) /* ALL */
330 #define OP_MFC_AVC_PAK_OBJECT                      OP_MFX(2, 1, 2, 9) /* ALL */
331
332 #define OP_MFX_VC1_PRED_PIPE_STATE                 OP_MFX(2, 2, 0, 1) /* ALL */
333 #define OP_MFX_VC1_DIRECTMODE_STATE                OP_MFX(2, 2, 0, 2) /* ALL */
334 #define OP_MFD_VC1_SHORT_PIC_STATE                 OP_MFX(2, 2, 1, 0) /* IVB+ */
335 #define OP_MFD_VC1_LONG_PIC_STATE                  OP_MFX(2, 2, 1, 1) /* IVB+ */
336 #define OP_MFD_VC1_BSD_OBJECT                      OP_MFX(2, 2, 1, 8) /* ALL */
337
338 #define OP_MFX_MPEG2_PIC_STATE                     OP_MFX(2, 3, 0, 0) /* ALL */
339 #define OP_MFX_MPEG2_QM_STATE                      OP_MFX(2, 3, 0, 1) /* ALL */
340 #define OP_MFD_MPEG2_BSD_OBJECT                    OP_MFX(2, 3, 1, 8) /* ALL */
341 #define OP_MFC_MPEG2_SLICEGROUP_STATE              OP_MFX(2, 3, 2, 3) /* ALL */
342 #define OP_MFC_MPEG2_PAK_OBJECT                    OP_MFX(2, 3, 2, 9) /* ALL */
343
344 #define OP_MFX_2_6_0_0                             OP_MFX(2, 6, 0, 0) /* IVB+ */
345 #define OP_MFX_2_6_0_8                             OP_MFX(2, 6, 0, 8) /* IVB+ */
346 #define OP_MFX_2_6_0_9                             OP_MFX(2, 6, 0, 9) /* IVB+ */
347
348 #define OP_MFX_JPEG_PIC_STATE                      OP_MFX(2, 7, 0, 0)
349 #define OP_MFX_JPEG_HUFF_TABLE_STATE               OP_MFX(2, 7, 0, 2)
350 #define OP_MFD_JPEG_BSD_OBJECT                     OP_MFX(2, 7, 1, 8)
351
352 #define OP_VEB(pipeline, op, sub_opa, sub_opb) \
353         (3 << 13 | \
354          (pipeline) << 11 | \
355          (op) << 8 | \
356          (sub_opa) << 5 | \
357          (sub_opb))
358
359 #define OP_VEB_SURFACE_STATE                       OP_VEB(2, 4, 0, 0)
360 #define OP_VEB_STATE                               OP_VEB(2, 4, 0, 2)
361 #define OP_VEB_DNDI_IECP_STATE                     OP_VEB(2, 4, 0, 3)
362
363 struct parser_exec_state;
364
365 typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
366
367 #define GVT_CMD_HASH_BITS   7
368
369 /* which DWords need address fix */
370 #define ADDR_FIX_1(x1)                  (1 << (x1))
371 #define ADDR_FIX_2(x1, x2)              (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
372 #define ADDR_FIX_3(x1, x2, x3)          (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
373 #define ADDR_FIX_4(x1, x2, x3, x4)      (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
374 #define ADDR_FIX_5(x1, x2, x3, x4, x5)  (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
375
376 struct cmd_info {
377         char *name;
378         u32 opcode;
379
380 #define F_LEN_MASK      (1U<<0)
381 #define F_LEN_CONST  1U
382 #define F_LEN_VAR    0U
383
384 /*
385  * the command has its own IP advance logic,
386  * e.g. MI_BATCH_BUFFER_START, MI_BATCH_BUFFER_END
387  */
388 #define F_IP_ADVANCE_CUSTOM (1<<1)
389
390 #define F_POST_HANDLE   (1<<2)
391         u32 flag;
392
393 #define R_RCS   (1 << RCS)
394 #define R_VCS1  (1 << VCS)
395 #define R_VCS2  (1 << VCS2)
396 #define R_VCS   (R_VCS1 | R_VCS2)
397 #define R_BCS   (1 << BCS)
398 #define R_VECS  (1 << VECS)
399 #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
400         /* rings that support this cmd: BLT/RCS/VCS/VECS */
401         uint16_t rings;
402
403         /* devices that support this cmd: SNB/IVB/HSW/... */
404         uint16_t devices;
405
406         /* which DWords are addresses that need fixing up.
407          * A 0 bit means the DWord is a 32-bit non-address operand;
408          * a 1 bit means it is an address operand, which may be 32-bit
409          * or 64-bit depending on the architecture (defined by
410          * "gmadr_bytes_in_cmd" in intel_gvt).
411          * No matter the address length, each address only takes
412          * one bit in the bitmap.
413          */
414         uint16_t addr_bitmap;
415
416         /* flag == F_LEN_CONST : command length in DWords
417          * flag == F_LEN_VAR   : width in bits of the DWord-0 length field
418          * Note: length is in DWords
419          */
420         uint8_t len;
421
422         parser_cmd_handler handler;
423 };
424
425 struct cmd_entry {
426         struct hlist_node hlist;
427         struct cmd_info *info;
428 };
429
430 enum {
431         RING_BUFFER_INSTRUCTION,
432         BATCH_BUFFER_INSTRUCTION,
433         BATCH_BUFFER_2ND_LEVEL,
434 };
435
436 enum {
437         GTT_BUFFER,
438         PPGTT_BUFFER
439 };
440
441 struct parser_exec_state {
442         struct intel_vgpu *vgpu;
443         int ring_id;
444
445         int buf_type;
446
447         /* batch buffer address type */
448         int buf_addr_type;
449
450         /* graphics memory address of ring buffer start */
451         unsigned long ring_start;
452         unsigned long ring_size;
453         unsigned long ring_head;
454         unsigned long ring_tail;
455
456         /* instruction graphics memory address */
457         unsigned long ip_gma;
458
459         /* mapped va of ip_gma */
460         void *ip_va;
461         void *rb_va;
462
463         void *ret_bb_va;
464         /* next instruction when returning from the batch buffer to the ring buffer */
465         unsigned long ret_ip_gma_ring;
466
467         /* next instruction when returning from the 2nd-level batch buffer to the batch buffer */
468         unsigned long ret_ip_gma_bb;
469
470         /* batch buffer address type (GTT or PPGTT),
471          * used when returning from a 2nd-level batch buffer
472          */
473         int saved_buf_addr_type;
474
475         struct cmd_info *info;
476
477         struct intel_vgpu_workload *workload;
478 };
479
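/* number of DWords an address operand occupies in a command (1 or 2) */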
480 #define gmadr_dw_number(s)      \
481         (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
482
483 static unsigned long bypass_scan_mask = 0;
484
485 /* ring ALL, type = 0 */
486 static struct sub_op_bits sub_op_mi[] = {
487         {31, 29},
488         {28, 23},
489 };
490
491 static struct decode_info decode_info_mi = {
492         "MI",
493         OP_LEN_MI,
494         ARRAY_SIZE(sub_op_mi),
495         sub_op_mi,
496 };
497
498 /* ring BCS, command type 2 */
499 static struct sub_op_bits sub_op_2d[] = {
500         {31, 29},
501         {28, 22},
502 };
503
504 static struct decode_info decode_info_2d = {
505         "2D",
506         OP_LEN_2D,
507         ARRAY_SIZE(sub_op_2d),
508         sub_op_2d,
509 };
510
511 /* ring RCS, command type 3 */
512 static struct sub_op_bits sub_op_3d_media[] = {
513         {31, 29},
514         {28, 27},
515         {26, 24},
516         {23, 16},
517 };
518
519 static struct decode_info decode_info_3d_media = {
520         "3D_Media",
521         OP_LEN_3D_MEDIA,
522         ARRAY_SIZE(sub_op_3d_media),
523         sub_op_3d_media,
524 };
525
526 /* ring VCS, command type 3 */
527 static struct sub_op_bits sub_op_mfx_vc[] = {
528         {31, 29},
529         {28, 27},
530         {26, 24},
531         {23, 21},
532         {20, 16},
533 };
534
535 static struct decode_info decode_info_mfx_vc = {
536         "MFX_VC",
537         OP_LEN_MFX_VC,
538         ARRAY_SIZE(sub_op_mfx_vc),
539         sub_op_mfx_vc,
540 };
541
542 /* ring VECS, command type 3 */
543 static struct sub_op_bits sub_op_vebox[] = {
544         {31, 29},
545         {28, 27},
546         {26, 24},
547         {23, 21},
548         {20, 16},
549 };
550
551 static struct decode_info decode_info_vebox = {
552         "VEBOX",
553         OP_LEN_VEBOX,
554         ARRAY_SIZE(sub_op_vebox),
555         sub_op_vebox,
556 };
557
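/*
 * Decode tables indexed by [ring][command type]; a NULL entry means that
 * command type is not decoded on that ring.
 */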
558 static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
559         [RCS] = {
560                 &decode_info_mi,
561                 NULL,
562                 NULL,
563                 &decode_info_3d_media,
564                 NULL,
565                 NULL,
566                 NULL,
567                 NULL,
568         },
569
570         [VCS] = {
571                 &decode_info_mi,
572                 NULL,
573                 NULL,
574                 &decode_info_mfx_vc,
575                 NULL,
576                 NULL,
577                 NULL,
578                 NULL,
579         },
580
581         [BCS] = {
582                 &decode_info_mi,
583                 NULL,
584                 &decode_info_2d,
585                 NULL,
586                 NULL,
587                 NULL,
588                 NULL,
589                 NULL,
590         },
591
592         [VECS] = {
593                 &decode_info_mi,
594                 NULL,
595                 NULL,
596                 &decode_info_vebox,
597                 NULL,
598                 NULL,
599                 NULL,
600                 NULL,
601         },
602
603         [VCS2] = {
604                 &decode_info_mi,
605                 NULL,
606                 NULL,
607                 &decode_info_mfx_vc,
608                 NULL,
609                 NULL,
610                 NULL,
611                 NULL,
612         },
613 };
614
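/* extract the full opcode of a command using the ring's decode table */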
615 static inline u32 get_opcode(u32 cmd, int ring_id)
616 {
617         struct decode_info *d_info;
618
619         d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
620         if (d_info == NULL)
621                 return INVALID_OP;
622
623         return cmd >> (32 - d_info->op_len);
624 }
625
626 static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
627                 unsigned int opcode, int ring_id)
628 {
629         struct cmd_entry *e;
630
631         hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
632                 if ((opcode == e->info->opcode) &&
633                                 (e->info->rings & (1 << ring_id)))
634                         return e->info;
635         }
636         return NULL;
637 }
638
639 static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
640                 u32 cmd, int ring_id)
641 {
642         u32 opcode;
643
644         opcode = get_opcode(cmd, ring_id);
645         if (opcode == INVALID_OP)
646                 return NULL;
647
648         return find_cmd_entry(gvt, opcode, ring_id);
649 }
650
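/* extract the bit field [hi:low] from a command DWord */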
651 static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
652 {
653         return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
654 }
655
656 static inline void print_opcode(u32 cmd, int ring_id)
657 {
658         struct decode_info *d_info;
659         int i;
660
661         d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
662         if (d_info == NULL)
663                 return;
664
665         gvt_dbg_cmd("opcode=0x%x %s sub_ops:",
666                         cmd >> (32 - d_info->op_len), d_info->name);
667
668         for (i = 0; i < d_info->nr_sub_op; i++)
669                 pr_err("0x%x ", sub_op_val(cmd, d_info->sub_op[i].hi,
670                                         d_info->sub_op[i].low));
671
672         pr_err("\n");
673 }
674
675 static inline u32 *cmd_ptr(struct parser_exec_state *s, int index)
676 {
677         return s->ip_va + (index << 2);
678 }
679
680 static inline u32 cmd_val(struct parser_exec_state *s, int index)
681 {
682         return *cmd_ptr(s, index);
683 }
684
685 static void parser_exec_state_dump(struct parser_exec_state *s)
686 {
687         int cnt = 0;
688         int i;
689
690         gvt_dbg_cmd("  vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
691                         " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
692                         s->ring_id, s->ring_start, s->ring_start + s->ring_size,
693                         s->ring_head, s->ring_tail);
694
695         gvt_dbg_cmd("  %s %s ip_gma(%08lx) ",
696                         s->buf_type == RING_BUFFER_INSTRUCTION ?
697                         "RING_BUFFER" : "BATCH_BUFFER",
698                         s->buf_addr_type == GTT_BUFFER ?
699                         "GTT" : "PPGTT", s->ip_gma);
700
701         if (s->ip_va == NULL) {
702                 gvt_dbg_cmd(" ip_va(NULL)");
703                 return;
704         }
705
706         gvt_dbg_cmd("  ip_va=%p: %08x %08x %08x %08x\n",
707                         s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
708                         cmd_val(s, 2), cmd_val(s, 3));
709
710         print_opcode(cmd_val(s, 0), s->ring_id);
711
712         s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
713
714         while (cnt < 1024) {
715                 gvt_dbg_cmd("ip_va=%p: ", s->ip_va);
716                 for (i = 0; i < 8; i++)
717                         gvt_dbg_cmd("%08x ", cmd_val(s, i));
718                 gvt_dbg_cmd("\n");
719
720                 s->ip_va += 8 * sizeof(u32);
721                 cnt += 8;
722         }
723 }
724
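/*
 * Recompute the CPU virtual address (ip_va) backing the current
 * instruction GMA: an offset into the shadow ring buffer copy (handling
 * wrap-around) for ring commands, or the shadow batch buffer mapping for
 * batch buffer commands.
 */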
725 static inline void update_ip_va(struct parser_exec_state *s)
726 {
727         unsigned long len = 0;
728
729         if (WARN_ON(s->ring_head == s->ring_tail))
730                 return;
731
732         if (s->buf_type == RING_BUFFER_INSTRUCTION) {
733                 unsigned long ring_top = s->ring_start + s->ring_size;
734
735                 if (s->ring_head > s->ring_tail) {
736                         if (s->ip_gma >= s->ring_head && s->ip_gma < ring_top)
737                                 len = (s->ip_gma - s->ring_head);
738                         else if (s->ip_gma >= s->ring_start &&
739                                         s->ip_gma <= s->ring_tail)
740                                 len = (ring_top - s->ring_head) +
741                                         (s->ip_gma - s->ring_start);
742                 } else
743                         len = (s->ip_gma - s->ring_head);
744
745                 s->ip_va = s->rb_va + len;
746         } else { /* shadow batch buffer */
747                 s->ip_va = s->ret_bb_va;
748         }
749 }
750
751 static inline int ip_gma_set(struct parser_exec_state *s,
752                 unsigned long ip_gma)
753 {
754         WARN_ON(!IS_ALIGNED(ip_gma, 4));
755
756         s->ip_gma = ip_gma;
757         update_ip_va(s);
758         return 0;
759 }
760
761 static inline int ip_gma_advance(struct parser_exec_state *s,
762                 unsigned int dw_len)
763 {
764         s->ip_gma += (dw_len << 2);
765
766         if (s->buf_type == RING_BUFFER_INSTRUCTION) {
767                 if (s->ip_gma >= s->ring_start + s->ring_size)
768                         s->ip_gma -= s->ring_size;
769                 update_ip_va(s);
770         } else {
771                 s->ip_va += (dw_len << 2);
772         }
773
774         return 0;
775 }
776
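/*
 * For F_LEN_CONST commands the length comes from the command table; for
 * F_LEN_VAR commands, info->len is the width of the DWord-0 length field,
 * which encodes the total length minus two (in DWords).
 */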
777 static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
778 {
779         if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
780                 return info->len;
781         else
782                 return (cmd & ((1U << info->len) - 1)) + 2;
784 }
785
786 static inline int cmd_length(struct parser_exec_state *s)
787 {
788         return get_cmd_length(s->info, cmd_val(s, 0));
789 }
790
791 /* do not remove this; some platforms may need a clflush here */
792 #define patch_value(s, addr, val) do { \
793         *addr = val; \
794 } while (0)
795
796 static bool is_shadowed_mmio(unsigned int offset)
797 {
798         bool ret = false;
799
800         if ((offset == 0x2168) || /* BB current head register UDW */
801             (offset == 0x2140) || /* BB current head register */
802             (offset == 0x211c) || /* second BB head register UDW */
803             (offset == 0x2114)) { /* second BB head register */
804                 ret = true;
805         }
806         return ret;
807 }
808
809 static inline bool is_force_nonpriv_mmio(unsigned int offset)
810 {
811         return (offset >= 0x24d0 && offset < 0x2500);
812 }
813
814 static int force_nonpriv_reg_handler(struct parser_exec_state *s,
815                                      unsigned int offset, unsigned int index)
816 {
817         struct intel_gvt *gvt = s->vgpu->gvt;
818         unsigned int data = cmd_val(s, index + 1);
819
820         if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data)) {
821                 gvt_err("Unexpected forcenonpriv 0x%x LRI write, value=0x%x\n",
822                         offset, data);
823                 return -EPERM;
824         }
825         return 0;
826 }
827
828 static inline bool is_mocs_mmio(unsigned int offset)
829 {
830         return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
831                 ((offset >= 0xb020) && (offset <= 0xb0a0));
832 }
833
834 static int mocs_cmd_reg_handler(struct parser_exec_state *s,
835                                 unsigned int offset, unsigned int index)
836 {
837         if (!is_mocs_mmio(offset))
838                 return -EINVAL;
839         vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
840         return 0;
841 }
842
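/*
 * Audit one MMIO offset referenced by a command: reject offsets outside
 * the MMIO range, warn about registers that are not expected to be
 * written from the command stream, handle MOCS and force-nonpriv
 * registers, and patch DERRMR/FORCEWAKE_MT writes so they target the
 * harmless PVINFO page instead of real hardware.
 */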
843 static int cmd_reg_handler(struct parser_exec_state *s,
844         unsigned int offset, unsigned int index, char *cmd)
845 {
846         struct intel_vgpu *vgpu = s->vgpu;
847         struct intel_gvt *gvt = vgpu->gvt;
848
849         if (offset + 4 > gvt->device_info.mmio_size) {
850                 gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
851                                 cmd, offset);
852                 return -EFAULT;
853         }
854
855         if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
856                 gvt_vgpu_err("%s access to non-render register (%x)\n",
857                                 cmd, offset);
858                 return 0;
859         }
860
861         if (is_shadowed_mmio(offset)) {
862                 gvt_vgpu_err("found access of shadowed MMIO %x\n", offset);
863                 return 0;
864         }
865
866         if (is_mocs_mmio(offset) &&
867             mocs_cmd_reg_handler(s, offset, index))
868                 return -EINVAL;
869
870         if (is_force_nonpriv_mmio(offset) &&
871                 force_nonpriv_reg_handler(s, offset, index))
872                 return -EPERM;
873
874         if (offset == i915_mmio_reg_offset(DERRMR) ||
875                 offset == i915_mmio_reg_offset(FORCEWAKE_MT)) {
876                 /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
877                 patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
878         }
879
880         /* TODO: Update the global mask if this MMIO is a masked-MMIO */
881         intel_gvt_mmio_set_cmd_accessed(gvt, offset);
882         return 0;
883 }
884
885 #define cmd_reg(s, i) \
886         (cmd_val(s, i) & GENMASK(22, 2))
887
888 #define cmd_reg_inhibit(s, i) \
889         (cmd_val(s, i) & GENMASK(22, 18))
890
891 #define cmd_gma(s, i) \
892         (cmd_val(s, i) & GENMASK(31, 2))
893
894 #define cmd_gma_hi(s, i) \
895         (cmd_val(s, i) & GENMASK(15, 0))
896
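/*
 * MI_LOAD_REGISTER_IMM: audit each (register offset, value) pair carried
 * by the command; on Broadwell, non-render rings may only write a
 * restricted register range (DERRMR from the blitter being the exception).
 */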
897 static int cmd_handler_lri(struct parser_exec_state *s)
898 {
899         int i, ret = 0;
900         int cmd_len = cmd_length(s);
901         struct intel_gvt *gvt = s->vgpu->gvt;
902
903         for (i = 1; i < cmd_len; i += 2) {
904                 if (IS_BROADWELL(gvt->dev_priv) &&
905                                 (s->ring_id != RCS)) {
906                         if (s->ring_id == BCS &&
907                                         cmd_reg(s, i) ==
908                                         i915_mmio_reg_offset(DERRMR))
909                                 ret |= 0;
910                         else
911                                 ret |= (cmd_reg_inhibit(s, i)) ?
912                                         -EBADRQC : 0;
913                 }
914                 if (ret)
915                         break;
916                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lri");
917                 if (ret)
918                         break;
919         }
920         return ret;
921 }
922
923 static int cmd_handler_lrr(struct parser_exec_state *s)
924 {
925         int i, ret = 0;
926         int cmd_len = cmd_length(s);
927
928         for (i = 1; i < cmd_len; i += 2) {
929                 if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
930                         ret |= ((cmd_reg_inhibit(s, i) ||
931                                         (cmd_reg_inhibit(s, i + 1)))) ?
932                                 -EBADRQC : 0;
933                 if (ret)
934                         break;
935                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrr-src");
936                 if (ret)
937                         break;
938                 ret |= cmd_reg_handler(s, cmd_reg(s, i + 1), i, "lrr-dst");
939                 if (ret)
940                         break;
941         }
942         return ret;
943 }
944
945 static inline int cmd_address_audit(struct parser_exec_state *s,
946                 unsigned long guest_gma, int op_size, bool index_mode);
947
948 static int cmd_handler_lrm(struct parser_exec_state *s)
949 {
950         struct intel_gvt *gvt = s->vgpu->gvt;
951         int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
952         unsigned long gma;
953         int i, ret = 0;
954         int cmd_len = cmd_length(s);
955
956         for (i = 1; i < cmd_len;) {
957                 if (IS_BROADWELL(gvt->dev_priv))
958                         ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
959                 if (ret)
960                         break;
961                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "lrm");
962                 if (ret)
963                         break;
964                 if (cmd_val(s, 0) & (1 << 22)) {
965                         gma = cmd_gma(s, i + 1);
966                         if (gmadr_bytes == 8)
967                                 gma |= (cmd_gma_hi(s, i + 2)) << 32;
968                         ret |= cmd_address_audit(s, gma, sizeof(u32), false);
969                         if (ret)
970                                 break;
971                 }
972                 i += gmadr_dw_number(s) + 1;
973         }
974         return ret;
975 }
976
977 static int cmd_handler_srm(struct parser_exec_state *s)
978 {
979         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
980         unsigned long gma;
981         int i, ret = 0;
982         int cmd_len = cmd_length(s);
983
984         for (i = 1; i < cmd_len;) {
985                 ret |= cmd_reg_handler(s, cmd_reg(s, i), i, "srm");
986                 if (ret)
987                         break;
988                 if (cmd_val(s, 0) & (1 << 22)) {
989                         gma = cmd_gma(s, i + 1);
990                         if (gmadr_bytes == 8)
991                                 gma |= (cmd_gma_hi(s, i + 2)) << 32;
992                         ret |= cmd_address_audit(s, gma, sizeof(u32), false);
993                         if (ret)
994                                 break;
995                 }
996                 i += gmadr_dw_number(s) + 1;
997         }
998         return ret;
999 }
1000
1001 struct cmd_interrupt_event {
1002         int pipe_control_notify;
1003         int mi_flush_dw;
1004         int mi_user_interrupt;
1005 };
1006
1007 static struct cmd_interrupt_event cmd_interrupt_events[] = {
1008         [RCS] = {
1009                 .pipe_control_notify = RCS_PIPE_CONTROL,
1010                 .mi_flush_dw = INTEL_GVT_EVENT_RESERVED,
1011                 .mi_user_interrupt = RCS_MI_USER_INTERRUPT,
1012         },
1013         [BCS] = {
1014                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1015                 .mi_flush_dw = BCS_MI_FLUSH_DW,
1016                 .mi_user_interrupt = BCS_MI_USER_INTERRUPT,
1017         },
1018         [VCS] = {
1019                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1020                 .mi_flush_dw = VCS_MI_FLUSH_DW,
1021                 .mi_user_interrupt = VCS_MI_USER_INTERRUPT,
1022         },
1023         [VCS2] = {
1024                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1025                 .mi_flush_dw = VCS2_MI_FLUSH_DW,
1026                 .mi_user_interrupt = VCS2_MI_USER_INTERRUPT,
1027         },
1028         [VECS] = {
1029                 .pipe_control_notify = INTEL_GVT_EVENT_RESERVED,
1030                 .mi_flush_dw = VECS_MI_FLUSH_DW,
1031                 .mi_user_interrupt = VECS_MI_USER_INTERRUPT,
1032         },
1033 };
1034
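/*
 * PIPE_CONTROL: audit the post-sync operation target (an MMIO write or a
 * graphics memory address) and latch the pipe-control notify interrupt
 * for the workload when PIPE_CONTROL_NOTIFY is set.
 */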
1035 static int cmd_handler_pipe_control(struct parser_exec_state *s)
1036 {
1037         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1038         unsigned long gma;
1039         bool index_mode = false;
1040         unsigned int post_sync;
1041         int ret = 0;
1042
1043         post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
1044
1045         /* LRI post sync */
1046         if (cmd_val(s, 1) & PIPE_CONTROL_MMIO_WRITE)
1047                 ret = cmd_reg_handler(s, cmd_reg(s, 2), 1, "pipe_ctrl");
1048         /* post sync */
1049         else if (post_sync) {
1050                 if (post_sync == 2)
1051                         ret = cmd_reg_handler(s, 0x2350, 1, "pipe_ctrl");
1052                 else if (post_sync == 3)
1053                         ret = cmd_reg_handler(s, 0x2358, 1, "pipe_ctrl");
1054                 else if (post_sync == 1) {
1055                         /* check ggtt */
1056                         if ((cmd_val(s, 1) & PIPE_CONTROL_GLOBAL_GTT_IVB)) {
1057                                 gma = cmd_val(s, 2) & GENMASK(31, 3);
1058                                 if (gmadr_bytes == 8)
1059                                         gma |= (cmd_gma_hi(s, 3)) << 32;
1060                                 /* Store Data Index */
1061                                 if (cmd_val(s, 1) & (1 << 21))
1062                                         index_mode = true;
1063                                 ret |= cmd_address_audit(s, gma, sizeof(u64),
1064                                                 index_mode);
1065                         }
1066                 }
1067         }
1068
1069         if (ret)
1070                 return ret;
1071
1072         if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
1073                 set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
1074                                 s->workload->pending_events);
1075         return 0;
1076 }
1077
1078 static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
1079 {
1080         set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
1081                         s->workload->pending_events);
1082         return 0;
1083 }
1084
1085 static int cmd_advance_default(struct parser_exec_state *s)
1086 {
1087         return ip_gma_advance(s, cmd_length(s));
1088 }
1089
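/*
 * MI_BATCH_BUFFER_END: return from a second-level batch buffer to its
 * parent, or from a batch buffer back to the ring buffer, restoring the
 * saved instruction pointer and address space type.
 */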
1090 static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state *s)
1091 {
1092         int ret;
1093
1094         if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1095                 s->buf_type = BATCH_BUFFER_INSTRUCTION;
1096                 ret = ip_gma_set(s, s->ret_ip_gma_bb);
1097                 s->buf_addr_type = s->saved_buf_addr_type;
1098         } else {
1099                 s->buf_type = RING_BUFFER_INSTRUCTION;
1100                 s->buf_addr_type = GTT_BUFFER;
1101                 if (s->ret_ip_gma_ring >= s->ring_start + s->ring_size)
1102                         s->ret_ip_gma_ring -= s->ring_size;
1103                 ret = ip_gma_set(s, s->ret_ip_gma_ring);
1104         }
1105         return ret;
1106 }
1107
1108 struct mi_display_flip_command_info {
1109         int pipe;
1110         int plane;
1111         int event;
1112         i915_reg_t stride_reg;
1113         i915_reg_t ctrl_reg;
1114         i915_reg_t surf_reg;
1115         u64 stride_val;
1116         u64 tile_val;
1117         u64 surf_val;
1118         bool async_flip;
1119 };
1120
1121 struct plane_code_mapping {
1122         int pipe;
1123         int plane;
1124         int event;
1125 };
1126
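/*
 * Decode a Broadwell-style MI_DISPLAY_FLIP: map the plane select code to
 * pipe/plane and flip-done event, pull out stride, tiling and surface
 * base address, and select the plane registers to be updated.
 */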
1127 static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
1128                 struct mi_display_flip_command_info *info)
1129 {
1130         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1131         struct plane_code_mapping gen8_plane_code[] = {
1132                 [0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
1133                 [1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
1134                 [2] = {PIPE_A, PLANE_B, SPRITE_A_FLIP_DONE},
1135                 [3] = {PIPE_B, PLANE_B, SPRITE_B_FLIP_DONE},
1136                 [4] = {PIPE_C, PLANE_A, PRIMARY_C_FLIP_DONE},
1137                 [5] = {PIPE_C, PLANE_B, SPRITE_C_FLIP_DONE},
1138         };
1139         u32 dword0, dword1, dword2;
1140         u32 v;
1141
1142         dword0 = cmd_val(s, 0);
1143         dword1 = cmd_val(s, 1);
1144         dword2 = cmd_val(s, 2);
1145
1146         v = (dword0 & GENMASK(21, 19)) >> 19;
1147         if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
1148                 return -EBADRQC;
1149
1150         info->pipe = gen8_plane_code[v].pipe;
1151         info->plane = gen8_plane_code[v].plane;
1152         info->event = gen8_plane_code[v].event;
1153         info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
1154         info->tile_val = (dword1 & 0x1);
1155         info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
1156         info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
1157
1158         if (info->plane == PLANE_A) {
1159                 info->ctrl_reg = DSPCNTR(info->pipe);
1160                 info->stride_reg = DSPSTRIDE(info->pipe);
1161                 info->surf_reg = DSPSURF(info->pipe);
1162         } else if (info->plane == PLANE_B) {
1163                 info->ctrl_reg = SPRCTL(info->pipe);
1164                 info->stride_reg = SPRSTRIDE(info->pipe);
1165                 info->surf_reg = SPRSURF(info->pipe);
1166         } else {
1167                 WARN_ON(1);
1168                 return -EBADRQC;
1169         }
1170         return 0;
1171 }
1172
1173 static int skl_decode_mi_display_flip(struct parser_exec_state *s,
1174                 struct mi_display_flip_command_info *info)
1175 {
1176         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1177         struct intel_vgpu *vgpu = s->vgpu;
1178         u32 dword0 = cmd_val(s, 0);
1179         u32 dword1 = cmd_val(s, 1);
1180         u32 dword2 = cmd_val(s, 2);
1181         u32 plane = (dword0 & GENMASK(12, 8)) >> 8;
1182
1183         info->plane = PRIMARY_PLANE;
1184
1185         switch (plane) {
1186         case MI_DISPLAY_FLIP_SKL_PLANE_1_A:
1187                 info->pipe = PIPE_A;
1188                 info->event = PRIMARY_A_FLIP_DONE;
1189                 break;
1190         case MI_DISPLAY_FLIP_SKL_PLANE_1_B:
1191                 info->pipe = PIPE_B;
1192                 info->event = PRIMARY_B_FLIP_DONE;
1193                 break;
1194         case MI_DISPLAY_FLIP_SKL_PLANE_1_C:
1195                 info->pipe = PIPE_C;
1196                 info->event = PRIMARY_C_FLIP_DONE;
1197                 break;
1198
1199         case MI_DISPLAY_FLIP_SKL_PLANE_2_A:
1200                 info->pipe = PIPE_A;
1201                 info->event = SPRITE_A_FLIP_DONE;
1202                 info->plane = SPRITE_PLANE;
1203                 break;
1204         case MI_DISPLAY_FLIP_SKL_PLANE_2_B:
1205                 info->pipe = PIPE_B;
1206                 info->event = SPRITE_B_FLIP_DONE;
1207                 info->plane = SPRITE_PLANE;
1208                 break;
1209         case MI_DISPLAY_FLIP_SKL_PLANE_2_C:
1210                 info->pipe = PIPE_C;
1211                 info->event = SPRITE_C_FLIP_DONE;
1212                 info->plane = SPRITE_PLANE;
1213                 break;
1214
1215         default:
1216                 gvt_vgpu_err("unknown plane code %d\n", plane);
1217                 return -EBADRQC;
1218         }
1219
1220         info->stride_val = (dword1 & GENMASK(15, 6)) >> 6;
1221         info->tile_val = (dword1 & GENMASK(2, 0));
1222         info->surf_val = (dword2 & GENMASK(31, 12)) >> 12;
1223         info->async_flip = ((dword2 & GENMASK(1, 0)) == 0x1);
1224
1225         info->ctrl_reg = DSPCNTR(info->pipe);
1226         info->stride_reg = DSPSTRIDE(info->pipe);
1227         info->surf_reg = DSPSURF(info->pipe);
1228
1229         return 0;
1230 }
1231
1232 static int gen8_check_mi_display_flip(struct parser_exec_state *s,
1233                 struct mi_display_flip_command_info *info)
1234 {
1235         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1236         u32 stride, tile;
1237
1238         if (!info->async_flip)
1239                 return 0;
1240
1241         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
1242                 stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
1243                 tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
1244                                 GENMASK(12, 10)) >> 10;
1245         } else {
1246                 stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
1247                                 GENMASK(15, 6)) >> 6;
1248                 tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
1249         }
1250
1251         if (stride != info->stride_val)
1252                 gvt_dbg_cmd("cannot change stride during async flip\n");
1253
1254         if (tile != info->tile_val)
1255                 gvt_dbg_cmd("cannot change tile during async flip\n");
1256
1257         return 0;
1258 }
1259
1260 static int gen8_update_plane_mmio_from_mi_display_flip(
1261                 struct parser_exec_state *s,
1262                 struct mi_display_flip_command_info *info)
1263 {
1264         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1265         struct intel_vgpu *vgpu = s->vgpu;
1266
1267         set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
1268                       info->surf_val << 12);
1269         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
1270                 set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
1271                               info->stride_val);
1272                 set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
1273                               info->tile_val << 10);
1274         } else {
1275                 set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
1276                               info->stride_val << 6);
1277                 set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
1278                               info->tile_val << 10);
1279         }
1280
1281         vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
1282         intel_vgpu_trigger_virtual_event(vgpu, info->event);
1283         return 0;
1284 }
1285
1286 static int decode_mi_display_flip(struct parser_exec_state *s,
1287                 struct mi_display_flip_command_info *info)
1288 {
1289         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1290
1291         if (IS_BROADWELL(dev_priv))
1292                 return gen8_decode_mi_display_flip(s, info);
1293         if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
1294                 return skl_decode_mi_display_flip(s, info);
1295
1296         return -ENODEV;
1297 }
1298
1299 static int check_mi_display_flip(struct parser_exec_state *s,
1300                 struct mi_display_flip_command_info *info)
1301 {
1302         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1303
1304         if (IS_BROADWELL(dev_priv)
1305                 || IS_SKYLAKE(dev_priv)
1306                 || IS_KABYLAKE(dev_priv))
1307                 return gen8_check_mi_display_flip(s, info);
1308         return -ENODEV;
1309 }
1310
1311 static int update_plane_mmio_from_mi_display_flip(
1312                 struct parser_exec_state *s,
1313                 struct mi_display_flip_command_info *info)
1314 {
1315         struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
1316
1317         if (IS_BROADWELL(dev_priv)
1318                 || IS_SKYLAKE(dev_priv)
1319                 || IS_KABYLAKE(dev_priv))
1320                 return gen8_update_plane_mmio_from_mi_display_flip(s, info);
1321         return -ENODEV;
1322 }
1323
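/*
 * MI_DISPLAY_FLIP is emulated in software: decode and validate the flip,
 * update the virtual plane registers and trigger the flip-done event,
 * then NOOP out the command so the hardware never executes it.
 */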
1324 static int cmd_handler_mi_display_flip(struct parser_exec_state *s)
1325 {
1326         struct mi_display_flip_command_info info;
1327         struct intel_vgpu *vgpu = s->vgpu;
1328         int ret;
1329         int i;
1330         int len = cmd_length(s);
1331
1332         ret = decode_mi_display_flip(s, &info);
1333         if (ret) {
1334                 gvt_vgpu_err("fail to decode MI display flip command\n");
1335                 return ret;
1336         }
1337
1338         ret = check_mi_display_flip(s, &info);
1339         if (ret) {
1340                 gvt_vgpu_err("invalid MI display flip command\n");
1341                 return ret;
1342         }
1343
1344         ret = update_plane_mmio_from_mi_display_flip(s, &info);
1345         if (ret) {
1346                 gvt_vgpu_err("fail to update plane mmio\n");
1347                 return ret;
1348         }
1349
1350         for (i = 0; i < len; i++)
1351                 patch_value(s, cmd_ptr(s, i), MI_NOOP);
1352         return 0;
1353 }
1354
1355 static bool is_wait_for_flip_pending(u32 cmd)
1356 {
1357         return cmd & (MI_WAIT_FOR_PLANE_A_FLIP_PENDING |
1358                         MI_WAIT_FOR_PLANE_B_FLIP_PENDING |
1359                         MI_WAIT_FOR_PLANE_C_FLIP_PENDING |
1360                         MI_WAIT_FOR_SPRITE_A_FLIP_PENDING |
1361                         MI_WAIT_FOR_SPRITE_B_FLIP_PENDING |
1362                         MI_WAIT_FOR_SPRITE_C_FLIP_PENDING);
1363 }
1364
1365 static int cmd_handler_mi_wait_for_event(struct parser_exec_state *s)
1366 {
1367         u32 cmd = cmd_val(s, 0);
1368
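             /*
              * Display flips are emulated (see cmd_handler_mi_display_flip),
              * so a wait on any flip-pending bit is dropped by patching the
              * command to MI_NOOP; other wait conditions pass through.
              */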
1369         if (!is_wait_for_flip_pending(cmd))
1370                 return 0;
1371
1372         patch_value(s, cmd_ptr(s, 0), MI_NOOP);
1373         return 0;
1374 }
1375
1376 static unsigned long get_gma_bb_from_cmd(struct parser_exec_state *s, int index)
1377 {
1378         unsigned long addr;
1379         unsigned long gma_high, gma_low;
1380         struct intel_vgpu *vgpu = s->vgpu;
1381         int gmadr_bytes = vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1382
1383         if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8)) {
1384                 gvt_vgpu_err("invalid gma bytes %d\n", gmadr_bytes);
1385                 return INTEL_GVT_INVALID_ADDR;
1386         }
1387
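             /*
              * Assemble the batch buffer graphics address from the command:
              * a single dword on platforms with 4-byte addresses, low plus
              * high dwords on platforms with 8-byte addresses.
              */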
1388         gma_low = cmd_val(s, index) & BATCH_BUFFER_ADDR_MASK;
1389         if (gmadr_bytes == 4) {
1390                 addr = gma_low;
1391         } else {
1392                 gma_high = cmd_val(s, index + 1) & BATCH_BUFFER_ADDR_HIGH_MASK;
1393                 addr = (((unsigned long)gma_high) << 32) | gma_low;
1394         }
1395         return addr;
1396 }
1397
1398 static inline int cmd_address_audit(struct parser_exec_state *s,
1399                 unsigned long guest_gma, int op_size, bool index_mode)
1400 {
1401         struct intel_vgpu *vgpu = s->vgpu;
1402         u32 max_surface_size = vgpu->gvt->device_info.max_surface_size;
1403         int i;
1404         int ret;
1405
1406         if (op_size > max_surface_size) {
1407                 gvt_vgpu_err("command address audit fail name %s\n",
1408                         s->info->name);
1409                 return -EFAULT;
1410         }
1411
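             /*
              * In index mode the operand is a qword index within a single
              * GTT page rather than a graphics address, so only the index
              * bound is checked; otherwise the GGTT range is validated.
              */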
1412         if (index_mode) {
1413                 if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
1414                         ret = -EFAULT;
1415                         goto err;
1416                 }
1417         } else if (!intel_gvt_ggtt_validate_range(vgpu, guest_gma, op_size)) {
1418                 ret = -EFAULT;
1419                 goto err;
1420         }
1421
1422         return 0;
1423
1424 err:
1425         gvt_vgpu_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
1426                         s->info->name, guest_gma, op_size);
1427
1428         pr_err("cmd dump: ");
1429         for (i = 0; i < cmd_length(s); i++) {
1430                 if (!(i % 4))
1431                         pr_err("\n%08x ", cmd_val(s, i));
1432                 else
1433                         pr_err("%08x ", cmd_val(s, i));
1434         }
1435         pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
1436                         vgpu->id,
1437                         vgpu_aperture_gmadr_base(vgpu),
1438                         vgpu_aperture_gmadr_end(vgpu),
1439                         vgpu_hidden_gmadr_base(vgpu),
1440                         vgpu_hidden_gmadr_end(vgpu));
1441         return ret;
1442 }
1443
1444 static int cmd_handler_mi_store_data_imm(struct parser_exec_state *s)
1445 {
1446         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1447         int op_size = (cmd_length(s) - 3) * sizeof(u32);
1448         int core_id = (cmd_val(s, 2) & (1 << 0)) ? 1 : 0;
1449         unsigned long gma, gma_low, gma_high;
1450         int ret = 0;
1451
1452         /* check ppgtt */
1453         if (!(cmd_val(s, 0) & (1 << 22)))
1454                 return 0;
1455
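             /*
              * With 4-byte addresses the destination lives in dword 2; with
              * 8-byte addresses the low bits are in dword 1 and the high
              * bits in dword 2, as handled below.
              */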
1456         gma = cmd_val(s, 2) & GENMASK(31, 2);
1457
1458         if (gmadr_bytes == 8) {
1459                 gma_low = cmd_val(s, 1) & GENMASK(31, 2);
1460                 gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1461                 gma = (gma_high << 32) | gma_low;
1462                 core_id = (cmd_val(s, 1) & (1 << 0)) ? 1 : 0;
1463         }
1464         ret = cmd_address_audit(s, gma + op_size * core_id, op_size, false);
1465         return ret;
1466 }
1467
1468 static inline int unexpected_cmd(struct parser_exec_state *s)
1469 {
1470         struct intel_vgpu *vgpu = s->vgpu;
1471
1472         gvt_vgpu_err("Unexpected %s in command buffer!\n", s->info->name);
1473
1474         return -EBADRQC;
1475 }
1476
1477 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state *s)
1478 {
1479         return unexpected_cmd(s);
1480 }
1481
1482 static int cmd_handler_mi_report_perf_count(struct parser_exec_state *s)
1483 {
1484         return unexpected_cmd(s);
1485 }
1486
1487 static int cmd_handler_mi_op_2e(struct parser_exec_state *s)
1488 {
1489         return unexpected_cmd(s);
1490 }
1491
1492 static int cmd_handler_mi_op_2f(struct parser_exec_state *s)
1493 {
1494         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1495         int op_size = (1 << ((cmd_val(s, 0) & GENMASK(20, 19)) >> 19)) *
1496                         sizeof(u32);
1497         unsigned long gma, gma_high;
1498         int ret = 0;
1499
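             /*
              * Bits 20:19 of the header encode the operand size as a power
              * of two number of dwords (see op_size above).  As with
              * MI_STORE_DATA_IMM, the address is only audited when bit 22
              * is set.
              */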
1500         if (!(cmd_val(s, 0) & (1 << 22)))
1501                 return ret;
1502
1503         gma = cmd_val(s, 1) & GENMASK(31, 2);
1504         if (gmadr_bytes == 8) {
1505                 gma_high = cmd_val(s, 2) & GENMASK(15, 0);
1506                 gma = (gma_high << 32) | gma;
1507         }
1508         ret = cmd_address_audit(s, gma, op_size, false);
1509         return ret;
1510 }
1511
1512 static int cmd_handler_mi_store_data_index(struct parser_exec_state *s)
1513 {
1514         return unexpected_cmd(s);
1515 }
1516
1517 static int cmd_handler_mi_clflush(struct parser_exec_state *s)
1518 {
1519         return unexpected_cmd(s);
1520 }
1521
1522 static int cmd_handler_mi_conditional_batch_buffer_end(
1523                 struct parser_exec_state *s)
1524 {
1525         return unexpected_cmd(s);
1526 }
1527
1528 static int cmd_handler_mi_update_gtt(struct parser_exec_state *s)
1529 {
1530         return unexpected_cmd(s);
1531 }
1532
1533 static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
1534 {
1535         int gmadr_bytes = s->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
1536         unsigned long gma;
1537         bool index_mode = false;
1538         int ret = 0;
1539
1540         /* Check post-sync and ppgtt bit */
1541         if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
1542                 gma = cmd_val(s, 1) & GENMASK(31, 3);
1543                 if (gmadr_bytes == 8)
1544                         gma |= (cmd_val(s, 2) & GENMASK(15, 0)) << 32;
1545                 /* Store Data Index */
1546                 if (cmd_val(s, 0) & (1 << 21))
1547                         index_mode = true;
1548                 ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
1549         }
1550         /* Check notify bit */
1551         if ((cmd_val(s, 0) & (1 << 8)))
1552                 set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
1553                                 s->workload->pending_events);
1554         return ret;
1555 }
1556
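     /*
      * If an MI_BATCH_BUFFER_START issued from the ring buffer selects the
      * PPGTT address space, subsequent batch buffer addresses are resolved
      * through the guest PPGTT instead of the GGTT.
      */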
1557 static void addr_type_update_snb(struct parser_exec_state *s)
1558 {
1559         if ((s->buf_type == RING_BUFFER_INSTRUCTION) &&
1560                         (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s, 0)) == 1)) {
1561                 s->buf_addr_type = PPGTT_BUFFER;
1562         }
1563 }
1564
1565
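     /*
      * Copy a guest graphics address range into a host buffer, walking it
      * page by page: each gma is translated to a guest physical address
      * through the given mm before the chunk is read.  Returns the number
      * of bytes copied, or -EFAULT on a translation failure.
      */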
1566 static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
1567                 unsigned long gma, unsigned long end_gma, void *va)
1568 {
1569         unsigned long copy_len, offset;
1570         unsigned long len = 0;
1571         unsigned long gpa;
1572
1573         while (gma != end_gma) {
1574                 gpa = intel_vgpu_gma_to_gpa(mm, gma);
1575                 if (gpa == INTEL_GVT_INVALID_ADDR) {
1576                         gvt_vgpu_err("invalid gma address: %lx\n", gma);
1577                         return -EFAULT;
1578                 }
1579
1580                 offset = gma & (I915_GTT_PAGE_SIZE - 1);
1581
1582                 copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
1583                         I915_GTT_PAGE_SIZE - offset : end_gma - gma;
1584
1585                 intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
1586
1587                 len += copy_len;
1588                 gma += copy_len;
1589         }
1590         return len;
1591 }
1592
1593
1594 /*
1595  * Check whether a batch buffer needs to be scanned. Currently
1596  * the only criterion is privilege.
1597  */
1598 static int batch_buffer_needs_scan(struct parser_exec_state *s)
1599 {
1600         struct intel_gvt *gvt = s->vgpu->gvt;
1601
1602         if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)
1603                 || IS_KABYLAKE(gvt->dev_priv)) {
1604                 /* BDW decides privilege based on address space */
1605                 if (cmd_val(s, 0) & (1 << 8))
1606                         return 0;
1607         }
1608         return 1;
1609 }
1610
1611 static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
1612 {
1613         unsigned long gma = 0;
1614         struct cmd_info *info;
1615         uint32_t cmd_len = 0;
1616         bool bb_end = false;
1617         struct intel_vgpu *vgpu = s->vgpu;
1618         u32 cmd;
1619
1620         *bb_size = 0;
1621
1622         /* get the start gm address of the batch buffer */
1623         gma = get_gma_bb_from_cmd(s, 1);
1624         if (gma == INTEL_GVT_INVALID_ADDR)
1625                 return -EFAULT;
1626
1627         cmd = cmd_val(s, 0);
1628         info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1629         if (info == NULL) {
1630                 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1631                                 cmd, get_opcode(cmd, s->ring_id));
1632                 return -EBADRQC;
1633         }
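             /*
              * Walk the guest batch buffer command by command, reading each
              * header dword from guest memory, until MI_BATCH_BUFFER_END or
              * a chained (non second level) MI_BATCH_BUFFER_START ends it;
              * accumulate the decoded command lengths into *bb_size.
              */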
1634         do {
1635                 if (copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
1636                                 gma, gma + 4, &cmd) < 0)
1637                         return -EFAULT;
1638                 info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
1639                 if (info == NULL) {
1640                         gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
1641                                 cmd, get_opcode(cmd, s->ring_id));
1642                         return -EBADRQC;
1643                 }
1644
1645                 if (info->opcode == OP_MI_BATCH_BUFFER_END) {
1646                         bb_end = true;
1647                 } else if (info->opcode == OP_MI_BATCH_BUFFER_START) {
1648                         if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd) == 0)
1649                                 /* chained batch buffer */
1650                                 bb_end = true;
1651                 }
1652                 cmd_len = get_cmd_length(info, cmd) << 2;
1653                 *bb_size += cmd_len;
1654                 gma += cmd_len;
1655         } while (!bb_end);
1656
1657         return 0;
1658 }
1659
1660 static int perform_bb_shadow(struct parser_exec_state *s)
1661 {
1662         struct intel_vgpu *vgpu = s->vgpu;
1663         struct intel_vgpu_shadow_bb *bb;
1664         unsigned long gma = 0;
1665         unsigned long bb_size;
1666         int ret = 0;
1667
1668         /* get the start gm address of the batch buffer */
1669         gma = get_gma_bb_from_cmd(s, 1);
1670         if (gma == INTEL_GVT_INVALID_ADDR)
1671                 return -EFAULT;
1672
1673         ret = find_bb_size(s, &bb_size);
1674         if (ret)
1675                 return ret;
1676
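             /*
              * Allocate the shadow bb descriptor plus a GEM object large
              * enough for the guest batch (rounded up to a page), then copy
              * the guest batch into it so the scanner works on the shadow.
              */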
1677         bb = kzalloc(sizeof(*bb), GFP_KERNEL);
1678         if (!bb)
1679                 return -ENOMEM;
1680
1681         bb->obj = i915_gem_object_create(s->vgpu->gvt->dev_priv,
1682                                          roundup(bb_size, PAGE_SIZE));
1683         if (IS_ERR(bb->obj)) {
1684                 ret = PTR_ERR(bb->obj);
1685                 goto err_free_bb;
1686         }
1687
1688         ret = i915_gem_obj_prepare_shmem_write(bb->obj, &bb->clflush);
1689         if (ret)
1690                 goto err_free_obj;
1691
1692         bb->va = i915_gem_object_pin_map(bb->obj, I915_MAP_WB);
1693         if (IS_ERR(bb->va)) {
1694                 ret = PTR_ERR(bb->va);
1695                 goto err_finish_shmem_access;
1696         }
1697
1698         if (bb->clflush & CLFLUSH_BEFORE) {
1699                 drm_clflush_virt_range(bb->va, bb->obj->base.size);
1700                 bb->clflush &= ~CLFLUSH_BEFORE;
1701         }
1702
1703         ret = copy_gma_to_hva(s->vgpu, s->vgpu->gtt.ggtt_mm,
1704                               gma, gma + bb_size,
1705                               bb->va);
1706         if (ret < 0) {
1707                 gvt_vgpu_err("fail to copy guest ring buffer\n");
1708                 ret = -EFAULT;
1709                 goto err_unmap;
1710         }
1711
1712         INIT_LIST_HEAD(&bb->list);
1713         list_add(&bb->list, &s->workload->shadow_bb);
1714
1715         bb->accessing = true;
1716         bb->bb_start_cmd_va = s->ip_va;
1717
1718         /*
1719          * ip_va saves the virtual address of the shadow batch buffer, while
1720          * ip_gma saves the graphics address of the original batch buffer.
1721          * As the shadow batch buffer is just a copy of the original one,
1722          * it is valid to use the shadow batch buffer's va and the original
1723          * batch buffer's gma in pair. After all, we don't want to pin the
1724          * shadow buffer here (too early).
1725          */
1726         s->ip_va = bb->va;
1727         s->ip_gma = gma;
1728         return 0;
1729 err_unmap:
1730         i915_gem_object_unpin_map(bb->obj);
1731 err_finish_shmem_access:
1732         i915_gem_obj_finish_shmem_access(bb->obj);
1733 err_free_obj:
1734         i915_gem_object_put(bb->obj);
1735 err_free_bb:
1736         kfree(bb);
1737         return ret;
1738 }
1739
1740 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
1741 {
1742         bool second_level;
1743         int ret = 0;
1744         struct intel_vgpu *vgpu = s->vgpu;
1745
1746         if (s->buf_type == BATCH_BUFFER_2ND_LEVEL) {
1747                 gvt_vgpu_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1748                 return -EFAULT;
1749         }
1750
1751         second_level = BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s, 0)) == 1;
1752         if (second_level && (s->buf_type != BATCH_BUFFER_INSTRUCTION)) {
1753                 gvt_vgpu_err("Jumping to 2nd level BB from RB is not allowed\n");
1754                 return -EFAULT;
1755         }
1756
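             /*
              * Record where to resume once the batch completes: a batch
              * started from the ring returns to the ring, while a second
              * level batch returns into the first level batch buffer.
              */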
1757         s->saved_buf_addr_type = s->buf_addr_type;
1758         addr_type_update_snb(s);
1759         if (s->buf_type == RING_BUFFER_INSTRUCTION) {
1760                 s->ret_ip_gma_ring = s->ip_gma + cmd_length(s) * sizeof(u32);
1761                 s->buf_type = BATCH_BUFFER_INSTRUCTION;
1762         } else if (second_level) {
1763                 s->buf_type = BATCH_BUFFER_2ND_LEVEL;
1764                 s->ret_ip_gma_bb = s->ip_gma + cmd_length(s) * sizeof(u32);
1765                 s->ret_bb_va = s->ip_va + cmd_length(s) * sizeof(u32);
1766         }
1767
1768         if (batch_buffer_needs_scan(s)) {
1769                 ret = perform_bb_shadow(s);
1770                 if (ret < 0)
1771                         gvt_vgpu_err("invalid shadow batch buffer\n");
1772         } else {
1773                 /* emulate a batch buffer end so the return is handled correctly */
1774                 ret = cmd_handler_mi_batch_buffer_end(s);
1775                 if (ret < 0)
1776                         return ret;
1777         }
1778         return ret;
1779 }
1780
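     /*
      * Command description table used by the scanner.  Each entry lists the
      * command name, opcode, flags, permitted rings, supported devices, the
      * dword positions that need address auditing (ADDR_FIX_*), a length
      * field (the exact length, in dwords, for F_LEN_CONST commands) and an
      * optional handler invoked when the command is decoded.
      */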
1781 static struct cmd_info cmd_info[] = {
1782         {"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1783
1784         {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
1785                 0, 1, NULL},
1786
1787         {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT, F_LEN_CONST, R_ALL, D_ALL,
1788                 0, 1, cmd_handler_mi_user_interrupt},
1789
1790         {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT, F_LEN_CONST, R_RCS | R_BCS,
1791                 D_ALL, 0, 1, cmd_handler_mi_wait_for_event},
1792
1793         {"MI_FLUSH", OP_MI_FLUSH, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
1794
1795         {"MI_ARB_CHECK", OP_MI_ARB_CHECK, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1796                 NULL},
1797
1798         {"MI_RS_CONTROL", OP_MI_RS_CONTROL, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1799                 NULL},
1800
1801         {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1802                 NULL},
1803
1804         {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1805                 NULL},
1806
1807         {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC, F_LEN_CONST, R_RCS,
1808                 D_ALL, 0, 1, NULL},
1809
1810         {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END,
1811                 F_IP_ADVANCE_CUSTOM | F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1812                 cmd_handler_mi_batch_buffer_end},
1813
1814         {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH, F_LEN_CONST, R_ALL, D_ALL,
1815                 0, 1, NULL},
1816
1817         {"MI_PREDICATE", OP_MI_PREDICATE, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1818                 NULL},
1819
1820         {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER, F_LEN_CONST, R_ALL,
1821                 D_ALL, 0, 1, NULL},
1822
1823         {"MI_SET_APPID", OP_MI_SET_APPID, F_LEN_CONST, R_ALL, D_ALL, 0, 1,
1824                 NULL},
1825
1826         {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT, F_LEN_CONST, R_RCS, D_ALL, 0, 1,
1827                 NULL},
1828
1829         {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP, F_LEN_VAR | F_POST_HANDLE,
1830                 R_RCS | R_BCS, D_ALL, 0, 8, cmd_handler_mi_display_flip},
1831
1832         {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX, F_LEN_VAR, R_ALL, D_ALL,
1833                 0, 8, NULL},
1834
1835         {"MI_MATH", OP_MI_MATH, F_LEN_VAR, R_ALL, D_ALL, 0, 8, NULL},
1836
1837         {"MI_URB_CLEAR", OP_MI_URB_CLEAR, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1838
1839         {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL, F_LEN_VAR, R_ALL,
1840                 D_BDW_PLUS, 0, 8, NULL},
1841
1842         {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1843                 ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait},
1844
1845         {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM, F_LEN_VAR, R_ALL, D_BDW_PLUS,
1846                 ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm},
1847
1848         {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX, F_LEN_VAR, R_ALL, D_ALL,
1849                 0, 8, cmd_handler_mi_store_data_index},
1850
1851         {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM, F_LEN_VAR, R_ALL,
1852                 D_ALL, 0, 8, cmd_handler_lri},
1853
1854         {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT, F_LEN_VAR, R_ALL, D_BDW_PLUS, 0, 10,
1855                 cmd_handler_mi_update_gtt},
1856
1857         {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM, F_LEN_VAR, R_ALL,
1858                 D_ALL, ADDR_FIX_1(2), 8, cmd_handler_srm},
1859
1860         {"MI_FLUSH_DW", OP_MI_FLUSH_DW, F_LEN_VAR, R_ALL, D_ALL, 0, 6,
1861                 cmd_handler_mi_flush_dw},
1862
1863         {"MI_CLFLUSH", OP_MI_CLFLUSH, F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(1),
1864                 10, cmd_handler_mi_clflush},
1865
1866         {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT, F_LEN_VAR, R_ALL,
1867                 D_ALL, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count},
1868
1869         {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM, F_LEN_VAR, R_ALL,
1870                 D_ALL, ADDR_FIX_1(2), 8, cmd_handler_lrm},
1871
1872         {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG, F_LEN_VAR, R_ALL,
1873                 D_ALL, 0, 8, cmd_handler_lrr},
1874
1875         {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM, F_LEN_VAR, R_RCS,
1876                 D_ALL, 0, 8, NULL},
1877
1878         {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM, F_LEN_VAR, R_RCS, D_ALL,
1879                 ADDR_FIX_1(2), 8, NULL},
1880
1881         {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM, F_LEN_VAR, R_RCS, D_ALL,
1882                 ADDR_FIX_1(2), 8, NULL},
1883
1884         {"MI_OP_2E", OP_MI_2E, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_2(1, 2),
1885                 8, cmd_handler_mi_op_2e},
1886
1887         {"MI_OP_2F", OP_MI_2F, F_LEN_VAR, R_ALL, D_BDW_PLUS, ADDR_FIX_1(1),
1888                 8, cmd_handler_mi_op_2f},
1889
1890         {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START,
1891                 F_IP_ADVANCE_CUSTOM, R_ALL, D_ALL, 0, 8,
1892                 cmd_handler_mi_batch_buffer_start},
1893
1894         {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END,
1895                 F_LEN_VAR, R_ALL, D_ALL, ADDR_FIX_1(2), 8,
1896                 cmd_handler_mi_conditional_batch_buffer_end},
1897
1898         {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL, F_LEN_CONST,
1899                 R_RCS | R_BCS, D_ALL, 0, 2, NULL},
1900
1901         {"XY_SETUP_BLT", OP_XY_SETUP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1902                 ADDR_FIX_2(4, 7), 8, NULL},
1903
1904         {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT, F_LEN_VAR, R_BCS, D_ALL,
1905                 0, 8, NULL},
1906
1907         {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT,
1908                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1909
1910         {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1911
1912         {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT, F_LEN_VAR, R_BCS, D_ALL,
1913                 0, 8, NULL},
1914
1915         {"XY_TEXT_BLT", OP_XY_TEXT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1916                 ADDR_FIX_1(3), 8, NULL},
1917
1918         {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT, F_LEN_VAR, R_BCS,
1919                 D_ALL, 0, 8, NULL},
1920
1921         {"XY_COLOR_BLT", OP_XY_COLOR_BLT, F_LEN_VAR, R_BCS, D_ALL,
1922                 ADDR_FIX_1(4), 8, NULL},
1923
1924         {"XY_PAT_BLT", OP_XY_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1925                 ADDR_FIX_2(4, 5), 8, NULL},
1926
1927         {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT, F_LEN_VAR, R_BCS, D_ALL,
1928                 ADDR_FIX_1(4), 8, NULL},
1929
1930         {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT, F_LEN_VAR, R_BCS, D_ALL,
1931                 ADDR_FIX_2(4, 7), 8, NULL},
1932
1933         {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT, F_LEN_VAR, R_BCS,
1934                 D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1935
1936         {"XY_FULL_BLT", OP_XY_FULL_BLT, F_LEN_VAR, R_BCS, D_ALL, 0, 8, NULL},
1937
1938         {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT, F_LEN_VAR, R_BCS,
1939                 D_ALL, ADDR_FIX_3(4, 5, 8), 8, NULL},
1940
1941         {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT, F_LEN_VAR,
1942                 R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1943
1944         {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
1945                 OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT,
1946                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1947
1948         {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT, F_LEN_VAR, R_BCS,
1949                 D_ALL, ADDR_FIX_1(4), 8, NULL},
1950
1951         {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT,
1952                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1953
1954         {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE, F_LEN_VAR, R_BCS,
1955                 D_ALL, ADDR_FIX_1(4), 8, NULL},
1956
1957         {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT, F_LEN_VAR, R_BCS,
1958                 D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1959
1960         {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT,
1961                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 7), 8, NULL},
1962
1963         {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
1964                 OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT,
1965                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_2(4, 5), 8, NULL},
1966
1967         {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT, F_LEN_VAR, R_BCS, D_ALL,
1968                 ADDR_FIX_2(4, 5), 8, NULL},
1969
1970         {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE,
1971                 F_LEN_VAR, R_BCS, D_ALL, ADDR_FIX_1(4), 8, NULL},
1972
1973         {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
1974                 OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP,
1975                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1976
1977         {"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
1978                 OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC,
1979                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1980
1981         {"3DSTATE_BLEND_STATE_POINTERS",
1982                 OP_3DSTATE_BLEND_STATE_POINTERS,
1983                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1984
1985         {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
1986                 OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
1987                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1988
1989         {"3DSTATE_BINDING_TABLE_POINTERS_VS",
1990                 OP_3DSTATE_BINDING_TABLE_POINTERS_VS,
1991                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1992
1993         {"3DSTATE_BINDING_TABLE_POINTERS_HS",
1994                 OP_3DSTATE_BINDING_TABLE_POINTERS_HS,
1995                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
1996
1997         {"3DSTATE_BINDING_TABLE_POINTERS_DS",
1998                 OP_3DSTATE_BINDING_TABLE_POINTERS_DS,
1999                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2000
2001         {"3DSTATE_BINDING_TABLE_POINTERS_GS",
2002                 OP_3DSTATE_BINDING_TABLE_POINTERS_GS,
2003                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2004
2005         {"3DSTATE_BINDING_TABLE_POINTERS_PS",
2006                 OP_3DSTATE_BINDING_TABLE_POINTERS_PS,
2007                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2008
2009         {"3DSTATE_SAMPLER_STATE_POINTERS_VS",
2010                 OP_3DSTATE_SAMPLER_STATE_POINTERS_VS,
2011                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2012
2013         {"3DSTATE_SAMPLER_STATE_POINTERS_HS",
2014                 OP_3DSTATE_SAMPLER_STATE_POINTERS_HS,
2015                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2016
2017         {"3DSTATE_SAMPLER_STATE_POINTERS_DS",
2018                 OP_3DSTATE_SAMPLER_STATE_POINTERS_DS,
2019                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2020
2021         {"3DSTATE_SAMPLER_STATE_POINTERS_GS",
2022                 OP_3DSTATE_SAMPLER_STATE_POINTERS_GS,
2023                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2024
2025         {"3DSTATE_SAMPLER_STATE_POINTERS_PS",
2026                 OP_3DSTATE_SAMPLER_STATE_POINTERS_PS,
2027                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2028
2029         {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS, F_LEN_VAR, R_RCS, D_ALL,
2030                 0, 8, NULL},
2031
2032         {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS, F_LEN_VAR, R_RCS, D_ALL,
2033                 0, 8, NULL},
2034
2035         {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS, F_LEN_VAR, R_RCS, D_ALL,
2036                 0, 8, NULL},
2037
2038         {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS, F_LEN_VAR, R_RCS, D_ALL,
2039                 0, 8, NULL},
2040
2041         {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS,
2042                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2043
2044         {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS,
2045                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2046
2047         {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS,
2048                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2049
2050         {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS,
2051                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2052
2053         {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS,
2054                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2055
2056         {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS,
2057                 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2058
2059         {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS,
2060                 F_LEN_VAR, R_RCS, D_ALL, 0, 11, NULL},
2061
2062         {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS,
2063                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2064
2065         {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS,
2066                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2067
2068         {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS,
2069                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2070
2071         {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS,
2072                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2073
2074         {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS,
2075                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2076
2077         {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS,
2078                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2079
2080         {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS,
2081                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2082
2083         {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS,
2084                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2085
2086         {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS,
2087                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2088
2089         {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS,
2090                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2091
2092         {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS,
2093                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2094
2095         {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS,
2096                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2097
2098         {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS,
2099                 F_LEN_VAR, R_RCS, D_ALL, 0, 9, NULL},
2100
2101         {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING, F_LEN_VAR, R_RCS,
2102                 D_BDW_PLUS, 0, 8, NULL},
2103
2104         {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2105                 NULL},
2106
2107         {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY, F_LEN_VAR, R_RCS,
2108                 D_BDW_PLUS, 0, 8, NULL},
2109
2110         {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY, F_LEN_VAR, R_RCS,
2111                 D_BDW_PLUS, 0, 8, NULL},
2112
2113         {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2114                 8, NULL},
2115
2116         {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL, F_LEN_VAR,
2117                 R_RCS, D_BDW_PLUS, 0, 8, NULL},
2118
2119         {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0,
2120                 8, NULL},
2121
2122         {"3DSTATE_RASTER", OP_3DSTATE_RASTER, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2123                 NULL},
2124
2125         {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2126                 NULL},
2127
2128         {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP, F_LEN_VAR, R_RCS, D_BDW_PLUS, 0, 8,
2129                 NULL},
2130
2131         {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS, F_LEN_VAR, R_RCS,
2132                 D_BDW_PLUS, 0, 8, NULL},
2133
2134         {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS, F_LEN_VAR,
2135                 R_RCS, D_ALL, 0, 8, NULL},
2136
2137         {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER, F_LEN_VAR, R_RCS,
2138                 D_BDW_PLUS, ADDR_FIX_1(2), 8, NULL},
2139
2140         {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS, F_LEN_CONST,
2141                 R_RCS, D_ALL, 0, 1, NULL},
2142
2143         {"3DSTATE_VF", OP_3DSTATE_VF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2144
2145         {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS, F_LEN_VAR,
2146                 R_RCS, D_ALL, 0, 8, NULL},
2147
2148         {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS,
2149                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2150
2151         {"3DSTATE_GS", OP_3DSTATE_GS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2152
2153         {"3DSTATE_CLIP", OP_3DSTATE_CLIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2154
2155         {"3DSTATE_WM", OP_3DSTATE_WM, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2156
2157         {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS, F_LEN_VAR, R_RCS,
2158                 D_BDW_PLUS, 0, 8, NULL},
2159
2160         {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS, F_LEN_VAR, R_RCS,
2161                 D_BDW_PLUS, 0, 8, NULL},
2162
2163         {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK, F_LEN_VAR, R_RCS,
2164                 D_ALL, 0, 8, NULL},
2165
2166         {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS, F_LEN_VAR, R_RCS,
2167                 D_BDW_PLUS, 0, 8, NULL},
2168
2169         {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS, F_LEN_VAR, R_RCS,
2170                 D_BDW_PLUS, 0, 8, NULL},
2171
2172         {"3DSTATE_HS", OP_3DSTATE_HS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2173
2174         {"3DSTATE_TE", OP_3DSTATE_TE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2175
2176         {"3DSTATE_DS", OP_3DSTATE_DS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2177
2178         {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT, F_LEN_VAR, R_RCS,
2179                 D_ALL, 0, 8, NULL},
2180
2181         {"3DSTATE_SBE", OP_3DSTATE_SBE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2182
2183         {"3DSTATE_PS", OP_3DSTATE_PS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2184
2185         {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE, F_LEN_VAR,
2186                 R_RCS, D_ALL, 0, 8, NULL},
2187
2188         {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0,
2189                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2190
2191         {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY, F_LEN_VAR, R_RCS, D_ALL,
2192                 0, 8, NULL},
2193
2194         {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER, F_LEN_VAR, R_RCS,
2195                 D_ALL, ADDR_FIX_1(2), 8, NULL},
2196
2197         {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET,
2198                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2199
2200         {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN,
2201                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2202
2203         {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE, F_LEN_VAR, R_RCS,
2204                 D_ALL, 0, 8, NULL},
2205
2206         {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS, F_LEN_VAR, R_RCS,
2207                 D_ALL, 0, 8, NULL},
2208
2209         {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX, F_LEN_VAR, R_RCS,
2210                 D_ALL, 0, 8, NULL},
2211
2212         {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1,
2213                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2214
2215         {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW, F_LEN_VAR, R_RCS,
2216                 D_BDW_PLUS, 0, 8, NULL},
2217
2218         {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER, F_LEN_VAR, R_RCS,
2219                 D_ALL, ADDR_FIX_1(2), 8, NULL},
2220
2221         {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER, F_LEN_VAR,
2222                 R_RCS, D_ALL, ADDR_FIX_1(2), 8, NULL},
2223
2224         {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS, F_LEN_VAR,
2225                 R_RCS, D_ALL, 0, 8, NULL},
2226
2227         {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS,
2228                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2229
2230         {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS,
2231                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2232
2233         {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS,
2234                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2235
2236         {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS,
2237                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2238
2239         {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS,
2240                 F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2241
2242         {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE, F_LEN_VAR,
2243                 R_RCS, D_ALL, 0, 8, NULL},
2244
2245         {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST, F_LEN_VAR, R_RCS,
2246                 D_ALL, 0, 9, NULL},
2247
2248         {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2249                 ADDR_FIX_2(2, 4), 8, NULL},
2250
2251         {"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2252                 OP_3DSTATE_BINDING_TABLE_POOL_ALLOC,
2253                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2254
2255         {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC,
2256                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2257
2258         {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2259                 OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC,
2260                 F_LEN_VAR, R_RCS, D_BDW_PLUS, ADDR_FIX_1(1), 8, NULL},
2261
2262         {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN, F_LEN_VAR, R_RCS,
2263                 D_BDW_PLUS, 0, 8, NULL},
2264
2265         {"PIPE_CONTROL", OP_PIPE_CONTROL, F_LEN_VAR, R_RCS, D_ALL,
2266                 ADDR_FIX_1(2), 8, cmd_handler_pipe_control},
2267
2268         {"3DPRIMITIVE", OP_3DPRIMITIVE, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2269
2270         {"PIPELINE_SELECT", OP_PIPELINE_SELECT, F_LEN_CONST, R_RCS, D_ALL, 0,
2271                 1, NULL},
2272
2273         {"STATE_PREFETCH", OP_STATE_PREFETCH, F_LEN_VAR, R_RCS, D_ALL,
2274                 ADDR_FIX_1(1), 8, NULL},
2275
2276         {"STATE_SIP", OP_STATE_SIP, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2277
2278         {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2279                 ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL},
2280
2281         {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4, F_LEN_VAR, R_RCS, D_ALL,
2282                 ADDR_FIX_1(1), 8, NULL},
2283
2284         {"3DSTATE_VS", OP_3DSTATE_VS, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2285
2286         {"3DSTATE_SF", OP_3DSTATE_SF, F_LEN_VAR, R_RCS, D_ALL, 0, 8, NULL},
2287
2288         {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS, F_LEN_VAR, R_RCS, D_BDW_PLUS,
2289                 0, 8, NULL},
2290
2291         {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING, F_LEN_VAR, R_RCS,
2292                 D_SKL_PLUS, 0, 8, NULL},
2293
2294         {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
2295                 F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2296
2297         {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE, F_LEN_VAR, R_RCS, D_ALL,
2298                 0, 16, NULL},
2299
2300         {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH, F_LEN_VAR, R_RCS, D_ALL,
2301                 0, 16, NULL},
2302
2303         {"MEDIA_OBJECT", OP_MEDIA_OBJECT, F_LEN_VAR, R_RCS, D_ALL, 0, 16, NULL},
2304
2305         {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD, F_LEN_VAR, R_RCS, D_ALL,
2306                 0, 16, NULL},
2307
2308         {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT, F_LEN_VAR, R_RCS, D_ALL,
2309                 0, 16, NULL},
2310
2311         {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2312                 0, 16, NULL},
2313
2314         {"GPGPU_WALKER", OP_GPGPU_WALKER, F_LEN_VAR, R_RCS, D_ALL,
2315                 0, 8, NULL},
2316
2317         {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE, F_LEN_VAR, R_RCS, D_ALL, 0, 16,
2318                 NULL},
2319
2320         {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45,
2321                 F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
2322
2323         {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT, F_LEN_VAR,
2324                 R_VCS, D_ALL, 0, 12, NULL},
2325
2326         {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE, F_LEN_VAR,
2327                 R_VCS, D_ALL, 0, 12, NULL},
2328
2329         {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE, F_LEN_VAR,
2330                 R_VCS, D_BDW_PLUS, 0, 12, NULL},
2331
2332         {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE,
2333                 F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2334
2335         {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE,
2336                 F_LEN_VAR, R_VCS, D_BDW_PLUS, ADDR_FIX_3(1, 3, 5), 12, NULL},
2337
2338         {"OP_2_0_0_5", OP_2_0_0_5, F_LEN_VAR, R_VCS, D_BDW_PLUS, 0, 12, NULL},
2339
2340         {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER, F_LEN_VAR,
2341                 R_VCS, D_ALL, 0, 12, NULL},
2342
2343         {"MFX_QM_STATE", OP_MFX_QM_STATE, F_LEN_VAR,
2344                 R_VCS, D_ALL, 0, 12, NULL},
2345
2346         {"MFX_FQM_STATE", OP_MFX_FQM_STATE, F_LEN_VAR,
2347                 R_VCS, D_ALL, 0, 12, NULL},
2348
2349         {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT, F_LEN_VAR,
2350                 R_VCS, D_ALL, 0, 12, NULL},
2351
2352         {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT, F_LEN_VAR,
2353                 R_VCS, D_ALL, 0, 12, NULL},
2354
2355         {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT, F_LEN_VAR,
2356                 R_VCS, D_ALL, 0, 12, NULL},
2357
2358         {"MFX_WAIT", OP_MFX_WAIT, F_LEN_VAR,
2359                 R_VCS, D_ALL, 0, 6, NULL},
2360
2361         {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE, F_LEN_VAR,
2362                 R_VCS, D_ALL, 0, 12, NULL},
2363
2364         {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE, F_LEN_VAR,
2365                 R_VCS, D_ALL, 0, 12, NULL},
2366
2367         {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE, F_LEN_VAR,
2368                 R_VCS, D_ALL, 0, 12, NULL},
2369
2370         {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE, F_LEN_VAR,
2371                 R_VCS, D_ALL, 0, 12, NULL},
2372
2373         {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE, F_LEN_VAR,
2374                 R_VCS, D_ALL, 0, 12, NULL},
2375
2376         {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE, F_LEN_VAR,
2377                 R_VCS, D_ALL, 0, 12, NULL},
2378
2379         {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE, F_LEN_VAR,
2380                 R_VCS, D_ALL, 0, 12, NULL},
2381         {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE, F_LEN_VAR,
2382                 R_VCS, D_ALL, 0, 12, NULL},
2383
2384         {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT, F_LEN_VAR,
2385                 R_VCS, D_ALL, 0, 12, NULL},
2386
2387         {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR, F_LEN_VAR,
2388                 R_VCS, D_ALL, ADDR_FIX_1(2), 12, NULL},
2389
2390         {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT, F_LEN_VAR,
2391                 R_VCS, D_ALL, 0, 12, NULL},
2392
2393         {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE, F_LEN_VAR,
2394                 R_VCS, D_ALL, 0, 12, NULL},
2395
2396         {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE, F_LEN_VAR,
2397                 R_VCS, D_ALL, 0, 12, NULL},
2398
2399         {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE, F_LEN_VAR,
2400                 R_VCS, D_ALL, 0, 12, NULL},
2401
2402         {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE, F_LEN_VAR,
2403                 R_VCS, D_ALL, 0, 12, NULL},
2404
2405         {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT, F_LEN_VAR,
2406                 R_VCS, D_ALL, 0, 12, NULL},
2407
2408         {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE, F_LEN_VAR,
2409                 R_VCS, D_ALL, 0, 12, NULL},
2410
2411         {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT, F_LEN_VAR,
2412                 R_VCS, D_ALL, 0, 12, NULL},
2413
2414         {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE, F_LEN_VAR,
2415                 R_VCS, D_ALL, 0, 12, NULL},
2416
2417         {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE, F_LEN_VAR,
2418                 R_VCS, D_ALL, 0, 12, NULL},
2419
2420         {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT, F_LEN_VAR,
2421                 R_VCS, D_ALL, 0, 12, NULL},
2422
2423         {"MFX_2_6_0_0", OP_MFX_2_6_0_0, F_LEN_VAR, R_VCS, D_ALL,
2424                 0, 16, NULL},
2425
2426         {"MFX_2_6_0_9", OP_MFX_2_6_0_9, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2427
2428         {"MFX_2_6_0_8", OP_MFX_2_6_0_8, F_LEN_VAR, R_VCS, D_ALL, 0, 16, NULL},
2429
2430         {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE, F_LEN_VAR,
2431                 R_VCS, D_ALL, 0, 12, NULL},
2432
2433         {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE, F_LEN_VAR,
2434                 R_VCS, D_ALL, 0, 12, NULL},
2435
2436         {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT, F_LEN_VAR,
2437                 R_VCS, D_ALL, 0, 12, NULL},
2438
2439         {"VEBOX_STATE", OP_VEB_STATE, F_LEN_VAR, R_VECS, D_ALL, 0, 12, NULL},
2440
2441         {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE, F_LEN_VAR, R_VECS, D_ALL,
2442                 0, 12, NULL},
2443
2444         {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE, F_LEN_VAR, R_VECS, D_BDW_PLUS,
2445                 0, 20, NULL},
2446 };
2447
2448 static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
2449 {
2450         hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
2451 }
2452
2453 /* call the cmd handler, and advance ip */
2454 static int cmd_parser_exec(struct parser_exec_state *s)
2455 {
2456         struct intel_vgpu *vgpu = s->vgpu;
2457         struct cmd_info *info;
2458         u32 cmd;
2459         int ret = 0;
2460
2461         cmd = cmd_val(s, 0);
2462
2463         info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
2464         if (info == NULL) {
2465                 gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x\n",
2466                                 cmd, get_opcode(cmd, s->ring_id));
2467                 return -EBADRQC;
2468         }
2469
2470         s->info = info;
2471
2472         trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
2473                           cmd_length(s), s->buf_type);
2474
2475         if (info->handler) {
2476                 ret = info->handler(s);
2477                 if (ret < 0) {
2478                         gvt_vgpu_err("%s handler error\n", info->name);
2479                         return ret;
2480                 }
2481         }
2482
2483         if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
2484                 ret = cmd_advance_default(s);
2485                 if (ret) {
2486                         gvt_vgpu_err("%s IP advance error\n", info->name);
2487                         return ret;
2488                 }
2489         }
2490         return 0;
2491 }
2492
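     /*
      * A command address is in range when it lies between head and tail.
      * When the tail has wrapped past the top of the ring (tail < head) the
      * valid region itself wraps, so the test is inverted.
      */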
2493 static inline bool gma_out_of_range(unsigned long gma,
2494                 unsigned long gma_head, unsigned int gma_tail)
2495 {
2496         if (gma_tail >= gma_head)
2497                 return (gma < gma_head) || (gma > gma_tail);
2498         else
2499                 return (gma > gma_tail) && (gma < gma_head);
2500 }
2501
2502 /* Keep the return types consistent, e.g. EBADRQC for an unknown
2503  * cmd, EFAULT for an invalid address, EPERM for a non-privileged
2504  * access. These values later serve as input to the VM health status.
2505  */
2506 static int command_scan(struct parser_exec_state *s,
2507                 unsigned long rb_head, unsigned long rb_tail,
2508                 unsigned long rb_start, unsigned long rb_len)
2509 {
2510
2511         unsigned long gma_head, gma_tail, gma_bottom;
2512         int ret = 0;
2513         struct intel_vgpu *vgpu = s->vgpu;
2514
2515         gma_head = rb_start + rb_head;
2516         gma_tail = rb_start + rb_tail;
2517         gma_bottom = rb_start +  rb_len;
2518
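             /*
              * Scan commands from head towards tail, checking on each step
              * that the instruction pointer stays inside the ring and inside
              * the head/tail window before executing its handler.
              */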
2519         while (s->ip_gma != gma_tail) {
2520                 if (s->buf_type == RING_BUFFER_INSTRUCTION) {
2521                         if (s->ip_gma < rb_start ||
2522                                 s->ip_gma >= gma_bottom) {
2523                                 gvt_vgpu_err("ip_gma %lx out of ring scope."
2524                                         "(base:0x%lx, bottom: 0x%lx)\n",
2525                                         s->ip_gma, rb_start,
2526                                         gma_bottom);
2527                                 parser_exec_state_dump(s);
2528                                 return -EFAULT;
2529                         }
2530                         if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
2531                                 gvt_vgpu_err("ip_gma %lx out of range."
2532                                         "base 0x%lx head 0x%lx tail 0x%lx\n",
2533                                         s->ip_gma, rb_start,
2534                                         rb_head, rb_tail);
2535                                 parser_exec_state_dump(s);
2536                                 break;
2537                         }
2538                 }
2539                 ret = cmd_parser_exec(s);
2540                 if (ret) {
2541                         gvt_vgpu_err("cmd parser error\n");
2542                         parser_exec_state_dump(s);
2543                         break;
2544                 }
2545         }
2546
2547         return ret;
2548 }
2549
2550 static int scan_workload(struct intel_vgpu_workload *workload)
2551 {
2552         unsigned long gma_head, gma_tail, gma_bottom;
2553         struct parser_exec_state s;
2554         int ret = 0;
2555
2556         /* ring base is page aligned */
2557         if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
2558                 return -EINVAL;
2559
2560         gma_head = workload->rb_start + workload->rb_head;
2561         gma_tail = workload->rb_start + workload->rb_tail;
2562         gma_bottom = workload->rb_start +  _RING_CTL_BUF_SIZE(workload->rb_ctl);
2563
2564         s.buf_type = RING_BUFFER_INSTRUCTION;
2565         s.buf_addr_type = GTT_BUFFER;
2566         s.vgpu = workload->vgpu;
2567         s.ring_id = workload->ring_id;
2568         s.ring_start = workload->rb_start;
2569         s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2570         s.ring_head = gma_head;
2571         s.ring_tail = gma_tail;
2572         s.rb_va = workload->shadow_ring_buffer_va;
2573         s.workload = workload;
2574
2575         if ((bypass_scan_mask & (1 << workload->ring_id)) ||
2576                 gma_head == gma_tail)
2577                 return 0;
2578
2579         if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
2580                 ret = -EINVAL;
2581                 goto out;
2582         }
2583
2584         ret = ip_gma_set(&s, gma_head);
2585         if (ret)
2586                 goto out;
2587
2588         ret = command_scan(&s, workload->rb_head, workload->rb_tail,
2589                 workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));
2590
2591 out:
2592         return ret;
2593 }
2594
2595 static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2596 {
2597
2598         unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
2599         struct parser_exec_state s;
2600         int ret = 0;
2601         struct intel_vgpu_workload *workload = container_of(wa_ctx,
2602                                 struct intel_vgpu_workload,
2603                                 wa_ctx);
2604
2605         /* ring base is page aligned */
2606         if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
2607                                         I915_GTT_PAGE_SIZE)))
2608                 return -EINVAL;
2609
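             /*
              * The three extra dwords appear to account for the
              * MI_BATCH_BUFFER_START that combine_wa_ctx() appends right
              * after the indirect context image (an assumption based on the
              * per_ctx_start[] commands written there).
              */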
2610         ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
2611         ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
2612                         PAGE_SIZE);
2613         gma_head = wa_ctx->indirect_ctx.guest_gma;
2614         gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
2615         gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;
2616
2617         s.buf_type = RING_BUFFER_INSTRUCTION;
2618         s.buf_addr_type = GTT_BUFFER;
2619         s.vgpu = workload->vgpu;
2620         s.ring_id = workload->ring_id;
2621         s.ring_start = wa_ctx->indirect_ctx.guest_gma;
2622         s.ring_size = ring_size;
2623         s.ring_head = gma_head;
2624         s.ring_tail = gma_tail;
2625         s.rb_va = wa_ctx->indirect_ctx.shadow_va;
2626         s.workload = workload;
2627
2628         if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) {
2629                 ret = -EINVAL;
2630                 goto out;
2631         }
2632
2633         ret = ip_gma_set(&s, gma_head);
2634         if (ret)
2635                 goto out;
2636
2637         ret = command_scan(&s, 0, ring_tail,
2638                 wa_ctx->indirect_ctx.guest_gma, ring_size);
2639 out:
2640         return ret;
2641 }
2642
2643 static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
2644 {
2645         struct intel_vgpu *vgpu = workload->vgpu;
2646         struct intel_vgpu_submission *s = &vgpu->submission;
2647         unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
2648         void *shadow_ring_buffer_va;
2649         int ring_id = workload->ring_id;
2650         int ret;
2651
2652         guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
2653
2654         /* calculate workload ring buffer size (tail may have wrapped) */
2655         workload->rb_len = (workload->rb_tail + guest_rb_size -
2656                         workload->rb_head) % guest_rb_size;
2657
2658         gma_head = workload->rb_start + workload->rb_head;
2659         gma_tail = workload->rb_start + workload->rb_tail;
2660         gma_top = workload->rb_start + guest_rb_size;
2661
2662         if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
2663                 void *p;
2664
2665                 /* reallocate the ring scan buffer if it is too small */
2666                 p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
2667                                 GFP_KERNEL);
2668                 if (!p) {
2669                         gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
2670                         return -ENOMEM;
2671                 }
2672                 s->ring_scan_buffer[ring_id] = p;
2673                 s->ring_scan_buffer_size[ring_id] = workload->rb_len;
2674         }
2675
2676         shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
2677
2678         /* get shadow ring buffer va */
2679         workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
2680
2681         /* head > tail: the ring has wrapped, copy from head to ring top first */
2682         if (gma_head > gma_tail) {
2683                 ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
2684                                       gma_head, gma_top, shadow_ring_buffer_va);
2685                 if (ret < 0) {
2686                         gvt_vgpu_err("fail to copy guest ring buffer\n");
2687                         return ret;
2688                 }
2689                 shadow_ring_buffer_va += ret;
2690                 gma_head = workload->rb_start;
2691         }
2692
2693         /* copy from head (or ring start after a wrap) to tail */
2694         ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm, gma_head, gma_tail,
2695                                 shadow_ring_buffer_va);
2696         if (ret < 0) {
2697                 gvt_vgpu_err("fail to copy guest ring buffer\n");
2698                 return ret;
2699         }
2700         return 0;
2701 }
2702
2703 int intel_gvt_scan_and_shadow_ringbuffer(struct intel_vgpu_workload *workload)
2704 {
2705         int ret;
2706         struct intel_vgpu *vgpu = workload->vgpu;
2707
2708         ret = shadow_workload_ring_buffer(workload);
2709         if (ret) {
2710                 gvt_vgpu_err("fail to shadow workload ring_buffer\n");
2711                 return ret;
2712         }
2713
2714         ret = scan_workload(workload);
2715         if (ret) {
2716                 gvt_vgpu_err("scan workload error\n");
2717                 return ret;
2718         }
2719         return 0;
2720 }
2721
2722 static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2723 {
2724         int ctx_size = wa_ctx->indirect_ctx.size;
2725         unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
2726         struct intel_vgpu_workload *workload = container_of(wa_ctx,
2727                                         struct intel_vgpu_workload,
2728                                         wa_ctx);
2729         struct intel_vgpu *vgpu = workload->vgpu;
2730         struct drm_i915_gem_object *obj;
2731         int ret = 0;
2732         void *map;
2733
2734         obj = i915_gem_object_create(workload->vgpu->gvt->dev_priv,
2735                                      roundup(ctx_size + CACHELINE_BYTES,
2736                                              PAGE_SIZE));
2737         if (IS_ERR(obj))
2738                 return PTR_ERR(obj);
2739
2740         /* map the shadow indirect context object to get its kernel VA */
2741         map = i915_gem_object_pin_map(obj, I915_MAP_WB);
2742         if (IS_ERR(map)) {
2743                 gvt_vgpu_err("failed to vmap shadow indirect ctx\n");
2744                 ret = PTR_ERR(map);
2745                 goto put_obj;
2746         }
2747
2748         ret = i915_gem_object_set_to_cpu_domain(obj, false);
2749         if (ret) {
2750                 gvt_vgpu_err("failed to set shadow indirect ctx to CPU\n");
2751                 goto unmap_src;
2752         }
2753
2754         ret = copy_gma_to_hva(workload->vgpu,
2755                                 workload->vgpu->gtt.ggtt_mm,
2756                                 guest_gma, guest_gma + ctx_size,
2757                                 map);
2758         if (ret < 0) {
2759                 gvt_vgpu_err("failed to copy guest indirect ctx\n");
2760                 goto unmap_src;
2761         }
2762
2763         wa_ctx->indirect_ctx.obj = obj;
2764         wa_ctx->indirect_ctx.shadow_va = map;
2765         return 0;
2766
2767 unmap_src:
2768         i915_gem_object_unpin_map(obj);
2769 put_obj:
2770         i915_gem_object_put(obj);
2771         return ret;
2772 }
2773
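     /*
      * Chain the guest per-context buffer onto the end of the shadowed
      * indirect context by appending a MI_BATCH_BUFFER_START that points at
      * the guest per-context address.
      */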
2774 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2775 {
2776         uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
2777         unsigned char *bb_start_sva;
2778
2779         if (!wa_ctx->per_ctx.valid)
2780                 return 0;
2781
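             /* 0x18800001: MI_BATCH_BUFFER_START (opcode 0x31 in bits 28:23) with a DWord length of 1 */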
2782         per_ctx_start[0] = 0x18800001;
2783         per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;
2784
2785         bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
2786                                 wa_ctx->indirect_ctx.size;
2787
2788         memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);
2789
2790         return 0;
2791 }
2792
2793 int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
2794 {
2795         int ret;
2796         struct intel_vgpu_workload *workload = container_of(wa_ctx,
2797                                         struct intel_vgpu_workload,
2798                                         wa_ctx);
2799         struct intel_vgpu *vgpu = workload->vgpu;
2800
2801         if (wa_ctx->indirect_ctx.size == 0)
2802                 return 0;
2803
2804         ret = shadow_indirect_ctx(wa_ctx);
2805         if (ret) {
2806                 gvt_vgpu_err("failed to shadow indirect ctx\n");
2807                 return ret;
2808         }
2809
2810         combine_wa_ctx(wa_ctx);
2811
2812         ret = scan_wa_ctx(wa_ctx);
2813         if (ret) {
2814                 gvt_vgpu_err("failed to scan wa ctx\n");
2815                 return ret;
2816         }
2817
2818         return 0;
2819 }
2820
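     /*
      * Look the opcode up on every ring in the given mask; used below to
      * detect duplicate command table entries.
      */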
2821 static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
2822                 unsigned int opcode, unsigned long rings)
2823 {
2824         struct cmd_info *info = NULL;
2825         unsigned int ring;
2826
2827         for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
2828                 info = find_cmd_entry(gvt, opcode, ring);
2829                 if (info)
2830                         break;
2831         }
2832         return info;
2833 }
2834
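     /*
      * Build the per-device command hash table from cmd_info[], skipping
      * entries that do not apply to this device generation and rejecting
      * duplicate opcodes.
      */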
2835 static int init_cmd_table(struct intel_gvt *gvt)
2836 {
2837         int i;
2838         struct cmd_entry *e;
2839         struct cmd_info *info;
2840         unsigned int gen_type;
2841
2842         gen_type = intel_gvt_get_device_type(gvt);
2843
2844         for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
2845                 if (!(cmd_info[i].devices & gen_type))
2846                         continue;
2847
2848                 e = kzalloc(sizeof(*e), GFP_KERNEL);
2849                 if (!e)
2850                         return -ENOMEM;
2851
2852                 e->info = &cmd_info[i];
2853                 info = find_cmd_entry_any_ring(gvt,
2854                                 e->info->opcode, e->info->rings);
2855                 if (info) {
2856                         gvt_err("%s %s duplicated\n", e->info->name,
2857                                         info->name);
                             kfree(e);       /* free the entry that was never inserted */
2858                         return -EEXIST;
2859                 }
2860
2861                 INIT_HLIST_NODE(&e->hlist);
2862                 add_cmd_entry(gvt, e);
2863                 gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
2864                                 e->info->name, e->info->opcode, e->info->flag,
2865                                 e->info->devices, e->info->rings);
2866         }
2867         return 0;
2868 }
2869
2870 static void clean_cmd_table(struct intel_gvt *gvt)
2871 {
2872         struct hlist_node *tmp;
2873         struct cmd_entry *e;
2874         int i;
2875
2876         hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
2877                 kfree(e);
2878
2879         hash_init(gvt->cmd_table);
2880 }
2881
2882 void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
2883 {
2884         clean_cmd_table(gvt);
2885 }
2886
2887 int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
2888 {
2889         int ret;
2890
2891         ret = init_cmd_table(gvt);
2892         if (ret) {
2893                 intel_gvt_clean_cmd_parser(gvt);
2894                 return ret;
2895         }
2896         return 0;
2897 }