2 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
25 * Kevin Tian <kevin.tian@intel.com>
26 * Zhiyuan Lv <zhiyuan.lv@intel.com>
29 * Min He <min.he@intel.com>
30 * Ping Gao <ping.a.gao@intel.com>
31 * Tina Zhang <tina.zhang@intel.com>
32 * Yulei Zhang <yulei.zhang@intel.com>
33 * Zhi Wang <zhi.a.wang@intel.com>
37 #include <linux/slab.h>
40 #include "i915_pvinfo.h"
43 #define INVALID_OP (~0U)
47 #define OP_LEN_3D_MEDIA 16
48 #define OP_LEN_MFX_VC 16
49 #define OP_LEN_VEBOX 16
51 #define CMD_TYPE(cmd) (((cmd) >> 29) & 7)
61 struct sub_op_bits
*sub_op
;
64 #define MAX_CMD_BUDGET 0x7fffffff
65 #define MI_WAIT_FOR_PLANE_C_FLIP_PENDING (1<<15)
66 #define MI_WAIT_FOR_PLANE_B_FLIP_PENDING (1<<9)
67 #define MI_WAIT_FOR_PLANE_A_FLIP_PENDING (1<<1)
69 #define MI_WAIT_FOR_SPRITE_C_FLIP_PENDING (1<<20)
70 #define MI_WAIT_FOR_SPRITE_B_FLIP_PENDING (1<<10)
71 #define MI_WAIT_FOR_SPRITE_A_FLIP_PENDING (1<<2)
73 /* Render Command Map */
75 /* MI_* command Opcode (28:23) */
76 #define OP_MI_NOOP 0x0
77 #define OP_MI_SET_PREDICATE 0x1 /* HSW+ */
78 #define OP_MI_USER_INTERRUPT 0x2
79 #define OP_MI_WAIT_FOR_EVENT 0x3
80 #define OP_MI_FLUSH 0x4
81 #define OP_MI_ARB_CHECK 0x5
82 #define OP_MI_RS_CONTROL 0x6 /* HSW+ */
83 #define OP_MI_REPORT_HEAD 0x7
84 #define OP_MI_ARB_ON_OFF 0x8
85 #define OP_MI_URB_ATOMIC_ALLOC 0x9 /* HSW+ */
86 #define OP_MI_BATCH_BUFFER_END 0xA
87 #define OP_MI_SUSPEND_FLUSH 0xB
88 #define OP_MI_PREDICATE 0xC /* IVB+ */
89 #define OP_MI_TOPOLOGY_FILTER 0xD /* IVB+ */
90 #define OP_MI_SET_APPID 0xE /* IVB+ */
91 #define OP_MI_RS_CONTEXT 0xF /* HSW+ */
92 #define OP_MI_LOAD_SCAN_LINES_INCL 0x12 /* HSW+ */
93 #define OP_MI_DISPLAY_FLIP 0x14
94 #define OP_MI_SEMAPHORE_MBOX 0x16
95 #define OP_MI_SET_CONTEXT 0x18
96 #define OP_MI_MATH 0x1A
97 #define OP_MI_URB_CLEAR 0x19
98 #define OP_MI_SEMAPHORE_SIGNAL 0x1B /* BDW+ */
99 #define OP_MI_SEMAPHORE_WAIT 0x1C /* BDW+ */
101 #define OP_MI_STORE_DATA_IMM 0x20
102 #define OP_MI_STORE_DATA_INDEX 0x21
103 #define OP_MI_LOAD_REGISTER_IMM 0x22
104 #define OP_MI_UPDATE_GTT 0x23
105 #define OP_MI_STORE_REGISTER_MEM 0x24
106 #define OP_MI_FLUSH_DW 0x26
107 #define OP_MI_CLFLUSH 0x27
108 #define OP_MI_REPORT_PERF_COUNT 0x28
109 #define OP_MI_LOAD_REGISTER_MEM 0x29 /* HSW+ */
110 #define OP_MI_LOAD_REGISTER_REG 0x2A /* HSW+ */
111 #define OP_MI_RS_STORE_DATA_IMM 0x2B /* HSW+ */
112 #define OP_MI_LOAD_URB_MEM 0x2C /* HSW+ */
113 #define OP_MI_STORE_URM_MEM 0x2D /* HSW+ */
114 #define OP_MI_2E 0x2E /* BDW+ */
115 #define OP_MI_2F 0x2F /* BDW+ */
116 #define OP_MI_BATCH_BUFFER_START 0x31
118 /* Bit definition for dword 0 */
119 #define _CMDBIT_BB_START_IN_PPGTT (1UL << 8)
121 #define OP_MI_CONDITIONAL_BATCH_BUFFER_END 0x36
123 #define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
124 #define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
125 #define BATCH_BUFFER_ADR_SPACE_BIT(x) (((x) >> 8) & 1U)
126 #define BATCH_BUFFER_2ND_LEVEL_BIT(x) ((x) >> 22 & 1U)
128 /* 2D command: Opcode (28:22) */
129 #define OP_2D(x) ((2<<7) | x)
131 #define OP_XY_SETUP_BLT OP_2D(0x1)
132 #define OP_XY_SETUP_CLIP_BLT OP_2D(0x3)
133 #define OP_XY_SETUP_MONO_PATTERN_SL_BLT OP_2D(0x11)
134 #define OP_XY_PIXEL_BLT OP_2D(0x24)
135 #define OP_XY_SCANLINES_BLT OP_2D(0x25)
136 #define OP_XY_TEXT_BLT OP_2D(0x26)
137 #define OP_XY_TEXT_IMMEDIATE_BLT OP_2D(0x31)
138 #define OP_XY_COLOR_BLT OP_2D(0x50)
139 #define OP_XY_PAT_BLT OP_2D(0x51)
140 #define OP_XY_MONO_PAT_BLT OP_2D(0x52)
141 #define OP_XY_SRC_COPY_BLT OP_2D(0x53)
142 #define OP_XY_MONO_SRC_COPY_BLT OP_2D(0x54)
143 #define OP_XY_FULL_BLT OP_2D(0x55)
144 #define OP_XY_FULL_MONO_SRC_BLT OP_2D(0x56)
145 #define OP_XY_FULL_MONO_PATTERN_BLT OP_2D(0x57)
146 #define OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT OP_2D(0x58)
147 #define OP_XY_MONO_PAT_FIXED_BLT OP_2D(0x59)
148 #define OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT OP_2D(0x71)
149 #define OP_XY_PAT_BLT_IMMEDIATE OP_2D(0x72)
150 #define OP_XY_SRC_COPY_CHROMA_BLT OP_2D(0x73)
151 #define OP_XY_FULL_IMMEDIATE_PATTERN_BLT OP_2D(0x74)
152 #define OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT OP_2D(0x75)
153 #define OP_XY_PAT_CHROMA_BLT OP_2D(0x76)
154 #define OP_XY_PAT_CHROMA_BLT_IMMEDIATE OP_2D(0x77)
156 /* 3D/Media Command: Pipeline Type(28:27) Opcode(26:24) Sub Opcode(23:16) */
157 #define OP_3D_MEDIA(sub_type, opcode, sub_opcode) \
158 ((3 << 13) | ((sub_type) << 11) | ((opcode) << 8) | (sub_opcode))
160 #define OP_STATE_PREFETCH OP_3D_MEDIA(0x0, 0x0, 0x03)
162 #define OP_STATE_BASE_ADDRESS OP_3D_MEDIA(0x0, 0x1, 0x01)
163 #define OP_STATE_SIP OP_3D_MEDIA(0x0, 0x1, 0x02)
164 #define OP_3D_MEDIA_0_1_4 OP_3D_MEDIA(0x0, 0x1, 0x04)
166 #define OP_3DSTATE_VF_STATISTICS_GM45 OP_3D_MEDIA(0x1, 0x0, 0x0B)
168 #define OP_PIPELINE_SELECT OP_3D_MEDIA(0x1, 0x1, 0x04)
170 #define OP_MEDIA_VFE_STATE OP_3D_MEDIA(0x2, 0x0, 0x0)
171 #define OP_MEDIA_CURBE_LOAD OP_3D_MEDIA(0x2, 0x0, 0x1)
172 #define OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD OP_3D_MEDIA(0x2, 0x0, 0x2)
173 #define OP_MEDIA_GATEWAY_STATE OP_3D_MEDIA(0x2, 0x0, 0x3)
174 #define OP_MEDIA_STATE_FLUSH OP_3D_MEDIA(0x2, 0x0, 0x4)
176 #define OP_MEDIA_OBJECT OP_3D_MEDIA(0x2, 0x1, 0x0)
177 #define OP_MEDIA_OBJECT_PRT OP_3D_MEDIA(0x2, 0x1, 0x2)
178 #define OP_MEDIA_OBJECT_WALKER OP_3D_MEDIA(0x2, 0x1, 0x3)
179 #define OP_GPGPU_WALKER OP_3D_MEDIA(0x2, 0x1, 0x5)
181 #define OP_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x0, 0x04) /* IVB+ */
182 #define OP_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x05) /* IVB+ */
183 #define OP_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x06) /* IVB+ */
184 #define OP_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x07) /* IVB+ */
185 #define OP_3DSTATE_VERTEX_BUFFERS OP_3D_MEDIA(0x3, 0x0, 0x08)
186 #define OP_3DSTATE_VERTEX_ELEMENTS OP_3D_MEDIA(0x3, 0x0, 0x09)
187 #define OP_3DSTATE_INDEX_BUFFER OP_3D_MEDIA(0x3, 0x0, 0x0A)
188 #define OP_3DSTATE_VF_STATISTICS OP_3D_MEDIA(0x3, 0x0, 0x0B)
189 #define OP_3DSTATE_VF OP_3D_MEDIA(0x3, 0x0, 0x0C) /* HSW+ */
190 #define OP_3DSTATE_CC_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0E)
191 #define OP_3DSTATE_SCISSOR_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x0F)
192 #define OP_3DSTATE_VS OP_3D_MEDIA(0x3, 0x0, 0x10)
193 #define OP_3DSTATE_GS OP_3D_MEDIA(0x3, 0x0, 0x11)
194 #define OP_3DSTATE_CLIP OP_3D_MEDIA(0x3, 0x0, 0x12)
195 #define OP_3DSTATE_SF OP_3D_MEDIA(0x3, 0x0, 0x13)
196 #define OP_3DSTATE_WM OP_3D_MEDIA(0x3, 0x0, 0x14)
197 #define OP_3DSTATE_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x15)
198 #define OP_3DSTATE_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x16)
199 #define OP_3DSTATE_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x17)
200 #define OP_3DSTATE_SAMPLE_MASK OP_3D_MEDIA(0x3, 0x0, 0x18)
201 #define OP_3DSTATE_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x19) /* IVB+ */
202 #define OP_3DSTATE_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x1A) /* IVB+ */
203 #define OP_3DSTATE_HS OP_3D_MEDIA(0x3, 0x0, 0x1B) /* IVB+ */
204 #define OP_3DSTATE_TE OP_3D_MEDIA(0x3, 0x0, 0x1C) /* IVB+ */
205 #define OP_3DSTATE_DS OP_3D_MEDIA(0x3, 0x0, 0x1D) /* IVB+ */
206 #define OP_3DSTATE_STREAMOUT OP_3D_MEDIA(0x3, 0x0, 0x1E) /* IVB+ */
207 #define OP_3DSTATE_SBE OP_3D_MEDIA(0x3, 0x0, 0x1F) /* IVB+ */
208 #define OP_3DSTATE_PS OP_3D_MEDIA(0x3, 0x0, 0x20) /* IVB+ */
209 #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP OP_3D_MEDIA(0x3, 0x0, 0x21) /* IVB+ */
210 #define OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC OP_3D_MEDIA(0x3, 0x0, 0x23) /* IVB+ */
211 #define OP_3DSTATE_BLEND_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x24) /* IVB+ */
212 #define OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS OP_3D_MEDIA(0x3, 0x0, 0x25) /* IVB+ */
213 #define OP_3DSTATE_BINDING_TABLE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x26) /* IVB+ */
214 #define OP_3DSTATE_BINDING_TABLE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x27) /* IVB+ */
215 #define OP_3DSTATE_BINDING_TABLE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x28) /* IVB+ */
216 #define OP_3DSTATE_BINDING_TABLE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x29) /* IVB+ */
217 #define OP_3DSTATE_BINDING_TABLE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2A) /* IVB+ */
218 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_VS OP_3D_MEDIA(0x3, 0x0, 0x2B) /* IVB+ */
219 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_HS OP_3D_MEDIA(0x3, 0x0, 0x2C) /* IVB+ */
220 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_DS OP_3D_MEDIA(0x3, 0x0, 0x2D) /* IVB+ */
221 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_GS OP_3D_MEDIA(0x3, 0x0, 0x2E) /* IVB+ */
222 #define OP_3DSTATE_SAMPLER_STATE_POINTERS_PS OP_3D_MEDIA(0x3, 0x0, 0x2F) /* IVB+ */
223 #define OP_3DSTATE_URB_VS OP_3D_MEDIA(0x3, 0x0, 0x30) /* IVB+ */
224 #define OP_3DSTATE_URB_HS OP_3D_MEDIA(0x3, 0x0, 0x31) /* IVB+ */
225 #define OP_3DSTATE_URB_DS OP_3D_MEDIA(0x3, 0x0, 0x32) /* IVB+ */
226 #define OP_3DSTATE_URB_GS OP_3D_MEDIA(0x3, 0x0, 0x33) /* IVB+ */
227 #define OP_3DSTATE_GATHER_CONSTANT_VS OP_3D_MEDIA(0x3, 0x0, 0x34) /* HSW+ */
228 #define OP_3DSTATE_GATHER_CONSTANT_GS OP_3D_MEDIA(0x3, 0x0, 0x35) /* HSW+ */
229 #define OP_3DSTATE_GATHER_CONSTANT_HS OP_3D_MEDIA(0x3, 0x0, 0x36) /* HSW+ */
230 #define OP_3DSTATE_GATHER_CONSTANT_DS OP_3D_MEDIA(0x3, 0x0, 0x37) /* HSW+ */
231 #define OP_3DSTATE_GATHER_CONSTANT_PS OP_3D_MEDIA(0x3, 0x0, 0x38) /* HSW+ */
232 #define OP_3DSTATE_DX9_CONSTANTF_VS OP_3D_MEDIA(0x3, 0x0, 0x39) /* HSW+ */
233 #define OP_3DSTATE_DX9_CONSTANTF_PS OP_3D_MEDIA(0x3, 0x0, 0x3A) /* HSW+ */
234 #define OP_3DSTATE_DX9_CONSTANTI_VS OP_3D_MEDIA(0x3, 0x0, 0x3B) /* HSW+ */
235 #define OP_3DSTATE_DX9_CONSTANTI_PS OP_3D_MEDIA(0x3, 0x0, 0x3C) /* HSW+ */
236 #define OP_3DSTATE_DX9_CONSTANTB_VS OP_3D_MEDIA(0x3, 0x0, 0x3D) /* HSW+ */
237 #define OP_3DSTATE_DX9_CONSTANTB_PS OP_3D_MEDIA(0x3, 0x0, 0x3E) /* HSW+ */
238 #define OP_3DSTATE_DX9_LOCAL_VALID_VS OP_3D_MEDIA(0x3, 0x0, 0x3F) /* HSW+ */
239 #define OP_3DSTATE_DX9_LOCAL_VALID_PS OP_3D_MEDIA(0x3, 0x0, 0x40) /* HSW+ */
240 #define OP_3DSTATE_DX9_GENERATE_ACTIVE_VS OP_3D_MEDIA(0x3, 0x0, 0x41) /* HSW+ */
241 #define OP_3DSTATE_DX9_GENERATE_ACTIVE_PS OP_3D_MEDIA(0x3, 0x0, 0x42) /* HSW+ */
242 #define OP_3DSTATE_BINDING_TABLE_EDIT_VS OP_3D_MEDIA(0x3, 0x0, 0x43) /* HSW+ */
243 #define OP_3DSTATE_BINDING_TABLE_EDIT_GS OP_3D_MEDIA(0x3, 0x0, 0x44) /* HSW+ */
244 #define OP_3DSTATE_BINDING_TABLE_EDIT_HS OP_3D_MEDIA(0x3, 0x0, 0x45) /* HSW+ */
245 #define OP_3DSTATE_BINDING_TABLE_EDIT_DS OP_3D_MEDIA(0x3, 0x0, 0x46) /* HSW+ */
246 #define OP_3DSTATE_BINDING_TABLE_EDIT_PS OP_3D_MEDIA(0x3, 0x0, 0x47) /* HSW+ */
248 #define OP_3DSTATE_VF_INSTANCING OP_3D_MEDIA(0x3, 0x0, 0x49) /* BDW+ */
249 #define OP_3DSTATE_VF_SGVS OP_3D_MEDIA(0x3, 0x0, 0x4A) /* BDW+ */
250 #define OP_3DSTATE_VF_TOPOLOGY OP_3D_MEDIA(0x3, 0x0, 0x4B) /* BDW+ */
251 #define OP_3DSTATE_WM_CHROMAKEY OP_3D_MEDIA(0x3, 0x0, 0x4C) /* BDW+ */
252 #define OP_3DSTATE_PS_BLEND OP_3D_MEDIA(0x3, 0x0, 0x4D) /* BDW+ */
253 #define OP_3DSTATE_WM_DEPTH_STENCIL OP_3D_MEDIA(0x3, 0x0, 0x4E) /* BDW+ */
254 #define OP_3DSTATE_PS_EXTRA OP_3D_MEDIA(0x3, 0x0, 0x4F) /* BDW+ */
255 #define OP_3DSTATE_RASTER OP_3D_MEDIA(0x3, 0x0, 0x50) /* BDW+ */
256 #define OP_3DSTATE_SBE_SWIZ OP_3D_MEDIA(0x3, 0x0, 0x51) /* BDW+ */
257 #define OP_3DSTATE_WM_HZ_OP OP_3D_MEDIA(0x3, 0x0, 0x52) /* BDW+ */
258 #define OP_3DSTATE_COMPONENT_PACKING OP_3D_MEDIA(0x3, 0x0, 0x55) /* SKL+ */
260 #define OP_3DSTATE_DRAWING_RECTANGLE OP_3D_MEDIA(0x3, 0x1, 0x00)
261 #define OP_3DSTATE_SAMPLER_PALETTE_LOAD0 OP_3D_MEDIA(0x3, 0x1, 0x02)
262 #define OP_3DSTATE_CHROMA_KEY OP_3D_MEDIA(0x3, 0x1, 0x04)
263 #define OP_SNB_3DSTATE_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x05)
264 #define OP_3DSTATE_POLY_STIPPLE_OFFSET OP_3D_MEDIA(0x3, 0x1, 0x06)
265 #define OP_3DSTATE_POLY_STIPPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x07)
266 #define OP_3DSTATE_LINE_STIPPLE OP_3D_MEDIA(0x3, 0x1, 0x08)
267 #define OP_3DSTATE_AA_LINE_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x0A)
268 #define OP_3DSTATE_GS_SVB_INDEX OP_3D_MEDIA(0x3, 0x1, 0x0B)
269 #define OP_3DSTATE_SAMPLER_PALETTE_LOAD1 OP_3D_MEDIA(0x3, 0x1, 0x0C)
270 #define OP_3DSTATE_MULTISAMPLE_BDW OP_3D_MEDIA(0x3, 0x0, 0x0D)
271 #define OP_SNB_3DSTATE_STENCIL_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0E)
272 #define OP_SNB_3DSTATE_HIER_DEPTH_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x0F)
273 #define OP_SNB_3DSTATE_CLEAR_PARAMS OP_3D_MEDIA(0x3, 0x1, 0x10)
274 #define OP_3DSTATE_MONOFILTER_SIZE OP_3D_MEDIA(0x3, 0x1, 0x11)
275 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS OP_3D_MEDIA(0x3, 0x1, 0x12) /* IVB+ */
276 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS OP_3D_MEDIA(0x3, 0x1, 0x13) /* IVB+ */
277 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS OP_3D_MEDIA(0x3, 0x1, 0x14) /* IVB+ */
278 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS OP_3D_MEDIA(0x3, 0x1, 0x15) /* IVB+ */
279 #define OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS OP_3D_MEDIA(0x3, 0x1, 0x16) /* IVB+ */
280 #define OP_3DSTATE_SO_DECL_LIST OP_3D_MEDIA(0x3, 0x1, 0x17)
281 #define OP_3DSTATE_SO_BUFFER OP_3D_MEDIA(0x3, 0x1, 0x18)
282 #define OP_3DSTATE_BINDING_TABLE_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x19) /* HSW+ */
283 #define OP_3DSTATE_GATHER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1A) /* HSW+ */
284 #define OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC OP_3D_MEDIA(0x3, 0x1, 0x1B) /* HSW+ */
285 #define OP_3DSTATE_SAMPLE_PATTERN OP_3D_MEDIA(0x3, 0x1, 0x1C)
286 #define OP_PIPE_CONTROL OP_3D_MEDIA(0x3, 0x2, 0x00)
287 #define OP_3DPRIMITIVE OP_3D_MEDIA(0x3, 0x3, 0x00)
289 /* VCCP Command Parser */
292 * Below MFX and VBE cmd definition is from vaapi intel driver project (BSD License)
293 * git://anongit.freedesktop.org/vaapi/intel-driver
298 #define OP_MFX(pipeline, op, sub_opa, sub_opb) \
305 #define OP_MFX_PIPE_MODE_SELECT OP_MFX(2, 0, 0, 0) /* ALL */
306 #define OP_MFX_SURFACE_STATE OP_MFX(2, 0, 0, 1) /* ALL */
307 #define OP_MFX_PIPE_BUF_ADDR_STATE OP_MFX(2, 0, 0, 2) /* ALL */
308 #define OP_MFX_IND_OBJ_BASE_ADDR_STATE OP_MFX(2, 0, 0, 3) /* ALL */
309 #define OP_MFX_BSP_BUF_BASE_ADDR_STATE OP_MFX(2, 0, 0, 4) /* ALL */
310 #define OP_2_0_0_5 OP_MFX(2, 0, 0, 5) /* ALL */
311 #define OP_MFX_STATE_POINTER OP_MFX(2, 0, 0, 6) /* ALL */
312 #define OP_MFX_QM_STATE OP_MFX(2, 0, 0, 7) /* IVB+ */
313 #define OP_MFX_FQM_STATE OP_MFX(2, 0, 0, 8) /* IVB+ */
314 #define OP_MFX_PAK_INSERT_OBJECT OP_MFX(2, 0, 2, 8) /* IVB+ */
315 #define OP_MFX_STITCH_OBJECT OP_MFX(2, 0, 2, 0xA) /* IVB+ */
317 #define OP_MFD_IT_OBJECT OP_MFX(2, 0, 1, 9) /* ALL */
319 #define OP_MFX_WAIT OP_MFX(1, 0, 0, 0) /* IVB+ */
320 #define OP_MFX_AVC_IMG_STATE OP_MFX(2, 1, 0, 0) /* ALL */
321 #define OP_MFX_AVC_QM_STATE OP_MFX(2, 1, 0, 1) /* ALL */
322 #define OP_MFX_AVC_DIRECTMODE_STATE OP_MFX(2, 1, 0, 2) /* ALL */
323 #define OP_MFX_AVC_SLICE_STATE OP_MFX(2, 1, 0, 3) /* ALL */
324 #define OP_MFX_AVC_REF_IDX_STATE OP_MFX(2, 1, 0, 4) /* ALL */
325 #define OP_MFX_AVC_WEIGHTOFFSET_STATE OP_MFX(2, 1, 0, 5) /* ALL */
326 #define OP_MFD_AVC_PICID_STATE OP_MFX(2, 1, 1, 5) /* HSW+ */
327 #define OP_MFD_AVC_DPB_STATE OP_MFX(2, 1, 1, 6) /* IVB+ */
328 #define OP_MFD_AVC_SLICEADDR OP_MFX(2, 1, 1, 7) /* IVB+ */
329 #define OP_MFD_AVC_BSD_OBJECT OP_MFX(2, 1, 1, 8) /* ALL */
330 #define OP_MFC_AVC_PAK_OBJECT OP_MFX(2, 1, 2, 9) /* ALL */
332 #define OP_MFX_VC1_PRED_PIPE_STATE OP_MFX(2, 2, 0, 1) /* ALL */
333 #define OP_MFX_VC1_DIRECTMODE_STATE OP_MFX(2, 2, 0, 2) /* ALL */
334 #define OP_MFD_VC1_SHORT_PIC_STATE OP_MFX(2, 2, 1, 0) /* IVB+ */
335 #define OP_MFD_VC1_LONG_PIC_STATE OP_MFX(2, 2, 1, 1) /* IVB+ */
336 #define OP_MFD_VC1_BSD_OBJECT OP_MFX(2, 2, 1, 8) /* ALL */
338 #define OP_MFX_MPEG2_PIC_STATE OP_MFX(2, 3, 0, 0) /* ALL */
339 #define OP_MFX_MPEG2_QM_STATE OP_MFX(2, 3, 0, 1) /* ALL */
340 #define OP_MFD_MPEG2_BSD_OBJECT OP_MFX(2, 3, 1, 8) /* ALL */
341 #define OP_MFC_MPEG2_SLICEGROUP_STATE OP_MFX(2, 3, 2, 3) /* ALL */
342 #define OP_MFC_MPEG2_PAK_OBJECT OP_MFX(2, 3, 2, 9) /* ALL */
344 #define OP_MFX_2_6_0_0 OP_MFX(2, 6, 0, 0) /* IVB+ */
345 #define OP_MFX_2_6_0_8 OP_MFX(2, 6, 0, 8) /* IVB+ */
346 #define OP_MFX_2_6_0_9 OP_MFX(2, 6, 0, 9) /* IVB+ */
348 #define OP_MFX_JPEG_PIC_STATE OP_MFX(2, 7, 0, 0)
349 #define OP_MFX_JPEG_HUFF_TABLE_STATE OP_MFX(2, 7, 0, 2)
350 #define OP_MFD_JPEG_BSD_OBJECT OP_MFX(2, 7, 1, 8)
352 #define OP_VEB(pipeline, op, sub_opa, sub_opb) \
359 #define OP_VEB_SURFACE_STATE OP_VEB(2, 4, 0, 0)
360 #define OP_VEB_STATE OP_VEB(2, 4, 0, 2)
361 #define OP_VEB_DNDI_IECP_STATE OP_VEB(2, 4, 0, 3)
363 struct parser_exec_state
;
365 typedef int (*parser_cmd_handler
)(struct parser_exec_state
*s
);
367 #define GVT_CMD_HASH_BITS 7
369 /* which DWords need address fix */
370 #define ADDR_FIX_1(x1) (1 << (x1))
371 #define ADDR_FIX_2(x1, x2) (ADDR_FIX_1(x1) | ADDR_FIX_1(x2))
372 #define ADDR_FIX_3(x1, x2, x3) (ADDR_FIX_1(x1) | ADDR_FIX_2(x2, x3))
373 #define ADDR_FIX_4(x1, x2, x3, x4) (ADDR_FIX_1(x1) | ADDR_FIX_3(x2, x3, x4))
374 #define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))
380 #define F_LEN_MASK (1U<<0)
381 #define F_LEN_CONST 1U
385 * command has its own ip advance logic
386 * e.g. MI_BATCH_START, MI_BATCH_END
388 #define F_IP_ADVANCE_CUSTOM (1<<1)
390 #define F_POST_HANDLE (1<<2)
393 #define R_RCS (1 << RCS)
394 #define R_VCS1 (1 << VCS)
395 #define R_VCS2 (1 << VCS2)
396 #define R_VCS (R_VCS1 | R_VCS2)
397 #define R_BCS (1 << BCS)
398 #define R_VECS (1 << VECS)
399 #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
400 /* rings that support this cmd: BLT/RCS/VCS/VECS */
403 /* devices that support this cmd: SNB/IVB/HSW/... */
406 /* which DWords are address that need fix up.
407 * bit 0 means a 32-bit non address operand in command
408 * bit 1 means address operand, which could be 32-bit
409 * or 64-bit depending on different architectures.(
410 * defined by "gmadr_bytes_in_cmd" in intel_gvt.
411 * No matter the address length, each address only takes
412 * one bit in the bitmap.
414 uint16_t addr_bitmap
;
416 /* flag == F_LEN_CONST : command length
417 * flag == F_LEN_VAR : length bias bits
418 * Note: length is in DWord
422 parser_cmd_handler handler
;
426 struct hlist_node hlist
;
427 struct cmd_info
*info
;
431 RING_BUFFER_INSTRUCTION
,
432 BATCH_BUFFER_INSTRUCTION
,
433 BATCH_BUFFER_2ND_LEVEL
,
441 struct parser_exec_state
{
442 struct intel_vgpu
*vgpu
;
447 /* batch buffer address type */
450 /* graphics memory address of ring buffer start */
451 unsigned long ring_start
;
452 unsigned long ring_size
;
453 unsigned long ring_head
;
454 unsigned long ring_tail
;
456 /* instruction graphics memory address */
457 unsigned long ip_gma
;
459 /* mapped va of the instr_gma */
464 /* next instruction when return from batch buffer to ring buffer */
465 unsigned long ret_ip_gma_ring
;
467 /* next instruction when return from 2nd batch buffer to batch buffer */
468 unsigned long ret_ip_gma_bb
;
470 /* batch buffer address type (GTT or PPGTT)
471 * used when ret from 2nd level batch buffer
473 int saved_buf_addr_type
;
475 struct cmd_info
*info
;
477 struct intel_vgpu_workload
*workload
;
480 #define gmadr_dw_number(s) \
481 (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
483 static unsigned long bypass_scan_mask
= 0;
485 /* ring ALL, type = 0 */
486 static struct sub_op_bits sub_op_mi
[] = {
491 static struct decode_info decode_info_mi
= {
494 ARRAY_SIZE(sub_op_mi
),
498 /* ring RCS, command type 2 */
499 static struct sub_op_bits sub_op_2d
[] = {
504 static struct decode_info decode_info_2d
= {
507 ARRAY_SIZE(sub_op_2d
),
511 /* ring RCS, command type 3 */
512 static struct sub_op_bits sub_op_3d_media
[] = {
519 static struct decode_info decode_info_3d_media
= {
522 ARRAY_SIZE(sub_op_3d_media
),
526 /* ring VCS, command type 3 */
527 static struct sub_op_bits sub_op_mfx_vc
[] = {
535 static struct decode_info decode_info_mfx_vc
= {
538 ARRAY_SIZE(sub_op_mfx_vc
),
542 /* ring VECS, command type 3 */
543 static struct sub_op_bits sub_op_vebox
[] = {
551 static struct decode_info decode_info_vebox
= {
554 ARRAY_SIZE(sub_op_vebox
),
558 static struct decode_info
*ring_decode_info
[I915_NUM_ENGINES
][8] = {
563 &decode_info_3d_media
,
615 static inline u32
get_opcode(u32 cmd
, int ring_id
)
617 struct decode_info
*d_info
;
619 if (ring_id
>= I915_NUM_ENGINES
)
622 d_info
= ring_decode_info
[ring_id
][CMD_TYPE(cmd
)];
626 return cmd
>> (32 - d_info
->op_len
);
629 static inline struct cmd_info
*find_cmd_entry(struct intel_gvt
*gvt
,
630 unsigned int opcode
, int ring_id
)
634 hash_for_each_possible(gvt
->cmd_table
, e
, hlist
, opcode
) {
635 if ((opcode
== e
->info
->opcode
) &&
636 (e
->info
->rings
& (1 << ring_id
)))
642 static inline struct cmd_info
*get_cmd_info(struct intel_gvt
*gvt
,
643 u32 cmd
, int ring_id
)
647 opcode
= get_opcode(cmd
, ring_id
);
648 if (opcode
== INVALID_OP
)
651 return find_cmd_entry(gvt
, opcode
, ring_id
);
654 static inline u32
sub_op_val(u32 cmd
, u32 hi
, u32 low
)
656 return (cmd
>> low
) & ((1U << (hi
- low
+ 1)) - 1);
659 static inline void print_opcode(u32 cmd
, int ring_id
)
661 struct decode_info
*d_info
;
664 if (ring_id
>= I915_NUM_ENGINES
)
667 d_info
= ring_decode_info
[ring_id
][CMD_TYPE(cmd
)];
671 gvt_err("opcode=0x%x %s sub_ops:",
672 cmd
>> (32 - d_info
->op_len
), d_info
->name
);
674 for (i
= 0; i
< d_info
->nr_sub_op
; i
++)
675 pr_err("0x%x ", sub_op_val(cmd
, d_info
->sub_op
[i
].hi
,
676 d_info
->sub_op
[i
].low
));
681 static inline u32
*cmd_ptr(struct parser_exec_state
*s
, int index
)
683 return s
->ip_va
+ (index
<< 2);
686 static inline u32
cmd_val(struct parser_exec_state
*s
, int index
)
688 return *cmd_ptr(s
, index
);
691 static void parser_exec_state_dump(struct parser_exec_state
*s
)
696 gvt_err(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
697 " ring_head(%08lx) ring_tail(%08lx)\n", s
->vgpu
->id
,
698 s
->ring_id
, s
->ring_start
, s
->ring_start
+ s
->ring_size
,
699 s
->ring_head
, s
->ring_tail
);
701 gvt_err(" %s %s ip_gma(%08lx) ",
702 s
->buf_type
== RING_BUFFER_INSTRUCTION
?
703 "RING_BUFFER" : "BATCH_BUFFER",
704 s
->buf_addr_type
== GTT_BUFFER
?
705 "GTT" : "PPGTT", s
->ip_gma
);
707 if (s
->ip_va
== NULL
) {
708 gvt_err(" ip_va(NULL)");
712 gvt_err(" ip_va=%p: %08x %08x %08x %08x\n",
713 s
->ip_va
, cmd_val(s
, 0), cmd_val(s
, 1),
714 cmd_val(s
, 2), cmd_val(s
, 3));
716 print_opcode(cmd_val(s
, 0), s
->ring_id
);
718 /* print the whole page to trace */
719 pr_err(" ip_va=%p: %08x %08x %08x %08x\n",
720 s
->ip_va
, cmd_val(s
, 0), cmd_val(s
, 1),
721 cmd_val(s
, 2), cmd_val(s
, 3));
723 s
->ip_va
= (u32
*)((((u64
)s
->ip_va
) >> 12) << 12);
726 pr_err("ip_va=%p: ", s
->ip_va
);
727 for (i
= 0; i
< 8; i
++)
728 pr_err("%08x ", cmd_val(s
, i
));
731 s
->ip_va
+= 8 * sizeof(u32
);
736 static inline void update_ip_va(struct parser_exec_state
*s
)
738 unsigned long len
= 0;
740 if (WARN_ON(s
->ring_head
== s
->ring_tail
))
743 if (s
->buf_type
== RING_BUFFER_INSTRUCTION
) {
744 unsigned long ring_top
= s
->ring_start
+ s
->ring_size
;
746 if (s
->ring_head
> s
->ring_tail
) {
747 if (s
->ip_gma
>= s
->ring_head
&& s
->ip_gma
< ring_top
)
748 len
= (s
->ip_gma
- s
->ring_head
);
749 else if (s
->ip_gma
>= s
->ring_start
&&
750 s
->ip_gma
<= s
->ring_tail
)
751 len
= (ring_top
- s
->ring_head
) +
752 (s
->ip_gma
- s
->ring_start
);
754 len
= (s
->ip_gma
- s
->ring_head
);
756 s
->ip_va
= s
->rb_va
+ len
;
757 } else {/* shadow batch buffer */
758 s
->ip_va
= s
->ret_bb_va
;
762 static inline int ip_gma_set(struct parser_exec_state
*s
,
763 unsigned long ip_gma
)
765 WARN_ON(!IS_ALIGNED(ip_gma
, 4));
772 static inline int ip_gma_advance(struct parser_exec_state
*s
,
775 s
->ip_gma
+= (dw_len
<< 2);
777 if (s
->buf_type
== RING_BUFFER_INSTRUCTION
) {
778 if (s
->ip_gma
>= s
->ring_start
+ s
->ring_size
)
779 s
->ip_gma
-= s
->ring_size
;
782 s
->ip_va
+= (dw_len
<< 2);
788 static inline int get_cmd_length(struct cmd_info
*info
, u32 cmd
)
790 if ((info
->flag
& F_LEN_MASK
) == F_LEN_CONST
)
793 return (cmd
& ((1U << info
->len
) - 1)) + 2;
797 static inline int cmd_length(struct parser_exec_state
*s
)
799 return get_cmd_length(s
->info
, cmd_val(s
, 0));
802 /* do not remove this, some platform may need clflush here */
803 #define patch_value(s, addr, val) do { \
807 static bool is_shadowed_mmio(unsigned int offset
)
811 if ((offset
== 0x2168) || /*BB current head register UDW */
812 (offset
== 0x2140) || /*BB current header register */
813 (offset
== 0x211c) || /*second BB header register UDW */
814 (offset
== 0x2114)) { /*second BB header register UDW */
820 static int cmd_reg_handler(struct parser_exec_state
*s
,
821 unsigned int offset
, unsigned int index
, char *cmd
)
823 struct intel_vgpu
*vgpu
= s
->vgpu
;
824 struct intel_gvt
*gvt
= vgpu
->gvt
;
826 if (offset
+ 4 > gvt
->device_info
.mmio_size
) {
827 gvt_err("%s access to (%x) outside of MMIO range\n",
832 if (!intel_gvt_mmio_is_cmd_access(gvt
, offset
)) {
833 gvt_err("vgpu%d: %s access to non-render register (%x)\n",
834 s
->vgpu
->id
, cmd
, offset
);
838 if (is_shadowed_mmio(offset
)) {
839 gvt_err("vgpu%d: found access of shadowed MMIO %x\n",
840 s
->vgpu
->id
, offset
);
844 if (offset
== i915_mmio_reg_offset(DERRMR
) ||
845 offset
== i915_mmio_reg_offset(FORCEWAKE_MT
)) {
846 /* Writing to HW VGT_PVINFO_PAGE offset will be discarded */
847 patch_value(s
, cmd_ptr(s
, index
), VGT_PVINFO_PAGE
);
850 /* TODO: Update the global mask if this MMIO is a masked-MMIO */
851 intel_gvt_mmio_set_cmd_accessed(gvt
, offset
);
855 #define cmd_reg(s, i) \
856 (cmd_val(s, i) & GENMASK(22, 2))
858 #define cmd_reg_inhibit(s, i) \
859 (cmd_val(s, i) & GENMASK(22, 18))
861 #define cmd_gma(s, i) \
862 (cmd_val(s, i) & GENMASK(31, 2))
864 #define cmd_gma_hi(s, i) \
865 (cmd_val(s, i) & GENMASK(15, 0))
867 static int cmd_handler_lri(struct parser_exec_state
*s
)
870 int cmd_len
= cmd_length(s
);
871 struct intel_gvt
*gvt
= s
->vgpu
->gvt
;
873 for (i
= 1; i
< cmd_len
; i
+= 2) {
874 if (IS_BROADWELL(gvt
->dev_priv
) &&
875 (s
->ring_id
!= RCS
)) {
876 if (s
->ring_id
== BCS
&&
878 i915_mmio_reg_offset(DERRMR
))
881 ret
|= (cmd_reg_inhibit(s
, i
)) ? -EINVAL
: 0;
885 ret
|= cmd_reg_handler(s
, cmd_reg(s
, i
), i
, "lri");
890 static int cmd_handler_lrr(struct parser_exec_state
*s
)
893 int cmd_len
= cmd_length(s
);
895 for (i
= 1; i
< cmd_len
; i
+= 2) {
896 if (IS_BROADWELL(s
->vgpu
->gvt
->dev_priv
))
897 ret
|= ((cmd_reg_inhibit(s
, i
) ||
898 (cmd_reg_inhibit(s
, i
+ 1)))) ?
902 ret
|= cmd_reg_handler(s
, cmd_reg(s
, i
), i
, "lrr-src");
903 ret
|= cmd_reg_handler(s
, cmd_reg(s
, i
+ 1), i
, "lrr-dst");
908 static inline int cmd_address_audit(struct parser_exec_state
*s
,
909 unsigned long guest_gma
, int op_size
, bool index_mode
);
911 static int cmd_handler_lrm(struct parser_exec_state
*s
)
913 struct intel_gvt
*gvt
= s
->vgpu
->gvt
;
914 int gmadr_bytes
= gvt
->device_info
.gmadr_bytes_in_cmd
;
917 int cmd_len
= cmd_length(s
);
919 for (i
= 1; i
< cmd_len
;) {
920 if (IS_BROADWELL(gvt
->dev_priv
))
921 ret
|= (cmd_reg_inhibit(s
, i
)) ? -EINVAL
: 0;
924 ret
|= cmd_reg_handler(s
, cmd_reg(s
, i
), i
, "lrm");
925 if (cmd_val(s
, 0) & (1 << 22)) {
926 gma
= cmd_gma(s
, i
+ 1);
927 if (gmadr_bytes
== 8)
928 gma
|= (cmd_gma_hi(s
, i
+ 2)) << 32;
929 ret
|= cmd_address_audit(s
, gma
, sizeof(u32
), false);
931 i
+= gmadr_dw_number(s
) + 1;
936 static int cmd_handler_srm(struct parser_exec_state
*s
)
938 int gmadr_bytes
= s
->vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
941 int cmd_len
= cmd_length(s
);
943 for (i
= 1; i
< cmd_len
;) {
944 ret
|= cmd_reg_handler(s
, cmd_reg(s
, i
), i
, "srm");
945 if (cmd_val(s
, 0) & (1 << 22)) {
946 gma
= cmd_gma(s
, i
+ 1);
947 if (gmadr_bytes
== 8)
948 gma
|= (cmd_gma_hi(s
, i
+ 2)) << 32;
949 ret
|= cmd_address_audit(s
, gma
, sizeof(u32
), false);
951 i
+= gmadr_dw_number(s
) + 1;
956 struct cmd_interrupt_event
{
957 int pipe_control_notify
;
959 int mi_user_interrupt
;
962 static struct cmd_interrupt_event cmd_interrupt_events
[] = {
964 .pipe_control_notify
= RCS_PIPE_CONTROL
,
965 .mi_flush_dw
= INTEL_GVT_EVENT_RESERVED
,
966 .mi_user_interrupt
= RCS_MI_USER_INTERRUPT
,
969 .pipe_control_notify
= INTEL_GVT_EVENT_RESERVED
,
970 .mi_flush_dw
= BCS_MI_FLUSH_DW
,
971 .mi_user_interrupt
= BCS_MI_USER_INTERRUPT
,
974 .pipe_control_notify
= INTEL_GVT_EVENT_RESERVED
,
975 .mi_flush_dw
= VCS_MI_FLUSH_DW
,
976 .mi_user_interrupt
= VCS_MI_USER_INTERRUPT
,
979 .pipe_control_notify
= INTEL_GVT_EVENT_RESERVED
,
980 .mi_flush_dw
= VCS2_MI_FLUSH_DW
,
981 .mi_user_interrupt
= VCS2_MI_USER_INTERRUPT
,
984 .pipe_control_notify
= INTEL_GVT_EVENT_RESERVED
,
985 .mi_flush_dw
= VECS_MI_FLUSH_DW
,
986 .mi_user_interrupt
= VECS_MI_USER_INTERRUPT
,
990 static int cmd_handler_pipe_control(struct parser_exec_state
*s
)
992 int gmadr_bytes
= s
->vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
994 bool index_mode
= false;
995 unsigned int post_sync
;
998 post_sync
= (cmd_val(s
, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK
) >> 14;
1001 if (cmd_val(s
, 1) & PIPE_CONTROL_MMIO_WRITE
)
1002 ret
= cmd_reg_handler(s
, cmd_reg(s
, 2), 1, "pipe_ctrl");
1004 else if (post_sync
) {
1006 ret
= cmd_reg_handler(s
, 0x2350, 1, "pipe_ctrl");
1007 else if (post_sync
== 3)
1008 ret
= cmd_reg_handler(s
, 0x2358, 1, "pipe_ctrl");
1009 else if (post_sync
== 1) {
1011 if ((cmd_val(s
, 2) & (1 << 2))) {
1012 gma
= cmd_val(s
, 2) & GENMASK(31, 3);
1013 if (gmadr_bytes
== 8)
1014 gma
|= (cmd_gma_hi(s
, 3)) << 32;
1015 /* Store Data Index */
1016 if (cmd_val(s
, 1) & (1 << 21))
1018 ret
|= cmd_address_audit(s
, gma
, sizeof(u64
),
1027 if (cmd_val(s
, 1) & PIPE_CONTROL_NOTIFY
)
1028 set_bit(cmd_interrupt_events
[s
->ring_id
].pipe_control_notify
,
1029 s
->workload
->pending_events
);
1033 static int cmd_handler_mi_user_interrupt(struct parser_exec_state
*s
)
1035 set_bit(cmd_interrupt_events
[s
->ring_id
].mi_user_interrupt
,
1036 s
->workload
->pending_events
);
1040 static int cmd_advance_default(struct parser_exec_state
*s
)
1042 return ip_gma_advance(s
, cmd_length(s
));
1045 static int cmd_handler_mi_batch_buffer_end(struct parser_exec_state
*s
)
1049 if (s
->buf_type
== BATCH_BUFFER_2ND_LEVEL
) {
1050 s
->buf_type
= BATCH_BUFFER_INSTRUCTION
;
1051 ret
= ip_gma_set(s
, s
->ret_ip_gma_bb
);
1052 s
->buf_addr_type
= s
->saved_buf_addr_type
;
1054 s
->buf_type
= RING_BUFFER_INSTRUCTION
;
1055 s
->buf_addr_type
= GTT_BUFFER
;
1056 if (s
->ret_ip_gma_ring
>= s
->ring_start
+ s
->ring_size
)
1057 s
->ret_ip_gma_ring
-= s
->ring_size
;
1058 ret
= ip_gma_set(s
, s
->ret_ip_gma_ring
);
1063 struct mi_display_flip_command_info
{
1067 i915_reg_t stride_reg
;
1068 i915_reg_t ctrl_reg
;
1069 i915_reg_t surf_reg
;
1076 struct plane_code_mapping
{
1082 static int gen8_decode_mi_display_flip(struct parser_exec_state
*s
,
1083 struct mi_display_flip_command_info
*info
)
1085 struct drm_i915_private
*dev_priv
= s
->vgpu
->gvt
->dev_priv
;
1086 struct plane_code_mapping gen8_plane_code
[] = {
1087 [0] = {PIPE_A
, PLANE_A
, PRIMARY_A_FLIP_DONE
},
1088 [1] = {PIPE_B
, PLANE_A
, PRIMARY_B_FLIP_DONE
},
1089 [2] = {PIPE_A
, PLANE_B
, SPRITE_A_FLIP_DONE
},
1090 [3] = {PIPE_B
, PLANE_B
, SPRITE_B_FLIP_DONE
},
1091 [4] = {PIPE_C
, PLANE_A
, PRIMARY_C_FLIP_DONE
},
1092 [5] = {PIPE_C
, PLANE_B
, SPRITE_C_FLIP_DONE
},
1094 u32 dword0
, dword1
, dword2
;
1097 dword0
= cmd_val(s
, 0);
1098 dword1
= cmd_val(s
, 1);
1099 dword2
= cmd_val(s
, 2);
1101 v
= (dword0
& GENMASK(21, 19)) >> 19;
1102 if (WARN_ON(v
>= ARRAY_SIZE(gen8_plane_code
)))
1105 info
->pipe
= gen8_plane_code
[v
].pipe
;
1106 info
->plane
= gen8_plane_code
[v
].plane
;
1107 info
->event
= gen8_plane_code
[v
].event
;
1108 info
->stride_val
= (dword1
& GENMASK(15, 6)) >> 6;
1109 info
->tile_val
= (dword1
& 0x1);
1110 info
->surf_val
= (dword2
& GENMASK(31, 12)) >> 12;
1111 info
->async_flip
= ((dword2
& GENMASK(1, 0)) == 0x1);
1113 if (info
->plane
== PLANE_A
) {
1114 info
->ctrl_reg
= DSPCNTR(info
->pipe
);
1115 info
->stride_reg
= DSPSTRIDE(info
->pipe
);
1116 info
->surf_reg
= DSPSURF(info
->pipe
);
1117 } else if (info
->plane
== PLANE_B
) {
1118 info
->ctrl_reg
= SPRCTL(info
->pipe
);
1119 info
->stride_reg
= SPRSTRIDE(info
->pipe
);
1120 info
->surf_reg
= SPRSURF(info
->pipe
);
1128 static int skl_decode_mi_display_flip(struct parser_exec_state
*s
,
1129 struct mi_display_flip_command_info
*info
)
1131 struct drm_i915_private
*dev_priv
= s
->vgpu
->gvt
->dev_priv
;
1132 u32 dword0
= cmd_val(s
, 0);
1133 u32 dword1
= cmd_val(s
, 1);
1134 u32 dword2
= cmd_val(s
, 2);
1135 u32 plane
= (dword0
& GENMASK(12, 8)) >> 8;
1137 info
->plane
= PRIMARY_PLANE
;
1140 case MI_DISPLAY_FLIP_SKL_PLANE_1_A
:
1141 info
->pipe
= PIPE_A
;
1142 info
->event
= PRIMARY_A_FLIP_DONE
;
1144 case MI_DISPLAY_FLIP_SKL_PLANE_1_B
:
1145 info
->pipe
= PIPE_B
;
1146 info
->event
= PRIMARY_B_FLIP_DONE
;
1148 case MI_DISPLAY_FLIP_SKL_PLANE_1_C
:
1149 info
->pipe
= PIPE_C
;
1150 info
->event
= PRIMARY_C_FLIP_DONE
;
1153 case MI_DISPLAY_FLIP_SKL_PLANE_2_A
:
1154 info
->pipe
= PIPE_A
;
1155 info
->event
= SPRITE_A_FLIP_DONE
;
1156 info
->plane
= SPRITE_PLANE
;
1158 case MI_DISPLAY_FLIP_SKL_PLANE_2_B
:
1159 info
->pipe
= PIPE_B
;
1160 info
->event
= SPRITE_B_FLIP_DONE
;
1161 info
->plane
= SPRITE_PLANE
;
1163 case MI_DISPLAY_FLIP_SKL_PLANE_2_C
:
1164 info
->pipe
= PIPE_C
;
1165 info
->event
= SPRITE_C_FLIP_DONE
;
1166 info
->plane
= SPRITE_PLANE
;
1170 gvt_err("unknown plane code %d\n", plane
);
1174 info
->stride_val
= (dword1
& GENMASK(15, 6)) >> 6;
1175 info
->tile_val
= (dword1
& GENMASK(2, 0));
1176 info
->surf_val
= (dword2
& GENMASK(31, 12)) >> 12;
1177 info
->async_flip
= ((dword2
& GENMASK(1, 0)) == 0x1);
1179 info
->ctrl_reg
= DSPCNTR(info
->pipe
);
1180 info
->stride_reg
= DSPSTRIDE(info
->pipe
);
1181 info
->surf_reg
= DSPSURF(info
->pipe
);
1186 static int gen8_check_mi_display_flip(struct parser_exec_state
*s
,
1187 struct mi_display_flip_command_info
*info
)
1189 struct drm_i915_private
*dev_priv
= s
->vgpu
->gvt
->dev_priv
;
1192 if (!info
->async_flip
)
1195 if (IS_SKYLAKE(dev_priv
)) {
1196 stride
= vgpu_vreg(s
->vgpu
, info
->stride_reg
) & GENMASK(9, 0);
1197 tile
= (vgpu_vreg(s
->vgpu
, info
->ctrl_reg
) &
1198 GENMASK(12, 10)) >> 10;
1200 stride
= (vgpu_vreg(s
->vgpu
, info
->stride_reg
) &
1201 GENMASK(15, 6)) >> 6;
1202 tile
= (vgpu_vreg(s
->vgpu
, info
->ctrl_reg
) & (1 << 10)) >> 10;
1205 if (stride
!= info
->stride_val
)
1206 gvt_dbg_cmd("cannot change stride during async flip\n");
1208 if (tile
!= info
->tile_val
)
1209 gvt_dbg_cmd("cannot change tile during async flip\n");
1214 static int gen8_update_plane_mmio_from_mi_display_flip(
1215 struct parser_exec_state
*s
,
1216 struct mi_display_flip_command_info
*info
)
1218 struct drm_i915_private
*dev_priv
= s
->vgpu
->gvt
->dev_priv
;
1219 struct intel_vgpu
*vgpu
= s
->vgpu
;
1221 set_mask_bits(&vgpu_vreg(vgpu
, info
->surf_reg
), GENMASK(31, 12),
1222 info
->surf_val
<< 12);
1223 if (IS_SKYLAKE(dev_priv
)) {
1224 set_mask_bits(&vgpu_vreg(vgpu
, info
->stride_reg
), GENMASK(9, 0),
1226 set_mask_bits(&vgpu_vreg(vgpu
, info
->ctrl_reg
), GENMASK(12, 10),
1227 info
->tile_val
<< 10);
1229 set_mask_bits(&vgpu_vreg(vgpu
, info
->stride_reg
), GENMASK(15, 6),
1230 info
->stride_val
<< 6);
1231 set_mask_bits(&vgpu_vreg(vgpu
, info
->ctrl_reg
), GENMASK(10, 10),
1232 info
->tile_val
<< 10);
1235 vgpu_vreg(vgpu
, PIPE_FRMCOUNT_G4X(info
->pipe
))++;
1236 intel_vgpu_trigger_virtual_event(vgpu
, info
->event
);
1240 static int decode_mi_display_flip(struct parser_exec_state
*s
,
1241 struct mi_display_flip_command_info
*info
)
1243 struct drm_i915_private
*dev_priv
= s
->vgpu
->gvt
->dev_priv
;
1245 if (IS_BROADWELL(dev_priv
))
1246 return gen8_decode_mi_display_flip(s
, info
);
1247 if (IS_SKYLAKE(dev_priv
))
1248 return skl_decode_mi_display_flip(s
, info
);
1253 static int check_mi_display_flip(struct parser_exec_state
*s
,
1254 struct mi_display_flip_command_info
*info
)
1256 struct drm_i915_private
*dev_priv
= s
->vgpu
->gvt
->dev_priv
;
1258 if (IS_BROADWELL(dev_priv
) || IS_SKYLAKE(dev_priv
))
1259 return gen8_check_mi_display_flip(s
, info
);
1263 static int update_plane_mmio_from_mi_display_flip(
1264 struct parser_exec_state
*s
,
1265 struct mi_display_flip_command_info
*info
)
1267 struct drm_i915_private
*dev_priv
= s
->vgpu
->gvt
->dev_priv
;
1269 if (IS_BROADWELL(dev_priv
) || IS_SKYLAKE(dev_priv
))
1270 return gen8_update_plane_mmio_from_mi_display_flip(s
, info
);
1274 static int cmd_handler_mi_display_flip(struct parser_exec_state
*s
)
1276 struct mi_display_flip_command_info info
;
1279 int len
= cmd_length(s
);
1281 ret
= decode_mi_display_flip(s
, &info
);
1283 gvt_err("fail to decode MI display flip command\n");
1287 ret
= check_mi_display_flip(s
, &info
);
1289 gvt_err("invalid MI display flip command\n");
1293 ret
= update_plane_mmio_from_mi_display_flip(s
, &info
);
1295 gvt_err("fail to update plane mmio\n");
1299 for (i
= 0; i
< len
; i
++)
1300 patch_value(s
, cmd_ptr(s
, i
), MI_NOOP
);
1304 static bool is_wait_for_flip_pending(u32 cmd
)
1306 return cmd
& (MI_WAIT_FOR_PLANE_A_FLIP_PENDING
|
1307 MI_WAIT_FOR_PLANE_B_FLIP_PENDING
|
1308 MI_WAIT_FOR_PLANE_C_FLIP_PENDING
|
1309 MI_WAIT_FOR_SPRITE_A_FLIP_PENDING
|
1310 MI_WAIT_FOR_SPRITE_B_FLIP_PENDING
|
1311 MI_WAIT_FOR_SPRITE_C_FLIP_PENDING
);
1314 static int cmd_handler_mi_wait_for_event(struct parser_exec_state
*s
)
1316 u32 cmd
= cmd_val(s
, 0);
1318 if (!is_wait_for_flip_pending(cmd
))
1321 patch_value(s
, cmd_ptr(s
, 0), MI_NOOP
);
1325 static unsigned long get_gma_bb_from_cmd(struct parser_exec_state
*s
, int index
)
1328 unsigned long gma_high
, gma_low
;
1329 int gmadr_bytes
= s
->vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
1331 if (WARN_ON(gmadr_bytes
!= 4 && gmadr_bytes
!= 8))
1332 return INTEL_GVT_INVALID_ADDR
;
1334 gma_low
= cmd_val(s
, index
) & BATCH_BUFFER_ADDR_MASK
;
1335 if (gmadr_bytes
== 4) {
1338 gma_high
= cmd_val(s
, index
+ 1) & BATCH_BUFFER_ADDR_HIGH_MASK
;
1339 addr
= (((unsigned long)gma_high
) << 32) | gma_low
;
1344 static inline int cmd_address_audit(struct parser_exec_state
*s
,
1345 unsigned long guest_gma
, int op_size
, bool index_mode
)
1347 struct intel_vgpu
*vgpu
= s
->vgpu
;
1348 u32 max_surface_size
= vgpu
->gvt
->device_info
.max_surface_size
;
1352 if (op_size
> max_surface_size
) {
1353 gvt_err("command address audit fail name %s\n", s
->info
->name
);
1358 if (guest_gma
>= GTT_PAGE_SIZE
/ sizeof(u64
)) {
1362 } else if ((!vgpu_gmadr_is_valid(s
->vgpu
, guest_gma
)) ||
1363 (!vgpu_gmadr_is_valid(s
->vgpu
,
1364 guest_gma
+ op_size
- 1))) {
1370 gvt_err("cmd_parser: Malicious %s detected, addr=0x%lx, len=%d!\n",
1371 s
->info
->name
, guest_gma
, op_size
);
1373 pr_err("cmd dump: ");
1374 for (i
= 0; i
< cmd_length(s
); i
++) {
1376 pr_err("\n%08x ", cmd_val(s
, i
));
1378 pr_err("%08x ", cmd_val(s
, i
));
1380 pr_err("\nvgpu%d: aperture 0x%llx - 0x%llx, hidden 0x%llx - 0x%llx\n",
1382 vgpu_aperture_gmadr_base(vgpu
),
1383 vgpu_aperture_gmadr_end(vgpu
),
1384 vgpu_hidden_gmadr_base(vgpu
),
1385 vgpu_hidden_gmadr_end(vgpu
));
1389 static int cmd_handler_mi_store_data_imm(struct parser_exec_state
*s
)
1391 int gmadr_bytes
= s
->vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
1392 int op_size
= (cmd_length(s
) - 3) * sizeof(u32
);
1393 int core_id
= (cmd_val(s
, 2) & (1 << 0)) ? 1 : 0;
1394 unsigned long gma
, gma_low
, gma_high
;
1398 if (!(cmd_val(s
, 0) & (1 << 22)))
1401 gma
= cmd_val(s
, 2) & GENMASK(31, 2);
1403 if (gmadr_bytes
== 8) {
1404 gma_low
= cmd_val(s
, 1) & GENMASK(31, 2);
1405 gma_high
= cmd_val(s
, 2) & GENMASK(15, 0);
1406 gma
= (gma_high
<< 32) | gma_low
;
1407 core_id
= (cmd_val(s
, 1) & (1 << 0)) ? 1 : 0;
1409 ret
= cmd_address_audit(s
, gma
+ op_size
* core_id
, op_size
, false);
1413 static inline int unexpected_cmd(struct parser_exec_state
*s
)
1415 gvt_err("vgpu%d: Unexpected %s in command buffer!\n",
1416 s
->vgpu
->id
, s
->info
->name
);
1420 static int cmd_handler_mi_semaphore_wait(struct parser_exec_state
*s
)
1422 return unexpected_cmd(s
);
1425 static int cmd_handler_mi_report_perf_count(struct parser_exec_state
*s
)
1427 return unexpected_cmd(s
);
1430 static int cmd_handler_mi_op_2e(struct parser_exec_state
*s
)
1432 return unexpected_cmd(s
);
1435 static int cmd_handler_mi_op_2f(struct parser_exec_state
*s
)
1437 int gmadr_bytes
= s
->vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
1438 int op_size
= (1 << ((cmd_val(s
, 0) & GENMASK(20, 19)) >> 19)) *
1440 unsigned long gma
, gma_high
;
1443 if (!(cmd_val(s
, 0) & (1 << 22)))
1446 gma
= cmd_val(s
, 1) & GENMASK(31, 2);
1447 if (gmadr_bytes
== 8) {
1448 gma_high
= cmd_val(s
, 2) & GENMASK(15, 0);
1449 gma
= (gma_high
<< 32) | gma
;
1451 ret
= cmd_address_audit(s
, gma
, op_size
, false);
1455 static int cmd_handler_mi_store_data_index(struct parser_exec_state
*s
)
1457 return unexpected_cmd(s
);
1460 static int cmd_handler_mi_clflush(struct parser_exec_state
*s
)
1462 return unexpected_cmd(s
);
1465 static int cmd_handler_mi_conditional_batch_buffer_end(
1466 struct parser_exec_state
*s
)
1468 return unexpected_cmd(s
);
1471 static int cmd_handler_mi_update_gtt(struct parser_exec_state
*s
)
1473 return unexpected_cmd(s
);
1476 static int cmd_handler_mi_flush_dw(struct parser_exec_state
*s
)
1478 int gmadr_bytes
= s
->vgpu
->gvt
->device_info
.gmadr_bytes_in_cmd
;
1480 bool index_mode
= false;
1483 /* Check post-sync and ppgtt bit */
1484 if (((cmd_val(s
, 0) >> 14) & 0x3) && (cmd_val(s
, 1) & (1 << 2))) {
1485 gma
= cmd_val(s
, 1) & GENMASK(31, 3);
1486 if (gmadr_bytes
== 8)
1487 gma
|= (cmd_val(s
, 2) & GENMASK(15, 0)) << 32;
1488 /* Store Data Index */
1489 if (cmd_val(s
, 0) & (1 << 21))
1491 ret
= cmd_address_audit(s
, gma
, sizeof(u64
), index_mode
);
1493 /* Check notify bit */
1494 if ((cmd_val(s
, 0) & (1 << 8)))
1495 set_bit(cmd_interrupt_events
[s
->ring_id
].mi_flush_dw
,
1496 s
->workload
->pending_events
);
1500 static void addr_type_update_snb(struct parser_exec_state
*s
)
1502 if ((s
->buf_type
== RING_BUFFER_INSTRUCTION
) &&
1503 (BATCH_BUFFER_ADR_SPACE_BIT(cmd_val(s
, 0)) == 1)) {
1504 s
->buf_addr_type
= PPGTT_BUFFER
;
1509 static int copy_gma_to_hva(struct intel_vgpu
*vgpu
, struct intel_vgpu_mm
*mm
,
1510 unsigned long gma
, unsigned long end_gma
, void *va
)
1512 unsigned long copy_len
, offset
;
1513 unsigned long len
= 0;
1516 while (gma
!= end_gma
) {
1517 gpa
= intel_vgpu_gma_to_gpa(mm
, gma
);
1518 if (gpa
== INTEL_GVT_INVALID_ADDR
) {
1519 gvt_err("invalid gma address: %lx\n", gma
);
1523 offset
= gma
& (GTT_PAGE_SIZE
- 1);
1525 copy_len
= (end_gma
- gma
) >= (GTT_PAGE_SIZE
- offset
) ?
1526 GTT_PAGE_SIZE
- offset
: end_gma
- gma
;
1528 intel_gvt_hypervisor_read_gpa(vgpu
, gpa
, va
+ len
, copy_len
);
1538 * Check whether a batch buffer needs to be scanned. Currently
1539 * the only criteria is based on privilege.
1541 static int batch_buffer_needs_scan(struct parser_exec_state
*s
)
1543 struct intel_gvt
*gvt
= s
->vgpu
->gvt
;
1545 if (IS_BROADWELL(gvt
->dev_priv
) || IS_SKYLAKE(gvt
->dev_priv
)) {
1546 /* BDW decides privilege based on address space */
1547 if (cmd_val(s
, 0) & (1 << 8))
1553 static uint32_t find_bb_size(struct parser_exec_state
*s
)
1555 unsigned long gma
= 0;
1556 struct cmd_info
*info
;
1557 uint32_t bb_size
= 0;
1558 uint32_t cmd_len
= 0;
1559 bool met_bb_end
= false;
1562 /* get the start gm address of the batch buffer */
1563 gma
= get_gma_bb_from_cmd(s
, 1);
1564 cmd
= cmd_val(s
, 0);
1566 info
= get_cmd_info(s
->vgpu
->gvt
, cmd
, s
->ring_id
);
1568 gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
1569 cmd
, get_opcode(cmd
, s
->ring_id
));
1573 copy_gma_to_hva(s
->vgpu
, s
->vgpu
->gtt
.ggtt_mm
,
1574 gma
, gma
+ 4, &cmd
);
1575 info
= get_cmd_info(s
->vgpu
->gvt
, cmd
, s
->ring_id
);
1577 gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
1578 cmd
, get_opcode(cmd
, s
->ring_id
));
1582 if (info
->opcode
== OP_MI_BATCH_BUFFER_END
) {
1584 } else if (info
->opcode
== OP_MI_BATCH_BUFFER_START
) {
1585 if (BATCH_BUFFER_2ND_LEVEL_BIT(cmd
) == 0) {
1586 /* chained batch buffer */
1590 cmd_len
= get_cmd_length(info
, cmd
) << 2;
1594 } while (!met_bb_end
);
1599 static int perform_bb_shadow(struct parser_exec_state
*s
)
1601 struct intel_shadow_bb_entry
*entry_obj
;
1602 unsigned long gma
= 0;
1607 /* get the start gm address of the batch buffer */
1608 gma
= get_gma_bb_from_cmd(s
, 1);
1610 /* get the size of the batch buffer */
1611 bb_size
= find_bb_size(s
);
1613 /* allocate shadow batch buffer */
1614 entry_obj
= kmalloc(sizeof(*entry_obj
), GFP_KERNEL
);
1615 if (entry_obj
== NULL
)
1619 i915_gem_object_create(s
->vgpu
->gvt
->dev_priv
,
1620 roundup(bb_size
, PAGE_SIZE
));
1621 if (IS_ERR(entry_obj
->obj
)) {
1622 ret
= PTR_ERR(entry_obj
->obj
);
1625 entry_obj
->len
= bb_size
;
1626 INIT_LIST_HEAD(&entry_obj
->list
);
1628 dst
= i915_gem_object_pin_map(entry_obj
->obj
, I915_MAP_WB
);
1634 ret
= i915_gem_object_set_to_cpu_domain(entry_obj
->obj
, false);
1636 gvt_err("failed to set shadow batch to CPU\n");
1640 entry_obj
->va
= dst
;
1641 entry_obj
->bb_start_cmd_va
= s
->ip_va
;
1643 /* copy batch buffer to shadow batch buffer*/
1644 ret
= copy_gma_to_hva(s
->vgpu
, s
->vgpu
->gtt
.ggtt_mm
,
1648 gvt_err("fail to copy guest ring buffer\n");
1652 list_add(&entry_obj
->list
, &s
->workload
->shadow_bb
);
1654 * ip_va saves the virtual address of the shadow batch buffer, while
1655 * ip_gma saves the graphics address of the original batch buffer.
1656 * As the shadow batch buffer is just a copy from the originial one,
1657 * it should be right to use shadow batch buffer'va and original batch
1658 * buffer's gma in pair. After all, we don't want to pin the shadow
1659 * buffer here (too early).
1667 i915_gem_object_unpin_map(entry_obj
->obj
);
1669 i915_gem_object_put(entry_obj
->obj
);
1675 static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state
*s
)
1680 if (s
->buf_type
== BATCH_BUFFER_2ND_LEVEL
) {
1681 gvt_err("Found MI_BATCH_BUFFER_START in 2nd level BB\n");
1685 second_level
= BATCH_BUFFER_2ND_LEVEL_BIT(cmd_val(s
, 0)) == 1;
1686 if (second_level
&& (s
->buf_type
!= BATCH_BUFFER_INSTRUCTION
)) {
1687 gvt_err("Jumping to 2nd level BB from RB is not allowed\n");
1691 s
->saved_buf_addr_type
= s
->buf_addr_type
;
1692 addr_type_update_snb(s
);
1693 if (s
->buf_type
== RING_BUFFER_INSTRUCTION
) {
1694 s
->ret_ip_gma_ring
= s
->ip_gma
+ cmd_length(s
) * sizeof(u32
);
1695 s
->buf_type
= BATCH_BUFFER_INSTRUCTION
;
1696 } else if (second_level
) {
1697 s
->buf_type
= BATCH_BUFFER_2ND_LEVEL
;
1698 s
->ret_ip_gma_bb
= s
->ip_gma
+ cmd_length(s
) * sizeof(u32
);
1699 s
->ret_bb_va
= s
->ip_va
+ cmd_length(s
) * sizeof(u32
);
1702 if (batch_buffer_needs_scan(s
)) {
1703 ret
= perform_bb_shadow(s
);
1705 gvt_err("invalid shadow batch buffer\n");
1707 /* emulate a batch buffer end to do return right */
1708 ret
= cmd_handler_mi_batch_buffer_end(s
);
1716 static struct cmd_info cmd_info
[] = {
1717 {"MI_NOOP", OP_MI_NOOP
, F_LEN_CONST
, R_ALL
, D_ALL
, 0, 1, NULL
},
1719 {"MI_SET_PREDICATE", OP_MI_SET_PREDICATE
, F_LEN_CONST
, R_ALL
, D_ALL
,
1722 {"MI_USER_INTERRUPT", OP_MI_USER_INTERRUPT
, F_LEN_CONST
, R_ALL
, D_ALL
,
1723 0, 1, cmd_handler_mi_user_interrupt
},
1725 {"MI_WAIT_FOR_EVENT", OP_MI_WAIT_FOR_EVENT
, F_LEN_CONST
, R_RCS
| R_BCS
,
1726 D_ALL
, 0, 1, cmd_handler_mi_wait_for_event
},
1728 {"MI_FLUSH", OP_MI_FLUSH
, F_LEN_CONST
, R_ALL
, D_ALL
, 0, 1, NULL
},
1730 {"MI_ARB_CHECK", OP_MI_ARB_CHECK
, F_LEN_CONST
, R_ALL
, D_ALL
, 0, 1,
1733 {"MI_RS_CONTROL", OP_MI_RS_CONTROL
, F_LEN_CONST
, R_RCS
, D_ALL
, 0, 1,
1736 {"MI_REPORT_HEAD", OP_MI_REPORT_HEAD
, F_LEN_CONST
, R_ALL
, D_ALL
, 0, 1,
1739 {"MI_ARB_ON_OFF", OP_MI_ARB_ON_OFF
, F_LEN_CONST
, R_ALL
, D_ALL
, 0, 1,
1742 {"MI_URB_ATOMIC_ALLOC", OP_MI_URB_ATOMIC_ALLOC
, F_LEN_CONST
, R_RCS
,
1745 {"MI_BATCH_BUFFER_END", OP_MI_BATCH_BUFFER_END
,
1746 F_IP_ADVANCE_CUSTOM
| F_LEN_CONST
, R_ALL
, D_ALL
, 0, 1,
1747 cmd_handler_mi_batch_buffer_end
},
1749 {"MI_SUSPEND_FLUSH", OP_MI_SUSPEND_FLUSH
, F_LEN_CONST
, R_ALL
, D_ALL
,
1752 {"MI_PREDICATE", OP_MI_PREDICATE
, F_LEN_CONST
, R_RCS
, D_ALL
, 0, 1,
1755 {"MI_TOPOLOGY_FILTER", OP_MI_TOPOLOGY_FILTER
, F_LEN_CONST
, R_ALL
,
1758 {"MI_SET_APPID", OP_MI_SET_APPID
, F_LEN_CONST
, R_ALL
, D_ALL
, 0, 1,
1761 {"MI_RS_CONTEXT", OP_MI_RS_CONTEXT
, F_LEN_CONST
, R_RCS
, D_ALL
, 0, 1,
1764 {"MI_DISPLAY_FLIP", OP_MI_DISPLAY_FLIP
, F_LEN_VAR
| F_POST_HANDLE
,
1765 R_RCS
| R_BCS
, D_ALL
, 0, 8, cmd_handler_mi_display_flip
},
1767 {"MI_SEMAPHORE_MBOX", OP_MI_SEMAPHORE_MBOX
, F_LEN_VAR
, R_ALL
, D_ALL
,
1770 {"MI_MATH", OP_MI_MATH
, F_LEN_VAR
, R_ALL
, D_ALL
, 0, 8, NULL
},
1772 {"MI_URB_CLEAR", OP_MI_URB_CLEAR
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1774 {"ME_SEMAPHORE_SIGNAL", OP_MI_SEMAPHORE_SIGNAL
, F_LEN_VAR
, R_ALL
,
1775 D_BDW_PLUS
, 0, 8, NULL
},
1777 {"ME_SEMAPHORE_WAIT", OP_MI_SEMAPHORE_WAIT
, F_LEN_VAR
, R_ALL
, D_BDW_PLUS
,
1778 ADDR_FIX_1(2), 8, cmd_handler_mi_semaphore_wait
},
1780 {"MI_STORE_DATA_IMM", OP_MI_STORE_DATA_IMM
, F_LEN_VAR
, R_ALL
, D_BDW_PLUS
,
1781 ADDR_FIX_1(1), 10, cmd_handler_mi_store_data_imm
},
1783 {"MI_STORE_DATA_INDEX", OP_MI_STORE_DATA_INDEX
, F_LEN_VAR
, R_ALL
, D_ALL
,
1784 0, 8, cmd_handler_mi_store_data_index
},
1786 {"MI_LOAD_REGISTER_IMM", OP_MI_LOAD_REGISTER_IMM
, F_LEN_VAR
, R_ALL
,
1787 D_ALL
, 0, 8, cmd_handler_lri
},
1789 {"MI_UPDATE_GTT", OP_MI_UPDATE_GTT
, F_LEN_VAR
, R_ALL
, D_BDW_PLUS
, 0, 10,
1790 cmd_handler_mi_update_gtt
},
1792 {"MI_STORE_REGISTER_MEM", OP_MI_STORE_REGISTER_MEM
, F_LEN_VAR
, R_ALL
,
1793 D_ALL
, ADDR_FIX_1(2), 8, cmd_handler_srm
},
1795 {"MI_FLUSH_DW", OP_MI_FLUSH_DW
, F_LEN_VAR
, R_ALL
, D_ALL
, 0, 6,
1796 cmd_handler_mi_flush_dw
},
1798 {"MI_CLFLUSH", OP_MI_CLFLUSH
, F_LEN_VAR
, R_ALL
, D_ALL
, ADDR_FIX_1(1),
1799 10, cmd_handler_mi_clflush
},
1801 {"MI_REPORT_PERF_COUNT", OP_MI_REPORT_PERF_COUNT
, F_LEN_VAR
, R_ALL
,
1802 D_ALL
, ADDR_FIX_1(1), 6, cmd_handler_mi_report_perf_count
},
1804 {"MI_LOAD_REGISTER_MEM", OP_MI_LOAD_REGISTER_MEM
, F_LEN_VAR
, R_ALL
,
1805 D_ALL
, ADDR_FIX_1(2), 8, cmd_handler_lrm
},
1807 {"MI_LOAD_REGISTER_REG", OP_MI_LOAD_REGISTER_REG
, F_LEN_VAR
, R_ALL
,
1808 D_ALL
, 0, 8, cmd_handler_lrr
},
1810 {"MI_RS_STORE_DATA_IMM", OP_MI_RS_STORE_DATA_IMM
, F_LEN_VAR
, R_RCS
,
1813 {"MI_LOAD_URB_MEM", OP_MI_LOAD_URB_MEM
, F_LEN_VAR
, R_RCS
, D_ALL
,
1814 ADDR_FIX_1(2), 8, NULL
},
1816 {"MI_STORE_URM_MEM", OP_MI_STORE_URM_MEM
, F_LEN_VAR
, R_RCS
, D_ALL
,
1817 ADDR_FIX_1(2), 8, NULL
},
1819 {"MI_OP_2E", OP_MI_2E
, F_LEN_VAR
, R_ALL
, D_BDW_PLUS
, ADDR_FIX_2(1, 2),
1820 8, cmd_handler_mi_op_2e
},
1822 {"MI_OP_2F", OP_MI_2F
, F_LEN_VAR
, R_ALL
, D_BDW_PLUS
, ADDR_FIX_1(1),
1823 8, cmd_handler_mi_op_2f
},
1825 {"MI_BATCH_BUFFER_START", OP_MI_BATCH_BUFFER_START
,
1826 F_IP_ADVANCE_CUSTOM
, R_ALL
, D_ALL
, 0, 8,
1827 cmd_handler_mi_batch_buffer_start
},
1829 {"MI_CONDITIONAL_BATCH_BUFFER_END", OP_MI_CONDITIONAL_BATCH_BUFFER_END
,
1830 F_LEN_VAR
, R_ALL
, D_ALL
, ADDR_FIX_1(2), 8,
1831 cmd_handler_mi_conditional_batch_buffer_end
},
1833 {"MI_LOAD_SCAN_LINES_INCL", OP_MI_LOAD_SCAN_LINES_INCL
, F_LEN_CONST
,
1834 R_RCS
| R_BCS
, D_ALL
, 0, 2, NULL
},
1836 {"XY_SETUP_BLT", OP_XY_SETUP_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
1837 ADDR_FIX_2(4, 7), 8, NULL
},
1839 {"XY_SETUP_CLIP_BLT", OP_XY_SETUP_CLIP_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
1842 {"XY_SETUP_MONO_PATTERN_SL_BLT", OP_XY_SETUP_MONO_PATTERN_SL_BLT
,
1843 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_1(4), 8, NULL
},
1845 {"XY_PIXEL_BLT", OP_XY_PIXEL_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
, 0, 8, NULL
},
1847 {"XY_SCANLINES_BLT", OP_XY_SCANLINES_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
1850 {"XY_TEXT_BLT", OP_XY_TEXT_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
1851 ADDR_FIX_1(3), 8, NULL
},
1853 {"XY_TEXT_IMMEDIATE_BLT", OP_XY_TEXT_IMMEDIATE_BLT
, F_LEN_VAR
, R_BCS
,
1856 {"XY_COLOR_BLT", OP_XY_COLOR_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
1857 ADDR_FIX_1(4), 8, NULL
},
1859 {"XY_PAT_BLT", OP_XY_PAT_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
1860 ADDR_FIX_2(4, 5), 8, NULL
},
1862 {"XY_MONO_PAT_BLT", OP_XY_MONO_PAT_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
1863 ADDR_FIX_1(4), 8, NULL
},
1865 {"XY_SRC_COPY_BLT", OP_XY_SRC_COPY_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
1866 ADDR_FIX_2(4, 7), 8, NULL
},
1868 {"XY_MONO_SRC_COPY_BLT", OP_XY_MONO_SRC_COPY_BLT
, F_LEN_VAR
, R_BCS
,
1869 D_ALL
, ADDR_FIX_2(4, 5), 8, NULL
},
1871 {"XY_FULL_BLT", OP_XY_FULL_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
, 0, 8, NULL
},
1873 {"XY_FULL_MONO_SRC_BLT", OP_XY_FULL_MONO_SRC_BLT
, F_LEN_VAR
, R_BCS
,
1874 D_ALL
, ADDR_FIX_3(4, 5, 8), 8, NULL
},
1876 {"XY_FULL_MONO_PATTERN_BLT", OP_XY_FULL_MONO_PATTERN_BLT
, F_LEN_VAR
,
1877 R_BCS
, D_ALL
, ADDR_FIX_2(4, 7), 8, NULL
},
1879 {"XY_FULL_MONO_PATTERN_MONO_SRC_BLT",
1880 OP_XY_FULL_MONO_PATTERN_MONO_SRC_BLT
,
1881 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_2(4, 5), 8, NULL
},
1883 {"XY_MONO_PAT_FIXED_BLT", OP_XY_MONO_PAT_FIXED_BLT
, F_LEN_VAR
, R_BCS
,
1884 D_ALL
, ADDR_FIX_1(4), 8, NULL
},
1886 {"XY_MONO_SRC_COPY_IMMEDIATE_BLT", OP_XY_MONO_SRC_COPY_IMMEDIATE_BLT
,
1887 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_1(4), 8, NULL
},
1889 {"XY_PAT_BLT_IMMEDIATE", OP_XY_PAT_BLT_IMMEDIATE
, F_LEN_VAR
, R_BCS
,
1890 D_ALL
, ADDR_FIX_1(4), 8, NULL
},
1892 {"XY_SRC_COPY_CHROMA_BLT", OP_XY_SRC_COPY_CHROMA_BLT
, F_LEN_VAR
, R_BCS
,
1893 D_ALL
, ADDR_FIX_2(4, 7), 8, NULL
},
1895 {"XY_FULL_IMMEDIATE_PATTERN_BLT", OP_XY_FULL_IMMEDIATE_PATTERN_BLT
,
1896 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_2(4, 7), 8, NULL
},
1898 {"XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT",
1899 OP_XY_FULL_MONO_SRC_IMMEDIATE_PATTERN_BLT
,
1900 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_2(4, 5), 8, NULL
},
1902 {"XY_PAT_CHROMA_BLT", OP_XY_PAT_CHROMA_BLT
, F_LEN_VAR
, R_BCS
, D_ALL
,
1903 ADDR_FIX_2(4, 5), 8, NULL
},
1905 {"XY_PAT_CHROMA_BLT_IMMEDIATE", OP_XY_PAT_CHROMA_BLT_IMMEDIATE
,
1906 F_LEN_VAR
, R_BCS
, D_ALL
, ADDR_FIX_1(4), 8, NULL
},
1908 {"3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP",
1909 OP_3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP
,
1910 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1912 {"3DSTATE_VIEWPORT_STATE_POINTERS_CC",
1913 OP_3DSTATE_VIEWPORT_STATE_POINTERS_CC
,
1914 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1916 {"3DSTATE_BLEND_STATE_POINTERS",
1917 OP_3DSTATE_BLEND_STATE_POINTERS
,
1918 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1920 {"3DSTATE_DEPTH_STENCIL_STATE_POINTERS",
1921 OP_3DSTATE_DEPTH_STENCIL_STATE_POINTERS
,
1922 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1924 {"3DSTATE_BINDING_TABLE_POINTERS_VS",
1925 OP_3DSTATE_BINDING_TABLE_POINTERS_VS
,
1926 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1928 {"3DSTATE_BINDING_TABLE_POINTERS_HS",
1929 OP_3DSTATE_BINDING_TABLE_POINTERS_HS
,
1930 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1932 {"3DSTATE_BINDING_TABLE_POINTERS_DS",
1933 OP_3DSTATE_BINDING_TABLE_POINTERS_DS
,
1934 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1936 {"3DSTATE_BINDING_TABLE_POINTERS_GS",
1937 OP_3DSTATE_BINDING_TABLE_POINTERS_GS
,
1938 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1940 {"3DSTATE_BINDING_TABLE_POINTERS_PS",
1941 OP_3DSTATE_BINDING_TABLE_POINTERS_PS
,
1942 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1944 {"3DSTATE_SAMPLER_STATE_POINTERS_VS",
1945 OP_3DSTATE_SAMPLER_STATE_POINTERS_VS
,
1946 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1948 {"3DSTATE_SAMPLER_STATE_POINTERS_HS",
1949 OP_3DSTATE_SAMPLER_STATE_POINTERS_HS
,
1950 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1952 {"3DSTATE_SAMPLER_STATE_POINTERS_DS",
1953 OP_3DSTATE_SAMPLER_STATE_POINTERS_DS
,
1954 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1956 {"3DSTATE_SAMPLER_STATE_POINTERS_GS",
1957 OP_3DSTATE_SAMPLER_STATE_POINTERS_GS
,
1958 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1960 {"3DSTATE_SAMPLER_STATE_POINTERS_PS",
1961 OP_3DSTATE_SAMPLER_STATE_POINTERS_PS
,
1962 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1964 {"3DSTATE_URB_VS", OP_3DSTATE_URB_VS
, F_LEN_VAR
, R_RCS
, D_ALL
,
1967 {"3DSTATE_URB_HS", OP_3DSTATE_URB_HS
, F_LEN_VAR
, R_RCS
, D_ALL
,
1970 {"3DSTATE_URB_DS", OP_3DSTATE_URB_DS
, F_LEN_VAR
, R_RCS
, D_ALL
,
1973 {"3DSTATE_URB_GS", OP_3DSTATE_URB_GS
, F_LEN_VAR
, R_RCS
, D_ALL
,
1976 {"3DSTATE_GATHER_CONSTANT_VS", OP_3DSTATE_GATHER_CONSTANT_VS
,
1977 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1979 {"3DSTATE_GATHER_CONSTANT_GS", OP_3DSTATE_GATHER_CONSTANT_GS
,
1980 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1982 {"3DSTATE_GATHER_CONSTANT_HS", OP_3DSTATE_GATHER_CONSTANT_HS
,
1983 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1985 {"3DSTATE_GATHER_CONSTANT_DS", OP_3DSTATE_GATHER_CONSTANT_DS
,
1986 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1988 {"3DSTATE_GATHER_CONSTANT_PS", OP_3DSTATE_GATHER_CONSTANT_PS
,
1989 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
1991 {"3DSTATE_DX9_CONSTANTF_VS", OP_3DSTATE_DX9_CONSTANTF_VS
,
1992 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 11, NULL
},
1994 {"3DSTATE_DX9_CONSTANTF_PS", OP_3DSTATE_DX9_CONSTANTF_PS
,
1995 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 11, NULL
},
1997 {"3DSTATE_DX9_CONSTANTI_VS", OP_3DSTATE_DX9_CONSTANTI_VS
,
1998 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2000 {"3DSTATE_DX9_CONSTANTI_PS", OP_3DSTATE_DX9_CONSTANTI_PS
,
2001 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2003 {"3DSTATE_DX9_CONSTANTB_VS", OP_3DSTATE_DX9_CONSTANTB_VS
,
2004 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2006 {"3DSTATE_DX9_CONSTANTB_PS", OP_3DSTATE_DX9_CONSTANTB_PS
,
2007 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2009 {"3DSTATE_DX9_LOCAL_VALID_VS", OP_3DSTATE_DX9_LOCAL_VALID_VS
,
2010 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2012 {"3DSTATE_DX9_LOCAL_VALID_PS", OP_3DSTATE_DX9_LOCAL_VALID_PS
,
2013 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2015 {"3DSTATE_DX9_GENERATE_ACTIVE_VS", OP_3DSTATE_DX9_GENERATE_ACTIVE_VS
,
2016 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2018 {"3DSTATE_DX9_GENERATE_ACTIVE_PS", OP_3DSTATE_DX9_GENERATE_ACTIVE_PS
,
2019 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2021 {"3DSTATE_BINDING_TABLE_EDIT_VS", OP_3DSTATE_BINDING_TABLE_EDIT_VS
,
2022 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 9, NULL
},
2024 {"3DSTATE_BINDING_TABLE_EDIT_GS", OP_3DSTATE_BINDING_TABLE_EDIT_GS
,
2025 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 9, NULL
},
2027 {"3DSTATE_BINDING_TABLE_EDIT_HS", OP_3DSTATE_BINDING_TABLE_EDIT_HS
,
2028 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 9, NULL
},
2030 {"3DSTATE_BINDING_TABLE_EDIT_DS", OP_3DSTATE_BINDING_TABLE_EDIT_DS
,
2031 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 9, NULL
},
2033 {"3DSTATE_BINDING_TABLE_EDIT_PS", OP_3DSTATE_BINDING_TABLE_EDIT_PS
,
2034 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 9, NULL
},
2036 {"3DSTATE_VF_INSTANCING", OP_3DSTATE_VF_INSTANCING
, F_LEN_VAR
, R_RCS
,
2037 D_BDW_PLUS
, 0, 8, NULL
},
2039 {"3DSTATE_VF_SGVS", OP_3DSTATE_VF_SGVS
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0, 8,
2042 {"3DSTATE_VF_TOPOLOGY", OP_3DSTATE_VF_TOPOLOGY
, F_LEN_VAR
, R_RCS
,
2043 D_BDW_PLUS
, 0, 8, NULL
},
2045 {"3DSTATE_WM_CHROMAKEY", OP_3DSTATE_WM_CHROMAKEY
, F_LEN_VAR
, R_RCS
,
2046 D_BDW_PLUS
, 0, 8, NULL
},
2048 {"3DSTATE_PS_BLEND", OP_3DSTATE_PS_BLEND
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0,
2051 {"3DSTATE_WM_DEPTH_STENCIL", OP_3DSTATE_WM_DEPTH_STENCIL
, F_LEN_VAR
,
2052 R_RCS
, D_BDW_PLUS
, 0, 8, NULL
},
2054 {"3DSTATE_PS_EXTRA", OP_3DSTATE_PS_EXTRA
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0,
2057 {"3DSTATE_RASTER", OP_3DSTATE_RASTER
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0, 8,
2060 {"3DSTATE_SBE_SWIZ", OP_3DSTATE_SBE_SWIZ
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0, 8,
2063 {"3DSTATE_WM_HZ_OP", OP_3DSTATE_WM_HZ_OP
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, 0, 8,
2066 {"3DSTATE_VERTEX_BUFFERS", OP_3DSTATE_VERTEX_BUFFERS
, F_LEN_VAR
, R_RCS
,
2067 D_BDW_PLUS
, 0, 8, NULL
},
2069 {"3DSTATE_VERTEX_ELEMENTS", OP_3DSTATE_VERTEX_ELEMENTS
, F_LEN_VAR
,
2070 R_RCS
, D_ALL
, 0, 8, NULL
},
2072 {"3DSTATE_INDEX_BUFFER", OP_3DSTATE_INDEX_BUFFER
, F_LEN_VAR
, R_RCS
,
2073 D_BDW_PLUS
, ADDR_FIX_1(2), 8, NULL
},
2075 {"3DSTATE_VF_STATISTICS", OP_3DSTATE_VF_STATISTICS
, F_LEN_CONST
,
2076 R_RCS
, D_ALL
, 0, 1, NULL
},
2078 {"3DSTATE_VF", OP_3DSTATE_VF
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2080 {"3DSTATE_CC_STATE_POINTERS", OP_3DSTATE_CC_STATE_POINTERS
, F_LEN_VAR
,
2081 R_RCS
, D_ALL
, 0, 8, NULL
},
2083 {"3DSTATE_SCISSOR_STATE_POINTERS", OP_3DSTATE_SCISSOR_STATE_POINTERS
,
2084 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2086 {"3DSTATE_GS", OP_3DSTATE_GS
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2088 {"3DSTATE_CLIP", OP_3DSTATE_CLIP
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2090 {"3DSTATE_WM", OP_3DSTATE_WM
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2092 {"3DSTATE_CONSTANT_GS", OP_3DSTATE_CONSTANT_GS
, F_LEN_VAR
, R_RCS
,
2093 D_BDW_PLUS
, 0, 8, NULL
},
2095 {"3DSTATE_CONSTANT_PS", OP_3DSTATE_CONSTANT_PS
, F_LEN_VAR
, R_RCS
,
2096 D_BDW_PLUS
, 0, 8, NULL
},
2098 {"3DSTATE_SAMPLE_MASK", OP_3DSTATE_SAMPLE_MASK
, F_LEN_VAR
, R_RCS
,
2101 {"3DSTATE_CONSTANT_HS", OP_3DSTATE_CONSTANT_HS
, F_LEN_VAR
, R_RCS
,
2102 D_BDW_PLUS
, 0, 8, NULL
},
2104 {"3DSTATE_CONSTANT_DS", OP_3DSTATE_CONSTANT_DS
, F_LEN_VAR
, R_RCS
,
2105 D_BDW_PLUS
, 0, 8, NULL
},
2107 {"3DSTATE_HS", OP_3DSTATE_HS
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2109 {"3DSTATE_TE", OP_3DSTATE_TE
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2111 {"3DSTATE_DS", OP_3DSTATE_DS
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2113 {"3DSTATE_STREAMOUT", OP_3DSTATE_STREAMOUT
, F_LEN_VAR
, R_RCS
,
2116 {"3DSTATE_SBE", OP_3DSTATE_SBE
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2118 {"3DSTATE_PS", OP_3DSTATE_PS
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2120 {"3DSTATE_DRAWING_RECTANGLE", OP_3DSTATE_DRAWING_RECTANGLE
, F_LEN_VAR
,
2121 R_RCS
, D_ALL
, 0, 8, NULL
},
2123 {"3DSTATE_SAMPLER_PALETTE_LOAD0", OP_3DSTATE_SAMPLER_PALETTE_LOAD0
,
2124 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2126 {"3DSTATE_CHROMA_KEY", OP_3DSTATE_CHROMA_KEY
, F_LEN_VAR
, R_RCS
, D_ALL
,
2129 {"3DSTATE_DEPTH_BUFFER", OP_3DSTATE_DEPTH_BUFFER
, F_LEN_VAR
, R_RCS
,
2130 D_ALL
, ADDR_FIX_1(2), 8, NULL
},
2132 {"3DSTATE_POLY_STIPPLE_OFFSET", OP_3DSTATE_POLY_STIPPLE_OFFSET
,
2133 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2135 {"3DSTATE_POLY_STIPPLE_PATTERN", OP_3DSTATE_POLY_STIPPLE_PATTERN
,
2136 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2138 {"3DSTATE_LINE_STIPPLE", OP_3DSTATE_LINE_STIPPLE
, F_LEN_VAR
, R_RCS
,
2141 {"3DSTATE_AA_LINE_PARAMS", OP_3DSTATE_AA_LINE_PARAMS
, F_LEN_VAR
, R_RCS
,
2144 {"3DSTATE_GS_SVB_INDEX", OP_3DSTATE_GS_SVB_INDEX
, F_LEN_VAR
, R_RCS
,
2147 {"3DSTATE_SAMPLER_PALETTE_LOAD1", OP_3DSTATE_SAMPLER_PALETTE_LOAD1
,
2148 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2150 {"3DSTATE_MULTISAMPLE", OP_3DSTATE_MULTISAMPLE_BDW
, F_LEN_VAR
, R_RCS
,
2151 D_BDW_PLUS
, 0, 8, NULL
},
2153 {"3DSTATE_STENCIL_BUFFER", OP_3DSTATE_STENCIL_BUFFER
, F_LEN_VAR
, R_RCS
,
2154 D_ALL
, ADDR_FIX_1(2), 8, NULL
},
2156 {"3DSTATE_HIER_DEPTH_BUFFER", OP_3DSTATE_HIER_DEPTH_BUFFER
, F_LEN_VAR
,
2157 R_RCS
, D_ALL
, ADDR_FIX_1(2), 8, NULL
},
2159 {"3DSTATE_CLEAR_PARAMS", OP_3DSTATE_CLEAR_PARAMS
, F_LEN_VAR
,
2160 R_RCS
, D_ALL
, 0, 8, NULL
},
2162 {"3DSTATE_PUSH_CONSTANT_ALLOC_VS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_VS
,
2163 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2165 {"3DSTATE_PUSH_CONSTANT_ALLOC_HS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_HS
,
2166 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2168 {"3DSTATE_PUSH_CONSTANT_ALLOC_DS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_DS
,
2169 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2171 {"3DSTATE_PUSH_CONSTANT_ALLOC_GS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_GS
,
2172 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2174 {"3DSTATE_PUSH_CONSTANT_ALLOC_PS", OP_3DSTATE_PUSH_CONSTANT_ALLOC_PS
,
2175 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2177 {"3DSTATE_MONOFILTER_SIZE", OP_3DSTATE_MONOFILTER_SIZE
, F_LEN_VAR
,
2178 R_RCS
, D_ALL
, 0, 8, NULL
},
2180 {"3DSTATE_SO_DECL_LIST", OP_3DSTATE_SO_DECL_LIST
, F_LEN_VAR
, R_RCS
,
2183 {"3DSTATE_SO_BUFFER", OP_3DSTATE_SO_BUFFER
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
,
2184 ADDR_FIX_2(2, 4), 8, NULL
},
2186 {"3DSTATE_BINDING_TABLE_POOL_ALLOC",
2187 OP_3DSTATE_BINDING_TABLE_POOL_ALLOC
,
2188 F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, ADDR_FIX_1(1), 8, NULL
},
2190 {"3DSTATE_GATHER_POOL_ALLOC", OP_3DSTATE_GATHER_POOL_ALLOC
,
2191 F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, ADDR_FIX_1(1), 8, NULL
},
2193 {"3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC",
2194 OP_3DSTATE_DX9_CONSTANT_BUFFER_POOL_ALLOC
,
2195 F_LEN_VAR
, R_RCS
, D_BDW_PLUS
, ADDR_FIX_1(1), 8, NULL
},
2197 {"3DSTATE_SAMPLE_PATTERN", OP_3DSTATE_SAMPLE_PATTERN
, F_LEN_VAR
, R_RCS
,
2198 D_BDW_PLUS
, 0, 8, NULL
},
2200 {"PIPE_CONTROL", OP_PIPE_CONTROL
, F_LEN_VAR
, R_RCS
, D_ALL
,
2201 ADDR_FIX_1(2), 8, cmd_handler_pipe_control
},
2203 {"3DPRIMITIVE", OP_3DPRIMITIVE
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2205 {"PIPELINE_SELECT", OP_PIPELINE_SELECT
, F_LEN_CONST
, R_RCS
, D_ALL
, 0,
2208 {"STATE_PREFETCH", OP_STATE_PREFETCH
, F_LEN_VAR
, R_RCS
, D_ALL
,
2209 ADDR_FIX_1(1), 8, NULL
},
2211 {"STATE_SIP", OP_STATE_SIP
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2213 {"STATE_BASE_ADDRESS", OP_STATE_BASE_ADDRESS
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
,
2214 ADDR_FIX_5(1, 3, 4, 5, 6), 8, NULL
},
2216 {"OP_3D_MEDIA_0_1_4", OP_3D_MEDIA_0_1_4
, F_LEN_VAR
, R_RCS
, D_ALL
,
2217 ADDR_FIX_1(1), 8, NULL
},
2219 {"3DSTATE_VS", OP_3DSTATE_VS
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2221 {"3DSTATE_SF", OP_3DSTATE_SF
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 8, NULL
},
2223 {"3DSTATE_CONSTANT_VS", OP_3DSTATE_CONSTANT_VS
, F_LEN_VAR
, R_RCS
, D_BDW_PLUS
,
2226 {"3DSTATE_COMPONENT_PACKING", OP_3DSTATE_COMPONENT_PACKING
, F_LEN_VAR
, R_RCS
,
2227 D_SKL_PLUS
, 0, 8, NULL
},
2229 {"MEDIA_INTERFACE_DESCRIPTOR_LOAD", OP_MEDIA_INTERFACE_DESCRIPTOR_LOAD
,
2230 F_LEN_VAR
, R_RCS
, D_ALL
, 0, 16, NULL
},
2232 {"MEDIA_GATEWAY_STATE", OP_MEDIA_GATEWAY_STATE
, F_LEN_VAR
, R_RCS
, D_ALL
,
2235 {"MEDIA_STATE_FLUSH", OP_MEDIA_STATE_FLUSH
, F_LEN_VAR
, R_RCS
, D_ALL
,
2238 {"MEDIA_OBJECT", OP_MEDIA_OBJECT
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 16, NULL
},
2240 {"MEDIA_CURBE_LOAD", OP_MEDIA_CURBE_LOAD
, F_LEN_VAR
, R_RCS
, D_ALL
,
2243 {"MEDIA_OBJECT_PRT", OP_MEDIA_OBJECT_PRT
, F_LEN_VAR
, R_RCS
, D_ALL
,
2246 {"MEDIA_OBJECT_WALKER", OP_MEDIA_OBJECT_WALKER
, F_LEN_VAR
, R_RCS
, D_ALL
,
2249 {"GPGPU_WALKER", OP_GPGPU_WALKER
, F_LEN_VAR
, R_RCS
, D_ALL
,
2252 {"MEDIA_VFE_STATE", OP_MEDIA_VFE_STATE
, F_LEN_VAR
, R_RCS
, D_ALL
, 0, 16,
2255 {"3DSTATE_VF_STATISTICS_GM45", OP_3DSTATE_VF_STATISTICS_GM45
,
2256 F_LEN_CONST
, R_ALL
, D_ALL
, 0, 1, NULL
},
2258 {"MFX_PIPE_MODE_SELECT", OP_MFX_PIPE_MODE_SELECT
, F_LEN_VAR
,
2259 R_VCS
, D_ALL
, 0, 12, NULL
},
2261 {"MFX_SURFACE_STATE", OP_MFX_SURFACE_STATE
, F_LEN_VAR
,
2262 R_VCS
, D_ALL
, 0, 12, NULL
},
2264 {"MFX_PIPE_BUF_ADDR_STATE", OP_MFX_PIPE_BUF_ADDR_STATE
, F_LEN_VAR
,
2265 R_VCS
, D_BDW_PLUS
, 0, 12, NULL
},
2267 {"MFX_IND_OBJ_BASE_ADDR_STATE", OP_MFX_IND_OBJ_BASE_ADDR_STATE
,
2268 F_LEN_VAR
, R_VCS
, D_BDW_PLUS
, 0, 12, NULL
},
2270 {"MFX_BSP_BUF_BASE_ADDR_STATE", OP_MFX_BSP_BUF_BASE_ADDR_STATE
,
2271 F_LEN_VAR
, R_VCS
, D_BDW_PLUS
, ADDR_FIX_3(1, 3, 5), 12, NULL
},
2273 {"OP_2_0_0_5", OP_2_0_0_5
, F_LEN_VAR
, R_VCS
, D_BDW_PLUS
, 0, 12, NULL
},
2275 {"MFX_STATE_POINTER", OP_MFX_STATE_POINTER
, F_LEN_VAR
,
2276 R_VCS
, D_ALL
, 0, 12, NULL
},
2278 {"MFX_QM_STATE", OP_MFX_QM_STATE
, F_LEN_VAR
,
2279 R_VCS
, D_ALL
, 0, 12, NULL
},
2281 {"MFX_FQM_STATE", OP_MFX_FQM_STATE
, F_LEN_VAR
,
2282 R_VCS
, D_ALL
, 0, 12, NULL
},
2284 {"MFX_PAK_INSERT_OBJECT", OP_MFX_PAK_INSERT_OBJECT
, F_LEN_VAR
,
2285 R_VCS
, D_ALL
, 0, 12, NULL
},
2287 {"MFX_STITCH_OBJECT", OP_MFX_STITCH_OBJECT
, F_LEN_VAR
,
2288 R_VCS
, D_ALL
, 0, 12, NULL
},
2290 {"MFD_IT_OBJECT", OP_MFD_IT_OBJECT
, F_LEN_VAR
,
2291 R_VCS
, D_ALL
, 0, 12, NULL
},
2293 {"MFX_WAIT", OP_MFX_WAIT
, F_LEN_VAR
,
2294 R_VCS
, D_ALL
, 0, 6, NULL
},
2296 {"MFX_AVC_IMG_STATE", OP_MFX_AVC_IMG_STATE
, F_LEN_VAR
,
2297 R_VCS
, D_ALL
, 0, 12, NULL
},
2299 {"MFX_AVC_QM_STATE", OP_MFX_AVC_QM_STATE
, F_LEN_VAR
,
2300 R_VCS
, D_ALL
, 0, 12, NULL
},
2302 {"MFX_AVC_DIRECTMODE_STATE", OP_MFX_AVC_DIRECTMODE_STATE
, F_LEN_VAR
,
2303 R_VCS
, D_ALL
, 0, 12, NULL
},
2305 {"MFX_AVC_SLICE_STATE", OP_MFX_AVC_SLICE_STATE
, F_LEN_VAR
,
2306 R_VCS
, D_ALL
, 0, 12, NULL
},
2308 {"MFX_AVC_REF_IDX_STATE", OP_MFX_AVC_REF_IDX_STATE
, F_LEN_VAR
,
2309 R_VCS
, D_ALL
, 0, 12, NULL
},
2311 {"MFX_AVC_WEIGHTOFFSET_STATE", OP_MFX_AVC_WEIGHTOFFSET_STATE
, F_LEN_VAR
,
2312 R_VCS
, D_ALL
, 0, 12, NULL
},
2314 {"MFD_AVC_PICID_STATE", OP_MFD_AVC_PICID_STATE
, F_LEN_VAR
,
2315 R_VCS
, D_ALL
, 0, 12, NULL
},
2316 {"MFD_AVC_DPB_STATE", OP_MFD_AVC_DPB_STATE
, F_LEN_VAR
,
2317 R_VCS
, D_ALL
, 0, 12, NULL
},
2319 {"MFD_AVC_BSD_OBJECT", OP_MFD_AVC_BSD_OBJECT
, F_LEN_VAR
,
2320 R_VCS
, D_ALL
, 0, 12, NULL
},
2322 {"MFD_AVC_SLICEADDR", OP_MFD_AVC_SLICEADDR
, F_LEN_VAR
,
2323 R_VCS
, D_ALL
, ADDR_FIX_1(2), 12, NULL
},
2325 {"MFC_AVC_PAK_OBJECT", OP_MFC_AVC_PAK_OBJECT
, F_LEN_VAR
,
2326 R_VCS
, D_ALL
, 0, 12, NULL
},
2328 {"MFX_VC1_PRED_PIPE_STATE", OP_MFX_VC1_PRED_PIPE_STATE
, F_LEN_VAR
,
2329 R_VCS
, D_ALL
, 0, 12, NULL
},
2331 {"MFX_VC1_DIRECTMODE_STATE", OP_MFX_VC1_DIRECTMODE_STATE
, F_LEN_VAR
,
2332 R_VCS
, D_ALL
, 0, 12, NULL
},
2334 {"MFD_VC1_SHORT_PIC_STATE", OP_MFD_VC1_SHORT_PIC_STATE
, F_LEN_VAR
,
2335 R_VCS
, D_ALL
, 0, 12, NULL
},
2337 {"MFD_VC1_LONG_PIC_STATE", OP_MFD_VC1_LONG_PIC_STATE
, F_LEN_VAR
,
2338 R_VCS
, D_ALL
, 0, 12, NULL
},
2340 {"MFD_VC1_BSD_OBJECT", OP_MFD_VC1_BSD_OBJECT
, F_LEN_VAR
,
2341 R_VCS
, D_ALL
, 0, 12, NULL
},
2343 {"MFC_MPEG2_SLICEGROUP_STATE", OP_MFC_MPEG2_SLICEGROUP_STATE
, F_LEN_VAR
,
2344 R_VCS
, D_ALL
, 0, 12, NULL
},
2346 {"MFC_MPEG2_PAK_OBJECT", OP_MFC_MPEG2_PAK_OBJECT
, F_LEN_VAR
,
2347 R_VCS
, D_ALL
, 0, 12, NULL
},
2349 {"MFX_MPEG2_PIC_STATE", OP_MFX_MPEG2_PIC_STATE
, F_LEN_VAR
,
2350 R_VCS
, D_ALL
, 0, 12, NULL
},
2352 {"MFX_MPEG2_QM_STATE", OP_MFX_MPEG2_QM_STATE
, F_LEN_VAR
,
2353 R_VCS
, D_ALL
, 0, 12, NULL
},
2355 {"MFD_MPEG2_BSD_OBJECT", OP_MFD_MPEG2_BSD_OBJECT
, F_LEN_VAR
,
2356 R_VCS
, D_ALL
, 0, 12, NULL
},
2358 {"MFX_2_6_0_0", OP_MFX_2_6_0_0
, F_LEN_VAR
, R_VCS
, D_ALL
,
2361 {"MFX_2_6_0_9", OP_MFX_2_6_0_9
, F_LEN_VAR
, R_VCS
, D_ALL
, 0, 16, NULL
},
2363 {"MFX_2_6_0_8", OP_MFX_2_6_0_8
, F_LEN_VAR
, R_VCS
, D_ALL
, 0, 16, NULL
},
2365 {"MFX_JPEG_PIC_STATE", OP_MFX_JPEG_PIC_STATE
, F_LEN_VAR
,
2366 R_VCS
, D_ALL
, 0, 12, NULL
},
2368 {"MFX_JPEG_HUFF_TABLE_STATE", OP_MFX_JPEG_HUFF_TABLE_STATE
, F_LEN_VAR
,
2369 R_VCS
, D_ALL
, 0, 12, NULL
},
2371 {"MFD_JPEG_BSD_OBJECT", OP_MFD_JPEG_BSD_OBJECT
, F_LEN_VAR
,
2372 R_VCS
, D_ALL
, 0, 12, NULL
},
2374 {"VEBOX_STATE", OP_VEB_STATE
, F_LEN_VAR
, R_VECS
, D_ALL
, 0, 12, NULL
},
2376 {"VEBOX_SURFACE_STATE", OP_VEB_SURFACE_STATE
, F_LEN_VAR
, R_VECS
, D_ALL
,
2379 {"VEB_DI_IECP", OP_VEB_DNDI_IECP_STATE
, F_LEN_VAR
, R_VECS
, D_BDW_PLUS
,
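/* Command entries that match the running device are hashed by opcode into
 * gvt->cmd_table at init time, so the per-command lookup in the scanner is
 * a hash probe rather than a linear walk of cmd_info[].
 */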
static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
{
	hash_add(gvt->cmd_table, &e->hlist, e->info->opcode);
}
#define GVT_MAX_CMD_LENGTH	20  /* In Dword */

static void trace_cs_command(struct parser_exec_state *s,
		cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler)
{
	/* This buffer is used by ftrace to store all commands copied from
	 * guest gma space. Sometimes commands can cross pages, and that
	 * should not be handled in the ftrace logic, so this is just used
	 * as a bounce buffer.
	 */
	u32 cmd_trace_buf[GVT_MAX_CMD_LENGTH];
	int i;
	u32 cmd_len = cmd_length(s);

	/* The chosen value of GVT_MAX_CMD_LENGTH is based on the following
	 * two considerations:
	 * 1) From observation, most common ring commands are not that long.
	 *    But there are exceptions, so it makes sense to observe longer
	 *    commands as well.
	 * 2) From the performance and debugging point of view, dumping the
	 *    full contents of every command is not necessary.
	 * We might shrink GVT_MAX_CMD_LENGTH or remove this trace event in
	 * the future for performance considerations.
	 */
	if (unlikely(cmd_len > GVT_MAX_CMD_LENGTH)) {
		gvt_dbg_cmd("cmd length exceeds tracing limitation!\n");
		cmd_len = GVT_MAX_CMD_LENGTH;
	}

	for (i = 0; i < cmd_len; i++)
		cmd_trace_buf[i] = cmd_val(s, i);

	trace_gvt_command(s->vgpu->id, s->ring_id, s->ip_gma, cmd_trace_buf,
			cmd_len, s->buf_type == RING_BUFFER_INSTRUCTION,
			cost_pre_cmd_handler, cost_cmd_handler);
}
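/* cmd_parser_exec() snapshots the cycle counter before the opcode lookup,
 * after it, and after the command handler runs, so trace_cs_command() can
 * report the pre-handler cost (t1 - t0) and the handler cost (t2 - t1)
 * separately.
 */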
/* call the cmd handler, and advance ip */
static int cmd_parser_exec(struct parser_exec_state *s)
{
	struct cmd_info *info;
	u32 cmd;
	int ret = 0;
	cycles_t t0, t1, t2;
	struct parser_exec_state s_before_advance_custom;

	t0 = get_cycles();

	cmd = cmd_val(s, 0);

	info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
	if (info == NULL) {
		gvt_err("unknown cmd 0x%x, opcode=0x%x\n",
				cmd, get_opcode(cmd, s->ring_id));
		return -EINVAL;
	}

	gvt_dbg_cmd("%s\n", info->name);

	s->info = info;

	t1 = get_cycles();

	memcpy(&s_before_advance_custom, s, sizeof(struct parser_exec_state));

	if (info->handler) {
		ret = info->handler(s);
		if (ret < 0) {
			gvt_err("%s handler error\n", info->name);
			return ret;
		}
	}

	t2 = get_cycles();

	trace_cs_command(&s_before_advance_custom, t1 - t0, t2 - t1);

	if (!(info->flag & F_IP_ADVANCE_CUSTOM)) {
		ret = cmd_advance_default(s);
		if (ret) {
			gvt_err("%s IP advance error\n", info->name);
			return ret;
		}
	}
	return 0;
}
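/* A guest ring buffer may wrap, leaving the tail at a lower graphics memory
 * address than the head. gma_out_of_range() therefore accepts either a
 * linear window [head, tail] or, when tail < head, the two disjoint ranges
 * that a wrapped buffer occupies.
 */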
static inline bool gma_out_of_range(unsigned long gma,
		unsigned long gma_head, unsigned int gma_tail)
{
	if (gma_tail >= gma_head)
		return (gma < gma_head) || (gma > gma_tail);
	else
		return (gma > gma_tail) && (gma < gma_head);
}
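/* Scan commands one by one from gma_head until s->ip_gma reaches gma_tail;
 * cmd_parser_exec() advances s->ip_gma after each command. For a wrapped
 * ring (e.g. head = 0x100, tail = 0x80), gma_out_of_range() keeps the
 * instruction pointer out of the dead gap between tail and head.
 */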
static int command_scan(struct parser_exec_state *s,
		unsigned long rb_head, unsigned long rb_tail,
		unsigned long rb_start, unsigned long rb_len)
{
	unsigned long gma_head, gma_tail, gma_bottom;
	int ret = 0;

	gma_head = rb_start + rb_head;
	gma_tail = rb_start + rb_tail;
	gma_bottom = rb_start + rb_len;

	gvt_dbg_cmd("scan_start: start=%lx end=%lx\n", gma_head, gma_tail);

	while (s->ip_gma != gma_tail) {
		if (s->buf_type == RING_BUFFER_INSTRUCTION) {
			if (!(s->ip_gma >= rb_start) ||
				!(s->ip_gma < gma_bottom)) {
				gvt_err("ip_gma %lx out of ring scope."
					"(base:0x%lx, bottom: 0x%lx)\n",
					s->ip_gma, rb_start,
					gma_bottom);
				parser_exec_state_dump(s);
				return -EINVAL;
			}
			if (gma_out_of_range(s->ip_gma, gma_head, gma_tail)) {
				gvt_err("ip_gma %lx out of range."
					"base 0x%lx head 0x%lx tail 0x%lx\n",
					s->ip_gma, rb_start,
					rb_head, rb_tail);
				parser_exec_state_dump(s);
				break;
			}
		}
		ret = cmd_parser_exec(s);
		if (ret) {
			gvt_err("cmd parser error\n");
			parser_exec_state_dump(s);
			break;
		}
	}

	gvt_dbg_cmd("scan_end\n");

	return ret;
}
static int scan_workload(struct intel_vgpu_workload *workload)
{
	unsigned long gma_head, gma_tail, gma_bottom;
	struct parser_exec_state s;
	int ret = 0;

	/* ring base is page aligned */
	if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
		return -EINVAL;

	gma_head = workload->rb_start + workload->rb_head;
	gma_tail = workload->rb_start + workload->rb_tail;
	gma_bottom = workload->rb_start + _RING_CTL_BUF_SIZE(workload->rb_ctl);

	s.buf_type = RING_BUFFER_INSTRUCTION;
	s.buf_addr_type = GTT_BUFFER;
	s.vgpu = workload->vgpu;
	s.ring_id = workload->ring_id;
	s.ring_start = workload->rb_start;
	s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
	s.ring_head = gma_head;
	s.ring_tail = gma_tail;
	s.rb_va = workload->shadow_ring_buffer_va;
	s.workload = workload;

	if ((bypass_scan_mask & (1 << workload->ring_id)) ||
		gma_head == gma_tail)
		return 0;

	ret = ip_gma_set(&s, gma_head);
	if (ret)
		return ret;

	ret = command_scan(&s, workload->rb_head, workload->rb_tail,
		workload->rb_start, _RING_CTL_BUF_SIZE(workload->rb_ctl));

	return ret;
}
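/* Scan the indirect (workaround) context image the same way as a ring. The
 * 3 extra dwords past indirect_ctx.size in ring_tail leave room for the
 * MI_BATCH_BUFFER_START sequence that combine_wa_ctx() appends there, so
 * the scan covers that chaining command as well.
 */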
static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	unsigned long gma_head, gma_tail, gma_bottom, ring_size, ring_tail;
	struct parser_exec_state s;
	int ret = 0;

	/* ring base is page aligned */
	if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
					GTT_PAGE_SIZE)))
		return -EINVAL;

	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
			PAGE_SIZE);
	gma_head = wa_ctx->indirect_ctx.guest_gma;
	gma_tail = wa_ctx->indirect_ctx.guest_gma + ring_tail;
	gma_bottom = wa_ctx->indirect_ctx.guest_gma + ring_size;

	s.buf_type = RING_BUFFER_INSTRUCTION;
	s.buf_addr_type = GTT_BUFFER;
	s.vgpu = wa_ctx->workload->vgpu;
	s.ring_id = wa_ctx->workload->ring_id;
	s.ring_start = wa_ctx->indirect_ctx.guest_gma;
	s.ring_size = ring_size;
	s.ring_head = gma_head;
	s.ring_tail = gma_tail;
	s.rb_va = wa_ctx->indirect_ctx.shadow_va;
	s.workload = wa_ctx->workload;

	ret = ip_gma_set(&s, gma_head);
	if (ret)
		return ret;

	ret = command_scan(&s, 0, ring_tail,
		wa_ctx->indirect_ctx.guest_gma, ring_size);

	return ret;
}
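/* Copy the guest ring contents into a shadow ring buffer before scanning.
 * rb_len is the modular distance from head to tail, e.g. with a 0x4000 byte
 * ring, head = 0x3f00 and tail = 0x0100 give
 * (0x0100 + 0x4000 - 0x3f00) % 0x4000 = 0x0200 bytes, copied in two chunks:
 * head to top, then start to tail.
 */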
static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
{
	struct intel_vgpu *vgpu = workload->vgpu;
	int ring_id = workload->ring_id;
	struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
	struct intel_ring *ring = shadow_ctx->engine[ring_id].ring;
	unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
	unsigned int copy_len = 0;
	int ret;

	guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);

	/* calculate workload ring buffer size */
	workload->rb_len = (workload->rb_tail + guest_rb_size -
			workload->rb_head) % guest_rb_size;

	gma_head = workload->rb_start + workload->rb_head;
	gma_tail = workload->rb_start + workload->rb_tail;
	gma_top = workload->rb_start + guest_rb_size;

	/* allocate shadow ring buffer */
	ret = intel_ring_begin(workload->req, workload->rb_len / 4);
	if (ret)
		return ret;

	/* get shadow ring buffer va */
	workload->shadow_ring_buffer_va = ring->vaddr + ring->tail;

	/* head > tail --> copy head <-> top */
	if (gma_head > gma_tail) {
		ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
				gma_head, gma_top,
				workload->shadow_ring_buffer_va);
		if (ret < 0) {
			gvt_err("fail to copy guest ring buffer\n");
			return ret;
		}
		copy_len = gma_top - gma_head;
		gma_head = workload->rb_start;
	}

	/* copy head or start <-> tail */
	ret = copy_gma_to_hva(vgpu, vgpu->gtt.ggtt_mm,
			gma_head, gma_tail,
			workload->shadow_ring_buffer_va + copy_len);
	if (ret < 0) {
		gvt_err("fail to copy guest ring buffer\n");
		return ret;
	}

	ring->tail += workload->rb_len;
	intel_ring_advance(ring);
	return 0;
}
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
{
	int ret;

	ret = shadow_workload_ring_buffer(workload);
	if (ret) {
		gvt_err("fail to shadow workload ring_buffer\n");
		return ret;
	}

	ret = scan_workload(workload);
	if (ret) {
		gvt_err("scan workload error\n");
		return ret;
	}
	return 0;
}
static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ctx_size = wa_ctx->indirect_ctx.size;
	unsigned long guest_gma = wa_ctx->indirect_ctx.guest_gma;
	struct drm_i915_gem_object *obj;
	int ret = 0;
	void *map;

	obj = i915_gem_object_create(wa_ctx->workload->vgpu->gvt->dev_priv,
				     roundup(ctx_size + CACHELINE_BYTES,
					     PAGE_SIZE));
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* get the va of the shadow batch buffer */
	map = i915_gem_object_pin_map(obj, I915_MAP_WB);
	if (IS_ERR(map)) {
		gvt_err("failed to vmap shadow indirect ctx\n");
		ret = PTR_ERR(map);
		goto put_obj;
	}

	ret = i915_gem_object_set_to_cpu_domain(obj, false);
	if (ret) {
		gvt_err("failed to set shadow indirect ctx to CPU\n");
		goto unmap_src;
	}

	ret = copy_gma_to_hva(wa_ctx->workload->vgpu,
				wa_ctx->workload->vgpu->gtt.ggtt_mm,
				guest_gma, guest_gma + ctx_size,
				map);
	if (ret < 0) {
		gvt_err("fail to copy guest indirect ctx\n");
		goto unmap_src;
	}

	wa_ctx->indirect_ctx.obj = obj;
	wa_ctx->indirect_ctx.shadow_va = map;
	return 0;

unmap_src:
	i915_gem_object_unpin_map(obj);
put_obj:
	i915_gem_object_put(obj);
	return ret;
}
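/* Append a batch-buffer chain at the end of the shadowed indirect context:
 * 0x18800001 encodes MI_BATCH_BUFFER_START (opcode 0x31 in bits 28:23) with
 * a dword length of 1, and the following dword holds the guest address of
 * the per-context buffer, so execution continues there once the indirect
 * context completes.
 */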
static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
	unsigned char *bb_start_sva;

	per_ctx_start[0] = 0x18800001;
	per_ctx_start[1] = wa_ctx->per_ctx.guest_gma;

	bb_start_sva = (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
				wa_ctx->indirect_ctx.size;

	memcpy(bb_start_sva, per_ctx_start, CACHELINE_BYTES);

	return 0;
}
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
	int ret;

	if (wa_ctx->indirect_ctx.size == 0)
		return 0;

	ret = shadow_indirect_ctx(wa_ctx);
	if (ret) {
		gvt_err("fail to shadow indirect ctx\n");
		return ret;
	}

	combine_wa_ctx(wa_ctx);

	ret = scan_wa_ctx(wa_ctx);
	if (ret) {
		gvt_err("scan wa ctx error\n");
		return ret;
	}

	return 0;
}
static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
		unsigned int opcode, int rings)
{
	struct cmd_info *info = NULL;
	unsigned int ring;

	for_each_set_bit(ring, (unsigned long *)&rings, I915_NUM_ENGINES) {
		info = find_cmd_entry(gvt, opcode, ring);
		if (info)
			break;
	}
	return info;
}
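/* Build the opcode hash table, keeping only the cmd_info[] entries that
 * apply to the detected device generation. find_cmd_entry_any_ring() is
 * used to reject duplicate opcodes that would shadow each other on any
 * ring an entry claims.
 */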
static int init_cmd_table(struct intel_gvt *gvt)
{
	int i;
	struct cmd_entry *e;
	struct cmd_info *info;
	unsigned int gen_type;

	gen_type = intel_gvt_get_device_type(gvt);

	for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
		if (!(cmd_info[i].devices & gen_type))
			continue;

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		e->info = &cmd_info[i];
		info = find_cmd_entry_any_ring(gvt,
				e->info->opcode, e->info->rings);
		if (info) {
			gvt_err("%s %s duplicated\n", e->info->name,
					info->name);
			return -EEXIST;
		}

		INIT_HLIST_NODE(&e->hlist);
		add_cmd_entry(gvt, e);
		gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
				e->info->name, e->info->opcode, e->info->flag,
				e->info->devices, e->info->rings);
	}
	return 0;
}
static void clean_cmd_table(struct intel_gvt *gvt)
{
	struct hlist_node *tmp;
	struct cmd_entry *e;
	int i;

	hash_for_each_safe(gvt->cmd_table, i, tmp, e, hlist)
		kfree(e);

	hash_init(gvt->cmd_table);
}
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt)
{
	clean_cmd_table(gvt);
}
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt)
{
	int ret;

	ret = init_cmd_table(gvt);
	if (ret)
		intel_gvt_clean_cmd_parser(gvt
);