/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */
32 | ||
33 | #ifndef _GVT_H_ | |
34 | #define _GVT_H_ | |
35 | ||
36 | #include "debug.h" | |
37 | #include "hypercall.h" | |
12d14cc4 | 38 | #include "mmio.h" |
82d375d1 | 39 | #include "reg.h" |
c8fe6a68 | 40 | #include "interrupt.h" |
2707e444 | 41 | #include "gtt.h" |
04d348ae ZW |
42 | #include "display.h" |
43 | #include "edid.h" | |
8453d674 | 44 | #include "execlist.h" |
28c4c6ca | 45 | #include "scheduler.h" |
4b63960e | 46 | #include "sched_policy.h" |
0ad35fed ZW |
47 | |
/* Maximum number of vGPU instances one physical GPU may host. */
#define GVT_MAX_VGPU 8

/* Hypervisor back-end selected at host init (intel_gvt_host.hypervisor_type). */
enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};
54 | ||
55 | struct intel_gvt_host { | |
56 | bool initialized; | |
57 | int hypervisor_type; | |
58 | struct intel_gvt_mpt *mpt; | |
59 | }; | |
60 | ||
61 | extern struct intel_gvt_host intel_gvt_host; | |
62 | ||
63 | /* Describe per-platform limitations. */ | |
64 | struct intel_gvt_device_info { | |
65 | u32 max_support_vgpus; | |
579cea5f | 66 | u32 cfg_space_size; |
c8fe6a68 | 67 | u32 mmio_size; |
579cea5f | 68 | u32 mmio_bar; |
c8fe6a68 | 69 | unsigned long msi_cap_offset; |
2707e444 ZW |
70 | u32 gtt_start_offset; |
71 | u32 gtt_entry_size; | |
72 | u32 gtt_entry_size_shift; | |
0ad35fed ZW |
73 | }; |
74 | ||
28a60dee ZW |
75 | /* GM resources owned by a vGPU */ |
76 | struct intel_vgpu_gm { | |
77 | u64 aperture_sz; | |
78 | u64 hidden_sz; | |
79 | struct drm_mm_node low_gm_node; | |
80 | struct drm_mm_node high_gm_node; | |
81 | }; | |
82 | ||
83 | #define INTEL_GVT_MAX_NUM_FENCES 32 | |
84 | ||
85 | /* Fences owned by a vGPU */ | |
86 | struct intel_vgpu_fence { | |
87 | struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES]; | |
88 | u32 base; | |
89 | u32 size; | |
90 | }; | |
91 | ||
82d375d1 ZW |
92 | struct intel_vgpu_mmio { |
93 | void *vreg; | |
94 | void *sreg; | |
e39c5add | 95 | bool disable_warn_untrack; |
82d375d1 ZW |
96 | }; |
97 | ||
98 | #define INTEL_GVT_MAX_CFG_SPACE_SZ 256 | |
99 | #define INTEL_GVT_MAX_BAR_NUM 4 | |
100 | ||
101 | struct intel_vgpu_pci_bar { | |
102 | u64 size; | |
103 | bool tracked; | |
104 | }; | |
105 | ||
106 | struct intel_vgpu_cfg_space { | |
107 | unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ]; | |
108 | struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM]; | |
109 | }; | |
110 | ||
111 | #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space) | |
112 | ||
04d348ae ZW |
113 | #define INTEL_GVT_MAX_PIPE 4 |
114 | ||
c8fe6a68 ZW |
115 | struct intel_vgpu_irq { |
116 | bool irq_warn_once[INTEL_GVT_EVENT_MAX]; | |
04d348ae ZW |
117 | DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE], |
118 | INTEL_GVT_EVENT_MAX); | |
c8fe6a68 ZW |
119 | }; |
120 | ||
4d60c5fd ZW |
121 | struct intel_vgpu_opregion { |
122 | void *va; | |
123 | u32 gfn[INTEL_GVT_OPREGION_PAGES]; | |
124 | struct page *pages[INTEL_GVT_OPREGION_PAGES]; | |
125 | }; | |
126 | ||
127 | #define vgpu_opregion(vgpu) (&(vgpu->opregion)) | |
128 | ||
04d348ae ZW |
129 | #define INTEL_GVT_MAX_PORT 5 |
130 | ||
131 | struct intel_vgpu_display { | |
132 | struct intel_vgpu_i2c_edid i2c_edid; | |
133 | struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT]; | |
134 | struct intel_vgpu_sbi sbi; | |
135 | }; | |
136 | ||
0ad35fed ZW |
137 | struct intel_vgpu { |
138 | struct intel_gvt *gvt; | |
139 | int id; | |
140 | unsigned long handle; /* vGPU handle used by hypervisor MPT modules */ | |
82d375d1 ZW |
141 | bool active; |
142 | bool resetting; | |
4b63960e | 143 | void *sched_data; |
28a60dee ZW |
144 | |
145 | struct intel_vgpu_fence fence; | |
146 | struct intel_vgpu_gm gm; | |
82d375d1 ZW |
147 | struct intel_vgpu_cfg_space cfg_space; |
148 | struct intel_vgpu_mmio mmio; | |
c8fe6a68 | 149 | struct intel_vgpu_irq irq; |
2707e444 | 150 | struct intel_vgpu_gtt gtt; |
4d60c5fd | 151 | struct intel_vgpu_opregion opregion; |
04d348ae | 152 | struct intel_vgpu_display display; |
8453d674 | 153 | struct intel_vgpu_execlist execlist[I915_NUM_ENGINES]; |
28c4c6ca ZW |
154 | struct list_head workload_q_head[I915_NUM_ENGINES]; |
155 | struct kmem_cache *workloads; | |
e4734057 ZW |
156 | atomic_t running_workload_num; |
157 | struct i915_gem_context *shadow_ctx; | |
158 | struct notifier_block shadow_ctx_notifier_block; | |
28a60dee ZW |
159 | }; |
160 | ||
/* Host-wide accounting of GM space handed out to vGPUs. */
struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

/* Host-wide accounting of fence registers handed out to vGPUs. */
struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};
169 | ||
12d14cc4 ZW |
170 | #define INTEL_GVT_MMIO_HASH_BITS 9 |
171 | ||
172 | struct intel_gvt_mmio { | |
173 | u32 *mmio_attribute; | |
174 | DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); | |
175 | }; | |
176 | ||
579cea5f ZW |
177 | struct intel_gvt_firmware { |
178 | void *cfg_space; | |
179 | void *mmio; | |
180 | bool firmware_loaded; | |
181 | }; | |
182 | ||
4d60c5fd ZW |
183 | struct intel_gvt_opregion { |
184 | void *opregion_va; | |
185 | u32 opregion_pa; | |
186 | }; | |
187 | ||
0ad35fed ZW |
188 | struct intel_gvt { |
189 | struct mutex lock; | |
190 | bool initialized; | |
191 | ||
192 | struct drm_i915_private *dev_priv; | |
193 | struct idr vgpu_idr; /* vGPU IDR pool */ | |
194 | ||
195 | struct intel_gvt_device_info device_info; | |
28a60dee ZW |
196 | struct intel_gvt_gm gm; |
197 | struct intel_gvt_fence fence; | |
12d14cc4 | 198 | struct intel_gvt_mmio mmio; |
579cea5f | 199 | struct intel_gvt_firmware firmware; |
c8fe6a68 | 200 | struct intel_gvt_irq irq; |
2707e444 | 201 | struct intel_gvt_gtt gtt; |
4d60c5fd | 202 | struct intel_gvt_opregion opregion; |
28c4c6ca | 203 | struct intel_gvt_workload_scheduler scheduler; |
04d348ae ZW |
204 | |
205 | struct task_struct *service_thread; | |
206 | wait_queue_head_t service_thread_wq; | |
207 | unsigned long service_request; | |
0ad35fed ZW |
208 | }; |
209 | ||
04d348ae ZW |
/* Bit indices for intel_gvt.service_request. */
enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,
};
213 | ||
214 | static inline void intel_gvt_request_service(struct intel_gvt *gvt, | |
215 | int service) | |
216 | { | |
217 | set_bit(service, (void *)&gvt->service_request); | |
218 | wake_up(&gvt->service_thread_wq); | |
219 | } | |
220 | ||
579cea5f ZW |
/* Load/free the host state snapshot used as vGPU initial state. */
void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);
223 | ||
28a60dee ZW |
/*
 * Aperture/GM space definitions for GVT device.
 * All macro arguments are parenthesized on expansion so expression
 * arguments are safe (kernel macro convention).
 */
#define gvt_aperture_sz(gvt)		((gvt)->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt)	((gvt)->dev_priv->ggtt.mappable_base)

#define gvt_ggtt_gm_sz(gvt)		((gvt)->dev_priv->ggtt.base.total)
/* total GGTT size in bytes of PTEs: one 8-byte entry per page */
#define gvt_ggtt_sz(gvt) \
	(((gvt)->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt)		((gvt)->dev_priv->num_fence_regs)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base((vgpu)->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) ((vgpu)->fence.base)
#define vgpu_fence_sz(vgpu) ((vgpu)->fence.size)
268 | ||
269 | struct intel_vgpu_creation_params { | |
270 | __u64 handle; | |
271 | __u64 low_gm_sz; /* in MB */ | |
272 | __u64 high_gm_sz; /* in MB */ | |
273 | __u64 fence_sz; | |
274 | __s32 primary; | |
275 | __u64 vgpu_id; | |
276 | }; | |
277 | ||
278 | int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu, | |
279 | struct intel_vgpu_creation_params *param); | |
280 | void intel_vgpu_free_resource(struct intel_vgpu *vgpu); | |
281 | void intel_vgpu_write_fence(struct intel_vgpu *vgpu, | |
282 | u32 fence, u64 value); | |
283 | ||
82d375d1 ZW |
/*
 * Macros for easily accessing vGPU virtual/shadow register.
 * Arguments are parenthesized on expansion so expression arguments
 * are safe (kernel macro convention).
 */
#define vgpu_vreg(vgpu, reg) \
	(*(u32 *)((vgpu)->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
	(*(u8 *)((vgpu)->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
	(*(u16 *)((vgpu)->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
	(*(u64 *)((vgpu)->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
	(*(u32 *)((vgpu)->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
	(*(u8 *)((vgpu)->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
	(*(u16 *)((vgpu)->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
	(*(u64 *)((vgpu)->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))

/* Iterate all vGPUs in the IDR pool, skipping inactive ones. */
#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if((vgpu)->active)
305 | ||
306 | static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu, | |
307 | u32 offset, u32 val, bool low) | |
308 | { | |
309 | u32 *pval; | |
310 | ||
311 | /* BAR offset should be 32 bits algiend */ | |
312 | offset = rounddown(offset, 4); | |
313 | pval = (u32 *)(vgpu_cfg_space(vgpu) + offset); | |
314 | ||
315 | if (low) { | |
316 | /* | |
317 | * only update bit 31 - bit 4, | |
318 | * leave the bit 3 - bit 0 unchanged. | |
319 | */ | |
320 | *pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0)); | |
321 | } | |
322 | } | |
323 | ||
/* Create/destroy a vGPU instance. */
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_creation_params *
					 param);

void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
329 | ||
2707e444 ZW |
330 | /* validating GM functions */ |
331 | #define vgpu_gmadr_is_aperture(vgpu, gmadr) \ | |
332 | ((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \ | |
333 | (gmadr <= vgpu_aperture_gmadr_end(vgpu))) | |
334 | ||
335 | #define vgpu_gmadr_is_hidden(vgpu, gmadr) \ | |
336 | ((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \ | |
337 | (gmadr <= vgpu_hidden_gmadr_end(vgpu))) | |
338 | ||
339 | #define vgpu_gmadr_is_valid(vgpu, gmadr) \ | |
340 | ((vgpu_gmadr_is_aperture(vgpu, gmadr) || \ | |
341 | (vgpu_gmadr_is_hidden(vgpu, gmadr)))) | |
342 | ||
343 | #define gvt_gmadr_is_aperture(gvt, gmadr) \ | |
344 | ((gmadr >= gvt_aperture_gmadr_base(gvt)) && \ | |
345 | (gmadr <= gvt_aperture_gmadr_end(gvt))) | |
346 | ||
347 | #define gvt_gmadr_is_hidden(gvt, gmadr) \ | |
348 | ((gmadr >= gvt_hidden_gmadr_base(gvt)) && \ | |
349 | (gmadr <= gvt_hidden_gmadr_end(gvt))) | |
350 | ||
351 | #define gvt_gmadr_is_valid(gvt, gmadr) \ | |
352 | (gvt_gmadr_is_aperture(gvt, gmadr) || \ | |
353 | gvt_gmadr_is_hidden(gvt, gmadr)) | |
354 | ||
355 | bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size); | |
356 | int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr); | |
357 | int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr); | |
358 | int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index, | |
359 | unsigned long *h_index); | |
360 | int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index, | |
361 | unsigned long *g_index); | |
4d60c5fd ZW |
362 | |
363 | int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset, | |
364 | void *p_data, unsigned int bytes); | |
365 | ||
366 | int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset, | |
367 | void *p_data, unsigned int bytes); | |
368 | ||
369 | void intel_gvt_clean_opregion(struct intel_gvt *gvt); | |
370 | int intel_gvt_init_opregion(struct intel_gvt *gvt); | |
371 | ||
372 | void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu); | |
373 | int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa); | |
374 | ||
375 | int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci); | |
376 | ||
0ad35fed ZW |
377 | #include "mpt.h" |
378 | ||
379 | #endif |