/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#ifndef _GVT_H_
#define _GVT_H_

#include "debug.h"
#include "hypercall.h"
#include "mmio.h"
#include "reg.h"
#include "interrupt.h"
#include "gtt.h"
#include "display.h"
#include "edid.h"
#include "execlist.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "render.h"
#include "cmd_parser.h"

#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	bool initialized;
	int hypervisor_type;
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	u32 cfg_space_size;
	u32 mmio_size;
	u32 mmio_bar;
	unsigned long msi_cap_offset;
	u32 gtt_start_offset;
	u32 gtt_entry_size;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
	u32 max_surface_size;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
	bool disable_warn_untrack;
};

#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	void *va;
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
	struct page *pages[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

#define INTEL_GVT_MAX_PORT 5

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
	struct intel_vgpu_sbi sbi;
};

struct vgpu_sched_ctl {
	int weight;
};

struct intel_vgpu {
	struct intel_gvt *gvt;
	int id;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	bool active;
	bool pv_notified;
	bool failsafe;
	unsigned int resetting_eng;
	void *sched_data;
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	struct i915_gem_context *shadow_ctx;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct {
		struct mdev_device *mdev;
		struct vfio_region *region;
		int num_regions;
		struct eventfd_ctx *intx_trigger;
		struct eventfd_ctx *msi_trigger;
		struct rb_root cache;
		struct mutex cache_lock;
		struct notifier_block iommu_notifier;
		struct notifier_block group_notifier;
		struct kvm *kvm;
		struct work_struct release_work;
		atomic_t released;
	} vdev;
#endif
};

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

/* Special MMIO blocks. */
struct gvt_mmio_block {
	unsigned int device;
	i915_reg_t offset;
	unsigned int size;
	gvt_mmio_func read;
	gvt_mmio_func write;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
	u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED	(1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)

	struct gvt_mmio_block *mmio_block;
	unsigned int num_mmio_block;

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned int num_tracked_mmio;
};

struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

struct intel_gvt_opregion {
	void *opregion_va;
	u32 opregion_pa;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
	char name[16];
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	unsigned int fence;
	unsigned int weight;
	enum intel_vgpu_edid resolution;
};

struct intel_gvt {
	struct mutex lock;
	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_opregion opregion;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,

	/* Scheduling trigger by timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling trigger by event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}

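/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): how a caller is expected to kick the GVT service thread. The
 * function below only demonstrates the pattern; real call sites live in
 * the GVT display emulation and scheduling code.
 */
static inline void example_request_vblank_emulation(struct intel_gvt *gvt)
{
	/* Set the request bit and wake the service thread to handle it. */
	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EMULATE_VBLANK);
}
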
void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Aperture/GM space definitions for GVT device */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt)	  (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)

#define gvt_ggtt_gm_sz(gvt)	  (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt)	  (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)

#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

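/*
 * Illustrative sketch (assumption, not part of the original header): global
 * graphics memory is split into a CPU-mappable aperture at the bottom and a
 * "hidden" (high) range above it, and vGPU allocations are carved out of
 * both. The hypothetical helper below estimates how much hidden GM is still
 * unallocated; it deliberately ignores any host-reserved portion (e.g.
 * HOST_HIGH_GM_SIZE) for simplicity.
 */
static inline u64 example_unallocated_hidden_gm(struct intel_gvt *gvt)
{
	/* Hidden GM is everything above the aperture in GGTT-managed GM. */
	return gvt_hidden_sz(gvt) - gvt->gm.vgpu_allocated_high_gm_size;
}
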
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu)	((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu)	((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu)		((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu)		((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

struct intel_vgpu_creation_params {
	__u64 handle;
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
	__u64 fence_sz;
	__u64 resolution;
	__s32 primary;
	__u64 vgpu_id;

	__u32 weight;
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);

/* Macros for easily accessing vGPU virtual/shadow register */
#define vgpu_vreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))

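/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): the accessors above alias straight into the vGPU's virtual
 * (vreg) and shadow (sreg) register pages, so emulation code can
 * read-modify-write a virtual register like ordinary memory.
 */
static inline u32 example_vgpu_vreg_set_bits(struct intel_vgpu *vgpu,
		i915_reg_t reg, u32 bits)
{
	/* Update only the guest-visible (virtual) copy of the register. */
	vgpu_vreg(vgpu, reg) |= bits;
	return vgpu_vreg(vgpu, reg);
}
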
#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)

static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
		u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32-bit aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * Only update bits 31..4 and leave bits 3..0
		 * (the BAR's type/flag bits) unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	} else {
		*pval = val;
	}
}

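/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): a 64-bit BAR occupies two consecutive config-space dwords, so
 * config-space emulation updates the pair with @low selecting which half
 * is written; the lower dword keeps its type/flag bits and the upper
 * dword is stored verbatim.
 */
static inline void example_write_bar64(struct intel_vgpu *vgpu,
		u32 bar_offset, u64 val)
{
	intel_vgpu_write_pci_bar(vgpu, bar_offset, lower_32_bits(val), true);
	intel_vgpu_write_pci_bar(vgpu, bar_offset + 4, upper_32_bits(val), false);
}
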
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

/* Validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))

bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

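/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): before shadowing guest structures, a guest graphics memory
 * address is validated against the vGPU's aperture/hidden ranges and then
 * translated into the host view with the g2h helper declared above.
 */
static inline int example_translate_guest_gma(struct intel_vgpu *vgpu,
		u64 g_addr, u64 *h_addr)
{
	/* Validate a dword's worth of graphics memory at g_addr. */
	if (!intel_gvt_ggtt_validate_range(vgpu, g_addr, 4))
		return -EINVAL;

	return intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, h_addr);
}
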
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				 unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				 unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				  unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
					  struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
};

enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
};

static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}

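/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * header): code that must touch real hardware registers brackets the
 * access with the pre/post helpers above so the device is held out of
 * runtime suspend for the duration of the access.
 */
static inline void example_hw_mmio_access(struct drm_i915_private *dev_priv)
{
	mmio_hw_access_pre(dev_priv);
	/* ... access physical MMIO or fence registers here ... */
	mmio_hw_access_post(dev_priv);
}
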
/**
 * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

/**
 * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_cmd_access(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed unaligned
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_unalign(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

/**
 * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by a GPU command
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}

/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO register has a mode mask in its upper 16 bits, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_has_mode_mask(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}

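/*
 * Illustrative sketch (assumption, not part of the original header): for a
 * register flagged F_MODE_MASK, the upper 16 bits of a guest write select
 * which of the lower 16 bits take effect, so merging such a write into the
 * current virtual register value could look like this hypothetical helper.
 */
static inline u32 example_merge_mode_masked_write(u32 old_val, u32 new_val)
{
	u32 mask = new_val >> 16;

	/* Keep unmasked bits from the old value, take masked bits from the write. */
	return (old_val & ~mask) | (new_val & mask);
}
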
#include "trace.h"
#include "mpt.h"

#endif