/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Kevin Tian <kevin.tian@intel.com>
 *    Eddie Dong <eddie.dong@intel.com>
 *
 * Contributors:
 *    Niu Bing <bing.niu@intel.com>
 *    Zhi Wang <zhi.a.wang@intel.com>
 *
 */

#include "hypercall.h"
#include "interrupt.h"
#include "scheduler.h"
#include "sched_policy.h"
#include "cmd_parser.h"

#define GVT_MAX_VGPU 8

enum {
	INTEL_GVT_HYPERVISOR_XEN = 0,
	INTEL_GVT_HYPERVISOR_KVM,
};

struct intel_gvt_host {
	struct intel_gvt_mpt *mpt;
};

extern struct intel_gvt_host intel_gvt_host;

/* Describe per-platform limitations. */
struct intel_gvt_device_info {
	u32 max_support_vgpus;
	unsigned long msi_cap_offset;
	u32 gtt_entry_size_shift;
	int gmadr_bytes_in_cmd;
};

/* GM resources owned by a vGPU */
struct intel_vgpu_gm {
	u64 aperture_sz;
	u64 hidden_sz;
	struct drm_mm_node low_gm_node;
	struct drm_mm_node high_gm_node;
};

#define INTEL_GVT_MAX_NUM_FENCES 32

/* Fences owned by a vGPU */
struct intel_vgpu_fence {
	struct drm_i915_fence_reg *regs[INTEL_GVT_MAX_NUM_FENCES];
	u32 base;
	u32 size;
};

struct intel_vgpu_mmio {
	void *vreg;
	void *sreg;
	bool disable_warn_untrack;
};

#define INTEL_GVT_MAX_CFG_SPACE_SZ 256
#define INTEL_GVT_MAX_BAR_NUM 4

struct intel_vgpu_pci_bar {
	u64 size;
	bool tracked;
};

struct intel_vgpu_cfg_space {
	unsigned char virtual_cfg_space[INTEL_GVT_MAX_CFG_SPACE_SZ];
	struct intel_vgpu_pci_bar bar[INTEL_GVT_MAX_BAR_NUM];
};

#define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

#define INTEL_GVT_MAX_PIPE 4

struct intel_vgpu_irq {
	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
		       INTEL_GVT_EVENT_MAX);
};

struct intel_vgpu_opregion {
	u32 gfn[INTEL_GVT_OPREGION_PAGES];
	struct page *pages[INTEL_GVT_OPREGION_PAGES];
};

#define vgpu_opregion(vgpu) (&(vgpu->opregion))

#define INTEL_GVT_MAX_PORT 5

struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
	struct intel_vgpu_sbi sbi;
};

struct vgpu_sched_ctl {
	int weight;
};

struct intel_vgpu {
	struct intel_gvt *gvt;
	unsigned long handle; /* vGPU handle used by hypervisor MPT modules */
	struct vgpu_sched_ctl sched_ctl;

	struct intel_vgpu_fence fence;
	struct intel_vgpu_gm gm;
	struct intel_vgpu_cfg_space cfg_space;
	struct intel_vgpu_mmio mmio;
	struct intel_vgpu_irq irq;
	struct intel_vgpu_gtt gtt;
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_execlist execlist[I915_NUM_ENGINES];
	struct list_head workload_q_head[I915_NUM_ENGINES];
	struct kmem_cache *workloads;
	atomic_t running_workload_num;
	DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
	struct i915_gem_context *shadow_ctx;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
	struct {
		struct mdev_device *mdev;
		struct vfio_region *region;
		struct eventfd_ctx *intx_trigger;
		struct eventfd_ctx *msi_trigger;
		struct rb_root cache;
		struct mutex cache_lock;
		struct notifier_block iommu_notifier;
		struct notifier_block group_notifier;
		struct work_struct release_work;
	} vdev;
#endif
};

struct intel_gvt_gm {
	unsigned long vgpu_allocated_low_gm_size;
	unsigned long vgpu_allocated_high_gm_size;
};

struct intel_gvt_fence {
	unsigned long vgpu_allocated_fence_num;
};

#define INTEL_GVT_MMIO_HASH_BITS 11

struct intel_gvt_mmio {
	u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO		(1 << 0)
/* Register contains graphics address */
#define F_GMADR		(1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK	(1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS	(1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED	(1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED	(1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN	(1 << 6)

	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
	unsigned int num_tracked_mmio;
};

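/*
 * Each 32-bit MMIO register gets one attribute byte, so the flags for a
 * register at `offset` live in mmio_attribute[offset >> 2]; for example,
 * offset 0x2120 maps to slot 0x848 (see the inline helpers further down).
 */
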
struct intel_gvt_firmware {
	void *cfg_space;
	void *mmio;
	bool firmware_loaded;
};

struct intel_gvt_opregion {
	void *opregion_va;
	u32 opregion_pa;
};

#define NR_MAX_INTEL_VGPU_TYPES 20

struct intel_vgpu_type {
	unsigned int avail_instance;
	unsigned int low_gm_size;
	unsigned int high_gm_size;
	enum intel_vgpu_edid resolution;
};

struct intel_gvt {
	struct drm_i915_private *dev_priv;
	struct idr vgpu_idr;	/* vGPU IDR pool */

	struct intel_gvt_device_info device_info;
	struct intel_gvt_gm gm;
	struct intel_gvt_fence fence;
	struct intel_gvt_mmio mmio;
	struct intel_gvt_firmware firmware;
	struct intel_gvt_irq irq;
	struct intel_gvt_gtt gtt;
	struct intel_gvt_opregion opregion;
	struct intel_gvt_workload_scheduler scheduler;
	struct notifier_block shadow_ctx_notifier_block[I915_NUM_ENGINES];
	DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
	struct intel_vgpu_type *types;
	unsigned int num_types;
	struct intel_vgpu *idle_vgpu;

	struct task_struct *service_thread;
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;
};

static inline struct intel_gvt *to_gvt(struct drm_i915_private *i915)
{
	return i915->gvt;
}

enum {
	INTEL_GVT_REQUEST_EMULATE_VBLANK = 0,

	/* Scheduling trigger by timer */
	INTEL_GVT_REQUEST_SCHED = 1,

	/* Scheduling trigger by event */
	INTEL_GVT_REQUEST_EVENT_SCHED = 2,
};

static inline void intel_gvt_request_service(struct intel_gvt *gvt,
		int service)
{
	set_bit(service, (void *)&gvt->service_request);
	wake_up(&gvt->service_thread_wq);
}

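/*
 * Usage sketch: a caller that wants the service thread to re-run the
 * scheduler posts the request bit and wakes the thread, e.g.:
 *
 *	intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_SCHED);
 *
 * The service thread is expected to test and clear the bit in
 * gvt->service_request before handling it.
 */
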
void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Aperture/GM space definitions for GVT device */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
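/* For example, HOST_LOW_GM_SIZE is 128 << 20 = 0x8000000 bytes. */
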
/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)

#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.base.total)
#define gvt_ggtt_sz(gvt) \
	((gvt->dev_priv->ggtt.base.total >> PAGE_SHIFT) << 3)
#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))

#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
				     + gvt_aperture_sz(gvt) - 1)

#define gvt_hidden_gmadr_base(gvt) (gvt_aperture_gmadr_base(gvt) \
				    + gvt_aperture_sz(gvt))
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
				   + gvt_hidden_sz(gvt) - 1)
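/*
 * The resulting gmadr layout: the aperture (CPU-mappable) range occupies
 * [0, gvt_aperture_sz - 1] and the hidden (non-mappable) range follows it
 * immediately, ending at gvt_ggtt_gm_sz - 1. For instance, with a 256MB
 * aperture and 4GB of total GM space:
 *
 *	aperture gmadr: [0x00000000, 0x0fffffff]
 *	hidden gmadr:   [0x10000000, 0xffffffff]
 */
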
#define gvt_fence_sz(gvt) (gvt->dev_priv->num_fence_regs)

/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
#define vgpu_hidden_offset(vgpu) ((vgpu)->gm.high_gm_node.start)
#define vgpu_aperture_sz(vgpu) ((vgpu)->gm.aperture_sz)
#define vgpu_hidden_sz(vgpu) ((vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_base(vgpu) \
	(gvt_aperture_pa_base(vgpu->gvt) + vgpu_aperture_offset(vgpu))

#define vgpu_ggtt_gm_sz(vgpu) ((vgpu)->gm.aperture_sz + (vgpu)->gm.hidden_sz)

#define vgpu_aperture_pa_end(vgpu) \
	(vgpu_aperture_pa_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_aperture_gmadr_base(vgpu) (vgpu_aperture_offset(vgpu))
#define vgpu_aperture_gmadr_end(vgpu) \
	(vgpu_aperture_gmadr_base(vgpu) + vgpu_aperture_sz(vgpu) - 1)

#define vgpu_hidden_gmadr_base(vgpu) (vgpu_hidden_offset(vgpu))
#define vgpu_hidden_gmadr_end(vgpu) \
	(vgpu_hidden_gmadr_base(vgpu) + vgpu_hidden_sz(vgpu) - 1)

#define vgpu_fence_base(vgpu) (vgpu->fence.base)
#define vgpu_fence_sz(vgpu) (vgpu->fence.size)

struct intel_vgpu_creation_params {
	__u64 low_gm_sz;  /* in MB */
	__u64 high_gm_sz; /* in MB */
};

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
			      struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
			    u32 fence, u64 value);

/* Macros for easily accessing vGPU virtual/shadow registers */
#define vgpu_vreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_vreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg(vgpu, reg) \
	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg8(vgpu, reg) \
	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg16(vgpu, reg) \
	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
#define vgpu_sreg64(vgpu, reg) \
	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
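/*
 * Usage sketch: an MMIO handler updates the guest-visible copy through
 * vgpu_vreg() and consults the shadow copy through vgpu_sreg(); `reg` is
 * whatever register the handler is emulating:
 *
 *	vgpu_vreg(vgpu, reg) = new_val;
 *	if (vgpu_sreg(vgpu, reg) != new_val)
 *		resync_shadow(vgpu, reg);	// hypothetical helper
 */
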
#define for_each_active_vgpu(gvt, vgpu, id) \
	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
		for_each_if(vgpu->active)
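/*
 * Usage sketch: iterate all active vGPUs on a GVT device; `vgpu` and `id`
 * are caller-declared iteration variables:
 *
 *	struct intel_vgpu *vgpu;
 *	int id;
 *
 *	for_each_active_vgpu(gvt, vgpu, id)
 *		emulate_vblank_for_vgpu(vgpu);	// hypothetical helper
 */
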
static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
	u32 offset, u32 val, bool low)
{
	u32 *pval;

	/* BAR offset should be 32 bits aligned */
	offset = rounddown(offset, 4);
	pval = (u32 *)(vgpu_cfg_space(vgpu) + offset);

	if (low) {
		/*
		 * only update bit 31 - bit 4,
		 * leave the bit 3 - bit 0 unchanged.
		 */
		*pval = (val & GENMASK(31, 4)) | (*pval & GENMASK(3, 0));
	}
}
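/*
 * The low four bits of a memory BAR are read-only flags (memory/IO space,
 * 32/64-bit type, prefetchable), which is why a write to the low dword
 * preserves bits 3:0 and takes only the address bits from the guest.
 */
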
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_idle_vgpu(struct intel_gvt *gvt);
void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
					 struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu);

/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \
	((gmadr >= vgpu_aperture_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_aperture_gmadr_end(vgpu)))

#define vgpu_gmadr_is_hidden(vgpu, gmadr) \
	((gmadr >= vgpu_hidden_gmadr_base(vgpu)) && \
	 (gmadr <= vgpu_hidden_gmadr_end(vgpu)))

#define vgpu_gmadr_is_valid(vgpu, gmadr) \
	((vgpu_gmadr_is_aperture(vgpu, gmadr) || \
	  (vgpu_gmadr_is_hidden(vgpu, gmadr))))

#define gvt_gmadr_is_aperture(gvt, gmadr) \
	((gmadr >= gvt_aperture_gmadr_base(gvt)) && \
	 (gmadr <= gvt_aperture_gmadr_end(gvt)))

#define gvt_gmadr_is_hidden(gvt, gmadr) \
	((gmadr >= gvt_hidden_gmadr_base(gvt)) && \
	 (gmadr <= gvt_hidden_gmadr_end(gvt)))

#define gvt_gmadr_is_valid(gvt, gmadr) \
	(gvt_gmadr_is_aperture(gvt, gmadr) || \
	 gvt_gmadr_is_hidden(gvt, gmadr))
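/*
 * Usage sketch: validate a guest-provided graphics memory address before
 * translating it to a host address with the helpers declared below:
 *
 *	if (!vgpu_gmadr_is_valid(vgpu, g_addr))
 *		return -EINVAL;
 *	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_addr, &h_addr);
 */
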
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size);
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr);
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr);
int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index);
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
			       bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
				void *p_data, unsigned int bytes);

int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
				 void *p_data, unsigned int bytes);

void intel_gvt_clean_opregion(struct intel_gvt *gvt);
int intel_gvt_init_opregion(struct intel_gvt *gvt);

void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

struct intel_gvt_ops {
	int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
				unsigned int);
	int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
				 unsigned int);
	int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
				 unsigned int);
	int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
				  unsigned int);
	struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
					  struct intel_vgpu_type *);
	void (*vgpu_destroy)(struct intel_vgpu *);
	void (*vgpu_reset)(struct intel_vgpu *);
	void (*vgpu_activate)(struct intel_vgpu *);
	void (*vgpu_deactivate)(struct intel_vgpu *);
};

enum {
	GVT_FAILSAFE_UNSUPPORTED_GUEST,
	GVT_FAILSAFE_INSUFFICIENT_RESOURCE,
};

static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_get(dev_priv);
}

static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
{
	intel_runtime_pm_put(dev_priv);
}

/**
 * intel_gvt_mmio_set_accessed - mark an MMIO register as accessed
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}

/**
 * intel_gvt_mmio_is_cmd_access - check if an MMIO register can be accessed by GPU commands
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_cmd_access(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}

/**
 * intel_gvt_mmio_is_unalign - check if an MMIO register can be accessed with an unaligned address
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline bool intel_gvt_mmio_is_unalign(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}

/**
 * intel_gvt_mmio_set_cmd_accessed - mark an MMIO register as accessed by a GPU command
 * @gvt: a GVT device
 * @offset: register offset
 *
 */
static inline void intel_gvt_mmio_set_cmd_accessed(
			struct intel_gvt *gvt, unsigned int offset)
{
	gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}

/**
 * intel_gvt_mmio_has_mode_mask - check if an MMIO register has a mode mask
 * @gvt: a GVT device
 * @offset: register offset
 *
 * Returns:
 * True if the MMIO register has a mode mask in its higher 16 bits, false otherwise.
 *
 */
static inline bool intel_gvt_mmio_has_mode_mask(
			struct intel_gvt *gvt, unsigned int offset)
{
	return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}