/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Xiao Zheng <xiao.zheng@intel.com>
 *
 * Contributors:
 *    Min He <min.he@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "i915_pvinfo.h"
#include "trace.h"

static bool enable_out_of_sync = false;
static int preallocated_oos_pages = 8192;
/*
 * validate a gm address and related range size,
 * translate it to host gm address
 */
bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
{
	if ((!vgpu_gmadr_is_valid(vgpu, addr)) || (size
			&& !vgpu_gmadr_is_valid(vgpu, addr + size - 1))) {
		gvt_err("vgpu%d: invalid range gmadr 0x%llx size 0x%x\n",
				vgpu->id, addr, size);
		return false;
	}
	return true;
}

/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
	if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
			"invalid guest gmadr %llx\n", g_addr))
		return -EACCES;

	if (vgpu_gmadr_is_aperture(vgpu, g_addr))
		*h_addr = vgpu_aperture_gmadr_base(vgpu)
			+ (g_addr - vgpu_aperture_offset(vgpu));
	else
		*h_addr = vgpu_hidden_gmadr_base(vgpu)
			+ (g_addr - vgpu_hidden_offset(vgpu));
	return 0;
}

/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
	if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
			"invalid host gmadr %llx\n", h_addr))
		return -EACCES;

	if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
		*g_addr = vgpu_aperture_gmadr_base(vgpu)
			+ (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
	else
		*g_addr = vgpu_hidden_gmadr_base(vgpu)
			+ (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
	return 0;
}

int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
			     unsigned long *h_index)
{
	u64 h_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
				       &h_addr);
	if (ret)
		return ret;

	*h_index = h_addr >> GTT_PAGE_SHIFT;
	return 0;
}

int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
			     unsigned long *g_index)
{
	u64 g_addr;
	int ret;

	ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
				       &g_addr);
	if (ret)
		return ret;

	*g_index = g_addr >> GTT_PAGE_SHIFT;
	return 0;
}
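/*
 * Illustrative example (added note, not from the original source), assuming
 * 4KB GTT pages (GTT_PAGE_SHIFT == 12): translating guest GGTT index 0x100
 * first forms the guest graphics memory address 0x100000, relocates it into
 * the host aperture or hidden range via intel_gvt_ggtt_gmadr_g2h(), and then
 * shifts the host address back down by GTT_PAGE_SHIFT to obtain the host
 * GGTT entry index.
 */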
#define gtt_type_is_entry(type) \
	(type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
	 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
	 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_type_is_pt(type) \
	(type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)

#define gtt_type_is_pte_pt(type) \
	(type == GTT_TYPE_PPGTT_PTE_PT)

#define gtt_type_is_root_pointer(type) \
	(gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)

#define gtt_init_entry(e, t, p, v) do { \
	(e)->type = t; \
	(e)->pdev = p; \
	memcpy(&(e)->val64, &v, sizeof(v)); \
} while (0)

/*
 * Mappings between GTT_TYPE* enumerations.
 * The following information can be found according to the given type:
 *	- type of next level page table
 *	- type of entry inside this level page table
 *	- type of entry with PSE set
 *
 * If the given type doesn't carry such information, GTT_TYPE_INVALID is
 * returned. For example, an L4 root entry has no PSE bit and a PTE page
 * table has no next-level page table, so asking for those yields
 * GTT_TYPE_INVALID. This is useful when traversing a page table.
 */
struct gtt_type_table_entry {
	int entry_type;
	int next_pt_type;
	int pse_entry_type;
};

#define GTT_TYPE_TABLE_ENTRY(type, e_type, npt_type, pse_type) \
	[type] = { \
		.entry_type = e_type, \
		.next_pt_type = npt_type, \
		.pse_entry_type = pse_type, \
	}

static struct gtt_type_table_entry gtt_type_table[] = {
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
			GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PML4_ENTRY,
			GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_PPGTT_PTE_4K_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
			GTT_TYPE_PPGTT_PDE_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_2M_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
			GTT_TYPE_PPGTT_PDP_ENTRY,
			GTT_TYPE_INVALID,
			GTT_TYPE_PPGTT_PTE_1G_ENTRY),
	GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
			GTT_TYPE_GGTT_PTE,
			GTT_TYPE_INVALID,
			GTT_TYPE_INVALID),
};

static inline int get_next_pt_type(int type)
{
	return gtt_type_table[type].next_pt_type;
}

static inline int get_entry_type(int type)
{
	return gtt_type_table[type].entry_type;
}

static inline int get_pse_type(int type)
{
	return gtt_type_table[type].pse_entry_type;
}
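/*
 * Illustrative example (added note, not from the original source): for a
 * PDP-level page table the table above yields
 *
 *	get_entry_type(GTT_TYPE_PPGTT_PDP_PT)   == GTT_TYPE_PPGTT_PDP_ENTRY
 *	get_next_pt_type(GTT_TYPE_PPGTT_PDP_PT) == GTT_TYPE_PPGTT_PDE_PT
 *	get_pse_type(GTT_TYPE_PPGTT_PDP_PT)     == GTT_TYPE_PPGTT_PTE_1G_ENTRY
 *
 * while get_next_pt_type(GTT_TYPE_PPGTT_PTE_PT) is GTT_TYPE_INVALID, since
 * a PTE page table is the last level of the walk.
 */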
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	return readq(addr);
}

static void write_pte64(struct drm_i915_private *dev_priv,
		unsigned long index, u64 pte)
{
	void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

	writeq(pte, addr);

	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}

static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return e;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		WARN_ON(ret);
	} else if (!pt) {
		e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
	} else {
		e->val64 = *((u64 *)pt + index);
	}
	return e;
}

static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
		struct intel_gvt_gtt_entry *e,
		unsigned long index, bool hypervisor_access, unsigned long gpa,
		struct intel_vgpu *vgpu)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (WARN_ON(info->gtt_entry_size != 8))
		return e;

	if (hypervisor_access) {
		ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
				(index << info->gtt_entry_size_shift),
				&e->val64, 8);
		WARN_ON(ret);
	} else if (!pt) {
		write_pte64(vgpu->gvt->dev_priv, index, e->val64);
	} else {
		*((u64 *)pt + index) = e->val64;
	}
	return e;
}

#define GTT_HAW 46

#define ADDR_1G_MASK (((1UL << (GTT_HAW - 30 + 1)) - 1) << 30)
#define ADDR_2M_MASK (((1UL << (GTT_HAW - 21 + 1)) - 1) << 21)
#define ADDR_4K_MASK (((1UL << (GTT_HAW - 12 + 1)) - 1) << 12)
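/*
 * Added note for clarity (not from the original source): GTT_HAW is the
 * hardware address width covered by an entry. ADDR_4K_MASK keeps the page
 * frame address bits of a 4K page (bits above bit 11), while the 2M and 1G
 * masks do the same for the larger PSE page sizes. gen8_gtt_get_pfn() below
 * simply applies the mask that matches the entry type and shifts the result
 * down by 12 to obtain a 4K-granular pfn.
 */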
static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
{
	unsigned long pfn;

	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
		pfn = (e->val64 & ADDR_1G_MASK) >> 12;
	else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
		pfn = (e->val64 & ADDR_2M_MASK) >> 12;
	else
		pfn = (e->val64 & ADDR_4K_MASK) >> 12;
	return pfn;
}

static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
{
	if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
		e->val64 &= ~ADDR_1G_MASK;
		pfn &= (ADDR_1G_MASK >> 12);
	} else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
		e->val64 &= ~ADDR_2M_MASK;
		pfn &= (ADDR_2M_MASK >> 12);
	} else {
		e->val64 &= ~ADDR_4K_MASK;
		pfn &= (ADDR_4K_MASK >> 12);
	}

	e->val64 |= (pfn << 12);
}
static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
{
	/* Entry doesn't have PSE bit. */
	if (get_pse_type(e->type) == GTT_TYPE_INVALID)
		return false;

	e->type = get_entry_type(e->type);
	if (!(e->val64 & (1 << 7)))
		return false;

	e->type = get_pse_type(e->type);
	return true;
}

static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
{
	/*
	 * i915 writes PDP root pointer registers without the present bit,
	 * and that also works, so we need to treat root pointer entries
	 * specially.
	 */
	if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			|| e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
		return (e->val64 != 0);
	else
		return (e->val64 & (1 << 0));
}

static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
{
	e->val64 &= ~(1 << 0);
}
/*
 * Per-platform GMA routines.
 */
static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
{
	unsigned long x = (gma >> GTT_PAGE_SHIFT);

	trace_gma_index(__func__, gma, x);
	return x;
}

#define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
{ \
	unsigned long x = (exp); \
	trace_gma_index(__func__, gma, x); \
	return x; \
}

DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));

static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
	.get_entry = gtt_get_entry64,
	.set_entry = gtt_set_entry64,
	.clear_present = gtt_entry_clear_present,
	.test_present = gen8_gtt_test_present,
	.test_pse = gen8_gtt_test_pse,
	.get_pfn = gen8_gtt_get_pfn,
	.set_pfn = gen8_gtt_set_pfn,
};

static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
	.gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
	.gma_to_pte_index = gen8_gma_to_pte_index,
	.gma_to_pde_index = gen8_gma_to_pde_index,
	.gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
	.gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
	.gma_to_pml4_index = gen8_gma_to_pml4_index,
};
static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
		struct intel_gvt_gtt_entry *m)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	unsigned long gfn, mfn;

	*m = *p;

	if (!ops->test_present(p))
		return 0;

	gfn = ops->get_pfn(p);

	mfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, gfn);
	if (mfn == INTEL_GVT_INVALID_ADDR) {
		gvt_err("fail to translate gfn: 0x%lx\n", gfn);
		return -ENXIO;
	}

	ops->set_pfn(m, mfn);
	return 0;
}
struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index)
{
	struct intel_gvt *gvt = mm->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	e->type = mm->page_table_entry_type;

	ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
	return e;
}

struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
		void *page_table, struct intel_gvt_gtt_entry *e,
		unsigned long index)
{
	struct intel_gvt *gvt = mm->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	return ops->set_entry(page_table, e, index, false, 0, mm->vgpu);
}
/*
 * PPGTT shadow page table helpers.
 */
static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	e->type = get_entry_type(type);

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return e;

	ops->get_entry(page_table, e, index, guest,
			spt->guest_page.gfn << GTT_PAGE_SHIFT,
			spt->vgpu);
	return e;
}

static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
		struct intel_vgpu_ppgtt_spt *spt,
		void *page_table, int type,
		struct intel_gvt_gtt_entry *e, unsigned long index,
		bool guest)
{
	struct intel_gvt *gvt = spt->vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;

	if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
		return e;

	return ops->set_entry(page_table, e, index, guest,
			spt->guest_page.gfn << GTT_PAGE_SHIFT,
			spt->vgpu);
}

#define ppgtt_get_guest_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_set_guest_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, NULL, \
		spt->guest_page_type, e, index, true)

#define ppgtt_get_shadow_entry(spt, e, index) \
	ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)

#define ppgtt_set_shadow_entry(spt, e, index) \
	ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
		spt->shadow_page.type, e, index, false)
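/*
 * Usage sketch (added note, not from the original source): the guest
 * variants access the write-protected guest page table through the
 * hypervisor, using the tracked gfn as the gpa base, while the shadow
 * variants operate on the locally mapped shadow page:
 *
 *	struct intel_gvt_gtt_entry ge, se;
 *
 *	ppgtt_get_guest_entry(spt, &ge, i);	// guest view, read via gpa
 *	ppgtt_get_shadow_entry(spt, &se, i);	// shadow view, read via vaddr
 */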
/**
 * intel_vgpu_init_guest_page - init a guest page data structure
 * @vgpu: a vGPU
 * @p: a guest page data structure
 * @gfn: guest memory page frame number
 * @handler: the function that will be called when the target guest memory
 *	page has been modified
 * @data: callback private data
 *
 * This function is called when user wants to track a guest memory page.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_init_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p,
		unsigned long gfn,
		int (*handler)(void *, u64, void *, int),
		void *data)
{
	INIT_HLIST_NODE(&p->node);

	p->writeprotection = false;
	p->gfn = gfn;
	p->handler = handler;
	p->data = data;
	p->oos_page = NULL;
	p->write_cnt = 0;

	hash_add(vgpu->gtt.guest_page_hash_table, &p->node, p->gfn);
	return 0;
}

static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page);

/**
 * intel_vgpu_clean_guest_page - release the resources owned by a guest page
 * data structure
 * @vgpu: a vGPU
 * @p: a tracked guest page
 *
 * This function is called when user tries to stop tracking a guest memory
 * page.
 */
void intel_vgpu_clean_guest_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *p)
{
	if (!hlist_unhashed(&p->node))
		hash_del(&p->node);

	if (p->oos_page)
		detach_oos_page(vgpu, p->oos_page);

	if (p->writeprotection)
		intel_gvt_hypervisor_unset_wp_page(vgpu, p);
}

/**
 * intel_vgpu_find_guest_page - find a guest page data structure by GFN
 * @vgpu: a vGPU
 * @gfn: guest memory page frame number
 *
 * This function is called when the emulation logic wants to know if a trapped
 * GFN is a tracked guest page.
 *
 * Returns:
 * Pointer to guest page data structure, NULL if failed.
 */
struct intel_vgpu_guest_page *intel_vgpu_find_guest_page(
		struct intel_vgpu *vgpu, unsigned long gfn)
{
	struct intel_vgpu_guest_page *p;

	hash_for_each_possible(vgpu->gtt.guest_page_hash_table,
		p, node, gfn) {
		if (p->gfn == gfn)
			return p;
	}
	return NULL;
}
static inline int init_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p, int type)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	daddr = dma_map_page(kdev, p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kdev, daddr)) {
		gvt_err("fail to map dma addr\n");
		return -EINVAL;
	}

	p->vaddr = page_address(p->page);
	p->type = type;

	INIT_HLIST_NODE(&p->node);

	p->mfn = daddr >> GTT_PAGE_SHIFT;
	hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
	return 0;
}

static inline void clean_shadow_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_shadow_page *p)
{
	struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;

	dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
			PCI_DMA_BIDIRECTIONAL);

	if (!hlist_unhashed(&p->node))
		hash_del(&p->node);
}

static inline struct intel_vgpu_shadow_page *find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p;

	hash_for_each_possible(vgpu->gtt.shadow_page_hash_table,
		p, node, mfn) {
		if (p->mfn == mfn)
			return p;
	}
	return NULL;
}

#define guest_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page)

#define shadow_page_to_ppgtt_spt(ptr) \
	container_of(ptr, struct intel_vgpu_ppgtt_spt, shadow_page)
static void *alloc_spt(gfp_t gfp_mask)
{
	struct intel_vgpu_ppgtt_spt *spt;

	spt = kzalloc(sizeof(*spt), gfp_mask);
	if (!spt)
		return NULL;

	spt->shadow_page.page = alloc_page(gfp_mask);
	if (!spt->shadow_page.page) {
		kfree(spt);
		return NULL;
	}
	return spt;
}

static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
	__free_page(spt->shadow_page.page);
	kfree(spt);
}

static void ppgtt_free_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	trace_spt_free(spt->vgpu->id, spt, spt->shadow_page.type);

	clean_shadow_page(spt->vgpu, &spt->shadow_page);
	intel_vgpu_clean_guest_page(spt->vgpu, &spt->guest_page);
	list_del_init(&spt->post_shadow_list);

	free_spt(spt);
}

static void ppgtt_free_all_shadow_page(struct intel_vgpu *vgpu)
{
	struct hlist_node *n;
	struct intel_vgpu_shadow_page *sp;
	int i;

	hash_for_each_safe(vgpu->gtt.shadow_page_hash_table, i, n, sp, node)
		ppgtt_free_shadow_page(shadow_page_to_ppgtt_spt(sp));
}

static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
		u64 pa, void *p_data, int bytes);

static int ppgtt_write_protection_handler(void *gp, u64 pa,
		void *p_data, int bytes)
{
	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	if (!gpt->writeprotection)
		return -EINVAL;

	ret = ppgtt_handle_guest_write_page_table_bytes(gp,
			pa, p_data, bytes);
	if (ret)
		return ret;
	return 0;
}

static int reclaim_one_mm(struct intel_gvt *gvt);
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_shadow_page(
		struct intel_vgpu *vgpu, int type, unsigned long gfn)
{
	struct intel_vgpu_ppgtt_spt *spt = NULL;
	int ret;

retry:
	spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
	if (!spt) {
		if (reclaim_one_mm(vgpu->gvt))
			goto retry;

		gvt_err("fail to allocate ppgtt shadow page\n");
		return ERR_PTR(-ENOMEM);
	}

	spt->vgpu = vgpu;
	spt->guest_page_type = type;
	atomic_set(&spt->refcount, 1);
	INIT_LIST_HEAD(&spt->post_shadow_list);

	/*
	 * TODO: guest page type may be different with shadow page type,
	 *	 when we support PSE page in future.
	 */
	ret = init_shadow_page(vgpu, &spt->shadow_page, type);
	if (ret) {
		gvt_err("fail to initialize shadow page for spt\n");
		goto err;
	}

	ret = intel_vgpu_init_guest_page(vgpu, &spt->guest_page,
			gfn, ppgtt_write_protection_handler, NULL);
	if (ret) {
		gvt_err("fail to initialize guest page for spt\n");
		goto err;
	}

	trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
	return spt;
err:
	ppgtt_free_shadow_page(spt);
	return ERR_PTR(ret);
}

static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
		struct intel_vgpu *vgpu, unsigned long mfn)
{
	struct intel_vgpu_shadow_page *p = find_shadow_page(vgpu, mfn);

	if (p)
		return shadow_page_to_ppgtt_spt(p);

	gvt_err("vgpu%d: fail to find ppgtt shadow page: 0x%lx\n",
			vgpu->id, mfn);
	return NULL;
}
#define pt_entry_size_shift(spt) \
	((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

#define pt_entries(spt) \
	(GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

#define for_each_present_guest_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
			ppgtt_get_guest_entry(spt, e, i)))

#define for_each_present_shadow_entry(spt, e, i) \
	for (i = 0; i < pt_entries(spt); i++) \
		if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
			ppgtt_get_shadow_entry(spt, e, i)))

static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	int v = atomic_read(&spt->refcount);

	trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));

	atomic_inc(&spt->refcount);
}
static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static int ppgtt_invalidate_shadow_page_by_shadow_entry(struct intel_vgpu *vgpu,
		struct intel_gvt_gtt_entry *e)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;
	intel_gvt_gtt_type_t cur_pt_type;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(e->type))))
		return -EINVAL;

	if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
			&& e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
		cur_pt_type = get_next_pt_type(e->type) + 1;
		if (ops->get_pfn(e) ==
			vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
			return 0;
	}
	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s) {
		gvt_err("vgpu%d: fail to find shadow page: mfn: 0x%lx\n",
				vgpu->id, ops->get_pfn(e));
		return -ENXIO;
	}
	return ppgtt_invalidate_shadow_page(s);
}

static int ppgtt_invalidate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_gvt_gtt_entry e;
	unsigned long index;
	int ret;
	int v = atomic_read(&spt->refcount);

	trace_spt_change(spt->vgpu->id, "die", spt,
			spt->guest_page.gfn, spt->shadow_page.type);

	trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));

	if (atomic_dec_return(&spt->refcount) > 0)
		return 0;

	if (gtt_type_is_pte_pt(spt->shadow_page.type))
		goto release;

	for_each_present_shadow_entry(spt, &e, index) {
		if (!gtt_type_is_pt(get_next_pt_type(e.type))) {
			gvt_err("GVT doesn't support pse bit for now\n");
			return -EINVAL;
		}
		ret = ppgtt_invalidate_shadow_page_by_shadow_entry(
				spt->vgpu, &e);
		if (ret)
			goto fail;
	}
release:
	trace_spt_change(spt->vgpu->id, "release", spt,
			spt->guest_page.gfn, spt->shadow_page.type);
	ppgtt_free_shadow_page(spt);
	return 0;
fail:
	gvt_err("vgpu%d: fail: shadow page %p shadow entry 0x%llx type %d\n",
			spt->vgpu->id, spt, e.val64, e.type);
	return ret;
}
static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt);

static struct intel_vgpu_ppgtt_spt *ppgtt_populate_shadow_page_by_guest_entry(
		struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
{
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s = NULL;
	struct intel_vgpu_guest_page *g;
	int ret;

	if (WARN_ON(!gtt_type_is_pt(get_next_pt_type(we->type)))) {
		ret = -EINVAL;
		goto fail;
	}

	g = intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
	if (g) {
		s = guest_page_to_ppgtt_spt(g);
		ppgtt_get_shadow_page(s);
	} else {
		int type = get_next_pt_type(we->type);

		s = ppgtt_alloc_shadow_page(vgpu, type, ops->get_pfn(we));
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}

		ret = intel_gvt_hypervisor_set_wp_page(vgpu, &s->guest_page);
		if (ret)
			goto fail;

		ret = ppgtt_populate_shadow_page(s);
		if (ret)
			goto fail;

		trace_spt_change(vgpu->id, "new", s, s->guest_page.gfn,
			s->shadow_page.type);
	}
	return s;
fail:
	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
			vgpu->id, s, we->val64, we->type);
	return ERR_PTR(ret);
}
static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
		struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
{
	struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;

	se->type = ge->type;
	se->val64 = ge->val64;

	ops->set_pfn(se, s->shadow_page.mfn);
}

static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
{
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_vgpu_ppgtt_spt *s;
	struct intel_gvt_gtt_entry se, ge;
	unsigned long i;
	int ret;

	trace_spt_change(spt->vgpu->id, "born", spt,
			spt->guest_page.gfn, spt->shadow_page.type);

	if (gtt_type_is_pte_pt(spt->shadow_page.type)) {
		for_each_present_guest_entry(spt, &ge, i) {
			ret = gtt_entry_p2m(vgpu, &ge, &se);
			if (ret)
				goto fail;
			ppgtt_set_shadow_entry(spt, &se, i);
		}
		return 0;
	}

	for_each_present_guest_entry(spt, &ge, i) {
		if (!gtt_type_is_pt(get_next_pt_type(ge.type))) {
			gvt_err("GVT doesn't support pse bit now\n");
			ret = -EINVAL;
			goto fail;
		}

		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &se, i);
		ppgtt_generate_shadow_entry(&se, s, &ge);
		ppgtt_set_shadow_entry(spt, &se, i);
	}
	return 0;
fail:
	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
			vgpu->id, spt, ge.val64, ge.type);
	return ret;
}
static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
		unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_gvt_gtt_entry e;
	int ret;

	ppgtt_get_shadow_entry(spt, &e, index);

	trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
			index);

	if (!ops->test_present(&e))
		return 0;

	if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
		return 0;

	if (gtt_type_is_pt(get_next_pt_type(e.type))) {
		struct intel_vgpu_ppgtt_spt *s =
			ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
		if (!s) {
			gvt_err("fail to find guest page\n");
			ret = -ENXIO;
			goto fail;
		}
		ret = ppgtt_invalidate_shadow_page(s);
		if (ret)
			goto fail;
	}
	ops->set_pfn(&e, vgpu->gtt.scratch_pt[sp->type].page_mfn);
	ppgtt_set_shadow_entry(spt, &e, index);
	return 0;
fail:
	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
			vgpu->id, spt, e.val64, e.type);
	return ret;
}
static int ppgtt_handle_guest_entry_add(struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu_shadow_page *sp = &spt->shadow_page;
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_entry m;
	struct intel_vgpu_ppgtt_spt *s;
	int ret;

	trace_gpt_change(spt->vgpu->id, "add", spt, sp->type,
			we->val64, index);

	if (gtt_type_is_pt(get_next_pt_type(we->type))) {
		s = ppgtt_populate_shadow_page_by_guest_entry(vgpu, we);
		if (IS_ERR(s)) {
			ret = PTR_ERR(s);
			goto fail;
		}
		ppgtt_get_shadow_entry(spt, &m, index);
		ppgtt_generate_shadow_entry(&m, s, we);
		ppgtt_set_shadow_entry(spt, &m, index);
	} else {
		ret = gtt_entry_p2m(vgpu, we, &m);
		if (ret)
			goto fail;
		ppgtt_set_shadow_entry(spt, &m, index);
	}
	return 0;
fail:
	gvt_err("vgpu%d: fail: spt %p guest entry 0x%llx type %d\n", vgpu->id,
			spt, we->val64, we->type);
	return ret;
}
static int sync_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);
	struct intel_gvt_gtt_entry old, new, m;
	int index;
	int ret;

	trace_oos_change(vgpu->id, "sync", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	old.type = new.type = get_entry_type(spt->guest_page_type);
	old.val64 = new.val64 = 0;

	for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
		index++) {
		ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
		ops->get_entry(NULL, &new, index, true,
			oos_page->guest_page->gfn << PAGE_SHIFT, vgpu);

		if (old.val64 == new.val64
			&& !test_and_clear_bit(index, spt->post_shadow_bitmap))
			continue;

		trace_oos_sync(vgpu->id, oos_page->id,
				oos_page->guest_page, spt->guest_page_type,
				new.val64, index);

		ret = gtt_entry_p2m(vgpu, &new, &m);
		if (ret)
			return ret;

		ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
		ppgtt_set_shadow_entry(spt, &m, index);
	}

	oos_page->guest_page->write_cnt = 0;
	list_del_init(&spt->post_shadow_list);
	return 0;
}
static int detach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_vgpu_ppgtt_spt *spt =
		guest_page_to_ppgtt_spt(oos_page->guest_page);

	trace_oos_change(vgpu->id, "detach", oos_page->id,
			oos_page->guest_page, spt->guest_page_type);

	oos_page->guest_page->write_cnt = 0;
	oos_page->guest_page->oos_page = NULL;
	oos_page->guest_page = NULL;

	list_del_init(&oos_page->vm_list);
	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);

	return 0;
}
static int attach_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_oos_page *oos_page,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	int ret;

	ret = intel_gvt_hypervisor_read_gpa(vgpu, gpt->gfn << GTT_PAGE_SHIFT,
			oos_page->mem, GTT_PAGE_SIZE);
	if (ret)
		return ret;

	oos_page->guest_page = gpt;
	gpt->oos_page = oos_page;

	list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);

	trace_oos_change(vgpu->id, "attach", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);
	return 0;
}

static int ppgtt_set_guest_page_sync(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	int ret;

	ret = intel_gvt_hypervisor_set_wp_page(vgpu, gpt);
	if (ret)
		return ret;

	trace_oos_change(vgpu->id, "set page sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_del_init(&gpt->oos_page->vm_list);
	return sync_oos_page(vgpu, gpt->oos_page);
}
static int ppgtt_allocate_oos_page(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;
	int ret;

	WARN(oos_page, "shadow PPGTT page already has an oos page\n");

	if (list_empty(&gtt->oos_page_free_list_head)) {
		/* The free list is exhausted, so steal and sync the
		 * least-recently attached oos page. */
		oos_page = container_of(gtt->oos_page_use_list_head.next,
			struct intel_vgpu_oos_page, list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;

		ret = detach_oos_page(vgpu, oos_page);
		if (ret)
			return ret;
	}
	oos_page = container_of(gtt->oos_page_free_list_head.next,
			struct intel_vgpu_oos_page, list);
	return attach_oos_page(vgpu, oos_page, gpt);
}

static int ppgtt_set_guest_page_oos(struct intel_vgpu *vgpu,
		struct intel_vgpu_guest_page *gpt)
{
	struct intel_vgpu_oos_page *oos_page = gpt->oos_page;

	if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
		return -EINVAL;

	trace_oos_change(vgpu->id, "set page out of sync", gpt->oos_page->id,
			gpt, guest_page_to_ppgtt_spt(gpt)->guest_page_type);

	list_add_tail(&oos_page->vm_list, &vgpu->gtt.oos_page_list_head);
	return intel_gvt_hypervisor_unset_wp_page(vgpu, gpt);
}
/**
 * intel_vgpu_sync_oos_pages - sync all the out-of-sync shadow pages of a vGPU
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to sync all the out-of-sync shadow pages for the vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;
	int ret;

	if (!enable_out_of_sync)
		return 0;

	list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
		oos_page = container_of(pos,
				struct intel_vgpu_oos_page, vm_list);
		ret = ppgtt_set_guest_page_sync(vgpu, oos_page->guest_page);
		if (ret)
			return ret;
	}
	return 0;
}
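/*
 * Added note for clarity (not from the original source): a PTE-level guest
 * page table can be taken "out of sync" once the guest rewrites it
 * repeatedly (see can_do_out_of_sync() below). Its write protection is then
 * dropped and guest writes are only mirrored into oos_page->mem; the shadow
 * page table is brought back in sync here, before the next workload
 * submission, by re-arming write protection and replaying the changed
 * entries through sync_oos_page().
 */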
/*
 * The heart of PPGTT shadow page table.
 */
static int ppgtt_handle_guest_write_page_table(
		struct intel_vgpu_guest_page *gpt,
		struct intel_gvt_gtt_entry *we, unsigned long index)
{
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	bool new_present;
	int ret;

	new_present = ops->test_present(we);

	ret = ppgtt_handle_guest_entry_removal(gpt, index);
	if (ret)
		goto fail;

	if (new_present) {
		ret = ppgtt_handle_guest_entry_add(gpt, we, index);
		if (ret)
			goto fail;
	}
	return 0;
fail:
	gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d.\n",
			vgpu->id, spt, we->val64, we->type);
	return ret;
}

static inline bool can_do_out_of_sync(struct intel_vgpu_guest_page *gpt)
{
	return enable_out_of_sync
		&& gtt_type_is_pte_pt(
			guest_page_to_ppgtt_spt(gpt)->guest_page_type)
		&& gpt->write_cnt >= 2;
}

static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
		unsigned long index)
{
	set_bit(index, spt->post_shadow_bitmap);
	if (!list_empty(&spt->post_shadow_list))
		return;

	list_add_tail(&spt->post_shadow_list,
			&spt->vgpu->gtt.post_shadow_list_head);
}
/**
 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
 * @vgpu: a vGPU
 *
 * This function is called before submitting a guest workload to the host,
 * to flush all the post shadows for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge;
	unsigned long index;
	int ret;

	list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
		spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
				post_shadow_list);

		for_each_set_bit(index, spt->post_shadow_bitmap,
				GTT_ENTRY_NUM_IN_ONE_PAGE) {
			ppgtt_get_guest_entry(spt, &ge, index);

			ret = ppgtt_handle_guest_write_page_table(
					&spt->guest_page, &ge, index);
			if (ret)
				return ret;
			clear_bit(index, spt->post_shadow_bitmap);
		}
		list_del_init(&spt->post_shadow_list);
	}
	return 0;
}
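/*
 * Added note for clarity (not from the original source): writes narrower
 * than a full GTT entry cannot be shadowed immediately, so
 * ppgtt_handle_guest_write_page_table_bytes() below only records the touched
 * index in post_shadow_bitmap and queues the page on post_shadow_list_head.
 * The deferred entries are replayed by intel_vgpu_flush_post_shadow() above,
 * once the guest has assembled the complete entry.
 */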
static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
		u64 pa, void *p_data, int bytes)
{
	struct intel_vgpu_guest_page *gpt = (struct intel_vgpu_guest_page *)gp;
	struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
	struct intel_vgpu *vgpu = spt->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	struct intel_gvt_gtt_entry we;
	unsigned long index;
	int ret;

	index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

	ppgtt_get_guest_entry(spt, &we, index);

	if (bytes == info->gtt_entry_size) {
		ret = ppgtt_handle_guest_write_page_table(gpt, &we, index);
		if (ret)
			return ret;
	} else {
		if (!test_bit(index, spt->post_shadow_bitmap)) {
			ret = ppgtt_handle_guest_entry_removal(gpt, index);
			if (ret)
				return ret;
		}

		ppgtt_set_post_shadow(spt, index);
	}

	if (!enable_out_of_sync)
		return 0;

	gpt->write_cnt++;

	if (gpt->oos_page)
		ops->set_entry(gpt->oos_page->mem, &we, index,
				false, 0, vgpu);

	if (can_do_out_of_sync(gpt)) {
		if (!gpt->oos_page)
			ppgtt_allocate_oos_page(vgpu, gpt);

		ret = ppgtt_set_guest_page_oos(vgpu, gpt);
		if (ret)
			return ret;
	}
	return 0;
}
/*
 * mm page table allocation policy for bdw+
 *  - for ggtt, only a virtual page table will be allocated.
 *  - for ppgtt, dedicated virtual/shadow page tables will be allocated.
 */
static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	void *mem;

	if (mm->type == INTEL_GVT_MM_PPGTT) {
		mm->page_table_entry_cnt = 4;
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = kzalloc(mm->has_shadow_page_table ?
			mm->page_table_entry_size * 2
				: mm->page_table_entry_size, GFP_KERNEL);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
		if (!mm->has_shadow_page_table)
			return 0;
		mm->shadow_page_table = mem + mm->page_table_entry_size;
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		mm->page_table_entry_cnt =
			(gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
		mm->page_table_entry_size = mm->page_table_entry_cnt *
			info->gtt_entry_size;
		mem = vzalloc(mm->page_table_entry_size);
		if (!mem)
			return -ENOMEM;
		mm->virtual_page_table = mem;
	}
	return 0;
}
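/*
 * Illustrative layout (added note, not from the original source) of the
 * PPGTT root table allocated above, assuming 8-byte entries and a shadowed
 * mm:
 *
 *	mem + 0  .. mem + 31 : virtual_page_table (4 guest root entries)
 *	mem + 32 .. mem + 63 : shadow_page_table  (4 shadow root entries)
 *
 * For a GGTT mm only the virtual page table is allocated, sized to cover
 * the whole guest graphics memory range.
 */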
static void gen8_mm_free_page_table(struct intel_vgpu_mm *mm)
{
	if (mm->type == INTEL_GVT_MM_PPGTT) {
		kfree(mm->virtual_page_table);
	} else if (mm->type == INTEL_GVT_MM_GGTT) {
		if (mm->virtual_page_table)
			vfree(mm->virtual_page_table);
	}
	mm->virtual_page_table = mm->shadow_page_table = NULL;
}
static void invalidate_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_gvt_gtt_entry se;
	int i;

	if (WARN_ON(!mm->has_shadow_page_table || !mm->shadowed))
		return;

	for (i = 0; i < mm->page_table_entry_cnt; i++) {
		ppgtt_get_shadow_root_entry(mm, &se, i);
		if (!ops->test_present(&se))
			continue;
		ppgtt_invalidate_shadow_page_by_shadow_entry(
				vgpu, &se);
		se.val64 = 0;
		ppgtt_set_shadow_root_entry(mm, &se, i);

		trace_gpt_change(vgpu->id, "destroy root pointer",
				NULL, se.type, se.val64, i);
	}
	mm->shadowed = false;
}
/**
 * intel_vgpu_destroy_mm - destroy a mm object
 * @mm_ref: the kref embedded in the mm object
 *
 * This function is used to destroy a mm object for a vGPU.
 *
 */
void intel_vgpu_destroy_mm(struct kref *mm_ref)
{
	struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;

	if (!mm->initialized)
		goto out;

	list_del(&mm->list);
	list_del(&mm->lru_list);

	if (mm->has_shadow_page_table)
		invalidate_mm(mm);

	gtt->mm_free_page_table(mm);
out:
	kfree(mm);
}
static int shadow_mm(struct intel_vgpu_mm *mm)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
	struct intel_vgpu_ppgtt_spt *spt;
	struct intel_gvt_gtt_entry ge, se;
	int i;
	int ret;

	if (WARN_ON(!mm->has_shadow_page_table || mm->shadowed))
		return 0;

	mm->shadowed = true;

	for (i = 0; i < mm->page_table_entry_cnt; i++) {
		ppgtt_get_guest_root_entry(mm, &ge, i);
		if (!ops->test_present(&ge))
			continue;

		trace_gpt_change(vgpu->id, __func__, NULL,
				ge.type, ge.val64, i);

		spt = ppgtt_populate_shadow_page_by_guest_entry(vgpu, &ge);
		if (IS_ERR(spt)) {
			gvt_err("fail to populate guest root pointer\n");
			ret = PTR_ERR(spt);
			goto fail;
		}
		ppgtt_generate_shadow_entry(&se, spt, &ge);
		ppgtt_set_shadow_root_entry(mm, &se, i);

		trace_gpt_change(vgpu->id, "populate root pointer",
				NULL, se.type, se.val64, i);
	}
	return 0;
fail:
	invalidate_mm(mm);
	return ret;
}
/**
 * intel_vgpu_create_mm - create a mm object for a vGPU
 * @vgpu: a vGPU
 * @mm_type: mm object type, should be PPGTT or GGTT
 * @virtual_page_table: page table root pointers. Could be NULL if user wants
 *	to populate shadow later.
 * @page_table_level: describe the page table level of the mm object
 * @pde_base_index: pde root pointer base in GGTT MMIO.
 *
 * This function is used to create a mm object for a vGPU.
 *
 * Returns:
 * Zero on success, negative error code in pointer if failed.
 */
struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
		int mm_type, void *virtual_page_table, int page_table_level,
		u32 pde_base_index)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_mm *mm;
	int ret;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm) {
		ret = -ENOMEM;
		goto fail;
	}

	mm->type = mm_type;

	if (page_table_level == 1)
		mm->page_table_entry_type = GTT_TYPE_GGTT_PTE;
	else if (page_table_level == 3)
		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
	else if (page_table_level == 4)
		mm->page_table_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
	else {
		WARN_ON(1);
		ret = -EINVAL;
		goto fail;
	}

	mm->page_table_level = page_table_level;
	mm->pde_base_index = pde_base_index;

	mm->vgpu = vgpu;
	mm->has_shadow_page_table = !!(mm_type == INTEL_GVT_MM_PPGTT);

	kref_init(&mm->ref);
	atomic_set(&mm->pincount, 0);
	INIT_LIST_HEAD(&mm->list);
	INIT_LIST_HEAD(&mm->lru_list);
	list_add_tail(&mm->list, &vgpu->gtt.mm_list_head);

	ret = gtt->mm_alloc_page_table(mm);
	if (ret) {
		gvt_err("fail to allocate page table for mm\n");
		goto fail;
	}

	mm->initialized = true;

	if (virtual_page_table)
		memcpy(mm->virtual_page_table, virtual_page_table,
				mm->page_table_entry_size);

	if (mm->has_shadow_page_table) {
		ret = shadow_mm(mm);
		if (ret)
			goto fail;
		list_add_tail(&mm->lru_list, &gvt->gtt.mm_lru_list_head);
	}
	return mm;
fail:
	gvt_err("fail to create mm\n");
	if (mm)
		intel_gvt_mm_unreference(mm);
	return ERR_PTR(ret);
}
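/*
 * Usage sketch (added note, not from the original source): the PPGTT g2v
 * notification path further below creates a shadowed 4-level mm directly
 * from the guest root pointers, roughly as:
 *
 *	mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT, pdp, 4, 0);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *
 * while the per-vGPU GGTT mm is created once at init time with
 * INTEL_GVT_MM_GGTT, page_table_level == 1, and no root-pointer copy.
 */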
/**
 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when user doesn't want to use a vGPU mm object
 * anymore.
 */
void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
{
	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return;

	atomic_dec(&mm->pincount);
}

/**
 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
 * @mm: a vGPU mm object
 *
 * This function is called when user wants to use a vGPU mm object. If this
 * mm object hasn't been shadowed yet, the shadow will be populated at this
 * time.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
{
	int ret;

	if (WARN_ON(mm->type != INTEL_GVT_MM_PPGTT))
		return 0;

	atomic_inc(&mm->pincount);

	if (!mm->shadowed) {
		ret = shadow_mm(mm);
		if (ret)
			return ret;
	}

	list_del_init(&mm->lru_list);
	list_add_tail(&mm->lru_list, &mm->vgpu->gvt->gtt.mm_lru_list_head);
	return 0;
}

static int reclaim_one_mm(struct intel_gvt *gvt)
{
	struct intel_vgpu_mm *mm;
	struct list_head *pos, *n;

	list_for_each_safe(pos, n, &gvt->gtt.mm_lru_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, lru_list);

		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;
		if (atomic_read(&mm->pincount))
			continue;

		list_del_init(&mm->lru_list);
		invalidate_mm(mm);
		return 1;
	}
	return 0;
}
/*
 * GMA translation APIs.
 */
static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
		struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	struct intel_vgpu_ppgtt_spt *s;

	if (WARN_ON(!mm->has_shadow_page_table))
		return -EINVAL;

	s = ppgtt_find_shadow_page(vgpu, ops->get_pfn(e));
	if (!s)
		return -ENXIO;

	if (!guest)
		ppgtt_get_shadow_entry(s, e, index);
	else
		ppgtt_get_guest_entry(s, e, index);

	return 0;
}
/**
 * intel_vgpu_gma_to_gpa - translate a gma to GPA
 * @mm: mm object. could be a PPGTT or GGTT mm object
 * @gma: graphics memory address in this mm object
 *
 * This function is used to translate a graphics memory address in a specific
 * graphics memory space to a guest physical address.
 *
 * Returns:
 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
 */
unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
{
	struct intel_vgpu *vgpu = mm->vgpu;
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
	struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
	unsigned long gpa = INTEL_GVT_INVALID_ADDR;
	unsigned long gma_index[4];
	struct intel_gvt_gtt_entry e;
	int i, index;
	int ret;

	if (mm->type != INTEL_GVT_MM_GGTT && mm->type != INTEL_GVT_MM_PPGTT)
		return INTEL_GVT_INVALID_ADDR;

	if (mm->type == INTEL_GVT_MM_GGTT) {
		if (!vgpu_gmadr_is_valid(vgpu, gma))
			goto err;

		ggtt_get_guest_entry(mm, &e,
			gma_ops->gma_to_ggtt_pte_index(gma));
		gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
			+ (gma & ~GTT_PAGE_MASK);

		trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
		return gpa;
	}

	switch (mm->page_table_level) {
	case 4:
		ppgtt_get_shadow_root_entry(mm, &e, 0);
		gma_index[0] = gma_ops->gma_to_pml4_index(gma);
		gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
		gma_index[2] = gma_ops->gma_to_pde_index(gma);
		gma_index[3] = gma_ops->gma_to_pte_index(gma);
		index = 4;
		break;
	case 3:
		ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_l3_pdp_index(gma));
		gma_index[0] = gma_ops->gma_to_pde_index(gma);
		gma_index[1] = gma_ops->gma_to_pte_index(gma);
		index = 2;
		break;
	case 2:
		ppgtt_get_shadow_root_entry(mm, &e,
				gma_ops->gma_to_pde_index(gma));
		gma_index[0] = gma_ops->gma_to_pte_index(gma);
		index = 1;
		break;
	default:
		WARN_ON(1);
		goto err;
	}

	/* walk into the shadow page table and get gpa from guest entry */
	for (i = 0; i < index; i++) {
		ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
				(i == index - 1));
		if (ret)
			goto err;
	}

	gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
		+ (gma & ~GTT_PAGE_MASK);

	trace_gma_translate(vgpu->id, "ppgtt", 0,
			mm->page_table_level, gma, gpa);
	return gpa;
err:
	gvt_err("invalid mm type: %d gma %lx\n", mm->type, gma);
	return INTEL_GVT_INVALID_ADDR;
}
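/*
 * Usage sketch (added note, not from the original source): callers such as
 * command scanning and workload submission can use this helper to resolve a
 * graphics memory address that the guest programmed into a command, e.g.
 *
 *	gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, gma);
 *	if (gpa == INTEL_GVT_INVALID_ADDR)
 *		return -EFAULT;
 *
 * For a PPGTT mm the walk above goes through the shadow page tables and only
 * the last level is read back from the guest view.
 */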
static int emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes)
{
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	unsigned long index = off >> info->gtt_entry_size_shift;
	struct intel_gvt_gtt_entry e;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	ggtt_get_guest_entry(ggtt_mm, &e, index);
	memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
			bytes);
	return 0;
}

/**
 * intel_vgpu_emulate_gtt_mmio_read - emulate GTT MMIO register read
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data will be returned to guest
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register read.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_read(vgpu, off, p_data, bytes);
	return ret;
}

static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
	struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
	unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
	unsigned long gma;
	struct intel_gvt_gtt_entry e, m;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	gma = g_gtt_index << GTT_PAGE_SHIFT;

	/* the VM may configure the whole GM space when ballooning is used */
	if (WARN_ONCE(!vgpu_gmadr_is_valid(vgpu, gma),
			"vgpu%d: found oob ggtt write, offset %x\n",
			vgpu->id, off))
		return 0;

	ggtt_get_guest_entry(ggtt_mm, &e, g_gtt_index);

	memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
			bytes);

	if (ops->test_present(&e)) {
		ret = gtt_entry_p2m(vgpu, &e, &m);
		if (ret) {
			gvt_err("vgpu%d: fail to translate guest gtt entry\n",
					vgpu->id);
			return ret;
		}
	} else {
		m = e;
		m.val64 = 0;
	}

	ggtt_set_shadow_entry(ggtt_mm, &m, g_gtt_index);
	ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
	return 0;
}

/**
 * intel_vgpu_emulate_gtt_mmio_write - emulate GTT MMIO register write
 * @vgpu: a vGPU
 * @off: register offset
 * @p_data: data from guest write
 * @bytes: data length
 *
 * This function is used to emulate the GTT MMIO register write.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
	void *p_data, unsigned int bytes)
{
	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
	int ret;

	if (bytes != 4 && bytes != 8)
		return -EINVAL;

	off -= info->gtt_start_offset;
	ret = emulate_gtt_mmio_write(vgpu, off, p_data, bytes);
	return ret;
}
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
		intel_gvt_gtt_type_t type)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	int page_entry_num = GTT_PAGE_SIZE >>
				vgpu->gvt->device_info.gtt_entry_size_shift;
	void *scratch_pt;
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
		return -EINVAL;

	scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
	if (!scratch_pt) {
		gvt_err("fail to allocate scratch page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch_pt\n");
		__free_page(virt_to_page(scratch_pt));
		return -ENOMEM;
	}
	gtt->scratch_pt[type].page_mfn =
		(unsigned long)(daddr >> GTT_PAGE_SHIFT);
	gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
	gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
			vgpu->id, type, gtt->scratch_pt[type].page_mfn);

	/* Build the tree by completely filling the scratch pt with entries
	 * that point to the next-level scratch pt or scratch page.
	 * scratch_pt[type] indicates the scratch pt/scratch page used by the
	 * shadow page table of that type.
	 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
	 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
	 * is a GTT_TYPE_PPGTT_PTE_PT and is filled with scratch page mfns.
	 */
	if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
		struct intel_gvt_gtt_entry se;

		memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
		se.type = get_entry_type(type - 1);
		ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);

		/* The entry parameters like present/writeable/cache type
		 * are set to the same as i915's scratch page tree.
		 */
		se.val64 |= _PAGE_PRESENT | _PAGE_RW;
		if (type == GTT_TYPE_PPGTT_PDE_PT)
			se.val64 |= PPAT_CACHED_INDEX;

		for (i = 0; i < page_entry_num; i++)
			ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
	}

	return 0;
}
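/*
 * Illustrative picture (added note, not from the original source) of the
 * per-vGPU scratch page table tree built above, for a 4-level PPGTT:
 *
 *	scratch_pt[PML4_PT] entries -> scratch_pt[PDP_PT] page
 *	scratch_pt[PDP_PT]  entries -> scratch_pt[PDE_PT] page
 *	scratch_pt[PDE_PT]  entries -> scratch_pt[PTE_PT] page
 *
 * so a removed or non-present guest entry can be shadowed by pointing it at
 * the scratch table of the next level instead of a real guest page.
 */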
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i;
	struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL) {
			daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
					GTT_PAGE_SHIFT);
			dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
			__free_page(vgpu->gtt.scratch_pt[i].page);
			vgpu->gtt.scratch_pt[i].page = NULL;
			vgpu->gtt.scratch_pt[i].page_mfn = 0;
		}
	}

	return 0;
}

static int create_scratch_page_tree(struct intel_vgpu *vgpu)
{
	int i, ret;

	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		ret = alloc_scratch_pages(vgpu, i);
		if (ret)
			goto err;
	}

	return 0;

err:
	release_scratch_page_tree(vgpu);
	return ret;
}
/**
 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to initialize per-vGPU graphics memory virtualization
 * components.
 *
 * Returns:
 * Zero on success, error code if failed.
 */
int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
{
	struct intel_vgpu_gtt *gtt = &vgpu->gtt;
	struct intel_vgpu_mm *ggtt_mm;

	hash_init(gtt->guest_page_hash_table);
	hash_init(gtt->shadow_page_hash_table);

	INIT_LIST_HEAD(&gtt->mm_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_list_head);
	INIT_LIST_HEAD(&gtt->post_shadow_list_head);

	intel_vgpu_reset_ggtt(vgpu);

	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
			NULL, 1, 0);
	if (IS_ERR(ggtt_mm)) {
		gvt_err("fail to create mm for ggtt.\n");
		return PTR_ERR(ggtt_mm);
	}

	gtt->ggtt_mm = ggtt_mm;

	return create_scratch_page_tree(vgpu);
}

/**
 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
 * @vgpu: a vGPU
 *
 * This function is used to clean up per-vGPU graphics memory virtualization
 * components.
 */
void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_mm *mm;

	ppgtt_free_all_shadow_page(vgpu);
	release_scratch_page_tree(vgpu);

	list_for_each_safe(pos, n, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		vgpu->gvt->gtt.mm_free_page_table(mm);
		list_del(&mm->list);
		list_del(&mm->lru_list);
		kfree(mm);
	}
}
static void clean_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct list_head *pos, *n;
	struct intel_vgpu_oos_page *oos_page;

	WARN(!list_empty(&gtt->oos_page_use_list_head),
		"someone is still using oos page\n");

	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
		list_del(&oos_page->list);
		kfree(oos_page);
	}
}

static int setup_spt_oos(struct intel_gvt *gvt)
{
	struct intel_gvt_gtt *gtt = &gvt->gtt;
	struct intel_vgpu_oos_page *oos_page;
	int ret;
	int i;

	INIT_LIST_HEAD(&gtt->oos_page_free_list_head);
	INIT_LIST_HEAD(&gtt->oos_page_use_list_head);

	for (i = 0; i < preallocated_oos_pages; i++) {
		oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
		if (!oos_page) {
			gvt_err("fail to pre-allocate oos page\n");
			ret = -ENOMEM;
			goto fail;
		}

		INIT_LIST_HEAD(&oos_page->list);
		INIT_LIST_HEAD(&oos_page->vm_list);
		oos_page->id = i;
		list_add_tail(&oos_page->list, &gtt->oos_page_free_list_head);
	}

	gvt_dbg_mm("%d oos pages preallocated\n", i);

	return 0;
fail:
	clean_spt_oos(gvt);
	return ret;
}
/**
 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 * @root_entry: PPGTT page table root pointers
 *
 * This function is used to find a PPGTT mm object from the mm object pool.
 *
 * Returns:
 * pointer to mm object on success, NULL if failed.
 */
struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level, void *root_entry)
{
	struct list_head *pos;
	struct intel_vgpu_mm *mm;
	u64 *src, *dst;

	list_for_each(pos, &vgpu->gtt.mm_list_head) {
		mm = container_of(pos, struct intel_vgpu_mm, list);
		if (mm->type != INTEL_GVT_MM_PPGTT)
			continue;

		if (mm->page_table_level != page_table_level)
			continue;

		src = root_entry;
		dst = mm->virtual_page_table;

		if (page_table_level == 3) {
			if (src[0] == dst[0]
					&& src[1] == dst[1]
					&& src[2] == dst[2]
					&& src[3] == dst[3])
				return mm;
		} else {
			if (src[0] == dst[0])
				return mm;
		}
	}
	return NULL;
}

/**
 * intel_vgpu_g2v_create_ppgtt_mm - create a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to create a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (mm) {
		intel_gvt_mm_reference(mm);
	} else {
		mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_PPGTT,
				pdp, page_table_level, 0);
		if (IS_ERR(mm)) {
			gvt_err("fail to create mm\n");
			return PTR_ERR(mm);
		}
	}
	return 0;
}

/**
 * intel_vgpu_g2v_destroy_ppgtt_mm - destroy a PPGTT mm object from
 * g2v notification
 * @vgpu: a vGPU
 * @page_table_level: PPGTT page table level
 *
 * This function is used to destroy a PPGTT mm object from a guest to GVT-g
 * notification.
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level)
{
	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
	struct intel_vgpu_mm *mm;

	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
		return -EINVAL;

	mm = intel_vgpu_find_ppgtt_mm(vgpu, page_table_level, pdp);
	if (!mm) {
		gvt_err("fail to find ppgtt instance.\n");
		return -EINVAL;
	}
	intel_gvt_mm_unreference(mm);
	return 0;
}
/**
 * intel_gvt_init_gtt - initialize mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the initialization stage, to initialize
 * the mm components of a GVT device.
 *
 * Returns:
 * zero on success, negative error code if failed.
 */
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
	int ret;
	void *page;
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr;

	gvt_dbg_core("init gtt\n");

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
		gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
		gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
		gvt->gtt.mm_alloc_page_table = gen8_mm_alloc_page_table;
		gvt->gtt.mm_free_page_table = gen8_mm_free_page_table;
	} else {
		return -ENODEV;
	}

	page = (void *)get_zeroed_page(GFP_KERNEL);
	if (!page) {
		gvt_err("fail to allocate scratch ggtt page\n");
		return -ENOMEM;
	}

	daddr = dma_map_page(dev, virt_to_page(page), 0,
			4096, PCI_DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, daddr)) {
		gvt_err("fail to dmamap scratch ggtt page\n");
		__free_page(virt_to_page(page));
		return -ENOMEM;
	}
	gvt->gtt.scratch_ggtt_page = virt_to_page(page);
	gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);

	if (enable_out_of_sync) {
		ret = setup_spt_oos(gvt);
		if (ret) {
			gvt_err("fail to initialize SPT oos\n");
			return ret;
		}
	}

	INIT_LIST_HEAD(&gvt->gtt.mm_lru_list_head);
	return 0;
}

/**
 * intel_gvt_clean_gtt - clean up mm components of a GVT device
 * @gvt: GVT device
 *
 * This function is called at the driver unloading stage, to clean up the
 * mm components of a GVT device.
 *
 */
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
	struct device *dev = &gvt->dev_priv->drm.pdev->dev;
	dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
					GTT_PAGE_SHIFT);

	dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);

	__free_page(gvt->gtt.scratch_ggtt_page);

	if (enable_out_of_sync)
		clean_spt_oos(gvt);
}
/**
 * intel_vgpu_reset_ggtt - reset the GGTT entries
 * @vgpu: a vGPU
 *
 * This function is called at the vGPU create stage
 * to reset all the GGTT entries.
 *
 */
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
	u32 index;
	u32 offset;
	u32 num_entries;
	struct intel_gvt_gtt_entry e;

	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
	e.type = GTT_TYPE_GGTT_PTE;
	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
	e.val64 |= _PAGE_PRESENT;

	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
	for (offset = 0; offset < num_entries; offset++)
		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);

	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
	for (offset = 0; offset < num_entries; offset++)
		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
}
/**
 * intel_vgpu_reset_gtt - reset all GTT related status
 * @vgpu: a vGPU
 * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
 *
 * This function is called from the vfio core to reset all
 * GTT related status, including GGTT, PPGTT and scratch pages.
 *
 */
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
{
	int i;

	ppgtt_free_all_shadow_page(vgpu);
	if (!dmlr)
		return;

	intel_vgpu_reset_ggtt(vgpu);

	/* clear scratch pages for security */
	for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
		if (vgpu->gtt.scratch_pt[i].page != NULL)
			memset(page_address(vgpu->gtt.scratch_pt[i].page),
				0, PAGE_SIZE);
	}
}