/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view is
 * different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects were
 * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics which means that the passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */
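/*
 * For illustration only (a sketch, not part of the original file): requesting
 * a VMA with a specific view. The exact lookup helper and the full contents
 * of struct i915_ggtt_view live elsewhere (i915_gem_gtt.h) and may differ;
 * the point is that the view descriptor is passed by pointer and copied by
 * the core API, so a stack-local or const descriptor is sufficient:
 *
 *	const struct i915_ggtt_view *view = &i915_ggtt_view_rotated;
 *	struct i915_vma *vma;
 *
 *	vma = i915_gem_obj_lookup_or_create_vma(obj, &ggtt->base, view);
 *	if (IS_ERR(vma))
 *		return PTR_ERR(vma);
 *
 * Callers that only need the normal mapping can use the i915_ggtt_view_normal
 * singleton defined below instead.
 */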
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

const struct i915_ggtt_view i915_ggtt_view_normal = {
	.type = I915_GGTT_VIEW_NORMAL,
};
const struct i915_ggtt_view i915_ggtt_view_rotated = {
	.type = I915_GGTT_VIEW_ROTATED,
};
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
	has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
	has_full_48bit_ppgtt =
		IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;

	if (intel_vgpu_active(dev_priv)) {
		/* emulation is too hard */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;

	return has_aliasing_ppgtt ? 1 : 0;
}
static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags = 0;

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->size);
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}
static int __setup_page_dma(struct drm_i915_private *dev_priv,
			    struct i915_page_dma *p, gfp_t flags)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	p->page = alloc_page(flags);
	if (!p->page)
		return -ENOMEM;

	p->daddr = dma_map_page(kdev,
				p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);

	if (dma_mapping_error(kdev, p->daddr)) {
		__free_page(p->page);
		return -EINVAL;
	}

	return 0;
}

static int setup_page_dma(struct drm_i915_private *dev_priv,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(dev_priv, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct drm_i915_private *dev_priv,
			     struct i915_page_dma *p)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (WARN_ON(!p->page))
		return;

	dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
	__free_page(p->page);
	memset(p, 0, sizeof(*p));
}

static void *kmap_page_dma(struct i915_page_dma *p)
{
	return kmap_atomic(p->page);
}

/* We use the flushing unmap only with ppgtt structures:
 * page directories, page tables and scratch pages.
 */
static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
{
	/* There are only few exceptions for gen >=6. chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
		drm_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) \
		kunmap_page_dma((ppgtt)->base.i915, (vaddr))

#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
#define fill32_px(dev_priv, px, v) \
		fill_page_dma_32((dev_priv), px_base(px), (v))

static void fill_page_dma(struct drm_i915_private *dev_priv,
			  struct i915_page_dma *p, const uint64_t val)
{
	int i;
	uint64_t * const vaddr = kmap_page_dma(p);

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_page_dma(dev_priv, vaddr);
}

static void fill_page_dma_32(struct drm_i915_private *dev_priv,
			     struct i915_page_dma *p, const uint32_t val32)
{
	uint64_t v = val32;

	v = v << 32 | val32;

	fill_page_dma(dev_priv, p, v);
}

static int
setup_scratch_page(struct drm_i915_private *dev_priv,
		   struct i915_page_dma *scratch,
		   gfp_t gfp)
{
	return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO);
}

static void cleanup_scratch_page(struct drm_i915_private *dev_priv,
				 struct i915_page_dma *scratch)
{
	cleanup_page_dma(dev_priv, scratch);
}
static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv)
{
	struct i915_page_table *pt;
	const size_t count = INTEL_GEN(dev_priv) >= 8 ? GEN8_PTES : GEN6_PTES;
	int ret = -ENOMEM;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
				GFP_KERNEL);
	if (!pt->used_ptes)
		goto fail_bitmap;

	ret = setup_px(dev_priv, pt);
	if (ret)
		goto fail_page_m;

	return pt;

fail_page_m:
	kfree(pt->used_ptes);
fail_bitmap:
	kfree(pt);

	return ERR_PTR(ret);
}

static void free_pt(struct drm_i915_private *dev_priv,
		    struct i915_page_table *pt)
{
	cleanup_px(dev_priv, pt);
	kfree(pt->used_ptes);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen8_pte_t scratch_pte;

	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
				      I915_CACHE_LLC);

	fill_px(vm->i915, pt, scratch_pte);
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen6_pte_t scratch_pte;

	WARN_ON(vm->scratch_page.daddr == 0);

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	fill32_px(vm->i915, pt, scratch_pte);
}
static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
{
	struct i915_page_directory *pd;
	int ret = -ENOMEM;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
				sizeof(*pd->used_pdes), GFP_KERNEL);
	if (!pd->used_pdes)
		goto fail_bitmap;

	ret = setup_px(dev_priv, pd);
	if (ret)
		goto fail_page_m;

	return pd;

fail_page_m:
	kfree(pd->used_pdes);
fail_bitmap:
	kfree(pd);

	return ERR_PTR(ret);
}

static void free_pd(struct drm_i915_private *dev_priv,
		    struct i915_page_directory *pd)
{
	if (px_page(pd)) {
		cleanup_px(dev_priv, pd);
		kfree(pd->used_pdes);
		kfree(pd);
	}
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	gen8_pde_t scratch_pde;

	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

	fill_px(vm->i915, pd, scratch_pde);
}
static int __pdp_init(struct drm_i915_private *dev_priv,
		      struct i915_page_directory_pointer *pdp)
{
	size_t pdpes = I915_PDPES_PER_PDP(dev_priv);

	pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
				  sizeof(unsigned long),
				  GFP_KERNEL);
	if (!pdp->used_pdpes)
		return -ENOMEM;

	pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
				      GFP_KERNEL);
	if (!pdp->page_directory) {
		kfree(pdp->used_pdpes);
		/* the PDP might be the statically allocated top level. Keep it
		 * as clean as possible */
		pdp->used_pdpes = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->used_pdpes);
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static struct
i915_page_directory_pointer *alloc_pdp(struct drm_i915_private *dev_priv)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(dev_priv, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(dev_priv, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct drm_i915_private *dev_priv,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);
	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		cleanup_px(dev_priv, pdp);
		kfree(pdp);
	}
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm->i915, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	gen8_ppgtt_pml4e_t scratch_pml4e;

	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
					  I915_CACHE_LLC);

	fill_px(vm->i915, pml4, scratch_pml4e);
}
static void
gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  struct i915_page_directory *pd,
			  int index)
{
	gen8_ppgtt_pdpe_t *page_directorypo;

	if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
		return;

	page_directorypo = kmap_px(pdp);
	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_px(ppgtt, page_directorypo);
}

static void
gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
				  struct i915_pml4 *pml4,
				  struct i915_page_directory_pointer *pdp,
				  int index)
{
	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);

	WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)));

	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_px(ppgtt, pagemap);
}
/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
	intel_ring_emit(ring, upper_32_bits(addr));
	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
	intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
	intel_ring_emit(ring, lower_32_bits(addr));
	intel_ring_advance(ring);

	return 0;
}

static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
				 struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}
/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				uint64_t start,
				uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	gen8_pte_t *pt_vaddr;
	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
						 I915_CACHE_LLC);

	if (WARN_ON(!px_page(pt)))
		return false;

	GEM_BUG_ON(pte_end > GEN8_PTES);

	bitmap_clear(pt->used_ptes, pte, num_entries);

	if (bitmap_empty(pt->used_ptes, GEN8_PTES))
		return true;

	pt_vaddr = kmap_px(pt);

	while (pte < pte_end)
		pt_vaddr[pte++] = scratch_pte;

	kunmap_px(ppgtt, pt_vaddr);

	return false;
}
/* Removes entries from a single page dir, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries
 */
static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				uint64_t start,
				uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_table *pt;
	uint64_t pde;
	gen8_pde_t *pde_vaddr;
	gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
						 I915_CACHE_LLC);

	gen8_for_each_pde(pt, pd, start, length, pde) {
		if (WARN_ON(!pd->page_table[pde]))
			break;

		if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
			__clear_bit(pde, pd->used_pdes);
			pde_vaddr = kmap_px(pd);
			pde_vaddr[pde] = scratch_pde;
			kunmap_px(ppgtt, pde_vaddr);
			free_pt(vm->i915, pt);
		}
	}

	if (bitmap_empty(pd->used_pdes, I915_PDES))
		return true;

	return false;
}
/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 uint64_t start,
				 uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_directory *pd;
	uint64_t pdpe;
	gen8_ppgtt_pdpe_t *pdpe_vaddr;
	gen8_ppgtt_pdpe_t scratch_pdpe =
		gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (WARN_ON(!pdp->page_directory[pdpe]))
			break;

		if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
			__clear_bit(pdpe, pdp->used_pdpes);
			if (USES_FULL_48BIT_PPGTT(dev_priv)) {
				pdpe_vaddr = kmap_px(pdp);
				pdpe_vaddr[pdpe] = scratch_pdpe;
				kunmap_px(ppgtt, pdpe_vaddr);
			}
			free_pd(vm->i915, pd);
		}
	}

	mark_tlbs_dirty(ppgtt);

	if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)))
		return true;

	return false;
}
/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory_pointer *pdp;
	uint64_t pml4e;
	gen8_ppgtt_pml4e_t *pml4e_vaddr;
	gen8_ppgtt_pml4e_t scratch_pml4e =
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC);

	GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (WARN_ON(!pml4->pdps[pml4e]))
			break;

		if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
			__clear_bit(pml4e, pml4->used_pml4es);
			pml4e_vaddr = kmap_px(pml4);
			pml4e_vaddr[pml4e] = scratch_pml4e;
			kunmap_px(ppgtt, pml4e_vaddr);
			free_pdp(vm->i915, pdp);
		}
	}
}

static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start, uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (USES_FULL_48BIT_PPGTT(vm->i915))
		gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
	else
		gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
}
static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
			      struct i915_page_directory_pointer *pdp,
			      struct sg_page_iter *sg_iter,
			      uint64_t start,
			      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen8_pte_t *pt_vaddr;
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);

	pt_vaddr = NULL;

	while (__sg_page_iter_next(sg_iter)) {
		if (pt_vaddr == NULL) {
			struct i915_page_directory *pd = pdp->page_directory[pdpe];
			struct i915_page_table *pt = pd->page_table[pde];
			pt_vaddr = kmap_px(pt);
		}

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
					cache_level);
		if (++pte == GEN8_PTES) {
			kunmap_px(ppgtt, pt_vaddr);
			pt_vaddr = NULL;
			if (++pde == I915_PDES) {
				if (++pdpe == I915_PDPES_PER_PDP(vm->i915))
					break;
				pde = 0;
			}
			pte = 0;
		}
	}

	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level,
				      u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sg_page_iter sg_iter;

	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);

	if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
					      cache_level);
	} else {
		struct i915_page_directory_pointer *pdp;
		uint64_t pml4e;
		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;

		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
						      start, cache_level);
		}
	}
}
static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
		if (WARN_ON(!pd->page_table[i]))
			continue;

		free_pt(dev_priv, pd->page_table[i]);
		pd->page_table[i] = NULL;
	}
}
static int gen8_init_scratch(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	int ret;

	ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(dev_priv);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(dev_priv);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		vm->scratch_pdp = alloc_pdp(dev_priv);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (USES_FULL_48BIT_PPGTT(dev_priv))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(dev_priv, vm->scratch_pd);
free_pt:
	free_pt(dev_priv, vm->scratch_pt);
free_scratch_page:
	cleanup_scratch_page(dev_priv, &vm->scratch_page);

	return ret;
}
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	enum vgt_g2v_type msg;
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	int i;

	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}
static void gen8_free_scratch(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;

	if (USES_FULL_48BIT_PPGTT(dev_priv))
		free_pdp(dev_priv, vm->scratch_pdp);
	free_pd(dev_priv, vm->scratch_pd);
	free_pt(dev_priv, vm->scratch_pt);
	cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
				    struct i915_page_directory_pointer *pdp)
{
	int i;

	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)) {
		if (WARN_ON(!pdp->page_directory[i]))
			continue;

		gen8_free_page_tables(dev_priv, pdp->page_directory[i]);
		free_pd(dev_priv, pdp->page_directory[i]);
	}

	free_pdp(dev_priv, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	int i;

	for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
		if (WARN_ON(!ppgtt->pml4.pdps[i]))
			continue;

		gen8_ppgtt_cleanup_3lvl(dev_priv, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(dev_priv, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (!USES_FULL_48BIT_PPGTT(dev_priv))
		gen8_ppgtt_cleanup_3lvl(dev_priv, &ppgtt->pdp);
	else
		gen8_ppgtt_cleanup_4lvl(ppgtt);

	gen8_free_scratch(vm);
}
/**
 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
 * @vm:	Master vm structure.
 * @pd:	Page directory for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pts:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page tables. Extremely similar to
 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
 * the page directory boundary (instead of the page directory pointer). That
 * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
 * possible, and likely that the caller will need to use multiple calls of this
 * function to achieve the appropriate allocation.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
				     struct i915_page_directory *pd,
				     uint64_t start,
				     uint64_t length,
				     unsigned long *new_pts)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_table *pt;
	uint32_t pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		/* Don't reallocate page tables */
		if (test_bit(pde, pd->used_pdes)) {
			/* Scratch is never allocated this way */
			WARN_ON(pt == vm->scratch_pt);
			continue;
		}

		pt = alloc_pt(dev_priv);
		if (IS_ERR(pt))
			goto unwind_out;

		gen8_initialize_pt(vm, pt);
		pd->page_table[pde] = pt;
		__set_bit(pde, new_pts);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pde, new_pts, I915_PDES)
		free_pt(dev_priv, pd->page_table[pde]);

	return -ENOMEM;
}
/**
 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
 * @vm:	Master vm structure.
 * @pdp:	Page directory pointer for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pds:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directories starting at the pde index of
 * @start, and ending at the pde index @start + @length. This function will skip
 * over already allocated page directories within the range, and only allocate
 * new ones, setting the appropriate pointer within the pdp as well as the
 * correct position in the bitmap @new_pds.
 *
 * The function will only allocate the pages within the range for a give page
 * directory pointer. In other words, if @start + @length straddles a virtually
 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
 * required by the caller, This is not currently possible, and the BUG in the
 * code will prevent it.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
				  struct i915_page_directory_pointer *pdp,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pds)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_directory *pd;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);

	WARN_ON(!bitmap_empty(new_pds, pdpes));

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (test_bit(pdpe, pdp->used_pdpes))
			continue;

		pd = alloc_pd(dev_priv);
		if (IS_ERR(pd))
			goto unwind_out;

		gen8_initialize_pd(vm, pd);
		pdp->page_directory[pdpe] = pd;
		__set_bit(pdpe, new_pds);
		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pdpe, new_pds, pdpes)
		free_pd(dev_priv, pdp->page_directory[pdpe]);

	return -ENOMEM;
}
/**
 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
 * @vm:	Master vm structure.
 * @pml4:	Page map level 4 for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pdps:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directory pointers. Extremely similar to
 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
 * The main difference is here we are limited by the pml4 boundary (instead of
 * the page directory pointer).
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pdps)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_directory_pointer *pdp;
	uint32_t pml4e;

	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (!test_bit(pml4e, pml4->used_pml4es)) {
			pdp = alloc_pdp(dev_priv);
			if (IS_ERR(pdp))
				goto unwind_out;

			gen8_initialize_pdp(vm, pdp);
			pml4->pdps[pml4e] = pdp;
			__set_bit(pml4e, new_pdps);
			trace_i915_page_directory_pointer_entry_alloc(vm,
								      pml4e,
								      start,
								      GEN8_PML4E_SHIFT);
		}
	}

	return 0;

unwind_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		free_pdp(dev_priv, pml4->pdps[pml4e]);

	return -ENOMEM;
}
static void
free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
{
	kfree(new_pts);
	kfree(new_pds);
}

/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
 * of these are based on the number of PDPEs in the system.
 */
static
int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
					 unsigned long **new_pts,
					 uint32_t pdpes)
{
	unsigned long *pds;
	unsigned long *pts;

	pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
	if (!pds)
		return -ENOMEM;

	pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
		      GFP_TEMPORARY);
	if (!pts)
		goto err_out;

	*new_pds = pds;
	*new_pts = pts;

	return 0;

err_out:
	free_gen8_temp_bitmaps(pds, pts);
	return -ENOMEM;
}
static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp,
				    uint64_t start,
				    uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned long *new_page_dirs, *new_page_tables;
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_directory *pd;
	const uint64_t orig_start = start;
	const uint64_t orig_length = length;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
	int ret;

	/* Wrap is never okay since we can only represent 48b, and we don't
	 * actually use the other side of the canonical address space.
	 */
	if (WARN_ON(start + length < start))
		return -ENODEV;

	if (WARN_ON(start + length > vm->total))
		return -ENODEV;

	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Do the allocations first so we can easily bail out */
	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
						new_page_dirs);
	if (ret) {
		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
		return ret;
	}

	/* For every page directory referenced, allocate page tables */
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
		if (ret)
			goto err_out;
	}

	start = orig_start;
	length = orig_length;

	/* Allocations have completed successfully, so set the bitmaps, and do
	 * the mappings. */
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		gen8_pde_t *const page_directory = kmap_px(pd);
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		/* Every pd should be allocated, we just did that above. */
		WARN_ON(!pd);

		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			/* Same reasoning as pd */
			WARN_ON(!pt);
			WARN_ON(!pd_len);
			WARN_ON(!gen8_pte_count(pd_start, pd_len));

			/* Set our used ptes within the page table */
			bitmap_set(pt->used_ptes,
				   gen8_pte_index(pd_start),
				   gen8_pte_count(pd_start, pd_len));

			/* Our pde is now pointing to the pagetable, pt */
			__set_bit(pde, pd->used_pdes);

			/* Map the PDE to the page table */
			page_directory[pde] = gen8_pde_encode(px_dma(pt),
							      I915_CACHE_LLC);
			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
							gen8_pte_index(start),
							gen8_pte_count(start, length),
							GEN8_PTES);

			/* NB: We haven't yet mapped ptes to pages. At this
			 * point we're still relying on insert_entries() */
		}

		kunmap_px(ppgtt, page_directory);
		__set_bit(pdpe, pdp->used_pdpes);
		gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
	}

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return 0;

err_out:
	while (pdpe--) {
		unsigned long temp;

		for_each_set_bit(temp, new_page_tables + pdpe *
				BITS_TO_LONGS(I915_PDES), I915_PDES)
			free_pt(dev_priv,
				pdp->page_directory[pdpe]->page_table[temp]);
	}

	for_each_set_bit(pdpe, new_page_dirs, pdpes)
		free_pd(dev_priv, pdp->page_directory[pdpe]);

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return ret;
}
static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
				    struct i915_pml4 *pml4,
				    uint64_t start,
				    uint64_t length)
{
	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory_pointer *pdp;
	uint64_t pml4e;
	int ret = 0;

	/* Do the pml4 allocations first, so we don't need to track the newly
	 * allocated tables below the pdp */
	bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);

	/* The pagedirectory and pagetable allocations are done in the shared 3
	 * and 4 level code. Just allocate the pdps.
	 */
	ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
						new_pdps);
	if (ret)
		return ret;

	WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
	     "The allocation has spanned more than 512GB. "
	     "It is highly likely this is incorrect.");

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		WARN_ON(!pdp);

		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
		if (ret)
			goto err_out;

		gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
	}

	bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
		  GEN8_PML4ES_PER_PML4);

	return 0;

err_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]);

	return ret;
}
static int gen8_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start, uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (USES_FULL_48BIT_PPGTT(vm->i915))
		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
	else
		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}
static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
			  uint64_t start, uint64_t length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_page_directory *pd;
	uint32_t pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		if (!test_bit(pdpe, pdp->used_pdpes))
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			uint32_t pte;
			gen8_pte_t *pt_vaddr;

			if (!test_bit(pde, pd->used_pdes))
				continue;

			pt_vaddr = kmap_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				uint64_t va =
					(pdpe << GEN8_PDPE_SHIFT) |
					(pde << GEN8_PDE_SHIFT) |
					(pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			/* don't use kunmap_px, it could trigger
			 * an unnecessary flush.
			 */
			kunmap_atomic(pt_vaddr);
		}
	}
}
static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	uint64_t start = ppgtt->base.start;
	uint64_t length = ppgtt->base.total;
	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
						 I915_CACHE_LLC);

	if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
	} else {
		uint64_t pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (!test_bit(pml4e, pml4->used_pml4es))
				continue;

			seq_printf(m, "    PML4E #%llu\n", pml4e);
			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
		}
	}
}
static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
	unsigned long *new_page_dirs, *new_page_tables;
	uint32_t pdpes = I915_PDPES_PER_PDP(to_i915(ppgtt->base.dev));
	int ret;

	/* We allocate temp bitmap for page tables for no gain
	 * but as this is for init only, lets keep the things simple
	 */
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Allocate for all pdps regardless of how the ppgtt
	 * was defined.
	 */
	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
						0, 1ULL << 32,
						new_page_dirs);
	if (!ret)
		*ppgtt->pdp.used_pdpes = *new_page_dirs;

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);

	return ret;
}
/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 *
 */
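/*
 * Illustrative arithmetic (added for clarity, assuming the 4KiB pages used
 * throughout this file): one page table maps 512 * 4KiB = 2MiB, one page
 * directory maps 512 * 2MiB = 1GiB, and one PDP maps 512 * 1GiB = 512GiB.
 * The legacy 3-level layout below uses 4 page directory pointer entries,
 * hence 4 * 1GiB = 4GiB (1ULL << 32); the 4-level layout adds a PML4 with
 * 512 PDPs for 512 * 512GiB = 256TiB (1ULL << 48).
 */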
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	int ret;

	ret = gen8_init_scratch(&ppgtt->base);
	if (ret)
		return ret;

	ppgtt->base.start = 0;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		ret = setup_px(dev_priv, &ppgtt->pml4);
		if (ret)
			goto free_scratch;

		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

		ppgtt->base.total = 1ULL << 48;
		ppgtt->switch_mm = gen8_48b_mm_switch;
	} else {
		ret = __pdp_init(dev_priv, &ppgtt->pdp);
		if (ret)
			goto free_scratch;

		ppgtt->base.total = 1ULL << 32;
		ppgtt->switch_mm = gen8_legacy_mm_switch;
		trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
							      0, 0,
							      GEN8_PML4E_SHIFT);

		if (intel_vgpu_active(dev_priv)) {
			ret = gen8_preallocate_top_level_pdps(ppgtt);
			if (ret)
				goto free_scratch;
		}
	}

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	return 0;

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
}
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_table *unused;
	gen6_pte_t scratch_pte;
	uint32_t pd_entry;
	uint32_t pte, pde;
	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
		pd_entry = readl(ppgtt->pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte+=4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;

			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, " SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_px(ppgtt, pt_vaddr);
	}
}
/* Write pde (index) from the page directory @pd to the page table @pt */
static void gen6_write_pde(struct i915_page_directory *pd,
			   const int pde, struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	struct i915_hw_ppgtt *ppgtt =
		container_of(pd, struct i915_hw_ppgtt, pd);
	u32 pd_entry;

	pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
	pd_entry |= GEN6_PDE_VALID;

	writel(pd_entry, ppgtt->pd_addr + pde);
}

/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct drm_i915_private *dev_priv,
				  struct i915_page_directory *pd,
				  uint32_t start, uint32_t length)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_page_table *pt;
	uint32_t pde;

	gen6_for_each_pde(pt, pd, start, length, pde)
		gen6_write_pde(pd, pde, pt);

	/* Make sure write is complete before other code can use this page
	 * table. Also require for WC mapped PTEs */
	readl(ggtt->gsm);
}
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);

	return (ppgtt->pd.base.ggtt_offset / 64) << 16;
}
static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_ring *ring = req->ring;
	struct intel_engine_cs *engine = req->engine;
	int ret;

	/* NB: TLBs must be flushed and invalidated before a switch */
	ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
	intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
	intel_ring_emit(ring, PP_DIR_DCLV_2G);
	intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
	intel_ring_emit(ring, get_pd_offset(ppgtt));
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	/* XXX: RCS is the only one to auto invalidate the TLBs? */
	if (engine->id != RCS) {
		ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
		if (ret)
			return ret;
	}

	return 0;
}
static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = req->i915;

	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
	return 0;
}
static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
				 GEN8_GFX_PPGTT_48B : 0;
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
	}
}

static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	uint32_t ecochk, ecobits;
	enum intel_engine_id id;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev_priv)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_engine(engine, dev_priv, id) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	uint32_t ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}
/* PPGTT support for Sandybdrige/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start,
				   uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen6_pte_t *pt_vaddr, scratch_pte;
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned first_pte = first_entry % GEN6_PTES;
	unsigned last_pte, i;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > GEN6_PTES)
			last_pte = GEN6_PTES;

		pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_px(ppgtt, pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level, u32 flags)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned act_pte = first_entry % GEN6_PTES;
	gen6_pte_t *pt_vaddr = NULL;
	struct sgt_iter sgt_iter;
	dma_addr_t addr;

	for_each_sgt_dma(addr, sgt_iter, pages) {
		if (pt_vaddr == NULL)
			pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);

		pt_vaddr[act_pte] =
			vm->pte_encode(addr, cache_level, flags);

		if (++act_pte == GEN6_PTES) {
			kunmap_px(ppgtt, pt_vaddr);
			pt_vaddr = NULL;
			act_pt++;
			act_pte = 0;
		}
	}

	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
}
static int gen6_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start_in, uint64_t length_in)
{
	DECLARE_BITMAP(new_page_tables, I915_PDES);
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_table *pt;
	uint32_t start, length, start_save, length_save;
	uint32_t pde;
	int ret;

	if (WARN_ON(start_in + length_in > ppgtt->base.total))
		return -ENODEV;

	start = start_save = start_in;
	length = length_save = length_in;

	bitmap_zero(new_page_tables, I915_PDES);

	/* The allocation is done in two stages so that we can bail out with
	 * minimal amount of pain. The first stage finds new page tables that
	 * need allocation. The second stage marks use ptes within the page
	 * tables.
	 */
	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		if (pt != vm->scratch_pt) {
			WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
			continue;
		}

		/* We've already allocated a page table */
		WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));

		pt = alloc_pt(dev_priv);
		if (IS_ERR(pt)) {
			ret = PTR_ERR(pt);
			goto unwind_out;
		}

		gen6_initialize_pt(vm, pt);

		ppgtt->pd.page_table[pde] = pt;
		__set_bit(pde, new_page_tables);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
	}

	start = start_save;
	length = length_save;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);

		bitmap_zero(tmp_bitmap, GEN6_PTES);
		bitmap_set(tmp_bitmap, gen6_pte_index(start),
			   gen6_pte_count(start, length));

		if (__test_and_clear_bit(pde, new_page_tables))
			gen6_write_pde(&ppgtt->pd, pde, pt);

		trace_i915_page_table_entry_map(vm, pde, pt,
						gen6_pte_index(start),
						gen6_pte_count(start, length),
						GEN6_PTES);
		bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
			  GEN6_PTES);
	}

	WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));

	/* Make sure write is complete before other code can use this page
	 * table. Also require for WC mapped PTEs */
	readl(ggtt->gsm);

	mark_tlbs_dirty(ppgtt);
	return 0;

unwind_out:
	for_each_set_bit(pde, new_page_tables, I915_PDES) {
		struct i915_page_table *pt = ppgtt->pd.page_table[pde];

		ppgtt->pd.page_table[pde] = vm->scratch_pt;
		free_pt(dev_priv, pt);
	}

	mark_tlbs_dirty(ppgtt);
	return ret;
}
static int gen6_init_scratch(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	int ret;

	ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(dev_priv);
	if (IS_ERR(vm->scratch_pt)) {
		cleanup_scratch_page(dev_priv, &vm->scratch_page);
		return PTR_ERR(vm->scratch_pt);
	}

	gen6_initialize_pt(vm, vm->scratch_pt);

	return 0;
}

static void gen6_free_scratch(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;

	free_pt(dev_priv, vm->scratch_pt);
	cleanup_scratch_page(dev_priv, &vm->scratch_page);
}
static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory *pd = &ppgtt->pd;
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_table *pt;
	uint32_t pde;

	drm_mm_remove_node(&ppgtt->node);

	gen6_for_all_pdes(pt, pd, pde)
		if (pt != vm->scratch_pt)
			free_pt(dev_priv, pt);

	gen6_free_scratch(vm);
}
static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	bool retried = false;
	int ret;

	/* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
	BUG_ON(!drm_mm_initialized(&ggtt->base.mm));

	ret = gen6_init_scratch(vm);
	if (ret)
		return ret;

alloc:
	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
						  &ppgtt->node, GEN6_PD_SIZE,
						  GEN6_PD_ALIGN, 0,
						  0, ggtt->base.total,
						  DRM_MM_TOPDOWN);
	if (ret == -ENOSPC && !retried) {
		ret = i915_gem_evict_something(&ggtt->base,
					       GEN6_PD_SIZE, GEN6_PD_ALIGN,
					       I915_COLOR_UNEVICTABLE,
					       0, ggtt->base.total,
					       0);
		if (ret)
			goto err_out;

		retried = true;
		goto alloc;
	}

	if (ret)
		goto err_out;

	if (ppgtt->node.start < ggtt->mappable_end)
		DRM_DEBUG("Forced to use aperture for PDEs\n");

	return 0;

err_out:
	gen6_free_scratch(vm);
	return ret;
}

static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
{
	return gen6_ppgtt_allocate_page_directories(ppgtt);
}
static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
				  uint64_t start, uint64_t length)
{
	struct i915_page_table *unused;
	uint32_t pde;

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
		ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
}
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ppgtt->base.pte_encode = ggtt->base.pte_encode;
	if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
		ppgtt->switch_mm = gen6_mm_switch;
	else if (IS_HASWELL(dev_priv))
		ppgtt->switch_mm = hsw_mm_switch;
	else if (IS_GEN7(dev_priv))
		ppgtt->switch_mm = gen7_mm_switch;
	else
		BUG();

	ret = gen6_ppgtt_alloc(ppgtt);
	if (ret)
		return ret;

	ppgtt->base.allocate_va_range = gen6_alloc_va_range;
	ppgtt->base.clear_range = gen6_ppgtt_clear_range;
	ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->base.cleanup = gen6_ppgtt_cleanup;
	ppgtt->base.start = 0;
	ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
	ppgtt->debug_dump = gen6_dump_ppgtt;

	ppgtt->pd.base.ggtt_offset =
		ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);

	ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
		ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);

	gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);

	gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);

	DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
			 ppgtt->node.size >> 20,
			 ppgtt->node.start / PAGE_SIZE);

	DRM_DEBUG("Adding PPGTT at offset %x\n",
		  ppgtt->pd.base.ggtt_offset << 10);

	return 0;
}
static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
			   struct drm_i915_private *dev_priv)
{
	ppgtt->base.i915 = dev_priv;

	if (INTEL_INFO(dev_priv)->gen < 8)
		return gen6_ppgtt_init(ppgtt);
	else
		return gen8_ppgtt_init(ppgtt);
}
static void i915_address_space_init(struct i915_address_space *vm,
				    struct drm_i915_private *dev_priv,
				    const char *name)
{
	i915_gem_timeline_init(dev_priv, &vm->timeline, name);
	drm_mm_init(&vm->mm, vm->start, vm->total);
	INIT_LIST_HEAD(&vm->active_list);
	INIT_LIST_HEAD(&vm->inactive_list);
	INIT_LIST_HEAD(&vm->unbound_list);
	list_add_tail(&vm->global_link, &dev_priv->vm_list);
}

static void i915_address_space_fini(struct i915_address_space *vm)
{
	i915_gem_timeline_fini(&vm->timeline);
	drm_mm_takedown(&vm->mm);
	list_del(&vm->global_link);
}
static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
{
	/* This function is for gtt related workarounds. This function is
	 * called on driver load and after a GPU reset, so you can place
	 * workarounds here even if they get overwritten by GPU reset.
	 */
	/* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
	if (IS_BROADWELL(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
	else if (IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
	else if (IS_SKYLAKE(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
	else if (IS_BROXTON(dev_priv))
		I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
}
static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
			   struct drm_i915_private *dev_priv,
			   struct drm_i915_file_private *file_priv,
			   const char *name)
{
	int ret;

	ret = __hw_ppgtt_init(ppgtt, dev_priv);
	if (ret == 0) {
		kref_init(&ppgtt->ref);
		i915_address_space_init(&ppgtt->base, dev_priv, name);
		ppgtt->base.file = file_priv;
	}

	return ret;
}
int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
{
	gtt_write_workarounds(dev_priv);

	/* In the case of execlists, PPGTT is enabled by the context descriptor
	 * and the PDPs are contained within the context itself.  We don't
	 * need to do anything here. */
	if (i915.enable_execlists)
		return 0;

	if (!USES_PPGTT(dev_priv))
		return 0;

	if (IS_GEN6(dev_priv))
		gen6_ppgtt_enable(dev_priv);
	else if (IS_GEN7(dev_priv))
		gen7_ppgtt_enable(dev_priv);
	else if (INTEL_GEN(dev_priv) >= 8)
		gen8_ppgtt_enable(dev_priv);
	else
		MISSING_CASE(INTEL_GEN(dev_priv));

	return 0;
}
struct i915_hw_ppgtt *
i915_ppgtt_create(struct drm_i915_private *dev_priv,
		  struct drm_i915_file_private *fpriv,
		  const char *name)
{
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return ERR_PTR(-ENOMEM);

	ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv, name);
	if (ret) {
		kfree(ppgtt);
		return ERR_PTR(ret);
	}

	trace_i915_ppgtt_create(&ppgtt->base);

	return ppgtt;
}
void i915_ppgtt_release(struct kref *kref)
{
	struct i915_hw_ppgtt *ppgtt =
		container_of(kref, struct i915_hw_ppgtt, ref);

	trace_i915_ppgtt_release(&ppgtt->base);

	/* vmas should already be unbound and destroyed */
	WARN_ON(!list_empty(&ppgtt->base.active_list));
	WARN_ON(!list_empty(&ppgtt->base.inactive_list));
	WARN_ON(!list_empty(&ppgtt->base.unbound_list));

	i915_address_space_fini(&ppgtt->base);

	ppgtt->base.cleanup(&ppgtt->base);
	kfree(ppgtt);
}
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static bool needs_idle_maps(struct drm_i915_private *dev_priv)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}
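/* Scan the per-engine fault registers, report any valid fault and clear
 * the valid bit so that stale faults are not reported again (gen6+ only).
 */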
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (INTEL_INFO(dev_priv)->gen < 6)
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 fault_reg;

		fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(engine),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}

	/* Engine specific init may not have been done till this point. */
	if (dev_priv->engine[RCS])
		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
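/* Make sure all pending GGTT PTE writes have landed: a chipset flush on
 * pre-gen6, a GFX_FLSH_CNTL write plus posting read otherwise.
 */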
static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen < 6) {
		intel_gtt_chipset_flush();
	} else {
		I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
		POSTING_READ(GFX_FLSH_CNTL_GEN6);
	}
}
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_GEN(dev_priv) < 6)
		return;

	i915_check_and_clear_faults(dev_priv);

	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);

	i915_ggtt_flush(dev_priv);
}
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	if (dma_map_sg(&obj->base.dev->pdev->dev,
		       pages->sgl, pages->nents,
		       PCI_DMA_BIDIRECTIONAL))
		return 0;

	return -ENOSPC;
}
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}
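/* Write a single gen8 GGTT PTE for the given DMA address and GGTT offset,
 * then flush it out to the chipset.
 */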
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct drm_i915_private *dev_priv = vm->i915;
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(offset >> PAGE_SHIFT);

	gen8_set_pte(pte, gen8_pte_encode(addr, level));

	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 unused)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen8_pte_t __iomem *gtt_entries;
	gen8_pte_t gtt_entry;
	dma_addr_t addr;
	int i = 0;

	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);

	for_each_sgt_dma(addr, sgt_iter, st) {
		gtt_entry = gen8_pte_encode(addr, level);
		gen8_set_pte(&gtt_entries[i++], gtt_entry);
	}

	/*
	 * XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
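/* gen8_ggtt_insert_entries__BKL() below is a variant of
 * gen8_ggtt_insert_entries() that runs under stop_machine(), i.e. with
 * every other CPU quiesced. gen8_gmch_probe() installs it on Cherryview,
 * presumably because GGTT updates there must not race with concurrent
 * access to the GGTT.
 */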
struct insert_entries {
	struct i915_address_space *vm;
	struct sg_table *st;
	uint64_t start;
	enum i915_cache_level level;
	u32 flags;
};

static int gen8_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;

	gen8_ggtt_insert_entries(arg->vm, arg->st,
				 arg->start, arg->level, arg->flags);

	return 0;
}

static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					  struct sg_table *st,
					  uint64_t start,
					  enum i915_cache_level level,
					  u32 flags)
{
	struct insert_entries arg = { vm, st, start, level, flags };

	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
}
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
		(offset >> PAGE_SHIFT);

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 flags)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen6_pte_t __iomem *gtt_entries;
	gen6_pte_t gtt_entry;
	dma_addr_t addr;
	int i = 0;

	gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);

	for_each_sgt_dma(addr, sgt_iter, st) {
		gtt_entry = vm->pte_encode(addr, level, flags);
		iowrite32(gtt_entry, &gtt_entries[i++]);
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
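/* The clear_range() backends below: nop_clear_range() deliberately leaves
 * the old PTEs in place, while gen8_ggtt_clear_range() points every PTE in
 * the range back at the scratch page. gen8_gmch_probe() picks between them.
 */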
static void nop_clear_range(struct i915_address_space *vm,
			    uint64_t start, uint64_t length)
{
}

static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start, uint64_t length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
				      I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
	readl(gtt_base);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start, uint64_t length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}
static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  uint64_t offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}
static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *pages,
				     uint64_t start,
				     enum i915_cache_level cache_level, u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
}
static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start, uint64_t length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
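/* Bind a VMA into the global GTT: look up the backing pages for its view
 * and write the PTEs while holding a runtime-PM wakeref, since writing the
 * GGTT requires the device to be awake.
 */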
static int ggtt_bind_vma(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags = 0;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	intel_runtime_pm_get(i915);
	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);
	intel_runtime_pm_put(i915);

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	return 0;
}
static int aliasing_gtt_bind_vma(struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	u32 pte_flags;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		vma->vm->insert_entries(vma->vm,
					vma->pages, vma->node.start,
					cache_level, pte_flags);
		intel_runtime_pm_put(i915);
	}

	if (flags & I915_VMA_LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;

		appgtt->base.insert_entries(&appgtt->base,
					    vma->pages, vma->node.start,
					    cache_level, pte_flags);
	}

	return 0;
}
static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
	const u64 size = min(vma->size, vma->node.size);

	if (vma->flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		vma->vm->clear_range(vma->vm,
				     vma->node.start, size);
		intel_runtime_pm_put(i915);
	}

	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start, size);
}
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct device *kdev = &dev_priv->drm.pdev->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (unlikely(ggtt->do_idle_maps)) {
		if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
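/* drm_mm colouring callback installed on non-LLC platforms (see
 * i915_ggtt_init_hw()): keep a guard page between neighbouring nodes of
 * different cache colours.
 */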
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  u64 *start,
				  u64 *end)
{
	if (node->color != color)
		*start += 4096;

	node = list_first_entry_or_null(&node->node_list,
					struct drm_mm_node,
					node_list);
	if (node && node->allocated && node->color != color)
		*end -= 4096;
}
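/* Hand the GGTT over to GEM: balloon out host-owned ranges when running
 * virtualised, reserve the error-capture slot, scrub unused space and,
 * when only aliasing PPGTT is in use, set up that PPGTT as well.
 */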
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long hole_start, hole_end;
	struct i915_hw_ppgtt *ppgtt;
	struct drm_mm_node *entry;
	int ret;

	ret = intel_vgt_balloon(dev_priv);
	if (ret)
		return ret;

	/* Reserve a mappable slot for our lockless error capture */
	ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
						  &ggtt->error_capture,
						  PAGE_SIZE, 0,
						  I915_COLOR_UNEVICTABLE,
						  0, ggtt->mappable_end,
						  0, 0);
	if (ret)
		return ret;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt->base.clear_range(&ggtt->base, hole_start,
				       hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->base.clear_range(&ggtt->base,
			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE);

	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
		if (!ppgtt) {
			ret = -ENOMEM;
			goto err;
		}

		ret = __hw_ppgtt_init(ppgtt, dev_priv);
		if (ret)
			goto err_ppgtt;

		if (ppgtt->base.allocate_va_range) {
			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
							    ppgtt->base.total);
			if (ret)
				goto err_ppgtt_cleanup;
		}

		ppgtt->base.clear_range(&ppgtt->base,
					ppgtt->base.start,
					ppgtt->base.total);

		dev_priv->mm.aliasing_ppgtt = ppgtt;
		WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
		ggtt->base.bind_vma = aliasing_gtt_bind_vma;
	}

	return 0;

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	kfree(ppgtt);
err:
	drm_mm_remove_node(&ggtt->error_capture);
	return ret;
}
/**
 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
 * @dev_priv: i915 device
 */
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		ppgtt->base.cleanup(&ppgtt->base);
		kfree(ppgtt);
	}

	i915_gem_cleanup_stolen(&dev_priv->drm);

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);

	if (drm_mm_initialized(&ggtt->base.mm)) {
		intel_vgt_deballoon(dev_priv);

		mutex_lock(&dev_priv->drm.struct_mutex);
		i915_address_space_fini(&ggtt->base);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	ggtt->base.cleanup(&ggtt->base);

	arch_phys_wc_del(ggtt->mtrr);
	io_mapping_fini(&ggtt->mappable);
}
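/* The helpers below decode the GGTT size and the stolen-memory size from
 * the GMCH control word in PCI config space; the bit encodings differ
 * between generations.
 */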
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}
static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}
static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}
static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}
static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
}
static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments starting at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
}
static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;

	if (gen9_gmch_ctl < 0xf0)
		return gen9_gmch_ctl << 25; /* 32 MB units */

	/* 4MB increments starting at 0xf0 for 4MB */
	return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	phys_addr_t phys_addr;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
	 * dropped. For WC mappings in general we have 64 byte burst writes
	 * when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(dev_priv))
		ggtt->gsm = ioremap_nocache(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		DRM_ERROR("Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	return 0;
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	if (!USES_PPGTT(dev_priv))
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
		pat = GEN8_PPAT(0, GEN8_PPAT_UC);

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}
static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	cleanup_scratch_page(vm->i915, &vm->scratch_page);
}
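/* Per-generation probe routines: size the GGTT and stolen memory, set the
 * DMA mask, program the PPAT where applicable and install the address-space
 * vfuncs on ggtt->base.
 */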
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	if (INTEL_GEN(dev_priv) >= 9) {
		ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	} else {
		ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
	}

	ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

	ggtt->base.cleanup = gen6_gmch_remove;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.insert_page = gen8_ggtt_insert_page;
	ggtt->base.clear_range = nop_clear_range;
	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
		ggtt->base.clear_range = gen8_ggtt_clear_range;

	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
	if (IS_CHERRYVIEW(dev_priv))
		ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;

	return ggtt_probe_common(ggtt, size);
}
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;

	ggtt->base.clear_range = gen6_ggtt_clear_range;
	ggtt->base.insert_page = gen6_ggtt_insert_page;
	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.cleanup = gen6_gmch_remove;

	if (HAS_EDRAM(dev_priv))
		ggtt->base.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(dev_priv))
		ggtt->base.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(dev_priv))
		ggtt->base.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(dev_priv) >= 7)
		ggtt->base.pte_encode = ivb_pte_encode;
	else
		ggtt->base.pte_encode = snb_pte_encode;

	return ggtt_probe_common(ggtt, size);
}
static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
		      &ggtt->mappable_base, &ggtt->mappable_end);

	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
	ggtt->base.insert_page = i915_ggtt_insert_page;
	ggtt->base.insert_entries = i915_ggtt_insert_entries;
	ggtt->base.clear_range = i915_ggtt_clear_range;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.cleanup = i915_gmch_remove;

	if (unlikely(ggtt->do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}
/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @dev_priv: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ggtt->base.i915 = dev_priv;

	if (INTEL_GEN(dev_priv) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(dev_priv) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret)
		return ret;

	if ((ggtt->base.total - 1) >> 32) {
		DRM_ERROR("We never expected a Global GTT with more than 32bits"
			  " of address space! Found %lldM!\n",
			  ggtt->base.total >> 20);
		ggtt->base.total = 1ULL << 32;
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	if (ggtt->mappable_end > ggtt->base.total) {
		DRM_ERROR("mappable aperture extends past end of GGTT,"
			  " aperture=%llx, total=%llx\n",
			  ggtt->mappable_end, ggtt->base.total);
		ggtt->mappable_end = ggtt->base.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %lluM\n",
		 ggtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif

	return 0;
}
/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @dev_priv: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	INIT_LIST_HEAD(&dev_priv->vm_list);

	/* Subtract the guard page before address space initialization to
	 * shrink the range used by drm_mm.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	ggtt->base.total -= PAGE_SIZE;
	i915_address_space_init(&ggtt->base, dev_priv, "[global]");
	ggtt->base.total += PAGE_SIZE;
	if (!HAS_LLC(dev_priv))
		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
				dev_priv->ggtt.mappable_base,
				dev_priv->ggtt.mappable_end)) {
		ret = -EIO;
		goto out_gtt_cleanup;
	}

	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev_priv);
	if (ret)
		goto out_gtt_cleanup;

	return 0;

out_gtt_cleanup:
	ggtt->base.cleanup(&ggtt->base);
	return ret;
}
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj, *on;

	i915_check_and_clear_faults(dev_priv);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);

	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.bound_list, global_link) {
		bool ggtt_bound = false;
		struct i915_vma *vma;

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (vma->vm != &ggtt->base)
				continue;

			if (!i915_vma_unbind(vma))
				continue;

			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));
			ggtt_bound = true;
		}

		if (ggtt_bound)
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
	}

	ggtt->base.closed = false;

	if (INTEL_GEN(dev_priv) >= 8) {
		if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

	if (USES_PPGTT(dev_priv)) {
		struct i915_address_space *vm;

		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			/* TODO: Perhaps it shouldn't be gen6 specific */
			struct i915_hw_ppgtt *ppgtt;

			if (i915_is_ggtt(vm))
				ppgtt = dev_priv->mm.aliasing_ppgtt;
			else
				ppgtt = i915_vm_to_ppgtt(vm);

			gen6_write_page_range(dev_priv, &ppgtt->pd,
					      0, ppgtt->base.total);
		}
	}

	i915_ggtt_flush(dev_priv);
}
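/* Look up the VMA matching (vm, view) in the object's rbtree of VMAs,
 * using i915_vma_compare() as the search key.
 */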
struct i915_vma *
i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
		    struct i915_address_space *vm,
		    const struct i915_ggtt_view *view)
{
	struct rb_node *rb;

	rb = obj->vma_tree.rb_node;
	while (rb) {
		struct i915_vma *vma = rb_entry(rb, struct i915_vma, obj_node);
		long cmp;

		cmp = i915_vma_compare(vma, vm, view);
		if (cmp == 0)
			return vma;

		if (cmp < 0)
			rb = rb->rb_right;
		else
			rb = rb->rb_left;
	}

	return NULL;
}
struct i915_vma *
i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
				  struct i915_address_space *vm,
				  const struct i915_ggtt_view *view)
{
	struct i915_vma *vma;

	lockdep_assert_held(&obj->base.dev->struct_mutex);
	GEM_BUG_ON(view && !i915_is_ggtt(vm));

	vma = i915_gem_obj_to_vma(obj, vm, view);
	if (!vma) {
		vma = i915_vma_create(obj, vm, view);
		GEM_BUG_ON(vma != i915_gem_obj_to_vma(obj, vm, view));
	}

	GEM_BUG_ON(i915_vma_is_closed(vma));
	return vma;
}
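/* Helpers for building the scatter-gather table of a rotated GGTT view:
 * the source pages are walked column by column so that the display engine
 * sees the rotated layout. Only DMA addresses are filled in; no struct
 * pages are referenced.
 */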
static struct scatterlist *
rotate_pages(const dma_addr_t *in, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
			sg_dma_address(sg) = in[offset + src_idx];
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	return sg;
}
static struct sg_table *
intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
			  struct drm_i915_gem_object *obj)
{
	const size_t n_pages = obj->base.size / PAGE_SIZE;
	unsigned int size = intel_rotation_info_size(rot_info);
	struct sgt_iter sgt_iter;
	dma_addr_t dma_addr;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = drm_malloc_gfp(n_pages,
					sizeof(dma_addr_t),
					GFP_TEMPORARY);
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
		page_addr_list[i++] = dma_addr;

	GEM_BUG_ON(i != n_pages);
	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
	}

	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	drm_free_large(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_free_large(page_addr_list);

	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}
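/* Build the scatter-gather table for a partial GGTT view, covering only
 * the params.partial.offset/size page range of the object.
 */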
static struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->params.partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg(obj,
				      view->params.partial.offset,
				      &offset);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(iter->length - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret = 0;

	/* The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	if (vma->pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->pages = vma->obj->mm.pages;
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		vma->pages =
			intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->pages) {
		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
			  vma->ggtt_view.type);
		ret = -EINVAL;
	} else if (IS_ERR(vma->pages)) {
		ret = PTR_ERR(vma->pages);
		vma->pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}

	return ret;
}