/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics which means that passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 */
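/*
 * Editorial illustration (not part of the original file): because the core
 * API has copy semantics, a caller can pass a short-lived, stack-allocated
 * view, e.g.
 *
 *	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_NORMAL };
 *	vma = i915_gem_obj_lookup_or_create_vma(obj, vm, &view);
 *
 * The exact lookup helper name varies between kernel versions, so treat the
 * call above as an assumption rather than the canonical entry point.
 */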
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}
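/*
 * Editorial note: intel_sanitize_enable_ppgtt() below reconciles the
 * enable_ppgtt module parameter with what the platform actually supports
 * and returns the sanitized mode: 0 = PPGTT disabled, 1 = aliasing PPGTT,
 * 2 = full PPGTT, 3 = full 48-bit PPGTT.
 */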
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* emulation is too hard */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;

	return has_aliasing_ppgtt ? 1 : 0;
}
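/*
 * ppgtt_bind_vma()/ppgtt_unbind_vma() below are the generic PPGTT VMA
 * callbacks: later in this file they are installed as vm->bind_vma and
 * vm->unbind_vma for both the gen6 and gen8 address spaces.
 */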
static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags = 0;

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->size);
}
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;

	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;

	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode
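/*
 * Editorial illustration (not from the original source): with 4K pages the
 * gen8 48-bit virtual address splits into 9+9+9+9+12 bits, which is what
 * the gen8_{pte,pde,pdpe,pml4e}_index() helpers used throughout this file
 * compute, roughly:
 *
 *	pte   = (addr >> 12) & 0x1ff;
 *	pde   = (addr >> 21) & 0x1ff;
 *	pdpe  = (addr >> 30) & 0x1ff;
 *	pml4e = (addr >> 39) & 0x1ff;
 */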
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}
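/*
 * The gen6-style encoders above are not called directly by most of this
 * file; one of them is installed as vm->pte_encode for the platform
 * (presumably during GGTT probing, later in the driver) and the PPGTT code
 * simply calls vm->pte_encode().
 */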
static int __setup_page_dma(struct drm_i915_private *dev_priv,
			    struct i915_page_dma *p, gfp_t flags)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	p->page = alloc_page(flags);
	if (!p->page)
		return -ENOMEM;

	p->daddr = dma_map_page(kdev,
				p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	if (dma_mapping_error(kdev, p->daddr)) {
		__free_page(p->page);
		return -EINVAL;
	}

	return 0;
}

static int setup_page_dma(struct drm_i915_private *dev_priv,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(dev_priv, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct drm_i915_private *dev_priv,
			     struct i915_page_dma *p)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (WARN_ON(!p->page))
		return;

	dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(p->page);
	memset(p, 0, sizeof(*p));
}

static void *kmap_page_dma(struct i915_page_dma *p)
{
	return kmap_atomic(p->page);
}

/* We use the flushing unmap only with ppgtt structures:
 * page directories, page tables and scratch pages.
 */
static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
{
	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
		drm_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) \
		kunmap_page_dma((ppgtt)->base.i915, (vaddr))

#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
#define fill32_px(dev_priv, px, v) \
		fill_page_dma_32((dev_priv), px_base(px), (v))
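/*
 * The px_*()/*_px() macro family lets the same page-dma helpers operate on
 * page tables, page directories, PDPs and the PML4 alike: each of those
 * structures embeds a struct i915_page_dma which px_base() extracts, so
 * kmap_px()/fill_px()/setup_px()/cleanup_px() stay type-agnostic.
 */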
static void fill_page_dma(struct drm_i915_private *dev_priv,
			  struct i915_page_dma *p, const uint64_t val)
{
	int i;
	uint64_t * const vaddr = kmap_page_dma(p);

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_page_dma(dev_priv, vaddr);
}

static void fill_page_dma_32(struct drm_i915_private *dev_priv,
			     struct i915_page_dma *p, const uint32_t val32)
{
	uint64_t v = val32;

	v = v << 32 | val32;

	fill_page_dma(dev_priv, p, v);
}

static int
setup_scratch_page(struct drm_i915_private *dev_priv,
		   struct i915_page_dma *scratch,
		   gfp_t gfp)
{
	return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO);
}

static void cleanup_scratch_page(struct drm_i915_private *dev_priv,
				 struct i915_page_dma *scratch)
{
	cleanup_page_dma(dev_priv, scratch);
}
static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv)
{
	struct i915_page_table *pt;
	const size_t count = INTEL_GEN(dev_priv) >= 8 ? GEN8_PTES : GEN6_PTES;
	int ret = -ENOMEM;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
				GFP_KERNEL);
	if (!pt->used_ptes)
		goto fail_bitmap;

	ret = setup_px(dev_priv, pt);
	if (ret)
		goto fail_page_m;

	return pt;

fail_page_m:
	kfree(pt->used_ptes);
fail_bitmap:
	kfree(pt);

	return ERR_PTR(ret);
}

static void free_pt(struct drm_i915_private *dev_priv,
		    struct i915_page_table *pt)
{
	cleanup_px(dev_priv, pt);
	kfree(pt->used_ptes);
	kfree(pt);
}
static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen8_pte_t scratch_pte;

	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
				      I915_CACHE_LLC);

	fill_px(vm->i915, pt, scratch_pte);
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen6_pte_t scratch_pte;

	WARN_ON(vm->scratch_page.daddr == 0);

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	fill32_px(vm->i915, pt, scratch_pte);
}
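/*
 * Scratch hierarchy: every level of the (pp)gtt is pre-filled to point at
 * the scratch structure one level below it (PML4E -> scratch PDP -> scratch
 * PD -> scratch PT -> scratch page), so reads of unallocated GPU virtual
 * addresses land on the single scratch page instead of faulting.
 */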
509 static struct i915_page_directory
*alloc_pd(struct drm_i915_private
*dev_priv
)
511 struct i915_page_directory
*pd
;
514 pd
= kzalloc(sizeof(*pd
), GFP_KERNEL
);
516 return ERR_PTR(-ENOMEM
);
518 pd
->used_pdes
= kcalloc(BITS_TO_LONGS(I915_PDES
),
519 sizeof(*pd
->used_pdes
), GFP_KERNEL
);
523 ret
= setup_px(dev_priv
, pd
);
530 kfree(pd
->used_pdes
);
537 static void free_pd(struct drm_i915_private
*dev_priv
,
538 struct i915_page_directory
*pd
)
541 cleanup_px(dev_priv
, pd
);
542 kfree(pd
->used_pdes
);
547 static void gen8_initialize_pd(struct i915_address_space
*vm
,
548 struct i915_page_directory
*pd
)
550 gen8_pde_t scratch_pde
;
552 scratch_pde
= gen8_pde_encode(px_dma(vm
->scratch_pt
), I915_CACHE_LLC
);
554 fill_px(vm
->i915
, pd
, scratch_pde
);
557 static int __pdp_init(struct drm_i915_private
*dev_priv
,
558 struct i915_page_directory_pointer
*pdp
)
560 size_t pdpes
= I915_PDPES_PER_PDP(dev_priv
);
562 pdp
->used_pdpes
= kcalloc(BITS_TO_LONGS(pdpes
),
563 sizeof(unsigned long),
565 if (!pdp
->used_pdpes
)
568 pdp
->page_directory
= kcalloc(pdpes
, sizeof(*pdp
->page_directory
),
570 if (!pdp
->page_directory
) {
571 kfree(pdp
->used_pdpes
);
572 /* the PDP might be the statically allocated top level. Keep it
573 * as clean as possible */
574 pdp
->used_pdpes
= NULL
;
581 static void __pdp_fini(struct i915_page_directory_pointer
*pdp
)
583 kfree(pdp
->used_pdpes
);
584 kfree(pdp
->page_directory
);
585 pdp
->page_directory
= NULL
;
589 i915_page_directory_pointer
*alloc_pdp(struct drm_i915_private
*dev_priv
)
591 struct i915_page_directory_pointer
*pdp
;
594 WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv
));
596 pdp
= kzalloc(sizeof(*pdp
), GFP_KERNEL
);
598 return ERR_PTR(-ENOMEM
);
600 ret
= __pdp_init(dev_priv
, pdp
);
604 ret
= setup_px(dev_priv
, pdp
);
618 static void free_pdp(struct drm_i915_private
*dev_priv
,
619 struct i915_page_directory_pointer
*pdp
)
622 if (USES_FULL_48BIT_PPGTT(dev_priv
)) {
623 cleanup_px(dev_priv
, pdp
);
628 static void gen8_initialize_pdp(struct i915_address_space
*vm
,
629 struct i915_page_directory_pointer
*pdp
)
631 gen8_ppgtt_pdpe_t scratch_pdpe
;
633 scratch_pdpe
= gen8_pdpe_encode(px_dma(vm
->scratch_pd
), I915_CACHE_LLC
);
635 fill_px(vm
->i915
, pdp
, scratch_pdpe
);
638 static void gen8_initialize_pml4(struct i915_address_space
*vm
,
639 struct i915_pml4
*pml4
)
641 gen8_ppgtt_pml4e_t scratch_pml4e
;
643 scratch_pml4e
= gen8_pml4e_encode(px_dma(vm
->scratch_pdp
),
646 fill_px(vm
->i915
, pml4
, scratch_pml4e
);
650 gen8_setup_pdpe(struct i915_hw_ppgtt
*ppgtt
,
651 struct i915_page_directory_pointer
*pdp
,
652 struct i915_page_directory
*pd
,
655 gen8_ppgtt_pdpe_t
*page_directorypo
;
657 if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt
->base
.dev
)))
660 page_directorypo
= kmap_px(pdp
);
661 page_directorypo
[index
] = gen8_pdpe_encode(px_dma(pd
), I915_CACHE_LLC
);
662 kunmap_px(ppgtt
, page_directorypo
);
666 gen8_setup_pml4e(struct i915_hw_ppgtt
*ppgtt
,
667 struct i915_pml4
*pml4
,
668 struct i915_page_directory_pointer
*pdp
,
671 gen8_ppgtt_pml4e_t
*pagemap
= kmap_px(pml4
);
673 WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt
->base
.dev
)));
674 pagemap
[index
] = gen8_pml4e_encode(px_dma(pdp
), I915_CACHE_LLC
);
675 kunmap_px(ppgtt
, pagemap
);
678 /* Broadwell Page Directory Pointer Descriptors */
679 static int gen8_write_pdp(struct drm_i915_gem_request
*req
,
683 struct intel_ring
*ring
= req
->ring
;
684 struct intel_engine_cs
*engine
= req
->engine
;
689 ret
= intel_ring_begin(req
, 6);
693 intel_ring_emit(ring
, MI_LOAD_REGISTER_IMM(1));
694 intel_ring_emit_reg(ring
, GEN8_RING_PDP_UDW(engine
, entry
));
695 intel_ring_emit(ring
, upper_32_bits(addr
));
696 intel_ring_emit(ring
, MI_LOAD_REGISTER_IMM(1));
697 intel_ring_emit_reg(ring
, GEN8_RING_PDP_LDW(engine
, entry
));
698 intel_ring_emit(ring
, lower_32_bits(addr
));
699 intel_ring_advance(ring
);
704 static int gen8_legacy_mm_switch(struct i915_hw_ppgtt
*ppgtt
,
705 struct drm_i915_gem_request
*req
)
709 for (i
= GEN8_LEGACY_PDPES
- 1; i
>= 0; i
--) {
710 const dma_addr_t pd_daddr
= i915_page_dir_dma_addr(ppgtt
, i
);
712 ret
= gen8_write_pdp(req
, i
, pd_daddr
);
720 static int gen8_48b_mm_switch(struct i915_hw_ppgtt
*ppgtt
,
721 struct drm_i915_gem_request
*req
)
723 return gen8_write_pdp(req
, 0, px_dma(&ppgtt
->pml4
));
726 /* PDE TLBs are a pain to invalidate on GEN8+. When we modify
727 * the page table structures, we mark them dirty so that
728 * context switching/execlist queuing code takes extra steps
729 * to ensure that tlbs are flushed.
731 static void mark_tlbs_dirty(struct i915_hw_ppgtt
*ppgtt
)
733 ppgtt
->pd_dirty_rings
= INTEL_INFO(ppgtt
->base
.i915
)->ring_mask
;
736 /* Removes entries from a single page table, releasing it if it's empty.
737 * Caller can use the return value to update higher-level entries.
739 static bool gen8_ppgtt_clear_pt(struct i915_address_space
*vm
,
740 struct i915_page_table
*pt
,
744 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
745 unsigned int num_entries
= gen8_pte_count(start
, length
);
746 unsigned int pte
= gen8_pte_index(start
);
747 unsigned int pte_end
= pte
+ num_entries
;
748 gen8_pte_t
*pt_vaddr
;
749 gen8_pte_t scratch_pte
= gen8_pte_encode(vm
->scratch_page
.daddr
,
752 if (WARN_ON(!px_page(pt
)))
755 GEM_BUG_ON(pte_end
> GEN8_PTES
);
757 bitmap_clear(pt
->used_ptes
, pte
, num_entries
);
758 if (USES_FULL_PPGTT(vm
->i915
)) {
759 if (bitmap_empty(pt
->used_ptes
, GEN8_PTES
))
763 pt_vaddr
= kmap_px(pt
);
765 while (pte
< pte_end
)
766 pt_vaddr
[pte
++] = scratch_pte
;
768 kunmap_px(ppgtt
, pt_vaddr
);
773 /* Removes entries from a single page dir, releasing it if it's empty.
774 * Caller can use the return value to update higher-level entries
776 static bool gen8_ppgtt_clear_pd(struct i915_address_space
*vm
,
777 struct i915_page_directory
*pd
,
781 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
782 struct i915_page_table
*pt
;
784 gen8_pde_t
*pde_vaddr
;
785 gen8_pde_t scratch_pde
= gen8_pde_encode(px_dma(vm
->scratch_pt
),
788 gen8_for_each_pde(pt
, pd
, start
, length
, pde
) {
789 if (WARN_ON(!pd
->page_table
[pde
]))
792 if (gen8_ppgtt_clear_pt(vm
, pt
, start
, length
)) {
793 __clear_bit(pde
, pd
->used_pdes
);
794 pde_vaddr
= kmap_px(pd
);
795 pde_vaddr
[pde
] = scratch_pde
;
796 kunmap_px(ppgtt
, pde_vaddr
);
797 free_pt(vm
->i915
, pt
);
801 if (bitmap_empty(pd
->used_pdes
, I915_PDES
))
807 /* Removes entries from a single page dir pointer, releasing it if it's empty.
808 * Caller can use the return value to update higher-level entries
810 static bool gen8_ppgtt_clear_pdp(struct i915_address_space
*vm
,
811 struct i915_page_directory_pointer
*pdp
,
815 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
816 struct i915_page_directory
*pd
;
819 gen8_for_each_pdpe(pd
, pdp
, start
, length
, pdpe
) {
820 if (WARN_ON(!pdp
->page_directory
[pdpe
]))
823 if (gen8_ppgtt_clear_pd(vm
, pd
, start
, length
)) {
824 __clear_bit(pdpe
, pdp
->used_pdpes
);
825 gen8_setup_pdpe(ppgtt
, pdp
, vm
->scratch_pd
, pdpe
);
826 free_pd(vm
->i915
, pd
);
830 mark_tlbs_dirty(ppgtt
);
832 if (bitmap_empty(pdp
->used_pdpes
, I915_PDPES_PER_PDP(dev_priv
)))
838 /* Removes entries from a single pml4.
839 * This is the top-level structure in 4-level page tables used on gen8+.
840 * Empty entries are always scratch pml4e.
842 static void gen8_ppgtt_clear_pml4(struct i915_address_space
*vm
,
843 struct i915_pml4
*pml4
,
847 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
848 struct i915_page_directory_pointer
*pdp
;
851 GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm
->i915
));
853 gen8_for_each_pml4e(pdp
, pml4
, start
, length
, pml4e
) {
854 if (WARN_ON(!pml4
->pdps
[pml4e
]))
857 if (gen8_ppgtt_clear_pdp(vm
, pdp
, start
, length
)) {
858 __clear_bit(pml4e
, pml4
->used_pml4es
);
859 gen8_setup_pml4e(ppgtt
, pml4
, vm
->scratch_pdp
, pml4e
);
860 free_pdp(vm
->i915
, pdp
);
865 static void gen8_ppgtt_clear_range(struct i915_address_space
*vm
,
866 uint64_t start
, uint64_t length
)
868 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
870 if (USES_FULL_48BIT_PPGTT(vm
->i915
))
871 gen8_ppgtt_clear_pml4(vm
, &ppgtt
->pml4
, start
, length
);
873 gen8_ppgtt_clear_pdp(vm
, &ppgtt
->pdp
, start
, length
);
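/*
 * Teardown cascades top-down but frees bottom-up: clear_range picks the
 * pml4 (48-bit) or the single pdp (32-bit legacy) and each clear_* level
 * returns true once its usage bitmap is empty, letting the caller replace
 * the entry with the scratch structure and free the now-unused table.
 */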
877 gen8_ppgtt_insert_pte_entries(struct i915_address_space
*vm
,
878 struct i915_page_directory_pointer
*pdp
,
879 struct sg_page_iter
*sg_iter
,
881 enum i915_cache_level cache_level
)
883 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
884 gen8_pte_t
*pt_vaddr
;
885 unsigned pdpe
= gen8_pdpe_index(start
);
886 unsigned pde
= gen8_pde_index(start
);
887 unsigned pte
= gen8_pte_index(start
);
891 while (__sg_page_iter_next(sg_iter
)) {
892 if (pt_vaddr
== NULL
) {
893 struct i915_page_directory
*pd
= pdp
->page_directory
[pdpe
];
894 struct i915_page_table
*pt
= pd
->page_table
[pde
];
895 pt_vaddr
= kmap_px(pt
);
899 gen8_pte_encode(sg_page_iter_dma_address(sg_iter
),
901 if (++pte
== GEN8_PTES
) {
902 kunmap_px(ppgtt
, pt_vaddr
);
904 if (++pde
== I915_PDES
) {
905 if (++pdpe
== I915_PDPES_PER_PDP(vm
->i915
))
914 kunmap_px(ppgtt
, pt_vaddr
);
917 static void gen8_ppgtt_insert_entries(struct i915_address_space
*vm
,
918 struct sg_table
*pages
,
920 enum i915_cache_level cache_level
,
923 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
924 struct sg_page_iter sg_iter
;
926 __sg_page_iter_start(&sg_iter
, pages
->sgl
, sg_nents(pages
->sgl
), 0);
928 if (!USES_FULL_48BIT_PPGTT(vm
->i915
)) {
929 gen8_ppgtt_insert_pte_entries(vm
, &ppgtt
->pdp
, &sg_iter
, start
,
932 struct i915_page_directory_pointer
*pdp
;
934 uint64_t length
= (uint64_t)pages
->orig_nents
<< PAGE_SHIFT
;
936 gen8_for_each_pml4e(pdp
, &ppgtt
->pml4
, start
, length
, pml4e
) {
937 gen8_ppgtt_insert_pte_entries(vm
, pdp
, &sg_iter
,
943 static void gen8_free_page_tables(struct drm_i915_private
*dev_priv
,
944 struct i915_page_directory
*pd
)
951 for_each_set_bit(i
, pd
->used_pdes
, I915_PDES
) {
952 if (WARN_ON(!pd
->page_table
[i
]))
955 free_pt(dev_priv
, pd
->page_table
[i
]);
956 pd
->page_table
[i
] = NULL
;
960 static int gen8_init_scratch(struct i915_address_space
*vm
)
962 struct drm_i915_private
*dev_priv
= vm
->i915
;
965 ret
= setup_scratch_page(dev_priv
, &vm
->scratch_page
, I915_GFP_DMA
);
969 vm
->scratch_pt
= alloc_pt(dev_priv
);
970 if (IS_ERR(vm
->scratch_pt
)) {
971 ret
= PTR_ERR(vm
->scratch_pt
);
972 goto free_scratch_page
;
975 vm
->scratch_pd
= alloc_pd(dev_priv
);
976 if (IS_ERR(vm
->scratch_pd
)) {
977 ret
= PTR_ERR(vm
->scratch_pd
);
981 if (USES_FULL_48BIT_PPGTT(dev_priv
)) {
982 vm
->scratch_pdp
= alloc_pdp(dev_priv
);
983 if (IS_ERR(vm
->scratch_pdp
)) {
984 ret
= PTR_ERR(vm
->scratch_pdp
);
989 gen8_initialize_pt(vm
, vm
->scratch_pt
);
990 gen8_initialize_pd(vm
, vm
->scratch_pd
);
991 if (USES_FULL_48BIT_PPGTT(dev_priv
))
992 gen8_initialize_pdp(vm
, vm
->scratch_pdp
);
997 free_pd(dev_priv
, vm
->scratch_pd
);
999 free_pt(dev_priv
, vm
->scratch_pt
);
1001 cleanup_scratch_page(dev_priv
, &vm
->scratch_page
);
1006 static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt
*ppgtt
, bool create
)
1008 enum vgt_g2v_type msg
;
1009 struct drm_i915_private
*dev_priv
= ppgtt
->base
.i915
;
1012 if (USES_FULL_48BIT_PPGTT(dev_priv
)) {
1013 u64 daddr
= px_dma(&ppgtt
->pml4
);
1015 I915_WRITE(vgtif_reg(pdp
[0].lo
), lower_32_bits(daddr
));
1016 I915_WRITE(vgtif_reg(pdp
[0].hi
), upper_32_bits(daddr
));
1018 msg
= (create
? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE
:
1019 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY
);
1021 for (i
= 0; i
< GEN8_LEGACY_PDPES
; i
++) {
1022 u64 daddr
= i915_page_dir_dma_addr(ppgtt
, i
);
1024 I915_WRITE(vgtif_reg(pdp
[i
].lo
), lower_32_bits(daddr
));
1025 I915_WRITE(vgtif_reg(pdp
[i
].hi
), upper_32_bits(daddr
));
1028 msg
= (create
? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE
:
1029 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY
);
1032 I915_WRITE(vgtif_reg(g2v_notify
), msg
);
1037 static void gen8_free_scratch(struct i915_address_space
*vm
)
1039 struct drm_i915_private
*dev_priv
= vm
->i915
;
1041 if (USES_FULL_48BIT_PPGTT(dev_priv
))
1042 free_pdp(dev_priv
, vm
->scratch_pdp
);
1043 free_pd(dev_priv
, vm
->scratch_pd
);
1044 free_pt(dev_priv
, vm
->scratch_pt
);
1045 cleanup_scratch_page(dev_priv
, &vm
->scratch_page
);
1048 static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private
*dev_priv
,
1049 struct i915_page_directory_pointer
*pdp
)
1053 for_each_set_bit(i
, pdp
->used_pdpes
, I915_PDPES_PER_PDP(dev_priv
)) {
1054 if (WARN_ON(!pdp
->page_directory
[i
]))
1057 gen8_free_page_tables(dev_priv
, pdp
->page_directory
[i
]);
1058 free_pd(dev_priv
, pdp
->page_directory
[i
]);
1061 free_pdp(dev_priv
, pdp
);
1064 static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt
*ppgtt
)
1066 struct drm_i915_private
*dev_priv
= ppgtt
->base
.i915
;
1069 for_each_set_bit(i
, ppgtt
->pml4
.used_pml4es
, GEN8_PML4ES_PER_PML4
) {
1070 if (WARN_ON(!ppgtt
->pml4
.pdps
[i
]))
1073 gen8_ppgtt_cleanup_3lvl(dev_priv
, ppgtt
->pml4
.pdps
[i
]);
1076 cleanup_px(dev_priv
, &ppgtt
->pml4
);
1079 static void gen8_ppgtt_cleanup(struct i915_address_space
*vm
)
1081 struct drm_i915_private
*dev_priv
= vm
->i915
;
1082 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
1084 if (intel_vgpu_active(dev_priv
))
1085 gen8_ppgtt_notify_vgt(ppgtt
, false);
1087 if (!USES_FULL_48BIT_PPGTT(dev_priv
))
1088 gen8_ppgtt_cleanup_3lvl(dev_priv
, &ppgtt
->pdp
);
1090 gen8_ppgtt_cleanup_4lvl(ppgtt
);
1092 gen8_free_scratch(vm
);
1096 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
1097 * @vm: Master vm structure.
1098 * @pd: Page directory for this address range.
1099 * @start: Starting virtual address to begin allocations.
1100 * @length: Size of the allocations.
1101 * @new_pts: Bitmap set by function with new allocations. Likely used by the
1102 * caller to free on error.
1104 * Allocate the required number of page tables. Extremely similar to
1105 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
1106 * the page directory boundary (instead of the page directory pointer). That
1107 * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
1108 * possible, and likely that the caller will need to use multiple calls of this
1109 * function to achieve the appropriate allocation.
1111 * Return: 0 if success; negative error code otherwise.
1113 static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space
*vm
,
1114 struct i915_page_directory
*pd
,
1117 unsigned long *new_pts
)
1119 struct drm_i915_private
*dev_priv
= vm
->i915
;
1120 struct i915_page_table
*pt
;
1123 gen8_for_each_pde(pt
, pd
, start
, length
, pde
) {
1124 /* Don't reallocate page tables */
1125 if (test_bit(pde
, pd
->used_pdes
)) {
1126 /* Scratch is never allocated this way */
1127 WARN_ON(pt
== vm
->scratch_pt
);
1131 pt
= alloc_pt(dev_priv
);
1135 gen8_initialize_pt(vm
, pt
);
1136 pd
->page_table
[pde
] = pt
;
1137 __set_bit(pde
, new_pts
);
1138 trace_i915_page_table_entry_alloc(vm
, pde
, start
, GEN8_PDE_SHIFT
);
1144 for_each_set_bit(pde
, new_pts
, I915_PDES
)
1145 free_pt(dev_priv
, pd
->page_table
[pde
]);
1151 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
1152 * @vm: Master vm structure.
1153 * @pdp: Page directory pointer for this address range.
1154 * @start: Starting virtual address to begin allocations.
1155 * @length: Size of the allocations.
1156 * @new_pds: Bitmap set by function with new allocations. Likely used by the
1157 * caller to free on error.
1159 * Allocate the required number of page directories starting at the pde index of
1160 * @start, and ending at the pde index @start + @length. This function will skip
1161 * over already allocated page directories within the range, and only allocate
1162 * new ones, setting the appropriate pointer within the pdp as well as the
1163 * correct position in the bitmap @new_pds.
 * The function will only allocate the pages within the range for a given page
 * directory pointer. In other words, if @start + @length straddles a virtually
 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
 * required by the caller. This is not currently possible, and the BUG in the
 * code will prevent it.
1171 * Return: 0 if success; negative error code otherwise.
1174 gen8_ppgtt_alloc_page_directories(struct i915_address_space
*vm
,
1175 struct i915_page_directory_pointer
*pdp
,
1178 unsigned long *new_pds
)
1180 struct drm_i915_private
*dev_priv
= vm
->i915
;
1181 struct i915_page_directory
*pd
;
1183 uint32_t pdpes
= I915_PDPES_PER_PDP(dev_priv
);
1185 WARN_ON(!bitmap_empty(new_pds
, pdpes
));
1187 gen8_for_each_pdpe(pd
, pdp
, start
, length
, pdpe
) {
1188 if (test_bit(pdpe
, pdp
->used_pdpes
))
1191 pd
= alloc_pd(dev_priv
);
1195 gen8_initialize_pd(vm
, pd
);
1196 pdp
->page_directory
[pdpe
] = pd
;
1197 __set_bit(pdpe
, new_pds
);
1198 trace_i915_page_directory_entry_alloc(vm
, pdpe
, start
, GEN8_PDPE_SHIFT
);
1204 for_each_set_bit(pdpe
, new_pds
, pdpes
)
1205 free_pd(dev_priv
, pdp
->page_directory
[pdpe
]);
1211 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
1212 * @vm: Master vm structure.
1213 * @pml4: Page map level 4 for this address range.
1214 * @start: Starting virtual address to begin allocations.
1215 * @length: Size of the allocations.
1216 * @new_pdps: Bitmap set by function with new allocations. Likely used by the
1217 * caller to free on error.
1219 * Allocate the required number of page directory pointers. Extremely similar to
1220 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
1221 * The main difference is here we are limited by the pml4 boundary (instead of
1222 * the page directory pointer).
1224 * Return: 0 if success; negative error code otherwise.
1227 gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space
*vm
,
1228 struct i915_pml4
*pml4
,
1231 unsigned long *new_pdps
)
1233 struct drm_i915_private
*dev_priv
= vm
->i915
;
1234 struct i915_page_directory_pointer
*pdp
;
1237 WARN_ON(!bitmap_empty(new_pdps
, GEN8_PML4ES_PER_PML4
));
1239 gen8_for_each_pml4e(pdp
, pml4
, start
, length
, pml4e
) {
1240 if (!test_bit(pml4e
, pml4
->used_pml4es
)) {
1241 pdp
= alloc_pdp(dev_priv
);
1245 gen8_initialize_pdp(vm
, pdp
);
1246 pml4
->pdps
[pml4e
] = pdp
;
1247 __set_bit(pml4e
, new_pdps
);
1248 trace_i915_page_directory_pointer_entry_alloc(vm
,
1258 for_each_set_bit(pml4e
, new_pdps
, GEN8_PML4ES_PER_PML4
)
1259 free_pdp(dev_priv
, pml4
->pdps
[pml4e
]);
1265 free_gen8_temp_bitmaps(unsigned long *new_pds
, unsigned long *new_pts
)
1271 /* Fills in the page directory bitmap, and the array of page tables bitmap. Both
1272 * of these are based on the number of PDPEs in the system.
1275 int __must_check
alloc_gen8_temp_bitmaps(unsigned long **new_pds
,
1276 unsigned long **new_pts
,
1282 pds
= kcalloc(BITS_TO_LONGS(pdpes
), sizeof(unsigned long), GFP_TEMPORARY
);
1286 pts
= kcalloc(pdpes
, BITS_TO_LONGS(I915_PDES
) * sizeof(unsigned long),
1297 free_gen8_temp_bitmaps(pds
, pts
);
1301 static int gen8_alloc_va_range_3lvl(struct i915_address_space
*vm
,
1302 struct i915_page_directory_pointer
*pdp
,
1306 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
1307 unsigned long *new_page_dirs
, *new_page_tables
;
1308 struct drm_i915_private
*dev_priv
= vm
->i915
;
1309 struct i915_page_directory
*pd
;
1310 const uint64_t orig_start
= start
;
1311 const uint64_t orig_length
= length
;
1313 uint32_t pdpes
= I915_PDPES_PER_PDP(dev_priv
);
1316 ret
= alloc_gen8_temp_bitmaps(&new_page_dirs
, &new_page_tables
, pdpes
);
1320 /* Do the allocations first so we can easily bail out */
1321 ret
= gen8_ppgtt_alloc_page_directories(vm
, pdp
, start
, length
,
1324 free_gen8_temp_bitmaps(new_page_dirs
, new_page_tables
);
1328 /* For every page directory referenced, allocate page tables */
1329 gen8_for_each_pdpe(pd
, pdp
, start
, length
, pdpe
) {
1330 ret
= gen8_ppgtt_alloc_pagetabs(vm
, pd
, start
, length
,
1331 new_page_tables
+ pdpe
* BITS_TO_LONGS(I915_PDES
));
1337 length
= orig_length
;
1339 /* Allocations have completed successfully, so set the bitmaps, and do
1341 gen8_for_each_pdpe(pd
, pdp
, start
, length
, pdpe
) {
1342 gen8_pde_t
*const page_directory
= kmap_px(pd
);
1343 struct i915_page_table
*pt
;
1344 uint64_t pd_len
= length
;
1345 uint64_t pd_start
= start
;
1348 /* Every pd should be allocated, we just did that above. */
1351 gen8_for_each_pde(pt
, pd
, pd_start
, pd_len
, pde
) {
1352 /* Same reasoning as pd */
1355 WARN_ON(!gen8_pte_count(pd_start
, pd_len
));
1357 /* Set our used ptes within the page table */
1358 bitmap_set(pt
->used_ptes
,
1359 gen8_pte_index(pd_start
),
1360 gen8_pte_count(pd_start
, pd_len
));
1362 /* Our pde is now pointing to the pagetable, pt */
1363 __set_bit(pde
, pd
->used_pdes
);
1365 /* Map the PDE to the page table */
1366 page_directory
[pde
] = gen8_pde_encode(px_dma(pt
),
1368 trace_i915_page_table_entry_map(&ppgtt
->base
, pde
, pt
,
1369 gen8_pte_index(start
),
1370 gen8_pte_count(start
, length
),
1373 /* NB: We haven't yet mapped ptes to pages. At this
1374 * point we're still relying on insert_entries() */
1377 kunmap_px(ppgtt
, page_directory
);
1378 __set_bit(pdpe
, pdp
->used_pdpes
);
1379 gen8_setup_pdpe(ppgtt
, pdp
, pd
, pdpe
);
1382 free_gen8_temp_bitmaps(new_page_dirs
, new_page_tables
);
1383 mark_tlbs_dirty(ppgtt
);
1390 for_each_set_bit(temp
, new_page_tables
+ pdpe
*
1391 BITS_TO_LONGS(I915_PDES
), I915_PDES
)
1393 pdp
->page_directory
[pdpe
]->page_table
[temp
]);
1396 for_each_set_bit(pdpe
, new_page_dirs
, pdpes
)
1397 free_pd(dev_priv
, pdp
->page_directory
[pdpe
]);
1399 free_gen8_temp_bitmaps(new_page_dirs
, new_page_tables
);
1400 mark_tlbs_dirty(ppgtt
);
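/*
 * gen8_alloc_va_range_3lvl() above is deliberately two-phase: first every
 * missing page directory and page table is allocated and recorded in the
 * temporary new_page_dirs/new_page_tables bitmaps, and only then are the
 * used_* bitmaps and PDEs committed. On failure only the structures marked
 * in the temporary bitmaps are freed, leaving pre-existing ones intact.
 */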
1404 static int gen8_alloc_va_range_4lvl(struct i915_address_space
*vm
,
1405 struct i915_pml4
*pml4
,
1409 DECLARE_BITMAP(new_pdps
, GEN8_PML4ES_PER_PML4
);
1410 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
1411 struct i915_page_directory_pointer
*pdp
;
1415 /* Do the pml4 allocations first, so we don't need to track the newly
1416 * allocated tables below the pdp */
1417 bitmap_zero(new_pdps
, GEN8_PML4ES_PER_PML4
);
1419 /* The pagedirectory and pagetable allocations are done in the shared 3
1420 * and 4 level code. Just allocate the pdps.
1422 ret
= gen8_ppgtt_alloc_page_dirpointers(vm
, pml4
, start
, length
,
1427 WARN(bitmap_weight(new_pdps
, GEN8_PML4ES_PER_PML4
) > 2,
1428 "The allocation has spanned more than 512GB. "
1429 "It is highly likely this is incorrect.");
1431 gen8_for_each_pml4e(pdp
, pml4
, start
, length
, pml4e
) {
1434 ret
= gen8_alloc_va_range_3lvl(vm
, pdp
, start
, length
);
1438 gen8_setup_pml4e(ppgtt
, pml4
, pdp
, pml4e
);
1441 bitmap_or(pml4
->used_pml4es
, new_pdps
, pml4
->used_pml4es
,
1442 GEN8_PML4ES_PER_PML4
);
1447 for_each_set_bit(pml4e
, new_pdps
, GEN8_PML4ES_PER_PML4
)
1448 gen8_ppgtt_cleanup_3lvl(vm
->i915
, pml4
->pdps
[pml4e
]);
1453 static int gen8_alloc_va_range(struct i915_address_space
*vm
,
1454 uint64_t start
, uint64_t length
)
1456 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
1458 if (USES_FULL_48BIT_PPGTT(vm
->i915
))
1459 return gen8_alloc_va_range_4lvl(vm
, &ppgtt
->pml4
, start
, length
);
1461 return gen8_alloc_va_range_3lvl(vm
, &ppgtt
->pdp
, start
, length
);
1464 static void gen8_dump_pdp(struct i915_page_directory_pointer
*pdp
,
1465 uint64_t start
, uint64_t length
,
1466 gen8_pte_t scratch_pte
,
1469 struct i915_page_directory
*pd
;
1472 gen8_for_each_pdpe(pd
, pdp
, start
, length
, pdpe
) {
1473 struct i915_page_table
*pt
;
1474 uint64_t pd_len
= length
;
1475 uint64_t pd_start
= start
;
1478 if (!test_bit(pdpe
, pdp
->used_pdpes
))
1481 seq_printf(m
, "\tPDPE #%d\n", pdpe
);
1482 gen8_for_each_pde(pt
, pd
, pd_start
, pd_len
, pde
) {
1484 gen8_pte_t
*pt_vaddr
;
1486 if (!test_bit(pde
, pd
->used_pdes
))
1489 pt_vaddr
= kmap_px(pt
);
1490 for (pte
= 0; pte
< GEN8_PTES
; pte
+= 4) {
1492 (pdpe
<< GEN8_PDPE_SHIFT
) |
1493 (pde
<< GEN8_PDE_SHIFT
) |
1494 (pte
<< GEN8_PTE_SHIFT
);
1498 for (i
= 0; i
< 4; i
++)
1499 if (pt_vaddr
[pte
+ i
] != scratch_pte
)
1504 seq_printf(m
, "\t\t0x%llx [%03d,%03d,%04d]: =", va
, pdpe
, pde
, pte
);
1505 for (i
= 0; i
< 4; i
++) {
1506 if (pt_vaddr
[pte
+ i
] != scratch_pte
)
1507 seq_printf(m
, " %llx", pt_vaddr
[pte
+ i
]);
1509 seq_puts(m
, " SCRATCH ");
1513 /* don't use kunmap_px, it could trigger
1514 * an unnecessary flush.
1516 kunmap_atomic(pt_vaddr
);
1521 static void gen8_dump_ppgtt(struct i915_hw_ppgtt
*ppgtt
, struct seq_file
*m
)
1523 struct i915_address_space
*vm
= &ppgtt
->base
;
1524 uint64_t start
= ppgtt
->base
.start
;
1525 uint64_t length
= ppgtt
->base
.total
;
1526 gen8_pte_t scratch_pte
= gen8_pte_encode(vm
->scratch_page
.daddr
,
1529 if (!USES_FULL_48BIT_PPGTT(vm
->i915
)) {
1530 gen8_dump_pdp(&ppgtt
->pdp
, start
, length
, scratch_pte
, m
);
1533 struct i915_pml4
*pml4
= &ppgtt
->pml4
;
1534 struct i915_page_directory_pointer
*pdp
;
1536 gen8_for_each_pml4e(pdp
, pml4
, start
, length
, pml4e
) {
1537 if (!test_bit(pml4e
, pml4
->used_pml4es
))
1540 seq_printf(m
, " PML4E #%llu\n", pml4e
);
1541 gen8_dump_pdp(pdp
, start
, length
, scratch_pte
, m
);
1546 static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt
*ppgtt
)
1548 unsigned long *new_page_dirs
, *new_page_tables
;
1549 uint32_t pdpes
= I915_PDPES_PER_PDP(to_i915(ppgtt
->base
.dev
));
	/* We allocate temp bitmap for page tables for no gain
	 * but as this is for init only, let's keep things simple
	 */
1555 ret
= alloc_gen8_temp_bitmaps(&new_page_dirs
, &new_page_tables
, pdpes
);
1559 /* Allocate for all pdps regardless of how the ppgtt
1562 ret
= gen8_ppgtt_alloc_page_directories(&ppgtt
->base
, &ppgtt
->pdp
,
1566 *ppgtt
->pdp
.used_pdpes
= *new_page_dirs
;
1568 free_gen8_temp_bitmaps(new_page_dirs
, new_page_tables
);
 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
1580 static int gen8_ppgtt_init(struct i915_hw_ppgtt
*ppgtt
)
1582 struct drm_i915_private
*dev_priv
= ppgtt
->base
.i915
;
1585 ret
= gen8_init_scratch(&ppgtt
->base
);
1589 ppgtt
->base
.start
= 0;
1590 ppgtt
->base
.cleanup
= gen8_ppgtt_cleanup
;
1591 ppgtt
->base
.allocate_va_range
= gen8_alloc_va_range
;
1592 ppgtt
->base
.insert_entries
= gen8_ppgtt_insert_entries
;
1593 ppgtt
->base
.clear_range
= gen8_ppgtt_clear_range
;
1594 ppgtt
->base
.unbind_vma
= ppgtt_unbind_vma
;
1595 ppgtt
->base
.bind_vma
= ppgtt_bind_vma
;
1596 ppgtt
->debug_dump
= gen8_dump_ppgtt
;
1598 if (USES_FULL_48BIT_PPGTT(dev_priv
)) {
1599 ret
= setup_px(dev_priv
, &ppgtt
->pml4
);
1603 gen8_initialize_pml4(&ppgtt
->base
, &ppgtt
->pml4
);
1605 ppgtt
->base
.total
= 1ULL << 48;
1606 ppgtt
->switch_mm
= gen8_48b_mm_switch
;
1608 ret
= __pdp_init(dev_priv
, &ppgtt
->pdp
);
1612 ppgtt
->base
.total
= 1ULL << 32;
1613 ppgtt
->switch_mm
= gen8_legacy_mm_switch
;
1614 trace_i915_page_directory_pointer_entry_alloc(&ppgtt
->base
,
1618 if (intel_vgpu_active(dev_priv
)) {
1619 ret
= gen8_preallocate_top_level_pdps(ppgtt
);
1625 if (intel_vgpu_active(dev_priv
))
1626 gen8_ppgtt_notify_vgt(ppgtt
, true);
1631 gen8_free_scratch(&ppgtt
->base
);
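/*
 * gen8_ppgtt_init() above sets up one of two layouts: with 48-bit PPGTT a
 * PML4 is allocated and ppgtt->base.total covers 1ULL << 48 bytes using
 * gen8_48b_mm_switch(), otherwise a single statically embedded PDP is
 * initialised, total is 1ULL << 32 and gen8_legacy_mm_switch() loads the
 * four PDP registers per request.
 */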
1635 static void gen6_dump_ppgtt(struct i915_hw_ppgtt
*ppgtt
, struct seq_file
*m
)
1637 struct i915_address_space
*vm
= &ppgtt
->base
;
1638 struct i915_page_table
*unused
;
1639 gen6_pte_t scratch_pte
;
1642 uint32_t start
= ppgtt
->base
.start
, length
= ppgtt
->base
.total
;
1644 scratch_pte
= vm
->pte_encode(vm
->scratch_page
.daddr
,
1647 gen6_for_each_pde(unused
, &ppgtt
->pd
, start
, length
, pde
) {
1649 gen6_pte_t
*pt_vaddr
;
1650 const dma_addr_t pt_addr
= px_dma(ppgtt
->pd
.page_table
[pde
]);
1651 pd_entry
= readl(ppgtt
->pd_addr
+ pde
);
1652 expected
= (GEN6_PDE_ADDR_ENCODE(pt_addr
) | GEN6_PDE_VALID
);
1654 if (pd_entry
!= expected
)
1655 seq_printf(m
, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1659 seq_printf(m
, "\tPDE: %x\n", pd_entry
);
1661 pt_vaddr
= kmap_px(ppgtt
->pd
.page_table
[pde
]);
1663 for (pte
= 0; pte
< GEN6_PTES
; pte
+=4) {
1665 (pde
* PAGE_SIZE
* GEN6_PTES
) +
1669 for (i
= 0; i
< 4; i
++)
1670 if (pt_vaddr
[pte
+ i
] != scratch_pte
)
1675 seq_printf(m
, "\t\t0x%lx [%03d,%04d]: =", va
, pde
, pte
);
1676 for (i
= 0; i
< 4; i
++) {
1677 if (pt_vaddr
[pte
+ i
] != scratch_pte
)
1678 seq_printf(m
, " %08x", pt_vaddr
[pte
+ i
]);
1680 seq_puts(m
, " SCRATCH ");
1684 kunmap_px(ppgtt
, pt_vaddr
);
1688 /* Write pde (index) from the page directory @pd to the page table @pt */
1689 static void gen6_write_pde(struct i915_page_directory
*pd
,
1690 const int pde
, struct i915_page_table
*pt
)
1692 /* Caller needs to make sure the write completes if necessary */
1693 struct i915_hw_ppgtt
*ppgtt
=
1694 container_of(pd
, struct i915_hw_ppgtt
, pd
);
1697 pd_entry
= GEN6_PDE_ADDR_ENCODE(px_dma(pt
));
1698 pd_entry
|= GEN6_PDE_VALID
;
1700 writel(pd_entry
, ppgtt
->pd_addr
+ pde
);
1703 /* Write all the page tables found in the ppgtt structure to incrementing page
1705 static void gen6_write_page_range(struct drm_i915_private
*dev_priv
,
1706 struct i915_page_directory
*pd
,
1707 uint32_t start
, uint32_t length
)
1709 struct i915_ggtt
*ggtt
= &dev_priv
->ggtt
;
1710 struct i915_page_table
*pt
;
1713 gen6_for_each_pde(pt
, pd
, start
, length
, pde
)
1714 gen6_write_pde(pd
, pde
, pt
);
	/* Make sure write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs */
1721 static uint32_t get_pd_offset(struct i915_hw_ppgtt
*ppgtt
)
1723 BUG_ON(ppgtt
->pd
.base
.ggtt_offset
& 0x3f);
1725 return (ppgtt
->pd
.base
.ggtt_offset
/ 64) << 16;
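/*
 * get_pd_offset() packs the page directory's GGTT offset for the
 * RING_PP_DIR_BASE register: the BUG_ON above enforces 64-byte alignment
 * and the offset is expressed in 64-byte units shifted into the upper half
 * of the register value.
 */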
1728 static int hsw_mm_switch(struct i915_hw_ppgtt
*ppgtt
,
1729 struct drm_i915_gem_request
*req
)
1731 struct intel_ring
*ring
= req
->ring
;
1732 struct intel_engine_cs
*engine
= req
->engine
;
1735 /* NB: TLBs must be flushed and invalidated before a switch */
1736 ret
= engine
->emit_flush(req
, EMIT_INVALIDATE
| EMIT_FLUSH
);
1740 ret
= intel_ring_begin(req
, 6);
1744 intel_ring_emit(ring
, MI_LOAD_REGISTER_IMM(2));
1745 intel_ring_emit_reg(ring
, RING_PP_DIR_DCLV(engine
));
1746 intel_ring_emit(ring
, PP_DIR_DCLV_2G
);
1747 intel_ring_emit_reg(ring
, RING_PP_DIR_BASE(engine
));
1748 intel_ring_emit(ring
, get_pd_offset(ppgtt
));
1749 intel_ring_emit(ring
, MI_NOOP
);
1750 intel_ring_advance(ring
);
1755 static int gen7_mm_switch(struct i915_hw_ppgtt
*ppgtt
,
1756 struct drm_i915_gem_request
*req
)
1758 struct intel_ring
*ring
= req
->ring
;
1759 struct intel_engine_cs
*engine
= req
->engine
;
1762 /* NB: TLBs must be flushed and invalidated before a switch */
1763 ret
= engine
->emit_flush(req
, EMIT_INVALIDATE
| EMIT_FLUSH
);
1767 ret
= intel_ring_begin(req
, 6);
1771 intel_ring_emit(ring
, MI_LOAD_REGISTER_IMM(2));
1772 intel_ring_emit_reg(ring
, RING_PP_DIR_DCLV(engine
));
1773 intel_ring_emit(ring
, PP_DIR_DCLV_2G
);
1774 intel_ring_emit_reg(ring
, RING_PP_DIR_BASE(engine
));
1775 intel_ring_emit(ring
, get_pd_offset(ppgtt
));
1776 intel_ring_emit(ring
, MI_NOOP
);
1777 intel_ring_advance(ring
);
1779 /* XXX: RCS is the only one to auto invalidate the TLBs? */
1780 if (engine
->id
!= RCS
) {
1781 ret
= engine
->emit_flush(req
, EMIT_INVALIDATE
| EMIT_FLUSH
);
1789 static int gen6_mm_switch(struct i915_hw_ppgtt
*ppgtt
,
1790 struct drm_i915_gem_request
*req
)
1792 struct intel_engine_cs
*engine
= req
->engine
;
1793 struct drm_i915_private
*dev_priv
= req
->i915
;
1795 I915_WRITE(RING_PP_DIR_DCLV(engine
), PP_DIR_DCLV_2G
);
1796 I915_WRITE(RING_PP_DIR_BASE(engine
), get_pd_offset(ppgtt
));
1800 static void gen8_ppgtt_enable(struct drm_i915_private
*dev_priv
)
1802 struct intel_engine_cs
*engine
;
1803 enum intel_engine_id id
;
1805 for_each_engine(engine
, dev_priv
, id
) {
1806 u32 four_level
= USES_FULL_48BIT_PPGTT(dev_priv
) ?
1807 GEN8_GFX_PPGTT_48B
: 0;
1808 I915_WRITE(RING_MODE_GEN7(engine
),
1809 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE
| four_level
));
1813 static void gen7_ppgtt_enable(struct drm_i915_private
*dev_priv
)
1815 struct intel_engine_cs
*engine
;
1816 uint32_t ecochk
, ecobits
;
1817 enum intel_engine_id id
;
1819 ecobits
= I915_READ(GAC_ECO_BITS
);
1820 I915_WRITE(GAC_ECO_BITS
, ecobits
| ECOBITS_PPGTT_CACHE64B
);
1822 ecochk
= I915_READ(GAM_ECOCHK
);
1823 if (IS_HASWELL(dev_priv
)) {
1824 ecochk
|= ECOCHK_PPGTT_WB_HSW
;
1826 ecochk
|= ECOCHK_PPGTT_LLC_IVB
;
1827 ecochk
&= ~ECOCHK_PPGTT_GFDT_IVB
;
1829 I915_WRITE(GAM_ECOCHK
, ecochk
);
1831 for_each_engine(engine
, dev_priv
, id
) {
1832 /* GFX_MODE is per-ring on gen7+ */
1833 I915_WRITE(RING_MODE_GEN7(engine
),
1834 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE
));
1838 static void gen6_ppgtt_enable(struct drm_i915_private
*dev_priv
)
1840 uint32_t ecochk
, gab_ctl
, ecobits
;
1842 ecobits
= I915_READ(GAC_ECO_BITS
);
1843 I915_WRITE(GAC_ECO_BITS
, ecobits
| ECOBITS_SNB_BIT
|
1844 ECOBITS_PPGTT_CACHE64B
);
1846 gab_ctl
= I915_READ(GAB_CTL
);
1847 I915_WRITE(GAB_CTL
, gab_ctl
| GAB_CTL_CONT_AFTER_PAGEFAULT
);
1849 ecochk
= I915_READ(GAM_ECOCHK
);
1850 I915_WRITE(GAM_ECOCHK
, ecochk
| ECOCHK_SNB_BIT
| ECOCHK_PPGTT_CACHE64B
);
1852 I915_WRITE(GFX_MODE
, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE
));
/* PPGTT support for Sandybridge/Gen6 and later */
1856 static void gen6_ppgtt_clear_range(struct i915_address_space
*vm
,
1860 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
1861 gen6_pte_t
*pt_vaddr
, scratch_pte
;
1862 unsigned first_entry
= start
>> PAGE_SHIFT
;
1863 unsigned num_entries
= length
>> PAGE_SHIFT
;
1864 unsigned act_pt
= first_entry
/ GEN6_PTES
;
1865 unsigned first_pte
= first_entry
% GEN6_PTES
;
1866 unsigned last_pte
, i
;
1868 scratch_pte
= vm
->pte_encode(vm
->scratch_page
.daddr
,
1871 while (num_entries
) {
1872 last_pte
= first_pte
+ num_entries
;
1873 if (last_pte
> GEN6_PTES
)
1874 last_pte
= GEN6_PTES
;
1876 pt_vaddr
= kmap_px(ppgtt
->pd
.page_table
[act_pt
]);
1878 for (i
= first_pte
; i
< last_pte
; i
++)
1879 pt_vaddr
[i
] = scratch_pte
;
1881 kunmap_px(ppgtt
, pt_vaddr
);
1883 num_entries
-= last_pte
- first_pte
;
1889 static void gen6_ppgtt_insert_entries(struct i915_address_space
*vm
,
1890 struct sg_table
*pages
,
1892 enum i915_cache_level cache_level
, u32 flags
)
1894 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
1895 unsigned first_entry
= start
>> PAGE_SHIFT
;
1896 unsigned act_pt
= first_entry
/ GEN6_PTES
;
1897 unsigned act_pte
= first_entry
% GEN6_PTES
;
1898 gen6_pte_t
*pt_vaddr
= NULL
;
1899 struct sgt_iter sgt_iter
;
1902 for_each_sgt_dma(addr
, sgt_iter
, pages
) {
1903 if (pt_vaddr
== NULL
)
1904 pt_vaddr
= kmap_px(ppgtt
->pd
.page_table
[act_pt
]);
1907 vm
->pte_encode(addr
, cache_level
, flags
);
1909 if (++act_pte
== GEN6_PTES
) {
1910 kunmap_px(ppgtt
, pt_vaddr
);
1918 kunmap_px(ppgtt
, pt_vaddr
);
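/*
 * The gen6 PPGTT is a single-level structure: one page directory of
 * I915_PDES page tables, each holding GEN6_PTES entries, so the insert and
 * clear paths above locate a PTE with a simple divide/modulo of the page
 * index (act_pt = first_entry / GEN6_PTES, act_pte = first_entry % GEN6_PTES).
 */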
1921 static int gen6_alloc_va_range(struct i915_address_space
*vm
,
1922 uint64_t start_in
, uint64_t length_in
)
1924 DECLARE_BITMAP(new_page_tables
, I915_PDES
);
1925 struct drm_i915_private
*dev_priv
= vm
->i915
;
1926 struct i915_ggtt
*ggtt
= &dev_priv
->ggtt
;
1927 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
1928 struct i915_page_table
*pt
;
1929 uint32_t start
, length
, start_save
, length_save
;
1933 start
= start_save
= start_in
;
1934 length
= length_save
= length_in
;
1936 bitmap_zero(new_page_tables
, I915_PDES
);
	/* The allocation is done in two stages so that we can bail out with
	 * minimal amount of pain. The first stage finds new page tables that
	 * need allocation. The second stage marks used PTEs within the page
	 * tables.
	 */
1943 gen6_for_each_pde(pt
, &ppgtt
->pd
, start
, length
, pde
) {
1944 if (pt
!= vm
->scratch_pt
) {
1945 WARN_ON(bitmap_empty(pt
->used_ptes
, GEN6_PTES
));
1949 /* We've already allocated a page table */
1950 WARN_ON(!bitmap_empty(pt
->used_ptes
, GEN6_PTES
));
1952 pt
= alloc_pt(dev_priv
);
1958 gen6_initialize_pt(vm
, pt
);
1960 ppgtt
->pd
.page_table
[pde
] = pt
;
1961 __set_bit(pde
, new_page_tables
);
1962 trace_i915_page_table_entry_alloc(vm
, pde
, start
, GEN6_PDE_SHIFT
);
1966 length
= length_save
;
1968 gen6_for_each_pde(pt
, &ppgtt
->pd
, start
, length
, pde
) {
1969 DECLARE_BITMAP(tmp_bitmap
, GEN6_PTES
);
1971 bitmap_zero(tmp_bitmap
, GEN6_PTES
);
1972 bitmap_set(tmp_bitmap
, gen6_pte_index(start
),
1973 gen6_pte_count(start
, length
));
1975 if (__test_and_clear_bit(pde
, new_page_tables
))
1976 gen6_write_pde(&ppgtt
->pd
, pde
, pt
);
1978 trace_i915_page_table_entry_map(vm
, pde
, pt
,
1979 gen6_pte_index(start
),
1980 gen6_pte_count(start
, length
),
1982 bitmap_or(pt
->used_ptes
, tmp_bitmap
, pt
->used_ptes
,
1986 WARN_ON(!bitmap_empty(new_page_tables
, I915_PDES
));
	/* Make sure write is complete before other code can use this page
	 * table. Also required for WC mapped PTEs */
1992 mark_tlbs_dirty(ppgtt
);
1996 for_each_set_bit(pde
, new_page_tables
, I915_PDES
) {
1997 struct i915_page_table
*pt
= ppgtt
->pd
.page_table
[pde
];
1999 ppgtt
->pd
.page_table
[pde
] = vm
->scratch_pt
;
2000 free_pt(dev_priv
, pt
);
2003 mark_tlbs_dirty(ppgtt
);
2007 static int gen6_init_scratch(struct i915_address_space
*vm
)
2009 struct drm_i915_private
*dev_priv
= vm
->i915
;
2012 ret
= setup_scratch_page(dev_priv
, &vm
->scratch_page
, I915_GFP_DMA
);
2016 vm
->scratch_pt
= alloc_pt(dev_priv
);
2017 if (IS_ERR(vm
->scratch_pt
)) {
2018 cleanup_scratch_page(dev_priv
, &vm
->scratch_page
);
2019 return PTR_ERR(vm
->scratch_pt
);
2022 gen6_initialize_pt(vm
, vm
->scratch_pt
);
2027 static void gen6_free_scratch(struct i915_address_space
*vm
)
2029 struct drm_i915_private
*dev_priv
= vm
->i915
;
2031 free_pt(dev_priv
, vm
->scratch_pt
);
2032 cleanup_scratch_page(dev_priv
, &vm
->scratch_page
);
2035 static void gen6_ppgtt_cleanup(struct i915_address_space
*vm
)
2037 struct i915_hw_ppgtt
*ppgtt
= i915_vm_to_ppgtt(vm
);
2038 struct i915_page_directory
*pd
= &ppgtt
->pd
;
2039 struct drm_i915_private
*dev_priv
= vm
->i915
;
2040 struct i915_page_table
*pt
;
2043 drm_mm_remove_node(&ppgtt
->node
);
2045 gen6_for_all_pdes(pt
, pd
, pde
)
2046 if (pt
!= vm
->scratch_pt
)
2047 free_pt(dev_priv
, pt
);
2049 gen6_free_scratch(vm
);
2052 static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt
*ppgtt
)
2054 struct i915_address_space
*vm
= &ppgtt
->base
;
2055 struct drm_i915_private
*dev_priv
= ppgtt
->base
.i915
;
2056 struct i915_ggtt
*ggtt
= &dev_priv
->ggtt
;
	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
2063 BUG_ON(!drm_mm_initialized(&ggtt
->base
.mm
));
2065 ret
= gen6_init_scratch(vm
);
2069 ret
= i915_gem_gtt_insert(&ggtt
->base
, &ppgtt
->node
,
2070 GEN6_PD_SIZE
, GEN6_PD_ALIGN
,
2071 I915_COLOR_UNEVICTABLE
,
2072 0, ggtt
->base
.total
,
2077 if (ppgtt
->node
.start
< ggtt
->mappable_end
)
2078 DRM_DEBUG("Forced to use aperture for PDEs\n");
2083 gen6_free_scratch(vm
);
2087 static int gen6_ppgtt_alloc(struct i915_hw_ppgtt
*ppgtt
)
2089 return gen6_ppgtt_allocate_page_directories(ppgtt
);
2092 static void gen6_scratch_va_range(struct i915_hw_ppgtt
*ppgtt
,
2093 uint64_t start
, uint64_t length
)
2095 struct i915_page_table
*unused
;
2098 gen6_for_each_pde(unused
, &ppgtt
->pd
, start
, length
, pde
)
2099 ppgtt
->pd
.page_table
[pde
] = ppgtt
->base
.scratch_pt
;
2102 static int gen6_ppgtt_init(struct i915_hw_ppgtt
*ppgtt
)
2104 struct drm_i915_private
*dev_priv
= ppgtt
->base
.i915
;
2105 struct i915_ggtt
*ggtt
= &dev_priv
->ggtt
;
2108 ppgtt
->base
.pte_encode
= ggtt
->base
.pte_encode
;
2109 if (intel_vgpu_active(dev_priv
) || IS_GEN6(dev_priv
))
2110 ppgtt
->switch_mm
= gen6_mm_switch
;
2111 else if (IS_HASWELL(dev_priv
))
2112 ppgtt
->switch_mm
= hsw_mm_switch
;
2113 else if (IS_GEN7(dev_priv
))
2114 ppgtt
->switch_mm
= gen7_mm_switch
;
2118 ret
= gen6_ppgtt_alloc(ppgtt
);
2122 ppgtt
->base
.allocate_va_range
= gen6_alloc_va_range
;
2123 ppgtt
->base
.clear_range
= gen6_ppgtt_clear_range
;
2124 ppgtt
->base
.insert_entries
= gen6_ppgtt_insert_entries
;
2125 ppgtt
->base
.unbind_vma
= ppgtt_unbind_vma
;
2126 ppgtt
->base
.bind_vma
= ppgtt_bind_vma
;
2127 ppgtt
->base
.cleanup
= gen6_ppgtt_cleanup
;
2128 ppgtt
->base
.start
= 0;
2129 ppgtt
->base
.total
= I915_PDES
* GEN6_PTES
* PAGE_SIZE
;
2130 ppgtt
->debug_dump
= gen6_dump_ppgtt
;
2132 ppgtt
->pd
.base
.ggtt_offset
=
2133 ppgtt
->node
.start
/ PAGE_SIZE
* sizeof(gen6_pte_t
);
2135 ppgtt
->pd_addr
= (gen6_pte_t __iomem
*)ggtt
->gsm
+
2136 ppgtt
->pd
.base
.ggtt_offset
/ sizeof(gen6_pte_t
);
2138 gen6_scratch_va_range(ppgtt
, 0, ppgtt
->base
.total
);
2140 gen6_write_page_range(dev_priv
, &ppgtt
->pd
, 0, ppgtt
->base
.total
);
2142 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
2143 ppgtt
->node
.size
>> 20,
2144 ppgtt
->node
.start
/ PAGE_SIZE
);
2146 DRM_DEBUG("Adding PPGTT at offset %x\n",
2147 ppgtt
->pd
.base
.ggtt_offset
<< 10);
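/*
 * Note on the gen6 setup above: the page directory itself lives inside the
 * GGTT, so ppgtt->pd_addr is an __iomem pointer computed from ggtt->gsm plus
 * the directory's GGTT offset, and gen6_write_page_range() writes the PDEs
 * through that mapping before the PPGTT is first used.
 */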
2152 static int __hw_ppgtt_init(struct i915_hw_ppgtt
*ppgtt
,
2153 struct drm_i915_private
*dev_priv
)
2155 ppgtt
->base
.i915
= dev_priv
;
2157 if (INTEL_INFO(dev_priv
)->gen
< 8)
2158 return gen6_ppgtt_init(ppgtt
);
2160 return gen8_ppgtt_init(ppgtt
);
2163 static void i915_address_space_init(struct i915_address_space
*vm
,
2164 struct drm_i915_private
*dev_priv
,
2167 i915_gem_timeline_init(dev_priv
, &vm
->timeline
, name
);
2168 drm_mm_init(&vm
->mm
, vm
->start
, vm
->total
);
2169 INIT_LIST_HEAD(&vm
->active_list
);
2170 INIT_LIST_HEAD(&vm
->inactive_list
);
2171 INIT_LIST_HEAD(&vm
->unbound_list
);
2172 list_add_tail(&vm
->global_link
, &dev_priv
->vm_list
);
2175 static void i915_address_space_fini(struct i915_address_space
*vm
)
2177 i915_gem_timeline_fini(&vm
->timeline
);
2178 drm_mm_takedown(&vm
->mm
);
2179 list_del(&vm
->global_link
);
2182 static void gtt_write_workarounds(struct drm_i915_private
*dev_priv
)
	/* This function is for GTT-related workarounds. It is called on
	 * driver load and after a GPU reset, so workarounds can be placed
	 * here even if they get overwritten by a GPU reset.
	 */
2188 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
2189 if (IS_BROADWELL(dev_priv
))
2190 I915_WRITE(GEN8_L3_LRA_1_GPGPU
, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW
);
2191 else if (IS_CHERRYVIEW(dev_priv
))
2192 I915_WRITE(GEN8_L3_LRA_1_GPGPU
, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV
);
2193 else if (IS_SKYLAKE(dev_priv
))
2194 I915_WRITE(GEN8_L3_LRA_1_GPGPU
, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL
);
2195 else if (IS_BROXTON(dev_priv
))
2196 I915_WRITE(GEN8_L3_LRA_1_GPGPU
, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT
);
2199 static int i915_ppgtt_init(struct i915_hw_ppgtt
*ppgtt
,
2200 struct drm_i915_private
*dev_priv
,
2201 struct drm_i915_file_private
*file_priv
,
2206 ret
= __hw_ppgtt_init(ppgtt
, dev_priv
);
2208 kref_init(&ppgtt
->ref
);
2209 i915_address_space_init(&ppgtt
->base
, dev_priv
, name
);
2210 ppgtt
->base
.file
= file_priv
;
2216 int i915_ppgtt_init_hw(struct drm_i915_private
*dev_priv
)
2218 gtt_write_workarounds(dev_priv
);
2220 /* In the case of execlists, PPGTT is enabled by the context descriptor
2221 * and the PDPs are contained within the context itself. We don't
2222 * need to do anything here. */
2223 if (i915
.enable_execlists
)
2226 if (!USES_PPGTT(dev_priv
))
2229 if (IS_GEN6(dev_priv
))
2230 gen6_ppgtt_enable(dev_priv
);
2231 else if (IS_GEN7(dev_priv
))
2232 gen7_ppgtt_enable(dev_priv
);
2233 else if (INTEL_GEN(dev_priv
) >= 8)
2234 gen8_ppgtt_enable(dev_priv
);
2236 MISSING_CASE(INTEL_GEN(dev_priv
));
2241 struct i915_hw_ppgtt
*
2242 i915_ppgtt_create(struct drm_i915_private
*dev_priv
,
2243 struct drm_i915_file_private
*fpriv
,
2246 struct i915_hw_ppgtt
*ppgtt
;
2249 ppgtt
= kzalloc(sizeof(*ppgtt
), GFP_KERNEL
);
2251 return ERR_PTR(-ENOMEM
);
2253 ret
= i915_ppgtt_init(ppgtt
, dev_priv
, fpriv
, name
);
2256 return ERR_PTR(ret
);
2259 trace_i915_ppgtt_create(&ppgtt
->base
);
2264 void i915_ppgtt_close(struct i915_address_space
*vm
)
2266 struct list_head
*phases
[] = {
2273 GEM_BUG_ON(vm
->closed
);
2276 for (phase
= phases
; *phase
; phase
++) {
2277 struct i915_vma
*vma
, *vn
;
2279 list_for_each_entry_safe(vma
, vn
, *phase
, vm_link
)
2280 if (!i915_vma_is_closed(vma
))
2281 i915_vma_close(vma
);
2285 void i915_ppgtt_release(struct kref
*kref
)
2287 struct i915_hw_ppgtt
*ppgtt
=
2288 container_of(kref
, struct i915_hw_ppgtt
, ref
);
2290 trace_i915_ppgtt_release(&ppgtt
->base
);
2292 /* vmas should already be unbound and destroyed */
2293 WARN_ON(!list_empty(&ppgtt
->base
.active_list
));
2294 WARN_ON(!list_empty(&ppgtt
->base
.inactive_list
));
2295 WARN_ON(!list_empty(&ppgtt
->base
.unbound_list
));
2297 i915_address_space_fini(&ppgtt
->base
);
2299 ppgtt
->base
.cleanup(&ppgtt
->base
);
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
2306 static bool needs_idle_maps(struct drm_i915_private
*dev_priv
)
2308 #ifdef CONFIG_INTEL_IOMMU
2309 /* Query intel_iommu to see if we need the workaround. Presumably that
2312 if (IS_GEN5(dev_priv
) && IS_MOBILE(dev_priv
) && intel_iommu_gfx_mapped
)
void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	if (INTEL_INFO(dev_priv)->gen < 6)
		return;

	for_each_engine(engine, dev_priv, id) {
		u32 fault_reg;

		fault_reg = I915_READ(RING_FAULT_REG(engine));
		if (fault_reg & RING_FAULT_VALID) {
			DRM_DEBUG_DRIVER("Unexpected fault\n"
					 "\tAddr: 0x%08lx\n"
					 "\tAddress space: %s\n"
					 "\tSource ID: %d\n"
					 "\tType: %d\n",
					 fault_reg & PAGE_MASK,
					 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
					 RING_FAULT_SRCID(fault_reg),
					 RING_FAULT_FAULT_TYPE(fault_reg));
			I915_WRITE(RING_FAULT_REG(engine),
				   fault_reg & ~RING_FAULT_VALID);
		}
	}

	/* Engine specific init may not have been done till this point. */
	if (dev_priv->engine[RCS])
		POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
}
void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	/* Don't bother messing with faults pre GEN6 as we have little
	 * documentation supporting that it's a good idea.
	 */
	if (INTEL_GEN(dev_priv) < 6)
		return;

	i915_check_and_clear_faults(dev_priv);

	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);

	i915_ggtt_invalidate(dev_priv);
}
int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	do {
		if (dma_map_sg(&obj->base.dev->pdev->dev,
			       pages->sgl, pages->nents,
			       PCI_DMA_BIDIRECTIONAL))
			return 0;

		/* If the DMA remap fails, one cause can be that we have
		 * too many objects pinned in a small remapping table,
		 * such as swiotlb. Incrementally purge all other objects and
		 * try again - if there are no more pages to remove from
		 * the DMA remapper, i915_gem_shrink will return 0.
		 */
		GEM_BUG_ON(obj->mm.pages == pages);
	} while (i915_gem_shrink(to_i915(obj->base.dev),
				 obj->base.size >> PAGE_SHIFT,
				 I915_SHRINK_BOUND |
				 I915_SHRINK_UNBOUND |
				 I915_SHRINK_ACTIVE));

	return -ENOSPC;
}
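/*
 * A gen8+ GGTT PTE is a single 64-bit value, so gen8_set_pte() below can
 * update it with one 64-bit write into the PTE half of BAR0 (ggtt->gsm).
 */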
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
	writeq(pte, addr);
}
static void gen8_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen8_pte_t __iomem *pte =
		(gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);

	gen8_set_pte(pte, gen8_pte_encode(addr, level));

	ggtt->invalidate(vm->i915);
}
static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 unused)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen8_pte_t __iomem *gtt_entries;
	gen8_pte_t gtt_entry;
	dma_addr_t addr;
	int i = 0;

	gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);

	for_each_sgt_dma(addr, sgt_iter, st) {
		gtt_entry = gen8_pte_encode(addr, level);
		gen8_set_pte(&gtt_entries[i++], gtt_entry);
	}

	/*
	 * XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	ggtt->invalidate(vm->i915);
}
struct insert_entries {
	struct i915_address_space *vm;
	struct sg_table *st;
	uint64_t start;
	enum i915_cache_level level;
	u32 flags;
};

static int gen8_ggtt_insert_entries__cb(void *_arg)
{
	struct insert_entries *arg = _arg;
	gen8_ggtt_insert_entries(arg->vm, arg->st,
				 arg->start, arg->level, arg->flags);
	return 0;
}

static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
					  struct sg_table *st,
					  uint64_t start,
					  enum i915_cache_level level,
					  u32 flags)
{
	struct insert_entries arg = { vm, st, start, level, flags };

	stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
}
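/*
 * Note: gen8_ggtt_insert_entries__BKL() above performs the PTE writes under
 * stop_machine(), i.e. with every other CPU quiesced; gen8_gmch_probe()
 * selects it as the insert_entries hook on Cherryview, presumably to work
 * around unreliable concurrent GGTT updates on that platform.
 */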
static void gen6_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level level,
				  u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	gen6_pte_t __iomem *pte =
		(gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);

	iowrite32(vm->pte_encode(addr, level, flags), pte);

	ggtt->invalidate(vm->i915);
}
/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *st,
				     uint64_t start,
				     enum i915_cache_level level, u32 flags)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	struct sgt_iter sgt_iter;
	gen6_pte_t __iomem *gtt_entries;
	gen6_pte_t gtt_entry;
	dma_addr_t addr;
	int i = 0;

	gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);

	for_each_sgt_dma(addr, sgt_iter, st) {
		gtt_entry = vm->pte_encode(addr, level, flags);
		iowrite32(gtt_entry, &gtt_entries[i++]);
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	ggtt->invalidate(vm->i915);
}
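/*
 * Note: on gen8+ the GGTT clear_range hook defaults to nop_clear_range()
 * below; gen8_gmch_probe() only switches to gen8_ggtt_clear_range() when
 * full PPGTT is not in use or the VT-d scanout workaround is needed, i.e.
 * when stale GGTT PTEs could still be exercised after unbind.
 */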
static void nop_clear_range(struct i915_address_space *vm,
			    uint64_t start, uint64_t length)
{
}
static void gen8_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start, uint64_t length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen8_pte_t scratch_pte, __iomem *gtt_base =
		(gen8_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
				      I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		gen8_set_pte(&gtt_base[i], scratch_pte);
	readl(gtt_base);
}
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start,
				  uint64_t length)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_pte_t __iomem *)ggtt->gsm + first_entry;
	const int max_entries = ggtt_total_entries(ggtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}
static void i915_ggtt_insert_page(struct i915_address_space *vm,
				  dma_addr_t addr,
				  u64 offset,
				  enum i915_cache_level cache_level,
				  u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
}

static void i915_ggtt_insert_entries(struct i915_address_space *vm,
				     struct sg_table *pages,
				     uint64_t start,
				     enum i915_cache_level cache_level, u32 unused)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
}

static void i915_ggtt_clear_range(struct i915_address_space *vm,
				  uint64_t start, uint64_t length)
{
	intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
}
static int ggtt_bind_vma(struct i915_vma *vma,
			 enum i915_cache_level cache_level,
			 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct drm_i915_gem_object *obj = vma->obj;
	u32 pte_flags = 0;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	if (obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	intel_runtime_pm_get(i915);
	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);
	intel_runtime_pm_put(i915);

	/*
	 * Without aliasing PPGTT there's no difference between
	 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
	 * upgrade to both bound if we bind either to avoid double-binding.
	 */
	vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;

	return 0;
}
static int aliasing_gtt_bind_vma(struct i915_vma *vma,
				 enum i915_cache_level cache_level,
				 u32 flags)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	u32 pte_flags;
	int ret;

	ret = i915_get_ggtt_vma_pages(vma);
	if (ret)
		return ret;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	if (flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		vma->vm->insert_entries(vma->vm,
					vma->pages, vma->node.start,
					cache_level, pte_flags);
		intel_runtime_pm_put(i915);
	}

	if (flags & I915_VMA_LOCAL_BIND) {
		struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;

		appgtt->base.insert_entries(&appgtt->base,
					    vma->pages, vma->node.start,
					    cache_level, pte_flags);
	}

	return 0;
}
static void ggtt_unbind_vma(struct i915_vma *vma)
{
	struct drm_i915_private *i915 = vma->vm->i915;
	struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
	const u64 size = min(vma->size, vma->node.size);

	if (vma->flags & I915_VMA_GLOBAL_BIND) {
		intel_runtime_pm_get(i915);
		vma->vm->clear_range(vma->vm,
				     vma->node.start, size);
		intel_runtime_pm_put(i915);
	}

	if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
		appgtt->base.clear_range(&appgtt->base,
					 vma->node.start, size);
}
void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct device *kdev = &dev_priv->drm.pdev->dev;
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (unlikely(ggtt->do_idle_maps)) {
		if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
			DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
}
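/*
 * On machines without an LLC the GGTT is "coloured" by cache level: nodes of
 * differing colour must not be directly adjacent. The adjust callback below
 * therefore shrinks a candidate hole by one GTT page on each side that
 * touches a node of a different colour, leaving an implicit guard page.
 */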
static void i915_gtt_color_adjust(const struct drm_mm_node *node,
				  unsigned long color,
				  u64 *start,
				  u64 *end)
{
	if (node->color != color)
		*start += I915_GTT_PAGE_SIZE;

	node = list_next_entry(node, node_list);
	if (node->allocated && node->color != color)
		*end -= I915_GTT_PAGE_SIZE;
}
int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture.  One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	unsigned long hole_start, hole_end;
	struct i915_hw_ppgtt *ppgtt;
	struct drm_mm_node *entry;
	int ret;

	ret = intel_vgt_balloon(dev_priv);
	if (ret)
		return ret;

	/* Reserve a mappable slot for our lockless error capture */
	ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
					  PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
					  0, ggtt->mappable_end,
					  DRM_MM_INSERT_LOW);
	if (ret)
		return ret;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		ggtt->base.clear_range(&ggtt->base, hole_start,
				       hole_end - hole_start);
	}

	/* And finally clear the reserved guard page */
	ggtt->base.clear_range(&ggtt->base,
			       ggtt->base.total - PAGE_SIZE, PAGE_SIZE);

	if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
		ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
		if (!ppgtt) {
			ret = -ENOMEM;
			goto err;
		}

		ret = __hw_ppgtt_init(ppgtt, dev_priv);
		if (ret)
			goto err_ppgtt;

		if (ppgtt->base.allocate_va_range) {
			ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
							    ppgtt->base.total);
			if (ret)
				goto err_ppgtt_cleanup;
		}

		ppgtt->base.clear_range(&ppgtt->base,
					ppgtt->base.start,
					ppgtt->base.total);

		dev_priv->mm.aliasing_ppgtt = ppgtt;
		WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
		ggtt->base.bind_vma = aliasing_gtt_bind_vma;
	}

	return 0;

err_ppgtt_cleanup:
	ppgtt->base.cleanup(&ppgtt->base);
err_ppgtt:
	kfree(ppgtt);
err:
	drm_mm_remove_node(&ggtt->error_capture);
	return ret;
}
/**
 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
 * @dev_priv: i915 device
 */
void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;

	if (dev_priv->mm.aliasing_ppgtt) {
		struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

		ppgtt->base.cleanup(&ppgtt->base);
		kfree(ppgtt);
	}

	i915_gem_cleanup_stolen(&dev_priv->drm);

	if (drm_mm_node_allocated(&ggtt->error_capture))
		drm_mm_remove_node(&ggtt->error_capture);

	if (drm_mm_initialized(&ggtt->base.mm)) {
		intel_vgt_deballoon(dev_priv);

		mutex_lock(&dev_priv->drm.struct_mutex);
		i915_address_space_fini(&ggtt->base);
		mutex_unlock(&dev_priv->drm.struct_mutex);
	}

	ggtt->base.cleanup(&ggtt->base);

	arch_phys_wc_del(ggtt->mtrr);
	io_mapping_fini(&ggtt->mappable);
}
static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}

static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
	if (bdw_gmch_ctl)
		bdw_gmch_ctl = 1 << bdw_gmch_ctl;

#ifdef CONFIG_X86_32
	/* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
	if (bdw_gmch_ctl > 4)
		bdw_gmch_ctl = 4;
#endif

	return bdw_gmch_ctl << 20;
}

static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GGMS_MASK;

	if (gmch_ctrl)
		return 1 << (20 + gmch_ctrl);

	return 0;
}
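/*
 * Worked example (illustrative): a GGMS field of 2 gives 2 << 20 = 2MB of
 * PTE space on gen6/7 (2MB / 4 bytes per PTE * 4KB pages = 2GB of GGTT),
 * while on gen8+ the field is log2 encoded, so the same value 2 decodes to
 * (1 << 2) << 20 = 4MB of 8-byte PTEs, again 2GB of GGTT.
 */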
static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}

static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
{
	bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
	return bdw_gmch_ctl << 25; /* 32 MB units */
}

static size_t chv_get_stolen_size(u16 gmch_ctrl)
{
	gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
	gmch_ctrl &= SNB_GMCH_GMS_MASK;

	/*
	 * 0x0  to 0x10: 32MB increments starting at 0MB
	 * 0x11 to 0x16: 4MB increments starting at 8MB
	 * 0x17 to 0x1d: 4MB increments start at 36MB
	 */
	if (gmch_ctrl < 0x11)
		return gmch_ctrl << 25;
	else if (gmch_ctrl < 0x17)
		return (gmch_ctrl - 0x11 + 2) << 22;
	else
		return (gmch_ctrl - 0x17 + 9) << 22;
}

static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
{
	gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
	gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;

	if (gen9_gmch_ctl < 0xf0)
		return gen9_gmch_ctl << 25; /* 32 MB units */
	else
		/* 4MB increments starting at 0xf0 for 4MB */
		return (gen9_gmch_ctl - 0xf0 + 1) << 22;
}
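/*
 * Worked example (illustrative): chv_get_stolen_size() with a GMS field of
 * 0x11 returns (0x11 - 0x11 + 2) << 22 = 8MB, and 0x17 returns
 * (0x17 - 0x17 + 9) << 22 = 36MB, matching the table above.
 */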
static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	phys_addr_t phys_addr;
	int ret;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;

	/*
	 * On BXT writes larger than 64 bit to the GTT pagetable range will be
	 * dropped. For WC mappings in general we have 64 byte burst writes
	 * when the WC buffer is flushed, so we can't use it, but have to
	 * resort to an uncached mapping. The WC issue is easily caught by the
	 * readback check when writing GTT PTE entries.
	 */
	if (IS_GEN9_LP(dev_priv))
		ggtt->gsm = ioremap_nocache(phys_addr, size);
	else
		ggtt->gsm = ioremap_wc(phys_addr, size);
	if (!ggtt->gsm) {
		DRM_ERROR("Failed to map the ggtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
	if (ret) {
		DRM_ERROR("Scratch setup failed\n");
		/* iounmap will also get called at remove, but meh */
		iounmap(ggtt->gsm);
		return ret;
	}

	return 0;
}
/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
 * bits. When using advanced contexts each context stores its own PAT, but
 * writing this data shouldn't be harmful even in those cases. */
static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC)     | /* for normal objects, no eLLC */
	      GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
	      GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
	      GEN8_PPAT(3, GEN8_PPAT_UC)                     | /* Uncached objects, mostly for scanout */
	      GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
	      GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
	      GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
	      GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));

	if (!USES_PPGTT(dev_priv))
		/* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
		 * so RTL will always use the value corresponding to
		 * pat_sel = 000".
		 * So let's disable cache for GGTT to avoid screen corruptions.
		 * MOCS still can be used though.
		 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
		 * before this patch, i.e. the same uncached + snooping access
		 * like on gen6/7 seems to be in effect.
		 * - So this just fixes blitter/render access. Again it looks
		 * like it's not just uncached access, but uncached + snooping.
		 * So we can still hold onto all our assumptions wrt cpu
		 * clflushing on LLC machines.
		 */
		pat = GEN8_PPAT(0, GEN8_PPAT_UC);

	/* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
	 * write would work. */
	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}
static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
{
	uint64_t pat;

	/*
	 * Map WB on BDW to snooped on CHV.
	 *
	 * Only the snoop bit has meaning for CHV, the rest is
	 * ignored.
	 *
	 * The hardware will never snoop for certain types of accesses:
	 * - CPU GTT (GMADR->GGTT->no snoop->memory)
	 * - PPGTT page tables
	 * - some other special cycles
	 *
	 * As with BDW, we also need to consider the following for GT accesses:
	 * "For GGTT, there is NO pat_sel[2:0] from the entry,
	 * so RTL will always use the value corresponding to
	 * pat_sel = 000".
	 * Which means we must set the snoop bit in PAT entry 0
	 * in order to keep the global status page working.
	 */
	pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(1, 0) |
	      GEN8_PPAT(2, 0) |
	      GEN8_PPAT(3, 0) |
	      GEN8_PPAT(4, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(5, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(6, CHV_PPAT_SNOOP) |
	      GEN8_PPAT(7, CHV_PPAT_SNOOP);

	I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
	I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
}
static void gen6_gmch_remove(struct i915_address_space *vm)
{
	struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);

	iounmap(ggtt->gsm);
	cleanup_scratch_page(vm->i915, &vm->scratch_page);
}
static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	if (INTEL_GEN(dev_priv) >= 9) {
		ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	} else {
		ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
	}

	ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

	ggtt->base.cleanup = gen6_gmch_remove;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.insert_page = gen8_ggtt_insert_page;
	ggtt->base.clear_range = nop_clear_range;
	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
		ggtt->base.clear_range = gen8_ggtt_clear_range;

	ggtt->base.insert_entries = gen8_ggtt_insert_entries;
	if (IS_CHERRYVIEW(dev_priv))
		ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;

	ggtt->invalidate = gen6_ggtt_invalidate;

	return ggtt_probe_common(ggtt, size);
}
static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;

	ggtt->base.clear_range = gen6_ggtt_clear_range;
	ggtt->base.insert_page = gen6_ggtt_insert_page;
	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(dev_priv))
		ggtt->base.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(dev_priv))
		ggtt->base.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(dev_priv))
		ggtt->base.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(dev_priv) >= 7)
		ggtt->base.pte_encode = ivb_pte_encode;
	else
		ggtt->base.pte_encode = snb_pte_encode;

	return ggtt_probe_common(ggtt, size);
}
static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}
static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->base.total,
		      &ggtt->stolen_size,
		      &ggtt->mappable_base,
		      &ggtt->mappable_end);

	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
	ggtt->base.insert_page = i915_ggtt_insert_page;
	ggtt->base.insert_entries = i915_ggtt_insert_entries;
	ggtt->base.clear_range = i915_ggtt_clear_range;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	if (unlikely(ggtt->do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}
/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @dev_priv: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ggtt->base.i915 = dev_priv;

	if (INTEL_GEN(dev_priv) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(dev_priv) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret)
		return ret;

	/* Trim the GGTT to fit the GuC mappable upper range (when enabled).
	 * This is easier than doing range restriction on the fly, as we
	 * currently don't have any bits spare to pass in this upper
	 * restriction!
	 */
	if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
		ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	if ((ggtt->base.total - 1) >> 32) {
		DRM_ERROR("We never expected a Global GTT with more than 32bits"
			  " of address space! Found %lldM!\n",
			  ggtt->base.total >> 20);
		ggtt->base.total = 1ULL << 32;
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	if (ggtt->mappable_end > ggtt->base.total) {
		DRM_ERROR("mappable aperture extends past end of GGTT,"
			  " aperture=%llx, total=%llx\n",
			  ggtt->mappable_end, ggtt->base.total);
		ggtt->mappable_end = ggtt->base.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %lluM\n",
		 ggtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif

	return 0;
}
/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @dev_priv: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	INIT_LIST_HEAD(&dev_priv->vm_list);

	/* Subtract the guard page before address space initialization to
	 * shrink the range used by drm_mm.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	ggtt->base.total -= PAGE_SIZE;
	i915_address_space_init(&ggtt->base, dev_priv, "[global]");
	ggtt->base.total += PAGE_SIZE;
	if (!HAS_LLC(dev_priv))
		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
				dev_priv->ggtt.mappable_base,
				dev_priv->ggtt.mappable_end)) {
		ret = -EIO;
		goto out_gtt_cleanup;
	}

	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev_priv);
	if (ret)
		goto out_gtt_cleanup;

	return 0;

out_gtt_cleanup:
	ggtt->base.cleanup(&ggtt->base);
	return ret;
}
int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_ggtt_enable_guc(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate = guc_ggtt_invalidate;
}

void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate = gen6_ggtt_invalidate;
}
void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj, *on;

	i915_check_and_clear_faults(dev_priv);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);

	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.bound_list, global_link) {
		bool ggtt_bound = false;
		struct i915_vma *vma;

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (vma->vm != &ggtt->base)
				continue;

			if (!i915_vma_unbind(vma))
				continue;

			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));
			ggtt_bound = true;
		}

		if (ggtt_bound)
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
	}

	ggtt->base.closed = false;

	if (INTEL_GEN(dev_priv) >= 8) {
		if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

	if (USES_PPGTT(dev_priv)) {
		struct i915_address_space *vm;

		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			/* TODO: Perhaps it shouldn't be gen6 specific */

			struct i915_hw_ppgtt *ppgtt;

			if (i915_is_ggtt(vm))
				ppgtt = dev_priv->mm.aliasing_ppgtt;
			else
				ppgtt = i915_vm_to_ppgtt(vm);

			gen6_write_page_range(dev_priv, &ppgtt->pd,
					      0, ppgtt->base.total);
		}
	}

	i915_ggtt_invalidate(dev_priv);
}
static struct scatterlist *
rotate_pages(const dma_addr_t *in, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
			sg_dma_address(sg) = in[offset + src_idx];
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	return sg;
}
static struct sg_table *
intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
			  struct drm_i915_gem_object *obj)
{
	const size_t n_pages = obj->base.size / PAGE_SIZE;
	unsigned int size = intel_rotation_info_size(rot_info);
	struct sgt_iter sgt_iter;
	dma_addr_t dma_addr;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = drm_malloc_gfp(n_pages,
					sizeof(dma_addr_t),
					GFP_TEMPORARY);
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
		page_addr_list[i++] = dma_addr;

	GEM_BUG_ON(i != n_pages);

	/* Rotate the pages. */
	sg = st->sgl;
	st->nents = 0;
	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
	}

	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	drm_free_large(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_free_large(page_addr_list);

	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}
static struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(iter->length - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret = 0;

	/* The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	if (vma->pages)
		return 0;

	if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
		vma->pages = vma->obj->mm.pages;
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
		vma->pages =
			intel_rotate_fb_obj_pages(&vma->ggtt_view.rotated,
						  vma->obj);
	else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
	else
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);

	if (!vma->pages) {
		DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
			  vma->ggtt_view.type);
		ret = -EINVAL;
	} else if (IS_ERR(vma->pages)) {
		ret = PTR_ERR(vma->pages);
		vma->pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}

	return ret;
}
/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}
/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction, otherwise the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0  && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 *    |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)             (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTT, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
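/*
 * Usage sketch (illustrative only, not taken from this file): asking the
 * allocator above to find a mappable slot for a caller-owned node might look
 * like
 *
 *	err = i915_gem_gtt_insert(&ggtt->base, &node,
 *				  size, 0, I915_COLOR_UNEVICTABLE,
 *				  0, ggtt->mappable_end,
 *				  PIN_MAPPABLE | PIN_NONBLOCK);
 *
 * where PIN_MAPPABLE biases the search towards the low, CPU-mappable end of
 * the GGTT as described in the kernel-doc above.
 */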