/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 * 12
 * 34
 *
 * The above represents a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 * 1212
 * 3434
 *
 * In this example both the size and layout of pages in the alternative view
 * differ from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * The core API is designed to have copy semantics, which means that a passed
 * in struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

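/*
 * Sanitize the requested PPGTT mode (the enable_ppgtt module parameter)
 * against what the platform supports. The return value selects the mode
 * used by the rest of the driver: 0 = disabled, 1 = aliasing PPGTT,
 * 2 = full 32b PPGTT, 3 = full 48b PPGTT.
 */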
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	if (!dev_priv->info.has_aliasing_ppgtt)
		return 0;

	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* GVT-g has no support for 32bit ppgtt */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
	}

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
		if (has_full_48bit_ppgtt)
			return 3;

		if (has_full_ppgtt)
			return 2;
	}

	return 1;
}

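/*
 * Bind a VMA into a ppgtt: allocate the page tables backing the VMA's range
 * on the first (local) bind, then write its PTEs with the requested cache
 * level and flags.
 */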
static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int ret;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
						 vma->size);
		if (ret)
			return ret;
	}

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

static int ppgtt_set_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pages);

	vma->pages = vma->obj->mm.pages;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

static void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

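/*
 * PTE/PDE encoding helpers: pack a DMA address and caching attributes into
 * the hardware descriptor layout. The gen8_* variants use the PPAT based
 * gen8+ format, while the snb/ivb/byt/hsw/iris variants cover the gen6/gen7
 * flavours.
 */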
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

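/*
 * Allocation of pages backing the page-table structures: recycle from the
 * address space's free_pages pagevec when possible and, for write-combined
 * page tables, from the global WC stash, so that the cost of
 * set_pages_array_wc() is amortized over batches of pages.
 */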
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct pagevec *pvec = &vm->free_pages;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	if (likely(pvec->nr))
		return pvec->pages[--pvec->nr];

	if (!vm->pt_kmap_wc)
		return alloc_page(gfp);

	/* A placeholder for a specific mutex to guard the WC stash */
	lockdep_assert_held(&vm->i915->drm.struct_mutex);

	/* Look in our global stash of WC pages... */
	pvec = &vm->i915->mm.wc_stash;
	if (likely(pvec->nr))
		return pvec->pages[--pvec->nr];

	/* Otherwise batch allocate pages to amortize cost of set_pages_wc. */
	do {
		struct page *page;

		page = alloc_page(gfp);
		if (unlikely(!page))
			break;

		pvec->pages[pvec->nr++] = page;
	} while (pagevec_space(pvec));

	if (unlikely(!pvec->nr))
		return NULL;

	set_pages_array_wc(pvec->pages, pvec->nr);

	return pvec->pages[--pvec->nr];
}

static void vm_free_pages_release(struct i915_address_space *vm,
				  bool immediate)
{
	struct pagevec *pvec = &vm->free_pages;

	GEM_BUG_ON(!pagevec_count(pvec));

	if (vm->pt_kmap_wc) {
		struct pagevec *stash = &vm->i915->mm.wc_stash;

		/* When we use WC, first fill up the global stash and then
		 * only if full immediately free the overflow.
		 */

		lockdep_assert_held(&vm->i915->drm.struct_mutex);
		if (pagevec_space(stash)) {
			do {
				stash->pages[stash->nr++] =
					pvec->pages[--pvec->nr];
				if (!pvec->nr)
					return;
			} while (pagevec_space(stash));

			/* As we have made some room in the VM's free_pages,
			 * we can wait for it to fill again. Unless we are
			 * inside i915_address_space_fini() and must
			 * immediately release the pages!
			 */
			if (!immediate)
				return;
		}

		set_pages_array_wb(pvec->pages, pvec->nr);
	}

	__pagevec_release(pvec);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	if (!pagevec_add(&vm->free_pages, page))
		vm_free_pages_release(vm, false);
}

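/*
 * A struct i915_page_dma couples a page-table page with its DMA mapping;
 * these helpers allocate/map and unmap/free such pages using the WC-aware
 * allocator above, and fill them with a repeated 64b/32b value.
 */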
static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

static int setup_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct i915_address_space *vm,
			     struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))

static void fill_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p,
			  const u64 val)
{
	u64 * const vaddr = kmap_atomic(p->page);

	memset64(vaddr, val, PAGE_SIZE / sizeof(val));

	kunmap_atomic(vaddr);
}

static void fill_page_dma_32(struct i915_address_space *vm,
			     struct i915_page_dma *p,
			     const u32 v)
{
	fill_page_dma(vm, p, (u64)v << 32 | v);
}

static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct page *page = NULL;
	dma_addr_t addr;
	int order;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert().
	 *
	 * TODO: we should really consider write-protecting the scratch-page and
	 * sharing between ppgtt
	 */
	if (i915_vm_is_48bit(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
		order = get_order(I915_GTT_PAGE_SIZE_64K);
		page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
		if (page) {
			addr = dma_map_page(vm->dma, page, 0,
					    I915_GTT_PAGE_SIZE_64K,
					    PCI_DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(vm->dma, addr))) {
				__free_pages(page, order);
				page = NULL;
			}

			if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
				dma_unmap_page(vm->dma, addr,
					       I915_GTT_PAGE_SIZE_64K,
					       PCI_DMA_BIDIRECTIONAL);
				__free_pages(page, order);
				page = NULL;
			}
		}
	}

	if (!page) {
		order = 0;
		page = alloc_page(gfp | __GFP_ZERO);
		if (unlikely(!page))
			return -ENOMEM;

		addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(vm->dma, addr))) {
			__free_page(page);
			return -ENOMEM;
		}
	}

	vm->scratch_page.page = page;
	vm->scratch_page.daddr = addr;
	vm->scratch_page.order = order;

	return 0;
}

static void cleanup_scratch_page(struct i915_address_space *vm)
{
	struct i915_page_dma *p = &vm->scratch_page;

	dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
		       PCI_DMA_BIDIRECTIONAL);
	__free_pages(p->page, p->order);
}

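/*
 * Allocation helpers for the individual paging-structure levels (page
 * tables and page directories). Freshly initialized levels point every
 * entry at the scratch structure one level down, so unused address space
 * always resolves to the scratch page.
 */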
static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pt))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->used_ptes = 0;
	return pt;
}

static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
	cleanup_px(vm, pt);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill_px(vm, pt,
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill32_px(vm, pt,
		  vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}

static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pd))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	pd->used_pdes = 0;
	return pd;
}

static void free_pd(struct i915_address_space *vm,
		    struct i915_page_directory *pd)
{
	cleanup_px(vm, pd);
	kfree(pd);
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	unsigned int i;

	fill_px(vm, pd,
		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
	for (i = 0; i < I915_PDES; i++)
		pd->page_table[i] = vm->scratch_pt;
}

static int __pdp_init(struct i915_address_space *vm,
		      struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	unsigned int i;

	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
					    GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pdp->page_directory))
		return -ENOMEM;

	for (i = 0; i < pdpes; i++)
		pdp->page_directory[i] = vm->scratch_pd;

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static inline bool use_4lvl(const struct i915_address_space *vm)
{
	return i915_vm_is_48bit(vm);
}

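/*
 * With the 4-level (48b) layout the page directory pointer and PML4 levels
 * are real, page-backed structures; with the 3-level layout the single pdp
 * is embedded in the ppgtt and only its array of directory pointers is
 * allocated here.
 */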
static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!use_4lvl(vm));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(vm, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(vm, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct i915_address_space *vm,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);

	if (!use_4lvl(vm))
		return;

	cleanup_px(vm, pdp);
	kfree(pdp);
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	unsigned int i;

	fill_px(vm, pml4,
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
		pml4->pdps[i] = vm->scratch_pdp;
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	BUG_ON(entry >= 4);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
	*cs++ = upper_32_bits(addr);
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
	*cs++ = lower_32_bits(addr);
	intel_ring_advance(req, cs);

	return 0;
}

static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}

2ce5179f
MW
809/* Removes entries from a single page table, releasing it if it's empty.
810 * Caller can use the return value to update higher-level entries.
811 */
812static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
d209b9c3 813 struct i915_page_table *pt,
dd19674b 814 u64 start, u64 length)
459108b8 815{
d209b9c3 816 unsigned int num_entries = gen8_pte_count(start, length);
37c63934
MK
817 unsigned int pte = gen8_pte_index(start);
818 unsigned int pte_end = pte + num_entries;
894ccebe
CW
819 const gen8_pte_t scratch_pte =
820 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
821 gen8_pte_t *vaddr;
459108b8 822
dd19674b 823 GEM_BUG_ON(num_entries > pt->used_ptes);
37c63934 824
dd19674b
CW
825 pt->used_ptes -= num_entries;
826 if (!pt->used_ptes)
827 return true;
2ce5179f 828
9231da70 829 vaddr = kmap_atomic_px(pt);
37c63934 830 while (pte < pte_end)
894ccebe 831 vaddr[pte++] = scratch_pte;
9231da70 832 kunmap_atomic(vaddr);
2ce5179f
MW
833
834 return false;
d209b9c3 835}
06fda602 836
dd19674b
CW
837static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
838 struct i915_page_directory *pd,
839 struct i915_page_table *pt,
840 unsigned int pde)
841{
842 gen8_pde_t *vaddr;
843
844 pd->page_table[pde] = pt;
845
846 vaddr = kmap_atomic_px(pd);
847 vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
848 kunmap_atomic(vaddr);
849}
850
2ce5179f 851static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
d209b9c3 852 struct i915_page_directory *pd,
dd19674b 853 u64 start, u64 length)
d209b9c3
MW
854{
855 struct i915_page_table *pt;
dd19674b 856 u32 pde;
d209b9c3
MW
857
858 gen8_for_each_pde(pt, pd, start, length, pde) {
bf75d59e
CW
859 GEM_BUG_ON(pt == vm->scratch_pt);
860
dd19674b
CW
861 if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
862 continue;
06fda602 863
dd19674b 864 gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
bf75d59e 865 GEM_BUG_ON(!pd->used_pdes);
fe52e37f 866 pd->used_pdes--;
dd19674b
CW
867
868 free_pt(vm, pt);
2ce5179f
MW
869 }
870
fe52e37f
CW
871 return !pd->used_pdes;
872}
2ce5179f 873
fe52e37f
CW
874static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
875 struct i915_page_directory_pointer *pdp,
876 struct i915_page_directory *pd,
877 unsigned int pdpe)
878{
879 gen8_ppgtt_pdpe_t *vaddr;
880
881 pdp->page_directory[pdpe] = pd;
1e6437b0 882 if (!use_4lvl(vm))
fe52e37f
CW
883 return;
884
885 vaddr = kmap_atomic_px(pdp);
886 vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
887 kunmap_atomic(vaddr);
d209b9c3 888}
06fda602 889
2ce5179f
MW
890/* Removes entries from a single page dir pointer, releasing it if it's empty.
891 * Caller can use the return value to update higher-level entries
892 */
893static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
d209b9c3 894 struct i915_page_directory_pointer *pdp,
fe52e37f 895 u64 start, u64 length)
d209b9c3
MW
896{
897 struct i915_page_directory *pd;
fe52e37f 898 unsigned int pdpe;
06fda602 899
d209b9c3 900 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
bf75d59e
CW
901 GEM_BUG_ON(pd == vm->scratch_pd);
902
fe52e37f
CW
903 if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
904 continue;
459108b8 905
fe52e37f 906 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
bf75d59e 907 GEM_BUG_ON(!pdp->used_pdpes);
e2b763ca 908 pdp->used_pdpes--;
2ce5179f 909
fe52e37f
CW
910 free_pd(vm, pd);
911 }
fce93755 912
e2b763ca 913 return !pdp->used_pdpes;
d209b9c3 914}
459108b8 915
fe52e37f
CW
916static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
917 u64 start, u64 length)
918{
919 gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
920}
921
e2b763ca
CW
922static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
923 struct i915_page_directory_pointer *pdp,
924 unsigned int pml4e)
925{
926 gen8_ppgtt_pml4e_t *vaddr;
927
928 pml4->pdps[pml4e] = pdp;
929
930 vaddr = kmap_atomic_px(pml4);
931 vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
932 kunmap_atomic(vaddr);
933}
934
2ce5179f
MW
935/* Removes entries from a single pml4.
936 * This is the top-level structure in 4-level page tables used on gen8+.
937 * Empty entries are always scratch pml4e.
938 */
fe52e37f
CW
939static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
940 u64 start, u64 length)
d209b9c3 941{
fe52e37f
CW
942 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
943 struct i915_pml4 *pml4 = &ppgtt->pml4;
d209b9c3 944 struct i915_page_directory_pointer *pdp;
e2b763ca 945 unsigned int pml4e;
2ce5179f 946
1e6437b0 947 GEM_BUG_ON(!use_4lvl(vm));
459108b8 948
d209b9c3 949 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
bf75d59e
CW
950 GEM_BUG_ON(pdp == vm->scratch_pdp);
951
e2b763ca
CW
952 if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
953 continue;
459108b8 954
e2b763ca 955 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
e2b763ca
CW
956
957 free_pdp(vm, pdp);
459108b8
BW
958 }
959}
960
894ccebe
CW
961struct sgt_dma {
962 struct scatterlist *sg;
963 dma_addr_t dma, max;
964};
965
9e89f9ee
CW
966struct gen8_insert_pte {
967 u16 pml4e;
968 u16 pdpe;
969 u16 pde;
970 u16 pte;
971};
972
973static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
974{
975 return (struct gen8_insert_pte) {
976 gen8_pml4e_index(start),
977 gen8_pdpe_index(start),
978 gen8_pde_index(start),
979 gen8_pte_index(start),
980 };
981}
982
894ccebe
CW
983static __always_inline bool
984gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
f9b5b782 985 struct i915_page_directory_pointer *pdp,
894ccebe 986 struct sgt_dma *iter,
9e89f9ee 987 struct gen8_insert_pte *idx,
f9b5b782
MT
988 enum i915_cache_level cache_level)
989{
894ccebe
CW
990 struct i915_page_directory *pd;
991 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
992 gen8_pte_t *vaddr;
993 bool ret;
9df15b49 994
3e490042 995 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
9e89f9ee
CW
996 pd = pdp->page_directory[idx->pdpe];
997 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
894ccebe 998 do {
9e89f9ee
CW
999 vaddr[idx->pte] = pte_encode | iter->dma;
1000
894ccebe
CW
1001 iter->dma += PAGE_SIZE;
1002 if (iter->dma >= iter->max) {
1003 iter->sg = __sg_next(iter->sg);
1004 if (!iter->sg) {
1005 ret = false;
1006 break;
1007 }
7ad47cf2 1008
894ccebe
CW
1009 iter->dma = sg_dma_address(iter->sg);
1010 iter->max = iter->dma + iter->sg->length;
d7b3de91 1011 }
9df15b49 1012
9e89f9ee
CW
1013 if (++idx->pte == GEN8_PTES) {
1014 idx->pte = 0;
1015
1016 if (++idx->pde == I915_PDES) {
1017 idx->pde = 0;
1018
894ccebe 1019 /* Limited by sg length for 3lvl */
9e89f9ee
CW
1020 if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
1021 idx->pdpe = 0;
894ccebe 1022 ret = true;
de5ba8eb 1023 break;
894ccebe
CW
1024 }
1025
3e490042 1026 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
9e89f9ee 1027 pd = pdp->page_directory[idx->pdpe];
7ad47cf2 1028 }
894ccebe 1029
9231da70 1030 kunmap_atomic(vaddr);
9e89f9ee 1031 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
9df15b49 1032 }
894ccebe 1033 } while (1);
9231da70 1034 kunmap_atomic(vaddr);
d1c54acd 1035
894ccebe 1036 return ret;
9df15b49
BW
1037}
1038
894ccebe 1039static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
4a234c5f 1040 struct i915_vma *vma,
894ccebe
CW
1041 enum i915_cache_level cache_level,
1042 u32 unused)
f9b5b782 1043{
17369ba0 1044 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
894ccebe 1045 struct sgt_dma iter = {
4a234c5f 1046 .sg = vma->pages->sgl,
894ccebe
CW
1047 .dma = sg_dma_address(iter.sg),
1048 .max = iter.dma + iter.sg->length,
1049 };
4a234c5f 1050 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
f9b5b782 1051
9e89f9ee
CW
1052 gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
1053 cache_level);
d9ec12f8
MA
1054
1055 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
894ccebe 1056}
de5ba8eb 1057
0a03852e
MA
1058static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
1059 struct i915_page_directory_pointer **pdps,
1060 struct sgt_dma *iter,
1061 enum i915_cache_level cache_level)
1062{
1063 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
1064 u64 start = vma->node.start;
1065 dma_addr_t rem = iter->sg->length;
1066
1067 do {
1068 struct gen8_insert_pte idx = gen8_insert_pte(start);
1069 struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
1070 struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
1071 unsigned int page_size;
17a00cf7 1072 bool maybe_64K = false;
0a03852e
MA
1073 gen8_pte_t encode = pte_encode;
1074 gen8_pte_t *vaddr;
1075 u16 index, max;
1076
1077 if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
1078 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
1079 rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
1080 index = idx.pde;
1081 max = I915_PDES;
1082 page_size = I915_GTT_PAGE_SIZE_2M;
1083
1084 encode |= GEN8_PDE_PS_2M;
1085
1086 vaddr = kmap_atomic_px(pd);
1087 } else {
1088 struct i915_page_table *pt = pd->page_table[idx.pde];
1089
1090 index = idx.pte;
1091 max = GEN8_PTES;
1092 page_size = I915_GTT_PAGE_SIZE;
1093
17a00cf7
MA
1094 if (!index &&
1095 vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
1096 IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1097 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1098 rem >= (max - index) << PAGE_SHIFT))
1099 maybe_64K = true;
1100
0a03852e
MA
1101 vaddr = kmap_atomic_px(pt);
1102 }
1103
1104 do {
1105 GEM_BUG_ON(iter->sg->length < page_size);
1106 vaddr[index++] = encode | iter->dma;
1107
1108 start += page_size;
1109 iter->dma += page_size;
1110 rem -= page_size;
1111 if (iter->dma >= iter->max) {
1112 iter->sg = __sg_next(iter->sg);
1113 if (!iter->sg)
1114 break;
1115
1116 rem = iter->sg->length;
1117 iter->dma = sg_dma_address(iter->sg);
1118 iter->max = iter->dma + rem;
1119
17a00cf7
MA
1120 if (maybe_64K && index < max &&
1121 !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
1122 (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
1123 rem >= (max - index) << PAGE_SHIFT)))
1124 maybe_64K = false;
1125
0a03852e
MA
1126 if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
1127 break;
1128 }
1129 } while (rem >= page_size && index < max);
1130
1131 kunmap_atomic(vaddr);
17a00cf7
MA
1132
1133 /*
1134 * Is it safe to mark the 2M block as 64K? -- Either we have
1135 * filled whole page-table with 64K entries, or filled part of
1136 * it and have reached the end of the sg table and we have
1137 * enough padding.
1138 */
1139 if (maybe_64K &&
1140 (index == max ||
1141 (i915_vm_has_scratch_64K(vma->vm) &&
1142 !iter->sg && IS_ALIGNED(vma->node.start +
1143 vma->node.size,
1144 I915_GTT_PAGE_SIZE_2M)))) {
1145 vaddr = kmap_atomic_px(pd);
1146 vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
1147 kunmap_atomic(vaddr);
d9ec12f8 1148 page_size = I915_GTT_PAGE_SIZE_64K;
17a00cf7 1149 }
d9ec12f8
MA
1150
1151 vma->page_sizes.gtt |= page_size;
0a03852e
MA
1152 } while (iter->sg);
1153}
1154
894ccebe 1155static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
4a234c5f 1156 struct i915_vma *vma,
894ccebe
CW
1157 enum i915_cache_level cache_level,
1158 u32 unused)
1159{
1160 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1161 struct sgt_dma iter = {
4a234c5f 1162 .sg = vma->pages->sgl,
894ccebe
CW
1163 .dma = sg_dma_address(iter.sg),
1164 .max = iter.dma + iter.sg->length,
1165 };
1166 struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
de5ba8eb 1167
0a03852e
MA
1168 if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
1169 gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
1170 } else {
1171 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
1172
1173 while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
1174 &iter, &idx, cache_level))
1175 GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
d9ec12f8
MA
1176
1177 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
0a03852e 1178 }
f9b5b782
MT
1179}
1180
8448661d 1181static void gen8_free_page_tables(struct i915_address_space *vm,
f37c0505 1182 struct i915_page_directory *pd)
7ad47cf2
BW
1183{
1184 int i;
1185
567047be 1186 if (!px_page(pd))
7ad47cf2
BW
1187 return;
1188
fe52e37f
CW
1189 for (i = 0; i < I915_PDES; i++) {
1190 if (pd->page_table[i] != vm->scratch_pt)
1191 free_pt(vm, pd->page_table[i]);
06fda602 1192 }
d7b3de91
BW
1193}
1194
8776f02b
MK
1195static int gen8_init_scratch(struct i915_address_space *vm)
1196{
64c050db 1197 int ret;
8776f02b 1198
8448661d 1199 ret = setup_scratch_page(vm, I915_GFP_DMA);
8bcdd0f7
CW
1200 if (ret)
1201 return ret;
8776f02b 1202
8448661d 1203 vm->scratch_pt = alloc_pt(vm);
8776f02b 1204 if (IS_ERR(vm->scratch_pt)) {
64c050db
MA
1205 ret = PTR_ERR(vm->scratch_pt);
1206 goto free_scratch_page;
8776f02b
MK
1207 }
1208
8448661d 1209 vm->scratch_pd = alloc_pd(vm);
8776f02b 1210 if (IS_ERR(vm->scratch_pd)) {
64c050db
MA
1211 ret = PTR_ERR(vm->scratch_pd);
1212 goto free_pt;
8776f02b
MK
1213 }
1214
1e6437b0 1215 if (use_4lvl(vm)) {
8448661d 1216 vm->scratch_pdp = alloc_pdp(vm);
69ab76fd 1217 if (IS_ERR(vm->scratch_pdp)) {
64c050db
MA
1218 ret = PTR_ERR(vm->scratch_pdp);
1219 goto free_pd;
69ab76fd
MT
1220 }
1221 }
1222
8776f02b
MK
1223 gen8_initialize_pt(vm, vm->scratch_pt);
1224 gen8_initialize_pd(vm, vm->scratch_pd);
1e6437b0 1225 if (use_4lvl(vm))
69ab76fd 1226 gen8_initialize_pdp(vm, vm->scratch_pdp);
8776f02b
MK
1227
1228 return 0;
64c050db
MA
1229
1230free_pd:
8448661d 1231 free_pd(vm, vm->scratch_pd);
64c050db 1232free_pt:
8448661d 1233 free_pt(vm, vm->scratch_pt);
64c050db 1234free_scratch_page:
8448661d 1235 cleanup_scratch_page(vm);
64c050db
MA
1236
1237 return ret;
8776f02b
MK
1238}
1239
650da34c
ZL
1240static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1241{
1e6437b0
MK
1242 struct i915_address_space *vm = &ppgtt->base;
1243 struct drm_i915_private *dev_priv = vm->i915;
650da34c 1244 enum vgt_g2v_type msg;
650da34c
ZL
1245 int i;
1246
1e6437b0
MK
1247 if (use_4lvl(vm)) {
1248 const u64 daddr = px_dma(&ppgtt->pml4);
650da34c 1249
ab75bb5d
VS
1250 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1251 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
650da34c
ZL
1252
1253 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1254 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1255 } else {
e7167769 1256 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1e6437b0 1257 const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
650da34c 1258
ab75bb5d
VS
1259 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1260 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
650da34c
ZL
1261 }
1262
1263 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1264 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1265 }
1266
1267 I915_WRITE(vgtif_reg(g2v_notify), msg);
1268
1269 return 0;
1270}
1271
8776f02b
MK
1272static void gen8_free_scratch(struct i915_address_space *vm)
1273{
1e6437b0 1274 if (use_4lvl(vm))
8448661d
CW
1275 free_pdp(vm, vm->scratch_pdp);
1276 free_pd(vm, vm->scratch_pd);
1277 free_pt(vm, vm->scratch_pt);
1278 cleanup_scratch_page(vm);
8776f02b
MK
1279}
1280
8448661d 1281static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
762d9936 1282 struct i915_page_directory_pointer *pdp)
b45a6715 1283{
3e490042 1284 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
b45a6715
BW
1285 int i;
1286
3e490042 1287 for (i = 0; i < pdpes; i++) {
fe52e37f 1288 if (pdp->page_directory[i] == vm->scratch_pd)
06fda602
BW
1289 continue;
1290
8448661d
CW
1291 gen8_free_page_tables(vm, pdp->page_directory[i]);
1292 free_pd(vm, pdp->page_directory[i]);
7ad47cf2 1293 }
69876bed 1294
8448661d 1295 free_pdp(vm, pdp);
762d9936
MT
1296}
1297
1298static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
1299{
1300 int i;
1301
c5d092a4
CW
1302 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1303 if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
762d9936
MT
1304 continue;
1305
8448661d 1306 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
762d9936
MT
1307 }
1308
8448661d 1309 cleanup_px(&ppgtt->base, &ppgtt->pml4);
762d9936
MT
1310}
1311
1312static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1313{
49d73912 1314 struct drm_i915_private *dev_priv = vm->i915;
e5716f55 1315 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
762d9936 1316
275a991c 1317 if (intel_vgpu_active(dev_priv))
650da34c
ZL
1318 gen8_ppgtt_notify_vgt(ppgtt, false);
1319
1e6437b0 1320 if (use_4lvl(vm))
762d9936 1321 gen8_ppgtt_cleanup_4lvl(ppgtt);
1e6437b0
MK
1322 else
1323 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
d4ec9da0 1324
8776f02b 1325 gen8_free_scratch(vm);
b45a6715
BW
1326}
1327
fe52e37f
CW
1328static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1329 struct i915_page_directory *pd,
1330 u64 start, u64 length)
bf2b4ed2 1331{
d7b2633d 1332 struct i915_page_table *pt;
dd19674b 1333 u64 from = start;
fe52e37f 1334 unsigned int pde;
bf2b4ed2 1335
e8ebd8e2 1336 gen8_for_each_pde(pt, pd, start, length, pde) {
14826673
CW
1337 int count = gen8_pte_count(start, length);
1338
fe52e37f 1339 if (pt == vm->scratch_pt) {
dd19674b
CW
1340 pt = alloc_pt(vm);
1341 if (IS_ERR(pt))
1342 goto unwind;
5441f0cb 1343
b58d4bef 1344 if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
14826673 1345 gen8_initialize_pt(vm, pt);
fe52e37f
CW
1346
1347 gen8_ppgtt_set_pde(vm, pd, pt, pde);
1348 pd->used_pdes++;
bf75d59e 1349 GEM_BUG_ON(pd->used_pdes > I915_PDES);
dd19674b 1350 }
fe52e37f 1351
14826673 1352 pt->used_ptes += count;
7ad47cf2 1353 }
bf2b4ed2 1354 return 0;
7ad47cf2 1355
dd19674b
CW
1356unwind:
1357 gen8_ppgtt_clear_pd(vm, pd, from, start - from);
d7b3de91 1358 return -ENOMEM;
bf2b4ed2
BW
1359}
1360
c5d092a4
CW
1361static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1362 struct i915_page_directory_pointer *pdp,
1363 u64 start, u64 length)
bf2b4ed2 1364{
5441f0cb 1365 struct i915_page_directory *pd;
e2b763ca
CW
1366 u64 from = start;
1367 unsigned int pdpe;
bf2b4ed2
BW
1368 int ret;
1369
e8ebd8e2 1370 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
e2b763ca
CW
1371 if (pd == vm->scratch_pd) {
1372 pd = alloc_pd(vm);
1373 if (IS_ERR(pd))
1374 goto unwind;
5441f0cb 1375
e2b763ca 1376 gen8_initialize_pd(vm, pd);
fe52e37f 1377 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
e2b763ca 1378 pdp->used_pdpes++;
3e490042 1379 GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
75afcf72
CW
1380
1381 mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
e2b763ca
CW
1382 }
1383
1384 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
bf75d59e
CW
1385 if (unlikely(ret))
1386 goto unwind_pd;
fe52e37f 1387 }
33c8819f 1388
d7b3de91 1389 return 0;
bf2b4ed2 1390
bf75d59e
CW
1391unwind_pd:
1392 if (!pd->used_pdes) {
1393 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1394 GEM_BUG_ON(!pdp->used_pdpes);
1395 pdp->used_pdpes--;
1396 free_pd(vm, pd);
1397 }
e2b763ca
CW
1398unwind:
1399 gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
1400 return -ENOMEM;
bf2b4ed2
BW
1401}
1402
c5d092a4
CW
1403static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
1404 u64 start, u64 length)
762d9936 1405{
c5d092a4
CW
1406 return gen8_ppgtt_alloc_pdp(vm,
1407 &i915_vm_to_ppgtt(vm)->pdp, start, length);
1408}
762d9936 1409
c5d092a4
CW
1410static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
1411 u64 start, u64 length)
1412{
1413 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1414 struct i915_pml4 *pml4 = &ppgtt->pml4;
1415 struct i915_page_directory_pointer *pdp;
1416 u64 from = start;
1417 u32 pml4e;
1418 int ret;
762d9936 1419
e8ebd8e2 1420 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
c5d092a4
CW
1421 if (pml4->pdps[pml4e] == vm->scratch_pdp) {
1422 pdp = alloc_pdp(vm);
1423 if (IS_ERR(pdp))
1424 goto unwind;
762d9936 1425
c5d092a4
CW
1426 gen8_initialize_pdp(vm, pdp);
1427 gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
1428 }
762d9936 1429
c5d092a4 1430 ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
bf75d59e
CW
1431 if (unlikely(ret))
1432 goto unwind_pdp;
762d9936
MT
1433 }
1434
762d9936
MT
1435 return 0;
1436
bf75d59e
CW
1437unwind_pdp:
1438 if (!pdp->used_pdpes) {
1439 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1440 free_pdp(vm, pdp);
1441 }
c5d092a4
CW
1442unwind:
1443 gen8_ppgtt_clear_4lvl(vm, from, start - from);
1444 return -ENOMEM;
762d9936
MT
1445}
1446
8448661d
CW
1447static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
1448 struct i915_page_directory_pointer *pdp,
75c7b0b8 1449 u64 start, u64 length,
ea91e401
MT
1450 gen8_pte_t scratch_pte,
1451 struct seq_file *m)
1452{
3e490042 1453 struct i915_address_space *vm = &ppgtt->base;
ea91e401 1454 struct i915_page_directory *pd;
75c7b0b8 1455 u32 pdpe;
ea91e401 1456
e8ebd8e2 1457 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
ea91e401 1458 struct i915_page_table *pt;
75c7b0b8
CW
1459 u64 pd_len = length;
1460 u64 pd_start = start;
1461 u32 pde;
ea91e401 1462
e2b763ca 1463 if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
ea91e401
MT
1464 continue;
1465
1466 seq_printf(m, "\tPDPE #%d\n", pdpe);
e8ebd8e2 1467 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
75c7b0b8 1468 u32 pte;
ea91e401
MT
1469 gen8_pte_t *pt_vaddr;
1470
fe52e37f 1471 if (pd->page_table[pde] == ppgtt->base.scratch_pt)
ea91e401
MT
1472 continue;
1473
9231da70 1474 pt_vaddr = kmap_atomic_px(pt);
ea91e401 1475 for (pte = 0; pte < GEN8_PTES; pte += 4) {
75c7b0b8
CW
1476 u64 va = (pdpe << GEN8_PDPE_SHIFT |
1477 pde << GEN8_PDE_SHIFT |
1478 pte << GEN8_PTE_SHIFT);
ea91e401
MT
1479 int i;
1480 bool found = false;
1481
1482 for (i = 0; i < 4; i++)
1483 if (pt_vaddr[pte + i] != scratch_pte)
1484 found = true;
1485 if (!found)
1486 continue;
1487
1488 seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1489 for (i = 0; i < 4; i++) {
1490 if (pt_vaddr[pte + i] != scratch_pte)
1491 seq_printf(m, " %llx", pt_vaddr[pte + i]);
1492 else
1493 seq_puts(m, " SCRATCH ");
1494 }
1495 seq_puts(m, "\n");
1496 }
ea91e401
MT
1497 kunmap_atomic(pt_vaddr);
1498 }
1499 }
1500}
1501
1502static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1503{
1504 struct i915_address_space *vm = &ppgtt->base;
894ccebe
CW
1505 const gen8_pte_t scratch_pte =
1506 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
381b943b 1507 u64 start = 0, length = ppgtt->base.total;
ea91e401 1508
1e6437b0 1509 if (use_4lvl(vm)) {
75c7b0b8 1510 u64 pml4e;
ea91e401
MT
1511 struct i915_pml4 *pml4 = &ppgtt->pml4;
1512 struct i915_page_directory_pointer *pdp;
1513
e8ebd8e2 1514 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
c5d092a4 1515 if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
ea91e401
MT
1516 continue;
1517
1518 seq_printf(m, " PML4E #%llu\n", pml4e);
8448661d 1519 gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
ea91e401 1520 }
1e6437b0
MK
1521 } else {
1522 gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
ea91e401
MT
1523 }
1524}
1525
e2b763ca 1526static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
331f38e7 1527{
e2b763ca
CW
1528 struct i915_address_space *vm = &ppgtt->base;
1529 struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1530 struct i915_page_directory *pd;
1531 u64 start = 0, length = ppgtt->base.total;
1532 u64 from = start;
1533 unsigned int pdpe;
331f38e7 1534
e2b763ca
CW
1535 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1536 pd = alloc_pd(vm);
1537 if (IS_ERR(pd))
1538 goto unwind;
331f38e7 1539
e2b763ca
CW
1540 gen8_initialize_pd(vm, pd);
1541 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1542 pdp->used_pdpes++;
1543 }
331f38e7 1544
e2b763ca
CW
1545 pdp->used_pdpes++; /* never remove */
1546 return 0;
331f38e7 1547
e2b763ca
CW
1548unwind:
1549 start -= from;
1550 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1551 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1552 free_pd(vm, pd);
1553 }
1554 pdp->used_pdpes = 0;
1555 return -ENOMEM;
331f38e7
ZL
1556}
1557
eb0b44ad 1558/*
f3a964b9
BW
1559 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
1560 * with a net effect resembling a 2-level page table in normal x86 terms. Each
1561 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
1562 * space.
37aca44a 1563 *
f3a964b9 1564 */
5c5f6457 1565static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
37aca44a 1566{
1e6437b0
MK
1567 struct i915_address_space *vm = &ppgtt->base;
1568 struct drm_i915_private *dev_priv = vm->i915;
8776f02b 1569 int ret;
7cb6d7ac 1570
1e6437b0
MK
1571 ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
1572 1ULL << 48 :
1573 1ULL << 32;
1574
8448661d
CW
1575 /* There are only few exceptions for gen >=6. chv and bxt.
1576 * And we are not sure about the latter so play safe for now.
1577 */
1578 if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
1579 ppgtt->base.pt_kmap_wc = true;
1580
66df1014
CW
1581 ret = gen8_init_scratch(&ppgtt->base);
1582 if (ret) {
1583 ppgtt->base.total = 0;
1584 return ret;
1585 }
1586
1e6437b0 1587 if (use_4lvl(vm)) {
8448661d 1588 ret = setup_px(&ppgtt->base, &ppgtt->pml4);
762d9936
MT
1589 if (ret)
1590 goto free_scratch;
6ac18502 1591
69ab76fd
MT
1592 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1593
e7167769 1594 ppgtt->switch_mm = gen8_mm_switch_4lvl;
c5d092a4 1595 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
894ccebe 1596 ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
fe52e37f 1597 ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
762d9936 1598 } else {
fe52e37f 1599 ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
81ba8aef
MT
1600 if (ret)
1601 goto free_scratch;
1602
275a991c 1603 if (intel_vgpu_active(dev_priv)) {
e2b763ca
CW
1604 ret = gen8_preallocate_top_level_pdp(ppgtt);
1605 if (ret) {
1606 __pdp_fini(&ppgtt->pdp);
331f38e7 1607 goto free_scratch;
e2b763ca 1608 }
331f38e7 1609 }
894ccebe 1610
e7167769 1611 ppgtt->switch_mm = gen8_mm_switch_3lvl;
c5d092a4 1612 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
894ccebe 1613 ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
fe52e37f 1614 ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
81ba8aef 1615 }
6ac18502 1616
275a991c 1617 if (intel_vgpu_active(dev_priv))
650da34c
ZL
1618 gen8_ppgtt_notify_vgt(ppgtt, true);
1619
054b9acd
MK
1620 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1621 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1622 ppgtt->base.bind_vma = ppgtt_bind_vma;
fa3f46af
MA
1623 ppgtt->base.set_pages = ppgtt_set_pages;
1624 ppgtt->base.clear_pages = clear_pages;
054b9acd
MK
1625 ppgtt->debug_dump = gen8_dump_ppgtt;
1626
d7b2633d 1627 return 0;
6ac18502
MT
1628
1629free_scratch:
1630 gen8_free_scratch(&ppgtt->base);
1631 return ret;
d7b2633d
MT
1632}
1633
87d60b63
BW
1634static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1635{
87d60b63 1636 struct i915_address_space *vm = &ppgtt->base;
09942c65 1637 struct i915_page_table *unused;
07749ef3 1638 gen6_pte_t scratch_pte;
381b943b
CW
1639 u32 pd_entry, pte, pde;
1640 u32 start = 0, length = ppgtt->base.total;
87d60b63 1641
8bcdd0f7 1642 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
4fb84d99 1643 I915_CACHE_LLC, 0);
87d60b63 1644
731f74c5 1645 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
87d60b63 1646 u32 expected;
07749ef3 1647 gen6_pte_t *pt_vaddr;
567047be 1648 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
09942c65 1649 pd_entry = readl(ppgtt->pd_addr + pde);
87d60b63
BW
1650 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1651
1652 if (pd_entry != expected)
1653 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1654 pde,
1655 pd_entry,
1656 expected);
1657 seq_printf(m, "\tPDE: %x\n", pd_entry);
1658
9231da70 1659 pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
d1c54acd 1660
07749ef3 1661 for (pte = 0; pte < GEN6_PTES; pte+=4) {
87d60b63 1662 unsigned long va =
07749ef3 1663 (pde * PAGE_SIZE * GEN6_PTES) +
87d60b63
BW
1664 (pte * PAGE_SIZE);
1665 int i;
1666 bool found = false;
1667 for (i = 0; i < 4; i++)
1668 if (pt_vaddr[pte + i] != scratch_pte)
1669 found = true;
1670 if (!found)
1671 continue;
1672
1673 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1674 for (i = 0; i < 4; i++) {
1675 if (pt_vaddr[pte + i] != scratch_pte)
1676 seq_printf(m, " %08x", pt_vaddr[pte + i]);
1677 else
1678 seq_puts(m, " SCRATCH ");
1679 }
1680 seq_puts(m, "\n");
1681 }
9231da70 1682 kunmap_atomic(pt_vaddr);
87d60b63
BW
1683 }
1684}
1685
678d96fb 1686/* Write pde (index) from the page directory @pd to the page table @pt */
16a011c8
CW
1687static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
1688 const unsigned int pde,
1689 const struct i915_page_table *pt)
6197349b 1690{
678d96fb 1691 /* Caller needs to make sure the write completes if necessary */
16a011c8
CW
1692 writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1693 ppgtt->pd_addr + pde);
678d96fb 1694}
6197349b 1695
678d96fb
BW
1696/* Write all the page tables found in the ppgtt structure to incrementing page
1697 * directories. */
16a011c8 1698static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
75c7b0b8 1699 u32 start, u32 length)
678d96fb 1700{
ec565b3c 1701 struct i915_page_table *pt;
16a011c8 1702 unsigned int pde;
678d96fb 1703
16a011c8
CW
1704 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
1705 gen6_write_pde(ppgtt, pde, pt);
678d96fb 1706
16a011c8 1707 mark_tlbs_dirty(ppgtt);
dd19674b 1708 wmb();
3e302542
BW
1709}
1710
75c7b0b8 1711static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
3e302542 1712{
dd19674b
CW
1713 GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1714 return ppgtt->pd.base.ggtt_offset << 10;
b4a74e3a
BW
1715}
1716
90252e5c 1717static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1718 struct drm_i915_gem_request *req)
90252e5c 1719{
4a570db5 1720 struct intel_engine_cs *engine = req->engine;
73dec95e 1721 u32 *cs;
90252e5c 1722
90252e5c 1723 /* NB: TLBs must be flushed and invalidated before a switch */
73dec95e
TU
1724 cs = intel_ring_begin(req, 6);
1725 if (IS_ERR(cs))
1726 return PTR_ERR(cs);
90252e5c 1727
73dec95e
TU
1728 *cs++ = MI_LOAD_REGISTER_IMM(2);
1729 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1730 *cs++ = PP_DIR_DCLV_2G;
1731 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1732 *cs++ = get_pd_offset(ppgtt);
1733 *cs++ = MI_NOOP;
1734 intel_ring_advance(req, cs);
90252e5c
BW
1735
1736 return 0;
1737}
1738
48a10389 1739static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1740 struct drm_i915_gem_request *req)
48a10389 1741{
4a570db5 1742 struct intel_engine_cs *engine = req->engine;
73dec95e 1743 u32 *cs;
48a10389 1744
48a10389 1745 /* NB: TLBs must be flushed and invalidated before a switch */
73dec95e
TU
1746 cs = intel_ring_begin(req, 6);
1747 if (IS_ERR(cs))
1748 return PTR_ERR(cs);
1749
1750 *cs++ = MI_LOAD_REGISTER_IMM(2);
1751 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1752 *cs++ = PP_DIR_DCLV_2G;
1753 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1754 *cs++ = get_pd_offset(ppgtt);
1755 *cs++ = MI_NOOP;
1756 intel_ring_advance(req, cs);
48a10389
BW
1757
1758 return 0;
1759}
1760
eeb9488e 1761static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1762 struct drm_i915_gem_request *req)
eeb9488e 1763{
4a570db5 1764 struct intel_engine_cs *engine = req->engine;
8eb95204 1765 struct drm_i915_private *dev_priv = req->i915;
48a10389 1766
e2f80391
TU
1767 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1768 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
eeb9488e
BW
1769 return 0;
1770}
1771
c6be607a 1772static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
eeb9488e 1773{
e2f80391 1774 struct intel_engine_cs *engine;
3b3f1650 1775 enum intel_engine_id id;
3e302542 1776
3b3f1650 1777 for_each_engine(engine, dev_priv, id) {
c6be607a
TU
1778 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1779 GEN8_GFX_PPGTT_48B : 0;
e2f80391 1780 I915_WRITE(RING_MODE_GEN7(engine),
2dba3239 1781 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
eeb9488e 1782 }
eeb9488e 1783}
6197349b 1784
c6be607a 1785static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
3e302542 1786{
e2f80391 1787 struct intel_engine_cs *engine;
75c7b0b8 1788 u32 ecochk, ecobits;
3b3f1650 1789 enum intel_engine_id id;
6197349b 1790
b4a74e3a
BW
1791 ecobits = I915_READ(GAC_ECO_BITS);
1792 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
a65c2fcd 1793
b4a74e3a 1794 ecochk = I915_READ(GAM_ECOCHK);
772c2a51 1795 if (IS_HASWELL(dev_priv)) {
b4a74e3a
BW
1796 ecochk |= ECOCHK_PPGTT_WB_HSW;
1797 } else {
1798 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1799 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1800 }
1801 I915_WRITE(GAM_ECOCHK, ecochk);
a65c2fcd 1802
3b3f1650 1803 for_each_engine(engine, dev_priv, id) {
6197349b 1804 /* GFX_MODE is per-ring on gen7+ */
e2f80391 1805 I915_WRITE(RING_MODE_GEN7(engine),
b4a74e3a 1806 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b 1807 }
b4a74e3a 1808}
6197349b 1809
c6be607a 1810static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
b4a74e3a 1811{
75c7b0b8 1812 u32 ecochk, gab_ctl, ecobits;
a65c2fcd 1813
b4a74e3a
BW
1814 ecobits = I915_READ(GAC_ECO_BITS);
1815 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1816 ECOBITS_PPGTT_CACHE64B);
6197349b 1817
b4a74e3a
BW
1818 gab_ctl = I915_READ(GAB_CTL);
1819 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1820
1821 ecochk = I915_READ(GAM_ECOCHK);
1822 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1823
1824 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b
BW
1825}
1826
1d2a314c 1827/* PPGTT support for Sandybridge/Gen6 and later */
853ba5d2 1828static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
dd19674b 1829 u64 start, u64 length)
1d2a314c 1830{
e5716f55 1831 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
dd19674b
CW
1832 unsigned int first_entry = start >> PAGE_SHIFT;
1833 unsigned int pde = first_entry / GEN6_PTES;
1834 unsigned int pte = first_entry % GEN6_PTES;
1835 unsigned int num_entries = length >> PAGE_SHIFT;
1836 gen6_pte_t scratch_pte =
1837 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
1d2a314c 1838
7bddb01f 1839 while (num_entries) {
dd19674b
CW
1840 struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
1841 unsigned int end = min(pte + num_entries, GEN6_PTES);
1842 gen6_pte_t *vaddr;
7bddb01f 1843
dd19674b 1844 num_entries -= end - pte;
1d2a314c 1845
dd19674b
CW
1846 /* Note that the hw doesn't support removing PDE on the fly
1847 * (they are cached inside the context with no means to
1848 * invalidate the cache), so we can only reset the PTE
1849 * entries back to scratch.
1850 */
1d2a314c 1851
dd19674b
CW
1852 vaddr = kmap_atomic_px(pt);
1853 do {
1854 vaddr[pte++] = scratch_pte;
1855 } while (pte < end);
1856 kunmap_atomic(vaddr);
1d2a314c 1857
dd19674b 1858 pte = 0;
7bddb01f 1859 }
1d2a314c
DV
1860}
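/*
 * Worked example for the index math above (assuming 4KiB pages and 32-bit
 * PTEs, i.e. GEN6_PTES == 1024): clearing start = 8MiB, length = 64KiB gives
 * first_entry = 2048, so pde = 2, pte = 0 and num_entries = 16; all sixteen
 * PTEs live in the third page table and are reset to the scratch PTE within
 * a single kmap_atomic_px() section.
 */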
1861
853ba5d2 1862static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
4a234c5f 1863 struct i915_vma *vma,
75c7b0b8
CW
1864 enum i915_cache_level cache_level,
1865 u32 flags)
def886c3 1866{
e5716f55 1867 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
4a234c5f 1868 unsigned first_entry = vma->node.start >> PAGE_SHIFT;
07749ef3
MT
1869 unsigned act_pt = first_entry / GEN6_PTES;
1870 unsigned act_pte = first_entry % GEN6_PTES;
b31144c0
CW
1871 const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1872 struct sgt_dma iter;
1873 gen6_pte_t *vaddr;
1874
9231da70 1875 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
4a234c5f 1876 iter.sg = vma->pages->sgl;
b31144c0
CW
1877 iter.dma = sg_dma_address(iter.sg);
1878 iter.max = iter.dma + iter.sg->length;
1879 do {
1880 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
6e995e23 1881
b31144c0
CW
1882 iter.dma += PAGE_SIZE;
1883 if (iter.dma == iter.max) {
1884 iter.sg = __sg_next(iter.sg);
1885 if (!iter.sg)
1886 break;
6e995e23 1887
b31144c0
CW
1888 iter.dma = sg_dma_address(iter.sg);
1889 iter.max = iter.dma + iter.sg->length;
1890 }
24f3a8cf 1891
07749ef3 1892 if (++act_pte == GEN6_PTES) {
9231da70
CW
1893 kunmap_atomic(vaddr);
1894 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
6e995e23 1895 act_pte = 0;
def886c3 1896 }
b31144c0 1897 } while (1);
9231da70 1898 kunmap_atomic(vaddr);
d9ec12f8
MA
1899
1900 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
def886c3
DV
1901}
1902
678d96fb 1903static int gen6_alloc_va_range(struct i915_address_space *vm,
dd19674b 1904 u64 start, u64 length)
678d96fb 1905{
e5716f55 1906 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
ec565b3c 1907 struct i915_page_table *pt;
dd19674b
CW
1908 u64 from = start;
1909 unsigned int pde;
1910 bool flush = false;
4933d519 1911
731f74c5 1912 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
dd19674b
CW
1913 if (pt == vm->scratch_pt) {
1914 pt = alloc_pt(vm);
1915 if (IS_ERR(pt))
1916 goto unwind_out;
4933d519 1917
dd19674b
CW
1918 gen6_initialize_pt(vm, pt);
1919 ppgtt->pd.page_table[pde] = pt;
1920 gen6_write_pde(ppgtt, pde, pt);
1921 flush = true;
4933d519 1922 }
4933d519
MT
1923 }
1924
dd19674b
CW
1925 if (flush) {
1926 mark_tlbs_dirty(ppgtt);
1927 wmb();
678d96fb
BW
1928 }
1929
1930 return 0;
4933d519
MT
1931
1932unwind_out:
dd19674b
CW
1933 gen6_ppgtt_clear_range(vm, from, start);
1934 return -ENOMEM;
678d96fb
BW
1935}
1936
8776f02b
MK
1937static int gen6_init_scratch(struct i915_address_space *vm)
1938{
8bcdd0f7 1939 int ret;
8776f02b 1940
8448661d 1941 ret = setup_scratch_page(vm, I915_GFP_DMA);
8bcdd0f7
CW
1942 if (ret)
1943 return ret;
8776f02b 1944
8448661d 1945 vm->scratch_pt = alloc_pt(vm);
8776f02b 1946 if (IS_ERR(vm->scratch_pt)) {
8448661d 1947 cleanup_scratch_page(vm);
8776f02b
MK
1948 return PTR_ERR(vm->scratch_pt);
1949 }
1950
1951 gen6_initialize_pt(vm, vm->scratch_pt);
1952
1953 return 0;
1954}
1955
1956static void gen6_free_scratch(struct i915_address_space *vm)
1957{
8448661d
CW
1958 free_pt(vm, vm->scratch_pt);
1959 cleanup_scratch_page(vm);
8776f02b
MK
1960}
1961
061dd493 1962static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
a00d825d 1963{
e5716f55 1964 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
731f74c5 1965 struct i915_page_directory *pd = &ppgtt->pd;
09942c65 1966 struct i915_page_table *pt;
75c7b0b8 1967 u32 pde;
4933d519 1968
061dd493
DV
1969 drm_mm_remove_node(&ppgtt->node);
1970
731f74c5 1971 gen6_for_all_pdes(pt, pd, pde)
79ab9370 1972 if (pt != vm->scratch_pt)
8448661d 1973 free_pt(vm, pt);
06fda602 1974
8776f02b 1975 gen6_free_scratch(vm);
3440d265
DV
1976}
1977
b146520f 1978static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
3440d265 1979{
8776f02b 1980 struct i915_address_space *vm = &ppgtt->base;
49d73912 1981 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 1982 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f 1983 int ret;
1d2a314c 1984
c8d4c0d6
BW
1985 /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1986 * allocator works in address space sizes, so it's multiplied by page
1987 * size. We allocate at the top of the GTT to avoid fragmentation.
1988 */
72e96d64 1989 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
4933d519 1990
8776f02b
MK
1991 ret = gen6_init_scratch(vm);
1992 if (ret)
1993 return ret;
4933d519 1994
e007b19d
CW
1995 ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
1996 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1997 I915_COLOR_UNEVICTABLE,
1998 0, ggtt->base.total,
1999 PIN_HIGH);
c8c26622 2000 if (ret)
678d96fb
BW
2001 goto err_out;
2002
72e96d64 2003 if (ppgtt->node.start < ggtt->mappable_end)
c8d4c0d6 2004 DRM_DEBUG("Forced to use aperture for PDEs\n");
1d2a314c 2005
52c126ee
CW
2006 ppgtt->pd.base.ggtt_offset =
2007 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
2008
2009 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
2010 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
2011
c8c26622 2012 return 0;
678d96fb
BW
2013
2014err_out:
8776f02b 2015 gen6_free_scratch(vm);
678d96fb 2016 return ret;
b146520f
BW
2017}
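/*
 * Worked example for the offset math above: if the allocator places the node
 * at node.start = 8MiB, then ggtt_offset = (8MiB / PAGE_SIZE) *
 * sizeof(gen6_pte_t) = 2048 * 4 = 8192 bytes, and pd_addr = gsm + 2048
 * entries - i.e. pd_addr points at the GGTT PTE slot that maps the start of
 * the node, which is exactly where gen6_write_pde() then writes the PDEs.
 */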
2018
b146520f
BW
2019static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2020{
2f2cf682 2021 return gen6_ppgtt_allocate_page_directories(ppgtt);
4933d519 2022}
06dc68d6 2023
4933d519 2024static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
75c7b0b8 2025 u64 start, u64 length)
4933d519 2026{
ec565b3c 2027 struct i915_page_table *unused;
75c7b0b8 2028 u32 pde;
1d2a314c 2029
731f74c5 2030 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
79ab9370 2031 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
b146520f
BW
2032}
2033
5c5f6457 2034static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
b146520f 2035{
49d73912 2036 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 2037 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f
BW
2038 int ret;
2039
72e96d64 2040 ppgtt->base.pte_encode = ggtt->base.pte_encode;
5db94019 2041 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
b146520f 2042 ppgtt->switch_mm = gen6_mm_switch;
772c2a51 2043 else if (IS_HASWELL(dev_priv))
b146520f 2044 ppgtt->switch_mm = hsw_mm_switch;
5db94019 2045 else if (IS_GEN7(dev_priv))
b146520f 2046 ppgtt->switch_mm = gen7_mm_switch;
8eb95204 2047 else
b146520f
BW
2048 BUG();
2049
2050 ret = gen6_ppgtt_alloc(ppgtt);
2051 if (ret)
2052 return ret;
2053
09942c65 2054 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
1d2a314c 2055
5c5f6457 2056 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
16a011c8 2057 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
678d96fb 2058
52c126ee
CW
2059 ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
2060 if (ret) {
2061 gen6_ppgtt_cleanup(&ppgtt->base);
2062 return ret;
2063 }
2064
054b9acd
MK
2065 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2066 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
2067 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2068 ppgtt->base.bind_vma = ppgtt_bind_vma;
fa3f46af
MA
2069 ppgtt->base.set_pages = ppgtt_set_pages;
2070 ppgtt->base.clear_pages = clear_pages;
054b9acd
MK
2071 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
2072 ppgtt->debug_dump = gen6_dump_ppgtt;
2073
440fd528 2074 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
b146520f
BW
2075 ppgtt->node.size >> 20,
2076 ppgtt->node.start / PAGE_SIZE);
3440d265 2077
52c126ee
CW
2078 DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
2079 ppgtt->pd.base.ggtt_offset << 10);
fa76da34 2080
b146520f 2081 return 0;
3440d265
DV
2082}
2083
2bfa996e
CW
2084static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
2085 struct drm_i915_private *dev_priv)
3440d265 2086{
49d73912 2087 ppgtt->base.i915 = dev_priv;
8448661d 2088 ppgtt->base.dma = &dev_priv->drm.pdev->dev;
3440d265 2089
2bfa996e 2090 if (INTEL_INFO(dev_priv)->gen < 8)
5c5f6457 2091 return gen6_ppgtt_init(ppgtt);
3ed124b2 2092 else
d7b2633d 2093 return gen8_ppgtt_init(ppgtt);
fa76da34 2094}
c114f76a 2095
a2cad9df 2096static void i915_address_space_init(struct i915_address_space *vm,
80b204bc
CW
2097 struct drm_i915_private *dev_priv,
2098 const char *name)
a2cad9df 2099{
80b204bc 2100 i915_gem_timeline_init(dev_priv, &vm->timeline, name);
47db922f 2101
381b943b 2102 drm_mm_init(&vm->mm, 0, vm->total);
47db922f
CW
2103 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
2104
a2cad9df
MW
2105 INIT_LIST_HEAD(&vm->active_list);
2106 INIT_LIST_HEAD(&vm->inactive_list);
50e046b6 2107 INIT_LIST_HEAD(&vm->unbound_list);
47db922f 2108
a2cad9df 2109 list_add_tail(&vm->global_link, &dev_priv->vm_list);
8448661d 2110 pagevec_init(&vm->free_pages, false);
a2cad9df
MW
2111}
2112
ed9724dd
MA
2113static void i915_address_space_fini(struct i915_address_space *vm)
2114{
8448661d 2115 if (pagevec_count(&vm->free_pages))
66df1014 2116 vm_free_pages_release(vm, true);
8448661d 2117
ed9724dd
MA
2118 i915_gem_timeline_fini(&vm->timeline);
2119 drm_mm_takedown(&vm->mm);
2120 list_del(&vm->global_link);
2121}
2122
c6be607a 2123static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
d5165ebd 2124{
d5165ebd
TG
2125 /* This function is for GTT-related workarounds. It is called on driver
2126 * load and after a GPU reset, so workarounds can be placed here even if
2127 * they get overwritten by a GPU reset.
2128 */
90007bca 2129 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
8652744b 2130 if (IS_BROADWELL(dev_priv))
d5165ebd 2131 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
920a14b2 2132 else if (IS_CHERRYVIEW(dev_priv))
d5165ebd 2133 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
90007bca 2134 else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
d5165ebd 2135 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
9fb5026f 2136 else if (IS_GEN9_LP(dev_priv))
d5165ebd 2137 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
9a6330cf
MA
2138
2139 /*
2140 * To support 64K PTEs we need to first enable the use of the
2141 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
2142 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2143 * shouldn't be needed after GEN10.
2144 *
2145 * 64K pages were first introduced from BDW+, although technically they
2146 * only *work* from gen9+. For pre-BDW we instead have the option for
2147 * 32K pages, but we don't currently have any support for it in our
2148 * driver.
2149 */
2150 if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
2151 INTEL_GEN(dev_priv) <= 10)
2152 I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
2153 I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
2154 GAMW_ECO_ENABLE_64K_IPS_FIELD);
d5165ebd
TG
2155}
2156
c6be607a 2157int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
82460d97 2158{
c6be607a 2159 gtt_write_workarounds(dev_priv);
d5165ebd 2160
671b5013
TD
2161 /* In the case of execlists, PPGTT is enabled by the context descriptor
2162 * and the PDPs are contained within the context itself. We don't
2163 * need to do anything here. */
4f044a88 2164 if (i915_modparams.enable_execlists)
671b5013
TD
2165 return 0;
2166
c6be607a 2167 if (!USES_PPGTT(dev_priv))
82460d97
DV
2168 return 0;
2169
5db94019 2170 if (IS_GEN6(dev_priv))
c6be607a 2171 gen6_ppgtt_enable(dev_priv);
5db94019 2172 else if (IS_GEN7(dev_priv))
c6be607a
TU
2173 gen7_ppgtt_enable(dev_priv);
2174 else if (INTEL_GEN(dev_priv) >= 8)
2175 gen8_ppgtt_enable(dev_priv);
82460d97 2176 else
c6be607a 2177 MISSING_CASE(INTEL_GEN(dev_priv));
82460d97 2178
4ad2fd88
JH
2179 return 0;
2180}
1d2a314c 2181
4d884705 2182struct i915_hw_ppgtt *
2bfa996e 2183i915_ppgtt_create(struct drm_i915_private *dev_priv,
80b204bc
CW
2184 struct drm_i915_file_private *fpriv,
2185 const char *name)
4d884705
DV
2186{
2187 struct i915_hw_ppgtt *ppgtt;
2188 int ret;
2189
2190 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2191 if (!ppgtt)
2192 return ERR_PTR(-ENOMEM);
2193
1188bc66 2194 ret = __hw_ppgtt_init(ppgtt, dev_priv);
4d884705
DV
2195 if (ret) {
2196 kfree(ppgtt);
2197 return ERR_PTR(ret);
2198 }
2199
1188bc66
CW
2200 kref_init(&ppgtt->ref);
2201 i915_address_space_init(&ppgtt->base, dev_priv, name);
2202 ppgtt->base.file = fpriv;
2203
198c974d
DCS
2204 trace_i915_ppgtt_create(&ppgtt->base);
2205
4d884705
DV
2206 return ppgtt;
2207}
2208
0c7eeda1
CW
2209void i915_ppgtt_close(struct i915_address_space *vm)
2210{
2211 struct list_head *phases[] = {
2212 &vm->active_list,
2213 &vm->inactive_list,
2214 &vm->unbound_list,
2215 NULL,
2216 }, **phase;
2217
2218 GEM_BUG_ON(vm->closed);
2219 vm->closed = true;
2220
2221 for (phase = phases; *phase; phase++) {
2222 struct i915_vma *vma, *vn;
2223
2224 list_for_each_entry_safe(vma, vn, *phase, vm_link)
2225 if (!i915_vma_is_closed(vma))
2226 i915_vma_close(vma);
2227 }
2228}
2229
ed9724dd 2230void i915_ppgtt_release(struct kref *kref)
ee960be7
DV
2231{
2232 struct i915_hw_ppgtt *ppgtt =
2233 container_of(kref, struct i915_hw_ppgtt, ref);
2234
198c974d
DCS
2235 trace_i915_ppgtt_release(&ppgtt->base);
2236
50e046b6 2237 /* vmas should already be unbound and destroyed */
ee960be7
DV
2238 WARN_ON(!list_empty(&ppgtt->base.active_list));
2239 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
50e046b6 2240 WARN_ON(!list_empty(&ppgtt->base.unbound_list));
ee960be7
DV
2241
2242 ppgtt->base.cleanup(&ppgtt->base);
8448661d 2243 i915_address_space_fini(&ppgtt->base);
ee960be7
DV
2244 kfree(ppgtt);
2245}
1d2a314c 2246
a81cc00c
BW
2247/* Certain Gen5 chipsets require idling the GPU before
2248 * unmapping anything from the GTT when VT-d is enabled.
2249 */
97d6d7ab 2250static bool needs_idle_maps(struct drm_i915_private *dev_priv)
a81cc00c 2251{
a81cc00c
BW
2252 /* Query intel_iommu to see if we need the workaround. Presumably that
2253 * was loaded first.
2254 */
80debff8 2255 return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
a81cc00c
BW
2256}
2257
dc97997a 2258void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
828c7908 2259{
e2f80391 2260 struct intel_engine_cs *engine;
3b3f1650 2261 enum intel_engine_id id;
828c7908 2262
dc97997a 2263 if (INTEL_INFO(dev_priv)->gen < 6)
828c7908
BW
2264 return;
2265
3b3f1650 2266 for_each_engine(engine, dev_priv, id) {
828c7908 2267 u32 fault_reg;
e2f80391 2268 fault_reg = I915_READ(RING_FAULT_REG(engine));
828c7908
BW
2269 if (fault_reg & RING_FAULT_VALID) {
2270 DRM_DEBUG_DRIVER("Unexpected fault\n"
59a5d290 2271 "\tAddr: 0x%08lx\n"
828c7908
BW
2272 "\tAddress space: %s\n"
2273 "\tSource ID: %d\n"
2274 "\tType: %d\n",
2275 fault_reg & PAGE_MASK,
2276 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2277 RING_FAULT_SRCID(fault_reg),
2278 RING_FAULT_FAULT_TYPE(fault_reg));
e2f80391 2279 I915_WRITE(RING_FAULT_REG(engine),
828c7908
BW
2280 fault_reg & ~RING_FAULT_VALID);
2281 }
2282 }
3b3f1650
AG
2283
2284 /* Engine specific init may not have been done till this point. */
2285 if (dev_priv->engine[RCS])
2286 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
828c7908
BW
2287}
2288
275a991c 2289void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
828c7908 2290{
72e96d64 2291 struct i915_ggtt *ggtt = &dev_priv->ggtt;
828c7908
BW
2292
2293 /* Don't bother messing with faults pre GEN6 as we have little
2294 * documentation supporting that it's a good idea.
2295 */
275a991c 2296 if (INTEL_GEN(dev_priv) < 6)
828c7908
BW
2297 return;
2298
dc97997a 2299 i915_check_and_clear_faults(dev_priv);
828c7908 2300
381b943b 2301 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
91e56499 2302
7c3f86b6 2303 i915_ggtt_invalidate(dev_priv);
828c7908
BW
2304}
2305
03ac84f1
CW
2306int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2307 struct sg_table *pages)
7c2e6fdf 2308{
1a292fa5
CW
2309 do {
2310 if (dma_map_sg(&obj->base.dev->pdev->dev,
2311 pages->sgl, pages->nents,
2312 PCI_DMA_BIDIRECTIONAL))
2313 return 0;
2314
2315 /* If the DMA remap fails, one cause can be that we have
2316 * too many objects pinned in a small remapping table,
2317 * such as swiotlb. Incrementally purge all other objects and
2318 * try again - if there are no more pages to remove from
2319 * the DMA remapper, i915_gem_shrink will return 0.
2320 */
2321 GEM_BUG_ON(obj->mm.pages == pages);
2322 } while (i915_gem_shrink(to_i915(obj->base.dev),
912d572d 2323 obj->base.size >> PAGE_SHIFT, NULL,
1a292fa5
CW
2324 I915_SHRINK_BOUND |
2325 I915_SHRINK_UNBOUND |
2326 I915_SHRINK_ACTIVE));
9da3da66 2327
03ac84f1 2328 return -ENOSPC;
7c2e6fdf
DV
2329}
2330
2c642b07 2331static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
94ec8f61 2332{
94ec8f61 2333 writeq(pte, addr);
94ec8f61
BW
2334}
2335
d6473f56
CW
2336static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2337 dma_addr_t addr,
75c7b0b8 2338 u64 offset,
d6473f56
CW
2339 enum i915_cache_level level,
2340 u32 unused)
2341{
7c3f86b6 2342 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2343 gen8_pte_t __iomem *pte =
7c3f86b6 2344 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2345
4fb84d99 2346 gen8_set_pte(pte, gen8_pte_encode(addr, level));
d6473f56 2347
7c3f86b6 2348 ggtt->invalidate(vm->i915);
d6473f56
CW
2349}
2350
94ec8f61 2351static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2352 struct i915_vma *vma,
75c7b0b8
CW
2353 enum i915_cache_level level,
2354 u32 unused)
94ec8f61 2355{
ce7fda2e 2356 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
85d1225e
DG
2357 struct sgt_iter sgt_iter;
2358 gen8_pte_t __iomem *gtt_entries;
894ccebe 2359 const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
85d1225e 2360 dma_addr_t addr;
be69459a 2361
894ccebe 2362 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
4a234c5f
MA
2363 gtt_entries += vma->node.start >> PAGE_SHIFT;
2364 for_each_sgt_dma(addr, sgt_iter, vma->pages)
894ccebe 2365 gen8_set_pte(gtt_entries++, pte_encode | addr);
85d1225e 2366
894ccebe 2367 wmb();
94ec8f61 2368
94ec8f61
BW
2369 /* This next bit makes the above posting read even more important. We
2370 * want to flush the TLBs only after we're certain all the PTE updates
2371 * have finished.
2372 */
7c3f86b6 2373 ggtt->invalidate(vm->i915);
94ec8f61
BW
2374}
2375
d6473f56
CW
2376static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2377 dma_addr_t addr,
75c7b0b8 2378 u64 offset,
d6473f56
CW
2379 enum i915_cache_level level,
2380 u32 flags)
2381{
7c3f86b6 2382 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2383 gen6_pte_t __iomem *pte =
7c3f86b6 2384 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2385
4fb84d99 2386 iowrite32(vm->pte_encode(addr, level, flags), pte);
d6473f56 2387
7c3f86b6 2388 ggtt->invalidate(vm->i915);
d6473f56
CW
2389}
2390
e76e9aeb
BW
2391/*
2392 * Binds an object into the global gtt with the specified cache level. The object
2393 * will be accessible to the GPU via commands whose operands reference offsets
2394 * within the global GTT as well as accessible by the GPU through the GMADR
2395 * mapped BAR (dev_priv->mm.gtt->gtt).
2396 */
853ba5d2 2397static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2398 struct i915_vma *vma,
75c7b0b8
CW
2399 enum i915_cache_level level,
2400 u32 flags)
e76e9aeb 2401{
ce7fda2e 2402 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
b31144c0 2403 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
4a234c5f 2404 unsigned int i = vma->node.start >> PAGE_SHIFT;
b31144c0 2405 struct sgt_iter iter;
85d1225e 2406 dma_addr_t addr;
4a234c5f 2407 for_each_sgt_dma(addr, iter, vma->pages)
b31144c0
CW
2408 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2409 wmb();
0f9b91c7
BW
2410
2411 /* This next bit makes the above posting read even more important. We
2412 * want to flush the TLBs only after we're certain all the PTE updates
2413 * have finished.
2414 */
7c3f86b6 2415 ggtt->invalidate(vm->i915);
e76e9aeb
BW
2416}
2417
f7770bfd 2418static void nop_clear_range(struct i915_address_space *vm,
75c7b0b8 2419 u64 start, u64 length)
f7770bfd
CW
2420{
2421}
2422
94ec8f61 2423static void gen8_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2424 u64 start, u64 length)
94ec8f61 2425{
ce7fda2e 2426 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2427 unsigned first_entry = start >> PAGE_SHIFT;
2428 unsigned num_entries = length >> PAGE_SHIFT;
894ccebe
CW
2429 const gen8_pte_t scratch_pte =
2430 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2431 gen8_pte_t __iomem *gtt_base =
72e96d64
JL
2432 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2433 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
94ec8f61
BW
2434 int i;
2435
2436 if (WARN(num_entries > max_entries,
2437 "First entry = %d; Num entries = %d (max=%d)\n",
2438 first_entry, num_entries, max_entries))
2439 num_entries = max_entries;
2440
94ec8f61
BW
2441 for (i = 0; i < num_entries; i++)
2442 gen8_set_pte(&gtt_base[i], scratch_pte);
94ec8f61
BW
2443}
2444
0ef34ad6
JB
2445static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2446{
2447 struct drm_i915_private *dev_priv = vm->i915;
2448
2449 /*
2450 * Make sure the internal GAM fifo has been cleared of all GTT
2451 * writes before exiting stop_machine(). This guarantees that
2452 * any aperture accesses waiting to start in another process
2453 * cannot back up behind the GTT writes causing a hang.
2454 * The register can be any arbitrary GAM register.
2455 */
2456 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2457}
2458
2459struct insert_page {
2460 struct i915_address_space *vm;
2461 dma_addr_t addr;
2462 u64 offset;
2463 enum i915_cache_level level;
2464};
2465
2466static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2467{
2468 struct insert_page *arg = _arg;
2469
2470 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2471 bxt_vtd_ggtt_wa(arg->vm);
2472
2473 return 0;
2474}
2475
2476static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2477 dma_addr_t addr,
2478 u64 offset,
2479 enum i915_cache_level level,
2480 u32 unused)
2481{
2482 struct insert_page arg = { vm, addr, offset, level };
2483
2484 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2485}
2486
2487struct insert_entries {
2488 struct i915_address_space *vm;
4a234c5f 2489 struct i915_vma *vma;
0ef34ad6
JB
2490 enum i915_cache_level level;
2491};
2492
2493static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2494{
2495 struct insert_entries *arg = _arg;
2496
4a234c5f 2497 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
0ef34ad6
JB
2498 bxt_vtd_ggtt_wa(arg->vm);
2499
2500 return 0;
2501}
2502
2503static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
4a234c5f 2504 struct i915_vma *vma,
0ef34ad6
JB
2505 enum i915_cache_level level,
2506 u32 unused)
2507{
17369ba0 2508 struct insert_entries arg = { vm, vma, level };
0ef34ad6
JB
2509
2510 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2511}
2512
2513struct clear_range {
2514 struct i915_address_space *vm;
2515 u64 start;
2516 u64 length;
2517};
2518
2519static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2520{
2521 struct clear_range *arg = _arg;
2522
2523 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2524 bxt_vtd_ggtt_wa(arg->vm);
2525
2526 return 0;
2527}
2528
2529static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2530 u64 start,
2531 u64 length)
2532{
2533 struct clear_range arg = { vm, start, length };
2534
2535 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2536}
2537
853ba5d2 2538static void gen6_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2539 u64 start, u64 length)
7faf1ab2 2540{
ce7fda2e 2541 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2542 unsigned first_entry = start >> PAGE_SHIFT;
2543 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2544 gen6_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2545 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2546 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
7faf1ab2
DV
2547 int i;
2548
2549 if (WARN(num_entries > max_entries,
2550 "First entry = %d; Num entries = %d (max=%d)\n",
2551 first_entry, num_entries, max_entries))
2552 num_entries = max_entries;
2553
8bcdd0f7 2554 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
4fb84d99 2555 I915_CACHE_LLC, 0);
828c7908 2556
7faf1ab2
DV
2557 for (i = 0; i < num_entries; i++)
2558 iowrite32(scratch_pte, &gtt_base[i]);
7faf1ab2
DV
2559}
2560
d6473f56
CW
2561static void i915_ggtt_insert_page(struct i915_address_space *vm,
2562 dma_addr_t addr,
75c7b0b8 2563 u64 offset,
d6473f56
CW
2564 enum i915_cache_level cache_level,
2565 u32 unused)
2566{
d6473f56
CW
2567 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2568 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
d6473f56
CW
2569
2570 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
d6473f56
CW
2571}
2572
d369d2d9 2573static void i915_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2574 struct i915_vma *vma,
75c7b0b8
CW
2575 enum i915_cache_level cache_level,
2576 u32 unused)
7faf1ab2
DV
2577{
2578 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2579 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2580
4a234c5f
MA
2581 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2582 flags);
7faf1ab2
DV
2583}
2584
853ba5d2 2585static void i915_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2586 u64 start, u64 length)
7faf1ab2 2587{
2eedfc7d 2588 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
7faf1ab2
DV
2589}
2590
70b9f6f8
DV
2591static int ggtt_bind_vma(struct i915_vma *vma,
2592 enum i915_cache_level cache_level,
2593 u32 flags)
0a878716 2594{
49d73912 2595 struct drm_i915_private *i915 = vma->vm->i915;
0a878716 2596 struct drm_i915_gem_object *obj = vma->obj;
ba7a5741 2597 u32 pte_flags;
0a878716 2598
0a878716 2599 /* Currently applicable only to VLV */
ba7a5741 2600 pte_flags = 0;
0a878716
DV
2601 if (obj->gt_ro)
2602 pte_flags |= PTE_READ_ONLY;
2603
9c870d03 2604 intel_runtime_pm_get(i915);
4a234c5f 2605 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
9c870d03 2606 intel_runtime_pm_put(i915);
0a878716 2607
d9ec12f8
MA
2608 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2609
0a878716
DV
2610 /*
2611 * Without aliasing PPGTT there's no difference between
2612 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2613 * upgrade to both bound if we bind either to avoid double-binding.
2614 */
3272db53 2615 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
0a878716
DV
2616
2617 return 0;
2618}
2619
cbc4e9e6
CW
2620static void ggtt_unbind_vma(struct i915_vma *vma)
2621{
2622 struct drm_i915_private *i915 = vma->vm->i915;
2623
2624 intel_runtime_pm_get(i915);
2625 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2626 intel_runtime_pm_put(i915);
2627}
2628
0a878716
DV
2629static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2630 enum i915_cache_level cache_level,
2631 u32 flags)
d5bd1449 2632{
49d73912 2633 struct drm_i915_private *i915 = vma->vm->i915;
321d178e 2634 u32 pte_flags;
ff685975 2635 int ret;
70b9f6f8 2636
24f3a8cf 2637 /* Currently applicable only to VLV */
321d178e
CW
2638 pte_flags = 0;
2639 if (vma->obj->gt_ro)
f329f5f6 2640 pte_flags |= PTE_READ_ONLY;
24f3a8cf 2641
ff685975
CW
2642 if (flags & I915_VMA_LOCAL_BIND) {
2643 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2644
1f23475c
MA
2645 if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2646 appgtt->base.allocate_va_range) {
ff685975
CW
2647 ret = appgtt->base.allocate_va_range(&appgtt->base,
2648 vma->node.start,
d567232c 2649 vma->size);
ff685975 2650 if (ret)
fa3f46af 2651 return ret;
ff685975
CW
2652 }
2653
4a234c5f
MA
2654 appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
2655 pte_flags);
ff685975
CW
2656 }
2657
3272db53 2658 if (flags & I915_VMA_GLOBAL_BIND) {
9c870d03 2659 intel_runtime_pm_get(i915);
4a234c5f 2660 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
9c870d03 2661 intel_runtime_pm_put(i915);
6f65e29a 2662 }
d5bd1449 2663
70b9f6f8 2664 return 0;
d5bd1449
CW
2665}
2666
cbc4e9e6 2667static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
74163907 2668{
49d73912 2669 struct drm_i915_private *i915 = vma->vm->i915;
6f65e29a 2670
9c870d03
CW
2671 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2672 intel_runtime_pm_get(i915);
cbc4e9e6 2673 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
9c870d03
CW
2674 intel_runtime_pm_put(i915);
2675 }
06615ee5 2676
cbc4e9e6
CW
2677 if (vma->flags & I915_VMA_LOCAL_BIND) {
2678 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2679
2680 vm->clear_range(vm, vma->node.start, vma->size);
2681 }
74163907
DV
2682}
2683
03ac84f1
CW
2684void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2685 struct sg_table *pages)
7c2e6fdf 2686{
52a05c30
DW
2687 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2688 struct device *kdev = &dev_priv->drm.pdev->dev;
307dc25b 2689 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5c042287 2690
307dc25b 2691 if (unlikely(ggtt->do_idle_maps)) {
228ec87c 2692 if (i915_gem_wait_for_idle(dev_priv, 0)) {
307dc25b
CW
2693 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2694 /* Wait a bit, in hopes it avoids the hang */
2695 udelay(10);
2696 }
2697 }
5c042287 2698
03ac84f1 2699 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
7c2e6fdf 2700}
644ec02b 2701
fa3f46af
MA
2702static int ggtt_set_pages(struct i915_vma *vma)
2703{
2704 int ret;
2705
2706 GEM_BUG_ON(vma->pages);
2707
2708 ret = i915_get_ggtt_vma_pages(vma);
2709 if (ret)
2710 return ret;
2711
7464284b
MA
2712 vma->page_sizes = vma->obj->mm.page_sizes;
2713
fa3f46af
MA
2714 return 0;
2715}
2716
45b186f1 2717static void i915_gtt_color_adjust(const struct drm_mm_node *node,
42d6ab48 2718 unsigned long color,
440fd528
TR
2719 u64 *start,
2720 u64 *end)
42d6ab48 2721{
a6508ded 2722 if (node->allocated && node->color != color)
f51455d4 2723 *start += I915_GTT_PAGE_SIZE;
42d6ab48 2724
a6508ded
CW
2725 /* Also leave a space between the unallocated reserved node after the
2726 * GTT and any objects within the GTT, i.e. we use the color adjustment
2727 * to insert a guard page to prevent prefetches crossing over the
2728 * GTT boundary.
2729 */
b44f97fd 2730 node = list_next_entry(node, node_list);
a6508ded 2731 if (node->color != color)
f51455d4 2732 *end -= I915_GTT_PAGE_SIZE;
42d6ab48 2733}
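/*
 * Worked example of the colour adjustment above: with a 4KiB
 * I915_GTT_PAGE_SIZE, a hole following an allocated node of a different
 * colour loses one page at its start, and one page at its end if the next
 * node's colour differs too - so anything placed there keeps a 4KiB guard
 * page towards each differently-coloured neighbour.
 */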
fbe5d36e 2734
6cde9a02
CW
2735int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2736{
2737 struct i915_ggtt *ggtt = &i915->ggtt;
2738 struct i915_hw_ppgtt *ppgtt;
2739 int err;
2740
57202f47 2741 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
1188bc66
CW
2742 if (IS_ERR(ppgtt))
2743 return PTR_ERR(ppgtt);
6cde9a02 2744
e565ceb0
CW
2745 if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2746 err = -ENODEV;
2747 goto err_ppgtt;
2748 }
2749
6cde9a02 2750 if (ppgtt->base.allocate_va_range) {
e565ceb0
CW
2751 /* Note we only pre-allocate as far as the end of the global
2752 * GTT. On 48b / 4-level page-tables, the difference is very,
2753 * very significant! We have to preallocate as GVT/vgpu does
2754 * not like the page directory disappearing.
2755 */
6cde9a02 2756 err = ppgtt->base.allocate_va_range(&ppgtt->base,
e565ceb0 2757 0, ggtt->base.total);
6cde9a02 2758 if (err)
1188bc66 2759 goto err_ppgtt;
6cde9a02
CW
2760 }
2761
6cde9a02 2762 i915->mm.aliasing_ppgtt = ppgtt;
cbc4e9e6 2763
6cde9a02
CW
2764 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2765 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2766
cbc4e9e6
CW
2767 WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2768 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2769
6cde9a02
CW
2770 return 0;
2771
6cde9a02 2772err_ppgtt:
1188bc66 2773 i915_ppgtt_put(ppgtt);
6cde9a02
CW
2774 return err;
2775}
2776
2777void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2778{
2779 struct i915_ggtt *ggtt = &i915->ggtt;
2780 struct i915_hw_ppgtt *ppgtt;
2781
2782 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2783 if (!ppgtt)
2784 return;
2785
1188bc66 2786 i915_ppgtt_put(ppgtt);
6cde9a02
CW
2787
2788 ggtt->base.bind_vma = ggtt_bind_vma;
cbc4e9e6 2789 ggtt->base.unbind_vma = ggtt_unbind_vma;
6cde9a02
CW
2790}
2791
f6b9d5ca 2792int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
644ec02b 2793{
e78891ca
BW
2794 /* Let GEM manage all of the aperture.
2795 *
2796 * However, leave one page at the end still bound to the scratch page.
2797 * There are a number of places where the hardware apparently prefetches
2798 * past the end of the object, and we've seen multiple hangs with the
2799 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2800 * aperture. One page should be enough to keep any prefetching inside
2801 * of the aperture.
2802 */
72e96d64 2803 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ed2f3452 2804 unsigned long hole_start, hole_end;
f6b9d5ca 2805 struct drm_mm_node *entry;
fa76da34 2806 int ret;
644ec02b 2807
b02d22a3
ZW
2808 ret = intel_vgt_balloon(dev_priv);
2809 if (ret)
2810 return ret;
5dda8fa3 2811
95374d75 2812 /* Reserve a mappable slot for our lockless error capture */
4e64e553
CW
2813 ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2814 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2815 0, ggtt->mappable_end,
2816 DRM_MM_INSERT_LOW);
95374d75
CW
2817 if (ret)
2818 return ret;
2819
ed2f3452 2820 /* Clear any non-preallocated blocks */
72e96d64 2821 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
ed2f3452
CW
2822 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2823 hole_start, hole_end);
72e96d64 2824 ggtt->base.clear_range(&ggtt->base, hole_start,
4fb84d99 2825 hole_end - hole_start);
ed2f3452
CW
2826 }
2827
2828 /* And finally clear the reserved guard page */
f6b9d5ca 2829 ggtt->base.clear_range(&ggtt->base,
4fb84d99 2830 ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
6c5566a8 2831
97d6d7ab 2832 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
6cde9a02 2833 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
95374d75 2834 if (ret)
6cde9a02 2835 goto err;
fa76da34
DV
2836 }
2837
6c5566a8 2838 return 0;
95374d75 2839
95374d75
CW
2840err:
2841 drm_mm_remove_node(&ggtt->error_capture);
2842 return ret;
e76e9aeb
BW
2843}
2844
d85489d3
JL
2845/**
2846 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
97d6d7ab 2847 * @dev_priv: i915 device
d85489d3 2848 */
97d6d7ab 2849void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
90d0a0e8 2850{
72e96d64 2851 struct i915_ggtt *ggtt = &dev_priv->ggtt;
94d4a2a9 2852 struct i915_vma *vma, *vn;
66df1014 2853 struct pagevec *pvec;
94d4a2a9
CW
2854
2855 ggtt->base.closed = true;
2856
2857 mutex_lock(&dev_priv->drm.struct_mutex);
2858 WARN_ON(!list_empty(&ggtt->base.active_list));
2859 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2860 WARN_ON(i915_vma_unbind(vma));
2861 mutex_unlock(&dev_priv->drm.struct_mutex);
90d0a0e8 2862
97d6d7ab 2863 i915_gem_cleanup_stolen(&dev_priv->drm);
a4eba47b 2864
1188bc66
CW
2865 mutex_lock(&dev_priv->drm.struct_mutex);
2866 i915_gem_fini_aliasing_ppgtt(dev_priv);
2867
95374d75
CW
2868 if (drm_mm_node_allocated(&ggtt->error_capture))
2869 drm_mm_remove_node(&ggtt->error_capture);
2870
72e96d64 2871 if (drm_mm_initialized(&ggtt->base.mm)) {
b02d22a3 2872 intel_vgt_deballoon(dev_priv);
ed9724dd 2873 i915_address_space_fini(&ggtt->base);
90d0a0e8
DV
2874 }
2875
72e96d64 2876 ggtt->base.cleanup(&ggtt->base);
66df1014
CW
2877
2878 pvec = &dev_priv->mm.wc_stash;
2879 if (pvec->nr) {
2880 set_pages_array_wb(pvec->pages, pvec->nr);
2881 __pagevec_release(pvec);
2882 }
2883
1188bc66 2884 mutex_unlock(&dev_priv->drm.struct_mutex);
f6b9d5ca
CW
2885
2886 arch_phys_wc_del(ggtt->mtrr);
f7bbe788 2887 io_mapping_fini(&ggtt->mappable);
90d0a0e8 2888}
70e32544 2889
2c642b07 2890static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2891{
2892 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2893 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2894 return snb_gmch_ctl << 20;
2895}
2896
2c642b07 2897static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
9459d252
BW
2898{
2899 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2900 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2901 if (bdw_gmch_ctl)
2902 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
562d55d9
BW
2903
2904#ifdef CONFIG_X86_32
2905 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2906 if (bdw_gmch_ctl > 4)
2907 bdw_gmch_ctl = 4;
2908#endif
2909
9459d252
BW
2910 return bdw_gmch_ctl << 20;
2911}
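/*
 * Worked example for the GGMS decode above: a raw field value of 3 yields
 * bdw_gmch_ctl = 1 << 3 = 8, i.e. 8MiB of GGTT page-table space. With 8-byte
 * gen8 PTEs that is later turned into (8MiB / 8) << PAGE_SHIFT = 4GiB of
 * addressable GGTT in gen8_gmch_probe().
 */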
2912
2c642b07 2913static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
d7f25f23
DL
2914{
2915 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2916 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2917
2918 if (gmch_ctrl)
2919 return 1 << (20 + gmch_ctrl);
2920
2921 return 0;
2922}
2923
2c642b07 2924static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2925{
2926 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2927 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
a92d1a91 2928 return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
e76e9aeb
BW
2929}
2930
2c642b07 2931static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
9459d252
BW
2932{
2933 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2934 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
a92d1a91 2935 return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
9459d252
BW
2936}
2937
d7f25f23
DL
2938static size_t chv_get_stolen_size(u16 gmch_ctrl)
2939{
2940 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2941 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2942
2943 /*
2944 * 0x0 to 0x10: 32MB increments starting at 0MB
2945 * 0x11 to 0x16: 4MB increments starting at 8MB
2946 * 0x17 to 0x1d: 4MB increments starting at 36MB
2947 */
2948 if (gmch_ctrl < 0x11)
a92d1a91 2949 return (size_t)gmch_ctrl << 25;
d7f25f23 2950 else if (gmch_ctrl < 0x17)
a92d1a91 2951 return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
d7f25f23 2952 else
a92d1a91 2953 return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
d7f25f23
DL
2954}
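/*
 * Worked examples for the CHV stolen-size decode above: gmch_ctrl = 0x10
 * gives 0x10 << 25 = 512MiB (last of the 32MB steps), 0x11 gives
 * (0x11 - 0x11 + 2) << 22 = 8MiB (first of the 4MB steps), and 0x17 gives
 * (0x17 - 0x17 + 9) << 22 = 36MiB, matching the ranges in the comment.
 */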
2955
66375014
DL
2956static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2957{
2958 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2959 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2960
2961 if (gen9_gmch_ctl < 0xf0)
a92d1a91 2962 return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
66375014
DL
2963 else
2964 /* 4MB increments starting at 0xf0 for 4MB */
a92d1a91 2965 return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
66375014
DL
2966}
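/*
 * Worked examples for the gen9 decode above: gen9_gmch_ctl = 0x10 is still in
 * 32MB units, so 0x10 << 25 = 512MiB, while 0xf1 falls in the 4MB range and
 * gives (0xf1 - 0xf0 + 1) << 22 = 8MiB.
 */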
2967
34c998b4 2968static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
63340133 2969{
49d73912
CW
2970 struct drm_i915_private *dev_priv = ggtt->base.i915;
2971 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2972 phys_addr_t phys_addr;
8bcdd0f7 2973 int ret;
63340133
BW
2974
2975 /* For Modern GENs the PTEs and register space are split in the BAR */
34c998b4 2976 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
63340133 2977
2a073f89 2978 /*
385db982
RV
2979 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
2980 * will be dropped. For WC mappings in general we have 64 byte burst
2981 * writes when the WC buffer is flushed, so we can't use it, but have to
2a073f89
ID
2982 * resort to an uncached mapping. The WC issue is easily caught by the
2983 * readback check when writing GTT PTE entries.
2984 */
385db982 2985 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
34c998b4 2986 ggtt->gsm = ioremap_nocache(phys_addr, size);
2a073f89 2987 else
34c998b4 2988 ggtt->gsm = ioremap_wc(phys_addr, size);
72e96d64 2989 if (!ggtt->gsm) {
34c998b4 2990 DRM_ERROR("Failed to map the ggtt page table\n");
63340133
BW
2991 return -ENOMEM;
2992 }
2993
8448661d 2994 ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
8bcdd0f7 2995 if (ret) {
63340133
BW
2996 DRM_ERROR("Scratch setup failed\n");
2997 /* iounmap will also get called at remove, but meh */
72e96d64 2998 iounmap(ggtt->gsm);
8bcdd0f7 2999 return ret;
63340133
BW
3000 }
3001
4ad2af1e 3002 return 0;
63340133
BW
3003}
3004
4395890a
ZW
3005static struct intel_ppat_entry *
3006__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
4e34935f 3007{
4395890a
ZW
3008 struct intel_ppat_entry *entry = &ppat->entries[index];
3009
3010 GEM_BUG_ON(index >= ppat->max_entries);
3011 GEM_BUG_ON(test_bit(index, ppat->used));
3012
3013 entry->ppat = ppat;
3014 entry->value = value;
3015 kref_init(&entry->ref);
3016 set_bit(index, ppat->used);
3017 set_bit(index, ppat->dirty);
3018
3019 return entry;
3020}
3021
3022static void __free_ppat_entry(struct intel_ppat_entry *entry)
3023{
3024 struct intel_ppat *ppat = entry->ppat;
3025 unsigned int index = entry - ppat->entries;
3026
3027 GEM_BUG_ON(index >= ppat->max_entries);
3028 GEM_BUG_ON(!test_bit(index, ppat->used));
3029
3030 entry->value = ppat->clear_value;
3031 clear_bit(index, ppat->used);
3032 set_bit(index, ppat->dirty);
3033}
3034
3035/**
3036 * intel_ppat_get - get a usable PPAT entry
3037 * @i915: i915 device instance
3038 * @value: the PPAT value required by the caller
3039 *
3040 * The function searches for an existing PPAT entry that matches the
3041 * required value. If one matches perfectly, that existing entry is used. If
3042 * only a partial match is found, it checks whether a free PPAT index is
3043 * available. If so, it allocates a new PPAT index for the required value,
3044 * updates the hardware and uses the new entry. If not, the partially
3045 * matched entry is used.
3046 */
3047const struct intel_ppat_entry *
3048intel_ppat_get(struct drm_i915_private *i915, u8 value)
3049{
3050 struct intel_ppat *ppat = &i915->ppat;
3051 struct intel_ppat_entry *entry;
3052 unsigned int scanned, best_score;
3053 int i;
3054
3055 GEM_BUG_ON(!ppat->max_entries);
3056
3057 scanned = best_score = 0;
3058 for_each_set_bit(i, ppat->used, ppat->max_entries) {
3059 unsigned int score;
3060
3061 score = ppat->match(ppat->entries[i].value, value);
3062 if (score > best_score) {
3063 entry = &ppat->entries[i];
3064 if (score == INTEL_PPAT_PERFECT_MATCH) {
3065 kref_get(&entry->ref);
3066 return entry;
3067 }
3068 best_score = score;
3069 }
3070 scanned++;
3071 }
3072
3073 if (scanned == ppat->max_entries) {
3074 if (!best_score)
3075 return ERR_PTR(-ENOSPC);
3076
3077 kref_get(&entry->ref);
3078 return entry;
3079 }
3080
3081 i = find_first_zero_bit(ppat->used, ppat->max_entries);
3082 entry = __alloc_ppat_entry(ppat, i, value);
3083 ppat->update_hw(i915);
3084 return entry;
3085}
3086
3087static void release_ppat(struct kref *kref)
3088{
3089 struct intel_ppat_entry *entry =
3090 container_of(kref, struct intel_ppat_entry, ref);
3091 struct drm_i915_private *i915 = entry->ppat->i915;
3092
3093 __free_ppat_entry(entry);
3094 entry->ppat->update_hw(i915);
3095}
3096
3097/**
3098 * intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
3099 * @entry: an intel PPAT entry
3100 *
3101 * Put back the PPAT entry got from intel_ppat_get(). If the PPAT index of the
3102 * entry is dynamically allocated, its reference count will be decreased. Once
3103 * the reference count drops to zero, the PPAT index becomes free again.
3104 */
3105void intel_ppat_put(const struct intel_ppat_entry *entry)
3106{
3107 struct intel_ppat *ppat = entry->ppat;
3108 unsigned int index = entry - ppat->entries;
3109
3110 GEM_BUG_ON(!ppat->max_entries);
3111
3112 kref_put(&ppat->entries[index].ref, release_ppat);
3113}
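/*
 * Minimal usage sketch for the get/put pair above. It is not called anywhere
 * in the driver; the function name and the example PPAT value are purely
 * illustrative. A caller that needs a PPAT index for a given cacheability
 * value pairs intel_ppat_get() with intel_ppat_put() once it is done.
 */
static int __maybe_unused example_get_and_put_ppat(struct drm_i915_private *i915)
{
	const struct intel_ppat_entry *entry;

	entry = intel_ppat_get(i915, GEN8_PPAT_WB | GEN8_PPAT_LLC);
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	/* ... program the hardware with index (entry - entry->ppat->entries) ... */

	intel_ppat_put(entry);
	return 0;
}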
3114
3115static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
3116{
3117 struct intel_ppat *ppat = &dev_priv->ppat;
3118 int i;
3119
3120 for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
3121 I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
3122 clear_bit(i, ppat->dirty);
3123 }
3124}
3125
3126static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
3127{
3128 struct intel_ppat *ppat = &dev_priv->ppat;
3129 u64 pat = 0;
3130 int i;
3131
3132 for (i = 0; i < ppat->max_entries; i++)
3133 pat |= GEN8_PPAT(i, ppat->entries[i].value);
3134
3135 bitmap_clear(ppat->dirty, 0, ppat->max_entries);
3136
3137 I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
3138 I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
3139}
3140
3141static unsigned int bdw_private_pat_match(u8 src, u8 dst)
3142{
3143 unsigned int score = 0;
3144 enum {
3145 AGE_MATCH = BIT(0),
3146 TC_MATCH = BIT(1),
3147 CA_MATCH = BIT(2),
3148 };
3149
3150 /* Cache attribute has to be matched. */
1298d51c 3151 if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
4395890a
ZW
3152 return 0;
3153
3154 score |= CA_MATCH;
3155
3156 if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
3157 score |= TC_MATCH;
3158
3159 if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
3160 score |= AGE_MATCH;
3161
3162 if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
3163 return INTEL_PPAT_PERFECT_MATCH;
3164
3165 return score;
3166}
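/*
 * Worked example of the scoring above: if the cache attribute
 * (GEN8_PPAT_GET_CA) differs, the score is 0 and the entry is never a
 * candidate; if CA and the target cache match but the age does not, the
 * score is CA_MATCH | TC_MATCH, so intel_ppat_get() prefers it over a
 * CA-only match while still looking for a perfect one.
 */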
3167
3168static unsigned int chv_private_pat_match(u8 src, u8 dst)
3169{
3170 return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
3171 INTEL_PPAT_PERFECT_MATCH : 0;
3172}
3173
3174static void cnl_setup_private_ppat(struct intel_ppat *ppat)
3175{
3176 ppat->max_entries = 8;
3177 ppat->update_hw = cnl_private_pat_update_hw;
3178 ppat->match = bdw_private_pat_match;
3179 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3180
4e34935f 3181 /* XXX: spec is unclear if this is still needed for CNL+ */
4395890a
ZW
3182 if (!USES_PPGTT(ppat->i915)) {
3183 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
4e34935f
RV
3184 return;
3185 }
3186
4395890a
ZW
3187 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
3188 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
3189 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
3190 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
3191 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3192 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3193 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3194 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
4e34935f
RV
3195}
3196
fbe5d36e
BW
3197/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
3198 * bits. When using advanced contexts each context stores its own PAT, but
3199 * writing this data shouldn't be harmful even in those cases. */
4395890a 3200static void bdw_setup_private_ppat(struct intel_ppat *ppat)
fbe5d36e 3201{
4395890a
ZW
3202 ppat->max_entries = 8;
3203 ppat->update_hw = bdw_private_pat_update_hw;
3204 ppat->match = bdw_private_pat_match;
3205 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
fbe5d36e 3206
4395890a 3207 if (!USES_PPGTT(ppat->i915)) {
d6a8b72e
RV
3208 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3209 * so RTL will always use the value corresponding to
3210 * pat_sel = 000".
3211 * So let's disable cache for GGTT to avoid screen corruptions.
3212 * MOCS still can be used though.
3213 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3214 * before this patch, i.e. the same uncached + snooping access
3215 * like on gen6/7 seems to be in effect.
3216 * - So this just fixes blitter/render access. Again it looks
3217 * like it's not just uncached access, but uncached + snooping.
3218 * So we can still hold onto all our assumptions wrt cpu
3219 * clflushing on LLC machines.
3220 */
4395890a
ZW
3221 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3222 return;
3223 }
d6a8b72e 3224
4395890a
ZW
3225 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
3226 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
3227 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
3228 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
3229 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3230 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3231 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3232 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
fbe5d36e
BW
3233}
3234
4395890a 3235static void chv_setup_private_ppat(struct intel_ppat *ppat)
ee0ce478 3236{
4395890a
ZW
3237 ppat->max_entries = 8;
3238 ppat->update_hw = bdw_private_pat_update_hw;
3239 ppat->match = chv_private_pat_match;
3240 ppat->clear_value = CHV_PPAT_SNOOP;
ee0ce478
VS
3241
3242 /*
3243 * Map WB on BDW to snooped on CHV.
3244 *
3245 * Only the snoop bit has meaning for CHV, the rest is
3246 * ignored.
3247 *
cf3d262e
VS
3248 * The hardware will never snoop for certain types of accesses:
3249 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3250 * - PPGTT page tables
3251 * - some other special cycles
3252 *
3253 * As with BDW, we also need to consider the following for GT accesses:
3254 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3255 * so RTL will always use the value corresponding to
3256 * pat_sel = 000".
3257 * Which means we must set the snoop bit in PAT entry 0
3258 * in order to keep the global status page working.
ee0ce478 3259 */
ee0ce478 3260
4395890a
ZW
3261 __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
3262 __alloc_ppat_entry(ppat, 1, 0);
3263 __alloc_ppat_entry(ppat, 2, 0);
3264 __alloc_ppat_entry(ppat, 3, 0);
3265 __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3266 __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3267 __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3268 __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
ee0ce478
VS
3269}
3270
34c998b4
CW
3271static void gen6_gmch_remove(struct i915_address_space *vm)
3272{
3273 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3274
3275 iounmap(ggtt->gsm);
8448661d 3276 cleanup_scratch_page(vm);
34c998b4
CW
3277}
3278
36e16c49
ZW
3279static void setup_private_pat(struct drm_i915_private *dev_priv)
3280{
4395890a
ZW
3281 struct intel_ppat *ppat = &dev_priv->ppat;
3282 int i;
3283
3284 ppat->i915 = dev_priv;
3285
36e16c49 3286 if (INTEL_GEN(dev_priv) >= 10)
4395890a 3287 cnl_setup_private_ppat(ppat);
36e16c49 3288 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
4395890a 3289 chv_setup_private_ppat(ppat);
36e16c49 3290 else
4395890a
ZW
3291 bdw_setup_private_ppat(ppat);
3292
3293 GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3294
3295 for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3296 ppat->entries[i].value = ppat->clear_value;
3297 ppat->entries[i].ppat = ppat;
3298 set_bit(i, ppat->dirty);
3299 }
3300
3301 ppat->update_hw(dev_priv);
36e16c49
ZW
3302}
3303
d507d735 3304static int gen8_gmch_probe(struct i915_ggtt *ggtt)
63340133 3305{
49d73912 3306 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 3307 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 3308 unsigned int size;
63340133 3309 u16 snb_gmch_ctl;
4519290a 3310 int err;
63340133
BW
3311
3312 /* TODO: We're not aware of mappable constraints on gen8 yet */
97d6d7ab
CW
3313 ggtt->mappable_base = pci_resource_start(pdev, 2);
3314 ggtt->mappable_end = pci_resource_len(pdev, 2);
63340133 3315
4519290a
ID
3316 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
3317 if (!err)
3318 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
3319 if (err)
3320 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
63340133 3321
97d6d7ab 3322 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
63340133 3323
97d6d7ab 3324 if (INTEL_GEN(dev_priv) >= 9) {
d507d735 3325 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
34c998b4 3326 size = gen8_get_total_gtt_size(snb_gmch_ctl);
97d6d7ab 3327 } else if (IS_CHERRYVIEW(dev_priv)) {
d507d735 3328 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
34c998b4 3329 size = chv_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3330 } else {
d507d735 3331 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
34c998b4 3332 size = gen8_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3333 }
63340133 3334
34c998b4 3335 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
34c998b4 3336 ggtt->base.cleanup = gen6_gmch_remove;
d507d735
JL
3337 ggtt->base.bind_vma = ggtt_bind_vma;
3338 ggtt->base.unbind_vma = ggtt_unbind_vma;
fa3f46af
MA
3339 ggtt->base.set_pages = ggtt_set_pages;
3340 ggtt->base.clear_pages = clear_pages;
d6473f56 3341 ggtt->base.insert_page = gen8_ggtt_insert_page;
f7770bfd 3342 ggtt->base.clear_range = nop_clear_range;
48f112fe 3343 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
f7770bfd
CW
3344 ggtt->base.clear_range = gen8_ggtt_clear_range;
3345
3346 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
f7770bfd 3347
0ef34ad6
JB
3348 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3349 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
3350 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3351 ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
3352 if (ggtt->base.clear_range != nop_clear_range)
3353 ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3354 }
3355
7c3f86b6
CW
3356 ggtt->invalidate = gen6_ggtt_invalidate;
3357
36e16c49
ZW
3358 setup_private_pat(dev_priv);
3359
34c998b4 3360 return ggtt_probe_common(ggtt, size);
63340133
BW
3361}
3362
d507d735 3363static int gen6_gmch_probe(struct i915_ggtt *ggtt)
e76e9aeb 3364{
49d73912 3365 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 3366 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 3367 unsigned int size;
e76e9aeb 3368 u16 snb_gmch_ctl;
4519290a 3369 int err;
e76e9aeb 3370
97d6d7ab
CW
3371 ggtt->mappable_base = pci_resource_start(pdev, 2);
3372 ggtt->mappable_end = pci_resource_len(pdev, 2);
41907ddc 3373
baa09f5f
BW
3374 /* 64/512MB is the current min/max we actually know of, but this is just
3375 * a coarse sanity check.
e76e9aeb 3376 */
34c998b4 3377 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
d507d735 3378 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
baa09f5f 3379 return -ENXIO;
e76e9aeb
BW
3380 }
3381
4519290a
ID
3382 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3383 if (!err)
3384 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3385 if (err)
3386 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
97d6d7ab 3387 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
e76e9aeb 3388
d507d735 3389 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
e76e9aeb 3390
34c998b4
CW
3391 size = gen6_get_total_gtt_size(snb_gmch_ctl);
3392 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
e76e9aeb 3393
d507d735 3394 ggtt->base.clear_range = gen6_ggtt_clear_range;
d6473f56 3395 ggtt->base.insert_page = gen6_ggtt_insert_page;
d507d735
JL
3396 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
3397 ggtt->base.bind_vma = ggtt_bind_vma;
3398 ggtt->base.unbind_vma = ggtt_unbind_vma;
fa3f46af
MA
3399 ggtt->base.set_pages = ggtt_set_pages;
3400 ggtt->base.clear_pages = clear_pages;
34c998b4
CW
3401 ggtt->base.cleanup = gen6_gmch_remove;
3402
7c3f86b6
CW
3403 ggtt->invalidate = gen6_ggtt_invalidate;
3404
34c998b4
CW
3405 if (HAS_EDRAM(dev_priv))
3406 ggtt->base.pte_encode = iris_pte_encode;
3407 else if (IS_HASWELL(dev_priv))
3408 ggtt->base.pte_encode = hsw_pte_encode;
3409 else if (IS_VALLEYVIEW(dev_priv))
3410 ggtt->base.pte_encode = byt_pte_encode;
3411 else if (INTEL_GEN(dev_priv) >= 7)
3412 ggtt->base.pte_encode = ivb_pte_encode;
3413 else
3414 ggtt->base.pte_encode = snb_pte_encode;
7faf1ab2 3415
34c998b4 3416 return ggtt_probe_common(ggtt, size);
e76e9aeb
BW
3417}
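
/*
 * Illustrative sketch only (hypothetical helper): the gen6 path mirrors the
 * gen8 calculation but with 32-bit PTEs, e.g. a 2MiB GTT holds
 * 2MiB / 4B = 512K entries, which map 512K << PAGE_SHIFT = 2GiB of GGTT.
 */
static inline u64 example_gen6_total_from_gtt_bytes(unsigned int gtt_bytes)
{
	return ((u64)gtt_bytes / sizeof(gen6_pte_t)) << PAGE_SHIFT;
}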
3418
34c998b4 3419static void i915_gmch_remove(struct i915_address_space *vm)
e76e9aeb 3420{
34c998b4 3421 intel_gmch_remove();
644ec02b 3422}
baa09f5f 3423
d507d735 3424static int i915_gmch_probe(struct i915_ggtt *ggtt)
baa09f5f 3425{
49d73912 3426 struct drm_i915_private *dev_priv = ggtt->base.i915;
baa09f5f
BW
3427 int ret;
3428
91c8a326 3429 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
baa09f5f
BW
3430 if (!ret) {
3431 DRM_ERROR("failed to set up gmch\n");
3432 return -EIO;
3433 }
3434
edd1f2fe
CW
3435 intel_gtt_get(&ggtt->base.total,
3436 &ggtt->stolen_size,
3437 &ggtt->mappable_base,
3438 &ggtt->mappable_end);
baa09f5f 3439
97d6d7ab 3440 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
d6473f56 3441 ggtt->base.insert_page = i915_ggtt_insert_page;
d507d735
JL
3442 ggtt->base.insert_entries = i915_ggtt_insert_entries;
3443 ggtt->base.clear_range = i915_ggtt_clear_range;
3444 ggtt->base.bind_vma = ggtt_bind_vma;
3445 ggtt->base.unbind_vma = ggtt_unbind_vma;
fa3f46af
MA
3446 ggtt->base.set_pages = ggtt_set_pages;
3447 ggtt->base.clear_pages = clear_pages;
34c998b4 3448 ggtt->base.cleanup = i915_gmch_remove;
baa09f5f 3449
7c3f86b6
CW
3450 ggtt->invalidate = gmch_ggtt_invalidate;
3451
d507d735 3452 if (unlikely(ggtt->do_idle_maps))
c0a7f818
CW
3453 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3454
baa09f5f
BW
3455 return 0;
3456}
3457
d85489d3 3458/**
0088e522 3459 * i915_ggtt_probe_hw - Probe GGTT hardware location
97d6d7ab 3460 * @dev_priv: i915 device
d85489d3 3461 */
97d6d7ab 3462int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
baa09f5f 3463{
62106b4f 3464 struct i915_ggtt *ggtt = &dev_priv->ggtt;
baa09f5f
BW
3465 int ret;
3466
49d73912 3467 ggtt->base.i915 = dev_priv;
8448661d 3468 ggtt->base.dma = &dev_priv->drm.pdev->dev;
c114f76a 3469
34c998b4
CW
3470 if (INTEL_GEN(dev_priv) <= 5)
3471 ret = i915_gmch_probe(ggtt);
3472 else if (INTEL_GEN(dev_priv) < 8)
3473 ret = gen6_gmch_probe(ggtt);
3474 else
3475 ret = gen8_gmch_probe(ggtt);
a54c0c27 3476 if (ret)
baa09f5f 3477 return ret;
baa09f5f 3478
db9309a5
CW
3479 /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
3480 * This is easier than doing range restriction on the fly, as we
3481 * currently don't have any bits spare to pass in this upper
3482 * restriction!
3483 */
4f044a88 3484 if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
db9309a5
CW
3485 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
3486 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3487 }
3488
c890e2d5
CW
3489 if ((ggtt->base.total - 1) >> 32) {
3490 DRM_ERROR("We never expected a Global GTT with more than 32bits"
f6b9d5ca 3491 " of address space! Found %lldM!\n",
c890e2d5
CW
3492 ggtt->base.total >> 20);
3493 ggtt->base.total = 1ULL << 32;
3494 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3495 }
3496
f6b9d5ca
CW
3497 if (ggtt->mappable_end > ggtt->base.total) {
3498 DRM_ERROR("mappable aperture extends past end of GGTT,"
3499 " aperture=%llx, total=%llx\n",
3500 ggtt->mappable_end, ggtt->base.total);
3501 ggtt->mappable_end = ggtt->base.total;
3502 }
3503
baa09f5f 3504 /* GMADR is the PCI mmio aperture into the global GTT. */
c44ef60e 3505 DRM_INFO("Memory usable by graphics device = %lluM\n",
62106b4f
JL
3506 ggtt->base.total >> 20);
3507 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
edd1f2fe 3508 DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
80debff8 3509 if (intel_vtd_active())
5db6c735 3510 DRM_INFO("VT-d active for gfx access\n");
baa09f5f
BW
3511
3512 return 0;
0088e522
CW
3513}
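
/*
 * Illustrative sketch only (hypothetical helper, values implied rather than
 * taken from real hardware): the post-probe fixups above boil down to two
 * clamps.  Assuming a probed total of 4GiB with GuC loading enabled, the
 * usable GGTT is first limited to GUC_GGTT_TOP and the mappable aperture is
 * then clamped so that it never extends past whatever total survives.
 */
static inline void example_clamp_ggtt_for_guc(struct i915_ggtt *ggtt)
{
	ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
	ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
}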
3514
3515/**
3516 * i915_ggtt_init_hw - Initialize GGTT hardware
97d6d7ab 3517 * @dev_priv: i915 device
0088e522 3518 */
97d6d7ab 3519int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
0088e522 3520{
0088e522
CW
3521 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3522 int ret;
3523
f6b9d5ca
CW
3524 INIT_LIST_HEAD(&dev_priv->vm_list);
3525
a6508ded
CW
3526 /* Note that we use page colouring to enforce a guard page at the
3527 * end of the address space. This is required as the CS may prefetch
3528 * beyond the end of the batch buffer, across the page boundary,
3529 * and beyond the end of the GTT if we do not provide a guard.
f6b9d5ca 3530 */
80b204bc 3531 mutex_lock(&dev_priv->drm.struct_mutex);
80b204bc 3532 i915_address_space_init(&ggtt->base, dev_priv, "[global]");
a6508ded 3533 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
f6b9d5ca 3534 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
80b204bc 3535 mutex_unlock(&dev_priv->drm.struct_mutex);
f6b9d5ca 3536
f7bbe788
CW
3537 if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
3538 dev_priv->ggtt.mappable_base,
3539 dev_priv->ggtt.mappable_end)) {
f6b9d5ca
CW
3540 ret = -EIO;
3541 goto out_gtt_cleanup;
3542 }
3543
3544 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
3545
0088e522
CW
3546 /*
3547 * Initialise stolen early so that we may reserve preallocated
3548 * objects for the BIOS to KMS transition.
3549 */
7ace3d30 3550 ret = i915_gem_init_stolen(dev_priv);
0088e522
CW
3551 if (ret)
3552 goto out_gtt_cleanup;
3553
3554 return 0;
a4eba47b
ID
3555
3556out_gtt_cleanup:
72e96d64 3557 ggtt->base.cleanup(&ggtt->base);
a4eba47b 3558 return ret;
baa09f5f 3559}
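
/*
 * Illustrative sketch of the assumed bring-up order (hypothetical caller;
 * the real call sites live outside this file): probe the GGTT location
 * first, then initialise the address space and io mapping, then enable the
 * GTT before any mappings are used.
 */
static int example_ggtt_bringup(struct drm_i915_private *dev_priv)
{
	int err;

	err = i915_ggtt_probe_hw(dev_priv);
	if (err)
		return err;

	err = i915_ggtt_init_hw(dev_priv);
	if (err)
		return err;

	return i915_ggtt_enable_hw(dev_priv);
}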
6f65e29a 3560
97d6d7ab 3561int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
ac840ae5 3562{
97d6d7ab 3563 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
ac840ae5
VS
3564 return -EIO;
3565
3566 return 0;
3567}
3568
7c3f86b6
CW
3569void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3570{
04f7b24e
CW
3571 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3572
7c3f86b6
CW
3573 i915->ggtt.invalidate = guc_ggtt_invalidate;
3574}
3575
3576void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3577{
04f7b24e
CW
3578 /* We should only be called after i915_ggtt_enable_guc() */
3579 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3580
3581 i915->ggtt.invalidate = gen6_ggtt_invalidate;
7c3f86b6
CW
3582}
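
/*
 * Illustrative sketch only (hypothetical caller): the enable/disable helpers
 * above are meant to be used as a strict pair (the GEM_BUG_ONs check this),
 * swapping the GGTT invalidate hook while GuC submission owns the GGTT.
 */
static void example_toggle_guc_ggtt(struct drm_i915_private *i915, bool enable)
{
	if (enable)
		i915_ggtt_enable_guc(i915);	/* gen6 -> guc invalidate */
	else
		i915_ggtt_disable_guc(i915);	/* guc -> gen6 invalidate */
}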
3583
275a991c 3584void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
fa42331b 3585{
72e96d64 3586 struct i915_ggtt *ggtt = &dev_priv->ggtt;
fbb30a5c 3587 struct drm_i915_gem_object *obj, *on;
fa42331b 3588
dc97997a 3589 i915_check_and_clear_faults(dev_priv);
fa42331b
DV
3590
3591 /* First fill our portion of the GTT with scratch pages */
381b943b 3592 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
fa42331b 3593
fbb30a5c
CW
3594 ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3595
3596 /* clflush objects bound into the GGTT and rebind them. */
f2123818 3597 list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
fbb30a5c
CW
3598 bool ggtt_bound = false;
3599 struct i915_vma *vma;
3600
1c7f4bca 3601 list_for_each_entry(vma, &obj->vma_list, obj_link) {
72e96d64 3602 if (vma->vm != &ggtt->base)
2c3d9984 3603 continue;
fa42331b 3604
fbb30a5c
CW
3605 if (!i915_vma_unbind(vma))
3606 continue;
3607
2c3d9984
TU
3608 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3609 PIN_UPDATE));
fbb30a5c 3610 ggtt_bound = true;
2c3d9984
TU
3611 }
3612
fbb30a5c 3613 if (ggtt_bound)
975f7ff4 3614 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
2c3d9984 3615 }
fa42331b 3616
fbb30a5c
CW
3617 ggtt->base.closed = false;
3618
275a991c 3619 if (INTEL_GEN(dev_priv) >= 8) {
4395890a 3620 struct intel_ppat *ppat = &dev_priv->ppat;
fa42331b 3621
4395890a
ZW
3622 bitmap_set(ppat->dirty, 0, ppat->max_entries);
3623 dev_priv->ppat.update_hw(dev_priv);
fa42331b
DV
3624 return;
3625 }
3626
275a991c 3627 if (USES_PPGTT(dev_priv)) {
72e96d64
JL
3628 struct i915_address_space *vm;
3629
fa42331b 3630 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
e5716f55 3631 struct i915_hw_ppgtt *ppgtt;
fa42331b 3632
2bfa996e 3633 if (i915_is_ggtt(vm))
fa42331b 3634 ppgtt = dev_priv->mm.aliasing_ppgtt;
e5716f55
JL
3635 else
3636 ppgtt = i915_vm_to_ppgtt(vm);
fa42331b 3637
16a011c8 3638 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
fa42331b
DV
3639 }
3640 }
3641
7c3f86b6 3642 i915_ggtt_invalidate(dev_priv);
fa42331b
DV
3643}
3644
804beb4b 3645static struct scatterlist *
2d7f3bdb 3646rotate_pages(const dma_addr_t *in, unsigned int offset,
804beb4b 3647 unsigned int width, unsigned int height,
87130255 3648 unsigned int stride,
804beb4b 3649 struct sg_table *st, struct scatterlist *sg)
50470bb0
TU
3650{
3651 unsigned int column, row;
3652 unsigned int src_idx;
50470bb0 3653
50470bb0 3654 for (column = 0; column < width; column++) {
87130255 3655 src_idx = stride * (height - 1) + column;
50470bb0
TU
3656 for (row = 0; row < height; row++) {
3657 st->nents++;
3658 /* We don't need the pages, but need to initialize
3659 * the entries so the sg list can be happily traversed.
3660 * The only things we need are the DMA addresses.
3661 */
3662 sg_set_page(sg, NULL, PAGE_SIZE, 0);
804beb4b 3663 sg_dma_address(sg) = in[offset + src_idx];
50470bb0
TU
3664 sg_dma_len(sg) = PAGE_SIZE;
3665 sg = sg_next(sg);
87130255 3666 src_idx -= stride;
50470bb0
TU
3667 }
3668 }
804beb4b
TU
3669
3670 return sg;
50470bb0
TU
3671}
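
/*
 * Worked example (illustrative numbers, not from the original source): for a
 * plane with width=4, height=2, stride=4, the loops above start each column
 * at src_idx = stride * (height - 1) + column and step by -stride, so the
 * source pages are emitted in the order 4,0, 5,1, 6,2, 7,3 - each column
 * written bottom-row first:
 *
 *	linear source (row-major)	emission order per column
 *	    0 1 2 3			    4,0  5,1  6,2  7,3
 *	    4 5 6 7
 *
 * which is the column-by-column walk the rotated GGTT view expects.
 */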
3672
ba7a5741
CW
3673static noinline struct sg_table *
3674intel_rotate_pages(struct intel_rotation_info *rot_info,
3675 struct drm_i915_gem_object *obj)
50470bb0 3676{
75c7b0b8 3677 const unsigned long n_pages = obj->base.size / PAGE_SIZE;
6687c906 3678 unsigned int size = intel_rotation_info_size(rot_info);
85d1225e
DG
3679 struct sgt_iter sgt_iter;
3680 dma_addr_t dma_addr;
50470bb0
TU
3681 unsigned long i;
3682 dma_addr_t *page_addr_list;
3683 struct sg_table *st;
89e3e142 3684 struct scatterlist *sg;
1d00dad5 3685 int ret = -ENOMEM;
50470bb0 3686
50470bb0 3687 /* Allocate a temporary list of source pages for random access. */
2098105e 3688 page_addr_list = kvmalloc_array(n_pages,
f2a85e19 3689 sizeof(dma_addr_t),
0ee931c4 3690 GFP_KERNEL);
50470bb0
TU
3691 if (!page_addr_list)
3692 return ERR_PTR(ret);
3693
3694 /* Allocate target SG list. */
3695 st = kmalloc(sizeof(*st), GFP_KERNEL);
3696 if (!st)
3697 goto err_st_alloc;
3698
6687c906 3699 ret = sg_alloc_table(st, size, GFP_KERNEL);
50470bb0
TU
3700 if (ret)
3701 goto err_sg_alloc;
3702
3703 /* Populate source page list from the object. */
3704 i = 0;
a4f5ea64 3705 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
85d1225e 3706 page_addr_list[i++] = dma_addr;
50470bb0 3707
85d1225e 3708 GEM_BUG_ON(i != n_pages);
11f20322
VS
3709 st->nents = 0;
3710 sg = st->sgl;
3711
6687c906
VS
3712 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3713 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3714 rot_info->plane[i].width, rot_info->plane[i].height,
3715 rot_info->plane[i].stride, st, sg);
89e3e142
TU
3716 }
3717
6687c906
VS
3718 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
3719 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
50470bb0 3720
2098105e 3721 kvfree(page_addr_list);
50470bb0
TU
3722
3723 return st;
3724
3725err_sg_alloc:
3726 kfree(st);
3727err_st_alloc:
2098105e 3728 kvfree(page_addr_list);
50470bb0 3729
6687c906
VS
3730 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3731 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3732
50470bb0
TU
3733 return ERR_PTR(ret);
3734}
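
/*
 * Illustrative sketch with made-up values (hypothetical helper): a rotated
 * view is described entirely by the per-plane geometry consumed above.  It
 * is assumed here that intel_rotation_info_size() returns the sum of
 * width * height over the planes, i.e. 4*2 + 2*1 = 10 page entries for the
 * numbers below.
 */
static struct intel_rotation_info example_rotation_info(void)
{
	struct intel_rotation_info rot = {};

	rot.plane[0].width = 4;		/* tiles (pages) per row */
	rot.plane[0].height = 2;	/* rows */
	rot.plane[0].stride = 4;	/* row pitch in tiles */
	rot.plane[0].offset = 0;	/* first page of plane 0 */

	rot.plane[1].width = 2;
	rot.plane[1].height = 1;
	rot.plane[1].stride = 2;
	rot.plane[1].offset = 8;	/* plane 1 starts after plane 0 */

	return rot;
}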
ec7adb6e 3735
ba7a5741 3736static noinline struct sg_table *
8bd7ef16
JL
3737intel_partial_pages(const struct i915_ggtt_view *view,
3738 struct drm_i915_gem_object *obj)
3739{
3740 struct sg_table *st;
d2a84a76 3741 struct scatterlist *sg, *iter;
8bab1193 3742 unsigned int count = view->partial.size;
d2a84a76 3743 unsigned int offset;
8bd7ef16
JL
3744 int ret = -ENOMEM;
3745
3746 st = kmalloc(sizeof(*st), GFP_KERNEL);
3747 if (!st)
3748 goto err_st_alloc;
3749
d2a84a76 3750 ret = sg_alloc_table(st, count, GFP_KERNEL);
8bd7ef16
JL
3751 if (ret)
3752 goto err_sg_alloc;
3753
8bab1193 3754 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
d2a84a76
CW
3755 GEM_BUG_ON(!iter);
3756
8bd7ef16
JL
3757 sg = st->sgl;
3758 st->nents = 0;
d2a84a76
CW
3759 do {
3760 unsigned int len;
8bd7ef16 3761
d2a84a76
CW
3762 len = min(iter->length - (offset << PAGE_SHIFT),
3763 count << PAGE_SHIFT);
3764 sg_set_page(sg, NULL, len, 0);
3765 sg_dma_address(sg) =
3766 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3767 sg_dma_len(sg) = len;
8bd7ef16 3768
8bd7ef16 3769 st->nents++;
d2a84a76
CW
3770 count -= len >> PAGE_SHIFT;
3771 if (count == 0) {
3772 sg_mark_end(sg);
3773 return st;
3774 }
8bd7ef16 3775
d2a84a76
CW
3776 sg = __sg_next(sg);
3777 iter = __sg_next(iter);
3778 offset = 0;
3779 } while (1);
8bd7ef16
JL
3780
3781err_sg_alloc:
3782 kfree(st);
3783err_st_alloc:
3784 return ERR_PTR(ret);
3785}
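
/*
 * Worked example (illustrative numbers, not from the original source): for a
 * partial view of size 3 pages starting at page 5 of an object whose first
 * sg chunk covers pages 0-7, i915_gem_object_get_sg() returns that chunk
 * with offset = 5, so the first (and only) iteration copies
 * len = min((8 - 5) << PAGE_SHIFT, 3 << PAGE_SHIFT) = 12KiB into a single
 * entry and marks the table complete.  A view that straddles chunks simply
 * keeps walking with offset reset to 0.
 */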
3786
70b9f6f8 3787static int
50470bb0 3788i915_get_ggtt_vma_pages(struct i915_vma *vma)
fe14d5f4 3789{
ba7a5741 3790 int ret;
50470bb0 3791
2c3a3f44
CW
3792 /* The vma->pages are only valid within the lifespan of the borrowed
3793 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3794 * must be the vma->pages. A simple rule is that vma->pages must only
3795 * be accessed when the obj->mm.pages are pinned.
3796 */
3797 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3798
ba7a5741
CW
3799 switch (vma->ggtt_view.type) {
3800 case I915_GGTT_VIEW_NORMAL:
3801 vma->pages = vma->obj->mm.pages;
fe14d5f4
TU
3802 return 0;
3803
ba7a5741 3804 case I915_GGTT_VIEW_ROTATED:
247177dd 3805 vma->pages =
ba7a5741
CW
3806 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3807 break;
3808
3809 case I915_GGTT_VIEW_PARTIAL:
247177dd 3810 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
ba7a5741
CW
3811 break;
3812
3813 default:
fe14d5f4
TU
3814 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3815 vma->ggtt_view.type);
ba7a5741
CW
3816 return -EINVAL;
3817 }
fe14d5f4 3818
ba7a5741
CW
3819 ret = 0;
3820 if (unlikely(IS_ERR(vma->pages))) {
247177dd
CW
3821 ret = PTR_ERR(vma->pages);
3822 vma->pages = NULL;
50470bb0
TU
3823 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3824 vma->ggtt_view.type, ret);
fe14d5f4 3825 }
50470bb0 3826 return ret;
fe14d5f4
TU
3827}
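
/*
 * Illustrative sketch with made-up values (hypothetical helper): the view
 * type stored in vma->ggtt_view selects which branch above provides
 * vma->pages.  The initialiser below only uses fields already consumed by
 * intel_partial_pages(); partial.offset and partial.size are in pages.
 */
static struct i915_ggtt_view example_partial_view(void)
{
	struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_PARTIAL,
	};

	view.partial.offset = 5;	/* start 5 pages into the object */
	view.partial.size = 3;		/* map 3 pages of the object */

	return view;
}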
3828
625d988a
CW
3829/**
3830 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
a4dbf7cf
CW
3831 * @vm: the &struct i915_address_space
3832 * @node: the &struct drm_mm_node (typically i915_vma.node)
3833 * @size: how much space to allocate inside the GTT,
3834 * must be #I915_GTT_PAGE_SIZE aligned
3835 * @offset: where to insert inside the GTT,
3836 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3837 * (@offset + @size) must fit within the address space
3838 * @color: color to apply to node, if this node is not from a VMA,
3839 * color must be #I915_COLOR_UNEVICTABLE
3840 * @flags: control search and eviction behaviour
625d988a
CW
3841 *
3842 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3843 * the address space (using @size and @color). If the @node does not fit, it
3844 * tries to evict any overlapping nodes from the GTT, including any
3845 * neighbouring nodes if the colors do not match (to ensure guard pages between
3846 * differing domains). See i915_gem_evict_for_node() for the gory details
3847 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3848 * evicting active overlapping objects, and any overlapping node that is pinned
3849 * or marked as unevictable will also result in failure.
3850 *
3851 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3852 * asked to wait for eviction and interrupted.
3853 */
3854int i915_gem_gtt_reserve(struct i915_address_space *vm,
3855 struct drm_mm_node *node,
3856 u64 size, u64 offset, unsigned long color,
3857 unsigned int flags)
3858{
3859 int err;
3860
3861 GEM_BUG_ON(!size);
3862 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3863 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3864 GEM_BUG_ON(range_overflows(offset, size, vm->total));
3fec7ec4 3865 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
9734ad13 3866 GEM_BUG_ON(drm_mm_node_allocated(node));
625d988a
CW
3867
3868 node->size = size;
3869 node->start = offset;
3870 node->color = color;
3871
3872 err = drm_mm_reserve_node(&vm->mm, node);
3873 if (err != -ENOSPC)
3874 return err;
3875
616d9cee
CW
3876 if (flags & PIN_NOEVICT)
3877 return -ENOSPC;
3878
625d988a
CW
3879 err = i915_gem_evict_for_node(vm, node, flags);
3880 if (err == 0)
3881 err = drm_mm_reserve_node(&vm->mm, node);
3882
3883 return err;
3884}
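
/*
 * Illustrative sketch of a caller with made-up values (hypothetical helper):
 * reserve a single page at a fixed GGTT offset without evicting anything,
 * using only the signature and flags documented above.
 */
static int example_reserve_fixed(struct i915_address_space *vm,
				 struct drm_mm_node *node)
{
	return i915_gem_gtt_reserve(vm, node,
				    I915_GTT_PAGE_SIZE,		/* size: one page */
				    0x100000,			/* offset: 1MiB, page aligned */
				    I915_COLOR_UNEVICTABLE,	/* not backed by a VMA */
				    PIN_NOEVICT);		/* -ENOSPC rather than evict */
}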
3885
606fec95
CW
3886static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3887{
3888 u64 range, addr;
3889
3890 GEM_BUG_ON(range_overflows(start, len, end));
3891 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3892
3893 range = round_down(end - len, align) - round_up(start, align);
3894 if (range) {
3895 if (sizeof(unsigned long) == sizeof(u64)) {
3896 addr = get_random_long();
3897 } else {
3898 addr = get_random_int();
3899 if (range > U32_MAX) {
3900 addr <<= 32;
3901 addr |= get_random_int();
3902 }
3903 }
3904 div64_u64_rem(addr, range, &addr);
3905 start += addr;
3906 }
3907
3908 return round_up(start, align);
3909}
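
/*
 * Worked example (illustrative numbers, not from the original source): with
 * start=0, end=1GiB, len=64KiB and align=4KiB the candidate range is
 * round_down(1GiB - 64KiB, 4KiB) - round_up(0, 4KiB) = 1GiB - 64KiB.
 * A random value is reduced modulo that range, added to start and rounded
 * up to the alignment, giving a roughly uniform pick over every aligned
 * slot large enough to hold the 64KiB block.
 */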
3910
e007b19d
CW
3911/**
3912 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
a4dbf7cf
CW
3913 * @vm: the &struct i915_address_space
3914 * @node: the &struct drm_mm_node (typically i915_vma.node)
3915 * @size: how much space to allocate inside the GTT,
3916 * must be #I915_GTT_PAGE_SIZE aligned
3917 * @alignment: required alignment of starting offset, may be 0 but
3918 * if specified, this must be a power-of-two and at least
3919 * #I915_GTT_MIN_ALIGNMENT
3920 * @color: color to apply to node
3921 * @start: start of any range restriction inside GTT (0 for all),
e007b19d 3922 * must be #I915_GTT_PAGE_SIZE aligned
a4dbf7cf
CW
3923 * @end: end of any range restriction inside GTT (U64_MAX for all),
3924 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3925 * @flags: control search and eviction behaviour
e007b19d
CW
3926 *
3927 * i915_gem_gtt_insert() first searches for an available hole into which
3928 * it can insert the node. The hole address is aligned to @alignment and
3929 * its @size must then fit entirely within the [@start, @end] bounds. The
3930 * nodes on either side of the hole must match @color, or else a guard page
3931 * will be inserted between the two nodes (or the node evicted). If no
606fec95
CW
3932 * suitable hole is found, first a victim is randomly selected and tested
3933 * for eviction; failing that, the LRU list of objects within the GTT
e007b19d
CW
3934 * is scanned to find the first set of replacement nodes to create the hole.
3935 * Those old overlapping nodes are evicted from the GTT (and so must be
3936 * rebound before any future use). Any node that is currently pinned cannot
3937 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3938 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3939 * searching for an eviction candidate. See i915_gem_evict_something() for
3940 * the gory details on the eviction algorithm.
3941 *
3942 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3943 * asked to wait for eviction and interrupted.
3944 */
3945int i915_gem_gtt_insert(struct i915_address_space *vm,
3946 struct drm_mm_node *node,
3947 u64 size, u64 alignment, unsigned long color,
3948 u64 start, u64 end, unsigned int flags)
3949{
4e64e553 3950 enum drm_mm_insert_mode mode;
606fec95 3951 u64 offset;
e007b19d
CW
3952 int err;
3953
3954 lockdep_assert_held(&vm->i915->drm.struct_mutex);
3955 GEM_BUG_ON(!size);
3956 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3957 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3958 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3959 GEM_BUG_ON(start >= end);
3960 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3961 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3fec7ec4 3962 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
9734ad13 3963 GEM_BUG_ON(drm_mm_node_allocated(node));
e007b19d
CW
3964
3965 if (unlikely(range_overflows(start, size, end)))
3966 return -ENOSPC;
3967
3968 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3969 return -ENOSPC;
3970
4e64e553
CW
3971 mode = DRM_MM_INSERT_BEST;
3972 if (flags & PIN_HIGH)
3973 mode = DRM_MM_INSERT_HIGH;
3974 if (flags & PIN_MAPPABLE)
3975 mode = DRM_MM_INSERT_LOW;
e007b19d
CW
3976
3977 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3978 * so we know that we always have a minimum alignment of 4096.
3979 * The drm_mm range manager is optimised to return results
3980 * with zero alignment, so where possible use the optimal
3981 * path.
3982 */
3983 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3984 if (alignment <= I915_GTT_MIN_ALIGNMENT)
3985 alignment = 0;
3986
4e64e553
CW
3987 err = drm_mm_insert_node_in_range(&vm->mm, node,
3988 size, alignment, color,
3989 start, end, mode);
e007b19d
CW
3990 if (err != -ENOSPC)
3991 return err;
3992
616d9cee
CW
3993 if (flags & PIN_NOEVICT)
3994 return -ENOSPC;
3995
606fec95
CW
3996 /* No free space, pick a slot at random.
3997 *
3998 * There is a pathological case here using a GTT shared between
3999 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
4000 *
4001 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
4002 * (64k objects) (448k objects)
4003 *
4004 * Now imagine that the eviction LRU is ordered top-down (just because
4005 * pathology meets real life), and that we need to evict an object to
4006 * make room inside the aperture. The eviction scan then has to walk
4007 * the 448k list before it finds one within range. And now imagine that
4008 * it has to search for a new hole between every byte inside the memcpy,
4009 * for several simultaneous clients.
4010 *
4011 * On a full-ppgtt system, if we have run out of available space, there
4012 * will be lots and lots of objects in the eviction list! Again,
4013 * searching that LRU list may be slow if we are also applying any
4014 * range restrictions (e.g. restriction to low 4GiB) and so, for
4015 * simplicity and similarity between different GTTs, try the single
4016 * random replacement first.
4017 */
4018 offset = random_offset(start, end,
4019 size, alignment ?: I915_GTT_MIN_ALIGNMENT);
4020 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
4021 if (err != -ENOSPC)
4022 return err;
4023
4024 /* Randomly selected placement is pinned, do a search */
e007b19d
CW
4025 err = i915_gem_evict_something(vm, size, alignment, color,
4026 start, end, flags);
4027 if (err)
4028 return err;
4029
4e64e553
CW
4030 return drm_mm_insert_node_in_range(&vm->mm, node,
4031 size, alignment, color,
4032 start, end, DRM_MM_INSERT_EVICT);
e007b19d 4033}
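
/*
 * Illustrative sketch of a caller with made-up values (hypothetical helper):
 * ask for a 2MiB, 64KiB-aligned node anywhere in the low 4GiB of the GTT,
 * preferring the top of that range.  Only the signature and PIN_* flags
 * already used in this file are assumed; like the function itself, the
 * caller must hold struct_mutex.
 */
static int example_insert_low_4g(struct i915_address_space *vm,
				 struct drm_mm_node *node)
{
	return i915_gem_gtt_insert(vm, node,
				   SZ_2M,			/* size */
				   SZ_64K,			/* alignment */
				   I915_COLOR_UNEVICTABLE,	/* not backed by a VMA */
				   0, BIT_ULL(32),		/* low 4GiB only */
				   PIN_HIGH);			/* fill from the top down */
}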
3b5bb0a3
CW
4034
4035#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
4036#include "selftests/mock_gtt.c"
1c42819a 4037#include "selftests/i915_gem_gtt.c"
3b5bb0a3 4038#endif