/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view are
 * different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old core
 * GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics which means that passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

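/*
 * Illustrative sketch (not functional code from this file): the kind of
 * scatter-gather remapping step 3 above implies for the 2x2 stereo example.
 * The helper name build_stereo_view() and the page order are made up for
 * illustration; sg_alloc_table(), for_each_sg() and sg_set_page() are core
 * kernel helpers, and i915_gem_object_get_page() is assumed to return the
 * n'th backing page of the object.
 *
 *	static struct sg_table *build_stereo_view(struct drm_i915_gem_object *obj)
 *	{
 *		static const unsigned int order[] = { 0, 1, 0, 1, 2, 3, 2, 3 };
 *		struct sg_table *st;
 *		struct scatterlist *sg;
 *		unsigned int i;
 *
 *		st = kmalloc(sizeof(*st), GFP_KERNEL);
 *		if (!st)
 *			return NULL;
 *		if (sg_alloc_table(st, ARRAY_SIZE(order), GFP_KERNEL)) {
 *			kfree(st);
 *			return NULL;
 *		}
 *
 *		for_each_sg(st->sgl, sg, ARRAY_SIZE(order), i)
 *			sg_set_page(sg, i915_gem_object_get_page(obj, order[i]),
 *				    PAGE_SIZE, 0);
 *
 *		return st;
 *	}
 */
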
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	if (!dev_priv->info.has_aliasing_ppgtt)
		return 0;

	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* GVT-g has no support for 32bit ppgtt */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = intel_vgpu_has_full_48bit_ppgtt(dev_priv);
	}

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915_modparams.enable_execlists) {
		if (has_full_48bit_ppgtt)
			return 3;

		if (has_full_ppgtt)
			return 2;
	}

	return 1;
}

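/*
 * Worked examples for intel_sanitize_enable_ppgtt() above (the return value
 * follows the convention used by the checks: 0 = disabled, 1 = aliasing,
 * 2 = full 32b PPGTT, 3 = full 48b PPGTT):
 *
 *   enable_ppgtt=0 on gen6-8            -> 0 (PPGTT disabled)
 *   enable_ppgtt=2, has_full_ppgtt      -> 2 (full 32b PPGTT)
 *   enable_ppgtt=3, has_full_48bit      -> 3 (full 48b PPGTT)
 *   gen8+ with execlists and 48b support-> 3 (auto-selected)
 *   otherwise                           -> 1 (aliasing PPGTT)
 */
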
static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int ret;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
						 vma->size);
		if (ret)
			return ret;
	}

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

static int ppgtt_set_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pages);

	vma->pages = vma->obj->mm.pages;

	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

static void clear_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(!vma->pages);

	if (vma->pages != vma->obj->mm.pages) {
		sg_free_table(vma->pages);
		kfree(vma->pages);
	}
	vma->pages = NULL;

	memset(&vma->page_sizes, 0, sizeof(vma->page_sizes));
}

static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC;
		break;
	default:
		pte |= PPAT_CACHED;
		break;
	}

	return pte;
}

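/*
 * Example of the encoding above (values taken literally from the switch):
 * for a page at dma address 0x20000,
 *
 *   gen8_pte_encode(0x20000, I915_CACHE_NONE) ==
 *		0x20000 | _PAGE_PRESENT | _PAGE_RW | PPAT_UNCACHED;
 *   gen8_pte_encode(0x20000, I915_CACHE_WT) ==
 *		0x20000 | _PAGE_PRESENT | _PAGE_RW | PPAT_DISPLAY_ELLC;
 *
 * i.e. the page-aligned address occupies the upper bits while the low bits
 * carry validity, write access and the PPAT (cacheability) selection.
 */
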
static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE;
	else
		pde |= PPAT_UNCACHED;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct pagevec *pvec = &vm->free_pages;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	if (likely(pvec->nr))
		return pvec->pages[--pvec->nr];

	if (!vm->pt_kmap_wc)
		return alloc_page(gfp);

	/* A placeholder for a specific mutex to guard the WC stash */
	lockdep_assert_held(&vm->i915->drm.struct_mutex);

	/* Look in our global stash of WC pages... */
	pvec = &vm->i915->mm.wc_stash;
	if (likely(pvec->nr))
		return pvec->pages[--pvec->nr];

	/* Otherwise batch allocate pages to amortize cost of set_pages_wc. */
	do {
		struct page *page;

		page = alloc_page(gfp);
		if (unlikely(!page))
			break;

		pvec->pages[pvec->nr++] = page;
	} while (pagevec_space(pvec));

	if (unlikely(!pvec->nr))
		return NULL;

	set_pages_array_wc(pvec->pages, pvec->nr);

	return pvec->pages[--pvec->nr];
}

static void vm_free_pages_release(struct i915_address_space *vm,
				  bool immediate)
{
	struct pagevec *pvec = &vm->free_pages;

	GEM_BUG_ON(!pagevec_count(pvec));

	if (vm->pt_kmap_wc) {
		struct pagevec *stash = &vm->i915->mm.wc_stash;

		/* When we use WC, first fill up the global stash and then
		 * only if full immediately free the overflow.
		 */

		lockdep_assert_held(&vm->i915->drm.struct_mutex);
		if (pagevec_space(stash)) {
			do {
				stash->pages[stash->nr++] =
					pvec->pages[--pvec->nr];
				if (!pvec->nr)
					return;
			} while (pagevec_space(stash));

			/* As we have made some room in the VM's free_pages,
			 * we can wait for it to fill again. Unless we are
			 * inside i915_address_space_fini() and must
			 * immediately release the pages!
			 */
			if (!immediate)
				return;
		}

		set_pages_array_wb(pvec->pages, pvec->nr);
	}

	__pagevec_release(pvec);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	if (!pagevec_add(&vm->free_pages, page))
		vm_free_pages_release(vm, false);
}

static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

static int setup_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct i915_address_space *vm,
			     struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))

static void fill_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p,
			  const u64 val)
{
	u64 * const vaddr = kmap_atomic(p->page);

	memset64(vaddr, val, PAGE_SIZE / sizeof(val));

	kunmap_atomic(vaddr);
}

static void fill_page_dma_32(struct i915_address_space *vm,
			     struct i915_page_dma *p,
			     const u32 v)
{
	fill_page_dma(vm, p, (u64)v << 32 | v);
}

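/*
 * Example: fill_page_dma_32(vm, p, 0xdeadbeef) replicates the 32bit value
 * into the 64bit pattern 0xdeadbeefdeadbeef, and memset64() then writes it
 * into every one of the PAGE_SIZE / sizeof(u64) == 512 qword slots of the
 * backing page (for the usual 4KiB PAGE_SIZE).
 */
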
static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct page *page = NULL;
	dma_addr_t addr;
	int order;

	/*
	 * In order to utilize 64K pages for an object with a size < 2M, we will
	 * need to support a 64K scratch page, given that every 16th entry for a
	 * page-table operating in 64K mode must point to a properly aligned 64K
	 * region, including any PTEs which happen to point to scratch.
	 *
	 * This is only relevant for the 48b PPGTT where we support
	 * huge-gtt-pages, see also i915_vma_insert().
	 *
	 * TODO: we should really consider write-protecting the scratch-page and
	 * sharing between ppgtt
	 */
	if (i915_vm_is_48bit(vm) &&
	    HAS_PAGE_SIZES(vm->i915, I915_GTT_PAGE_SIZE_64K)) {
		order = get_order(I915_GTT_PAGE_SIZE_64K);
		page = alloc_pages(gfp | __GFP_ZERO | __GFP_NOWARN, order);
		if (page) {
			addr = dma_map_page(vm->dma, page, 0,
					    I915_GTT_PAGE_SIZE_64K,
					    PCI_DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(vm->dma, addr))) {
				__free_pages(page, order);
				page = NULL;
			}

			if (!IS_ALIGNED(addr, I915_GTT_PAGE_SIZE_64K)) {
				dma_unmap_page(vm->dma, addr,
					       I915_GTT_PAGE_SIZE_64K,
					       PCI_DMA_BIDIRECTIONAL);
				__free_pages(page, order);
				page = NULL;
			}
		}
	}

	if (!page) {
		order = 0;
		page = alloc_page(gfp | __GFP_ZERO);
		if (unlikely(!page))
			return -ENOMEM;

		addr = dma_map_page(vm->dma, page, 0, PAGE_SIZE,
				    PCI_DMA_BIDIRECTIONAL);
		if (unlikely(dma_mapping_error(vm->dma, addr))) {
			__free_page(page);
			return -ENOMEM;
		}
	}

	vm->scratch_page.page = page;
	vm->scratch_page.daddr = addr;
	vm->scratch_page.order = order;

	return 0;
}

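/*
 * Arithmetic behind the 64K scratch requirement above: a page table still
 * has 512 slots covering 2M of address space, but in 64K mode the hardware
 * only consumes every 16th slot (64K / 4K == 16), i.e. entries 0, 16, 32, ...
 * Each of those must point at a 64K-aligned 64K region, so when only part of
 * the table is populated the scratch entry itself has to be a 64K page; a
 * plain 4K scratch page would break the alignment rule.
 */
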
static void cleanup_scratch_page(struct i915_address_space *vm)
{
	struct i915_page_dma *p = &vm->scratch_page;

	dma_unmap_page(vm->dma, p->daddr, BIT(p->order) << PAGE_SHIFT,
		       PCI_DMA_BIDIRECTIONAL);
	__free_pages(p->page, p->order);
}

static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pt))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->used_ptes = 0;
	return pt;
}

static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
	cleanup_px(vm, pt);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill_px(vm, pt,
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill32_px(vm, pt,
		  vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}

static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pd))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	pd->used_pdes = 0;
	return pd;
}

static void free_pd(struct i915_address_space *vm,
		    struct i915_page_directory *pd)
{
	cleanup_px(vm, pd);
	kfree(pd);
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	unsigned int i;

	fill_px(vm, pd,
		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
	for (i = 0; i < I915_PDES; i++)
		pd->page_table[i] = vm->scratch_pt;
}

static int __pdp_init(struct i915_address_space *vm,
		      struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	unsigned int i;

	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
					    GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pdp->page_directory))
		return -ENOMEM;

	for (i = 0; i < pdpes; i++)
		pdp->page_directory[i] = vm->scratch_pd;

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static inline bool use_4lvl(const struct i915_address_space *vm)
{
	return i915_vm_is_48bit(vm);
}

static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!use_4lvl(vm));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(vm, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(vm, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct i915_address_space *vm,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);

	if (!use_4lvl(vm))
		return;

	cleanup_px(vm, pdp);
	kfree(pdp);
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	unsigned int i;

	fill_px(vm, pml4,
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
		pml4->pdps[i] = vm->scratch_pdp;
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	BUG_ON(entry >= 4);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
	*cs++ = upper_32_bits(addr);
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
	*cs++ = lower_32_bits(addr);
	intel_ring_advance(req, cs);

	return 0;
}

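/*
 * The six dwords emitted above form two MI_LOAD_REGISTER_IMM commands; for
 * PDP entry n and page directory address "addr" the ring contains:
 *
 *   dword 0: MI_LOAD_REGISTER_IMM(1)
 *   dword 1: i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, n))
 *   dword 2: upper_32_bits(addr)
 *   dword 3: MI_LOAD_REGISTER_IMM(1)
 *   dword 4: i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, n))
 *   dword 5: lower_32_bits(addr)
 *
 * so the PDP registers are loaded from the request's ring rather than by
 * direct mmio, keeping the update ordered with the rest of the request.
 */
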
static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}

/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				u64 start, u64 length)
{
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	gen8_pte_t *vaddr;

	GEM_BUG_ON(num_entries > pt->used_ptes);

	pt->used_ptes -= num_entries;
	if (!pt->used_ptes)
		return true;

	vaddr = kmap_atomic_px(pt);
	while (pte < pte_end)
		vaddr[pte++] = scratch_pte;
	kunmap_atomic(vaddr);

	return false;
}

static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       struct i915_page_table *pt,
			       unsigned int pde)
{
	gen8_pde_t *vaddr;

	pd->page_table[pde] = pt;

	vaddr = kmap_atomic_px(pd);
	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				u64 start, u64 length)
{
	struct i915_page_table *pt;
	u32 pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		GEM_BUG_ON(pt == vm->scratch_pt);

		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
			continue;

		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
		GEM_BUG_ON(!pd->used_pdes);
		pd->used_pdes--;

		free_pt(vm, pt);
	}

	return !pd->used_pdes;
}

static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				struct i915_page_directory *pd,
				unsigned int pdpe)
{
	gen8_ppgtt_pdpe_t *vaddr;

	pdp->page_directory[pdpe] = pd;
	if (!use_4lvl(vm))
		return;

	vaddr = kmap_atomic_px(pdp);
	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 u64 start, u64 length)
{
	struct i915_page_directory *pd;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		GEM_BUG_ON(pd == vm->scratch_pd);

		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
			continue;

		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;

		free_pd(vm, pd);
	}

	return !pdp->used_pdpes;
}

static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
				 struct i915_page_directory_pointer *pdp,
				 unsigned int pml4e)
{
	gen8_ppgtt_pml4e_t *vaddr;

	pml4->pdps[pml4e] = pdp;

	vaddr = kmap_atomic_px(pml4);
	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	unsigned int pml4e;

	GEM_BUG_ON(!use_4lvl(vm));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		GEM_BUG_ON(pdp == vm->scratch_pdp);

		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
			continue;

		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);

		free_pdp(vm, pdp);
	}
}

static inline struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
} sgt_dma(struct i915_vma *vma) {
	struct scatterlist *sg = vma->pages->sgl;
	dma_addr_t addr = sg_dma_address(sg);
	return (struct sgt_dma) { sg, addr, addr + sg->length };
}

struct gen8_insert_pte {
	u16 pml4e;
	u16 pdpe;
	u16 pde;
	u16 pte;
};

static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
{
	return (struct gen8_insert_pte) {
		 gen8_pml4e_index(start),
		 gen8_pdpe_index(start),
		 gen8_pde_index(start),
		 gen8_pte_index(start),
	};
}

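/*
 * Worked example of the decomposition above, assuming the usual gen8
 * 4-level layout of four 9-bit indices above a 12-bit page offset
 * (PML4E shift 39, PDPE shift 30, PDE shift 21, PTE shift 12):
 *
 *   start = (1ULL << 39) | (2ULL << 30) | (3ULL << 21) | (4ULL << 12)
 *         = 0x8080604000
 *   gen8_insert_pte(start) == { .pml4e = 1, .pdpe = 2, .pde = 3, .pte = 4 }
 *
 * The insertion loops below simply increment .pte and carry into .pde,
 * .pdpe and .pml4e as each 512-entry level wraps.
 */
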
static __always_inline bool
gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
			      struct i915_page_directory_pointer *pdp,
			      struct sgt_dma *iter,
			      struct gen8_insert_pte *idx,
			      enum i915_cache_level cache_level)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
	gen8_pte_t *vaddr;
	bool ret;

	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
	pd = pdp->page_directory[idx->pdpe];
	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
	do {
		vaddr[idx->pte] = pte_encode | iter->dma;

		iter->dma += PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				ret = false;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (++idx->pte == GEN8_PTES) {
			idx->pte = 0;

			if (++idx->pde == I915_PDES) {
				idx->pde = 0;

				/* Limited by sg length for 3lvl */
				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
					idx->pdpe = 0;
					ret = true;
					break;
				}

				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
				pd = pdp->page_directory[idx->pdpe];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
		}
	} while (1);
	kunmap_atomic(vaddr);

	return ret;
}

static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
				   struct i915_vma *vma,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);
	struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);

	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
				      cache_level);

	vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
}

static void gen8_ppgtt_insert_huge_entries(struct i915_vma *vma,
					   struct i915_page_directory_pointer **pdps,
					   struct sgt_dma *iter,
					   enum i915_cache_level cache_level)
{
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
	u64 start = vma->node.start;
	dma_addr_t rem = iter->sg->length;

	do {
		struct gen8_insert_pte idx = gen8_insert_pte(start);
		struct i915_page_directory_pointer *pdp = pdps[idx.pml4e];
		struct i915_page_directory *pd = pdp->page_directory[idx.pdpe];
		unsigned int page_size;
		bool maybe_64K = false;
		gen8_pte_t encode = pte_encode;
		gen8_pte_t *vaddr;
		u16 index, max;

		if (vma->page_sizes.sg & I915_GTT_PAGE_SIZE_2M &&
		    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_2M) &&
		    rem >= I915_GTT_PAGE_SIZE_2M && !idx.pte) {
			index = idx.pde;
			max = I915_PDES;
			page_size = I915_GTT_PAGE_SIZE_2M;

			encode |= GEN8_PDE_PS_2M;

			vaddr = kmap_atomic_px(pd);
		} else {
			struct i915_page_table *pt = pd->page_table[idx.pde];

			index = idx.pte;
			max = GEN8_PTES;
			page_size = I915_GTT_PAGE_SIZE;

			if (!index &&
			    vma->page_sizes.sg & I915_GTT_PAGE_SIZE_64K &&
			    IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
			    (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
			     rem >= (max - index) << PAGE_SHIFT))
				maybe_64K = true;

			vaddr = kmap_atomic_px(pt);
		}

		do {
			GEM_BUG_ON(iter->sg->length < page_size);
			vaddr[index++] = encode | iter->dma;

			start += page_size;
			iter->dma += page_size;
			rem -= page_size;
			if (iter->dma >= iter->max) {
				iter->sg = __sg_next(iter->sg);
				if (!iter->sg)
					break;

				rem = iter->sg->length;
				iter->dma = sg_dma_address(iter->sg);
				iter->max = iter->dma + rem;

				if (maybe_64K && index < max &&
				    !(IS_ALIGNED(iter->dma, I915_GTT_PAGE_SIZE_64K) &&
				      (IS_ALIGNED(rem, I915_GTT_PAGE_SIZE_64K) ||
				       rem >= (max - index) << PAGE_SHIFT)))
					maybe_64K = false;

				if (unlikely(!IS_ALIGNED(iter->dma, page_size)))
					break;
			}
		} while (rem >= page_size && index < max);

		kunmap_atomic(vaddr);

		/*
		 * Is it safe to mark the 2M block as 64K? -- Either we have
		 * filled whole page-table with 64K entries, or filled part of
		 * it and have reached the end of the sg table and we have
		 * enough padding.
		 */
		if (maybe_64K &&
		    (index == max ||
		     (i915_vm_has_scratch_64K(vma->vm) &&
		      !iter->sg && IS_ALIGNED(vma->node.start +
					      vma->node.size,
					      I915_GTT_PAGE_SIZE_2M)))) {
			vaddr = kmap_atomic_px(pd);
			vaddr[idx.pde] |= GEN8_PDE_IPS_64K;
			kunmap_atomic(vaddr);
			page_size = I915_GTT_PAGE_SIZE_64K;
		}

		vma->page_sizes.gtt |= page_size;
	} while (iter->sg);
}

static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
				   struct i915_vma *vma,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = sgt_dma(vma);
	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;

	if (vma->page_sizes.sg > I915_GTT_PAGE_SIZE) {
		gen8_ppgtt_insert_huge_entries(vma, pdps, &iter, cache_level);
	} else {
		struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);

		while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++],
						     &iter, &idx, cache_level))
			GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);

		vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
	}
}

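/*
 * Sketch of what the huge-page path above produces (illustrative, not taken
 * from a real trace): for a vma whose backing store offers a 2M-aligned,
 * 2M-sized chunk with the PTE index at 0, a single PDE is written with
 * GEN8_PDE_PS_2M set, mapping the whole 2M in one entry. A run that is only
 * 64K aligned is written as normal 4K PTEs first and, if the whole page
 * table ends up 64K compatible (index == max, or the tail is padded by the
 * 64K scratch page), the owning PDE is retagged with GEN8_PDE_IPS_64K.
 * Anything else falls back to plain 4K entries, and vma->page_sizes.gtt
 * accumulates every page size that was actually used.
 */
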
static void gen8_free_page_tables(struct i915_address_space *vm,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for (i = 0; i < I915_PDES; i++) {
		if (pd->page_table[i] != vm->scratch_pt)
			free_pt(vm, pd->page_table[i]);
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = setup_scratch_page(vm, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(vm);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (use_4lvl(vm)) {
		vm->scratch_pdp = alloc_pdp(vm);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (use_4lvl(vm))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(vm, vm->scratch_pd);
free_pt:
	free_pt(vm, vm->scratch_pt);
free_scratch_page:
	cleanup_scratch_page(vm);

	return ret;
}

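/*
 * The scratch hierarchy built above is self-referential: every level's
 * scratch entry points at the scratch object one level down, e.g. for a
 * 48b (4-level) address space:
 *
 *   scratch_pdp -> scratch_pd -> scratch_pt -> scratch_page
 *
 * so a read through any address that was never allocated resolves to the
 * (zeroed) scratch page instead of faulting.
 */
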
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	enum vgt_g2v_type msg;
	int i;

	if (use_4lvl(vm)) {
		const u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
	if (use_4lvl(vm))
		free_pdp(vm, vm->scratch_pdp);
	free_pd(vm, vm->scratch_pd);
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
}

static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	int i;

	for (i = 0; i < pdpes; i++) {
		if (pdp->page_directory[i] == vm->scratch_pd)
			continue;

		gen8_free_page_tables(vm, pdp->page_directory[i]);
		free_pd(vm, pdp->page_directory[i]);
	}

	free_pdp(vm, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
		if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
			continue;

		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(&ppgtt->base, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (use_4lvl(vm))
		gen8_ppgtt_cleanup_4lvl(ppgtt);
	else
		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);

	gen8_free_scratch(vm);
}

static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       u64 start, u64 length)
{
	struct i915_page_table *pt;
	u64 from = start;
	unsigned int pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		int count = gen8_pte_count(start, length);

		if (pt == vm->scratch_pt) {
			pt = alloc_pt(vm);
			if (IS_ERR(pt))
				goto unwind;

			if (count < GEN8_PTES || intel_vgpu_active(vm->i915))
				gen8_initialize_pt(vm, pt);

			gen8_ppgtt_set_pde(vm, pd, pt, pde);
			pd->used_pdes++;
			GEM_BUG_ON(pd->used_pdes > I915_PDES);
		}

		pt->used_ptes += count;
	}
	return 0;

unwind:
	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				u64 start, u64 length)
{
	struct i915_page_directory *pd;
	u64 from = start;
	unsigned int pdpe;
	int ret;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (pd == vm->scratch_pd) {
			pd = alloc_pd(vm);
			if (IS_ERR(pd))
				goto unwind;

			gen8_initialize_pd(vm, pd);
			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
			pdp->used_pdpes++;
			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));

			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
		}

		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
		if (unlikely(ret))
			goto unwind_pd;
	}

	return 0;

unwind_pd:
	if (!pd->used_pdes) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;
		free_pd(vm, pd);
	}
unwind:
	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	return gen8_ppgtt_alloc_pdp(vm,
				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	u64 from = start;
	u32 pml4e;
	int ret;

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
			pdp = alloc_pdp(vm);
			if (IS_ERR(pdp))
				goto unwind;

			gen8_initialize_pdp(vm, pdp);
			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
		}

		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
		if (unlikely(ret))
			goto unwind_pdp;
	}

	return 0;

unwind_pdp:
	if (!pdp->used_pdpes) {
		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
		free_pdp(vm, pdp);
	}
unwind:
	gen8_ppgtt_clear_4lvl(vm, from, start - from);
	return -ENOMEM;
}

static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  u64 start, u64 length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory *pd;
	u32 pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		u64 pd_len = length;
		u64 pd_start = start;
		u32 pde;

		if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			u32 pte;
			gen8_pte_t *pt_vaddr;

			if (pd->page_table[pde] == ppgtt->base.scratch_pt)
				continue;

			pt_vaddr = kmap_atomic_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				u64 va = (pdpe << GEN8_PDPE_SHIFT |
					  pde << GEN8_PDE_SHIFT |
					  pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	u64 start = 0, length = ppgtt->base.total;

	if (use_4lvl(vm)) {
		u64 pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
				continue;

			seq_printf(m, " PML4E #%llu\n", pml4e);
			gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
		}
	} else {
		gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
	}
}

static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
	struct i915_page_directory *pd;
	u64 start = 0, length = ppgtt->base.total;
	u64 from = start;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		pd = alloc_pd(vm);
		if (IS_ERR(pd))
			goto unwind;

		gen8_initialize_pd(vm, pd);
		gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
		pdp->used_pdpes++;
	}

	pdp->used_pdpes++; /* never remove */
	return 0;

unwind:
	start -= from;
	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		free_pd(vm, pd);
	}
	pdp->used_pdpes = 0;
	return -ENOMEM;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 *
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	int ret;

	ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
		1ULL << 48 :
		1ULL << 32;

	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
		ppgtt->base.pt_kmap_wc = true;

	ret = gen8_init_scratch(&ppgtt->base);
	if (ret) {
		ppgtt->base.total = 0;
		return ret;
	}

	if (use_4lvl(vm)) {
		ret = setup_px(&ppgtt->base, &ppgtt->pml4);
		if (ret)
			goto free_scratch;

		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

		ppgtt->switch_mm = gen8_mm_switch_4lvl;
		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
		ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
		ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
	} else {
		ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
		if (ret)
			goto free_scratch;

		if (intel_vgpu_active(dev_priv)) {
			ret = gen8_preallocate_top_level_pdp(ppgtt);
			if (ret) {
				__pdp_fini(&ppgtt->pdp);
				goto free_scratch;
			}
		}

		ppgtt->switch_mm = gen8_mm_switch_3lvl;
		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
		ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
		ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
	}

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->base.set_pages = ppgtt_set_pages;
	ppgtt->base.clear_pages = clear_pages;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	return 0;

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
}

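/*
 * Size arithmetic for the two layouts selected above: one page table maps
 * 512 * 4K = 2M and one page directory maps 512 * 2M = 1G. The 3-level
 * layout uses up to 4 PDPEs for 4G in total (the 1ULL << 32 case), while the
 * 4-level layout has 512 PML4 entries of 512G each, i.e. 256T
 * (the 1ULL << 48 case).
 */
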
static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_table *unused;
	gen6_pte_t scratch_pte;
	u32 pd_entry, pte, pde;
	u32 start = 0, length = ppgtt->base.total;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
		pd_entry = readl(ppgtt->pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, " SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_atomic(pt_vaddr);
	}
}

/* Write pde (index) from the page directory @pd to the page table @pt */
static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
				  const unsigned int pde,
				  const struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
		       ppgtt->pd_addr + pde);
}

/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
				  u32 start, u32 length)
{
	struct i915_page_table *pt;
	unsigned int pde;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
		gen6_write_pde(ppgtt, pde, pt);

	mark_tlbs_dirty(ppgtt);
	wmb();
}

static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
	return ppgtt->pd.base.ggtt_offset << 10;
}

static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	/* NB: TLBs must be flushed and invalidated before a switch */
	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = get_pd_offset(ppgtt);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	/* NB: TLBs must be flushed and invalidated before a switch */
	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = get_pd_offset(ppgtt);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = req->i915;

	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
	return 0;
}

static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
				 GEN8_GFX_PPGTT_48B : 0;
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
	}
}

static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	u32 ecochk, ecobits;
	enum intel_engine_id id;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev_priv)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_engine(engine, dev_priv, id) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	u32 ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

1d2a314c 1823/* PPGTT support for Sandybridge/Gen6 and later */
853ba5d2 1824static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
dd19674b 1825 u64 start, u64 length)
1d2a314c 1826{
e5716f55 1827 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
dd19674b
CW
1828 unsigned int first_entry = start >> PAGE_SHIFT;
1829 unsigned int pde = first_entry / GEN6_PTES;
1830 unsigned int pte = first_entry % GEN6_PTES;
1831 unsigned int num_entries = length >> PAGE_SHIFT;
1832 gen6_pte_t scratch_pte =
1833 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
1d2a314c 1834
7bddb01f 1835 while (num_entries) {
dd19674b
CW
1836 struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
1837 unsigned int end = min(pte + num_entries, GEN6_PTES);
1838 gen6_pte_t *vaddr;
7bddb01f 1839
dd19674b 1840 num_entries -= end - pte;
1d2a314c 1841
dd19674b
CW
1842 /* Note that the hw doesn't support removing PDEs on the fly
1843 * (they are cached inside the context with no means to
1844 * invalidate the cache), so we can only reset the PTE
1845 * entries back to scratch.
1846 */
1d2a314c 1847
dd19674b
CW
1848 vaddr = kmap_atomic_px(pt);
1849 do {
1850 vaddr[pte++] = scratch_pte;
1851 } while (pte < end);
1852 kunmap_atomic(vaddr);
1d2a314c 1853
dd19674b 1854 pte = 0;
7bddb01f 1855 }
1d2a314c
DV
1856}
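/*
 * Worked example (editorial note, assuming GEN6_PTES == 1024 and 4 KiB
 * pages): clearing start = 8 MiB, length = 8 KiB decomposes as
 *
 *   first_entry = 0x800000 >> 12 = 2048
 *   pde         = 2048 / 1024   = 2
 *   pte         = 2048 % 1024   = 0
 *   num_entries = 0x2000 >> 12  = 2
 *
 * i.e. the first two PTEs of pd.page_table[2] are reset to scratch_pte.
 */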
1857
853ba5d2 1858static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
4a234c5f 1859 struct i915_vma *vma,
75c7b0b8
CW
1860 enum i915_cache_level cache_level,
1861 u32 flags)
def886c3 1862{
e5716f55 1863 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
4a234c5f 1864 unsigned first_entry = vma->node.start >> PAGE_SHIFT;
07749ef3
MT
1865 unsigned act_pt = first_entry / GEN6_PTES;
1866 unsigned act_pte = first_entry % GEN6_PTES;
b31144c0 1867 const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
423a8a94 1868 struct sgt_dma iter = sgt_dma(vma);
b31144c0
CW
1869 gen6_pte_t *vaddr;
1870
9231da70 1871 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
b31144c0
CW
1872 do {
1873 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
6e995e23 1874
b31144c0
CW
1875 iter.dma += PAGE_SIZE;
1876 if (iter.dma == iter.max) {
1877 iter.sg = __sg_next(iter.sg);
1878 if (!iter.sg)
1879 break;
6e995e23 1880
b31144c0
CW
1881 iter.dma = sg_dma_address(iter.sg);
1882 iter.max = iter.dma + iter.sg->length;
1883 }
24f3a8cf 1884
07749ef3 1885 if (++act_pte == GEN6_PTES) {
9231da70
CW
1886 kunmap_atomic(vaddr);
1887 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
6e995e23 1888 act_pte = 0;
def886c3 1889 }
b31144c0 1890 } while (1);
9231da70 1891 kunmap_atomic(vaddr);
d9ec12f8
MA
1892
1893 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
def886c3
DV
1894}
1895
678d96fb 1896static int gen6_alloc_va_range(struct i915_address_space *vm,
dd19674b 1897 u64 start, u64 length)
678d96fb 1898{
e5716f55 1899 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
ec565b3c 1900 struct i915_page_table *pt;
dd19674b
CW
1901 u64 from = start;
1902 unsigned int pde;
1903 bool flush = false;
4933d519 1904
731f74c5 1905 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
dd19674b
CW
1906 if (pt == vm->scratch_pt) {
1907 pt = alloc_pt(vm);
1908 if (IS_ERR(pt))
1909 goto unwind_out;
4933d519 1910
dd19674b
CW
1911 gen6_initialize_pt(vm, pt);
1912 ppgtt->pd.page_table[pde] = pt;
1913 gen6_write_pde(ppgtt, pde, pt);
1914 flush = true;
4933d519 1915 }
4933d519
MT
1916 }
1917
dd19674b
CW
1918 if (flush) {
1919 mark_tlbs_dirty(ppgtt);
1920 wmb();
678d96fb
BW
1921 }
1922
1923 return 0;
4933d519
MT
1924
1925unwind_out:
dd19674b
CW
1926 gen6_ppgtt_clear_range(vm, from, start);
1927 return -ENOMEM;
678d96fb
BW
1928}
1929
8776f02b
MK
1930static int gen6_init_scratch(struct i915_address_space *vm)
1931{
8bcdd0f7 1932 int ret;
8776f02b 1933
8448661d 1934 ret = setup_scratch_page(vm, I915_GFP_DMA);
8bcdd0f7
CW
1935 if (ret)
1936 return ret;
8776f02b 1937
8448661d 1938 vm->scratch_pt = alloc_pt(vm);
8776f02b 1939 if (IS_ERR(vm->scratch_pt)) {
8448661d 1940 cleanup_scratch_page(vm);
8776f02b
MK
1941 return PTR_ERR(vm->scratch_pt);
1942 }
1943
1944 gen6_initialize_pt(vm, vm->scratch_pt);
1945
1946 return 0;
1947}
1948
1949static void gen6_free_scratch(struct i915_address_space *vm)
1950{
8448661d
CW
1951 free_pt(vm, vm->scratch_pt);
1952 cleanup_scratch_page(vm);
8776f02b
MK
1953}
1954
061dd493 1955static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
a00d825d 1956{
e5716f55 1957 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
731f74c5 1958 struct i915_page_directory *pd = &ppgtt->pd;
09942c65 1959 struct i915_page_table *pt;
75c7b0b8 1960 u32 pde;
4933d519 1961
061dd493
DV
1962 drm_mm_remove_node(&ppgtt->node);
1963
731f74c5 1964 gen6_for_all_pdes(pt, pd, pde)
79ab9370 1965 if (pt != vm->scratch_pt)
8448661d 1966 free_pt(vm, pt);
06fda602 1967
8776f02b 1968 gen6_free_scratch(vm);
3440d265
DV
1969}
1970
b146520f 1971static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
3440d265 1972{
8776f02b 1973 struct i915_address_space *vm = &ppgtt->base;
49d73912 1974 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 1975 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f 1976 int ret;
1d2a314c 1977
c8d4c0d6
BW
1978 /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1979 * allocator works in address space sizes, so it's multiplied by page
1980 * size. We allocate at the top of the GTT to avoid fragmentation.
1981 */
72e96d64 1982 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
4933d519 1983
8776f02b
MK
1984 ret = gen6_init_scratch(vm);
1985 if (ret)
1986 return ret;
4933d519 1987
e007b19d
CW
1988 ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
1989 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1990 I915_COLOR_UNEVICTABLE,
1991 0, ggtt->base.total,
1992 PIN_HIGH);
c8c26622 1993 if (ret)
678d96fb
BW
1994 goto err_out;
1995
72e96d64 1996 if (ppgtt->node.start < ggtt->mappable_end)
c8d4c0d6 1997 DRM_DEBUG("Forced to use aperture for PDEs\n");
1d2a314c 1998
52c126ee
CW
1999 ppgtt->pd.base.ggtt_offset =
2000 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
2001
2002 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
2003 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
2004
c8c26622 2005 return 0;
678d96fb
BW
2006
2007err_out:
8776f02b 2008 gen6_free_scratch(vm);
678d96fb 2009 return ret;
b146520f
BW
2010}
2011
b146520f
BW
2012static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2013{
2f2cf682 2014 return gen6_ppgtt_allocate_page_directories(ppgtt);
4933d519 2015}
06dc68d6 2016
4933d519 2017static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
75c7b0b8 2018 u64 start, u64 length)
4933d519 2019{
ec565b3c 2020 struct i915_page_table *unused;
75c7b0b8 2021 u32 pde;
1d2a314c 2022
731f74c5 2023 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
79ab9370 2024 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
b146520f
BW
2025}
2026
5c5f6457 2027static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
b146520f 2028{
49d73912 2029 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 2030 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f
BW
2031 int ret;
2032
72e96d64 2033 ppgtt->base.pte_encode = ggtt->base.pte_encode;
5db94019 2034 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
b146520f 2035 ppgtt->switch_mm = gen6_mm_switch;
772c2a51 2036 else if (IS_HASWELL(dev_priv))
b146520f 2037 ppgtt->switch_mm = hsw_mm_switch;
5db94019 2038 else if (IS_GEN7(dev_priv))
b146520f 2039 ppgtt->switch_mm = gen7_mm_switch;
8eb95204 2040 else
b146520f
BW
2041 BUG();
2042
2043 ret = gen6_ppgtt_alloc(ppgtt);
2044 if (ret)
2045 return ret;
2046
09942c65 2047 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
1d2a314c 2048
5c5f6457 2049 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
16a011c8 2050 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
678d96fb 2051
52c126ee
CW
2052 ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
2053 if (ret) {
2054 gen6_ppgtt_cleanup(&ppgtt->base);
2055 return ret;
2056 }
2057
054b9acd
MK
2058 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2059 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
2060 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2061 ppgtt->base.bind_vma = ppgtt_bind_vma;
fa3f46af
MA
2062 ppgtt->base.set_pages = ppgtt_set_pages;
2063 ppgtt->base.clear_pages = clear_pages;
054b9acd
MK
2064 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
2065 ppgtt->debug_dump = gen6_dump_ppgtt;
2066
440fd528 2067 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
b146520f
BW
2068 ppgtt->node.size >> 20,
2069 ppgtt->node.start / PAGE_SIZE);
3440d265 2070
52c126ee
CW
2071 DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
2072 ppgtt->pd.base.ggtt_offset << 10);
fa76da34 2073
b146520f 2074 return 0;
3440d265
DV
2075}
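/*
 * Size check (editorial note, assuming I915_PDES == 512, GEN6_PTES == 1024
 * and 4 KiB pages): ppgtt->base.total above works out to
 * 512 * 1024 * 4096 = 2 GiB of per-process address space on gen6/gen7.
 */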
2076
2bfa996e
CW
2077static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
2078 struct drm_i915_private *dev_priv)
3440d265 2079{
49d73912 2080 ppgtt->base.i915 = dev_priv;
8448661d 2081 ppgtt->base.dma = &dev_priv->drm.pdev->dev;
3440d265 2082
2bfa996e 2083 if (INTEL_INFO(dev_priv)->gen < 8)
5c5f6457 2084 return gen6_ppgtt_init(ppgtt);
3ed124b2 2085 else
d7b2633d 2086 return gen8_ppgtt_init(ppgtt);
fa76da34 2087}
c114f76a 2088
a2cad9df 2089static void i915_address_space_init(struct i915_address_space *vm,
80b204bc
CW
2090 struct drm_i915_private *dev_priv,
2091 const char *name)
a2cad9df 2092{
80b204bc 2093 i915_gem_timeline_init(dev_priv, &vm->timeline, name);
47db922f 2094
381b943b 2095 drm_mm_init(&vm->mm, 0, vm->total);
47db922f
CW
2096 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
2097
a2cad9df
MW
2098 INIT_LIST_HEAD(&vm->active_list);
2099 INIT_LIST_HEAD(&vm->inactive_list);
50e046b6 2100 INIT_LIST_HEAD(&vm->unbound_list);
47db922f 2101
a2cad9df 2102 list_add_tail(&vm->global_link, &dev_priv->vm_list);
86679820 2103 pagevec_init(&vm->free_pages);
a2cad9df
MW
2104}
2105
ed9724dd
MA
2106static void i915_address_space_fini(struct i915_address_space *vm)
2107{
8448661d 2108 if (pagevec_count(&vm->free_pages))
66df1014 2109 vm_free_pages_release(vm, true);
8448661d 2110
ed9724dd
MA
2111 i915_gem_timeline_fini(&vm->timeline);
2112 drm_mm_takedown(&vm->mm);
2113 list_del(&vm->global_link);
2114}
2115
c6be607a 2116static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
d5165ebd 2117{
d5165ebd
TG
2118 /* This function is for GTT-related workarounds. It is called on driver
2119 * load and after a GPU reset, so workarounds placed here are reapplied
2120 * even if a GPU reset overwrites them.
2121 */
90007bca 2122 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl,cnl */
8652744b 2123 if (IS_BROADWELL(dev_priv))
d5165ebd 2124 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
920a14b2 2125 else if (IS_CHERRYVIEW(dev_priv))
d5165ebd 2126 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
90007bca 2127 else if (IS_GEN9_BC(dev_priv) || IS_GEN10(dev_priv))
d5165ebd 2128 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
9fb5026f 2129 else if (IS_GEN9_LP(dev_priv))
d5165ebd 2130 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
9a6330cf
MA
2131
2132 /*
2133 * To support 64K PTEs we need to first enable the use of the
2134 * Intermediate-Page-Size(IPS) bit of the PDE field via some magical
2135 * mmio, otherwise the page-walker will simply ignore the IPS bit. This
2136 * shouldn't be needed after GEN10.
2137 *
2138 * 64K pages were first introduced from BDW+, although technically they
2139 * only *work* from gen9+. For pre-BDW we instead have the option for
2140 * 32K pages, but we don't currently have any support for it in our
2141 * driver.
2142 */
2143 if (HAS_PAGE_SIZES(dev_priv, I915_GTT_PAGE_SIZE_64K) &&
2144 INTEL_GEN(dev_priv) <= 10)
2145 I915_WRITE(GEN8_GAMW_ECO_DEV_RW_IA,
2146 I915_READ(GEN8_GAMW_ECO_DEV_RW_IA) |
2147 GAMW_ECO_ENABLE_64K_IPS_FIELD);
d5165ebd
TG
2148}
2149
c6be607a 2150int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
82460d97 2151{
c6be607a 2152 gtt_write_workarounds(dev_priv);
d5165ebd 2153
671b5013
TD
2154 /* In the case of execlists, PPGTT is enabled by the context descriptor
2155 * and the PDPs are contained within the context itself. We don't
2156 * need to do anything here. */
4f044a88 2157 if (i915_modparams.enable_execlists)
671b5013
TD
2158 return 0;
2159
c6be607a 2160 if (!USES_PPGTT(dev_priv))
82460d97
DV
2161 return 0;
2162
5db94019 2163 if (IS_GEN6(dev_priv))
c6be607a 2164 gen6_ppgtt_enable(dev_priv);
5db94019 2165 else if (IS_GEN7(dev_priv))
c6be607a
TU
2166 gen7_ppgtt_enable(dev_priv);
2167 else if (INTEL_GEN(dev_priv) >= 8)
2168 gen8_ppgtt_enable(dev_priv);
82460d97 2169 else
c6be607a 2170 MISSING_CASE(INTEL_GEN(dev_priv));
82460d97 2171
4ad2fd88
JH
2172 return 0;
2173}
1d2a314c 2174
4d884705 2175struct i915_hw_ppgtt *
2bfa996e 2176i915_ppgtt_create(struct drm_i915_private *dev_priv,
80b204bc
CW
2177 struct drm_i915_file_private *fpriv,
2178 const char *name)
4d884705
DV
2179{
2180 struct i915_hw_ppgtt *ppgtt;
2181 int ret;
2182
2183 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2184 if (!ppgtt)
2185 return ERR_PTR(-ENOMEM);
2186
1188bc66 2187 ret = __hw_ppgtt_init(ppgtt, dev_priv);
4d884705
DV
2188 if (ret) {
2189 kfree(ppgtt);
2190 return ERR_PTR(ret);
2191 }
2192
1188bc66
CW
2193 kref_init(&ppgtt->ref);
2194 i915_address_space_init(&ppgtt->base, dev_priv, name);
2195 ppgtt->base.file = fpriv;
2196
198c974d
DCS
2197 trace_i915_ppgtt_create(&ppgtt->base);
2198
4d884705
DV
2199 return ppgtt;
2200}
2201
0c7eeda1
CW
2202void i915_ppgtt_close(struct i915_address_space *vm)
2203{
2204 struct list_head *phases[] = {
2205 &vm->active_list,
2206 &vm->inactive_list,
2207 &vm->unbound_list,
2208 NULL,
2209 }, **phase;
2210
2211 GEM_BUG_ON(vm->closed);
2212 vm->closed = true;
2213
2214 for (phase = phases; *phase; phase++) {
2215 struct i915_vma *vma, *vn;
2216
2217 list_for_each_entry_safe(vma, vn, *phase, vm_link)
2218 if (!i915_vma_is_closed(vma))
2219 i915_vma_close(vma);
2220 }
2221}
2222
ed9724dd 2223void i915_ppgtt_release(struct kref *kref)
ee960be7
DV
2224{
2225 struct i915_hw_ppgtt *ppgtt =
2226 container_of(kref, struct i915_hw_ppgtt, ref);
2227
198c974d
DCS
2228 trace_i915_ppgtt_release(&ppgtt->base);
2229
50e046b6 2230 /* vmas should already be unbound and destroyed */
ee960be7
DV
2231 WARN_ON(!list_empty(&ppgtt->base.active_list));
2232 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
50e046b6 2233 WARN_ON(!list_empty(&ppgtt->base.unbound_list));
ee960be7
DV
2234
2235 ppgtt->base.cleanup(&ppgtt->base);
8448661d 2236 i915_address_space_fini(&ppgtt->base);
ee960be7
DV
2237 kfree(ppgtt);
2238}
1d2a314c 2239
a81cc00c
BW
2240/* Certain Gen5 chipsets require idling the GPU before
2241 * unmapping anything from the GTT when VT-d is enabled.
2242 */
97d6d7ab 2243static bool needs_idle_maps(struct drm_i915_private *dev_priv)
a81cc00c 2244{
a81cc00c
BW
2245 /* Query intel_iommu to see if we need the workaround. Presumably that
2246 * was loaded first.
2247 */
80debff8 2248 return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
a81cc00c
BW
2249}
2250
dc97997a 2251void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
828c7908 2252{
e2f80391 2253 struct intel_engine_cs *engine;
3b3f1650 2254 enum intel_engine_id id;
828c7908 2255
dc97997a 2256 if (INTEL_INFO(dev_priv)->gen < 6)
828c7908
BW
2257 return;
2258
3b3f1650 2259 for_each_engine(engine, dev_priv, id) {
828c7908 2260 u32 fault_reg;
e2f80391 2261 fault_reg = I915_READ(RING_FAULT_REG(engine));
828c7908
BW
2262 if (fault_reg & RING_FAULT_VALID) {
2263 DRM_DEBUG_DRIVER("Unexpected fault\n"
59a5d290 2264 "\tAddr: 0x%08lx\n"
828c7908
BW
2265 "\tAddress space: %s\n"
2266 "\tSource ID: %d\n"
2267 "\tType: %d\n",
2268 fault_reg & PAGE_MASK,
2269 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2270 RING_FAULT_SRCID(fault_reg),
2271 RING_FAULT_FAULT_TYPE(fault_reg));
e2f80391 2272 I915_WRITE(RING_FAULT_REG(engine),
828c7908
BW
2273 fault_reg & ~RING_FAULT_VALID);
2274 }
2275 }
3b3f1650
AG
2276
2277 /* Engine specific init may not have been done till this point. */
2278 if (dev_priv->engine[RCS])
2279 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
828c7908
BW
2280}
2281
275a991c 2282void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
828c7908 2283{
72e96d64 2284 struct i915_ggtt *ggtt = &dev_priv->ggtt;
828c7908
BW
2285
2286 /* Don't bother messing with faults pre GEN6 as we have little
2287 * documentation supporting that it's a good idea.
2288 */
275a991c 2289 if (INTEL_GEN(dev_priv) < 6)
828c7908
BW
2290 return;
2291
dc97997a 2292 i915_check_and_clear_faults(dev_priv);
828c7908 2293
381b943b 2294 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
91e56499 2295
7c3f86b6 2296 i915_ggtt_invalidate(dev_priv);
828c7908
BW
2297}
2298
03ac84f1
CW
2299int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2300 struct sg_table *pages)
7c2e6fdf 2301{
1a292fa5
CW
2302 do {
2303 if (dma_map_sg(&obj->base.dev->pdev->dev,
2304 pages->sgl, pages->nents,
2305 PCI_DMA_BIDIRECTIONAL))
2306 return 0;
2307
2308 /* If the DMA remap fails, one cause can be that we have
2309 * too many objects pinned in a small remapping table,
2310 * such as swiotlb. Incrementally purge all other objects and
2311 * try again - if there are no more pages to remove from
2312 * the DMA remapper, i915_gem_shrink will return 0.
2313 */
2314 GEM_BUG_ON(obj->mm.pages == pages);
2315 } while (i915_gem_shrink(to_i915(obj->base.dev),
912d572d 2316 obj->base.size >> PAGE_SHIFT, NULL,
1a292fa5
CW
2317 I915_SHRINK_BOUND |
2318 I915_SHRINK_UNBOUND |
2319 I915_SHRINK_ACTIVE));
9da3da66 2320
03ac84f1 2321 return -ENOSPC;
7c2e6fdf
DV
2322}
2323
2c642b07 2324static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
94ec8f61 2325{
94ec8f61 2326 writeq(pte, addr);
94ec8f61
BW
2327}
2328
d6473f56
CW
2329static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2330 dma_addr_t addr,
75c7b0b8 2331 u64 offset,
d6473f56
CW
2332 enum i915_cache_level level,
2333 u32 unused)
2334{
7c3f86b6 2335 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2336 gen8_pte_t __iomem *pte =
7c3f86b6 2337 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2338
4fb84d99 2339 gen8_set_pte(pte, gen8_pte_encode(addr, level));
d6473f56 2340
7c3f86b6 2341 ggtt->invalidate(vm->i915);
d6473f56
CW
2342}
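/*
 * Worked example (editorial note, assuming 4 KiB pages and 8-byte gen8
 * PTEs): for offset = 0x10000 the PTE index is 0x10000 >> 12 = 16, so the
 * write above lands 16 * 8 = 128 bytes into the GSM mapping before the
 * GGTT TLBs are invalidated.
 */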
2343
94ec8f61 2344static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2345 struct i915_vma *vma,
75c7b0b8
CW
2346 enum i915_cache_level level,
2347 u32 unused)
94ec8f61 2348{
ce7fda2e 2349 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
85d1225e
DG
2350 struct sgt_iter sgt_iter;
2351 gen8_pte_t __iomem *gtt_entries;
894ccebe 2352 const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
85d1225e 2353 dma_addr_t addr;
be69459a 2354
894ccebe 2355 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
4a234c5f
MA
2356 gtt_entries += vma->node.start >> PAGE_SHIFT;
2357 for_each_sgt_dma(addr, sgt_iter, vma->pages)
894ccebe 2358 gen8_set_pte(gtt_entries++, pte_encode | addr);
85d1225e 2359
894ccebe 2360 wmb();
94ec8f61 2361
94ec8f61
BW
2362 /* This next bit makes the above posting read even more important. We
2363 * want to flush the TLBs only after we're certain all the PTE updates
2364 * have finished.
2365 */
7c3f86b6 2366 ggtt->invalidate(vm->i915);
94ec8f61
BW
2367}
2368
d6473f56
CW
2369static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2370 dma_addr_t addr,
75c7b0b8 2371 u64 offset,
d6473f56
CW
2372 enum i915_cache_level level,
2373 u32 flags)
2374{
7c3f86b6 2375 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2376 gen6_pte_t __iomem *pte =
7c3f86b6 2377 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2378
4fb84d99 2379 iowrite32(vm->pte_encode(addr, level, flags), pte);
d6473f56 2380
7c3f86b6 2381 ggtt->invalidate(vm->i915);
d6473f56
CW
2382}
2383
e76e9aeb
BW
2384/*
2385 * Binds an object into the global gtt with the specified cache level. The object
2386 * will be accessible to the GPU via commands whose operands reference offsets
2387 * within the global GTT as well as accessible by the GPU through the GMADR
2388 * mapped BAR (dev_priv->mm.gtt->gtt).
2389 */
853ba5d2 2390static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2391 struct i915_vma *vma,
75c7b0b8
CW
2392 enum i915_cache_level level,
2393 u32 flags)
e76e9aeb 2394{
ce7fda2e 2395 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
b31144c0 2396 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
4a234c5f 2397 unsigned int i = vma->node.start >> PAGE_SHIFT;
b31144c0 2398 struct sgt_iter iter;
85d1225e 2399 dma_addr_t addr;
4a234c5f 2400 for_each_sgt_dma(addr, iter, vma->pages)
b31144c0
CW
2401 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2402 wmb();
0f9b91c7
BW
2403
2404 /* This next bit makes the above posting read even more important. We
2405 * want to flush the TLBs only after we're certain all the PTE updates
2406 * have finished.
2407 */
7c3f86b6 2408 ggtt->invalidate(vm->i915);
e76e9aeb
BW
2409}
2410
f7770bfd 2411static void nop_clear_range(struct i915_address_space *vm,
75c7b0b8 2412 u64 start, u64 length)
f7770bfd
CW
2413{
2414}
2415
94ec8f61 2416static void gen8_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2417 u64 start, u64 length)
94ec8f61 2418{
ce7fda2e 2419 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2420 unsigned first_entry = start >> PAGE_SHIFT;
2421 unsigned num_entries = length >> PAGE_SHIFT;
894ccebe
CW
2422 const gen8_pte_t scratch_pte =
2423 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2424 gen8_pte_t __iomem *gtt_base =
72e96d64
JL
2425 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2426 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
94ec8f61
BW
2427 int i;
2428
2429 if (WARN(num_entries > max_entries,
2430 "First entry = %d; Num entries = %d (max=%d)\n",
2431 first_entry, num_entries, max_entries))
2432 num_entries = max_entries;
2433
94ec8f61
BW
2434 for (i = 0; i < num_entries; i++)
2435 gen8_set_pte(&gtt_base[i], scratch_pte);
94ec8f61
BW
2436}
2437
0ef34ad6
JB
2438static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2439{
2440 struct drm_i915_private *dev_priv = vm->i915;
2441
2442 /*
2443 * Make sure the internal GAM fifo has been cleared of all GTT
2444 * writes before exiting stop_machine(). This guarantees that
2445 * any aperture accesses waiting to start in another process
2446 * cannot back up behind the GTT writes causing a hang.
2447 * The register can be any arbitrary GAM register.
2448 */
2449 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2450}
2451
2452struct insert_page {
2453 struct i915_address_space *vm;
2454 dma_addr_t addr;
2455 u64 offset;
2456 enum i915_cache_level level;
2457};
2458
2459static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2460{
2461 struct insert_page *arg = _arg;
2462
2463 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2464 bxt_vtd_ggtt_wa(arg->vm);
2465
2466 return 0;
2467}
2468
2469static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2470 dma_addr_t addr,
2471 u64 offset,
2472 enum i915_cache_level level,
2473 u32 unused)
2474{
2475 struct insert_page arg = { vm, addr, offset, level };
2476
2477 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2478}
2479
2480struct insert_entries {
2481 struct i915_address_space *vm;
4a234c5f 2482 struct i915_vma *vma;
0ef34ad6
JB
2483 enum i915_cache_level level;
2484};
2485
2486static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2487{
2488 struct insert_entries *arg = _arg;
2489
4a234c5f 2490 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
0ef34ad6
JB
2491 bxt_vtd_ggtt_wa(arg->vm);
2492
2493 return 0;
2494}
2495
2496static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
4a234c5f 2497 struct i915_vma *vma,
0ef34ad6
JB
2498 enum i915_cache_level level,
2499 u32 unused)
2500{
17369ba0 2501 struct insert_entries arg = { vm, vma, level };
0ef34ad6
JB
2502
2503 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2504}
2505
2506struct clear_range {
2507 struct i915_address_space *vm;
2508 u64 start;
2509 u64 length;
2510};
2511
2512static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2513{
2514 struct clear_range *arg = _arg;
2515
2516 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2517 bxt_vtd_ggtt_wa(arg->vm);
2518
2519 return 0;
2520}
2521
2522static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2523 u64 start,
2524 u64 length)
2525{
2526 struct clear_range arg = { vm, start, length };
2527
2528 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2529}
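/*
 * Editorial note on the pattern above (interpretation, not original text):
 * the three __BKL wrappers funnel every GGTT update through stop_machine(),
 * which runs the callback while all other CPUs are held quiescent, and
 * bxt_vtd_ggtt_wa() then posts a read to drain the GAM fifo before those
 * CPUs resume, as its own comment explains.
 */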
2530
853ba5d2 2531static void gen6_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2532 u64 start, u64 length)
7faf1ab2 2533{
ce7fda2e 2534 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2535 unsigned first_entry = start >> PAGE_SHIFT;
2536 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2537 gen6_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2538 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2539 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
7faf1ab2
DV
2540 int i;
2541
2542 if (WARN(num_entries > max_entries,
2543 "First entry = %d; Num entries = %d (max=%d)\n",
2544 first_entry, num_entries, max_entries))
2545 num_entries = max_entries;
2546
8bcdd0f7 2547 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
4fb84d99 2548 I915_CACHE_LLC, 0);
828c7908 2549
7faf1ab2
DV
2550 for (i = 0; i < num_entries; i++)
2551 iowrite32(scratch_pte, &gtt_base[i]);
7faf1ab2
DV
2552}
2553
d6473f56
CW
2554static void i915_ggtt_insert_page(struct i915_address_space *vm,
2555 dma_addr_t addr,
75c7b0b8 2556 u64 offset,
d6473f56
CW
2557 enum i915_cache_level cache_level,
2558 u32 unused)
2559{
d6473f56
CW
2560 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2561 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
d6473f56
CW
2562
2563 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
d6473f56
CW
2564}
2565
d369d2d9 2566static void i915_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2567 struct i915_vma *vma,
75c7b0b8
CW
2568 enum i915_cache_level cache_level,
2569 u32 unused)
7faf1ab2
DV
2570{
2571 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2572 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2573
4a234c5f
MA
2574 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2575 flags);
7faf1ab2
DV
2576}
2577
853ba5d2 2578static void i915_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2579 u64 start, u64 length)
7faf1ab2 2580{
2eedfc7d 2581 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
7faf1ab2
DV
2582}
2583
70b9f6f8
DV
2584static int ggtt_bind_vma(struct i915_vma *vma,
2585 enum i915_cache_level cache_level,
2586 u32 flags)
0a878716 2587{
49d73912 2588 struct drm_i915_private *i915 = vma->vm->i915;
0a878716 2589 struct drm_i915_gem_object *obj = vma->obj;
ba7a5741 2590 u32 pte_flags;
0a878716 2591
0a878716 2592 /* Currently applicable only to VLV */
ba7a5741 2593 pte_flags = 0;
0a878716
DV
2594 if (obj->gt_ro)
2595 pte_flags |= PTE_READ_ONLY;
2596
9c870d03 2597 intel_runtime_pm_get(i915);
4a234c5f 2598 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
9c870d03 2599 intel_runtime_pm_put(i915);
0a878716 2600
d9ec12f8
MA
2601 vma->page_sizes.gtt = I915_GTT_PAGE_SIZE;
2602
0a878716
DV
2603 /*
2604 * Without aliasing PPGTT there's no difference between
2605 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2606 * upgrade to both bound if we bind either to avoid double-binding.
2607 */
3272db53 2608 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
0a878716
DV
2609
2610 return 0;
2611}
2612
cbc4e9e6
CW
2613static void ggtt_unbind_vma(struct i915_vma *vma)
2614{
2615 struct drm_i915_private *i915 = vma->vm->i915;
2616
2617 intel_runtime_pm_get(i915);
2618 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2619 intel_runtime_pm_put(i915);
2620}
2621
0a878716
DV
2622static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2623 enum i915_cache_level cache_level,
2624 u32 flags)
d5bd1449 2625{
49d73912 2626 struct drm_i915_private *i915 = vma->vm->i915;
321d178e 2627 u32 pte_flags;
ff685975 2628 int ret;
70b9f6f8 2629
24f3a8cf 2630 /* Currently applicable only to VLV */
321d178e
CW
2631 pte_flags = 0;
2632 if (vma->obj->gt_ro)
f329f5f6 2633 pte_flags |= PTE_READ_ONLY;
24f3a8cf 2634
ff685975
CW
2635 if (flags & I915_VMA_LOCAL_BIND) {
2636 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2637
1f23475c
MA
2638 if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2639 appgtt->base.allocate_va_range) {
ff685975
CW
2640 ret = appgtt->base.allocate_va_range(&appgtt->base,
2641 vma->node.start,
d567232c 2642 vma->size);
ff685975 2643 if (ret)
fa3f46af 2644 return ret;
ff685975
CW
2645 }
2646
4a234c5f
MA
2647 appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
2648 pte_flags);
ff685975
CW
2649 }
2650
3272db53 2651 if (flags & I915_VMA_GLOBAL_BIND) {
9c870d03 2652 intel_runtime_pm_get(i915);
4a234c5f 2653 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
9c870d03 2654 intel_runtime_pm_put(i915);
6f65e29a 2655 }
d5bd1449 2656
70b9f6f8 2657 return 0;
d5bd1449
CW
2658}
2659
cbc4e9e6 2660static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
74163907 2661{
49d73912 2662 struct drm_i915_private *i915 = vma->vm->i915;
6f65e29a 2663
9c870d03
CW
2664 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2665 intel_runtime_pm_get(i915);
cbc4e9e6 2666 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
9c870d03
CW
2667 intel_runtime_pm_put(i915);
2668 }
06615ee5 2669
cbc4e9e6
CW
2670 if (vma->flags & I915_VMA_LOCAL_BIND) {
2671 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2672
2673 vm->clear_range(vm, vma->node.start, vma->size);
2674 }
74163907
DV
2675}
2676
03ac84f1
CW
2677void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2678 struct sg_table *pages)
7c2e6fdf 2679{
52a05c30
DW
2680 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2681 struct device *kdev = &dev_priv->drm.pdev->dev;
307dc25b 2682 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5c042287 2683
307dc25b 2684 if (unlikely(ggtt->do_idle_maps)) {
228ec87c 2685 if (i915_gem_wait_for_idle(dev_priv, 0)) {
307dc25b
CW
2686 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2687 /* Wait a bit, in hopes it avoids the hang */
2688 udelay(10);
2689 }
2690 }
5c042287 2691
03ac84f1 2692 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
7c2e6fdf 2693}
644ec02b 2694
fa3f46af
MA
2695static int ggtt_set_pages(struct i915_vma *vma)
2696{
2697 int ret;
2698
2699 GEM_BUG_ON(vma->pages);
2700
2701 ret = i915_get_ggtt_vma_pages(vma);
2702 if (ret)
2703 return ret;
2704
7464284b
MA
2705 vma->page_sizes = vma->obj->mm.page_sizes;
2706
fa3f46af
MA
2707 return 0;
2708}
2709
45b186f1 2710static void i915_gtt_color_adjust(const struct drm_mm_node *node,
42d6ab48 2711 unsigned long color,
440fd528
TR
2712 u64 *start,
2713 u64 *end)
42d6ab48 2714{
a6508ded 2715 if (node->allocated && node->color != color)
f51455d4 2716 *start += I915_GTT_PAGE_SIZE;
42d6ab48 2717
a6508ded
CW
2718 /* Also leave a space between the unallocated reserved node after the
2719 * GTT and any objects within the GTT, i.e. we use the color adjustment
2720 * to insert a guard page to prevent prefetches crossing over the
2721 * GTT boundary.
2722 */
b44f97fd 2723 node = list_next_entry(node, node_list);
a6508ded 2724 if (node->color != color)
f51455d4 2725 *end -= I915_GTT_PAGE_SIZE;
42d6ab48 2726}
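/*
 * Illustrative effect (editorial note): for a hole whose neighbours on both
 * sides carry a different colour, both adjustments above apply, shrinking
 * the usable range by I915_GTT_PAGE_SIZE at each end and thus leaving a
 * one-page guard against prefetches on either side.
 */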
fbe5d36e 2727
6cde9a02
CW
2728int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2729{
2730 struct i915_ggtt *ggtt = &i915->ggtt;
2731 struct i915_hw_ppgtt *ppgtt;
2732 int err;
2733
57202f47 2734 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
1188bc66
CW
2735 if (IS_ERR(ppgtt))
2736 return PTR_ERR(ppgtt);
6cde9a02 2737
e565ceb0
CW
2738 if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2739 err = -ENODEV;
2740 goto err_ppgtt;
2741 }
2742
6cde9a02 2743 if (ppgtt->base.allocate_va_range) {
e565ceb0
CW
2744 /* Note we only pre-allocate as far as the end of the global
2745 * GTT. On 48b / 4-level page-tables, the difference is very,
2746 * very significant! We have to preallocate as GVT/vgpu does
2747 * not like the page directory disappearing.
2748 */
6cde9a02 2749 err = ppgtt->base.allocate_va_range(&ppgtt->base,
e565ceb0 2750 0, ggtt->base.total);
6cde9a02 2751 if (err)
1188bc66 2752 goto err_ppgtt;
6cde9a02
CW
2753 }
2754
6cde9a02 2755 i915->mm.aliasing_ppgtt = ppgtt;
cbc4e9e6 2756
6cde9a02
CW
2757 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2758 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2759
cbc4e9e6
CW
2760 WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2761 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2762
6cde9a02
CW
2763 return 0;
2764
6cde9a02 2765err_ppgtt:
1188bc66 2766 i915_ppgtt_put(ppgtt);
6cde9a02
CW
2767 return err;
2768}
2769
2770void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2771{
2772 struct i915_ggtt *ggtt = &i915->ggtt;
2773 struct i915_hw_ppgtt *ppgtt;
2774
2775 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2776 if (!ppgtt)
2777 return;
2778
1188bc66 2779 i915_ppgtt_put(ppgtt);
6cde9a02
CW
2780
2781 ggtt->base.bind_vma = ggtt_bind_vma;
cbc4e9e6 2782 ggtt->base.unbind_vma = ggtt_unbind_vma;
6cde9a02
CW
2783}
2784
f6b9d5ca 2785int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
644ec02b 2786{
e78891ca
BW
2787 /* Let GEM Manage all of the aperture.
2788 *
2789 * However, leave one page at the end still bound to the scratch page.
2790 * There are a number of places where the hardware apparently prefetches
2791 * past the end of the object, and we've seen multiple hangs with the
2792 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2793 * aperture. One page should be enough to keep any prefetching inside
2794 * of the aperture.
2795 */
72e96d64 2796 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ed2f3452 2797 unsigned long hole_start, hole_end;
f6b9d5ca 2798 struct drm_mm_node *entry;
fa76da34 2799 int ret;
644ec02b 2800
b02d22a3
ZW
2801 ret = intel_vgt_balloon(dev_priv);
2802 if (ret)
2803 return ret;
5dda8fa3 2804
95374d75 2805 /* Reserve a mappable slot for our lockless error capture */
4e64e553
CW
2806 ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2807 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2808 0, ggtt->mappable_end,
2809 DRM_MM_INSERT_LOW);
95374d75
CW
2810 if (ret)
2811 return ret;
2812
ed2f3452 2813 /* Clear any non-preallocated blocks */
72e96d64 2814 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
ed2f3452
CW
2815 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2816 hole_start, hole_end);
72e96d64 2817 ggtt->base.clear_range(&ggtt->base, hole_start,
4fb84d99 2818 hole_end - hole_start);
ed2f3452
CW
2819 }
2820
2821 /* And finally clear the reserved guard page */
f6b9d5ca 2822 ggtt->base.clear_range(&ggtt->base,
4fb84d99 2823 ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
6c5566a8 2824
97d6d7ab 2825 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
6cde9a02 2826 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
95374d75 2827 if (ret)
6cde9a02 2828 goto err;
fa76da34
DV
2829 }
2830
6c5566a8 2831 return 0;
95374d75 2832
95374d75
CW
2833err:
2834 drm_mm_remove_node(&ggtt->error_capture);
2835 return ret;
e76e9aeb
BW
2836}
2837
d85489d3
JL
2838/**
2839 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
97d6d7ab 2840 * @dev_priv: i915 device
d85489d3 2841 */
97d6d7ab 2842void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
90d0a0e8 2843{
72e96d64 2844 struct i915_ggtt *ggtt = &dev_priv->ggtt;
94d4a2a9 2845 struct i915_vma *vma, *vn;
66df1014 2846 struct pagevec *pvec;
94d4a2a9
CW
2847
2848 ggtt->base.closed = true;
2849
2850 mutex_lock(&dev_priv->drm.struct_mutex);
2851 WARN_ON(!list_empty(&ggtt->base.active_list));
2852 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2853 WARN_ON(i915_vma_unbind(vma));
2854 mutex_unlock(&dev_priv->drm.struct_mutex);
90d0a0e8 2855
97d6d7ab 2856 i915_gem_cleanup_stolen(&dev_priv->drm);
a4eba47b 2857
1188bc66
CW
2858 mutex_lock(&dev_priv->drm.struct_mutex);
2859 i915_gem_fini_aliasing_ppgtt(dev_priv);
2860
95374d75
CW
2861 if (drm_mm_node_allocated(&ggtt->error_capture))
2862 drm_mm_remove_node(&ggtt->error_capture);
2863
72e96d64 2864 if (drm_mm_initialized(&ggtt->base.mm)) {
b02d22a3 2865 intel_vgt_deballoon(dev_priv);
ed9724dd 2866 i915_address_space_fini(&ggtt->base);
90d0a0e8
DV
2867 }
2868
72e96d64 2869 ggtt->base.cleanup(&ggtt->base);
66df1014
CW
2870
2871 pvec = &dev_priv->mm.wc_stash;
2872 if (pvec->nr) {
2873 set_pages_array_wb(pvec->pages, pvec->nr);
2874 __pagevec_release(pvec);
2875 }
2876
1188bc66 2877 mutex_unlock(&dev_priv->drm.struct_mutex);
f6b9d5ca
CW
2878
2879 arch_phys_wc_del(ggtt->mtrr);
b06f4c80 2880 io_mapping_fini(&ggtt->iomap);
90d0a0e8 2881}
70e32544 2882
2c642b07 2883static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2884{
2885 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2886 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2887 return snb_gmch_ctl << 20;
2888}
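/*
 * Illustrative decode (editorial note): a GGMS field value of 2 yields
 * 2 << 20 = 2 MiB of PTE space; gen6_gmch_probe() below converts that to
 * (2 MiB / 4 bytes per gen6 PTE) << PAGE_SHIFT = 2 GiB of GGTT address
 * space.
 */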
2889
2c642b07 2890static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
9459d252
BW
2891{
2892 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2893 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2894 if (bdw_gmch_ctl)
2895 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
562d55d9
BW
2896
2897#ifdef CONFIG_X86_32
2898 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2899 if (bdw_gmch_ctl > 4)
2900 bdw_gmch_ctl = 4;
2901#endif
2902
9459d252
BW
2903 return bdw_gmch_ctl << 20;
2904}
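/*
 * Illustrative decode (editorial note): BDW encodes GGMS as a power of two,
 * so a field value of 3 becomes 1 << 3 = 8, i.e. 8 << 20 = 8 MiB of PTEs;
 * with 8-byte gen8 PTEs gen8_gmch_probe() turns that into
 * (8 MiB / 8) << PAGE_SHIFT = 4 GiB of GGTT address space.
 */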
2905
2c642b07 2906static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
d7f25f23
DL
2907{
2908 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2909 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2910
2911 if (gmch_ctrl)
2912 return 1 << (20 + gmch_ctrl);
2913
2914 return 0;
2915}
2916
2c642b07 2917static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2918{
2919 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2920 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
a92d1a91 2921 return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
e76e9aeb
BW
2922}
2923
2c642b07 2924static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
9459d252
BW
2925{
2926 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2927 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
a92d1a91 2928 return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
9459d252
BW
2929}
2930
d7f25f23
DL
2931static size_t chv_get_stolen_size(u16 gmch_ctrl)
2932{
2933 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2934 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2935
2936 /*
2937 * 0x0 to 0x10: 32MB increments starting at 0MB
2938 * 0x11 to 0x16: 4MB increments starting at 8MB
2939 * 0x17 to 0x1d: 4MB increments starting at 36MB
2940 */
2941 if (gmch_ctrl < 0x11)
a92d1a91 2942 return (size_t)gmch_ctrl << 25;
d7f25f23 2943 else if (gmch_ctrl < 0x17)
a92d1a91 2944 return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
d7f25f23 2945 else
a92d1a91 2946 return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
d7f25f23
DL
2947}
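/*
 * Sample decodes for the scheme above (editorial note; field values are
 * what remains after the shift/mask at the top of the function):
 *
 *   field 0x10 -> 0x10 << 25              = 512 MB (16 * 32 MB)
 *   field 0x11 -> (0x11 - 0x11 + 2) << 22 =   8 MB
 *   field 0x13 -> (0x13 - 0x11 + 2) << 22 =  16 MB (8 MB + 2 * 4 MB)
 *   field 0x17 -> (0x17 - 0x17 + 9) << 22 =  36 MB
 */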
2948
66375014
DL
2949static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2950{
2951 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2952 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2953
2954 if (gen9_gmch_ctl < 0xf0)
a92d1a91 2955 return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
66375014
DL
2956 else
2957 /* 4MB increments starting at 0xf0 for 4MB */
a92d1a91 2958 return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
66375014
DL
2959}
2960
34c998b4 2961static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
63340133 2962{
49d73912
CW
2963 struct drm_i915_private *dev_priv = ggtt->base.i915;
2964 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2965 phys_addr_t phys_addr;
8bcdd0f7 2966 int ret;
63340133
BW
2967
2968 /* For Modern GENs the PTEs and register space are split in the BAR */
34c998b4 2969 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
63340133 2970
2a073f89 2971 /*
385db982
RV
2972 * On BXT+/CNL+ writes larger than 64 bit to the GTT pagetable range
2973 * will be dropped. For WC mappings in general we have 64 byte burst
2974 * writes when the WC buffer is flushed, so we can't use it, but have to
2a073f89
ID
2975 * resort to an uncached mapping. The WC issue is easily caught by the
2976 * readback check when writing GTT PTE entries.
2977 */
385db982 2978 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10)
34c998b4 2979 ggtt->gsm = ioremap_nocache(phys_addr, size);
2a073f89 2980 else
34c998b4 2981 ggtt->gsm = ioremap_wc(phys_addr, size);
72e96d64 2982 if (!ggtt->gsm) {
34c998b4 2983 DRM_ERROR("Failed to map the ggtt page table\n");
63340133
BW
2984 return -ENOMEM;
2985 }
2986
8448661d 2987 ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
8bcdd0f7 2988 if (ret) {
63340133
BW
2989 DRM_ERROR("Scratch setup failed\n");
2990 /* iounmap will also get called at remove, but meh */
72e96d64 2991 iounmap(ggtt->gsm);
8bcdd0f7 2992 return ret;
63340133
BW
2993 }
2994
4ad2af1e 2995 return 0;
63340133
BW
2996}
2997
4395890a
ZW
2998static struct intel_ppat_entry *
2999__alloc_ppat_entry(struct intel_ppat *ppat, unsigned int index, u8 value)
4e34935f 3000{
4395890a
ZW
3001 struct intel_ppat_entry *entry = &ppat->entries[index];
3002
3003 GEM_BUG_ON(index >= ppat->max_entries);
3004 GEM_BUG_ON(test_bit(index, ppat->used));
3005
3006 entry->ppat = ppat;
3007 entry->value = value;
3008 kref_init(&entry->ref);
3009 set_bit(index, ppat->used);
3010 set_bit(index, ppat->dirty);
3011
3012 return entry;
3013}
3014
3015static void __free_ppat_entry(struct intel_ppat_entry *entry)
4e34935f 3016{
4395890a
ZW
3017 struct intel_ppat *ppat = entry->ppat;
3018 unsigned int index = entry - ppat->entries;
3019
3020 GEM_BUG_ON(index >= ppat->max_entries);
3021 GEM_BUG_ON(!test_bit(index, ppat->used));
3022
3023 entry->value = ppat->clear_value;
3024 clear_bit(index, ppat->used);
3025 set_bit(index, ppat->dirty);
3026}
3027
3028/**
3029 * intel_ppat_get - get a usable PPAT entry
3030 * @i915: i915 device instance
3031 * @value: the PPAT value required by the caller
3032 *
3033 * The function searches for an existing PPAT entry that matches the
3034 * required value. If one matches perfectly, that existing entry is used.
3035 * If the match is only partial, it checks whether a free PPAT index is
3036 * available; if so, a new PPAT entry is allocated for the required value
3037 * and the HW is updated. Otherwise the partially matching entry is
3038 * used.
3039 */
3040const struct intel_ppat_entry *
3041intel_ppat_get(struct drm_i915_private *i915, u8 value)
3042{
3043 struct intel_ppat *ppat = &i915->ppat;
3044 struct intel_ppat_entry *entry;
3045 unsigned int scanned, best_score;
3046 int i;
3047
3048 GEM_BUG_ON(!ppat->max_entries);
3049
3050 scanned = best_score = 0;
3051 for_each_set_bit(i, ppat->used, ppat->max_entries) {
3052 unsigned int score;
3053
3054 score = ppat->match(ppat->entries[i].value, value);
3055 if (score > best_score) {
3056 entry = &ppat->entries[i];
3057 if (score == INTEL_PPAT_PERFECT_MATCH) {
3058 kref_get(&entry->ref);
3059 return entry;
3060 }
3061 best_score = score;
3062 }
3063 scanned++;
3064 }
3065
3066 if (scanned == ppat->max_entries) {
3067 if (!best_score)
3068 return ERR_PTR(-ENOSPC);
3069
3070 kref_get(&entry->ref);
3071 return entry;
3072 }
3073
3074 i = find_first_zero_bit(ppat->used, ppat->max_entries);
3075 entry = __alloc_ppat_entry(ppat, i, value);
3076 ppat->update_hw(i915);
3077 return entry;
3078}
3079
3080static void release_ppat(struct kref *kref)
3081{
3082 struct intel_ppat_entry *entry =
3083 container_of(kref, struct intel_ppat_entry, ref);
3084 struct drm_i915_private *i915 = entry->ppat->i915;
3085
3086 __free_ppat_entry(entry);
3087 entry->ppat->update_hw(i915);
3088}
3089
3090/**
3091 * intel_ppat_put - put back the PPAT entry got from intel_ppat_get()
3092 * @entry: an intel PPAT entry
3093 *
3094 * Put back a PPAT entry obtained from intel_ppat_get(). If the PPAT index of
3095 * the entry was dynamically allocated, its reference count is decreased. Once
3096 * the reference count drops to zero, the PPAT index becomes free again.
3097 */
3098void intel_ppat_put(const struct intel_ppat_entry *entry)
3099{
3100 struct intel_ppat *ppat = entry->ppat;
3101 unsigned int index = entry - ppat->entries;
3102
3103 GEM_BUG_ON(!ppat->max_entries);
3104
3105 kref_put(&ppat->entries[index].ref, release_ppat);
3106}
3107
3108static void cnl_private_pat_update_hw(struct drm_i915_private *dev_priv)
3109{
3110 struct intel_ppat *ppat = &dev_priv->ppat;
3111 int i;
3112
3113 for_each_set_bit(i, ppat->dirty, ppat->max_entries) {
3114 I915_WRITE(GEN10_PAT_INDEX(i), ppat->entries[i].value);
3115 clear_bit(i, ppat->dirty);
3116 }
3117}
3118
3119static void bdw_private_pat_update_hw(struct drm_i915_private *dev_priv)
3120{
3121 struct intel_ppat *ppat = &dev_priv->ppat;
3122 u64 pat = 0;
3123 int i;
3124
3125 for (i = 0; i < ppat->max_entries; i++)
3126 pat |= GEN8_PPAT(i, ppat->entries[i].value);
3127
3128 bitmap_clear(ppat->dirty, 0, ppat->max_entries);
3129
3130 I915_WRITE(GEN8_PRIVATE_PAT_LO, lower_32_bits(pat));
3131 I915_WRITE(GEN8_PRIVATE_PAT_HI, upper_32_bits(pat));
3132}
3133
3134static unsigned int bdw_private_pat_match(u8 src, u8 dst)
3135{
3136 unsigned int score = 0;
3137 enum {
3138 AGE_MATCH = BIT(0),
3139 TC_MATCH = BIT(1),
3140 CA_MATCH = BIT(2),
3141 };
3142
3143 /* Cache attribute has to be matched. */
1298d51c 3144 if (GEN8_PPAT_GET_CA(src) != GEN8_PPAT_GET_CA(dst))
4395890a
ZW
3145 return 0;
3146
3147 score |= CA_MATCH;
3148
3149 if (GEN8_PPAT_GET_TC(src) == GEN8_PPAT_GET_TC(dst))
3150 score |= TC_MATCH;
3151
3152 if (GEN8_PPAT_GET_AGE(src) == GEN8_PPAT_GET_AGE(dst))
3153 score |= AGE_MATCH;
3154
3155 if (score == (AGE_MATCH | TC_MATCH | CA_MATCH))
3156 return INTEL_PPAT_PERFECT_MATCH;
3157
3158 return score;
3159}
3160
3161static unsigned int chv_private_pat_match(u8 src, u8 dst)
3162{
3163 return (CHV_PPAT_GET_SNOOP(src) == CHV_PPAT_GET_SNOOP(dst)) ?
3164 INTEL_PPAT_PERFECT_MATCH : 0;
3165}
3166
3167static void cnl_setup_private_ppat(struct intel_ppat *ppat)
3168{
3169 ppat->max_entries = 8;
3170 ppat->update_hw = cnl_private_pat_update_hw;
3171 ppat->match = bdw_private_pat_match;
3172 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
3173
4e34935f 3174 /* XXX: spec is unclear if this is still needed for CNL+ */
4395890a
ZW
3175 if (!USES_PPGTT(ppat->i915)) {
3176 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
4e34935f
RV
3177 return;
3178 }
3179
4395890a
ZW
3180 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC);
3181 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC);
3182 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC);
3183 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC);
3184 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3185 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3186 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3187 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
4e34935f
RV
3188}
3189
fbe5d36e
BW
3190/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
3191 * bits. When using advanced contexts each context stores its own PAT, but
3192 * writing this data shouldn't be harmful even in those cases. */
4395890a 3193static void bdw_setup_private_ppat(struct intel_ppat *ppat)
fbe5d36e 3194{
4395890a
ZW
3195 ppat->max_entries = 8;
3196 ppat->update_hw = bdw_private_pat_update_hw;
3197 ppat->match = bdw_private_pat_match;
3198 ppat->clear_value = GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3);
fbe5d36e 3199
4395890a 3200 if (!USES_PPGTT(ppat->i915)) {
d6a8b72e
RV
3201 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3202 * so RTL will always use the value corresponding to
3203 * pat_sel = 000".
3204 * So let's disable cache for GGTT to avoid screen corruptions.
3205 * MOCS still can be used though.
3206 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3207 * before this patch, i.e. the same uncached + snooping access
3208 * like on gen6/7 seems to be in effect.
3209 * - So this just fixes blitter/render access. Again it looks
3210 * like it's not just uncached access, but uncached + snooping.
3211 * So we can still hold onto all our assumptions wrt cpu
3212 * clflushing on LLC machines.
3213 */
4395890a
ZW
3214 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_UC);
3215 return;
3216 }
d6a8b72e 3217
4395890a
ZW
3218 __alloc_ppat_entry(ppat, 0, GEN8_PPAT_WB | GEN8_PPAT_LLC); /* for normal objects, no eLLC */
3219 __alloc_ppat_entry(ppat, 1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC); /* for something pointing to ptes? */
3220 __alloc_ppat_entry(ppat, 2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC); /* for scanout with eLLC */
3221 __alloc_ppat_entry(ppat, 3, GEN8_PPAT_UC); /* Uncached objects, mostly for scanout */
3222 __alloc_ppat_entry(ppat, 4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0));
3223 __alloc_ppat_entry(ppat, 5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1));
3224 __alloc_ppat_entry(ppat, 6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2));
3225 __alloc_ppat_entry(ppat, 7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
fbe5d36e
BW
3226}
3227
4395890a 3228static void chv_setup_private_ppat(struct intel_ppat *ppat)
ee0ce478 3229{
4395890a
ZW
3230 ppat->max_entries = 8;
3231 ppat->update_hw = bdw_private_pat_update_hw;
3232 ppat->match = chv_private_pat_match;
3233 ppat->clear_value = CHV_PPAT_SNOOP;
ee0ce478
VS
3234
3235 /*
3236 * Map WB on BDW to snooped on CHV.
3237 *
3238 * Only the snoop bit has meaning for CHV, the rest is
3239 * ignored.
3240 *
cf3d262e
VS
3241 * The hardware will never snoop for certain types of accesses:
3242 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3243 * - PPGTT page tables
3244 * - some other special cycles
3245 *
3246 * As with BDW, we also need to consider the following for GT accesses:
3247 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3248 * so RTL will always use the value corresponding to
3249 * pat_sel = 000".
3250 * Which means we must set the snoop bit in PAT entry 0
3251 * in order to keep the global status page working.
ee0ce478 3252 */
ee0ce478 3253
4395890a
ZW
3254 __alloc_ppat_entry(ppat, 0, CHV_PPAT_SNOOP);
3255 __alloc_ppat_entry(ppat, 1, 0);
3256 __alloc_ppat_entry(ppat, 2, 0);
3257 __alloc_ppat_entry(ppat, 3, 0);
3258 __alloc_ppat_entry(ppat, 4, CHV_PPAT_SNOOP);
3259 __alloc_ppat_entry(ppat, 5, CHV_PPAT_SNOOP);
3260 __alloc_ppat_entry(ppat, 6, CHV_PPAT_SNOOP);
3261 __alloc_ppat_entry(ppat, 7, CHV_PPAT_SNOOP);
ee0ce478
VS
3262}
3263
34c998b4
CW
3264static void gen6_gmch_remove(struct i915_address_space *vm)
3265{
3266 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3267
3268 iounmap(ggtt->gsm);
8448661d 3269 cleanup_scratch_page(vm);
34c998b4
CW
3270}
3271
36e16c49
ZW
3272static void setup_private_pat(struct drm_i915_private *dev_priv)
3273{
4395890a
ZW
3274 struct intel_ppat *ppat = &dev_priv->ppat;
3275 int i;
3276
3277 ppat->i915 = dev_priv;
3278
36e16c49 3279 if (INTEL_GEN(dev_priv) >= 10)
4395890a 3280 cnl_setup_private_ppat(ppat);
36e16c49 3281 else if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
4395890a 3282 chv_setup_private_ppat(ppat);
36e16c49 3283 else
4395890a
ZW
3284 bdw_setup_private_ppat(ppat);
3285
3286 GEM_BUG_ON(ppat->max_entries > INTEL_MAX_PPAT_ENTRIES);
3287
3288 for_each_clear_bit(i, ppat->used, ppat->max_entries) {
3289 ppat->entries[i].value = ppat->clear_value;
3290 ppat->entries[i].ppat = ppat;
3291 set_bit(i, ppat->dirty);
3292 }
3293
3294 ppat->update_hw(dev_priv);
36e16c49
ZW
3295}
3296
d507d735 3297static int gen8_gmch_probe(struct i915_ggtt *ggtt)
63340133 3298{
49d73912 3299 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 3300 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 3301 unsigned int size;
63340133 3302 u16 snb_gmch_ctl;
4519290a 3303 int err;
63340133
BW
3304
3305 /* TODO: We're not aware of mappable constraints on gen8 yet */
b06f4c80
MA
3306 ggtt->gmadr =
3307 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3308 pci_resource_len(pdev, 2));
3309 ggtt->mappable_end = resource_size(&ggtt->gmadr);
63340133 3310
4519290a
ID
3311 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
3312 if (!err)
3313 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
3314 if (err)
3315 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
63340133 3316
97d6d7ab 3317 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
63340133 3318
97d6d7ab 3319 if (INTEL_GEN(dev_priv) >= 9) {
d507d735 3320 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
34c998b4 3321 size = gen8_get_total_gtt_size(snb_gmch_ctl);
97d6d7ab 3322 } else if (IS_CHERRYVIEW(dev_priv)) {
d507d735 3323 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
34c998b4 3324 size = chv_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3325 } else {
d507d735 3326 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
34c998b4 3327 size = gen8_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3328 }
63340133 3329
34c998b4 3330 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
34c998b4 3331 ggtt->base.cleanup = gen6_gmch_remove;
d507d735
JL
3332 ggtt->base.bind_vma = ggtt_bind_vma;
3333 ggtt->base.unbind_vma = ggtt_unbind_vma;
fa3f46af
MA
3334 ggtt->base.set_pages = ggtt_set_pages;
3335 ggtt->base.clear_pages = clear_pages;
d6473f56 3336 ggtt->base.insert_page = gen8_ggtt_insert_page;
f7770bfd 3337 ggtt->base.clear_range = nop_clear_range;
48f112fe 3338 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
f7770bfd
CW
3339 ggtt->base.clear_range = gen8_ggtt_clear_range;
3340
3341 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
f7770bfd 3342
0ef34ad6
JB
3343 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
3344 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
3345 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
3346 ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
3347 if (ggtt->base.clear_range != nop_clear_range)
3348 ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
3349 }
3350
7c3f86b6
CW
3351 ggtt->invalidate = gen6_ggtt_invalidate;
3352
36e16c49
ZW
3353 setup_private_pat(dev_priv);
3354
34c998b4 3355 return ggtt_probe_common(ggtt, size);
63340133
BW
3356}
3357
d507d735 3358static int gen6_gmch_probe(struct i915_ggtt *ggtt)
e76e9aeb 3359{
49d73912 3360 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 3361 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 3362 unsigned int size;
e76e9aeb 3363 u16 snb_gmch_ctl;
4519290a 3364 int err;
e76e9aeb 3365
b06f4c80
MA
3366 ggtt->gmadr =
3367 (struct resource) DEFINE_RES_MEM(pci_resource_start(pdev, 2),
3368 pci_resource_len(pdev, 2));
3369 ggtt->mappable_end = resource_size(&ggtt->gmadr);
41907ddc 3370
baa09f5f
BW
3371 /* 64/512MB is the current min/max we actually know of, but this is just
3372 * a coarse sanity check.
e76e9aeb 3373 */
34c998b4 3374 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
d507d735 3375 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
baa09f5f 3376 return -ENXIO;
e76e9aeb
BW
3377 }
3378
4519290a
ID
3379 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
3380 if (!err)
3381 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3382 if (err)
3383 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
97d6d7ab 3384 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
e76e9aeb 3385
d507d735 3386 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
e76e9aeb 3387
34c998b4
CW
3388 size = gen6_get_total_gtt_size(snb_gmch_ctl);
3389 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
e76e9aeb 3390
d507d735 3391 ggtt->base.clear_range = gen6_ggtt_clear_range;
d6473f56 3392 ggtt->base.insert_page = gen6_ggtt_insert_page;
d507d735
JL
3393 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
3394 ggtt->base.bind_vma = ggtt_bind_vma;
3395 ggtt->base.unbind_vma = ggtt_unbind_vma;
fa3f46af
MA
3396 ggtt->base.set_pages = ggtt_set_pages;
3397 ggtt->base.clear_pages = clear_pages;
34c998b4
CW
3398 ggtt->base.cleanup = gen6_gmch_remove;
3399
7c3f86b6
CW
3400 ggtt->invalidate = gen6_ggtt_invalidate;
3401
34c998b4
CW
3402 if (HAS_EDRAM(dev_priv))
3403 ggtt->base.pte_encode = iris_pte_encode;
3404 else if (IS_HASWELL(dev_priv))
3405 ggtt->base.pte_encode = hsw_pte_encode;
3406 else if (IS_VALLEYVIEW(dev_priv))
3407 ggtt->base.pte_encode = byt_pte_encode;
3408 else if (INTEL_GEN(dev_priv) >= 7)
3409 ggtt->base.pte_encode = ivb_pte_encode;
3410 else
3411 ggtt->base.pte_encode = snb_pte_encode;
7faf1ab2 3412
34c998b4 3413 return ggtt_probe_common(ggtt, size);
e76e9aeb
BW
3414}
3415
34c998b4 3416static void i915_gmch_remove(struct i915_address_space *vm)
e76e9aeb 3417{
34c998b4 3418 intel_gmch_remove();
644ec02b 3419}
baa09f5f 3420
d507d735 3421static int i915_gmch_probe(struct i915_ggtt *ggtt)
baa09f5f 3422{
49d73912 3423 struct drm_i915_private *dev_priv = ggtt->base.i915;
b06f4c80 3424 phys_addr_t gmadr_base;
baa09f5f
BW
3425 int ret;
3426
91c8a326 3427 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
baa09f5f
BW
3428 if (!ret) {
3429 DRM_ERROR("failed to set up gmch\n");
3430 return -EIO;
3431 }
3432
edd1f2fe
CW
3433 intel_gtt_get(&ggtt->base.total,
3434 &ggtt->stolen_size,
b06f4c80 3435 &gmadr_base,
edd1f2fe 3436 &ggtt->mappable_end);
baa09f5f 3437
b06f4c80
MA
3438 ggtt->gmadr =
3439 (struct resource) DEFINE_RES_MEM(gmadr_base,
3440 ggtt->mappable_end);
3441
97d6d7ab 3442 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
d6473f56 3443 ggtt->base.insert_page = i915_ggtt_insert_page;
d507d735
JL
3444 ggtt->base.insert_entries = i915_ggtt_insert_entries;
3445 ggtt->base.clear_range = i915_ggtt_clear_range;
3446 ggtt->base.bind_vma = ggtt_bind_vma;
3447 ggtt->base.unbind_vma = ggtt_unbind_vma;
fa3f46af
MA
3448 ggtt->base.set_pages = ggtt_set_pages;
3449 ggtt->base.clear_pages = clear_pages;
34c998b4 3450 ggtt->base.cleanup = i915_gmch_remove;
baa09f5f 3451
7c3f86b6
CW
3452 ggtt->invalidate = gmch_ggtt_invalidate;
3453
d507d735 3454 if (unlikely(ggtt->do_idle_maps))
c0a7f818
CW
3455 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3456
baa09f5f
BW
3457 return 0;
3458}
3459
/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @dev_priv: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ggtt->base.i915 = dev_priv;
	ggtt->base.dma = &dev_priv->drm.pdev->dev;

	if (INTEL_GEN(dev_priv) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(dev_priv) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret)
		return ret;

	/* Trim the GGTT to fit the GuC mappable upper range (when enabled).
	 * This is easier than doing range restriction on the fly, as we
	 * currently don't have any bits spare to pass in this upper
	 * restriction!
	 */
	if (HAS_GUC(dev_priv) && i915_modparams.enable_guc_loading) {
		ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	if ((ggtt->base.total - 1) >> 32) {
		DRM_ERROR("We never expected a Global GTT with more than 32bits"
			  " of address space! Found %lldM!\n",
			  ggtt->base.total >> 20);
		ggtt->base.total = 1ULL << 32;
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	if (ggtt->mappable_end > ggtt->base.total) {
		DRM_ERROR("mappable aperture extends past end of GGTT,"
			  " aperture=%llx, total=%llx\n",
			  ggtt->mappable_end, ggtt->base.total);
		ggtt->mappable_end = ggtt->base.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %lluM\n",
		 ggtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %lluM\n", (u64)ggtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
	if (intel_vtd_active())
		DRM_INFO("VT-d active for gfx access\n");

	return 0;
}
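/*
 * Illustrative only (not part of the driver): the ordering the clamps in
 * i915_ggtt_probe_hw() above establish before the sizes are logged. The
 * GuC trim and the 32-bit check only ever shrink the limits, so by the
 * time they are printed the mappable aperture ends within the GGTT and
 * the GGTT itself fits in 32 bits of address. The helper name is made up
 * for this sketch.
 */
static inline bool example_ggtt_limits_ok(const struct i915_ggtt *ggtt)
{
	return ggtt->mappable_end <= ggtt->base.total &&
	       ggtt->base.total <= BIT_ULL(32);
}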
/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @dev_priv: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	INIT_LIST_HEAD(&dev_priv->vm_list);

	/* Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_address_space_init(&ggtt->base, dev_priv, "[global]");
	if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	if (!io_mapping_init_wc(&dev_priv->ggtt.iomap,
				dev_priv->ggtt.gmadr.start,
				dev_priv->ggtt.mappable_end)) {
		ret = -EIO;
		goto out_gtt_cleanup;
	}

	ggtt->mtrr = arch_phys_wc_add(ggtt->gmadr.start, ggtt->mappable_end);

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev_priv);
	if (ret)
		goto out_gtt_cleanup;

	return 0;

out_gtt_cleanup:
	ggtt->base.cleanup(&ggtt->base);
	return ret;
}
97d6d7ab 3563int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
ac840ae5 3564{
97d6d7ab 3565 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
ac840ae5
VS
3566 return -EIO;
3567
3568 return 0;
3569}
3570
void i915_ggtt_enable_guc(struct drm_i915_private *i915)
{
	GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);

	i915->ggtt.invalidate = guc_ggtt_invalidate;
}

void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
	/* We should only be called after i915_ggtt_enable_guc() */
	GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);

	i915->ggtt.invalidate = gen6_ggtt_invalidate;
}
275a991c 3586void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
fa42331b 3587{
72e96d64 3588 struct i915_ggtt *ggtt = &dev_priv->ggtt;
fbb30a5c 3589 struct drm_i915_gem_object *obj, *on;
fa42331b 3590
dc97997a 3591 i915_check_and_clear_faults(dev_priv);
fa42331b
DV
3592
3593 /* First fill our portion of the GTT with scratch pages */
381b943b 3594 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
fa42331b 3595
fbb30a5c
CW
3596 ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3597
3598 /* clflush objects bound into the GGTT and rebind them. */
f2123818 3599 list_for_each_entry_safe(obj, on, &dev_priv->mm.bound_list, mm.link) {
fbb30a5c
CW
3600 bool ggtt_bound = false;
3601 struct i915_vma *vma;
3602
1c7f4bca 3603 list_for_each_entry(vma, &obj->vma_list, obj_link) {
72e96d64 3604 if (vma->vm != &ggtt->base)
2c3d9984 3605 continue;
fa42331b 3606
fbb30a5c
CW
3607 if (!i915_vma_unbind(vma))
3608 continue;
3609
2c3d9984
TU
3610 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3611 PIN_UPDATE));
fbb30a5c 3612 ggtt_bound = true;
2c3d9984
TU
3613 }
3614
fbb30a5c 3615 if (ggtt_bound)
975f7ff4 3616 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
2c3d9984 3617 }
fa42331b 3618
fbb30a5c
CW
3619 ggtt->base.closed = false;
3620
275a991c 3621 if (INTEL_GEN(dev_priv) >= 8) {
4395890a 3622 struct intel_ppat *ppat = &dev_priv->ppat;
fa42331b 3623
4395890a
ZW
3624 bitmap_set(ppat->dirty, 0, ppat->max_entries);
3625 dev_priv->ppat.update_hw(dev_priv);
fa42331b
DV
3626 return;
3627 }
3628
275a991c 3629 if (USES_PPGTT(dev_priv)) {
72e96d64
JL
3630 struct i915_address_space *vm;
3631
fa42331b 3632 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
e5716f55 3633 struct i915_hw_ppgtt *ppgtt;
fa42331b 3634
2bfa996e 3635 if (i915_is_ggtt(vm))
fa42331b 3636 ppgtt = dev_priv->mm.aliasing_ppgtt;
e5716f55
JL
3637 else
3638 ppgtt = i915_vm_to_ppgtt(vm);
fa42331b 3639
16a011c8 3640 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
fa42331b
DV
3641 }
3642 }
3643
7c3f86b6 3644 i915_ggtt_invalidate(dev_priv);
fa42331b
DV
3645}
3646
804beb4b 3647static struct scatterlist *
2d7f3bdb 3648rotate_pages(const dma_addr_t *in, unsigned int offset,
804beb4b 3649 unsigned int width, unsigned int height,
87130255 3650 unsigned int stride,
804beb4b 3651 struct sg_table *st, struct scatterlist *sg)
50470bb0
TU
3652{
3653 unsigned int column, row;
3654 unsigned int src_idx;
50470bb0 3655
50470bb0 3656 for (column = 0; column < width; column++) {
87130255 3657 src_idx = stride * (height - 1) + column;
50470bb0
TU
3658 for (row = 0; row < height; row++) {
3659 st->nents++;
3660 /* We don't need the pages, but need to initialize
3661 * the entries so the sg list can be happily traversed.
3662 * The only thing we need are DMA addresses.
3663 */
3664 sg_set_page(sg, NULL, PAGE_SIZE, 0);
804beb4b 3665 sg_dma_address(sg) = in[offset + src_idx];
50470bb0
TU
3666 sg_dma_len(sg) = PAGE_SIZE;
3667 sg = sg_next(sg);
87130255 3668 src_idx -= stride;
50470bb0
TU
3669 }
3670 }
804beb4b
TU
3671
3672 return sg;
50470bb0
TU
3673}
3674
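/*
 * Illustrative only (not part of the driver): the source page index that
 * rotate_pages() above emits for GTT page (column, row) of a plane, i.e.
 * the same column-major, bottom-up walk written as a closed form. For a
 * 2x2 plane with stride 2 (pages laid out 0 1 / 2 3), the rotated view
 * references the source pages in the order 2, 0, 3, 1. The helper name
 * is made up for this sketch.
 */
static inline unsigned int example_rotated_src_idx(unsigned int column,
						   unsigned int row,
						   unsigned int height,
						   unsigned int stride)
{
	/* start at the bottom of the column and walk upwards */
	return stride * (height - 1 - row) + column;
}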
static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	const unsigned long n_pages = obj->base.size / PAGE_SIZE;
	unsigned int size = intel_rotation_info_size(rot_info);
	struct sgt_iter sgt_iter;
	dma_addr_t dma_addr;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = kvmalloc_array(n_pages,
					sizeof(dma_addr_t),
					GFP_KERNEL);
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
		page_addr_list[i++] = dma_addr;

	GEM_BUG_ON(i != n_pages);
	st->nents = 0;
	sg = st->sgl;

	for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
	}

	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	kvfree(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	kvfree(page_addr_list);

	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}
ba7a5741 3738static noinline struct sg_table *
8bd7ef16
JL
3739intel_partial_pages(const struct i915_ggtt_view *view,
3740 struct drm_i915_gem_object *obj)
3741{
3742 struct sg_table *st;
d2a84a76 3743 struct scatterlist *sg, *iter;
8bab1193 3744 unsigned int count = view->partial.size;
d2a84a76 3745 unsigned int offset;
8bd7ef16
JL
3746 int ret = -ENOMEM;
3747
3748 st = kmalloc(sizeof(*st), GFP_KERNEL);
3749 if (!st)
3750 goto err_st_alloc;
3751
d2a84a76 3752 ret = sg_alloc_table(st, count, GFP_KERNEL);
8bd7ef16
JL
3753 if (ret)
3754 goto err_sg_alloc;
3755
8bab1193 3756 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
d2a84a76
CW
3757 GEM_BUG_ON(!iter);
3758
8bd7ef16
JL
3759 sg = st->sgl;
3760 st->nents = 0;
d2a84a76
CW
3761 do {
3762 unsigned int len;
8bd7ef16 3763
d2a84a76
CW
3764 len = min(iter->length - (offset << PAGE_SHIFT),
3765 count << PAGE_SHIFT);
3766 sg_set_page(sg, NULL, len, 0);
3767 sg_dma_address(sg) =
3768 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3769 sg_dma_len(sg) = len;
8bd7ef16 3770
8bd7ef16 3771 st->nents++;
d2a84a76
CW
3772 count -= len >> PAGE_SHIFT;
3773 if (count == 0) {
3774 sg_mark_end(sg);
3775 return st;
3776 }
8bd7ef16 3777
d2a84a76
CW
3778 sg = __sg_next(sg);
3779 iter = __sg_next(iter);
3780 offset = 0;
3781 } while (1);
8bd7ef16
JL
3782
3783err_sg_alloc:
3784 kfree(st);
3785err_st_alloc:
3786 return ERR_PTR(ret);
3787}
3788
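/*
 * Illustrative only (not part of the driver): the length, in bytes, of a
 * single chunk emitted by intel_partial_pages() above. Each chunk covers
 * whatever remains of the current source sg entry after the view's page
 * offset into it, capped by the number of view pages still to be emitted.
 * The helper name is made up for this sketch.
 */
static inline unsigned int example_partial_chunk_len(unsigned int sg_len,
						     unsigned int page_offset,
						     unsigned int pages_left)
{
	return min(sg_len - (page_offset << PAGE_SHIFT),
		   pages_left << PAGE_SHIFT);
}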
70b9f6f8 3789static int
50470bb0 3790i915_get_ggtt_vma_pages(struct i915_vma *vma)
fe14d5f4 3791{
ba7a5741 3792 int ret;
50470bb0 3793
2c3a3f44
CW
3794 /* The vma->pages are only valid within the lifespan of the borrowed
3795 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3796 * must be the vma->pages. A simple rule is that vma->pages must only
3797 * be accessed when the obj->mm.pages are pinned.
3798 */
3799 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3800
ba7a5741
CW
3801 switch (vma->ggtt_view.type) {
3802 case I915_GGTT_VIEW_NORMAL:
3803 vma->pages = vma->obj->mm.pages;
fe14d5f4
TU
3804 return 0;
3805
ba7a5741 3806 case I915_GGTT_VIEW_ROTATED:
247177dd 3807 vma->pages =
ba7a5741
CW
3808 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3809 break;
3810
3811 case I915_GGTT_VIEW_PARTIAL:
247177dd 3812 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
ba7a5741
CW
3813 break;
3814
3815 default:
fe14d5f4
TU
3816 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3817 vma->ggtt_view.type);
ba7a5741
CW
3818 return -EINVAL;
3819 }
fe14d5f4 3820
ba7a5741
CW
3821 ret = 0;
3822 if (unlikely(IS_ERR(vma->pages))) {
247177dd
CW
3823 ret = PTR_ERR(vma->pages);
3824 vma->pages = NULL;
50470bb0
TU
3825 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3826 vma->ggtt_view.type, ret);
fe14d5f4 3827 }
50470bb0 3828 return ret;
fe14d5f4
TU
3829}
3830
/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 * must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 * (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 * color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
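/*
 * Illustrative only (not part of the driver): a minimal sketch of calling
 * i915_gem_gtt_reserve() to pin down a single GTT page at a fixed,
 * page-aligned offset, refusing to evict whatever may already be there.
 * The caller is assumed to hold struct_mutex and to own an unallocated
 * drm_mm_node. The helper name is made up for this sketch.
 */
static inline int example_reserve_fixed_page(struct i915_address_space *vm,
					     struct drm_mm_node *node,
					     u64 offset)
{
	return i915_gem_gtt_reserve(vm, node, I915_GTT_PAGE_SIZE, offset,
				    I915_COLOR_UNEVICTABLE, PIN_NOEVICT);
}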
static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}
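/*
 * Illustrative only (not part of the driver): the invariant random_offset()
 * above maintains, assuming @align is a power of two (as its callers
 * guarantee). Whatever random value is drawn, the returned offset is
 * aligned and the [offset, offset + len) window stays inside [start, end).
 * The helper name is made up for this sketch.
 */
static inline bool example_random_offset_ok(u64 offset, u64 start, u64 end,
					    u64 len, u64 align)
{
	return IS_ALIGNED(offset, align) &&
	       offset >= round_up(start, align) &&
	       offset <= round_down(end - len, align);
}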
/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 * must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 * if specified, this must be a power-of-two and at least
 * #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 * must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction; failing that, the LRU list of objects within the GTT
 * is scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	if (flags & PIN_NOEVICT)
		return -ENOSPC;

	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *       (64k objects)             (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTT, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
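/*
 * Illustrative only (not part of the driver): a minimal sketch of calling
 * i915_gem_gtt_insert() to place a node somewhere in the low 4GiB of the
 * address space, preferring the top of that range. @size is assumed to be
 * #I915_GTT_PAGE_SIZE aligned and struct_mutex held, as documented above.
 * The helper name is made up for this sketch.
 */
static inline int example_insert_low_4g(struct i915_address_space *vm,
					struct drm_mm_node *node,
					u64 size, unsigned long color)
{
	return i915_gem_gtt_insert(vm, node, size, 0, color,
				   0, min_t(u64, vm->total, BIT_ULL(32)),
				   PIN_HIGH);
}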
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#include "selftests/i915_gem_gtt.c"
#endif