/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *    1212
 *    3434
 *
 * In this example both the size and layout of pages in the alternative view
 * differ from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with a _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * The core API is designed to have copy semantics, which means that a passed
 * in struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* emulation is too hard */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_vtd_active()) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

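/* Bind a VMA into a ppgtt: allocate the backing page-table range on the
 * first local bind, then write PTEs for all of the object's pages.
 */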
static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int ret;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
						 vma->size);
		if (ret)
			return ret;
	}

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

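/* Encode a gen8 PTE: present and writable bits, the physical address, and a
 * PPAT index derived from the requested cache level.
 */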
2c642b07 220static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
4fb84d99 221 enum i915_cache_level level)
94ec8f61 222{
4fb84d99 223 gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
94ec8f61 224 pte |= addr;
63c42e56
BW
225
226 switch (level) {
227 case I915_CACHE_NONE:
fbe5d36e 228 pte |= PPAT_UNCACHED_INDEX;
63c42e56
BW
229 break;
230 case I915_CACHE_WT:
231 pte |= PPAT_DISPLAY_ELLC_INDEX;
232 break;
233 default:
234 pte |= PPAT_CACHED_INDEX;
235 break;
236 }
237
94ec8f61
BW
238 return pte;
239}
240
fe36f55d
MK
241static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
242 const enum i915_cache_level level)
b1fe6673 243{
07749ef3 244 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
b1fe6673
BW
245 pde |= addr;
246 if (level != I915_CACHE_NONE)
247 pde |= PPAT_CACHED_PDE_INDEX;
248 else
249 pde |= PPAT_UNCACHED_INDEX;
250 return pde;
251}
252
762d9936
MT
253#define gen8_pdpe_encode gen8_pde_encode
254#define gen8_pml4e_encode gen8_pde_encode
255
07749ef3
MT
256static gen6_pte_t snb_pte_encode(dma_addr_t addr,
257 enum i915_cache_level level,
4fb84d99 258 u32 unused)
54d12527 259{
4fb84d99 260 gen6_pte_t pte = GEN6_PTE_VALID;
54d12527 261 pte |= GEN6_PTE_ADDR_ENCODE(addr);
e7210c3c
BW
262
263 switch (level) {
350ec881
CW
264 case I915_CACHE_L3_LLC:
265 case I915_CACHE_LLC:
266 pte |= GEN6_PTE_CACHE_LLC;
267 break;
268 case I915_CACHE_NONE:
269 pte |= GEN6_PTE_UNCACHED;
270 break;
271 default:
5f77eeb0 272 MISSING_CASE(level);
350ec881
CW
273 }
274
275 return pte;
276}
277
07749ef3
MT
278static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
279 enum i915_cache_level level,
4fb84d99 280 u32 unused)
350ec881 281{
4fb84d99 282 gen6_pte_t pte = GEN6_PTE_VALID;
350ec881
CW
283 pte |= GEN6_PTE_ADDR_ENCODE(addr);
284
285 switch (level) {
286 case I915_CACHE_L3_LLC:
287 pte |= GEN7_PTE_CACHE_L3_LLC;
e7210c3c
BW
288 break;
289 case I915_CACHE_LLC:
290 pte |= GEN6_PTE_CACHE_LLC;
291 break;
292 case I915_CACHE_NONE:
9119708c 293 pte |= GEN6_PTE_UNCACHED;
e7210c3c
BW
294 break;
295 default:
5f77eeb0 296 MISSING_CASE(level);
e7210c3c
BW
297 }
298
54d12527
BW
299 return pte;
300}
301
07749ef3
MT
302static gen6_pte_t byt_pte_encode(dma_addr_t addr,
303 enum i915_cache_level level,
4fb84d99 304 u32 flags)
93c34e70 305{
4fb84d99 306 gen6_pte_t pte = GEN6_PTE_VALID;
93c34e70
KG
307 pte |= GEN6_PTE_ADDR_ENCODE(addr);
308
24f3a8cf
AG
309 if (!(flags & PTE_READ_ONLY))
310 pte |= BYT_PTE_WRITEABLE;
93c34e70
KG
311
312 if (level != I915_CACHE_NONE)
313 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
314
315 return pte;
316}
317
07749ef3
MT
318static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
319 enum i915_cache_level level,
4fb84d99 320 u32 unused)
9119708c 321{
4fb84d99 322 gen6_pte_t pte = GEN6_PTE_VALID;
0d8ff15e 323 pte |= HSW_PTE_ADDR_ENCODE(addr);
9119708c
KG
324
325 if (level != I915_CACHE_NONE)
87a6b688 326 pte |= HSW_WB_LLC_AGE3;
9119708c
KG
327
328 return pte;
329}
330
07749ef3
MT
331static gen6_pte_t iris_pte_encode(dma_addr_t addr,
332 enum i915_cache_level level,
4fb84d99 333 u32 unused)
4d15c145 334{
4fb84d99 335 gen6_pte_t pte = GEN6_PTE_VALID;
4d15c145
BW
336 pte |= HSW_PTE_ADDR_ENCODE(addr);
337
651d794f
CW
338 switch (level) {
339 case I915_CACHE_NONE:
340 break;
341 case I915_CACHE_WT:
c51e9701 342 pte |= HSW_WT_ELLC_LLC_AGE3;
651d794f
CW
343 break;
344 default:
c51e9701 345 pte |= HSW_WB_ELLC_LLC_AGE3;
651d794f
CW
346 break;
347 }
4d15c145
BW
348
349 return pte;
350}
351
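/* Allocate a page for page-table storage, preferring the address space's
 * local free-list and marking freshly allocated pages write-combining when
 * the page tables must be mapped WC.
 */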
8448661d 352static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
678d96fb 353{
8448661d 354 struct page *page;
678d96fb 355
8448661d
CW
356 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
357 i915_gem_shrink_all(vm->i915);
aae4a3d8 358
8448661d
CW
359 if (vm->free_pages.nr)
360 return vm->free_pages.pages[--vm->free_pages.nr];
361
362 page = alloc_page(gfp);
363 if (!page)
364 return NULL;
365
366 if (vm->pt_kmap_wc)
367 set_pages_array_wc(&page, 1);
368
369 return page;
370}
371
372static void vm_free_pages_release(struct i915_address_space *vm)
373{
374 GEM_BUG_ON(!pagevec_count(&vm->free_pages));
375
376 if (vm->pt_kmap_wc)
377 set_pages_array_wb(vm->free_pages.pages,
378 pagevec_count(&vm->free_pages));
379
380 __pagevec_release(&vm->free_pages);
381}
382
383static void vm_free_page(struct i915_address_space *vm, struct page *page)
384{
385 if (!pagevec_add(&vm->free_pages, page))
386 vm_free_pages_release(vm);
387}
678d96fb 388
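/* Allocate the backing page for an i915_page_dma and map it for DMA;
 * returns -ENOMEM if either the allocation or the mapping fails.
 */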
8448661d
CW
389static int __setup_page_dma(struct i915_address_space *vm,
390 struct i915_page_dma *p,
391 gfp_t gfp)
392{
393 p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
394 if (unlikely(!p->page))
395 return -ENOMEM;
678d96fb 396
8448661d
CW
397 p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
398 PCI_DMA_BIDIRECTIONAL);
399 if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
400 vm_free_page(vm, p->page);
401 return -ENOMEM;
44159ddb 402 }
1266cdb1
MT
403
404 return 0;
678d96fb
BW
405}
406
8448661d 407static int setup_page_dma(struct i915_address_space *vm,
275a991c 408 struct i915_page_dma *p)
c114f76a 409{
8448661d 410 return __setup_page_dma(vm, p, I915_GFP_DMA);
c114f76a
MK
411}
412
8448661d 413static void cleanup_page_dma(struct i915_address_space *vm,
275a991c 414 struct i915_page_dma *p)
06fda602 415{
8448661d
CW
416 dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
417 vm_free_page(vm, p->page);
44159ddb
MK
418}
419
9231da70 420#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)
d1c54acd 421
8448661d
CW
422#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
423#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
424#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
425#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))
567047be 426
8448661d
CW
427static void fill_page_dma(struct i915_address_space *vm,
428 struct i915_page_dma *p,
429 const u64 val)
d1c54acd 430{
9231da70 431 u64 * const vaddr = kmap_atomic(p->page);
d1c54acd 432 int i;
d1c54acd
MK
433
434 for (i = 0; i < 512; i++)
435 vaddr[i] = val;
436
9231da70 437 kunmap_atomic(vaddr);
d1c54acd
MK
438}
439
8448661d
CW
440static void fill_page_dma_32(struct i915_address_space *vm,
441 struct i915_page_dma *p,
442 const u32 v)
73eeea53 443{
8448661d 444 fill_page_dma(vm, p, (u64)v << 32 | v);
73eeea53
MK
445}
446
8bcdd0f7 447static int
8448661d 448setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
4ad2af1e 449{
8448661d 450 return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
4ad2af1e
MK
451}
452
8448661d 453static void cleanup_scratch_page(struct i915_address_space *vm)
4ad2af1e 454{
8448661d 455 cleanup_page_dma(vm, &vm->scratch_page);
4ad2af1e
MK
456}
457
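/* Allocate and map a single page table; the new table starts out empty
 * (used_ptes == 0) and must be filled with scratch entries by the caller.
 */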
8448661d 458static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
06fda602 459{
ec565b3c 460 struct i915_page_table *pt;
06fda602 461
dd19674b
CW
462 pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
463 if (unlikely(!pt))
06fda602
BW
464 return ERR_PTR(-ENOMEM);
465
dd19674b
CW
466 if (unlikely(setup_px(vm, pt))) {
467 kfree(pt);
468 return ERR_PTR(-ENOMEM);
469 }
06fda602 470
dd19674b 471 pt->used_ptes = 0;
06fda602
BW
472 return pt;
473}
474
8448661d 475static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
06fda602 476{
8448661d 477 cleanup_px(vm, pt);
2e906bea
MK
478 kfree(pt);
479}
480
481static void gen8_initialize_pt(struct i915_address_space *vm,
482 struct i915_page_table *pt)
483{
dd19674b
CW
484 fill_px(vm, pt,
485 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
2e906bea
MK
486}
487
488static void gen6_initialize_pt(struct i915_address_space *vm,
489 struct i915_page_table *pt)
490{
dd19674b
CW
491 fill32_px(vm, pt,
492 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
06fda602
BW
493}
494
8448661d 495static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
06fda602 496{
ec565b3c 497 struct i915_page_directory *pd;
06fda602 498
fe52e37f
CW
499 pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
500 if (unlikely(!pd))
06fda602
BW
501 return ERR_PTR(-ENOMEM);
502
fe52e37f
CW
503 if (unlikely(setup_px(vm, pd))) {
504 kfree(pd);
505 return ERR_PTR(-ENOMEM);
506 }
e5815a2e 507
fe52e37f 508 pd->used_pdes = 0;
06fda602
BW
509 return pd;
510}
511
8448661d 512static void free_pd(struct i915_address_space *vm,
275a991c 513 struct i915_page_directory *pd)
2e906bea 514{
fe52e37f
CW
515 cleanup_px(vm, pd);
516 kfree(pd);
2e906bea
MK
517}
518
519static void gen8_initialize_pd(struct i915_address_space *vm,
520 struct i915_page_directory *pd)
521{
dd19674b 522 unsigned int i;
2e906bea 523
dd19674b
CW
524 fill_px(vm, pd,
525 gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
526 for (i = 0; i < I915_PDES; i++)
527 pd->page_table[i] = vm->scratch_pt;
2e906bea
MK
528}
529
fe52e37f 530static int __pdp_init(struct i915_address_space *vm,
6ac18502
MT
531 struct i915_page_directory_pointer *pdp)
532{
3e490042 533 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
e2b763ca 534 unsigned int i;
6ac18502 535
fe52e37f 536 pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
e2b763ca
CW
537 GFP_KERNEL | __GFP_NOWARN);
538 if (unlikely(!pdp->page_directory))
6ac18502 539 return -ENOMEM;
6ac18502 540
fe52e37f
CW
541 for (i = 0; i < pdpes; i++)
542 pdp->page_directory[i] = vm->scratch_pd;
543
6ac18502
MT
544 return 0;
545}
546
547static void __pdp_fini(struct i915_page_directory_pointer *pdp)
548{
6ac18502
MT
549 kfree(pdp->page_directory);
550 pdp->page_directory = NULL;
551}
552
1e6437b0
MK
553static inline bool use_4lvl(const struct i915_address_space *vm)
554{
555 return i915_vm_is_48bit(vm);
556}
557
8448661d
CW
558static struct i915_page_directory_pointer *
559alloc_pdp(struct i915_address_space *vm)
762d9936
MT
560{
561 struct i915_page_directory_pointer *pdp;
562 int ret = -ENOMEM;
563
1e6437b0 564 WARN_ON(!use_4lvl(vm));
762d9936
MT
565
566 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
567 if (!pdp)
568 return ERR_PTR(-ENOMEM);
569
fe52e37f 570 ret = __pdp_init(vm, pdp);
762d9936
MT
571 if (ret)
572 goto fail_bitmap;
573
8448661d 574 ret = setup_px(vm, pdp);
762d9936
MT
575 if (ret)
576 goto fail_page_m;
577
578 return pdp;
579
580fail_page_m:
581 __pdp_fini(pdp);
582fail_bitmap:
583 kfree(pdp);
584
585 return ERR_PTR(ret);
586}
587
8448661d 588static void free_pdp(struct i915_address_space *vm,
6ac18502
MT
589 struct i915_page_directory_pointer *pdp)
590{
591 __pdp_fini(pdp);
1e6437b0
MK
592
593 if (!use_4lvl(vm))
594 return;
595
596 cleanup_px(vm, pdp);
597 kfree(pdp);
762d9936
MT
598}
599
69ab76fd
MT
600static void gen8_initialize_pdp(struct i915_address_space *vm,
601 struct i915_page_directory_pointer *pdp)
602{
603 gen8_ppgtt_pdpe_t scratch_pdpe;
604
605 scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
606
8448661d 607 fill_px(vm, pdp, scratch_pdpe);
69ab76fd
MT
608}
609
610static void gen8_initialize_pml4(struct i915_address_space *vm,
611 struct i915_pml4 *pml4)
612{
e2b763ca 613 unsigned int i;
762d9936 614
e2b763ca
CW
615 fill_px(vm, pml4,
616 gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
617 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
618 pml4->pdps[i] = vm->scratch_pdp;
6ac18502
MT
619}
620
94e409c1 621/* Broadwell Page Directory Pointer Descriptors */
e85b26dc 622static int gen8_write_pdp(struct drm_i915_gem_request *req,
7cb6d7ac
MT
623 unsigned entry,
624 dma_addr_t addr)
94e409c1 625{
4a570db5 626 struct intel_engine_cs *engine = req->engine;
73dec95e 627 u32 *cs;
94e409c1
BW
628
629 BUG_ON(entry >= 4);
630
73dec95e
TU
631 cs = intel_ring_begin(req, 6);
632 if (IS_ERR(cs))
633 return PTR_ERR(cs);
94e409c1 634
73dec95e
TU
635 *cs++ = MI_LOAD_REGISTER_IMM(1);
636 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
637 *cs++ = upper_32_bits(addr);
638 *cs++ = MI_LOAD_REGISTER_IMM(1);
639 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
640 *cs++ = lower_32_bits(addr);
641 intel_ring_advance(req, cs);
94e409c1
BW
642
643 return 0;
644}
645
e7167769
MK
646static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
647 struct drm_i915_gem_request *req)
94e409c1 648{
eeb9488e 649 int i, ret;
94e409c1 650
e7167769 651 for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
d852c7bf
MK
652 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
653
e85b26dc 654 ret = gen8_write_pdp(req, i, pd_daddr);
eeb9488e
BW
655 if (ret)
656 return ret;
94e409c1 657 }
d595bd4b 658
eeb9488e 659 return 0;
94e409c1
BW
660}
661
e7167769
MK
662static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
663 struct drm_i915_gem_request *req)
2dba3239
MT
664{
665 return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
666}
667
fce93755
MK
668/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
669 * the page table structures, we mark them dirty so that
670 * context switching/execlist queuing code takes extra steps
671 * to ensure that tlbs are flushed.
672 */
673static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
674{
49d73912 675 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
fce93755
MK
676}
677
2ce5179f
MW
678/* Removes entries from a single page table, releasing it if it's empty.
679 * Caller can use the return value to update higher-level entries.
680 */
681static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
d209b9c3 682 struct i915_page_table *pt,
dd19674b 683 u64 start, u64 length)
459108b8 684{
d209b9c3 685 unsigned int num_entries = gen8_pte_count(start, length);
37c63934
MK
686 unsigned int pte = gen8_pte_index(start);
687 unsigned int pte_end = pte + num_entries;
894ccebe
CW
688 const gen8_pte_t scratch_pte =
689 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
690 gen8_pte_t *vaddr;
459108b8 691
dd19674b 692 GEM_BUG_ON(num_entries > pt->used_ptes);
37c63934 693
dd19674b
CW
694 pt->used_ptes -= num_entries;
695 if (!pt->used_ptes)
696 return true;
2ce5179f 697
9231da70 698 vaddr = kmap_atomic_px(pt);
37c63934 699 while (pte < pte_end)
894ccebe 700 vaddr[pte++] = scratch_pte;
9231da70 701 kunmap_atomic(vaddr);
2ce5179f
MW
702
703 return false;
d209b9c3 704}
06fda602 705
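/* Point the PDE at the given page table and write the encoded entry into
 * the page directory's backing page.
 */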
dd19674b
CW
706static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
707 struct i915_page_directory *pd,
708 struct i915_page_table *pt,
709 unsigned int pde)
710{
711 gen8_pde_t *vaddr;
712
713 pd->page_table[pde] = pt;
714
715 vaddr = kmap_atomic_px(pd);
716 vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
717 kunmap_atomic(vaddr);
718}
719
2ce5179f 720static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
d209b9c3 721 struct i915_page_directory *pd,
dd19674b 722 u64 start, u64 length)
d209b9c3
MW
723{
724 struct i915_page_table *pt;
dd19674b 725 u32 pde;
d209b9c3
MW
726
727 gen8_for_each_pde(pt, pd, start, length, pde) {
bf75d59e
CW
728 GEM_BUG_ON(pt == vm->scratch_pt);
729
dd19674b
CW
730 if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
731 continue;
06fda602 732
dd19674b 733 gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
bf75d59e 734 GEM_BUG_ON(!pd->used_pdes);
fe52e37f 735 pd->used_pdes--;
dd19674b
CW
736
737 free_pt(vm, pt);
2ce5179f
MW
738 }
739
fe52e37f
CW
740 return !pd->used_pdes;
741}
2ce5179f 742
fe52e37f
CW
743static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
744 struct i915_page_directory_pointer *pdp,
745 struct i915_page_directory *pd,
746 unsigned int pdpe)
747{
748 gen8_ppgtt_pdpe_t *vaddr;
749
750 pdp->page_directory[pdpe] = pd;
1e6437b0 751 if (!use_4lvl(vm))
fe52e37f
CW
752 return;
753
754 vaddr = kmap_atomic_px(pdp);
755 vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
756 kunmap_atomic(vaddr);
d209b9c3 757}
06fda602 758
2ce5179f
MW
759/* Removes entries from a single page dir pointer, releasing it if it's empty.
760 * Caller can use the return value to update higher-level entries
761 */
762static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
d209b9c3 763 struct i915_page_directory_pointer *pdp,
fe52e37f 764 u64 start, u64 length)
d209b9c3
MW
765{
766 struct i915_page_directory *pd;
fe52e37f 767 unsigned int pdpe;
06fda602 768
d209b9c3 769 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
bf75d59e
CW
770 GEM_BUG_ON(pd == vm->scratch_pd);
771
fe52e37f
CW
772 if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
773 continue;
459108b8 774
fe52e37f 775 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
bf75d59e 776 GEM_BUG_ON(!pdp->used_pdpes);
e2b763ca 777 pdp->used_pdpes--;
2ce5179f 778
fe52e37f
CW
779 free_pd(vm, pd);
780 }
fce93755 781
e2b763ca 782 return !pdp->used_pdpes;
d209b9c3 783}
459108b8 784
fe52e37f
CW
785static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
786 u64 start, u64 length)
787{
788 gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
789}
790
e2b763ca
CW
791static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
792 struct i915_page_directory_pointer *pdp,
793 unsigned int pml4e)
794{
795 gen8_ppgtt_pml4e_t *vaddr;
796
797 pml4->pdps[pml4e] = pdp;
798
799 vaddr = kmap_atomic_px(pml4);
800 vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
801 kunmap_atomic(vaddr);
802}
803
2ce5179f
MW
804/* Removes entries from a single pml4.
805 * This is the top-level structure in 4-level page tables used on gen8+.
806 * Empty entries are always scratch pml4e.
807 */
fe52e37f
CW
808static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
809 u64 start, u64 length)
d209b9c3 810{
fe52e37f
CW
811 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
812 struct i915_pml4 *pml4 = &ppgtt->pml4;
d209b9c3 813 struct i915_page_directory_pointer *pdp;
e2b763ca 814 unsigned int pml4e;
2ce5179f 815
1e6437b0 816 GEM_BUG_ON(!use_4lvl(vm));
459108b8 817
d209b9c3 818 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
bf75d59e
CW
819 GEM_BUG_ON(pdp == vm->scratch_pdp);
820
e2b763ca
CW
821 if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
822 continue;
459108b8 823
e2b763ca 824 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
e2b763ca
CW
825
826 free_pdp(vm, pdp);
459108b8
BW
827 }
828}
829
894ccebe
CW
830struct sgt_dma {
831 struct scatterlist *sg;
832 dma_addr_t dma, max;
833};
834
9e89f9ee
CW
835struct gen8_insert_pte {
836 u16 pml4e;
837 u16 pdpe;
838 u16 pde;
839 u16 pte;
840};
841
842static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
843{
844 return (struct gen8_insert_pte) {
845 gen8_pml4e_index(start),
846 gen8_pdpe_index(start),
847 gen8_pde_index(start),
848 gen8_pte_index(start),
849 };
850}
851
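/* Write PTEs for the scatterlist walk starting at *idx. Returns false once
 * the scatterlist is exhausted, or true when the walk runs off the end of
 * the current PDP and the caller must continue in the next one (4-level
 * only).
 */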
894ccebe
CW
852static __always_inline bool
853gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
f9b5b782 854 struct i915_page_directory_pointer *pdp,
894ccebe 855 struct sgt_dma *iter,
9e89f9ee 856 struct gen8_insert_pte *idx,
f9b5b782
MT
857 enum i915_cache_level cache_level)
858{
894ccebe
CW
859 struct i915_page_directory *pd;
860 const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
861 gen8_pte_t *vaddr;
862 bool ret;
9df15b49 863
3e490042 864 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
9e89f9ee
CW
865 pd = pdp->page_directory[idx->pdpe];
866 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
894ccebe 867 do {
9e89f9ee
CW
868 vaddr[idx->pte] = pte_encode | iter->dma;
869
894ccebe
CW
870 iter->dma += PAGE_SIZE;
871 if (iter->dma >= iter->max) {
872 iter->sg = __sg_next(iter->sg);
873 if (!iter->sg) {
874 ret = false;
875 break;
876 }
7ad47cf2 877
894ccebe
CW
878 iter->dma = sg_dma_address(iter->sg);
879 iter->max = iter->dma + iter->sg->length;
d7b3de91 880 }
9df15b49 881
9e89f9ee
CW
882 if (++idx->pte == GEN8_PTES) {
883 idx->pte = 0;
884
885 if (++idx->pde == I915_PDES) {
886 idx->pde = 0;
887
894ccebe 888 /* Limited by sg length for 3lvl */
9e89f9ee
CW
889 if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
890 idx->pdpe = 0;
894ccebe 891 ret = true;
de5ba8eb 892 break;
894ccebe
CW
893 }
894
3e490042 895 GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
9e89f9ee 896 pd = pdp->page_directory[idx->pdpe];
7ad47cf2 897 }
894ccebe 898
9231da70 899 kunmap_atomic(vaddr);
9e89f9ee 900 vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
9df15b49 901 }
894ccebe 902 } while (1);
9231da70 903 kunmap_atomic(vaddr);
d1c54acd 904
894ccebe 905 return ret;
9df15b49
BW
906}
907
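/* Top-level insert paths: the 3-level variant walks the single PDP
 * directly, while the 4-level variant below restarts the PTE walk in the
 * next PDP each time one is filled.
 */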
894ccebe 908static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
4a234c5f 909 struct i915_vma *vma,
894ccebe
CW
910 enum i915_cache_level cache_level,
911 u32 unused)
f9b5b782 912{
17369ba0 913 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
894ccebe 914 struct sgt_dma iter = {
4a234c5f 915 .sg = vma->pages->sgl,
894ccebe
CW
916 .dma = sg_dma_address(iter.sg),
917 .max = iter.dma + iter.sg->length,
918 };
4a234c5f 919 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
f9b5b782 920
9e89f9ee
CW
921 gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
922 cache_level);
894ccebe 923}
de5ba8eb 924
894ccebe 925static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
4a234c5f 926 struct i915_vma *vma,
894ccebe
CW
927 enum i915_cache_level cache_level,
928 u32 unused)
929{
930 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
931 struct sgt_dma iter = {
4a234c5f 932 .sg = vma->pages->sgl,
894ccebe
CW
933 .dma = sg_dma_address(iter.sg),
934 .max = iter.dma + iter.sg->length,
935 };
936 struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
4a234c5f 937 struct gen8_insert_pte idx = gen8_insert_pte(vma->node.start);
de5ba8eb 938
9e89f9ee
CW
939 while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
940 &idx, cache_level))
941 GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
f9b5b782
MT
942}
943
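/* Release every non-scratch page table hanging off a page directory. */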
8448661d 944static void gen8_free_page_tables(struct i915_address_space *vm,
f37c0505 945 struct i915_page_directory *pd)
7ad47cf2
BW
946{
947 int i;
948
567047be 949 if (!px_page(pd))
7ad47cf2
BW
950 return;
951
fe52e37f
CW
952 for (i = 0; i < I915_PDES; i++) {
953 if (pd->page_table[i] != vm->scratch_pt)
954 free_pt(vm, pd->page_table[i]);
06fda602 955 }
d7b3de91
BW
956}
957
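/* Set up the scratch page and the scratch PT/PD (plus a scratch PDP for
 * 48-bit ppgtt) that all unused entries at every level point at.
 */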
8776f02b
MK
958static int gen8_init_scratch(struct i915_address_space *vm)
959{
64c050db 960 int ret;
8776f02b 961
8448661d 962 ret = setup_scratch_page(vm, I915_GFP_DMA);
8bcdd0f7
CW
963 if (ret)
964 return ret;
8776f02b 965
8448661d 966 vm->scratch_pt = alloc_pt(vm);
8776f02b 967 if (IS_ERR(vm->scratch_pt)) {
64c050db
MA
968 ret = PTR_ERR(vm->scratch_pt);
969 goto free_scratch_page;
8776f02b
MK
970 }
971
8448661d 972 vm->scratch_pd = alloc_pd(vm);
8776f02b 973 if (IS_ERR(vm->scratch_pd)) {
64c050db
MA
974 ret = PTR_ERR(vm->scratch_pd);
975 goto free_pt;
8776f02b
MK
976 }
977
1e6437b0 978 if (use_4lvl(vm)) {
8448661d 979 vm->scratch_pdp = alloc_pdp(vm);
69ab76fd 980 if (IS_ERR(vm->scratch_pdp)) {
64c050db
MA
981 ret = PTR_ERR(vm->scratch_pdp);
982 goto free_pd;
69ab76fd
MT
983 }
984 }
985
8776f02b
MK
986 gen8_initialize_pt(vm, vm->scratch_pt);
987 gen8_initialize_pd(vm, vm->scratch_pd);
1e6437b0 988 if (use_4lvl(vm))
69ab76fd 989 gen8_initialize_pdp(vm, vm->scratch_pdp);
8776f02b
MK
990
991 return 0;
64c050db
MA
992
993free_pd:
8448661d 994 free_pd(vm, vm->scratch_pd);
64c050db 995free_pt:
8448661d 996 free_pt(vm, vm->scratch_pt);
64c050db 997free_scratch_page:
8448661d 998 cleanup_scratch_page(vm);
64c050db
MA
999
1000 return ret;
8776f02b
MK
1001}
1002
650da34c
ZL
1003static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
1004{
1e6437b0
MK
1005 struct i915_address_space *vm = &ppgtt->base;
1006 struct drm_i915_private *dev_priv = vm->i915;
650da34c 1007 enum vgt_g2v_type msg;
650da34c
ZL
1008 int i;
1009
1e6437b0
MK
1010 if (use_4lvl(vm)) {
1011 const u64 daddr = px_dma(&ppgtt->pml4);
650da34c 1012
ab75bb5d
VS
1013 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
1014 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
650da34c
ZL
1015
1016 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
1017 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
1018 } else {
e7167769 1019 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1e6437b0 1020 const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
650da34c 1021
ab75bb5d
VS
1022 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
1023 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
650da34c
ZL
1024 }
1025
1026 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
1027 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
1028 }
1029
1030 I915_WRITE(vgtif_reg(g2v_notify), msg);
1031
1032 return 0;
1033}
1034
8776f02b
MK
1035static void gen8_free_scratch(struct i915_address_space *vm)
1036{
1e6437b0 1037 if (use_4lvl(vm))
8448661d
CW
1038 free_pdp(vm, vm->scratch_pdp);
1039 free_pd(vm, vm->scratch_pd);
1040 free_pt(vm, vm->scratch_pt);
1041 cleanup_scratch_page(vm);
8776f02b
MK
1042}
1043
8448661d 1044static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
762d9936 1045 struct i915_page_directory_pointer *pdp)
b45a6715 1046{
3e490042 1047 const unsigned int pdpes = i915_pdpes_per_pdp(vm);
b45a6715
BW
1048 int i;
1049
3e490042 1050 for (i = 0; i < pdpes; i++) {
fe52e37f 1051 if (pdp->page_directory[i] == vm->scratch_pd)
06fda602
BW
1052 continue;
1053
8448661d
CW
1054 gen8_free_page_tables(vm, pdp->page_directory[i]);
1055 free_pd(vm, pdp->page_directory[i]);
7ad47cf2 1056 }
69876bed 1057
8448661d 1058 free_pdp(vm, pdp);
762d9936
MT
1059}
1060
1061static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
1062{
1063 int i;
1064
c5d092a4
CW
1065 for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
1066 if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
762d9936
MT
1067 continue;
1068
8448661d 1069 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
762d9936
MT
1070 }
1071
8448661d 1072 cleanup_px(&ppgtt->base, &ppgtt->pml4);
762d9936
MT
1073}
1074
1075static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
1076{
49d73912 1077 struct drm_i915_private *dev_priv = vm->i915;
e5716f55 1078 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
762d9936 1079
275a991c 1080 if (intel_vgpu_active(dev_priv))
650da34c
ZL
1081 gen8_ppgtt_notify_vgt(ppgtt, false);
1082
1e6437b0 1083 if (use_4lvl(vm))
762d9936 1084 gen8_ppgtt_cleanup_4lvl(ppgtt);
1e6437b0
MK
1085 else
1086 gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);
d4ec9da0 1087
8776f02b 1088 gen8_free_scratch(vm);
b45a6715
BW
1089}
1090
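/* Ensure every page table covering [start, start + length) is present in
 * the page directory, allocating and hooking up new tables as needed; on
 * failure the partially allocated range is torn back down.
 */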
fe52e37f
CW
1091static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
1092 struct i915_page_directory *pd,
1093 u64 start, u64 length)
bf2b4ed2 1094{
d7b2633d 1095 struct i915_page_table *pt;
dd19674b 1096 u64 from = start;
fe52e37f 1097 unsigned int pde;
bf2b4ed2 1098
e8ebd8e2 1099 gen8_for_each_pde(pt, pd, start, length, pde) {
fe52e37f 1100 if (pt == vm->scratch_pt) {
dd19674b
CW
1101 pt = alloc_pt(vm);
1102 if (IS_ERR(pt))
1103 goto unwind;
5441f0cb 1104
dd19674b 1105 gen8_initialize_pt(vm, pt);
fe52e37f
CW
1106
1107 gen8_ppgtt_set_pde(vm, pd, pt, pde);
1108 pd->used_pdes++;
bf75d59e 1109 GEM_BUG_ON(pd->used_pdes > I915_PDES);
dd19674b 1110 }
fe52e37f 1111
dd19674b 1112 pt->used_ptes += gen8_pte_count(start, length);
7ad47cf2 1113 }
bf2b4ed2 1114 return 0;
7ad47cf2 1115
dd19674b
CW
1116unwind:
1117 gen8_ppgtt_clear_pd(vm, pd, from, start - from);
d7b3de91 1118 return -ENOMEM;
bf2b4ed2
BW
1119}
1120
c5d092a4
CW
1121static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
1122 struct i915_page_directory_pointer *pdp,
1123 u64 start, u64 length)
bf2b4ed2 1124{
5441f0cb 1125 struct i915_page_directory *pd;
e2b763ca
CW
1126 u64 from = start;
1127 unsigned int pdpe;
bf2b4ed2
BW
1128 int ret;
1129
e8ebd8e2 1130 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
e2b763ca
CW
1131 if (pd == vm->scratch_pd) {
1132 pd = alloc_pd(vm);
1133 if (IS_ERR(pd))
1134 goto unwind;
5441f0cb 1135
e2b763ca 1136 gen8_initialize_pd(vm, pd);
fe52e37f 1137 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
e2b763ca 1138 pdp->used_pdpes++;
3e490042 1139 GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));
75afcf72
CW
1140
1141 mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
e2b763ca
CW
1142 }
1143
1144 ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
bf75d59e
CW
1145 if (unlikely(ret))
1146 goto unwind_pd;
fe52e37f 1147 }
33c8819f 1148
d7b3de91 1149 return 0;
bf2b4ed2 1150
bf75d59e
CW
1151unwind_pd:
1152 if (!pd->used_pdes) {
1153 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1154 GEM_BUG_ON(!pdp->used_pdpes);
1155 pdp->used_pdpes--;
1156 free_pd(vm, pd);
1157 }
e2b763ca
CW
1158unwind:
1159 gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
1160 return -ENOMEM;
bf2b4ed2
BW
1161}
1162
c5d092a4
CW
1163static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
1164 u64 start, u64 length)
762d9936 1165{
c5d092a4
CW
1166 return gen8_ppgtt_alloc_pdp(vm,
1167 &i915_vm_to_ppgtt(vm)->pdp, start, length);
1168}
762d9936 1169
c5d092a4
CW
1170static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
1171 u64 start, u64 length)
1172{
1173 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
1174 struct i915_pml4 *pml4 = &ppgtt->pml4;
1175 struct i915_page_directory_pointer *pdp;
1176 u64 from = start;
1177 u32 pml4e;
1178 int ret;
762d9936 1179
e8ebd8e2 1180 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
c5d092a4
CW
1181 if (pml4->pdps[pml4e] == vm->scratch_pdp) {
1182 pdp = alloc_pdp(vm);
1183 if (IS_ERR(pdp))
1184 goto unwind;
762d9936 1185
c5d092a4
CW
1186 gen8_initialize_pdp(vm, pdp);
1187 gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
1188 }
762d9936 1189
c5d092a4 1190 ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
bf75d59e
CW
1191 if (unlikely(ret))
1192 goto unwind_pdp;
762d9936
MT
1193 }
1194
762d9936
MT
1195 return 0;
1196
bf75d59e
CW
1197unwind_pdp:
1198 if (!pdp->used_pdpes) {
1199 gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
1200 free_pdp(vm, pdp);
1201 }
c5d092a4
CW
1202unwind:
1203 gen8_ppgtt_clear_4lvl(vm, from, start - from);
1204 return -ENOMEM;
762d9936
MT
1205}
1206
8448661d
CW
1207static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
1208 struct i915_page_directory_pointer *pdp,
75c7b0b8 1209 u64 start, u64 length,
ea91e401
MT
1210 gen8_pte_t scratch_pte,
1211 struct seq_file *m)
1212{
3e490042 1213 struct i915_address_space *vm = &ppgtt->base;
ea91e401 1214 struct i915_page_directory *pd;
75c7b0b8 1215 u32 pdpe;
ea91e401 1216
e8ebd8e2 1217 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
ea91e401 1218 struct i915_page_table *pt;
75c7b0b8
CW
1219 u64 pd_len = length;
1220 u64 pd_start = start;
1221 u32 pde;
ea91e401 1222
e2b763ca 1223 if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
ea91e401
MT
1224 continue;
1225
1226 seq_printf(m, "\tPDPE #%d\n", pdpe);
e8ebd8e2 1227 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
75c7b0b8 1228 u32 pte;
ea91e401
MT
1229 gen8_pte_t *pt_vaddr;
1230
fe52e37f 1231 if (pd->page_table[pde] == ppgtt->base.scratch_pt)
ea91e401
MT
1232 continue;
1233
9231da70 1234 pt_vaddr = kmap_atomic_px(pt);
ea91e401 1235 for (pte = 0; pte < GEN8_PTES; pte += 4) {
75c7b0b8
CW
1236 u64 va = (pdpe << GEN8_PDPE_SHIFT |
1237 pde << GEN8_PDE_SHIFT |
1238 pte << GEN8_PTE_SHIFT);
ea91e401
MT
1239 int i;
1240 bool found = false;
1241
1242 for (i = 0; i < 4; i++)
1243 if (pt_vaddr[pte + i] != scratch_pte)
1244 found = true;
1245 if (!found)
1246 continue;
1247
1248 seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1249 for (i = 0; i < 4; i++) {
1250 if (pt_vaddr[pte + i] != scratch_pte)
1251 seq_printf(m, " %llx", pt_vaddr[pte + i]);
1252 else
1253 seq_puts(m, " SCRATCH ");
1254 }
1255 seq_puts(m, "\n");
1256 }
ea91e401
MT
1257 kunmap_atomic(pt_vaddr);
1258 }
1259 }
1260}
1261
1262static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1263{
1264 struct i915_address_space *vm = &ppgtt->base;
894ccebe
CW
1265 const gen8_pte_t scratch_pte =
1266 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
381b943b 1267 u64 start = 0, length = ppgtt->base.total;
ea91e401 1268
1e6437b0 1269 if (use_4lvl(vm)) {
75c7b0b8 1270 u64 pml4e;
ea91e401
MT
1271 struct i915_pml4 *pml4 = &ppgtt->pml4;
1272 struct i915_page_directory_pointer *pdp;
1273
e8ebd8e2 1274 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
c5d092a4 1275 if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
ea91e401
MT
1276 continue;
1277
1278 seq_printf(m, " PML4E #%llu\n", pml4e);
8448661d 1279 gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
ea91e401 1280 }
1e6437b0
MK
1281 } else {
1282 gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
ea91e401
MT
1283 }
1284}
1285
e2b763ca 1286static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
331f38e7 1287{
e2b763ca
CW
1288 struct i915_address_space *vm = &ppgtt->base;
1289 struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
1290 struct i915_page_directory *pd;
1291 u64 start = 0, length = ppgtt->base.total;
1292 u64 from = start;
1293 unsigned int pdpe;
331f38e7 1294
e2b763ca
CW
1295 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
1296 pd = alloc_pd(vm);
1297 if (IS_ERR(pd))
1298 goto unwind;
331f38e7 1299
e2b763ca
CW
1300 gen8_initialize_pd(vm, pd);
1301 gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
1302 pdp->used_pdpes++;
1303 }
331f38e7 1304
e2b763ca
CW
1305 pdp->used_pdpes++; /* never remove */
1306 return 0;
331f38e7 1307
e2b763ca
CW
1308unwind:
1309 start -= from;
1310 gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
1311 gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
1312 free_pd(vm, pd);
1313 }
1314 pdp->used_pdpes = 0;
1315 return -ENOMEM;
331f38e7
ZL
1316}
1317
/*
 * GEN8 legacy ppgtt programming is accomplished through a maximum of 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal x86
 * terms. Each PDP represents 1GB of memory: 4 * 512 * 512 * 4096 = 4GB of
 * legacy 32-bit address space.
 */
5c5f6457 1325static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
37aca44a 1326{
1e6437b0
MK
1327 struct i915_address_space *vm = &ppgtt->base;
1328 struct drm_i915_private *dev_priv = vm->i915;
8776f02b 1329 int ret;
7cb6d7ac 1330
1e6437b0
MK
1331 ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
1332 1ULL << 48 :
1333 1ULL << 32;
1334
8776f02b 1335 ret = gen8_init_scratch(&ppgtt->base);
1e6437b0
MK
1336 if (ret) {
1337 ppgtt->base.total = 0;
8776f02b 1338 return ret;
1e6437b0 1339 }
69876bed 1340
8448661d
CW
	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter, so play safe for now.
	 */
1344 if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
1345 ppgtt->base.pt_kmap_wc = true;
1346
1e6437b0 1347 if (use_4lvl(vm)) {
8448661d 1348 ret = setup_px(&ppgtt->base, &ppgtt->pml4);
762d9936
MT
1349 if (ret)
1350 goto free_scratch;
6ac18502 1351
69ab76fd
MT
1352 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1353
e7167769 1354 ppgtt->switch_mm = gen8_mm_switch_4lvl;
c5d092a4 1355 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
894ccebe 1356 ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
fe52e37f 1357 ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
762d9936 1358 } else {
fe52e37f 1359 ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
81ba8aef
MT
1360 if (ret)
1361 goto free_scratch;
1362
275a991c 1363 if (intel_vgpu_active(dev_priv)) {
e2b763ca
CW
1364 ret = gen8_preallocate_top_level_pdp(ppgtt);
1365 if (ret) {
1366 __pdp_fini(&ppgtt->pdp);
331f38e7 1367 goto free_scratch;
e2b763ca 1368 }
331f38e7 1369 }
894ccebe 1370
e7167769 1371 ppgtt->switch_mm = gen8_mm_switch_3lvl;
c5d092a4 1372 ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
894ccebe 1373 ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
fe52e37f 1374 ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
81ba8aef 1375 }
6ac18502 1376
275a991c 1377 if (intel_vgpu_active(dev_priv))
650da34c
ZL
1378 gen8_ppgtt_notify_vgt(ppgtt, true);
1379
054b9acd
MK
1380 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
1381 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1382 ppgtt->base.bind_vma = ppgtt_bind_vma;
1383 ppgtt->debug_dump = gen8_dump_ppgtt;
1384
d7b2633d 1385 return 0;
6ac18502
MT
1386
1387free_scratch:
1388 gen8_free_scratch(&ppgtt->base);
1389 return ret;
d7b2633d
MT
1390}
1391
87d60b63
BW
1392static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1393{
87d60b63 1394 struct i915_address_space *vm = &ppgtt->base;
09942c65 1395 struct i915_page_table *unused;
07749ef3 1396 gen6_pte_t scratch_pte;
381b943b
CW
1397 u32 pd_entry, pte, pde;
1398 u32 start = 0, length = ppgtt->base.total;
87d60b63 1399
8bcdd0f7 1400 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
4fb84d99 1401 I915_CACHE_LLC, 0);
87d60b63 1402
731f74c5 1403 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
87d60b63 1404 u32 expected;
07749ef3 1405 gen6_pte_t *pt_vaddr;
567047be 1406 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
09942c65 1407 pd_entry = readl(ppgtt->pd_addr + pde);
87d60b63
BW
1408 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1409
1410 if (pd_entry != expected)
1411 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1412 pde,
1413 pd_entry,
1414 expected);
1415 seq_printf(m, "\tPDE: %x\n", pd_entry);
1416
9231da70 1417 pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);
d1c54acd 1418
07749ef3 1419 for (pte = 0; pte < GEN6_PTES; pte+=4) {
87d60b63 1420 unsigned long va =
07749ef3 1421 (pde * PAGE_SIZE * GEN6_PTES) +
87d60b63
BW
1422 (pte * PAGE_SIZE);
1423 int i;
1424 bool found = false;
1425 for (i = 0; i < 4; i++)
1426 if (pt_vaddr[pte + i] != scratch_pte)
1427 found = true;
1428 if (!found)
1429 continue;
1430
1431 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1432 for (i = 0; i < 4; i++) {
1433 if (pt_vaddr[pte + i] != scratch_pte)
1434 seq_printf(m, " %08x", pt_vaddr[pte + i]);
1435 else
1436 seq_puts(m, " SCRATCH ");
1437 }
1438 seq_puts(m, "\n");
1439 }
9231da70 1440 kunmap_atomic(pt_vaddr);
87d60b63
BW
1441 }
1442}
1443
678d96fb 1444/* Write pde (index) from the page directory @pd to the page table @pt */
16a011c8
CW
1445static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
1446 const unsigned int pde,
1447 const struct i915_page_table *pt)
6197349b 1448{
678d96fb 1449 /* Caller needs to make sure the write completes if necessary */
16a011c8
CW
1450 writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
1451 ppgtt->pd_addr + pde);
678d96fb 1452}
6197349b 1453
678d96fb
BW
1454/* Write all the page tables found in the ppgtt structure to incrementing page
1455 * directories. */
16a011c8 1456static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
75c7b0b8 1457 u32 start, u32 length)
678d96fb 1458{
ec565b3c 1459 struct i915_page_table *pt;
16a011c8 1460 unsigned int pde;
678d96fb 1461
16a011c8
CW
1462 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
1463 gen6_write_pde(ppgtt, pde, pt);
678d96fb 1464
16a011c8 1465 mark_tlbs_dirty(ppgtt);
dd19674b 1466 wmb();
3e302542
BW
1467}
1468
75c7b0b8 1469static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
3e302542 1470{
dd19674b
CW
1471 GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
1472 return ppgtt->pd.base.ggtt_offset << 10;
b4a74e3a
BW
1473}
1474
90252e5c 1475static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1476 struct drm_i915_gem_request *req)
90252e5c 1477{
4a570db5 1478 struct intel_engine_cs *engine = req->engine;
73dec95e 1479 u32 *cs;
90252e5c 1480
90252e5c 1481 /* NB: TLBs must be flushed and invalidated before a switch */
73dec95e
TU
1482 cs = intel_ring_begin(req, 6);
1483 if (IS_ERR(cs))
1484 return PTR_ERR(cs);
90252e5c 1485
73dec95e
TU
1486 *cs++ = MI_LOAD_REGISTER_IMM(2);
1487 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1488 *cs++ = PP_DIR_DCLV_2G;
1489 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1490 *cs++ = get_pd_offset(ppgtt);
1491 *cs++ = MI_NOOP;
1492 intel_ring_advance(req, cs);
90252e5c
BW
1493
1494 return 0;
1495}
1496
48a10389 1497static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1498 struct drm_i915_gem_request *req)
48a10389 1499{
4a570db5 1500 struct intel_engine_cs *engine = req->engine;
73dec95e 1501 u32 *cs;
48a10389 1502
48a10389 1503 /* NB: TLBs must be flushed and invalidated before a switch */
73dec95e
TU
1504 cs = intel_ring_begin(req, 6);
1505 if (IS_ERR(cs))
1506 return PTR_ERR(cs);
1507
1508 *cs++ = MI_LOAD_REGISTER_IMM(2);
1509 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1510 *cs++ = PP_DIR_DCLV_2G;
1511 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1512 *cs++ = get_pd_offset(ppgtt);
1513 *cs++ = MI_NOOP;
1514 intel_ring_advance(req, cs);
48a10389
BW
1515
1516 return 0;
1517}
1518
eeb9488e 1519static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1520 struct drm_i915_gem_request *req)
eeb9488e 1521{
4a570db5 1522 struct intel_engine_cs *engine = req->engine;
8eb95204 1523 struct drm_i915_private *dev_priv = req->i915;
48a10389 1524
e2f80391
TU
1525 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1526 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
eeb9488e
BW
1527 return 0;
1528}
1529
c6be607a 1530static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
eeb9488e 1531{
e2f80391 1532 struct intel_engine_cs *engine;
3b3f1650 1533 enum intel_engine_id id;
3e302542 1534
3b3f1650 1535 for_each_engine(engine, dev_priv, id) {
c6be607a
TU
1536 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1537 GEN8_GFX_PPGTT_48B : 0;
e2f80391 1538 I915_WRITE(RING_MODE_GEN7(engine),
2dba3239 1539 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
eeb9488e 1540 }
eeb9488e 1541}
6197349b 1542
c6be607a 1543static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
3e302542 1544{
e2f80391 1545 struct intel_engine_cs *engine;
75c7b0b8 1546 u32 ecochk, ecobits;
3b3f1650 1547 enum intel_engine_id id;
6197349b 1548
b4a74e3a
BW
1549 ecobits = I915_READ(GAC_ECO_BITS);
1550 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
a65c2fcd 1551
b4a74e3a 1552 ecochk = I915_READ(GAM_ECOCHK);
772c2a51 1553 if (IS_HASWELL(dev_priv)) {
b4a74e3a
BW
1554 ecochk |= ECOCHK_PPGTT_WB_HSW;
1555 } else {
1556 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1557 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1558 }
1559 I915_WRITE(GAM_ECOCHK, ecochk);
a65c2fcd 1560
3b3f1650 1561 for_each_engine(engine, dev_priv, id) {
6197349b 1562 /* GFX_MODE is per-ring on gen7+ */
e2f80391 1563 I915_WRITE(RING_MODE_GEN7(engine),
b4a74e3a 1564 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b 1565 }
b4a74e3a 1566}
6197349b 1567
c6be607a 1568static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
b4a74e3a 1569{
75c7b0b8 1570 u32 ecochk, gab_ctl, ecobits;
a65c2fcd 1571
b4a74e3a
BW
1572 ecobits = I915_READ(GAC_ECO_BITS);
1573 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1574 ECOBITS_PPGTT_CACHE64B);
6197349b 1575
b4a74e3a
BW
1576 gab_ctl = I915_READ(GAB_CTL);
1577 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1578
1579 ecochk = I915_READ(GAM_ECOCHK);
1580 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1581
1582 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b
BW
1583}
1584
/* PPGTT support for Sandybridge/Gen6 and later */
853ba5d2 1586static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
dd19674b 1587 u64 start, u64 length)
1d2a314c 1588{
e5716f55 1589 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
dd19674b
CW
1590 unsigned int first_entry = start >> PAGE_SHIFT;
1591 unsigned int pde = first_entry / GEN6_PTES;
1592 unsigned int pte = first_entry % GEN6_PTES;
1593 unsigned int num_entries = length >> PAGE_SHIFT;
1594 gen6_pte_t scratch_pte =
1595 vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);
1d2a314c 1596
7bddb01f 1597 while (num_entries) {
dd19674b
CW
1598 struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
1599 unsigned int end = min(pte + num_entries, GEN6_PTES);
1600 gen6_pte_t *vaddr;
7bddb01f 1601
dd19674b 1602 num_entries -= end - pte;
1d2a314c 1603
dd19674b
CW
1604 /* Note that the hw doesn't support removing PDE on the fly
1605 * (they are cached inside the context with no means to
1606 * invalidate the cache), so we can only reset the PTE
1607 * entries back to scratch.
1608 */
1d2a314c 1609
dd19674b
CW
1610 vaddr = kmap_atomic_px(pt);
1611 do {
1612 vaddr[pte++] = scratch_pte;
1613 } while (pte < end);
1614 kunmap_atomic(vaddr);
1d2a314c 1615
dd19674b 1616 pte = 0;
7bddb01f 1617 }
1d2a314c
DV
1618}
1619
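/* Write gen6 PTEs for each page of the VMA, walking the backing scatterlist
 * and moving on to the next page table whenever the current one fills up.
 */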
853ba5d2 1620static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
4a234c5f 1621 struct i915_vma *vma,
75c7b0b8
CW
1622 enum i915_cache_level cache_level,
1623 u32 flags)
def886c3 1624{
e5716f55 1625 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
4a234c5f 1626 unsigned first_entry = vma->node.start >> PAGE_SHIFT;
07749ef3
MT
1627 unsigned act_pt = first_entry / GEN6_PTES;
1628 unsigned act_pte = first_entry % GEN6_PTES;
b31144c0
CW
1629 const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
1630 struct sgt_dma iter;
1631 gen6_pte_t *vaddr;
1632
9231da70 1633 vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
4a234c5f 1634 iter.sg = vma->pages->sgl;
b31144c0
CW
1635 iter.dma = sg_dma_address(iter.sg);
1636 iter.max = iter.dma + iter.sg->length;
1637 do {
1638 vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);
6e995e23 1639
b31144c0
CW
1640 iter.dma += PAGE_SIZE;
1641 if (iter.dma == iter.max) {
1642 iter.sg = __sg_next(iter.sg);
1643 if (!iter.sg)
1644 break;
6e995e23 1645
b31144c0
CW
1646 iter.dma = sg_dma_address(iter.sg);
1647 iter.max = iter.dma + iter.sg->length;
1648 }
24f3a8cf 1649
07749ef3 1650 if (++act_pte == GEN6_PTES) {
9231da70
CW
1651 kunmap_atomic(vaddr);
1652 vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
6e995e23 1653 act_pte = 0;
def886c3 1654 }
b31144c0 1655 } while (1);
9231da70 1656 kunmap_atomic(vaddr);
def886c3
DV
1657}
1658
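/* Make sure every page table covering [start, start + length) exists,
 * writing new PDEs for any freshly allocated tables and flushing the PD
 * writes once at the end if anything changed.
 */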
678d96fb 1659static int gen6_alloc_va_range(struct i915_address_space *vm,
dd19674b 1660 u64 start, u64 length)
678d96fb 1661{
e5716f55 1662 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
ec565b3c 1663 struct i915_page_table *pt;
dd19674b
CW
1664 u64 from = start;
1665 unsigned int pde;
1666 bool flush = false;
4933d519 1667
731f74c5 1668 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
dd19674b
CW
1669 if (pt == vm->scratch_pt) {
1670 pt = alloc_pt(vm);
1671 if (IS_ERR(pt))
1672 goto unwind_out;
4933d519 1673
dd19674b
CW
1674 gen6_initialize_pt(vm, pt);
1675 ppgtt->pd.page_table[pde] = pt;
1676 gen6_write_pde(ppgtt, pde, pt);
1677 flush = true;
4933d519 1678 }
4933d519
MT
1679 }
1680
dd19674b
CW
1681 if (flush) {
1682 mark_tlbs_dirty(ppgtt);
1683 wmb();
678d96fb
BW
1684 }
1685
1686 return 0;
4933d519
MT
1687
1688unwind_out:
dd19674b
CW
1689 gen6_ppgtt_clear_range(vm, from, start);
1690 return -ENOMEM;
678d96fb
BW
1691}
1692
8776f02b
MK
1693static int gen6_init_scratch(struct i915_address_space *vm)
1694{
8bcdd0f7 1695 int ret;
8776f02b 1696
8448661d 1697 ret = setup_scratch_page(vm, I915_GFP_DMA);
8bcdd0f7
CW
1698 if (ret)
1699 return ret;
8776f02b 1700
8448661d 1701 vm->scratch_pt = alloc_pt(vm);
8776f02b 1702 if (IS_ERR(vm->scratch_pt)) {
8448661d 1703 cleanup_scratch_page(vm);
8776f02b
MK
1704 return PTR_ERR(vm->scratch_pt);
1705 }
1706
1707 gen6_initialize_pt(vm, vm->scratch_pt);
1708
1709 return 0;
1710}
1711
1712static void gen6_free_scratch(struct i915_address_space *vm)
1713{
8448661d
CW
1714 free_pt(vm, vm->scratch_pt);
1715 cleanup_scratch_page(vm);
8776f02b
MK
1716}
1717
061dd493 1718static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
a00d825d 1719{
e5716f55 1720 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
731f74c5 1721 struct i915_page_directory *pd = &ppgtt->pd;
09942c65 1722 struct i915_page_table *pt;
75c7b0b8 1723 u32 pde;
4933d519 1724
061dd493
DV
1725 drm_mm_remove_node(&ppgtt->node);
1726
731f74c5 1727 gen6_for_all_pdes(pt, pd, pde)
79ab9370 1728 if (pt != vm->scratch_pt)
8448661d 1729 free_pt(vm, pt);
06fda602 1730
8776f02b 1731 gen6_free_scratch(vm);
3440d265
DV
1732}
1733
b146520f 1734static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
3440d265 1735{
8776f02b 1736 struct i915_address_space *vm = &ppgtt->base;
49d73912 1737 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 1738 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f 1739 int ret;
1d2a314c 1740
c8d4c0d6
BW
	/* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
	 * allocator works in address space sizes, so it's multiplied by page
	 * size. We allocate at the top of the GTT to avoid fragmentation.
	 */
72e96d64 1745 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
4933d519 1746
8776f02b
MK
1747 ret = gen6_init_scratch(vm);
1748 if (ret)
1749 return ret;
4933d519 1750
e007b19d
CW
1751 ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
1752 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1753 I915_COLOR_UNEVICTABLE,
1754 0, ggtt->base.total,
1755 PIN_HIGH);
c8c26622 1756 if (ret)
678d96fb
BW
1757 goto err_out;
1758
72e96d64 1759 if (ppgtt->node.start < ggtt->mappable_end)
c8d4c0d6 1760 DRM_DEBUG("Forced to use aperture for PDEs\n");
1d2a314c 1761
52c126ee
CW
1762 ppgtt->pd.base.ggtt_offset =
1763 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1764
1765 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
1766 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
1767
c8c26622 1768 return 0;
678d96fb
BW
1769
1770err_out:
8776f02b 1771 gen6_free_scratch(vm);
678d96fb 1772 return ret;
b146520f
BW
1773}
1774
b146520f
BW
1775static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
1776{
2f2cf682 1777 return gen6_ppgtt_allocate_page_directories(ppgtt);
4933d519 1778}
06dc68d6 1779
4933d519 1780static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
75c7b0b8 1781 u64 start, u64 length)
4933d519 1782{
ec565b3c 1783 struct i915_page_table *unused;
75c7b0b8 1784 u32 pde;
1d2a314c 1785
731f74c5 1786 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
79ab9370 1787 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
b146520f
BW
1788}
1789
5c5f6457 1790static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
b146520f 1791{
49d73912 1792 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 1793 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f
BW
1794 int ret;
1795
72e96d64 1796 ppgtt->base.pte_encode = ggtt->base.pte_encode;
5db94019 1797 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
b146520f 1798 ppgtt->switch_mm = gen6_mm_switch;
772c2a51 1799 else if (IS_HASWELL(dev_priv))
b146520f 1800 ppgtt->switch_mm = hsw_mm_switch;
5db94019 1801 else if (IS_GEN7(dev_priv))
b146520f 1802 ppgtt->switch_mm = gen7_mm_switch;
8eb95204 1803 else
b146520f
BW
1804 BUG();
1805
1806 ret = gen6_ppgtt_alloc(ppgtt);
1807 if (ret)
1808 return ret;
1809
09942c65 1810 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
1d2a314c 1811
5c5f6457 1812 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
16a011c8 1813 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
678d96fb 1814
52c126ee
CW
1815 ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
1816 if (ret) {
1817 gen6_ppgtt_cleanup(&ppgtt->base);
1818 return ret;
1819 }
1820
054b9acd
MK
1821 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
1822 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1823 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1824 ppgtt->base.bind_vma = ppgtt_bind_vma;
1825 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1826 ppgtt->debug_dump = gen6_dump_ppgtt;
1827
440fd528 1828 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
b146520f
BW
1829 ppgtt->node.size >> 20,
1830 ppgtt->node.start / PAGE_SIZE);
3440d265 1831
52c126ee
CW
1832 DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
1833 ppgtt->pd.base.ggtt_offset << 10);
fa76da34 1834
b146520f 1835 return 0;
3440d265
DV
1836}
1837
2bfa996e
CW
1838static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
1839 struct drm_i915_private *dev_priv)
3440d265 1840{
49d73912 1841 ppgtt->base.i915 = dev_priv;
8448661d 1842 ppgtt->base.dma = &dev_priv->drm.pdev->dev;
3440d265 1843
2bfa996e 1844 if (INTEL_INFO(dev_priv)->gen < 8)
5c5f6457 1845 return gen6_ppgtt_init(ppgtt);
3ed124b2 1846 else
d7b2633d 1847 return gen8_ppgtt_init(ppgtt);
fa76da34 1848}
c114f76a 1849
a2cad9df 1850static void i915_address_space_init(struct i915_address_space *vm,
80b204bc
CW
1851 struct drm_i915_private *dev_priv,
1852 const char *name)
a2cad9df 1853{
80b204bc 1854 i915_gem_timeline_init(dev_priv, &vm->timeline, name);
47db922f 1855
381b943b 1856 drm_mm_init(&vm->mm, 0, vm->total);
47db922f
CW
1857 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
1858
a2cad9df
MW
1859 INIT_LIST_HEAD(&vm->active_list);
1860 INIT_LIST_HEAD(&vm->inactive_list);
50e046b6 1861 INIT_LIST_HEAD(&vm->unbound_list);
47db922f 1862
a2cad9df 1863 list_add_tail(&vm->global_link, &dev_priv->vm_list);
8448661d 1864 pagevec_init(&vm->free_pages, false);
a2cad9df
MW
1865}
1866
ed9724dd
MA
1867static void i915_address_space_fini(struct i915_address_space *vm)
1868{
8448661d
CW
1869 if (pagevec_count(&vm->free_pages))
1870 vm_free_pages_release(vm);
1871
ed9724dd
MA
1872 i915_gem_timeline_fini(&vm->timeline);
1873 drm_mm_takedown(&vm->mm);
1874 list_del(&vm->global_link);
1875}
1876
c6be607a 1877static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
d5165ebd 1878{
d5165ebd
TG
1879 /* This function is for gtt related workarounds. This function is
1880 * called on driver load and after a GPU reset, so you can place
1881 * workarounds here even if they get overwritten by GPU reset.
1882 */
46c26662 1883 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk,cfl */
8652744b 1884 if (IS_BROADWELL(dev_priv))
d5165ebd 1885 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
920a14b2 1886 else if (IS_CHERRYVIEW(dev_priv))
d5165ebd 1887 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
b976dc53 1888 else if (IS_GEN9_BC(dev_priv))
d5165ebd 1889 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
9fb5026f 1890 else if (IS_GEN9_LP(dev_priv))
d5165ebd
TG
1891 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
1892}
1893
c6be607a 1894int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
82460d97 1895{
c6be607a 1896 gtt_write_workarounds(dev_priv);
d5165ebd 1897
671b5013
TD
1898 /* In the case of execlists, PPGTT is enabled by the context descriptor
1899 * and the PDPs are contained within the context itself. We don't
1900 * need to do anything here. */
1901 if (i915.enable_execlists)
1902 return 0;
1903
c6be607a 1904 if (!USES_PPGTT(dev_priv))
82460d97
DV
1905 return 0;
1906
5db94019 1907 if (IS_GEN6(dev_priv))
c6be607a 1908 gen6_ppgtt_enable(dev_priv);
5db94019 1909 else if (IS_GEN7(dev_priv))
c6be607a
TU
1910 gen7_ppgtt_enable(dev_priv);
1911 else if (INTEL_GEN(dev_priv) >= 8)
1912 gen8_ppgtt_enable(dev_priv);
82460d97 1913 else
c6be607a 1914 MISSING_CASE(INTEL_GEN(dev_priv));
82460d97 1915
4ad2fd88
JH
1916 return 0;
1917}
1d2a314c 1918
4d884705 1919struct i915_hw_ppgtt *
2bfa996e 1920i915_ppgtt_create(struct drm_i915_private *dev_priv,
80b204bc
CW
1921 struct drm_i915_file_private *fpriv,
1922 const char *name)
4d884705
DV
1923{
1924 struct i915_hw_ppgtt *ppgtt;
1925 int ret;
1926
1927 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1928 if (!ppgtt)
1929 return ERR_PTR(-ENOMEM);
1930
1188bc66 1931 ret = __hw_ppgtt_init(ppgtt, dev_priv);
4d884705
DV
1932 if (ret) {
1933 kfree(ppgtt);
1934 return ERR_PTR(ret);
1935 }
1936
1188bc66
CW
1937 kref_init(&ppgtt->ref);
1938 i915_address_space_init(&ppgtt->base, dev_priv, name);
1939 ppgtt->base.file = fpriv;
1940
198c974d
DCS
1941 trace_i915_ppgtt_create(&ppgtt->base);
1942
4d884705
DV
1943 return ppgtt;
1944}
1945
0c7eeda1
CW
1946void i915_ppgtt_close(struct i915_address_space *vm)
1947{
1948 struct list_head *phases[] = {
1949 &vm->active_list,
1950 &vm->inactive_list,
1951 &vm->unbound_list,
1952 NULL,
1953 }, **phase;
1954
1955 GEM_BUG_ON(vm->closed);
1956 vm->closed = true;
1957
1958 for (phase = phases; *phase; phase++) {
1959 struct i915_vma *vma, *vn;
1960
1961 list_for_each_entry_safe(vma, vn, *phase, vm_link)
1962 if (!i915_vma_is_closed(vma))
1963 i915_vma_close(vma);
1964 }
1965}
1966
ed9724dd 1967void i915_ppgtt_release(struct kref *kref)
ee960be7
DV
1968{
1969 struct i915_hw_ppgtt *ppgtt =
1970 container_of(kref, struct i915_hw_ppgtt, ref);
1971
198c974d
DCS
1972 trace_i915_ppgtt_release(&ppgtt->base);
1973
50e046b6 1974 /* vmas should already be unbound and destroyed */
ee960be7
DV
1975 WARN_ON(!list_empty(&ppgtt->base.active_list));
1976 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
50e046b6 1977 WARN_ON(!list_empty(&ppgtt->base.unbound_list));
ee960be7
DV
1978
1979 ppgtt->base.cleanup(&ppgtt->base);
8448661d 1980 i915_address_space_fini(&ppgtt->base);
ee960be7
DV
1981 kfree(ppgtt);
1982}
1d2a314c 1983
a81cc00c
BW
1984 /* Certain Gen5 chipsets require idling the GPU before
1985 * unmapping anything from the GTT when VT-d is enabled.
1986 */
97d6d7ab 1987static bool needs_idle_maps(struct drm_i915_private *dev_priv)
a81cc00c 1988{
a81cc00c
BW
1989 /* Query intel_iommu to see if we need the workaround. Presumably that
1990 * was loaded first.
1991 */
80debff8 1992 return IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_vtd_active();
a81cc00c
BW
1993}
1994
dc97997a 1995void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
828c7908 1996{
e2f80391 1997 struct intel_engine_cs *engine;
3b3f1650 1998 enum intel_engine_id id;
828c7908 1999
dc97997a 2000 if (INTEL_INFO(dev_priv)->gen < 6)
828c7908
BW
2001 return;
2002
3b3f1650 2003 for_each_engine(engine, dev_priv, id) {
828c7908 2004 u32 fault_reg;
e2f80391 2005 fault_reg = I915_READ(RING_FAULT_REG(engine));
828c7908
BW
2006 if (fault_reg & RING_FAULT_VALID) {
2007 DRM_DEBUG_DRIVER("Unexpected fault\n"
59a5d290 2008 "\tAddr: 0x%08lx\n"
828c7908
BW
2009 "\tAddress space: %s\n"
2010 "\tSource ID: %d\n"
2011 "\tType: %d\n",
2012 fault_reg & PAGE_MASK,
2013 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2014 RING_FAULT_SRCID(fault_reg),
2015 RING_FAULT_FAULT_TYPE(fault_reg));
e2f80391 2016 I915_WRITE(RING_FAULT_REG(engine),
828c7908
BW
2017 fault_reg & ~RING_FAULT_VALID);
2018 }
2019 }
3b3f1650
AG
2020
2021 /* Engine specific init may not have been done till this point. */
2022 if (dev_priv->engine[RCS])
2023 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
828c7908
BW
2024}
2025
275a991c 2026void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
828c7908 2027{
72e96d64 2028 struct i915_ggtt *ggtt = &dev_priv->ggtt;
828c7908
BW
2029
2030 /* Don't bother messing with faults pre GEN6 as we have little
2031 * documentation supporting that it's a good idea.
2032 */
275a991c 2033 if (INTEL_GEN(dev_priv) < 6)
828c7908
BW
2034 return;
2035
dc97997a 2036 i915_check_and_clear_faults(dev_priv);
828c7908 2037
381b943b 2038 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
91e56499 2039
7c3f86b6 2040 i915_ggtt_invalidate(dev_priv);
828c7908
BW
2041}
2042
03ac84f1
CW
2043int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2044 struct sg_table *pages)
7c2e6fdf 2045{
1a292fa5
CW
2046 do {
2047 if (dma_map_sg(&obj->base.dev->pdev->dev,
2048 pages->sgl, pages->nents,
2049 PCI_DMA_BIDIRECTIONAL))
2050 return 0;
2051
2052 /* If the DMA remap fails, one cause can be that we have
2053 * too many objects pinned in a small remapping table,
2054 * such as swiotlb. Incrementally purge all other objects and
2055 * try again - if there are no more pages to remove from
2056 * the DMA remapper, i915_gem_shrink will return 0.
2057 */
2058 GEM_BUG_ON(obj->mm.pages == pages);
2059 } while (i915_gem_shrink(to_i915(obj->base.dev),
2060 obj->base.size >> PAGE_SHIFT,
2061 I915_SHRINK_BOUND |
2062 I915_SHRINK_UNBOUND |
2063 I915_SHRINK_ACTIVE));
9da3da66 2064
03ac84f1 2065 return -ENOSPC;
7c2e6fdf
DV
2066}
2067
2c642b07 2068static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
94ec8f61 2069{
94ec8f61 2070 writeq(pte, addr);
94ec8f61
BW
2071}
2072
d6473f56
CW
2073static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2074 dma_addr_t addr,
75c7b0b8 2075 u64 offset,
d6473f56
CW
2076 enum i915_cache_level level,
2077 u32 unused)
2078{
7c3f86b6 2079 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2080 gen8_pte_t __iomem *pte =
7c3f86b6 2081 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2082
4fb84d99 2083 gen8_set_pte(pte, gen8_pte_encode(addr, level));
d6473f56 2084
7c3f86b6 2085 ggtt->invalidate(vm->i915);
d6473f56
CW
2086}
2087
94ec8f61 2088static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2089 struct i915_vma *vma,
75c7b0b8
CW
2090 enum i915_cache_level level,
2091 u32 unused)
94ec8f61 2092{
ce7fda2e 2093 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
85d1225e
DG
2094 struct sgt_iter sgt_iter;
2095 gen8_pte_t __iomem *gtt_entries;
894ccebe 2096 const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
85d1225e 2097 dma_addr_t addr;
be69459a 2098
894ccebe 2099 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
4a234c5f
MA
2100 gtt_entries += vma->node.start >> PAGE_SHIFT;
2101 for_each_sgt_dma(addr, sgt_iter, vma->pages)
894ccebe 2102 gen8_set_pte(gtt_entries++, pte_encode | addr);
85d1225e 2103
894ccebe 2104 wmb();
94ec8f61 2105
94ec8f61
BW
2106 /* This next bit makes the above posting read even more important. We
2107 * want to flush the TLBs only after we're certain all the PTE updates
2108 * have finished.
2109 */
7c3f86b6 2110 ggtt->invalidate(vm->i915);
94ec8f61
BW
2111}
2112
d6473f56
CW
2113static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2114 dma_addr_t addr,
75c7b0b8 2115 u64 offset,
d6473f56
CW
2116 enum i915_cache_level level,
2117 u32 flags)
2118{
7c3f86b6 2119 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2120 gen6_pte_t __iomem *pte =
7c3f86b6 2121 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2122
4fb84d99 2123 iowrite32(vm->pte_encode(addr, level, flags), pte);
d6473f56 2124
7c3f86b6 2125 ggtt->invalidate(vm->i915);
d6473f56
CW
2126}
2127
e76e9aeb
BW
2128/*
2129 * Binds an object into the global gtt with the specified cache level. The object
2130 * will be accessible to the GPU via commands whose operands reference offsets
2131 * within the global GTT as well as accessible by the GPU through the GMADR
2132 * mapped BAR (dev_priv->mm.gtt->gtt).
2133 */
853ba5d2 2134static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2135 struct i915_vma *vma,
75c7b0b8
CW
2136 enum i915_cache_level level,
2137 u32 flags)
e76e9aeb 2138{
ce7fda2e 2139 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
b31144c0 2140 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
4a234c5f 2141 unsigned int i = vma->node.start >> PAGE_SHIFT;
b31144c0 2142 struct sgt_iter iter;
85d1225e 2143 dma_addr_t addr;
4a234c5f 2144 for_each_sgt_dma(addr, iter, vma->pages)
b31144c0
CW
2145 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2146 wmb();
0f9b91c7
BW
2147
2148 /* This next bit makes the above posting read even more important. We
2149 * want to flush the TLBs only after we're certain all the PTE updates
2150 * have finished.
2151 */
7c3f86b6 2152 ggtt->invalidate(vm->i915);
e76e9aeb
BW
2153}
2154
f7770bfd 2155static void nop_clear_range(struct i915_address_space *vm,
75c7b0b8 2156 u64 start, u64 length)
f7770bfd
CW
2157{
2158}
2159
94ec8f61 2160static void gen8_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2161 u64 start, u64 length)
94ec8f61 2162{
ce7fda2e 2163 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2164 unsigned first_entry = start >> PAGE_SHIFT;
2165 unsigned num_entries = length >> PAGE_SHIFT;
894ccebe
CW
2166 const gen8_pte_t scratch_pte =
2167 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2168 gen8_pte_t __iomem *gtt_base =
72e96d64
JL
2169 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2170 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
94ec8f61
BW
2171 int i;
2172
2173 if (WARN(num_entries > max_entries,
2174 "First entry = %d; Num entries = %d (max=%d)\n",
2175 first_entry, num_entries, max_entries))
2176 num_entries = max_entries;
2177
94ec8f61
BW
2178 for (i = 0; i < num_entries; i++)
2179 gen8_set_pte(&gtt_base[i], scratch_pte);
94ec8f61
BW
2180}
2181
0ef34ad6
JB
2182static void bxt_vtd_ggtt_wa(struct i915_address_space *vm)
2183{
2184 struct drm_i915_private *dev_priv = vm->i915;
2185
2186 /*
2187 * Make sure the internal GAM fifo has been cleared of all GTT
2188 * writes before exiting stop_machine(). This guarantees that
2189 * any aperture accesses waiting to start in another process
2190 * cannot back up behind the GTT writes causing a hang.
2191 * The register can be any arbitrary GAM register.
2192 */
2193 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2194}
2195
2196struct insert_page {
2197 struct i915_address_space *vm;
2198 dma_addr_t addr;
2199 u64 offset;
2200 enum i915_cache_level level;
2201};
2202
2203static int bxt_vtd_ggtt_insert_page__cb(void *_arg)
2204{
2205 struct insert_page *arg = _arg;
2206
2207 gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0);
2208 bxt_vtd_ggtt_wa(arg->vm);
2209
2210 return 0;
2211}
2212
2213static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm,
2214 dma_addr_t addr,
2215 u64 offset,
2216 enum i915_cache_level level,
2217 u32 unused)
2218{
2219 struct insert_page arg = { vm, addr, offset, level };
2220
2221 stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL);
2222}
2223
2224struct insert_entries {
2225 struct i915_address_space *vm;
4a234c5f 2226 struct i915_vma *vma;
0ef34ad6
JB
2227 enum i915_cache_level level;
2228};
2229
2230static int bxt_vtd_ggtt_insert_entries__cb(void *_arg)
2231{
2232 struct insert_entries *arg = _arg;
2233
4a234c5f 2234 gen8_ggtt_insert_entries(arg->vm, arg->vma, arg->level, 0);
0ef34ad6
JB
2235 bxt_vtd_ggtt_wa(arg->vm);
2236
2237 return 0;
2238}
2239
2240static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
4a234c5f 2241 struct i915_vma *vma,
0ef34ad6
JB
2242 enum i915_cache_level level,
2243 u32 unused)
2244{
17369ba0 2245 struct insert_entries arg = { vm, vma, level };
0ef34ad6
JB
2246
2247 stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
2248}
2249
2250struct clear_range {
2251 struct i915_address_space *vm;
2252 u64 start;
2253 u64 length;
2254};
2255
2256static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
2257{
2258 struct clear_range *arg = _arg;
2259
2260 gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
2261 bxt_vtd_ggtt_wa(arg->vm);
2262
2263 return 0;
2264}
2265
2266static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
2267 u64 start,
2268 u64 length)
2269{
2270 struct clear_range arg = { vm, start, length };
2271
2272 stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
2273}
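/* The __BKL variants above bundle the arguments of the regular GGTT
 * callbacks into a small struct and run the real operation plus
 * bxt_vtd_ggtt_wa() inside stop_machine(), so no other CPU can touch the
 * aperture while the PTE update and the GAM fifo drain are in flight. They
 * are only installed when intel_ggtt_update_needs_vtd_wa() reports that the
 * BXT VT-d workaround is needed (see gen8_gmch_probe() below).
 */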
2274
853ba5d2 2275static void gen6_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2276 u64 start, u64 length)
7faf1ab2 2277{
ce7fda2e 2278 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2279 unsigned first_entry = start >> PAGE_SHIFT;
2280 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2281 gen6_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2282 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2283 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
7faf1ab2
DV
2284 int i;
2285
2286 if (WARN(num_entries > max_entries,
2287 "First entry = %d; Num entries = %d (max=%d)\n",
2288 first_entry, num_entries, max_entries))
2289 num_entries = max_entries;
2290
8bcdd0f7 2291 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
4fb84d99 2292 I915_CACHE_LLC, 0);
828c7908 2293
7faf1ab2
DV
2294 for (i = 0; i < num_entries; i++)
2295 iowrite32(scratch_pte, &gtt_base[i]);
7faf1ab2
DV
2296}
2297
d6473f56
CW
2298static void i915_ggtt_insert_page(struct i915_address_space *vm,
2299 dma_addr_t addr,
75c7b0b8 2300 u64 offset,
d6473f56
CW
2301 enum i915_cache_level cache_level,
2302 u32 unused)
2303{
d6473f56
CW
2304 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2305 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
d6473f56
CW
2306
2307 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
d6473f56
CW
2308}
2309
d369d2d9 2310static void i915_ggtt_insert_entries(struct i915_address_space *vm,
4a234c5f 2311 struct i915_vma *vma,
75c7b0b8
CW
2312 enum i915_cache_level cache_level,
2313 u32 unused)
7faf1ab2
DV
2314{
2315 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2316 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2317
4a234c5f
MA
2318 intel_gtt_insert_sg_entries(vma->pages, vma->node.start >> PAGE_SHIFT,
2319 flags);
7faf1ab2
DV
2320}
2321
853ba5d2 2322static void i915_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2323 u64 start, u64 length)
7faf1ab2 2324{
2eedfc7d 2325 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
7faf1ab2
DV
2326}
2327
70b9f6f8
DV
2328static int ggtt_bind_vma(struct i915_vma *vma,
2329 enum i915_cache_level cache_level,
2330 u32 flags)
0a878716 2331{
49d73912 2332 struct drm_i915_private *i915 = vma->vm->i915;
0a878716 2333 struct drm_i915_gem_object *obj = vma->obj;
ba7a5741 2334 u32 pte_flags;
0a878716 2335
ba7a5741
CW
2336 if (unlikely(!vma->pages)) {
2337 int ret = i915_get_ggtt_vma_pages(vma);
2338 if (ret)
2339 return ret;
2340 }
0a878716
DV
2341
2342 /* Currently applicable only to VLV */
ba7a5741 2343 pte_flags = 0;
0a878716
DV
2344 if (obj->gt_ro)
2345 pte_flags |= PTE_READ_ONLY;
2346
9c870d03 2347 intel_runtime_pm_get(i915);
4a234c5f 2348 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
9c870d03 2349 intel_runtime_pm_put(i915);
0a878716
DV
2350
2351 /*
2352 * Without aliasing PPGTT there's no difference between
2353 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2354 * upgrade to both bound if we bind either to avoid double-binding.
2355 */
3272db53 2356 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
0a878716
DV
2357
2358 return 0;
2359}
2360
cbc4e9e6
CW
2361static void ggtt_unbind_vma(struct i915_vma *vma)
2362{
2363 struct drm_i915_private *i915 = vma->vm->i915;
2364
2365 intel_runtime_pm_get(i915);
2366 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2367 intel_runtime_pm_put(i915);
2368}
2369
0a878716
DV
2370static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2371 enum i915_cache_level cache_level,
2372 u32 flags)
d5bd1449 2373{
49d73912 2374 struct drm_i915_private *i915 = vma->vm->i915;
321d178e 2375 u32 pte_flags;
ff685975 2376 int ret;
70b9f6f8 2377
ba7a5741 2378 if (unlikely(!vma->pages)) {
ff685975 2379 ret = i915_get_ggtt_vma_pages(vma);
ba7a5741
CW
2380 if (ret)
2381 return ret;
2382 }
7faf1ab2 2383
24f3a8cf 2384 /* Currently applicable only to VLV */
321d178e
CW
2385 pte_flags = 0;
2386 if (vma->obj->gt_ro)
f329f5f6 2387 pte_flags |= PTE_READ_ONLY;
24f3a8cf 2388
ff685975
CW
2389 if (flags & I915_VMA_LOCAL_BIND) {
2390 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2391
1f23475c
MA
2392 if (!(vma->flags & I915_VMA_LOCAL_BIND) &&
2393 appgtt->base.allocate_va_range) {
ff685975
CW
2394 ret = appgtt->base.allocate_va_range(&appgtt->base,
2395 vma->node.start,
d567232c 2396 vma->size);
ff685975 2397 if (ret)
2f7399af 2398 goto err_pages;
ff685975
CW
2399 }
2400
4a234c5f
MA
2401 appgtt->base.insert_entries(&appgtt->base, vma, cache_level,
2402 pte_flags);
ff685975
CW
2403 }
2404
3272db53 2405 if (flags & I915_VMA_GLOBAL_BIND) {
9c870d03 2406 intel_runtime_pm_get(i915);
4a234c5f 2407 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
9c870d03 2408 intel_runtime_pm_put(i915);
6f65e29a 2409 }
d5bd1449 2410
70b9f6f8 2411 return 0;
2f7399af
CW
2412
2413err_pages:
2414 if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
2415 if (vma->pages != vma->obj->mm.pages) {
2416 GEM_BUG_ON(!vma->pages);
2417 sg_free_table(vma->pages);
2418 kfree(vma->pages);
2419 }
2420 vma->pages = NULL;
2421 }
2422 return ret;
d5bd1449
CW
2423}
2424
cbc4e9e6 2425static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
74163907 2426{
49d73912 2427 struct drm_i915_private *i915 = vma->vm->i915;
6f65e29a 2428
9c870d03
CW
2429 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2430 intel_runtime_pm_get(i915);
cbc4e9e6 2431 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
9c870d03
CW
2432 intel_runtime_pm_put(i915);
2433 }
06615ee5 2434
cbc4e9e6
CW
2435 if (vma->flags & I915_VMA_LOCAL_BIND) {
2436 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2437
2438 vm->clear_range(vm, vma->node.start, vma->size);
2439 }
74163907
DV
2440}
2441
03ac84f1
CW
2442void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2443 struct sg_table *pages)
7c2e6fdf 2444{
52a05c30
DW
2445 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2446 struct device *kdev = &dev_priv->drm.pdev->dev;
307dc25b 2447 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5c042287 2448
307dc25b 2449 if (unlikely(ggtt->do_idle_maps)) {
228ec87c 2450 if (i915_gem_wait_for_idle(dev_priv, 0)) {
307dc25b
CW
2451 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2452 /* Wait a bit, in hopes it avoids the hang */
2453 udelay(10);
2454 }
2455 }
5c042287 2456
03ac84f1 2457 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
7c2e6fdf 2458}
644ec02b 2459
45b186f1 2460static void i915_gtt_color_adjust(const struct drm_mm_node *node,
42d6ab48 2461 unsigned long color,
440fd528
TR
2462 u64 *start,
2463 u64 *end)
42d6ab48 2464{
a6508ded 2465 if (node->allocated && node->color != color)
f51455d4 2466 *start += I915_GTT_PAGE_SIZE;
42d6ab48 2467
a6508ded
CW
2468 /* Also leave a space between the unallocated reserved node after the
2469 * GTT and any objects within the GTT, i.e. we use the color adjustment
2470 * to insert a guard page to prevent prefetches crossing over the
2471 * GTT boundary.
2472 */
b44f97fd 2473 node = list_next_entry(node, node_list);
a6508ded 2474 if (node->color != color)
f51455d4 2475 *end -= I915_GTT_PAGE_SIZE;
42d6ab48 2476}
fbe5d36e 2477
6cde9a02
CW
2478int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2479{
2480 struct i915_ggtt *ggtt = &i915->ggtt;
2481 struct i915_hw_ppgtt *ppgtt;
2482 int err;
2483
57202f47 2484 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
1188bc66
CW
2485 if (IS_ERR(ppgtt))
2486 return PTR_ERR(ppgtt);
6cde9a02 2487
e565ceb0
CW
2488 if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2489 err = -ENODEV;
2490 goto err_ppgtt;
2491 }
2492
6cde9a02 2493 if (ppgtt->base.allocate_va_range) {
e565ceb0
CW
2494 /* Note we only pre-allocate as far as the end of the global
2495 * GTT. On 48b / 4-level page-tables, the difference is very,
2496 * very significant! We have to preallocate as GVT/vgpu does
2497 * not like the page directory disappearing.
2498 */
6cde9a02 2499 err = ppgtt->base.allocate_va_range(&ppgtt->base,
e565ceb0 2500 0, ggtt->base.total);
6cde9a02 2501 if (err)
1188bc66 2502 goto err_ppgtt;
6cde9a02
CW
2503 }
2504
6cde9a02 2505 i915->mm.aliasing_ppgtt = ppgtt;
cbc4e9e6 2506
6cde9a02
CW
2507 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2508 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2509
cbc4e9e6
CW
2510 WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2511 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2512
6cde9a02
CW
2513 return 0;
2514
6cde9a02 2515err_ppgtt:
1188bc66 2516 i915_ppgtt_put(ppgtt);
6cde9a02
CW
2517 return err;
2518}
2519
2520void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2521{
2522 struct i915_ggtt *ggtt = &i915->ggtt;
2523 struct i915_hw_ppgtt *ppgtt;
2524
2525 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2526 if (!ppgtt)
2527 return;
2528
1188bc66 2529 i915_ppgtt_put(ppgtt);
6cde9a02
CW
2530
2531 ggtt->base.bind_vma = ggtt_bind_vma;
cbc4e9e6 2532 ggtt->base.unbind_vma = ggtt_unbind_vma;
6cde9a02
CW
2533}
2534
f6b9d5ca 2535int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
644ec02b 2536{
e78891ca
BW
2537 /* Let GEM manage all of the aperture.
2538 *
2539 * However, leave one page at the end still bound to the scratch page.
2540 * There are a number of places where the hardware apparently prefetches
2541 * past the end of the object, and we've seen multiple hangs with the
2542 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2543 * aperture. One page should be enough to keep any prefetching inside
2544 * of the aperture.
2545 */
72e96d64 2546 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ed2f3452 2547 unsigned long hole_start, hole_end;
f6b9d5ca 2548 struct drm_mm_node *entry;
fa76da34 2549 int ret;
644ec02b 2550
b02d22a3
ZW
2551 ret = intel_vgt_balloon(dev_priv);
2552 if (ret)
2553 return ret;
5dda8fa3 2554
95374d75 2555 /* Reserve a mappable slot for our lockless error capture */
4e64e553
CW
2556 ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2557 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2558 0, ggtt->mappable_end,
2559 DRM_MM_INSERT_LOW);
95374d75
CW
2560 if (ret)
2561 return ret;
2562
ed2f3452 2563 /* Clear any non-preallocated blocks */
72e96d64 2564 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
ed2f3452
CW
2565 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2566 hole_start, hole_end);
72e96d64 2567 ggtt->base.clear_range(&ggtt->base, hole_start,
4fb84d99 2568 hole_end - hole_start);
ed2f3452
CW
2569 }
2570
2571 /* And finally clear the reserved guard page */
f6b9d5ca 2572 ggtt->base.clear_range(&ggtt->base,
4fb84d99 2573 ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
6c5566a8 2574
97d6d7ab 2575 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
6cde9a02 2576 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
95374d75 2577 if (ret)
6cde9a02 2578 goto err;
fa76da34
DV
2579 }
2580
6c5566a8 2581 return 0;
95374d75 2582
95374d75
CW
2583err:
2584 drm_mm_remove_node(&ggtt->error_capture);
2585 return ret;
e76e9aeb
BW
2586}
2587
d85489d3
JL
2588/**
2589 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
97d6d7ab 2590 * @dev_priv: i915 device
d85489d3 2591 */
97d6d7ab 2592void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
90d0a0e8 2593{
72e96d64 2594 struct i915_ggtt *ggtt = &dev_priv->ggtt;
94d4a2a9
CW
2595 struct i915_vma *vma, *vn;
2596
2597 ggtt->base.closed = true;
2598
2599 mutex_lock(&dev_priv->drm.struct_mutex);
2600 WARN_ON(!list_empty(&ggtt->base.active_list));
2601 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2602 WARN_ON(i915_vma_unbind(vma));
2603 mutex_unlock(&dev_priv->drm.struct_mutex);
90d0a0e8 2604
97d6d7ab 2605 i915_gem_cleanup_stolen(&dev_priv->drm);
a4eba47b 2606
1188bc66
CW
2607 mutex_lock(&dev_priv->drm.struct_mutex);
2608 i915_gem_fini_aliasing_ppgtt(dev_priv);
2609
95374d75
CW
2610 if (drm_mm_node_allocated(&ggtt->error_capture))
2611 drm_mm_remove_node(&ggtt->error_capture);
2612
72e96d64 2613 if (drm_mm_initialized(&ggtt->base.mm)) {
b02d22a3 2614 intel_vgt_deballoon(dev_priv);
ed9724dd 2615 i915_address_space_fini(&ggtt->base);
90d0a0e8
DV
2616 }
2617
72e96d64 2618 ggtt->base.cleanup(&ggtt->base);
1188bc66 2619 mutex_unlock(&dev_priv->drm.struct_mutex);
f6b9d5ca
CW
2620
2621 arch_phys_wc_del(ggtt->mtrr);
f7bbe788 2622 io_mapping_fini(&ggtt->mappable);
90d0a0e8 2623}
70e32544 2624
2c642b07 2625static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2626{
2627 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2628 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2629 return snb_gmch_ctl << 20;
2630}
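/* Worked example for the decode above: a GGMS field of 2 yields 2MB of PTE
 * space; at 4 bytes per gen6 PTE that is 512K entries, i.e. a 2GB GGTT once
 * gen6_gmch_probe() applies (size / sizeof(gen6_pte_t)) << PAGE_SHIFT.
 */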
2631
2c642b07 2632static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
9459d252
BW
2633{
2634 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2635 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2636 if (bdw_gmch_ctl)
2637 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
562d55d9
BW
2638
2639#ifdef CONFIG_X86_32
2640 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2641 if (bdw_gmch_ctl > 4)
2642 bdw_gmch_ctl = 4;
2643#endif
2644
9459d252
BW
2645 return bdw_gmch_ctl << 20;
2646}
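/* Worked example for the decode above: a GGMS field of 3 becomes 1 << 3 = 8MB
 * of PTE space on 64-bit builds; with 8-byte gen8 PTEs that is 1M entries,
 * i.e. a 4GB GGTT. The CONFIG_X86_32 clamp caps it at 4MB (a 2GB GGTT).
 */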
2647
2c642b07 2648static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
d7f25f23
DL
2649{
2650 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2651 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2652
2653 if (gmch_ctrl)
2654 return 1 << (20 + gmch_ctrl);
2655
2656 return 0;
2657}
2658
2c642b07 2659static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2660{
2661 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2662 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
a92d1a91 2663 return (size_t)snb_gmch_ctl << 25; /* 32 MB units */
e76e9aeb
BW
2664}
2665
2c642b07 2666static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
9459d252
BW
2667{
2668 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2669 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
a92d1a91 2670 return (size_t)bdw_gmch_ctl << 25; /* 32 MB units */
9459d252
BW
2671}
2672
d7f25f23
DL
2673static size_t chv_get_stolen_size(u16 gmch_ctrl)
2674{
2675 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2676 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2677
2678 /*
2679 * 0x0 to 0x10: 32MB increments starting at 0MB
2680 * 0x11 to 0x16: 4MB increments starting at 8MB
2681 * 0x17 to 0x1d: 4MB increments starting at 36MB
2682 */
2683 if (gmch_ctrl < 0x11)
a92d1a91 2684 return (size_t)gmch_ctrl << 25;
d7f25f23 2685 else if (gmch_ctrl < 0x17)
a92d1a91 2686 return (size_t)(gmch_ctrl - 0x11 + 2) << 22;
d7f25f23 2687 else
a92d1a91 2688 return (size_t)(gmch_ctrl - 0x17 + 9) << 22;
d7f25f23
DL
2689}
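/* Worked examples for the table above: gmch_ctrl 0x10 -> 0x10 << 25 = 512MB,
 * 0x11 -> 2 << 22 = 8MB, 0x16 -> 7 << 22 = 28MB, 0x17 -> 9 << 22 = 36MB.
 */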
2690
66375014
DL
2691static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2692{
2693 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2694 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2695
2696 if (gen9_gmch_ctl < 0xf0)
a92d1a91 2697 return (size_t)gen9_gmch_ctl << 25; /* 32 MB units */
66375014
DL
2698 else
2699 /* 4MB increments starting at 0xf0 for 4MB */
a92d1a91 2700 return (size_t)(gen9_gmch_ctl - 0xf0 + 1) << 22;
66375014
DL
2701}
2702
34c998b4 2703static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
63340133 2704{
49d73912
CW
2705 struct drm_i915_private *dev_priv = ggtt->base.i915;
2706 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2707 phys_addr_t phys_addr;
8bcdd0f7 2708 int ret;
63340133
BW
2709
2710 /* For Modern GENs the PTEs and register space are split in the BAR */
34c998b4 2711 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
63340133 2712
2a073f89
ID
2713 /*
2714 * On BXT, writes larger than 64 bit to the GTT pagetable range will be
2715 * dropped. For WC mappings in general we have 64 byte burst writes
2716 * when the WC buffer is flushed, so we can't use it, but have to
2717 * resort to an uncached mapping. The WC issue is easily caught by the
2718 * readback check when writing GTT PTE entries.
2719 */
cc3f90f0 2720 if (IS_GEN9_LP(dev_priv))
34c998b4 2721 ggtt->gsm = ioremap_nocache(phys_addr, size);
2a073f89 2722 else
34c998b4 2723 ggtt->gsm = ioremap_wc(phys_addr, size);
72e96d64 2724 if (!ggtt->gsm) {
34c998b4 2725 DRM_ERROR("Failed to map the ggtt page table\n");
63340133
BW
2726 return -ENOMEM;
2727 }
2728
8448661d 2729 ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
8bcdd0f7 2730 if (ret) {
63340133
BW
2731 DRM_ERROR("Scratch setup failed\n");
2732 /* iounmap will also get called at remove, but meh */
72e96d64 2733 iounmap(ggtt->gsm);
8bcdd0f7 2734 return ret;
63340133
BW
2735 }
2736
4ad2af1e 2737 return 0;
63340133
BW
2738}
2739
fbe5d36e
BW
2740/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2741 * bits. When using advanced contexts each context stores its own PAT, but
2742 * writing this data shouldn't be harmful even in those cases. */
ee0ce478 2743static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
fbe5d36e 2744{
75c7b0b8 2745 u64 pat;
fbe5d36e
BW
2746
2747 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
2748 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2749 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2750 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
2751 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2752 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2753 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2754 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2755
2d1fe073 2756 if (!USES_PPGTT(dev_priv))
d6a8b72e
RV
2757 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2758 * so RTL will always use the value corresponding to
2759 * pat_sel = 000".
2760 * So let's disable cache for GGTT to avoid screen corruptions.
2761 * MOCS still can be used though.
2762 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
2763 * before this patch, i.e. the same uncached + snooping access
2764 * like on gen6/7 seems to be in effect.
2765 * - So this just fixes blitter/render access. Again it looks
2766 * like it's not just uncached access, but uncached + snooping.
2767 * So we can still hold onto all our assumptions wrt cpu
2768 * clflushing on LLC machines.
2769 */
2770 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2771
fbe5d36e
BW
2772 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2773 * write would work. */
7e435ad2
VS
2774 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2775 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
fbe5d36e
BW
2776}
2777
ee0ce478
VS
2778static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2779{
75c7b0b8 2780 u64 pat;
ee0ce478
VS
2781
2782 /*
2783 * Map WB on BDW to snooped on CHV.
2784 *
2785 * Only the snoop bit has meaning for CHV, the rest is
2786 * ignored.
2787 *
cf3d262e
VS
2788 * The hardware will never snoop for certain types of accesses:
2789 * - CPU GTT (GMADR->GGTT->no snoop->memory)
2790 * - PPGTT page tables
2791 * - some other special cycles
2792 *
2793 * As with BDW, we also need to consider the following for GT accesses:
2794 * "For GGTT, there is NO pat_sel[2:0] from the entry,
2795 * so RTL will always use the value corresponding to
2796 * pat_sel = 000".
2797 * Which means we must set the snoop bit in PAT entry 0
2798 * in order to keep the global status page working.
ee0ce478
VS
2799 */
2800 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
2801 GEN8_PPAT(1, 0) |
2802 GEN8_PPAT(2, 0) |
2803 GEN8_PPAT(3, 0) |
2804 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
2805 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
2806 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2807 GEN8_PPAT(7, CHV_PPAT_SNOOP);
2808
7e435ad2
VS
2809 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2810 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
ee0ce478
VS
2811}
2812
34c998b4
CW
2813static void gen6_gmch_remove(struct i915_address_space *vm)
2814{
2815 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2816
2817 iounmap(ggtt->gsm);
8448661d 2818 cleanup_scratch_page(vm);
34c998b4
CW
2819}
2820
d507d735 2821static int gen8_gmch_probe(struct i915_ggtt *ggtt)
63340133 2822{
49d73912 2823 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 2824 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2825 unsigned int size;
63340133 2826 u16 snb_gmch_ctl;
4519290a 2827 int err;
63340133
BW
2828
2829 /* TODO: We're not aware of mappable constraints on gen8 yet */
97d6d7ab
CW
2830 ggtt->mappable_base = pci_resource_start(pdev, 2);
2831 ggtt->mappable_end = pci_resource_len(pdev, 2);
63340133 2832
4519290a
ID
2833 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(39));
2834 if (!err)
2835 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
2836 if (err)
2837 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
63340133 2838
97d6d7ab 2839 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
63340133 2840
97d6d7ab 2841 if (INTEL_GEN(dev_priv) >= 9) {
d507d735 2842 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
34c998b4 2843 size = gen8_get_total_gtt_size(snb_gmch_ctl);
97d6d7ab 2844 } else if (IS_CHERRYVIEW(dev_priv)) {
d507d735 2845 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
34c998b4 2846 size = chv_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 2847 } else {
d507d735 2848 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
34c998b4 2849 size = gen8_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 2850 }
63340133 2851
34c998b4 2852 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
63340133 2853
cc3f90f0 2854 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
ee0ce478
VS
2855 chv_setup_private_ppat(dev_priv);
2856 else
2857 bdw_setup_private_ppat(dev_priv);
fbe5d36e 2858
34c998b4 2859 ggtt->base.cleanup = gen6_gmch_remove;
d507d735
JL
2860 ggtt->base.bind_vma = ggtt_bind_vma;
2861 ggtt->base.unbind_vma = ggtt_unbind_vma;
d6473f56 2862 ggtt->base.insert_page = gen8_ggtt_insert_page;
f7770bfd 2863 ggtt->base.clear_range = nop_clear_range;
48f112fe 2864 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
f7770bfd
CW
2865 ggtt->base.clear_range = gen8_ggtt_clear_range;
2866
2867 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
f7770bfd 2868
0ef34ad6
JB
2869 /* Serialize GTT updates with aperture access on BXT if VT-d is on. */
2870 if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
2871 ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
2872 ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
2873 if (ggtt->base.clear_range != nop_clear_range)
2874 ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
2875 }
2876
7c3f86b6
CW
2877 ggtt->invalidate = gen6_ggtt_invalidate;
2878
34c998b4 2879 return ggtt_probe_common(ggtt, size);
63340133
BW
2880}
2881
d507d735 2882static int gen6_gmch_probe(struct i915_ggtt *ggtt)
e76e9aeb 2883{
49d73912 2884 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 2885 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2886 unsigned int size;
e76e9aeb 2887 u16 snb_gmch_ctl;
4519290a 2888 int err;
e76e9aeb 2889
97d6d7ab
CW
2890 ggtt->mappable_base = pci_resource_start(pdev, 2);
2891 ggtt->mappable_end = pci_resource_len(pdev, 2);
41907ddc 2892
baa09f5f
BW
2893 /* 64/512MB is the current min/max we actually know of, but this is just
2894 * a coarse sanity check.
e76e9aeb 2895 */
34c998b4 2896 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
d507d735 2897 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
baa09f5f 2898 return -ENXIO;
e76e9aeb
BW
2899 }
2900
4519290a
ID
2901 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
2902 if (!err)
2903 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2904 if (err)
2905 DRM_ERROR("Can't set DMA mask/consistent mask (%d)\n", err);
97d6d7ab 2906 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
e76e9aeb 2907
d507d735 2908 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
e76e9aeb 2909
34c998b4
CW
2910 size = gen6_get_total_gtt_size(snb_gmch_ctl);
2911 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
e76e9aeb 2912
d507d735 2913 ggtt->base.clear_range = gen6_ggtt_clear_range;
d6473f56 2914 ggtt->base.insert_page = gen6_ggtt_insert_page;
d507d735
JL
2915 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
2916 ggtt->base.bind_vma = ggtt_bind_vma;
2917 ggtt->base.unbind_vma = ggtt_unbind_vma;
34c998b4
CW
2918 ggtt->base.cleanup = gen6_gmch_remove;
2919
7c3f86b6
CW
2920 ggtt->invalidate = gen6_ggtt_invalidate;
2921
34c998b4
CW
2922 if (HAS_EDRAM(dev_priv))
2923 ggtt->base.pte_encode = iris_pte_encode;
2924 else if (IS_HASWELL(dev_priv))
2925 ggtt->base.pte_encode = hsw_pte_encode;
2926 else if (IS_VALLEYVIEW(dev_priv))
2927 ggtt->base.pte_encode = byt_pte_encode;
2928 else if (INTEL_GEN(dev_priv) >= 7)
2929 ggtt->base.pte_encode = ivb_pte_encode;
2930 else
2931 ggtt->base.pte_encode = snb_pte_encode;
7faf1ab2 2932
34c998b4 2933 return ggtt_probe_common(ggtt, size);
e76e9aeb
BW
2934}
2935
34c998b4 2936static void i915_gmch_remove(struct i915_address_space *vm)
e76e9aeb 2937{
34c998b4 2938 intel_gmch_remove();
644ec02b 2939}
baa09f5f 2940
d507d735 2941static int i915_gmch_probe(struct i915_ggtt *ggtt)
baa09f5f 2942{
49d73912 2943 struct drm_i915_private *dev_priv = ggtt->base.i915;
baa09f5f
BW
2944 int ret;
2945
91c8a326 2946 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
baa09f5f
BW
2947 if (!ret) {
2948 DRM_ERROR("failed to set up gmch\n");
2949 return -EIO;
2950 }
2951
edd1f2fe
CW
2952 intel_gtt_get(&ggtt->base.total,
2953 &ggtt->stolen_size,
2954 &ggtt->mappable_base,
2955 &ggtt->mappable_end);
baa09f5f 2956
97d6d7ab 2957 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
d6473f56 2958 ggtt->base.insert_page = i915_ggtt_insert_page;
d507d735
JL
2959 ggtt->base.insert_entries = i915_ggtt_insert_entries;
2960 ggtt->base.clear_range = i915_ggtt_clear_range;
2961 ggtt->base.bind_vma = ggtt_bind_vma;
2962 ggtt->base.unbind_vma = ggtt_unbind_vma;
34c998b4 2963 ggtt->base.cleanup = i915_gmch_remove;
baa09f5f 2964
7c3f86b6
CW
2965 ggtt->invalidate = gmch_ggtt_invalidate;
2966
d507d735 2967 if (unlikely(ggtt->do_idle_maps))
c0a7f818
CW
2968 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
2969
baa09f5f
BW
2970 return 0;
2971}
2972
d85489d3 2973/**
0088e522 2974 * i915_ggtt_probe_hw - Probe GGTT hardware location
97d6d7ab 2975 * @dev_priv: i915 device
d85489d3 2976 */
97d6d7ab 2977int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
baa09f5f 2978{
62106b4f 2979 struct i915_ggtt *ggtt = &dev_priv->ggtt;
baa09f5f
BW
2980 int ret;
2981
49d73912 2982 ggtt->base.i915 = dev_priv;
8448661d 2983 ggtt->base.dma = &dev_priv->drm.pdev->dev;
c114f76a 2984
34c998b4
CW
2985 if (INTEL_GEN(dev_priv) <= 5)
2986 ret = i915_gmch_probe(ggtt);
2987 else if (INTEL_GEN(dev_priv) < 8)
2988 ret = gen6_gmch_probe(ggtt);
2989 else
2990 ret = gen8_gmch_probe(ggtt);
a54c0c27 2991 if (ret)
baa09f5f 2992 return ret;
baa09f5f 2993
db9309a5
CW
2994 /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
2995 * This is easier than doing range restriction on the fly, as we
2996 * currently don't have any bits spare to pass in this upper
2997 * restriction!
2998 */
2999 if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
3000 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
3001 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3002 }
3003
c890e2d5
CW
3004 if ((ggtt->base.total - 1) >> 32) {
3005 DRM_ERROR("We never expected a Global GTT with more than 32bits"
f6b9d5ca 3006 " of address space! Found %lldM!\n",
c890e2d5
CW
3007 ggtt->base.total >> 20);
3008 ggtt->base.total = 1ULL << 32;
3009 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3010 }
3011
f6b9d5ca
CW
3012 if (ggtt->mappable_end > ggtt->base.total) {
3013 DRM_ERROR("mappable aperture extends past end of GGTT,"
3014 " aperture=%llx, total=%llx\n",
3015 ggtt->mappable_end, ggtt->base.total);
3016 ggtt->mappable_end = ggtt->base.total;
3017 }
3018
baa09f5f 3019 /* GMADR is the PCI mmio aperture into the global GTT. */
c44ef60e 3020 DRM_INFO("Memory usable by graphics device = %lluM\n",
62106b4f
JL
3021 ggtt->base.total >> 20);
3022 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
edd1f2fe 3023 DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
80debff8 3024 if (intel_vtd_active())
5db6c735 3025 DRM_INFO("VT-d active for gfx access\n");
baa09f5f
BW
3026
3027 return 0;
0088e522
CW
3028}
3029
3030/**
3031 * i915_ggtt_init_hw - Initialize GGTT hardware
97d6d7ab 3032 * @dev_priv: i915 device
0088e522 3033 */
97d6d7ab 3034int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
0088e522 3035{
0088e522
CW
3036 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3037 int ret;
3038
f6b9d5ca
CW
3039 INIT_LIST_HEAD(&dev_priv->vm_list);
3040
a6508ded
CW
3041 /* Note that we use page colouring to enforce a guard page at the
3042 * end of the address space. This is required as the CS may prefetch
3043 * beyond the end of the batch buffer, across the page boundary,
3044 * and beyond the end of the GTT if we do not provide a guard.
f6b9d5ca 3045 */
80b204bc 3046 mutex_lock(&dev_priv->drm.struct_mutex);
80b204bc 3047 i915_address_space_init(&ggtt->base, dev_priv, "[global]");
a6508ded 3048 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
f6b9d5ca 3049 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
80b204bc 3050 mutex_unlock(&dev_priv->drm.struct_mutex);
f6b9d5ca 3051
f7bbe788
CW
3052 if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
3053 dev_priv->ggtt.mappable_base,
3054 dev_priv->ggtt.mappable_end)) {
f6b9d5ca
CW
3055 ret = -EIO;
3056 goto out_gtt_cleanup;
3057 }
3058
3059 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
3060
0088e522
CW
3061 /*
3062 * Initialise stolen early so that we may reserve preallocated
3063 * objects for the BIOS to KMS transition.
3064 */
7ace3d30 3065 ret = i915_gem_init_stolen(dev_priv);
0088e522
CW
3066 if (ret)
3067 goto out_gtt_cleanup;
3068
3069 return 0;
a4eba47b
ID
3070
3071out_gtt_cleanup:
72e96d64 3072 ggtt->base.cleanup(&ggtt->base);
a4eba47b 3073 return ret;
baa09f5f 3074}
6f65e29a 3075
97d6d7ab 3076int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
ac840ae5 3077{
97d6d7ab 3078 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
ac840ae5
VS
3079 return -EIO;
3080
3081 return 0;
3082}
3083
7c3f86b6
CW
3084void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3085{
04f7b24e
CW
3086 GEM_BUG_ON(i915->ggtt.invalidate != gen6_ggtt_invalidate);
3087
7c3f86b6
CW
3088 i915->ggtt.invalidate = guc_ggtt_invalidate;
3089}
3090
3091void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3092{
04f7b24e
CW
3093 /* We should only be called after i915_ggtt_enable_guc() */
3094 GEM_BUG_ON(i915->ggtt.invalidate != guc_ggtt_invalidate);
3095
3096 i915->ggtt.invalidate = gen6_ggtt_invalidate;
7c3f86b6
CW
3097}
3098
275a991c 3099void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
fa42331b 3100{
72e96d64 3101 struct i915_ggtt *ggtt = &dev_priv->ggtt;
fbb30a5c 3102 struct drm_i915_gem_object *obj, *on;
fa42331b 3103
dc97997a 3104 i915_check_and_clear_faults(dev_priv);
fa42331b
DV
3105
3106 /* First fill our portion of the GTT with scratch pages */
381b943b 3107 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
fa42331b 3108
fbb30a5c
CW
3109 ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3110
3111 /* clflush objects bound into the GGTT and rebind them. */
3112 list_for_each_entry_safe(obj, on,
56cea323 3113 &dev_priv->mm.bound_list, global_link) {
fbb30a5c
CW
3114 bool ggtt_bound = false;
3115 struct i915_vma *vma;
3116
1c7f4bca 3117 list_for_each_entry(vma, &obj->vma_list, obj_link) {
72e96d64 3118 if (vma->vm != &ggtt->base)
2c3d9984 3119 continue;
fa42331b 3120
fbb30a5c
CW
3121 if (!i915_vma_unbind(vma))
3122 continue;
3123
2c3d9984
TU
3124 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3125 PIN_UPDATE));
fbb30a5c 3126 ggtt_bound = true;
2c3d9984
TU
3127 }
3128
fbb30a5c 3129 if (ggtt_bound)
975f7ff4 3130 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
2c3d9984 3131 }
fa42331b 3132
fbb30a5c
CW
3133 ggtt->base.closed = false;
3134
275a991c 3135 if (INTEL_GEN(dev_priv) >= 8) {
cc3f90f0 3136 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
fa42331b
DV
3137 chv_setup_private_ppat(dev_priv);
3138 else
3139 bdw_setup_private_ppat(dev_priv);
3140
3141 return;
3142 }
3143
275a991c 3144 if (USES_PPGTT(dev_priv)) {
72e96d64
JL
3145 struct i915_address_space *vm;
3146
fa42331b 3147 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
e5716f55 3148 struct i915_hw_ppgtt *ppgtt;
fa42331b 3149
2bfa996e 3150 if (i915_is_ggtt(vm))
fa42331b 3151 ppgtt = dev_priv->mm.aliasing_ppgtt;
e5716f55
JL
3152 else
3153 ppgtt = i915_vm_to_ppgtt(vm);
fa42331b 3154
16a011c8 3155 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
fa42331b
DV
3156 }
3157 }
3158
7c3f86b6 3159 i915_ggtt_invalidate(dev_priv);
fa42331b
DV
3160}
3161
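/* rotate_pages() fills the sg list column by column: each column starts at
 * the bottom row of the source (src_idx = stride * (height - 1) + column)
 * and steps one row up per entry (src_idx -= stride), producing the rotated
 * page order. Only the DMA addresses matter; no struct page is referenced.
 */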
804beb4b 3162static struct scatterlist *
2d7f3bdb 3163rotate_pages(const dma_addr_t *in, unsigned int offset,
804beb4b 3164 unsigned int width, unsigned int height,
87130255 3165 unsigned int stride,
804beb4b 3166 struct sg_table *st, struct scatterlist *sg)
50470bb0
TU
3167{
3168 unsigned int column, row;
3169 unsigned int src_idx;
50470bb0 3170
50470bb0 3171 for (column = 0; column < width; column++) {
87130255 3172 src_idx = stride * (height - 1) + column;
50470bb0
TU
3173 for (row = 0; row < height; row++) {
3174 st->nents++;
3175 /* We don't need the pages, but need to initialize
3176 * the entries so the sg list can be happily traversed.
3177 * The only things we need are the DMA addresses.
3178 */
3179 sg_set_page(sg, NULL, PAGE_SIZE, 0);
804beb4b 3180 sg_dma_address(sg) = in[offset + src_idx];
50470bb0
TU
3181 sg_dma_len(sg) = PAGE_SIZE;
3182 sg = sg_next(sg);
87130255 3183 src_idx -= stride;
50470bb0
TU
3184 }
3185 }
804beb4b
TU
3186
3187 return sg;
50470bb0
TU
3188}
3189
ba7a5741
CW
3190static noinline struct sg_table *
3191intel_rotate_pages(struct intel_rotation_info *rot_info,
3192 struct drm_i915_gem_object *obj)
50470bb0 3193{
75c7b0b8 3194 const unsigned long n_pages = obj->base.size / PAGE_SIZE;
6687c906 3195 unsigned int size = intel_rotation_info_size(rot_info);
85d1225e
DG
3196 struct sgt_iter sgt_iter;
3197 dma_addr_t dma_addr;
50470bb0
TU
3198 unsigned long i;
3199 dma_addr_t *page_addr_list;
3200 struct sg_table *st;
89e3e142 3201 struct scatterlist *sg;
1d00dad5 3202 int ret = -ENOMEM;
50470bb0 3203
50470bb0 3204 /* Allocate a temporary list of source pages for random access. */
2098105e 3205 page_addr_list = kvmalloc_array(n_pages,
f2a85e19
CW
3206 sizeof(dma_addr_t),
3207 GFP_TEMPORARY);
50470bb0
TU
3208 if (!page_addr_list)
3209 return ERR_PTR(ret);
3210
3211 /* Allocate target SG list. */
3212 st = kmalloc(sizeof(*st), GFP_KERNEL);
3213 if (!st)
3214 goto err_st_alloc;
3215
6687c906 3216 ret = sg_alloc_table(st, size, GFP_KERNEL);
50470bb0
TU
3217 if (ret)
3218 goto err_sg_alloc;
3219
3220 /* Populate source page list from the object. */
3221 i = 0;
a4f5ea64 3222 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
85d1225e 3223 page_addr_list[i++] = dma_addr;
50470bb0 3224
85d1225e 3225 GEM_BUG_ON(i != n_pages);
11f20322
VS
3226 st->nents = 0;
3227 sg = st->sgl;
3228
6687c906
VS
3229 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3230 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3231 rot_info->plane[i].width, rot_info->plane[i].height,
3232 rot_info->plane[i].stride, st, sg);
89e3e142
TU
3233 }
3234
6687c906
VS
3235 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
3236 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
50470bb0 3237
2098105e 3238 kvfree(page_addr_list);
50470bb0
TU
3239
3240 return st;
3241
3242err_sg_alloc:
3243 kfree(st);
3244err_st_alloc:
2098105e 3245 kvfree(page_addr_list);
50470bb0 3246
6687c906
VS
3247 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3248 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3249
50470bb0
TU
3250 return ERR_PTR(ret);
3251}
ec7adb6e 3252
ba7a5741 3253static noinline struct sg_table *
8bd7ef16
JL
3254intel_partial_pages(const struct i915_ggtt_view *view,
3255 struct drm_i915_gem_object *obj)
3256{
3257 struct sg_table *st;
d2a84a76 3258 struct scatterlist *sg, *iter;
8bab1193 3259 unsigned int count = view->partial.size;
d2a84a76 3260 unsigned int offset;
8bd7ef16
JL
3261 int ret = -ENOMEM;
3262
3263 st = kmalloc(sizeof(*st), GFP_KERNEL);
3264 if (!st)
3265 goto err_st_alloc;
3266
d2a84a76 3267 ret = sg_alloc_table(st, count, GFP_KERNEL);
8bd7ef16
JL
3268 if (ret)
3269 goto err_sg_alloc;
3270
8bab1193 3271 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
d2a84a76
CW
3272 GEM_BUG_ON(!iter);
3273
8bd7ef16
JL
3274 sg = st->sgl;
3275 st->nents = 0;
d2a84a76
CW
3276 do {
3277 unsigned int len;
8bd7ef16 3278
d2a84a76
CW
3279 len = min(iter->length - (offset << PAGE_SHIFT),
3280 count << PAGE_SHIFT);
3281 sg_set_page(sg, NULL, len, 0);
3282 sg_dma_address(sg) =
3283 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3284 sg_dma_len(sg) = len;
8bd7ef16 3285
8bd7ef16 3286 st->nents++;
d2a84a76
CW
3287 count -= len >> PAGE_SHIFT;
3288 if (count == 0) {
3289 sg_mark_end(sg);
3290 return st;
3291 }
8bd7ef16 3292
d2a84a76
CW
3293 sg = __sg_next(sg);
3294 iter = __sg_next(iter);
3295 offset = 0;
3296 } while (1);
8bd7ef16
JL
3297
3298err_sg_alloc:
3299 kfree(st);
3300err_st_alloc:
3301 return ERR_PTR(ret);
3302}
3303
70b9f6f8 3304static int
50470bb0 3305i915_get_ggtt_vma_pages(struct i915_vma *vma)
fe14d5f4 3306{
ba7a5741 3307 int ret;
50470bb0 3308
2c3a3f44
CW
3309 /* The vma->pages are only valid within the lifespan of the borrowed
3310 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3311 * must be the vma->pages. A simple rule is that vma->pages must only
3312 * be accessed when the obj->mm.pages are pinned.
3313 */
3314 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3315
ba7a5741
CW
3316 switch (vma->ggtt_view.type) {
3317 case I915_GGTT_VIEW_NORMAL:
3318 vma->pages = vma->obj->mm.pages;
fe14d5f4
TU
3319 return 0;
3320
ba7a5741 3321 case I915_GGTT_VIEW_ROTATED:
247177dd 3322 vma->pages =
ba7a5741
CW
3323 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3324 break;
3325
3326 case I915_GGTT_VIEW_PARTIAL:
247177dd 3327 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
ba7a5741
CW
3328 break;
3329
3330 default:
fe14d5f4
TU
3331 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3332 vma->ggtt_view.type);
ba7a5741
CW
3333 return -EINVAL;
3334 }
fe14d5f4 3335
ba7a5741
CW
3336 ret = 0;
3337 if (unlikely(IS_ERR(vma->pages))) {
247177dd
CW
3338 ret = PTR_ERR(vma->pages);
3339 vma->pages = NULL;
50470bb0
TU
3340 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3341 vma->ggtt_view.type, ret);
fe14d5f4 3342 }
50470bb0 3343 return ret;
fe14d5f4
TU
3344}
3345
625d988a
CW
3346/**
3347 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
a4dbf7cf
CW
3348 * @vm: the &struct i915_address_space
3349 * @node: the &struct drm_mm_node (typically i915_vma.node)
3350 * @size: how much space to allocate inside the GTT,
3351 * must be #I915_GTT_PAGE_SIZE aligned
3352 * @offset: where to insert inside the GTT,
3353 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3354 * (@offset + @size) must fit within the address space
3355 * @color: color to apply to node, if this node is not from a VMA,
3356 * color must be #I915_COLOR_UNEVICTABLE
3357 * @flags: control search and eviction behaviour
625d988a
CW
3358 *
3359 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3360 * the address space (using @size and @color). If the @node does not fit, it
3361 * tries to evict any overlapping nodes from the GTT, including any
3362 * neighbouring nodes if the colors do not match (to ensure guard pages between
3363 * differing domains). See i915_gem_evict_for_node() for the gory details
 3364 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3365 * evicting active overlapping objects, and any overlapping node that is pinned
3366 * or marked as unevictable will also result in failure.
3367 *
3368 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3369 * asked to wait for eviction and interrupted.
3370 */
3371int i915_gem_gtt_reserve(struct i915_address_space *vm,
3372 struct drm_mm_node *node,
3373 u64 size, u64 offset, unsigned long color,
3374 unsigned int flags)
3375{
3376 int err;
3377
3378 GEM_BUG_ON(!size);
3379 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3380 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3381 GEM_BUG_ON(range_overflows(offset, size, vm->total));
3fec7ec4 3382 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
9734ad13 3383 GEM_BUG_ON(drm_mm_node_allocated(node));
625d988a
CW
3384
3385 node->size = size;
3386 node->start = offset;
3387 node->color = color;
3388
3389 err = drm_mm_reserve_node(&vm->mm, node);
3390 if (err != -ENOSPC)
3391 return err;
3392
616d9cee
CW
3393 if (flags & PIN_NOEVICT)
3394 return -ENOSPC;
3395
625d988a
CW
3396 err = i915_gem_evict_for_node(vm, node, flags);
3397 if (err == 0)
3398 err = drm_mm_reserve_node(&vm->mm, node);
3399
3400 return err;
3401}
3402
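/*
 * Editor's illustrative sketch, not part of the driver: a caller that wants
 * a node at a fixed GGTT offset could use i915_gem_gtt_reserve() as below.
 * The helper name and the size/offset values are hypothetical; a node that
 * is not backed by a VMA must use I915_COLOR_UNEVICTABLE as its color.
 */
static int __maybe_unused example_gtt_reserve_fixed(struct i915_address_space *vm,
						    struct drm_mm_node *node)
{
	/* Claim 64 KiB at offset 4 MiB, evicting idle overlapping nodes. */
	return i915_gem_gtt_reserve(vm, node,
				    16 * I915_GTT_PAGE_SIZE,
				    1024 * I915_GTT_PAGE_SIZE,
				    I915_COLOR_UNEVICTABLE,
				    PIN_NONBLOCK);
}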
606fec95
CW
3403static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3404{
3405 u64 range, addr;
3406
3407 GEM_BUG_ON(range_overflows(start, len, end));
3408 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3409
3410 range = round_down(end - len, align) - round_up(start, align);
3411 if (range) {
3412 if (sizeof(unsigned long) == sizeof(u64)) {
3413 addr = get_random_long();
3414 } else {
3415 addr = get_random_int();
3416 if (range > U32_MAX) {
3417 addr <<= 32;
3418 addr |= get_random_int();
3419 }
3420 }
3421 div64_u64_rem(addr, range, &addr);
3422 start += addr;
3423 }
3424
3425 return round_up(start, align);
3426}
3427
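/*
 * Worked example for random_offset() (editor's note, hypothetical values):
 * with start = 0, end = 1 MiB, len = 64 KiB and align = 4 KiB, the usable
 * range is round_down(1M - 64K, 4K) - round_up(0, 4K) = 960 KiB. A random
 * value is reduced modulo that range, added to start and rounded up to the
 * alignment, so the returned offset always lies within [start, end - len]
 * on a 4 KiB boundary.
 */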
e007b19d
CW
3428/**
3429 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
a4dbf7cf
CW
3430 * @vm: the &struct i915_address_space
3431 * @node: the &struct drm_mm_node (typically i915_vma.node)
3432 * @size: how much space to allocate inside the GTT,
3433 * must be #I915_GTT_PAGE_SIZE aligned
3434 * @alignment: required alignment of starting offset, may be 0 but
3435 * if specified, this must be a power-of-two and at least
3436 * #I915_GTT_MIN_ALIGNMENT
3437 * @color: color to apply to node
3438 * @start: start of any range restriction inside GTT (0 for all),
e007b19d 3439 * must be #I915_GTT_PAGE_SIZE aligned
a4dbf7cf
CW
3440 * @end: end of any range restriction inside GTT (U64_MAX for all),
3441 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3442 * @flags: control search and eviction behaviour
e007b19d
CW
3443 *
3444 * i915_gem_gtt_insert() first searches for an available hole into which
 3445 * it can insert the node. The hole address is aligned to @alignment and
3446 * its @size must then fit entirely within the [@start, @end] bounds. The
3447 * nodes on either side of the hole must match @color, or else a guard page
3448 * will be inserted between the two nodes (or the node evicted). If no
606fec95
CW
3449 * suitable hole is found, first a victim is randomly selected and tested
 3450 * for eviction, and if that fails the LRU list of objects within the GTT
e007b19d
CW
3451 * is scanned to find the first set of replacement nodes to create the hole.
3452 * Those old overlapping nodes are evicted from the GTT (and so must be
3453 * rebound before any future use). Any node that is currently pinned cannot
 3454 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3455 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3456 * searching for an eviction candidate. See i915_gem_evict_something() for
3457 * the gory details on the eviction algorithm.
3458 *
3459 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3460 * asked to wait for eviction and interrupted.
3461 */
3462int i915_gem_gtt_insert(struct i915_address_space *vm,
3463 struct drm_mm_node *node,
3464 u64 size, u64 alignment, unsigned long color,
3465 u64 start, u64 end, unsigned int flags)
3466{
4e64e553 3467 enum drm_mm_insert_mode mode;
606fec95 3468 u64 offset;
e007b19d
CW
3469 int err;
3470
3471 lockdep_assert_held(&vm->i915->drm.struct_mutex);
3472 GEM_BUG_ON(!size);
3473 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3474 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3475 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3476 GEM_BUG_ON(start >= end);
3477 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3478 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3fec7ec4 3479 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
9734ad13 3480 GEM_BUG_ON(drm_mm_node_allocated(node));
e007b19d
CW
3481
3482 if (unlikely(range_overflows(start, size, end)))
3483 return -ENOSPC;
3484
3485 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3486 return -ENOSPC;
3487
4e64e553
CW
3488 mode = DRM_MM_INSERT_BEST;
3489 if (flags & PIN_HIGH)
3490 mode = DRM_MM_INSERT_HIGH;
3491 if (flags & PIN_MAPPABLE)
3492 mode = DRM_MM_INSERT_LOW;
e007b19d
CW
3493
3494 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3495 * so we know that we always have a minimum alignment of 4096.
3496 * The drm_mm range manager is optimised to return results
3497 * with zero alignment, so where possible use the optimal
3498 * path.
3499 */
3500 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3501 if (alignment <= I915_GTT_MIN_ALIGNMENT)
3502 alignment = 0;
3503
4e64e553
CW
3504 err = drm_mm_insert_node_in_range(&vm->mm, node,
3505 size, alignment, color,
3506 start, end, mode);
e007b19d
CW
3507 if (err != -ENOSPC)
3508 return err;
3509
616d9cee
CW
3510 if (flags & PIN_NOEVICT)
3511 return -ENOSPC;
3512
606fec95
CW
3513 /* No free space, pick a slot at random.
3514 *
3515 * There is a pathological case here using a GTT shared between
3516 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
3517 *
3518 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
3519 * (64k objects) (448k objects)
3520 *
3521 * Now imagine that the eviction LRU is ordered top-down (just because
3522 * pathology meets real life), and that we need to evict an object to
3523 * make room inside the aperture. The eviction scan then has to walk
3524 * the 448k list before it finds one within range. And now imagine that
3525 * it has to search for a new hole between every byte inside the memcpy,
3526 * for several simultaneous clients.
3527 *
3528 * On a full-ppgtt system, if we have run out of available space, there
3529 * will be lots and lots of objects in the eviction list! Again,
3530 * searching that LRU list may be slow if we are also applying any
3531 * range restrictions (e.g. restriction to low 4GiB) and so, for
 3532 * simplicity and similarity between different GTTs, try the single
3533 * random replacement first.
3534 */
3535 offset = random_offset(start, end,
3536 size, alignment ?: I915_GTT_MIN_ALIGNMENT);
3537 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
3538 if (err != -ENOSPC)
3539 return err;
3540
3541 /* Randomly selected placement is pinned, do a search */
e007b19d
CW
3542 err = i915_gem_evict_something(vm, size, alignment, color,
3543 start, end, flags);
3544 if (err)
3545 return err;
3546
4e64e553
CW
3547 return drm_mm_insert_node_in_range(&vm->mm, node,
3548 size, alignment, color,
3549 start, end, DRM_MM_INSERT_EVICT);
e007b19d 3550}
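
/*
 * Editor's illustrative sketch, not part of the driver: allocating an
 * anonymous 2 MiB node somewhere in the low 4 GiB of an address space with
 * i915_gem_gtt_insert(). The helper name and all values are hypothetical;
 * the caller must hold struct_mutex for the vm's device.
 */
static int __maybe_unused example_gtt_insert_low(struct i915_address_space *vm,
						 struct drm_mm_node *node)
{
	/* 2 MiB, no extra alignment, no color, restricted to [0, 4 GiB),
	 * preferring the top of that range via PIN_HIGH.
	 */
	return i915_gem_gtt_insert(vm, node,
				   512 * I915_GTT_PAGE_SIZE, 0, 0,
				   0, 1ULL << 32, PIN_HIGH);
}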
3b5bb0a3
CW
3551
3552#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3553#include "selftests/mock_gtt.c"
1c42819a 3554#include "selftests/i915_gem_gtt.c"
3b5bb0a3 3555#endif