/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics which means that a passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

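/*
 * For example, a caller pinning the rotated view of an object might do
 * something along these lines (with view.rotated filled in to describe the
 * framebuffer layout, per steps 1-2 above):
 *
 *	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_ROTATED };
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 *
 * The view descriptor itself may live on the caller's stack thanks to the
 * copy semantics described above.
 */
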
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* emulation is too hard */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags = 0;

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm,
			     vma->node.start,
			     vma->size);
}

static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

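/*
 * The gen8 descriptor layout above is uniform across levels: the low 12
 * bits carry the present/writable flags and the PPAT index selecting the
 * cache attributes, and the remaining bits are the 4K-aligned physical
 * address. For example, an LLC-cached page at dma address 0x1b2c3000
 * encodes as 0x1b2c3000 | _PAGE_PRESENT | _PAGE_RW | PPAT_CACHED_INDEX.
 */
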
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static int __setup_page_dma(struct drm_i915_private *dev_priv,
			    struct i915_page_dma *p, gfp_t flags)
{
	struct device *kdev = &dev_priv->drm.pdev->dev;

	if (I915_SELFTEST_ONLY(should_fail(&dev_priv->vm_fault, 1)))
		i915_gem_shrink_all(dev_priv);

	p->page = alloc_page(flags);
	if (!p->page)
		return -ENOMEM;

	p->daddr = dma_map_page(kdev,
				p->page, 0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);

	if (dma_mapping_error(kdev, p->daddr)) {
		__free_page(p->page);
		return -EINVAL;
	}

	return 0;
}

static int setup_page_dma(struct drm_i915_private *dev_priv,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(dev_priv, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct drm_i915_private *dev_priv,
			     struct i915_page_dma *p)
{
	struct pci_dev *pdev = dev_priv->drm.pdev;

	if (WARN_ON(!p->page))
		return;

	dma_unmap_page(&pdev->dev, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	__free_page(p->page);
	memset(p, 0, sizeof(*p));
}

static void *kmap_page_dma(struct i915_page_dma *p)
{
	return kmap_atomic(p->page);
}

/* We use the flushing unmap only with ppgtt structures:
 * page directories, page tables and scratch pages.
 */
static void kunmap_page_dma(struct drm_i915_private *dev_priv, void *vaddr)
{
	/* There are only a few exceptions for gen >= 6: chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
		drm_clflush_virt_range(vaddr, PAGE_SIZE);

	kunmap_atomic(vaddr);
}

#define kmap_px(px) kmap_page_dma(px_base(px))
#define kunmap_px(ppgtt, vaddr) \
		kunmap_page_dma((ppgtt)->base.i915, (vaddr))

#define setup_px(dev_priv, px) setup_page_dma((dev_priv), px_base(px))
#define cleanup_px(dev_priv, px) cleanup_page_dma((dev_priv), px_base(px))
#define fill_px(dev_priv, px, v) fill_page_dma((dev_priv), px_base(px), (v))
#define fill32_px(dev_priv, px, v) \
		fill_page_dma_32((dev_priv), px_base(px), (v))

static void fill_page_dma(struct drm_i915_private *dev_priv,
			  struct i915_page_dma *p, const uint64_t val)
{
	int i;
	uint64_t * const vaddr = kmap_page_dma(p);

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_page_dma(dev_priv, vaddr);
}

static void fill_page_dma_32(struct drm_i915_private *dev_priv,
			     struct i915_page_dma *p, const uint32_t val32)
{
	uint64_t v = val32;

	v = v << 32 | val32;

	fill_page_dma(dev_priv, p, v);
}

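/*
 * fill_page_dma_32() simply replicates the 32-bit value into both halves
 * of a 64-bit word, e.g. 0xdeadbeef becomes 0xdeadbeefdeadbeef, so a 4K
 * page of 32-bit PTEs can be filled with the same 512 64-bit stores that
 * fill_page_dma() performs above.
 */
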
static int
setup_scratch_page(struct drm_i915_private *dev_priv,
		   struct i915_page_dma *scratch,
		   gfp_t gfp)
{
	return __setup_page_dma(dev_priv, scratch, gfp | __GFP_ZERO);
}

static void cleanup_scratch_page(struct drm_i915_private *dev_priv,
				 struct i915_page_dma *scratch)
{
	cleanup_page_dma(dev_priv, scratch);
}

static struct i915_page_table *alloc_pt(struct drm_i915_private *dev_priv)
{
	struct i915_page_table *pt;
	const size_t count = INTEL_GEN(dev_priv) >= 8 ? GEN8_PTES : GEN6_PTES;
	int ret = -ENOMEM;

	pt = kzalloc(sizeof(*pt), GFP_KERNEL);
	if (!pt)
		return ERR_PTR(-ENOMEM);

	pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
				GFP_KERNEL);

	if (!pt->used_ptes)
		goto fail_bitmap;

	ret = setup_px(dev_priv, pt);
	if (ret)
		goto fail_page_m;

	return pt;

fail_page_m:
	kfree(pt->used_ptes);
fail_bitmap:
	kfree(pt);

	return ERR_PTR(ret);
}

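/*
 * A page table spans a single 4K page: GEN8_PTES is 512 eight-byte
 * entries, GEN6_PTES is 1024 four-byte entries. The used_ptes bitmap
 * allocated above is what later lets the gen8 clear path notice that a
 * table no longer holds any live entries and can be freed.
 */
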
static void free_pt(struct drm_i915_private *dev_priv,
		    struct i915_page_table *pt)
{
	cleanup_px(dev_priv, pt);
	kfree(pt->used_ptes);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen8_pte_t scratch_pte;

	scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
				      I915_CACHE_LLC);

	fill_px(vm->i915, pt, scratch_pte);
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	gen6_pte_t scratch_pte;

	WARN_ON(vm->scratch_page.daddr == 0);

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	fill32_px(vm->i915, pt, scratch_pte);
}

static struct i915_page_directory *alloc_pd(struct drm_i915_private *dev_priv)
{
	struct i915_page_directory *pd;
	int ret = -ENOMEM;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
				sizeof(*pd->used_pdes), GFP_KERNEL);
	if (!pd->used_pdes)
		goto fail_bitmap;

	ret = setup_px(dev_priv, pd);
	if (ret)
		goto fail_page_m;

	return pd;

fail_page_m:
	kfree(pd->used_pdes);
fail_bitmap:
	kfree(pd);

	return ERR_PTR(ret);
}

static void free_pd(struct drm_i915_private *dev_priv,
		    struct i915_page_directory *pd)
{
	if (px_page(pd)) {
		cleanup_px(dev_priv, pd);
		kfree(pd->used_pdes);
		kfree(pd);
	}
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	gen8_pde_t scratch_pde;

	scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);

	fill_px(vm->i915, pd, scratch_pde);
}

static int __pdp_init(struct drm_i915_private *dev_priv,
		      struct i915_page_directory_pointer *pdp)
{
	size_t pdpes = I915_PDPES_PER_PDP(dev_priv);

	pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
				  sizeof(unsigned long),
				  GFP_KERNEL);
	if (!pdp->used_pdpes)
		return -ENOMEM;

	pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
				      GFP_KERNEL);
	if (!pdp->page_directory) {
		kfree(pdp->used_pdpes);
		/* the PDP might be the statically allocated top level. Keep it
		 * as clean as possible */
		pdp->used_pdpes = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->used_pdpes);
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static struct
i915_page_directory_pointer *alloc_pdp(struct drm_i915_private *dev_priv)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!USES_FULL_48BIT_PPGTT(dev_priv));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(dev_priv, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(dev_priv, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct drm_i915_private *dev_priv,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);
	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		cleanup_px(dev_priv, pdp);
		kfree(pdp);
	}
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm->i915, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	gen8_ppgtt_pml4e_t scratch_pml4e;

	scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
					  I915_CACHE_LLC);

	fill_px(vm->i915, pml4, scratch_pml4e);
}

static void
gen8_setup_pdpe(struct i915_hw_ppgtt *ppgtt,
		struct i915_page_directory_pointer *pdp,
		struct i915_page_directory *pd,
		int index)
{
	gen8_ppgtt_pdpe_t *page_directorypo;

	if (!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)))
		return;

	page_directorypo = kmap_px(pdp);
	page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_px(ppgtt, page_directorypo);
}

static void
gen8_setup_pml4e(struct i915_hw_ppgtt *ppgtt,
		 struct i915_pml4 *pml4,
		 struct i915_page_directory_pointer *pdp,
		 int index)
{
	gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);

	WARN_ON(!USES_FULL_48BIT_PPGTT(to_i915(ppgtt->base.dev)));
	pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_px(ppgtt, pagemap);
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	BUG_ON(entry >= 4);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
	*cs++ = upper_32_bits(addr);
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
	*cs++ = lower_32_bits(addr);
	intel_ring_advance(req, cs);

	return 0;
}

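/*
 * Each PDP descriptor is a 64-bit register, loaded above as two separate
 * 32-bit LRI writes (upper then lower dword). The legacy switch below
 * reloads all four PDP entries; the 48b switch only needs PDP0, which then
 * points at the pml4.
 */
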
static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
				 struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}

/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				uint64_t start,
				uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	gen8_pte_t *pt_vaddr;
	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
						 I915_CACHE_LLC);

	if (WARN_ON(!px_page(pt)))
		return false;

	GEM_BUG_ON(pte_end > GEN8_PTES);

	bitmap_clear(pt->used_ptes, pte, num_entries);
	if (USES_FULL_PPGTT(vm->i915)) {
		if (bitmap_empty(pt->used_ptes, GEN8_PTES))
			return true;
	}

	pt_vaddr = kmap_px(pt);

	while (pte < pte_end)
		pt_vaddr[pte++] = scratch_pte;

	kunmap_px(ppgtt, pt_vaddr);

	return false;
}

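/*
 * A gen8 virtual address decomposes into 9-bit indices per level above the
 * 4K page offset: pml4e[47:39], pdpe[38:30], pde[29:21], pte[20:12]. For
 * example, clearing 8K at start 0x40003000 gives gen8_pte_index() == 3 and
 * gen8_pte_count(0x40003000, 0x2000) == 2, i.e. ptes 3 and 4 of pde 0 in
 * pdpe 1.
 */
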
/* Removes entries from a single page dir, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				uint64_t start,
				uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_table *pt;
	uint64_t pde;
	gen8_pde_t *pde_vaddr;
	gen8_pde_t scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt),
						 I915_CACHE_LLC);

	gen8_for_each_pde(pt, pd, start, length, pde) {
		if (WARN_ON(!pd->page_table[pde]))
			break;

		if (gen8_ppgtt_clear_pt(vm, pt, start, length)) {
			__clear_bit(pde, pd->used_pdes);
			pde_vaddr = kmap_px(pd);
			pde_vaddr[pde] = scratch_pde;
			kunmap_px(ppgtt, pde_vaddr);
			free_pt(vm->i915, pt);
		}
	}

	if (bitmap_empty(pd->used_pdes, I915_PDES))
		return true;

	return false;
}

/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 uint64_t start,
				 uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory *pd;
	uint64_t pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (WARN_ON(!pdp->page_directory[pdpe]))
			break;

		if (gen8_ppgtt_clear_pd(vm, pd, start, length)) {
			__clear_bit(pdpe, pdp->used_pdpes);
			gen8_setup_pdpe(ppgtt, pdp, vm->scratch_pd, pdpe);
			free_pd(vm->i915, pd);
		}
	}

	mark_tlbs_dirty(ppgtt);

	if (bitmap_empty(pdp->used_pdpes, I915_PDPES_PER_PDP(vm->i915)))
		return true;

	return false;
}

/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
static void gen8_ppgtt_clear_pml4(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory_pointer *pdp;
	uint64_t pml4e;

	GEM_BUG_ON(!USES_FULL_48BIT_PPGTT(vm->i915));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (WARN_ON(!pml4->pdps[pml4e]))
			break;

		if (gen8_ppgtt_clear_pdp(vm, pdp, start, length)) {
			__clear_bit(pml4e, pml4->used_pml4es);
			gen8_setup_pml4e(ppgtt, pml4, vm->scratch_pdp, pml4e);
			free_pdp(vm->i915, pdp);
		}
	}
}

static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
				   uint64_t start, uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (USES_FULL_48BIT_PPGTT(vm->i915))
		gen8_ppgtt_clear_pml4(vm, &ppgtt->pml4, start, length);
	else
		gen8_ppgtt_clear_pdp(vm, &ppgtt->pdp, start, length);
}

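/*
 * The clear helpers above walk the tree top-down while their boolean
 * return values propagate emptiness back up: a level that has just lost
 * its last live entry is pointed back at the scratch encoding and freed by
 * its caller, keeping the tree minimal.
 */
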
static void
gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
			      struct i915_page_directory_pointer *pdp,
			      struct sg_page_iter *sg_iter,
			      uint64_t start,
			      enum i915_cache_level cache_level)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	gen8_pte_t *pt_vaddr;
	unsigned pdpe = gen8_pdpe_index(start);
	unsigned pde = gen8_pde_index(start);
	unsigned pte = gen8_pte_index(start);

	pt_vaddr = NULL;

	while (__sg_page_iter_next(sg_iter)) {
		if (pt_vaddr == NULL) {
			struct i915_page_directory *pd = pdp->page_directory[pdpe];
			struct i915_page_table *pt = pd->page_table[pde];
			pt_vaddr = kmap_px(pt);
		}

		pt_vaddr[pte] =
			gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
					cache_level);
		if (++pte == GEN8_PTES) {
			kunmap_px(ppgtt, pt_vaddr);
			pt_vaddr = NULL;
			if (++pde == I915_PDES) {
				if (++pdpe == I915_PDPES_PER_PDP(vm->i915))
					break;
				pde = 0;
			}
			pte = 0;
		}
	}

	if (pt_vaddr)
		kunmap_px(ppgtt, pt_vaddr);
}

static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      uint64_t start,
				      enum i915_cache_level cache_level,
				      u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sg_page_iter sg_iter;

	__sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);

	if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
		gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
					      cache_level);
	} else {
		struct i915_page_directory_pointer *pdp;
		uint64_t pml4e;
		uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;

		gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
			gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
						      start, cache_level);
		}
	}
}

static void gen8_free_page_tables(struct drm_i915_private *dev_priv,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for_each_set_bit(i, pd->used_pdes, I915_PDES) {
		if (WARN_ON(!pd->page_table[i]))
			continue;

		free_pt(dev_priv, pd->page_table[i]);
		pd->page_table[i] = NULL;
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	int ret;

	ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(dev_priv);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(dev_priv);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		vm->scratch_pdp = alloc_pdp(dev_priv);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (USES_FULL_48BIT_PPGTT(dev_priv))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(dev_priv, vm->scratch_pd);
free_pt:
	free_pt(dev_priv, vm->scratch_pt);
free_scratch_page:
	cleanup_scratch_page(dev_priv, &vm->scratch_page);

	return ret;
}

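/*
 * After init the scratch structures form a chain: every unused pml4e
 * points at scratch_pdp, whose entries point at scratch_pd, whose entries
 * point at scratch_pt, whose ptes map the scratch page. A stray access to
 * unbound address space therefore lands on the scratch page whichever
 * level it falls through.
 */
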
static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	enum vgt_g2v_type msg;
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	int i;

	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
			u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

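/*
 * When running as a GVT-g guest, the top-level page-table addresses are
 * published through the vgtif shared page (the pdp registers written
 * above) and the host is kicked via the g2v_notify mailbox so it can
 * shadow the new tables.
 */
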
static void gen8_free_scratch(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;

	if (USES_FULL_48BIT_PPGTT(dev_priv))
		free_pdp(dev_priv, vm->scratch_pdp);
	free_pd(dev_priv, vm->scratch_pd);
	free_pt(dev_priv, vm->scratch_pt);
	cleanup_scratch_page(dev_priv, &vm->scratch_page);
}

static void gen8_ppgtt_cleanup_3lvl(struct drm_i915_private *dev_priv,
				    struct i915_page_directory_pointer *pdp)
{
	int i;

	for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev_priv)) {
		if (WARN_ON(!pdp->page_directory[i]))
			continue;

		gen8_free_page_tables(dev_priv, pdp->page_directory[i]);
		free_pd(dev_priv, pdp->page_directory[i]);
	}

	free_pdp(dev_priv, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	int i;

	for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
		if (WARN_ON(!ppgtt->pml4.pdps[i]))
			continue;

		gen8_ppgtt_cleanup_3lvl(dev_priv, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(dev_priv, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (!USES_FULL_48BIT_PPGTT(dev_priv))
		gen8_ppgtt_cleanup_3lvl(dev_priv, &ppgtt->pdp);
	else
		gen8_ppgtt_cleanup_4lvl(ppgtt);

	gen8_free_scratch(vm);
}

/**
 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
 * @vm:	Master vm structure.
 * @pd:	Page directory for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pts:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page tables. Extremely similar to
 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are
 * limited by the page directory boundary (instead of the page directory
 * pointer). That boundary is 1GB virtual. Therefore, unlike
 * gen8_ppgtt_alloc_page_directories(), it is possible, and likely, that the
 * caller will need to use multiple calls of this function to achieve the
 * appropriate allocation.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
				     struct i915_page_directory *pd,
				     uint64_t start,
				     uint64_t length,
				     unsigned long *new_pts)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_table *pt;
	uint32_t pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		/* Don't reallocate page tables */
		if (test_bit(pde, pd->used_pdes)) {
			/* Scratch is never allocated this way */
			WARN_ON(pt == vm->scratch_pt);
			continue;
		}

		pt = alloc_pt(dev_priv);
		if (IS_ERR(pt))
			goto unwind_out;

		gen8_initialize_pt(vm, pt);
		pd->page_table[pde] = pt;
		__set_bit(pde, new_pts);
		trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pde, new_pts, I915_PDES)
		free_pt(dev_priv, pd->page_table[pde]);

	return -ENOMEM;
}

/**
 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
 * @vm:	Master vm structure.
 * @pdp:	Page directory pointer for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pds:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directories starting at the pde index of
 * @start, and ending at the pde index @start + @length. This function will skip
 * over already allocated page directories within the range, and only allocate
 * new ones, setting the appropriate pointer within the pdp as well as the
 * correct position in the bitmap @new_pds.
 *
 * The function will only allocate the pages within the range for a given page
 * directory pointer. In other words, if @start + @length straddles a virtually
 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
 * required by the caller. This is not currently possible, and the BUG in the
 * code will prevent it.
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
				  struct i915_page_directory_pointer *pdp,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pds)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_directory *pd;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);

	WARN_ON(!bitmap_empty(new_pds, pdpes));

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (test_bit(pdpe, pdp->used_pdpes))
			continue;

		pd = alloc_pd(dev_priv);
		if (IS_ERR(pd))
			goto unwind_out;

		gen8_initialize_pd(vm, pd);
		pdp->page_directory[pdpe] = pd;
		__set_bit(pdpe, new_pds);
		trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
	}

	return 0;

unwind_out:
	for_each_set_bit(pdpe, new_pds, pdpes)
		free_pd(dev_priv, pdp->page_directory[pdpe]);

	return -ENOMEM;
}

/**
 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
 * @vm:	Master vm structure.
 * @pml4:	Page map level 4 for this address range.
 * @start:	Starting virtual address to begin allocations.
 * @length:	Size of the allocations.
 * @new_pdps:	Bitmap set by function with new allocations. Likely used by the
 *		caller to free on error.
 *
 * Allocate the required number of page directory pointers. Extremely similar to
 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
 * The main difference is here we are limited by the pml4 boundary (instead of
 * the page directory pointer).
 *
 * Return: 0 if success; negative error code otherwise.
 */
static int
gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
				  struct i915_pml4 *pml4,
				  uint64_t start,
				  uint64_t length,
				  unsigned long *new_pdps)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_directory_pointer *pdp;
	uint32_t pml4e;

	WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (!test_bit(pml4e, pml4->used_pml4es)) {
			pdp = alloc_pdp(dev_priv);
			if (IS_ERR(pdp))
				goto unwind_out;

			gen8_initialize_pdp(vm, pdp);
			pml4->pdps[pml4e] = pdp;
			__set_bit(pml4e, new_pdps);
			trace_i915_page_directory_pointer_entry_alloc(vm,
								      pml4e,
								      start,
								      GEN8_PML4E_SHIFT);
		}
	}

	return 0;

unwind_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		free_pdp(dev_priv, pml4->pdps[pml4e]);

	return -ENOMEM;
}

static void
free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
{
	kfree(new_pts);
	kfree(new_pds);
}

/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
 * of these are based on the number of PDPEs in the system.
 */
static
int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
					 unsigned long **new_pts,
					 uint32_t pdpes)
{
	unsigned long *pds;
	unsigned long *pts;

	pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
	if (!pds)
		return -ENOMEM;

	pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
		      GFP_TEMPORARY);
	if (!pts)
		goto err_out;

	*new_pds = pds;
	*new_pts = pts;

	return 0;

err_out:
	free_gen8_temp_bitmaps(pds, pts);
	return -ENOMEM;
}

static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp,
				    uint64_t start,
				    uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned long *new_page_dirs, *new_page_tables;
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_page_directory *pd;
	const uint64_t orig_start = start;
	const uint64_t orig_length = length;
	uint32_t pdpe;
	uint32_t pdpes = I915_PDPES_PER_PDP(dev_priv);
	int ret;

	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Do the allocations first so we can easily bail out */
	ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
						new_page_dirs);
	if (ret) {
		free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
		return ret;
	}

	/* For every page directory referenced, allocate page tables */
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
						new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
		if (ret)
			goto err_out;
	}

	start = orig_start;
	length = orig_length;

	/* Allocations have completed successfully, so set the bitmaps, and do
	 * the mappings. */
	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		gen8_pde_t *const page_directory = kmap_px(pd);
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		/* Every pd should be allocated, we just did that above. */
		WARN_ON(!pd);

		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			/* Same reasoning as pd */
			WARN_ON(!pt);
			WARN_ON(!pd_len);
			WARN_ON(!gen8_pte_count(pd_start, pd_len));

			/* Set our used ptes within the page table */
			bitmap_set(pt->used_ptes,
				   gen8_pte_index(pd_start),
				   gen8_pte_count(pd_start, pd_len));

			/* Our pde is now pointing to the pagetable, pt */
			__set_bit(pde, pd->used_pdes);

			/* Map the PDE to the page table */
			page_directory[pde] = gen8_pde_encode(px_dma(pt),
							      I915_CACHE_LLC);
			trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
							gen8_pte_index(start),
							gen8_pte_count(start, length),
							GEN8_PTES);

			/* NB: We haven't yet mapped ptes to pages. At this
			 * point we're still relying on insert_entries() */
		}

		kunmap_px(ppgtt, page_directory);
		__set_bit(pdpe, pdp->used_pdpes);
		gen8_setup_pdpe(ppgtt, pdp, pd, pdpe);
	}

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return 0;

err_out:
	while (pdpe--) {
		unsigned long temp;

		for_each_set_bit(temp, new_page_tables + pdpe *
				BITS_TO_LONGS(I915_PDES), I915_PDES)
			free_pt(dev_priv,
				pdp->page_directory[pdpe]->page_table[temp]);
	}

	for_each_set_bit(pdpe, new_page_dirs, pdpes)
		free_pd(dev_priv, pdp->page_directory[pdpe]);

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
	mark_tlbs_dirty(ppgtt);
	return ret;
}

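/*
 * Note the two-phase structure above: all page directories and tables are
 * allocated first, tracked only in the temporary bitmaps, so that any
 * failure can be unwound without having touched used_pdes/used_pdpes; only
 * once everything exists are the bitmaps, PDEs and PDPEs committed.
 */
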
static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
				    struct i915_pml4 *pml4,
				    uint64_t start,
				    uint64_t length)
{
	DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_directory_pointer *pdp;
	uint64_t pml4e;
	int ret = 0;

	/* Do the pml4 allocations first, so we don't need to track the newly
	 * allocated tables below the pdp */
	bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);

	/* The pagedirectory and pagetable allocations are done in the shared 3
	 * and 4 level code. Just allocate the pdps.
	 */
	ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
						new_pdps);
	if (ret)
		return ret;

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		WARN_ON(!pdp);

		ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
		if (ret)
			goto err_out;

		gen8_setup_pml4e(ppgtt, pml4, pdp, pml4e);
	}

	bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
		  GEN8_PML4ES_PER_PML4);

	return 0;

err_out:
	for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
		gen8_ppgtt_cleanup_3lvl(vm->i915, pml4->pdps[pml4e]);

	return ret;
}

static int gen8_alloc_va_range(struct i915_address_space *vm,
			       uint64_t start, uint64_t length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (USES_FULL_48BIT_PPGTT(vm->i915))
		return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
	else
		return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
}

static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
			  uint64_t start, uint64_t length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_page_directory *pd;
	uint32_t pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		uint64_t pd_len = length;
		uint64_t pd_start = start;
		uint32_t pde;

		if (!test_bit(pdpe, pdp->used_pdpes))
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			uint32_t pte;
			gen8_pte_t *pt_vaddr;

			if (!test_bit(pde, pd->used_pdes))
				continue;

			pt_vaddr = kmap_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				uint64_t va =
					(pdpe << GEN8_PDPE_SHIFT) |
					(pde << GEN8_PDE_SHIFT) |
					(pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, "  SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			/* don't use kunmap_px, it could trigger
			 * an unnecessary flush.
			 */
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	uint64_t start = ppgtt->base.start;
	uint64_t length = ppgtt->base.total;
	gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
						 I915_CACHE_LLC);

	if (!USES_FULL_48BIT_PPGTT(vm->i915)) {
		gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
	} else {
		uint64_t pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (!test_bit(pml4e, pml4->used_pml4es))
				continue;

			seq_printf(m, "    PML4E #%llu\n", pml4e);
			gen8_dump_pdp(pdp, start, length, scratch_pte, m);
		}
	}
}

static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
{
	unsigned long *new_page_dirs, *new_page_tables;
	uint32_t pdpes = I915_PDPES_PER_PDP(to_i915(ppgtt->base.dev));
	int ret;

	/* We allocate temp bitmap for page tables for no gain
	 * but as this is for init only, let's keep things simple
	 */
	ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
	if (ret)
		return ret;

	/* Allocate for all pdps regardless of how the ppgtt
	 * was defined.
	 */
	ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
						0, 1ULL << 32,
						new_page_dirs);
	if (!ret)
		*ppgtt->pdp.used_pdpes = *new_page_dirs;

	free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);

	return ret;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of legacy 32b
 * address space.
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_i915_private *dev_priv = ppgtt->base.i915;
	int ret;

	ret = gen8_init_scratch(&ppgtt->base);
	if (ret)
		return ret;

	ppgtt->base.start = 0;
	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.allocate_va_range = gen8_alloc_va_range;
	ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
	ppgtt->base.clear_range = gen8_ppgtt_clear_range;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	if (USES_FULL_48BIT_PPGTT(dev_priv)) {
		ret = setup_px(dev_priv, &ppgtt->pml4);
		if (ret)
			goto free_scratch;

		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

		ppgtt->base.total = 1ULL << 48;
		ppgtt->switch_mm = gen8_48b_mm_switch;
	} else {
		ret = __pdp_init(dev_priv, &ppgtt->pdp);
		if (ret)
			goto free_scratch;

		ppgtt->base.total = 1ULL << 32;
		ppgtt->switch_mm = gen8_legacy_mm_switch;
		trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
							      0, 0,
							      GEN8_PML4E_SHIFT);

		if (intel_vgpu_active(dev_priv)) {
			ret = gen8_preallocate_top_level_pdps(ppgtt);
			if (ret)
				goto free_scratch;
		}
	}

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	return 0;

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
}

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_table *unused;
	gen6_pte_t scratch_pte;
	uint32_t pd_entry;
	uint32_t pte, pde;
	uint32_t start = ppgtt->base.start, length = ppgtt->base.total;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
		pd_entry = readl(ppgtt->pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, "  SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_px(ppgtt, pt_vaddr);
	}
}

1688
678d96fb 1689/* Write pde (index) from the page directory @pd to the page table @pt */
ec565b3c
MT
1690static void gen6_write_pde(struct i915_page_directory *pd,
1691 const int pde, struct i915_page_table *pt)
6197349b 1692{
678d96fb
BW
1693 /* Caller needs to make sure the write completes if necessary */
1694 struct i915_hw_ppgtt *ppgtt =
1695 container_of(pd, struct i915_hw_ppgtt, pd);
1696 u32 pd_entry;
6197349b 1697
567047be 1698 pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
678d96fb 1699 pd_entry |= GEN6_PDE_VALID;
6197349b 1700
678d96fb
BW
1701 writel(pd_entry, ppgtt->pd_addr + pde);
1702}
6197349b 1703
678d96fb
BW
1704/* Write all the page tables found in the ppgtt structure to incrementing page
1705 * directories. */
1706static void gen6_write_page_range(struct drm_i915_private *dev_priv,
ec565b3c 1707 struct i915_page_directory *pd,
678d96fb
BW
1708 uint32_t start, uint32_t length)
1709{
72e96d64 1710 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ec565b3c 1711 struct i915_page_table *pt;
731f74c5 1712 uint32_t pde;
678d96fb 1713
731f74c5 1714 gen6_for_each_pde(pt, pd, start, length, pde)
678d96fb
BW
1715 gen6_write_pde(pd, pde, pt);
1716
1717 /* Make sure write is complete before other code can use this page
1718 * table. Also required for WC mapped PTEs. */
72e96d64 1719 readl(ggtt->gsm);
3e302542
BW
1720}
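/*
 * Sketch of the PDE value built in gen6_write_pde() above: assuming the
 * page table's DMA address fits in 32 bits, GEN6_PDE_ADDR_ENCODE()
 * reduces to the address itself, so a table at 0x12345000 produces
 *
 *	pd_entry = 0x12345000 | GEN6_PDE_VALID;
 *
 * which lands in the GGTT-mapped directory at ppgtt->pd_addr + pde.
 * The readl() of ggtt->gsm above is the posting read that orders these
 * writel()s against later use of the table.
 */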
1721
b4a74e3a 1722static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
3e302542 1723{
44159ddb 1724 BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
b4a74e3a 1725
44159ddb 1726 return (ppgtt->pd.base.ggtt_offset / 64) << 16;
b4a74e3a
BW
1727}
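/*
 * Worked example for get_pd_offset(): the BUG_ON guarantees a 64-byte
 * aligned ggtt_offset, so dividing by 64 is lossless. For
 * ggtt_offset == 0x10000:
 *
 *	(0x10000 / 64) << 16 == 0x400 << 16 == 0x04000000
 *
 * which is the value the mm-switch helpers below load into
 * RING_PP_DIR_BASE.
 */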
1728
90252e5c 1729static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1730 struct drm_i915_gem_request *req)
90252e5c 1731{
4a570db5 1732 struct intel_engine_cs *engine = req->engine;
73dec95e 1733 u32 *cs;
90252e5c
BW
1734 int ret;
1735
90252e5c 1736 /* NB: TLBs must be flushed and invalidated before a switch */
7c9cf4e3 1737 ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
90252e5c
BW
1738 if (ret)
1739 return ret;
1740
73dec95e
TU
1741 cs = intel_ring_begin(req, 6);
1742 if (IS_ERR(cs))
1743 return PTR_ERR(cs);
90252e5c 1744
73dec95e
TU
1745 *cs++ = MI_LOAD_REGISTER_IMM(2);
1746 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1747 *cs++ = PP_DIR_DCLV_2G;
1748 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1749 *cs++ = get_pd_offset(ppgtt);
1750 *cs++ = MI_NOOP;
1751 intel_ring_advance(req, cs);
90252e5c
BW
1752
1753 return 0;
1754}
1755
48a10389 1756static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1757 struct drm_i915_gem_request *req)
48a10389 1758{
4a570db5 1759 struct intel_engine_cs *engine = req->engine;
73dec95e 1760 u32 *cs;
48a10389
BW
1761 int ret;
1762
48a10389 1763 /* NB: TLBs must be flushed and invalidated before a switch */
7c9cf4e3 1764 ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
48a10389
BW
1765 if (ret)
1766 return ret;
1767
73dec95e
TU
1768 cs = intel_ring_begin(req, 6);
1769 if (IS_ERR(cs))
1770 return PTR_ERR(cs);
1771
1772 *cs++ = MI_LOAD_REGISTER_IMM(2);
1773 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1774 *cs++ = PP_DIR_DCLV_2G;
1775 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1776 *cs++ = get_pd_offset(ppgtt);
1777 *cs++ = MI_NOOP;
1778 intel_ring_advance(req, cs);
48a10389 1779
90252e5c 1780 /* XXX: RCS is the only one to auto invalidate the TLBs? */
e2f80391 1781 if (engine->id != RCS) {
7c9cf4e3 1782 ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
90252e5c
BW
1783 if (ret)
1784 return ret;
1785 }
1786
48a10389
BW
1787 return 0;
1788}
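/*
 * The six dwords emitted by hsw_mm_switch() and gen7_mm_switch() above
 * form a single load-register-immediate block, sketched here:
 *
 *	MI_LOAD_REGISTER_IMM(2)
 *	RING_PP_DIR_DCLV(engine)   <- register offset
 *	PP_DIR_DCLV_2G             <- value: mark the full directory valid
 *	RING_PP_DIR_BASE(engine)   <- register offset
 *	get_pd_offset(ppgtt)       <- value: new page-directory base
 *	MI_NOOP
 *
 * The CS itself repoints the ring's page directory, which is why the
 * TLB flush/invalidate has to be emitted first.
 */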
1789
eeb9488e 1790static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1791 struct drm_i915_gem_request *req)
eeb9488e 1792{
4a570db5 1793 struct intel_engine_cs *engine = req->engine;
8eb95204 1794 struct drm_i915_private *dev_priv = req->i915;
48a10389 1795
e2f80391
TU
1796 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1797 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
eeb9488e
BW
1798 return 0;
1799}
1800
c6be607a 1801static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
eeb9488e 1802{
e2f80391 1803 struct intel_engine_cs *engine;
3b3f1650 1804 enum intel_engine_id id;
3e302542 1805
3b3f1650 1806 for_each_engine(engine, dev_priv, id) {
c6be607a
TU
1807 u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
1808 GEN8_GFX_PPGTT_48B : 0;
e2f80391 1809 I915_WRITE(RING_MODE_GEN7(engine),
2dba3239 1810 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
eeb9488e 1811 }
eeb9488e 1812}
6197349b 1813
c6be607a 1814static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
3e302542 1815{
e2f80391 1816 struct intel_engine_cs *engine;
b4a74e3a 1817 uint32_t ecochk, ecobits;
3b3f1650 1818 enum intel_engine_id id;
6197349b 1819
b4a74e3a
BW
1820 ecobits = I915_READ(GAC_ECO_BITS);
1821 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
a65c2fcd 1822
b4a74e3a 1823 ecochk = I915_READ(GAM_ECOCHK);
772c2a51 1824 if (IS_HASWELL(dev_priv)) {
b4a74e3a
BW
1825 ecochk |= ECOCHK_PPGTT_WB_HSW;
1826 } else {
1827 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1828 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1829 }
1830 I915_WRITE(GAM_ECOCHK, ecochk);
a65c2fcd 1831
3b3f1650 1832 for_each_engine(engine, dev_priv, id) {
6197349b 1833 /* GFX_MODE is per-ring on gen7+ */
e2f80391 1834 I915_WRITE(RING_MODE_GEN7(engine),
b4a74e3a 1835 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b 1836 }
b4a74e3a 1837}
6197349b 1838
c6be607a 1839static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
b4a74e3a 1840{
b4a74e3a 1841 uint32_t ecochk, gab_ctl, ecobits;
a65c2fcd 1842
b4a74e3a
BW
1843 ecobits = I915_READ(GAC_ECO_BITS);
1844 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1845 ECOBITS_PPGTT_CACHE64B);
6197349b 1846
b4a74e3a
BW
1847 gab_ctl = I915_READ(GAB_CTL);
1848 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1849
1850 ecochk = I915_READ(GAM_ECOCHK);
1851 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1852
1853 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b
BW
1854}
1855
1d2a314c 1856/* PPGTT support for Sandybridge/Gen6 and later */
853ba5d2 1857static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
782f1495 1858 uint64_t start,
4fb84d99 1859 uint64_t length)
1d2a314c 1860{
e5716f55 1861 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
07749ef3 1862 gen6_pte_t *pt_vaddr, scratch_pte;
782f1495
BW
1863 unsigned first_entry = start >> PAGE_SHIFT;
1864 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3
MT
1865 unsigned act_pt = first_entry / GEN6_PTES;
1866 unsigned first_pte = first_entry % GEN6_PTES;
7bddb01f 1867 unsigned last_pte, i;
1d2a314c 1868
8bcdd0f7 1869 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
4fb84d99 1870 I915_CACHE_LLC, 0);
1d2a314c 1871
7bddb01f
DV
1872 while (num_entries) {
1873 last_pte = first_pte + num_entries;
07749ef3
MT
1874 if (last_pte > GEN6_PTES)
1875 last_pte = GEN6_PTES;
7bddb01f 1876
d1c54acd 1877 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1d2a314c 1878
7bddb01f
DV
1879 for (i = first_pte; i < last_pte; i++)
1880 pt_vaddr[i] = scratch_pte;
1d2a314c 1881
d1c54acd 1882 kunmap_px(ppgtt, pt_vaddr);
1d2a314c 1883
7bddb01f
DV
1884 num_entries -= last_pte - first_pte;
1885 first_pte = 0;
a15326a5 1886 act_pt++;
7bddb01f 1887 }
1d2a314c
DV
1888}
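/*
 * Index arithmetic in gen6_ppgtt_clear_range(), worked through once
 * (assuming PAGE_SIZE == 4096 and GEN6_PTES == 1024): clearing
 * start = 8 MiB, length = 8 KiB gives first_entry = 2048, hence
 * act_pt = 2048 / 1024 = 2 and first_pte = 2048 % 1024 = 0, so exactly
 * two PTEs in page table 2 are rewritten with the scratch encoding.
 */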
1889
853ba5d2 1890static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
def886c3 1891 struct sg_table *pages,
782f1495 1892 uint64_t start,
24f3a8cf 1893 enum i915_cache_level cache_level, u32 flags)
def886c3 1894{
e5716f55 1895 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
782f1495 1896 unsigned first_entry = start >> PAGE_SHIFT;
07749ef3
MT
1897 unsigned act_pt = first_entry / GEN6_PTES;
1898 unsigned act_pte = first_entry % GEN6_PTES;
85d1225e
DG
1899 gen6_pte_t *pt_vaddr = NULL;
1900 struct sgt_iter sgt_iter;
1901 dma_addr_t addr;
6e995e23 1902
85d1225e 1903 for_each_sgt_dma(addr, sgt_iter, pages) {
cc79714f 1904 if (pt_vaddr == NULL)
d1c54acd 1905 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
6e995e23 1906
cc79714f 1907 pt_vaddr[act_pte] =
4fb84d99 1908 vm->pte_encode(addr, cache_level, flags);
24f3a8cf 1909
07749ef3 1910 if (++act_pte == GEN6_PTES) {
d1c54acd 1911 kunmap_px(ppgtt, pt_vaddr);
cc79714f 1912 pt_vaddr = NULL;
a15326a5 1913 act_pt++;
6e995e23 1914 act_pte = 0;
def886c3 1915 }
def886c3 1916 }
85d1225e 1917
cc79714f 1918 if (pt_vaddr)
d1c54acd 1919 kunmap_px(ppgtt, pt_vaddr);
def886c3
DV
1920}
1921
678d96fb 1922static int gen6_alloc_va_range(struct i915_address_space *vm,
a05d80ee 1923 uint64_t start_in, uint64_t length_in)
678d96fb 1924{
4933d519 1925 DECLARE_BITMAP(new_page_tables, I915_PDES);
49d73912 1926 struct drm_i915_private *dev_priv = vm->i915;
72e96d64 1927 struct i915_ggtt *ggtt = &dev_priv->ggtt;
e5716f55 1928 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
ec565b3c 1929 struct i915_page_table *pt;
a05d80ee 1930 uint32_t start, length, start_save, length_save;
731f74c5 1931 uint32_t pde;
4933d519
MT
1932 int ret;
1933
a05d80ee
MK
1934 start = start_save = start_in;
1935 length = length_save = length_in;
4933d519
MT
1936
1937 bitmap_zero(new_page_tables, I915_PDES);
1938
1939 /* The allocation is done in two stages so that we can bail out with
1940 * a minimal amount of pain. The first stage finds new page tables that
1941 * need allocation. The second stage marks the used PTEs within the page
1942 * tables.
1943 */
731f74c5 1944 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
79ab9370 1945 if (pt != vm->scratch_pt) {
4933d519
MT
1946 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
1947 continue;
1948 }
1949
1950 /* We've already allocated a page table */
1951 WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
1952
275a991c 1953 pt = alloc_pt(dev_priv);
4933d519
MT
1954 if (IS_ERR(pt)) {
1955 ret = PTR_ERR(pt);
1956 goto unwind_out;
1957 }
1958
1959 gen6_initialize_pt(vm, pt);
1960
1961 ppgtt->pd.page_table[pde] = pt;
966082c9 1962 __set_bit(pde, new_page_tables);
72744cb1 1963 trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
4933d519
MT
1964 }
1965
1966 start = start_save;
1967 length = length_save;
678d96fb 1968
731f74c5 1969 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
678d96fb
BW
1970 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
1971
1972 bitmap_zero(tmp_bitmap, GEN6_PTES);
1973 bitmap_set(tmp_bitmap, gen6_pte_index(start),
1974 gen6_pte_count(start, length));
1975
966082c9 1976 if (__test_and_clear_bit(pde, new_page_tables))
4933d519
MT
1977 gen6_write_pde(&ppgtt->pd, pde, pt);
1978
72744cb1
MT
1979 trace_i915_page_table_entry_map(vm, pde, pt,
1980 gen6_pte_index(start),
1981 gen6_pte_count(start, length),
1982 GEN6_PTES);
4933d519 1983 bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
678d96fb
BW
1984 GEN6_PTES);
1985 }
1986
4933d519
MT
1987 WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
1988
1989 /* Make sure write is complete before other code can use this page
1990 * table. Also required for WC mapped PTEs. */
72e96d64 1991 readl(ggtt->gsm);
4933d519 1992
563222a7 1993 mark_tlbs_dirty(ppgtt);
678d96fb 1994 return 0;
4933d519
MT
1995
1996unwind_out:
1997 for_each_set_bit(pde, new_page_tables, I915_PDES) {
ec565b3c 1998 struct i915_page_table *pt = ppgtt->pd.page_table[pde];
4933d519 1999
79ab9370 2000 ppgtt->pd.page_table[pde] = vm->scratch_pt;
275a991c 2001 free_pt(dev_priv, pt);
4933d519
MT
2002 }
2003
2004 mark_tlbs_dirty(ppgtt);
2005 return ret;
678d96fb
BW
2006}
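/*
 * A minimal usage sketch for the two-stage allocator above (error
 * handling elided; callers go through the vfunc set up in
 * gen6_ppgtt_init() below):
 *
 *	ret = vm->allocate_va_range(vm, node.start, node.size);
 *	if (ret)
 *		return ret;   <- stage one already unwound its tables
 *
 * Stage one may swap scratch page tables for freshly allocated ones;
 * stage two only marks bits in used_ptes and writes the new PDEs, so a
 * failure can never leave a half-initialised page table visible.
 */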
2007
8776f02b
MK
2008static int gen6_init_scratch(struct i915_address_space *vm)
2009{
49d73912 2010 struct drm_i915_private *dev_priv = vm->i915;
8bcdd0f7 2011 int ret;
8776f02b 2012
275a991c 2013 ret = setup_scratch_page(dev_priv, &vm->scratch_page, I915_GFP_DMA);
8bcdd0f7
CW
2014 if (ret)
2015 return ret;
8776f02b 2016
275a991c 2017 vm->scratch_pt = alloc_pt(dev_priv);
8776f02b 2018 if (IS_ERR(vm->scratch_pt)) {
275a991c 2019 cleanup_scratch_page(dev_priv, &vm->scratch_page);
8776f02b
MK
2020 return PTR_ERR(vm->scratch_pt);
2021 }
2022
2023 gen6_initialize_pt(vm, vm->scratch_pt);
2024
2025 return 0;
2026}
2027
2028static void gen6_free_scratch(struct i915_address_space *vm)
2029{
49d73912 2030 struct drm_i915_private *dev_priv = vm->i915;
8776f02b 2031
275a991c
TU
2032 free_pt(dev_priv, vm->scratch_pt);
2033 cleanup_scratch_page(dev_priv, &vm->scratch_page);
8776f02b
MK
2034}
2035
061dd493 2036static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
a00d825d 2037{
e5716f55 2038 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
731f74c5 2039 struct i915_page_directory *pd = &ppgtt->pd;
49d73912 2040 struct drm_i915_private *dev_priv = vm->i915;
09942c65
MT
2041 struct i915_page_table *pt;
2042 uint32_t pde;
4933d519 2043
061dd493
DV
2044 drm_mm_remove_node(&ppgtt->node);
2045
731f74c5 2046 gen6_for_all_pdes(pt, pd, pde)
79ab9370 2047 if (pt != vm->scratch_pt)
275a991c 2048 free_pt(dev_priv, pt);
06fda602 2049
8776f02b 2050 gen6_free_scratch(vm);
3440d265
DV
2051}
2052
b146520f 2053static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
3440d265 2054{
8776f02b 2055 struct i915_address_space *vm = &ppgtt->base;
49d73912 2056 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 2057 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f 2058 int ret;
1d2a314c 2059
c8d4c0d6
BW
2060 /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
2061 * allocator works in address space sizes, so it's multiplied by page
2062 * size. We allocate at the top of the GTT to avoid fragmentation.
2063 */
72e96d64 2064 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
4933d519 2065
8776f02b
MK
2066 ret = gen6_init_scratch(vm);
2067 if (ret)
2068 return ret;
4933d519 2069
e007b19d
CW
2070 ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
2071 GEN6_PD_SIZE, GEN6_PD_ALIGN,
2072 I915_COLOR_UNEVICTABLE,
2073 0, ggtt->base.total,
2074 PIN_HIGH);
c8c26622 2075 if (ret)
678d96fb
BW
2076 goto err_out;
2077
72e96d64 2078 if (ppgtt->node.start < ggtt->mappable_end)
c8d4c0d6 2079 DRM_DEBUG("Forced to use aperture for PDEs\n");
1d2a314c 2080
c8c26622 2081 return 0;
678d96fb
BW
2082
2083err_out:
8776f02b 2084 gen6_free_scratch(vm);
678d96fb 2085 return ret;
b146520f
BW
2086}
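/*
 * Size arithmetic for the GGTT node reserved above, assuming
 * GEN6_PD_SIZE == I915_PDES * PAGE_SIZE: the 512 PDEs themselves are
 * only 512 * 4 bytes, but the drm_mm allocator deals in address space,
 * so the node spans 512 * 4096 = 2 MiB of GGTT, placed with PIN_HIGH
 * to keep the contended low/mappable range free.
 */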
2087
b146520f
BW
2088static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2089{
2f2cf682 2090 return gen6_ppgtt_allocate_page_directories(ppgtt);
4933d519 2091}
06dc68d6 2092
4933d519
MT
2093static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2094 uint64_t start, uint64_t length)
2095{
ec565b3c 2096 struct i915_page_table *unused;
731f74c5 2097 uint32_t pde;
1d2a314c 2098
731f74c5 2099 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
79ab9370 2100 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
b146520f
BW
2101}
2102
5c5f6457 2103static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
b146520f 2104{
49d73912 2105 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 2106 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f
BW
2107 int ret;
2108
72e96d64 2109 ppgtt->base.pte_encode = ggtt->base.pte_encode;
5db94019 2110 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
b146520f 2111 ppgtt->switch_mm = gen6_mm_switch;
772c2a51 2112 else if (IS_HASWELL(dev_priv))
b146520f 2113 ppgtt->switch_mm = hsw_mm_switch;
5db94019 2114 else if (IS_GEN7(dev_priv))
b146520f 2115 ppgtt->switch_mm = gen7_mm_switch;
8eb95204 2116 else
b146520f
BW
2117 BUG();
2118
2119 ret = gen6_ppgtt_alloc(ppgtt);
2120 if (ret)
2121 return ret;
2122
5c5f6457 2123 ppgtt->base.allocate_va_range = gen6_alloc_va_range;
b146520f
BW
2124 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2125 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
777dc5bb
DV
2126 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2127 ppgtt->base.bind_vma = ppgtt_bind_vma;
b146520f 2128 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
b146520f 2129 ppgtt->base.start = 0;
09942c65 2130 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
87d60b63 2131 ppgtt->debug_dump = gen6_dump_ppgtt;
1d2a314c 2132
44159ddb 2133 ppgtt->pd.base.ggtt_offset =
07749ef3 2134 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1d2a314c 2135
72e96d64 2136 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
44159ddb 2137 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
678d96fb 2138
5c5f6457 2139 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
1d2a314c 2140
678d96fb
BW
2141 gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
2142
440fd528 2143 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
b146520f
BW
2144 ppgtt->node.size >> 20,
2145 ppgtt->node.start / PAGE_SIZE);
3440d265 2146
fa76da34 2147 DRM_DEBUG("Adding PPGTT at offset %x\n",
44159ddb 2148 ppgtt->pd.base.ggtt_offset << 10);
fa76da34 2149
b146520f 2150 return 0;
3440d265
DV
2151}
2152
2bfa996e
CW
2153static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
2154 struct drm_i915_private *dev_priv)
3440d265 2155{
49d73912 2156 ppgtt->base.i915 = dev_priv;
3440d265 2157
2bfa996e 2158 if (INTEL_INFO(dev_priv)->gen < 8)
5c5f6457 2159 return gen6_ppgtt_init(ppgtt);
3ed124b2 2160 else
d7b2633d 2161 return gen8_ppgtt_init(ppgtt);
fa76da34 2162}
c114f76a 2163
a2cad9df 2164static void i915_address_space_init(struct i915_address_space *vm,
80b204bc
CW
2165 struct drm_i915_private *dev_priv,
2166 const char *name)
a2cad9df 2167{
80b204bc 2168 i915_gem_timeline_init(dev_priv, &vm->timeline, name);
47db922f 2169
a2cad9df 2170 drm_mm_init(&vm->mm, vm->start, vm->total);
47db922f
CW
2171 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
2172
a2cad9df
MW
2173 INIT_LIST_HEAD(&vm->active_list);
2174 INIT_LIST_HEAD(&vm->inactive_list);
50e046b6 2175 INIT_LIST_HEAD(&vm->unbound_list);
47db922f 2176
a2cad9df
MW
2177 list_add_tail(&vm->global_link, &dev_priv->vm_list);
2178}
2179
ed9724dd
MA
2180static void i915_address_space_fini(struct i915_address_space *vm)
2181{
2182 i915_gem_timeline_fini(&vm->timeline);
2183 drm_mm_takedown(&vm->mm);
2184 list_del(&vm->global_link);
2185}
2186
c6be607a 2187static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
d5165ebd 2188{
d5165ebd
TG
2189 /* This function is for GTT-related workarounds. It is called on
2190 * driver load and after a GPU reset, so you can place workarounds
2191 * here even if they get overwritten by a GPU reset.
2192 */
9fb5026f 2193 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */
8652744b 2194 if (IS_BROADWELL(dev_priv))
d5165ebd 2195 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
920a14b2 2196 else if (IS_CHERRYVIEW(dev_priv))
d5165ebd 2197 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
b976dc53 2198 else if (IS_GEN9_BC(dev_priv))
d5165ebd 2199 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
9fb5026f 2200 else if (IS_GEN9_LP(dev_priv))
d5165ebd
TG
2201 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2202}
2203
2bfa996e
CW
2204static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
2205 struct drm_i915_private *dev_priv,
80b204bc
CW
2206 struct drm_i915_file_private *file_priv,
2207 const char *name)
fa76da34 2208{
2bfa996e 2209 int ret;
3ed124b2 2210
2bfa996e 2211 ret = __hw_ppgtt_init(ppgtt, dev_priv);
fa76da34 2212 if (ret == 0) {
c7c48dfd 2213 kref_init(&ppgtt->ref);
80b204bc 2214 i915_address_space_init(&ppgtt->base, dev_priv, name);
2bfa996e 2215 ppgtt->base.file = file_priv;
93bd8649 2216 }
1d2a314c
DV
2217
2218 return ret;
2219}
2220
c6be607a 2221int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
82460d97 2222{
c6be607a 2223 gtt_write_workarounds(dev_priv);
d5165ebd 2224
671b5013
TD
2225 /* In the case of execlists, PPGTT is enabled by the context descriptor
2226 * and the PDPs are contained within the context itself. We don't
2227 * need to do anything here. */
2228 if (i915.enable_execlists)
2229 return 0;
2230
c6be607a 2231 if (!USES_PPGTT(dev_priv))
82460d97
DV
2232 return 0;
2233
5db94019 2234 if (IS_GEN6(dev_priv))
c6be607a 2235 gen6_ppgtt_enable(dev_priv);
5db94019 2236 else if (IS_GEN7(dev_priv))
c6be607a
TU
2237 gen7_ppgtt_enable(dev_priv);
2238 else if (INTEL_GEN(dev_priv) >= 8)
2239 gen8_ppgtt_enable(dev_priv);
82460d97 2240 else
c6be607a 2241 MISSING_CASE(INTEL_GEN(dev_priv));
82460d97 2242
4ad2fd88
JH
2243 return 0;
2244}
1d2a314c 2245
4d884705 2246struct i915_hw_ppgtt *
2bfa996e 2247i915_ppgtt_create(struct drm_i915_private *dev_priv,
80b204bc
CW
2248 struct drm_i915_file_private *fpriv,
2249 const char *name)
4d884705
DV
2250{
2251 struct i915_hw_ppgtt *ppgtt;
2252 int ret;
2253
2254 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2255 if (!ppgtt)
2256 return ERR_PTR(-ENOMEM);
2257
80b204bc 2258 ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv, name);
4d884705
DV
2259 if (ret) {
2260 kfree(ppgtt);
2261 return ERR_PTR(ret);
2262 }
2263
198c974d
DCS
2264 trace_i915_ppgtt_create(&ppgtt->base);
2265
4d884705
DV
2266 return ppgtt;
2267}
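/*
 * Hedged call-site sketch for i915_ppgtt_create() (the real callers
 * live in the context code, not in this file):
 *
 *	ppgtt = i915_ppgtt_create(dev_priv, file_priv, "name");
 *	if (IS_ERR(ppgtt))
 *		return PTR_ERR(ppgtt);
 *	...
 *	i915_ppgtt_put(ppgtt);   <- drops the kref taken in i915_ppgtt_init()
 *
 * The final kref_put lands in i915_ppgtt_release() below.
 */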
2268
0c7eeda1
CW
2269void i915_ppgtt_close(struct i915_address_space *vm)
2270{
2271 struct list_head *phases[] = {
2272 &vm->active_list,
2273 &vm->inactive_list,
2274 &vm->unbound_list,
2275 NULL,
2276 }, **phase;
2277
2278 GEM_BUG_ON(vm->closed);
2279 vm->closed = true;
2280
2281 for (phase = phases; *phase; phase++) {
2282 struct i915_vma *vma, *vn;
2283
2284 list_for_each_entry_safe(vma, vn, *phase, vm_link)
2285 if (!i915_vma_is_closed(vma))
2286 i915_vma_close(vma);
2287 }
2288}
2289
ed9724dd 2290void i915_ppgtt_release(struct kref *kref)
ee960be7
DV
2291{
2292 struct i915_hw_ppgtt *ppgtt =
2293 container_of(kref, struct i915_hw_ppgtt, ref);
2294
198c974d
DCS
2295 trace_i915_ppgtt_release(&ppgtt->base);
2296
50e046b6 2297 /* vmas should already be unbound and destroyed */
ee960be7
DV
2298 WARN_ON(!list_empty(&ppgtt->base.active_list));
2299 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
50e046b6 2300 WARN_ON(!list_empty(&ppgtt->base.unbound_list));
ee960be7 2301
ed9724dd 2302 i915_address_space_fini(&ppgtt->base);
19dd120c 2303
ee960be7
DV
2304 ppgtt->base.cleanup(&ppgtt->base);
2305 kfree(ppgtt);
2306}
1d2a314c 2307
a81cc00c
BW
2308/* Certain Gen5 chipsets require idling the GPU before
2309 * unmapping anything from the GTT when VT-d is enabled.
2310 */
97d6d7ab 2311static bool needs_idle_maps(struct drm_i915_private *dev_priv)
a81cc00c
BW
2312{
2313#ifdef CONFIG_INTEL_IOMMU
2314 /* Query intel_iommu to see if we need the workaround. Presumably that
2315 * was loaded first.
2316 */
97d6d7ab 2317 if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
a81cc00c
BW
2318 return true;
2319#endif
2320 return false;
2321}
2322
dc97997a 2323void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
828c7908 2324{
e2f80391 2325 struct intel_engine_cs *engine;
3b3f1650 2326 enum intel_engine_id id;
828c7908 2327
dc97997a 2328 if (INTEL_INFO(dev_priv)->gen < 6)
828c7908
BW
2329 return;
2330
3b3f1650 2331 for_each_engine(engine, dev_priv, id) {
828c7908 2332 u32 fault_reg;
e2f80391 2333 fault_reg = I915_READ(RING_FAULT_REG(engine));
828c7908
BW
2334 if (fault_reg & RING_FAULT_VALID) {
2335 DRM_DEBUG_DRIVER("Unexpected fault\n"
59a5d290 2336 "\tAddr: 0x%08lx\n"
828c7908
BW
2337 "\tAddress space: %s\n"
2338 "\tSource ID: %d\n"
2339 "\tType: %d\n",
2340 fault_reg & PAGE_MASK,
2341 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2342 RING_FAULT_SRCID(fault_reg),
2343 RING_FAULT_FAULT_TYPE(fault_reg));
e2f80391 2344 I915_WRITE(RING_FAULT_REG(engine),
828c7908
BW
2345 fault_reg & ~RING_FAULT_VALID);
2346 }
2347 }
3b3f1650
AG
2348
2349 /* Engine specific init may not have been done till this point. */
2350 if (dev_priv->engine[RCS])
2351 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
828c7908
BW
2352}
2353
275a991c 2354void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
828c7908 2355{
72e96d64 2356 struct i915_ggtt *ggtt = &dev_priv->ggtt;
828c7908
BW
2357
2358 /* Don't bother messing with faults pre GEN6 as we have little
2359 * documentation supporting that it's a good idea.
2360 */
275a991c 2361 if (INTEL_GEN(dev_priv) < 6)
828c7908
BW
2362 return;
2363
dc97997a 2364 i915_check_and_clear_faults(dev_priv);
828c7908 2365
4fb84d99 2366 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
91e56499 2367
7c3f86b6 2368 i915_ggtt_invalidate(dev_priv);
828c7908
BW
2369}
2370
03ac84f1
CW
2371int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2372 struct sg_table *pages)
7c2e6fdf 2373{
1a292fa5
CW
2374 do {
2375 if (dma_map_sg(&obj->base.dev->pdev->dev,
2376 pages->sgl, pages->nents,
2377 PCI_DMA_BIDIRECTIONAL))
2378 return 0;
2379
2380 /* If the DMA remap fails, one cause can be that we have
2381 * too many objects pinned in a small remapping table,
2382 * such as swiotlb. Incrementally purge all other objects and
2383 * try again - if there are no more pages to remove from
2384 * the DMA remapper, i915_gem_shrink will return 0.
2385 */
2386 GEM_BUG_ON(obj->mm.pages == pages);
2387 } while (i915_gem_shrink(to_i915(obj->base.dev),
2388 obj->base.size >> PAGE_SHIFT,
2389 I915_SHRINK_BOUND |
2390 I915_SHRINK_UNBOUND |
2391 I915_SHRINK_ACTIVE));
9da3da66 2392
03ac84f1 2393 return -ENOSPC;
7c2e6fdf
DV
2394}
2395
2c642b07 2396static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
94ec8f61 2397{
94ec8f61 2398 writeq(pte, addr);
94ec8f61
BW
2399}
2400
d6473f56
CW
2401static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2402 dma_addr_t addr,
2403 uint64_t offset,
2404 enum i915_cache_level level,
2405 u32 unused)
2406{
7c3f86b6 2407 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2408 gen8_pte_t __iomem *pte =
7c3f86b6 2409 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2410
4fb84d99 2411 gen8_set_pte(pte, gen8_pte_encode(addr, level));
d6473f56 2412
7c3f86b6 2413 ggtt->invalidate(vm->i915);
d6473f56
CW
2414}
2415
94ec8f61
BW
2416static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2417 struct sg_table *st,
782f1495 2418 uint64_t start,
24f3a8cf 2419 enum i915_cache_level level, u32 unused)
94ec8f61 2420{
ce7fda2e 2421 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
85d1225e
DG
2422 struct sgt_iter sgt_iter;
2423 gen8_pte_t __iomem *gtt_entries;
2424 gen8_pte_t gtt_entry;
2425 dma_addr_t addr;
85d1225e 2426 int i = 0;
be69459a 2427
85d1225e
DG
2428 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
2429
2430 for_each_sgt_dma(addr, sgt_iter, st) {
4fb84d99 2431 gtt_entry = gen8_pte_encode(addr, level);
85d1225e 2432 gen8_set_pte(&gtt_entries[i++], gtt_entry);
94ec8f61
BW
2433 }
2434
2435 /*
2436 * XXX: This serves as a posting read to make sure that the PTE has
2437 * actually been updated. There is some concern that even though
2438 * registers and PTEs are within the same BAR that they are potentially
2439 * of NUMA access patterns. Therefore, even with the way we assume
2440 * hardware should work, we must keep this posting read for paranoia.
2441 */
2442 if (i != 0)
85d1225e 2443 WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
94ec8f61 2444
94ec8f61
BW
2445 /* This next bit makes the above posting read even more important. We
2446 * want to flush the TLBs only after we're certain all the PTE updates
2447 * have finished.
2448 */
7c3f86b6 2449 ggtt->invalidate(vm->i915);
94ec8f61
BW
2450}
2451
c140330b
CW
2452struct insert_entries {
2453 struct i915_address_space *vm;
2454 struct sg_table *st;
2455 uint64_t start;
2456 enum i915_cache_level level;
2457 u32 flags;
2458};
2459
2460static int gen8_ggtt_insert_entries__cb(void *_arg)
2461{
2462 struct insert_entries *arg = _arg;
2463 gen8_ggtt_insert_entries(arg->vm, arg->st,
2464 arg->start, arg->level, arg->flags);
2465 return 0;
2466}
2467
2468static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2469 struct sg_table *st,
2470 uint64_t start,
2471 enum i915_cache_level level,
2472 u32 flags)
2473{
2474 struct insert_entries arg = { vm, st, start, level, flags };
2475 stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
2476}
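/*
 * gen8_ggtt_insert_entries__BKL above trades throughput for safety:
 * stop_machine() runs the plain insert path with every other CPU held,
 * serialising GGTT PTE writes on machines where concurrent updates are
 * suspect. Callers are unaffected since only the vfunc changes, as in
 * gen8_gmch_probe() below:
 *
 *	ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
 */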
2477
d6473f56
CW
2478static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2479 dma_addr_t addr,
2480 uint64_t offset,
2481 enum i915_cache_level level,
2482 u32 flags)
2483{
7c3f86b6 2484 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2485 gen6_pte_t __iomem *pte =
7c3f86b6 2486 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2487
4fb84d99 2488 iowrite32(vm->pte_encode(addr, level, flags), pte);
d6473f56 2489
7c3f86b6 2490 ggtt->invalidate(vm->i915);
d6473f56
CW
2491}
2492
e76e9aeb
BW
2493/*
2494 * Binds an object into the global gtt with the specified cache level. The object
2495 * will be accessible to the GPU via commands whose operands reference offsets
2496 * within the global GTT as well as accessible by the GPU through the GMADR
2497 * mapped BAR (dev_priv->mm.gtt->gtt).
2498 */
853ba5d2 2499static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
7faf1ab2 2500 struct sg_table *st,
782f1495 2501 uint64_t start,
24f3a8cf 2502 enum i915_cache_level level, u32 flags)
e76e9aeb 2503{
ce7fda2e 2504 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
85d1225e
DG
2505 struct sgt_iter sgt_iter;
2506 gen6_pte_t __iomem *gtt_entries;
2507 gen6_pte_t gtt_entry;
2508 dma_addr_t addr;
85d1225e 2509 int i = 0;
be69459a 2510
85d1225e
DG
2511 gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
2512
2513 for_each_sgt_dma(addr, sgt_iter, st) {
4fb84d99 2514 gtt_entry = vm->pte_encode(addr, level, flags);
85d1225e 2515 iowrite32(gtt_entry, &gtt_entries[i++]);
e76e9aeb
BW
2516 }
2517
e76e9aeb
BW
2518 /* XXX: This serves as a posting read to make sure that the PTE has
2519 * actually been updated. There is some concern that even though
2520 * registers and PTEs are within the same BAR that they are potentially
2521 * of NUMA access patterns. Therefore, even with the way we assume
2522 * hardware should work, we must keep this posting read for paranoia.
2523 */
85d1225e
DG
2524 if (i != 0)
2525 WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
0f9b91c7
BW
2526
2527 /* This next bit makes the above posting read even more important. We
2528 * want to flush the TLBs only after we're certain all the PTE updates
2529 * have finished.
2530 */
7c3f86b6 2531 ggtt->invalidate(vm->i915);
e76e9aeb
BW
2532}
2533
f7770bfd 2534static void nop_clear_range(struct i915_address_space *vm,
4fb84d99 2535 uint64_t start, uint64_t length)
f7770bfd
CW
2536{
2537}
2538
94ec8f61 2539static void gen8_ggtt_clear_range(struct i915_address_space *vm,
4fb84d99 2540 uint64_t start, uint64_t length)
94ec8f61 2541{
ce7fda2e 2542 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2543 unsigned first_entry = start >> PAGE_SHIFT;
2544 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2545 gen8_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2546 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2547 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
94ec8f61
BW
2548 int i;
2549
2550 if (WARN(num_entries > max_entries,
2551 "First entry = %d; Num entries = %d (max=%d)\n",
2552 first_entry, num_entries, max_entries))
2553 num_entries = max_entries;
2554
8bcdd0f7 2555 scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
4fb84d99 2556 I915_CACHE_LLC);
94ec8f61
BW
2557 for (i = 0; i < num_entries; i++)
2558 gen8_set_pte(&gtt_base[i], scratch_pte);
2559 readl(gtt_base);
2560}
2561
853ba5d2 2562static void gen6_ggtt_clear_range(struct i915_address_space *vm,
782f1495 2563 uint64_t start,
4fb84d99 2564 uint64_t length)
7faf1ab2 2565{
ce7fda2e 2566 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2567 unsigned first_entry = start >> PAGE_SHIFT;
2568 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2569 gen6_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2570 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2571 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
7faf1ab2
DV
2572 int i;
2573
2574 if (WARN(num_entries > max_entries,
2575 "First entry = %d; Num entries = %d (max=%d)\n",
2576 first_entry, num_entries, max_entries))
2577 num_entries = max_entries;
2578
8bcdd0f7 2579 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
4fb84d99 2580 I915_CACHE_LLC, 0);
828c7908 2581
7faf1ab2
DV
2582 for (i = 0; i < num_entries; i++)
2583 iowrite32(scratch_pte, &gtt_base[i]);
2584 readl(gtt_base);
2585}
2586
d6473f56
CW
2587static void i915_ggtt_insert_page(struct i915_address_space *vm,
2588 dma_addr_t addr,
2589 uint64_t offset,
2590 enum i915_cache_level cache_level,
2591 u32 unused)
2592{
d6473f56
CW
2593 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2594 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
d6473f56
CW
2595
2596 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
d6473f56
CW
2597}
2598
d369d2d9
DV
2599static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2600 struct sg_table *pages,
2601 uint64_t start,
2602 enum i915_cache_level cache_level, u32 unused)
7faf1ab2
DV
2603{
2604 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2605 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2606
d369d2d9 2607 intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
0875546c 2608
7faf1ab2
DV
2609}
2610
853ba5d2 2611static void i915_ggtt_clear_range(struct i915_address_space *vm,
782f1495 2612 uint64_t start,
4fb84d99 2613 uint64_t length)
7faf1ab2 2614{
2eedfc7d 2615 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
7faf1ab2
DV
2616}
2617
70b9f6f8
DV
2618static int ggtt_bind_vma(struct i915_vma *vma,
2619 enum i915_cache_level cache_level,
2620 u32 flags)
0a878716 2621{
49d73912 2622 struct drm_i915_private *i915 = vma->vm->i915;
0a878716 2623 struct drm_i915_gem_object *obj = vma->obj;
ba7a5741 2624 u32 pte_flags;
0a878716 2625
ba7a5741
CW
2626 if (unlikely(!vma->pages)) {
2627 int ret = i915_get_ggtt_vma_pages(vma);
2628 if (ret)
2629 return ret;
2630 }
0a878716
DV
2631
2632 /* Currently applicable only to VLV */
ba7a5741 2633 pte_flags = 0;
0a878716
DV
2634 if (obj->gt_ro)
2635 pte_flags |= PTE_READ_ONLY;
2636
9c870d03 2637 intel_runtime_pm_get(i915);
247177dd 2638 vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
0a878716 2639 cache_level, pte_flags);
9c870d03 2640 intel_runtime_pm_put(i915);
0a878716
DV
2641
2642 /*
2643 * Without aliasing PPGTT there's no difference between
2644 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2645 * upgrade to both bound if we bind either to avoid double-binding.
2646 */
3272db53 2647 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
0a878716
DV
2648
2649 return 0;
2650}
2651
2652static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2653 enum i915_cache_level cache_level,
2654 u32 flags)
d5bd1449 2655{
49d73912 2656 struct drm_i915_private *i915 = vma->vm->i915;
321d178e 2657 u32 pte_flags;
70b9f6f8 2658
ba7a5741
CW
2659 if (unlikely(!vma->pages)) {
2660 int ret = i915_get_ggtt_vma_pages(vma);
2661 if (ret)
2662 return ret;
2663 }
7faf1ab2 2664
24f3a8cf 2665 /* Currently applicable only to VLV */
321d178e
CW
2666 pte_flags = 0;
2667 if (vma->obj->gt_ro)
f329f5f6 2668 pte_flags |= PTE_READ_ONLY;
24f3a8cf 2669
3272db53 2670 if (flags & I915_VMA_GLOBAL_BIND) {
9c870d03 2671 intel_runtime_pm_get(i915);
321d178e 2672 vma->vm->insert_entries(vma->vm,
247177dd 2673 vma->pages, vma->node.start,
0875546c 2674 cache_level, pte_flags);
9c870d03 2675 intel_runtime_pm_put(i915);
6f65e29a 2676 }
d5bd1449 2677
3272db53 2678 if (flags & I915_VMA_LOCAL_BIND) {
9c870d03 2679 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
321d178e 2680 appgtt->base.insert_entries(&appgtt->base,
247177dd 2681 vma->pages, vma->node.start,
f329f5f6 2682 cache_level, pte_flags);
6f65e29a 2683 }
70b9f6f8
DV
2684
2685 return 0;
d5bd1449
CW
2686}
2687
6f65e29a 2688static void ggtt_unbind_vma(struct i915_vma *vma)
74163907 2689{
49d73912 2690 struct drm_i915_private *i915 = vma->vm->i915;
9c870d03 2691 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
de180033 2692 const u64 size = min(vma->size, vma->node.size);
6f65e29a 2693
9c870d03
CW
2694 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2695 intel_runtime_pm_get(i915);
782f1495 2696 vma->vm->clear_range(vma->vm,
4fb84d99 2697 vma->node.start, size);
9c870d03
CW
2698 intel_runtime_pm_put(i915);
2699 }
06615ee5 2700
3272db53 2701 if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
6f65e29a 2702 appgtt->base.clear_range(&appgtt->base,
4fb84d99 2703 vma->node.start, size);
74163907
DV
2704}
2705
03ac84f1
CW
2706void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2707 struct sg_table *pages)
7c2e6fdf 2708{
52a05c30
DW
2709 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2710 struct device *kdev = &dev_priv->drm.pdev->dev;
307dc25b 2711 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5c042287 2712
307dc25b 2713 if (unlikely(ggtt->do_idle_maps)) {
22dd3bb9 2714 if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
307dc25b
CW
2715 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2716 /* Wait a bit, in hopes it avoids the hang */
2717 udelay(10);
2718 }
2719 }
5c042287 2720
03ac84f1 2721 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
7c2e6fdf 2722}
644ec02b 2723
45b186f1 2724static void i915_gtt_color_adjust(const struct drm_mm_node *node,
42d6ab48 2725 unsigned long color,
440fd528
TR
2726 u64 *start,
2727 u64 *end)
42d6ab48 2728{
a6508ded 2729 if (node->allocated && node->color != color)
f51455d4 2730 *start += I915_GTT_PAGE_SIZE;
42d6ab48 2731
a6508ded
CW
2732 /* Also leave a space between the unallocated reserved node after the
2733 * GTT and any objects within the GTT, i.e. we use the color adjustment
2734 * to insert a guard page to prevent prefetches crossing over the
2735 * GTT boundary.
2736 */
b44f97fd 2737 node = list_next_entry(node, node_list);
a6508ded 2738 if (node->color != color)
f51455d4 2739 *end -= I915_GTT_PAGE_SIZE;
42d6ab48 2740}
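/*
 * Effect of i915_gtt_color_adjust(), sketched: for a candidate hole
 * [start, end) whose neighbours both differ in colour, the usable range
 * shrinks by one GTT page on each side, e.g. [0x10000, 0x20000) becomes
 * [0x11000, 0x1f000) with I915_GTT_PAGE_SIZE == 4096. The page shaved
 * off the end is the prefetch guard described in the comment above.
 */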
fbe5d36e 2741
6cde9a02
CW
2742int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2743{
2744 struct i915_ggtt *ggtt = &i915->ggtt;
2745 struct i915_hw_ppgtt *ppgtt;
2746 int err;
2747
2748 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2749 if (!ppgtt)
2750 return -ENOMEM;
2751
2752 err = __hw_ppgtt_init(ppgtt, i915);
2753 if (err)
2754 goto err_ppgtt;
2755
2756 if (ppgtt->base.allocate_va_range) {
2757 err = ppgtt->base.allocate_va_range(&ppgtt->base,
2758 0, ppgtt->base.total);
2759 if (err)
2760 goto err_ppgtt_cleanup;
2761 }
2762
2763 ppgtt->base.clear_range(&ppgtt->base,
2764 ppgtt->base.start,
2765 ppgtt->base.total);
2766
2767 i915->mm.aliasing_ppgtt = ppgtt;
2768 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2769 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2770
2771 return 0;
2772
2773err_ppgtt_cleanup:
2774 ppgtt->base.cleanup(&ppgtt->base);
2775err_ppgtt:
2776 kfree(ppgtt);
2777 return err;
2778}
2779
2780void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2781{
2782 struct i915_ggtt *ggtt = &i915->ggtt;
2783 struct i915_hw_ppgtt *ppgtt;
2784
2785 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2786 if (!ppgtt)
2787 return;
2788
2789 ppgtt->base.cleanup(&ppgtt->base);
2790 kfree(ppgtt);
2791
2792 ggtt->base.bind_vma = ggtt_bind_vma;
2793}
2794
f6b9d5ca 2795int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
644ec02b 2796{
e78891ca
BW
2797 /* Let GEM manage all of the aperture.
2798 *
2799 * However, leave one page at the end still bound to the scratch page.
2800 * There are a number of places where the hardware apparently prefetches
2801 * past the end of the object, and we've seen multiple hangs with the
2802 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2803 * aperture. One page should be enough to keep any prefetching inside
2804 * of the aperture.
2805 */
72e96d64 2806 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ed2f3452 2807 unsigned long hole_start, hole_end;
f6b9d5ca 2808 struct drm_mm_node *entry;
fa76da34 2809 int ret;
644ec02b 2810
b02d22a3
ZW
2811 ret = intel_vgt_balloon(dev_priv);
2812 if (ret)
2813 return ret;
5dda8fa3 2814
95374d75 2815 /* Reserve a mappable slot for our lockless error capture */
4e64e553
CW
2816 ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2817 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2818 0, ggtt->mappable_end,
2819 DRM_MM_INSERT_LOW);
95374d75
CW
2820 if (ret)
2821 return ret;
2822
ed2f3452 2823 /* Clear any non-preallocated blocks */
72e96d64 2824 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
ed2f3452
CW
2825 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2826 hole_start, hole_end);
72e96d64 2827 ggtt->base.clear_range(&ggtt->base, hole_start,
4fb84d99 2828 hole_end - hole_start);
ed2f3452
CW
2829 }
2830
2831 /* And finally clear the reserved guard page */
f6b9d5ca 2832 ggtt->base.clear_range(&ggtt->base,
4fb84d99 2833 ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
6c5566a8 2834
97d6d7ab 2835 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
6cde9a02 2836 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
95374d75 2837 if (ret)
6cde9a02 2838 goto err;
fa76da34
DV
2839 }
2840
6c5566a8 2841 return 0;
95374d75 2842
95374d75
CW
2843err:
2844 drm_mm_remove_node(&ggtt->error_capture);
2845 return ret;
e76e9aeb
BW
2846}
2847
d85489d3
JL
2848/**
2849 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
97d6d7ab 2850 * @dev_priv: i915 device
d85489d3 2851 */
97d6d7ab 2852void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
90d0a0e8 2853{
72e96d64 2854 struct i915_ggtt *ggtt = &dev_priv->ggtt;
94d4a2a9
CW
2855 struct i915_vma *vma, *vn;
2856
2857 ggtt->base.closed = true;
2858
2859 mutex_lock(&dev_priv->drm.struct_mutex);
2860 WARN_ON(!list_empty(&ggtt->base.active_list));
2861 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2862 WARN_ON(i915_vma_unbind(vma));
2863 mutex_unlock(&dev_priv->drm.struct_mutex);
90d0a0e8 2864
6cde9a02 2865 i915_gem_fini_aliasing_ppgtt(dev_priv);
97d6d7ab 2866 i915_gem_cleanup_stolen(&dev_priv->drm);
a4eba47b 2867
95374d75
CW
2868 if (drm_mm_node_allocated(&ggtt->error_capture))
2869 drm_mm_remove_node(&ggtt->error_capture);
2870
72e96d64 2871 if (drm_mm_initialized(&ggtt->base.mm)) {
b02d22a3 2872 intel_vgt_deballoon(dev_priv);
5dda8fa3 2873
ed9724dd
MA
2874 mutex_lock(&dev_priv->drm.struct_mutex);
2875 i915_address_space_fini(&ggtt->base);
2876 mutex_unlock(&dev_priv->drm.struct_mutex);
90d0a0e8
DV
2877 }
2878
72e96d64 2879 ggtt->base.cleanup(&ggtt->base);
f6b9d5ca
CW
2880
2881 arch_phys_wc_del(ggtt->mtrr);
f7bbe788 2882 io_mapping_fini(&ggtt->mappable);
90d0a0e8 2883}
70e32544 2884
2c642b07 2885static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2886{
2887 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2888 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2889 return snb_gmch_ctl << 20;
2890}
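/*
 * Worked example for gen6_get_total_gtt_size(): a GGMS field of 2
 * yields 2 << 20 = 2 MiB of PTE space; at 4 bytes per gen6 PTE that is
 * 512 Ki entries covering a 2 GiB GGTT, matching the
 * size / sizeof(gen6_pte_t) << PAGE_SHIFT conversion in
 * gen6_gmch_probe() below.
 */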
2891
2c642b07 2892static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
9459d252
BW
2893{
2894 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2895 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2896 if (bdw_gmch_ctl)
2897 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
562d55d9
BW
2898
2899#ifdef CONFIG_X86_32
2900 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2901 if (bdw_gmch_ctl > 4)
2902 bdw_gmch_ctl = 4;
2903#endif
2904
9459d252
BW
2905 return bdw_gmch_ctl << 20;
2906}
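/*
 * The CONFIG_X86_32 clamp above, spelled out: bdw_gmch_ctl == 4 gives
 * 4 << 20 = 4 MiB of PTE space, and at 8 bytes per gen8 PTE that is
 * 524288 entries * 4 KiB = 2 GiB of GGTT, the most a 32-bit kernel is
 * allowed to map here.
 */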
2907
2c642b07 2908static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
d7f25f23
DL
2909{
2910 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2911 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2912
2913 if (gmch_ctrl)
2914 return 1 << (20 + gmch_ctrl);
2915
2916 return 0;
2917}
2918
2c642b07 2919static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2920{
2921 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2922 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
2923 return snb_gmch_ctl << 25; /* 32 MB units */
2924}
2925
2c642b07 2926static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
9459d252
BW
2927{
2928 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2929 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
2930 return bdw_gmch_ctl << 25; /* 32 MB units */
2931}
2932
d7f25f23
DL
2933static size_t chv_get_stolen_size(u16 gmch_ctrl)
2934{
2935 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2936 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2937
2938 /*
2939 * 0x0 to 0x10: 32MB increments starting at 0MB
2940 * 0x11 to 0x16: 4MB increments starting at 8MB
2941 * 0x17 to 0x1d: 4MB increments starting at 36MB
2942 */
2943 if (gmch_ctrl < 0x11)
2944 return gmch_ctrl << 25;
2945 else if (gmch_ctrl < 0x17)
2946 return (gmch_ctrl - 0x11 + 2) << 22;
2947 else
2948 return (gmch_ctrl - 0x17 + 9) << 22;
2949}
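/*
 * Worked examples for the CHV stolen-size decode above:
 *	gmch_ctrl = 0x10 -> 0x10 << 25              = 512 MiB (32 MiB steps)
 *	gmch_ctrl = 0x11 -> (0x11 - 0x11 + 2) << 22 =   8 MiB (4 MiB steps)
 *	gmch_ctrl = 0x17 -> (0x17 - 0x17 + 9) << 22 =  36 MiB (4 MiB steps)
 */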
2950
66375014
DL
2951static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2952{
2953 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2954 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2955
2956 if (gen9_gmch_ctl < 0xf0)
2957 return gen9_gmch_ctl << 25; /* 32 MB units */
2958 else
2959 /* 4MB increments starting at 0xf0 for 4MB */
2960 return (gen9_gmch_ctl - 0xf0 + 1) << 22;
2961}
2962
34c998b4 2963static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
63340133 2964{
49d73912
CW
2965 struct drm_i915_private *dev_priv = ggtt->base.i915;
2966 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2967 phys_addr_t phys_addr;
8bcdd0f7 2968 int ret;
63340133
BW
2969
2970 /* For Modern GENs the PTEs and register space are split in the BAR */
34c998b4 2971 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
63340133 2972
2a073f89
ID
2973 /*
2974 * On BXT writes larger than 64 bit to the GTT pagetable range will be
2975 * dropped. For WC mappings in general we have 64 byte burst writes
2976 * when the WC buffer is flushed, so we can't use it, but have to
2977 * resort to an uncached mapping. The WC issue is easily caught by the
2978 * readback check when writing GTT PTE entries.
2979 */
cc3f90f0 2980 if (IS_GEN9_LP(dev_priv))
34c998b4 2981 ggtt->gsm = ioremap_nocache(phys_addr, size);
2a073f89 2982 else
34c998b4 2983 ggtt->gsm = ioremap_wc(phys_addr, size);
72e96d64 2984 if (!ggtt->gsm) {
34c998b4 2985 DRM_ERROR("Failed to map the ggtt page table\n");
63340133
BW
2986 return -ENOMEM;
2987 }
2988
275a991c 2989 ret = setup_scratch_page(dev_priv, &ggtt->base.scratch_page, GFP_DMA32);
8bcdd0f7 2990 if (ret) {
63340133
BW
2991 DRM_ERROR("Scratch setup failed\n");
2992 /* iounmap will also get called at remove, but meh */
72e96d64 2993 iounmap(ggtt->gsm);
8bcdd0f7 2994 return ret;
63340133
BW
2995 }
2996
4ad2af1e 2997 return 0;
63340133
BW
2998}
2999
fbe5d36e
BW
3000/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
3001 * bits. When using advanced contexts each context stores its own PAT, but
3002 * writing this data shouldn't be harmful even in those cases. */
ee0ce478 3003static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
fbe5d36e 3004{
fbe5d36e
BW
3005 uint64_t pat;
3006
3007 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
3008 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
3009 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
3010 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
3011 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
3012 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
3013 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
3014 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
3015
2d1fe073 3016 if (!USES_PPGTT(dev_priv))
d6a8b72e
RV
3017 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
3018 * so RTL will always use the value corresponding to
3019 * pat_sel = 000".
3020 * So let's disable cache for GGTT to avoid screen corruptions.
3021 * MOCS still can be used though.
3022 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
3023 * before this patch, i.e. the same uncached + snooping access
3024 * like on gen6/7 seems to be in effect.
3025 * - So this just fixes blitter/render access. Again it looks
3026 * like it's not just uncached access, but uncached + snooping.
3027 * So we can still hold onto all our assumptions wrt cpu
3028 * clflushing on LLC machines.
3029 */
3030 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
3031
fbe5d36e
BW
3032 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
3033 * write would work. */
7e435ad2
VS
3034 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
3035 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
fbe5d36e
BW
3036}
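/*
 * Sketch of the PPAT packing above, assuming GEN8_PPAT(i, x) shifts the
 * 8-bit entry x into byte i of a 64-bit value: entry 0 occupies bits
 * 7:0 and entry 7 bits 63:56, and the pair of 32-bit writes at the end
 * of each setup function splits that value across GEN8_PRIVATE_PAT_LO
 * and GEN8_PRIVATE_PAT_HI.
 */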
3037
ee0ce478
VS
3038static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
3039{
3040 uint64_t pat;
3041
3042 /*
3043 * Map WB on BDW to snooped on CHV.
3044 *
3045 * Only the snoop bit has meaning for CHV, the rest is
3046 * ignored.
3047 *
cf3d262e
VS
3048 * The hardware will never snoop for certain types of accesses:
3049 * - CPU GTT (GMADR->GGTT->no snoop->memory)
3050 * - PPGTT page tables
3051 * - some other special cycles
3052 *
3053 * As with BDW, we also need to consider the following for GT accesses:
3054 * "For GGTT, there is NO pat_sel[2:0] from the entry,
3055 * so RTL will always use the value corresponding to
3056 * pat_sel = 000".
3057 * Which means we must set the snoop bit in PAT entry 0
3058 * in order to keep the global status page working.
ee0ce478
VS
3059 */
3060 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
3061 GEN8_PPAT(1, 0) |
3062 GEN8_PPAT(2, 0) |
3063 GEN8_PPAT(3, 0) |
3064 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
3065 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
3066 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
3067 GEN8_PPAT(7, CHV_PPAT_SNOOP);
3068
7e435ad2
VS
3069 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
3070 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
ee0ce478
VS
3071}
3072
34c998b4
CW
3073static void gen6_gmch_remove(struct i915_address_space *vm)
3074{
3075 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3076
3077 iounmap(ggtt->gsm);
49d73912 3078 cleanup_scratch_page(vm->i915, &vm->scratch_page);
34c998b4
CW
3079}
3080
d507d735 3081static int gen8_gmch_probe(struct i915_ggtt *ggtt)
63340133 3082{
49d73912 3083 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 3084 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 3085 unsigned int size;
63340133 3086 u16 snb_gmch_ctl;
63340133
BW
3087
3088 /* TODO: We're not aware of mappable constraints on gen8 yet */
97d6d7ab
CW
3089 ggtt->mappable_base = pci_resource_start(pdev, 2);
3090 ggtt->mappable_end = pci_resource_len(pdev, 2);
63340133 3091
97d6d7ab
CW
3092 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
3093 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
63340133 3094
97d6d7ab 3095 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
63340133 3096
97d6d7ab 3097 if (INTEL_GEN(dev_priv) >= 9) {
d507d735 3098 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
34c998b4 3099 size = gen8_get_total_gtt_size(snb_gmch_ctl);
97d6d7ab 3100 } else if (IS_CHERRYVIEW(dev_priv)) {
d507d735 3101 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
34c998b4 3102 size = chv_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3103 } else {
d507d735 3104 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
34c998b4 3105 size = gen8_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3106 }
63340133 3107
34c998b4 3108 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
63340133 3109
cc3f90f0 3110 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
ee0ce478
VS
3111 chv_setup_private_ppat(dev_priv);
3112 else
3113 bdw_setup_private_ppat(dev_priv);
fbe5d36e 3114
34c998b4 3115 ggtt->base.cleanup = gen6_gmch_remove;
d507d735
JL
3116 ggtt->base.bind_vma = ggtt_bind_vma;
3117 ggtt->base.unbind_vma = ggtt_unbind_vma;
d6473f56 3118 ggtt->base.insert_page = gen8_ggtt_insert_page;
f7770bfd 3119 ggtt->base.clear_range = nop_clear_range;
48f112fe 3120 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
f7770bfd
CW
3121 ggtt->base.clear_range = gen8_ggtt_clear_range;
3122
3123 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3124 if (IS_CHERRYVIEW(dev_priv))
3125 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3126
7c3f86b6
CW
3127 ggtt->invalidate = gen6_ggtt_invalidate;
3128
34c998b4 3129 return ggtt_probe_common(ggtt, size);
63340133
BW
3130}
3131
d507d735 3132static int gen6_gmch_probe(struct i915_ggtt *ggtt)
e76e9aeb 3133{
49d73912 3134 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 3135 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 3136 unsigned int size;
e76e9aeb 3137 u16 snb_gmch_ctl;
e76e9aeb 3138
97d6d7ab
CW
3139 ggtt->mappable_base = pci_resource_start(pdev, 2);
3140 ggtt->mappable_end = pci_resource_len(pdev, 2);
41907ddc 3141
baa09f5f
BW
3142 /* 64/512MB is the current min/max we actually know of, but this is just
3143 * a coarse sanity check.
e76e9aeb 3144 */
34c998b4 3145 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
d507d735 3146 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
baa09f5f 3147 return -ENXIO;
e76e9aeb
BW
3148 }
3149
97d6d7ab
CW
3150 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
3151 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3152 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
e76e9aeb 3153
d507d735 3154 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
e76e9aeb 3155
34c998b4
CW
3156 size = gen6_get_total_gtt_size(snb_gmch_ctl);
3157 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
e76e9aeb 3158
d507d735 3159 ggtt->base.clear_range = gen6_ggtt_clear_range;
d6473f56 3160 ggtt->base.insert_page = gen6_ggtt_insert_page;
d507d735
JL
3161 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
3162 ggtt->base.bind_vma = ggtt_bind_vma;
3163 ggtt->base.unbind_vma = ggtt_unbind_vma;
34c998b4
CW
3164 ggtt->base.cleanup = gen6_gmch_remove;
3165
7c3f86b6
CW
3166 ggtt->invalidate = gen6_ggtt_invalidate;
3167
34c998b4
CW
3168 if (HAS_EDRAM(dev_priv))
3169 ggtt->base.pte_encode = iris_pte_encode;
3170 else if (IS_HASWELL(dev_priv))
3171 ggtt->base.pte_encode = hsw_pte_encode;
3172 else if (IS_VALLEYVIEW(dev_priv))
3173 ggtt->base.pte_encode = byt_pte_encode;
3174 else if (INTEL_GEN(dev_priv) >= 7)
3175 ggtt->base.pte_encode = ivb_pte_encode;
3176 else
3177 ggtt->base.pte_encode = snb_pte_encode;
7faf1ab2 3178
34c998b4 3179 return ggtt_probe_common(ggtt, size);
e76e9aeb
BW
3180}
3181
34c998b4 3182static void i915_gmch_remove(struct i915_address_space *vm)
e76e9aeb 3183{
34c998b4 3184 intel_gmch_remove();
644ec02b 3185}
baa09f5f 3186
d507d735 3187static int i915_gmch_probe(struct i915_ggtt *ggtt)
baa09f5f 3188{
49d73912 3189 struct drm_i915_private *dev_priv = ggtt->base.i915;
baa09f5f
BW
3190 int ret;
3191
91c8a326 3192 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
baa09f5f
BW
3193 if (!ret) {
3194 DRM_ERROR("failed to set up gmch\n");
3195 return -EIO;
3196 }
3197
edd1f2fe
CW
3198 intel_gtt_get(&ggtt->base.total,
3199 &ggtt->stolen_size,
3200 &ggtt->mappable_base,
3201 &ggtt->mappable_end);
baa09f5f 3202
97d6d7ab 3203 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
d6473f56 3204 ggtt->base.insert_page = i915_ggtt_insert_page;
d507d735
JL
3205 ggtt->base.insert_entries = i915_ggtt_insert_entries;
3206 ggtt->base.clear_range = i915_ggtt_clear_range;
3207 ggtt->base.bind_vma = ggtt_bind_vma;
3208 ggtt->base.unbind_vma = ggtt_unbind_vma;
34c998b4 3209 ggtt->base.cleanup = i915_gmch_remove;
baa09f5f 3210
7c3f86b6
CW
3211 ggtt->invalidate = gmch_ggtt_invalidate;
3212
d507d735 3213 if (unlikely(ggtt->do_idle_maps))
c0a7f818
CW
3214 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3215
baa09f5f
BW
3216 return 0;
3217}
3218
d85489d3 3219/**
0088e522 3220 * i915_ggtt_probe_hw - Probe GGTT hardware location
97d6d7ab 3221 * @dev_priv: i915 device
d85489d3 3222 */
97d6d7ab 3223int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
baa09f5f 3224{
62106b4f 3225 struct i915_ggtt *ggtt = &dev_priv->ggtt;
baa09f5f
BW
3226 int ret;
3227
49d73912 3228 ggtt->base.i915 = dev_priv;
c114f76a 3229
34c998b4
CW
3230 if (INTEL_GEN(dev_priv) <= 5)
3231 ret = i915_gmch_probe(ggtt);
3232 else if (INTEL_GEN(dev_priv) < 8)
3233 ret = gen6_gmch_probe(ggtt);
3234 else
3235 ret = gen8_gmch_probe(ggtt);
a54c0c27 3236 if (ret)
baa09f5f 3237 return ret;
baa09f5f 3238
db9309a5
CW
3239 /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
3240 * This is easier than doing range restriction on the fly, as we
3241 * currently don't have any bits spare to pass in this upper
3242 * restriction!
3243 */
3244 if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
3245 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
3246 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3247 }
3248
c890e2d5
CW
3249 if ((ggtt->base.total - 1) >> 32) {
3250 DRM_ERROR("We never expected a Global GTT with more than 32bits"
f6b9d5ca 3251 " of address space! Found %lldM!\n",
c890e2d5
CW
3252 ggtt->base.total >> 20);
3253 ggtt->base.total = 1ULL << 32;
3254 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3255 }
3256
f6b9d5ca
CW
3257 if (ggtt->mappable_end > ggtt->base.total) {
3258 DRM_ERROR("mappable aperture extends past end of GGTT,"
3259 " aperture=%llx, total=%llx\n",
3260 ggtt->mappable_end, ggtt->base.total);
3261 ggtt->mappable_end = ggtt->base.total;
3262 }
3263
baa09f5f 3264 /* GMADR is the PCI mmio aperture into the global GTT. */
c44ef60e 3265 DRM_INFO("Memory usable by graphics device = %lluM\n",
62106b4f
JL
3266 ggtt->base.total >> 20);
3267 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
edd1f2fe 3268 DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
5db6c735
DV
3269#ifdef CONFIG_INTEL_IOMMU
3270 if (intel_iommu_gfx_mapped)
3271 DRM_INFO("VT-d active for gfx access\n");
3272#endif
baa09f5f
BW
3273
3274 return 0;
0088e522
CW
3275}
3276
3277/**
3278 * i915_ggtt_init_hw - Initialize GGTT hardware
97d6d7ab 3279 * @dev_priv: i915 device
0088e522 3280 */
97d6d7ab 3281int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
0088e522 3282{
0088e522
CW
3283 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3284 int ret;
3285
f6b9d5ca
CW
3286 INIT_LIST_HEAD(&dev_priv->vm_list);
3287
a6508ded
CW
3288 /* Note that we use page colouring to enforce a guard page at the
3289 * end of the address space. This is required as the CS may prefetch
3290 * beyond the end of the batch buffer, across the page boundary,
3291 * and beyond the end of the GTT if we do not provide a guard.
f6b9d5ca 3292 */
80b204bc 3293 mutex_lock(&dev_priv->drm.struct_mutex);
80b204bc 3294 i915_address_space_init(&ggtt->base, dev_priv, "[global]");
a6508ded 3295 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
f6b9d5ca 3296 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
80b204bc 3297 mutex_unlock(&dev_priv->drm.struct_mutex);
f6b9d5ca 3298
f7bbe788
CW
3299 if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
3300 dev_priv->ggtt.mappable_base,
3301 dev_priv->ggtt.mappable_end)) {
f6b9d5ca
CW
3302 ret = -EIO;
3303 goto out_gtt_cleanup;
3304 }
3305
3306 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
3307
0088e522
CW
3308 /*
3309 * Initialise stolen early so that we may reserve preallocated
3310 * objects for the BIOS to KMS transition.
3311 */
7ace3d30 3312 ret = i915_gem_init_stolen(dev_priv);
0088e522
CW
3313 if (ret)
3314 goto out_gtt_cleanup;
3315
3316 return 0;
a4eba47b
ID
3317
3318out_gtt_cleanup:
72e96d64 3319 ggtt->base.cleanup(&ggtt->base);
a4eba47b 3320 return ret;
baa09f5f 3321}
6f65e29a 3322
97d6d7ab 3323int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
ac840ae5 3324{
97d6d7ab 3325 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
ac840ae5
VS
3326 return -EIO;
3327
3328 return 0;
3329}
3330
7c3f86b6
CW
3331void i915_ggtt_enable_guc(struct drm_i915_private *i915)
3332{
3333 i915->ggtt.invalidate = guc_ggtt_invalidate;
3334}
3335
3336void i915_ggtt_disable_guc(struct drm_i915_private *i915)
3337{
3338 i915->ggtt.invalidate = gen6_ggtt_invalidate;
3339}
3340
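/*
 * Illustrative calling sketch (not driver code): the invalidate hook is
 * swapped around GuC bring-up and tear-down, so the expected pattern from
 * the GuC setup path would be something like:
 *
 *	i915_ggtt_enable_guc(i915);
 *	... while the GuC is active, GGTT updates that end in
 *	    i915_ggtt_invalidate() now go through guc_ggtt_invalidate() ...
 *	i915_ggtt_disable_guc(i915);
 */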
275a991c 3341void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
fa42331b 3342{
72e96d64 3343 struct i915_ggtt *ggtt = &dev_priv->ggtt;
fbb30a5c 3344 struct drm_i915_gem_object *obj, *on;
fa42331b 3345
dc97997a 3346 i915_check_and_clear_faults(dev_priv);
fa42331b
DV
3347
3348 /* First fill our portion of the GTT with scratch pages */
4fb84d99 3349 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total);
fa42331b 3350
fbb30a5c
CW
3351 ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3352
3353 /* clflush objects bound into the GGTT and rebind them. */
3354 list_for_each_entry_safe(obj, on,
56cea323 3355 &dev_priv->mm.bound_list, global_link) {
fbb30a5c
CW
3356 bool ggtt_bound = false;
3357 struct i915_vma *vma;
3358
1c7f4bca 3359 list_for_each_entry(vma, &obj->vma_list, obj_link) {
72e96d64 3360 if (vma->vm != &ggtt->base)
2c3d9984 3361 continue;
fa42331b 3362
fbb30a5c
CW
3363 if (!i915_vma_unbind(vma))
3364 continue;
3365
2c3d9984
TU
3366 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3367 PIN_UPDATE));
fbb30a5c 3368 ggtt_bound = true;
2c3d9984
TU
3369 }
3370
fbb30a5c 3371 if (ggtt_bound)
975f7ff4 3372 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
2c3d9984 3373 }
fa42331b 3374
fbb30a5c
CW
3375 ggtt->base.closed = false;
3376
275a991c 3377 if (INTEL_GEN(dev_priv) >= 8) {
cc3f90f0 3378 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
fa42331b
DV
3379 chv_setup_private_ppat(dev_priv);
3380 else
3381 bdw_setup_private_ppat(dev_priv);
3382
3383 return;
3384 }
3385
275a991c 3386 if (USES_PPGTT(dev_priv)) {
72e96d64
JL
3387 struct i915_address_space *vm;
3388
fa42331b
DV
3389 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3390 /* TODO: Perhaps it shouldn't be gen6 specific */
3391
e5716f55 3392 struct i915_hw_ppgtt *ppgtt;
fa42331b 3393
2bfa996e 3394 if (i915_is_ggtt(vm))
fa42331b 3395 ppgtt = dev_priv->mm.aliasing_ppgtt;
e5716f55
JL
3396 else
3397 ppgtt = i915_vm_to_ppgtt(vm);
fa42331b
DV
3398
3399 gen6_write_page_range(dev_priv, &ppgtt->pd,
3400 0, ppgtt->base.total);
3401 }
3402 }
3403
7c3f86b6 3404 i915_ggtt_invalidate(dev_priv);
fa42331b
DV
3405}
3406
804beb4b 3407static struct scatterlist *
2d7f3bdb 3408rotate_pages(const dma_addr_t *in, unsigned int offset,
804beb4b 3409 unsigned int width, unsigned int height,
87130255 3410 unsigned int stride,
804beb4b 3411 struct sg_table *st, struct scatterlist *sg)
50470bb0
TU
3412{
3413 unsigned int column, row;
3414 unsigned int src_idx;
50470bb0 3415
50470bb0 3416 for (column = 0; column < width; column++) {
87130255 3417 src_idx = stride * (height - 1) + column;
50470bb0
TU
3418 for (row = 0; row < height; row++) {
3419 st->nents++;
3420 /* We don't need the pages, but need to initialize
3422 * The only thing we need are DMA addresses.
3423 */
3424 sg_set_page(sg, NULL, PAGE_SIZE, 0);
804beb4b 3425 sg_dma_address(sg) = in[offset + src_idx];
50470bb0
TU
3426 sg_dma_len(sg) = PAGE_SIZE;
3427 sg = sg_next(sg);
87130255 3428 src_idx -= stride;
50470bb0
TU
3429 }
3430 }
804beb4b
TU
3431
3432 return sg;
50470bb0
TU
3433}
3434
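/*
 * Worked example (illustrative only, offset=0 assumed): for a single plane
 * with width=2, height=2 and stride=2, source pages laid out row-major as
 *
 *	0 1
 *	2 3
 *
 * are emitted column by column, bottom row first:
 *
 *	in[2], in[0], in[3], in[1]
 *
 * i.e. src_idx starts at stride * (height - 1) + column and steps back by
 * stride per row, producing the 90 degree rotated walk of the plane.
 */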
ba7a5741
CW
3435static noinline struct sg_table *
3436intel_rotate_pages(struct intel_rotation_info *rot_info,
3437 struct drm_i915_gem_object *obj)
50470bb0 3438{
85d1225e 3439 const size_t n_pages = obj->base.size / PAGE_SIZE;
6687c906 3440 unsigned int size = intel_rotation_info_size(rot_info);
85d1225e
DG
3441 struct sgt_iter sgt_iter;
3442 dma_addr_t dma_addr;
50470bb0
TU
3443 unsigned long i;
3444 dma_addr_t *page_addr_list;
3445 struct sg_table *st;
89e3e142 3446 struct scatterlist *sg;
1d00dad5 3447 int ret = -ENOMEM;
50470bb0 3448
50470bb0 3449 /* Allocate a temporary list of source pages for random access. */
85d1225e 3450 page_addr_list = drm_malloc_gfp(n_pages,
f2a85e19
CW
3451 sizeof(dma_addr_t),
3452 GFP_TEMPORARY);
50470bb0
TU
3453 if (!page_addr_list)
3454 return ERR_PTR(ret);
3455
3456 /* Allocate target SG list. */
3457 st = kmalloc(sizeof(*st), GFP_KERNEL);
3458 if (!st)
3459 goto err_st_alloc;
3460
6687c906 3461 ret = sg_alloc_table(st, size, GFP_KERNEL);
50470bb0
TU
3462 if (ret)
3463 goto err_sg_alloc;
3464
3465 /* Populate source page list from the object. */
3466 i = 0;
a4f5ea64 3467 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
85d1225e 3468 page_addr_list[i++] = dma_addr;
50470bb0 3469
85d1225e 3470 GEM_BUG_ON(i != n_pages);
11f20322
VS
3471 st->nents = 0;
3472 sg = st->sgl;
3473
6687c906
VS
3474 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3475 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3476 rot_info->plane[i].width, rot_info->plane[i].height,
3477 rot_info->plane[i].stride, st, sg);
89e3e142
TU
3478 }
3479
6687c906
VS
3480 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
3481 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
50470bb0
TU
3482
3483 drm_free_large(page_addr_list);
3484
3485 return st;
3486
3487err_sg_alloc:
3488 kfree(st);
3489err_st_alloc:
3490 drm_free_large(page_addr_list);
3491
6687c906
VS
3492 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3493 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3494
50470bb0
TU
3495 return ERR_PTR(ret);
3496}
ec7adb6e 3497
ba7a5741 3498static noinline struct sg_table *
8bd7ef16
JL
3499intel_partial_pages(const struct i915_ggtt_view *view,
3500 struct drm_i915_gem_object *obj)
3501{
3502 struct sg_table *st;
d2a84a76 3503 struct scatterlist *sg, *iter;
8bab1193 3504 unsigned int count = view->partial.size;
d2a84a76 3505 unsigned int offset;
8bd7ef16
JL
3506 int ret = -ENOMEM;
3507
3508 st = kmalloc(sizeof(*st), GFP_KERNEL);
3509 if (!st)
3510 goto err_st_alloc;
3511
d2a84a76 3512 ret = sg_alloc_table(st, count, GFP_KERNEL);
8bd7ef16
JL
3513 if (ret)
3514 goto err_sg_alloc;
3515
8bab1193 3516 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
d2a84a76
CW
3517 GEM_BUG_ON(!iter);
3518
8bd7ef16
JL
3519 sg = st->sgl;
3520 st->nents = 0;
d2a84a76
CW
3521 do {
3522 unsigned int len;
8bd7ef16 3523
d2a84a76
CW
3524 len = min(iter->length - (offset << PAGE_SHIFT),
3525 count << PAGE_SHIFT);
3526 sg_set_page(sg, NULL, len, 0);
3527 sg_dma_address(sg) =
3528 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3529 sg_dma_len(sg) = len;
8bd7ef16 3530
8bd7ef16 3531 st->nents++;
d2a84a76
CW
3532 count -= len >> PAGE_SHIFT;
3533 if (count == 0) {
3534 sg_mark_end(sg);
3535 return st;
3536 }
8bd7ef16 3537
d2a84a76
CW
3538 sg = __sg_next(sg);
3539 iter = __sg_next(iter);
3540 offset = 0;
3541 } while (1);
8bd7ef16
JL
3542
3543err_sg_alloc:
3544 kfree(st);
3545err_st_alloc:
3546 return ERR_PTR(ret);
3547}
3548
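/*
 * Illustrative sketch (field names as used above; the real construction
 * site lives elsewhere in the driver): a partial view mapping 16 pages of
 * an object, starting from its 8th page, could be described as
 *
 *	struct i915_ggtt_view view = {
 *		.type = I915_GGTT_VIEW_PARTIAL,
 *	};
 *
 *	view.partial.offset = 8;	// first page of the object to map
 *	view.partial.size = 16;		// number of pages to map
 *
 * intel_partial_pages() then copies exactly that page range from the
 * object's backing sg_table into the new table.
 */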
70b9f6f8 3549static int
50470bb0 3550i915_get_ggtt_vma_pages(struct i915_vma *vma)
fe14d5f4 3551{
ba7a5741 3552 int ret;
50470bb0 3553
2c3a3f44
CW
3554 /* The vma->pages are only valid within the lifespan of the borrowed
3555 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3556 * must be the vma->pages. A simple rule is that vma->pages must only
3557 * be accessed when the obj->mm.pages are pinned.
3558 */
3559 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3560
ba7a5741
CW
3561 switch (vma->ggtt_view.type) {
3562 case I915_GGTT_VIEW_NORMAL:
3563 vma->pages = vma->obj->mm.pages;
fe14d5f4
TU
3564 return 0;
3565
ba7a5741 3566 case I915_GGTT_VIEW_ROTATED:
247177dd 3567 vma->pages =
ba7a5741
CW
3568 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3569 break;
3570
3571 case I915_GGTT_VIEW_PARTIAL:
247177dd 3572 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
ba7a5741
CW
3573 break;
3574
3575 default:
fe14d5f4
TU
3576 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3577 vma->ggtt_view.type);
ba7a5741
CW
3578 return -EINVAL;
3579 }
fe14d5f4 3580
ba7a5741
CW
3581 ret = 0;
3582 if (unlikely(IS_ERR(vma->pages))) {
247177dd
CW
3583 ret = PTR_ERR(vma->pages);
3584 vma->pages = NULL;
50470bb0
TU
3585 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3586 vma->ggtt_view.type, ret);
fe14d5f4 3587 }
50470bb0 3588 return ret;
fe14d5f4
TU
3589}
3590
625d988a
CW
3591/**
3592 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
a4dbf7cf
CW
3593 * @vm: the &struct i915_address_space
 3594  * @node: the &struct drm_mm_node (typically i915_vma.node)
3595 * @size: how much space to allocate inside the GTT,
3596 * must be #I915_GTT_PAGE_SIZE aligned
3597 * @offset: where to insert inside the GTT,
3598 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3599 * (@offset + @size) must fit within the address space
3600 * @color: color to apply to node, if this node is not from a VMA,
3601 * color must be #I915_COLOR_UNEVICTABLE
3602 * @flags: control search and eviction behaviour
625d988a
CW
3603 *
3604 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3605 * the address space (using @size and @color). If the @node does not fit, it
3606 * tries to evict any overlapping nodes from the GTT, including any
3607 * neighbouring nodes if the colors do not match (to ensure guard pages between
3608 * differing domains). See i915_gem_evict_for_node() for the gory details
 3609  * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3610 * evicting active overlapping objects, and any overlapping node that is pinned
3611 * or marked as unevictable will also result in failure.
3612 *
3613 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3614 * asked to wait for eviction and interrupted.
3615 */
3616int i915_gem_gtt_reserve(struct i915_address_space *vm,
3617 struct drm_mm_node *node,
3618 u64 size, u64 offset, unsigned long color,
3619 unsigned int flags)
3620{
3621 int err;
3622
3623 GEM_BUG_ON(!size);
3624 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3625 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3626 GEM_BUG_ON(range_overflows(offset, size, vm->total));
3fec7ec4 3627 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
9734ad13 3628 GEM_BUG_ON(drm_mm_node_allocated(node));
625d988a
CW
3629
3630 node->size = size;
3631 node->start = offset;
3632 node->color = color;
3633
3634 err = drm_mm_reserve_node(&vm->mm, node);
3635 if (err != -ENOSPC)
3636 return err;
3637
3638 err = i915_gem_evict_for_node(vm, node, flags);
3639 if (err == 0)
3640 err = drm_mm_reserve_node(&vm->mm, node);
3641
3642 return err;
3643}
3644
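/*
 * Usage sketch (illustrative; the sizes are arbitrary choices and the node
 * here is a raw node, not backed by a VMA, so it must use
 * I915_COLOR_UNEVICTABLE per the rules asserted above):
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(&ggtt->base, &node,
 *				   SZ_64K,	// size, 4KiB aligned
 *				   SZ_1M,	// fixed offset, 4KiB aligned
 *				   I915_COLOR_UNEVICTABLE,
 *				   PIN_NONBLOCK);
 *	if (err)	// -ENOSPC if the range could not be made free
 *		return err;
 */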
606fec95
CW
3645static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3646{
3647 u64 range, addr;
3648
3649 GEM_BUG_ON(range_overflows(start, len, end));
3650 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3651
3652 range = round_down(end - len, align) - round_up(start, align);
3653 if (range) {
3654 if (sizeof(unsigned long) == sizeof(u64)) {
3655 addr = get_random_long();
3656 } else {
3657 addr = get_random_int();
3658 if (range > U32_MAX) {
3659 addr <<= 32;
3660 addr |= get_random_int();
3661 }
3662 }
3663 div64_u64_rem(addr, range, &addr);
3664 start += addr;
3665 }
3666
3667 return round_up(start, align);
3668}
3669
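/*
 * Worked example (illustrative): start=0, end=1MiB, len=64KiB, align=4KiB.
 * range = round_down(1MiB - 64KiB, 4KiB) - round_up(0, 4KiB) = 960KiB, so
 * addr is reduced into [0, 960KiB) and the rounded-up result is a 4KiB
 * aligned offset in [0, 960KiB]; even the largest value leaves the whole
 * [offset, offset + len) window inside [start, end).
 */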
e007b19d
CW
3670/**
3671 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
a4dbf7cf
CW
3672 * @vm: the &struct i915_address_space
3673 * @node: the &struct drm_mm_node (typically i915_vma.node)
3674 * @size: how much space to allocate inside the GTT,
3675 * must be #I915_GTT_PAGE_SIZE aligned
3676 * @alignment: required alignment of starting offset, may be 0 but
3677 * if specified, this must be a power-of-two and at least
3678 * #I915_GTT_MIN_ALIGNMENT
3679 * @color: color to apply to node
3680 * @start: start of any range restriction inside GTT (0 for all),
e007b19d 3681 * must be #I915_GTT_PAGE_SIZE aligned
a4dbf7cf
CW
3682 * @end: end of any range restriction inside GTT (U64_MAX for all),
3683 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3684 * @flags: control search and eviction behaviour
e007b19d
CW
3685 *
3686 * i915_gem_gtt_insert() first searches for an available hole into which
 3687  * it can insert the node. The hole address is aligned to @alignment and
3688 * its @size must then fit entirely within the [@start, @end] bounds. The
3689 * nodes on either side of the hole must match @color, or else a guard page
3690 * will be inserted between the two nodes (or the node evicted). If no
606fec95
CW
3691 * suitable hole is found, first a victim is randomly selected and tested
 3692  * for eviction; if that fails, the LRU list of objects within the GTT
e007b19d
CW
3693 * is scanned to find the first set of replacement nodes to create the hole.
3694 * Those old overlapping nodes are evicted from the GTT (and so must be
3695 * rebound before any future use). Any node that is currently pinned cannot
 3696  * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3697 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3698 * searching for an eviction candidate. See i915_gem_evict_something() for
3699 * the gory details on the eviction algorithm.
3700 *
3701 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3702 * asked to wait for eviction and interrupted.
3703 */
3704int i915_gem_gtt_insert(struct i915_address_space *vm,
3705 struct drm_mm_node *node,
3706 u64 size, u64 alignment, unsigned long color,
3707 u64 start, u64 end, unsigned int flags)
3708{
4e64e553 3709 enum drm_mm_insert_mode mode;
606fec95 3710 u64 offset;
e007b19d
CW
3711 int err;
3712
3713 lockdep_assert_held(&vm->i915->drm.struct_mutex);
3714 GEM_BUG_ON(!size);
3715 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3716 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3717 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3718 GEM_BUG_ON(start >= end);
3719 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3720 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3fec7ec4 3721 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
9734ad13 3722 GEM_BUG_ON(drm_mm_node_allocated(node));
e007b19d
CW
3723
3724 if (unlikely(range_overflows(start, size, end)))
3725 return -ENOSPC;
3726
3727 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3728 return -ENOSPC;
3729
4e64e553
CW
3730 mode = DRM_MM_INSERT_BEST;
3731 if (flags & PIN_HIGH)
3732 mode = DRM_MM_INSERT_HIGH;
3733 if (flags & PIN_MAPPABLE)
3734 mode = DRM_MM_INSERT_LOW;
e007b19d
CW
3735
3736 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3737 * so we know that we always have a minimum alignment of 4096.
3738 * The drm_mm range manager is optimised to return results
3739 * with zero alignment, so where possible use the optimal
3740 * path.
3741 */
3742 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3743 if (alignment <= I915_GTT_MIN_ALIGNMENT)
3744 alignment = 0;
3745
4e64e553
CW
3746 err = drm_mm_insert_node_in_range(&vm->mm, node,
3747 size, alignment, color,
3748 start, end, mode);
e007b19d
CW
3749 if (err != -ENOSPC)
3750 return err;
3751
606fec95
CW
3752 /* No free space, pick a slot at random.
3753 *
3754 * There is a pathological case here using a GTT shared between
3755 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
3756 *
3757 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
3758 * (64k objects) (448k objects)
3759 *
3760 * Now imagine that the eviction LRU is ordered top-down (just because
3761 * pathology meets real life), and that we need to evict an object to
3762 * make room inside the aperture. The eviction scan then has to walk
3763 * the 448k list before it finds one within range. And now imagine that
3764 * it has to search for a new hole between every byte inside the memcpy,
3765 * for several simultaneous clients.
3766 *
3767 * On a full-ppgtt system, if we have run out of available space, there
3768 * will be lots and lots of objects in the eviction list! Again,
3769 * searching that LRU list may be slow if we are also applying any
3770 * range restrictions (e.g. restriction to low 4GiB) and so, for
 3771 	 * simplicity and similarity between different GTTs, try the single
3772 * random replacement first.
3773 */
3774 offset = random_offset(start, end,
3775 size, alignment ?: I915_GTT_MIN_ALIGNMENT);
3776 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
3777 if (err != -ENOSPC)
3778 return err;
3779
3780 /* Randomly selected placement is pinned, do a search */
e007b19d
CW
3781 err = i915_gem_evict_something(vm, size, alignment, color,
3782 start, end, flags);
3783 if (err)
3784 return err;
3785
4e64e553
CW
3786 return drm_mm_insert_node_in_range(&vm->mm, node,
3787 size, alignment, color,
3788 start, end, DRM_MM_INSERT_EVICT);
e007b19d 3789}
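/*
 * Usage sketch (illustrative; size and flags are arbitrary choices, and the
 * caller must hold struct_mutex as asserted above): finding space for 2MiB
 * anywhere in the GGTT, preferring the top of the address space and falling
 * back to the random-then-LRU eviction described above:
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_insert(&ggtt->base, &node,
 *				  SZ_2M, 0, I915_COLOR_UNEVICTABLE,
 *				  0, ggtt->base.total,
 *				  PIN_HIGH);
 */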
3b5bb0a3
CW
3790
3791#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3792#include "selftests/mock_gtt.c"
1c42819a 3793#include "selftests/i915_gem_gtt.c"
3b5bb0a3 3794#endif