]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/i915/i915_gem_gtt.c
drm/i915: Make IS_SKYLAKE only take dev_priv
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / i915 / i915_gem_gtt.c
CommitLineData
76aaf220
DV
1/*
2 * Copyright © 2010 Daniel Vetter
c4ac524c 3 * Copyright © 2011-2014 Intel Corporation
76aaf220
DV
4 *
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
14 * Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 * IN THE SOFTWARE.
23 *
24 */
25
0e46ce2e 26#include <linux/seq_file.h>
5bab6f60 27#include <linux/stop_machine.h>
760285e7
DH
28#include <drm/drmP.h>
29#include <drm/i915_drm.h>
76aaf220 30#include "i915_drv.h"
5dda8fa3 31#include "i915_vgpu.h"
76aaf220
DV
32#include "i915_trace.h"
33#include "intel_drv.h"
34
bb8f9cff
CW
35#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)
36
45f8f69a
TU
37/**
38 * DOC: Global GTT views
39 *
40 * Background and previous state
41 *
42 * Historically objects could exists (be bound) in global GTT space only as
43 * singular instances with a view representing all of the object's backing pages
44 * in a linear fashion. This view will be called a normal view.
45 *
46 * To support multiple views of the same object, where the number of mapped
47 * pages is not equal to the backing store, or where the layout of the pages
48 * is not linear, concept of a GGTT view was added.
49 *
50 * One example of an alternative view is a stereo display driven by a single
51 * image. In this case we would have a framebuffer looking like this
52 * (2x2 pages):
53 *
54 * 12
55 * 34
56 *
57 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
58 * rendering. In contrast, fed to the display engine would be an alternative
59 * view which could look something like this:
60 *
61 * 1212
62 * 3434
63 *
64 * In this example both the size and layout of pages in the alternative view is
65 * different from the normal view.
66 *
67 * Implementation and usage
68 *
69 * GGTT views are implemented using VMAs and are distinguished via enum
70 * i915_ggtt_view_type and struct i915_ggtt_view.
71 *
72 * A new flavour of core GEM functions which work with GGTT bound objects were
ec7adb6e
JL
73 * added with the _ggtt_ infix, and sometimes with _view postfix to avoid
74 * renaming in large amounts of code. They take the struct i915_ggtt_view
75 * parameter encapsulating all metadata required to implement a view.
45f8f69a
TU
76 *
77 * As a helper for callers which are only interested in the normal view,
78 * globally const i915_ggtt_view_normal singleton instance exists. All old core
79 * GEM API functions, the ones not taking the view parameter, are operating on,
80 * or with the normal GGTT view.
81 *
82 * Code wanting to add or use a new GGTT view needs to:
83 *
84 * 1. Add a new enum with a suitable name.
85 * 2. Extend the metadata in the i915_ggtt_view structure if required.
86 * 3. Add support to i915_get_vma_pages().
87 *
88 * New views are required to build a scatter-gather table from within the
89 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
90 * exists for the lifetime of an VMA.
91 *
92 * Core API is designed to have copy semantics which means that passed in
93 * struct i915_ggtt_view does not need to be persistent (left around after
94 * calling the core API functions).
95 *
96 */
97
ce7fda2e
CW
98static inline struct i915_ggtt *
99i915_vm_to_ggtt(struct i915_address_space *vm)
100{
101 GEM_BUG_ON(!i915_is_ggtt(vm));
102 return container_of(vm, struct i915_ggtt, base);
103}
104
70b9f6f8
DV
105static int
106i915_get_ggtt_vma_pages(struct i915_vma *vma);
107
b5e16987
VS
108const struct i915_ggtt_view i915_ggtt_view_normal = {
109 .type = I915_GGTT_VIEW_NORMAL,
110};
9abc4648 111const struct i915_ggtt_view i915_ggtt_view_rotated = {
b5e16987 112 .type = I915_GGTT_VIEW_ROTATED,
9abc4648 113};
fe14d5f4 114
c033666a
CW
115int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
116 int enable_ppgtt)
cfa7c862 117{
1893a71b
CW
118 bool has_aliasing_ppgtt;
119 bool has_full_ppgtt;
1f9a99e0 120 bool has_full_48bit_ppgtt;
1893a71b 121
c033666a
CW
122 has_aliasing_ppgtt = INTEL_GEN(dev_priv) >= 6;
123 has_full_ppgtt = INTEL_GEN(dev_priv) >= 7;
124 has_full_48bit_ppgtt =
125 IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9;
1893a71b 126
e320d400
ZW
127 if (intel_vgpu_active(dev_priv)) {
128 /* emulation is too hard */
129 has_full_ppgtt = false;
130 has_full_48bit_ppgtt = false;
131 }
71ba2d64 132
0e4ca100
CW
133 if (!has_aliasing_ppgtt)
134 return 0;
135
70ee45e1
DL
136 /*
137 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
138 * execlists, the sole mechanism available to submit work.
139 */
c033666a 140 if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
cfa7c862
DV
141 return 0;
142
143 if (enable_ppgtt == 1)
144 return 1;
145
1893a71b 146 if (enable_ppgtt == 2 && has_full_ppgtt)
cfa7c862
DV
147 return 2;
148
1f9a99e0
MT
149 if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
150 return 3;
151
93a25a9e
DV
152#ifdef CONFIG_INTEL_IOMMU
153 /* Disable ppgtt on SNB if VT-d is on. */
c033666a 154 if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
93a25a9e 155 DRM_INFO("Disabling PPGTT because VT-d is on\n");
cfa7c862 156 return 0;
93a25a9e
DV
157 }
158#endif
159
62942ed7 160 /* Early VLV doesn't have this */
91c8a326 161 if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
62942ed7
JB
162 DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
163 return 0;
164 }
165
e320d400 166 if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
1f9a99e0 167 return has_full_48bit_ppgtt ? 3 : 2;
2f82bbdf
MT
168 else
169 return has_aliasing_ppgtt ? 1 : 0;
93a25a9e
DV
170}
171
70b9f6f8
DV
172static int ppgtt_bind_vma(struct i915_vma *vma,
173 enum i915_cache_level cache_level,
174 u32 unused)
47552659
DV
175{
176 u32 pte_flags = 0;
177
247177dd
CW
178 vma->pages = vma->obj->pages;
179
47552659
DV
180 /* Currently applicable only to VLV */
181 if (vma->obj->gt_ro)
182 pte_flags |= PTE_READ_ONLY;
183
247177dd 184 vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
47552659 185 cache_level, pte_flags);
70b9f6f8
DV
186
187 return 0;
47552659
DV
188}
189
190static void ppgtt_unbind_vma(struct i915_vma *vma)
191{
192 vma->vm->clear_range(vma->vm,
193 vma->node.start,
de180033 194 vma->size,
47552659
DV
195 true);
196}
6f65e29a 197
2c642b07
DV
198static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
199 enum i915_cache_level level,
200 bool valid)
94ec8f61 201{
07749ef3 202 gen8_pte_t pte = valid ? _PAGE_PRESENT | _PAGE_RW : 0;
94ec8f61 203 pte |= addr;
63c42e56
BW
204
205 switch (level) {
206 case I915_CACHE_NONE:
fbe5d36e 207 pte |= PPAT_UNCACHED_INDEX;
63c42e56
BW
208 break;
209 case I915_CACHE_WT:
210 pte |= PPAT_DISPLAY_ELLC_INDEX;
211 break;
212 default:
213 pte |= PPAT_CACHED_INDEX;
214 break;
215 }
216
94ec8f61
BW
217 return pte;
218}
219
fe36f55d
MK
220static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
221 const enum i915_cache_level level)
b1fe6673 222{
07749ef3 223 gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
b1fe6673
BW
224 pde |= addr;
225 if (level != I915_CACHE_NONE)
226 pde |= PPAT_CACHED_PDE_INDEX;
227 else
228 pde |= PPAT_UNCACHED_INDEX;
229 return pde;
230}
231
762d9936
MT
232#define gen8_pdpe_encode gen8_pde_encode
233#define gen8_pml4e_encode gen8_pde_encode
234
07749ef3
MT
235static gen6_pte_t snb_pte_encode(dma_addr_t addr,
236 enum i915_cache_level level,
237 bool valid, u32 unused)
54d12527 238{
07749ef3 239 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
54d12527 240 pte |= GEN6_PTE_ADDR_ENCODE(addr);
e7210c3c
BW
241
242 switch (level) {
350ec881
CW
243 case I915_CACHE_L3_LLC:
244 case I915_CACHE_LLC:
245 pte |= GEN6_PTE_CACHE_LLC;
246 break;
247 case I915_CACHE_NONE:
248 pte |= GEN6_PTE_UNCACHED;
249 break;
250 default:
5f77eeb0 251 MISSING_CASE(level);
350ec881
CW
252 }
253
254 return pte;
255}
256
07749ef3
MT
257static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
258 enum i915_cache_level level,
259 bool valid, u32 unused)
350ec881 260{
07749ef3 261 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
350ec881
CW
262 pte |= GEN6_PTE_ADDR_ENCODE(addr);
263
264 switch (level) {
265 case I915_CACHE_L3_LLC:
266 pte |= GEN7_PTE_CACHE_L3_LLC;
e7210c3c
BW
267 break;
268 case I915_CACHE_LLC:
269 pte |= GEN6_PTE_CACHE_LLC;
270 break;
271 case I915_CACHE_NONE:
9119708c 272 pte |= GEN6_PTE_UNCACHED;
e7210c3c
BW
273 break;
274 default:
5f77eeb0 275 MISSING_CASE(level);
e7210c3c
BW
276 }
277
54d12527
BW
278 return pte;
279}
280
07749ef3
MT
281static gen6_pte_t byt_pte_encode(dma_addr_t addr,
282 enum i915_cache_level level,
283 bool valid, u32 flags)
93c34e70 284{
07749ef3 285 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
93c34e70
KG
286 pte |= GEN6_PTE_ADDR_ENCODE(addr);
287
24f3a8cf
AG
288 if (!(flags & PTE_READ_ONLY))
289 pte |= BYT_PTE_WRITEABLE;
93c34e70
KG
290
291 if (level != I915_CACHE_NONE)
292 pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;
293
294 return pte;
295}
296
07749ef3
MT
297static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
298 enum i915_cache_level level,
299 bool valid, u32 unused)
9119708c 300{
07749ef3 301 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
0d8ff15e 302 pte |= HSW_PTE_ADDR_ENCODE(addr);
9119708c
KG
303
304 if (level != I915_CACHE_NONE)
87a6b688 305 pte |= HSW_WB_LLC_AGE3;
9119708c
KG
306
307 return pte;
308}
309
07749ef3
MT
310static gen6_pte_t iris_pte_encode(dma_addr_t addr,
311 enum i915_cache_level level,
312 bool valid, u32 unused)
4d15c145 313{
07749ef3 314 gen6_pte_t pte = valid ? GEN6_PTE_VALID : 0;
4d15c145
BW
315 pte |= HSW_PTE_ADDR_ENCODE(addr);
316
651d794f
CW
317 switch (level) {
318 case I915_CACHE_NONE:
319 break;
320 case I915_CACHE_WT:
c51e9701 321 pte |= HSW_WT_ELLC_LLC_AGE3;
651d794f
CW
322 break;
323 default:
c51e9701 324 pte |= HSW_WB_ELLC_LLC_AGE3;
651d794f
CW
325 break;
326 }
4d15c145
BW
327
328 return pte;
329}
330
c114f76a
MK
331static int __setup_page_dma(struct drm_device *dev,
332 struct i915_page_dma *p, gfp_t flags)
678d96fb 333{
c49d13ee 334 struct device *kdev = &dev->pdev->dev;
678d96fb 335
c114f76a 336 p->page = alloc_page(flags);
44159ddb
MK
337 if (!p->page)
338 return -ENOMEM;
678d96fb 339
c49d13ee 340 p->daddr = dma_map_page(kdev,
44159ddb 341 p->page, 0, 4096, PCI_DMA_BIDIRECTIONAL);
678d96fb 342
c49d13ee 343 if (dma_mapping_error(kdev, p->daddr)) {
44159ddb
MK
344 __free_page(p->page);
345 return -EINVAL;
346 }
1266cdb1
MT
347
348 return 0;
678d96fb
BW
349}
350
c114f76a
MK
351static int setup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
352{
bb8f9cff 353 return __setup_page_dma(dev, p, I915_GFP_DMA);
c114f76a
MK
354}
355
44159ddb 356static void cleanup_page_dma(struct drm_device *dev, struct i915_page_dma *p)
06fda602 357{
52a05c30
DW
358 struct pci_dev *pdev = dev->pdev;
359
44159ddb 360 if (WARN_ON(!p->page))
06fda602 361 return;
678d96fb 362
52a05c30 363 dma_unmap_page(&pdev->dev, p->daddr, 4096, PCI_DMA_BIDIRECTIONAL);
44159ddb
MK
364 __free_page(p->page);
365 memset(p, 0, sizeof(*p));
366}
367
d1c54acd 368static void *kmap_page_dma(struct i915_page_dma *p)
73eeea53 369{
d1c54acd
MK
370 return kmap_atomic(p->page);
371}
73eeea53 372
d1c54acd
MK
373/* We use the flushing unmap only with ppgtt structures:
374 * page directories, page tables and scratch pages.
375 */
376static void kunmap_page_dma(struct drm_device *dev, void *vaddr)
377{
73eeea53
MK
378 /* There are only few exceptions for gen >=6. chv and bxt.
379 * And we are not sure about the latter so play safe for now.
380 */
381 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
382 drm_clflush_virt_range(vaddr, PAGE_SIZE);
383
384 kunmap_atomic(vaddr);
385}
386
567047be 387#define kmap_px(px) kmap_page_dma(px_base(px))
d1c54acd
MK
388#define kunmap_px(ppgtt, vaddr) kunmap_page_dma((ppgtt)->base.dev, (vaddr))
389
567047be
MK
390#define setup_px(dev, px) setup_page_dma((dev), px_base(px))
391#define cleanup_px(dev, px) cleanup_page_dma((dev), px_base(px))
392#define fill_px(dev, px, v) fill_page_dma((dev), px_base(px), (v))
393#define fill32_px(dev, px, v) fill_page_dma_32((dev), px_base(px), (v))
394
d1c54acd
MK
395static void fill_page_dma(struct drm_device *dev, struct i915_page_dma *p,
396 const uint64_t val)
397{
398 int i;
399 uint64_t * const vaddr = kmap_page_dma(p);
400
401 for (i = 0; i < 512; i++)
402 vaddr[i] = val;
403
404 kunmap_page_dma(dev, vaddr);
405}
406
73eeea53
MK
407static void fill_page_dma_32(struct drm_device *dev, struct i915_page_dma *p,
408 const uint32_t val32)
409{
410 uint64_t v = val32;
411
412 v = v << 32 | val32;
413
414 fill_page_dma(dev, p, v);
415}
416
8bcdd0f7 417static int
bb8f9cff
CW
418setup_scratch_page(struct drm_device *dev,
419 struct i915_page_dma *scratch,
420 gfp_t gfp)
4ad2af1e 421{
bb8f9cff 422 return __setup_page_dma(dev, scratch, gfp | __GFP_ZERO);
4ad2af1e
MK
423}
424
8bcdd0f7
CW
425static void cleanup_scratch_page(struct drm_device *dev,
426 struct i915_page_dma *scratch)
4ad2af1e 427{
8bcdd0f7 428 cleanup_page_dma(dev, scratch);
4ad2af1e
MK
429}
430
8a1ebd74 431static struct i915_page_table *alloc_pt(struct drm_device *dev)
06fda602 432{
ec565b3c 433 struct i915_page_table *pt;
678d96fb
BW
434 const size_t count = INTEL_INFO(dev)->gen >= 8 ?
435 GEN8_PTES : GEN6_PTES;
436 int ret = -ENOMEM;
06fda602
BW
437
438 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
439 if (!pt)
440 return ERR_PTR(-ENOMEM);
441
678d96fb
BW
442 pt->used_ptes = kcalloc(BITS_TO_LONGS(count), sizeof(*pt->used_ptes),
443 GFP_KERNEL);
444
445 if (!pt->used_ptes)
446 goto fail_bitmap;
447
567047be 448 ret = setup_px(dev, pt);
678d96fb 449 if (ret)
44159ddb 450 goto fail_page_m;
06fda602
BW
451
452 return pt;
678d96fb 453
44159ddb 454fail_page_m:
678d96fb
BW
455 kfree(pt->used_ptes);
456fail_bitmap:
457 kfree(pt);
458
459 return ERR_PTR(ret);
06fda602
BW
460}
461
2e906bea 462static void free_pt(struct drm_device *dev, struct i915_page_table *pt)
06fda602 463{
2e906bea
MK
464 cleanup_px(dev, pt);
465 kfree(pt->used_ptes);
466 kfree(pt);
467}
468
469static void gen8_initialize_pt(struct i915_address_space *vm,
470 struct i915_page_table *pt)
471{
472 gen8_pte_t scratch_pte;
473
8bcdd0f7 474 scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
2e906bea
MK
475 I915_CACHE_LLC, true);
476
477 fill_px(vm->dev, pt, scratch_pte);
478}
479
480static void gen6_initialize_pt(struct i915_address_space *vm,
481 struct i915_page_table *pt)
482{
483 gen6_pte_t scratch_pte;
484
8bcdd0f7 485 WARN_ON(vm->scratch_page.daddr == 0);
2e906bea 486
8bcdd0f7 487 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
2e906bea
MK
488 I915_CACHE_LLC, true, 0);
489
490 fill32_px(vm->dev, pt, scratch_pte);
06fda602
BW
491}
492
8a1ebd74 493static struct i915_page_directory *alloc_pd(struct drm_device *dev)
06fda602 494{
ec565b3c 495 struct i915_page_directory *pd;
33c8819f 496 int ret = -ENOMEM;
06fda602
BW
497
498 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
499 if (!pd)
500 return ERR_PTR(-ENOMEM);
501
33c8819f
MT
502 pd->used_pdes = kcalloc(BITS_TO_LONGS(I915_PDES),
503 sizeof(*pd->used_pdes), GFP_KERNEL);
504 if (!pd->used_pdes)
a08e111a 505 goto fail_bitmap;
33c8819f 506
567047be 507 ret = setup_px(dev, pd);
33c8819f 508 if (ret)
a08e111a 509 goto fail_page_m;
e5815a2e 510
06fda602 511 return pd;
33c8819f 512
a08e111a 513fail_page_m:
33c8819f 514 kfree(pd->used_pdes);
a08e111a 515fail_bitmap:
33c8819f
MT
516 kfree(pd);
517
518 return ERR_PTR(ret);
06fda602
BW
519}
520
2e906bea
MK
521static void free_pd(struct drm_device *dev, struct i915_page_directory *pd)
522{
523 if (px_page(pd)) {
524 cleanup_px(dev, pd);
525 kfree(pd->used_pdes);
526 kfree(pd);
527 }
528}
529
530static void gen8_initialize_pd(struct i915_address_space *vm,
531 struct i915_page_directory *pd)
532{
533 gen8_pde_t scratch_pde;
534
535 scratch_pde = gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC);
536
537 fill_px(vm->dev, pd, scratch_pde);
538}
539
6ac18502
MT
540static int __pdp_init(struct drm_device *dev,
541 struct i915_page_directory_pointer *pdp)
542{
543 size_t pdpes = I915_PDPES_PER_PDP(dev);
544
545 pdp->used_pdpes = kcalloc(BITS_TO_LONGS(pdpes),
546 sizeof(unsigned long),
547 GFP_KERNEL);
548 if (!pdp->used_pdpes)
549 return -ENOMEM;
550
551 pdp->page_directory = kcalloc(pdpes, sizeof(*pdp->page_directory),
552 GFP_KERNEL);
553 if (!pdp->page_directory) {
554 kfree(pdp->used_pdpes);
555 /* the PDP might be the statically allocated top level. Keep it
556 * as clean as possible */
557 pdp->used_pdpes = NULL;
558 return -ENOMEM;
559 }
560
561 return 0;
562}
563
564static void __pdp_fini(struct i915_page_directory_pointer *pdp)
565{
566 kfree(pdp->used_pdpes);
567 kfree(pdp->page_directory);
568 pdp->page_directory = NULL;
569}
570
762d9936
MT
571static struct
572i915_page_directory_pointer *alloc_pdp(struct drm_device *dev)
573{
574 struct i915_page_directory_pointer *pdp;
575 int ret = -ENOMEM;
576
577 WARN_ON(!USES_FULL_48BIT_PPGTT(dev));
578
579 pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
580 if (!pdp)
581 return ERR_PTR(-ENOMEM);
582
583 ret = __pdp_init(dev, pdp);
584 if (ret)
585 goto fail_bitmap;
586
587 ret = setup_px(dev, pdp);
588 if (ret)
589 goto fail_page_m;
590
591 return pdp;
592
593fail_page_m:
594 __pdp_fini(pdp);
595fail_bitmap:
596 kfree(pdp);
597
598 return ERR_PTR(ret);
599}
600
6ac18502
MT
601static void free_pdp(struct drm_device *dev,
602 struct i915_page_directory_pointer *pdp)
603{
604 __pdp_fini(pdp);
762d9936
MT
605 if (USES_FULL_48BIT_PPGTT(dev)) {
606 cleanup_px(dev, pdp);
607 kfree(pdp);
608 }
609}
610
69ab76fd
MT
611static void gen8_initialize_pdp(struct i915_address_space *vm,
612 struct i915_page_directory_pointer *pdp)
613{
614 gen8_ppgtt_pdpe_t scratch_pdpe;
615
616 scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);
617
618 fill_px(vm->dev, pdp, scratch_pdpe);
619}
620
621static void gen8_initialize_pml4(struct i915_address_space *vm,
622 struct i915_pml4 *pml4)
623{
624 gen8_ppgtt_pml4e_t scratch_pml4e;
625
626 scratch_pml4e = gen8_pml4e_encode(px_dma(vm->scratch_pdp),
627 I915_CACHE_LLC);
628
629 fill_px(vm->dev, pml4, scratch_pml4e);
630}
631
762d9936
MT
632static void
633gen8_setup_page_directory(struct i915_hw_ppgtt *ppgtt,
634 struct i915_page_directory_pointer *pdp,
635 struct i915_page_directory *pd,
636 int index)
637{
638 gen8_ppgtt_pdpe_t *page_directorypo;
639
640 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
641 return;
642
643 page_directorypo = kmap_px(pdp);
644 page_directorypo[index] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
645 kunmap_px(ppgtt, page_directorypo);
646}
647
648static void
649gen8_setup_page_directory_pointer(struct i915_hw_ppgtt *ppgtt,
650 struct i915_pml4 *pml4,
651 struct i915_page_directory_pointer *pdp,
652 int index)
653{
654 gen8_ppgtt_pml4e_t *pagemap = kmap_px(pml4);
655
656 WARN_ON(!USES_FULL_48BIT_PPGTT(ppgtt->base.dev));
657 pagemap[index] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
658 kunmap_px(ppgtt, pagemap);
6ac18502
MT
659}
660
94e409c1 661/* Broadwell Page Directory Pointer Descriptors */
e85b26dc 662static int gen8_write_pdp(struct drm_i915_gem_request *req,
7cb6d7ac
MT
663 unsigned entry,
664 dma_addr_t addr)
94e409c1 665{
7e37f889 666 struct intel_ring *ring = req->ring;
4a570db5 667 struct intel_engine_cs *engine = req->engine;
94e409c1
BW
668 int ret;
669
670 BUG_ON(entry >= 4);
671
5fb9de1a 672 ret = intel_ring_begin(req, 6);
94e409c1
BW
673 if (ret)
674 return ret;
675
b5321f30
CW
676 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
677 intel_ring_emit_reg(ring, GEN8_RING_PDP_UDW(engine, entry));
678 intel_ring_emit(ring, upper_32_bits(addr));
679 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
680 intel_ring_emit_reg(ring, GEN8_RING_PDP_LDW(engine, entry));
681 intel_ring_emit(ring, lower_32_bits(addr));
682 intel_ring_advance(ring);
94e409c1
BW
683
684 return 0;
685}
686
2dba3239
MT
687static int gen8_legacy_mm_switch(struct i915_hw_ppgtt *ppgtt,
688 struct drm_i915_gem_request *req)
94e409c1 689{
eeb9488e 690 int i, ret;
94e409c1 691
7cb6d7ac 692 for (i = GEN8_LEGACY_PDPES - 1; i >= 0; i--) {
d852c7bf
MK
693 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
694
e85b26dc 695 ret = gen8_write_pdp(req, i, pd_daddr);
eeb9488e
BW
696 if (ret)
697 return ret;
94e409c1 698 }
d595bd4b 699
eeb9488e 700 return 0;
94e409c1
BW
701}
702
2dba3239
MT
703static int gen8_48b_mm_switch(struct i915_hw_ppgtt *ppgtt,
704 struct drm_i915_gem_request *req)
705{
706 return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
707}
708
f9b5b782
MT
709static void gen8_ppgtt_clear_pte_range(struct i915_address_space *vm,
710 struct i915_page_directory_pointer *pdp,
711 uint64_t start,
712 uint64_t length,
713 gen8_pte_t scratch_pte)
459108b8 714{
e5716f55 715 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
f9b5b782 716 gen8_pte_t *pt_vaddr;
de5ba8eb
MT
717 unsigned pdpe = gen8_pdpe_index(start);
718 unsigned pde = gen8_pde_index(start);
719 unsigned pte = gen8_pte_index(start);
782f1495 720 unsigned num_entries = length >> PAGE_SHIFT;
459108b8
BW
721 unsigned last_pte, i;
722
f9b5b782
MT
723 if (WARN_ON(!pdp))
724 return;
459108b8
BW
725
726 while (num_entries) {
ec565b3c
MT
727 struct i915_page_directory *pd;
728 struct i915_page_table *pt;
06fda602 729
d4ec9da0 730 if (WARN_ON(!pdp->page_directory[pdpe]))
00245266 731 break;
06fda602 732
d4ec9da0 733 pd = pdp->page_directory[pdpe];
06fda602
BW
734
735 if (WARN_ON(!pd->page_table[pde]))
00245266 736 break;
06fda602
BW
737
738 pt = pd->page_table[pde];
739
567047be 740 if (WARN_ON(!px_page(pt)))
00245266 741 break;
06fda602 742
7ad47cf2 743 last_pte = pte + num_entries;
07749ef3
MT
744 if (last_pte > GEN8_PTES)
745 last_pte = GEN8_PTES;
459108b8 746
d1c54acd 747 pt_vaddr = kmap_px(pt);
459108b8 748
7ad47cf2 749 for (i = pte; i < last_pte; i++) {
459108b8 750 pt_vaddr[i] = scratch_pte;
7ad47cf2
BW
751 num_entries--;
752 }
459108b8 753
44a71024 754 kunmap_px(ppgtt, pt_vaddr);
459108b8 755
7ad47cf2 756 pte = 0;
07749ef3 757 if (++pde == I915_PDES) {
de5ba8eb
MT
758 if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
759 break;
7ad47cf2
BW
760 pde = 0;
761 }
459108b8
BW
762 }
763}
764
f9b5b782
MT
765static void gen8_ppgtt_clear_range(struct i915_address_space *vm,
766 uint64_t start,
767 uint64_t length,
768 bool use_scratch)
9df15b49 769{
e5716f55 770 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
8bcdd0f7 771 gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
f9b5b782
MT
772 I915_CACHE_LLC, use_scratch);
773
de5ba8eb
MT
774 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
775 gen8_ppgtt_clear_pte_range(vm, &ppgtt->pdp, start, length,
776 scratch_pte);
777 } else {
e8ebd8e2 778 uint64_t pml4e;
de5ba8eb
MT
779 struct i915_page_directory_pointer *pdp;
780
e8ebd8e2 781 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
de5ba8eb
MT
782 gen8_ppgtt_clear_pte_range(vm, pdp, start, length,
783 scratch_pte);
784 }
785 }
f9b5b782
MT
786}
787
788static void
789gen8_ppgtt_insert_pte_entries(struct i915_address_space *vm,
790 struct i915_page_directory_pointer *pdp,
3387d433 791 struct sg_page_iter *sg_iter,
f9b5b782
MT
792 uint64_t start,
793 enum i915_cache_level cache_level)
794{
e5716f55 795 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
07749ef3 796 gen8_pte_t *pt_vaddr;
de5ba8eb
MT
797 unsigned pdpe = gen8_pdpe_index(start);
798 unsigned pde = gen8_pde_index(start);
799 unsigned pte = gen8_pte_index(start);
9df15b49 800
6f1cc993 801 pt_vaddr = NULL;
7ad47cf2 802
3387d433 803 while (__sg_page_iter_next(sg_iter)) {
d7b3de91 804 if (pt_vaddr == NULL) {
d4ec9da0 805 struct i915_page_directory *pd = pdp->page_directory[pdpe];
ec565b3c 806 struct i915_page_table *pt = pd->page_table[pde];
d1c54acd 807 pt_vaddr = kmap_px(pt);
d7b3de91 808 }
9df15b49 809
7ad47cf2 810 pt_vaddr[pte] =
3387d433 811 gen8_pte_encode(sg_page_iter_dma_address(sg_iter),
6f1cc993 812 cache_level, true);
07749ef3 813 if (++pte == GEN8_PTES) {
d1c54acd 814 kunmap_px(ppgtt, pt_vaddr);
6f1cc993 815 pt_vaddr = NULL;
07749ef3 816 if (++pde == I915_PDES) {
de5ba8eb
MT
817 if (++pdpe == I915_PDPES_PER_PDP(vm->dev))
818 break;
7ad47cf2
BW
819 pde = 0;
820 }
821 pte = 0;
9df15b49
BW
822 }
823 }
d1c54acd
MK
824
825 if (pt_vaddr)
826 kunmap_px(ppgtt, pt_vaddr);
9df15b49
BW
827}
828
f9b5b782
MT
829static void gen8_ppgtt_insert_entries(struct i915_address_space *vm,
830 struct sg_table *pages,
831 uint64_t start,
832 enum i915_cache_level cache_level,
833 u32 unused)
834{
e5716f55 835 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
3387d433 836 struct sg_page_iter sg_iter;
f9b5b782 837
3387d433 838 __sg_page_iter_start(&sg_iter, pages->sgl, sg_nents(pages->sgl), 0);
de5ba8eb
MT
839
840 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
841 gen8_ppgtt_insert_pte_entries(vm, &ppgtt->pdp, &sg_iter, start,
842 cache_level);
843 } else {
844 struct i915_page_directory_pointer *pdp;
e8ebd8e2 845 uint64_t pml4e;
de5ba8eb
MT
846 uint64_t length = (uint64_t)pages->orig_nents << PAGE_SHIFT;
847
e8ebd8e2 848 gen8_for_each_pml4e(pdp, &ppgtt->pml4, start, length, pml4e) {
de5ba8eb
MT
849 gen8_ppgtt_insert_pte_entries(vm, pdp, &sg_iter,
850 start, cache_level);
851 }
852 }
f9b5b782
MT
853}
854
f37c0505
MT
855static void gen8_free_page_tables(struct drm_device *dev,
856 struct i915_page_directory *pd)
7ad47cf2
BW
857{
858 int i;
859
567047be 860 if (!px_page(pd))
7ad47cf2
BW
861 return;
862
33c8819f 863 for_each_set_bit(i, pd->used_pdes, I915_PDES) {
06fda602
BW
864 if (WARN_ON(!pd->page_table[i]))
865 continue;
7ad47cf2 866
a08e111a 867 free_pt(dev, pd->page_table[i]);
06fda602
BW
868 pd->page_table[i] = NULL;
869 }
d7b3de91
BW
870}
871
8776f02b
MK
872static int gen8_init_scratch(struct i915_address_space *vm)
873{
874 struct drm_device *dev = vm->dev;
64c050db 875 int ret;
8776f02b 876
bb8f9cff 877 ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
8bcdd0f7
CW
878 if (ret)
879 return ret;
8776f02b
MK
880
881 vm->scratch_pt = alloc_pt(dev);
882 if (IS_ERR(vm->scratch_pt)) {
64c050db
MA
883 ret = PTR_ERR(vm->scratch_pt);
884 goto free_scratch_page;
8776f02b
MK
885 }
886
887 vm->scratch_pd = alloc_pd(dev);
888 if (IS_ERR(vm->scratch_pd)) {
64c050db
MA
889 ret = PTR_ERR(vm->scratch_pd);
890 goto free_pt;
8776f02b
MK
891 }
892
69ab76fd
MT
893 if (USES_FULL_48BIT_PPGTT(dev)) {
894 vm->scratch_pdp = alloc_pdp(dev);
895 if (IS_ERR(vm->scratch_pdp)) {
64c050db
MA
896 ret = PTR_ERR(vm->scratch_pdp);
897 goto free_pd;
69ab76fd
MT
898 }
899 }
900
8776f02b
MK
901 gen8_initialize_pt(vm, vm->scratch_pt);
902 gen8_initialize_pd(vm, vm->scratch_pd);
69ab76fd
MT
903 if (USES_FULL_48BIT_PPGTT(dev))
904 gen8_initialize_pdp(vm, vm->scratch_pdp);
8776f02b
MK
905
906 return 0;
64c050db
MA
907
908free_pd:
909 free_pd(dev, vm->scratch_pd);
910free_pt:
911 free_pt(dev, vm->scratch_pt);
912free_scratch_page:
8bcdd0f7 913 cleanup_scratch_page(dev, &vm->scratch_page);
64c050db
MA
914
915 return ret;
8776f02b
MK
916}
917
650da34c
ZL
918static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
919{
920 enum vgt_g2v_type msg;
df28564d 921 struct drm_i915_private *dev_priv = to_i915(ppgtt->base.dev);
650da34c
ZL
922 int i;
923
df28564d 924 if (USES_FULL_48BIT_PPGTT(dev_priv)) {
650da34c
ZL
925 u64 daddr = px_dma(&ppgtt->pml4);
926
ab75bb5d
VS
927 I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
928 I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));
650da34c
ZL
929
930 msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
931 VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
932 } else {
933 for (i = 0; i < GEN8_LEGACY_PDPES; i++) {
934 u64 daddr = i915_page_dir_dma_addr(ppgtt, i);
935
ab75bb5d
VS
936 I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
937 I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
650da34c
ZL
938 }
939
940 msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
941 VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
942 }
943
944 I915_WRITE(vgtif_reg(g2v_notify), msg);
945
946 return 0;
947}
948
8776f02b
MK
949static void gen8_free_scratch(struct i915_address_space *vm)
950{
951 struct drm_device *dev = vm->dev;
952
69ab76fd
MT
953 if (USES_FULL_48BIT_PPGTT(dev))
954 free_pdp(dev, vm->scratch_pdp);
8776f02b
MK
955 free_pd(dev, vm->scratch_pd);
956 free_pt(dev, vm->scratch_pt);
8bcdd0f7 957 cleanup_scratch_page(dev, &vm->scratch_page);
8776f02b
MK
958}
959
762d9936
MT
960static void gen8_ppgtt_cleanup_3lvl(struct drm_device *dev,
961 struct i915_page_directory_pointer *pdp)
b45a6715
BW
962{
963 int i;
964
d4ec9da0
MT
965 for_each_set_bit(i, pdp->used_pdpes, I915_PDPES_PER_PDP(dev)) {
966 if (WARN_ON(!pdp->page_directory[i]))
06fda602
BW
967 continue;
968
d4ec9da0
MT
969 gen8_free_page_tables(dev, pdp->page_directory[i]);
970 free_pd(dev, pdp->page_directory[i]);
7ad47cf2 971 }
69876bed 972
d4ec9da0 973 free_pdp(dev, pdp);
762d9936
MT
974}
975
976static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
977{
978 int i;
979
980 for_each_set_bit(i, ppgtt->pml4.used_pml4es, GEN8_PML4ES_PER_PML4) {
981 if (WARN_ON(!ppgtt->pml4.pdps[i]))
982 continue;
983
984 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, ppgtt->pml4.pdps[i]);
985 }
986
987 cleanup_px(ppgtt->base.dev, &ppgtt->pml4);
988}
989
990static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
991{
e5716f55 992 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
762d9936 993
c033666a 994 if (intel_vgpu_active(to_i915(vm->dev)))
650da34c
ZL
995 gen8_ppgtt_notify_vgt(ppgtt, false);
996
762d9936
MT
997 if (!USES_FULL_48BIT_PPGTT(ppgtt->base.dev))
998 gen8_ppgtt_cleanup_3lvl(ppgtt->base.dev, &ppgtt->pdp);
999 else
1000 gen8_ppgtt_cleanup_4lvl(ppgtt);
d4ec9da0 1001
8776f02b 1002 gen8_free_scratch(vm);
b45a6715
BW
1003}
1004
d7b2633d
MT
1005/**
1006 * gen8_ppgtt_alloc_pagetabs() - Allocate page tables for VA range.
d4ec9da0
MT
1007 * @vm: Master vm structure.
1008 * @pd: Page directory for this address range.
d7b2633d 1009 * @start: Starting virtual address to begin allocations.
d4ec9da0 1010 * @length: Size of the allocations.
d7b2633d
MT
1011 * @new_pts: Bitmap set by function with new allocations. Likely used by the
1012 * caller to free on error.
1013 *
1014 * Allocate the required number of page tables. Extremely similar to
1015 * gen8_ppgtt_alloc_page_directories(). The main difference is here we are limited by
1016 * the page directory boundary (instead of the page directory pointer). That
1017 * boundary is 1GB virtual. Therefore, unlike gen8_ppgtt_alloc_page_directories(), it is
1018 * possible, and likely that the caller will need to use multiple calls of this
1019 * function to achieve the appropriate allocation.
1020 *
1021 * Return: 0 if success; negative error code otherwise.
1022 */
d4ec9da0 1023static int gen8_ppgtt_alloc_pagetabs(struct i915_address_space *vm,
e5815a2e 1024 struct i915_page_directory *pd,
5441f0cb 1025 uint64_t start,
d7b2633d
MT
1026 uint64_t length,
1027 unsigned long *new_pts)
bf2b4ed2 1028{
d4ec9da0 1029 struct drm_device *dev = vm->dev;
d7b2633d 1030 struct i915_page_table *pt;
5441f0cb 1031 uint32_t pde;
bf2b4ed2 1032
e8ebd8e2 1033 gen8_for_each_pde(pt, pd, start, length, pde) {
d7b2633d 1034 /* Don't reallocate page tables */
6ac18502 1035 if (test_bit(pde, pd->used_pdes)) {
d7b2633d 1036 /* Scratch is never allocated this way */
d4ec9da0 1037 WARN_ON(pt == vm->scratch_pt);
d7b2633d
MT
1038 continue;
1039 }
1040
8a1ebd74 1041 pt = alloc_pt(dev);
d7b2633d 1042 if (IS_ERR(pt))
5441f0cb
MT
1043 goto unwind_out;
1044
d4ec9da0 1045 gen8_initialize_pt(vm, pt);
d7b2633d 1046 pd->page_table[pde] = pt;
966082c9 1047 __set_bit(pde, new_pts);
4c06ec8d 1048 trace_i915_page_table_entry_alloc(vm, pde, start, GEN8_PDE_SHIFT);
7ad47cf2
BW
1049 }
1050
bf2b4ed2 1051 return 0;
7ad47cf2
BW
1052
1053unwind_out:
d7b2633d 1054 for_each_set_bit(pde, new_pts, I915_PDES)
a08e111a 1055 free_pt(dev, pd->page_table[pde]);
7ad47cf2 1056
d7b3de91 1057 return -ENOMEM;
bf2b4ed2
BW
1058}
1059
d7b2633d
MT
1060/**
1061 * gen8_ppgtt_alloc_page_directories() - Allocate page directories for VA range.
d4ec9da0 1062 * @vm: Master vm structure.
d7b2633d
MT
1063 * @pdp: Page directory pointer for this address range.
1064 * @start: Starting virtual address to begin allocations.
d4ec9da0
MT
1065 * @length: Size of the allocations.
1066 * @new_pds: Bitmap set by function with new allocations. Likely used by the
d7b2633d
MT
1067 * caller to free on error.
1068 *
1069 * Allocate the required number of page directories starting at the pde index of
1070 * @start, and ending at the pde index @start + @length. This function will skip
1071 * over already allocated page directories within the range, and only allocate
1072 * new ones, setting the appropriate pointer within the pdp as well as the
1073 * correct position in the bitmap @new_pds.
1074 *
1075 * The function will only allocate the pages within the range for a give page
1076 * directory pointer. In other words, if @start + @length straddles a virtually
1077 * addressed PDP boundary (512GB for 4k pages), there will be more allocations
1078 * required by the caller, This is not currently possible, and the BUG in the
1079 * code will prevent it.
1080 *
1081 * Return: 0 if success; negative error code otherwise.
1082 */
d4ec9da0
MT
1083static int
1084gen8_ppgtt_alloc_page_directories(struct i915_address_space *vm,
1085 struct i915_page_directory_pointer *pdp,
1086 uint64_t start,
1087 uint64_t length,
1088 unsigned long *new_pds)
bf2b4ed2 1089{
d4ec9da0 1090 struct drm_device *dev = vm->dev;
d7b2633d 1091 struct i915_page_directory *pd;
69876bed 1092 uint32_t pdpe;
6ac18502 1093 uint32_t pdpes = I915_PDPES_PER_PDP(dev);
69876bed 1094
6ac18502 1095 WARN_ON(!bitmap_empty(new_pds, pdpes));
d7b2633d 1096
e8ebd8e2 1097 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
6ac18502 1098 if (test_bit(pdpe, pdp->used_pdpes))
d7b2633d 1099 continue;
33c8819f 1100
8a1ebd74 1101 pd = alloc_pd(dev);
d7b2633d 1102 if (IS_ERR(pd))
d7b3de91 1103 goto unwind_out;
69876bed 1104
d4ec9da0 1105 gen8_initialize_pd(vm, pd);
d7b2633d 1106 pdp->page_directory[pdpe] = pd;
966082c9 1107 __set_bit(pdpe, new_pds);
4c06ec8d 1108 trace_i915_page_directory_entry_alloc(vm, pdpe, start, GEN8_PDPE_SHIFT);
d7b3de91
BW
1109 }
1110
bf2b4ed2 1111 return 0;
d7b3de91
BW
1112
1113unwind_out:
6ac18502 1114 for_each_set_bit(pdpe, new_pds, pdpes)
a08e111a 1115 free_pd(dev, pdp->page_directory[pdpe]);
d7b3de91
BW
1116
1117 return -ENOMEM;
bf2b4ed2
BW
1118}
1119
762d9936
MT
1120/**
1121 * gen8_ppgtt_alloc_page_dirpointers() - Allocate pdps for VA range.
1122 * @vm: Master vm structure.
1123 * @pml4: Page map level 4 for this address range.
1124 * @start: Starting virtual address to begin allocations.
1125 * @length: Size of the allocations.
1126 * @new_pdps: Bitmap set by function with new allocations. Likely used by the
1127 * caller to free on error.
1128 *
1129 * Allocate the required number of page directory pointers. Extremely similar to
1130 * gen8_ppgtt_alloc_page_directories() and gen8_ppgtt_alloc_pagetabs().
1131 * The main difference is here we are limited by the pml4 boundary (instead of
1132 * the page directory pointer).
1133 *
1134 * Return: 0 if success; negative error code otherwise.
1135 */
1136static int
1137gen8_ppgtt_alloc_page_dirpointers(struct i915_address_space *vm,
1138 struct i915_pml4 *pml4,
1139 uint64_t start,
1140 uint64_t length,
1141 unsigned long *new_pdps)
1142{
1143 struct drm_device *dev = vm->dev;
1144 struct i915_page_directory_pointer *pdp;
762d9936
MT
1145 uint32_t pml4e;
1146
1147 WARN_ON(!bitmap_empty(new_pdps, GEN8_PML4ES_PER_PML4));
1148
e8ebd8e2 1149 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
762d9936
MT
1150 if (!test_bit(pml4e, pml4->used_pml4es)) {
1151 pdp = alloc_pdp(dev);
1152 if (IS_ERR(pdp))
1153 goto unwind_out;
1154
69ab76fd 1155 gen8_initialize_pdp(vm, pdp);
762d9936
MT
1156 pml4->pdps[pml4e] = pdp;
1157 __set_bit(pml4e, new_pdps);
1158 trace_i915_page_directory_pointer_entry_alloc(vm,
1159 pml4e,
1160 start,
1161 GEN8_PML4E_SHIFT);
1162 }
1163 }
1164
1165 return 0;
1166
1167unwind_out:
1168 for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1169 free_pdp(dev, pml4->pdps[pml4e]);
1170
1171 return -ENOMEM;
1172}
1173
d7b2633d 1174static void
3a41a05d 1175free_gen8_temp_bitmaps(unsigned long *new_pds, unsigned long *new_pts)
d7b2633d 1176{
d7b2633d
MT
1177 kfree(new_pts);
1178 kfree(new_pds);
1179}
1180
1181/* Fills in the page directory bitmap, and the array of page tables bitmap. Both
1182 * of these are based on the number of PDPEs in the system.
1183 */
1184static
1185int __must_check alloc_gen8_temp_bitmaps(unsigned long **new_pds,
3a41a05d 1186 unsigned long **new_pts,
6ac18502 1187 uint32_t pdpes)
d7b2633d 1188{
d7b2633d 1189 unsigned long *pds;
3a41a05d 1190 unsigned long *pts;
d7b2633d 1191
3a41a05d 1192 pds = kcalloc(BITS_TO_LONGS(pdpes), sizeof(unsigned long), GFP_TEMPORARY);
d7b2633d
MT
1193 if (!pds)
1194 return -ENOMEM;
1195
3a41a05d
MW
1196 pts = kcalloc(pdpes, BITS_TO_LONGS(I915_PDES) * sizeof(unsigned long),
1197 GFP_TEMPORARY);
1198 if (!pts)
1199 goto err_out;
d7b2633d
MT
1200
1201 *new_pds = pds;
1202 *new_pts = pts;
1203
1204 return 0;
1205
1206err_out:
3a41a05d 1207 free_gen8_temp_bitmaps(pds, pts);
d7b2633d
MT
1208 return -ENOMEM;
1209}
1210
5b7e4c9c
MK
1211/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
1212 * the page table structures, we mark them dirty so that
1213 * context switching/execlist queuing code takes extra steps
1214 * to ensure that tlbs are flushed.
1215 */
1216static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
1217{
1218 ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.dev)->ring_mask;
1219}
1220
762d9936
MT
1221static int gen8_alloc_va_range_3lvl(struct i915_address_space *vm,
1222 struct i915_page_directory_pointer *pdp,
1223 uint64_t start,
1224 uint64_t length)
bf2b4ed2 1225{
e5716f55 1226 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
3a41a05d 1227 unsigned long *new_page_dirs, *new_page_tables;
d4ec9da0 1228 struct drm_device *dev = vm->dev;
5441f0cb 1229 struct i915_page_directory *pd;
33c8819f
MT
1230 const uint64_t orig_start = start;
1231 const uint64_t orig_length = length;
5441f0cb 1232 uint32_t pdpe;
d4ec9da0 1233 uint32_t pdpes = I915_PDPES_PER_PDP(dev);
bf2b4ed2
BW
1234 int ret;
1235
d7b2633d
MT
1236 /* Wrap is never okay since we can only represent 48b, and we don't
1237 * actually use the other side of the canonical address space.
1238 */
1239 if (WARN_ON(start + length < start))
a05d80ee
MK
1240 return -ENODEV;
1241
d4ec9da0 1242 if (WARN_ON(start + length > vm->total))
a05d80ee 1243 return -ENODEV;
d7b2633d 1244
6ac18502 1245 ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
bf2b4ed2
BW
1246 if (ret)
1247 return ret;
1248
d7b2633d 1249 /* Do the allocations first so we can easily bail out */
d4ec9da0
MT
1250 ret = gen8_ppgtt_alloc_page_directories(vm, pdp, start, length,
1251 new_page_dirs);
d7b2633d 1252 if (ret) {
3a41a05d 1253 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
d7b2633d
MT
1254 return ret;
1255 }
1256
1257 /* For every page directory referenced, allocate page tables */
e8ebd8e2 1258 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
d4ec9da0 1259 ret = gen8_ppgtt_alloc_pagetabs(vm, pd, start, length,
3a41a05d 1260 new_page_tables + pdpe * BITS_TO_LONGS(I915_PDES));
5441f0cb
MT
1261 if (ret)
1262 goto err_out;
5441f0cb
MT
1263 }
1264
33c8819f
MT
1265 start = orig_start;
1266 length = orig_length;
1267
d7b2633d
MT
1268 /* Allocations have completed successfully, so set the bitmaps, and do
1269 * the mappings. */
e8ebd8e2 1270 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
d1c54acd 1271 gen8_pde_t *const page_directory = kmap_px(pd);
33c8819f 1272 struct i915_page_table *pt;
09120d4e 1273 uint64_t pd_len = length;
33c8819f
MT
1274 uint64_t pd_start = start;
1275 uint32_t pde;
1276
d7b2633d
MT
1277 /* Every pd should be allocated, we just did that above. */
1278 WARN_ON(!pd);
1279
e8ebd8e2 1280 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
d7b2633d
MT
1281 /* Same reasoning as pd */
1282 WARN_ON(!pt);
1283 WARN_ON(!pd_len);
1284 WARN_ON(!gen8_pte_count(pd_start, pd_len));
1285
1286 /* Set our used ptes within the page table */
1287 bitmap_set(pt->used_ptes,
1288 gen8_pte_index(pd_start),
1289 gen8_pte_count(pd_start, pd_len));
1290
1291 /* Our pde is now pointing to the pagetable, pt */
966082c9 1292 __set_bit(pde, pd->used_pdes);
d7b2633d
MT
1293
1294 /* Map the PDE to the page table */
fe36f55d
MK
1295 page_directory[pde] = gen8_pde_encode(px_dma(pt),
1296 I915_CACHE_LLC);
4c06ec8d
MT
1297 trace_i915_page_table_entry_map(&ppgtt->base, pde, pt,
1298 gen8_pte_index(start),
1299 gen8_pte_count(start, length),
1300 GEN8_PTES);
d7b2633d
MT
1301
1302 /* NB: We haven't yet mapped ptes to pages. At this
1303 * point we're still relying on insert_entries() */
33c8819f 1304 }
d7b2633d 1305
d1c54acd 1306 kunmap_px(ppgtt, page_directory);
d4ec9da0 1307 __set_bit(pdpe, pdp->used_pdpes);
762d9936 1308 gen8_setup_page_directory(ppgtt, pdp, pd, pdpe);
33c8819f
MT
1309 }
1310
3a41a05d 1311 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
5b7e4c9c 1312 mark_tlbs_dirty(ppgtt);
d7b3de91 1313 return 0;
bf2b4ed2 1314
d7b3de91 1315err_out:
d7b2633d 1316 while (pdpe--) {
e8ebd8e2
DG
1317 unsigned long temp;
1318
3a41a05d
MW
1319 for_each_set_bit(temp, new_page_tables + pdpe *
1320 BITS_TO_LONGS(I915_PDES), I915_PDES)
d4ec9da0 1321 free_pt(dev, pdp->page_directory[pdpe]->page_table[temp]);
d7b2633d
MT
1322 }
1323
6ac18502 1324 for_each_set_bit(pdpe, new_page_dirs, pdpes)
d4ec9da0 1325 free_pd(dev, pdp->page_directory[pdpe]);
d7b2633d 1326
3a41a05d 1327 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
5b7e4c9c 1328 mark_tlbs_dirty(ppgtt);
bf2b4ed2
BW
1329 return ret;
1330}
1331
762d9936
MT
1332static int gen8_alloc_va_range_4lvl(struct i915_address_space *vm,
1333 struct i915_pml4 *pml4,
1334 uint64_t start,
1335 uint64_t length)
1336{
1337 DECLARE_BITMAP(new_pdps, GEN8_PML4ES_PER_PML4);
e5716f55 1338 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
762d9936 1339 struct i915_page_directory_pointer *pdp;
e8ebd8e2 1340 uint64_t pml4e;
762d9936
MT
1341 int ret = 0;
1342
1343 /* Do the pml4 allocations first, so we don't need to track the newly
1344 * allocated tables below the pdp */
1345 bitmap_zero(new_pdps, GEN8_PML4ES_PER_PML4);
1346
1347 /* The pagedirectory and pagetable allocations are done in the shared 3
1348 * and 4 level code. Just allocate the pdps.
1349 */
1350 ret = gen8_ppgtt_alloc_page_dirpointers(vm, pml4, start, length,
1351 new_pdps);
1352 if (ret)
1353 return ret;
1354
1355 WARN(bitmap_weight(new_pdps, GEN8_PML4ES_PER_PML4) > 2,
1356 "The allocation has spanned more than 512GB. "
1357 "It is highly likely this is incorrect.");
1358
e8ebd8e2 1359 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
762d9936
MT
1360 WARN_ON(!pdp);
1361
1362 ret = gen8_alloc_va_range_3lvl(vm, pdp, start, length);
1363 if (ret)
1364 goto err_out;
1365
1366 gen8_setup_page_directory_pointer(ppgtt, pml4, pdp, pml4e);
1367 }
1368
1369 bitmap_or(pml4->used_pml4es, new_pdps, pml4->used_pml4es,
1370 GEN8_PML4ES_PER_PML4);
1371
1372 return 0;
1373
1374err_out:
1375 for_each_set_bit(pml4e, new_pdps, GEN8_PML4ES_PER_PML4)
1376 gen8_ppgtt_cleanup_3lvl(vm->dev, pml4->pdps[pml4e]);
1377
1378 return ret;
1379}
1380
1381static int gen8_alloc_va_range(struct i915_address_space *vm,
1382 uint64_t start, uint64_t length)
1383{
e5716f55 1384 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
762d9936
MT
1385
1386 if (USES_FULL_48BIT_PPGTT(vm->dev))
1387 return gen8_alloc_va_range_4lvl(vm, &ppgtt->pml4, start, length);
1388 else
1389 return gen8_alloc_va_range_3lvl(vm, &ppgtt->pdp, start, length);
1390}
1391
ea91e401
MT
1392static void gen8_dump_pdp(struct i915_page_directory_pointer *pdp,
1393 uint64_t start, uint64_t length,
1394 gen8_pte_t scratch_pte,
1395 struct seq_file *m)
1396{
1397 struct i915_page_directory *pd;
ea91e401
MT
1398 uint32_t pdpe;
1399
e8ebd8e2 1400 gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
ea91e401
MT
1401 struct i915_page_table *pt;
1402 uint64_t pd_len = length;
1403 uint64_t pd_start = start;
1404 uint32_t pde;
1405
1406 if (!test_bit(pdpe, pdp->used_pdpes))
1407 continue;
1408
1409 seq_printf(m, "\tPDPE #%d\n", pdpe);
e8ebd8e2 1410 gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
ea91e401
MT
1411 uint32_t pte;
1412 gen8_pte_t *pt_vaddr;
1413
1414 if (!test_bit(pde, pd->used_pdes))
1415 continue;
1416
1417 pt_vaddr = kmap_px(pt);
1418 for (pte = 0; pte < GEN8_PTES; pte += 4) {
1419 uint64_t va =
1420 (pdpe << GEN8_PDPE_SHIFT) |
1421 (pde << GEN8_PDE_SHIFT) |
1422 (pte << GEN8_PTE_SHIFT);
1423 int i;
1424 bool found = false;
1425
1426 for (i = 0; i < 4; i++)
1427 if (pt_vaddr[pte + i] != scratch_pte)
1428 found = true;
1429 if (!found)
1430 continue;
1431
1432 seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
1433 for (i = 0; i < 4; i++) {
1434 if (pt_vaddr[pte + i] != scratch_pte)
1435 seq_printf(m, " %llx", pt_vaddr[pte + i]);
1436 else
1437 seq_puts(m, " SCRATCH ");
1438 }
1439 seq_puts(m, "\n");
1440 }
1441 /* don't use kunmap_px, it could trigger
1442 * an unnecessary flush.
1443 */
1444 kunmap_atomic(pt_vaddr);
1445 }
1446 }
1447}
1448
1449static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1450{
1451 struct i915_address_space *vm = &ppgtt->base;
1452 uint64_t start = ppgtt->base.start;
1453 uint64_t length = ppgtt->base.total;
8bcdd0f7 1454 gen8_pte_t scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
ea91e401
MT
1455 I915_CACHE_LLC, true);
1456
1457 if (!USES_FULL_48BIT_PPGTT(vm->dev)) {
1458 gen8_dump_pdp(&ppgtt->pdp, start, length, scratch_pte, m);
1459 } else {
e8ebd8e2 1460 uint64_t pml4e;
ea91e401
MT
1461 struct i915_pml4 *pml4 = &ppgtt->pml4;
1462 struct i915_page_directory_pointer *pdp;
1463
e8ebd8e2 1464 gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
ea91e401
MT
1465 if (!test_bit(pml4e, pml4->used_pml4es))
1466 continue;
1467
1468 seq_printf(m, " PML4E #%llu\n", pml4e);
1469 gen8_dump_pdp(pdp, start, length, scratch_pte, m);
1470 }
1471 }
1472}
1473
331f38e7
ZL
1474static int gen8_preallocate_top_level_pdps(struct i915_hw_ppgtt *ppgtt)
1475{
3a41a05d 1476 unsigned long *new_page_dirs, *new_page_tables;
331f38e7
ZL
1477 uint32_t pdpes = I915_PDPES_PER_PDP(dev);
1478 int ret;
1479
1480 /* We allocate temp bitmap for page tables for no gain
1481 * but as this is for init only, lets keep the things simple
1482 */
1483 ret = alloc_gen8_temp_bitmaps(&new_page_dirs, &new_page_tables, pdpes);
1484 if (ret)
1485 return ret;
1486
1487 /* Allocate for all pdps regardless of how the ppgtt
1488 * was defined.
1489 */
1490 ret = gen8_ppgtt_alloc_page_directories(&ppgtt->base, &ppgtt->pdp,
1491 0, 1ULL << 32,
1492 new_page_dirs);
1493 if (!ret)
1494 *ppgtt->pdp.used_pdpes = *new_page_dirs;
1495
3a41a05d 1496 free_gen8_temp_bitmaps(new_page_dirs, new_page_tables);
331f38e7
ZL
1497
1498 return ret;
1499}
1500
eb0b44ad 1501/*
f3a964b9
BW
1502 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
1503 * with a net effect resembling a 2-level page table in normal x86 terms. Each
1504 * PDP represents 1GB of memory 4 * 512 * 512 * 4096 = 4GB legacy 32b address
1505 * space.
37aca44a 1506 *
f3a964b9 1507 */
5c5f6457 1508static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
37aca44a 1509{
8776f02b 1510 int ret;
7cb6d7ac 1511
8776f02b
MK
1512 ret = gen8_init_scratch(&ppgtt->base);
1513 if (ret)
1514 return ret;
69876bed 1515
d7b2633d 1516 ppgtt->base.start = 0;
d7b2633d 1517 ppgtt->base.cleanup = gen8_ppgtt_cleanup;
5c5f6457 1518 ppgtt->base.allocate_va_range = gen8_alloc_va_range;
d7b2633d 1519 ppgtt->base.insert_entries = gen8_ppgtt_insert_entries;
c7e16f22 1520 ppgtt->base.clear_range = gen8_ppgtt_clear_range;
777dc5bb
DV
1521 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1522 ppgtt->base.bind_vma = ppgtt_bind_vma;
ea91e401 1523 ppgtt->debug_dump = gen8_dump_ppgtt;
d7b2633d 1524
762d9936
MT
1525 if (USES_FULL_48BIT_PPGTT(ppgtt->base.dev)) {
1526 ret = setup_px(ppgtt->base.dev, &ppgtt->pml4);
1527 if (ret)
1528 goto free_scratch;
6ac18502 1529
69ab76fd
MT
1530 gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);
1531
762d9936 1532 ppgtt->base.total = 1ULL << 48;
2dba3239 1533 ppgtt->switch_mm = gen8_48b_mm_switch;
762d9936 1534 } else {
25f50337 1535 ret = __pdp_init(ppgtt->base.dev, &ppgtt->pdp);
81ba8aef
MT
1536 if (ret)
1537 goto free_scratch;
1538
1539 ppgtt->base.total = 1ULL << 32;
2dba3239 1540 ppgtt->switch_mm = gen8_legacy_mm_switch;
762d9936
MT
1541 trace_i915_page_directory_pointer_entry_alloc(&ppgtt->base,
1542 0, 0,
1543 GEN8_PML4E_SHIFT);
331f38e7 1544
c033666a 1545 if (intel_vgpu_active(to_i915(ppgtt->base.dev))) {
331f38e7
ZL
1546 ret = gen8_preallocate_top_level_pdps(ppgtt);
1547 if (ret)
1548 goto free_scratch;
1549 }
81ba8aef 1550 }
6ac18502 1551
c033666a 1552 if (intel_vgpu_active(to_i915(ppgtt->base.dev)))
650da34c
ZL
1553 gen8_ppgtt_notify_vgt(ppgtt, true);
1554
d7b2633d 1555 return 0;
6ac18502
MT
1556
1557free_scratch:
1558 gen8_free_scratch(&ppgtt->base);
1559 return ret;
d7b2633d
MT
1560}
1561
87d60b63
BW
1562static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
1563{
87d60b63 1564 struct i915_address_space *vm = &ppgtt->base;
09942c65 1565 struct i915_page_table *unused;
07749ef3 1566 gen6_pte_t scratch_pte;
87d60b63 1567 uint32_t pd_entry;
731f74c5 1568 uint32_t pte, pde;
09942c65 1569 uint32_t start = ppgtt->base.start, length = ppgtt->base.total;
87d60b63 1570
8bcdd0f7 1571 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
79ab9370 1572 I915_CACHE_LLC, true, 0);
87d60b63 1573
731f74c5 1574 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
87d60b63 1575 u32 expected;
07749ef3 1576 gen6_pte_t *pt_vaddr;
567047be 1577 const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
09942c65 1578 pd_entry = readl(ppgtt->pd_addr + pde);
87d60b63
BW
1579 expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);
1580
1581 if (pd_entry != expected)
1582 seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
1583 pde,
1584 pd_entry,
1585 expected);
1586 seq_printf(m, "\tPDE: %x\n", pd_entry);
1587
d1c54acd
MK
1588 pt_vaddr = kmap_px(ppgtt->pd.page_table[pde]);
1589
07749ef3 1590 for (pte = 0; pte < GEN6_PTES; pte+=4) {
87d60b63 1591 unsigned long va =
07749ef3 1592 (pde * PAGE_SIZE * GEN6_PTES) +
87d60b63
BW
1593 (pte * PAGE_SIZE);
1594 int i;
1595 bool found = false;
1596 for (i = 0; i < 4; i++)
1597 if (pt_vaddr[pte + i] != scratch_pte)
1598 found = true;
1599 if (!found)
1600 continue;
1601
1602 seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
1603 for (i = 0; i < 4; i++) {
1604 if (pt_vaddr[pte + i] != scratch_pte)
1605 seq_printf(m, " %08x", pt_vaddr[pte + i]);
1606 else
1607 seq_puts(m, " SCRATCH ");
1608 }
1609 seq_puts(m, "\n");
1610 }
d1c54acd 1611 kunmap_px(ppgtt, pt_vaddr);
87d60b63
BW
1612 }
1613}
1614
678d96fb 1615/* Write pde (index) from the page directory @pd to the page table @pt */
ec565b3c
MT
1616static void gen6_write_pde(struct i915_page_directory *pd,
1617 const int pde, struct i915_page_table *pt)
6197349b 1618{
678d96fb
BW
1619 /* Caller needs to make sure the write completes if necessary */
1620 struct i915_hw_ppgtt *ppgtt =
1621 container_of(pd, struct i915_hw_ppgtt, pd);
1622 u32 pd_entry;
6197349b 1623
567047be 1624 pd_entry = GEN6_PDE_ADDR_ENCODE(px_dma(pt));
678d96fb 1625 pd_entry |= GEN6_PDE_VALID;
6197349b 1626
678d96fb
BW
1627 writel(pd_entry, ppgtt->pd_addr + pde);
1628}
6197349b 1629
678d96fb
BW
1630/* Write all the page tables found in the ppgtt structure to incrementing page
1631 * directories. */
1632static void gen6_write_page_range(struct drm_i915_private *dev_priv,
ec565b3c 1633 struct i915_page_directory *pd,
678d96fb
BW
1634 uint32_t start, uint32_t length)
1635{
72e96d64 1636 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ec565b3c 1637 struct i915_page_table *pt;
731f74c5 1638 uint32_t pde;
678d96fb 1639
731f74c5 1640 gen6_for_each_pde(pt, pd, start, length, pde)
678d96fb
BW
1641 gen6_write_pde(pd, pde, pt);
1642
1643 /* Make sure write is complete before other code can use this page
1644 * table. Also require for WC mapped PTEs */
72e96d64 1645 readl(ggtt->gsm);
3e302542
BW
1646}
1647
b4a74e3a 1648static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
3e302542 1649{
44159ddb 1650 BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
b4a74e3a 1651
44159ddb 1652 return (ppgtt->pd.base.ggtt_offset / 64) << 16;
b4a74e3a
BW
1653}
1654
90252e5c 1655static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1656 struct drm_i915_gem_request *req)
90252e5c 1657{
7e37f889 1658 struct intel_ring *ring = req->ring;
4a570db5 1659 struct intel_engine_cs *engine = req->engine;
90252e5c
BW
1660 int ret;
1661
90252e5c 1662 /* NB: TLBs must be flushed and invalidated before a switch */
7c9cf4e3 1663 ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
90252e5c
BW
1664 if (ret)
1665 return ret;
1666
5fb9de1a 1667 ret = intel_ring_begin(req, 6);
90252e5c
BW
1668 if (ret)
1669 return ret;
1670
b5321f30
CW
1671 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1672 intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
1673 intel_ring_emit(ring, PP_DIR_DCLV_2G);
1674 intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
1675 intel_ring_emit(ring, get_pd_offset(ppgtt));
1676 intel_ring_emit(ring, MI_NOOP);
1677 intel_ring_advance(ring);
90252e5c
BW
1678
1679 return 0;
1680}
1681
48a10389 1682static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1683 struct drm_i915_gem_request *req)
48a10389 1684{
7e37f889 1685 struct intel_ring *ring = req->ring;
4a570db5 1686 struct intel_engine_cs *engine = req->engine;
48a10389
BW
1687 int ret;
1688
48a10389 1689 /* NB: TLBs must be flushed and invalidated before a switch */
7c9cf4e3 1690 ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
48a10389
BW
1691 if (ret)
1692 return ret;
1693
5fb9de1a 1694 ret = intel_ring_begin(req, 6);
48a10389
BW
1695 if (ret)
1696 return ret;
1697
b5321f30
CW
1698 intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(2));
1699 intel_ring_emit_reg(ring, RING_PP_DIR_DCLV(engine));
1700 intel_ring_emit(ring, PP_DIR_DCLV_2G);
1701 intel_ring_emit_reg(ring, RING_PP_DIR_BASE(engine));
1702 intel_ring_emit(ring, get_pd_offset(ppgtt));
1703 intel_ring_emit(ring, MI_NOOP);
1704 intel_ring_advance(ring);
48a10389 1705
90252e5c 1706 /* XXX: RCS is the only one to auto invalidate the TLBs? */
e2f80391 1707 if (engine->id != RCS) {
7c9cf4e3 1708 ret = engine->emit_flush(req, EMIT_INVALIDATE | EMIT_FLUSH);
90252e5c
BW
1709 if (ret)
1710 return ret;
1711 }
1712
48a10389
BW
1713 return 0;
1714}
1715
eeb9488e 1716static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
e85b26dc 1717 struct drm_i915_gem_request *req)
eeb9488e 1718{
4a570db5 1719 struct intel_engine_cs *engine = req->engine;
8eb95204 1720 struct drm_i915_private *dev_priv = req->i915;
48a10389 1721
e2f80391
TU
1722 I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
1723 I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
eeb9488e
BW
1724 return 0;
1725}
1726
82460d97 1727static void gen8_ppgtt_enable(struct drm_device *dev)
eeb9488e 1728{
fac5e23e 1729 struct drm_i915_private *dev_priv = to_i915(dev);
e2f80391 1730 struct intel_engine_cs *engine;
3b3f1650 1731 enum intel_engine_id id;
3e302542 1732
3b3f1650 1733 for_each_engine(engine, dev_priv, id) {
2dba3239 1734 u32 four_level = USES_FULL_48BIT_PPGTT(dev) ? GEN8_GFX_PPGTT_48B : 0;
e2f80391 1735 I915_WRITE(RING_MODE_GEN7(engine),
2dba3239 1736 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
eeb9488e 1737 }
eeb9488e 1738}
6197349b 1739
82460d97 1740static void gen7_ppgtt_enable(struct drm_device *dev)
3e302542 1741{
fac5e23e 1742 struct drm_i915_private *dev_priv = to_i915(dev);
e2f80391 1743 struct intel_engine_cs *engine;
b4a74e3a 1744 uint32_t ecochk, ecobits;
3b3f1650 1745 enum intel_engine_id id;
6197349b 1746
b4a74e3a
BW
1747 ecobits = I915_READ(GAC_ECO_BITS);
1748 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);
a65c2fcd 1749
b4a74e3a 1750 ecochk = I915_READ(GAM_ECOCHK);
772c2a51 1751 if (IS_HASWELL(dev_priv)) {
b4a74e3a
BW
1752 ecochk |= ECOCHK_PPGTT_WB_HSW;
1753 } else {
1754 ecochk |= ECOCHK_PPGTT_LLC_IVB;
1755 ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
1756 }
1757 I915_WRITE(GAM_ECOCHK, ecochk);
a65c2fcd 1758
3b3f1650 1759 for_each_engine(engine, dev_priv, id) {
6197349b 1760 /* GFX_MODE is per-ring on gen7+ */
e2f80391 1761 I915_WRITE(RING_MODE_GEN7(engine),
b4a74e3a 1762 _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b 1763 }
b4a74e3a 1764}
6197349b 1765
82460d97 1766static void gen6_ppgtt_enable(struct drm_device *dev)
b4a74e3a 1767{
fac5e23e 1768 struct drm_i915_private *dev_priv = to_i915(dev);
b4a74e3a 1769 uint32_t ecochk, gab_ctl, ecobits;
a65c2fcd 1770
b4a74e3a
BW
1771 ecobits = I915_READ(GAC_ECO_BITS);
1772 I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
1773 ECOBITS_PPGTT_CACHE64B);
6197349b 1774
b4a74e3a
BW
1775 gab_ctl = I915_READ(GAB_CTL);
1776 I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);
1777
1778 ecochk = I915_READ(GAM_ECOCHK);
1779 I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);
1780
1781 I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
6197349b
BW
1782}
1783
1d2a314c 1784 /* PPGTT support for Sandybridge/Gen6 and later */
853ba5d2 1785static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
782f1495
BW
1786 uint64_t start,
1787 uint64_t length,
828c7908 1788 bool use_scratch)
1d2a314c 1789{
e5716f55 1790 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
07749ef3 1791 gen6_pte_t *pt_vaddr, scratch_pte;
782f1495
BW
1792 unsigned first_entry = start >> PAGE_SHIFT;
1793 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3
MT
1794 unsigned act_pt = first_entry / GEN6_PTES;
1795 unsigned first_pte = first_entry % GEN6_PTES;
7bddb01f 1796 unsigned last_pte, i;
1d2a314c 1797
8bcdd0f7 1798 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
c114f76a 1799 I915_CACHE_LLC, true, 0);
1d2a314c 1800
7bddb01f
DV
1801 while (num_entries) {
1802 last_pte = first_pte + num_entries;
07749ef3
MT
1803 if (last_pte > GEN6_PTES)
1804 last_pte = GEN6_PTES;
7bddb01f 1805
d1c54acd 1806 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
1d2a314c 1807
7bddb01f
DV
1808 for (i = first_pte; i < last_pte; i++)
1809 pt_vaddr[i] = scratch_pte;
1d2a314c 1810
d1c54acd 1811 kunmap_px(ppgtt, pt_vaddr);
1d2a314c 1812
7bddb01f
DV
1813 num_entries -= last_pte - first_pte;
1814 first_pte = 0;
a15326a5 1815 act_pt++;
7bddb01f 1816 }
1d2a314c
DV
1817}
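/* Worked example of the index math above (assuming GEN6_PTES is 1024, i.e. a
 * 4KiB page table of 4-byte PTEs, and PAGE_SHIFT is 12): start = 8MiB and
 * length = 64KiB give first_entry = 2048, so act_pt = 2, first_pte = 0 and
 * num_entries = 16; all sixteen PTEs fall in page table 2 and the loop
 * completes in a single pass.
 */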
1818
853ba5d2 1819static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
def886c3 1820 struct sg_table *pages,
782f1495 1821 uint64_t start,
24f3a8cf 1822 enum i915_cache_level cache_level, u32 flags)
def886c3 1823{
e5716f55 1824 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
782f1495 1825 unsigned first_entry = start >> PAGE_SHIFT;
07749ef3
MT
1826 unsigned act_pt = first_entry / GEN6_PTES;
1827 unsigned act_pte = first_entry % GEN6_PTES;
85d1225e
DG
1828 gen6_pte_t *pt_vaddr = NULL;
1829 struct sgt_iter sgt_iter;
1830 dma_addr_t addr;
6e995e23 1831
85d1225e 1832 for_each_sgt_dma(addr, sgt_iter, pages) {
cc79714f 1833 if (pt_vaddr == NULL)
d1c54acd 1834 pt_vaddr = kmap_px(ppgtt->pd.page_table[act_pt]);
6e995e23 1835
cc79714f 1836 pt_vaddr[act_pte] =
85d1225e 1837 vm->pte_encode(addr, cache_level, true, flags);
24f3a8cf 1838
07749ef3 1839 if (++act_pte == GEN6_PTES) {
d1c54acd 1840 kunmap_px(ppgtt, pt_vaddr);
cc79714f 1841 pt_vaddr = NULL;
a15326a5 1842 act_pt++;
6e995e23 1843 act_pte = 0;
def886c3 1844 }
def886c3 1845 }
85d1225e 1846
cc79714f 1847 if (pt_vaddr)
d1c54acd 1848 kunmap_px(ppgtt, pt_vaddr);
def886c3
DV
1849}
1850
678d96fb 1851static int gen6_alloc_va_range(struct i915_address_space *vm,
a05d80ee 1852 uint64_t start_in, uint64_t length_in)
678d96fb 1853{
4933d519
MT
1854 DECLARE_BITMAP(new_page_tables, I915_PDES);
1855 struct drm_device *dev = vm->dev;
72e96d64
JL
1856 struct drm_i915_private *dev_priv = to_i915(dev);
1857 struct i915_ggtt *ggtt = &dev_priv->ggtt;
e5716f55 1858 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
ec565b3c 1859 struct i915_page_table *pt;
a05d80ee 1860 uint32_t start, length, start_save, length_save;
731f74c5 1861 uint32_t pde;
4933d519
MT
1862 int ret;
1863
a05d80ee
MK
1864 if (WARN_ON(start_in + length_in > ppgtt->base.total))
1865 return -ENODEV;
1866
1867 start = start_save = start_in;
1868 length = length_save = length_in;
4933d519
MT
1869
1870 bitmap_zero(new_page_tables, I915_PDES);
1871
1872 /* The allocation is done in two stages so that we can bail out with
1873 * minimal amount of pain. The first stage finds new page tables that
1874 * need allocation. The second stage marks used PTEs within the page
1875 * tables.
1876 */
731f74c5 1877 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
79ab9370 1878 if (pt != vm->scratch_pt) {
4933d519
MT
1879 WARN_ON(bitmap_empty(pt->used_ptes, GEN6_PTES));
1880 continue;
1881 }
1882
1883 /* We've already allocated a page table */
1884 WARN_ON(!bitmap_empty(pt->used_ptes, GEN6_PTES));
1885
8a1ebd74 1886 pt = alloc_pt(dev);
4933d519
MT
1887 if (IS_ERR(pt)) {
1888 ret = PTR_ERR(pt);
1889 goto unwind_out;
1890 }
1891
1892 gen6_initialize_pt(vm, pt);
1893
1894 ppgtt->pd.page_table[pde] = pt;
966082c9 1895 __set_bit(pde, new_page_tables);
72744cb1 1896 trace_i915_page_table_entry_alloc(vm, pde, start, GEN6_PDE_SHIFT);
4933d519
MT
1897 }
1898
1899 start = start_save;
1900 length = length_save;
678d96fb 1901
731f74c5 1902 gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
678d96fb
BW
1903 DECLARE_BITMAP(tmp_bitmap, GEN6_PTES);
1904
1905 bitmap_zero(tmp_bitmap, GEN6_PTES);
1906 bitmap_set(tmp_bitmap, gen6_pte_index(start),
1907 gen6_pte_count(start, length));
1908
966082c9 1909 if (__test_and_clear_bit(pde, new_page_tables))
4933d519
MT
1910 gen6_write_pde(&ppgtt->pd, pde, pt);
1911
72744cb1
MT
1912 trace_i915_page_table_entry_map(vm, pde, pt,
1913 gen6_pte_index(start),
1914 gen6_pte_count(start, length),
1915 GEN6_PTES);
4933d519 1916 bitmap_or(pt->used_ptes, tmp_bitmap, pt->used_ptes,
678d96fb
BW
1917 GEN6_PTES);
1918 }
1919
4933d519
MT
1920 WARN_ON(!bitmap_empty(new_page_tables, I915_PDES));
1921
1922 /* Make sure write is complete before other code can use this page
1923 * table. Also required for WC mapped PTEs. */
72e96d64 1924 readl(ggtt->gsm);
4933d519 1925
563222a7 1926 mark_tlbs_dirty(ppgtt);
678d96fb 1927 return 0;
4933d519
MT
1928
1929unwind_out:
1930 for_each_set_bit(pde, new_page_tables, I915_PDES) {
ec565b3c 1931 struct i915_page_table *pt = ppgtt->pd.page_table[pde];
4933d519 1932
79ab9370 1933 ppgtt->pd.page_table[pde] = vm->scratch_pt;
a08e111a 1934 free_pt(vm->dev, pt);
4933d519
MT
1935 }
1936
1937 mark_tlbs_dirty(ppgtt);
1938 return ret;
678d96fb
BW
1939}
1940
8776f02b
MK
1941static int gen6_init_scratch(struct i915_address_space *vm)
1942{
1943 struct drm_device *dev = vm->dev;
8bcdd0f7 1944 int ret;
8776f02b 1945
bb8f9cff 1946 ret = setup_scratch_page(dev, &vm->scratch_page, I915_GFP_DMA);
8bcdd0f7
CW
1947 if (ret)
1948 return ret;
8776f02b
MK
1949
1950 vm->scratch_pt = alloc_pt(dev);
1951 if (IS_ERR(vm->scratch_pt)) {
8bcdd0f7 1952 cleanup_scratch_page(dev, &vm->scratch_page);
8776f02b
MK
1953 return PTR_ERR(vm->scratch_pt);
1954 }
1955
1956 gen6_initialize_pt(vm, vm->scratch_pt);
1957
1958 return 0;
1959}
1960
1961static void gen6_free_scratch(struct i915_address_space *vm)
1962{
1963 struct drm_device *dev = vm->dev;
1964
1965 free_pt(dev, vm->scratch_pt);
8bcdd0f7 1966 cleanup_scratch_page(dev, &vm->scratch_page);
8776f02b
MK
1967}
1968
061dd493 1969static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
a00d825d 1970{
e5716f55 1971 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
731f74c5
DG
1972 struct i915_page_directory *pd = &ppgtt->pd;
1973 struct drm_device *dev = vm->dev;
09942c65
MT
1974 struct i915_page_table *pt;
1975 uint32_t pde;
4933d519 1976
061dd493
DV
1977 drm_mm_remove_node(&ppgtt->node);
1978
731f74c5 1979 gen6_for_all_pdes(pt, pd, pde)
79ab9370 1980 if (pt != vm->scratch_pt)
731f74c5 1981 free_pt(dev, pt);
06fda602 1982
8776f02b 1983 gen6_free_scratch(vm);
3440d265
DV
1984}
1985
b146520f 1986static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
3440d265 1987{
8776f02b 1988 struct i915_address_space *vm = &ppgtt->base;
853ba5d2 1989 struct drm_device *dev = ppgtt->base.dev;
72e96d64
JL
1990 struct drm_i915_private *dev_priv = to_i915(dev);
1991 struct i915_ggtt *ggtt = &dev_priv->ggtt;
e3cc1995 1992 bool retried = false;
b146520f 1993 int ret;
1d2a314c 1994
c8d4c0d6
BW
1995 /* PPGTT PDEs reside in the GGTT and consist of 512 entries. The
1996 * allocator works in address space sizes, so it's multiplied by page
1997 * size. We allocate at the top of the GTT to avoid fragmentation.
1998 */
72e96d64 1999 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
4933d519 2000
8776f02b
MK
2001 ret = gen6_init_scratch(vm);
2002 if (ret)
2003 return ret;
4933d519 2004
e3cc1995 2005alloc:
72e96d64 2006 ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
c8d4c0d6
BW
2007 &ppgtt->node, GEN6_PD_SIZE,
2008 GEN6_PD_ALIGN, 0,
72e96d64 2009 0, ggtt->base.total,
3e8b5ae9 2010 DRM_MM_TOPDOWN);
e3cc1995 2011 if (ret == -ENOSPC && !retried) {
e522ac23 2012 ret = i915_gem_evict_something(&ggtt->base,
e3cc1995 2013 GEN6_PD_SIZE, GEN6_PD_ALIGN,
d23db88c 2014 I915_CACHE_NONE,
72e96d64 2015 0, ggtt->base.total,
d23db88c 2016 0);
e3cc1995 2017 if (ret)
678d96fb 2018 goto err_out;
e3cc1995
BW
2019
2020 retried = true;
2021 goto alloc;
2022 }
c8d4c0d6 2023
c8c26622 2024 if (ret)
678d96fb
BW
2025 goto err_out;
2026
c8c26622 2027
72e96d64 2028 if (ppgtt->node.start < ggtt->mappable_end)
c8d4c0d6 2029 DRM_DEBUG("Forced to use aperture for PDEs\n");
1d2a314c 2030
c8c26622 2031 return 0;
678d96fb
BW
2032
2033err_out:
8776f02b 2034 gen6_free_scratch(vm);
678d96fb 2035 return ret;
b146520f
BW
2036}
2037
b146520f
BW
2038static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
2039{
2f2cf682 2040 return gen6_ppgtt_allocate_page_directories(ppgtt);
4933d519 2041}
06dc68d6 2042
4933d519
MT
2043static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
2044 uint64_t start, uint64_t length)
2045{
ec565b3c 2046 struct i915_page_table *unused;
731f74c5 2047 uint32_t pde;
1d2a314c 2048
731f74c5 2049 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
79ab9370 2050 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
b146520f
BW
2051}
2052
5c5f6457 2053static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
b146520f
BW
2054{
2055 struct drm_device *dev = ppgtt->base.dev;
72e96d64
JL
2056 struct drm_i915_private *dev_priv = to_i915(dev);
2057 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f
BW
2058 int ret;
2059
72e96d64 2060 ppgtt->base.pte_encode = ggtt->base.pte_encode;
8eb95204 2061 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev))
b146520f 2062 ppgtt->switch_mm = gen6_mm_switch;
772c2a51 2063 else if (IS_HASWELL(dev_priv))
b146520f 2064 ppgtt->switch_mm = hsw_mm_switch;
8eb95204 2065 else if (IS_GEN7(dev))
b146520f 2066 ppgtt->switch_mm = gen7_mm_switch;
8eb95204 2067 else
b146520f
BW
2068 BUG();
2069
2070 ret = gen6_ppgtt_alloc(ppgtt);
2071 if (ret)
2072 return ret;
2073
5c5f6457 2074 ppgtt->base.allocate_va_range = gen6_alloc_va_range;
b146520f
BW
2075 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
2076 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
777dc5bb
DV
2077 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
2078 ppgtt->base.bind_vma = ppgtt_bind_vma;
b146520f 2079 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
b146520f 2080 ppgtt->base.start = 0;
09942c65 2081 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
87d60b63 2082 ppgtt->debug_dump = gen6_dump_ppgtt;
1d2a314c 2083
44159ddb 2084 ppgtt->pd.base.ggtt_offset =
07749ef3 2085 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1d2a314c 2086
72e96d64 2087 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
44159ddb 2088 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
678d96fb 2089
5c5f6457 2090 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
1d2a314c 2091
678d96fb
BW
2092 gen6_write_page_range(dev_priv, &ppgtt->pd, 0, ppgtt->base.total);
2093
440fd528 2094 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
b146520f
BW
2095 ppgtt->node.size >> 20,
2096 ppgtt->node.start / PAGE_SIZE);
3440d265 2097
fa76da34 2098 DRM_DEBUG("Adding PPGTT at offset %x\n",
44159ddb 2099 ppgtt->pd.base.ggtt_offset << 10);
fa76da34 2100
b146520f 2101 return 0;
3440d265
DV
2102}
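/* Example of the offsets computed above (a sketch, assuming 4KiB pages and
 * 4-byte gen6 PTEs): if the PD node lands at node.start = 0x3ff80000 in the
 * GGTT, pd.base.ggtt_offset becomes (0x3ff80000 / 4096) * 4 = 0xffe00, and
 * pd_addr points 0xffe00 / 4 = 0x3ff80 entries into the gsm mapping, i.e.
 * at the GGTT PTE covering the start of the page directory.
 */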
2103
2bfa996e
CW
2104static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
2105 struct drm_i915_private *dev_priv)
3440d265 2106{
2bfa996e 2107 ppgtt->base.dev = &dev_priv->drm;
3440d265 2108
2bfa996e 2109 if (INTEL_INFO(dev_priv)->gen < 8)
5c5f6457 2110 return gen6_ppgtt_init(ppgtt);
3ed124b2 2111 else
d7b2633d 2112 return gen8_ppgtt_init(ppgtt);
fa76da34 2113}
c114f76a 2114
a2cad9df
MW
2115static void i915_address_space_init(struct i915_address_space *vm,
2116 struct drm_i915_private *dev_priv)
2117{
2118 drm_mm_init(&vm->mm, vm->start, vm->total);
a2cad9df
MW
2119 INIT_LIST_HEAD(&vm->active_list);
2120 INIT_LIST_HEAD(&vm->inactive_list);
50e046b6 2121 INIT_LIST_HEAD(&vm->unbound_list);
a2cad9df
MW
2122 list_add_tail(&vm->global_link, &dev_priv->vm_list);
2123}
2124
d5165ebd
TG
2125static void gtt_write_workarounds(struct drm_device *dev)
2126{
fac5e23e 2127 struct drm_i915_private *dev_priv = to_i915(dev);
d5165ebd
TG
2128
2129 /* This function is for GTT related workarounds. It is called on driver
2130 * load and after a GPU reset, so workarounds placed here are re-applied
2131 * even when a GPU reset clobbers them.
2132 */
2133 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt */
8652744b 2134 if (IS_BROADWELL(dev_priv))
d5165ebd
TG
2135 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
2136 else if (IS_CHERRYVIEW(dev))
2137 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
d9486e65 2138 else if (IS_SKYLAKE(dev_priv))
d5165ebd
TG
2139 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
2140 else if (IS_BROXTON(dev))
2141 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
2142}
2143
2bfa996e
CW
2144static int i915_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
2145 struct drm_i915_private *dev_priv,
2146 struct drm_i915_file_private *file_priv)
fa76da34 2147{
2bfa996e 2148 int ret;
3ed124b2 2149
2bfa996e 2150 ret = __hw_ppgtt_init(ppgtt, dev_priv);
fa76da34 2151 if (ret == 0) {
c7c48dfd 2152 kref_init(&ppgtt->ref);
a2cad9df 2153 i915_address_space_init(&ppgtt->base, dev_priv);
2bfa996e 2154 ppgtt->base.file = file_priv;
93bd8649 2155 }
1d2a314c
DV
2156
2157 return ret;
2158}
2159
82460d97
DV
2160int i915_ppgtt_init_hw(struct drm_device *dev)
2161{
d5165ebd
TG
2162 gtt_write_workarounds(dev);
2163
671b5013
TD
2164 /* In the case of execlists, PPGTT is enabled by the context descriptor
2165 * and the PDPs are contained within the context itself. We don't
2166 * need to do anything here. */
2167 if (i915.enable_execlists)
2168 return 0;
2169
82460d97
DV
2170 if (!USES_PPGTT(dev))
2171 return 0;
2172
2173 if (IS_GEN6(dev))
2174 gen6_ppgtt_enable(dev);
2175 else if (IS_GEN7(dev))
2176 gen7_ppgtt_enable(dev);
2177 else if (INTEL_INFO(dev)->gen >= 8)
2178 gen8_ppgtt_enable(dev);
2179 else
5f77eeb0 2180 MISSING_CASE(INTEL_INFO(dev)->gen);
82460d97 2181
4ad2fd88
JH
2182 return 0;
2183}
1d2a314c 2184
4d884705 2185struct i915_hw_ppgtt *
2bfa996e
CW
2186i915_ppgtt_create(struct drm_i915_private *dev_priv,
2187 struct drm_i915_file_private *fpriv)
4d884705
DV
2188{
2189 struct i915_hw_ppgtt *ppgtt;
2190 int ret;
2191
2192 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
2193 if (!ppgtt)
2194 return ERR_PTR(-ENOMEM);
2195
2bfa996e 2196 ret = i915_ppgtt_init(ppgtt, dev_priv, fpriv);
4d884705
DV
2197 if (ret) {
2198 kfree(ppgtt);
2199 return ERR_PTR(ret);
2200 }
2201
198c974d
DCS
2202 trace_i915_ppgtt_create(&ppgtt->base);
2203
4d884705
DV
2204 return ppgtt;
2205}
2206
ee960be7
DV
2207void i915_ppgtt_release(struct kref *kref)
2208{
2209 struct i915_hw_ppgtt *ppgtt =
2210 container_of(kref, struct i915_hw_ppgtt, ref);
2211
198c974d
DCS
2212 trace_i915_ppgtt_release(&ppgtt->base);
2213
50e046b6 2214 /* vmas should already be unbound and destroyed */
ee960be7
DV
2215 WARN_ON(!list_empty(&ppgtt->base.active_list));
2216 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
50e046b6 2217 WARN_ON(!list_empty(&ppgtt->base.unbound_list));
ee960be7 2218
19dd120c
DV
2219 list_del(&ppgtt->base.global_link);
2220 drm_mm_takedown(&ppgtt->base.mm);
2221
ee960be7
DV
2222 ppgtt->base.cleanup(&ppgtt->base);
2223 kfree(ppgtt);
2224}
1d2a314c 2225
a81cc00c
BW
2226 /* Certain Gen5 chipsets require idling the GPU before
2227 * unmapping anything from the GTT when VT-d is enabled.
2228 */
97d6d7ab 2229static bool needs_idle_maps(struct drm_i915_private *dev_priv)
a81cc00c
BW
2230{
2231#ifdef CONFIG_INTEL_IOMMU
2232 /* Query intel_iommu to see if we need the workaround. Presumably that
2233 * was loaded first.
2234 */
97d6d7ab 2235 if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
a81cc00c
BW
2236 return true;
2237#endif
2238 return false;
2239}
2240
dc97997a 2241void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
828c7908 2242{
e2f80391 2243 struct intel_engine_cs *engine;
3b3f1650 2244 enum intel_engine_id id;
828c7908 2245
dc97997a 2246 if (INTEL_INFO(dev_priv)->gen < 6)
828c7908
BW
2247 return;
2248
3b3f1650 2249 for_each_engine(engine, dev_priv, id) {
828c7908 2250 u32 fault_reg;
e2f80391 2251 fault_reg = I915_READ(RING_FAULT_REG(engine));
828c7908
BW
2252 if (fault_reg & RING_FAULT_VALID) {
2253 DRM_DEBUG_DRIVER("Unexpected fault\n"
59a5d290 2254 "\tAddr: 0x%08lx\n"
828c7908
BW
2255 "\tAddress space: %s\n"
2256 "\tSource ID: %d\n"
2257 "\tType: %d\n",
2258 fault_reg & PAGE_MASK,
2259 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2260 RING_FAULT_SRCID(fault_reg),
2261 RING_FAULT_FAULT_TYPE(fault_reg));
e2f80391 2262 I915_WRITE(RING_FAULT_REG(engine),
828c7908
BW
2263 fault_reg & ~RING_FAULT_VALID);
2264 }
2265 }
3b3f1650
AG
2266
2267 /* Engine specific init may not have been done till this point. */
2268 if (dev_priv->engine[RCS])
2269 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
828c7908
BW
2270}
2271
91e56499
CW
2272static void i915_ggtt_flush(struct drm_i915_private *dev_priv)
2273{
2d1fe073 2274 if (INTEL_INFO(dev_priv)->gen < 6) {
91e56499
CW
2275 intel_gtt_chipset_flush();
2276 } else {
2277 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2278 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2279 }
2280}
2281
828c7908
BW
2282void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
2283{
72e96d64
JL
2284 struct drm_i915_private *dev_priv = to_i915(dev);
2285 struct i915_ggtt *ggtt = &dev_priv->ggtt;
828c7908
BW
2286
2287 /* Don't bother messing with faults pre GEN6 as we have little
2288 * documentation supporting that it's a good idea.
2289 */
2290 if (INTEL_INFO(dev)->gen < 6)
2291 return;
2292
dc97997a 2293 i915_check_and_clear_faults(dev_priv);
828c7908 2294
72e96d64
JL
2295 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
2296 true);
91e56499
CW
2297
2298 i915_ggtt_flush(dev_priv);
828c7908
BW
2299}
2300
74163907 2301int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
7c2e6fdf 2302{
9da3da66
CW
2303 if (!dma_map_sg(&obj->base.dev->pdev->dev,
2304 obj->pages->sgl, obj->pages->nents,
2305 PCI_DMA_BIDIRECTIONAL))
2306 return -ENOSPC;
2307
2308 return 0;
7c2e6fdf
DV
2309}
2310
2c642b07 2311static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
94ec8f61 2312{
94ec8f61 2313 writeq(pte, addr);
94ec8f61
BW
2314}
2315
d6473f56
CW
2316static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2317 dma_addr_t addr,
2318 uint64_t offset,
2319 enum i915_cache_level level,
2320 u32 unused)
2321{
2322 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2323 gen8_pte_t __iomem *pte =
2324 (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
2325 (offset >> PAGE_SHIFT);
2326 int rpm_atomic_seq;
2327
2328 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2329
2330 gen8_set_pte(pte, gen8_pte_encode(addr, level, true));
2331
2332 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2333 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2334
2335 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2336}
2337
94ec8f61
BW
2338static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2339 struct sg_table *st,
782f1495 2340 uint64_t start,
24f3a8cf 2341 enum i915_cache_level level, u32 unused)
94ec8f61 2342{
72e96d64 2343 struct drm_i915_private *dev_priv = to_i915(vm->dev);
ce7fda2e 2344 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
85d1225e
DG
2345 struct sgt_iter sgt_iter;
2346 gen8_pte_t __iomem *gtt_entries;
2347 gen8_pte_t gtt_entry;
2348 dma_addr_t addr;
be69459a 2349 int rpm_atomic_seq;
85d1225e 2350 int i = 0;
be69459a
ID
2351
2352 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
94ec8f61 2353
85d1225e
DG
2354 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
2355
2356 for_each_sgt_dma(addr, sgt_iter, st) {
2357 gtt_entry = gen8_pte_encode(addr, level, true);
2358 gen8_set_pte(&gtt_entries[i++], gtt_entry);
94ec8f61
BW
2359 }
2360
2361 /*
2362 * XXX: This serves as a posting read to make sure that the PTE has
2363 * actually been updated. There is some concern that, even though
2364 * registers and PTEs are within the same BAR, they are potentially
2365 * subject to NUMA access patterns. Therefore, even with the way we assume
2366 * hardware should work, we must keep this posting read for paranoia.
2367 */
2368 if (i != 0)
85d1225e 2369 WARN_ON(readq(&gtt_entries[i-1]) != gtt_entry);
94ec8f61 2370
94ec8f61
BW
2371 /* This next bit makes the above posting read even more important. We
2372 * want to flush the TLBs only after we're certain all the PTE updates
2373 * have finished.
2374 */
2375 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2376 POSTING_READ(GFX_FLSH_CNTL_GEN6);
be69459a
ID
2377
2378 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
94ec8f61
BW
2379}
2380
c140330b
CW
2381struct insert_entries {
2382 struct i915_address_space *vm;
2383 struct sg_table *st;
2384 uint64_t start;
2385 enum i915_cache_level level;
2386 u32 flags;
2387};
2388
2389static int gen8_ggtt_insert_entries__cb(void *_arg)
2390{
2391 struct insert_entries *arg = _arg;
2392 gen8_ggtt_insert_entries(arg->vm, arg->st,
2393 arg->start, arg->level, arg->flags);
2394 return 0;
2395}
2396
2397static void gen8_ggtt_insert_entries__BKL(struct i915_address_space *vm,
2398 struct sg_table *st,
2399 uint64_t start,
2400 enum i915_cache_level level,
2401 u32 flags)
2402{
2403 struct insert_entries arg = { vm, st, start, level, flags };
2404 stop_machine(gen8_ggtt_insert_entries__cb, &arg, NULL);
2405}
2406
d6473f56
CW
2407static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2408 dma_addr_t addr,
2409 uint64_t offset,
2410 enum i915_cache_level level,
2411 u32 flags)
2412{
2413 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2414 gen6_pte_t __iomem *pte =
2415 (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
2416 (offset >> PAGE_SHIFT);
2417 int rpm_atomic_seq;
2418
2419 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2420
2421 iowrite32(vm->pte_encode(addr, level, true, flags), pte);
2422
2423 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2424 POSTING_READ(GFX_FLSH_CNTL_GEN6);
2425
2426 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2427}
2428
e76e9aeb
BW
2429/*
2430 * Binds an object into the global gtt with the specified cache level. The object
2431 * will be accessible to the GPU via commands whose operands reference offsets
2432 * within the global GTT as well as accessible by the GPU through the GMADR
2433 * mapped BAR (dev_priv->mm.gtt->gtt).
2434 */
853ba5d2 2435static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
7faf1ab2 2436 struct sg_table *st,
782f1495 2437 uint64_t start,
24f3a8cf 2438 enum i915_cache_level level, u32 flags)
e76e9aeb 2439{
72e96d64 2440 struct drm_i915_private *dev_priv = to_i915(vm->dev);
ce7fda2e 2441 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
85d1225e
DG
2442 struct sgt_iter sgt_iter;
2443 gen6_pte_t __iomem *gtt_entries;
2444 gen6_pte_t gtt_entry;
2445 dma_addr_t addr;
be69459a 2446 int rpm_atomic_seq;
85d1225e 2447 int i = 0;
be69459a
ID
2448
2449 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
e76e9aeb 2450
85d1225e
DG
2451 gtt_entries = (gen6_pte_t __iomem *)ggtt->gsm + (start >> PAGE_SHIFT);
2452
2453 for_each_sgt_dma(addr, sgt_iter, st) {
2454 gtt_entry = vm->pte_encode(addr, level, true, flags);
2455 iowrite32(gtt_entry, &gtt_entries[i++]);
e76e9aeb
BW
2456 }
2457
e76e9aeb
BW
2458 /* XXX: This serves as a posting read to make sure that the PTE has
2459 * actually been updated. There is some concern that, even though
2460 * registers and PTEs are within the same BAR, they are potentially
2461 * subject to NUMA access patterns. Therefore, even with the way we assume
2462 * hardware should work, we must keep this posting read for paranoia.
2463 */
85d1225e
DG
2464 if (i != 0)
2465 WARN_ON(readl(&gtt_entries[i-1]) != gtt_entry);
0f9b91c7
BW
2466
2467 /* This next bit makes the above posting read even more important. We
2468 * want to flush the TLBs only after we're certain all the PTE updates
2469 * have finished.
2470 */
2471 I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
2472 POSTING_READ(GFX_FLSH_CNTL_GEN6);
be69459a
ID
2473
2474 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
e76e9aeb
BW
2475}
2476
f7770bfd
CW
2477static void nop_clear_range(struct i915_address_space *vm,
2478 uint64_t start,
2479 uint64_t length,
2480 bool use_scratch)
2481{
2482}
2483
94ec8f61 2484static void gen8_ggtt_clear_range(struct i915_address_space *vm,
782f1495
BW
2485 uint64_t start,
2486 uint64_t length,
94ec8f61
BW
2487 bool use_scratch)
2488{
72e96d64 2489 struct drm_i915_private *dev_priv = to_i915(vm->dev);
ce7fda2e 2490 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2491 unsigned first_entry = start >> PAGE_SHIFT;
2492 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2493 gen8_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2494 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2495 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
94ec8f61 2496 int i;
be69459a
ID
2497 int rpm_atomic_seq;
2498
2499 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
94ec8f61
BW
2500
2501 if (WARN(num_entries > max_entries,
2502 "First entry = %d; Num entries = %d (max=%d)\n",
2503 first_entry, num_entries, max_entries))
2504 num_entries = max_entries;
2505
8bcdd0f7 2506 scratch_pte = gen8_pte_encode(vm->scratch_page.daddr,
94ec8f61
BW
2507 I915_CACHE_LLC,
2508 use_scratch);
2509 for (i = 0; i < num_entries; i++)
2510 gen8_set_pte(&gtt_base[i], scratch_pte);
2511 readl(gtt_base);
be69459a
ID
2512
2513 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
94ec8f61
BW
2514}
2515
853ba5d2 2516static void gen6_ggtt_clear_range(struct i915_address_space *vm,
782f1495
BW
2517 uint64_t start,
2518 uint64_t length,
828c7908 2519 bool use_scratch)
7faf1ab2 2520{
72e96d64 2521 struct drm_i915_private *dev_priv = to_i915(vm->dev);
ce7fda2e 2522 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2523 unsigned first_entry = start >> PAGE_SHIFT;
2524 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2525 gen6_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2526 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2527 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
7faf1ab2 2528 int i;
be69459a
ID
2529 int rpm_atomic_seq;
2530
2531 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
7faf1ab2
DV
2532
2533 if (WARN(num_entries > max_entries,
2534 "First entry = %d; Num entries = %d (max=%d)\n",
2535 first_entry, num_entries, max_entries))
2536 num_entries = max_entries;
2537
8bcdd0f7 2538 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
c114f76a 2539 I915_CACHE_LLC, use_scratch, 0);
828c7908 2540
7faf1ab2
DV
2541 for (i = 0; i < num_entries; i++)
2542 iowrite32(scratch_pte, &gtt_base[i]);
2543 readl(gtt_base);
be69459a
ID
2544
2545 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
7faf1ab2
DV
2546}
2547
d6473f56
CW
2548static void i915_ggtt_insert_page(struct i915_address_space *vm,
2549 dma_addr_t addr,
2550 uint64_t offset,
2551 enum i915_cache_level cache_level,
2552 u32 unused)
2553{
2554 struct drm_i915_private *dev_priv = to_i915(vm->dev);
2555 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2556 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2557 int rpm_atomic_seq;
2558
2559 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2560
2561 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
2562
2563 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2564}
2565
d369d2d9
DV
2566static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2567 struct sg_table *pages,
2568 uint64_t start,
2569 enum i915_cache_level cache_level, u32 unused)
7faf1ab2 2570{
fac5e23e 2571 struct drm_i915_private *dev_priv = to_i915(vm->dev);
7faf1ab2
DV
2572 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2573 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
be69459a
ID
2574 int rpm_atomic_seq;
2575
2576 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
7faf1ab2 2577
d369d2d9 2578 intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
0875546c 2579
be69459a
ID
2580 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
2581
7faf1ab2
DV
2582}
2583
853ba5d2 2584static void i915_ggtt_clear_range(struct i915_address_space *vm,
782f1495
BW
2585 uint64_t start,
2586 uint64_t length,
828c7908 2587 bool unused)
7faf1ab2 2588{
fac5e23e 2589 struct drm_i915_private *dev_priv = to_i915(vm->dev);
782f1495
BW
2590 unsigned first_entry = start >> PAGE_SHIFT;
2591 unsigned num_entries = length >> PAGE_SHIFT;
be69459a
ID
2592 int rpm_atomic_seq;
2593
2594 rpm_atomic_seq = assert_rpm_atomic_begin(dev_priv);
2595
7faf1ab2 2596 intel_gtt_clear_range(first_entry, num_entries);
be69459a
ID
2597
2598 assert_rpm_atomic_end(dev_priv, rpm_atomic_seq);
7faf1ab2
DV
2599}
2600
70b9f6f8
DV
2601static int ggtt_bind_vma(struct i915_vma *vma,
2602 enum i915_cache_level cache_level,
2603 u32 flags)
0a878716
DV
2604{
2605 struct drm_i915_gem_object *obj = vma->obj;
2606 u32 pte_flags = 0;
2607 int ret;
2608
2609 ret = i915_get_ggtt_vma_pages(vma);
2610 if (ret)
2611 return ret;
2612
2613 /* Currently applicable only to VLV */
2614 if (obj->gt_ro)
2615 pte_flags |= PTE_READ_ONLY;
2616
247177dd 2617 vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
0a878716
DV
2618 cache_level, pte_flags);
2619
2620 /*
2621 * Without aliasing PPGTT there's no difference between
2622 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2623 * upgrade to both bound if we bind either to avoid double-binding.
2624 */
3272db53 2625 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
0a878716
DV
2626
2627 return 0;
2628}
2629
2630static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2631 enum i915_cache_level cache_level,
2632 u32 flags)
d5bd1449 2633{
321d178e 2634 u32 pte_flags;
70b9f6f8
DV
2635 int ret;
2636
2637 ret = i915_get_ggtt_vma_pages(vma);
2638 if (ret)
2639 return ret;
7faf1ab2 2640
24f3a8cf 2641 /* Currently applicable only to VLV */
321d178e
CW
2642 pte_flags = 0;
2643 if (vma->obj->gt_ro)
f329f5f6 2644 pte_flags |= PTE_READ_ONLY;
24f3a8cf 2645
ec7adb6e 2646
3272db53 2647 if (flags & I915_VMA_GLOBAL_BIND) {
321d178e 2648 vma->vm->insert_entries(vma->vm,
247177dd 2649 vma->pages, vma->node.start,
0875546c 2650 cache_level, pte_flags);
6f65e29a 2651 }
d5bd1449 2652
3272db53 2653 if (flags & I915_VMA_LOCAL_BIND) {
321d178e
CW
2654 struct i915_hw_ppgtt *appgtt =
2655 to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
2656 appgtt->base.insert_entries(&appgtt->base,
247177dd 2657 vma->pages, vma->node.start,
f329f5f6 2658 cache_level, pte_flags);
6f65e29a 2659 }
70b9f6f8
DV
2660
2661 return 0;
d5bd1449
CW
2662}
2663
6f65e29a 2664static void ggtt_unbind_vma(struct i915_vma *vma)
74163907 2665{
de180033
CW
2666 struct i915_hw_ppgtt *appgtt = to_i915(vma->vm->dev)->mm.aliasing_ppgtt;
2667 const u64 size = min(vma->size, vma->node.size);
6f65e29a 2668
3272db53 2669 if (vma->flags & I915_VMA_GLOBAL_BIND)
782f1495 2670 vma->vm->clear_range(vma->vm,
de180033 2671 vma->node.start, size,
6f65e29a 2672 true);
06615ee5 2673
3272db53 2674 if (vma->flags & I915_VMA_LOCAL_BIND && appgtt)
6f65e29a 2675 appgtt->base.clear_range(&appgtt->base,
de180033 2676 vma->node.start, size,
6f65e29a 2677 true);
74163907
DV
2678}
2679
2680void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
7c2e6fdf 2681{
52a05c30
DW
2682 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2683 struct device *kdev = &dev_priv->drm.pdev->dev;
307dc25b 2684 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5c042287 2685
307dc25b 2686 if (unlikely(ggtt->do_idle_maps)) {
22dd3bb9 2687 if (i915_gem_wait_for_idle(dev_priv, I915_WAIT_LOCKED)) {
307dc25b
CW
2688 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2689 /* Wait a bit, in hopes it avoids the hang */
2690 udelay(10);
2691 }
2692 }
5c042287 2693
52a05c30 2694 dma_unmap_sg(kdev, obj->pages->sgl, obj->pages->nents,
5ec5b516 2695 PCI_DMA_BIDIRECTIONAL);
7c2e6fdf 2696}
644ec02b 2697
42d6ab48
CW
2698static void i915_gtt_color_adjust(struct drm_mm_node *node,
2699 unsigned long color,
440fd528
TR
2700 u64 *start,
2701 u64 *end)
42d6ab48
CW
2702{
2703 if (node->color != color)
2704 *start += 4096;
2705
2a1d7752
CW
2706 node = list_first_entry_or_null(&node->node_list,
2707 struct drm_mm_node,
2708 node_list);
2709 if (node && node->allocated && node->color != color)
2710 *end -= 4096;
42d6ab48 2711}
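/* For illustration (based solely on the +/- 4096 adjustments above): when a
 * node of one cache colour would sit next to a node of a different colour,
 * the usable range is shrunk by one page on that side, so differently
 * coloured allocations never end up immediately adjacent in the GGTT.
 */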
fbe5d36e 2712
f6b9d5ca 2713int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
644ec02b 2714{
e78891ca
BW
2715 /* Let GEM manage all of the aperture.
2716 *
2717 * However, leave one page at the end still bound to the scratch page.
2718 * There are a number of places where the hardware apparently prefetches
2719 * past the end of the object, and we've seen multiple hangs with the
2720 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2721 * aperture. One page should be enough to keep any prefetching inside
2722 * of the aperture.
2723 */
72e96d64 2724 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ed2f3452 2725 unsigned long hole_start, hole_end;
95374d75 2726 struct i915_hw_ppgtt *ppgtt;
f6b9d5ca 2727 struct drm_mm_node *entry;
fa76da34 2728 int ret;
644ec02b 2729
b02d22a3
ZW
2730 ret = intel_vgt_balloon(dev_priv);
2731 if (ret)
2732 return ret;
5dda8fa3 2733
95374d75
CW
2734 /* Reserve a mappable slot for our lockless error capture */
2735 ret = drm_mm_insert_node_in_range_generic(&ggtt->base.mm,
2736 &ggtt->error_capture,
2737 4096, 0, -1,
2738 0, ggtt->mappable_end,
2739 0, 0);
2740 if (ret)
2741 return ret;
2742
ed2f3452 2743 /* Clear any non-preallocated blocks */
72e96d64 2744 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
ed2f3452
CW
2745 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2746 hole_start, hole_end);
72e96d64 2747 ggtt->base.clear_range(&ggtt->base, hole_start,
782f1495 2748 hole_end - hole_start, true);
ed2f3452
CW
2749 }
2750
2751 /* And finally clear the reserved guard page */
f6b9d5ca
CW
2752 ggtt->base.clear_range(&ggtt->base,
2753 ggtt->base.total - PAGE_SIZE, PAGE_SIZE,
2754 true);
6c5566a8 2755
97d6d7ab 2756 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
fa76da34 2757 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
95374d75
CW
2758 if (!ppgtt) {
2759 ret = -ENOMEM;
2760 goto err;
2761 }
fa76da34 2762
2bfa996e 2763 ret = __hw_ppgtt_init(ppgtt, dev_priv);
95374d75
CW
2764 if (ret)
2765 goto err_ppgtt;
5c5f6457 2766
95374d75 2767 if (ppgtt->base.allocate_va_range) {
5c5f6457
DV
2768 ret = ppgtt->base.allocate_va_range(&ppgtt->base, 0,
2769 ppgtt->base.total);
95374d75
CW
2770 if (ret)
2771 goto err_ppgtt_cleanup;
4933d519 2772 }
fa76da34 2773
5c5f6457
DV
2774 ppgtt->base.clear_range(&ppgtt->base,
2775 ppgtt->base.start,
2776 ppgtt->base.total,
2777 true);
2778
fa76da34 2779 dev_priv->mm.aliasing_ppgtt = ppgtt;
72e96d64
JL
2780 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2781 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
fa76da34
DV
2782 }
2783
6c5566a8 2784 return 0;
95374d75
CW
2785
2786err_ppgtt_cleanup:
2787 ppgtt->base.cleanup(&ppgtt->base);
2788err_ppgtt:
2789 kfree(ppgtt);
2790err:
2791 drm_mm_remove_node(&ggtt->error_capture);
2792 return ret;
e76e9aeb
BW
2793}
2794
d85489d3
JL
2795/**
2796 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
97d6d7ab 2797 * @dev_priv: i915 device
d85489d3 2798 */
97d6d7ab 2799void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
90d0a0e8 2800{
72e96d64 2801 struct i915_ggtt *ggtt = &dev_priv->ggtt;
90d0a0e8 2802
70e32544
DV
2803 if (dev_priv->mm.aliasing_ppgtt) {
2804 struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
70e32544 2805 ppgtt->base.cleanup(&ppgtt->base);
cb7f2760 2806 kfree(ppgtt);
70e32544
DV
2807 }
2808
97d6d7ab 2809 i915_gem_cleanup_stolen(&dev_priv->drm);
a4eba47b 2810
95374d75
CW
2811 if (drm_mm_node_allocated(&ggtt->error_capture))
2812 drm_mm_remove_node(&ggtt->error_capture);
2813
72e96d64 2814 if (drm_mm_initialized(&ggtt->base.mm)) {
b02d22a3 2815 intel_vgt_deballoon(dev_priv);
5dda8fa3 2816
72e96d64
JL
2817 drm_mm_takedown(&ggtt->base.mm);
2818 list_del(&ggtt->base.global_link);
90d0a0e8
DV
2819 }
2820
72e96d64 2821 ggtt->base.cleanup(&ggtt->base);
f6b9d5ca
CW
2822
2823 arch_phys_wc_del(ggtt->mtrr);
f7bbe788 2824 io_mapping_fini(&ggtt->mappable);
90d0a0e8 2825}
70e32544 2826
2c642b07 2827static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2828{
2829 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2830 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2831 return snb_gmch_ctl << 20;
2832}
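/* Worked example (assuming 4-byte gen6 PTEs, as used by gen6_gmch_probe()
 * below): a GGMS field of 2 yields 2 << 20 = 2MiB of PTE space, which
 * gen6_gmch_probe() converts into (2MiB / 4) << PAGE_SHIFT = 2GiB of GGTT
 * address space.
 */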
2833
2c642b07 2834static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
9459d252
BW
2835{
2836 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2837 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2838 if (bdw_gmch_ctl)
2839 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
562d55d9
BW
2840
2841#ifdef CONFIG_X86_32
2842 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2843 if (bdw_gmch_ctl > 4)
2844 bdw_gmch_ctl = 4;
2845#endif
2846
9459d252
BW
2847 return bdw_gmch_ctl << 20;
2848}
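/* Worked example (assuming 8-byte gen8 PTEs, as used by gen8_gmch_probe()
 * below): a GGMS field of 3 yields 1 << 3 = 8, i.e. 8MiB of PTE space,
 * which gen8_gmch_probe() converts into (8MiB / 8) << PAGE_SHIFT = 4GiB of
 * GGTT address space.
 */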
2849
2c642b07 2850static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
d7f25f23
DL
2851{
2852 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2853 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2854
2855 if (gmch_ctrl)
2856 return 1 << (20 + gmch_ctrl);
2857
2858 return 0;
2859}
2860
2c642b07 2861static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2862{
2863 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2864 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
2865 return snb_gmch_ctl << 25; /* 32 MB units */
2866}
2867
2c642b07 2868static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
9459d252
BW
2869{
2870 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2871 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
2872 return bdw_gmch_ctl << 25; /* 32 MB units */
2873}
2874
d7f25f23
DL
2875static size_t chv_get_stolen_size(u16 gmch_ctrl)
2876{
2877 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2878 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2879
2880 /*
2881 * 0x0 to 0x10: 32MB increments starting at 0MB
2882 * 0x11 to 0x16: 4MB increments starting at 8MB
2883 * 0x17 to 0x1d: 4MB increments starting at 36MB
2884 */
2885 if (gmch_ctrl < 0x11)
2886 return gmch_ctrl << 25;
2887 else if (gmch_ctrl < 0x17)
2888 return (gmch_ctrl - 0x11 + 2) << 22;
2889 else
2890 return (gmch_ctrl - 0x17 + 9) << 22;
2891}
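/* Spot checks against the table documented above: gmch_ctrl = 0x10 gives
 * 0x10 << 25 = 512MB (32MB steps from 0MB), gmch_ctrl = 0x11 gives
 * (0x11 - 0x11 + 2) << 22 = 8MB, and gmch_ctrl = 0x17 gives
 * (0x17 - 0x17 + 9) << 22 = 36MB.
 */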
2892
66375014
DL
2893static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2894{
2895 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2896 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2897
2898 if (gen9_gmch_ctl < 0xf0)
2899 return gen9_gmch_ctl << 25; /* 32 MB units */
2900 else
2901 /* values 0xf0 and up: 4MB increments starting at 4MB */
2902 return (gen9_gmch_ctl - 0xf0 + 1) << 22;
2903}
2904
34c998b4 2905static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
63340133 2906{
34c998b4 2907 struct pci_dev *pdev = ggtt->base.dev->pdev;
34c998b4 2908 phys_addr_t phys_addr;
8bcdd0f7 2909 int ret;
63340133
BW
2910
2911 /* For Modern GENs the PTEs and register space are split in the BAR */
34c998b4 2912 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
63340133 2913
2a073f89
ID
2914 /*
2915 * On BXT, writes larger than 64 bit to the GTT pagetable range will be
2916 * dropped. For WC mappings in general we have 64 byte burst writes
2917 * when the WC buffer is flushed, so we can't use it, but have to
2918 * resort to an uncached mapping. The WC issue is easily caught by the
2919 * readback check when writing GTT PTE entries.
2920 */
34c998b4
CW
2921 if (IS_BROXTON(ggtt->base.dev))
2922 ggtt->gsm = ioremap_nocache(phys_addr, size);
2a073f89 2923 else
34c998b4 2924 ggtt->gsm = ioremap_wc(phys_addr, size);
72e96d64 2925 if (!ggtt->gsm) {
34c998b4 2926 DRM_ERROR("Failed to map the ggtt page table\n");
63340133
BW
2927 return -ENOMEM;
2928 }
2929
bb8f9cff
CW
2930 ret = setup_scratch_page(ggtt->base.dev,
2931 &ggtt->base.scratch_page,
2932 GFP_DMA32);
8bcdd0f7 2933 if (ret) {
63340133
BW
2934 DRM_ERROR("Scratch setup failed\n");
2935 /* iounmap will also get called at remove, but meh */
72e96d64 2936 iounmap(ggtt->gsm);
8bcdd0f7 2937 return ret;
63340133
BW
2938 }
2939
4ad2af1e 2940 return 0;
63340133
BW
2941}
2942
fbe5d36e
BW
2943/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2944 * bits. When using advanced contexts each context stores its own PAT, but
2945 * writing this data shouldn't be harmful even in those cases. */
ee0ce478 2946static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
fbe5d36e 2947{
fbe5d36e
BW
2948 uint64_t pat;
2949
2950 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
2951 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2952 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2953 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
2954 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2955 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2956 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2957 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2958
2d1fe073 2959 if (!USES_PPGTT(dev_priv))
d6a8b72e
RV
2960 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2961 * so RTL will always use the value corresponding to
2962 * pat_sel = 000".
2963 * So let's disable cache for GGTT to avoid screen corruptions.
2964 * MOCS still can be used though.
2965 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
2966 * before this patch, i.e. the same uncached + snooping access
2967 * like on gen6/7 seems to be in effect.
2968 * - So this just fixes blitter/render access. Again it looks
2969 * like it's not just uncached access, but uncached + snooping.
2970 * So we can still hold onto all our assumptions wrt cpu
2971 * clflushing on LLC machines.
2972 */
2973 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2974
fbe5d36e
BW
2975 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2976 * write would work. */
7e435ad2
VS
2977 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2978 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
fbe5d36e
BW
2979}
2980
ee0ce478
VS
2981static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2982{
2983 uint64_t pat;
2984
2985 /*
2986 * Map WB on BDW to snooped on CHV.
2987 *
2988 * Only the snoop bit has meaning for CHV, the rest is
2989 * ignored.
2990 *
cf3d262e
VS
2991 * The hardware will never snoop for certain types of accesses:
2992 * - CPU GTT (GMADR->GGTT->no snoop->memory)
2993 * - PPGTT page tables
2994 * - some other special cycles
2995 *
2996 * As with BDW, we also need to consider the following for GT accesses:
2997 * "For GGTT, there is NO pat_sel[2:0] from the entry,
2998 * so RTL will always use the value corresponding to
2999 * pat_sel = 000".
3000 * Which means we must set the snoop bit in PAT entry 0
3001 * in order to keep the global status page working.
ee0ce478
VS
3002 */
3003 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
3004 GEN8_PPAT(1, 0) |
3005 GEN8_PPAT(2, 0) |
3006 GEN8_PPAT(3, 0) |
3007 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
3008 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
3009 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
3010 GEN8_PPAT(7, CHV_PPAT_SNOOP);
3011
7e435ad2
VS
3012 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
3013 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
ee0ce478
VS
3014}
3015
34c998b4
CW
3016static void gen6_gmch_remove(struct i915_address_space *vm)
3017{
3018 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
3019
3020 iounmap(ggtt->gsm);
8bcdd0f7 3021 cleanup_scratch_page(vm->dev, &vm->scratch_page);
34c998b4
CW
3022}
3023
d507d735 3024static int gen8_gmch_probe(struct i915_ggtt *ggtt)
63340133 3025{
97d6d7ab
CW
3026 struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
3027 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 3028 unsigned int size;
63340133 3029 u16 snb_gmch_ctl;
63340133
BW
3030
3031 /* TODO: We're not aware of mappable constraints on gen8 yet */
97d6d7ab
CW
3032 ggtt->mappable_base = pci_resource_start(pdev, 2);
3033 ggtt->mappable_end = pci_resource_len(pdev, 2);
63340133 3034
97d6d7ab
CW
3035 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
3036 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
63340133 3037
97d6d7ab 3038 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
63340133 3039
97d6d7ab 3040 if (INTEL_GEN(dev_priv) >= 9) {
d507d735 3041 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
34c998b4 3042 size = gen8_get_total_gtt_size(snb_gmch_ctl);
97d6d7ab 3043 } else if (IS_CHERRYVIEW(dev_priv)) {
d507d735 3044 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
34c998b4 3045 size = chv_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3046 } else {
d507d735 3047 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
34c998b4 3048 size = gen8_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 3049 }
63340133 3050
34c998b4 3051 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
63340133 3052
97d6d7ab 3053 if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
ee0ce478
VS
3054 chv_setup_private_ppat(dev_priv);
3055 else
3056 bdw_setup_private_ppat(dev_priv);
fbe5d36e 3057
34c998b4 3058 ggtt->base.cleanup = gen6_gmch_remove;
d507d735
JL
3059 ggtt->base.bind_vma = ggtt_bind_vma;
3060 ggtt->base.unbind_vma = ggtt_unbind_vma;
d6473f56 3061 ggtt->base.insert_page = gen8_ggtt_insert_page;
f7770bfd 3062 ggtt->base.clear_range = nop_clear_range;
48f112fe 3063 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
f7770bfd
CW
3064 ggtt->base.clear_range = gen8_ggtt_clear_range;
3065
3066 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
3067 if (IS_CHERRYVIEW(dev_priv))
3068 ggtt->base.insert_entries = gen8_ggtt_insert_entries__BKL;
3069
34c998b4 3070 return ggtt_probe_common(ggtt, size);
63340133
BW
3071}
3072
d507d735 3073static int gen6_gmch_probe(struct i915_ggtt *ggtt)
e76e9aeb 3074{
97d6d7ab
CW
3075 struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
3076 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 3077 unsigned int size;
e76e9aeb 3078 u16 snb_gmch_ctl;
e76e9aeb 3079
97d6d7ab
CW
3080 ggtt->mappable_base = pci_resource_start(pdev, 2);
3081 ggtt->mappable_end = pci_resource_len(pdev, 2);
41907ddc 3082
baa09f5f
BW
3083 /* 64/512MB is the current min/max we actually know of, but this is just
3084 * a coarse sanity check.
e76e9aeb 3085 */
34c998b4 3086 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
d507d735 3087 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
baa09f5f 3088 return -ENXIO;
e76e9aeb
BW
3089 }
3090
97d6d7ab
CW
3091 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
3092 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
3093 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
e76e9aeb 3094
d507d735 3095 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
e76e9aeb 3096
34c998b4
CW
3097 size = gen6_get_total_gtt_size(snb_gmch_ctl);
3098 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
e76e9aeb 3099
d507d735 3100 ggtt->base.clear_range = gen6_ggtt_clear_range;
d6473f56 3101 ggtt->base.insert_page = gen6_ggtt_insert_page;
d507d735
JL
3102 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
3103 ggtt->base.bind_vma = ggtt_bind_vma;
3104 ggtt->base.unbind_vma = ggtt_unbind_vma;
34c998b4
CW
3105 ggtt->base.cleanup = gen6_gmch_remove;
3106
3107 if (HAS_EDRAM(dev_priv))
3108 ggtt->base.pte_encode = iris_pte_encode;
3109 else if (IS_HASWELL(dev_priv))
3110 ggtt->base.pte_encode = hsw_pte_encode;
3111 else if (IS_VALLEYVIEW(dev_priv))
3112 ggtt->base.pte_encode = byt_pte_encode;
3113 else if (INTEL_GEN(dev_priv) >= 7)
3114 ggtt->base.pte_encode = ivb_pte_encode;
3115 else
3116 ggtt->base.pte_encode = snb_pte_encode;
7faf1ab2 3117
34c998b4 3118 return ggtt_probe_common(ggtt, size);
e76e9aeb
BW
3119}
3120
34c998b4 3121static void i915_gmch_remove(struct i915_address_space *vm)
e76e9aeb 3122{
34c998b4 3123 intel_gmch_remove();
644ec02b 3124}
baa09f5f 3125
d507d735 3126static int i915_gmch_probe(struct i915_ggtt *ggtt)
baa09f5f 3127{
97d6d7ab 3128 struct drm_i915_private *dev_priv = to_i915(ggtt->base.dev);
baa09f5f
BW
3129 int ret;
3130
91c8a326 3131 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
baa09f5f
BW
3132 if (!ret) {
3133 DRM_ERROR("failed to set up gmch\n");
3134 return -EIO;
3135 }
3136
d507d735
JL
3137 intel_gtt_get(&ggtt->base.total, &ggtt->stolen_size,
3138 &ggtt->mappable_base, &ggtt->mappable_end);
baa09f5f 3139
97d6d7ab 3140 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
d6473f56 3141 ggtt->base.insert_page = i915_ggtt_insert_page;
d507d735
JL
3142 ggtt->base.insert_entries = i915_ggtt_insert_entries;
3143 ggtt->base.clear_range = i915_ggtt_clear_range;
3144 ggtt->base.bind_vma = ggtt_bind_vma;
3145 ggtt->base.unbind_vma = ggtt_unbind_vma;
34c998b4 3146 ggtt->base.cleanup = i915_gmch_remove;
baa09f5f 3147
d507d735 3148 if (unlikely(ggtt->do_idle_maps))
c0a7f818
CW
3149 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
3150
baa09f5f
BW
3151 return 0;
3152}
3153
d85489d3 3154/**
0088e522 3155 * i915_ggtt_probe_hw - Probe GGTT hardware location
97d6d7ab 3156 * @dev_priv: i915 device
d85489d3 3157 */
97d6d7ab 3158int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
baa09f5f 3159{
62106b4f 3160 struct i915_ggtt *ggtt = &dev_priv->ggtt;
baa09f5f
BW
3161 int ret;
3162
97d6d7ab 3163 ggtt->base.dev = &dev_priv->drm;
c114f76a 3164
34c998b4
CW
3165 if (INTEL_GEN(dev_priv) <= 5)
3166 ret = i915_gmch_probe(ggtt);
3167 else if (INTEL_GEN(dev_priv) < 8)
3168 ret = gen6_gmch_probe(ggtt);
3169 else
3170 ret = gen8_gmch_probe(ggtt);
a54c0c27 3171 if (ret)
baa09f5f 3172 return ret;
baa09f5f 3173
c890e2d5
CW
3174 if ((ggtt->base.total - 1) >> 32) {
3175 DRM_ERROR("We never expected a Global GTT with more than 32bits"
f6b9d5ca 3176 " of address space! Found %lldM!\n",
c890e2d5
CW
3177 ggtt->base.total >> 20);
3178 ggtt->base.total = 1ULL << 32;
3179 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
3180 }
3181
f6b9d5ca
CW
3182 if (ggtt->mappable_end > ggtt->base.total) {
3183 DRM_ERROR("mappable aperture extends past end of GGTT,"
3184 " aperture=%llx, total=%llx\n",
3185 ggtt->mappable_end, ggtt->base.total);
3186 ggtt->mappable_end = ggtt->base.total;
3187 }
3188
baa09f5f 3189 /* GMADR is the PCI mmio aperture into the global GTT. */
c44ef60e 3190 DRM_INFO("Memory usable by graphics device = %lluM\n",
62106b4f
JL
3191 ggtt->base.total >> 20);
3192 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
3193 DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
5db6c735
DV
3194#ifdef CONFIG_INTEL_IOMMU
3195 if (intel_iommu_gfx_mapped)
3196 DRM_INFO("VT-d active for gfx access\n");
3197#endif
baa09f5f
BW
3198
3199 return 0;
0088e522
CW
3200}
3201
3202/**
3203 * i915_ggtt_init_hw - Initialize GGTT hardware
97d6d7ab 3204 * @dev_priv: i915 device
0088e522 3205 */
97d6d7ab 3206int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
0088e522 3207{
0088e522
CW
3208 struct i915_ggtt *ggtt = &dev_priv->ggtt;
3209 int ret;
3210
f6b9d5ca
CW
3211 INIT_LIST_HEAD(&dev_priv->vm_list);
3212
3213 /* Subtract the guard page before address space initialization to
3214 * shrink the range used by drm_mm.
3215 */
3216 ggtt->base.total -= PAGE_SIZE;
3217 i915_address_space_init(&ggtt->base, dev_priv);
3218 ggtt->base.total += PAGE_SIZE;
3219 if (!HAS_LLC(dev_priv))
3220 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
3221
f7bbe788
CW
3222 if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
3223 dev_priv->ggtt.mappable_base,
3224 dev_priv->ggtt.mappable_end)) {
f6b9d5ca
CW
3225 ret = -EIO;
3226 goto out_gtt_cleanup;
3227 }
3228
3229 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
3230
0088e522
CW
3231 /*
3232 * Initialise stolen early so that we may reserve preallocated
3233 * objects for the BIOS to KMS transition.
3234 */
97d6d7ab 3235 ret = i915_gem_init_stolen(&dev_priv->drm);
0088e522
CW
3236 if (ret)
3237 goto out_gtt_cleanup;
3238
3239 return 0;
a4eba47b
ID
3240
3241out_gtt_cleanup:
72e96d64 3242 ggtt->base.cleanup(&ggtt->base);
a4eba47b 3243 return ret;
baa09f5f 3244}
6f65e29a 3245
97d6d7ab 3246int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
ac840ae5 3247{
97d6d7ab 3248 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
ac840ae5
VS
3249 return -EIO;
3250
3251 return 0;
3252}
3253
fa42331b
DV
3254void i915_gem_restore_gtt_mappings(struct drm_device *dev)
3255{
72e96d64
JL
3256 struct drm_i915_private *dev_priv = to_i915(dev);
3257 struct i915_ggtt *ggtt = &dev_priv->ggtt;
fbb30a5c 3258 struct drm_i915_gem_object *obj, *on;
fa42331b 3259
dc97997a 3260 i915_check_and_clear_faults(dev_priv);
fa42331b
DV
3261
3262 /* First fill our portion of the GTT with scratch pages */
72e96d64
JL
3263 ggtt->base.clear_range(&ggtt->base, ggtt->base.start, ggtt->base.total,
3264 true);
fa42331b 3265
fbb30a5c
CW
3266 ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3267
3268 /* clflush objects bound into the GGTT and rebind them. */
3269 list_for_each_entry_safe(obj, on,
3270 &dev_priv->mm.bound_list, global_list) {
3271 bool ggtt_bound = false;
3272 struct i915_vma *vma;
3273
1c7f4bca 3274 list_for_each_entry(vma, &obj->vma_list, obj_link) {
72e96d64 3275 if (vma->vm != &ggtt->base)
2c3d9984 3276 continue;
fa42331b 3277
fbb30a5c
CW
3278 if (!i915_vma_unbind(vma))
3279 continue;
3280
2c3d9984
TU
3281 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3282 PIN_UPDATE));
fbb30a5c 3283 ggtt_bound = true;
2c3d9984
TU
3284 }
3285
fbb30a5c 3286 if (ggtt_bound)
975f7ff4 3287 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
2c3d9984 3288 }
fa42331b 3289
fbb30a5c
CW
3290 ggtt->base.closed = false;
3291
fa42331b
DV
3292 if (INTEL_INFO(dev)->gen >= 8) {
3293 if (IS_CHERRYVIEW(dev) || IS_BROXTON(dev))
3294 chv_setup_private_ppat(dev_priv);
3295 else
3296 bdw_setup_private_ppat(dev_priv);
3297
3298 return;
3299 }
3300
3301 if (USES_PPGTT(dev)) {
72e96d64
JL
3302 struct i915_address_space *vm;
3303
fa42331b
DV
3304 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
3305 /* TODO: Perhaps it shouldn't be gen6 specific */
3306
e5716f55 3307 struct i915_hw_ppgtt *ppgtt;
fa42331b 3308
2bfa996e 3309 if (i915_is_ggtt(vm))
fa42331b 3310 ppgtt = dev_priv->mm.aliasing_ppgtt;
e5716f55
JL
3311 else
3312 ppgtt = i915_vm_to_ppgtt(vm);
fa42331b
DV
3313
3314 gen6_write_page_range(dev_priv, &ppgtt->pd,
3315 0, ppgtt->base.total);
3316 }
3317 }
3318
3319 i915_ggtt_flush(dev_priv);
3320}
3321
b0decaf7
CW
3322static void
3323i915_vma_retire(struct i915_gem_active *active,
3324 struct drm_i915_gem_request *rq)
3325{
3326 const unsigned int idx = rq->engine->id;
3327 struct i915_vma *vma =
3328 container_of(active, struct i915_vma, last_read[idx]);
3329
3330 GEM_BUG_ON(!i915_vma_has_active_engine(vma, idx));
3331
3332 i915_vma_clear_active(vma, idx);
3333 if (i915_vma_is_active(vma))
3334 return;
3335
3336 list_move_tail(&vma->vm_link, &vma->vm->inactive_list);
3272db53 3337 if (unlikely(i915_vma_is_closed(vma) && !i915_vma_is_pinned(vma)))
b1f788c6
CW
3338 WARN_ON(i915_vma_unbind(vma));
3339}
3340
3341void i915_vma_destroy(struct i915_vma *vma)
3342{
3343 GEM_BUG_ON(vma->node.allocated);
3344 GEM_BUG_ON(i915_vma_is_active(vma));
3272db53 3345 GEM_BUG_ON(!i915_vma_is_closed(vma));
49ef5294 3346 GEM_BUG_ON(vma->fence);
b1f788c6
CW
3347
3348 list_del(&vma->vm_link);
3272db53 3349 if (!i915_vma_is_ggtt(vma))
b1f788c6
CW
3350 i915_ppgtt_put(i915_vm_to_ppgtt(vma->vm));
3351
3352 kmem_cache_free(to_i915(vma->obj->base.dev)->vmas, vma);
3353}
3354
3355void i915_vma_close(struct i915_vma *vma)
3356{
3272db53
CW
3357 GEM_BUG_ON(i915_vma_is_closed(vma));
3358 vma->flags |= I915_VMA_CLOSED;
b1f788c6
CW
3359
3360 list_del_init(&vma->obj_link);
20dfbde4 3361 if (!i915_vma_is_active(vma) && !i915_vma_is_pinned(vma))
df0e9a28 3362 WARN_ON(i915_vma_unbind(vma));
b0decaf7
CW
3363}
3364
ec7adb6e 3365static struct i915_vma *
058d88c4
CW
3366__i915_vma_create(struct drm_i915_gem_object *obj,
3367 struct i915_address_space *vm,
3368 const struct i915_ggtt_view *view)
6f65e29a 3369{
dabde5c7 3370 struct i915_vma *vma;
b0decaf7 3371 int i;
6f65e29a 3372
50e046b6
CW
3373 GEM_BUG_ON(vm->closed);
3374
e20d2ab7 3375 vma = kmem_cache_zalloc(to_i915(obj->base.dev)->vmas, GFP_KERNEL);
dabde5c7
DC
3376 if (vma == NULL)
3377 return ERR_PTR(-ENOMEM);
ec7adb6e 3378
6f65e29a 3379 INIT_LIST_HEAD(&vma->exec_list);
b0decaf7
CW
3380 for (i = 0; i < ARRAY_SIZE(vma->last_read); i++)
3381 init_request_active(&vma->last_read[i], i915_vma_retire);
49ef5294 3382 init_request_active(&vma->last_fence, NULL);
50e046b6 3383 list_add(&vma->vm_link, &vm->unbound_list);
6f65e29a
BW
3384 vma->vm = vm;
3385 vma->obj = obj;
de180033 3386 vma->size = obj->base.size;
6f65e29a 3387
058d88c4 3388 if (view) {
de180033
CW
3389 vma->ggtt_view = *view;
3390 if (view->type == I915_GGTT_VIEW_PARTIAL) {
3391 vma->size = view->params.partial.size;
3392 vma->size <<= PAGE_SHIFT;
3393 } else if (view->type == I915_GGTT_VIEW_ROTATED) {
3394 vma->size =
3395 intel_rotation_info_size(&view->params.rotated);
3396 vma->size <<= PAGE_SHIFT;
3397 }
058d88c4
CW
3398 }
3399
3400 if (i915_is_ggtt(vm)) {
3401 vma->flags |= I915_VMA_GGTT;
de180033 3402 } else {
596c5923 3403 i915_ppgtt_get(i915_vm_to_ppgtt(vm));
de180033 3404 }
6f65e29a 3405
1c7f4bca 3406 list_add_tail(&vma->obj_link, &obj->vma_list);
6f65e29a
BW
3407 return vma;
3408}
3409
058d88c4
CW
3410static inline bool vma_matches(struct i915_vma *vma,
3411 struct i915_address_space *vm,
3412 const struct i915_ggtt_view *view)
3413{
3414 if (vma->vm != vm)
3415 return false;
3416
3417 if (!i915_vma_is_ggtt(vma))
3418 return true;
3419
3420 if (!view)
 3421 return vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL;
3422
3423 if (vma->ggtt_view.type != view->type)
3424 return false;
3425
3426 return memcmp(&vma->ggtt_view.params,
3427 &view->params,
3428 sizeof(view->params)) == 0;
3429}
3430
81a8aa4a
CW
3431struct i915_vma *
3432i915_vma_create(struct drm_i915_gem_object *obj,
3433 struct i915_address_space *vm,
3434 const struct i915_ggtt_view *view)
3435{
3436 GEM_BUG_ON(view && !i915_is_ggtt(vm));
058d88c4 3437 GEM_BUG_ON(i915_gem_obj_to_vma(obj, vm, view));
81a8aa4a 3438
058d88c4 3439 return __i915_vma_create(obj, vm, view);
81a8aa4a
CW
3440}
3441
6f65e29a 3442struct i915_vma *
058d88c4
CW
3443i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
3444 struct i915_address_space *vm,
3445 const struct i915_ggtt_view *view)
ec7adb6e
JL
3446{
3447 struct i915_vma *vma;
3448
058d88c4
CW
3449 list_for_each_entry_reverse(vma, &obj->vma_list, obj_link)
3450 if (vma_matches(vma, vm, view))
3451 return vma;
ec7adb6e 3452
058d88c4 3453 return NULL;
ec7adb6e
JL
3454}
3455
3456struct i915_vma *
058d88c4
CW
3457i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
3458 struct i915_address_space *vm,
3459 const struct i915_ggtt_view *view)
6f65e29a 3460{
058d88c4 3461 struct i915_vma *vma;
ec7adb6e 3462
058d88c4 3463 GEM_BUG_ON(view && !i915_is_ggtt(vm));
de895082 3464
058d88c4 3465 vma = i915_gem_obj_to_vma(obj, vm, view);
6f65e29a 3466 if (!vma)
058d88c4 3467 vma = __i915_vma_create(obj, vm, view);
6f65e29a 3468
3272db53 3469 GEM_BUG_ON(i915_vma_is_closed(vma));
6f65e29a
BW
3470 return vma;
3471}
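/*
 * Illustrative sketch (not part of the original file): how a caller might
 * obtain a VMA for an object in the global GTT.  Passing a NULL view asks
 * for the normal (linear) view; the helper reuses a matching VMA if one
 * already exists, otherwise it allocates a new one.  The example_ function
 * name is hypothetical.
 */
static int example_get_ggtt_vma(struct drm_i915_private *dev_priv,
				struct drm_i915_gem_object *obj)
{
	struct i915_vma *vma;

	vma = i915_gem_obj_lookup_or_create_vma(obj, &dev_priv->ggtt.base, NULL);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/* ... bind/pin the VMA before touching its GTT address ... */
	return 0;
}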
fe14d5f4 3472
804beb4b 3473static struct scatterlist *
2d7f3bdb 3474rotate_pages(const dma_addr_t *in, unsigned int offset,
804beb4b 3475 unsigned int width, unsigned int height,
87130255 3476 unsigned int stride,
804beb4b 3477 struct sg_table *st, struct scatterlist *sg)
50470bb0
TU
3478{
3479 unsigned int column, row;
3480 unsigned int src_idx;
50470bb0 3481
50470bb0 3482 for (column = 0; column < width; column++) {
87130255 3483 src_idx = stride * (height - 1) + column;
50470bb0
TU
3484 for (row = 0; row < height; row++) {
3485 st->nents++;
3486 /* We don't need the pages, but need to initialize
3487 * the entries so the sg list can be happily traversed.
 3488 * The only things we need are the DMA addresses.
3489 */
3490 sg_set_page(sg, NULL, PAGE_SIZE, 0);
804beb4b 3491 sg_dma_address(sg) = in[offset + src_idx];
50470bb0
TU
3492 sg_dma_len(sg) = PAGE_SIZE;
3493 sg = sg_next(sg);
87130255 3494 src_idx -= stride;
50470bb0
TU
3495 }
3496 }
804beb4b
TU
3497
3498 return sg;
50470bb0
TU
3499}
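/*
 * Worked example (added for illustration, not in the original source):
 * with width = 2, height = 2, stride = 2 and offset = 0, rotate_pages()
 * walks the source DMA addresses one column at a time, bottom row first:
 *
 *   column 0: src_idx = stride * (height - 1) + 0 = 2, then 2 - stride = 0
 *   column 1: src_idx = 3, then 1
 *
 * so the output scatterlist carries the pages in the order 2, 0, 3, 1,
 * which is the page order the rotated GGTT view hands to the display engine.
 */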
3500
3501static struct sg_table *
6687c906 3502intel_rotate_fb_obj_pages(const struct intel_rotation_info *rot_info,
50470bb0
TU
3503 struct drm_i915_gem_object *obj)
3504{
85d1225e 3505 const size_t n_pages = obj->base.size / PAGE_SIZE;
6687c906 3506 unsigned int size = intel_rotation_info_size(rot_info);
85d1225e
DG
3507 struct sgt_iter sgt_iter;
3508 dma_addr_t dma_addr;
50470bb0
TU
3509 unsigned long i;
3510 dma_addr_t *page_addr_list;
3511 struct sg_table *st;
89e3e142 3512 struct scatterlist *sg;
1d00dad5 3513 int ret = -ENOMEM;
50470bb0 3514
50470bb0 3515 /* Allocate a temporary list of source pages for random access. */
85d1225e 3516 page_addr_list = drm_malloc_gfp(n_pages,
f2a85e19
CW
3517 sizeof(dma_addr_t),
3518 GFP_TEMPORARY);
50470bb0
TU
3519 if (!page_addr_list)
3520 return ERR_PTR(ret);
3521
3522 /* Allocate target SG list. */
3523 st = kmalloc(sizeof(*st), GFP_KERNEL);
3524 if (!st)
3525 goto err_st_alloc;
3526
6687c906 3527 ret = sg_alloc_table(st, size, GFP_KERNEL);
50470bb0
TU
3528 if (ret)
3529 goto err_sg_alloc;
3530
3531 /* Populate source page list from the object. */
3532 i = 0;
85d1225e
DG
3533 for_each_sgt_dma(dma_addr, sgt_iter, obj->pages)
3534 page_addr_list[i++] = dma_addr;
50470bb0 3535
85d1225e 3536 GEM_BUG_ON(i != n_pages);
11f20322
VS
3537 st->nents = 0;
3538 sg = st->sgl;
3539
6687c906
VS
3540 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3541 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3542 rot_info->plane[i].width, rot_info->plane[i].height,
3543 rot_info->plane[i].stride, st, sg);
89e3e142
TU
3544 }
3545
6687c906
VS
3546 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
3547 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
50470bb0
TU
3548
3549 drm_free_large(page_addr_list);
3550
3551 return st;
3552
3553err_sg_alloc:
3554 kfree(st);
3555err_st_alloc:
3556 drm_free_large(page_addr_list);
3557
6687c906
VS
3558 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3559 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3560
50470bb0
TU
3561 return ERR_PTR(ret);
3562}
ec7adb6e 3563
8bd7ef16
JL
3564static struct sg_table *
3565intel_partial_pages(const struct i915_ggtt_view *view,
3566 struct drm_i915_gem_object *obj)
3567{
3568 struct sg_table *st;
3569 struct scatterlist *sg;
3570 struct sg_page_iter obj_sg_iter;
3571 int ret = -ENOMEM;
3572
3573 st = kmalloc(sizeof(*st), GFP_KERNEL);
3574 if (!st)
3575 goto err_st_alloc;
3576
3577 ret = sg_alloc_table(st, view->params.partial.size, GFP_KERNEL);
3578 if (ret)
3579 goto err_sg_alloc;
3580
3581 sg = st->sgl;
3582 st->nents = 0;
3583 for_each_sg_page(obj->pages->sgl, &obj_sg_iter, obj->pages->nents,
3584 view->params.partial.offset)
3585 {
3586 if (st->nents >= view->params.partial.size)
3587 break;
3588
3589 sg_set_page(sg, NULL, PAGE_SIZE, 0);
3590 sg_dma_address(sg) = sg_page_iter_dma_address(&obj_sg_iter);
3591 sg_dma_len(sg) = PAGE_SIZE;
3592
3593 sg = sg_next(sg);
3594 st->nents++;
3595 }
3596
3597 return st;
3598
3599err_sg_alloc:
3600 kfree(st);
3601err_st_alloc:
3602 return ERR_PTR(ret);
3603}
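/*
 * Illustrative sketch (not part of the original file): building a partial
 * GGTT view.  Both offset and size are expressed in pages, matching the
 * way intel_partial_pages() above and the vma->size computation in
 * __i915_vma_create() interpret them.  The example_ function name is
 * hypothetical.
 */
static struct i915_vma *
example_partial_view(struct drm_i915_gem_object *obj,
		     struct i915_address_space *ggtt_vm)
{
	struct i915_ggtt_view view = {
		.type = I915_GGTT_VIEW_PARTIAL,
	};

	view.params.partial.offset = 0;	/* start at the object's first page */
	view.params.partial.size = 1;	/* map a single page */

	return i915_gem_obj_lookup_or_create_vma(obj, ggtt_vm, &view);
}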
3604
70b9f6f8 3605static int
50470bb0 3606i915_get_ggtt_vma_pages(struct i915_vma *vma)
fe14d5f4 3607{
50470bb0
TU
3608 int ret = 0;
3609
247177dd 3610 if (vma->pages)
fe14d5f4
TU
3611 return 0;
3612
3613 if (vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
247177dd 3614 vma->pages = vma->obj->pages;
50470bb0 3615 else if (vma->ggtt_view.type == I915_GGTT_VIEW_ROTATED)
247177dd 3616 vma->pages =
11d23e6f 3617 intel_rotate_fb_obj_pages(&vma->ggtt_view.params.rotated, vma->obj);
8bd7ef16 3618 else if (vma->ggtt_view.type == I915_GGTT_VIEW_PARTIAL)
247177dd 3619 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
fe14d5f4
TU
3620 else
3621 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3622 vma->ggtt_view.type);
3623
247177dd 3624 if (!vma->pages) {
ec7adb6e 3625 DRM_ERROR("Failed to get pages for GGTT view type %u!\n",
fe14d5f4 3626 vma->ggtt_view.type);
50470bb0 3627 ret = -EINVAL;
247177dd
CW
3628 } else if (IS_ERR(vma->pages)) {
3629 ret = PTR_ERR(vma->pages);
3630 vma->pages = NULL;
50470bb0
TU
3631 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3632 vma->ggtt_view.type, ret);
fe14d5f4
TU
3633 }
3634
50470bb0 3635 return ret;
fe14d5f4
TU
3636}
3637
3638/**
 3639 * i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
3640 * @vma: VMA to map
3641 * @cache_level: mapping cache level
3642 * @flags: flags like global or local mapping
3643 *
 3644 * DMA addresses are taken from the scatter-gather table of this object (or of
 3645 * this VMA in the case of non-default GGTT views) and the PTE entries are set up.
3646 * Note that DMA addresses are also the only part of the SG table we care about.
3647 */
3648int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
3649 u32 flags)
3650{
75d04a37 3651 u32 bind_flags;
3272db53
CW
3652 u32 vma_flags;
3653 int ret;
1d335d1b 3654
75d04a37
MK
3655 if (WARN_ON(flags == 0))
3656 return -EINVAL;
1d335d1b 3657
75d04a37 3658 bind_flags = 0;
0875546c 3659 if (flags & PIN_GLOBAL)
3272db53 3660 bind_flags |= I915_VMA_GLOBAL_BIND;
0875546c 3661 if (flags & PIN_USER)
3272db53 3662 bind_flags |= I915_VMA_LOCAL_BIND;
0875546c 3663
3272db53 3664 vma_flags = vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND);
0875546c 3665 if (flags & PIN_UPDATE)
3272db53 3666 bind_flags |= vma_flags;
0875546c 3667 else
3272db53 3668 bind_flags &= ~vma_flags;
75d04a37
MK
3669 if (bind_flags == 0)
3670 return 0;
3671
3272db53 3672 if (vma_flags == 0 && vma->vm->allocate_va_range) {
596c5923 3673 trace_i915_va_alloc(vma);
75d04a37
MK
3674 ret = vma->vm->allocate_va_range(vma->vm,
3675 vma->node.start,
3676 vma->node.size);
3677 if (ret)
3678 return ret;
3679 }
3680
3681 ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
70b9f6f8
DV
3682 if (ret)
3683 return ret;
0875546c 3684
3272db53 3685 vma->flags |= bind_flags;
fe14d5f4
TU
3686 return 0;
3687}
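/*
 * Illustrative sketch (not part of the original file): a typical call into
 * i915_vma_bind().  PIN_GLOBAL requests a global (GGTT) binding, PIN_USER a
 * per-process (ppGTT) binding, and PIN_UPDATE rewrites the PTEs of whatever
 * bindings already exist.  The VMA is assumed to already have a drm_mm node
 * (node.start/node.size) reserved by the caller.  The example_ name is
 * hypothetical.
 */
static int example_bind_global(struct i915_vma *vma)
{
	return i915_vma_bind(vma, vma->obj->cache_level, PIN_GLOBAL);
}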
91e6711e 3688
8ef8561f
CW
3689void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
3690{
3691 void __iomem *ptr;
3692
e5cdb22b
CW
3693 /* Access through the GTT requires the device to be awake. */
3694 assert_rpm_wakelock_held(to_i915(vma->vm->dev));
3695
8ef8561f 3696 lockdep_assert_held(&vma->vm->dev->struct_mutex);
05a20d09 3697 if (WARN_ON(!i915_vma_is_map_and_fenceable(vma)))
406ea8d2 3698 return IO_ERR_PTR(-ENODEV);
8ef8561f 3699
3272db53
CW
3700 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
3701 GEM_BUG_ON((vma->flags & I915_VMA_GLOBAL_BIND) == 0);
8ef8561f
CW
3702
3703 ptr = vma->iomap;
3704 if (ptr == NULL) {
f7bbe788 3705 ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
8ef8561f
CW
3706 vma->node.start,
3707 vma->node.size);
3708 if (ptr == NULL)
406ea8d2 3709 return IO_ERR_PTR(-ENOMEM);
8ef8561f
CW
3710
3711 vma->iomap = ptr;
3712 }
3713
20dfbde4 3714 __i915_vma_pin(vma);
8ef8561f
CW
3715 return ptr;
3716}
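/*
 * Illustrative sketch (not part of the original file): writing through the
 * GGTT aperture.  The caller must hold struct_mutex and a runtime-PM
 * wakeref, as asserted in i915_vma_pin_iomap() above.  The WC mapping is
 * cached in vma->iomap, so only the pin count has to be dropped when done.
 * The example_ name is hypothetical.
 */
static int example_write_through_ggtt(struct i915_vma *vma, u32 value)
{
	void __iomem *ptr;

	ptr = i915_vma_pin_iomap(vma);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	iowrite32(value, ptr);	/* first dword of the mapping */
	i915_vma_unpin(vma);

	return 0;
}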
19880c4a
CW
3717
3718void i915_vma_unpin_and_release(struct i915_vma **p_vma)
3719{
3720 struct i915_vma *vma;
3721
3722 vma = fetch_and_zero(p_vma);
3723 if (!vma)
3724 return;
3725
3726 i915_vma_unpin(vma);
3727 i915_vma_put(vma);
3728}