/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view is
 * different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old core
 * GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics, which means that the passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* emulation is too hard */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int ret;

	ret = vma->vm->allocate_va_range(vma->vm, vma->node.start, vma->size);
	if (ret)
		return ret;

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

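/*
 * PTE/PDE encoding helpers: each of the functions below packs a DMA address
 * together with the cacheability attributes for its platform (PPAT index on
 * gen8, LLC/snoop bits on the gen6/7 variants) into the hardware descriptor.
 */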
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

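/*
 * Small per-vm stash of recently freed page-table pages: vm_free_page()
 * batches pages in a pagevec and vm_alloc_page() reuses them before falling
 * back to alloc_page(). When pt_kmap_wc is set, pages are kept
 * write-combining while stashed and flipped back to write-back on release.
 */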
static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	if (vm->free_pages.nr)
		return vm->free_pages.pages[--vm->free_pages.nr];

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	if (vm->pt_kmap_wc)
		set_pages_array_wc(&page, 1);

	return page;
}

static void vm_free_pages_release(struct i915_address_space *vm)
{
	GEM_BUG_ON(!pagevec_count(&vm->free_pages));

	if (vm->pt_kmap_wc)
		set_pages_array_wb(vm->free_pages.pages,
				   pagevec_count(&vm->free_pages));

	__pagevec_release(&vm->free_pages);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	if (!pagevec_add(&vm->free_pages, page))
		vm_free_pages_release(vm);
}

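/*
 * __setup_page_dma() pairs a backing page from the stash above with a
 * bidirectional DMA mapping; cleanup_page_dma() undoes both steps.
 */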
static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

static int setup_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct i915_address_space *vm,
			     struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(ppgtt, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(ppgtt, px, v) fill_page_dma_32((vm), px_base(px), (v))

static void fill_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p,
			  const u64 val)
{
	u64 * const vaddr = kmap_atomic(p->page);
	int i;

	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_atomic(vaddr);
}

static void fill_page_dma_32(struct i915_address_space *vm,
			     struct i915_page_dma *p,
			     const u32 v)
{
	fill_page_dma(vm, p, (u64)v << 32 | v);
}

static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
}

static void cleanup_scratch_page(struct i915_address_space *vm)
{
	cleanup_page_dma(vm, &vm->scratch_page);
}

static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pt))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->used_ptes = 0;
	return pt;
}

static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
	cleanup_px(vm, pt);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill_px(vm, pt,
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill32_px(vm, pt,
		  vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}

static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pd))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	pd->used_pdes = 0;
	return pd;
}

static void free_pd(struct i915_address_space *vm,
		    struct i915_page_directory *pd)
{
	cleanup_px(vm, pd);
	kfree(pd);
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	unsigned int i;

	fill_px(vm, pd,
		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
	for (i = 0; i < I915_PDES; i++)
		pd->page_table[i] = vm->scratch_pt;
}

static int __pdp_init(struct i915_address_space *vm,
		      struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	unsigned int i;

	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
					    GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pdp->page_directory))
		return -ENOMEM;

	for (i = 0; i < pdpes; i++)
		pdp->page_directory[i] = vm->scratch_pd;

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static inline bool use_4lvl(const struct i915_address_space *vm)
{
	return i915_vm_is_48bit(vm);
}

static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!use_4lvl(vm));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(vm, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(vm, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct i915_address_space *vm,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);

	if (!use_4lvl(vm))
		return;

	cleanup_px(vm, pdp);
	kfree(pdp);
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	unsigned int i;

	fill_px(vm, pml4,
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
		pml4->pdps[i] = vm->scratch_pdp;
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	BUG_ON(entry >= 4);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
	*cs++ = upper_32_bits(addr);
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
	*cs++ = lower_32_bits(addr);
	intel_ring_advance(req, cs);

	return 0;
}

static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	int i, ret;

	for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
		const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);

		ret = gen8_write_pdp(req, i, pd_daddr);
		if (ret)
			return ret;
	}

	return 0;
}

static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt,
			       struct drm_i915_gem_request *req)
{
	return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4));
}

/* PDE TLBs are a pain to invalidate on GEN8+. When we modify
 * the page table structures, we mark them dirty so that
 * context switching/execlist queuing code takes extra steps
 * to ensure that tlbs are flushed.
 */
static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt)
{
	ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask;
}

/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				u64 start, u64 length)
{
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	gen8_pte_t *vaddr;

	GEM_BUG_ON(num_entries > pt->used_ptes);

	pt->used_ptes -= num_entries;
	if (!pt->used_ptes)
		return true;

	vaddr = kmap_atomic_px(pt);
	while (pte < pte_end)
		vaddr[pte++] = scratch_pte;
	kunmap_atomic(vaddr);

	return false;
}

static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       struct i915_page_table *pt,
			       unsigned int pde)
{
	gen8_pde_t *vaddr;

	pd->page_table[pde] = pt;

	vaddr = kmap_atomic_px(pd);
	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				u64 start, u64 length)
{
	struct i915_page_table *pt;
	u32 pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		GEM_BUG_ON(pt == vm->scratch_pt);

		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
			continue;

		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
		GEM_BUG_ON(!pd->used_pdes);
		pd->used_pdes--;

		free_pt(vm, pt);
	}

	return !pd->used_pdes;
}

static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				struct i915_page_directory *pd,
				unsigned int pdpe)
{
	gen8_ppgtt_pdpe_t *vaddr;

	pdp->page_directory[pdpe] = pd;
	if (!use_4lvl(vm))
		return;

	vaddr = kmap_atomic_px(pdp);
	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 u64 start, u64 length)
{
	struct i915_page_directory *pd;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		GEM_BUG_ON(pd == vm->scratch_pd);

		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
			continue;

		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;

		free_pd(vm, pd);
	}

	return !pdp->used_pdpes;
}

static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
				 struct i915_page_directory_pointer *pdp,
				 unsigned int pml4e)
{
	gen8_ppgtt_pml4e_t *vaddr;

	pml4->pdps[pml4e] = pdp;

	vaddr = kmap_atomic_px(pml4);
	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	unsigned int pml4e;

	GEM_BUG_ON(!use_4lvl(vm));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		GEM_BUG_ON(pdp == vm->scratch_pdp);

		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
			continue;

		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);

		free_pdp(vm, pdp);
	}
}

struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
};

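/*
 * gen8_insert_pte is a cursor holding the pml4e/pdpe/pde/pte indices for a
 * 48-bit virtual address, advanced in place as PTEs are written below.
 */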
struct gen8_insert_pte {
	u16 pml4e;
	u16 pdpe;
	u16 pde;
	u16 pte;
};

static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
{
	return (struct gen8_insert_pte) {
		gen8_pml4e_index(start),
		gen8_pdpe_index(start),
		gen8_pde_index(start),
		gen8_pte_index(start),
	};
}

static __always_inline bool
gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
			      struct i915_page_directory_pointer *pdp,
			      struct sgt_dma *iter,
			      struct gen8_insert_pte *idx,
			      enum i915_cache_level cache_level)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
	gen8_pte_t *vaddr;
	bool ret;

	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
	pd = pdp->page_directory[idx->pdpe];
	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
	do {
		vaddr[idx->pte] = pte_encode | iter->dma;

		iter->dma += PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				ret = false;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (++idx->pte == GEN8_PTES) {
			idx->pte = 0;

			if (++idx->pde == I915_PDES) {
				idx->pde = 0;

				/* Limited by sg length for 3lvl */
				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
					idx->pdpe = 0;
					ret = true;
					break;
				}

				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
				pd = pdp->page_directory[idx->pdpe];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
		}
	} while (1);
	kunmap_atomic(vaddr);

	return ret;
}

static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
				   struct sg_table *pages,
				   u64 start,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct gen8_insert_pte idx = gen8_insert_pte(start);

	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
				      cache_level);
}

static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
				   struct sg_table *pages,
				   u64 start,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
	struct gen8_insert_pte idx = gen8_insert_pte(start);

	while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
					     &idx, cache_level))
		GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
}

static void gen8_free_page_tables(struct i915_address_space *vm,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for (i = 0; i < I915_PDES; i++) {
		if (pd->page_table[i] != vm->scratch_pt)
			free_pt(vm, pd->page_table[i]);
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = setup_scratch_page(vm, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(vm);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (use_4lvl(vm)) {
		vm->scratch_pdp = alloc_pdp(vm);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (use_4lvl(vm))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(vm, vm->scratch_pd);
free_pt:
	free_pt(vm, vm->scratch_pt);
free_scratch_page:
	cleanup_scratch_page(vm);

	return ret;
}

static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	enum vgt_g2v_type msg;
	int i;

	if (use_4lvl(vm)) {
		const u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
	if (use_4lvl(vm))
		free_pdp(vm, vm->scratch_pdp);
	free_pd(vm, vm->scratch_pd);
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
}

static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	int i;

	for (i = 0; i < pdpes; i++) {
		if (pdp->page_directory[i] == vm->scratch_pd)
			continue;

		gen8_free_page_tables(vm, pdp->page_directory[i]);
		free_pd(vm, pdp->page_directory[i]);
	}

	free_pdp(vm, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
		if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
			continue;

		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(&ppgtt->base, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (use_4lvl(vm))
		gen8_ppgtt_cleanup_4lvl(ppgtt);
	else
		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);

	gen8_free_scratch(vm);
}

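/*
 * The gen8 allocation helpers below populate page tables on demand for the
 * requested range, replacing scratch entries as they go, and unwind back to
 * scratch on allocation failure.
 */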
static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       u64 start, u64 length)
{
	struct i915_page_table *pt;
	u64 from = start;
	unsigned int pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		if (pt == vm->scratch_pt) {
			pt = alloc_pt(vm);
			if (IS_ERR(pt))
				goto unwind;

			gen8_initialize_pt(vm, pt);

			gen8_ppgtt_set_pde(vm, pd, pt, pde);
			pd->used_pdes++;
			GEM_BUG_ON(pd->used_pdes > I915_PDES);
		}

		pt->used_ptes += gen8_pte_count(start, length);
	}
	return 0;

unwind:
	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				u64 start, u64 length)
{
	struct i915_page_directory *pd;
	u64 from = start;
	unsigned int pdpe;
	int ret;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (pd == vm->scratch_pd) {
			pd = alloc_pd(vm);
			if (IS_ERR(pd))
				goto unwind;

			gen8_initialize_pd(vm, pd);
			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
			pdp->used_pdpes++;
			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));

			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
		}

		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
		if (unlikely(ret))
			goto unwind_pd;
	}

	return 0;

unwind_pd:
	if (!pd->used_pdes) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;
		free_pd(vm, pd);
	}
unwind:
	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	return gen8_ppgtt_alloc_pdp(vm,
				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	u64 from = start;
	u32 pml4e;
	int ret;

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
			pdp = alloc_pdp(vm);
			if (IS_ERR(pdp))
				goto unwind;

			gen8_initialize_pdp(vm, pdp);
			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
		}

		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
		if (unlikely(ret))
			goto unwind_pdp;
	}

	return 0;

unwind_pdp:
	if (!pdp->used_pdpes) {
		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
		free_pdp(vm, pdp);
	}
unwind:
	gen8_ppgtt_clear_4lvl(vm, from, start - from);
	return -ENOMEM;
}

static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  u64 start, u64 length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory *pd;
	u32 pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		u64 pd_len = length;
		u64 pd_start = start;
		u32 pde;

		if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			u32 pte;
			gen8_pte_t *pt_vaddr;

			if (pd->page_table[pde] == ppgtt->base.scratch_pt)
				continue;

			pt_vaddr = kmap_atomic_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				u64 va = (pdpe << GEN8_PDPE_SHIFT |
					  pde << GEN8_PDE_SHIFT |
					  pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, " SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	u64 start = 0, length = ppgtt->base.total;

	if (use_4lvl(vm)) {
		u64 pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
				continue;

			seq_printf(m, " PML4E #%llu\n", pml4e);
			gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
		}
	} else {
		gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
	}
}

static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
	struct i915_page_directory *pd;
	u64 start = 0, length = ppgtt->base.total;
	u64 from = start;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		pd = alloc_pd(vm);
		if (IS_ERR(pd))
			goto unwind;

		gen8_initialize_pd(vm, pd);
		gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
		pdp->used_pdpes++;
	}

	pdp->used_pdpes++; /* never remove */
	return 0;

unwind:
	start -= from;
	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		free_pd(vm, pd);
	}
	pdp->used_pdpes = 0;
	return -ENOMEM;
}

/*
 * GEN8 legacy ppgtt programming is accomplished through a max 4 PDP registers
 * with a net effect resembling a 2-level page table in normal x86 terms. Each
 * PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB legacy 32b address
 * space.
 *
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	int ret;

	ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
		1ULL << 48 :
		1ULL << 32;

	ret = gen8_init_scratch(&ppgtt->base);
	if (ret) {
		ppgtt->base.total = 0;
		return ret;
	}

	/* There are only few exceptions for gen >=6. chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
		ppgtt->base.pt_kmap_wc = true;

	if (use_4lvl(vm)) {
		ret = setup_px(&ppgtt->base, &ppgtt->pml4);
		if (ret)
			goto free_scratch;

		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

		ppgtt->switch_mm = gen8_mm_switch_4lvl;
		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
		ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
		ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
	} else {
		ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
		if (ret)
			goto free_scratch;

		if (intel_vgpu_active(dev_priv)) {
			ret = gen8_preallocate_top_level_pdp(ppgtt);
			if (ret) {
				__pdp_fini(&ppgtt->pdp);
				goto free_scratch;
			}
		}

		ppgtt->switch_mm = gen8_mm_switch_3lvl;
		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
		ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
		ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
	}

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	return 0;

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
}

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_table *unused;
	gen6_pte_t scratch_pte;
	u32 pd_entry, pte, pde;
	u32 start = 0, length = ppgtt->base.total;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
		pd_entry = readl(ppgtt->pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, " SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_atomic(pt_vaddr);
	}
}

/* Write pde (index) from the page directory @pd to the page table @pt */
static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
				  const unsigned int pde,
				  const struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
		       ppgtt->pd_addr + pde);
}

/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
				  u32 start, u32 length)
{
	struct i915_page_table *pt;
	unsigned int pde;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
		gen6_write_pde(ppgtt, pde, pt);

	mark_tlbs_dirty(ppgtt);
	wmb();
}

static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
	return ppgtt->pd.base.ggtt_offset << 10;
}

static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	/* NB: TLBs must be flushed and invalidated before a switch */
	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = get_pd_offset(ppgtt);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	/* NB: TLBs must be flushed and invalidated before a switch */
	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = get_pd_offset(ppgtt);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = req->i915;

	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
	return 0;
}

static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;

	for_each_engine(engine, dev_priv, id) {
		u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ?
				 GEN8_GFX_PPGTT_48B : 0;
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level));
	}
}

static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;
	u32 ecochk, ecobits;
	enum intel_engine_id id;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

	ecochk = I915_READ(GAM_ECOCHK);
	if (IS_HASWELL(dev_priv)) {
		ecochk |= ECOCHK_PPGTT_WB_HSW;
	} else {
		ecochk |= ECOCHK_PPGTT_LLC_IVB;
		ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
	}
	I915_WRITE(GAM_ECOCHK, ecochk);

	for_each_engine(engine, dev_priv, id) {
		/* GFX_MODE is per-ring on gen7+ */
		I915_WRITE(RING_MODE_GEN7(engine),
			   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	}
}

static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv)
{
	u32 ecochk, gab_ctl, ecobits;

	ecobits = I915_READ(GAC_ECO_BITS);
	I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
		   ECOBITS_PPGTT_CACHE64B);

	gab_ctl = I915_READ(GAB_CTL);
	I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

	ecochk = I915_READ(GAM_ECOCHK);
	I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B);

	I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
}

/* PPGTT support for Sandybridge/Gen6 and later */
static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
				   u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned int first_entry = start >> PAGE_SHIFT;
	unsigned int pde = first_entry / GEN6_PTES;
	unsigned int pte = first_entry % GEN6_PTES;
	unsigned int num_entries = length >> PAGE_SHIFT;
	gen6_pte_t scratch_pte =
		vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0);

	while (num_entries) {
		struct i915_page_table *pt = ppgtt->pd.page_table[pde++];
		unsigned int end = min(pte + num_entries, GEN6_PTES);
		gen6_pte_t *vaddr;

		num_entries -= end - pte;

		/* Note that the hw doesn't support removing PDE on the fly
		 * (they are cached inside the context with no means to
		 * invalidate the cache), so we can only reset the PTE
		 * entries back to scratch.
		 */

		vaddr = kmap_atomic_px(pt);
		do {
			vaddr[pte++] = scratch_pte;
		} while (pte < end);
		kunmap_atomic(vaddr);

		pte = 0;
	}
}

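/*
 * Write PTEs for the given sg list, hopping to the next page table whenever
 * GEN6_PTES entries have been filled in the current one.
 */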
static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
				      struct sg_table *pages,
				      u64 start,
				      enum i915_cache_level cache_level,
				      u32 flags)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	unsigned first_entry = start >> PAGE_SHIFT;
	unsigned act_pt = first_entry / GEN6_PTES;
	unsigned act_pte = first_entry % GEN6_PTES;
	const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
	struct sgt_dma iter;
	gen6_pte_t *vaddr;

	vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]);
	iter.sg = pages->sgl;
	iter.dma = sg_dma_address(iter.sg);
	iter.max = iter.dma + iter.sg->length;
	do {
		vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma);

		iter.dma += PAGE_SIZE;
		if (iter.dma == iter.max) {
			iter.sg = __sg_next(iter.sg);
			if (!iter.sg)
				break;

			iter.dma = sg_dma_address(iter.sg);
			iter.max = iter.dma + iter.sg->length;
		}

		if (++act_pte == GEN6_PTES) {
			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]);
			act_pte = 0;
		}
	} while (1);
	kunmap_atomic(vaddr);
}

static int gen6_alloc_va_range(struct i915_address_space *vm,
			       u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_page_table *pt;
	u64 from = start;
	unsigned int pde;
	bool flush = false;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) {
		if (pt == vm->scratch_pt) {
			pt = alloc_pt(vm);
			if (IS_ERR(pt))
				goto unwind_out;

			gen6_initialize_pt(vm, pt);
			ppgtt->pd.page_table[pde] = pt;
			gen6_write_pde(ppgtt, pde, pt);
			flush = true;
		}
	}

	if (flush) {
		mark_tlbs_dirty(ppgtt);
		wmb();
	}

	return 0;

unwind_out:
	gen6_ppgtt_clear_range(vm, from, start);
	return -ENOMEM;
}

static int gen6_init_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = setup_scratch_page(vm, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm);
	if (IS_ERR(vm->scratch_pt)) {
		cleanup_scratch_page(vm);
		return PTR_ERR(vm->scratch_pt);
	}

	gen6_initialize_pt(vm, vm->scratch_pt);

	return 0;
}

static void gen6_free_scratch(struct i915_address_space *vm)
{
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
}

061dd493 1719static void gen6_ppgtt_cleanup(struct i915_address_space *vm)
a00d825d 1720{
e5716f55 1721 struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
731f74c5 1722 struct i915_page_directory *pd = &ppgtt->pd;
09942c65 1723 struct i915_page_table *pt;
75c7b0b8 1724 u32 pde;
4933d519 1725
061dd493
DV
1726 drm_mm_remove_node(&ppgtt->node);
1727
731f74c5 1728 gen6_for_all_pdes(pt, pd, pde)
79ab9370 1729 if (pt != vm->scratch_pt)
8448661d 1730 free_pt(vm, pt);
06fda602 1731
8776f02b 1732 gen6_free_scratch(vm);
3440d265
DV
1733}
1734
b146520f 1735static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
3440d265 1736{
8776f02b 1737 struct i915_address_space *vm = &ppgtt->base;
49d73912 1738 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 1739 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f 1740 int ret;
1d2a314c 1741
c8d4c0d6
BW
1742 /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The
1743 * allocator works in address space sizes, so it's multiplied by page
1744 * size. We allocate at the top of the GTT to avoid fragmentation.
1745 */
72e96d64 1746 BUG_ON(!drm_mm_initialized(&ggtt->base.mm));
4933d519 1747
8776f02b
MK
1748 ret = gen6_init_scratch(vm);
1749 if (ret)
1750 return ret;
4933d519 1751
e007b19d
CW
1752 ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node,
1753 GEN6_PD_SIZE, GEN6_PD_ALIGN,
1754 I915_COLOR_UNEVICTABLE,
1755 0, ggtt->base.total,
1756 PIN_HIGH);
c8c26622 1757 if (ret)
678d96fb
BW
1758 goto err_out;
1759
72e96d64 1760 if (ppgtt->node.start < ggtt->mappable_end)
c8d4c0d6 1761 DRM_DEBUG("Forced to use aperture for PDEs\n");
1d2a314c 1762
52c126ee
CW
1763 ppgtt->pd.base.ggtt_offset =
1764 ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
1765
1766 ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm +
1767 ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
1768
c8c26622 1769 return 0;
678d96fb
BW
1770
1771err_out:
8776f02b 1772 gen6_free_scratch(vm);
678d96fb 1773 return ret;
b146520f
BW
1774}
1775
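/*
 * Size note for the node reserved above (assuming the usual definition
 * GEN6_PD_SIZE = I915_PDES * PAGE_SIZE): the 512 PDEs only occupy 512
 * GTT entry slots in the GSM, but because the drm_mm allocator works in
 * address-space units the reservation spans 512 * 4 KiB = 2 MiB of GGTT
 * address space.
 */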
b146520f
BW
1776static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt)
1777{
2f2cf682 1778 return gen6_ppgtt_allocate_page_directories(ppgtt);
4933d519 1779}
06dc68d6 1780
4933d519 1781static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt,
75c7b0b8 1782 u64 start, u64 length)
4933d519 1783{
ec565b3c 1784 struct i915_page_table *unused;
75c7b0b8 1785 u32 pde;
1d2a314c 1786
731f74c5 1787 gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde)
79ab9370 1788 ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt;
b146520f
BW
1789}
1790
5c5f6457 1791static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
b146520f 1792{
49d73912 1793 struct drm_i915_private *dev_priv = ppgtt->base.i915;
72e96d64 1794 struct i915_ggtt *ggtt = &dev_priv->ggtt;
b146520f
BW
1795 int ret;
1796
72e96d64 1797 ppgtt->base.pte_encode = ggtt->base.pte_encode;
5db94019 1798 if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv))
b146520f 1799 ppgtt->switch_mm = gen6_mm_switch;
772c2a51 1800 else if (IS_HASWELL(dev_priv))
b146520f 1801 ppgtt->switch_mm = hsw_mm_switch;
5db94019 1802 else if (IS_GEN7(dev_priv))
b146520f 1803 ppgtt->switch_mm = gen7_mm_switch;
8eb95204 1804 else
b146520f
BW
1805 BUG();
1806
1807 ret = gen6_ppgtt_alloc(ppgtt);
1808 if (ret)
1809 return ret;
1810
09942c65 1811 ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE;
1d2a314c 1812
5c5f6457 1813 gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
16a011c8 1814 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
678d96fb 1815
52c126ee
CW
1816 ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total);
1817 if (ret) {
1818 gen6_ppgtt_cleanup(&ppgtt->base);
1819 return ret;
1820 }
1821
054b9acd
MK
1822 ppgtt->base.clear_range = gen6_ppgtt_clear_range;
1823 ppgtt->base.insert_entries = gen6_ppgtt_insert_entries;
1824 ppgtt->base.unbind_vma = ppgtt_unbind_vma;
1825 ppgtt->base.bind_vma = ppgtt_bind_vma;
1826 ppgtt->base.cleanup = gen6_ppgtt_cleanup;
1827 ppgtt->debug_dump = gen6_dump_ppgtt;
1828
440fd528 1829 DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n",
b146520f
BW
1830 ppgtt->node.size >> 20,
1831 ppgtt->node.start / PAGE_SIZE);
3440d265 1832
52c126ee
CW
1833 DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n",
1834 ppgtt->pd.base.ggtt_offset << 10);
fa76da34 1835
b146520f 1836 return 0;
3440d265
DV
1837}
1838
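/*
 * Back-of-the-envelope for the total set above: with I915_PDES = 512 and
 * GEN6_PTES = 1024 (a 4 KiB page table of 4-byte gen6 PTEs), the gen6/7
 * ppgtt covers 512 * 1024 * 4 KiB = 2 GiB of per-process address space.
 */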
2bfa996e
CW
1839static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt,
1840 struct drm_i915_private *dev_priv)
3440d265 1841{
49d73912 1842 ppgtt->base.i915 = dev_priv;
8448661d 1843 ppgtt->base.dma = &dev_priv->drm.pdev->dev;
3440d265 1844
2bfa996e 1845 if (INTEL_INFO(dev_priv)->gen < 8)
5c5f6457 1846 return gen6_ppgtt_init(ppgtt);
3ed124b2 1847 else
d7b2633d 1848 return gen8_ppgtt_init(ppgtt);
fa76da34 1849}
c114f76a 1850
a2cad9df 1851static void i915_address_space_init(struct i915_address_space *vm,
80b204bc
CW
1852 struct drm_i915_private *dev_priv,
1853 const char *name)
a2cad9df 1854{
80b204bc 1855 i915_gem_timeline_init(dev_priv, &vm->timeline, name);
47db922f 1856
381b943b 1857 drm_mm_init(&vm->mm, 0, vm->total);
47db922f
CW
1858 vm->mm.head_node.color = I915_COLOR_UNEVICTABLE;
1859
a2cad9df
MW
1860 INIT_LIST_HEAD(&vm->active_list);
1861 INIT_LIST_HEAD(&vm->inactive_list);
50e046b6 1862 INIT_LIST_HEAD(&vm->unbound_list);
47db922f 1863
a2cad9df 1864 list_add_tail(&vm->global_link, &dev_priv->vm_list);
8448661d 1865 pagevec_init(&vm->free_pages, false);
a2cad9df
MW
1866}
1867
ed9724dd
MA
1868static void i915_address_space_fini(struct i915_address_space *vm)
1869{
8448661d
CW
1870 if (pagevec_count(&vm->free_pages))
1871 vm_free_pages_release(vm);
1872
ed9724dd
MA
1873 i915_gem_timeline_fini(&vm->timeline);
1874 drm_mm_takedown(&vm->mm);
1875 list_del(&vm->global_link);
1876}
1877
c6be607a 1878static void gtt_write_workarounds(struct drm_i915_private *dev_priv)
d5165ebd 1879{
d5165ebd
TG
1880 /* This function is for gtt related workarounds. It is called on
1881 * driver load and after a GPU reset, so you can place workarounds
1882 * here even if they get overwritten by a GPU reset.
1883 */
9fb5026f 1884 /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */
8652744b 1885 if (IS_BROADWELL(dev_priv))
d5165ebd 1886 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW);
920a14b2 1887 else if (IS_CHERRYVIEW(dev_priv))
d5165ebd 1888 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV);
b976dc53 1889 else if (IS_GEN9_BC(dev_priv))
d5165ebd 1890 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL);
9fb5026f 1891 else if (IS_GEN9_LP(dev_priv))
d5165ebd
TG
1892 I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT);
1893}
1894
c6be607a 1895int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv)
82460d97 1896{
c6be607a 1897 gtt_write_workarounds(dev_priv);
d5165ebd 1898
671b5013
TD
1899 /* In the case of execlists, PPGTT is enabled by the context descriptor
1900 * and the PDPs are contained within the context itself. We don't
1901 * need to do anything here. */
1902 if (i915.enable_execlists)
1903 return 0;
1904
c6be607a 1905 if (!USES_PPGTT(dev_priv))
82460d97
DV
1906 return 0;
1907
5db94019 1908 if (IS_GEN6(dev_priv))
c6be607a 1909 gen6_ppgtt_enable(dev_priv);
5db94019 1910 else if (IS_GEN7(dev_priv))
c6be607a
TU
1911 gen7_ppgtt_enable(dev_priv);
1912 else if (INTEL_GEN(dev_priv) >= 8)
1913 gen8_ppgtt_enable(dev_priv);
82460d97 1914 else
c6be607a 1915 MISSING_CASE(INTEL_GEN(dev_priv));
82460d97 1916
4ad2fd88
JH
1917 return 0;
1918}
1d2a314c 1919
4d884705 1920struct i915_hw_ppgtt *
2bfa996e 1921i915_ppgtt_create(struct drm_i915_private *dev_priv,
80b204bc
CW
1922 struct drm_i915_file_private *fpriv,
1923 const char *name)
4d884705
DV
1924{
1925 struct i915_hw_ppgtt *ppgtt;
1926 int ret;
1927
1928 ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
1929 if (!ppgtt)
1930 return ERR_PTR(-ENOMEM);
1931
1188bc66 1932 ret = __hw_ppgtt_init(ppgtt, dev_priv);
4d884705
DV
1933 if (ret) {
1934 kfree(ppgtt);
1935 return ERR_PTR(ret);
1936 }
1937
1188bc66
CW
1938 kref_init(&ppgtt->ref);
1939 i915_address_space_init(&ppgtt->base, dev_priv, name);
1940 ppgtt->base.file = fpriv;
1941
198c974d
DCS
1942 trace_i915_ppgtt_create(&ppgtt->base);
1943
4d884705
DV
1944 return ppgtt;
1945}
1946
0c7eeda1
CW
1947void i915_ppgtt_close(struct i915_address_space *vm)
1948{
1949 struct list_head *phases[] = {
1950 &vm->active_list,
1951 &vm->inactive_list,
1952 &vm->unbound_list,
1953 NULL,
1954 }, **phase;
1955
1956 GEM_BUG_ON(vm->closed);
1957 vm->closed = true;
1958
1959 for (phase = phases; *phase; phase++) {
1960 struct i915_vma *vma, *vn;
1961
1962 list_for_each_entry_safe(vma, vn, *phase, vm_link)
1963 if (!i915_vma_is_closed(vma))
1964 i915_vma_close(vma);
1965 }
1966}
1967
ed9724dd 1968void i915_ppgtt_release(struct kref *kref)
ee960be7
DV
1969{
1970 struct i915_hw_ppgtt *ppgtt =
1971 container_of(kref, struct i915_hw_ppgtt, ref);
1972
198c974d
DCS
1973 trace_i915_ppgtt_release(&ppgtt->base);
1974
50e046b6 1975 /* vmas should already be unbound and destroyed */
ee960be7
DV
1976 WARN_ON(!list_empty(&ppgtt->base.active_list));
1977 WARN_ON(!list_empty(&ppgtt->base.inactive_list));
50e046b6 1978 WARN_ON(!list_empty(&ppgtt->base.unbound_list));
ee960be7
DV
1979
1980 ppgtt->base.cleanup(&ppgtt->base);
8448661d 1981 i915_address_space_fini(&ppgtt->base);
ee960be7
DV
1982 kfree(ppgtt);
1983}
1d2a314c 1984
a81cc00c
BW
1985 /* Certain Gen5 chipsets require idling the GPU before
1986 * unmapping anything from the GTT when VT-d is enabled.
1987 */
97d6d7ab 1988static bool needs_idle_maps(struct drm_i915_private *dev_priv)
a81cc00c
BW
1989{
1990#ifdef CONFIG_INTEL_IOMMU
1991 /* Query intel_iommu to see if we need the workaround. Presumably that
1992 * was loaded first.
1993 */
97d6d7ab 1994 if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped)
a81cc00c
BW
1995 return true;
1996#endif
1997 return false;
1998}
1999
dc97997a 2000void i915_check_and_clear_faults(struct drm_i915_private *dev_priv)
828c7908 2001{
e2f80391 2002 struct intel_engine_cs *engine;
3b3f1650 2003 enum intel_engine_id id;
828c7908 2004
dc97997a 2005 if (INTEL_INFO(dev_priv)->gen < 6)
828c7908
BW
2006 return;
2007
3b3f1650 2008 for_each_engine(engine, dev_priv, id) {
828c7908 2009 u32 fault_reg;
e2f80391 2010 fault_reg = I915_READ(RING_FAULT_REG(engine));
828c7908
BW
2011 if (fault_reg & RING_FAULT_VALID) {
2012 DRM_DEBUG_DRIVER("Unexpected fault\n"
59a5d290 2013 "\tAddr: 0x%08lx\n"
828c7908
BW
2014 "\tAddress space: %s\n"
2015 "\tSource ID: %d\n"
2016 "\tType: %d\n",
2017 fault_reg & PAGE_MASK,
2018 fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT",
2019 RING_FAULT_SRCID(fault_reg),
2020 RING_FAULT_FAULT_TYPE(fault_reg));
e2f80391 2021 I915_WRITE(RING_FAULT_REG(engine),
828c7908
BW
2022 fault_reg & ~RING_FAULT_VALID);
2023 }
2024 }
3b3f1650
AG
2025
2026 /* Engine specific init may not have been done till this point. */
2027 if (dev_priv->engine[RCS])
2028 POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS]));
828c7908
BW
2029}
2030
275a991c 2031void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv)
828c7908 2032{
72e96d64 2033 struct i915_ggtt *ggtt = &dev_priv->ggtt;
828c7908
BW
2034
2035 /* Don't bother messing with faults pre GEN6 as we have little
2036 * documentation supporting that it's a good idea.
2037 */
275a991c 2038 if (INTEL_GEN(dev_priv) < 6)
828c7908
BW
2039 return;
2040
dc97997a 2041 i915_check_and_clear_faults(dev_priv);
828c7908 2042
381b943b 2043 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
91e56499 2044
7c3f86b6 2045 i915_ggtt_invalidate(dev_priv);
828c7908
BW
2046}
2047
03ac84f1
CW
2048int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj,
2049 struct sg_table *pages)
7c2e6fdf 2050{
1a292fa5
CW
2051 do {
2052 if (dma_map_sg(&obj->base.dev->pdev->dev,
2053 pages->sgl, pages->nents,
2054 PCI_DMA_BIDIRECTIONAL))
2055 return 0;
2056
2057 /* If the DMA remap fails, one cause can be that we have
2058 * too many objects pinned in a small remapping table,
2059 * such as swiotlb. Incrementally purge all other objects and
2060 * try again - if there are no more pages to remove from
2061 * the DMA remapper, i915_gem_shrink will return 0.
2062 */
2063 GEM_BUG_ON(obj->mm.pages == pages);
2064 } while (i915_gem_shrink(to_i915(obj->base.dev),
2065 obj->base.size >> PAGE_SHIFT,
2066 I915_SHRINK_BOUND |
2067 I915_SHRINK_UNBOUND |
2068 I915_SHRINK_ACTIVE));
9da3da66 2069
03ac84f1 2070 return -ENOSPC;
7c2e6fdf
DV
2071}
2072
2c642b07 2073static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
94ec8f61 2074{
94ec8f61 2075 writeq(pte, addr);
94ec8f61
BW
2076}
2077
d6473f56
CW
2078static void gen8_ggtt_insert_page(struct i915_address_space *vm,
2079 dma_addr_t addr,
75c7b0b8 2080 u64 offset,
d6473f56
CW
2081 enum i915_cache_level level,
2082 u32 unused)
2083{
7c3f86b6 2084 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2085 gen8_pte_t __iomem *pte =
7c3f86b6 2086 (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2087
4fb84d99 2088 gen8_set_pte(pte, gen8_pte_encode(addr, level));
d6473f56 2089
7c3f86b6 2090 ggtt->invalidate(vm->i915);
d6473f56
CW
2091}
2092
94ec8f61
BW
2093static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
2094 struct sg_table *st,
75c7b0b8
CW
2095 u64 start,
2096 enum i915_cache_level level,
2097 u32 unused)
94ec8f61 2098{
ce7fda2e 2099 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
85d1225e
DG
2100 struct sgt_iter sgt_iter;
2101 gen8_pte_t __iomem *gtt_entries;
894ccebe 2102 const gen8_pte_t pte_encode = gen8_pte_encode(0, level);
85d1225e 2103 dma_addr_t addr;
be69459a 2104
894ccebe
CW
2105 gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm;
2106 gtt_entries += start >> PAGE_SHIFT;
2107 for_each_sgt_dma(addr, sgt_iter, st)
2108 gen8_set_pte(gtt_entries++, pte_encode | addr);
85d1225e 2109
894ccebe 2110 wmb();
94ec8f61 2111
94ec8f61
BW
2112 /* This next bit makes the above posting read even more important. We
2113 * want to flush the TLBs only after we're certain all the PTE updates
2114 * have finished.
2115 */
7c3f86b6 2116 ggtt->invalidate(vm->i915);
94ec8f61
BW
2117}
2118
d6473f56
CW
2119static void gen6_ggtt_insert_page(struct i915_address_space *vm,
2120 dma_addr_t addr,
75c7b0b8 2121 u64 offset,
d6473f56
CW
2122 enum i915_cache_level level,
2123 u32 flags)
2124{
7c3f86b6 2125 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
d6473f56 2126 gen6_pte_t __iomem *pte =
7c3f86b6 2127 (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT);
d6473f56 2128
4fb84d99 2129 iowrite32(vm->pte_encode(addr, level, flags), pte);
d6473f56 2130
7c3f86b6 2131 ggtt->invalidate(vm->i915);
d6473f56
CW
2132}
2133
e76e9aeb
BW
2134/*
2135 * Binds an object into the global gtt with the specified cache level. The object
2136 * will be accessible to the GPU via commands whose operands reference offsets
2137 * within the global GTT as well as accessible by the GPU through the GMADR
2138 * mapped BAR (dev_priv->mm.gtt->gtt).
2139 */
853ba5d2 2140static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
7faf1ab2 2141 struct sg_table *st,
75c7b0b8
CW
2142 u64 start,
2143 enum i915_cache_level level,
2144 u32 flags)
e76e9aeb 2145{
ce7fda2e 2146 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
b31144c0
CW
2147 gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm;
2148 unsigned int i = start >> PAGE_SHIFT;
2149 struct sgt_iter iter;
85d1225e 2150 dma_addr_t addr;
b31144c0
CW
2151 for_each_sgt_dma(addr, iter, st)
2152 iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]);
2153 wmb();
0f9b91c7
BW
2154
2155 /* This next bit makes the above posting read even more important. We
2156 * want to flush the TLBs only after we're certain all the PTE updates
2157 * have finished.
2158 */
7c3f86b6 2159 ggtt->invalidate(vm->i915);
e76e9aeb
BW
2160}
2161
f7770bfd 2162static void nop_clear_range(struct i915_address_space *vm,
75c7b0b8 2163 u64 start, u64 length)
f7770bfd
CW
2164{
2165}
2166
94ec8f61 2167static void gen8_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2168 u64 start, u64 length)
94ec8f61 2169{
ce7fda2e 2170 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2171 unsigned first_entry = start >> PAGE_SHIFT;
2172 unsigned num_entries = length >> PAGE_SHIFT;
894ccebe
CW
2173 const gen8_pte_t scratch_pte =
2174 gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
2175 gen8_pte_t __iomem *gtt_base =
72e96d64
JL
2176 (gen8_pte_t __iomem *)ggtt->gsm + first_entry;
2177 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
94ec8f61
BW
2178 int i;
2179
2180 if (WARN(num_entries > max_entries,
2181 "First entry = %d; Num entries = %d (max=%d)\n",
2182 first_entry, num_entries, max_entries))
2183 num_entries = max_entries;
2184
94ec8f61
BW
2185 for (i = 0; i < num_entries; i++)
2186 gen8_set_pte(&gtt_base[i], scratch_pte);
94ec8f61
BW
2187}
2188
853ba5d2 2189static void gen6_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2190 u64 start, u64 length)
7faf1ab2 2191{
ce7fda2e 2192 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
782f1495
BW
2193 unsigned first_entry = start >> PAGE_SHIFT;
2194 unsigned num_entries = length >> PAGE_SHIFT;
07749ef3 2195 gen6_pte_t scratch_pte, __iomem *gtt_base =
72e96d64
JL
2196 (gen6_pte_t __iomem *)ggtt->gsm + first_entry;
2197 const int max_entries = ggtt_total_entries(ggtt) - first_entry;
7faf1ab2
DV
2198 int i;
2199
2200 if (WARN(num_entries > max_entries,
2201 "First entry = %d; Num entries = %d (max=%d)\n",
2202 first_entry, num_entries, max_entries))
2203 num_entries = max_entries;
2204
8bcdd0f7 2205 scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
4fb84d99 2206 I915_CACHE_LLC, 0);
828c7908 2207
7faf1ab2
DV
2208 for (i = 0; i < num_entries; i++)
2209 iowrite32(scratch_pte, &gtt_base[i]);
7faf1ab2
DV
2210}
2211
d6473f56
CW
2212static void i915_ggtt_insert_page(struct i915_address_space *vm,
2213 dma_addr_t addr,
75c7b0b8 2214 u64 offset,
d6473f56
CW
2215 enum i915_cache_level cache_level,
2216 u32 unused)
2217{
d6473f56
CW
2218 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2219 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
d6473f56
CW
2220
2221 intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags);
d6473f56
CW
2222}
2223
d369d2d9
DV
2224static void i915_ggtt_insert_entries(struct i915_address_space *vm,
2225 struct sg_table *pages,
75c7b0b8
CW
2226 u64 start,
2227 enum i915_cache_level cache_level,
2228 u32 unused)
7faf1ab2
DV
2229{
2230 unsigned int flags = (cache_level == I915_CACHE_NONE) ?
2231 AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
2232
d369d2d9 2233 intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags);
7faf1ab2
DV
2234}
2235
853ba5d2 2236static void i915_ggtt_clear_range(struct i915_address_space *vm,
75c7b0b8 2237 u64 start, u64 length)
7faf1ab2 2238{
2eedfc7d 2239 intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT);
7faf1ab2
DV
2240}
2241
70b9f6f8
DV
2242static int ggtt_bind_vma(struct i915_vma *vma,
2243 enum i915_cache_level cache_level,
2244 u32 flags)
0a878716 2245{
49d73912 2246 struct drm_i915_private *i915 = vma->vm->i915;
0a878716 2247 struct drm_i915_gem_object *obj = vma->obj;
ba7a5741 2248 u32 pte_flags;
0a878716 2249
ba7a5741
CW
2250 if (unlikely(!vma->pages)) {
2251 int ret = i915_get_ggtt_vma_pages(vma);
2252 if (ret)
2253 return ret;
2254 }
0a878716
DV
2255
2256 /* Currently applicable only to VLV */
ba7a5741 2257 pte_flags = 0;
0a878716
DV
2258 if (obj->gt_ro)
2259 pte_flags |= PTE_READ_ONLY;
2260
9c870d03 2261 intel_runtime_pm_get(i915);
247177dd 2262 vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
0a878716 2263 cache_level, pte_flags);
9c870d03 2264 intel_runtime_pm_put(i915);
0a878716
DV
2265
2266 /*
2267 * Without aliasing PPGTT there's no difference between
2268 * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
2269 * upgrade to both bound if we bind either to avoid double-binding.
2270 */
3272db53 2271 vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
0a878716
DV
2272
2273 return 0;
2274}
2275
cbc4e9e6
CW
2276static void ggtt_unbind_vma(struct i915_vma *vma)
2277{
2278 struct drm_i915_private *i915 = vma->vm->i915;
2279
2280 intel_runtime_pm_get(i915);
2281 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
2282 intel_runtime_pm_put(i915);
2283}
2284
0a878716
DV
2285static int aliasing_gtt_bind_vma(struct i915_vma *vma,
2286 enum i915_cache_level cache_level,
2287 u32 flags)
d5bd1449 2288{
49d73912 2289 struct drm_i915_private *i915 = vma->vm->i915;
321d178e 2290 u32 pte_flags;
ff685975 2291 int ret;
70b9f6f8 2292
ba7a5741 2293 if (unlikely(!vma->pages)) {
ff685975 2294 ret = i915_get_ggtt_vma_pages(vma);
ba7a5741
CW
2295 if (ret)
2296 return ret;
2297 }
7faf1ab2 2298
24f3a8cf 2299 /* Currently applicable only to VLV */
321d178e
CW
2300 pte_flags = 0;
2301 if (vma->obj->gt_ro)
f329f5f6 2302 pte_flags |= PTE_READ_ONLY;
24f3a8cf 2303
ff685975
CW
2304 if (flags & I915_VMA_LOCAL_BIND) {
2305 struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt;
2306
2307 if (appgtt->base.allocate_va_range) {
2308 ret = appgtt->base.allocate_va_range(&appgtt->base,
2309 vma->node.start,
2310 vma->node.size);
2311 if (ret)
2f7399af 2312 goto err_pages;
ff685975
CW
2313 }
2314
2315 appgtt->base.insert_entries(&appgtt->base,
2316 vma->pages, vma->node.start,
2317 cache_level, pte_flags);
2318 }
2319
3272db53 2320 if (flags & I915_VMA_GLOBAL_BIND) {
9c870d03 2321 intel_runtime_pm_get(i915);
321d178e 2322 vma->vm->insert_entries(vma->vm,
247177dd 2323 vma->pages, vma->node.start,
0875546c 2324 cache_level, pte_flags);
9c870d03 2325 intel_runtime_pm_put(i915);
6f65e29a 2326 }
d5bd1449 2327
70b9f6f8 2328 return 0;
2f7399af
CW
2329
2330err_pages:
2331 if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) {
2332 if (vma->pages != vma->obj->mm.pages) {
2333 GEM_BUG_ON(!vma->pages);
2334 sg_free_table(vma->pages);
2335 kfree(vma->pages);
2336 }
2337 vma->pages = NULL;
2338 }
2339 return ret;
d5bd1449
CW
2340}
2341
cbc4e9e6 2342static void aliasing_gtt_unbind_vma(struct i915_vma *vma)
74163907 2343{
49d73912 2344 struct drm_i915_private *i915 = vma->vm->i915;
6f65e29a 2345
9c870d03
CW
2346 if (vma->flags & I915_VMA_GLOBAL_BIND) {
2347 intel_runtime_pm_get(i915);
cbc4e9e6 2348 vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
9c870d03
CW
2349 intel_runtime_pm_put(i915);
2350 }
06615ee5 2351
cbc4e9e6
CW
2352 if (vma->flags & I915_VMA_LOCAL_BIND) {
2353 struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base;
2354
2355 vm->clear_range(vm, vma->node.start, vma->size);
2356 }
74163907
DV
2357}
2358
03ac84f1
CW
2359void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
2360 struct sg_table *pages)
7c2e6fdf 2361{
52a05c30
DW
2362 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2363 struct device *kdev = &dev_priv->drm.pdev->dev;
307dc25b 2364 struct i915_ggtt *ggtt = &dev_priv->ggtt;
5c042287 2365
307dc25b 2366 if (unlikely(ggtt->do_idle_maps)) {
228ec87c 2367 if (i915_gem_wait_for_idle(dev_priv, 0)) {
307dc25b
CW
2368 DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
2369 /* Wait a bit, in hopes it avoids the hang */
2370 udelay(10);
2371 }
2372 }
5c042287 2373
03ac84f1 2374 dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL);
7c2e6fdf 2375}
644ec02b 2376
45b186f1 2377static void i915_gtt_color_adjust(const struct drm_mm_node *node,
42d6ab48 2378 unsigned long color,
440fd528
TR
2379 u64 *start,
2380 u64 *end)
42d6ab48 2381{
a6508ded 2382 if (node->allocated && node->color != color)
f51455d4 2383 *start += I915_GTT_PAGE_SIZE;
42d6ab48 2384
a6508ded
CW
2385 /* Also leave a space between the unallocated reserved node after the
2386 * GTT and any objects within the GTT, i.e. we use the color adjustment
2387 * to insert a guard page to prevent prefetches crossing over the
2388 * GTT boundary.
2389 */
b44f97fd 2390 node = list_next_entry(node, node_list);
a6508ded 2391 if (node->color != color)
f51455d4 2392 *end -= I915_GTT_PAGE_SIZE;
42d6ab48 2393}
fbe5d36e 2394
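/*
 * Illustrative effect of the colouring above: a hole whose neighbours
 * both differ in colour, say [0x10000, 0x20000), is trimmed by
 * I915_GTT_PAGE_SIZE on each side to [0x11000, 0x1f000), leaving a
 * one-page guard against CS prefetch at either end.
 */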
6cde9a02
CW
2395int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915)
2396{
2397 struct i915_ggtt *ggtt = &i915->ggtt;
2398 struct i915_hw_ppgtt *ppgtt;
2399 int err;
2400
57202f47 2401 ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]");
1188bc66
CW
2402 if (IS_ERR(ppgtt))
2403 return PTR_ERR(ppgtt);
6cde9a02 2404
e565ceb0
CW
2405 if (WARN_ON(ppgtt->base.total < ggtt->base.total)) {
2406 err = -ENODEV;
2407 goto err_ppgtt;
2408 }
2409
6cde9a02 2410 if (ppgtt->base.allocate_va_range) {
e565ceb0
CW
2411 /* Note we only pre-allocate as far as the end of the global
2412 * GTT. On 48b / 4-level page-tables, the difference is very,
2413 * very significant! We have to preallocate as GVT/vgpu does
2414 * not like the page directory disappearing.
2415 */
6cde9a02 2416 err = ppgtt->base.allocate_va_range(&ppgtt->base,
e565ceb0 2417 0, ggtt->base.total);
6cde9a02 2418 if (err)
1188bc66 2419 goto err_ppgtt;
6cde9a02
CW
2420 }
2421
6cde9a02 2422 i915->mm.aliasing_ppgtt = ppgtt;
cbc4e9e6 2423
6cde9a02
CW
2424 WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma);
2425 ggtt->base.bind_vma = aliasing_gtt_bind_vma;
2426
cbc4e9e6
CW
2427 WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma);
2428 ggtt->base.unbind_vma = aliasing_gtt_unbind_vma;
2429
6cde9a02
CW
2430 return 0;
2431
6cde9a02 2432err_ppgtt:
1188bc66 2433 i915_ppgtt_put(ppgtt);
6cde9a02
CW
2434 return err;
2435}
2436
2437void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915)
2438{
2439 struct i915_ggtt *ggtt = &i915->ggtt;
2440 struct i915_hw_ppgtt *ppgtt;
2441
2442 ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt);
2443 if (!ppgtt)
2444 return;
2445
1188bc66 2446 i915_ppgtt_put(ppgtt);
6cde9a02
CW
2447
2448 ggtt->base.bind_vma = ggtt_bind_vma;
cbc4e9e6 2449 ggtt->base.unbind_vma = ggtt_unbind_vma;
6cde9a02
CW
2450}
2451
f6b9d5ca 2452int i915_gem_init_ggtt(struct drm_i915_private *dev_priv)
644ec02b 2453{
e78891ca
BW
2454 /* Let GEM manage all of the aperture.
2455 *
2456 * However, leave one page at the end still bound to the scratch page.
2457 * There are a number of places where the hardware apparently prefetches
2458 * past the end of the object, and we've seen multiple hangs with the
2459 * GPU head pointer stuck in a batchbuffer bound at the last page of the
2460 * aperture. One page should be enough to keep any prefetching inside
2461 * of the aperture.
2462 */
72e96d64 2463 struct i915_ggtt *ggtt = &dev_priv->ggtt;
ed2f3452 2464 unsigned long hole_start, hole_end;
f6b9d5ca 2465 struct drm_mm_node *entry;
fa76da34 2466 int ret;
644ec02b 2467
b02d22a3
ZW
2468 ret = intel_vgt_balloon(dev_priv);
2469 if (ret)
2470 return ret;
5dda8fa3 2471
95374d75 2472 /* Reserve a mappable slot for our lockless error capture */
4e64e553
CW
2473 ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture,
2474 PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE,
2475 0, ggtt->mappable_end,
2476 DRM_MM_INSERT_LOW);
95374d75
CW
2477 if (ret)
2478 return ret;
2479
ed2f3452 2480 /* Clear any non-preallocated blocks */
72e96d64 2481 drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) {
ed2f3452
CW
2482 DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
2483 hole_start, hole_end);
72e96d64 2484 ggtt->base.clear_range(&ggtt->base, hole_start,
4fb84d99 2485 hole_end - hole_start);
ed2f3452
CW
2486 }
2487
2488 /* And finally clear the reserved guard page */
f6b9d5ca 2489 ggtt->base.clear_range(&ggtt->base,
4fb84d99 2490 ggtt->base.total - PAGE_SIZE, PAGE_SIZE);
6c5566a8 2491
97d6d7ab 2492 if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) {
6cde9a02 2493 ret = i915_gem_init_aliasing_ppgtt(dev_priv);
95374d75 2494 if (ret)
6cde9a02 2495 goto err;
fa76da34
DV
2496 }
2497
6c5566a8 2498 return 0;
95374d75 2499
95374d75
CW
2500err:
2501 drm_mm_remove_node(&ggtt->error_capture);
2502 return ret;
e76e9aeb
BW
2503}
2504
d85489d3
JL
2505/**
2506 * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization
97d6d7ab 2507 * @dev_priv: i915 device
d85489d3 2508 */
97d6d7ab 2509void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
90d0a0e8 2510{
72e96d64 2511 struct i915_ggtt *ggtt = &dev_priv->ggtt;
94d4a2a9
CW
2512 struct i915_vma *vma, *vn;
2513
2514 ggtt->base.closed = true;
2515
2516 mutex_lock(&dev_priv->drm.struct_mutex);
2517 WARN_ON(!list_empty(&ggtt->base.active_list));
2518 list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link)
2519 WARN_ON(i915_vma_unbind(vma));
2520 mutex_unlock(&dev_priv->drm.struct_mutex);
90d0a0e8 2521
97d6d7ab 2522 i915_gem_cleanup_stolen(&dev_priv->drm);
a4eba47b 2523
1188bc66
CW
2524 mutex_lock(&dev_priv->drm.struct_mutex);
2525 i915_gem_fini_aliasing_ppgtt(dev_priv);
2526
95374d75
CW
2527 if (drm_mm_node_allocated(&ggtt->error_capture))
2528 drm_mm_remove_node(&ggtt->error_capture);
2529
72e96d64 2530 if (drm_mm_initialized(&ggtt->base.mm)) {
b02d22a3 2531 intel_vgt_deballoon(dev_priv);
ed9724dd 2532 i915_address_space_fini(&ggtt->base);
90d0a0e8
DV
2533 }
2534
72e96d64 2535 ggtt->base.cleanup(&ggtt->base);
1188bc66 2536 mutex_unlock(&dev_priv->drm.struct_mutex);
f6b9d5ca
CW
2537
2538 arch_phys_wc_del(ggtt->mtrr);
f7bbe788 2539 io_mapping_fini(&ggtt->mappable);
90d0a0e8 2540}
70e32544 2541
2c642b07 2542static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2543{
2544 snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
2545 snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
2546 return snb_gmch_ctl << 20;
2547}
2548
2c642b07 2549static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
9459d252
BW
2550{
2551 bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT;
2552 bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
2553 if (bdw_gmch_ctl)
2554 bdw_gmch_ctl = 1 << bdw_gmch_ctl;
562d55d9
BW
2555
2556#ifdef CONFIG_X86_32
2557 /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */
2558 if (bdw_gmch_ctl > 4)
2559 bdw_gmch_ctl = 4;
2560#endif
2561
9459d252
BW
2562 return bdw_gmch_ctl << 20;
2563}
2564
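/*
 * Worked example: a GGMS field of 3 decodes to (1 << 3) << 20 = 8 MiB of
 * PTEs in the BAR; with 8-byte gen8 PTEs that corresponds to
 * (8 MiB / 8) * 4 KiB = 4 GiB of GGTT address space, matching the
 * ggtt->base.total computation in gen8_gmch_probe() below.
 */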
2c642b07 2565static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl)
d7f25f23
DL
2566{
2567 gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT;
2568 gmch_ctrl &= SNB_GMCH_GGMS_MASK;
2569
2570 if (gmch_ctrl)
2571 return 1 << (20 + gmch_ctrl);
2572
2573 return 0;
2574}
2575
2c642b07 2576static size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
e76e9aeb
BW
2577{
2578 snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
2579 snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
2580 return snb_gmch_ctl << 25; /* 32 MB units */
2581}
2582
2c642b07 2583static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl)
9459d252
BW
2584{
2585 bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2586 bdw_gmch_ctl &= BDW_GMCH_GMS_MASK;
2587 return bdw_gmch_ctl << 25; /* 32 MB units */
2588}
2589
d7f25f23
DL
2590static size_t chv_get_stolen_size(u16 gmch_ctrl)
2591{
2592 gmch_ctrl >>= SNB_GMCH_GMS_SHIFT;
2593 gmch_ctrl &= SNB_GMCH_GMS_MASK;
2594
2595 /*
2596 * 0x0 to 0x10: 32MB increments starting at 0MB
2597 * 0x11 to 0x16: 4MB increments starting at 8MB
2598 * 0x17 to 0x1d: 4MB increments starting at 36MB
2599 */
2600 if (gmch_ctrl < 0x11)
2601 return gmch_ctrl << 25;
2602 else if (gmch_ctrl < 0x17)
2603 return (gmch_ctrl - 0x11 + 2) << 22;
2604 else
2605 return (gmch_ctrl - 0x17 + 9) << 22;
2606}
2607
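/*
 * Worked examples for the ranges above: 0x02 -> 0x02 << 25 = 64 MiB,
 * 0x13 -> (0x13 - 0x11 + 2) << 22 = 16 MiB,
 * 0x17 -> (0x17 - 0x17 + 9) << 22 = 36 MiB.
 */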
66375014
DL
2608static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl)
2609{
2610 gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT;
2611 gen9_gmch_ctl &= BDW_GMCH_GMS_MASK;
2612
2613 if (gen9_gmch_ctl < 0xf0)
2614 return gen9_gmch_ctl << 25; /* 32 MB units */
2615 else
2616 /* 4MB increments starting at 0xf0 for 4MB */
2617 return (gen9_gmch_ctl - 0xf0 + 1) << 22;
2618}
2619
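/*
 * Worked examples: 0x04 -> 0x04 << 25 = 128 MiB of stolen memory,
 * 0xf1 -> (0xf1 - 0xf0 + 1) << 22 = 8 MiB.
 */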
34c998b4 2620static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size)
63340133 2621{
49d73912
CW
2622 struct drm_i915_private *dev_priv = ggtt->base.i915;
2623 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2624 phys_addr_t phys_addr;
8bcdd0f7 2625 int ret;
63340133
BW
2626
2627 /* For Modern GENs the PTEs and register space are split in the BAR */
34c998b4 2628 phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2;
63340133 2629
2a073f89
ID
2630 /*
2631 * On BXT writes larger than 64 bit to the GTT pagetable range will be
2632 * dropped. For WC mappings in general we have 64 byte burst writes
2633 * when the WC buffer is flushed, so we can't use it, but have to
2634 * resort to an uncached mapping. The WC issue is easily caught by the
2635 * readback check when writing GTT PTE entries.
2636 */
cc3f90f0 2637 if (IS_GEN9_LP(dev_priv))
34c998b4 2638 ggtt->gsm = ioremap_nocache(phys_addr, size);
2a073f89 2639 else
34c998b4 2640 ggtt->gsm = ioremap_wc(phys_addr, size);
72e96d64 2641 if (!ggtt->gsm) {
34c998b4 2642 DRM_ERROR("Failed to map the ggtt page table\n");
63340133
BW
2643 return -ENOMEM;
2644 }
2645
8448661d 2646 ret = setup_scratch_page(&ggtt->base, GFP_DMA32);
8bcdd0f7 2647 if (ret) {
63340133
BW
2648 DRM_ERROR("Scratch setup failed\n");
2649 /* iounmap will also get called at remove, but meh */
72e96d64 2650 iounmap(ggtt->gsm);
8bcdd0f7 2651 return ret;
63340133
BW
2652 }
2653
4ad2af1e 2654 return 0;
63340133
BW
2655}
2656
fbe5d36e
BW
2657/* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability
2658 * bits. When using advanced contexts each context stores its own PAT, but
2659 * writing this data shouldn't be harmful even in those cases. */
ee0ce478 2660static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
fbe5d36e 2661{
75c7b0b8 2662 u64 pat;
fbe5d36e
BW
2663
2664 pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */
2665 GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */
2666 GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */
2667 GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */
2668 GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) |
2669 GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) |
2670 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
2671 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
2672
2d1fe073 2673 if (!USES_PPGTT(dev_priv))
d6a8b72e
RV
2674 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
2675 * so RTL will always use the value corresponding to
2676 * pat_sel = 000".
2677 * So let's disable cache for GGTT to avoid screen corruptions.
2678 * MOCS still can be used though.
2679 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
2680 * before this patch, i.e. the same uncached + snooping access
2681 * like on gen6/7 seems to be in effect.
2682 * - So this just fixes blitter/render access. Again it looks
2683 * like it's not just uncached access, but uncached + snooping.
2684 * So we can still hold onto all our assumptions wrt cpu
2685 * clflushing on LLC machines.
2686 */
2687 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
2688
fbe5d36e
BW
2689 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
2690 * write would work. */
7e435ad2
VS
2691 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2692 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
fbe5d36e
BW
2693}
2694
ee0ce478
VS
2695static void chv_setup_private_ppat(struct drm_i915_private *dev_priv)
2696{
75c7b0b8 2697 u64 pat;
ee0ce478
VS
2698
2699 /*
2700 * Map WB on BDW to snooped on CHV.
2701 *
2702 * Only the snoop bit has meaning for CHV, the rest is
2703 * ignored.
2704 *
cf3d262e
VS
2705 * The hardware will never snoop for certain types of accesses:
2706 * - CPU GTT (GMADR->GGTT->no snoop->memory)
2707 * - PPGTT page tables
2708 * - some other special cycles
2709 *
2710 * As with BDW, we also need to consider the following for GT accesses:
2711 * "For GGTT, there is NO pat_sel[2:0] from the entry,
2712 * so RTL will always use the value corresponding to
2713 * pat_sel = 000".
2714 * Which means we must set the snoop bit in PAT entry 0
2715 * in order to keep the global status page working.
ee0ce478
VS
2716 */
2717 pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) |
2718 GEN8_PPAT(1, 0) |
2719 GEN8_PPAT(2, 0) |
2720 GEN8_PPAT(3, 0) |
2721 GEN8_PPAT(4, CHV_PPAT_SNOOP) |
2722 GEN8_PPAT(5, CHV_PPAT_SNOOP) |
2723 GEN8_PPAT(6, CHV_PPAT_SNOOP) |
2724 GEN8_PPAT(7, CHV_PPAT_SNOOP);
2725
7e435ad2
VS
2726 I915_WRITE(GEN8_PRIVATE_PAT_LO, pat);
2727 I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32);
ee0ce478
VS
2728}
2729
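/*
 * In both PPAT setups above, each GEN8_PPAT(i, x) entry is one byte of a
 * 64-bit PAT word (assuming the usual (u64)(x) << (i * 8) encoding), so
 * entries 0-3 end up in GEN8_PRIVATE_PAT_LO and entries 4-7 in
 * GEN8_PRIVATE_PAT_HI via the pat >> 32 write.
 */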
34c998b4
CW
2730static void gen6_gmch_remove(struct i915_address_space *vm)
2731{
2732 struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
2733
2734 iounmap(ggtt->gsm);
8448661d 2735 cleanup_scratch_page(vm);
34c998b4
CW
2736}
2737
d507d735 2738static int gen8_gmch_probe(struct i915_ggtt *ggtt)
63340133 2739{
49d73912 2740 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 2741 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2742 unsigned int size;
63340133 2743 u16 snb_gmch_ctl;
63340133
BW
2744
2745 /* TODO: We're not aware of mappable constraints on gen8 yet */
97d6d7ab
CW
2746 ggtt->mappable_base = pci_resource_start(pdev, 2);
2747 ggtt->mappable_end = pci_resource_len(pdev, 2);
63340133 2748
97d6d7ab
CW
2749 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
2750 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));
63340133 2751
97d6d7ab 2752 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
63340133 2753
97d6d7ab 2754 if (INTEL_GEN(dev_priv) >= 9) {
d507d735 2755 ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
34c998b4 2756 size = gen8_get_total_gtt_size(snb_gmch_ctl);
97d6d7ab 2757 } else if (IS_CHERRYVIEW(dev_priv)) {
d507d735 2758 ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
34c998b4 2759 size = chv_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 2760 } else {
d507d735 2761 ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
34c998b4 2762 size = gen8_get_total_gtt_size(snb_gmch_ctl);
d7f25f23 2763 }
63340133 2764
34c998b4 2765 ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;
63340133 2766
cc3f90f0 2767 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
ee0ce478
VS
2768 chv_setup_private_ppat(dev_priv);
2769 else
2770 bdw_setup_private_ppat(dev_priv);
fbe5d36e 2771
34c998b4 2772 ggtt->base.cleanup = gen6_gmch_remove;
d507d735
JL
2773 ggtt->base.bind_vma = ggtt_bind_vma;
2774 ggtt->base.unbind_vma = ggtt_unbind_vma;
d6473f56 2775 ggtt->base.insert_page = gen8_ggtt_insert_page;
f7770bfd 2776 ggtt->base.clear_range = nop_clear_range;
48f112fe 2777 if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
f7770bfd
CW
2778 ggtt->base.clear_range = gen8_ggtt_clear_range;
2779
2780 ggtt->base.insert_entries = gen8_ggtt_insert_entries;
f7770bfd 2781
7c3f86b6
CW
2782 ggtt->invalidate = gen6_ggtt_invalidate;
2783
34c998b4 2784 return ggtt_probe_common(ggtt, size);
63340133
BW
2785}
2786
d507d735 2787static int gen6_gmch_probe(struct i915_ggtt *ggtt)
e76e9aeb 2788{
49d73912 2789 struct drm_i915_private *dev_priv = ggtt->base.i915;
97d6d7ab 2790 struct pci_dev *pdev = dev_priv->drm.pdev;
34c998b4 2791 unsigned int size;
e76e9aeb 2792 u16 snb_gmch_ctl;
e76e9aeb 2793
97d6d7ab
CW
2794 ggtt->mappable_base = pci_resource_start(pdev, 2);
2795 ggtt->mappable_end = pci_resource_len(pdev, 2);
41907ddc 2796
baa09f5f
BW
2797 /* 64/512MB is the current min/max we actually know of, but this is just
2798 * a coarse sanity check.
e76e9aeb 2799 */
34c998b4 2800 if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
d507d735 2801 DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
baa09f5f 2802 return -ENXIO;
e76e9aeb
BW
2803 }
2804
97d6d7ab
CW
2805 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
2806 pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
2807 pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
e76e9aeb 2808
d507d735 2809 ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
e76e9aeb 2810
34c998b4
CW
2811 size = gen6_get_total_gtt_size(snb_gmch_ctl);
2812 ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;
e76e9aeb 2813
d507d735 2814 ggtt->base.clear_range = gen6_ggtt_clear_range;
d6473f56 2815 ggtt->base.insert_page = gen6_ggtt_insert_page;
d507d735
JL
2816 ggtt->base.insert_entries = gen6_ggtt_insert_entries;
2817 ggtt->base.bind_vma = ggtt_bind_vma;
2818 ggtt->base.unbind_vma = ggtt_unbind_vma;
34c998b4
CW
2819 ggtt->base.cleanup = gen6_gmch_remove;
2820
7c3f86b6
CW
2821 ggtt->invalidate = gen6_ggtt_invalidate;
2822
34c998b4
CW
2823 if (HAS_EDRAM(dev_priv))
2824 ggtt->base.pte_encode = iris_pte_encode;
2825 else if (IS_HASWELL(dev_priv))
2826 ggtt->base.pte_encode = hsw_pte_encode;
2827 else if (IS_VALLEYVIEW(dev_priv))
2828 ggtt->base.pte_encode = byt_pte_encode;
2829 else if (INTEL_GEN(dev_priv) >= 7)
2830 ggtt->base.pte_encode = ivb_pte_encode;
2831 else
2832 ggtt->base.pte_encode = snb_pte_encode;
7faf1ab2 2833
34c998b4 2834 return ggtt_probe_common(ggtt, size);
e76e9aeb
BW
2835}
2836
34c998b4 2837static void i915_gmch_remove(struct i915_address_space *vm)
e76e9aeb 2838{
34c998b4 2839 intel_gmch_remove();
644ec02b 2840}
baa09f5f 2841
d507d735 2842static int i915_gmch_probe(struct i915_ggtt *ggtt)
baa09f5f 2843{
49d73912 2844 struct drm_i915_private *dev_priv = ggtt->base.i915;
baa09f5f
BW
2845 int ret;
2846
91c8a326 2847 ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
baa09f5f
BW
2848 if (!ret) {
2849 DRM_ERROR("failed to set up gmch\n");
2850 return -EIO;
2851 }
2852
edd1f2fe
CW
2853 intel_gtt_get(&ggtt->base.total,
2854 &ggtt->stolen_size,
2855 &ggtt->mappable_base,
2856 &ggtt->mappable_end);
baa09f5f 2857
97d6d7ab 2858 ggtt->do_idle_maps = needs_idle_maps(dev_priv);
d6473f56 2859 ggtt->base.insert_page = i915_ggtt_insert_page;
d507d735
JL
2860 ggtt->base.insert_entries = i915_ggtt_insert_entries;
2861 ggtt->base.clear_range = i915_ggtt_clear_range;
2862 ggtt->base.bind_vma = ggtt_bind_vma;
2863 ggtt->base.unbind_vma = ggtt_unbind_vma;
34c998b4 2864 ggtt->base.cleanup = i915_gmch_remove;
baa09f5f 2865
7c3f86b6
CW
2866 ggtt->invalidate = gmch_ggtt_invalidate;
2867
d507d735 2868 if (unlikely(ggtt->do_idle_maps))
c0a7f818
CW
2869 DRM_INFO("applying Ironlake quirks for intel_iommu\n");
2870
baa09f5f
BW
2871 return 0;
2872}
2873
d85489d3 2874/**
0088e522 2875 * i915_ggtt_probe_hw - Probe GGTT hardware location
97d6d7ab 2876 * @dev_priv: i915 device
d85489d3 2877 */
97d6d7ab 2878int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
baa09f5f 2879{
62106b4f 2880 struct i915_ggtt *ggtt = &dev_priv->ggtt;
baa09f5f
BW
2881 int ret;
2882
49d73912 2883 ggtt->base.i915 = dev_priv;
8448661d 2884 ggtt->base.dma = &dev_priv->drm.pdev->dev;
c114f76a 2885
34c998b4
CW
2886 if (INTEL_GEN(dev_priv) <= 5)
2887 ret = i915_gmch_probe(ggtt);
2888 else if (INTEL_GEN(dev_priv) < 8)
2889 ret = gen6_gmch_probe(ggtt);
2890 else
2891 ret = gen8_gmch_probe(ggtt);
a54c0c27 2892 if (ret)
baa09f5f 2893 return ret;
baa09f5f 2894
db9309a5
CW
2895 /* Trim the GGTT to fit the GuC mappable upper range (when enabled).
2896 * This is easier than doing range restriction on the fly, as we
2897 * currently don't have any bits spare to pass in this upper
2898 * restriction!
2899 */
2900 if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
2901 ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
2902 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
2903 }
2904
c890e2d5
CW
2905 if ((ggtt->base.total - 1) >> 32) {
2906 DRM_ERROR("We never expected a Global GTT with more than 32bits"
f6b9d5ca 2907 " of address space! Found %lldM!\n",
c890e2d5
CW
2908 ggtt->base.total >> 20);
2909 ggtt->base.total = 1ULL << 32;
2910 ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
2911 }
2912
f6b9d5ca
CW
2913 if (ggtt->mappable_end > ggtt->base.total) {
2914 DRM_ERROR("mappable aperture extends past end of GGTT,"
2915 " aperture=%llx, total=%llx\n",
2916 ggtt->mappable_end, ggtt->base.total);
2917 ggtt->mappable_end = ggtt->base.total;
2918 }
2919
baa09f5f 2920 /* GMADR is the PCI mmio aperture into the global GTT. */
c44ef60e 2921 DRM_INFO("Memory usable by graphics device = %lluM\n",
62106b4f
JL
2922 ggtt->base.total >> 20);
2923 DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
edd1f2fe 2924 DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
5db6c735
DV
2925#ifdef CONFIG_INTEL_IOMMU
2926 if (intel_iommu_gfx_mapped)
2927 DRM_INFO("VT-d active for gfx access\n");
2928#endif
baa09f5f
BW
2929
2930 return 0;
0088e522
CW
2931}
2932
2933/**
2934 * i915_ggtt_init_hw - Initialize GGTT hardware
97d6d7ab 2935 * @dev_priv: i915 device
0088e522 2936 */
97d6d7ab 2937int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
0088e522 2938{
0088e522
CW
2939 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2940 int ret;
2941
f6b9d5ca
CW
2942 INIT_LIST_HEAD(&dev_priv->vm_list);
2943
a6508ded
CW
2944 /* Note that we use page colouring to enforce a guard page at the
2945 * end of the address space. This is required as the CS may prefetch
2946 * beyond the end of the batch buffer, across the page boundary,
2947 * and beyond the end of the GTT if we do not provide a guard.
f6b9d5ca 2948 */
80b204bc 2949 mutex_lock(&dev_priv->drm.struct_mutex);
80b204bc 2950 i915_address_space_init(&ggtt->base, dev_priv, "[global]");
a6508ded 2951 if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
f6b9d5ca 2952 ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
80b204bc 2953 mutex_unlock(&dev_priv->drm.struct_mutex);
f6b9d5ca 2954
f7bbe788
CW
2955 if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
2956 dev_priv->ggtt.mappable_base,
2957 dev_priv->ggtt.mappable_end)) {
f6b9d5ca
CW
2958 ret = -EIO;
2959 goto out_gtt_cleanup;
2960 }
2961
2962 ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);
2963
0088e522
CW
2964 /*
2965 * Initialise stolen early so that we may reserve preallocated
2966 * objects for the BIOS to KMS transition.
2967 */
7ace3d30 2968 ret = i915_gem_init_stolen(dev_priv);
0088e522
CW
2969 if (ret)
2970 goto out_gtt_cleanup;
2971
2972 return 0;
a4eba47b
ID
2973
2974out_gtt_cleanup:
72e96d64 2975 ggtt->base.cleanup(&ggtt->base);
a4eba47b 2976 return ret;
baa09f5f 2977}
6f65e29a 2978
97d6d7ab 2979int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
ac840ae5 2980{
97d6d7ab 2981 if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
ac840ae5
VS
2982 return -EIO;
2983
2984 return 0;
2985}
2986
7c3f86b6
CW
2987void i915_ggtt_enable_guc(struct drm_i915_private *i915)
2988{
2989 i915->ggtt.invalidate = guc_ggtt_invalidate;
2990}
2991
2992void i915_ggtt_disable_guc(struct drm_i915_private *i915)
2993{
2994 i915->ggtt.invalidate = gen6_ggtt_invalidate;
2995}
2996
275a991c 2997void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
fa42331b 2998{
72e96d64 2999 struct i915_ggtt *ggtt = &dev_priv->ggtt;
fbb30a5c 3000 struct drm_i915_gem_object *obj, *on;
fa42331b 3001
dc97997a 3002 i915_check_and_clear_faults(dev_priv);
fa42331b
DV
3003
3004 /* First fill our portion of the GTT with scratch pages */
381b943b 3005 ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);
fa42331b 3006
fbb30a5c
CW
3007 ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */
3008
3009 /* clflush objects bound into the GGTT and rebind them. */
3010 list_for_each_entry_safe(obj, on,
56cea323 3011 &dev_priv->mm.bound_list, global_link) {
fbb30a5c
CW
3012 bool ggtt_bound = false;
3013 struct i915_vma *vma;
3014
1c7f4bca 3015 list_for_each_entry(vma, &obj->vma_list, obj_link) {
72e96d64 3016 if (vma->vm != &ggtt->base)
2c3d9984 3017 continue;
fa42331b 3018
fbb30a5c
CW
3019 if (!i915_vma_unbind(vma))
3020 continue;
3021
2c3d9984
TU
3022 WARN_ON(i915_vma_bind(vma, obj->cache_level,
3023 PIN_UPDATE));
fbb30a5c 3024 ggtt_bound = true;
2c3d9984
TU
3025 }
3026
fbb30a5c 3027 if (ggtt_bound)
975f7ff4 3028 WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
2c3d9984 3029 }
fa42331b 3030
fbb30a5c
CW
3031 ggtt->base.closed = false;
3032
275a991c 3033 if (INTEL_GEN(dev_priv) >= 8) {
cc3f90f0 3034 if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
fa42331b
DV
3035 chv_setup_private_ppat(dev_priv);
3036 else
3037 bdw_setup_private_ppat(dev_priv);
3038
3039 return;
3040 }
3041
275a991c 3042 if (USES_PPGTT(dev_priv)) {
72e96d64
JL
3043 struct i915_address_space *vm;
3044
fa42331b 3045 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
e5716f55 3046 struct i915_hw_ppgtt *ppgtt;
fa42331b 3047
2bfa996e 3048 if (i915_is_ggtt(vm))
fa42331b 3049 ppgtt = dev_priv->mm.aliasing_ppgtt;
e5716f55
JL
3050 else
3051 ppgtt = i915_vm_to_ppgtt(vm);
fa42331b 3052
16a011c8 3053 gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
fa42331b
DV
3054 }
3055 }
3056
7c3f86b6 3057 i915_ggtt_invalidate(dev_priv);
fa42331b
DV
3058}
3059
804beb4b 3060static struct scatterlist *
2d7f3bdb 3061rotate_pages(const dma_addr_t *in, unsigned int offset,
804beb4b 3062 unsigned int width, unsigned int height,
87130255 3063 unsigned int stride,
804beb4b 3064 struct sg_table *st, struct scatterlist *sg)
50470bb0
TU
3065{
3066 unsigned int column, row;
3067 unsigned int src_idx;
50470bb0 3068
50470bb0 3069 for (column = 0; column < width; column++) {
87130255 3070 src_idx = stride * (height - 1) + column;
50470bb0
TU
3071 for (row = 0; row < height; row++) {
3072 st->nents++;
3073 /* We don't need the pages, but need to initialize
3074 * the entries so the sg list can be happily traversed.
3075 * All we need are the DMA addresses.
3076 */
3077 sg_set_page(sg, NULL, PAGE_SIZE, 0);
804beb4b 3078 sg_dma_address(sg) = in[offset + src_idx];
50470bb0
TU
3079 sg_dma_len(sg) = PAGE_SIZE;
3080 sg = sg_next(sg);
87130255 3081 src_idx -= stride;
50470bb0
TU
3082 }
3083 }
804beb4b
TU
3084
3085 return sg;
50470bb0
TU
3086}
3087
ba7a5741
CW
3088static noinline struct sg_table *
3089intel_rotate_pages(struct intel_rotation_info *rot_info,
3090 struct drm_i915_gem_object *obj)
50470bb0 3091{
75c7b0b8 3092 const unsigned long n_pages = obj->base.size / PAGE_SIZE;
6687c906 3093 unsigned int size = intel_rotation_info_size(rot_info);
85d1225e
DG
3094 struct sgt_iter sgt_iter;
3095 dma_addr_t dma_addr;
50470bb0
TU
3096 unsigned long i;
3097 dma_addr_t *page_addr_list;
3098 struct sg_table *st;
89e3e142 3099 struct scatterlist *sg;
1d00dad5 3100 int ret = -ENOMEM;
50470bb0 3101
50470bb0 3102 /* Allocate a temporary list of source pages for random access. */
85d1225e 3103 page_addr_list = drm_malloc_gfp(n_pages,
f2a85e19
CW
3104 sizeof(dma_addr_t),
3105 GFP_TEMPORARY);
50470bb0
TU
3106 if (!page_addr_list)
3107 return ERR_PTR(ret);
3108
3109 /* Allocate target SG list. */
3110 st = kmalloc(sizeof(*st), GFP_KERNEL);
3111 if (!st)
3112 goto err_st_alloc;
3113
6687c906 3114 ret = sg_alloc_table(st, size, GFP_KERNEL);
50470bb0
TU
3115 if (ret)
3116 goto err_sg_alloc;
3117
3118 /* Populate source page list from the object. */
3119 i = 0;
a4f5ea64 3120 for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
85d1225e 3121 page_addr_list[i++] = dma_addr;
50470bb0 3122
85d1225e 3123 GEM_BUG_ON(i != n_pages);
11f20322
VS
3124 st->nents = 0;
3125 sg = st->sgl;
3126
6687c906
VS
3127 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) {
3128 sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
3129 rot_info->plane[i].width, rot_info->plane[i].height,
3130 rot_info->plane[i].stride, st, sg);
89e3e142
TU
3131 }
3132
6687c906
VS
3133 DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
3134 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
50470bb0
TU
3135
3136 drm_free_large(page_addr_list);
3137
3138 return st;
3139
3140err_sg_alloc:
3141 kfree(st);
3142err_st_alloc:
3143 drm_free_large(page_addr_list);
3144
6687c906
VS
3145 DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
3146 obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
3147
50470bb0
TU
3148 return ERR_PTR(ret);
3149}
ec7adb6e 3150
ba7a5741 3151static noinline struct sg_table *
8bd7ef16
JL
3152intel_partial_pages(const struct i915_ggtt_view *view,
3153 struct drm_i915_gem_object *obj)
3154{
3155 struct sg_table *st;
d2a84a76 3156 struct scatterlist *sg, *iter;
8bab1193 3157 unsigned int count = view->partial.size;
d2a84a76 3158 unsigned int offset;
8bd7ef16
JL
3159 int ret = -ENOMEM;
3160
3161 st = kmalloc(sizeof(*st), GFP_KERNEL);
3162 if (!st)
3163 goto err_st_alloc;
3164
d2a84a76 3165 ret = sg_alloc_table(st, count, GFP_KERNEL);
8bd7ef16
JL
3166 if (ret)
3167 goto err_sg_alloc;
3168
8bab1193 3169 iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
d2a84a76
CW
3170 GEM_BUG_ON(!iter);
3171
8bd7ef16
JL
3172 sg = st->sgl;
3173 st->nents = 0;
d2a84a76
CW
3174 do {
3175 unsigned int len;
8bd7ef16 3176
d2a84a76
CW
3177 len = min(iter->length - (offset << PAGE_SHIFT),
3178 count << PAGE_SHIFT);
3179 sg_set_page(sg, NULL, len, 0);
3180 sg_dma_address(sg) =
3181 sg_dma_address(iter) + (offset << PAGE_SHIFT);
3182 sg_dma_len(sg) = len;
8bd7ef16 3183
8bd7ef16 3184 st->nents++;
d2a84a76
CW
3185 count -= len >> PAGE_SHIFT;
3186 if (count == 0) {
3187 sg_mark_end(sg);
3188 return st;
3189 }
8bd7ef16 3190
d2a84a76
CW
3191 sg = __sg_next(sg);
3192 iter = __sg_next(iter);
3193 offset = 0;
3194 } while (1);
8bd7ef16
JL
3195
3196err_sg_alloc:
3197 kfree(st);
3198err_st_alloc:
3199 return ERR_PTR(ret);
3200}
3201
70b9f6f8 3202static int
50470bb0 3203i915_get_ggtt_vma_pages(struct i915_vma *vma)
fe14d5f4 3204{
ba7a5741 3205 int ret;
50470bb0 3206
2c3a3f44
CW
3207 /* The vma->pages are only valid within the lifespan of the borrowed
3208 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
3209 * must be the vma->pages. A simple rule is that vma->pages must only
3210 * be accessed when the obj->mm.pages are pinned.
3211 */
3212 GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));
3213
ba7a5741
CW
3214 switch (vma->ggtt_view.type) {
3215 case I915_GGTT_VIEW_NORMAL:
3216 vma->pages = vma->obj->mm.pages;
fe14d5f4
TU
3217 return 0;
3218
ba7a5741 3219 case I915_GGTT_VIEW_ROTATED:
247177dd 3220 vma->pages =
ba7a5741
CW
3221 intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
3222 break;
3223
3224 case I915_GGTT_VIEW_PARTIAL:
247177dd 3225 vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
ba7a5741
CW
3226 break;
3227
3228 default:
fe14d5f4
TU
3229 WARN_ONCE(1, "GGTT view %u not implemented!\n",
3230 vma->ggtt_view.type);
ba7a5741
CW
3231 return -EINVAL;
3232 }
fe14d5f4 3233
ba7a5741
CW
3234 ret = 0;
3235 if (unlikely(IS_ERR(vma->pages))) {
247177dd
CW
3236 ret = PTR_ERR(vma->pages);
3237 vma->pages = NULL;
50470bb0
TU
3238 DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
3239 vma->ggtt_view.type, ret);
fe14d5f4 3240 }
50470bb0 3241 return ret;
fe14d5f4
TU
3242}
3243
625d988a
CW
3244/**
3245 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
a4dbf7cf
CW
3246 * @vm: the &struct i915_address_space
3247 * @node: the &struct drm_mm_node (typically i915_vma.mode)
3248 * @size: how much space to allocate inside the GTT,
3249 * must be #I915_GTT_PAGE_SIZE aligned
3250 * @offset: where to insert inside the GTT,
3251 * must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
3252 * (@offset + @size) must fit within the address space
3253 * @color: color to apply to node, if this node is not from a VMA,
3254 * color must be #I915_COLOR_UNEVICTABLE
3255 * @flags: control search and eviction behaviour
625d988a
CW
3256 *
3257 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
3258 * the address space (using @size and @color). If the @node does not fit, it
3259 * tries to evict any overlapping nodes from the GTT, including any
3260 * neighbouring nodes if the colors do not match (to ensure guard pages between
3261 * differing domains). See i915_gem_evict_for_node() for the gory details
3262 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
3263 * evicting active overlapping objects, and any overlapping node that is pinned
3264 * or marked as unevictable will also result in failure.
3265 *
3266 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3267 * asked to wait for eviction and interrupted.
3268 */
3269int i915_gem_gtt_reserve(struct i915_address_space *vm,
3270 struct drm_mm_node *node,
3271 u64 size, u64 offset, unsigned long color,
3272 unsigned int flags)
3273{
3274 int err;
3275
3276 GEM_BUG_ON(!size);
3277 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3278 GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
3279 GEM_BUG_ON(range_overflows(offset, size, vm->total));
3fec7ec4 3280 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
9734ad13 3281 GEM_BUG_ON(drm_mm_node_allocated(node));
625d988a
CW
3282
3283 node->size = size;
3284 node->start = offset;
3285 node->color = color;
3286
3287 err = drm_mm_reserve_node(&vm->mm, node);
3288 if (err != -ENOSPC)
3289 return err;
3290
3291 err = i915_gem_evict_for_node(vm, node, flags);
3292 if (err == 0)
3293 err = drm_mm_reserve_node(&vm->mm, node);
3294
3295 return err;
3296}
3297
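/*
 * Illustrative call only (hypothetical node and values): reserve a
 * 64 KiB node at a fixed GGTT offset of 1 MiB, evicting idle overlapping
 * nodes but not waiting on active ones:
 *
 *	err = i915_gem_gtt_reserve(&ggtt->base, &node, SZ_64K, SZ_1M,
 *				   I915_COLOR_UNEVICTABLE, PIN_NONBLOCK);
 */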
606fec95
CW
3298static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
3299{
3300 u64 range, addr;
3301
3302 GEM_BUG_ON(range_overflows(start, len, end));
3303 GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));
3304
3305 range = round_down(end - len, align) - round_up(start, align);
3306 if (range) {
3307 if (sizeof(unsigned long) == sizeof(u64)) {
3308 addr = get_random_long();
3309 } else {
3310 addr = get_random_int();
3311 if (range > U32_MAX) {
3312 addr <<= 32;
3313 addr |= get_random_int();
3314 }
3315 }
3316 div64_u64_rem(addr, range, &addr);
3317 start += addr;
3318 }
3319
3320 return round_up(start, align);
3321}
3322
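/*
 * Example: with start = 0, end = 1 MiB, len = 64 KiB and align = 4 KiB,
 * the helper above returns a pseudo-random page-aligned offset in
 * [0, 960 KiB], i.e. one of the 241 starting positions that still leave
 * room for the 64 KiB allocation before the 1 MiB end.
 */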
e007b19d
CW
3323/**
3324 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
a4dbf7cf
CW
3325 * @vm: the &struct i915_address_space
3326 * @node: the &struct drm_mm_node (typically i915_vma.node)
3327 * @size: how much space to allocate inside the GTT,
3328 * must be #I915_GTT_PAGE_SIZE aligned
3329 * @alignment: required alignment of starting offset, may be 0 but
3330 * if specified, this must be a power-of-two and at least
3331 * #I915_GTT_MIN_ALIGNMENT
3332 * @color: color to apply to node
3333 * @start: start of any range restriction inside GTT (0 for all),
e007b19d 3334 * must be #I915_GTT_PAGE_SIZE aligned
a4dbf7cf
CW
3335 * @end: end of any range restriction inside GTT (U64_MAX for all),
3336 * must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
3337 * @flags: control search and eviction behaviour
e007b19d
CW
3338 *
3339 * i915_gem_gtt_insert() first searches for an available hole into which
3340 * it can insert the node. The hole address is aligned to @alignment and
3341 * its @size must then fit entirely within the [@start, @end] bounds. The
3342 * nodes on either side of the hole must match @color, or else a guard page
3343 * will be inserted between the two nodes (or the node evicted). If no
606fec95
CW
3344 * suitable hole is found, first a victim is randomly selected and tested
3345 * for eviction; otherwise the LRU list of objects within the GTT
e007b19d
CW
3346 * is scanned to find the first set of replacement nodes to create the hole.
3347 * Those old overlapping nodes are evicted from the GTT (and so must be
3348 * rebound before any future use). Any node that is currently pinned cannot
3349 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
3350 * active and #PIN_NONBLOCK is specified, that node is also skipped when
3351 * searching for an eviction candidate. See i915_gem_evict_something() for
3352 * the gory details on the eviction algorithm.
3353 *
3354 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
3355 * asked to wait for eviction and interrupted.
3356 */
3357int i915_gem_gtt_insert(struct i915_address_space *vm,
3358 struct drm_mm_node *node,
3359 u64 size, u64 alignment, unsigned long color,
3360 u64 start, u64 end, unsigned int flags)
3361{
4e64e553 3362 enum drm_mm_insert_mode mode;
606fec95 3363 u64 offset;
e007b19d
CW
3364 int err;
3365
3366 lockdep_assert_held(&vm->i915->drm.struct_mutex);
3367 GEM_BUG_ON(!size);
3368 GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
3369 GEM_BUG_ON(alignment && !is_power_of_2(alignment));
3370 GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
3371 GEM_BUG_ON(start >= end);
3372 GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
3373 GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
3fec7ec4 3374 GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
9734ad13 3375 GEM_BUG_ON(drm_mm_node_allocated(node));
e007b19d
CW
3376
3377 if (unlikely(range_overflows(start, size, end)))
3378 return -ENOSPC;
3379
3380 if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
3381 return -ENOSPC;
3382
4e64e553
CW
3383 mode = DRM_MM_INSERT_BEST;
3384 if (flags & PIN_HIGH)
3385 mode = DRM_MM_INSERT_HIGH;
3386 if (flags & PIN_MAPPABLE)
3387 mode = DRM_MM_INSERT_LOW;
e007b19d
CW
3388
3389 /* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
3390 * so we know that we always have a minimum alignment of 4096.
3391 * The drm_mm range manager is optimised to return results
3392 * with zero alignment, so where possible use the optimal
3393 * path.
3394 */
3395 BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
3396 if (alignment <= I915_GTT_MIN_ALIGNMENT)
3397 alignment = 0;
3398
4e64e553
CW
3399 err = drm_mm_insert_node_in_range(&vm->mm, node,
3400 size, alignment, color,
3401 start, end, mode);
e007b19d
CW
3402 if (err != -ENOSPC)
3403 return err;
3404
606fec95
CW
3405 /* No free space, pick a slot at random.
3406 *
3407 * There is a pathological case here using a GTT shared between
3408 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
3409 *
3410 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
3411 * (64k objects) (448k objects)
3412 *
3413 * Now imagine that the eviction LRU is ordered top-down (just because
3414 * pathology meets real life), and that we need to evict an object to
3415 * make room inside the aperture. The eviction scan then has to walk
3416 * the 448k list before it finds one within range. And now imagine that
3417 * it has to search for a new hole between every byte inside the memcpy,
3418 * for several simultaneous clients.
3419 *
3420 * On a full-ppgtt system, if we have run out of available space, there
3421 * will be lots and lots of objects in the eviction list! Again,
3422 * searching that LRU list may be slow if we are also applying any
3423 * range restrictions (e.g. restriction to low 4GiB) and so, for
3424 * simplicity and similarity between different GTTs, try the single
3425 * random replacement first.
3426 */
3427 offset = random_offset(start, end,
3428 size, alignment ?: I915_GTT_MIN_ALIGNMENT);
3429 err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
3430 if (err != -ENOSPC)
3431 return err;
3432
3433 /* Randomly selected placement is pinned, do a search */
e007b19d
CW
3434 err = i915_gem_evict_something(vm, size, alignment, color,
3435 start, end, flags);
3436 if (err)
3437 return err;
3438
4e64e553
CW
3439 return drm_mm_insert_node_in_range(&vm->mm, node,
3440 size, alignment, color,
3441 start, end, DRM_MM_INSERT_EVICT);
e007b19d 3442}
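/*
 * Illustrative caller sketch (hypothetical, not part of this file): find
 * room for a 2 MiB node anywhere in the low 4 GiB of the address space,
 * preferring high addresses so the mappable aperture stays free.  Assumes
 * struct_mutex is held and <linux/sizes.h> provides SZ_2M.
 */
static int example_insert_low_4g(struct i915_address_space *vm,
				 struct drm_mm_node *node,
				 unsigned long cache_level)
{
	lockdep_assert_held(&vm->i915->drm.struct_mutex);

	return i915_gem_gtt_insert(vm, node,
				   SZ_2M,	   /* size */
				   0,		   /* default alignment */
				   cache_level,	   /* node colour */
				   0, BIT_ULL(32), /* restrict to low 4 GiB */
				   PIN_HIGH);	   /* prefer top of the range */
}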
3b5bb0a3
CW
3443
3444#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3445#include "selftests/mock_gtt.c"
1c42819a 3446#include "selftests/i915_gem_gtt.c"
3b5bb0a3 3447#endif