/*
 * Copyright © 2010 Daniel Vetter
 * Copyright © 2011-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/slab.h> /* fault-inject.h is not standalone! */

#include <linux/fault-inject.h>
#include <linux/log2.h>
#include <linux/random.h>
#include <linux/seq_file.h>
#include <linux/stop_machine.h>

#include <asm/set_memory.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>

#include "i915_drv.h"
#include "i915_vgpu.h"
#include "i915_trace.h"
#include "intel_drv.h"
#include "intel_frontbuffer.h"

#define I915_GFP_DMA (GFP_KERNEL | __GFP_HIGHMEM)

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing
 * pages in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *   1212
 *   3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view, a
 * globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics which means that passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

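/*
 * Illustrative sketch only (not part of the driver): a caller interested in
 * a non-normal view populates a struct i915_ggtt_view on the stack and
 * passes it by pointer, relying on the copy semantics described above, e.g.:
 *
 *	struct i915_ggtt_view view = { .type = I915_GGTT_VIEW_ROTATED };
 *
 *	vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
 *
 * (A real rotated view would also fill in the view.rotated metadata.)
 */
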
static int
i915_get_ggtt_vma_pages(struct i915_vma *vma);

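/*
 * ggtt.invalidate is a function pointer selected at init time: gen6+ parts
 * flush via the GFX_FLSH_CNTL mmio write below, GuC-enabled systems
 * additionally invalidate the GuC TLB, and older gmch-based platforms defer
 * to the intel-gtt helper.
 */
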
static void gen6_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	/* Note that as an uncached mmio write, this should flush the
	 * WCB of the writes into the GGTT before it triggers the invalidate.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
}

static void guc_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	gen6_ggtt_invalidate(dev_priv);
	I915_WRITE(GEN8_GTCR, GEN8_GTCR_INVALIDATE);
}

static void gmch_ggtt_invalidate(struct drm_i915_private *dev_priv)
{
	intel_gtt_chipset_flush();
}

static inline void i915_ggtt_invalidate(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate(i915);
}

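/*
 * Sanitize the i915.enable_ppgtt module parameter against what the
 * hardware/driver combination actually supports. The return value encodes
 * the chosen mode: 0 = PPGTT disabled, 1 = aliasing PPGTT, 2 = full PPGTT,
 * 3 = full 48bit PPGTT. Any other requested value falls through to the
 * auto-detect heuristics at the end of the function.
 */
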
int intel_sanitize_enable_ppgtt(struct drm_i915_private *dev_priv,
				int enable_ppgtt)
{
	bool has_aliasing_ppgtt;
	bool has_full_ppgtt;
	bool has_full_48bit_ppgtt;

	has_aliasing_ppgtt = dev_priv->info.has_aliasing_ppgtt;
	has_full_ppgtt = dev_priv->info.has_full_ppgtt;
	has_full_48bit_ppgtt = dev_priv->info.has_full_48bit_ppgtt;

	if (intel_vgpu_active(dev_priv)) {
		/* emulation is too hard */
		has_full_ppgtt = false;
		has_full_48bit_ppgtt = false;
	}

	if (!has_aliasing_ppgtt)
		return 0;

	/*
	 * We don't allow disabling PPGTT for gen9+ as it's a requirement for
	 * execlists, the sole mechanism available to submit work.
	 */
	if (enable_ppgtt == 0 && INTEL_GEN(dev_priv) < 9)
		return 0;

	if (enable_ppgtt == 1)
		return 1;

	if (enable_ppgtt == 2 && has_full_ppgtt)
		return 2;

	if (enable_ppgtt == 3 && has_full_48bit_ppgtt)
		return 3;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (IS_GEN6(dev_priv) && intel_iommu_gfx_mapped) {
		DRM_INFO("Disabling PPGTT because VT-d is on\n");
		return 0;
	}
#endif

	/* Early VLV doesn't have this */
	if (IS_VALLEYVIEW(dev_priv) && dev_priv->drm.pdev->revision < 0xb) {
		DRM_DEBUG_DRIVER("disabling PPGTT on pre-B3 step VLV\n");
		return 0;
	}

	if (INTEL_GEN(dev_priv) >= 8 && i915.enable_execlists && has_full_ppgtt)
		return has_full_48bit_ppgtt ? 3 : 2;
	else
		return has_aliasing_ppgtt ? 1 : 0;
}

static int ppgtt_bind_vma(struct i915_vma *vma,
			  enum i915_cache_level cache_level,
			  u32 unused)
{
	u32 pte_flags;
	int ret;

	if (!(vma->flags & I915_VMA_LOCAL_BIND)) {
		ret = vma->vm->allocate_va_range(vma->vm, vma->node.start,
						 vma->size);
		if (ret)
			return ret;
	}

	vma->pages = vma->obj->mm.pages;

	/* Currently applicable only to VLV */
	pte_flags = 0;
	if (vma->obj->gt_ro)
		pte_flags |= PTE_READ_ONLY;

	vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start,
				cache_level, pte_flags);

	return 0;
}

static void ppgtt_unbind_vma(struct i915_vma *vma)
{
	vma->vm->clear_range(vma->vm, vma->node.start, vma->size);
}

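/*
 * Note the asymmetry above: page tables for the VMA's range are allocated on
 * the first (local) bind, while unbind merely points the range back at the
 * scratch page via clear_range(); on gen8 the clear path also frees page
 * tables that become empty.
 */
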
static gen8_pte_t gen8_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level)
{
	gen8_pte_t pte = _PAGE_PRESENT | _PAGE_RW;
	pte |= addr;

	switch (level) {
	case I915_CACHE_NONE:
		pte |= PPAT_UNCACHED_INDEX;
		break;
	case I915_CACHE_WT:
		pte |= PPAT_DISPLAY_ELLC_INDEX;
		break;
	default:
		pte |= PPAT_CACHED_INDEX;
		break;
	}

	return pte;
}

static gen8_pde_t gen8_pde_encode(const dma_addr_t addr,
				  const enum i915_cache_level level)
{
	gen8_pde_t pde = _PAGE_PRESENT | _PAGE_RW;
	pde |= addr;
	if (level != I915_CACHE_NONE)
		pde |= PPAT_CACHED_PDE_INDEX;
	else
		pde |= PPAT_UNCACHED_INDEX;
	return pde;
}

#define gen8_pdpe_encode gen8_pde_encode
#define gen8_pml4e_encode gen8_pde_encode

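/*
 * A gen8 PTE is the x86-like present+writable bits, the page's dma address,
 * and a PPAT index selecting the cacheability. PDP and PML4 entries share
 * the PDE layout, hence the #define aliases above.
 */
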
static gen6_pte_t snb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t ivb_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_L3_LLC:
		pte |= GEN7_PTE_CACHE_L3_LLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		MISSING_CASE(level);
	}

	return pte;
}

static gen6_pte_t byt_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 flags)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	if (!(flags & PTE_READ_ONLY))
		pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}

static gen6_pte_t hsw_pte_encode(dma_addr_t addr,
				 enum i915_cache_level level,
				 u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	if (level != I915_CACHE_NONE)
		pte |= HSW_WB_LLC_AGE3;

	return pte;
}

static gen6_pte_t iris_pte_encode(dma_addr_t addr,
				  enum i915_cache_level level,
				  u32 unused)
{
	gen6_pte_t pte = GEN6_PTE_VALID;
	pte |= HSW_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_NONE:
		break;
	case I915_CACHE_WT:
		pte |= HSW_WT_ELLC_LLC_AGE3;
		break;
	default:
		pte |= HSW_WB_ELLC_LLC_AGE3;
		break;
	}

	return pte;
}

static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
{
	struct page *page;

	if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
		i915_gem_shrink_all(vm->i915);

	if (vm->free_pages.nr)
		return vm->free_pages.pages[--vm->free_pages.nr];

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	if (vm->pt_kmap_wc)
		set_pages_array_wc(&page, 1);

	return page;
}

static void vm_free_pages_release(struct i915_address_space *vm)
{
	GEM_BUG_ON(!pagevec_count(&vm->free_pages));

	if (vm->pt_kmap_wc)
		set_pages_array_wb(vm->free_pages.pages,
				   pagevec_count(&vm->free_pages));

	__pagevec_release(&vm->free_pages);
}

static void vm_free_page(struct i915_address_space *vm, struct page *page)
{
	if (!pagevec_add(&vm->free_pages, page))
		vm_free_pages_release(vm);
}

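/*
 * vm->free_pages is a small pagevec cache: vm_free_page() stashes retired
 * page-table pages for reuse by vm_alloc_page(), and only when the pagevec
 * fills do we pay for the expensive set_pages_array_wb() attribute change
 * and the actual page release, in one batch.
 */
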
static int __setup_page_dma(struct i915_address_space *vm,
			    struct i915_page_dma *p,
			    gfp_t gfp)
{
	p->page = vm_alloc_page(vm, gfp | __GFP_NOWARN | __GFP_NORETRY);
	if (unlikely(!p->page))
		return -ENOMEM;

	p->daddr = dma_map_page(vm->dma, p->page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (unlikely(dma_mapping_error(vm->dma, p->daddr))) {
		vm_free_page(vm, p->page);
		return -ENOMEM;
	}

	return 0;
}

static int setup_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p)
{
	return __setup_page_dma(vm, p, I915_GFP_DMA);
}

static void cleanup_page_dma(struct i915_address_space *vm,
			     struct i915_page_dma *p)
{
	dma_unmap_page(vm->dma, p->daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	vm_free_page(vm, p->page);
}

#define kmap_atomic_px(px) kmap_atomic(px_base(px)->page)

#define setup_px(vm, px) setup_page_dma((vm), px_base(px))
#define cleanup_px(vm, px) cleanup_page_dma((vm), px_base(px))
#define fill_px(vm, px, v) fill_page_dma((vm), px_base(px), (v))
#define fill32_px(vm, px, v) fill_page_dma_32((vm), px_base(px), (v))

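/*
 * The px_* helpers operate on any paging structure (pt, pd, pdp, pml4) by
 * way of px_base(), which resolves to the embedded struct i915_page_dma, so
 * one set of setup/cleanup/fill routines serves every level of the
 * hierarchy. (The first macro parameter was previously named ppgtt while
 * the expansion referenced vm; it is a vm in every caller.)
 */
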
static void fill_page_dma(struct i915_address_space *vm,
			  struct i915_page_dma *p,
			  const u64 val)
{
	u64 * const vaddr = kmap_atomic(p->page);
	int i;

	/* 512 qwords == 4096 bytes, one full page */
	for (i = 0; i < 512; i++)
		vaddr[i] = val;

	kunmap_atomic(vaddr);
}

static void fill_page_dma_32(struct i915_address_space *vm,
			     struct i915_page_dma *p,
			     const u32 v)
{
	fill_page_dma(vm, p, (u64)v << 32 | v);
}

static int
setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
	return __setup_page_dma(vm, &vm->scratch_page, gfp | __GFP_ZERO);
}

static void cleanup_scratch_page(struct i915_address_space *vm)
{
	cleanup_page_dma(vm, &vm->scratch_page);
}

static struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pt))) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	pt->used_ptes = 0;
	return pt;
}

static void free_pt(struct i915_address_space *vm, struct i915_page_table *pt)
{
	cleanup_px(vm, pt);
	kfree(pt);
}

static void gen8_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill_px(vm, pt,
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC));
}

static void gen6_initialize_pt(struct i915_address_space *vm,
			       struct i915_page_table *pt)
{
	fill32_px(vm, pt,
		  vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0));
}

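/*
 * Freshly initialized page tables are filled with PTEs aimed at the single
 * zeroed scratch page, so reads of unallocated addresses return zeroes and
 * stray writes land somewhere harmless.
 */
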
static struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	if (unlikely(setup_px(vm, pd))) {
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	pd->used_pdes = 0;
	return pd;
}

static void free_pd(struct i915_address_space *vm,
		    struct i915_page_directory *pd)
{
	cleanup_px(vm, pd);
	kfree(pd);
}

static void gen8_initialize_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd)
{
	unsigned int i;

	fill_px(vm, pd,
		gen8_pde_encode(px_dma(vm->scratch_pt), I915_CACHE_LLC));
	for (i = 0; i < I915_PDES; i++)
		pd->page_table[i] = vm->scratch_pt;
}

static int __pdp_init(struct i915_address_space *vm,
		      struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	unsigned int i;

	pdp->page_directory = kmalloc_array(pdpes, sizeof(*pdp->page_directory),
					    GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pdp->page_directory))
		return -ENOMEM;

	for (i = 0; i < pdpes; i++)
		pdp->page_directory[i] = vm->scratch_pd;

	return 0;
}

static void __pdp_fini(struct i915_page_directory_pointer *pdp)
{
	kfree(pdp->page_directory);
	pdp->page_directory = NULL;
}

static inline bool use_4lvl(const struct i915_address_space *vm)
{
	return i915_vm_is_48bit(vm);
}

static struct i915_page_directory_pointer *
alloc_pdp(struct i915_address_space *vm)
{
	struct i915_page_directory_pointer *pdp;
	int ret = -ENOMEM;

	WARN_ON(!use_4lvl(vm));

	pdp = kzalloc(sizeof(*pdp), GFP_KERNEL);
	if (!pdp)
		return ERR_PTR(-ENOMEM);

	ret = __pdp_init(vm, pdp);
	if (ret)
		goto fail_bitmap;

	ret = setup_px(vm, pdp);
	if (ret)
		goto fail_page_m;

	return pdp;

fail_page_m:
	__pdp_fini(pdp);
fail_bitmap:
	kfree(pdp);

	return ERR_PTR(ret);
}

static void free_pdp(struct i915_address_space *vm,
		     struct i915_page_directory_pointer *pdp)
{
	__pdp_fini(pdp);

	if (!use_4lvl(vm))
		return;

	cleanup_px(vm, pdp);
	kfree(pdp);
}

static void gen8_initialize_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp)
{
	gen8_ppgtt_pdpe_t scratch_pdpe;

	scratch_pdpe = gen8_pdpe_encode(px_dma(vm->scratch_pd), I915_CACHE_LLC);

	fill_px(vm, pdp, scratch_pdpe);
}

static void gen8_initialize_pml4(struct i915_address_space *vm,
				 struct i915_pml4 *pml4)
{
	unsigned int i;

	fill_px(vm, pml4,
		gen8_pml4e_encode(px_dma(vm->scratch_pdp), I915_CACHE_LLC));
	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++)
		pml4->pdps[i] = vm->scratch_pdp;
}

/* Broadwell Page Directory Pointer Descriptors */
static int gen8_write_pdp(struct drm_i915_gem_request *req,
			  unsigned entry,
			  dma_addr_t addr)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	BUG_ON(entry >= 4);

	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, entry));
	*cs++ = upper_32_bits(addr);
	*cs++ = MI_LOAD_REGISTER_IMM(1);
	*cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, entry));
	*cs++ = lower_32_bits(addr);
	intel_ring_advance(req, cs);

	return 0;
}

648 | ||
e7167769 MK |
649 | static int gen8_mm_switch_3lvl(struct i915_hw_ppgtt *ppgtt, |
650 | struct drm_i915_gem_request *req) | |
94e409c1 | 651 | { |
eeb9488e | 652 | int i, ret; |
94e409c1 | 653 | |
e7167769 | 654 | for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) { |
d852c7bf MK |
655 | const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); |
656 | ||
e85b26dc | 657 | ret = gen8_write_pdp(req, i, pd_daddr); |
eeb9488e BW |
658 | if (ret) |
659 | return ret; | |
94e409c1 | 660 | } |
d595bd4b | 661 | |
eeb9488e | 662 | return 0; |
94e409c1 BW |
663 | } |
664 | ||
e7167769 MK |
665 | static int gen8_mm_switch_4lvl(struct i915_hw_ppgtt *ppgtt, |
666 | struct drm_i915_gem_request *req) | |
2dba3239 MT |
667 | { |
668 | return gen8_write_pdp(req, 0, px_dma(&ppgtt->pml4)); | |
669 | } | |
670 | ||
fce93755 MK |
671 | /* PDE TLBs are a pain to invalidate on GEN8+. When we modify |
672 | * the page table structures, we mark them dirty so that | |
673 | * context switching/execlist queuing code takes extra steps | |
674 | * to ensure that tlbs are flushed. | |
675 | */ | |
676 | static void mark_tlbs_dirty(struct i915_hw_ppgtt *ppgtt) | |
677 | { | |
49d73912 | 678 | ppgtt->pd_dirty_rings = INTEL_INFO(ppgtt->base.i915)->ring_mask; |
fce93755 MK |
679 | } |
680 | ||
/* Removes entries from a single page table, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pt(struct i915_address_space *vm,
				struct i915_page_table *pt,
				u64 start, u64 length)
{
	unsigned int num_entries = gen8_pte_count(start, length);
	unsigned int pte = gen8_pte_index(start);
	unsigned int pte_end = pte + num_entries;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	gen8_pte_t *vaddr;

	GEM_BUG_ON(num_entries > pt->used_ptes);

	pt->used_ptes -= num_entries;
	if (!pt->used_ptes)
		return true;

	vaddr = kmap_atomic_px(pt);
	while (pte < pte_end)
		vaddr[pte++] = scratch_pte;
	kunmap_atomic(vaddr);

	return false;
}

static void gen8_ppgtt_set_pde(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       struct i915_page_table *pt,
			       unsigned int pde)
{
	gen8_pde_t *vaddr;

	pd->page_table[pde] = pt;

	vaddr = kmap_atomic_px(pd);
	vaddr[pde] = gen8_pde_encode(px_dma(pt), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

static bool gen8_ppgtt_clear_pd(struct i915_address_space *vm,
				struct i915_page_directory *pd,
				u64 start, u64 length)
{
	struct i915_page_table *pt;
	u32 pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		GEM_BUG_ON(pt == vm->scratch_pt);

		if (!gen8_ppgtt_clear_pt(vm, pt, start, length))
			continue;

		gen8_ppgtt_set_pde(vm, pd, vm->scratch_pt, pde);
		GEM_BUG_ON(!pd->used_pdes);
		pd->used_pdes--;

		free_pt(vm, pt);
	}

	return !pd->used_pdes;
}

static void gen8_ppgtt_set_pdpe(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				struct i915_page_directory *pd,
				unsigned int pdpe)
{
	gen8_ppgtt_pdpe_t *vaddr;

	pdp->page_directory[pdpe] = pd;
	if (!use_4lvl(vm))
		return;

	vaddr = kmap_atomic_px(pdp);
	vaddr[pdpe] = gen8_pdpe_encode(px_dma(pd), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single page dir pointer, releasing it if it's empty.
 * Caller can use the return value to update higher-level entries.
 */
static bool gen8_ppgtt_clear_pdp(struct i915_address_space *vm,
				 struct i915_page_directory_pointer *pdp,
				 u64 start, u64 length)
{
	struct i915_page_directory *pd;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		GEM_BUG_ON(pd == vm->scratch_pd);

		if (!gen8_ppgtt_clear_pd(vm, pd, start, length))
			continue;

		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;

		free_pd(vm, pd);
	}

	return !pdp->used_pdpes;
}

static void gen8_ppgtt_clear_3lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	gen8_ppgtt_clear_pdp(vm, &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static void gen8_ppgtt_set_pml4e(struct i915_pml4 *pml4,
				 struct i915_page_directory_pointer *pdp,
				 unsigned int pml4e)
{
	gen8_ppgtt_pml4e_t *vaddr;

	pml4->pdps[pml4e] = pdp;

	vaddr = kmap_atomic_px(pml4);
	vaddr[pml4e] = gen8_pml4e_encode(px_dma(pdp), I915_CACHE_LLC);
	kunmap_atomic(vaddr);
}

/* Removes entries from a single pml4.
 * This is the top-level structure in 4-level page tables used on gen8+.
 * Empty entries are always scratch pml4e.
 */
static void gen8_ppgtt_clear_4lvl(struct i915_address_space *vm,
				  u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	unsigned int pml4e;

	GEM_BUG_ON(!use_4lvl(vm));

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		GEM_BUG_ON(pdp == vm->scratch_pdp);

		if (!gen8_ppgtt_clear_pdp(vm, pdp, start, length))
			continue;

		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);

		free_pdp(vm, pdp);
	}
}

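/*
 * Teardown thus recurses from the leaves up: clear_pt() scrubs PTEs to
 * scratch and reports whether the table emptied; each parent level frees
 * empty children, repoints their slots at the scratch structure, and in
 * turn reports its own emptiness to the level above.
 */
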
struct sgt_dma {
	struct scatterlist *sg;
	dma_addr_t dma, max;
};

struct gen8_insert_pte {
	u16 pml4e;
	u16 pdpe;
	u16 pde;
	u16 pte;
};

static __always_inline struct gen8_insert_pte gen8_insert_pte(u64 start)
{
	return (struct gen8_insert_pte) {
		 gen8_pml4e_index(start),
		 gen8_pdpe_index(start),
		 gen8_pde_index(start),
		 gen8_pte_index(start),
	};
}

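/*
 * gen8_insert_pte() splits a 48bit address into per-level indices; with 4K
 * pages and 512 entries per level this is:
 *
 *	pte   = (start >> 12) & 0x1ff;	// bits 20:12
 *	pde   = (start >> 21) & 0x1ff;	// bits 29:21
 *	pdpe  = (start >> 30) & 0x1ff;	// bits 38:30
 *	pml4e = (start >> 39) & 0x1ff;	// bits 47:39
 */
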
static __always_inline bool
gen8_ppgtt_insert_pte_entries(struct i915_hw_ppgtt *ppgtt,
			      struct i915_page_directory_pointer *pdp,
			      struct sgt_dma *iter,
			      struct gen8_insert_pte *idx,
			      enum i915_cache_level cache_level)
{
	struct i915_page_directory *pd;
	const gen8_pte_t pte_encode = gen8_pte_encode(0, cache_level);
	gen8_pte_t *vaddr;
	bool ret;

	GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
	pd = pdp->page_directory[idx->pdpe];
	vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
	do {
		vaddr[idx->pte] = pte_encode | iter->dma;

		iter->dma += PAGE_SIZE;
		if (iter->dma >= iter->max) {
			iter->sg = __sg_next(iter->sg);
			if (!iter->sg) {
				ret = false;
				break;
			}

			iter->dma = sg_dma_address(iter->sg);
			iter->max = iter->dma + iter->sg->length;
		}

		if (++idx->pte == GEN8_PTES) {
			idx->pte = 0;

			if (++idx->pde == I915_PDES) {
				idx->pde = 0;

				/* Limited by sg length for 3lvl */
				if (++idx->pdpe == GEN8_PML4ES_PER_PML4) {
					idx->pdpe = 0;
					ret = true;
					break;
				}

				GEM_BUG_ON(idx->pdpe >= i915_pdpes_per_pdp(&ppgtt->base));
				pd = pdp->page_directory[idx->pdpe];
			}

			kunmap_atomic(vaddr);
			vaddr = kmap_atomic_px(pd->page_table[idx->pde]);
		}
	} while (1);
	kunmap_atomic(vaddr);

	return ret;
}

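/*
 * The boolean result above distinguishes the two exit conditions: false
 * means the scatterlist ran dry (all pages inserted), true means we stepped
 * off the end of this pdp with entries still to write. Only the 4lvl caller
 * below can see true, and reacts by advancing to the next pml4e.
 */
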
static void gen8_ppgtt_insert_3lvl(struct i915_address_space *vm,
				   struct sg_table *pages,
				   u64 start,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct gen8_insert_pte idx = gen8_insert_pte(start);

	gen8_ppgtt_insert_pte_entries(ppgtt, &ppgtt->pdp, &iter, &idx,
				      cache_level);
}

static void gen8_ppgtt_insert_4lvl(struct i915_address_space *vm,
				   struct sg_table *pages,
				   u64 start,
				   enum i915_cache_level cache_level,
				   u32 unused)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct sgt_dma iter = {
		.sg = pages->sgl,
		.dma = sg_dma_address(iter.sg),
		.max = iter.dma + iter.sg->length,
	};
	struct i915_page_directory_pointer **pdps = ppgtt->pml4.pdps;
	struct gen8_insert_pte idx = gen8_insert_pte(start);

	while (gen8_ppgtt_insert_pte_entries(ppgtt, pdps[idx.pml4e++], &iter,
					     &idx, cache_level))
		GEM_BUG_ON(idx.pml4e >= GEN8_PML4ES_PER_PML4);
}

static void gen8_free_page_tables(struct i915_address_space *vm,
				  struct i915_page_directory *pd)
{
	int i;

	if (!px_page(pd))
		return;

	for (i = 0; i < I915_PDES; i++) {
		if (pd->page_table[i] != vm->scratch_pt)
			free_pt(vm, pd->page_table[i]);
	}
}

static int gen8_init_scratch(struct i915_address_space *vm)
{
	int ret;

	ret = setup_scratch_page(vm, I915_GFP_DMA);
	if (ret)
		return ret;

	vm->scratch_pt = alloc_pt(vm);
	if (IS_ERR(vm->scratch_pt)) {
		ret = PTR_ERR(vm->scratch_pt);
		goto free_scratch_page;
	}

	vm->scratch_pd = alloc_pd(vm);
	if (IS_ERR(vm->scratch_pd)) {
		ret = PTR_ERR(vm->scratch_pd);
		goto free_pt;
	}

	if (use_4lvl(vm)) {
		vm->scratch_pdp = alloc_pdp(vm);
		if (IS_ERR(vm->scratch_pdp)) {
			ret = PTR_ERR(vm->scratch_pdp);
			goto free_pd;
		}
	}

	gen8_initialize_pt(vm, vm->scratch_pt);
	gen8_initialize_pd(vm, vm->scratch_pd);
	if (use_4lvl(vm))
		gen8_initialize_pdp(vm, vm->scratch_pdp);

	return 0;

free_pd:
	free_pd(vm, vm->scratch_pd);
free_pt:
	free_pt(vm, vm->scratch_pt);
free_scratch_page:
	cleanup_scratch_page(vm);

	return ret;
}

static int gen8_ppgtt_notify_vgt(struct i915_hw_ppgtt *ppgtt, bool create)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	enum vgt_g2v_type msg;
	int i;

	if (use_4lvl(vm)) {
		const u64 daddr = px_dma(&ppgtt->pml4);

		I915_WRITE(vgtif_reg(pdp[0].lo), lower_32_bits(daddr));
		I915_WRITE(vgtif_reg(pdp[0].hi), upper_32_bits(daddr));

		msg = (create ? VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY);
	} else {
		for (i = 0; i < GEN8_3LVL_PDPES; i++) {
			const u64 daddr = i915_page_dir_dma_addr(ppgtt, i);

			I915_WRITE(vgtif_reg(pdp[i].lo), lower_32_bits(daddr));
			I915_WRITE(vgtif_reg(pdp[i].hi), upper_32_bits(daddr));
		}

		msg = (create ? VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE :
				VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY);
	}

	I915_WRITE(vgtif_reg(g2v_notify), msg);

	return 0;
}

static void gen8_free_scratch(struct i915_address_space *vm)
{
	if (use_4lvl(vm))
		free_pdp(vm, vm->scratch_pdp);
	free_pd(vm, vm->scratch_pd);
	free_pt(vm, vm->scratch_pt);
	cleanup_scratch_page(vm);
}

static void gen8_ppgtt_cleanup_3lvl(struct i915_address_space *vm,
				    struct i915_page_directory_pointer *pdp)
{
	const unsigned int pdpes = i915_pdpes_per_pdp(vm);
	int i;

	for (i = 0; i < pdpes; i++) {
		if (pdp->page_directory[i] == vm->scratch_pd)
			continue;

		gen8_free_page_tables(vm, pdp->page_directory[i]);
		free_pd(vm, pdp->page_directory[i]);
	}

	free_pdp(vm, pdp);
}

static void gen8_ppgtt_cleanup_4lvl(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	for (i = 0; i < GEN8_PML4ES_PER_PML4; i++) {
		if (ppgtt->pml4.pdps[i] == ppgtt->base.scratch_pdp)
			continue;

		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, ppgtt->pml4.pdps[i]);
	}

	cleanup_px(&ppgtt->base, &ppgtt->pml4);
}

static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
{
	struct drm_i915_private *dev_priv = vm->i915;
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, false);

	if (use_4lvl(vm))
		gen8_ppgtt_cleanup_4lvl(ppgtt);
	else
		gen8_ppgtt_cleanup_3lvl(&ppgtt->base, &ppgtt->pdp);

	gen8_free_scratch(vm);
}

static int gen8_ppgtt_alloc_pd(struct i915_address_space *vm,
			       struct i915_page_directory *pd,
			       u64 start, u64 length)
{
	struct i915_page_table *pt;
	u64 from = start;
	unsigned int pde;

	gen8_for_each_pde(pt, pd, start, length, pde) {
		if (pt == vm->scratch_pt) {
			pt = alloc_pt(vm);
			if (IS_ERR(pt))
				goto unwind;

			gen8_initialize_pt(vm, pt);

			gen8_ppgtt_set_pde(vm, pd, pt, pde);
			pd->used_pdes++;
			GEM_BUG_ON(pd->used_pdes > I915_PDES);
		}

		pt->used_ptes += gen8_pte_count(start, length);
	}
	return 0;

unwind:
	gen8_ppgtt_clear_pd(vm, pd, from, start - from);
	return -ENOMEM;
}

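/*
 * Allocation is on demand: only slots still pointing at the scratch pt get
 * a real table. On failure, the unwind label hands the half-built range
 * [from, start) back to the clear path, which releases whatever was just
 * allocated. The same pattern repeats at the pdp and pml4 levels below.
 */
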
static int gen8_ppgtt_alloc_pdp(struct i915_address_space *vm,
				struct i915_page_directory_pointer *pdp,
				u64 start, u64 length)
{
	struct i915_page_directory *pd;
	u64 from = start;
	unsigned int pdpe;
	int ret;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		if (pd == vm->scratch_pd) {
			pd = alloc_pd(vm);
			if (IS_ERR(pd))
				goto unwind;

			gen8_initialize_pd(vm, pd);
			gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
			pdp->used_pdpes++;
			GEM_BUG_ON(pdp->used_pdpes > i915_pdpes_per_pdp(vm));

			mark_tlbs_dirty(i915_vm_to_ppgtt(vm));
		}

		ret = gen8_ppgtt_alloc_pd(vm, pd, start, length);
		if (unlikely(ret))
			goto unwind_pd;
	}

	return 0;

unwind_pd:
	if (!pd->used_pdes) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		GEM_BUG_ON(!pdp->used_pdpes);
		pdp->used_pdpes--;
		free_pd(vm, pd);
	}
unwind:
	gen8_ppgtt_clear_pdp(vm, pdp, from, start - from);
	return -ENOMEM;
}

static int gen8_ppgtt_alloc_3lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	return gen8_ppgtt_alloc_pdp(vm,
				    &i915_vm_to_ppgtt(vm)->pdp, start, length);
}

static int gen8_ppgtt_alloc_4lvl(struct i915_address_space *vm,
				 u64 start, u64 length)
{
	struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
	struct i915_pml4 *pml4 = &ppgtt->pml4;
	struct i915_page_directory_pointer *pdp;
	u64 from = start;
	u32 pml4e;
	int ret;

	gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
		if (pml4->pdps[pml4e] == vm->scratch_pdp) {
			pdp = alloc_pdp(vm);
			if (IS_ERR(pdp))
				goto unwind;

			gen8_initialize_pdp(vm, pdp);
			gen8_ppgtt_set_pml4e(pml4, pdp, pml4e);
		}

		ret = gen8_ppgtt_alloc_pdp(vm, pdp, start, length);
		if (unlikely(ret))
			goto unwind_pdp;
	}

	return 0;

unwind_pdp:
	if (!pdp->used_pdpes) {
		gen8_ppgtt_set_pml4e(pml4, vm->scratch_pdp, pml4e);
		free_pdp(vm, pdp);
	}
unwind:
	gen8_ppgtt_clear_4lvl(vm, from, start - from);
	return -ENOMEM;
}

static void gen8_dump_pdp(struct i915_hw_ppgtt *ppgtt,
			  struct i915_page_directory_pointer *pdp,
			  u64 start, u64 length,
			  gen8_pte_t scratch_pte,
			  struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory *pd;
	u32 pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		struct i915_page_table *pt;
		u64 pd_len = length;
		u64 pd_start = start;
		u32 pde;

		if (pdp->page_directory[pdpe] == ppgtt->base.scratch_pd)
			continue;

		seq_printf(m, "\tPDPE #%d\n", pdpe);
		gen8_for_each_pde(pt, pd, pd_start, pd_len, pde) {
			u32 pte;
			gen8_pte_t *pt_vaddr;

			if (pd->page_table[pde] == ppgtt->base.scratch_pt)
				continue;

			pt_vaddr = kmap_atomic_px(pt);
			for (pte = 0; pte < GEN8_PTES; pte += 4) {
				u64 va = (pdpe << GEN8_PDPE_SHIFT |
					  pde << GEN8_PDE_SHIFT |
					  pte << GEN8_PTE_SHIFT);
				int i;
				bool found = false;

				for (i = 0; i < 4; i++)
					if (pt_vaddr[pte + i] != scratch_pte)
						found = true;
				if (!found)
					continue;

				seq_printf(m, "\t\t0x%llx [%03d,%03d,%04d]: =", va, pdpe, pde, pte);
				for (i = 0; i < 4; i++) {
					if (pt_vaddr[pte + i] != scratch_pte)
						seq_printf(m, " %llx", pt_vaddr[pte + i]);
					else
						seq_puts(m, "  SCRATCH ");
				}
				seq_puts(m, "\n");
			}
			kunmap_atomic(pt_vaddr);
		}
	}
}

static void gen8_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	const gen8_pte_t scratch_pte =
		gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC);
	u64 start = 0, length = ppgtt->base.total;

	if (use_4lvl(vm)) {
		u64 pml4e;
		struct i915_pml4 *pml4 = &ppgtt->pml4;
		struct i915_page_directory_pointer *pdp;

		gen8_for_each_pml4e(pdp, pml4, start, length, pml4e) {
			if (pml4->pdps[pml4e] == ppgtt->base.scratch_pdp)
				continue;

			seq_printf(m, "    PML4E #%llu\n", pml4e);
			gen8_dump_pdp(ppgtt, pdp, start, length, scratch_pte, m);
		}
	} else {
		gen8_dump_pdp(ppgtt, &ppgtt->pdp, start, length, scratch_pte, m);
	}
}

static int gen8_preallocate_top_level_pdp(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_directory_pointer *pdp = &ppgtt->pdp;
	struct i915_page_directory *pd;
	u64 start = 0, length = ppgtt->base.total;
	u64 from = start;
	unsigned int pdpe;

	gen8_for_each_pdpe(pd, pdp, start, length, pdpe) {
		pd = alloc_pd(vm);
		if (IS_ERR(pd))
			goto unwind;

		gen8_initialize_pd(vm, pd);
		gen8_ppgtt_set_pdpe(vm, pdp, pd, pdpe);
		pdp->used_pdpes++;
	}

	pdp->used_pdpes++; /* never remove */
	return 0;

unwind:
	start -= from;
	gen8_for_each_pdpe(pd, pdp, from, start, pdpe) {
		gen8_ppgtt_set_pdpe(vm, pdp, vm->scratch_pd, pdpe);
		free_pd(vm, pd);
	}
	pdp->used_pdpes = 0;
	return -ENOMEM;
}

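/*
 * Used only under vGPU: all top-level page directories are instantiated up
 * front, and the extra used_pdpes reference ("never remove") pins them for
 * the lifetime of the ppgtt, presumably so the host always sees a fully
 * populated top level to shadow.
 */
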
/*
 * GEN8 legacy ppgtt programming is accomplished through a max of 4 PDP
 * registers, with a net effect resembling a 2-level page table in normal x86
 * terms. Each PDP represents 1GB of memory; 4 * 512 * 512 * 4096 = 4GB of
 * legacy 32b address space.
 *
 */
static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct drm_i915_private *dev_priv = vm->i915;
	int ret;

	ppgtt->base.total = USES_FULL_48BIT_PPGTT(dev_priv) ?
		1ULL << 48 :
		1ULL << 32;

	ret = gen8_init_scratch(&ppgtt->base);
	if (ret) {
		ppgtt->base.total = 0;
		return ret;
	}

	/* There are only few exceptions for gen >=6. chv and bxt.
	 * And we are not sure about the latter so play safe for now.
	 */
	if (IS_CHERRYVIEW(dev_priv) || IS_BROXTON(dev_priv))
		ppgtt->base.pt_kmap_wc = true;

	if (use_4lvl(vm)) {
		ret = setup_px(&ppgtt->base, &ppgtt->pml4);
		if (ret)
			goto free_scratch;

		gen8_initialize_pml4(&ppgtt->base, &ppgtt->pml4);

		ppgtt->switch_mm = gen8_mm_switch_4lvl;
		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_4lvl;
		ppgtt->base.insert_entries = gen8_ppgtt_insert_4lvl;
		ppgtt->base.clear_range = gen8_ppgtt_clear_4lvl;
	} else {
		ret = __pdp_init(&ppgtt->base, &ppgtt->pdp);
		if (ret)
			goto free_scratch;

		if (intel_vgpu_active(dev_priv)) {
			ret = gen8_preallocate_top_level_pdp(ppgtt);
			if (ret) {
				__pdp_fini(&ppgtt->pdp);
				goto free_scratch;
			}
		}

		ppgtt->switch_mm = gen8_mm_switch_3lvl;
		ppgtt->base.allocate_va_range = gen8_ppgtt_alloc_3lvl;
		ppgtt->base.insert_entries = gen8_ppgtt_insert_3lvl;
		ppgtt->base.clear_range = gen8_ppgtt_clear_3lvl;
	}

	if (intel_vgpu_active(dev_priv))
		gen8_ppgtt_notify_vgt(ppgtt, true);

	ppgtt->base.cleanup = gen8_ppgtt_cleanup;
	ppgtt->base.unbind_vma = ppgtt_unbind_vma;
	ppgtt->base.bind_vma = ppgtt_bind_vma;
	ppgtt->debug_dump = gen8_dump_ppgtt;

	return 0;

free_scratch:
	gen8_free_scratch(&ppgtt->base);
	return ret;
}

static void gen6_dump_ppgtt(struct i915_hw_ppgtt *ppgtt, struct seq_file *m)
{
	struct i915_address_space *vm = &ppgtt->base;
	struct i915_page_table *unused;
	gen6_pte_t scratch_pte;
	u32 pd_entry, pte, pde;
	u32 start = 0, length = ppgtt->base.total;

	scratch_pte = vm->pte_encode(vm->scratch_page.daddr,
				     I915_CACHE_LLC, 0);

	gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) {
		u32 expected;
		gen6_pte_t *pt_vaddr;
		const dma_addr_t pt_addr = px_dma(ppgtt->pd.page_table[pde]);
		pd_entry = readl(ppgtt->pd_addr + pde);
		expected = (GEN6_PDE_ADDR_ENCODE(pt_addr) | GEN6_PDE_VALID);

		if (pd_entry != expected)
			seq_printf(m, "\tPDE #%d mismatch: Actual PDE: %x Expected PDE: %x\n",
				   pde,
				   pd_entry,
				   expected);
		seq_printf(m, "\tPDE: %x\n", pd_entry);

		pt_vaddr = kmap_atomic_px(ppgtt->pd.page_table[pde]);

		for (pte = 0; pte < GEN6_PTES; pte += 4) {
			unsigned long va =
				(pde * PAGE_SIZE * GEN6_PTES) +
				(pte * PAGE_SIZE);
			int i;
			bool found = false;
			for (i = 0; i < 4; i++)
				if (pt_vaddr[pte + i] != scratch_pte)
					found = true;
			if (!found)
				continue;

			seq_printf(m, "\t\t0x%lx [%03d,%04d]: =", va, pde, pte);
			for (i = 0; i < 4; i++) {
				if (pt_vaddr[pte + i] != scratch_pte)
					seq_printf(m, " %08x", pt_vaddr[pte + i]);
				else
					seq_puts(m, "  SCRATCH ");
			}
			seq_puts(m, "\n");
		}
		kunmap_atomic(pt_vaddr);
	}
}

/* Write pde (index) from the page directory @pd to the page table @pt */
static inline void gen6_write_pde(const struct i915_hw_ppgtt *ppgtt,
				  const unsigned int pde,
				  const struct i915_page_table *pt)
{
	/* Caller needs to make sure the write completes if necessary */
	writel_relaxed(GEN6_PDE_ADDR_ENCODE(px_dma(pt)) | GEN6_PDE_VALID,
		       ppgtt->pd_addr + pde);
}

/* Write all the page tables found in the ppgtt structure to incrementing page
 * directories. */
static void gen6_write_page_range(struct i915_hw_ppgtt *ppgtt,
				  u32 start, u32 length)
{
	struct i915_page_table *pt;
	unsigned int pde;

	gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde)
		gen6_write_pde(ppgtt, pde, pt);

	mark_tlbs_dirty(ppgtt);
	wmb();
}

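/*
 * Note the ordering contract above: each PDE is posted with
 * writel_relaxed() for speed, and the single wmb() after the loop is what
 * makes the whole batch visible before any subsequent TLB invalidate.
 */
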
static inline u32 get_pd_offset(struct i915_hw_ppgtt *ppgtt)
{
	GEM_BUG_ON(ppgtt->pd.base.ggtt_offset & 0x3f);
	return ppgtt->pd.base.ggtt_offset << 10;
}

static int hsw_mm_switch(struct i915_hw_ppgtt *ppgtt,
			 struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	/* NB: TLBs must be flushed and invalidated before a switch */
	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = get_pd_offset(ppgtt);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int gen7_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	u32 *cs;

	/* NB: TLBs must be flushed and invalidated before a switch */
	cs = intel_ring_begin(req, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(2);
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
	*cs++ = PP_DIR_DCLV_2G;
	*cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
	*cs++ = get_pd_offset(ppgtt);
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int gen6_mm_switch(struct i915_hw_ppgtt *ppgtt,
			  struct drm_i915_gem_request *req)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = req->i915;

	I915_WRITE(RING_PP_DIR_DCLV(engine), PP_DIR_DCLV_2G);
	I915_WRITE(RING_PP_DIR_BASE(engine), get_pd_offset(ppgtt));
	return 0;
}

c6be607a | 1535 | static void gen8_ppgtt_enable(struct drm_i915_private *dev_priv) |
eeb9488e | 1536 | { |
e2f80391 | 1537 | struct intel_engine_cs *engine; |
3b3f1650 | 1538 | enum intel_engine_id id; |
3e302542 | 1539 | |
3b3f1650 | 1540 | for_each_engine(engine, dev_priv, id) { |
c6be607a TU |
1541 | u32 four_level = USES_FULL_48BIT_PPGTT(dev_priv) ? |
1542 | GEN8_GFX_PPGTT_48B : 0; | |
e2f80391 | 1543 | I915_WRITE(RING_MODE_GEN7(engine), |
2dba3239 | 1544 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE | four_level)); |
eeb9488e | 1545 | } |
eeb9488e | 1546 | } |
6197349b | 1547 | |
c6be607a | 1548 | static void gen7_ppgtt_enable(struct drm_i915_private *dev_priv) |
3e302542 | 1549 | { |
e2f80391 | 1550 | struct intel_engine_cs *engine; |
75c7b0b8 | 1551 | u32 ecochk, ecobits; |
3b3f1650 | 1552 | enum intel_engine_id id; |
6197349b | 1553 | |
b4a74e3a BW |
1554 | ecobits = I915_READ(GAC_ECO_BITS); |
1555 | I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B); | |
a65c2fcd | 1556 | |
b4a74e3a | 1557 | ecochk = I915_READ(GAM_ECOCHK); |
772c2a51 | 1558 | if (IS_HASWELL(dev_priv)) { |
b4a74e3a BW |
1559 | ecochk |= ECOCHK_PPGTT_WB_HSW; |
1560 | } else { | |
1561 | ecochk |= ECOCHK_PPGTT_LLC_IVB; | |
1562 | ecochk &= ~ECOCHK_PPGTT_GFDT_IVB; | |
1563 | } | |
1564 | I915_WRITE(GAM_ECOCHK, ecochk); | |
a65c2fcd | 1565 | |
3b3f1650 | 1566 | for_each_engine(engine, dev_priv, id) { |
6197349b | 1567 | /* GFX_MODE is per-ring on gen7+ */ |
e2f80391 | 1568 | I915_WRITE(RING_MODE_GEN7(engine), |
b4a74e3a | 1569 | _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); |
6197349b | 1570 | } |
b4a74e3a | 1571 | } |
6197349b | 1572 | |
c6be607a | 1573 | static void gen6_ppgtt_enable(struct drm_i915_private *dev_priv) |
b4a74e3a | 1574 | { |
75c7b0b8 | 1575 | u32 ecochk, gab_ctl, ecobits; |
a65c2fcd | 1576 | |
b4a74e3a BW |
1577 | ecobits = I915_READ(GAC_ECO_BITS); |
1578 | I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT | | |
1579 | ECOBITS_PPGTT_CACHE64B); | |
6197349b | 1580 | |
b4a74e3a BW |
1581 | gab_ctl = I915_READ(GAB_CTL); |
1582 | I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT); | |
1583 | ||
1584 | ecochk = I915_READ(GAM_ECOCHK); | |
1585 | I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT | ECOCHK_PPGTT_CACHE64B); | |
1586 | ||
1587 | I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE)); | |
6197349b BW |
1588 | } |
1589 | ||
1d2a314c | 1590 | /* PPGTT support for Sandybridge/Gen6 and later */
853ba5d2 | 1591 | static void gen6_ppgtt_clear_range(struct i915_address_space *vm, |
dd19674b | 1592 | u64 start, u64 length) |
1d2a314c | 1593 | { |
e5716f55 | 1594 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
dd19674b CW |
1595 | unsigned int first_entry = start >> PAGE_SHIFT; |
1596 | unsigned int pde = first_entry / GEN6_PTES; | |
1597 | unsigned int pte = first_entry % GEN6_PTES; | |
1598 | unsigned int num_entries = length >> PAGE_SHIFT; | |
1599 | gen6_pte_t scratch_pte = | |
1600 | vm->pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC, 0); | |
1d2a314c | 1601 | |
7bddb01f | 1602 | while (num_entries) { |
dd19674b CW |
1603 | struct i915_page_table *pt = ppgtt->pd.page_table[pde++]; |
1604 | unsigned int end = min(pte + num_entries, GEN6_PTES); | |
1605 | gen6_pte_t *vaddr; | |
7bddb01f | 1606 | |
dd19674b | 1607 | num_entries -= end - pte; |
1d2a314c | 1608 | |
dd19674b CW |
1609 | /* Note that the hw doesn't support removing PDE on the fly |
1610 | * (they are cached inside the context with no means to | |
1611 | * invalidate the cache), so we can only reset the PTE | |
1612 | * entries back to scratch. | |
1613 | */ | |
1d2a314c | 1614 | |
dd19674b CW |
1615 | vaddr = kmap_atomic_px(pt); |
1616 | do { | |
1617 | vaddr[pte++] = scratch_pte; | |
1618 | } while (pte < end); | |
1619 | kunmap_atomic(vaddr); | |
1d2a314c | 1620 | |
dd19674b | 1621 | pte = 0; |
7bddb01f | 1622 | } |
1d2a314c DV |
1623 | } |
1624 | ||
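/*
 * Worked example for the index arithmetic above (editor's addition,
 * assuming the usual gen6 geometry of GEN6_PTES = 4096 / 4 = 1024
 * entries per page table):
 *
 *   start       = 0x469000
 *   first_entry = 0x469000 >> PAGE_SHIFT = 1129
 *   pde         = 1129 / 1024 = 1    (second page table)
 *   pte         = 1129 % 1024 = 105  (106th entry within it)
 */
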
853ba5d2 | 1625 | static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, |
def886c3 | 1626 | struct sg_table *pages, |
75c7b0b8 CW |
1627 | u64 start, |
1628 | enum i915_cache_level cache_level, | |
1629 | u32 flags) | |
def886c3 | 1630 | { |
e5716f55 | 1631 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
782f1495 | 1632 | unsigned first_entry = start >> PAGE_SHIFT; |
07749ef3 MT |
1633 | unsigned act_pt = first_entry / GEN6_PTES; |
1634 | unsigned act_pte = first_entry % GEN6_PTES; | |
b31144c0 CW |
1635 | const u32 pte_encode = vm->pte_encode(0, cache_level, flags); |
1636 | struct sgt_dma iter; | |
1637 | gen6_pte_t *vaddr; | |
1638 | ||
9231da70 | 1639 | vaddr = kmap_atomic_px(ppgtt->pd.page_table[act_pt]); |
b31144c0 CW |
1640 | iter.sg = pages->sgl; |
1641 | iter.dma = sg_dma_address(iter.sg); | |
1642 | iter.max = iter.dma + iter.sg->length; | |
1643 | do { | |
1644 | vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); | |
6e995e23 | 1645 | |
b31144c0 CW |
1646 | iter.dma += PAGE_SIZE; |
1647 | if (iter.dma == iter.max) { | |
1648 | iter.sg = __sg_next(iter.sg); | |
1649 | if (!iter.sg) | |
1650 | break; | |
6e995e23 | 1651 | |
b31144c0 CW |
1652 | iter.dma = sg_dma_address(iter.sg); |
1653 | iter.max = iter.dma + iter.sg->length; | |
1654 | } | |
24f3a8cf | 1655 | |
07749ef3 | 1656 | if (++act_pte == GEN6_PTES) { |
9231da70 CW |
1657 | kunmap_atomic(vaddr); |
1658 | vaddr = kmap_atomic_px(ppgtt->pd.page_table[++act_pt]); | |
6e995e23 | 1659 | act_pte = 0; |
def886c3 | 1660 | } |
b31144c0 | 1661 | } while (1); |
9231da70 | 1662 | kunmap_atomic(vaddr); |
def886c3 DV |
1663 | } |
1664 | ||
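/*
 * The sgt_dma walk above, shown in isolation (editor's sketch, not
 * driver code): visit every 4K DMA page of a scatterlist in order,
 * moving to the next sg entry once the current one is exhausted.
 */
static inline void sgt_dma_walk_sketch(struct sg_table *st,
				       void (*visit)(dma_addr_t addr))
{
	struct scatterlist *sg = st->sgl;
	dma_addr_t addr = sg_dma_address(sg);
	dma_addr_t end = addr + sg->length;

	do {
		visit(addr);

		addr += PAGE_SIZE;
		if (addr == end) {
			sg = __sg_next(sg);
			if (!sg)
				break;

			addr = sg_dma_address(sg);
			end = addr + sg->length;
		}
	} while (1);
}
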
678d96fb | 1665 | static int gen6_alloc_va_range(struct i915_address_space *vm, |
dd19674b | 1666 | u64 start, u64 length) |
678d96fb | 1667 | { |
e5716f55 | 1668 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
ec565b3c | 1669 | struct i915_page_table *pt; |
dd19674b CW |
1670 | u64 from = start; |
1671 | unsigned int pde; | |
1672 | bool flush = false; | |
4933d519 | 1673 | |
731f74c5 | 1674 | gen6_for_each_pde(pt, &ppgtt->pd, start, length, pde) { |
dd19674b CW |
1675 | if (pt == vm->scratch_pt) { |
1676 | pt = alloc_pt(vm); | |
1677 | if (IS_ERR(pt)) | |
1678 | goto unwind_out; | |
4933d519 | 1679 | |
dd19674b CW |
1680 | gen6_initialize_pt(vm, pt); |
1681 | ppgtt->pd.page_table[pde] = pt; | |
1682 | gen6_write_pde(ppgtt, pde, pt); | |
1683 | flush = true; | |
4933d519 | 1684 | } |
4933d519 MT |
1685 | } |
1686 | ||
dd19674b CW |
1687 | if (flush) { |
1688 | mark_tlbs_dirty(ppgtt); | |
1689 | wmb(); | |
678d96fb BW |
1690 | } |
1691 | ||
1692 | return 0; | |
4933d519 MT |
1693 | |
1694 | unwind_out: | |
dd19674b CW |
1695 | gen6_ppgtt_clear_range(vm, from, start); |
1696 | return -ENOMEM; | |
678d96fb BW |
1697 | } |
1698 | ||
8776f02b MK |
1699 | static int gen6_init_scratch(struct i915_address_space *vm) |
1700 | { | |
8bcdd0f7 | 1701 | int ret; |
8776f02b | 1702 | |
8448661d | 1703 | ret = setup_scratch_page(vm, I915_GFP_DMA); |
8bcdd0f7 CW |
1704 | if (ret) |
1705 | return ret; | |
8776f02b | 1706 | |
8448661d | 1707 | vm->scratch_pt = alloc_pt(vm); |
8776f02b | 1708 | if (IS_ERR(vm->scratch_pt)) { |
8448661d | 1709 | cleanup_scratch_page(vm); |
8776f02b MK |
1710 | return PTR_ERR(vm->scratch_pt); |
1711 | } | |
1712 | ||
1713 | gen6_initialize_pt(vm, vm->scratch_pt); | |
1714 | ||
1715 | return 0; | |
1716 | } | |
1717 | ||
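/*
 * Editor's note: the scratch page table set up above is what every
 * unallocated PDE points at -- gen6_initialize_pt() fills it with PTEs
 * aimed at vm->scratch_page, so accesses through unbound address space
 * land on a single harmless page instead of faulting.
 */
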
1718 | static void gen6_free_scratch(struct i915_address_space *vm) | |
1719 | { | |
8448661d CW |
1720 | free_pt(vm, vm->scratch_pt); |
1721 | cleanup_scratch_page(vm); | |
8776f02b MK |
1722 | } |
1723 | ||
061dd493 | 1724 | static void gen6_ppgtt_cleanup(struct i915_address_space *vm) |
a00d825d | 1725 | { |
e5716f55 | 1726 | struct i915_hw_ppgtt *ppgtt = i915_vm_to_ppgtt(vm); |
731f74c5 | 1727 | struct i915_page_directory *pd = &ppgtt->pd; |
09942c65 | 1728 | struct i915_page_table *pt; |
75c7b0b8 | 1729 | u32 pde; |
4933d519 | 1730 | |
061dd493 DV |
1731 | drm_mm_remove_node(&ppgtt->node); |
1732 | ||
731f74c5 | 1733 | gen6_for_all_pdes(pt, pd, pde) |
79ab9370 | 1734 | if (pt != vm->scratch_pt) |
8448661d | 1735 | free_pt(vm, pt); |
06fda602 | 1736 | |
8776f02b | 1737 | gen6_free_scratch(vm); |
3440d265 DV |
1738 | } |
1739 | ||
b146520f | 1740 | static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt) |
3440d265 | 1741 | { |
8776f02b | 1742 | struct i915_address_space *vm = &ppgtt->base; |
49d73912 | 1743 | struct drm_i915_private *dev_priv = ppgtt->base.i915; |
72e96d64 | 1744 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
b146520f | 1745 | int ret; |
1d2a314c | 1746 | |
c8d4c0d6 BW |
1747 | /* PPGTT PDEs reside in the GGTT and consists of 512 entries. The |
1748 | * allocator works in address space sizes, so it's multiplied by page | |
1749 | * size. We allocate at the top of the GTT to avoid fragmentation. | |
1750 | */ | |
72e96d64 | 1751 | BUG_ON(!drm_mm_initialized(&ggtt->base.mm)); |
4933d519 | 1752 | |
8776f02b MK |
1753 | ret = gen6_init_scratch(vm); |
1754 | if (ret) | |
1755 | return ret; | |
4933d519 | 1756 | |
e007b19d CW |
1757 | ret = i915_gem_gtt_insert(&ggtt->base, &ppgtt->node, |
1758 | GEN6_PD_SIZE, GEN6_PD_ALIGN, | |
1759 | I915_COLOR_UNEVICTABLE, | |
1760 | 0, ggtt->base.total, | |
1761 | PIN_HIGH); | |
c8c26622 | 1762 | if (ret) |
678d96fb BW |
1763 | goto err_out; |
1764 | ||
72e96d64 | 1765 | if (ppgtt->node.start < ggtt->mappable_end) |
c8d4c0d6 | 1766 | DRM_DEBUG("Forced to use aperture for PDEs\n"); |
1d2a314c | 1767 | |
52c126ee CW |
1768 | ppgtt->pd.base.ggtt_offset = |
1769 | ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t); | |
1770 | ||
1771 | ppgtt->pd_addr = (gen6_pte_t __iomem *)ggtt->gsm + | |
1772 | ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t); | |
1773 | ||
c8c26622 | 1774 | return 0; |
678d96fb BW |
1775 | |
1776 | err_out: | |
8776f02b | 1777 | gen6_free_scratch(vm); |
678d96fb | 1778 | return ret; |
b146520f BW |
1779 | } |
1780 | ||
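/*
 * Worked numbers for the comment above (editor's addition): gen6
 * hardware fetches PDEs from the GSM itself, one PDE per GGTT PTE
 * slot, and each slot corresponds to a full page of GGTT address
 * space.  512 PDEs therefore reserve
 *
 *   GEN6_PD_SIZE = I915_PDES * PAGE_SIZE = 512 * 4096 = 2MB
 *
 * of GGTT address space even though the PDE data itself is only
 * 512 * 4 bytes -- which is also why pd.base.ggtt_offset is computed
 * as node.start / PAGE_SIZE * sizeof(gen6_pte_t).
 */
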
b146520f BW |
1781 | static int gen6_ppgtt_alloc(struct i915_hw_ppgtt *ppgtt) |
1782 | { | |
2f2cf682 | 1783 | return gen6_ppgtt_allocate_page_directories(ppgtt); |
4933d519 | 1784 | } |
06dc68d6 | 1785 | |
4933d519 | 1786 | static void gen6_scratch_va_range(struct i915_hw_ppgtt *ppgtt, |
75c7b0b8 | 1787 | u64 start, u64 length) |
4933d519 | 1788 | { |
ec565b3c | 1789 | struct i915_page_table *unused; |
75c7b0b8 | 1790 | u32 pde; |
1d2a314c | 1791 | |
731f74c5 | 1792 | gen6_for_each_pde(unused, &ppgtt->pd, start, length, pde) |
79ab9370 | 1793 | ppgtt->pd.page_table[pde] = ppgtt->base.scratch_pt; |
b146520f BW |
1794 | } |
1795 | ||
5c5f6457 | 1796 | static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt) |
b146520f | 1797 | { |
49d73912 | 1798 | struct drm_i915_private *dev_priv = ppgtt->base.i915; |
72e96d64 | 1799 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
b146520f BW |
1800 | int ret; |
1801 | ||
72e96d64 | 1802 | ppgtt->base.pte_encode = ggtt->base.pte_encode; |
5db94019 | 1803 | if (intel_vgpu_active(dev_priv) || IS_GEN6(dev_priv)) |
b146520f | 1804 | ppgtt->switch_mm = gen6_mm_switch; |
772c2a51 | 1805 | else if (IS_HASWELL(dev_priv)) |
b146520f | 1806 | ppgtt->switch_mm = hsw_mm_switch; |
5db94019 | 1807 | else if (IS_GEN7(dev_priv)) |
b146520f | 1808 | ppgtt->switch_mm = gen7_mm_switch; |
8eb95204 | 1809 | else |
b146520f BW |
1810 | BUG(); |
1811 | ||
1812 | ret = gen6_ppgtt_alloc(ppgtt); | |
1813 | if (ret) | |
1814 | return ret; | |
1815 | ||
09942c65 | 1816 | ppgtt->base.total = I915_PDES * GEN6_PTES * PAGE_SIZE; |
1d2a314c | 1817 | |
5c5f6457 | 1818 | gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total); |
16a011c8 | 1819 | gen6_write_page_range(ppgtt, 0, ppgtt->base.total); |
678d96fb | 1820 | |
52c126ee CW |
1821 | ret = gen6_alloc_va_range(&ppgtt->base, 0, ppgtt->base.total); |
1822 | if (ret) { | |
1823 | gen6_ppgtt_cleanup(&ppgtt->base); | |
1824 | return ret; | |
1825 | } | |
1826 | ||
054b9acd MK |
1827 | ppgtt->base.clear_range = gen6_ppgtt_clear_range; |
1828 | ppgtt->base.insert_entries = gen6_ppgtt_insert_entries; | |
1829 | ppgtt->base.unbind_vma = ppgtt_unbind_vma; | |
1830 | ppgtt->base.bind_vma = ppgtt_bind_vma; | |
1831 | ppgtt->base.cleanup = gen6_ppgtt_cleanup; | |
1832 | ppgtt->debug_dump = gen6_dump_ppgtt; | |
1833 | ||
440fd528 | 1834 | DRM_DEBUG_DRIVER("Allocated pde space (%lldM) at GTT entry: %llx\n", |
b146520f BW |
1835 | ppgtt->node.size >> 20, |
1836 | ppgtt->node.start / PAGE_SIZE); | |
3440d265 | 1837 | |
52c126ee CW |
1838 | DRM_DEBUG_DRIVER("Adding PPGTT at offset %x\n", |
1839 | ppgtt->pd.base.ggtt_offset << 10); | |
fa76da34 | 1840 | |
b146520f | 1841 | return 0; |
3440d265 DV |
1842 | } |
1843 | ||
2bfa996e CW |
1844 | static int __hw_ppgtt_init(struct i915_hw_ppgtt *ppgtt, |
1845 | struct drm_i915_private *dev_priv) | |
3440d265 | 1846 | { |
49d73912 | 1847 | ppgtt->base.i915 = dev_priv; |
8448661d | 1848 | ppgtt->base.dma = &dev_priv->drm.pdev->dev; |
3440d265 | 1849 | |
2bfa996e | 1850 | if (INTEL_INFO(dev_priv)->gen < 8) |
5c5f6457 | 1851 | return gen6_ppgtt_init(ppgtt); |
3ed124b2 | 1852 | else |
d7b2633d | 1853 | return gen8_ppgtt_init(ppgtt); |
fa76da34 | 1854 | } |
c114f76a | 1855 | |
a2cad9df | 1856 | static void i915_address_space_init(struct i915_address_space *vm, |
80b204bc CW |
1857 | struct drm_i915_private *dev_priv, |
1858 | const char *name) | |
a2cad9df | 1859 | { |
80b204bc | 1860 | i915_gem_timeline_init(dev_priv, &vm->timeline, name); |
47db922f | 1861 | |
381b943b | 1862 | drm_mm_init(&vm->mm, 0, vm->total); |
47db922f CW |
1863 | vm->mm.head_node.color = I915_COLOR_UNEVICTABLE; |
1864 | ||
a2cad9df MW |
1865 | INIT_LIST_HEAD(&vm->active_list); |
1866 | INIT_LIST_HEAD(&vm->inactive_list); | |
50e046b6 | 1867 | INIT_LIST_HEAD(&vm->unbound_list); |
47db922f | 1868 | |
a2cad9df | 1869 | list_add_tail(&vm->global_link, &dev_priv->vm_list); |
8448661d | 1870 | pagevec_init(&vm->free_pages, false); |
a2cad9df MW |
1871 | } |
1872 | ||
ed9724dd MA |
1873 | static void i915_address_space_fini(struct i915_address_space *vm) |
1874 | { | |
8448661d CW |
1875 | if (pagevec_count(&vm->free_pages)) |
1876 | vm_free_pages_release(vm); | |
1877 | ||
ed9724dd MA |
1878 | i915_gem_timeline_fini(&vm->timeline); |
1879 | drm_mm_takedown(&vm->mm); | |
1880 | list_del(&vm->global_link); | |
1881 | } | |
1882 | ||
c6be607a | 1883 | static void gtt_write_workarounds(struct drm_i915_private *dev_priv) |
d5165ebd | 1884 | { |
d5165ebd TG |
1885 | /* This function is for gtt related workarounds. This function is |
1886 | * called on driver load and after a GPU reset, so you can place | |
1887 | * workarounds here even if they get overwritten by GPU reset. | |
1888 | */ | |
9fb5026f | 1889 | /* WaIncreaseDefaultTLBEntries:chv,bdw,skl,bxt,kbl,glk */ |
8652744b | 1890 | if (IS_BROADWELL(dev_priv)) |
d5165ebd | 1891 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_BDW); |
920a14b2 | 1892 | else if (IS_CHERRYVIEW(dev_priv)) |
d5165ebd | 1893 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN8_L3_LRA_1_GPGPU_DEFAULT_VALUE_CHV); |
b976dc53 | 1894 | else if (IS_GEN9_BC(dev_priv)) |
d5165ebd | 1895 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_SKL); |
9fb5026f | 1896 | else if (IS_GEN9_LP(dev_priv)) |
d5165ebd TG |
1897 | I915_WRITE(GEN8_L3_LRA_1_GPGPU, GEN9_L3_LRA_1_GPGPU_DEFAULT_VALUE_BXT); |
1898 | } | |
1899 | ||
c6be607a | 1900 | int i915_ppgtt_init_hw(struct drm_i915_private *dev_priv) |
82460d97 | 1901 | { |
c6be607a | 1902 | gtt_write_workarounds(dev_priv); |
d5165ebd | 1903 | |
671b5013 TD |
1904 | /* In the case of execlists, PPGTT is enabled by the context descriptor |
1905 | * and the PDPs are contained within the context itself. We don't | |
1906 | * need to do anything here. */ | |
1907 | if (i915.enable_execlists) | |
1908 | return 0; | |
1909 | ||
c6be607a | 1910 | if (!USES_PPGTT(dev_priv)) |
82460d97 DV |
1911 | return 0; |
1912 | ||
5db94019 | 1913 | if (IS_GEN6(dev_priv)) |
c6be607a | 1914 | gen6_ppgtt_enable(dev_priv); |
5db94019 | 1915 | else if (IS_GEN7(dev_priv)) |
c6be607a TU |
1916 | gen7_ppgtt_enable(dev_priv); |
1917 | else if (INTEL_GEN(dev_priv) >= 8) | |
1918 | gen8_ppgtt_enable(dev_priv); | |
82460d97 | 1919 | else |
c6be607a | 1920 | MISSING_CASE(INTEL_GEN(dev_priv)); |
82460d97 | 1921 | |
4ad2fd88 JH |
1922 | return 0; |
1923 | } | |
1d2a314c | 1924 | |
4d884705 | 1925 | struct i915_hw_ppgtt * |
2bfa996e | 1926 | i915_ppgtt_create(struct drm_i915_private *dev_priv, |
80b204bc CW |
1927 | struct drm_i915_file_private *fpriv, |
1928 | const char *name) | |
4d884705 DV |
1929 | { |
1930 | struct i915_hw_ppgtt *ppgtt; | |
1931 | int ret; | |
1932 | ||
1933 | ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL); | |
1934 | if (!ppgtt) | |
1935 | return ERR_PTR(-ENOMEM); | |
1936 | ||
1188bc66 | 1937 | ret = __hw_ppgtt_init(ppgtt, dev_priv); |
4d884705 DV |
1938 | if (ret) { |
1939 | kfree(ppgtt); | |
1940 | return ERR_PTR(ret); | |
1941 | } | |
1942 | ||
1188bc66 CW |
1943 | kref_init(&ppgtt->ref); |
1944 | i915_address_space_init(&ppgtt->base, dev_priv, name); | |
1945 | ppgtt->base.file = fpriv; | |
1946 | ||
198c974d DCS |
1947 | trace_i915_ppgtt_create(&ppgtt->base); |
1948 | ||
4d884705 DV |
1949 | return ppgtt; |
1950 | } | |
1951 | ||
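/*
 * Hypothetical usage sketch (editor's addition): callers own a
 * reference on the returned ppgtt and drop it with i915_ppgtt_put().
 */
#if 0
	struct i915_hw_ppgtt *ppgtt;

	ppgtt = i915_ppgtt_create(dev_priv, file_priv, "[example]");
	if (IS_ERR(ppgtt))
		return PTR_ERR(ppgtt);

	/* ... use ppgtt->base as an i915_address_space ... */

	i915_ppgtt_put(ppgtt);
#endif
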
0c7eeda1 CW |
1952 | void i915_ppgtt_close(struct i915_address_space *vm) |
1953 | { | |
1954 | struct list_head *phases[] = { | |
1955 | &vm->active_list, | |
1956 | &vm->inactive_list, | |
1957 | &vm->unbound_list, | |
1958 | NULL, | |
1959 | }, **phase; | |
1960 | ||
1961 | GEM_BUG_ON(vm->closed); | |
1962 | vm->closed = true; | |
1963 | ||
1964 | for (phase = phases; *phase; phase++) { | |
1965 | struct i915_vma *vma, *vn; | |
1966 | ||
1967 | list_for_each_entry_safe(vma, vn, *phase, vm_link) | |
1968 | if (!i915_vma_is_closed(vma)) | |
1969 | i915_vma_close(vma); | |
1970 | } | |
1971 | } | |
1972 | ||
ed9724dd | 1973 | void i915_ppgtt_release(struct kref *kref) |
ee960be7 DV |
1974 | { |
1975 | struct i915_hw_ppgtt *ppgtt = | |
1976 | container_of(kref, struct i915_hw_ppgtt, ref); | |
1977 | ||
198c974d DCS |
1978 | trace_i915_ppgtt_release(&ppgtt->base); |
1979 | ||
50e046b6 | 1980 | /* vmas should already be unbound and destroyed */ |
ee960be7 DV |
1981 | WARN_ON(!list_empty(&ppgtt->base.active_list)); |
1982 | WARN_ON(!list_empty(&ppgtt->base.inactive_list)); | |
50e046b6 | 1983 | WARN_ON(!list_empty(&ppgtt->base.unbound_list)); |
ee960be7 DV |
1984 | |
1985 | ppgtt->base.cleanup(&ppgtt->base); | |
8448661d | 1986 | i915_address_space_fini(&ppgtt->base); |
ee960be7 DV |
1987 | kfree(ppgtt); |
1988 | } | |
1d2a314c | 1989 | |
a81cc00c BW |
1990 | /* Certain Gen5 chipsets require idling the GPU before
1991 | * unmapping anything from the GTT when VT-d is enabled. | |
1992 | */ | |
97d6d7ab | 1993 | static bool needs_idle_maps(struct drm_i915_private *dev_priv) |
a81cc00c BW |
1994 | { |
1995 | #ifdef CONFIG_INTEL_IOMMU | |
1996 | /* Query intel_iommu to see if we need the workaround. Presumably that | |
1997 | * was loaded first. | |
1998 | */ | |
97d6d7ab | 1999 | if (IS_GEN5(dev_priv) && IS_MOBILE(dev_priv) && intel_iommu_gfx_mapped) |
a81cc00c BW |
2000 | return true; |
2001 | #endif | |
2002 | return false; | |
2003 | } | |
2004 | ||
dc97997a | 2005 | void i915_check_and_clear_faults(struct drm_i915_private *dev_priv) |
828c7908 | 2006 | { |
e2f80391 | 2007 | struct intel_engine_cs *engine; |
3b3f1650 | 2008 | enum intel_engine_id id; |
828c7908 | 2009 | |
dc97997a | 2010 | if (INTEL_INFO(dev_priv)->gen < 6) |
828c7908 BW |
2011 | return; |
2012 | ||
3b3f1650 | 2013 | for_each_engine(engine, dev_priv, id) { |
828c7908 | 2014 | u32 fault_reg; |
e2f80391 | 2015 | fault_reg = I915_READ(RING_FAULT_REG(engine)); |
828c7908 BW |
2016 | if (fault_reg & RING_FAULT_VALID) { |
2017 | DRM_DEBUG_DRIVER("Unexpected fault\n" | |
59a5d290 | 2018 | "\tAddr: 0x%08lx\n" |
828c7908 BW |
2019 | "\tAddress space: %s\n" |
2020 | "\tSource ID: %d\n" | |
2021 | "\tType: %d\n", | |
2022 | fault_reg & PAGE_MASK, | |
2023 | fault_reg & RING_FAULT_GTTSEL_MASK ? "GGTT" : "PPGTT", | |
2024 | RING_FAULT_SRCID(fault_reg), | |
2025 | RING_FAULT_FAULT_TYPE(fault_reg)); | |
e2f80391 | 2026 | I915_WRITE(RING_FAULT_REG(engine), |
828c7908 BW |
2027 | fault_reg & ~RING_FAULT_VALID); |
2028 | } | |
2029 | } | |
3b3f1650 AG |
2030 | |
2031 | /* Engine specific init may not have been done till this point. */ | |
2032 | if (dev_priv->engine[RCS]) | |
2033 | POSTING_READ(RING_FAULT_REG(dev_priv->engine[RCS])); | |
828c7908 BW |
2034 | } |
2035 | ||
275a991c | 2036 | void i915_gem_suspend_gtt_mappings(struct drm_i915_private *dev_priv) |
828c7908 | 2037 | { |
72e96d64 | 2038 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
828c7908 BW |
2039 | |
2040 | /* Don't bother messing with faults pre GEN6 as we have little | |
2041 | * documentation supporting that it's a good idea. | |
2042 | */ | |
275a991c | 2043 | if (INTEL_GEN(dev_priv) < 6) |
828c7908 BW |
2044 | return; |
2045 | ||
dc97997a | 2046 | i915_check_and_clear_faults(dev_priv); |
828c7908 | 2047 | |
381b943b | 2048 | ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total); |
91e56499 | 2049 | |
7c3f86b6 | 2050 | i915_ggtt_invalidate(dev_priv); |
828c7908 BW |
2051 | } |
2052 | ||
03ac84f1 CW |
2053 | int i915_gem_gtt_prepare_pages(struct drm_i915_gem_object *obj, |
2054 | struct sg_table *pages) | |
7c2e6fdf | 2055 | { |
1a292fa5 CW |
2056 | do { |
2057 | if (dma_map_sg(&obj->base.dev->pdev->dev, | |
2058 | pages->sgl, pages->nents, | |
2059 | PCI_DMA_BIDIRECTIONAL)) | |
2060 | return 0; | |
2061 | ||
2062 | /* If the DMA remap fails, one cause can be that we have | |
2063 | * too many objects pinned in a small remapping table, | |
2064 | * such as swiotlb. Incrementally purge all other objects and | |
2065 | * try again - if there are no more pages to remove from | |
2066 | * the DMA remapper, i915_gem_shrink will return 0. | |
2067 | */ | |
2068 | GEM_BUG_ON(obj->mm.pages == pages); | |
2069 | } while (i915_gem_shrink(to_i915(obj->base.dev), | |
2070 | obj->base.size >> PAGE_SHIFT, | |
2071 | I915_SHRINK_BOUND | | |
2072 | I915_SHRINK_UNBOUND | | |
2073 | I915_SHRINK_ACTIVE)); | |
9da3da66 | 2074 | |
03ac84f1 | 2075 | return -ENOSPC; |
7c2e6fdf DV |
2076 | } |
2077 | ||
2c642b07 | 2078 | static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) |
94ec8f61 | 2079 | { |
94ec8f61 | 2080 | writeq(pte, addr); |
94ec8f61 BW |
2081 | } |
2082 | ||
d6473f56 CW |
2083 | static void gen8_ggtt_insert_page(struct i915_address_space *vm, |
2084 | dma_addr_t addr, | |
75c7b0b8 | 2085 | u64 offset, |
d6473f56 CW |
2086 | enum i915_cache_level level, |
2087 | u32 unused) | |
2088 | { | |
7c3f86b6 | 2089 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
d6473f56 | 2090 | gen8_pte_t __iomem *pte = |
7c3f86b6 | 2091 | (gen8_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT); |
d6473f56 | 2092 | |
4fb84d99 | 2093 | gen8_set_pte(pte, gen8_pte_encode(addr, level)); |
d6473f56 | 2094 | |
7c3f86b6 | 2095 | ggtt->invalidate(vm->i915); |
d6473f56 CW |
2096 | } |
2097 | ||
94ec8f61 BW |
2098 | static void gen8_ggtt_insert_entries(struct i915_address_space *vm, |
2099 | struct sg_table *st, | |
75c7b0b8 CW |
2100 | u64 start, |
2101 | enum i915_cache_level level, | |
2102 | u32 unused) | |
94ec8f61 | 2103 | { |
ce7fda2e | 2104 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
85d1225e DG |
2105 | struct sgt_iter sgt_iter; |
2106 | gen8_pte_t __iomem *gtt_entries; | |
894ccebe | 2107 | const gen8_pte_t pte_encode = gen8_pte_encode(0, level); |
85d1225e | 2108 | dma_addr_t addr; |
be69459a | 2109 | |
894ccebe CW |
2110 | gtt_entries = (gen8_pte_t __iomem *)ggtt->gsm; |
2111 | gtt_entries += start >> PAGE_SHIFT; | |
2112 | for_each_sgt_dma(addr, sgt_iter, st) | |
2113 | gen8_set_pte(gtt_entries++, pte_encode | addr); | |
85d1225e | 2114 | |
894ccebe | 2115 | wmb(); |
94ec8f61 | 2116 | |
94ec8f61 BW |
2117 | /* This next bit makes the above posting read even more important. We |
2118 | * want to flush the TLBs only after we're certain all the PTE updates | |
2119 | * have finished. | |
2120 | */ | |
7c3f86b6 | 2121 | ggtt->invalidate(vm->i915); |
94ec8f61 BW |
2122 | } |
2123 | ||
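/*
 * Editor's note: pre-computing pte_encode with a zero address works
 * because gen8_pte_encode(0, level) contributes only flag bits; OR-ing
 * in a 4K-aligned DMA address can never collide with them.
 */
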
d6473f56 CW |
2124 | static void gen6_ggtt_insert_page(struct i915_address_space *vm, |
2125 | dma_addr_t addr, | |
75c7b0b8 | 2126 | u64 offset, |
d6473f56 CW |
2127 | enum i915_cache_level level, |
2128 | u32 flags) | |
2129 | { | |
7c3f86b6 | 2130 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
d6473f56 | 2131 | gen6_pte_t __iomem *pte = |
7c3f86b6 | 2132 | (gen6_pte_t __iomem *)ggtt->gsm + (offset >> PAGE_SHIFT); |
d6473f56 | 2133 | |
4fb84d99 | 2134 | iowrite32(vm->pte_encode(addr, level, flags), pte); |
d6473f56 | 2135 | |
7c3f86b6 | 2136 | ggtt->invalidate(vm->i915); |
d6473f56 CW |
2137 | } |
2138 | ||
e76e9aeb BW |
2139 | /* |
2140 | * Binds an object into the global gtt with the specified cache level. The object | |
2141 | * will be accessible to the GPU via commands whose operands reference offsets | |
2142 | * within the global GTT as well as accessible by the GPU through the GMADR | |
2143 | * mapped BAR (dev_priv->mm.gtt->gtt). | |
2144 | */ | |
853ba5d2 | 2145 | static void gen6_ggtt_insert_entries(struct i915_address_space *vm, |
7faf1ab2 | 2146 | struct sg_table *st, |
75c7b0b8 CW |
2147 | u64 start, |
2148 | enum i915_cache_level level, | |
2149 | u32 flags) | |
e76e9aeb | 2150 | { |
ce7fda2e | 2151 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
b31144c0 CW |
2152 | gen6_pte_t __iomem *entries = (gen6_pte_t __iomem *)ggtt->gsm; |
2153 | unsigned int i = start >> PAGE_SHIFT; | |
2154 | struct sgt_iter iter; | |
85d1225e | 2155 | dma_addr_t addr; |
b31144c0 CW |
2156 | for_each_sgt_dma(addr, iter, st) |
2157 | iowrite32(vm->pte_encode(addr, level, flags), &entries[i++]); | |
2158 | wmb(); | |
0f9b91c7 BW |
2159 | |
2160 | /* This next bit makes the above posting read even more important. We | |
2161 | * want to flush the TLBs only after we're certain all the PTE updates | |
2162 | * have finished. | |
2163 | */ | |
7c3f86b6 | 2164 | ggtt->invalidate(vm->i915); |
e76e9aeb BW |
2165 | } |
2166 | ||
f7770bfd | 2167 | static void nop_clear_range(struct i915_address_space *vm, |
75c7b0b8 | 2168 | u64 start, u64 length) |
f7770bfd CW |
2169 | { |
2170 | } | |
2171 | ||
94ec8f61 | 2172 | static void gen8_ggtt_clear_range(struct i915_address_space *vm, |
75c7b0b8 | 2173 | u64 start, u64 length) |
94ec8f61 | 2174 | { |
ce7fda2e | 2175 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
782f1495 BW |
2176 | unsigned first_entry = start >> PAGE_SHIFT; |
2177 | unsigned num_entries = length >> PAGE_SHIFT; | |
894ccebe CW |
2178 | const gen8_pte_t scratch_pte = |
2179 | gen8_pte_encode(vm->scratch_page.daddr, I915_CACHE_LLC); | |
2180 | gen8_pte_t __iomem *gtt_base = | |
72e96d64 JL |
2181 | (gen8_pte_t __iomem *)ggtt->gsm + first_entry; |
2182 | const int max_entries = ggtt_total_entries(ggtt) - first_entry; | |
94ec8f61 BW |
2183 | int i; |
2184 | ||
2185 | if (WARN(num_entries > max_entries, | |
2186 | "First entry = %d; Num entries = %d (max=%d)\n", | |
2187 | first_entry, num_entries, max_entries)) | |
2188 | num_entries = max_entries; | |
2189 | ||
94ec8f61 BW |
2190 | for (i = 0; i < num_entries; i++) |
2191 | gen8_set_pte(&gtt_base[i], scratch_pte);
94ec8f61 BW |
2192 | } |
2193 | ||
d86b18a0 JB |
2194 | static void bxt_vtd_ggtt_wa(struct i915_address_space *vm) |
2195 | { | |
2196 | struct drm_i915_private *dev_priv = vm->i915; | |
2197 | ||
2198 | /* | |
2199 | * Make sure the internal GAM fifo has been cleared of all GTT | |
2200 | * writes before exiting stop_machine(). This guarantees that | |
2201 | * any aperture accesses waiting to start in another process | |
2202 | * cannot back up behind the GTT writes causing a hang. | |
2203 | * The register can be any arbitrary GAM register. | |
2204 | */ | |
2205 | POSTING_READ(GFX_FLSH_CNTL_GEN6); | |
2206 | } | |
2207 | ||
2208 | struct insert_page { | |
2209 | struct i915_address_space *vm; | |
2210 | dma_addr_t addr; | |
2211 | u64 offset; | |
2212 | enum i915_cache_level level; | |
2213 | }; | |
2214 | ||
2215 | static int bxt_vtd_ggtt_insert_page__cb(void *_arg) | |
2216 | { | |
2217 | struct insert_page *arg = _arg; | |
2218 | ||
2219 | gen8_ggtt_insert_page(arg->vm, arg->addr, arg->offset, arg->level, 0); | |
2220 | bxt_vtd_ggtt_wa(arg->vm); | |
2221 | ||
2222 | return 0; | |
2223 | } | |
2224 | ||
2225 | static void bxt_vtd_ggtt_insert_page__BKL(struct i915_address_space *vm, | |
2226 | dma_addr_t addr, | |
2227 | u64 offset, | |
2228 | enum i915_cache_level level, | |
2229 | u32 unused) | |
2230 | { | |
2231 | struct insert_page arg = { vm, addr, offset, level }; | |
2232 | ||
2233 | stop_machine(bxt_vtd_ggtt_insert_page__cb, &arg, NULL); | |
2234 | } | |
2235 | ||
2236 | struct insert_entries { | |
2237 | struct i915_address_space *vm; | |
2238 | struct sg_table *st; | |
2239 | u64 start; | |
2240 | enum i915_cache_level level; | |
2241 | }; | |
2242 | ||
2243 | static int bxt_vtd_ggtt_insert_entries__cb(void *_arg) | |
2244 | { | |
2245 | struct insert_entries *arg = _arg; | |
2246 | ||
2247 | gen8_ggtt_insert_entries(arg->vm, arg->st, arg->start, arg->level, 0); | |
2248 | bxt_vtd_ggtt_wa(arg->vm); | |
2249 | ||
2250 | return 0; | |
2251 | } | |
2252 | ||
2253 | static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm, | |
2254 | struct sg_table *st, | |
2255 | u64 start, | |
2256 | enum i915_cache_level level, | |
2257 | u32 unused) | |
2258 | { | |
2259 | struct insert_entries arg = { vm, st, start, level }; | |
2260 | ||
2261 | stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL); | |
2262 | } | |
2263 | ||
2264 | struct clear_range { | |
2265 | struct i915_address_space *vm; | |
2266 | u64 start; | |
2267 | u64 length; | |
2268 | }; | |
2269 | ||
2270 | static int bxt_vtd_ggtt_clear_range__cb(void *_arg) | |
2271 | { | |
2272 | struct clear_range *arg = _arg; | |
2273 | ||
2274 | gen8_ggtt_clear_range(arg->vm, arg->start, arg->length); | |
2275 | bxt_vtd_ggtt_wa(arg->vm); | |
2276 | ||
2277 | return 0; | |
2278 | } | |
2279 | ||
2280 | static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm, | |
2281 | u64 start, | |
2282 | u64 length) | |
2283 | { | |
2284 | struct clear_range arg = { vm, start, length }; | |
2285 | ||
2286 | stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL); | |
2287 | } | |
2288 | ||
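/*
 * The three __BKL wrappers above share one shape, sketched generically
 * here (editor's addition; my_op is hypothetical): stop_machine()
 * passes a single void *, so the arguments are bundled into a struct,
 * the real GGTT update runs while all other CPUs are held off, and the
 * GAM fifo is drained before any aperture access can resume.
 */
#if 0
struct my_op_args {
	struct i915_address_space *vm;
	/* ... operation-specific parameters ... */
};

static int my_op__cb(void *_arg)
{
	struct my_op_args *arg = _arg;

	/* perform the real GGTT update here */
	bxt_vtd_ggtt_wa(arg->vm);	/* drain the GAM fifo */

	return 0;
}

static void my_op__BKL(struct i915_address_space *vm)
{
	struct my_op_args arg = { vm };

	stop_machine(my_op__cb, &arg, NULL);
}
#endif
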
853ba5d2 | 2289 | static void gen6_ggtt_clear_range(struct i915_address_space *vm, |
75c7b0b8 | 2290 | u64 start, u64 length) |
7faf1ab2 | 2291 | { |
ce7fda2e | 2292 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); |
782f1495 BW |
2293 | unsigned first_entry = start >> PAGE_SHIFT; |
2294 | unsigned num_entries = length >> PAGE_SHIFT; | |
07749ef3 | 2295 | gen6_pte_t scratch_pte, __iomem *gtt_base = |
72e96d64 JL |
2296 | (gen6_pte_t __iomem *)ggtt->gsm + first_entry; |
2297 | const int max_entries = ggtt_total_entries(ggtt) - first_entry; | |
7faf1ab2 DV |
2298 | int i; |
2299 | ||
2300 | if (WARN(num_entries > max_entries, | |
2301 | "First entry = %d; Num entries = %d (max=%d)\n", | |
2302 | first_entry, num_entries, max_entries)) | |
2303 | num_entries = max_entries; | |
2304 | ||
8bcdd0f7 | 2305 | scratch_pte = vm->pte_encode(vm->scratch_page.daddr, |
4fb84d99 | 2306 | I915_CACHE_LLC, 0); |
828c7908 | 2307 | |
7faf1ab2 DV |
2308 | for (i = 0; i < num_entries; i++) |
2309 | iowrite32(scratch_pte, &gtt_base[i]);
7faf1ab2 DV |
2310 | } |
2311 | ||
d6473f56 CW |
2312 | static void i915_ggtt_insert_page(struct i915_address_space *vm, |
2313 | dma_addr_t addr, | |
75c7b0b8 | 2314 | u64 offset, |
d6473f56 CW |
2315 | enum i915_cache_level cache_level, |
2316 | u32 unused) | |
2317 | { | |
d6473f56 CW |
2318 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? |
2319 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; | |
d6473f56 CW |
2320 | |
2321 | intel_gtt_insert_page(addr, offset >> PAGE_SHIFT, flags); | |
d6473f56 CW |
2322 | } |
2323 | ||
d369d2d9 DV |
2324 | static void i915_ggtt_insert_entries(struct i915_address_space *vm, |
2325 | struct sg_table *pages, | |
75c7b0b8 CW |
2326 | u64 start, |
2327 | enum i915_cache_level cache_level, | |
2328 | u32 unused) | |
7faf1ab2 DV |
2329 | { |
2330 | unsigned int flags = (cache_level == I915_CACHE_NONE) ? | |
2331 | AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY; | |
2332 | ||
d369d2d9 | 2333 | intel_gtt_insert_sg_entries(pages, start >> PAGE_SHIFT, flags); |
7faf1ab2 DV |
2334 | } |
2335 | ||
853ba5d2 | 2336 | static void i915_ggtt_clear_range(struct i915_address_space *vm, |
75c7b0b8 | 2337 | u64 start, u64 length) |
7faf1ab2 | 2338 | { |
2eedfc7d | 2339 | intel_gtt_clear_range(start >> PAGE_SHIFT, length >> PAGE_SHIFT); |
7faf1ab2 DV |
2340 | } |
2341 | ||
70b9f6f8 DV |
2342 | static int ggtt_bind_vma(struct i915_vma *vma, |
2343 | enum i915_cache_level cache_level, | |
2344 | u32 flags) | |
0a878716 | 2345 | { |
49d73912 | 2346 | struct drm_i915_private *i915 = vma->vm->i915; |
0a878716 | 2347 | struct drm_i915_gem_object *obj = vma->obj; |
ba7a5741 | 2348 | u32 pte_flags; |
0a878716 | 2349 | |
ba7a5741 CW |
2350 | if (unlikely(!vma->pages)) { |
2351 | int ret = i915_get_ggtt_vma_pages(vma); | |
2352 | if (ret) | |
2353 | return ret; | |
2354 | } | |
0a878716 DV |
2355 | |
2356 | /* Currently applicable only to VLV */ | |
ba7a5741 | 2357 | pte_flags = 0; |
0a878716 DV |
2358 | if (obj->gt_ro) |
2359 | pte_flags |= PTE_READ_ONLY; | |
2360 | ||
9c870d03 | 2361 | intel_runtime_pm_get(i915); |
247177dd | 2362 | vma->vm->insert_entries(vma->vm, vma->pages, vma->node.start, |
0a878716 | 2363 | cache_level, pte_flags); |
9c870d03 | 2364 | intel_runtime_pm_put(i915); |
0a878716 DV |
2365 | |
2366 | /* | |
2367 | * Without aliasing PPGTT there's no difference between | |
2368 | * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally | |
2369 | * upgrade to both bound if we bind either to avoid double-binding. | |
2370 | */ | |
3272db53 | 2371 | vma->flags |= I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND; |
0a878716 DV |
2372 | |
2373 | return 0; | |
2374 | } | |
2375 | ||
cbc4e9e6 CW |
2376 | static void ggtt_unbind_vma(struct i915_vma *vma) |
2377 | { | |
2378 | struct drm_i915_private *i915 = vma->vm->i915; | |
2379 | ||
2380 | intel_runtime_pm_get(i915); | |
2381 | vma->vm->clear_range(vma->vm, vma->node.start, vma->size); | |
2382 | intel_runtime_pm_put(i915); | |
2383 | } | |
2384 | ||
0a878716 DV |
2385 | static int aliasing_gtt_bind_vma(struct i915_vma *vma, |
2386 | enum i915_cache_level cache_level, | |
2387 | u32 flags) | |
d5bd1449 | 2388 | { |
49d73912 | 2389 | struct drm_i915_private *i915 = vma->vm->i915; |
321d178e | 2390 | u32 pte_flags; |
ff685975 | 2391 | int ret; |
70b9f6f8 | 2392 | |
ba7a5741 | 2393 | if (unlikely(!vma->pages)) { |
ff685975 | 2394 | ret = i915_get_ggtt_vma_pages(vma); |
ba7a5741 CW |
2395 | if (ret) |
2396 | return ret; | |
2397 | } | |
7faf1ab2 | 2398 | |
24f3a8cf | 2399 | /* Currently applicable only to VLV */ |
321d178e CW |
2400 | pte_flags = 0; |
2401 | if (vma->obj->gt_ro) | |
f329f5f6 | 2402 | pte_flags |= PTE_READ_ONLY; |
24f3a8cf | 2403 | |
ff685975 CW |
2404 | if (flags & I915_VMA_LOCAL_BIND) { |
2405 | struct i915_hw_ppgtt *appgtt = i915->mm.aliasing_ppgtt; | |
2406 | ||
2f720aac MA |
2407 | if (!(vma->flags & I915_VMA_LOCAL_BIND) && |
2408 | appgtt->base.allocate_va_range) { | |
ff685975 CW |
2409 | ret = appgtt->base.allocate_va_range(&appgtt->base, |
2410 | vma->node.start, | |
171d8b93 | 2411 | vma->size); |
ff685975 | 2412 | if (ret) |
2f7399af | 2413 | goto err_pages; |
ff685975 CW |
2414 | } |
2415 | ||
2416 | appgtt->base.insert_entries(&appgtt->base, | |
2417 | vma->pages, vma->node.start, | |
2418 | cache_level, pte_flags); | |
2419 | } | |
2420 | ||
3272db53 | 2421 | if (flags & I915_VMA_GLOBAL_BIND) { |
9c870d03 | 2422 | intel_runtime_pm_get(i915); |
321d178e | 2423 | vma->vm->insert_entries(vma->vm, |
247177dd | 2424 | vma->pages, vma->node.start, |
0875546c | 2425 | cache_level, pte_flags); |
9c870d03 | 2426 | intel_runtime_pm_put(i915); |
6f65e29a | 2427 | } |
d5bd1449 | 2428 | |
70b9f6f8 | 2429 | return 0; |
2f7399af CW |
2430 | |
2431 | err_pages: | |
2432 | if (!(vma->flags & (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND))) { | |
2433 | if (vma->pages != vma->obj->mm.pages) { | |
2434 | GEM_BUG_ON(!vma->pages); | |
2435 | sg_free_table(vma->pages); | |
2436 | kfree(vma->pages); | |
2437 | } | |
2438 | vma->pages = NULL; | |
2439 | } | |
2440 | return ret; | |
d5bd1449 CW |
2441 | } |
2442 | ||
cbc4e9e6 | 2443 | static void aliasing_gtt_unbind_vma(struct i915_vma *vma) |
74163907 | 2444 | { |
49d73912 | 2445 | struct drm_i915_private *i915 = vma->vm->i915; |
6f65e29a | 2446 | |
9c870d03 CW |
2447 | if (vma->flags & I915_VMA_GLOBAL_BIND) { |
2448 | intel_runtime_pm_get(i915); | |
cbc4e9e6 | 2449 | vma->vm->clear_range(vma->vm, vma->node.start, vma->size); |
9c870d03 CW |
2450 | intel_runtime_pm_put(i915); |
2451 | } | |
06615ee5 | 2452 | |
cbc4e9e6 CW |
2453 | if (vma->flags & I915_VMA_LOCAL_BIND) { |
2454 | struct i915_address_space *vm = &i915->mm.aliasing_ppgtt->base; | |
2455 | ||
2456 | vm->clear_range(vm, vma->node.start, vma->size); | |
2457 | } | |
74163907 DV |
2458 | } |
2459 | ||
03ac84f1 CW |
2460 | void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj, |
2461 | struct sg_table *pages) | |
7c2e6fdf | 2462 | { |
52a05c30 DW |
2463 | struct drm_i915_private *dev_priv = to_i915(obj->base.dev); |
2464 | struct device *kdev = &dev_priv->drm.pdev->dev; | |
307dc25b | 2465 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
5c042287 | 2466 | |
307dc25b | 2467 | if (unlikely(ggtt->do_idle_maps)) { |
228ec87c | 2468 | if (i915_gem_wait_for_idle(dev_priv, 0)) { |
307dc25b CW |
2469 | DRM_ERROR("Failed to wait for idle; VT'd may hang.\n"); |
2470 | /* Wait a bit, in hopes it avoids the hang */ | |
2471 | udelay(10); | |
2472 | } | |
2473 | } | |
5c042287 | 2474 | |
03ac84f1 | 2475 | dma_unmap_sg(kdev, pages->sgl, pages->nents, PCI_DMA_BIDIRECTIONAL); |
7c2e6fdf | 2476 | } |
644ec02b | 2477 | |
45b186f1 | 2478 | static void i915_gtt_color_adjust(const struct drm_mm_node *node, |
42d6ab48 | 2479 | unsigned long color, |
440fd528 TR |
2480 | u64 *start, |
2481 | u64 *end) | |
42d6ab48 | 2482 | { |
a6508ded | 2483 | if (node->allocated && node->color != color) |
f51455d4 | 2484 | *start += I915_GTT_PAGE_SIZE; |
42d6ab48 | 2485 | |
a6508ded CW |
2486 | /* Also leave a space between the unallocated reserved node after the |
2487 | * GTT and any objects within the GTT, i.e. we use the color adjustment | |
2488 | * to insert a guard page to prevent prefetches crossing over the | |
2489 | * GTT boundary. | |
2490 | */ | |
b44f97fd | 2491 | node = list_next_entry(node, node_list); |
a6508ded | 2492 | if (node->color != color) |
f51455d4 | 2493 | *end -= I915_GTT_PAGE_SIZE; |
42d6ab48 | 2494 | } |
fbe5d36e | 2495 | |
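/*
 * Worked example (editor's addition): suppose a hole spans
 * [0x10000, 0x20000) and the node preceding it carries a different
 * cache color than the object being placed.  *start is bumped to
 * 0x11000, leaving one unused GTT page between the differently
 * colored neighbours; likewise *end shrinks by a page when the
 * following node differs.  The net effect is a prefetch guard page
 * on either side.
 */
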
6cde9a02 CW |
2496 | int i915_gem_init_aliasing_ppgtt(struct drm_i915_private *i915) |
2497 | { | |
2498 | struct i915_ggtt *ggtt = &i915->ggtt; | |
2499 | struct i915_hw_ppgtt *ppgtt; | |
2500 | int err; | |
2501 | ||
57202f47 | 2502 | ppgtt = i915_ppgtt_create(i915, ERR_PTR(-EPERM), "[alias]"); |
1188bc66 CW |
2503 | if (IS_ERR(ppgtt)) |
2504 | return PTR_ERR(ppgtt); | |
6cde9a02 | 2505 | |
e565ceb0 CW |
2506 | if (WARN_ON(ppgtt->base.total < ggtt->base.total)) { |
2507 | err = -ENODEV; | |
2508 | goto err_ppgtt; | |
2509 | } | |
2510 | ||
6cde9a02 | 2511 | if (ppgtt->base.allocate_va_range) { |
e565ceb0 CW |
2512 | /* Note we only pre-allocate as far as the end of the global |
2513 | * GTT. On 48b / 4-level page-tables, the difference is very, | |
2514 | * very significant! We have to preallocate as GVT/vgpu does | |
2515 | * not like the page directory disappearing. | |
2516 | */ | |
6cde9a02 | 2517 | err = ppgtt->base.allocate_va_range(&ppgtt->base, |
e565ceb0 | 2518 | 0, ggtt->base.total); |
6cde9a02 | 2519 | if (err) |
1188bc66 | 2520 | goto err_ppgtt; |
6cde9a02 CW |
2521 | } |
2522 | ||
6cde9a02 | 2523 | i915->mm.aliasing_ppgtt = ppgtt; |
cbc4e9e6 | 2524 | |
6cde9a02 CW |
2525 | WARN_ON(ggtt->base.bind_vma != ggtt_bind_vma); |
2526 | ggtt->base.bind_vma = aliasing_gtt_bind_vma; | |
2527 | ||
cbc4e9e6 CW |
2528 | WARN_ON(ggtt->base.unbind_vma != ggtt_unbind_vma); |
2529 | ggtt->base.unbind_vma = aliasing_gtt_unbind_vma; | |
2530 | ||
6cde9a02 CW |
2531 | return 0; |
2532 | ||
6cde9a02 | 2533 | err_ppgtt: |
1188bc66 | 2534 | i915_ppgtt_put(ppgtt); |
6cde9a02 CW |
2535 | return err; |
2536 | } | |
2537 | ||
2538 | void i915_gem_fini_aliasing_ppgtt(struct drm_i915_private *i915) | |
2539 | { | |
2540 | struct i915_ggtt *ggtt = &i915->ggtt; | |
2541 | struct i915_hw_ppgtt *ppgtt; | |
2542 | ||
2543 | ppgtt = fetch_and_zero(&i915->mm.aliasing_ppgtt); | |
2544 | if (!ppgtt) | |
2545 | return; | |
2546 | ||
1188bc66 | 2547 | i915_ppgtt_put(ppgtt); |
6cde9a02 CW |
2548 | |
2549 | ggtt->base.bind_vma = ggtt_bind_vma; | |
cbc4e9e6 | 2550 | ggtt->base.unbind_vma = ggtt_unbind_vma; |
6cde9a02 CW |
2551 | } |
2552 | ||
f6b9d5ca | 2553 | int i915_gem_init_ggtt(struct drm_i915_private *dev_priv) |
644ec02b | 2554 | { |
e78891ca BW |
2555 | /* Let GEM manage all of the aperture.
2556 | * | |
2557 | * However, leave one page at the end still bound to the scratch page. | |
2558 | * There are a number of places where the hardware apparently prefetches | |
2559 | * past the end of the object, and we've seen multiple hangs with the | |
2560 | * GPU head pointer stuck in a batchbuffer bound at the last page of the | |
2561 | * aperture. One page should be enough to keep any prefetching inside | |
2562 | * of the aperture. | |
2563 | */ | |
72e96d64 | 2564 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
ed2f3452 | 2565 | unsigned long hole_start, hole_end; |
f6b9d5ca | 2566 | struct drm_mm_node *entry; |
fa76da34 | 2567 | int ret; |
644ec02b | 2568 | |
b02d22a3 ZW |
2569 | ret = intel_vgt_balloon(dev_priv); |
2570 | if (ret) | |
2571 | return ret; | |
5dda8fa3 | 2572 | |
95374d75 | 2573 | /* Reserve a mappable slot for our lockless error capture */ |
4e64e553 CW |
2574 | ret = drm_mm_insert_node_in_range(&ggtt->base.mm, &ggtt->error_capture, |
2575 | PAGE_SIZE, 0, I915_COLOR_UNEVICTABLE, | |
2576 | 0, ggtt->mappable_end, | |
2577 | DRM_MM_INSERT_LOW); | |
95374d75 CW |
2578 | if (ret) |
2579 | return ret; | |
2580 | ||
ed2f3452 | 2581 | /* Clear any non-preallocated blocks */ |
72e96d64 | 2582 | drm_mm_for_each_hole(entry, &ggtt->base.mm, hole_start, hole_end) { |
ed2f3452 CW |
2583 | DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n", |
2584 | hole_start, hole_end); | |
72e96d64 | 2585 | ggtt->base.clear_range(&ggtt->base, hole_start, |
4fb84d99 | 2586 | hole_end - hole_start); |
ed2f3452 CW |
2587 | } |
2588 | ||
2589 | /* And finally clear the reserved guard page */ | |
f6b9d5ca | 2590 | ggtt->base.clear_range(&ggtt->base, |
4fb84d99 | 2591 | ggtt->base.total - PAGE_SIZE, PAGE_SIZE); |
6c5566a8 | 2592 | |
97d6d7ab | 2593 | if (USES_PPGTT(dev_priv) && !USES_FULL_PPGTT(dev_priv)) { |
6cde9a02 | 2594 | ret = i915_gem_init_aliasing_ppgtt(dev_priv); |
95374d75 | 2595 | if (ret) |
6cde9a02 | 2596 | goto err; |
fa76da34 DV |
2597 | } |
2598 | ||
6c5566a8 | 2599 | return 0; |
95374d75 | 2600 | |
95374d75 CW |
2601 | err: |
2602 | drm_mm_remove_node(&ggtt->error_capture); | |
2603 | return ret; | |
e76e9aeb BW |
2604 | } |
2605 | ||
d85489d3 JL |
2606 | /** |
2607 | * i915_ggtt_cleanup_hw - Clean up GGTT hardware initialization | |
97d6d7ab | 2608 | * @dev_priv: i915 device |
d85489d3 | 2609 | */ |
97d6d7ab | 2610 | void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv) |
90d0a0e8 | 2611 | { |
72e96d64 | 2612 | struct i915_ggtt *ggtt = &dev_priv->ggtt; |
94d4a2a9 CW |
2613 | struct i915_vma *vma, *vn; |
2614 | ||
2615 | ggtt->base.closed = true; | |
2616 | ||
2617 | mutex_lock(&dev_priv->drm.struct_mutex); | |
2618 | WARN_ON(!list_empty(&ggtt->base.active_list)); | |
2619 | list_for_each_entry_safe(vma, vn, &ggtt->base.inactive_list, vm_link) | |
2620 | WARN_ON(i915_vma_unbind(vma)); | |
2621 | mutex_unlock(&dev_priv->drm.struct_mutex); | |
90d0a0e8 | 2622 | |
97d6d7ab | 2623 | i915_gem_cleanup_stolen(&dev_priv->drm); |
a4eba47b | 2624 | |
1188bc66 CW |
2625 | mutex_lock(&dev_priv->drm.struct_mutex); |
2626 | i915_gem_fini_aliasing_ppgtt(dev_priv); | |
2627 | ||
95374d75 CW |
2628 | if (drm_mm_node_allocated(&ggtt->error_capture)) |
2629 | drm_mm_remove_node(&ggtt->error_capture); | |
2630 | ||
72e96d64 | 2631 | if (drm_mm_initialized(&ggtt->base.mm)) { |
b02d22a3 | 2632 | intel_vgt_deballoon(dev_priv); |
ed9724dd | 2633 | i915_address_space_fini(&ggtt->base); |
90d0a0e8 DV |
2634 | } |
2635 | ||
72e96d64 | 2636 | ggtt->base.cleanup(&ggtt->base); |
1188bc66 | 2637 | mutex_unlock(&dev_priv->drm.struct_mutex); |
f6b9d5ca CW |
2638 | |
2639 | arch_phys_wc_del(ggtt->mtrr); | |
f7bbe788 | 2640 | io_mapping_fini(&ggtt->mappable); |
90d0a0e8 | 2641 | } |
70e32544 | 2642 | |
2c642b07 | 2643 | static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl) |
e76e9aeb BW |
2644 | { |
2645 | snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT; | |
2646 | snb_gmch_ctl &= SNB_GMCH_GGMS_MASK; | |
2647 | return snb_gmch_ctl << 20; | |
2648 | } | |
2649 | ||
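/*
 * Worked example (editor's addition): GGMS = 2 decodes to
 * 2 << 20 = 2MB of GTT page-table space in the GSM.  At 4 bytes per
 * gen6 PTE that is 512K entries, which gen6_gmch_probe() below turns
 * into a (2MB / 4) << PAGE_SHIFT = 2GB GGTT aperture.
 */
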
2c642b07 | 2650 | static unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl) |
9459d252 BW |
2651 | { |
2652 | bdw_gmch_ctl >>= BDW_GMCH_GGMS_SHIFT; | |
2653 | bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK; | |
2654 | if (bdw_gmch_ctl) | |
2655 | bdw_gmch_ctl = 1 << bdw_gmch_ctl; | |
562d55d9 BW |
2656 | |
2657 | #ifdef CONFIG_X86_32 | |
2658 | /* Limit 32b platforms to a 2GB GGTT: 4 << 20 / pte size * PAGE_SIZE */ | |
2659 | if (bdw_gmch_ctl > 4) | |
2660 | bdw_gmch_ctl = 4; | |
2661 | #endif | |
2662 | ||
9459d252 BW |
2663 | return bdw_gmch_ctl << 20; |
2664 | } | |
2665 | ||
2c642b07 | 2666 | static unsigned int chv_get_total_gtt_size(u16 gmch_ctrl) |
d7f25f23 DL |
2667 | { |
2668 | gmch_ctrl >>= SNB_GMCH_GGMS_SHIFT; | |
2669 | gmch_ctrl &= SNB_GMCH_GGMS_MASK; | |
2670 | ||
2671 | if (gmch_ctrl) | |
2672 | return 1 << (20 + gmch_ctrl); | |
2673 | ||
2674 | return 0; | |
2675 | } | |
2676 | ||
2c642b07 | 2677 | static size_t gen6_get_stolen_size(u16 snb_gmch_ctl) |
e76e9aeb BW |
2678 | { |
2679 | snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT; | |
2680 | snb_gmch_ctl &= SNB_GMCH_GMS_MASK; | |
2681 | return snb_gmch_ctl << 25; /* 32 MB units */ | |
2682 | } | |
2683 | ||
2c642b07 | 2684 | static size_t gen8_get_stolen_size(u16 bdw_gmch_ctl) |
9459d252 BW |
2685 | { |
2686 | bdw_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; | |
2687 | bdw_gmch_ctl &= BDW_GMCH_GMS_MASK; | |
2688 | return bdw_gmch_ctl << 25; /* 32 MB units */ | |
2689 | } | |
2690 | ||
d7f25f23 DL |
2691 | static size_t chv_get_stolen_size(u16 gmch_ctrl) |
2692 | { | |
2693 | gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; | |
2694 | gmch_ctrl &= SNB_GMCH_GMS_MASK; | |
2695 | ||
2696 | /* | |
2697 | * 0x0 to 0x10: 32MB increments starting at 0MB | |
2698 | * 0x11 to 0x16: 4MB increments starting at 8MB | |
2699 | * 0x17 to 0x1d: 4MB increments starting at 36MB
2700 | */ | |
2701 | if (gmch_ctrl < 0x11) | |
2702 | return gmch_ctrl << 25; | |
2703 | else if (gmch_ctrl < 0x17) | |
2704 | return (gmch_ctrl - 0x11 + 2) << 22; | |
2705 | else | |
2706 | return (gmch_ctrl - 0x17 + 9) << 22; | |
2707 | } | |
2708 | ||
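/*
 * Spot checks for the decode above (editor's addition):
 *   gmch_ctrl = 0x10 ->  0x10 << 25              = 512MB (32MB steps)
 *   gmch_ctrl = 0x11 -> (0x11 - 0x11 + 2) << 22  =   8MB
 *   gmch_ctrl = 0x16 -> (0x16 - 0x11 + 2) << 22  =  28MB
 *   gmch_ctrl = 0x17 -> (0x17 - 0x17 + 9) << 22  =  36MB
 */
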
66375014 DL |
2709 | static size_t gen9_get_stolen_size(u16 gen9_gmch_ctl) |
2710 | { | |
2711 | gen9_gmch_ctl >>= BDW_GMCH_GMS_SHIFT; | |
2712 | gen9_gmch_ctl &= BDW_GMCH_GMS_MASK; | |
2713 | ||
2714 | if (gen9_gmch_ctl < 0xf0) | |
2715 | return gen9_gmch_ctl << 25; /* 32 MB units */ | |
2716 | else | |
2717 | /* 4MB increments starting at 0xf0 for 4MB */ | |
2718 | return (gen9_gmch_ctl - 0xf0 + 1) << 22; | |
2719 | } | |
2720 | ||
34c998b4 | 2721 | static int ggtt_probe_common(struct i915_ggtt *ggtt, u64 size) |
63340133 | 2722 | { |
49d73912 CW |
2723 | struct drm_i915_private *dev_priv = ggtt->base.i915; |
2724 | struct pci_dev *pdev = dev_priv->drm.pdev; | |
34c998b4 | 2725 | phys_addr_t phys_addr; |
8bcdd0f7 | 2726 | int ret; |
63340133 BW |
2727 | |
2728 | /* For Modern GENs the PTEs and register space are split in the BAR */ | |
34c998b4 | 2729 | phys_addr = pci_resource_start(pdev, 0) + pci_resource_len(pdev, 0) / 2; |
63340133 | 2730 | |
2a073f89 ID |
2731 | /* |
2732 | * On BXT writes larger than 64 bit to the GTT pagetable range will be | |
2733 | * dropped. For WC mappings in general we have 64 byte burst writes | |
2734 | * when the WC buffer is flushed, so we can't use it, but have to | |
2735 | * resort to an uncached mapping. The WC issue is easily caught by the | |
2736 | * readback check when writing GTT PTE entries. | |
2737 | */ | |
cc3f90f0 | 2738 | if (IS_GEN9_LP(dev_priv)) |
34c998b4 | 2739 | ggtt->gsm = ioremap_nocache(phys_addr, size); |
2a073f89 | 2740 | else |
34c998b4 | 2741 | ggtt->gsm = ioremap_wc(phys_addr, size); |
72e96d64 | 2742 | if (!ggtt->gsm) { |
34c998b4 | 2743 | DRM_ERROR("Failed to map the ggtt page table\n"); |
63340133 BW |
2744 | return -ENOMEM; |
2745 | } | |
2746 | ||
8448661d | 2747 | ret = setup_scratch_page(&ggtt->base, GFP_DMA32); |
8bcdd0f7 | 2748 | if (ret) { |
63340133 BW |
2749 | DRM_ERROR("Scratch setup failed\n"); |
2750 | /* iounmap will also get called at remove, but meh */ | |
72e96d64 | 2751 | iounmap(ggtt->gsm); |
8bcdd0f7 | 2752 | return ret; |
63340133 BW |
2753 | } |
2754 | ||
4ad2af1e | 2755 | return 0; |
63340133 BW |
2756 | } |
2757 | ||
fbe5d36e BW |
2758 | /* The GGTT and PPGTT need a private PPAT setup in order to handle cacheability |
2759 | * bits. When using advanced contexts each context stores its own PAT, but | |
2760 | * writing this data shouldn't be harmful even in those cases. */ | |
ee0ce478 | 2761 | static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv) |
fbe5d36e | 2762 | { |
75c7b0b8 | 2763 | u64 pat; |
fbe5d36e BW |
2764 | |
2765 | pat = GEN8_PPAT(0, GEN8_PPAT_WB | GEN8_PPAT_LLC) | /* for normal objects, no eLLC */ | |
2766 | GEN8_PPAT(1, GEN8_PPAT_WC | GEN8_PPAT_LLCELLC) | /* for something pointing to ptes? */ | |
2767 | GEN8_PPAT(2, GEN8_PPAT_WT | GEN8_PPAT_LLCELLC) | /* for scanout with eLLC */ | |
2768 | GEN8_PPAT(3, GEN8_PPAT_UC) | /* Uncached objects, mostly for scanout */ | |
2769 | GEN8_PPAT(4, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(0)) | | |
2770 | GEN8_PPAT(5, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(1)) | | |
2771 | GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | | |
2772 | GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); | |
2773 | ||
2d1fe073 | 2774 | if (!USES_PPGTT(dev_priv)) |
d6a8b72e RV |
2775 | /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry, |
2776 | * so RTL will always use the value corresponding to | |
2777 | * pat_sel = 000". | |
2778 | * So let's disable cache for GGTT to avoid screen corruptions. | |
2779 | * MOCS still can be used though. | |
2780 | * - System agent ggtt writes (i.e. cpu gtt mmaps) already work | |
2781 | * before this patch, i.e. the same uncached + snooping access | |
2782 | * like on gen6/7 seems to be in effect. | |
2783 | * - So this just fixes blitter/render access. Again it looks | |
2784 | * like it's not just uncached access, but uncached + snooping. | |
2785 | * So we can still hold onto all our assumptions wrt cpu | |
2786 | * clflushing on LLC machines. | |
2787 | */ | |
2788 | pat = GEN8_PPAT(0, GEN8_PPAT_UC); | |
2789 | ||
fbe5d36e BW |
2790 | /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b |
2791 | * write would work. */ | |
7e435ad2 VS |
2792 | I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); |
2793 | I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); | |
fbe5d36e BW |
2794 | } |
2795 | ||
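/*
 * Editor's note: GEN8_PPAT(i, x) packs entry i into bits [8i+7:8i] of
 * the 64-bit PAT word, so e.g. GEN8_PPAT(3, GEN8_PPAT_UC) above lands
 * in bits 31:24 of GEN8_PRIVATE_PAT_LO, while entries 4-7 reach
 * GEN8_PRIVATE_PAT_HI via the pat >> 32 write.
 */
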
ee0ce478 VS |
2796 | static void chv_setup_private_ppat(struct drm_i915_private *dev_priv) |
2797 | { | |
75c7b0b8 | 2798 | u64 pat; |
ee0ce478 VS |
2799 | |
2800 | /* | |
2801 | * Map WB on BDW to snooped on CHV. | |
2802 | * | |
2803 | * Only the snoop bit has meaning for CHV, the rest is | |
2804 | * ignored. | |
2805 | * | |
cf3d262e VS |
2806 | * The hardware will never snoop for certain types of accesses: |
2807 | * - CPU GTT (GMADR->GGTT->no snoop->memory) | |
2808 | * - PPGTT page tables | |
2809 | * - some other special cycles | |
2810 | * | |
2811 | * As with BDW, we also need to consider the following for GT accesses: | |
2812 | * "For GGTT, there is NO pat_sel[2:0] from the entry, | |
2813 | * so RTL will always use the value corresponding to | |
2814 | * pat_sel = 000". | |
2815 | * Which means we must set the snoop bit in PAT entry 0 | |
2816 | * in order to keep the global status page working. | |
ee0ce478 VS |
2817 | */ |
2818 | pat = GEN8_PPAT(0, CHV_PPAT_SNOOP) | | |
2819 | GEN8_PPAT(1, 0) | | |
2820 | GEN8_PPAT(2, 0) | | |
2821 | GEN8_PPAT(3, 0) | | |
2822 | GEN8_PPAT(4, CHV_PPAT_SNOOP) | | |
2823 | GEN8_PPAT(5, CHV_PPAT_SNOOP) | | |
2824 | GEN8_PPAT(6, CHV_PPAT_SNOOP) | | |
2825 | GEN8_PPAT(7, CHV_PPAT_SNOOP); | |
2826 | ||
7e435ad2 VS |
2827 | I915_WRITE(GEN8_PRIVATE_PAT_LO, pat); |
2828 | I915_WRITE(GEN8_PRIVATE_PAT_HI, pat >> 32); | |
ee0ce478 VS |
2829 | } |
2830 | ||
34c998b4 CW |
2831 | static void gen6_gmch_remove(struct i915_address_space *vm) |
2832 | { | |
2833 | struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm); | |
2834 | ||
2835 | iounmap(ggtt->gsm); | |
8448661d | 2836 | cleanup_scratch_page(vm); |
34c998b4 CW |
2837 | } |

static int gen8_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;

	/* TODO: We're not aware of mappable constraints on gen8 yet */
	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(39)))
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(39));

	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	if (INTEL_GEN(dev_priv) >= 9) {
		ggtt->stolen_size = gen9_get_stolen_size(snb_gmch_ctl);
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		ggtt->stolen_size = chv_get_stolen_size(snb_gmch_ctl);
		size = chv_get_total_gtt_size(snb_gmch_ctl);
	} else {
		ggtt->stolen_size = gen8_get_stolen_size(snb_gmch_ctl);
		size = gen8_get_total_gtt_size(snb_gmch_ctl);
	}

	ggtt->base.total = (size / sizeof(gen8_pte_t)) << PAGE_SHIFT;

	if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
		chv_setup_private_ppat(dev_priv);
	else
		bdw_setup_private_ppat(dev_priv);

	ggtt->base.cleanup = gen6_gmch_remove;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.insert_page = gen8_ggtt_insert_page;
	ggtt->base.clear_range = nop_clear_range;
	if (!USES_FULL_PPGTT(dev_priv) || intel_scanout_needs_vtd_wa(dev_priv))
		ggtt->base.clear_range = gen8_ggtt_clear_range;

	ggtt->base.insert_entries = gen8_ggtt_insert_entries;

	/* Serialize GTT updates with aperture access on BXT if VT-d is on. */
	if (intel_ggtt_update_needs_vtd_wa(dev_priv)) {
		ggtt->base.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
		ggtt->base.insert_page = bxt_vtd_ggtt_insert_page__BKL;
		if (ggtt->base.clear_range != nop_clear_range)
			ggtt->base.clear_range = bxt_vtd_ggtt_clear_range__BKL;
	}

	ggtt->invalidate = gen6_ggtt_invalidate;

	return ggtt_probe_common(ggtt, size);
}
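
/*
 * Worked example (illustrative): gen8 PTEs are 64-bit, so with an 8 MiB
 * GTT decoded from SNB_GMCH_CTRL the computation above yields
 * (8 MiB / 8) << PAGE_SHIFT = 1M entries * 4 KiB = 4 GiB of GGTT space.
 */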

static int gen6_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	struct pci_dev *pdev = dev_priv->drm.pdev;
	unsigned int size;
	u16 snb_gmch_ctl;

	ggtt->mappable_base = pci_resource_start(pdev, 2);
	ggtt->mappable_end = pci_resource_len(pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if (ggtt->mappable_end < (64<<20) || ggtt->mappable_end > (512<<20)) {
		DRM_ERROR("Unknown GMADR size (%llx)\n", ggtt->mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
	pci_read_config_word(pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);

	ggtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);

	size = gen6_get_total_gtt_size(snb_gmch_ctl);
	ggtt->base.total = (size / sizeof(gen6_pte_t)) << PAGE_SHIFT;

	ggtt->base.clear_range = gen6_ggtt_clear_range;
	ggtt->base.insert_page = gen6_ggtt_insert_page;
	ggtt->base.insert_entries = gen6_ggtt_insert_entries;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.cleanup = gen6_gmch_remove;

	ggtt->invalidate = gen6_ggtt_invalidate;

	if (HAS_EDRAM(dev_priv))
		ggtt->base.pte_encode = iris_pte_encode;
	else if (IS_HASWELL(dev_priv))
		ggtt->base.pte_encode = hsw_pte_encode;
	else if (IS_VALLEYVIEW(dev_priv))
		ggtt->base.pte_encode = byt_pte_encode;
	else if (INTEL_GEN(dev_priv) >= 7)
		ggtt->base.pte_encode = ivb_pte_encode;
	else
		ggtt->base.pte_encode = snb_pte_encode;

	return ggtt_probe_common(ggtt, size);
}
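
/*
 * Worked example (illustrative): gen6 PTEs are 32-bit, so a 2 MiB GTT
 * maps (2 MiB / 4) << PAGE_SHIFT = 512K entries * 4 KiB = 2 GiB of GGTT
 * space in the computation above.
 */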

static void i915_gmch_remove(struct i915_address_space *vm)
{
	intel_gmch_remove();
}

static int i915_gmch_probe(struct i915_ggtt *ggtt)
{
	struct drm_i915_private *dev_priv = ggtt->base.i915;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->drm.pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(&ggtt->base.total,
		      &ggtt->stolen_size,
		      &ggtt->mappable_base,
		      &ggtt->mappable_end);

	ggtt->do_idle_maps = needs_idle_maps(dev_priv);
	ggtt->base.insert_page = i915_ggtt_insert_page;
	ggtt->base.insert_entries = i915_ggtt_insert_entries;
	ggtt->base.clear_range = i915_ggtt_clear_range;
	ggtt->base.bind_vma = ggtt_bind_vma;
	ggtt->base.unbind_vma = ggtt_unbind_vma;
	ggtt->base.cleanup = i915_gmch_remove;

	ggtt->invalidate = gmch_ggtt_invalidate;

	if (unlikely(ggtt->do_idle_maps))
		DRM_INFO("applying Ironlake quirks for intel_iommu\n");

	return 0;
}

/**
 * i915_ggtt_probe_hw - Probe GGTT hardware location
 * @dev_priv: i915 device
 */
int i915_ggtt_probe_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	ggtt->base.i915 = dev_priv;
	ggtt->base.dma = &dev_priv->drm.pdev->dev;

	if (INTEL_GEN(dev_priv) <= 5)
		ret = i915_gmch_probe(ggtt);
	else if (INTEL_GEN(dev_priv) < 8)
		ret = gen6_gmch_probe(ggtt);
	else
		ret = gen8_gmch_probe(ggtt);
	if (ret)
		return ret;

	/* Trim the GGTT to fit the GuC mappable upper range (when enabled).
	 * This is easier than doing range restriction on the fly, as we
	 * currently don't have any bits spare to pass in this upper
	 * restriction!
	 */
	if (HAS_GUC(dev_priv) && i915.enable_guc_loading) {
		ggtt->base.total = min_t(u64, ggtt->base.total, GUC_GGTT_TOP);
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	if ((ggtt->base.total - 1) >> 32) {
		DRM_ERROR("We never expected a Global GTT with more than 32bits"
			  " of address space! Found %lldM!\n",
			  ggtt->base.total >> 20);
		ggtt->base.total = 1ULL << 32;
		ggtt->mappable_end = min(ggtt->mappable_end, ggtt->base.total);
	}

	if (ggtt->mappable_end > ggtt->base.total) {
		DRM_ERROR("mappable aperture extends past end of GGTT,"
			  " aperture=%llx, total=%llx\n",
			  ggtt->mappable_end, ggtt->base.total);
		ggtt->mappable_end = ggtt->base.total;
	}

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %lluM\n",
		 ggtt->base.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %uM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped)
		DRM_INFO("VT-d active for gfx access\n");
#endif

	return 0;
}

/**
 * i915_ggtt_init_hw - Initialize GGTT hardware
 * @dev_priv: i915 device
 */
int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int ret;

	INIT_LIST_HEAD(&dev_priv->vm_list);

	/* Note that we use page colouring to enforce a guard page at the
	 * end of the address space. This is required as the CS may prefetch
	 * beyond the end of the batch buffer, across the page boundary,
	 * and beyond the end of the GTT if we do not provide a guard.
	 */
	mutex_lock(&dev_priv->drm.struct_mutex);
	i915_address_space_init(&ggtt->base, dev_priv, "[global]");
	if (!HAS_LLC(dev_priv) && !USES_PPGTT(dev_priv))
		ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
	mutex_unlock(&dev_priv->drm.struct_mutex);

	if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
				dev_priv->ggtt.mappable_base,
				dev_priv->ggtt.mappable_end)) {
		ret = -EIO;
		goto out_gtt_cleanup;
	}

	ggtt->mtrr = arch_phys_wc_add(ggtt->mappable_base, ggtt->mappable_end);

	/*
	 * Initialise stolen early so that we may reserve preallocated
	 * objects for the BIOS to KMS transition.
	 */
	ret = i915_gem_init_stolen(dev_priv);
	if (ret)
		goto out_gtt_cleanup;

	return 0;

out_gtt_cleanup:
	ggtt->base.cleanup(&ggtt->base);
	return ret;
}

int i915_ggtt_enable_hw(struct drm_i915_private *dev_priv)
{
	if (INTEL_GEN(dev_priv) < 6 && !intel_enable_gtt())
		return -EIO;

	return 0;
}

void i915_ggtt_enable_guc(struct drm_i915_private *i915)
{
	i915->ggtt.invalidate = guc_ggtt_invalidate;
}

void i915_ggtt_disable_guc(struct drm_i915_private *i915)
{
	if (i915->ggtt.invalidate == guc_ggtt_invalidate)
		i915->ggtt.invalidate = gen6_ggtt_invalidate;
}

void i915_gem_restore_gtt_mappings(struct drm_i915_private *dev_priv)
{
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj, *on;

	i915_check_and_clear_faults(dev_priv);

	/* First fill our portion of the GTT with scratch pages */
	ggtt->base.clear_range(&ggtt->base, 0, ggtt->base.total);

	ggtt->base.closed = true; /* skip rewriting PTE on VMA unbind */

	/* clflush objects bound into the GGTT and rebind them. */
	list_for_each_entry_safe(obj, on,
				 &dev_priv->mm.bound_list, global_link) {
		bool ggtt_bound = false;
		struct i915_vma *vma;

		list_for_each_entry(vma, &obj->vma_list, obj_link) {
			if (vma->vm != &ggtt->base)
				continue;

			if (!i915_vma_unbind(vma))
				continue;

			WARN_ON(i915_vma_bind(vma, obj->cache_level,
					      PIN_UPDATE));
			ggtt_bound = true;
		}

		if (ggtt_bound)
			WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
	}

	ggtt->base.closed = false;

	if (INTEL_GEN(dev_priv) >= 8) {
		if (IS_CHERRYVIEW(dev_priv) || IS_GEN9_LP(dev_priv))
			chv_setup_private_ppat(dev_priv);
		else
			bdw_setup_private_ppat(dev_priv);

		return;
	}

	if (USES_PPGTT(dev_priv)) {
		struct i915_address_space *vm;

		list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
			struct i915_hw_ppgtt *ppgtt;

			if (i915_is_ggtt(vm))
				ppgtt = dev_priv->mm.aliasing_ppgtt;
			else
				ppgtt = i915_vm_to_ppgtt(vm);

			gen6_write_page_range(ppgtt, 0, ppgtt->base.total);
		}
	}

	i915_ggtt_invalidate(dev_priv);
}

static struct scatterlist *
rotate_pages(const dma_addr_t *in, unsigned int offset,
	     unsigned int width, unsigned int height,
	     unsigned int stride,
	     struct sg_table *st, struct scatterlist *sg)
{
	unsigned int column, row;
	unsigned int src_idx;

	for (column = 0; column < width; column++) {
		src_idx = stride * (height - 1) + column;
		for (row = 0; row < height; row++) {
			st->nents++;
			/* We don't need the pages, but need to initialize
			 * the entries so the sg list can be happily traversed.
			 * The only thing we need are DMA addresses.
			 */
			sg_set_page(sg, NULL, PAGE_SIZE, 0);
			sg_dma_address(sg) = in[offset + src_idx];
			sg_dma_len(sg) = PAGE_SIZE;
			sg = sg_next(sg);
			src_idx -= stride;
		}
	}

	return sg;
}
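
/*
 * Worked example (illustrative): for a single 2x2-tile plane laid out
 * row-major in memory as
 *
 *	0 1
 *	2 3
 *
 * (width = 2, height = 2, stride = 2), rotate_pages() walks each column
 * bottom-to-top and emits the DMA addresses in the order 2, 0, 3, 1,
 * i.e. the 90 degree rotated layout consumed by the display engine.
 */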

static noinline struct sg_table *
intel_rotate_pages(struct intel_rotation_info *rot_info,
		   struct drm_i915_gem_object *obj)
{
	const unsigned long n_pages = obj->base.size / PAGE_SIZE;
	unsigned int size = intel_rotation_info_size(rot_info);
	struct sgt_iter sgt_iter;
	dma_addr_t dma_addr;
	unsigned long i;
	dma_addr_t *page_addr_list;
	struct sg_table *st;
	struct scatterlist *sg;
	int ret = -ENOMEM;

	/* Allocate a temporary list of source pages for random access. */
	page_addr_list = drm_malloc_gfp(n_pages,
					sizeof(dma_addr_t),
					GFP_TEMPORARY);
	if (!page_addr_list)
		return ERR_PTR(ret);

	/* Allocate target SG list. */
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, size, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	/* Populate source page list from the object. */
	i = 0;
	for_each_sgt_dma(dma_addr, sgt_iter, obj->mm.pages)
		page_addr_list[i++] = dma_addr;

	GEM_BUG_ON(i != n_pages);
	st->nents = 0;
	sg = st->sgl;

	for (i = 0; i < ARRAY_SIZE(rot_info->plane); i++) {
		sg = rotate_pages(page_addr_list, rot_info->plane[i].offset,
				  rot_info->plane[i].width, rot_info->plane[i].height,
				  rot_info->plane[i].stride, st, sg);
	}

	DRM_DEBUG_KMS("Created rotated page mapping for object size %zu (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	drm_free_large(page_addr_list);

	return st;

err_sg_alloc:
	kfree(st);
err_st_alloc:
	drm_free_large(page_addr_list);

	DRM_DEBUG_KMS("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
		      obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);

	return ERR_PTR(ret);
}

static noinline struct sg_table *
intel_partial_pages(const struct i915_ggtt_view *view,
		    struct drm_i915_gem_object *obj)
{
	struct sg_table *st;
	struct scatterlist *sg, *iter;
	unsigned int count = view->partial.size;
	unsigned int offset;
	int ret = -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_st_alloc;

	ret = sg_alloc_table(st, count, GFP_KERNEL);
	if (ret)
		goto err_sg_alloc;

	iter = i915_gem_object_get_sg(obj, view->partial.offset, &offset);
	GEM_BUG_ON(!iter);

	sg = st->sgl;
	st->nents = 0;
	do {
		unsigned int len;

		len = min(iter->length - (offset << PAGE_SHIFT),
			  count << PAGE_SHIFT);
		sg_set_page(sg, NULL, len, 0);
		sg_dma_address(sg) =
			sg_dma_address(iter) + (offset << PAGE_SHIFT);
		sg_dma_len(sg) = len;

		st->nents++;
		count -= len >> PAGE_SHIFT;
		if (count == 0) {
			sg_mark_end(sg);
			return st;
		}

		sg = __sg_next(sg);
		iter = __sg_next(iter);
		offset = 0;
	} while (1);

err_sg_alloc:
	kfree(st);
err_st_alloc:
	return ERR_PTR(ret);
}
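
/*
 * Illustrative note: view->partial.offset and view->partial.size are in
 * pages, so a partial view with offset = 2 and size = 3 copies the DMA
 * addresses covering pages [2, 5) of the object into the new sg_table,
 * one contiguous chunk of the source sg_table at a time.
 */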

static int
i915_get_ggtt_vma_pages(struct i915_vma *vma)
{
	int ret;

	/* The vma->pages are only valid within the lifespan of the borrowed
	 * obj->mm.pages. When the obj->mm.pages sg_table is regenerated, so
	 * must be the vma->pages. A simple rule is that vma->pages must only
	 * be accessed when the obj->mm.pages are pinned.
	 */
	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(vma->obj));

	switch (vma->ggtt_view.type) {
	case I915_GGTT_VIEW_NORMAL:
		vma->pages = vma->obj->mm.pages;
		return 0;

	case I915_GGTT_VIEW_ROTATED:
		vma->pages =
			intel_rotate_pages(&vma->ggtt_view.rotated, vma->obj);
		break;

	case I915_GGTT_VIEW_PARTIAL:
		vma->pages = intel_partial_pages(&vma->ggtt_view, vma->obj);
		break;

	default:
		WARN_ONCE(1, "GGTT view %u not implemented!\n",
			  vma->ggtt_view.type);
		return -EINVAL;
	}

	ret = 0;
	if (unlikely(IS_ERR(vma->pages))) {
		ret = PTR_ERR(vma->pages);
		vma->pages = NULL;
		DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
			  vma->ggtt_view.type, ret);
	}
	return ret;
}
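
/*
 * Illustrative sketch, not driver code: a caller selects an alternative
 * view by filling in the vma's ggtt_view before binding, e.g. a partial
 * mapping of pages [2, 5) of an object (values made up for the example):
 *
 *	struct i915_ggtt_view view = {
 *		.type = I915_GGTT_VIEW_PARTIAL,
 *		.partial = { .offset = 2, .size = 3 },
 *	};
 */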

/**
 * i915_gem_gtt_reserve - reserve a node in an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @offset: where to insert inside the GTT,
 *          must be #I915_GTT_MIN_ALIGNMENT aligned, and the node
 *          (@offset + @size) must fit within the address space
 * @color: color to apply to node, if this node is not from a VMA,
 *         color must be #I915_COLOR_UNEVICTABLE
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_reserve() tries to insert the @node at the exact @offset inside
 * the address space (using @size and @color). If the @node does not fit, it
 * tries to evict any overlapping nodes from the GTT, including any
 * neighbouring nodes if the colors do not match (to ensure guard pages between
 * differing domains). See i915_gem_evict_for_node() for the gory details
 * on the eviction algorithm. #PIN_NONBLOCK may be used to prevent waiting on
 * evicting active overlapping objects, and any overlapping node that is pinned
 * or marked as unevictable will also result in failure.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_reserve(struct i915_address_space *vm,
			 struct drm_mm_node *node,
			 u64 size, u64 offset, unsigned long color,
			 unsigned int flags)
{
	int err;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(offset, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(range_overflows(offset, size, vm->total));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	node->size = size;
	node->start = offset;
	node->color = color;

	err = drm_mm_reserve_node(&vm->mm, node);
	if (err != -ENOSPC)
		return err;

	err = i915_gem_evict_for_node(vm, node, flags);
	if (err == 0)
		err = drm_mm_reserve_node(&vm->mm, node);

	return err;
}
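
/*
 * Illustrative sketch, not driver code: reserving a node at a fixed GGTT
 * offset while holding struct_mutex. The offset and size are made up for
 * the example; PIN_NONBLOCK avoids waiting on active overlapping objects.
 *
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	err = i915_gem_gtt_reserve(&ggtt->base, &node,
 *				   SZ_2M, SZ_16M, I915_COLOR_UNEVICTABLE,
 *				   PIN_NONBLOCK);
 */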

static u64 random_offset(u64 start, u64 end, u64 len, u64 align)
{
	u64 range, addr;

	GEM_BUG_ON(range_overflows(start, len, end));
	GEM_BUG_ON(round_up(start, align) > round_down(end - len, align));

	range = round_down(end - len, align) - round_up(start, align);
	if (range) {
		if (sizeof(unsigned long) == sizeof(u64)) {
			addr = get_random_long();
		} else {
			addr = get_random_int();
			if (range > U32_MAX) {
				addr <<= 32;
				addr |= get_random_int();
			}
		}
		div64_u64_rem(addr, range, &addr);
		start += addr;
	}

	return round_up(start, align);
}
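
/*
 * Worked example (illustrative): random_offset(0, SZ_1M, SZ_64K, SZ_4K)
 * computes range = round_down(1M - 64K, 4K) - round_up(0, 4K) = 960 KiB,
 * reduces a random draw modulo that range and rounds the result up to
 * 4 KiB, so the returned offset is always aligned and [offset,
 * offset + 64K) always fits inside [0, 1M).
 */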

/**
 * i915_gem_gtt_insert - insert a node into an address_space (GTT)
 * @vm: the &struct i915_address_space
 * @node: the &struct drm_mm_node (typically i915_vma.node)
 * @size: how much space to allocate inside the GTT,
 *        must be #I915_GTT_PAGE_SIZE aligned
 * @alignment: required alignment of starting offset, may be 0 but
 *             if specified, this must be a power-of-two and at least
 *             #I915_GTT_MIN_ALIGNMENT
 * @color: color to apply to node
 * @start: start of any range restriction inside GTT (0 for all),
 *         must be #I915_GTT_PAGE_SIZE aligned
 * @end: end of any range restriction inside GTT (U64_MAX for all),
 *       must be #I915_GTT_PAGE_SIZE aligned if not U64_MAX
 * @flags: control search and eviction behaviour
 *
 * i915_gem_gtt_insert() first searches for an available hole into which
 * it can insert the node. The hole address is aligned to @alignment and
 * its @size must then fit entirely within the [@start, @end] bounds. The
 * nodes on either side of the hole must match @color, or else a guard page
 * will be inserted between the two nodes (or the node evicted). If no
 * suitable hole is found, first a victim is randomly selected and tested
 * for eviction, and only then is the LRU list of objects within the GTT
 * scanned to find the first set of replacement nodes to create the hole.
 * Those old overlapping nodes are evicted from the GTT (and so must be
 * rebound before any future use). Any node that is currently pinned cannot
 * be evicted (see i915_vma_pin()). Similarly, if the node's VMA is currently
 * active and #PIN_NONBLOCK is specified, that node is also skipped when
 * searching for an eviction candidate. See i915_gem_evict_something() for
 * the gory details on the eviction algorithm.
 *
 * Returns: 0 on success, -ENOSPC if no suitable hole is found, -EINTR if
 * asked to wait for eviction and interrupted.
 */
int i915_gem_gtt_insert(struct i915_address_space *vm,
			struct drm_mm_node *node,
			u64 size, u64 alignment, unsigned long color,
			u64 start, u64 end, unsigned int flags)
{
	enum drm_mm_insert_mode mode;
	u64 offset;
	int err;

	lockdep_assert_held(&vm->i915->drm.struct_mutex);
	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(alignment && !is_power_of_2(alignment));
	GEM_BUG_ON(alignment && !IS_ALIGNED(alignment, I915_GTT_MIN_ALIGNMENT));
	GEM_BUG_ON(start >= end);
	GEM_BUG_ON(start > 0 && !IS_ALIGNED(start, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(end < U64_MAX && !IS_ALIGNED(end, I915_GTT_PAGE_SIZE));
	GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->base);
	GEM_BUG_ON(drm_mm_node_allocated(node));

	if (unlikely(range_overflows(start, size, end)))
		return -ENOSPC;

	if (unlikely(round_up(start, alignment) > round_down(end - size, alignment)))
		return -ENOSPC;

	mode = DRM_MM_INSERT_BEST;
	if (flags & PIN_HIGH)
		mode = DRM_MM_INSERT_HIGH;
	if (flags & PIN_MAPPABLE)
		mode = DRM_MM_INSERT_LOW;

	/* We only allocate in PAGE_SIZE/GTT_PAGE_SIZE (4096) chunks,
	 * so we know that we always have a minimum alignment of 4096.
	 * The drm_mm range manager is optimised to return results
	 * with zero alignment, so where possible use the optimal
	 * path.
	 */
	BUILD_BUG_ON(I915_GTT_MIN_ALIGNMENT > I915_GTT_PAGE_SIZE);
	if (alignment <= I915_GTT_MIN_ALIGNMENT)
		alignment = 0;

	err = drm_mm_insert_node_in_range(&vm->mm, node,
					  size, alignment, color,
					  start, end, mode);
	if (err != -ENOSPC)
		return err;

	/* No free space, pick a slot at random.
	 *
	 * There is a pathological case here using a GTT shared between
	 * mmap and GPU (i.e. ggtt/aliasing_ppgtt but not full-ppgtt):
	 *
	 * |<-- 256 MiB aperture -->||<-- 1792 MiB unmappable -->|
	 *         (64k objects)            (448k objects)
	 *
	 * Now imagine that the eviction LRU is ordered top-down (just because
	 * pathology meets real life), and that we need to evict an object to
	 * make room inside the aperture. The eviction scan then has to walk
	 * the 448k list before it finds one within range. And now imagine that
	 * it has to search for a new hole between every byte inside the memcpy,
	 * for several simultaneous clients.
	 *
	 * On a full-ppgtt system, if we have run out of available space, there
	 * will be lots and lots of objects in the eviction list! Again,
	 * searching that LRU list may be slow if we are also applying any
	 * range restrictions (e.g. restriction to low 4GiB) and so, for
	 * simplicity and similarity between different GTT, try the single
	 * random replacement first.
	 */
	offset = random_offset(start, end,
			       size, alignment ?: I915_GTT_MIN_ALIGNMENT);
	err = i915_gem_gtt_reserve(vm, node, size, offset, color, flags);
	if (err != -ENOSPC)
		return err;

	/* Randomly selected placement is pinned, do a search */
	err = i915_gem_evict_something(vm, size, alignment, color,
				       start, end, flags);
	if (err)
		return err;

	return drm_mm_insert_node_in_range(&vm->mm, node,
					   size, alignment, color,
					   start, end, DRM_MM_INSERT_EVICT);
}
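
/*
 * Illustrative sketch, not driver code: allocating 64 KiB anywhere in the
 * low 4 GiB of an address space, preferring the top of that range (bounds
 * and size are made up for the example):
 *
 *	err = i915_gem_gtt_insert(vm, &node, SZ_64K, 0, 0,
 *				  0, 1ULL << 32, PIN_HIGH);
 */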

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_gtt.c"
#include "selftests/i915_gem_gtt.c"
#endif