/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

typedef uint32_t gtt_pte_t;

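/*
 * Illustrative note (added commentary, mirroring the usage further below,
 * not part of the original source): a valid, LLC-cached PTE for a page at
 * dma address 'addr' is composed as
 *
 *	gtt_pte_t pte = GEN6_PTE_ADDR_ENCODE(addr) |
 *			GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;
 */
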
/* PPGTT support for Sandybridge/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
                                   unsigned first_entry,
                                   unsigned num_entries)
{
        gtt_pte_t *pt_vaddr;
        gtt_pte_t scratch_pte;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned last_pte, i;

        scratch_pte = GEN6_PTE_ADDR_ENCODE(ppgtt->scratch_page_dma_addr);
        scratch_pte |= GEN6_PTE_VALID | GEN6_PTE_CACHE_LLC;

        while (num_entries) {
                last_pte = first_pte + num_entries;
                if (last_pte > I915_PPGTT_PT_ENTRIES)
                        last_pte = I915_PPGTT_PT_ENTRIES;

                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

                for (i = first_pte; i < last_pte; i++)
                        pt_vaddr[i] = scratch_pte;

                kunmap_atomic(pt_vaddr);

                num_entries -= last_pte - first_pte;
                first_pte = 0;
                act_pd++;
        }
}

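/*
 * Added commentary (illustrative numbers, not from the original source):
 * the function below steals the top I915_PPGTT_PD_ENTRIES slots of the
 * global GTT for the PPGTT page directory. With the 512*1024-entry GTT
 * mentioned in its comment and, say, 512 stolen PDEs, the directory would
 * start at entry 512*1024 - 512, giving
 * pd_offset = (512*1024 - 512) * sizeof(gtt_pte_t).
 */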
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt;
        unsigned first_pd_entry_in_global_pt;
        int i;
        int ret = -ENOMEM;

        /* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
         * entries. For aliasing ppgtt support we just steal them at the end for
         * now. */
        first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;

        ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
        if (!ppgtt)
                return ret;

        ppgtt->dev = dev;
        ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
        ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
                                  GFP_KERNEL);
        if (!ppgtt->pt_pages)
                goto err_ppgtt;

        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
                if (!ppgtt->pt_pages[i])
                        goto err_pt_alloc;
        }

        if (dev_priv->mm.gtt->needs_dmar) {
                ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
                                             *ppgtt->num_pd_entries,
                                             GFP_KERNEL);
                if (!ppgtt->pt_dma_addr)
                        goto err_pt_alloc;

                for (i = 0; i < ppgtt->num_pd_entries; i++) {
                        dma_addr_t pt_addr;

                        pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
                                               0, 4096,
                                               PCI_DMA_BIDIRECTIONAL);

                        if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
                                ret = -EIO;
                                goto err_pd_pin;
                        }
                        ppgtt->pt_dma_addr[i] = pt_addr;
                }
        }

        ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;

        i915_ppgtt_clear_range(ppgtt, 0,
                               ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

        ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);

        dev_priv->mm.aliasing_ppgtt = ppgtt;

        return 0;

err_pd_pin:
        if (ppgtt->pt_dma_addr) {
                for (i--; i >= 0; i--)
                        pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }
err_pt_alloc:
        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++) {
                if (ppgtt->pt_pages[i])
                        __free_page(ppgtt->pt_pages[i]);
        }
        kfree(ppgtt->pt_pages);
err_ppgtt:
        kfree(ppgtt);

        return ret;
}

void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
        int i;

        if (!ppgtt)
                return;

        if (ppgtt->pt_dma_addr) {
                for (i = 0; i < ppgtt->num_pd_entries; i++)
                        pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
                                       4096, PCI_DMA_BIDIRECTIONAL);
        }

        kfree(ppgtt->pt_dma_addr);
        for (i = 0; i < ppgtt->num_pd_entries; i++)
                __free_page(ppgtt->pt_pages[i]);
        kfree(ppgtt->pt_pages);
        kfree(ppgtt);
}

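/*
 * Added commentary: the walk below advances two cursors in lockstep: m
 * counts pages within the current scatterlist segment (segment_len pages
 * long), while j counts PTE slots within the currently kmapped page table;
 * whichever runs out first triggers fetching the next segment or the next
 * page table.
 */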
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
                                         const struct sg_table *pages,
                                         unsigned first_entry,
                                         gtt_pte_t pte_flags)
{
        gtt_pte_t *pt_vaddr, pte;
        unsigned act_pd = first_entry / I915_PPGTT_PT_ENTRIES;
        unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
        unsigned i, j, m, segment_len;
        dma_addr_t page_addr;
        struct scatterlist *sg;

        /* init sg walking */
        sg = pages->sgl;
        i = 0;
        segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
        m = 0;

        while (i < pages->nents) {
                pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pd]);

                for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
                        page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
                        pte = GEN6_PTE_ADDR_ENCODE(page_addr);
                        pt_vaddr[j] = pte | pte_flags;

                        /* grab the next page */
                        if (++m == segment_len) {
                                if (++i == pages->nents)
                                        break;

                                sg = sg_next(sg);
                                segment_len = sg_dma_len(sg) >> PAGE_SHIFT;
                                m = 0;
                        }
                }

                kunmap_atomic(pt_vaddr);

                first_pte = 0;
                act_pd++;
        }
}

void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
                            struct drm_i915_gem_object *obj,
                            enum i915_cache_level cache_level)
{
        gtt_pte_t pte_flags = GEN6_PTE_VALID;

        switch (cache_level) {
        case I915_CACHE_LLC_MLC:
                /* Haswell doesn't set L3 this way */
                if (IS_HASWELL(ppgtt->dev))
                        pte_flags |= GEN6_PTE_CACHE_LLC;
                else
                        pte_flags |= GEN6_PTE_CACHE_LLC_MLC;
                break;
        case I915_CACHE_LLC:
                pte_flags |= GEN6_PTE_CACHE_LLC;
                break;
        case I915_CACHE_NONE:
                if (IS_HASWELL(ppgtt->dev))
                        pte_flags |= HSW_PTE_UNCACHED;
                else
                        pte_flags |= GEN6_PTE_UNCACHED;
                break;
        default:
                BUG();
        }

        i915_ppgtt_insert_sg_entries(ppgtt,
                                     obj->pages,
                                     obj->gtt_space->start >> PAGE_SHIFT,
                                     pte_flags);
}

void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
                              struct drm_i915_gem_object *obj)
{
        i915_ppgtt_clear_range(ppgtt,
                               obj->gtt_space->start >> PAGE_SHIFT,
                               obj->base.size >> PAGE_SHIFT);
}

/* XXX kill agp_type! */
static unsigned int cache_level_to_agp_type(struct drm_device *dev,
                                            enum i915_cache_level cache_level)
{
        switch (cache_level) {
        case I915_CACHE_LLC_MLC:
                /* Older chipsets do not have this extra level of CPU
                 * caching, so fall through and request the PTE simply
                 * as cached.
                 */
                if (INTEL_INFO(dev)->gen >= 6 && !IS_HASWELL(dev))
                        return AGP_USER_CACHED_MEMORY_LLC_MLC;
        case I915_CACHE_LLC:
                return AGP_USER_CACHED_MEMORY;
        default:
        case I915_CACHE_NONE:
                return AGP_USER_MEMORY;
        }
}

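/*
 * Added commentary (assumption, hedged): gtt->do_idle_maps appears to flag
 * chipsets that cannot safely have pages unmapped beneath an active GPU
 * (historically a VT-d related quirk), so do_idling()/undo_idling() bracket
 * DMA unmaps with a forced, non-interruptible GPU idle on such machines.
 */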
static bool do_idling(struct drm_i915_private *dev_priv)
{
        bool ret = dev_priv->mm.interruptible;

        if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
                dev_priv->mm.interruptible = false;
                if (i915_gpu_idle(dev_priv->dev)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
                }
        }

        return ret;
}

static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
        if (unlikely(dev_priv->mm.gtt->do_idle_maps))
                dev_priv->mm.interruptible = interruptible;
}

void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;

        /* First fill our portion of the GTT with scratch pages */
        intel_gtt_clear_range(dev_priv->mm.gtt_start / PAGE_SIZE,
                              (dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);

        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
                i915_gem_clflush_object(obj);
                i915_gem_gtt_bind_object(obj, obj->cache_level);
        }

        intel_gtt_chipset_flush();
}

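/*
 * Added commentary: i915_gem_gtt_prepare_object() and
 * i915_gem_gtt_finish_object() pair up: prepare creates the DMA mapping
 * for the object's scatterlist (unless the object already owns one), and
 * finish tears it down again, idling the GPU first where required.
 */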
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
        if (obj->has_dma_mapping)
                return 0;

        if (!dma_map_sg(&obj->base.dev->pdev->dev,
                        obj->pages->sgl, obj->pages->nents,
                        PCI_DMA_BIDIRECTIONAL))
                return -ENOSPC;

        return 0;
}

void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level)
{
        struct drm_device *dev = obj->base.dev;
        unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);

        intel_gtt_insert_sg_entries(obj->pages,
                                    obj->gtt_space->start >> PAGE_SHIFT,
                                    agp_type);
        obj->has_global_gtt_mapping = 1;
}

void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
        intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
                              obj->base.size >> PAGE_SHIFT);

        obj->has_global_gtt_mapping = 0;
}

void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        bool interruptible;

        interruptible = do_idling(dev_priv);

        if (!obj->has_dma_mapping)
                dma_unmap_sg(&dev->pdev->dev,
                             obj->pages->sgl, obj->pages->nents,
                             PCI_DMA_BIDIRECTIONAL);

        undo_idling(dev_priv, interruptible);
}

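/*
 * Added commentary: on machines without an LLC, neighbouring GTT nodes of
 * different cache "colors" (e.g. snooped vs. unsnooped) must not touch, so
 * the adjust callback below shrinks a proposed range by one 4096-byte page
 * on whichever side borders a differently-colored node.
 */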
static void i915_gtt_color_adjust(struct drm_mm_node *node,
                                  unsigned long color,
                                  unsigned long *start,
                                  unsigned long *end)
{
        if (node->color != color)
                *start += 4096;

        if (!list_empty(&node->node_list)) {
                node = list_entry(node->node_list.next,
                                  struct drm_mm_node,
                                  node_list);
                if (node->allocated && node->color != color)
                        *end -= 4096;
        }
}

void i915_gem_init_global_gtt(struct drm_device *dev,
                              unsigned long start,
                              unsigned long mappable_end,
                              unsigned long end)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Subtract the guard page ... */
        drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
        if (!HAS_LLC(dev))
                dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

        dev_priv->mm.gtt_start = start;
        dev_priv->mm.gtt_mappable_end = mappable_end;
        dev_priv->mm.gtt_end = end;
        dev_priv->mm.gtt_total = end - start;
        dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;

        /* ... but ensure that we clear the entire range. */
        intel_gtt_clear_range(start / PAGE_SIZE, (end-start) / PAGE_SIZE);
}