// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2007, Intel Corporation.
 * All Rights Reserved.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics.com>
 *          Alan Cox <alan@linux.intel.com>
 */

#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include <asm/set_memory.h>
#include "psb_drv.h"
#include "blitter.h"


/*
 * GTT resource allocator - manage page mappings in GTT space
 */

/**
 * psb_gtt_mask_pte - generate GTT pte entry
 * @pfn: page number to encode
 * @type: type of memory in the GTT
 *
 * Set the GTT entry for the appropriate memory type.
 */
static inline uint32_t psb_gtt_mask_pte(uint32_t pfn, int type)
{
	uint32_t mask = PSB_PTE_VALID;

	/* Ensure we explode rather than put an invalid low mapping of
	   a high mapping page into the gtt */
	BUG_ON(pfn & ~(0xFFFFFFFF >> PAGE_SHIFT));

	if (type & PSB_MMU_CACHED_MEMORY)
		mask |= PSB_PTE_CACHED;
	if (type & PSB_MMU_RO_MEMORY)
		mask |= PSB_PTE_RO;
	if (type & PSB_MMU_WO_MEMORY)
		mask |= PSB_PTE_WO;

	return (pfn << PAGE_SHIFT) | mask;
}

/**
 * psb_gtt_entry - find the GTT entries for a gtt_range
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Given a gtt_range object return the GTT offset of the page table
 * entries for this gtt_range
 */
static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned long offset;

	offset = r->resource.start - dev_priv->gtt_mem->start;

	return dev_priv->gtt_map + (offset >> PAGE_SHIFT);
}

/**
 * psb_gtt_insert - put an object into the GTT
 * @dev: our DRM device
 * @r: our GTT range
 * @resume: set when we are restoring the GTT contents on resume
 *
 * Take our preallocated GTT range and insert the GEM object into
 * the GTT. This is protected via the gtt mutex which the caller
 * must hold.
 */
static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
			  int resume)
{
	u32 __iomem *gtt_slot;
	u32 pte;
	struct page **pages;
	int i;

	if (r->pages == NULL) {
		WARN_ON(1);
		return -EINVAL;
	}

	WARN_ON(r->stolen);	/* refcount these maybe ? */

	gtt_slot = psb_gtt_entry(dev, r);
	pages = r->pages;

	if (!resume) {
		/* Make sure changes are visible to the GPU */
		set_pages_array_wc(pages, r->npage);
	}

	/* Write our page entries into the GTT itself */
	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
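	/* Wrap around: the pages before the roll point are written last, so
	   the page at the roll offset ends up at the start of the range */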
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	/* Make sure all the entries are set before we return */
	ioread32(gtt_slot - 1);

	return 0;
}

/**
 * psb_gtt_remove - remove an object from the GTT
 * @dev: our DRM device
 * @r: our GTT range
 *
 * Remove a preallocated GTT range from the GTT. Overwrite all the
 * page table entries with the dummy page. This is protected via the gtt
 * mutex which the caller must hold.
 */
static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 __iomem *gtt_slot;
	u32 pte;
	int i;

	WARN_ON(r->stolen);

	gtt_slot = psb_gtt_entry(dev, r);
	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
			       PSB_MMU_CACHED_MEMORY);

	for (i = 0; i < r->npage; i++)
		iowrite32(pte, gtt_slot++);
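	/* Read the last slot back so the writes are posted before we return */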
	ioread32(gtt_slot - 1);
	set_pages_array_wb(r->pages, r->npage);
}

/**
 * psb_gtt_roll - set scrolling position
 * @dev: our DRM device
 * @r: the gtt mapping we are using
 * @roll: roll offset
 *
 * Roll an existing pinned mapping by moving the pages through the GTT.
 * This allows us to implement hardware scrolling on the consoles without
 * a 2D engine
 */
void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
{
	u32 __iomem *gtt_slot;
	u32 pte;
	int i;

	if (roll >= r->npage) {
		WARN_ON(1);
		return;
	}

	r->roll = roll;

	/* Not currently in the GTT - no worries, we will write the mapping at
	   the right position when it gets pinned */
	if (!r->stolen && !r->in_gart)
		return;

	gtt_slot = psb_gtt_entry(dev, r);

	for (i = r->roll; i < r->npage; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
	for (i = 0; i < r->roll; i++) {
		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
				       PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, gtt_slot++);
	}
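	/* Make sure the updated entries are visible before we return */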
	ioread32(gtt_slot - 1);
}

/**
 * psb_gtt_attach_pages - attach and pin GEM pages
 * @gt: the gtt range
 *
 * Pin and build an in kernel list of the pages that back our GEM object.
 * While we hold this the pages cannot be swapped out. This is protected
 * via the gtt mutex which the caller must hold.
 */
static int psb_gtt_attach_pages(struct gtt_range *gt)
{
	struct page **pages;

	WARN_ON(gt->pages);

	pages = drm_gem_get_pages(&gt->gem);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	gt->npage = gt->gem.size / PAGE_SIZE;
	gt->pages = pages;

	return 0;
}

/**
 * psb_gtt_detach_pages - detach and unpin GEM pages
 * @gt: the gtt range
 *
 * Undo the effect of psb_gtt_attach_pages. At this point the pages
 * must have been removed from the GTT as they could now be paged out
 * and change bus address. This is protected via the gtt mutex which the
 * caller must hold.
 */
static void psb_gtt_detach_pages(struct gtt_range *gt)
{
	drm_gem_put_pages(&gt->gem, gt->pages, true, false);
	gt->pages = NULL;
}

/**
 * psb_gtt_pin - pin pages into the GTT
 * @gt: range to pin
 *
 * Pin a set of pages into the GTT. The pins are refcounted so that
 * multiple pins need multiple unpins to undo.
 *
 * Non GEM backed objects treat this as a no-op as they are always GTT
 * backed objects.
 */
int psb_gtt_pin(struct gtt_range *gt)
{
	int ret = 0;
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;

	mutex_lock(&dev_priv->gtt_mutex);

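	/* First pin of a GEM backed range: grab the backing pages and map
	   them into both the GTT and the GPU MMU */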
	if (gt->in_gart == 0 && gt->stolen == 0) {
		ret = psb_gtt_attach_pages(gt);
		if (ret < 0)
			goto out;
		ret = psb_gtt_insert(dev, gt, 0);
		if (ret < 0) {
			psb_gtt_detach_pages(gt);
			goto out;
		}
		psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     gt->pages, (gpu_base + gt->offset),
				     gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
	}
	gt->in_gart++;
out:
	mutex_unlock(&dev_priv->gtt_mutex);
	return ret;
}

/**
 * psb_gtt_unpin - Drop a GTT pin requirement
 * @gt: range to unpin
 *
 * Undoes the effect of psb_gtt_pin. On the last drop the GEM object
 * will be removed from the GTT which will also drop the page references
 * and allow the VM to clean up or page out the memory.
 *
 * Non GEM backed objects treat this as a no-op as they are always GTT
 * backed objects.
 */
void psb_gtt_unpin(struct gtt_range *gt)
{
	struct drm_device *dev = gt->gem.dev;
	struct drm_psb_private *dev_priv = dev->dev_private;
	u32 gpu_base = dev_priv->gtt.gatt_start;
	int ret;

	/* While holding the gtt_mutex no new blits can be initiated */
	mutex_lock(&dev_priv->gtt_mutex);

	/* Wait for any possible usage of the memory to be finished */
	ret = gma_blt_wait_idle(dev_priv);
	if (ret) {
		DRM_ERROR("Failed to idle the blitter, unpin failed!");
		goto out;
	}

	WARN_ON(!gt->in_gart);

	gt->in_gart--;
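	/* Last pin dropped: tear down the GPU MMU and GTT mappings and
	   release the backing pages */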
	if (gt->in_gart == 0 && gt->stolen == 0) {
		psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
				     (gpu_base + gt->offset), gt->npage, 0, 0);
		psb_gtt_remove(dev, gt);
		psb_gtt_detach_pages(gt);
	}

out:
	mutex_unlock(&dev_priv->gtt_mutex);
}

/*
 * GTT resource allocator - allocate and manage GTT address space
 */

/**
 * psb_gtt_alloc_range - allocate GTT address space
 * @dev: Our DRM device
 * @len: length (bytes) of address space required
 * @name: resource name
 * @backed: resource should be backed by stolen pages
 * @align: requested alignment
 *
 * Ask the kernel core to find us a suitable range of addresses
 * to use for a GTT mapping.
 *
 * Returns a gtt_range structure describing the object, or NULL on
 * error. On successful return the resource is both allocated and marked
 * as in use.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
				      const char *name, int backed, u32 align)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct gtt_range *gt;
	struct resource *r = dev_priv->gtt_mem;
	int ret;
	unsigned long start, end;

	if (backed) {
		/* The start of the GTT is the stolen pages */
		start = r->start;
		end = r->start + dev_priv->gtt.stolen_size - 1;
	} else {
		/* The rest we will use for GEM backed objects */
		start = r->start + dev_priv->gtt.stolen_size;
		end = r->end;
	}

	gt = kzalloc(sizeof(struct gtt_range), GFP_KERNEL);
	if (gt == NULL)
		return NULL;
	gt->resource.name = name;
	gt->stolen = backed;
	gt->in_gart = backed;
	gt->roll = 0;
	/* Ensure this is set for non GEM objects */
	gt->gem.dev = dev;
	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
				len, start, end, align, NULL, NULL);
	if (ret == 0) {
		gt->offset = gt->resource.start - r->start;
		return gt;
	}
	kfree(gt);
	return NULL;
}

/**
 * psb_gtt_free_range - release GTT address space
 * @dev: our DRM device
 * @gt: a mapping created with psb_gtt_alloc_range
 *
 * Release a resource that was allocated with psb_gtt_alloc_range. If the
 * object has been pinned by mmap users, we currently clean that up here.
 */
void psb_gtt_free_range(struct drm_device *dev, struct gtt_range *gt)
{
	/* Undo the mmap pin if we are destroying the object */
	if (gt->mmapping) {
		psb_gtt_unpin(gt);
		gt->mmapping = 0;
	}
	WARN_ON(gt->in_gart && !gt->stolen);
	release_resource(&gt->resource);
	kfree(gt);
}

static void psb_gtt_alloc(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	init_rwsem(&dev_priv->gtt.sem);
}

void psb_gtt_takedown(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;

	if (dev_priv->gtt_map) {
		iounmap(dev_priv->gtt_map);
		dev_priv->gtt_map = NULL;
	}
	if (dev_priv->gtt_initialized) {
		pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
				      dev_priv->gmch_ctrl);
		PSB_WVDC32(dev_priv->pge_ctl, PSB_PGETBL_CTL);
		(void) PSB_RVDC32(PSB_PGETBL_CTL);
	}
	if (dev_priv->vram_addr)
		iounmap(dev_priv->vram_addr);
}

int psb_gtt_init(struct drm_device *dev, int resume)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	unsigned gtt_pages;
	unsigned long stolen_size, vram_stolen_size;
	unsigned i, num_pages;
	unsigned pfn_base;
	struct psb_gtt *pg;

	int ret = 0;
	uint32_t pte;

	if (!resume) {
		mutex_init(&dev_priv->gtt_mutex);
		mutex_init(&dev_priv->mmap_mutex);
		psb_gtt_alloc(dev);
	}

	pg = &dev_priv->gtt;

	/* Enable the GTT */
	pci_read_config_word(dev->pdev, PSB_GMCH_CTRL, &dev_priv->gmch_ctrl);
	pci_write_config_word(dev->pdev, PSB_GMCH_CTRL,
			      dev_priv->gmch_ctrl | _PSB_GMCH_ENABLED);

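	/* Turn on the GTT page table and post the write with a dummy read */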
	dev_priv->pge_ctl = PSB_RVDC32(PSB_PGETBL_CTL);
	PSB_WVDC32(dev_priv->pge_ctl | _PSB_PGETBL_ENABLED, PSB_PGETBL_CTL);
	(void) PSB_RVDC32(PSB_PGETBL_CTL);

	/* The root resource we allocate address space from */
	dev_priv->gtt_initialized = 1;

	pg->gtt_phys_start = dev_priv->pge_ctl & PAGE_MASK;

	/*
	 * The video mmu has a hw bug when accessing 0xD0000000.
	 * Make gatt start at 0xE0000000. This doesn't actually
	 * matter for us but may do if the video acceleration ever
	 * gets opened up.
	 */
	pg->mmu_gatt_start = 0xE0000000;

	pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
	gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
							>> PAGE_SHIFT;
	/* CDV doesn't report this. In which case the system has 64 gtt pages */
	if (pg->gtt_start == 0 || gtt_pages == 0) {
		dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
		gtt_pages = 64;
		pg->gtt_start = dev_priv->pge_ctl;
	}

	pg->gatt_start = pci_resource_start(dev->pdev, PSB_GATT_RESOURCE);
	pg->gatt_pages = pci_resource_len(dev->pdev, PSB_GATT_RESOURCE)
							>> PAGE_SHIFT;
	dev_priv->gtt_mem = &dev->pdev->resource[PSB_GATT_RESOURCE];

	if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
		static struct resource fudge;	/* Preferably peppermint */
		/* This can occur on CDV systems. Fudge it in this case.
		   We really don't care what imaginary space is being allocated
		   at this point */
		dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
		pg->gatt_start = 0x40000000;
		pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
		/* This is a little confusing but in fact the GTT is providing
		   a view from the GPU into memory and not vice versa. As such
		   this is really allocating space that is not the same as the
		   CPU address space on CDV */
		fudge.start = 0x40000000;
		fudge.end = 0x40000000 + 128 * 1024 * 1024 - 1;
		fudge.name = "fudge";
		fudge.flags = IORESOURCE_MEM;
		dev_priv->gtt_mem = &fudge;
	}

	pci_read_config_dword(dev->pdev, PSB_BSM, &dev_priv->stolen_base);
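	/* The usable stolen area runs from the BSM base up to the start of
	   the GTT table itself, less one page */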
	vram_stolen_size = pg->gtt_phys_start - dev_priv->stolen_base
								- PAGE_SIZE;

	stolen_size = vram_stolen_size;

	dev_dbg(dev->dev, "Stolen memory base 0x%x, size %luK\n",
		dev_priv->stolen_base, vram_stolen_size / 1024);

	if (resume && (gtt_pages != pg->gtt_pages) &&
	    (stolen_size != pg->stolen_size)) {
		dev_err(dev->dev, "GTT resume error.\n");
		ret = -EINVAL;
		goto out_err;
	}

	pg->gtt_pages = gtt_pages;
	pg->stolen_size = stolen_size;
	dev_priv->vram_stolen_size = vram_stolen_size;

	/*
	 * Map the GTT and the stolen memory area
	 */
	if (!resume)
		dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
						    gtt_pages << PAGE_SHIFT);
	if (!dev_priv->gtt_map) {
		dev_err(dev->dev, "Failure to map gtt.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	if (!resume)
		dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
						 stolen_size);

	if (!dev_priv->vram_addr) {
		dev_err(dev->dev, "Failure to map stolen base.\n");
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Insert vram stolen pages into the GTT
	 */

	pfn_base = dev_priv->stolen_base >> PAGE_SHIFT;
	num_pages = vram_stolen_size >> PAGE_SHIFT;
	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
	for (i = 0; i < num_pages; ++i) {
		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
		iowrite32(pte, dev_priv->gtt_map + i);
	}

	/*
	 * Init rest of GTT to the scratch page to avoid accidents or scribbles
	 */

	pfn_base = page_to_pfn(dev_priv->scratch_page);
	pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
	for (; i < gtt_pages; ++i)
		iowrite32(pte, dev_priv->gtt_map + i);

	(void) ioread32(dev_priv->gtt_map + i - 1);
	return 0;

out_err:
	psb_gtt_takedown(dev);
	return ret;
}

int psb_gtt_restore(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct resource *r = dev_priv->gtt_mem->child;
	struct gtt_range *range;
	unsigned int restored = 0, total = 0, size = 0;

	/* On resume, the gtt_mutex is already initialized */
	mutex_lock(&dev_priv->gtt_mutex);
	psb_gtt_init(dev, 1);

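	/* Walk the children of the GTT resource and re-insert every range
	   that still has backing pages attached */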
	while (r != NULL) {
		range = container_of(r, struct gtt_range, resource);
		if (range->pages) {
			psb_gtt_insert(dev, range, 1);
			size += range->resource.end - range->resource.start;
			restored++;
		}
		r = r->sibling;
		total++;
	}
	mutex_unlock(&dev_priv->gtt_mutex);
	DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
			 total, (size / 1024));

	return 0;
}