/*
 * Copyright 2011 (c) Oracle Corp.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
 */

/*
 * A simple DMA pool loosely based on dmapool.c. It has certain advantages
 * over the DMA pools:
 * - Pool collects recently freed pages for reuse (and hooks up to
 *   the shrinker).
 * - Tracks currently in use pages
 * - Tracks whether the page is UC, WB or cached (and reverts to WB
 *   when freed).
 */

#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <linux/device.h>
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	4
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define IS_UNDEFINED		(0)
#define IS_WC			(1<<1)
#define IS_UC			(1<<2)
#define IS_CACHED		(1<<3)
#define IS_DMA32		(1<<4)
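
/*
 * For example, on a 64-bit build with 4 KiB pages and 8-byte pointers,
 * NUM_PAGES_TO_ALLOC works out to 4096 / 8 = 512 pages per batch.
 */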

enum pool_type {
	POOL_IS_UNDEFINED,
	POOL_IS_WC = IS_WC,
	POOL_IS_UC = IS_UC,
	POOL_IS_CACHED = IS_CACHED,
	POOL_IS_WC_DMA32 = IS_WC | IS_DMA32,
	POOL_IS_UC_DMA32 = IS_UC | IS_DMA32,
	POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32,
};
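
/*
 * The pool types are simply ORed flag bits, so e.g. a write-combined
 * DMA32 pool is POOL_IS_WC_DMA32 == IS_WC | IS_DMA32 == (1<<1) | (1<<4)
 * == 0x12, and a pool lookup can match on the exact type value.
 */
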
/*
 * The pool structure. There are usually six pools:
 *  - generic (not restricted to DMA32):
 *      - write combined, uncached, cached.
 *  - dma32 (up to 2^32 - so up to 4GB):
 *      - write combined, uncached, cached.
 * for each 'struct device'. The 'cached' is for pages that are actively used.
 * The other ones can be shrunk by the shrinker API if necessary.
 * @pools: The 'struct device->dma_pools' link.
 * @type: Type of the pool
 * @lock: Protects the inuse_list and free_list from concurrent access. Must be
 * used with irqsave/irqrestore variants because the pool allocator may be
 * called from delayed work.
 * @inuse_list: Pool of pages that are in use. The order is very important and
 * it is in the order that the TTM pages that are put back are in.
 * @free_list: Pool of pages that are free to be used. No order requirements.
 * @dev: The device that is associated with these pools.
 * @size: Size used during DMA allocation.
 * @npages_free: Count of available pages for re-use.
 * @npages_in_use: Count of pages that are in use.
 * @nfrees: Stats when pool is shrinking.
 * @nrefills: Stats when the pool is grown.
 * @gfp_flags: Flags to pass for alloc_page.
 * @name: Name of the pool.
 * @dev_name: Name derived from dev - similar to how dev_info works.
 *   Used during shutdown as the dev_info during release is unavailable.
 */
struct dma_pool {
	struct list_head pools; /* The 'struct device->dma_pools' link */
	enum pool_type type;
	spinlock_t lock;
	struct list_head inuse_list;
	struct list_head free_list;
	struct device *dev;
	unsigned size;
	unsigned npages_free;
	unsigned npages_in_use;
	unsigned long nfrees; /* Stats when shrunk. */
	unsigned long nrefills; /* Stats when grown. */
	gfp_t gfp_flags;
	char name[13]; /* "cached dma32" */
	char dev_name[64]; /* Constructed from dev */
};

/*
 * The accounting structure keeping track of the allocated page along with
 * the DMA address.
 * @page_list: The link to the 'page_list' in 'struct dma_pool'.
 * @vaddr: The virtual address of the page.
 * @p: The 'struct page' backing the allocation.
 * @dma: The bus address of the page. If the page is not allocated
 *   via the DMA API, it will be -1.
 */
struct dma_page {
	struct list_head page_list;
	void *vaddr;
	struct page *p;
	dma_addr_t dma;
};

/*
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have immediate effect
 * anyway so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

/*
 * Contains the list of all of the 'struct device' and their corresponding
 * DMA pools. Guarded by _manager->lock.
 * @pools: The link to 'struct ttm_pool_manager->pools'
 * @dev: The 'struct device' associated with the 'pool'
 * @pool: The 'struct dma_pool' associated with the 'dev'
 */
struct device_pools {
	struct list_head pools;
	struct device *dev;
	struct dma_pool *pool;
};

/*
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * @lock: Lock used when adding/removing from pools
 * @pools: List of 'struct device' and 'struct dma_pool' tuples.
 * @options: Limits for the pool.
 * @npools: Total amount of pools in existence.
 * @mm_shrink: The structure used by [un]register_shrinker
 * @kobj: The kobject backing the sysfs interface below.
 */
struct ttm_pool_manager {
	struct mutex		lock;
	struct list_head	pools;
	struct ttm_pool_opts	options;
	unsigned		npools;
	struct shrinker		mm_shrink;
	struct kobject		kobj;
};

static struct ttm_pool_manager *_manager;

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr,
			      const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;
	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
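
/*
 * The sysfs files accept and report sizes in KiB and convert to page counts
 * internally. For instance (shell session and path illustrative only;
 * assuming 4 KiB pages, so PAGE_SIZE >> 10 == 4):
 *
 *	# echo 1024 > .../ttm/dma_pool/pool_max_size
 *
 * stores 1024 / 4 = 256 pages in options.max_size, and reading the file
 * back multiplies by 4 again to report 1024.
 */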

static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr,
			     char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* for !CONFIG_X86 */

static int ttm_set_pages_caching(struct dma_pool *pool,
				 struct page **pages, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	if (pool->type & IS_UC) {
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to uc!\n",
			       pool->dev_name, cpages);
	}
	if (pool->type & IS_WC) {
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("%s: Failed to set %d pages to wc!\n",
			       pool->dev_name, cpages);
	}
	return r;
}

static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page)
{
	dma_addr_t dma = d_page->dma;
	dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma);

	kfree(d_page);
	d_page = NULL;
}

static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool)
{
	struct dma_page *d_page;

	d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL);
	if (!d_page)
		return NULL;

	d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size,
					   &d_page->dma,
					   pool->gfp_flags);
	if (d_page->vaddr) {
		if (is_vmalloc_addr(d_page->vaddr))
			d_page->p = vmalloc_to_page(d_page->vaddr);
		else
			d_page->p = virt_to_page(d_page->vaddr);
	} else {
		kfree(d_page);
		d_page = NULL;
	}
	return d_page;
}

static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate)
{
	enum pool_type type = IS_UNDEFINED;

	if (flags & TTM_PAGE_FLAG_DMA32)
		type |= IS_DMA32;
	if (cstate == tt_cached)
		type |= IS_CACHED;
	else if (cstate == tt_uncached)
		type |= IS_UC;
	else
		type |= IS_WC;

	return type;
}
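
/*
 * For example, a TTM with TTM_PAGE_FLAG_DMA32 set and caching state
 * tt_uncached maps to IS_UC | IS_DMA32, i.e. POOL_IS_UC_DMA32, while a
 * plain cached TTM maps to POOL_IS_CACHED.
 */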

static void ttm_pool_update_free_locked(struct dma_pool *pool,
					unsigned freed_pages)
{
	pool->npages_free -= freed_pages;
	pool->nfrees += freed_pages;
}

/* set memory back to wb and free the pages. */
static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
			      struct page *pages[], unsigned npages)
{
	struct dma_page *d_page, *tmp;

	/* Don't set WB on WB page pool. */
	if (npages && !(pool->type & IS_CACHED) &&
	    set_pages_array_wb(pages, npages))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, npages);

	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
	}
}

static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
	/* Don't set WB on WB page pool. */
	if (!(pool->type & IS_CACHED) && set_pages_array_wb(&d_page->p, 1))
		pr_err("%s: Failed to set %d pages to wb!\n",
		       pool->dev_name, 1);

	list_del(&d_page->page_list);
	__ttm_dma_free_page(pool, d_page);
}

/*
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: Number of pages to free; FREE_ALL_PAGES frees everything.
 * @use_static: Safe to use the static buffer
 */
static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
				       bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct dma_page *dma_p, *tmp;
	struct page **pages_to_free;
	struct list_head d_pages;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;
#if 0
	if (nr_free > 1) {
		pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n",
			 pool->dev_name, pool->name, current->pid,
			 npages_to_free, nr_free);
	}
#endif
	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
					GFP_KERNEL);

	if (!pages_to_free) {
		pr_err("%s: Failed to allocate memory for pool free operation\n",
		       pool->dev_name);
		return 0;
	}
	INIT_LIST_HEAD(&d_pages);
restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	/* We're picking the oldest ones off the list */
	list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list,
					 page_list) {
		if (freed_pages >= npages_to_free)
			break;

		/* Move the dma_page from one list to another. */
		list_move(&dma_p->page_list, &d_pages);

		pages_to_free[freed_pages++] = dma_p->p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {

			ttm_pool_update_free_locked(pool, freed_pages);
			/*
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_dma_pages_put(pool, &d_pages, pages_to_free,
					  freed_pages);

			INIT_LIST_HEAD(&d_pages);

			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}
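
/*
 * To illustrate the batching above (numbers assume NUM_PAGES_TO_ALLOC ==
 * 512, i.e. 4 KiB pages): a request to free 1200 pages drops the lock and
 * calls ttm_dma_pages_put() after the first 512 pages, again after the
 * next 512, and frees the remaining 176 on the final pass, so the pool
 * lock is never held across an expensive caching-state change.
 */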

static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
{
	struct device_pools *p;
	struct dma_pool *pool;

	if (!dev)
		return;

	mutex_lock(&_manager->lock);
	list_for_each_entry_reverse(p, &_manager->pools, pools) {
		if (p->dev != dev)
			continue;
		pool = p->pool;
		if (pool->type != type)
			continue;

		list_del(&p->pools);
		kfree(p);
		_manager->npools--;
		break;
	}
	list_for_each_entry_reverse(pool, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		/* Takes a spinlock.. */
		/* OK to use static buffer since global mutex is held. */
		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
		WARN_ON((pool->npages_in_use + pool->npages_free) != 0);
		/* This code path is called after _all_ references to the
		 * struct device have been dropped - so nobody should be
		 * touching it. In case somebody is trying to _add_ we are
		 * guarded by the mutex. */
		list_del(&pool->pools);
		kfree(pool);
		break;
	}
	mutex_unlock(&_manager->lock);
}

/*
 * When the 'struct device' is freed this destructor is run, though the
 * pool might have already been freed earlier.
 */
static void ttm_dma_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	if (pool)
		ttm_dma_free_pool(dev, pool->type);
}

static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
					  enum pool_type type)
{
	char *n[] = {"wc", "uc", "cached", " dma32", "unknown",};
	enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED};
	struct device_pools *sec_pool = NULL;
	struct dma_pool *pool = NULL, **ptr;
	unsigned i;
	int ret = -ENODEV;
	char *p;

	if (!dev)
		return NULL;

	ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	ret = -ENOMEM;

	pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL,
			    dev_to_node(dev));
	if (!pool)
		goto err_mem;

	sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL,
				dev_to_node(dev));
	if (!sec_pool)
		goto err_mem;

	INIT_LIST_HEAD(&sec_pool->pools);
	sec_pool->dev = dev;
	sec_pool->pool = pool;

	INIT_LIST_HEAD(&pool->free_list);
	INIT_LIST_HEAD(&pool->inuse_list);
	INIT_LIST_HEAD(&pool->pools);
	spin_lock_init(&pool->lock);
	pool->dev = dev;
	pool->npages_free = pool->npages_in_use = 0;
	pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->size = PAGE_SIZE;
	pool->type = type;
	pool->nrefills = 0;
	p = pool->name;
	for (i = 0; i < 5; i++) {
		if (type & t[i]) {
			p += snprintf(p, sizeof(pool->name) - (p - pool->name),
				      "%s", n[i]);
		}
	}
	*p = 0;
	/* We copy the name for pr_ calls because by the time the pool is
	 * destroyed the kobj->name has already been deallocated. */
	snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s",
		 dev_driver_string(dev), dev_name(dev));
	mutex_lock(&_manager->lock);
	/* You can get the dma_pool from either the global: */
	list_add(&sec_pool->pools, &_manager->pools);
	_manager->npools++;
	/* or from 'struct device': */
	list_add(&pool->pools, &dev->dma_pools);
	mutex_unlock(&_manager->lock);

	*ptr = pool;
	devres_add(dev, ptr);

	return pool;
err_mem:
	devres_free(ptr);
	kfree(sec_pool);
	kfree(pool);
	return ERR_PTR(ret);
}

static struct dma_pool *ttm_dma_find_pool(struct device *dev,
					  enum pool_type type)
{
	struct dma_pool *pool, *tmp, *found = NULL;

	if (type == IS_UNDEFINED)
		return found;

	/* NB: We iterate on the 'struct dev' which has no spinlock, but
	 * it does have a kref which we have taken. The kref is taken during
	 * graphics driver loading - in drm_pci_init it calls either
	 * pci_dev_get or pci_register_driver, which both end up taking a kref
	 * on 'struct device'.
	 *
	 * On teardown, the graphics drivers end up quiescing the TTM
	 * (put_pages) and calling the dev_res destructors: ttm_dma_pool_release.
	 * The nice thing is that at that point in time there are no pages
	 * associated with the driver, so this function will not be called.
	 */
	list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) {
		if (pool->type != type)
			continue;
		found = pool;
		break;
	}
	return found;
}

/*
 * Free the pages that failed to change their caching state. If there are
 * pages that have already changed their caching state, put them back in
 * the pool.
 */
static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool,
						 struct list_head *d_pages,
						 struct page **failed_pages,
						 unsigned cpages)
{
	struct dma_page *d_page, *tmp;
	struct page *p;
	unsigned i = 0;

	p = failed_pages[0];
	if (!p)
		return;
	/* Find the failed page. */
	list_for_each_entry_safe(d_page, tmp, d_pages, page_list) {
		if (d_page->p != p)
			continue;
		/* .. and then progress over the full list. */
		list_del(&d_page->page_list);
		__ttm_dma_free_page(pool, d_page);
		if (++i < cpages)
			p = failed_pages[i];
		else
			break;
	}
}

/*
 * Allocate 'count' pages and add them to the 'd_pages' list.
 * We return zero for success, and negative numbers as errors.
 */
static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool,
					struct list_head *d_pages,
					unsigned count)
{
	struct page **caching_array;
	struct dma_page *dma_p;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("%s: Unable to allocate table for new pages\n",
		       pool->dev_name);
		return -ENOMEM;
	}

	if (count > 1) {
		pr_debug("%s: (%s:%d) Getting %d pages\n",
			 pool->dev_name, pool->name, current->pid, count);
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		dma_p = __ttm_dma_alloc_page(pool);
		if (!dma_p) {
			pr_err("%s: Unable to get page %u\n",
			       pool->dev_name, i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r)
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
			}
			r = -ENOMEM;
			goto out;
		}
		p = dma_p->p;
#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32, so we
		 * should be fine in such a case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				/* Note: Cannot hold the spinlock */
				r = ttm_set_pages_caching(pool, caching_array,
							  cpages);
				if (r) {
					ttm_dma_handle_caching_state_failure(
						pool, d_pages, caching_array,
						cpages);
					goto out;
				}
				cpages = 0;
			}
		}
		list_add(&dma_p->page_list, d_pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(pool, caching_array, cpages);
		if (r)
			ttm_dma_handle_caching_state_failure(pool, d_pages,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}

/*
 * @return count of pages available in the pool after (re)filling it.
 */
static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool,
					 unsigned long *irq_flags)
{
	unsigned count = _manager->options.small;
	int r = pool->npages_free;

	if (count > pool->npages_free) {
		struct list_head d_pages;

		INIT_LIST_HEAD(&d_pages);

		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		/* Returns zero when all 'count' pages were allocated,
		 * a negative error code otherwise. */
		r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count);

		spin_lock_irqsave(&pool->lock, *irq_flags);
		if (!r) {
			/* Add the fresh pages to the end.. */
			list_splice(&d_pages, &pool->free_list);
			++pool->nrefills;
			pool->npages_free += count;
			r = count;
		} else {
			struct dma_page *d_page;
			unsigned cpages = 0;

			pr_err("%s: Failed to fill %s pool (r:%d)!\n",
			       pool->dev_name, pool->name, r);

			list_for_each_entry(d_page, &d_pages, page_list) {
				cpages++;
			}
			list_splice_tail(&d_pages, &pool->free_list);
			pool->npages_free += cpages;
			r = cpages;
		}
	}
	return r;
}

/*
 * @return 0 on success, or -ENOMEM when the pool could not supply a page.
 * The populate list is actually a stack (not that it matters, as TTM
 * allocates one page at a time).
 */
static int ttm_dma_pool_get_pages(struct dma_pool *pool,
				  struct ttm_dma_tt *ttm_dma,
				  unsigned index)
{
	struct dma_page *d_page;
	struct ttm_tt *ttm = &ttm_dma->ttm;
	unsigned long irq_flags;
	int count, r = -ENOMEM;

	spin_lock_irqsave(&pool->lock, irq_flags);
	count = ttm_dma_page_pool_fill_locked(pool, &irq_flags);
	if (count) {
		d_page = list_first_entry(&pool->free_list, struct dma_page, page_list);
		ttm->pages[index] = d_page->p;
		ttm_dma->dma_address[index] = d_page->dma;
		list_move_tail(&d_page->page_list, &ttm_dma->pages_list);
		r = 0;
		pool->npages_in_use += 1;
		pool->npages_free -= 1;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return r;
}

/*
 * On success the pages list will hold correctly cached pages for all of the
 * ttm's pages; on failure a negative value (-ENOMEM, etc) is returned.
 */
int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	struct dma_pool *pool;
	enum pool_type type;
	unsigned i;
	gfp_t gfp_flags;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	if (ttm->page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags = GFP_USER | GFP_DMA32;
	else
		gfp_flags = GFP_HIGHUSER;
	if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	pool = ttm_dma_find_pool(dev, type);
	if (!pool) {
		pool = ttm_dma_pool_init(dev, gfp_flags, type);
		if (IS_ERR_OR_NULL(pool))
			return -ENOMEM;
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_dma_pool_get_pages(pool, ttm_dma, i);
		if (ret != 0) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_dma_unpopulate(ttm_dma, dev);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
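
/*
 * A minimal sketch of how a driver's ttm_tt backend would use this export
 * (names here are illustrative, not from this file). Drivers such as radeon
 * and nouveau follow this pattern, falling back to the non-DMA pool
 * allocator when SWIOTLB is not active:
 *
 *	static int foo_ttm_tt_populate(struct ttm_tt *ttm)
 *	{
 *		struct foo_device *fdev = foo_from_ttm(ttm);	// hypothetical
 *
 *	#ifdef CONFIG_SWIOTLB
 *		if (swiotlb_nr_tbl())
 *			return ttm_dma_populate(&fdev->ttm_dma, fdev->dev);
 *	#endif
 *		return ttm_pool_populate(ttm);
 *	}
 */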

/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
	struct ttm_tt *ttm = &ttm_dma->ttm;
	struct dma_pool *pool;
	struct dma_page *d_page, *next;
	enum pool_type type;
	bool is_cached = false;
	unsigned count = 0, i, npages = 0;
	unsigned long irq_flags;

	type = ttm_to_type(ttm->page_flags, ttm->caching_state);
	pool = ttm_dma_find_pool(dev, type);
	if (!pool)
		return;

	is_cached = (ttm_dma_find_pool(pool->dev,
		     ttm_to_type(ttm->page_flags, tt_cached)) == pool);

	/* make sure pages array match list and count number of pages */
	list_for_each_entry(d_page, &ttm_dma->pages_list, page_list) {
		ttm->pages[count] = d_page->p;
		count++;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	pool->npages_in_use -= count;
	if (is_cached) {
		pool->nfrees += count;
	} else {
		pool->npages_free += count;
		list_splice(&ttm_dma->pages_list, &pool->free_list);
		/*
		 * Wait to have at least NUM_PAGES_TO_ALLOC number of pages
		 * to free in order to minimize calls to set_memory_wb().
		 */
		if (pool->npages_free >= (_manager->options.max_size +
					  NUM_PAGES_TO_ALLOC))
			npages = pool->npages_free - _manager->options.max_size;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (is_cached) {
		list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, page_list) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 d_page->p);
			ttm_dma_page_put(pool, d_page);
		}
	} else {
		for (i = 0; i < count; i++) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
		}
	}

	INIT_LIST_HEAD(&ttm_dma->pages_list);
	for (i = 0; i < ttm->num_pages; i++) {
		ttm->pages[i] = NULL;
		ttm_dma->dma_address[i] = 0;
	}

	/* shrink pool if necessary (only on !is_cached pools) */
	if (npages)
		ttm_dma_page_pool_free(pool, npages, false);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
 * shrinkers
 */
static unsigned long
ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static unsigned start_pool;
	unsigned idx = 0;
	unsigned pool_offset;
	unsigned shrink_pages = sc->nr_to_scan;
	struct device_pools *p;
	unsigned long freed = 0;

	if (list_empty(&_manager->pools))
		return SHRINK_STOP;

	if (!mutex_trylock(&_manager->lock))
		return SHRINK_STOP;
	if (!_manager->npools)
		goto out;
	pool_offset = ++start_pool % _manager->npools;
	list_for_each_entry(p, &_manager->pools, pools) {
		unsigned nr_free;

		if (!p->dev)
			continue;
		if (shrink_pages == 0)
			break;
		/* Do it in round-robin fashion. */
		if (++idx < pool_offset)
			continue;
		nr_free = shrink_pages;
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
		freed += nr_free - shrink_pages;

		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
			 p->pool->dev_name, p->pool->name, current->pid,
			 nr_free, shrink_pages);
	}
out:
	mutex_unlock(&_manager->lock);
	return freed;
}

static unsigned long
ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct device_pools *p;
	unsigned long count = 0;

	if (!mutex_trylock(&_manager->lock))
		return 0;
	list_for_each_entry(p, &_manager->pools, pools)
		count += p->pool->npages_free;
	mutex_unlock(&_manager->lock);
	return count;
}

static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_dma_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret = -ENOMEM;

	WARN_ON(_manager);

	pr_info("Initializing DMA pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		goto err;

	mutex_init(&_manager->lock);
	INIT_LIST_HEAD(&_manager->pools);

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	/* This takes care of auto-freeing the _manager */
	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "dma_pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		goto err;
	}
	ttm_dma_pool_mm_shrink_init(_manager);
	return 0;
err:
	return ret;
}

void ttm_dma_page_alloc_fini(void)
{
	struct device_pools *p, *t;

	pr_info("Finalizing DMA pool allocator\n");
	ttm_dma_pool_mm_shrink_fini(_manager);

	list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) {
		dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name,
			current->pid);
		WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release,
				       ttm_dma_pool_match, p->pool));
		ttm_dma_free_pool(p->dev, p->pool->type);
	}
	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct device_pools *p;
	struct dma_pool *pool = NULL;
	char *h[] = {"pool", "refills", "pages freed", "inuse", "available",
		     "name", "virt", "busaddr"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%13s %12s %13s %8s %8s %8s\n",
		   h[0], h[1], h[2], h[3], h[4], h[5]);
	mutex_lock(&_manager->lock);
	list_for_each_entry(p, &_manager->pools, pools) {
		struct device *dev = p->dev;
		if (!dev)
			continue;
		pool = p->pool;
		seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n",
			   pool->name, pool->nrefills,
			   pool->nfrees, pool->npages_in_use,
			   pool->npages_free,
			   pool->dev_name);
	}
	mutex_unlock(&_manager->lock);
	return 0;
}
EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs);
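
/*
 * Example of what the debugfs output above might look like (values and
 * device name are illustrative only):
 *
 *          pool      refills   pages freed    inuse available     name
 *      wc dma32          482             0     2048       256     radeon 0000:01:00.0
 */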

#endif