// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Christian König
 */

/* Pooling of allocated pages is necessary because changing the caching
 * attributes of the linear mapping on x86 requires a costly cross-CPU TLB
 * invalidate for those addresses.
 *
 * In addition to that, allocations from the DMA coherent API are pooled as
 * well because they are rather slow compared to alloc_pages+map.
 */

#include <linux/module.h>
#include <linux/dma-mapping.h>

#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include <drm/ttm/ttm_pool.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_tt.h>

/**
 * struct ttm_pool_dma - Helper object for coherent DMA mappings
 *
 * @addr: original DMA address returned for the mapping
 * @vaddr: original vaddr returned for the mapping and order in the lower bits
 */
struct ttm_pool_dma {
        dma_addr_t addr;
        unsigned long vaddr;
};
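
/* The vaddr handed out by dma_alloc_attrs() is page aligned, so its low
 * PAGE_SHIFT bits are zero; ttm_pool_alloc_page() stashes the allocation
 * order there (vaddr | order) and ttm_pool_page_order() retrieves it again
 * with dma->vaddr & ~PAGE_MASK.
 */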

static unsigned long page_pool_size;

MODULE_PARM_DESC(page_pool_size, "Number of pages in the WC/UC/DMA pool");
module_param(page_pool_size, ulong, 0644);

static atomic_long_t allocated_pages;

static struct ttm_pool_type global_write_combined[MAX_ORDER];
static struct ttm_pool_type global_uncached[MAX_ORDER];

static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];

static struct mutex shrinker_lock;
static struct list_head shrinker_list;
static struct shrinker mm_shrinker;

/* Allocate pages of size 1 << order with the given gfp_flags */
static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags,
                                        unsigned int order)
{
        unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
        struct ttm_pool_dma *dma;
        struct page *p;
        void *vaddr;

        /* Don't set the __GFP_COMP flag for higher order allocations.
         * Mapping pages directly into a userspace process and calling
         * put_page() on a TTM allocated page is illegal.
         */
        if (order)
                gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY |
                        __GFP_KSWAPD_RECLAIM;

        if (!pool->use_dma_alloc) {
                p = alloc_pages(gfp_flags, order);
                if (p)
                        p->private = order;
                return p;
        }

        dma = kmalloc(sizeof(*dma), GFP_KERNEL);
        if (!dma)
                return NULL;

        if (order)
                attr |= DMA_ATTR_NO_WARN;

        vaddr = dma_alloc_attrs(pool->dev, (1ULL << order) * PAGE_SIZE,
                                &dma->addr, gfp_flags, attr);
        if (!vaddr)
                goto error_free;

        /* TODO: This is an illegal abuse of the DMA API, but we need to rework
         * TTM page fault handling and extend the DMA API to clean this up.
         */
        if (is_vmalloc_addr(vaddr))
                p = vmalloc_to_page(vaddr);
        else
                p = virt_to_page(vaddr);

        dma->vaddr = (unsigned long)vaddr | order;
        p->private = (unsigned long)dma;
        return p;

error_free:
        kfree(dma);
        return NULL;
}
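
/* Two allocation paths are used above: without use_dma_alloc the order is
 * remembered directly in page->private, with use_dma_alloc a ttm_pool_dma
 * bookkeeping structure is hung off page->private instead.
 */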

/* Reset the caching and pages of size 1 << order */
static void ttm_pool_free_page(struct ttm_pool *pool, enum ttm_caching caching,
                               unsigned int order, struct page *p)
{
        unsigned long attr = DMA_ATTR_FORCE_CONTIGUOUS;
        struct ttm_pool_dma *dma;
        void *vaddr;

#ifdef CONFIG_X86
        /* We don't care that set_pages_wb is inefficient here. This is only
         * used when we have to shrink and CPU overhead is irrelevant then.
         */
        if (caching != ttm_cached && !PageHighMem(p))
                set_pages_wb(p, 1 << order);
#endif

        if (!pool || !pool->use_dma_alloc) {
                __free_pages(p, order);
                return;
        }

        if (order)
                attr |= DMA_ATTR_NO_WARN;

        dma = (void *)p->private;
        vaddr = (void *)(dma->vaddr & PAGE_MASK);
        dma_free_attrs(pool->dev, (1UL << order) * PAGE_SIZE, vaddr, dma->addr,
                       attr);
        kfree(dma);
}

/* Apply a new caching to an array of pages */
static int ttm_pool_apply_caching(struct page **first, struct page **last,
                                  enum ttm_caching caching)
{
#ifdef CONFIG_X86
        unsigned int num_pages = last - first;

        if (!num_pages)
                return 0;

        switch (caching) {
        case ttm_cached:
                break;
        case ttm_write_combined:
                return set_pages_array_wc(first, num_pages);
        case ttm_uncached:
                return set_pages_array_uc(first, num_pages);
        }
#endif
        return 0;
}

/* Map pages of 1 << order size and fill the DMA address array */
static int ttm_pool_map(struct ttm_pool *pool, unsigned int order,
                        struct page *p, dma_addr_t **dma_addr)
{
        dma_addr_t addr;
        unsigned int i;

        if (pool->use_dma_alloc) {
                struct ttm_pool_dma *dma = (void *)p->private;

                addr = dma->addr;
        } else {
                size_t size = (1ULL << order) * PAGE_SIZE;

                addr = dma_map_page(pool->dev, p, 0, size, DMA_BIDIRECTIONAL);
                if (dma_mapping_error(pool->dev, addr))
                        return -EFAULT;
        }

        for (i = 1 << order; i ; --i) {
                *(*dma_addr)++ = addr;
                addr += PAGE_SIZE;
        }

        return 0;
}
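
/* For example, an order 2 page fills four consecutive array entries:
 * addr, addr + PAGE_SIZE, addr + 2 * PAGE_SIZE and addr + 3 * PAGE_SIZE,
 * one for each of the 1 << order backing pages.
 */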

/* Unmap pages of 1 << order size */
static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr,
                           unsigned int num_pages)
{
        /* Unmapped while freeing the page */
        if (pool->use_dma_alloc)
                return;

        dma_unmap_page(pool->dev, dma_addr, (long)num_pages << PAGE_SHIFT,
                       DMA_BIDIRECTIONAL);
}

/* Give pages into a specific pool_type */
static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p)
{
        spin_lock(&pt->lock);
        list_add(&p->lru, &pt->pages);
        spin_unlock(&pt->lock);
        atomic_long_add(1 << pt->order, &allocated_pages);
}

/* Take pages from a specific pool_type, return NULL when nothing available */
static struct page *ttm_pool_type_take(struct ttm_pool_type *pt)
{
        struct page *p;

        spin_lock(&pt->lock);
        p = list_first_entry_or_null(&pt->pages, typeof(*p), lru);
        if (p) {
                atomic_long_sub(1 << pt->order, &allocated_pages);
                list_del(&p->lru);
        }
        spin_unlock(&pt->lock);

        return p;
}
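
/* Give and take are LIFO: list_add() pushes to the list head and
 * list_first_entry_or_null() pops from it, so the most recently returned
 * (and thus likely still cache-hot) pages are reused first.
 */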

/* Initialize and add a pool type to the global shrinker list */
static void ttm_pool_type_init(struct ttm_pool_type *pt, struct ttm_pool *pool,
                               enum ttm_caching caching, unsigned int order)
{
        pt->pool = pool;
        pt->caching = caching;
        pt->order = order;
        spin_lock_init(&pt->lock);
        INIT_LIST_HEAD(&pt->pages);

        mutex_lock(&shrinker_lock);
        list_add_tail(&pt->shrinker_list, &shrinker_list);
        mutex_unlock(&shrinker_lock);
}

/* Remove a pool_type from the global shrinker list and free all pages */
static void ttm_pool_type_fini(struct ttm_pool_type *pt)
{
        struct page *p, *tmp;

        mutex_lock(&shrinker_lock);
        list_del(&pt->shrinker_list);
        mutex_unlock(&shrinker_lock);

        list_for_each_entry_safe(p, tmp, &pt->pages, lru)
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
}

/* Return the pool_type to use for the given caching and order */
static struct ttm_pool_type *ttm_pool_select_type(struct ttm_pool *pool,
                                                  enum ttm_caching caching,
                                                  unsigned int order)
{
        if (pool->use_dma_alloc)
                return &pool->caching[caching].orders[order];

#ifdef CONFIG_X86
        switch (caching) {
        case ttm_write_combined:
                if (pool->use_dma32)
                        return &global_dma32_write_combined[order];

                return &global_write_combined[order];
        case ttm_uncached:
                if (pool->use_dma32)
                        return &global_dma32_uncached[order];

                return &global_uncached[order];
        default:
                break;
        }
#endif

        return NULL;
}
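
/* A NULL return here (e.g. ttm_cached, or WC/UC on non-x86 without DMA
 * allocations) means there is no pool for the combination; callers then
 * allocate and free such pages directly instead of pooling them.
 */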

/* Free pages using the global shrinker list */
static unsigned int ttm_pool_shrink(void)
{
        struct ttm_pool_type *pt;
        unsigned int num_freed;
        struct page *p;

        mutex_lock(&shrinker_lock);
        pt = list_first_entry(&shrinker_list, typeof(*pt), shrinker_list);

        p = ttm_pool_type_take(pt);
        if (p) {
                ttm_pool_free_page(pt->pool, pt->caching, pt->order, p);
                num_freed = 1 << pt->order;
        } else {
                num_freed = 0;
        }

        list_move_tail(&pt->shrinker_list, &shrinker_list);
        mutex_unlock(&shrinker_lock);

        return num_freed;
}
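
/* Note the round robin above: list_move_tail() rotates the pool type that
 * was just shrunken to the end of the list, so successive calls release
 * pages from all registered pool types in turn.
 */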

/* Return the allocation order of a page */
static unsigned int ttm_pool_page_order(struct ttm_pool *pool, struct page *p)
{
        if (pool->use_dma_alloc) {
                struct ttm_pool_dma *dma = (void *)p->private;

                return dma->vaddr & ~PAGE_MASK;
        }

        return p->private;
}

/**
 * ttm_pool_alloc - Fill a ttm_tt object
 *
 * @pool: ttm_pool to use
 * @tt: ttm_tt object to fill
 * @ctx: operation context
 *
 * Fill the ttm_tt object with pages and also make sure to DMA map them when
 * necessary.
 *
 * Returns: 0 on success, negative error code otherwise.
 */
int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
                   struct ttm_operation_ctx *ctx)
{
        unsigned long num_pages = tt->num_pages;
        dma_addr_t *dma_addr = tt->dma_address;
        struct page **caching = tt->pages;
        struct page **pages = tt->pages;
        gfp_t gfp_flags = GFP_USER;
        unsigned int i, order;
        struct page *p;
        int r;

        WARN_ON(!num_pages || ttm_tt_is_populated(tt));
        WARN_ON(dma_addr && !pool->dev);

        if (tt->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        if (ctx->gfp_retry_mayfail)
                gfp_flags |= __GFP_RETRY_MAYFAIL;

        if (pool->use_dma32)
                gfp_flags |= GFP_DMA32;
        else
                gfp_flags |= GFP_HIGHUSER;

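        /* Greedily start with the highest order that still fits into
         * num_pages (capped at MAX_ORDER - 1) and drop one order on each
         * allocation failure; e.g. 1023 pages would ideally be filled as
         * 512+256+128+64+32+16+8+4+2+1 pages of decreasing order.
         */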
        for (order = min(MAX_ORDER - 1UL, __fls(num_pages)); num_pages;
             order = min_t(unsigned int, order, __fls(num_pages))) {
                bool apply_caching = false;
                struct ttm_pool_type *pt;

                pt = ttm_pool_select_type(pool, tt->caching, order);
                p = pt ? ttm_pool_type_take(pt) : NULL;
                if (p) {
                        apply_caching = true;
                } else {
                        p = ttm_pool_alloc_page(pool, gfp_flags, order);
                        if (p && PageHighMem(p))
                                apply_caching = true;
                }

                if (!p) {
                        if (order) {
                                --order;
                                continue;
                        }
                        r = -ENOMEM;
                        goto error_free_all;
                }

                if (apply_caching) {
                        r = ttm_pool_apply_caching(caching, pages,
                                                   tt->caching);
                        if (r)
                                goto error_free_page;
                        caching = pages + (1 << order);
                }

                r = ttm_mem_global_alloc_page(&ttm_mem_glob, p,
                                              (1 << order) * PAGE_SIZE,
                                              ctx);
                if (r)
                        goto error_free_page;

                if (dma_addr) {
                        r = ttm_pool_map(pool, order, p, &dma_addr);
                        if (r)
                                goto error_global_free;
                }

                num_pages -= 1 << order;
                for (i = 1 << order; i; --i)
                        *(pages++) = p++;
        }

        r = ttm_pool_apply_caching(caching, pages, tt->caching);
        if (r)
                goto error_free_all;

        return 0;

error_global_free:
        ttm_mem_global_free_page(&ttm_mem_glob, p, (1 << order) * PAGE_SIZE);

error_free_page:
        ttm_pool_free_page(pool, tt->caching, order, p);

error_free_all:
        num_pages = tt->num_pages - num_pages;
        for (i = 0; i < num_pages; ) {
                order = ttm_pool_page_order(pool, tt->pages[i]);
                ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
                i += 1 << order;
        }

        return r;
}
EXPORT_SYMBOL(ttm_pool_alloc);
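
/* Illustrative sketch: a driver's ttm_tt populate callback would typically
 * just forward to the helper above, e.g.:
 *
 *      static int mydrv_ttm_tt_populate(struct ttm_bo_device *bdev,
 *                                       struct ttm_tt *tt,
 *                                       struct ttm_operation_ctx *ctx)
 *      {
 *              return ttm_pool_alloc(&bdev->pool, tt, ctx);
 *      }
 *
 * "mydrv" is a hypothetical driver name and &bdev->pool an assumed pool
 * location; drivers may keep their ttm_pool elsewhere.
 */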

/**
 * ttm_pool_free - Free the backing pages from a ttm_tt object
 *
 * @pool: Pool to give pages back to.
 * @tt: ttm_tt object to unpopulate
 *
 * Give the backing pages back to a pool or free them
 */
void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
{
        unsigned int i;

        for (i = 0; i < tt->num_pages; ) {
                struct page *p = tt->pages[i];
                unsigned int order, num_pages;
                struct ttm_pool_type *pt;

                order = ttm_pool_page_order(pool, p);
                num_pages = 1ULL << order;
                ttm_mem_global_free_page(&ttm_mem_glob, p,
                                         num_pages * PAGE_SIZE);
                if (tt->dma_address)
                        ttm_pool_unmap(pool, tt->dma_address[i], num_pages);

                pt = ttm_pool_select_type(pool, tt->caching, order);
                if (pt)
                        ttm_pool_type_give(pt, tt->pages[i]);
                else
                        ttm_pool_free_page(pool, tt->caching, order,
                                           tt->pages[i]);

                i += num_pages;
        }

        while (atomic_long_read(&allocated_pages) > page_pool_size)
                ttm_pool_shrink();
}
EXPORT_SYMBOL(ttm_pool_free);
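
/* The mirror of the sketch above: an unpopulate callback would hand the
 * pages back with ttm_pool_free(&bdev->pool, tt) (same hypothetical pool
 * location). Note that this is also the point where the pool is trimmed
 * back under the page_pool_size limit via ttm_pool_shrink().
 */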

/**
 * ttm_pool_init - Initialize a pool
 *
 * @pool: the pool to initialize
 * @dev: device for DMA allocations and mappings
 * @use_dma_alloc: true if coherent DMA alloc should be used
 * @use_dma32: true if GFP_DMA32 should be used
 *
 * Initialize the pool and its pool types.
 */
void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
                   bool use_dma_alloc, bool use_dma32)
{
        unsigned int i, j;

        WARN_ON(!dev && use_dma_alloc);

        pool->dev = dev;
        pool->use_dma_alloc = use_dma_alloc;
        pool->use_dma32 = use_dma32;

        for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
                for (j = 0; j < MAX_ORDER; ++j)
                        ttm_pool_type_init(&pool->caching[i].orders[j],
                                           pool, i, j);
}

/**
 * ttm_pool_fini - Cleanup a pool
 *
 * @pool: the pool to clean up
 *
 * Free all pages in the pool and unregister the types from the global
 * shrinker.
 */
void ttm_pool_fini(struct ttm_pool *pool)
{
        unsigned int i, j;

        for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
                for (j = 0; j < MAX_ORDER; ++j)
                        ttm_pool_type_fini(&pool->caching[i].orders[j]);
}

#ifdef CONFIG_DEBUG_FS
/* Count the number of pages available in a pool_type */
static unsigned int ttm_pool_type_count(struct ttm_pool_type *pt)
{
        unsigned int count = 0;
        struct page *p;

        spin_lock(&pt->lock);
        /* Only used for debugfs, the overhead doesn't matter */
        list_for_each_entry(p, &pt->pages, lru)
                ++count;
        spin_unlock(&pt->lock);

        return count;
}

/* Dump information about the different pool types */
static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
                                    struct seq_file *m)
{
        unsigned int i;

        for (i = 0; i < MAX_ORDER; ++i)
                seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
        seq_puts(m, "\n");
}

/**
 * ttm_pool_debugfs - Debugfs dump function for a pool
 *
 * @pool: the pool to dump the information for
 * @m: seq_file to dump to
 *
 * Make a debugfs dump with the per pool and global information.
 */
int ttm_pool_debugfs(struct ttm_pool *pool, struct seq_file *m)
{
        unsigned int i;

        mutex_lock(&shrinker_lock);

        seq_puts(m, "\t ");
        for (i = 0; i < MAX_ORDER; ++i)
                seq_printf(m, " ---%2u---", i);
        seq_puts(m, "\n");

        seq_puts(m, "wc\t:");
        ttm_pool_debugfs_orders(global_write_combined, m);
        seq_puts(m, "uc\t:");
        ttm_pool_debugfs_orders(global_uncached, m);

        seq_puts(m, "wc 32\t:");
        ttm_pool_debugfs_orders(global_dma32_write_combined, m);
        seq_puts(m, "uc 32\t:");
        ttm_pool_debugfs_orders(global_dma32_uncached, m);

        for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i) {
                seq_puts(m, "DMA ");
                switch (i) {
                case ttm_cached:
                        seq_puts(m, "\t:");
                        break;
                case ttm_write_combined:
                        seq_puts(m, "wc\t:");
                        break;
                case ttm_uncached:
                        seq_puts(m, "uc\t:");
                        break;
                }
                ttm_pool_debugfs_orders(pool->caching[i].orders, m);
        }

        seq_printf(m, "\ntotal\t: %8lu of %8lu\n",
                   atomic_long_read(&allocated_pages), page_pool_size);

        mutex_unlock(&shrinker_lock);

        return 0;
}
EXPORT_SYMBOL(ttm_pool_debugfs);

#endif

/* As long as pages are available make sure to release at least one */
static unsigned long ttm_pool_shrinker_scan(struct shrinker *shrink,
                                            struct shrink_control *sc)
{
        unsigned long num_freed = 0;

        do
                num_freed += ttm_pool_shrink();
        while (!num_freed && atomic_long_read(&allocated_pages));

        return num_freed;
}
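
/* The do/while above always attempts at least one shrink per scan and
 * keeps going until something was actually freed or all pools are empty,
 * matching the "release at least one" promise in the comment.
 */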

/* Return the number of pages available or SHRINK_EMPTY if we have none */
static unsigned long ttm_pool_shrinker_count(struct shrinker *shrink,
                                             struct shrink_control *sc)
{
        unsigned long num_pages = atomic_long_read(&allocated_pages);

        return num_pages ? num_pages : SHRINK_EMPTY;
}

/**
 * ttm_pool_mgr_init - Initialize globals
 *
 * @num_pages: default number of pages
 *
 * Initialize the global locks and lists for the MM shrinker.
 */
int ttm_pool_mgr_init(unsigned long num_pages)
{
        unsigned int i;

        if (!page_pool_size)
                page_pool_size = num_pages;

        mutex_init(&shrinker_lock);
        INIT_LIST_HEAD(&shrinker_list);

        for (i = 0; i < MAX_ORDER; ++i) {
                ttm_pool_type_init(&global_write_combined[i], NULL,
                                   ttm_write_combined, i);
                ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);

                ttm_pool_type_init(&global_dma32_write_combined[i], NULL,
                                   ttm_write_combined, i);
                ttm_pool_type_init(&global_dma32_uncached[i], NULL,
                                   ttm_uncached, i);
        }

        mm_shrinker.count_objects = ttm_pool_shrinker_count;
        mm_shrinker.scan_objects = ttm_pool_shrinker_scan;
        mm_shrinker.seeks = 1;
        return register_shrinker(&mm_shrinker);
}

/**
 * ttm_pool_mgr_fini - Finalize globals
 *
 * Cleanup the global pools and unregister the MM shrinker.
 */
void ttm_pool_mgr_fini(void)
{
        unsigned int i;

        for (i = 0; i < MAX_ORDER; ++i) {
                ttm_pool_type_fini(&global_write_combined[i]);
                ttm_pool_type_fini(&global_uncached[i]);

                ttm_pool_type_fini(&global_dma32_write_combined[i]);
                ttm_pool_type_fini(&global_dma32_uncached[i]);
        }

        unregister_shrinker(&mm_shrinker);
        WARN_ON(!list_empty(&shrinker_list));
}