/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name, printed in debugfs output.
 * @nfrees: Statistics counter of pages freed from this pool.
 * @nrefills: Statistics counter of pool refills.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 6

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj: kobject exposing the pool limits in sysfs.
 * @mm_shrink: Shrinker registered with the mm subsystem so the pools can be
 * drained under memory pressure.
 * @options: Tunable limits for the pools; see struct ttm_pool_opts.
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
			struct ttm_page_pool	wc_pool_huge;
			struct ttm_page_pool	uc_pool_huge;
		};
	};
};

static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
					  enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32) {
		if (huge)
			return NULL;
		pool_index |= 0x2;

	} else if (huge) {
		pool_index |= 0x4;
	}

	return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;

	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, free all pages in the pool
 * @use_static: Safe to use static buffer
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
			      bool use_static)
{
	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	if (use_static)
		pages_to_free = static_buf;
	else
		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
					GFP_KERNEL);
	if (!pages_to_free) {
		pr_err("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;

		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	if (pages_to_free != static_buf)
		kfree(pages_to_free);
	return nr_free;
}

/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	static unsigned start_pool;
	unsigned i;
	unsigned pool_offset;
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;

	if (!mutex_trylock(&lock))
		return SHRINK_STOP;
	pool_offset = ++start_pool % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;
		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		/* OK to use static buffer since global mutex is held. */
		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
		freed += nr_free - shrink_pages;
	}
	mutex_unlock(&lock);
	return freed;
}


static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;

	for (i = 0; i < NUM_POOLS; ++i)
		count += _manager->pools[i].npages;

	return count;
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;
	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}

/**
 * Free the pages that failed to change their caching state. Any pages that
 * have already changed their caching state stay on the pages list and are
 * returned to the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;
	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if caller updates count depending on number of
 * pages returned in pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
			       int ttm_flags, enum ttm_caching_state cstate,
			       unsigned count, unsigned order)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, j, cpages;
	unsigned npages = 1 << order;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

	if (!caching_array) {
		pr_err("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_pages(gfp_flags, order);

		if (!p) {
			pr_err("Unable to get page %u\n", i);

			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

		list_add(&p->lru, pages);

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (PageHighMem(p))
			continue;

#endif
		for (j = 0; j < npages; ++j) {
			caching_array[cpages++] = p++;
			if (cpages == max_cpages) {

				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);

	return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number of
 * pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags,
				      enum ttm_caching_state cstate,
				      unsigned count, unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If the pool doesn't have enough pages for the allocation, new pages
	 * are allocated from outside of the pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
					cstate, alloc_size, 0);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}

	}
	pool->fill_lock = false;
}

/**
 * Allocate pages from the pool and put them on the return list.
 *
 * @return zero for success or negative error code.
 */
static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
				   struct list_head *pages,
				   int ttm_flags,
				   enum ttm_caching_state cstate,
				   unsigned count, unsigned order)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;
	int r = 0;

	spin_lock_irqsave(&pool->lock, irq_flags);
	if (!order)
		ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
					  &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* find the last page to include for the requested number of pages.
	 * Walk from whichever end of the list is closer to the cut point to
	 * halve the search space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	/* clear the pages coming from the pool if requested */
	if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		struct page *page;

		list_for_each_entry(page, pages, lru) {
			if (PageHighMem(page))
				clear_highpage(page);
			else
				clear_page(page_address(page));
		}
	}

	/* If the pool didn't have enough pages, allocate new ones. */
	if (count) {
		gfp_t gfp_flags = pool->gfp_flags;

		/* set zero flag for page allocation if required */
		if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
					count, order);
	}

	return r;
}

/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
#endif
	unsigned long irq_flags;
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		i = 0;
		while (i < npages) {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			struct page *p = pages[i];
#endif
			unsigned order = 0, j;

			if (!pages[i]) {
				++i;
				continue;
			}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			for (j = 0; j < HPAGE_PMD_NR; ++j)
				if (p++ != pages[i + j])
					break;

			if (j == HPAGE_PMD_NR)
				order = HPAGE_PMD_ORDER;
#endif

			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			__free_pages(pages[i], order);

			j = 1 << order;
			while (j) {
				pages[i++] = NULL;
				--j;
			}
		}
		return;
	}

	i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (huge) {
		unsigned max_size, n2free;

		spin_lock_irqsave(&huge->lock, irq_flags);
		while (i < npages) {
			struct page *p = pages[i];
			unsigned j;

			if (!p)
				break;

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				if (p++ != pages[i + j])
					break;

			if (j != HPAGE_PMD_NR)
				break;

			list_add_tail(&pages[i]->lru, &huge->list);

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[i++] = NULL;
			huge->npages++;
		}

		/* Check that we don't go over the pool limit */
		max_size = _manager->options.max_size;
		max_size /= HPAGE_PMD_NR;
		if (huge->npages > max_size)
			n2free = huge->npages - max_size;
		else
			n2free = 0;
		spin_unlock_irqrestore(&huge->lock, irq_flags);
		if (n2free)
			ttm_page_pool_free(huge, n2free, false);
	}
#endif

	spin_lock_irqsave(&pool->lock, irq_flags);
	while (i < npages) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
		++i;
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages, false);
}

/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
#endif
	struct list_head plist;
	struct page *p = NULL;
	unsigned count;
	int r;

	/* No pool for cached pages */
	if (pool == NULL) {
		gfp_t gfp_flags = GFP_USER;
		unsigned i;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		unsigned j;
#endif

		/* set zero flag for page allocation if required */
		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		while (npages >= HPAGE_PMD_NR) {
			gfp_t huge_flags = gfp_flags;

			huge_flags |= GFP_TRANSHUGE;
			huge_flags &= ~__GFP_MOVABLE;
			huge_flags &= ~__GFP_COMP;
			p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
			if (!p)
				break;

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[i++] = p++;

			npages -= HPAGE_PMD_NR;
		}
#endif

		while (npages) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}

			pages[i++] = p;
			--npages;
		}
		return 0;
	}

	count = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (huge && npages >= HPAGE_PMD_NR) {
		INIT_LIST_HEAD(&plist);
		ttm_page_pool_get_pages(huge, &plist, flags, cstate,
					npages / HPAGE_PMD_NR,
					HPAGE_PMD_ORDER);

		list_for_each_entry(p, &plist, lru) {
			unsigned j;

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[count++] = &p[j];
		}
	}
#endif

	INIT_LIST_HEAD(&plist);
	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
				    npages - count, 0);

	list_for_each_entry(p, &plist, lru)
		pages[count++] = p;

	if (r) {
		/* If there are any pages on the list, put them back to the
		 * pool.
		 */
		pr_err("Failed to allocate extra pages for large request\n");
		ttm_put_pages(pages, count, flags, cstate);
		return r;
	}

	return 0;
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
		char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	WARN_ON(_manager);

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	ttm_page_pool_init_locked(&_manager->wc_pool_huge,
				  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
				  "wc huge");

	ttm_page_pool_init_locked(&_manager->uc_pool_huge,
				  GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP),
				  "uc huge");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	/* OK to use static buffer since global mutex is no longer used. */
	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
			    ttm->caching_state);
	if (unlikely(ret != 0)) {
		ttm_pool_unpopulate(ttm);
		return ret;
	}

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						PAGE_SIZE);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (!ttm->pages[i])
			continue;

		ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i],
					 PAGE_SIZE);
	}
	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
		      ttm->caching_state);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);

#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt)
{
	unsigned i, j;
	int r;

	r = ttm_pool_populate(&tt->ttm);
	if (r)
		return r;

	for (i = 0; i < tt->ttm.num_pages; ++i) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i],
						  0, num_pages * PAGE_SIZE,
						  DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev, tt->dma_address[i])) {
			while (i--) {
				dma_unmap_page(dev, tt->dma_address[i],
					       PAGE_SIZE, DMA_BIDIRECTIONAL);
				tt->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(&tt->ttm);
			return -EFAULT;
		}

		for (j = 1; j < num_pages; ++j) {
			tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE;
			++i;
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_populate_and_map_pages);

void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt)
{
	unsigned i, j;

	for (i = 0; i < tt->ttm.num_pages;) {
		struct page *p = tt->ttm.pages[i];
		size_t num_pages = 1;

		if (!tt->dma_address[i] || !tt->ttm.pages[i]) {
			++i;
			continue;
		}

		for (j = i + 1; j < tt->ttm.num_pages; ++j) {
			if (++p != tt->ttm.pages[j])
				break;

			++num_pages;
		}

		dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

		i += num_pages;
	}
	ttm_pool_unpopulate(&tt->ttm);
}
EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages);
#endif

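/*
 * Both helpers above coalesce runs of physically contiguous pages (such as
 * a transparent huge page split across tt->ttm.pages[]) into a single
 * dma_map_page()/dma_unmap_page() call, which keeps the number of IOMMU
 * mappings down for huge page allocations.
 */
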
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%7s %12s %13s %8s\n",
		   h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%7s %12ld %13ld %8d\n",
			   p->name, p->nrefills,
			   p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
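
/*
 * Example debugfs output (illustrative numbers only), following the
 * seq_printf() format strings above:
 *
 *	   pool      refills   pages freed     size
 *	     wc           12          5120      512
 *	     uc            0             0        0
 */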