/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
27 | ||
28 | /* simple list based uncached page pool | |
29 | * - Pool collects resently freed pages for reuse | |
30 | * - Use page->lru to keep a free list | |
31 | * - doesn't track currently in use pages | |
32 | */ | |
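
/*
 * Rough usage sketch of the public API in this file (illustrative only,
 * not compiled, and with all error handling omitted):
 *
 *	ttm_page_alloc_init(glob, max_pages);	// once, at init time
 *
 *	ttm_pool_populate(ttm);		// fill a ttm_tt from the pools
 *	ttm_pool_unpopulate(ttm);	// give the pages back
 *
 *	ttm_page_alloc_fini();		// once, at teardown
 *
 * All four functions are defined later in this file.
 */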

#define pr_fmt(fmt) "[TTM] " fmt

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>

#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
#endif
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000
63 | ||
64 | /** | |
65 | * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages. | |
66 | * | |
67 | * @lock: Protects the shared pool from concurrnet access. Must be used with | |
68 | * irqsave/irqrestore variants because pool allocator maybe called from | |
69 | * delayed work. | |
70 | * @fill_lock: Prevent concurrent calls to fill. | |
71 | * @list: Pool of free uc/wc pages for fast reuse. | |
72 | * @gfp_flags: Flags to pass for alloc_page. | |
73 | * @npages: Number of pages in pool. | |
74 | */ | |
75 | struct ttm_page_pool { | |
76 | spinlock_t lock; | |
77 | bool fill_lock; | |
78 | struct list_head list; | |
0e57a3cc | 79 | gfp_t gfp_flags; |
1403b1a3 | 80 | unsigned npages; |
07458661 PN |
81 | char *name; |
82 | unsigned long nfrees; | |
83 | unsigned long nrefills; | |
6d5e4e32 | 84 | unsigned int order; |
1403b1a3 PN |
85 | }; |
86 | ||
c96af79e PN |
87 | /** |
88 | * Limits for the pool. They are handled without locks because only place where | |
89 | * they may change is in sysfs store. They won't have immediate effect anyway | |
4abe4389 | 90 | * so forcing serialization to access them is pointless. |
c96af79e PN |
91 | */ |
92 | ||
1403b1a3 PN |
93 | struct ttm_pool_opts { |
94 | unsigned alloc_size; | |
95 | unsigned max_size; | |
96 | unsigned small; | |
97 | }; | |
98 | ||
6ed4e2e6 | 99 | #define NUM_POOLS 6 |
1403b1a3 PN |
100 | |
101 | /** | |
102 | * struct ttm_pool_manager - Holds memory pools for fst allocation | |
103 | * | |
104 | * Manager is read only object for pool code so it doesn't need locking. | |
105 | * | |
106 | * @free_interval: minimum number of jiffies between freeing pages from pool. | |
107 | * @page_alloc_inited: reference counting for pool allocation. | |
108 | * @work: Work that is used to shrink the pool. Work is only run when there is | |
109 | * some pages to free. | |
110 | * @small_allocation: Limit in number of pages what is small allocation. | |
111 | * | |
112 | * @pools: All pool objects in use. | |
113 | **/ | |
114 | struct ttm_pool_manager { | |
c96af79e | 115 | struct kobject kobj; |
1403b1a3 | 116 | struct shrinker mm_shrink; |
1403b1a3 PN |
117 | struct ttm_pool_opts options; |
118 | ||
119 | union { | |
120 | struct ttm_page_pool pools[NUM_POOLS]; | |
121 | struct { | |
122 | struct ttm_page_pool wc_pool; | |
123 | struct ttm_page_pool uc_pool; | |
124 | struct ttm_page_pool wc_pool_dma32; | |
125 | struct ttm_page_pool uc_pool_dma32; | |
6ed4e2e6 CK |
126 | struct ttm_page_pool wc_pool_huge; |
127 | struct ttm_page_pool uc_pool_huge; | |
1403b1a3 PN |
128 | } ; |
129 | }; | |
130 | }; | |
131 | ||
c96af79e PN |
132 | static struct attribute ttm_page_pool_max = { |
133 | .name = "pool_max_size", | |
134 | .mode = S_IRUGO | S_IWUSR | |
135 | }; | |
136 | static struct attribute ttm_page_pool_small = { | |
137 | .name = "pool_small_allocation", | |
138 | .mode = S_IRUGO | S_IWUSR | |
139 | }; | |
140 | static struct attribute ttm_page_pool_alloc_size = { | |
141 | .name = "pool_allocation_size", | |
142 | .mode = S_IRUGO | S_IWUSR | |
143 | }; | |
144 | ||
145 | static struct attribute *ttm_pool_attrs[] = { | |
146 | &ttm_page_pool_max, | |
147 | &ttm_page_pool_small, | |
148 | &ttm_page_pool_alloc_size, | |
149 | NULL | |
150 | }; | |
151 | ||
152 | static void ttm_pool_kobj_release(struct kobject *kobj) | |
153 | { | |
154 | struct ttm_pool_manager *m = | |
155 | container_of(kobj, struct ttm_pool_manager, kobj); | |
5870a4d9 | 156 | kfree(m); |
c96af79e PN |
157 | } |
158 | ||
159 | static ssize_t ttm_pool_store(struct kobject *kobj, | |
160 | struct attribute *attr, const char *buffer, size_t size) | |
161 | { | |
162 | struct ttm_pool_manager *m = | |
163 | container_of(kobj, struct ttm_pool_manager, kobj); | |
164 | int chars; | |
165 | unsigned val; | |
166 | chars = sscanf(buffer, "%u", &val); | |
167 | if (chars == 0) | |
168 | return size; | |
169 | ||
170 | /* Convert kb to number of pages */ | |
171 | val = val / (PAGE_SIZE >> 10); | |
172 | ||
173 | if (attr == &ttm_page_pool_max) | |
174 | m->options.max_size = val; | |
175 | else if (attr == &ttm_page_pool_small) | |
176 | m->options.small = val; | |
177 | else if (attr == &ttm_page_pool_alloc_size) { | |
178 | if (val > NUM_PAGES_TO_ALLOC*8) { | |
25d0479a | 179 | pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n", |
4abe4389 TH |
180 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), |
181 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | |
c96af79e PN |
182 | return size; |
183 | } else if (val > NUM_PAGES_TO_ALLOC) { | |
25d0479a JP |
184 | pr_warn("Setting allocation size to larger than %lu is not recommended\n", |
185 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | |
c96af79e PN |
186 | } |
187 | m->options.alloc_size = val; | |
188 | } | |
189 | ||
190 | return size; | |
191 | } | |
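
/*
 * Worked example for the unit conversion above: the sysfs files take values
 * in KiB and ttm_pool_store() divides by (PAGE_SIZE >> 10) to get pages.
 * Assuming 4 KiB pages, PAGE_SIZE >> 10 == 4, so writing "16384" to
 * pool_max_size yields 16384 / 4 == 4096 pages (16 MiB) as the pool limit.
 */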
192 | ||
193 | static ssize_t ttm_pool_show(struct kobject *kobj, | |
194 | struct attribute *attr, char *buffer) | |
195 | { | |
196 | struct ttm_pool_manager *m = | |
197 | container_of(kobj, struct ttm_pool_manager, kobj); | |
198 | unsigned val = 0; | |
199 | ||
200 | if (attr == &ttm_page_pool_max) | |
201 | val = m->options.max_size; | |
202 | else if (attr == &ttm_page_pool_small) | |
203 | val = m->options.small; | |
204 | else if (attr == &ttm_page_pool_alloc_size) | |
205 | val = m->options.alloc_size; | |
206 | ||
207 | val = val * (PAGE_SIZE >> 10); | |
208 | ||
209 | return snprintf(buffer, PAGE_SIZE, "%u\n", val); | |
210 | } | |
211 | ||
212 | static const struct sysfs_ops ttm_pool_sysfs_ops = { | |
213 | .show = &ttm_pool_show, | |
214 | .store = &ttm_pool_store, | |
215 | }; | |
216 | ||
217 | static struct kobj_type ttm_pool_kobj_type = { | |
218 | .release = &ttm_pool_kobj_release, | |
219 | .sysfs_ops = &ttm_pool_sysfs_ops, | |
220 | .default_attrs = ttm_pool_attrs, | |
221 | }; | |
222 | ||
5870a4d9 | 223 | static struct ttm_pool_manager *_manager; |

#ifndef CONFIG_X86
static int set_pages_wb(struct page *page, int numpages)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < numpages; i++)
		unmap_page_from_agp(page++);
#endif
	return 0;
}

static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
#endif
	return 0;
}
#endif /* CONFIG_X86 */
270 | ||
271 | /** | |
272 | * Select the right pool or requested caching state and ttm flags. */ | |
6ed4e2e6 CK |
273 | static struct ttm_page_pool *ttm_get_pool(int flags, bool huge, |
274 | enum ttm_caching_state cstate) | |
1403b1a3 PN |
275 | { |
276 | int pool_index; | |
277 | ||
278 | if (cstate == tt_cached) | |
279 | return NULL; | |
280 | ||
281 | if (cstate == tt_wc) | |
282 | pool_index = 0x0; | |
283 | else | |
284 | pool_index = 0x1; | |
285 | ||
6ed4e2e6 CK |
286 | if (flags & TTM_PAGE_FLAG_DMA32) { |
287 | if (huge) | |
288 | return NULL; | |
1403b1a3 PN |
289 | pool_index |= 0x2; |
290 | ||
6ed4e2e6 CK |
291 | } else if (huge) { |
292 | pool_index |= 0x4; | |
293 | } | |
294 | ||
5870a4d9 | 295 | return &_manager->pools[pool_index]; |
1403b1a3 PN |
296 | } |
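
/*
 * The index computed above encodes caching in bit 0, DMA32 in bit 1 and
 * huge pages in bit 2, matching the pools[] layout of ttm_pool_manager:
 *
 *	0x0 wc_pool		0x1 uc_pool
 *	0x2 wc_pool_dma32	0x3 uc_pool_dma32
 *	0x4 wc_pool_huge	0x5 uc_pool_huge
 *
 * tt_cached and the huge+DMA32 combination have no pool and map to NULL.
 */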

/* Set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages,
		unsigned int order)
{
	unsigned int i, pages_nr = (1 << order);

	if (order == 0) {
		if (set_pages_array_wb(pages, npages))
			pr_err("Failed to set %d pages to wb!\n", npages);
	}

	for (i = 0; i < npages; ++i) {
		if (order > 0) {
			if (set_pages_wb(pages[i], pages_nr))
				pr_err("Failed to set %d pages to wb!\n", pages_nr);
		}
		__free_pages(pages[i], order);
	}
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
		unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}
324 | ||
325 | /** | |
326 | * Free pages from pool. | |
327 | * | |
328 | * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC | |
329 | * number of pages in one go. | |
330 | * | |
331 | * @pool: to free the pages from | |
332 | * @free_all: If set to true will free all pages in pool | |
881fdaa5 | 333 | * @use_static: Safe to use static buffer |
1403b1a3 | 334 | **/ |
a91576d7 | 335 | static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free, |
881fdaa5 | 336 | bool use_static) |
1403b1a3 | 337 | { |
881fdaa5 | 338 | static struct page *static_buf[NUM_PAGES_TO_ALLOC]; |
1403b1a3 PN |
339 | unsigned long irq_flags; |
340 | struct page *p; | |
341 | struct page **pages_to_free; | |
342 | unsigned freed_pages = 0, | |
343 | npages_to_free = nr_free; | |
344 | ||
345 | if (NUM_PAGES_TO_ALLOC < nr_free) | |
346 | npages_to_free = NUM_PAGES_TO_ALLOC; | |
347 | ||
881fdaa5 TH |
348 | if (use_static) |
349 | pages_to_free = static_buf; | |
350 | else | |
351 | pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), | |
352 | GFP_KERNEL); | |
1403b1a3 | 353 | if (!pages_to_free) { |
767601d1 | 354 | pr_debug("Failed to allocate memory for pool free operation\n"); |
1403b1a3 PN |
355 | return 0; |
356 | } | |
357 | ||
358 | restart: | |
359 | spin_lock_irqsave(&pool->lock, irq_flags); | |
360 | ||
361 | list_for_each_entry_reverse(p, &pool->list, lru) { | |
362 | if (freed_pages >= npages_to_free) | |
363 | break; | |
364 | ||
365 | pages_to_free[freed_pages++] = p; | |
366 | /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ | |
367 | if (freed_pages >= NUM_PAGES_TO_ALLOC) { | |
368 | /* remove range of pages from the pool */ | |
369 | __list_del(p->lru.prev, &pool->list); | |
370 | ||
371 | ttm_pool_update_free_locked(pool, freed_pages); | |
372 | /** | |
373 | * Because changing page caching is costly | |
374 | * we unlock the pool to prevent stalling. | |
375 | */ | |
376 | spin_unlock_irqrestore(&pool->lock, irq_flags); | |
377 | ||
bae5c5b5 | 378 | ttm_pages_put(pages_to_free, freed_pages, pool->order); |
1403b1a3 PN |
379 | if (likely(nr_free != FREE_ALL_PAGES)) |
380 | nr_free -= freed_pages; | |
381 | ||
382 | if (NUM_PAGES_TO_ALLOC >= nr_free) | |
383 | npages_to_free = nr_free; | |
384 | else | |
385 | npages_to_free = NUM_PAGES_TO_ALLOC; | |
386 | ||
387 | freed_pages = 0; | |
388 | ||
389 | /* free all so restart the processing */ | |
390 | if (nr_free) | |
391 | goto restart; | |
392 | ||
0d74f86f | 393 | /* Not allowed to fall through or break because |
1403b1a3 PN |
394 | * following context is inside spinlock while we are |
395 | * outside here. | |
396 | */ | |
397 | goto out; | |
398 | ||
399 | } | |
400 | } | |
401 | ||
1403b1a3 PN |
402 | /* remove range of pages from the pool */ |
403 | if (freed_pages) { | |
404 | __list_del(&p->lru, &pool->list); | |
405 | ||
406 | ttm_pool_update_free_locked(pool, freed_pages); | |
407 | nr_free -= freed_pages; | |
408 | } | |
409 | ||
410 | spin_unlock_irqrestore(&pool->lock, irq_flags); | |
411 | ||
412 | if (freed_pages) | |
bae5c5b5 | 413 | ttm_pages_put(pages_to_free, freed_pages, pool->order); |
1403b1a3 | 414 | out: |
881fdaa5 TH |
415 | if (pages_to_free != static_buf) |
416 | kfree(pages_to_free); | |
1403b1a3 PN |
417 | return nr_free; |
418 | } | |
419 | ||
1403b1a3 | 420 | /** |
4abe4389 | 421 | * Callback for mm to request pool to reduce number of page held. |
7dc19d5a DC |
422 | * |
423 | * XXX: (dchinner) Deadlock warning! | |
424 | * | |
7dc19d5a | 425 | * This code is crying out for a shrinker per pool.... |
1403b1a3 | 426 | */ |
7dc19d5a DC |
427 | static unsigned long |
428 | ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) | |
1403b1a3 | 429 | { |
71336e01 TH |
430 | static DEFINE_MUTEX(lock); |
431 | static unsigned start_pool; | |
1403b1a3 | 432 | unsigned i; |
71336e01 | 433 | unsigned pool_offset; |
1403b1a3 | 434 | struct ttm_page_pool *pool; |
1495f230 | 435 | int shrink_pages = sc->nr_to_scan; |
7dc19d5a | 436 | unsigned long freed = 0; |
6d5e4e32 | 437 | unsigned int nr_free_pool; |
1403b1a3 | 438 | |
71336e01 TH |
439 | if (!mutex_trylock(&lock)) |
440 | return SHRINK_STOP; | |
441 | pool_offset = ++start_pool % NUM_POOLS; | |
1403b1a3 PN |
442 | /* select start pool in round robin fashion */ |
443 | for (i = 0; i < NUM_POOLS; ++i) { | |
444 | unsigned nr_free = shrink_pages; | |
2bf257d6 RH |
445 | unsigned page_nr; |
446 | ||
1403b1a3 PN |
447 | if (shrink_pages == 0) |
448 | break; | |
6d5e4e32 | 449 | |
5870a4d9 | 450 | pool = &_manager->pools[(i + pool_offset)%NUM_POOLS]; |
2bf257d6 | 451 | page_nr = (1 << pool->order); |
881fdaa5 | 452 | /* OK to use static buffer since global mutex is held. */ |
2bf257d6 | 453 | nr_free_pool = roundup(nr_free, page_nr) >> pool->order; |
6d5e4e32 | 454 | shrink_pages = ttm_page_pool_free(pool, nr_free_pool, true); |
2bf257d6 RH |
455 | freed += (nr_free_pool - shrink_pages) << pool->order; |
456 | if (freed >= sc->nr_to_scan) | |
457 | break; | |
a782fc8c | 458 | shrink_pages <<= pool->order; |
1403b1a3 | 459 | } |
71336e01 | 460 | mutex_unlock(&lock); |
7dc19d5a DC |
461 | return freed; |
462 | } | |
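
/*
 * Worked example of the order-aware accounting above (assuming x86-64 with
 * 4 KiB pages, where HPAGE_PMD_ORDER == 9 and thus page_nr == 512): with
 * sc->nr_to_scan == 100 and a huge pool selected, nr_free_pool becomes
 * roundup(100, 512) >> 9 == 1 pool entry, and freeing that single entry
 * advances 'freed' by 1 << 9 == 512 base pages.
 */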

static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;
	struct ttm_page_pool *pool;

	for (i = 0; i < NUM_POOLS; ++i) {
		pool = &_manager->pools[i];
		count += (pool->npages << pool->order);
	}

	return count;
}
479 | ||
480 | static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager) | |
481 | { | |
7dc19d5a DC |
482 | manager->mm_shrink.count_objects = ttm_pool_shrink_count; |
483 | manager->mm_shrink.scan_objects = ttm_pool_shrink_scan; | |
1403b1a3 PN |
484 | manager->mm_shrink.seeks = 1; |
485 | register_shrinker(&manager->mm_shrink); | |
486 | } | |
487 | ||
488 | static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) | |
489 | { | |
490 | unregister_shrinker(&manager->mm_shrink); | |
491 | } | |

static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}
514 | ||
515 | /** | |
516 | * Free pages the pages that failed to change the caching state. If there is | |
517 | * any pages that have changed their caching state already put them to the | |
518 | * pool. | |
519 | */ | |
520 | static void ttm_handle_caching_state_failure(struct list_head *pages, | |
521 | int ttm_flags, enum ttm_caching_state cstate, | |
522 | struct page **failed_pages, unsigned cpages) | |
523 | { | |
524 | unsigned i; | |
4abe4389 | 525 | /* Failed pages have to be freed */ |
1403b1a3 PN |
526 | for (i = 0; i < cpages; ++i) { |
527 | list_del(&failed_pages[i]->lru); | |
528 | __free_page(failed_pages[i]); | |
529 | } | |
530 | } | |
531 | ||
532 | /** | |
533 | * Allocate new pages with correct caching. | |
534 | * | |
535 | * This function is reentrant if caller updates count depending on number of | |
536 | * pages returned in pages array. | |
537 | */ | |
0e57a3cc | 538 | static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, |
6ed4e2e6 CK |
539 | int ttm_flags, enum ttm_caching_state cstate, |
540 | unsigned count, unsigned order) | |
1403b1a3 PN |
541 | { |
542 | struct page **caching_array; | |
543 | struct page *p; | |
544 | int r = 0; | |
6ed4e2e6 CK |
545 | unsigned i, j, cpages; |
546 | unsigned npages = 1 << order; | |
13d3fc69 | 547 | unsigned max_cpages = min(count << order, (unsigned)NUM_PAGES_TO_ALLOC); |
1403b1a3 PN |
548 | |
549 | /* allocate array for page caching change */ | |
550 | caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); | |
551 | ||
552 | if (!caching_array) { | |
767601d1 | 553 | pr_debug("Unable to allocate table for new pages\n"); |
1403b1a3 PN |
554 | return -ENOMEM; |
555 | } | |
556 | ||
557 | for (i = 0, cpages = 0; i < count; ++i) { | |
6ed4e2e6 | 558 | p = alloc_pages(gfp_flags, order); |
1403b1a3 PN |
559 | |
560 | if (!p) { | |
767601d1 | 561 | pr_debug("Unable to get page %u\n", i); |
1403b1a3 PN |
562 | |
563 | /* store already allocated pages in the pool after | |
564 | * setting the caching state */ | |
565 | if (cpages) { | |
4abe4389 TH |
566 | r = ttm_set_pages_caching(caching_array, |
567 | cstate, cpages); | |
1403b1a3 PN |
568 | if (r) |
569 | ttm_handle_caching_state_failure(pages, | |
570 | ttm_flags, cstate, | |
571 | caching_array, cpages); | |
572 | } | |
573 | r = -ENOMEM; | |
574 | goto out; | |
575 | } | |
576 | ||
6ed4e2e6 CK |
577 | list_add(&p->lru, pages); |
578 | ||
1403b1a3 PN |
579 | #ifdef CONFIG_HIGHMEM |
580 | /* gfp flags of highmem page should never be dma32 so we | |
581 | * we should be fine in such case | |
582 | */ | |
6ed4e2e6 CK |
583 | if (PageHighMem(p)) |
584 | continue; | |
585 | ||
1403b1a3 | 586 | #endif |
6ed4e2e6 CK |
587 | for (j = 0; j < npages; ++j) { |
588 | caching_array[cpages++] = p++; | |
1403b1a3 PN |
589 | if (cpages == max_cpages) { |
590 | ||
591 | r = ttm_set_pages_caching(caching_array, | |
592 | cstate, cpages); | |
593 | if (r) { | |
594 | ttm_handle_caching_state_failure(pages, | |
595 | ttm_flags, cstate, | |
596 | caching_array, cpages); | |
597 | goto out; | |
598 | } | |
599 | cpages = 0; | |
600 | } | |
601 | } | |
1403b1a3 PN |
602 | } |
603 | ||
604 | if (cpages) { | |
605 | r = ttm_set_pages_caching(caching_array, cstate, cpages); | |
606 | if (r) | |
607 | ttm_handle_caching_state_failure(pages, | |
608 | ttm_flags, cstate, | |
609 | caching_array, cpages); | |
610 | } | |
611 | out: | |
612 | kfree(caching_array); | |
613 | ||
614 | return r; | |
615 | } | |
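
/*
 * Note on the batching above: caching transitions are flushed whenever
 * cpages reaches max_cpages, which is capped at NUM_PAGES_TO_ALLOC.
 * Assuming a 64-bit machine with 4 KiB pages, that cap is
 * PAGE_SIZE / sizeof(struct page *) == 4096 / 8 == 512 pages per
 * set_pages_array_*() call.
 */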
616 | ||
617 | /** | |
0d74f86f | 618 | * Fill the given pool if there aren't enough pages and the requested number of |
1403b1a3 PN |
619 | * pages is small. |
620 | */ | |
6ed4e2e6 CK |
621 | static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags, |
622 | enum ttm_caching_state cstate, | |
623 | unsigned count, unsigned long *irq_flags) | |
1403b1a3 PN |
624 | { |
625 | struct page *p; | |
626 | int r; | |
627 | unsigned cpages = 0; | |
628 | /** | |
629 | * Only allow one pool fill operation at a time. | |
630 | * If pool doesn't have enough pages for the allocation new pages are | |
631 | * allocated from outside of pool. | |
632 | */ | |
633 | if (pool->fill_lock) | |
634 | return; | |
635 | ||
636 | pool->fill_lock = true; | |
637 | ||
0d74f86f KRW |
638 | /* If allocation request is small and there are not enough |
639 | * pages in a pool we fill the pool up first. */ | |
5870a4d9 | 640 | if (count < _manager->options.small |
1403b1a3 PN |
641 | && count > pool->npages) { |
642 | struct list_head new_pages; | |
5870a4d9 | 643 | unsigned alloc_size = _manager->options.alloc_size; |
1403b1a3 PN |
644 | |
645 | /** | |
646 | * Can't change page caching if in irqsave context. We have to | |
647 | * drop the pool->lock. | |
648 | */ | |
649 | spin_unlock_irqrestore(&pool->lock, *irq_flags); | |
650 | ||
651 | INIT_LIST_HEAD(&new_pages); | |
652 | r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags, | |
6ed4e2e6 | 653 | cstate, alloc_size, 0); |
1403b1a3 PN |
654 | spin_lock_irqsave(&pool->lock, *irq_flags); |
655 | ||
656 | if (!r) { | |
657 | list_splice(&new_pages, &pool->list); | |
07458661 | 658 | ++pool->nrefills; |
1403b1a3 PN |
659 | pool->npages += alloc_size; |
660 | } else { | |
767601d1 | 661 | pr_debug("Failed to fill pool (%p)\n", pool); |
1403b1a3 | 662 | /* If we have any pages left put them to the pool. */ |
9afae271 | 663 | list_for_each_entry(p, &new_pages, lru) { |
1403b1a3 PN |
664 | ++cpages; |
665 | } | |
666 | list_splice(&new_pages, &pool->list); | |
667 | pool->npages += cpages; | |
668 | } | |
669 | ||
670 | } | |
671 | pool->fill_lock = false; | |
672 | } | |

/**
 * Allocate pages from the pool and put them on the return list.
 *
 * @return zero on success or a negative error code.
 */
static int ttm_page_pool_get_pages(struct ttm_page_pool *pool,
				   struct list_head *pages,
				   int ttm_flags,
				   enum ttm_caching_state cstate,
				   unsigned count, unsigned order)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;
	int r = 0;

	spin_lock_irqsave(&pool->lock, irq_flags);
	if (!order)
		ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count,
					  &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include for the requested number of pages.
	 * Walk the list from whichever end is closer to halve the search
	 * space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
	count = 0;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);

	/* clear the pages coming from the pool if requested */
	if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		struct page *page;

		list_for_each_entry(page, pages, lru) {
			if (PageHighMem(page))
				clear_highpage(page);
			else
				clear_page(page_address(page));
		}
	}

	/* If the pool didn't have enough pages allocate new ones. */
	if (count) {
		gfp_t gfp_flags = pool->gfp_flags;

		/* set zero flag for page allocation if required */
		if (ttm_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 */
		r = ttm_alloc_new_pages(pages, gfp_flags, ttm_flags, cstate,
					count, order);
	}

	return r;
}
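
/*
 * Example of the bidirectional search above: with pool->npages == 1000 and
 * count == 900, the request is more than half the pool, so the walk starts
 * from the tail and needs about 100 steps instead of 900 from the head.
 */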

/* Put all pages in the pages list into the correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
#endif
	unsigned long irq_flags;
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		i = 0;
		while (i < npages) {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			struct page *p = pages[i];
#endif
			unsigned order = 0, j;

			if (!pages[i]) {
				++i;
				continue;
			}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			if (!(flags & TTM_PAGE_FLAG_DMA32)) {
				for (j = 0; j < HPAGE_PMD_NR; ++j)
					if (p++ != pages[i + j])
						break;

				if (j == HPAGE_PMD_NR)
					order = HPAGE_PMD_ORDER;
			}
#endif

			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			__free_pages(pages[i], order);

			j = 1 << order;
			while (j) {
				pages[i++] = NULL;
				--j;
			}
		}
		return;
	}

	i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (huge) {
		unsigned max_size, n2free;

		spin_lock_irqsave(&huge->lock, irq_flags);
		while (i < npages) {
			struct page *p = pages[i];
			unsigned j;

			if (!p)
				break;

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				if (p++ != pages[i + j])
					break;

			if (j != HPAGE_PMD_NR)
				break;

			list_add_tail(&pages[i]->lru, &huge->list);

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[i++] = NULL;
			huge->npages++;
		}

		/* Check that we don't go over the pool limit */
		max_size = _manager->options.max_size;
		max_size /= HPAGE_PMD_NR;
		if (huge->npages > max_size)
			n2free = huge->npages - max_size;
		else
			n2free = 0;
		spin_unlock_irqrestore(&huge->lock, irq_flags);
		if (n2free)
			ttm_page_pool_free(huge, n2free, false);
	}
#endif

	spin_lock_irqsave(&pool->lock, irq_flags);
	while (i < npages) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
		++i;
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages, false);
}

/*
 * On success the pages array will hold npages correctly cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct ttm_page_pool *huge = ttm_get_pool(flags, true, cstate);
#endif
	struct list_head plist;
	struct page *p = NULL;
	unsigned count, first;
	int r;

	/* No pool for cached pages */
	if (pool == NULL) {
		gfp_t gfp_flags = GFP_USER;
		unsigned i;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		unsigned j;
#endif

		/* set zero flag for page allocation if required */
		if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
			gfp_flags |= __GFP_ZERO;

		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		i = 0;
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
		if (!(gfp_flags & GFP_DMA32)) {
			while (npages >= HPAGE_PMD_NR) {
				gfp_t huge_flags = gfp_flags;

				huge_flags |= GFP_TRANSHUGE;
				huge_flags &= ~__GFP_MOVABLE;
				huge_flags &= ~__GFP_COMP;
				p = alloc_pages(huge_flags, HPAGE_PMD_ORDER);
				if (!p)
					break;

				for (j = 0; j < HPAGE_PMD_NR; ++j)
					pages[i++] = p++;

				npages -= HPAGE_PMD_NR;
			}
		}
#endif

		first = i;
		while (npages) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_debug("Unable to allocate page\n");
				return -ENOMEM;
			}

			/* Swap the pages if we detect consecutive order */
			if (i > first && pages[i - 1] == p - 1)
				swap(p, pages[i - 1]);

			pages[i++] = p;
			--npages;
		}
		return 0;
	}

	count = 0;

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (huge && npages >= HPAGE_PMD_NR) {
		INIT_LIST_HEAD(&plist);
		ttm_page_pool_get_pages(huge, &plist, flags, cstate,
					npages / HPAGE_PMD_NR,
					HPAGE_PMD_ORDER);

		list_for_each_entry(p, &plist, lru) {
			unsigned j;

			for (j = 0; j < HPAGE_PMD_NR; ++j)
				pages[count++] = &p[j];
		}
	}
#endif

	INIT_LIST_HEAD(&plist);
	r = ttm_page_pool_get_pages(pool, &plist, flags, cstate,
				    npages - count, 0);

	first = count;
	list_for_each_entry(p, &plist, lru) {
		struct page *tmp = p;

		/* Swap the pages if we detect consecutive order */
		if (count > first && pages[count - 1] == tmp - 1)
			swap(tmp, pages[count - 1]);
		pages[count++] = tmp;
	}

	if (r) {
		/* If there are any pages on the list, put them back to
		 * the pool.
		 */
		pr_debug("Failed to allocate extra pages for large request\n");
		ttm_put_pages(pages, count, flags, cstate);
		return r;
	}

	return 0;
}
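
/*
 * A note on the "swap if consecutive" logic above: when two sequentially
 * allocated order-0 pages turn out to be physically consecutive in
 * ascending order, they are stored swapped, which breaks up the run. This
 * keeps the huge-page detection in ttm_put_pages() (which looks for
 * HPAGE_PMD_NR ascending pages) from mistaking individually allocated
 * pages for a single huge page and freeing them with the wrong order.
 */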
983 | ||
3b9c214a | 984 | static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags, |
6d5e4e32 | 985 | char *name, unsigned int order) |
1403b1a3 PN |
986 | { |
987 | spin_lock_init(&pool->lock); | |
988 | pool->fill_lock = false; | |
989 | INIT_LIST_HEAD(&pool->list); | |
07458661 | 990 | pool->npages = pool->nfrees = 0; |
1403b1a3 | 991 | pool->gfp_flags = flags; |
07458661 | 992 | pool->name = name; |
6d5e4e32 | 993 | pool->order = order; |
1403b1a3 PN |
994 | } |
995 | ||
c96af79e | 996 | int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) |
1403b1a3 | 997 | { |
c96af79e | 998 | int ret; |
6d5e4e32 RH |
999 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
1000 | unsigned order = HPAGE_PMD_ORDER; | |
1001 | #else | |
1002 | unsigned order = 0; | |
1003 | #endif | |
5870a4d9 FJ |
1004 | |
1005 | WARN_ON(_manager); | |
1403b1a3 | 1006 | |
25d0479a | 1007 | pr_info("Initializing pool allocator\n"); |
1403b1a3 | 1008 | |
5870a4d9 | 1009 | _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); |
19d859a7 XS |
1010 | if (!_manager) |
1011 | return -ENOMEM; | |
1403b1a3 | 1012 | |
6d5e4e32 | 1013 | ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc", 0); |
1403b1a3 | 1014 | |
6d5e4e32 | 1015 | ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc", 0); |
1403b1a3 | 1016 | |
5870a4d9 | 1017 | ttm_page_pool_init_locked(&_manager->wc_pool_dma32, |
6d5e4e32 | 1018 | GFP_USER | GFP_DMA32, "wc dma", 0); |
1403b1a3 | 1019 | |
5870a4d9 | 1020 | ttm_page_pool_init_locked(&_manager->uc_pool_dma32, |
6d5e4e32 | 1021 | GFP_USER | GFP_DMA32, "uc dma", 0); |
1403b1a3 | 1022 | |
6ed4e2e6 CK |
1023 | ttm_page_pool_init_locked(&_manager->wc_pool_huge, |
1024 | GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP), | |
6d5e4e32 | 1025 | "wc huge", order); |
6ed4e2e6 CK |
1026 | |
1027 | ttm_page_pool_init_locked(&_manager->uc_pool_huge, | |
1028 | GFP_TRANSHUGE & ~(__GFP_MOVABLE | __GFP_COMP) | |
6d5e4e32 | 1029 | , "uc huge", order); |
6ed4e2e6 | 1030 | |
5870a4d9 FJ |
1031 | _manager->options.max_size = max_pages; |
1032 | _manager->options.small = SMALL_ALLOCATION; | |
1033 | _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; | |
1034 | ||
1035 | ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, | |
1036 | &glob->kobj, "pool"); | |
c96af79e | 1037 | if (unlikely(ret != 0)) { |
5870a4d9 FJ |
1038 | kobject_put(&_manager->kobj); |
1039 | _manager = NULL; | |
c96af79e PN |
1040 | return ret; |
1041 | } | |
1042 | ||
5870a4d9 | 1043 | ttm_pool_mm_shrink_init(_manager); |
1403b1a3 PN |
1044 | |
1045 | return 0; | |
1046 | } | |

void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	/* OK to use static buffer since global mutex is no longer used. */
	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}

int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
			    ttm->caching_state);
	if (unlikely(ret != 0)) {
		ttm_pool_unpopulate(ttm);
		return ret;
	}

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						PAGE_SIZE);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);

void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (!ttm->pages[i])
			continue;

		ttm_mem_global_free_page(ttm->glob->mem_glob, ttm->pages[i],
					 PAGE_SIZE);
	}
	ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags,
		      ttm->caching_state);
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
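
/*
 * Hedged usage sketch (illustrative, not part of this file): a driver's
 * ttm_tt backend typically forwards its populate/unpopulate callbacks to
 * this pool allocator. The my_driver_* names below are hypothetical.
 *
 *	static int my_driver_tt_populate(struct ttm_tt *ttm)
 *	{
 *		return ttm_pool_populate(ttm);
 *	}
 *
 *	static void my_driver_tt_unpopulate(struct ttm_tt *ttm)
 *	{
 *		ttm_pool_unpopulate(ttm);
 *	}
 */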
1117 | ||
a4dec819 TSD |
1118 | int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt) |
1119 | { | |
6056a1a5 | 1120 | unsigned i, j; |
a4dec819 TSD |
1121 | int r; |
1122 | ||
1123 | r = ttm_pool_populate(&tt->ttm); | |
1124 | if (r) | |
1125 | return r; | |
1126 | ||
6056a1a5 CK |
1127 | for (i = 0; i < tt->ttm.num_pages; ++i) { |
1128 | struct page *p = tt->ttm.pages[i]; | |
1129 | size_t num_pages = 1; | |
1130 | ||
1131 | for (j = i + 1; j < tt->ttm.num_pages; ++j) { | |
1132 | if (++p != tt->ttm.pages[j]) | |
1133 | break; | |
1134 | ||
1135 | ++num_pages; | |
1136 | } | |
1137 | ||
a4dec819 | 1138 | tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], |
6056a1a5 | 1139 | 0, num_pages * PAGE_SIZE, |
a4dec819 TSD |
1140 | DMA_BIDIRECTIONAL); |
1141 | if (dma_mapping_error(dev, tt->dma_address[i])) { | |
1142 | while (i--) { | |
1143 | dma_unmap_page(dev, tt->dma_address[i], | |
1144 | PAGE_SIZE, DMA_BIDIRECTIONAL); | |
1145 | tt->dma_address[i] = 0; | |
1146 | } | |
1147 | ttm_pool_unpopulate(&tt->ttm); | |
1148 | return -EFAULT; | |
1149 | } | |
6056a1a5 CK |
1150 | |
1151 | for (j = 1; j < num_pages; ++j) { | |
1152 | tt->dma_address[i + 1] = tt->dma_address[i] + PAGE_SIZE; | |
1153 | ++i; | |
1154 | } | |
a4dec819 TSD |
1155 | } |
1156 | return 0; | |
1157 | } | |
1158 | EXPORT_SYMBOL(ttm_populate_and_map_pages); | |
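
/*
 * Worked example for the mapping loop above: if pages[0..3] are physically
 * contiguous in ascending order, num_pages becomes 4 and a single
 * dma_map_page() of 4 * PAGE_SIZE is issued; dma_address[1..3] are then
 * derived by adding PAGE_SIZE increments instead of extra mappings.
 */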
1159 | ||
1160 | void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) | |
1161 | { | |
6056a1a5 CK |
1162 | unsigned i, j; |
1163 | ||
1164 | for (i = 0; i < tt->ttm.num_pages;) { | |
1165 | struct page *p = tt->ttm.pages[i]; | |
1166 | size_t num_pages = 1; | |
1167 | ||
1168 | if (!tt->dma_address[i] || !tt->ttm.pages[i]) { | |
1169 | ++i; | |
1170 | continue; | |
a4dec819 | 1171 | } |
6056a1a5 CK |
1172 | |
1173 | for (j = i + 1; j < tt->ttm.num_pages; ++j) { | |
1174 | if (++p != tt->ttm.pages[j]) | |
1175 | break; | |
1176 | ||
1177 | ++num_pages; | |
1178 | } | |
1179 | ||
1180 | dma_unmap_page(dev, tt->dma_address[i], num_pages * PAGE_SIZE, | |
1181 | DMA_BIDIRECTIONAL); | |
1182 | ||
1183 | i += num_pages; | |
a4dec819 TSD |
1184 | } |
1185 | ttm_pool_unpopulate(&tt->ttm); | |
1186 | } | |
1187 | EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); | |
1188 | ||
07458661 PN |
1189 | int ttm_page_alloc_debugfs(struct seq_file *m, void *data) |
1190 | { | |
1191 | struct ttm_page_pool *p; | |
1192 | unsigned i; | |
1193 | char *h[] = {"pool", "refills", "pages freed", "size"}; | |
5870a4d9 | 1194 | if (!_manager) { |
07458661 PN |
1195 | seq_printf(m, "No pool allocator running.\n"); |
1196 | return 0; | |
1197 | } | |
6ed4e2e6 | 1198 | seq_printf(m, "%7s %12s %13s %8s\n", |
07458661 PN |
1199 | h[0], h[1], h[2], h[3]); |
1200 | for (i = 0; i < NUM_POOLS; ++i) { | |
5870a4d9 | 1201 | p = &_manager->pools[i]; |
07458661 | 1202 | |
6ed4e2e6 | 1203 | seq_printf(m, "%7s %12ld %13ld %8d\n", |
07458661 PN |
1204 | p->name, p->nrefills, |
1205 | p->nfrees, p->npages); | |
1206 | } | |
1207 | return 0; | |
1208 | } | |
1209 | EXPORT_SYMBOL(ttm_page_alloc_debugfs); |