/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);
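
/*
 * Flush a single page from the CPU data cache with clflush, one
 * cache line at a time.
 */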
#if defined(CONFIG_X86)
static void ttm_tt_clflush_page(struct page *page)
{
	uint8_t *page_virtual;
	unsigned int i;

	if (unlikely(page == NULL))
		return;

	page_virtual = kmap_atomic(page, KM_USER0);

	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
		clflush(page_virtual + i);

	kunmap_atomic(page_virtual, KM_USER0);
}

static void ttm_tt_cache_flush_clflush(struct page *pages[],
				       unsigned long num_pages)
{
	unsigned long i;

	mb();
	for (i = 0; i < num_pages; ++i)
		ttm_tt_clflush_page(*pages++);
	mb();
}
#elif !defined(__powerpc__)
static void ttm_tt_ipi_handler(void *null)
{
	;
}
#endif
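
/*
 * Flush the CPU caches for an array of pages before their caching
 * attributes change: clflush per cache line on x86 where available,
 * a dcache range flush per page on powerpc, and a cross-CPU IPI
 * round-trip as a last-resort fallback elsewhere.
 */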
void ttm_tt_cache_flush(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86)
	if (cpu_has_clflush) {
		ttm_tt_cache_flush_clflush(pages, num_pages);
		return;
	}
#elif defined(__powerpc__)
	unsigned long i;

	for (i = 0; i < num_pages; ++i) {
		struct page *page = pages[i];
		void *page_virtual;

		if (unlikely(page == NULL))
			continue;

		page_virtual = kmap_atomic(page, KM_USER0);
		flush_dcache_range((unsigned long) page_virtual,
				   (unsigned long) page_virtual + PAGE_SIZE);
		kunmap_atomic(page_virtual, KM_USER0);
	}
#else
	if (on_each_cpu(ttm_tt_ipi_handler, NULL, 1) != 0)
		printk(KERN_ERR TTM_PFX
		       "Timed out waiting for drm cache flush.\n");
#endif
}

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
	ttm->pages = NULL;

	if (size <= PAGE_SIZE)
		ttm->pages = kzalloc(size, GFP_KERNEL);

	if (!ttm->pages) {
		ttm->pages = vmalloc_user(size);
		if (ttm->pages)
			ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
	}
}

static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
		vfree(ttm->pages);
		ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
	} else {
		kfree(ttm->pages);
	}
	ttm->pages = NULL;
}
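
/*
 * Allocate a single backing page, translating the TTM page flags into
 * gfp flags: zero-filled on demand, DMA32 for devices with 32-bit
 * addressing limits, otherwise eligible for highmem.
 */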
static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	gfp_t gfp_flags = GFP_USER;

	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags |= __GFP_DMA32;
	else
		gfp_flags |= __GFP_HIGHMEM;

	return alloc_page(gfp_flags);
}
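
/*
 * Release pages previously pinned with get_user_pages(): mark them
 * dirty first if the ttm was mapped for write and has been written to,
 * and skip the shared dummy read page, which this ttm does not own.
 */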
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
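
/*
 * Look up a backing page, allocating and accounting it on demand.
 * Highmem pages fill the page array from the top (first_himem_page
 * counts down) and lomem pages from the bottom (last_lomem_page counts
 * up), keeping each kind contiguous.
 */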
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);
		if (!p)
			return NULL;

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}
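
/*
 * Fully populate the ttm: swap contents back in if needed, allocate
 * any missing pages, then hand the complete page array to the backend.
 */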
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}

#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	if (PageHighMem(p))
		return 0;

	switch (c_state) {
	case tt_cached:
		return set_pages_wb(p, 1);
	case tt_wc:
		return set_memory_wc((unsigned long) page_address(p), 1);
	default:
		return set_pages_uc(p, 1);
	}
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_state)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		ttm_tt_cache_flush(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page, c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	/* Roll back the pages already changed to the previous state. */
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state);
		}
	}

	return ret;
}

int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
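
/*
 * Free all kernel-allocated backing pages: restore write-back caching,
 * un-account each page from the global memory accounting and release it.
 */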
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}
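
/*
 * Tear down a ttm completely: destroy the backend, release user or
 * kernel pages, free the page directory and drop any non-persistent
 * swap storage reference.
 */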
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}
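
/*
 * Populate the ttm with pinned user-space pages. The pages are
 * accounted as lowmem against the global memory limit before being
 * pinned with get_user_pages().
 */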
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}
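
/*
 * Allocate and initialize a ttm for a buffer object of the given size,
 * including its page directory and driver backend. Returns NULL on
 * failure.
 */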
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}
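
/*
 * Unbind the ttm from the GPU aperture / GART through the backend if
 * it is currently bound.
 */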
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}
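
/*
 * Populate the ttm if necessary and bind it to the location described
 * by @bo_mem through the driver backend. User ttms are marked dirty,
 * since the device may write to them once bound.
 */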
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);
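
/*
 * Bring a swapped-out ttm back in. User ttms are simply re-pinned with
 * ttm_tt_set_user(); kernel ttms are copied back page by page from the
 * shmem swap storage.
 */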
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page))
			goto out_err;
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}
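
/*
 * Push the ttm contents out to shmem-backed swap storage (or the
 * caller-provided persistent storage), copying page by page, then free
 * the backing pages.
 */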
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(to_page == NULL))
			goto out_err;

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return ret;
}