/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */
/*
 * This file contains functions for buffer object structure management
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/gfp.h>		/* for GFP_ATOMIC */
#include <linux/mm.h>		/* for find_vma()/get_user_pages() */
#include <linux/mm_types.h>
#include <linux/hugetlb.h>
#include <linux/highmem.h>
#include <linux/slab.h>		/* for kmalloc */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <asm/current.h>
#include <linux/sched/signal.h>
#include <linux/file.h>

#include <asm/set_memory.h>

#include "atomisp_internal.h"
#include "hmm/hmm_common.h"
#include "hmm/hmm_pool.h"
#include "hmm/hmm_bo.h"
static unsigned int order_to_nr(unsigned int order)
{
	return 1U << order;
}

static unsigned int nr_to_order_bottom(unsigned int nr)
{
	return fls(nr) - 1;
}
static struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
{
	struct hmm_buffer_object *bo;

	bo = kmem_cache_alloc(bo_cache, GFP_KERNEL);
	if (!bo)
		dev_err(atomisp_dev, "%s: failed!\n", __func__);

	return bo;
}
static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
		     unsigned int pgnr)
{
	check_bodev_null_return(bdev, -EINVAL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
			 "hmm_bo_device not inited yet.\n");
	/* prevent zero size buffer object */
	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return -EINVAL;
	}

	memset(bo, 0, sizeof(*bo));
	mutex_init(&bo->mutex);

	/* init the bo->list HEAD as an element of entire_bo_list */
	INIT_LIST_HEAD(&bo->list);

	bo->bdev = bdev;
	bo->vmap_addr = NULL;
	bo->status = HMM_BO_FREE;
	bo->start = bdev->start;
	bo->pgnr = pgnr;
	bo->end = bo->start + pgnr_to_size(pgnr);
	bo->prev = NULL;
	bo->next = NULL;

	return 0;
}
static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
	struct rb_node *node, unsigned int pgnr)
{
	struct hmm_buffer_object *this, *ret_bo, *temp_bo;

	this = rb_entry(node, struct hmm_buffer_object, node);
	if (this->pgnr == pgnr ||
	    (this->pgnr > pgnr && this->node.rb_left == NULL)) {
		goto remove_bo_and_return;
	} else {
		if (this->pgnr < pgnr) {
			if (!this->node.rb_right)
				return NULL;
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				this->node.rb_right, pgnr);
		} else {
			ret_bo = __bo_search_and_remove_from_free_rbtree(
				this->node.rb_left, pgnr);
		}
		if (!ret_bo) {
			if (this->pgnr > pgnr)
				goto remove_bo_and_return;
			else
				return NULL;
		}
		return ret_bo;
	}

remove_bo_and_return:
	/* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
	 * 1. check if 'this->next' is NULL:
	 *	yes: erase 'this' node and rebalance rbtree, return 'this'.
	 */
	if (this->next == NULL) {
		rb_erase(&this->node, &this->bdev->free_rbtree);
		return this;
	}
	/* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
	 * 2. check if 'this->next->next' is NULL:
	 *	yes: change the related 'next/prev' pointer,
	 *	     return 'this->next' but the rbtree stays unchanged.
	 */
	temp_bo = this->next;
	this->next = temp_bo->next;
	if (temp_bo->next)
		temp_bo->next->prev = this;
	temp_bo->next = NULL;
	temp_bo->prev = NULL;

	return temp_bo;
}
static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
						      unsigned int start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (n->rb_left == NULL)
				return NULL;
			n = n->rb_left;
		} else if (bo->start < start) {
			if (n->rb_right == NULL)
				return NULL;
			n = n->rb_right;
		} else {
			return bo;
		}
	} while (n);

	return NULL;
}
static struct hmm_buffer_object *__bo_search_by_addr_in_range(
	struct rb_root *root, unsigned int start)
{
	struct rb_node *n = root->rb_node;
	struct hmm_buffer_object *bo;

	do {
		bo = rb_entry(n, struct hmm_buffer_object, node);

		if (bo->start > start) {
			if (n->rb_left == NULL)
				return NULL;
			n = n->rb_left;
		} else {
			if (bo->end > start)
				return bo;
			if (n->rb_right == NULL)
				return NULL;
			n = n->rb_right;
		}
	} while (n);

	return NULL;
}
static void __bo_insert_to_free_rbtree(struct rb_root *root,
				       struct hmm_buffer_object *bo)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int pgnr = bo->pgnr;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (pgnr < this->pgnr) {
			new = &((*new)->rb_left);
		} else if (pgnr > this->pgnr) {
			new = &((*new)->rb_right);
		} else {
			/* same size: chain the bo behind the existing node */
			bo->prev = this;
			bo->next = this->next;
			if (this->next)
				this->next->prev = bo;
			this->next = bo;
			bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
			return;
		}
	}

	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}
static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
					struct hmm_buffer_object *bo)
{
	struct rb_node **new = &(root->rb_node);
	struct rb_node *parent = NULL;
	struct hmm_buffer_object *this;
	unsigned int start = bo->start;

	while (*new) {
		parent = *new;
		this = container_of(*new, struct hmm_buffer_object, node);

		if (start < this->start)
			new = &((*new)->rb_left);
		else
			new = &((*new)->rb_right);
	}

	kref_init(&bo->kref);
	bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;

	rb_link_node(&bo->node, parent, new);
	rb_insert_color(&bo->node, root);
}
static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
					       struct hmm_buffer_object *bo,
					       unsigned int pgnr)
{
	struct hmm_buffer_object *new_bo;
	unsigned long flags;
	int ret;

	new_bo = __bo_alloc(bdev->bo_cache);
	if (!new_bo) {
		dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
		return NULL;
	}
	ret = __bo_init(bdev, new_bo, pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, new_bo);
		return NULL;
	}

	new_bo->start = bo->start;
	new_bo->end = new_bo->start + pgnr_to_size(pgnr);
	bo->start = new_bo->end;
	bo->pgnr = bo->pgnr - pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&new_bo->list, &bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	return new_bo;
}
static void __bo_take_off_handling(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	/* There are 4 situations when we take off a known bo from free rbtree:
	 * 1. if bo->next == NULL && bo->prev == NULL, bo is a rbtree node
	 *	and does not have a linked list after bo; to take off this bo,
	 *	we just need erase bo directly and rebalance the free rbtree
	 */
	if (bo->prev == NULL && bo->next == NULL) {
		rb_erase(&bo->node, &bdev->free_rbtree);
	/* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node,
	 *	and has a linked list; to take off this bo we need erase bo
	 *	first, then insert bo->next into the free rbtree and rebalance
	 *	the free rbtree
	 */
	} else if (bo->prev == NULL && bo->next != NULL) {
		bo->next->prev = NULL;
		rb_erase(&bo->node, &bdev->free_rbtree);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
		bo->next = NULL;
	/* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
	 *	node, bo is the last element of the linked list after rbtree
	 *	node; to take off this bo, we just need set the "prev/next"
	 *	pointers to NULL, the free rbtree stays unchanged
	 */
	} else if (bo->prev != NULL && bo->next == NULL) {
		bo->prev->next = NULL;
		bo->prev = NULL;
	/* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
	 *	node, bo is in the middle of the linked list after rbtree node;
	 *	to take off this bo, we just set the "prev/next" pointers
	 *	to NULL, the free rbtree stays unchanged
	 */
	} else if (bo->prev != NULL && bo->next != NULL) {
		bo->next->prev = bo->prev;
		bo->prev->next = bo->next;
		bo->next = NULL;
		bo->prev = NULL;
	}
}
static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
					    struct hmm_buffer_object *next_bo)
{
	struct hmm_bo_device *bdev;
	unsigned long flags;

	bdev = bo->bdev;
	next_bo->start = bo->start;
	next_bo->pgnr = next_bo->pgnr + bo->pgnr;

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_del(&bo->list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	kmem_cache_free(bo->bdev->bo_cache, bo);

	return next_bo;
}
/*
 * hmm_bo_device functions.
 */
int hmm_bo_device_init(struct hmm_bo_device *bdev,
		       struct isp_mmu_client *mmu_driver,
		       unsigned int vaddr_start,
		       unsigned int size)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;
	int ret;

	check_bodev_null_return(bdev, -EINVAL);

	ret = isp_mmu_init(&bdev->mmu, mmu_driver);
	if (ret) {
		dev_err(atomisp_dev, "isp_mmu_init failed.\n");
		return ret;
	}

	bdev->start = vaddr_start;
	bdev->pgnr = size_to_pgnr_ceil(size);
	bdev->size = pgnr_to_size(bdev->pgnr);

	spin_lock_init(&bdev->list_lock);
	mutex_init(&bdev->rbtree_mutex);

	bdev->flag = HMM_BO_DEVICE_INITED;

	INIT_LIST_HEAD(&bdev->entire_bo_list);
	bdev->allocated_rbtree = RB_ROOT;
	bdev->free_rbtree = RB_ROOT;

	bdev->bo_cache = kmem_cache_create("bo_cache",
				sizeof(struct hmm_buffer_object), 0, 0, NULL);
	if (!bdev->bo_cache) {
		dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	bo = __bo_alloc(bdev->bo_cache);
	if (!bo) {
		dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
		isp_mmu_exit(&bdev->mmu);
		return -ENOMEM;
	}

	ret = __bo_init(bdev, bo, bdev->pgnr);
	if (ret) {
		dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
		kmem_cache_free(bdev->bo_cache, bo);
		isp_mmu_exit(&bdev->mmu);
		return -EINVAL;
	}

	/* the initial bo covers the entire ISP virtual address space */
	spin_lock_irqsave(&bdev->list_lock, flags);
	list_add_tail(&bo->list, &bdev->entire_bo_list);
	spin_unlock_irqrestore(&bdev->list_lock, flags);

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	return 0;
}
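/*
 * Illustrative only, not part of the driver: a minimal sketch of the
 * expected device lifecycle around hmm_bo_device_init()/_exit().  The mmu
 * client name, start address and size below are placeholders for whatever
 * the calling hmm layer actually passes in; hmm_bo_unref() drops the
 * initial reference and releases the range back to the free rbtree.
 *
 *	static struct hmm_bo_device bo_device;
 *	struct hmm_buffer_object *bo;
 *
 *	if (hmm_bo_device_init(&bo_device, &my_mmu_client,
 *			       MY_ISP_VM_START, MY_ISP_VM_SIZE))
 *		return -EINVAL;
 *
 *	bo = hmm_bo_alloc(&bo_device, size_to_pgnr_ceil(bytes));
 *	if (bo)
 *		hmm_bo_unref(bo);
 *
 *	hmm_bo_device_exit(&bo_device);
 */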
struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
				       unsigned int pgnr)
{
	struct hmm_buffer_object *bo, *new_bo;
	struct rb_root *root = &bdev->free_rbtree;

	check_bodev_null_return(bdev, NULL);
	var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
			 "hmm_bo_device not inited yet.\n");

	if (pgnr == 0) {
		dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
		return NULL;
	}

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
			__func__);
		return NULL;
	}

	if (bo->pgnr > pgnr) {
		new_bo = __bo_break_up(bdev, bo, pgnr);
		if (!new_bo) {
			mutex_unlock(&bdev->rbtree_mutex);
			dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
				__func__);
			return NULL;
		}

		__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
		__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

		mutex_unlock(&bdev->rbtree_mutex);
		return new_bo;
	}

	__bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
	return bo;
}
void hmm_bo_release(struct hmm_buffer_object *bo)
{
	struct hmm_bo_device *bdev = bo->bdev;
	struct hmm_buffer_object *next_bo, *prev_bo;

	mutex_lock(&bdev->rbtree_mutex);

	/*
	 * FIX ME:
	 *
	 * how to destroy the bo when it is still MMAPED?
	 *
	 * ideally, this will not happen as hmm_bo_release
	 * will only be called when kref reaches 0, and in mmap
	 * operation the hmm_bo_ref will eventually be called.
	 * so, if this happens, something has gone wrong.
	 */
	if (bo->status & HMM_BO_MMAPED) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
		return;
	}

	if (bo->status & HMM_BO_BINDED) {
		dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
		hmm_bo_unbind(bo);
	}

	if (bo->status & HMM_BO_PAGE_ALLOCED) {
		dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
		hmm_bo_free_pages(bo);
	}
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
		hmm_bo_vunmap(bo);
	}

	rb_erase(&bo->node, &bdev->allocated_rbtree);

	prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
	next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);

	/* merge with a free neighbour that is virtually contiguous */
	if (bo->list.prev != &bdev->entire_bo_list &&
	    prev_bo->end == bo->start &&
	    (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(prev_bo);
		bo = __bo_merge(prev_bo, bo);
	}

	if (bo->list.next != &bdev->entire_bo_list &&
	    next_bo->start == bo->end &&
	    (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
		__bo_take_off_handling(next_bo);
		bo = __bo_merge(bo, next_bo);
	}

	__bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);

	mutex_unlock(&bdev->rbtree_mutex);
}
void hmm_bo_device_exit(struct hmm_bo_device *bdev)
{
	struct hmm_buffer_object *bo;
	unsigned long flags;

	dev_dbg(atomisp_dev, "%s: entering!\n", __func__);

	check_bodev_null_return_void(bdev);

	/*
	 * release all allocated bos even if they are in use;
	 * all bos will be merged into one big bo
	 */
	while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
		hmm_bo_release(
		    rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));

	dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
		__func__);

	/* free all bos to release all ISP virtual memory */
	while (!list_empty(&bdev->entire_bo_list)) {
		bo = list_to_hmm_bo(bdev->entire_bo_list.next);

		spin_lock_irqsave(&bdev->list_lock, flags);
		list_del(&bo->list);
		spin_unlock_irqrestore(&bdev->list_lock, flags);

		kmem_cache_free(bdev->bo_cache, bo);
	}

	dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);

	kmem_cache_destroy(bdev->bo_cache);

	isp_mmu_exit(&bdev->mmu);
}
int hmm_bo_device_inited(struct hmm_bo_device *bdev)
{
	check_bodev_null_return(bdev, -EINVAL);

	return bdev->flag == HMM_BO_DEVICE_INITED;
}
int hmm_bo_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_ALLOCED;
}
struct hmm_buffer_object *hmm_bo_device_search_start(
	struct hmm_bo_device *bdev, ia_css_ptr vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo with addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}
struct hmm_buffer_object *hmm_bo_device_search_in_range(
	struct hmm_bo_device *bdev, unsigned int vaddr)
{
	struct hmm_buffer_object *bo;

	check_bodev_null_return(bdev, NULL);

	mutex_lock(&bdev->rbtree_mutex);
	bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
	if (!bo) {
		mutex_unlock(&bdev->rbtree_mutex);
		dev_err(atomisp_dev, "%s cannot find bo containing addr: 0x%x\n",
			__func__, vaddr);
		return NULL;
	}
	mutex_unlock(&bdev->rbtree_mutex);

	return bo;
}
struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
	struct hmm_bo_device *bdev, const void *vaddr)
{
	struct list_head *pos;
	struct hmm_buffer_object *bo;
	unsigned long flags;

	check_bodev_null_return(bdev, NULL);

	spin_lock_irqsave(&bdev->list_lock, flags);
	list_for_each(pos, &bdev->entire_bo_list) {
		bo = list_to_hmm_bo(pos);
		/* pass bo which has no vm_node allocated */
		if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
			continue;
		if (bo->vmap_addr == vaddr)
			goto found;
	}
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return NULL;

found:
	spin_unlock_irqrestore(&bdev->list_lock, flags);
	return bo;
}
static void free_private_bo_pages(struct hmm_buffer_object *bo,
				  struct hmm_pool *dypool,
				  struct hmm_pool *repool,
				  int free_pgnr)
{
	int i, ret;

	for (i = 0; i < free_pgnr; i++) {
		switch (bo->page_obj[i].type) {
		case HMM_PAGE_TYPE_RESERVED:
			if (repool->pops
			    && repool->pops->pool_free_pages) {
				repool->pops->pool_free_pages(repool->pool_info,
							      &bo->page_obj[i]);
				hmm_mem_stat.res_cnt--;
			}
			break;
		/*
		 * HMM_PAGE_TYPE_GENERAL indicates that pages are from system
		 * memory, so when freeing them they should be put into the
		 * dynamic pool.
		 */
		case HMM_PAGE_TYPE_DYNAMIC:
		case HMM_PAGE_TYPE_GENERAL:
			if (dypool->pops
			    && dypool->pops->pool_inited
			    && dypool->pops->pool_inited(dypool->pool_info)) {
				if (dypool->pops->pool_free_pages)
					dypool->pops->pool_free_pages(
							dypool->pool_info,
							&bo->page_obj[i]);
				break;
			}

			/*
			 * if the dynamic memory pool doesn't exist, the pages
			 * need to be freed to the system directly.
			 */
		default:
			ret = set_pages_wb(bo->page_obj[i].page, 1);
			if (ret)
				dev_err(atomisp_dev,
					"set page to WB err ...ret = %d\n",
					ret);
			/*
			 * W/A: set_pages_wb() seldom returns -EFAULT,
			 * indicating that the page address is not in the valid
			 * range (0xffff880000000000~0xffffc7ffffffffff);
			 * __free_pages() would then panic.  It is not known
			 * why the page address becomes invalid; it may be
			 * memory corruption caused by low memory.
			 */
			if (!ret) {
				__free_pages(bo->page_obj[i].page, 0);
				hmm_mem_stat.sys_size--;
			}
			break;
		}
	}
}
/* Allocate pages which will be used only by ISP */
static int alloc_private_pages(struct hmm_buffer_object *bo,
			       int from_highmem,
			       bool cached,
			       struct hmm_pool *dypool,
			       struct hmm_pool *repool)
{
	int ret;
	unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
	struct page *pages;
	gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
	int i, j;
	int failure_number = 0;
	bool reduce_order = false;
	bool lack_mem = true;

	if (from_highmem)
		gfp |= __GFP_HIGHMEM;

	pgnr = bo->pgnr;

	bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * pgnr,
			       GFP_KERNEL);
	if (unlikely(!bo->page_obj))
		return -ENOMEM;

	i = 0;
	alloc_pgnr = 0;

	/*
	 * get physical pages from dynamic pages pool.
	 */
	if (dypool->pops && dypool->pops->pool_alloc_pages) {
		alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
							    bo->page_obj, pgnr,
							    cached);
		hmm_mem_stat.dyc_size -= alloc_pgnr;

		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	/*
	 * get physical pages from reserved pages pool for atomisp.
	 */
	if (repool->pops && repool->pops->pool_alloc_pages) {
		alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
							    &bo->page_obj[i], pgnr,
							    cached);
		hmm_mem_stat.res_cnt += alloc_pgnr;
		if (alloc_pgnr == pgnr)
			return 0;
	}

	pgnr -= alloc_pgnr;
	i += alloc_pgnr;

	while (pgnr) {
		order = nr_to_order_bottom(pgnr);
		/*
		 * if short of memory, order is set to 0 so pages are
		 * allocated one by one to avoid failures.
		 */
		if (lack_mem)
			order = HMM_MIN_ORDER;
		else if (order > HMM_MAX_ORDER)
			order = HMM_MAX_ORDER;
retry:
		/*
		 * When order > HMM_MIN_ORDER, for performance reasons we don't
		 * want alloc_pages() to sleep. In case it fails and falls back
		 * to HMM_MIN_ORDER or in case the requested order is originally
		 * the minimum value, we can allow alloc_pages() to sleep for
		 * robustness purposes.
		 *
		 * REVISIT: why is __GFP_FS necessary?
		 */
		if (order == HMM_MIN_ORDER) {
			gfp &= ~GFP_NOWAIT;
			gfp |= __GFP_RECLAIM | __GFP_FS;
		}

		pages = alloc_pages(gfp, order);
		if (unlikely(!pages)) {
			/*
			 * in low memory case, if allocation page fails,
			 * we turn to try if order=0 allocation could
			 * succeed. if order=0 fails too, that means there is
			 * no memory left.
			 */
			if (order == HMM_MIN_ORDER) {
				dev_err(atomisp_dev,
					"%s: cannot allocate pages\n",
					__func__);
				goto cleanup;
			}
			order = HMM_MIN_ORDER;
			failure_number++;
			reduce_order = true;
			/*
			 * if it fails two times continuously, we consider
			 * ourselves short of memory now.
			 */
			if (failure_number == 2) {
				lack_mem = true;
				failure_number = 0;
			}
			goto retry;
		} else {
			blk_pgnr = order_to_nr(order);

			if (!cached) {
				/*
				 * set memory to uncacheable -- UC_MINUS
				 */
				ret = set_pages_uc(pages, blk_pgnr);
				if (ret) {
					dev_err(atomisp_dev,
						"set page uncacheable failed.\n");

					__free_pages(pages, order);

					goto cleanup;
				}
			}

			for (j = 0; j < blk_pgnr; j++) {
				bo->page_obj[i].page = pages + j;
				bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
			}

			pgnr -= blk_pgnr;
			hmm_mem_stat.sys_size += blk_pgnr;

			/*
			 * if order is not reduced this time, clear
			 * failure_number.
			 */
			if (reduce_order)
				reduce_order = false;
			else
				failure_number = 0;
		}
	}

	return 0;

cleanup:
	alloc_pgnr = i;
	free_private_bo_pages(bo, dypool, repool, alloc_pgnr);

	kfree(bo->page_obj);

	return -ENOMEM;
}
static void free_private_pages(struct hmm_buffer_object *bo,
			       struct hmm_pool *dypool,
			       struct hmm_pool *repool)
{
	free_private_bo_pages(bo, dypool, repool, bo->pgnr);

	kfree(bo->page_obj);
}
/*
 * Hacked from kernel function __get_user_pages in mm/memory.c
 *
 * Handles buffers allocated by another kernel-space driver and mmapped into
 * user space; ignores the VM_PFNMAP and VM_IO flags in the VMA structure.
 *
 * Get physical pages from a user-space virtual address and update them into
 * the page list.
 */
static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
			      unsigned long start, int nr_pages,
			      unsigned int gup_flags, struct page **pages,
			      struct vm_area_struct **vmas)
{
	int i, ret;
	unsigned long vm_flags;

	if (nr_pages <= 0)
		return 0;

	VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));

	/*
	 * Require read or write permissions.
	 * If FOLL_FORCE is set, we only require the "MAY" flags.
	 */
	vm_flags  = (gup_flags & FOLL_WRITE) ?
		    (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
	vm_flags &= (gup_flags & FOLL_FORCE) ?
		    (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
	i = 0;

	do {
		struct vm_area_struct *vma;

		vma = find_vma(mm, start);
		if (!vma) {
			dev_err(atomisp_dev, "find_vma failed\n");
			return i ? : -EFAULT;
		}

		if (is_vm_hugetlb_page(vma)) {
			/*
			i = follow_hugetlb_page(mm, vma, pages, vmas,
					&start, &nr_pages, i, gup_flags);
			*/
			continue;
		}

		do {
			struct page *page;
			unsigned long pfn;

			/*
			 * If we have a pending SIGKILL, don't keep faulting
			 * pages and potentially allocating memory.
			 */
			if (unlikely(fatal_signal_pending(current))) {
				dev_err(atomisp_dev,
					"fatal_signal_pending in %s\n",
					__func__);
				return i ? i : -ERESTARTSYS;
			}

			ret = follow_pfn(vma, start, &pfn);
			if (ret) {
				dev_err(atomisp_dev, "follow_pfn() failed\n");
				return i ? : -EFAULT;
			}

			page = pfn_to_page(pfn);
			if (IS_ERR(page))
				return i ? i : PTR_ERR(page);
			if (pages) {
				pages[i] = page;
				get_page(page);
				flush_anon_page(vma, page, start);
				flush_dcache_page(page);
			}
			if (vmas)
				vmas[i] = vma;
			i++;
			start += PAGE_SIZE;
			nr_pages--;
		} while (nr_pages && start < vma->vm_end);
	} while (nr_pages);

	return i;
}
static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long start, int nr_pages, int write, int force,
			    struct page **pages, struct vm_area_struct **vmas)
{
	int flags = FOLL_TOUCH;

	if (pages)
		flags |= FOLL_GET;
	if (write)
		flags |= FOLL_WRITE;
	if (force)
		flags |= FOLL_FORCE;

	return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
}
/*
 * Convert a user-space virtual address into a pages list
 */
static int alloc_user_pages(struct hmm_buffer_object *bo,
			    void *userptr, bool cached)
{
	int page_nr;
	int i;
	struct vm_area_struct *vma;
	struct page **pages;

	pages = kmalloc(sizeof(struct page *) * bo->pgnr, GFP_KERNEL);
	if (unlikely(!pages))
		return -ENOMEM;

	bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * bo->pgnr,
			       GFP_KERNEL);
	if (unlikely(!bo->page_obj)) {
		kfree(pages);
		return -ENOMEM;
	}

	mutex_unlock(&bo->mutex);
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, (unsigned long)userptr);
	up_read(&current->mm->mmap_sem);
	if (!vma) {
		dev_err(atomisp_dev, "find_vma failed\n");
		kfree(bo->page_obj);
		kfree(pages);
		mutex_lock(&bo->mutex);
		return -EFAULT;
	}
	mutex_lock(&bo->mutex);
	/*
	 * Handle a frame buffer allocated in another kernel-space driver
	 * and mapped to user space
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
		page_nr = get_pfnmap_pages(current, current->mm,
					   (unsigned long)userptr,
					   (int)(bo->pgnr), 1, 0,
					   pages, NULL);
		bo->mem_type = HMM_BO_MEM_TYPE_PFN;
	} else {
		/* Handle a frame buffer allocated in user space */
		mutex_unlock(&bo->mutex);
		down_read(&current->mm->mmap_sem);
		page_nr = get_user_pages((unsigned long)userptr,
					 (int)(bo->pgnr), 1, pages, NULL);
		up_read(&current->mm->mmap_sem);
		mutex_lock(&bo->mutex);
		bo->mem_type = HMM_BO_MEM_TYPE_USER;
	}

	/* can be written by caller, not forced */
	if (page_nr != bo->pgnr) {
		dev_err(atomisp_dev,
			"get_user_pages err: bo->pgnr = %d, pgnr actually pinned = %d.\n",
			bo->pgnr, page_nr);
		goto out_of_mem;
	}

	for (i = 0; i < bo->pgnr; i++) {
		bo->page_obj[i].page = pages[i];
		bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
	}
	hmm_mem_stat.usr_size += bo->pgnr;
	kfree(pages);

	return 0;

out_of_mem:
	for (i = 0; i < page_nr; i++)
		put_page(pages[i]);
	kfree(pages);
	kfree(bo->page_obj);

	return -ENOMEM;
}
static void free_user_pages(struct hmm_buffer_object *bo)
{
	int i;

	for (i = 0; i < bo->pgnr; i++)
		put_page(bo->page_obj[i].page);
	hmm_mem_stat.usr_size -= bo->pgnr;

	kfree(bo->page_obj);
}
/*
 * allocate/free physical pages for the bo.
 *
 * type indicates where the pages are from. currently we have 3 types
 * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
 *
 * from_highmem is only valid when type is HMM_BO_PRIVATE; it will
 * try to alloc memory from highmem if from_highmem is set.
 *
 * userptr is only valid when type is HMM_BO_USER; it indicates
 * the start address from the user-space task.
 *
 * from_highmem and userptr will both be ignored when type is
 * HMM_BO_SHARE.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
		       enum hmm_bo_type type, int from_highmem,
		       void *userptr, bool cached)
{
	int ret = -EINVAL;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);
	check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	/*
	 * TO DO:
	 * add HMM_BO_USER type
	 */
	if (type == HMM_BO_PRIVATE) {
		ret = alloc_private_pages(bo, from_highmem,
					  cached, &dynamic_pool, &reserved_pool);
	} else if (type == HMM_BO_USER) {
		ret = alloc_user_pages(bo, userptr, cached);
	} else {
		dev_err(atomisp_dev, "invalid buffer type.\n");
		ret = -EINVAL;
	}
	if (ret)
		goto alloc_err;

	bo->type = type;

	bo->status |= HMM_BO_PAGE_ALLOCED;

	mutex_unlock(&bo->mutex);

	return 0;

alloc_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "alloc pages err...\n");
	return ret;
status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object has already page allocated.\n");
	return -EINVAL;
}
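/*
 * Illustrative only, not part of the driver: after hmm_bo_alloc() the bo is
 * just a virtual address range; backing pages come from
 * hmm_bo_alloc_pages(), either private pages (reserved/dynamic/system
 * pools) or pinned user pages.  Error handling is omitted and "userptr" is
 * a hypothetical user-space pointer.
 *
 *	hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, false);
 *
 * or, to wrap a user buffer instead:
 *
 *	hmm_bo_alloc_pages(bo, HMM_BO_USER, 0, userptr, true);
 *
 * The pages are returned with hmm_bo_free_pages(); hmm_bo_release() also
 * does this automatically if the caller forgot.
 */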
/*
 * free physical pages of the bo.
 */
void hmm_bo_free_pages(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);

	/* clear the flag anyway. */
	bo->status &= (~HMM_BO_PAGE_ALLOCED);

	if (bo->type == HMM_BO_PRIVATE)
		free_private_pages(bo, &dynamic_pool, &reserved_pool);
	else if (bo->type == HMM_BO_USER)
		free_user_pages(bo);
	else
		dev_err(atomisp_dev, "invalid buffer type.\n");
	mutex_unlock(&bo->mutex);

	return;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object not page allocated yet.\n");
}
int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
{
	check_bo_null_return(bo, 0);

	return bo->status & HMM_BO_PAGE_ALLOCED;
}
/*
 * get physical page info of the bo.
 */
int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
			 struct hmm_page_object **page_obj, int *pgnr)
{
	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	*page_obj = bo->page_obj;
	*pgnr = bo->pgnr;

	mutex_unlock(&bo->mutex);

	return 0;

status_err:
	dev_err(atomisp_dev,
		"buffer object not page allocated yet.\n");
	mutex_unlock(&bo->mutex);
	return -EINVAL;
}
/*
 * bind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo)
{
	int ret;
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return(bo, -EINVAL);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
				 status_err1);

	check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		ret =
		    isp_mmu_map(&bdev->mmu, virt,
				page_to_phys(bo->page_obj[i].page), 1);
		if (ret)
			goto map_err;
		virt += (1 << PAGE_SHIFT);
	}

	/*
	 * flush TLB.
	 *
	 * theoretically, we do not need to flush the TLB as we did not change
	 * any existing address mappings, but for Silicon Hive's MMU there is
	 * really a bug here. I guess when fetching PTEs (page table entries)
	 * into the TLB, its MMU will fetch additional INVALID PTEs
	 * automatically for performance reasons. E.g. we only set up 1 page
	 * address mapping, meaning updating 1 PTE, but the MMU fetches 4 PTEs
	 * at one time, so the additional 3 PTEs are invalid.
	 */
	if (bo->start != 0x0)
		isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
					(bo->pgnr << PAGE_SHIFT));

	bo->status |= HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return 0;

map_err:
	/* unbind the physical pages with related virtual address space */
	virt = bo->start;
	for ( ; i > 0; i--) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"setup MMU address mapping failed.\n");
	return ret;

status_err2:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev, "buffer object already binded.\n");
	return -EINVAL;
status_err1:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer object vm_node or page not allocated.\n");
	return -EINVAL;
}
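/*
 * Illustrative only, not part of the driver: binding pairs with unbinding.
 * Once a bo has backing pages, hmm_bo_bind() creates the ISP MMU mappings
 * for bo->start..bo->end and hmm_bo_unbind() must tear them down before the
 * pages are freed (hmm_bo_release() enforces this ordering as a fallback).
 * The err_free_pages label below is hypothetical.
 *
 *	if (hmm_bo_bind(bo))
 *		goto err_free_pages;
 *	...
 *	hmm_bo_unbind(bo);
 */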
/*
 * unbind the physical pages from the related virtual address space.
 */
void hmm_bo_unbind(struct hmm_buffer_object *bo)
{
	unsigned int virt;
	struct hmm_bo_device *bdev;
	unsigned int i;

	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);

	check_bo_status_yes_goto(bo,
				 HMM_BO_PAGE_ALLOCED |
				 HMM_BO_ALLOCED |
				 HMM_BO_BINDED, status_err);

	bdev = bo->bdev;

	virt = bo->start;

	for (i = 0; i < bo->pgnr; i++) {
		isp_mmu_unmap(&bdev->mmu, virt, 1);
		virt += pgnr_to_size(1);
	}

	/*
	 * flush TLB as the address mapping has been removed and
	 * related TLBs should be invalidated.
	 */
	isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
				(bo->pgnr << PAGE_SHIFT));

	bo->status &= (~HMM_BO_BINDED);

	mutex_unlock(&bo->mutex);

	return;

status_err:
	mutex_unlock(&bo->mutex);
	dev_err(atomisp_dev,
		"buffer vm or page not allocated or not bound yet.\n");
}
int hmm_bo_binded(struct hmm_buffer_object *bo)
{
	int ret;

	check_bo_null_return(bo, 0);

	mutex_lock(&bo->mutex);

	ret = bo->status & HMM_BO_BINDED;

	mutex_unlock(&bo->mutex);

	return ret;
}
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
{
	struct page **pages;
	int i;

	check_bo_null_return(bo, NULL);

	mutex_lock(&bo->mutex);
	if (((bo->status & HMM_BO_VMAPED) && !cached) ||
	    ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
		mutex_unlock(&bo->mutex);
		return bo->vmap_addr;
	}

	/* cached status needs to be changed, so vunmap first */
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	pages = kmalloc(sizeof(*pages) * bo->pgnr, GFP_KERNEL);
	if (unlikely(!pages)) {
		mutex_unlock(&bo->mutex);
		return NULL;
	}

	for (i = 0; i < bo->pgnr; i++)
		pages[i] = bo->page_obj[i].page;

	bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
			     cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
	if (unlikely(!bo->vmap_addr)) {
		kfree(pages);
		mutex_unlock(&bo->mutex);
		dev_err(atomisp_dev, "vmap failed...\n");
		return NULL;
	}
	bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);

	kfree(pages);

	mutex_unlock(&bo->mutex);
	return bo->vmap_addr;
}
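/*
 * Illustrative only, not part of the driver: CPU-side access to a bo goes
 * through hmm_bo_vmap().  With cached == true the mapping must be flushed
 * with hmm_bo_flush_vmap() before the ISP reads the data.  "src" and
 * "bytes" below are hypothetical.
 *
 *	void *va = hmm_bo_vmap(bo, true);
 *
 *	if (va) {
 *		memcpy(va, src, bytes);
 *		hmm_bo_flush_vmap(bo);
 *		hmm_bo_vunmap(bo);
 *	}
 */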
void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
		mutex_unlock(&bo->mutex);
		return;
	}

	clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
	mutex_unlock(&bo->mutex);
}
void hmm_bo_vunmap(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	mutex_lock(&bo->mutex);
	if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
		vunmap(bo->vmap_addr);
		bo->vmap_addr = NULL;
		bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
	}

	mutex_unlock(&bo->mutex);
}
void hmm_bo_ref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_get(&bo->kref);
}
static void kref_hmm_bo_release(struct kref *kref)
{
	if (!kref)
		return;

	hmm_bo_release(kref_to_hmm_bo(kref));
}
void hmm_bo_unref(struct hmm_buffer_object *bo)
{
	check_bo_null_return_void(bo);

	kref_put(&bo->kref, kref_hmm_bo_release);
}
static void hmm_bo_vm_open(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_ref(bo);

	mutex_lock(&bo->mutex);

	bo->status |= HMM_BO_MMAPED;

	bo->mmap_count++;

	mutex_unlock(&bo->mutex);
}
static void hmm_bo_vm_close(struct vm_area_struct *vma)
{
	struct hmm_buffer_object *bo =
	    (struct hmm_buffer_object *)vma->vm_private_data;

	check_bo_null_return_void(bo);

	hmm_bo_unref(bo);

	mutex_lock(&bo->mutex);

	bo->mmap_count--;

	if (!bo->mmap_count) {
		bo->status &= (~HMM_BO_MMAPED);
		vma->vm_private_data = NULL;
	}

	mutex_unlock(&bo->mutex);
}
static const struct vm_operations_struct hmm_bo_vm_ops = {
	.open = hmm_bo_vm_open,
	.close = hmm_bo_vm_close,
};
/*
 * mmap the bo to user space.
 */
int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
{
	unsigned int start, end;
	unsigned int virt;
	unsigned int pgnr, i;
	unsigned int pfn;

	check_bo_null_return(bo, -EINVAL);

	check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);

	pgnr = bo->pgnr;
	start = vma->vm_start;
	end = vma->vm_end;

	/*
	 * check vma's virtual address space size against the buffer object's
	 * size; they must match.
	 */
	if ((start + pgnr_to_size(pgnr)) != end) {
		dev_warn(atomisp_dev,
			 "vma's address space size not equal to buffer object's size");
		return -EINVAL;
	}

	virt = vma->vm_start;
	for (i = 0; i < pgnr; i++) {
		pfn = page_to_pfn(bo->page_obj[i].page);
		if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
			dev_warn(atomisp_dev,
				 "remap_pfn_range failed: virt = 0x%x, pfn = 0x%x, mapped_pgnr = %d\n",
				 virt, pfn, 1);
			return -EINVAL;
		}
		virt += PAGE_SIZE;
	}

	vma->vm_private_data = bo;

	vma->vm_ops = &hmm_bo_vm_ops;
	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;

	/*
	 * call hmm_bo_vm_open explicitly.
	 */
	hmm_bo_vm_open(vma);

	return 0;

status_err:
	dev_err(atomisp_dev, "buffer page not allocated yet.\n");
	return -EINVAL;
}
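/*
 * Illustrative only, not part of the driver: hmm_bo_mmap() is meant to be
 * called from a file_operations/V4L2 .mmap handler once the bo backing the
 * requested range has been looked up.  The lookup helper used here is
 * hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct hmm_buffer_object *bo = my_lookup_bo(file, vma);
 *
 *		if (!bo)
 *			return -EINVAL;
 *		return hmm_bo_mmap(vma, bo);
 *	}
 */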