1 /*
2  * Support for Medfield PNW Camera Imaging ISP subsystem.
3 *
4 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
5 *
6 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
20 * 02110-1301, USA.
21 *
22 */
23 /*
24 * This file contains functions for buffer object structure management
25 */
26 #include <linux/kernel.h>
27 #include <linux/types.h>
28 #include <linux/gfp.h> /* for GFP_ATOMIC */
29 #include <linux/mm.h>
30 #include <linux/mm_types.h>
31 #include <linux/hugetlb.h>
32 #include <linux/highmem.h>
33 #include <linux/slab.h> /* for kmalloc */
34 #include <linux/module.h>
35 #include <linux/moduleparam.h>
36 #include <linux/string.h>
37 #include <linux/list.h>
38 #include <linux/errno.h>
39 #include <linux/io.h>
40 #include <asm/current.h>
41 #include <linux/sched/signal.h>
42 #include <linux/file.h>
43
44 #include <asm/set_memory.h>
45
46 #include "atomisp_internal.h"
47 #include "hmm/hmm_common.h"
48 #include "hmm/hmm_pool.h"
49 #include "hmm/hmm_bo.h"
50
51 static unsigned int order_to_nr(unsigned int order)
52 {
53 return 1U << order;
54 }
55
56 static unsigned int nr_to_order_bottom(unsigned int nr)
57 {
58 return fls(nr) - 1;
59 }
60
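/*
 * Worked example for the two order helpers above (illustration only,
 * not part of the original driver):
 *
 *   order_to_nr(2)        == 4    (an order-2 block holds 2^2 pages)
 *   nr_to_order_bottom(6) == 2    (fls(6) - 1 = 2, i.e. the largest
 *                                  order whose block still fits into
 *                                  6 pages: 4 <= 6 < 8)
 *
 * alloc_private_pages() below relies on this rounding-down behaviour
 * to carve a request into power-of-two blocks.
 */
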
61 static struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
62 {
63 struct hmm_buffer_object *bo;
64
65 bo = kmem_cache_alloc(bo_cache, GFP_KERNEL);
66 if (!bo)
67 dev_err(atomisp_dev, "%s: failed!\n", __func__);
68
69 return bo;
70 }
71
72 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
73 unsigned int pgnr)
74 {
75 check_bodev_null_return(bdev, -EINVAL);
76 var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
77 "hmm_bo_device not inited yet.\n");
78 /* prevent zero size buffer object */
79 if (pgnr == 0) {
80 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
81 return -EINVAL;
82 }
83
84 memset(bo, 0, sizeof(*bo));
85 mutex_init(&bo->mutex);
86
87 /* init the bo->list HEAD as an element of entire_bo_list */
88 INIT_LIST_HEAD(&bo->list);
89
90 bo->bdev = bdev;
91 bo->vmap_addr = NULL;
92 bo->status = HMM_BO_FREE;
93 bo->start = bdev->start;
94 bo->pgnr = pgnr;
95 bo->end = bo->start + pgnr_to_size(pgnr);
96 bo->prev = NULL;
97 bo->next = NULL;
98
99 return 0;
100 }
101
102 static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
103 struct rb_node *node, unsigned int pgnr)
104 {
105 struct hmm_buffer_object *this, *ret_bo, *temp_bo;
106
107 this = rb_entry(node, struct hmm_buffer_object, node);
108 if (this->pgnr == pgnr ||
109 (this->pgnr > pgnr && this->node.rb_left == NULL)) {
110 goto remove_bo_and_return;
111 } else {
112 if (this->pgnr < pgnr) {
113 if (!this->node.rb_right)
114 return NULL;
115 ret_bo = __bo_search_and_remove_from_free_rbtree(
116 this->node.rb_right, pgnr);
117 } else {
118 ret_bo = __bo_search_and_remove_from_free_rbtree(
119 this->node.rb_left, pgnr);
120 }
121 if (!ret_bo) {
122 if (this->pgnr > pgnr)
123 goto remove_bo_and_return;
124 else
125 return NULL;
126 }
127 return ret_bo;
128 }
129
130 remove_bo_and_return:
131 /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
132 * 1. check if 'this->next' is NULL:
133 * yes: erase 'this' node and rebalance rbtree, return 'this'.
134 */
135 if (this->next == NULL) {
136 rb_erase(&this->node, &this->bdev->free_rbtree);
137 return this;
138 }
139 /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
140 * 2. check if 'this->next->next' is NULL:
141 * yes: change the related 'next/prev' pointer,
142 * return 'this->next' but the rbtree stays unchanged.
143 */
144 temp_bo = this->next;
145 this->next = temp_bo->next;
146 if (temp_bo->next)
147 temp_bo->next->prev = this;
148 temp_bo->next = NULL;
149 temp_bo->prev = NULL;
150 return temp_bo;
151 }
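
/*
 * Sketch of the free-rbtree layout the search above assumes
 * (illustration only, values are made up):
 *
 *             [pgnr=8]                   rbtree keyed by pgnr
 *             /      \
 *       [pgnr=4]   [pgnr=16] -> bo -> bo
 *
 * Equal-sized free bos hang off the rbtree node through prev/next,
 * and only the list head is an actual rbtree node.  That is why
 * handing out 'this->next' above never needs to rebalance the tree,
 * while handing out 'this' itself requires rb_erase().
 */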
152
153 static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
154 ia_css_ptr start)
155 {
156 struct rb_node *n = root->rb_node;
157 struct hmm_buffer_object *bo;
158
159 do {
160 bo = rb_entry(n, struct hmm_buffer_object, node);
161
162 if (bo->start > start) {
163 if (n->rb_left == NULL)
164 return NULL;
165 n = n->rb_left;
166 } else if (bo->start < start) {
167 if (n->rb_right == NULL)
168 return NULL;
169 n = n->rb_right;
170 } else {
171 return bo;
172 }
173 } while (n);
174
175 return NULL;
176 }
177
178 static struct hmm_buffer_object *__bo_search_by_addr_in_range(
179 struct rb_root *root, unsigned int start)
180 {
181 struct rb_node *n = root->rb_node;
182 struct hmm_buffer_object *bo;
183
184 do {
185 bo = rb_entry(n, struct hmm_buffer_object, node);
186
187 if (bo->start > start) {
188 if (n->rb_left == NULL)
189 return NULL;
190 n = n->rb_left;
191 } else {
192 if (bo->end > start)
193 return bo;
194 if (n->rb_right == NULL)
195 return NULL;
196 n = n->rb_right;
197 }
198 } while (n);
199
200 return NULL;
201 }
202
203 static void __bo_insert_to_free_rbtree(struct rb_root *root,
204 struct hmm_buffer_object *bo)
205 {
206 struct rb_node **new = &(root->rb_node);
207 struct rb_node *parent = NULL;
208 struct hmm_buffer_object *this;
209 unsigned int pgnr = bo->pgnr;
210
211 while (*new) {
212 parent = *new;
213 this = container_of(*new, struct hmm_buffer_object, node);
214
215 if (pgnr < this->pgnr) {
216 new = &((*new)->rb_left);
217 } else if (pgnr > this->pgnr) {
218 new = &((*new)->rb_right);
219 } else {
220 bo->prev = this;
221 bo->next = this->next;
222 if (this->next)
223 this->next->prev = bo;
224 this->next = bo;
225 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
226 return;
227 }
228 }
229
230 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
231
232 rb_link_node(&bo->node, parent, new);
233 rb_insert_color(&bo->node, root);
234 }
235
236 static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
237 struct hmm_buffer_object *bo)
238 {
239 struct rb_node **new = &(root->rb_node);
240 struct rb_node *parent = NULL;
241 struct hmm_buffer_object *this;
242 unsigned int start = bo->start;
243
244 while (*new) {
245 parent = *new;
246 this = container_of(*new, struct hmm_buffer_object, node);
247
248 if (start < this->start)
249 new = &((*new)->rb_left);
250 else
251 new = &((*new)->rb_right);
252 }
253
254 kref_init(&bo->kref);
255 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
256
257 rb_link_node(&bo->node, parent, new);
258 rb_insert_color(&bo->node, root);
259 }
260
261 static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
262 struct hmm_buffer_object *bo,
263 unsigned int pgnr)
264 {
265 struct hmm_buffer_object *new_bo;
266 unsigned long flags;
267 int ret;
268
269 new_bo = __bo_alloc(bdev->bo_cache);
270 if (!new_bo) {
271 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
272 return NULL;
273 }
274 ret = __bo_init(bdev, new_bo, pgnr);
275 if (ret) {
276 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
277 kmem_cache_free(bdev->bo_cache, new_bo);
278 return NULL;
279 }
280
281 new_bo->start = bo->start;
282 new_bo->end = new_bo->start + pgnr_to_size(pgnr);
283 bo->start = new_bo->end;
284 bo->pgnr = bo->pgnr - pgnr;
285
286 spin_lock_irqsave(&bdev->list_lock, flags);
287 list_add_tail(&new_bo->list, &bo->list);
288 spin_unlock_irqrestore(&bdev->list_lock, flags);
289
290 return new_bo;
291 }
292
293 static void __bo_take_off_handling(struct hmm_buffer_object *bo)
294 {
295 struct hmm_bo_device *bdev = bo->bdev;
296 /* There are 4 situations when we take off a known bo from the free rbtree:
297 * 1. if bo->prev == NULL && bo->next == NULL, bo is an rbtree node
298 * and does not have a linked list after it; to take off this bo,
299 * we just need to erase bo directly and rebalance the free rbtree
300 */
301 if (bo->prev == NULL && bo->next == NULL) {
302 rb_erase(&bo->node, &bdev->free_rbtree);
303 /* 2. when bo->next != NULL && bo->prev == NULL, bo is an rbtree node
304 * and has a linked list; to take off this bo we need to erase bo
305 * first, then insert bo->next into the free rbtree and rebalance
306 * the free rbtree
307 */
308 } else if (bo->prev == NULL && bo->next != NULL) {
309 bo->next->prev = NULL;
310 rb_erase(&bo->node, &bdev->free_rbtree);
311 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
312 bo->next = NULL;
313 /* 3. when bo->prev != NULL && bo->next == NULL, bo is not an rbtree
314 * node, bo is the last element of the linked list after the rbtree
315 * node; to take off this bo, we just need to set the "prev/next"
316 * pointers to NULL, and the free rbtree stays unchanged
317 */
318 } else if (bo->prev != NULL && bo->next == NULL) {
319 bo->prev->next = NULL;
320 bo->prev = NULL;
321 /* 4. when bo->prev != NULL && bo->next != NULL, bo is not an rbtree
322 * node, bo is in the middle of the linked list after the rbtree node;
323 * to take off this bo, we just relink its neighbours' "prev/next"
324 * pointers and clear its own, and the free rbtree stays unchanged
325 */
326 } else {
327 bo->next->prev = bo->prev;
328 bo->prev->next = bo->next;
329 bo->next = NULL;
330 bo->prev = NULL;
331 }
332 }
333
334 static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
335 struct hmm_buffer_object *next_bo)
336 {
337 struct hmm_bo_device *bdev;
338 unsigned long flags;
339
340 bdev = bo->bdev;
341 next_bo->start = bo->start;
342 next_bo->pgnr = next_bo->pgnr + bo->pgnr;
343
344 spin_lock_irqsave(&bdev->list_lock, flags);
345 list_del(&bo->list);
346 spin_unlock_irqrestore(&bdev->list_lock, flags);
347
348 kmem_cache_free(bo->bdev->bo_cache, bo);
349
350 return next_bo;
351 }
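
/*
 * Worked example of the break-up/merge pair above (numbers are
 * illustrative only and assume a 4 KiB page size):
 *
 *   free bo:  start = 0x10000000, pgnr = 16
 *   request:  pgnr = 4
 *
 *   __bo_break_up() hands back a new bo covering
 *       [0x10000000, 0x10004000)
 *   and shrinks the original free bo to
 *       start = 0x10004000, pgnr = 12.
 *
 *   When the 4-page bo is later released next to a free neighbour,
 *   hmm_bo_release() uses __bo_merge() to fold the two ranges back
 *   into a single free bo.
 */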
352
353 /*
354 * hmm_bo_device functions.
355 */
356 int hmm_bo_device_init(struct hmm_bo_device *bdev,
357 struct isp_mmu_client *mmu_driver,
358 unsigned int vaddr_start,
359 unsigned int size)
360 {
361 struct hmm_buffer_object *bo;
362 unsigned long flags;
363 int ret;
364
365 check_bodev_null_return(bdev, -EINVAL);
366
367 ret = isp_mmu_init(&bdev->mmu, mmu_driver);
368 if (ret) {
369 dev_err(atomisp_dev, "isp_mmu_init failed.\n");
370 return ret;
371 }
372
373 bdev->start = vaddr_start;
374 bdev->pgnr = size_to_pgnr_ceil(size);
375 bdev->size = pgnr_to_size(bdev->pgnr);
376
377 spin_lock_init(&bdev->list_lock);
378 mutex_init(&bdev->rbtree_mutex);
379
380 bdev->flag = HMM_BO_DEVICE_INITED;
381
382 INIT_LIST_HEAD(&bdev->entire_bo_list);
383 bdev->allocated_rbtree = RB_ROOT;
384 bdev->free_rbtree = RB_ROOT;
385
386 bdev->bo_cache = kmem_cache_create("bo_cache",
387 sizeof(struct hmm_buffer_object), 0, 0, NULL);
388 if (!bdev->bo_cache) {
389 dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
390 isp_mmu_exit(&bdev->mmu);
391 return -ENOMEM;
392 }
393
394 bo = __bo_alloc(bdev->bo_cache);
395 if (!bo) {
396 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
397 isp_mmu_exit(&bdev->mmu);
398 return -ENOMEM;
399 }
400
401 ret = __bo_init(bdev, bo, bdev->pgnr);
402 if (ret) {
403 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
404 kmem_cache_free(bdev->bo_cache, bo);
405 isp_mmu_exit(&bdev->mmu);
406 return -EINVAL;
407 }
408
409 spin_lock_irqsave(&bdev->list_lock, flags);
410 list_add_tail(&bo->list, &bdev->entire_bo_list);
411 spin_unlock_irqrestore(&bdev->list_lock, flags);
412
413 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
414
415 return 0;
416 }
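
/*
 * Typical bring-up/tear-down sequence for a bo device (a sketch; the
 * mmu client and address-range symbols shown are assumptions taken
 * from elsewhere in the driver, not defined in this file):
 *
 *   ret = hmm_bo_device_init(&bo_device, &sh_mmu_mrfld,
 *                            ISP_VM_START, ISP_VM_SIZE);
 *   ...
 *   hmm_bo_device_exit(&bo_device);
 *
 * After init the whole ISP virtual range is represented by one big
 * free bo sitting alone in free_rbtree; hmm_bo_alloc() carves pieces
 * out of it and hmm_bo_release() merges them back.
 */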
417
418 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
419 unsigned int pgnr)
420 {
421 struct hmm_buffer_object *bo, *new_bo;
422 struct rb_root *root = &bdev->free_rbtree;
423
424 check_bodev_null_return(bdev, NULL);
425 var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
426 "hmm_bo_device not inited yet.\n");
427
428 if (pgnr == 0) {
429 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
430 return NULL;
431 }
432
433 mutex_lock(&bdev->rbtree_mutex);
434 bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
435 if (!bo) {
436 mutex_unlock(&bdev->rbtree_mutex);
437 dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
438 __func__);
439 return NULL;
440 }
441
442 if (bo->pgnr > pgnr) {
443 new_bo = __bo_break_up(bdev, bo, pgnr);
444 if (!new_bo) {
445 mutex_unlock(&bdev->rbtree_mutex);
446 dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
447 __func__);
448 return NULL;
449 }
450
451 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
452 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
453
454 mutex_unlock(&bdev->rbtree_mutex);
455 return new_bo;
456 }
457
458 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
459
460 mutex_unlock(&bdev->rbtree_mutex);
461 return bo;
462 }
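
/*
 * Minimal usage sketch for the allocator above (hypothetical caller;
 * 'bo_device' stands for the driver's global bo device):
 *
 *   struct hmm_buffer_object *bo;
 *
 *   bo = hmm_bo_alloc(&bo_device, size_to_pgnr_ceil(bytes));
 *   if (!bo)
 *       return -ENOMEM;
 *   ...
 *   hmm_bo_unref(bo);   // drops the kref taken by
 *                       // __bo_insert_to_alloc_rbtree() and ends up
 *                       // in hmm_bo_release()
 */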
463
464 void hmm_bo_release(struct hmm_buffer_object *bo)
465 {
466 struct hmm_bo_device *bdev = bo->bdev;
467 struct hmm_buffer_object *next_bo, *prev_bo;
468
469 mutex_lock(&bdev->rbtree_mutex);
470
471 /*
472 * FIX ME:
473 *
474 * how to destroy the bo when it is still MMAPED?
475 *
476 * ideally, this will not happen as hmm_bo_release
477 * will only be called when kref reaches 0, and in the mmap
478 * operation hmm_bo_ref will eventually be called.
479 * so, if this happens, something has gone wrong.
480 */
481 if (bo->status & HMM_BO_MMAPED) {
482 mutex_unlock(&bdev->rbtree_mutex);
483 dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
484 return;
485 }
486
487 if (bo->status & HMM_BO_BINDED) {
488 dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
489 hmm_bo_unbind(bo);
490 }
491
492 if (bo->status & HMM_BO_PAGE_ALLOCED) {
493 dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
494 hmm_bo_free_pages(bo);
495 }
496 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
497 dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
498 hmm_bo_vunmap(bo);
499 }
500
501 rb_erase(&bo->node, &bdev->allocated_rbtree);
502
503 prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
504 next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
505
506 if (bo->list.prev != &bdev->entire_bo_list &&
507 prev_bo->end == bo->start &&
508 (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
509 __bo_take_off_handling(prev_bo);
510 bo = __bo_merge(prev_bo, bo);
511 }
512
513 if (bo->list.next != &bdev->entire_bo_list &&
514 next_bo->start == bo->end &&
515 (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
516 __bo_take_off_handling(next_bo);
517 bo = __bo_merge(bo, next_bo);
518 }
519
520 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
521
522 mutex_unlock(&bdev->rbtree_mutex);
523 return;
524 }
525
526 void hmm_bo_device_exit(struct hmm_bo_device *bdev)
527 {
528 struct hmm_buffer_object *bo;
529 unsigned long flags;
530
531 dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
532
533 check_bodev_null_return_void(bdev);
534
535 /*
536 * release all allocated bos even if they are in use,
537 * and all bos will be merged into one big bo
538 */
539 while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
540 hmm_bo_release(
541 rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
542
543 dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
544 __func__);
545
546 /* free all bos to release all ISP virtual memory */
547 while (!list_empty(&bdev->entire_bo_list)) {
548 bo = list_to_hmm_bo(bdev->entire_bo_list.next);
549
550 spin_lock_irqsave(&bdev->list_lock, flags);
551 list_del(&bo->list);
552 spin_unlock_irqrestore(&bdev->list_lock, flags);
553
554 kmem_cache_free(bdev->bo_cache, bo);
555 }
556
557 dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);
558
559 kmem_cache_destroy(bdev->bo_cache);
560
561 isp_mmu_exit(&bdev->mmu);
562 }
563
564 int hmm_bo_device_inited(struct hmm_bo_device *bdev)
565 {
566 check_bodev_null_return(bdev, -EINVAL);
567
568 return bdev->flag == HMM_BO_DEVICE_INITED;
569 }
570
571 int hmm_bo_allocated(struct hmm_buffer_object *bo)
572 {
573 check_bo_null_return(bo, 0);
574
575 return bo->status & HMM_BO_ALLOCED;
576 }
577
578 struct hmm_buffer_object *hmm_bo_device_search_start(
579 struct hmm_bo_device *bdev, ia_css_ptr vaddr)
580 {
581 struct hmm_buffer_object *bo;
582
583 check_bodev_null_return(bdev, NULL);
584
585 mutex_lock(&bdev->rbtree_mutex);
586 bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
587 if (!bo) {
588 mutex_unlock(&bdev->rbtree_mutex);
589 dev_err(atomisp_dev, "%s cannot find bo with addr: 0x%x\n",
590 __func__, vaddr);
591 return NULL;
592 }
593 mutex_unlock(&bdev->rbtree_mutex);
594
595 return bo;
596 }
597
598 struct hmm_buffer_object *hmm_bo_device_search_in_range(
599 struct hmm_bo_device *bdev, unsigned int vaddr)
600 {
601 struct hmm_buffer_object *bo;
602
603 check_bodev_null_return(bdev, NULL);
604
605 mutex_lock(&bdev->rbtree_mutex);
606 bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
607 if (!bo) {
608 mutex_unlock(&bdev->rbtree_mutex);
609 dev_err(atomisp_dev, "%s cannot find bo containing addr: 0x%x\n",
610 __func__, vaddr);
611 return NULL;
612 }
613 mutex_unlock(&bdev->rbtree_mutex);
614
615 return bo;
616 }
617
618 struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
619 struct hmm_bo_device *bdev, const void *vaddr)
620 {
621 struct list_head *pos;
622 struct hmm_buffer_object *bo;
623 unsigned long flags;
624
625 check_bodev_null_return(bdev, NULL);
626
627 spin_lock_irqsave(&bdev->list_lock, flags);
628 list_for_each(pos, &bdev->entire_bo_list) {
629 bo = list_to_hmm_bo(pos);
630 /* skip bo which has no vm_node allocated */
631 if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
632 continue;
633 if (bo->vmap_addr == vaddr)
634 goto found;
635 }
636 spin_unlock_irqrestore(&bdev->list_lock, flags);
637 return NULL;
638 found:
639 spin_unlock_irqrestore(&bdev->list_lock, flags);
640 return bo;
641
642 }
643
644
645 static void free_private_bo_pages(struct hmm_buffer_object *bo,
646 struct hmm_pool *dypool,
647 struct hmm_pool *repool,
648 int free_pgnr)
649 {
650 int i, ret;
651
652 for (i = 0; i < free_pgnr; i++) {
653 switch (bo->page_obj[i].type) {
654 case HMM_PAGE_TYPE_RESERVED:
655 if (repool->pops
656 && repool->pops->pool_free_pages) {
657 repool->pops->pool_free_pages(repool->pool_info,
658 &bo->page_obj[i]);
659 hmm_mem_stat.res_cnt--;
660 }
661 break;
662 /*
663 * HMM_PAGE_TYPE_GENERAL indicates that pages are from system
664 * memory, so when freeing them, they should be put back into the
665 * dynamic pool.
666 */
667 case HMM_PAGE_TYPE_DYNAMIC:
668 case HMM_PAGE_TYPE_GENERAL:
669 if (dypool->pops
670 && dypool->pops->pool_inited
671 && dypool->pops->pool_inited(dypool->pool_info)) {
672 if (dypool->pops->pool_free_pages)
673 dypool->pops->pool_free_pages(
674 dypool->pool_info,
675 &bo->page_obj[i]);
676 break;
677 }
678
679 /*
680 * if the dynamic memory pool doesn't exist, we need to free
681 * the pages back to the system directly.
682 */
683 default:
684 ret = set_pages_wb(bo->page_obj[i].page, 1);
685 if (ret)
686 dev_err(atomisp_dev,
687 "set page to WB err ...ret = %d\n",
688 ret);
689 /*
690 * W/A: set_pages_wb occasionally returns -EFAULT,
691 * indicating that the page address is not in the valid
692 * range (0xffff880000000000~0xffffc7ffffffffff);
693 * __free_pages would then panic. It is not known why the page
694 * address becomes invalid; it may be memory corruption caused by low memory.
695 */
696 if (!ret) {
697 __free_pages(bo->page_obj[i].page, 0);
698 hmm_mem_stat.sys_size--;
699 }
700 break;
701 }
702 }
703
704 return;
705 }
706
707 /* Allocate pages which will be used only by ISP */
708 static int alloc_private_pages(struct hmm_buffer_object *bo,
709 int from_highmem,
710 bool cached,
711 struct hmm_pool *dypool,
712 struct hmm_pool *repool)
713 {
714 int ret;
715 unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
716 struct page *pages;
717 gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
718 int i, j;
719 int failure_number = 0;
720 bool reduce_order = false;
721 bool lack_mem = true;
722
723 if (from_highmem)
724 gfp |= __GFP_HIGHMEM;
725
726 pgnr = bo->pgnr;
727
728 bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * pgnr,
729 GFP_KERNEL);
730 if (unlikely(!bo->page_obj))
731 return -ENOMEM;
732
733 i = 0;
734 alloc_pgnr = 0;
735
736 /*
737 * get physical pages from dynamic pages pool.
738 */
739 if (dypool->pops && dypool->pops->pool_alloc_pages) {
740 alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
741 bo->page_obj, pgnr,
742 cached);
743 hmm_mem_stat.dyc_size -= alloc_pgnr;
744
745 if (alloc_pgnr == pgnr)
746 return 0;
747 }
748
749 pgnr -= alloc_pgnr;
750 i += alloc_pgnr;
751
752 /*
753 * get physical pages from reserved pages pool for atomisp.
754 */
755 if (repool->pops && repool->pops->pool_alloc_pages) {
756 alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
757 &bo->page_obj[i], pgnr,
758 cached);
759 hmm_mem_stat.res_cnt += alloc_pgnr;
760 if (alloc_pgnr == pgnr)
761 return 0;
762 }
763
764 pgnr -= alloc_pgnr;
765 i += alloc_pgnr;
766
767 while (pgnr) {
768 order = nr_to_order_bottom(pgnr);
769 /*
770 * if we are short of memory, we will set order to HMM_MIN_ORDER
771 * every time.
772 */
773 if (lack_mem)
774 order = HMM_MIN_ORDER;
775 else if (order > HMM_MAX_ORDER)
776 order = HMM_MAX_ORDER;
777 retry:
778 /*
779 * When order > HMM_MIN_ORDER, for performance reasons we don't
780 * want alloc_pages() to sleep. In case it fails and falls back
781 * to HMM_MIN_ORDER, or in case the requested order is originally
782 * the minimum value, we can allow alloc_pages() to sleep for
783 * robustness purposes.
784 *
785 * REVISIT: why is __GFP_FS necessary?
786 */
787 if (order == HMM_MIN_ORDER) {
788 gfp &= ~GFP_NOWAIT;
789 gfp |= __GFP_RECLAIM | __GFP_FS;
790 }
791
792 pages = alloc_pages(gfp, order);
793 if (unlikely(!pages)) {
794 /*
795 * in the low-memory case, if a page allocation fails,
796 * we fall back and try whether an order=0 allocation could
797 * succeed. if order=0 fails too, that means there is
798 * no memory left.
799 */
800 if (order == HMM_MIN_ORDER) {
801 dev_err(atomisp_dev,
802 "%s: cannot allocate pages\n",
803 __func__);
804 goto cleanup;
805 }
806 order = HMM_MIN_ORDER;
807 failure_number++;
808 reduce_order = true;
809 /*
810 * if the allocation fails twice in a row, we assume we
811 * are short of memory now.
812 */
813 if (failure_number == 2) {
814 lack_mem = true;
815 failure_number = 0;
816 }
817 goto retry;
818 } else {
819 blk_pgnr = order_to_nr(order);
820
821 if (!cached) {
822 /*
823 * set memory to uncacheable -- UC_MINUS
824 */
825 ret = set_pages_uc(pages, blk_pgnr);
826 if (ret) {
827 dev_err(atomisp_dev,
828 "set page uncacheable"
829 " failed.\n");
830
831 __free_pages(pages, order);
832
833 goto cleanup;
834 }
835 }
836
837 for (j = 0; j < blk_pgnr; j++) {
838 bo->page_obj[i].page = pages + j;
839 bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
840 }
841
842 pgnr -= blk_pgnr;
843 hmm_mem_stat.sys_size += blk_pgnr;
844
845 /*
846 * if order is not reduced this time, clear
847 * failure_number.
848 */
849 if (reduce_order)
850 reduce_order = false;
851 else
852 failure_number = 0;
853 }
854 }
855
856 return 0;
857 cleanup:
858 alloc_pgnr = i;
859 free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
860
861 kfree(bo->page_obj);
862
863 return -ENOMEM;
864 }
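
/*
 * Note on the fallback strategy above: lack_mem starts out true and is
 * never cleared, so as written every loop iteration requests
 * HMM_MIN_ORDER; the HMM_MAX_ORDER clamp and the failure_number
 * bookkeeping only matter if lack_mem is ever initialised to false.
 * Assuming HMM_MIN_ORDER is 0, a 25-page request that misses both
 * pools therefore becomes 25 single-page alloc_pages() calls.
 */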
865
866 static void free_private_pages(struct hmm_buffer_object *bo,
867 struct hmm_pool *dypool,
868 struct hmm_pool *repool)
869 {
870 free_private_bo_pages(bo, dypool, repool, bo->pgnr);
871
872 kfree(bo->page_obj);
873 }
874
875 /*
876 * Hacked from kernel function __get_user_pages in mm/memory.c
877 *
878 * Handle buffers allocated by another kernel-space driver and mmapped into
879 * user space; this function ignores the VM_PFNMAP and VM_IO flags in the VMA.
880 *
881 * Get physical pages from a user-space virtual address and fill the page list.
882 */
883 static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
884 unsigned long start, int nr_pages,
885 unsigned int gup_flags, struct page **pages,
886 struct vm_area_struct **vmas)
887 {
888 int i, ret;
889 unsigned long vm_flags;
890
891 if (nr_pages <= 0)
892 return 0;
893
894 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
895
896 /*
897 * Require read or write permissions.
898 * If FOLL_FORCE is set, we only require the "MAY" flags.
899 */
900 vm_flags = (gup_flags & FOLL_WRITE) ?
901 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
902 vm_flags &= (gup_flags & FOLL_FORCE) ?
903 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
904 i = 0;
905
906 do {
907 struct vm_area_struct *vma;
908
909 vma = find_vma(mm, start);
910 if (!vma) {
911 dev_err(atomisp_dev, "find_vma failed\n");
912 return i ? : -EFAULT;
913 }
914
915 if (is_vm_hugetlb_page(vma)) {
916 /*
917 i = follow_hugetlb_page(mm, vma, pages, vmas,
918 &start, &nr_pages, i, gup_flags);
919 */
920 continue;
921 }
922
923 do {
924 struct page *page;
925 unsigned long pfn;
926
927 /*
928 * If we have a pending SIGKILL, don't keep faulting
929 * pages and potentially allocating memory.
930 */
931 if (unlikely(fatal_signal_pending(current))) {
932 dev_err(atomisp_dev,
933 "fatal_signal_pending in %s\n",
934 __func__);
935 return i ? i : -ERESTARTSYS;
936 }
937
938 ret = follow_pfn(vma, start, &pfn);
939 if (ret) {
940 dev_err(atomisp_dev, "follow_pfn() failed\n");
941 return i ? : -EFAULT;
942 }
943
944 page = pfn_to_page(pfn);
945 if (IS_ERR(page))
946 return i ? i : PTR_ERR(page);
947 if (pages) {
948 pages[i] = page;
949 get_page(page);
950 flush_anon_page(vma, page, start);
951 flush_dcache_page(page);
952 }
953 if (vmas)
954 vmas[i] = vma;
955 i++;
956 start += PAGE_SIZE;
957 nr_pages--;
958 } while (nr_pages && start < vma->vm_end);
959 } while (nr_pages);
960
961 return i;
962 }
963
964 static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
965 unsigned long start, int nr_pages, int write, int force,
966 struct page **pages, struct vm_area_struct **vmas)
967 {
968 int flags = FOLL_TOUCH;
969
970 if (pages)
971 flags |= FOLL_GET;
972 if (write)
973 flags |= FOLL_WRITE;
974 if (force)
975 flags |= FOLL_FORCE;
976
977 return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
978 }
979
980 /*
981 * Convert user space virtual address into pages list
982 */
983 static int alloc_user_pages(struct hmm_buffer_object *bo,
984 void *userptr, bool cached)
985 {
986 int page_nr;
987 int i;
988 struct vm_area_struct *vma;
989 struct page **pages;
990
991 pages = kmalloc(sizeof(struct page *) * bo->pgnr, GFP_KERNEL);
992 if (unlikely(!pages))
993 return -ENOMEM;
994
995 bo->page_obj = kmalloc(sizeof(struct hmm_page_object) * bo->pgnr,
996 GFP_KERNEL);
997 if (unlikely(!bo->page_obj)) {
998 kfree(pages);
999 return -ENOMEM;
1000 }
1001
1002 mutex_unlock(&bo->mutex);
1003 down_read(&current->mm->mmap_sem);
1004 vma = find_vma(current->mm, (unsigned long)userptr);
1005 up_read(&current->mm->mmap_sem);
1006 if (vma == NULL) {
1007 dev_err(atomisp_dev, "find_vma failed\n");
1008 kfree(bo->page_obj);
1009 kfree(pages);
1010 mutex_lock(&bo->mutex);
1011 return -EFAULT;
1012 }
1013 mutex_lock(&bo->mutex);
1014 /*
1015 * Handle a frame buffer allocated by another kernel-space driver
1016 * and mapped to user space
1017 */
1018 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
1019 page_nr = get_pfnmap_pages(current, current->mm,
1020 (unsigned long)userptr,
1021 (int)(bo->pgnr), 1, 0,
1022 pages, NULL);
1023 bo->mem_type = HMM_BO_MEM_TYPE_PFN;
1024 } else {
1025 /* Handle frame buffer allocated in user space */
1026 mutex_unlock(&bo->mutex);
1027 down_read(&current->mm->mmap_sem);
1028 page_nr = get_user_pages((unsigned long)userptr,
1029 (int)(bo->pgnr), 1, pages, NULL);
1030 up_read(&current->mm->mmap_sem);
1031 mutex_lock(&bo->mutex);
1032 bo->mem_type = HMM_BO_MEM_TYPE_USER;
1033 }
1034
1035 /* can be written by caller, not forced */
1036 if (page_nr != bo->pgnr) {
1037 dev_err(atomisp_dev,
1038 "get_user_pages err: bo->pgnr = %d, "
1039 "pgnr actually pinned = %d.\n",
1040 bo->pgnr, page_nr);
1041 goto out_of_mem;
1042 }
1043
1044 for (i = 0; i < bo->pgnr; i++) {
1045 bo->page_obj[i].page = pages[i];
1046 bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
1047 }
1048 hmm_mem_stat.usr_size += bo->pgnr;
1049 kfree(pages);
1050
1051 return 0;
1052
1053 out_of_mem:
1054 for (i = 0; i < page_nr; i++)
1055 put_page(pages[i]);
1056 kfree(pages);
1057 kfree(bo->page_obj);
1058
1059 return -ENOMEM;
1060 }
1061
1062 static void free_user_pages(struct hmm_buffer_object *bo)
1063 {
1064 int i;
1065
1066 for (i = 0; i < bo->pgnr; i++)
1067 put_page(bo->page_obj[i].page);
1068 hmm_mem_stat.usr_size -= bo->pgnr;
1069
1070 kfree(bo->page_obj);
1071 }
1072
1073 /*
1074 * allocate/free physical pages for the bo.
1075 *
1076 * type indicates where the pages come from. currently we have 3 types
1077 * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
1078 *
1079 * from_highmem is only valid when type is HMM_BO_PRIVATE; it will
1080 * try to alloc memory from highmem if from_highmem is set.
1081 *
1082 * userptr is only valid when type is HMM_BO_USER; it indicates
1083 * the start address in the user-space task.
1084 *
1085 * from_highmem and userptr will both be ignored when type is
1086 * HMM_BO_SHARE.
1087 */
1088 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
1089 enum hmm_bo_type type, int from_highmem,
1090 void *userptr, bool cached)
1091 {
1092 int ret = -EINVAL;
1093
1094 check_bo_null_return(bo, -EINVAL);
1095
1096 mutex_lock(&bo->mutex);
1097 check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1098
1099 /*
1100 * TO DO:
1101 * add HMM_BO_USER type
1102 */
1103 if (type == HMM_BO_PRIVATE) {
1104 ret = alloc_private_pages(bo, from_highmem,
1105 cached, &dynamic_pool, &reserved_pool);
1106 } else if (type == HMM_BO_USER) {
1107 ret = alloc_user_pages(bo, userptr, cached);
1108 } else {
1109 dev_err(atomisp_dev, "invalid buffer type.\n");
1110 ret = -EINVAL;
1111 }
1112 if (ret)
1113 goto alloc_err;
1114
1115 bo->type = type;
1116
1117 bo->status |= HMM_BO_PAGE_ALLOCED;
1118
1119 mutex_unlock(&bo->mutex);
1120
1121 return 0;
1122
1123 alloc_err:
1124 mutex_unlock(&bo->mutex);
1125 dev_err(atomisp_dev, "alloc pages err...\n");
1126 return ret;
1127 status_err:
1128 mutex_unlock(&bo->mutex);
1129 dev_err(atomisp_dev,
1130 "buffer object already has pages allocated.\n");
1131 return -EINVAL;
1132 }
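
/*
 * End-to-end sketch of a private buffer's life cycle built from the
 * calls in this file (illustrative only; error handling omitted and
 * 'bo_device' is assumed to be the driver's global bo device):
 *
 *   bo = hmm_bo_alloc(&bo_device, pgnr);         // reserve ISP va range
 *   hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0,    // back it with pages
 *                      NULL, cached);
 *   hmm_bo_bind(bo);                             // map it in the ISP MMU
 *   ...
 *   hmm_bo_unbind(bo);
 *   hmm_bo_free_pages(bo);
 *   hmm_bo_unref(bo);                            // -> hmm_bo_release()
 */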
1133
1134 /*
1135 * free physical pages of the bo.
1136 */
1137 void hmm_bo_free_pages(struct hmm_buffer_object *bo)
1138 {
1139 check_bo_null_return_void(bo);
1140
1141 mutex_lock(&bo->mutex);
1142
1143 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
1144
1145 /* clear the flag anyway. */
1146 bo->status &= (~HMM_BO_PAGE_ALLOCED);
1147
1148 if (bo->type == HMM_BO_PRIVATE)
1149 free_private_pages(bo, &dynamic_pool, &reserved_pool);
1150 else if (bo->type == HMM_BO_USER)
1151 free_user_pages(bo);
1152 else
1153 dev_err(atomisp_dev, "invalid buffer type.\n");
1154 mutex_unlock(&bo->mutex);
1155
1156 return;
1157
1158 status_err2:
1159 mutex_unlock(&bo->mutex);
1160 dev_err(atomisp_dev,
1161 "buffer object has no pages allocated yet.\n");
1162 }
1163
1164 int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
1165 {
1166 check_bo_null_return(bo, 0);
1167
1168 return bo->status & HMM_BO_PAGE_ALLOCED;
1169 }
1170
1171 /*
1172 * get physical page info of the bo.
1173 */
1174 int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
1175 struct hmm_page_object **page_obj, int *pgnr)
1176 {
1177 check_bo_null_return(bo, -EINVAL);
1178
1179 mutex_lock(&bo->mutex);
1180
1181 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1182
1183 *page_obj = bo->page_obj;
1184 *pgnr = bo->pgnr;
1185
1186 mutex_unlock(&bo->mutex);
1187
1188 return 0;
1189
1190 status_err:
1191 dev_err(atomisp_dev,
1192 "buffer object has no pages allocated yet.\n");
1193 mutex_unlock(&bo->mutex);
1194 return -EINVAL;
1195 }
1196
1197 /*
1198 * bind the physical pages to a virtual address space.
1199 */
1200 int hmm_bo_bind(struct hmm_buffer_object *bo)
1201 {
1202 int ret;
1203 unsigned int virt;
1204 struct hmm_bo_device *bdev;
1205 unsigned int i;
1206
1207 check_bo_null_return(bo, -EINVAL);
1208
1209 mutex_lock(&bo->mutex);
1210
1211 check_bo_status_yes_goto(bo,
1212 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
1213 status_err1);
1214
1215 check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
1216
1217 bdev = bo->bdev;
1218
1219 virt = bo->start;
1220
1221 for (i = 0; i < bo->pgnr; i++) {
1222 ret =
1223 isp_mmu_map(&bdev->mmu, virt,
1224 page_to_phys(bo->page_obj[i].page), 1);
1225 if (ret)
1226 goto map_err;
1227 virt += (1 << PAGE_SHIFT);
1228 }
1229
1230 /*
1231 * flush the TLB here.
1232 *
1233 * theoretically, we do not need to flush the TLB as we did not change
1234 * any existing address mappings, but for Silicon Hive's MMU it is
1235 * really a bug here. I guess when fetching PTEs (page table entries)
1236 * into the TLB, the MMU will fetch additional INVALID PTEs automatically
1237 * for performance reasons. E.g., we only set up 1 page address mapping,
1238 * meaning updating 1 PTE, but the MMU fetches 4 PTEs at a time,
1239 * so the additional 3 PTEs are invalid.
1240 */
1241 if (bo->start != 0x0)
1242 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1243 (bo->pgnr << PAGE_SHIFT));
1244
1245 bo->status |= HMM_BO_BINDED;
1246
1247 mutex_unlock(&bo->mutex);
1248
1249 return 0;
1250
1251 map_err:
1252 /* unbind the physical pages with related virtual address space */
1253 virt = bo->start;
1254 for ( ; i > 0; i--) {
1255 isp_mmu_unmap(&bdev->mmu, virt, 1);
1256 virt += pgnr_to_size(1);
1257 }
1258
1259 mutex_unlock(&bo->mutex);
1260 dev_err(atomisp_dev,
1261 "setup MMU address mapping failed.\n");
1262 return ret;
1263
1264 status_err2:
1265 mutex_unlock(&bo->mutex);
1266 dev_err(atomisp_dev, "buffer object already bound.\n");
1267 return -EINVAL;
1268 status_err1:
1269 mutex_unlock(&bo->mutex);
1270 dev_err(atomisp_dev,
1271 "buffer object vm_node or page not allocated.\n");
1272 return -EINVAL;
1273 }
1274
1275 /*
1276 * unbind the physical pages with related virtual address space.
1277 */
1278 void hmm_bo_unbind(struct hmm_buffer_object *bo)
1279 {
1280 unsigned int virt;
1281 struct hmm_bo_device *bdev;
1282 unsigned int i;
1283
1284 check_bo_null_return_void(bo);
1285
1286 mutex_lock(&bo->mutex);
1287
1288 check_bo_status_yes_goto(bo,
1289 HMM_BO_PAGE_ALLOCED |
1290 HMM_BO_ALLOCED |
1291 HMM_BO_BINDED, status_err);
1292
1293 bdev = bo->bdev;
1294
1295 virt = bo->start;
1296
1297 for (i = 0; i < bo->pgnr; i++) {
1298 isp_mmu_unmap(&bdev->mmu, virt, 1);
1299 virt += pgnr_to_size(1);
1300 }
1301
1302 /*
1303 * flush TLB as the address mapping has been removed and
1304 * related TLBs should be invalidated.
1305 */
1306 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1307 (bo->pgnr << PAGE_SHIFT));
1308
1309 bo->status &= (~HMM_BO_BINDED);
1310
1311 mutex_unlock(&bo->mutex);
1312
1313 return;
1314
1315 status_err:
1316 mutex_unlock(&bo->mutex);
1317 dev_err(atomisp_dev,
1318 "buffer vm or page not allocated or not bound yet.\n");
1319 }
1320
1321 int hmm_bo_binded(struct hmm_buffer_object *bo)
1322 {
1323 int ret;
1324
1325 check_bo_null_return(bo, 0);
1326
1327 mutex_lock(&bo->mutex);
1328
1329 ret = bo->status & HMM_BO_BINDED;
1330
1331 mutex_unlock(&bo->mutex);
1332
1333 return ret;
1334 }
1335
1336 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
1337 {
1338 struct page **pages;
1339 int i;
1340
1341 check_bo_null_return(bo, NULL);
1342
1343 mutex_lock(&bo->mutex);
1344 if (((bo->status & HMM_BO_VMAPED) && !cached) ||
1345 ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
1346 mutex_unlock(&bo->mutex);
1347 return bo->vmap_addr;
1348 }
1349
1350 /* cached status needs to be changed, so vunmap first */
1351 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1352 vunmap(bo->vmap_addr);
1353 bo->vmap_addr = NULL;
1354 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1355 }
1356
1357 pages = kmalloc(sizeof(*pages) * bo->pgnr, GFP_KERNEL);
1358 if (unlikely(!pages)) {
1359 mutex_unlock(&bo->mutex);
1360 return NULL;
1361 }
1362
1363 for (i = 0; i < bo->pgnr; i++)
1364 pages[i] = bo->page_obj[i].page;
1365
1366 bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
1367 cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
1368 if (unlikely(!bo->vmap_addr)) {
1369 kfree(pages);
1370 mutex_unlock(&bo->mutex);
1371 dev_err(atomisp_dev, "vmap failed...\n");
1372 return NULL;
1373 }
1374 bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
1375
1376 kfree(pages);
1377
1378 mutex_unlock(&bo->mutex);
1379 return bo->vmap_addr;
1380 }
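
/*
 * Kernel-side access sketch (hypothetical caller): map the bo once,
 * touch it through the returned pointer, then drop the mapping.
 *
 *   void *va = hmm_bo_vmap(bo, true);            // cached mapping
 *   if (va) {
 *       memset(va, 0, bo->pgnr << PAGE_SHIFT);
 *       hmm_bo_flush_vmap(bo);                   // flush before the ISP reads
 *       hmm_bo_vunmap(bo);
 *   }
 */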
1381
1382 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
1383 {
1384 check_bo_null_return_void(bo);
1385
1386 mutex_lock(&bo->mutex);
1387 if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
1388 mutex_unlock(&bo->mutex);
1389 return;
1390 }
1391
1392 clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
1393 mutex_unlock(&bo->mutex);
1394 }
1395
1396 void hmm_bo_vunmap(struct hmm_buffer_object *bo)
1397 {
1398 check_bo_null_return_void(bo);
1399
1400 mutex_lock(&bo->mutex);
1401 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1402 vunmap(bo->vmap_addr);
1403 bo->vmap_addr = NULL;
1404 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1405 }
1406
1407 mutex_unlock(&bo->mutex);
1408 return;
1409 }
1410
1411 void hmm_bo_ref(struct hmm_buffer_object *bo)
1412 {
1413 check_bo_null_return_void(bo);
1414
1415 kref_get(&bo->kref);
1416 }
1417
1418 static void kref_hmm_bo_release(struct kref *kref)
1419 {
1420 if (!kref)
1421 return;
1422
1423 hmm_bo_release(kref_to_hmm_bo(kref));
1424 }
1425
1426 void hmm_bo_unref(struct hmm_buffer_object *bo)
1427 {
1428 check_bo_null_return_void(bo);
1429
1430 kref_put(&bo->kref, kref_hmm_bo_release);
1431 }
1432
1433 static void hmm_bo_vm_open(struct vm_area_struct *vma)
1434 {
1435 struct hmm_buffer_object *bo =
1436 (struct hmm_buffer_object *)vma->vm_private_data;
1437
1438 check_bo_null_return_void(bo);
1439
1440 hmm_bo_ref(bo);
1441
1442 mutex_lock(&bo->mutex);
1443
1444 bo->status |= HMM_BO_MMAPED;
1445
1446 bo->mmap_count++;
1447
1448 mutex_unlock(&bo->mutex);
1449 }
1450
1451 static void hmm_bo_vm_close(struct vm_area_struct *vma)
1452 {
1453 struct hmm_buffer_object *bo =
1454 (struct hmm_buffer_object *)vma->vm_private_data;
1455
1456 check_bo_null_return_void(bo);
1457
1458 hmm_bo_unref(bo);
1459
1460 mutex_lock(&bo->mutex);
1461
1462 bo->mmap_count--;
1463
1464 if (!bo->mmap_count) {
1465 bo->status &= (~HMM_BO_MMAPED);
1466 vma->vm_private_data = NULL;
1467 }
1468
1469 mutex_unlock(&bo->mutex);
1470 }
1471
1472 static const struct vm_operations_struct hmm_bo_vm_ops = {
1473 .open = hmm_bo_vm_open,
1474 .close = hmm_bo_vm_close,
1475 };
1476
1477 /*
1478 * mmap the bo to user space.
1479 */
1480 int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
1481 {
1482 unsigned int start, end;
1483 unsigned int virt;
1484 unsigned int pgnr, i;
1485 unsigned int pfn;
1486
1487 check_bo_null_return(bo, -EINVAL);
1488
1489 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1490
1491 pgnr = bo->pgnr;
1492 start = vma->vm_start;
1493 end = vma->vm_end;
1494
1495 /*
1496 * check the vma's virtual address space size and the buffer object's
1497 * size: they must be the same.
1498 */
1499 if ((start + pgnr_to_size(pgnr)) != end) {
1500 dev_warn(atomisp_dev,
1501 "vma's address space size not equal"
1502 " to buffer object's size");
1503 return -EINVAL;
1504 }
1505
1506 virt = vma->vm_start;
1507 for (i = 0; i < pgnr; i++) {
1508 pfn = page_to_pfn(bo->page_obj[i].page);
1509 if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
1510 dev_warn(atomisp_dev,
1511 "remap_pfn_range failed:"
1512 " virt = 0x%x, pfn = 0x%x,"
1513 " mapped_pgnr = %d\n", virt, pfn, 1);
1514 return -EINVAL;
1515 }
1516 virt += PAGE_SIZE;
1517 }
1518
1519 vma->vm_private_data = bo;
1520
1521 vma->vm_ops = &hmm_bo_vm_ops;
1522 vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP;
1523
1524 /*
1525 * call hmm_bo_vm_open explicitly.
1526 */
1527 hmm_bo_vm_open(vma);
1528
1529 return 0;
1530
1531 status_err:
1532 dev_err(atomisp_dev, "buffer page not allocated yet.\n");
1533 return -EINVAL;
1534 }
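
/*
 * mmap hookup sketch (hypothetical file_operations handler; the lookup
 * by vm_pgoff is an assumption, not something defined in this file):
 *
 *   static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *   {
 *       struct hmm_buffer_object *bo;
 *
 *       bo = hmm_bo_device_search_start(&bo_device,
 *                                       vma->vm_pgoff << PAGE_SHIFT);
 *       if (!bo)
 *           return -EINVAL;
 *       return hmm_bo_mmap(vma, bo);
 *   }
 */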