drivers/staging/media/atomisp/pci/atomisp2/hmm/hmm_bo.c
1 /*
2  * Support for Medfield PNW Camera Imaging ISP subsystem.
3 *
4 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
5 *
6 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version
10 * 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 *
18 */
19 /*
20 * This file contains functions for buffer object structure management
21 */
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/gfp.h> /* for GFP_ATOMIC */
25 #include <linux/mm.h>
26 #include <linux/mm_types.h>
27 #include <linux/hugetlb.h>
28 #include <linux/highmem.h>
29 #include <linux/slab.h> /* for kmalloc */
30 #include <linux/module.h>
31 #include <linux/moduleparam.h>
32 #include <linux/string.h>
33 #include <linux/list.h>
34 #include <linux/errno.h>
35 #include <linux/io.h>
36 #include <asm/current.h>
37 #include <linux/sched/signal.h>
38 #include <linux/file.h>
39
40 #include <asm/set_memory.h>
41
42 #include "atomisp_internal.h"
43 #include "hmm/hmm_common.h"
44 #include "hmm/hmm_pool.h"
45 #include "hmm/hmm_bo.h"
46
47 static unsigned int order_to_nr(unsigned int order)
48 {
49 return 1U << order;
50 }
51
52 static unsigned int nr_to_order_bottom(unsigned int nr)
53 {
54 return fls(nr) - 1;
55 }
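/*
 * Worked example for the two helpers above (informal, not part of the
 * original code): nr_to_order_bottom() rounds a page count down to the
 * largest power-of-two order that fits, and order_to_nr() converts an
 * order back to a page count:
 *
 *	nr_to_order_bottom(6) == fls(6) - 1 == 2	(2^2 = 4 <= 6)
 *	order_to_nr(2)        == 1U << 2   == 4
 *
 * so a 6-page request would be served as one order-2 block first, with
 * the remaining 2 pages handled on the next loop iteration.
 */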
56
57 static struct hmm_buffer_object *__bo_alloc(struct kmem_cache *bo_cache)
58 {
59 struct hmm_buffer_object *bo;
60
61 bo = kmem_cache_alloc(bo_cache, GFP_KERNEL);
62 if (!bo)
63 dev_err(atomisp_dev, "%s: failed!\n", __func__);
64
65 return bo;
66 }
67
68 static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo,
69 unsigned int pgnr)
70 {
71 check_bodev_null_return(bdev, -EINVAL);
72 var_equal_return(hmm_bo_device_inited(bdev), 0, -EINVAL,
73 "hmm_bo_device not inited yet.\n");
74 /* prevent zero size buffer object */
75 if (pgnr == 0) {
76 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
77 return -EINVAL;
78 }
79
80 memset(bo, 0, sizeof(*bo));
81 mutex_init(&bo->mutex);
82
83 /* init the bo->list HEAD as an element of entire_bo_list */
84 INIT_LIST_HEAD(&bo->list);
85
86 bo->bdev = bdev;
87 bo->vmap_addr = NULL;
88 bo->status = HMM_BO_FREE;
89 bo->start = bdev->start;
90 bo->pgnr = pgnr;
91 bo->end = bo->start + pgnr_to_size(pgnr);
92 bo->prev = NULL;
93 bo->next = NULL;
94
95 return 0;
96 }
97
98 static struct hmm_buffer_object *__bo_search_and_remove_from_free_rbtree(
99 struct rb_node *node, unsigned int pgnr)
100 {
101 struct hmm_buffer_object *this, *ret_bo, *temp_bo;
102
103 this = rb_entry(node, struct hmm_buffer_object, node);
104 if (this->pgnr == pgnr ||
105 (this->pgnr > pgnr && this->node.rb_left == NULL)) {
106 goto remove_bo_and_return;
107 } else {
108 if (this->pgnr < pgnr) {
109 if (!this->node.rb_right)
110 return NULL;
111 ret_bo = __bo_search_and_remove_from_free_rbtree(
112 this->node.rb_right, pgnr);
113 } else {
114 ret_bo = __bo_search_and_remove_from_free_rbtree(
115 this->node.rb_left, pgnr);
116 }
117 if (!ret_bo) {
118 if (this->pgnr > pgnr)
119 goto remove_bo_and_return;
120 else
121 return NULL;
122 }
123 return ret_bo;
124 }
125
126 remove_bo_and_return:
127 /* NOTE: All nodes on free rbtree have a 'prev' that points to NULL.
128 * 1. check if 'this->next' is NULL:
129 * yes: erase 'this' node and rebalance rbtree, return 'this'.
130 */
131 if (this->next == NULL) {
132 rb_erase(&this->node, &this->bdev->free_rbtree);
133 return this;
134 }
135 /* NOTE: if 'this->next' is not NULL, always return 'this->next' bo.
136 * 2. check if 'this->next->next' is NULL:
137 * yes: change the related 'next/prev' pointer,
138 * return 'this->next' but the rbtree stays unchanged.
139 */
140 temp_bo = this->next;
141 this->next = temp_bo->next;
142 if (temp_bo->next)
143 temp_bo->next->prev = this;
144 temp_bo->next = NULL;
145 temp_bo->prev = NULL;
146 return temp_bo;
147 }
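/*
 * Illustrative walk-through (assumed tree contents, not from the
 * original code): with free bos of pgnr 4, 16 and 64 in the free rbtree
 * and a request for pgnr = 10, the recursion skips the 4-page node,
 * finds no exact match and falls back to the 16-page node, the smallest
 * free bo on the search path that still satisfies the request.  When
 * several free bos share the same pgnr, the extra ones hang off the
 * rbtree node via 'next'; the helper pops one of those first so the
 * rbtree itself does not need rebalancing.
 */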
148
149 static struct hmm_buffer_object *__bo_search_by_addr(struct rb_root *root,
150 ia_css_ptr start)
151 {
152 struct rb_node *n = root->rb_node;
153 struct hmm_buffer_object *bo;
154
155 do {
156 bo = rb_entry(n, struct hmm_buffer_object, node);
157
158 if (bo->start > start) {
159 if (n->rb_left == NULL)
160 return NULL;
161 n = n->rb_left;
162 } else if (bo->start < start) {
163 if (n->rb_right == NULL)
164 return NULL;
165 n = n->rb_right;
166 } else {
167 return bo;
168 }
169 } while (n);
170
171 return NULL;
172 }
173
174 static struct hmm_buffer_object *__bo_search_by_addr_in_range(
175 struct rb_root *root, unsigned int start)
176 {
177 struct rb_node *n = root->rb_node;
178 struct hmm_buffer_object *bo;
179
180 do {
181 bo = rb_entry(n, struct hmm_buffer_object, node);
182
183 if (bo->start > start) {
184 if (n->rb_left == NULL)
185 return NULL;
186 n = n->rb_left;
187 } else {
188 if (bo->end > start)
189 return bo;
190 if (n->rb_right == NULL)
191 return NULL;
192 n = n->rb_right;
193 }
194 } while (n);
195
196 return NULL;
197 }
198
199 static void __bo_insert_to_free_rbtree(struct rb_root *root,
200 struct hmm_buffer_object *bo)
201 {
202 struct rb_node **new = &(root->rb_node);
203 struct rb_node *parent = NULL;
204 struct hmm_buffer_object *this;
205 unsigned int pgnr = bo->pgnr;
206
207 while (*new) {
208 parent = *new;
209 this = container_of(*new, struct hmm_buffer_object, node);
210
211 if (pgnr < this->pgnr) {
212 new = &((*new)->rb_left);
213 } else if (pgnr > this->pgnr) {
214 new = &((*new)->rb_right);
215 } else {
216 bo->prev = this;
217 bo->next = this->next;
218 if (this->next)
219 this->next->prev = bo;
220 this->next = bo;
221 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
222 return;
223 }
224 }
225
226 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_FREE;
227
228 rb_link_node(&bo->node, parent, new);
229 rb_insert_color(&bo->node, root);
230 }
231
232 static void __bo_insert_to_alloc_rbtree(struct rb_root *root,
233 struct hmm_buffer_object *bo)
234 {
235 struct rb_node **new = &(root->rb_node);
236 struct rb_node *parent = NULL;
237 struct hmm_buffer_object *this;
238 unsigned int start = bo->start;
239
240 while (*new) {
241 parent = *new;
242 this = container_of(*new, struct hmm_buffer_object, node);
243
244 if (start < this->start)
245 new = &((*new)->rb_left);
246 else
247 new = &((*new)->rb_right);
248 }
249
250 kref_init(&bo->kref);
251 bo->status = (bo->status & ~HMM_BO_MASK) | HMM_BO_ALLOCED;
252
253 rb_link_node(&bo->node, parent, new);
254 rb_insert_color(&bo->node, root);
255 }
256
257 static struct hmm_buffer_object *__bo_break_up(struct hmm_bo_device *bdev,
258 struct hmm_buffer_object *bo,
259 unsigned int pgnr)
260 {
261 struct hmm_buffer_object *new_bo;
262 unsigned long flags;
263 int ret;
264
265 new_bo = __bo_alloc(bdev->bo_cache);
266 if (!new_bo) {
267 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
268 return NULL;
269 }
270 ret = __bo_init(bdev, new_bo, pgnr);
271 if (ret) {
272 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
273 kmem_cache_free(bdev->bo_cache, new_bo);
274 return NULL;
275 }
276
277 new_bo->start = bo->start;
278 new_bo->end = new_bo->start + pgnr_to_size(pgnr);
279 bo->start = new_bo->end;
280 bo->pgnr = bo->pgnr - pgnr;
281
282 spin_lock_irqsave(&bdev->list_lock, flags);
283 list_add_tail(&new_bo->list, &bo->list);
284 spin_unlock_irqrestore(&bdev->list_lock, flags);
285
286 return new_bo;
287 }
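/*
 * Split arithmetic sketch (illustrative values): breaking up a 64-page
 * free bo for a 16-page request gives
 *
 *	new_bo->start = bo->start (old),  new_bo->pgnr = 16
 *	bo->start     = new_bo->end,      bo->pgnr     = 48
 *
 * i.e. the new bo takes the low end of the address range and the
 * original free bo shrinks to the remainder; new_bo is linked into
 * entire_bo_list just before bo, so the list stays in address order.
 */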
288
289 static void __bo_take_off_handling(struct hmm_buffer_object *bo)
290 {
291 struct hmm_bo_device *bdev = bo->bdev;
292 /* There are 4 situations when we take off a known bo from free rbtree:
293  * 1. if bo->prev == NULL && bo->next == NULL, bo is a lone rbtree node
294  *    with no linked list hanging off it; to take off this bo,
295  *    we just erase bo directly and rebalance the free rbtree
296 */
297 if (bo->prev == NULL && bo->next == NULL) {
298 rb_erase(&bo->node, &bdev->free_rbtree);
299 /* 2. when bo->next != NULL && bo->prev == NULL, bo is a rbtree node,
300  *    and has a linked list; to take off this bo we need to erase bo
301  *    first, then insert bo->next into the free rbtree and rebalance
302 * the free rbtree
303 */
304 } else if (bo->prev == NULL && bo->next != NULL) {
305 bo->next->prev = NULL;
306 rb_erase(&bo->node, &bdev->free_rbtree);
307 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo->next);
308 bo->next = NULL;
309 /* 3. when bo->prev != NULL && bo->next == NULL, bo is not a rbtree
310 * node, bo is the last element of the linked list after rbtree
311  *    node, to take off this bo, we just need to set the "prev/next"
312  *    pointers to NULL; the free rbtree stays unchanged
313 */
314 } else if (bo->prev != NULL && bo->next == NULL) {
315 bo->prev->next = NULL;
316 bo->prev = NULL;
317  /* 4. when bo->prev != NULL && bo->next != NULL, bo is not a rbtree
318  *    node, bo is in the middle of the linked list after the rbtree node;
319  *    to take off this bo, we just set the "prev/next" pointers
320  *    to NULL, the free rbtree stays unchanged
321 */
322 } else {
323 bo->next->prev = bo->prev;
324 bo->prev->next = bo->next;
325 bo->next = NULL;
326 bo->prev = NULL;
327 }
328 }
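/*
 * Summary of the four cases above (informal): only the head of each
 * same-pgnr chain lives in the rbtree, so
 *
 *	prev == NULL, next == NULL:  lone rbtree node, just erase it
 *	prev == NULL, next != NULL:  chain head, erase it and re-insert
 *	                             bo->next as the new rbtree node
 *	prev != NULL, next == NULL:  chain tail, unlink it, tree untouched
 *	prev != NULL, next != NULL:  chain middle, unlink it, tree untouched
 */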
329
330 static struct hmm_buffer_object *__bo_merge(struct hmm_buffer_object *bo,
331 struct hmm_buffer_object *next_bo)
332 {
333 struct hmm_bo_device *bdev;
334 unsigned long flags;
335
336 bdev = bo->bdev;
337 next_bo->start = bo->start;
338 next_bo->pgnr = next_bo->pgnr + bo->pgnr;
339
340 spin_lock_irqsave(&bdev->list_lock, flags);
341 list_del(&bo->list);
342 spin_unlock_irqrestore(&bdev->list_lock, flags);
343
344 kmem_cache_free(bo->bdev->bo_cache, bo);
345
346 return next_bo;
347 }
348
349 /*
350 * hmm_bo_device functions.
351 */
352 int hmm_bo_device_init(struct hmm_bo_device *bdev,
353 struct isp_mmu_client *mmu_driver,
354 unsigned int vaddr_start,
355 unsigned int size)
356 {
357 struct hmm_buffer_object *bo;
358 unsigned long flags;
359 int ret;
360
361 check_bodev_null_return(bdev, -EINVAL);
362
363 ret = isp_mmu_init(&bdev->mmu, mmu_driver);
364 if (ret) {
365 dev_err(atomisp_dev, "isp_mmu_init failed.\n");
366 return ret;
367 }
368
369 bdev->start = vaddr_start;
370 bdev->pgnr = size_to_pgnr_ceil(size);
371 bdev->size = pgnr_to_size(bdev->pgnr);
372
373 spin_lock_init(&bdev->list_lock);
374 mutex_init(&bdev->rbtree_mutex);
375
376 bdev->flag = HMM_BO_DEVICE_INITED;
377
378 INIT_LIST_HEAD(&bdev->entire_bo_list);
379 bdev->allocated_rbtree = RB_ROOT;
380 bdev->free_rbtree = RB_ROOT;
381
382 bdev->bo_cache = kmem_cache_create("bo_cache",
383 sizeof(struct hmm_buffer_object), 0, 0, NULL);
384 if (!bdev->bo_cache) {
385 dev_err(atomisp_dev, "%s: create cache failed!\n", __func__);
386 isp_mmu_exit(&bdev->mmu);
387 return -ENOMEM;
388 }
389
390 bo = __bo_alloc(bdev->bo_cache);
391 if (!bo) {
392 dev_err(atomisp_dev, "%s: __bo_alloc failed!\n", __func__);
393 isp_mmu_exit(&bdev->mmu);
394 return -ENOMEM;
395 }
396
397 ret = __bo_init(bdev, bo, bdev->pgnr);
398 if (ret) {
399 dev_err(atomisp_dev, "%s: __bo_init failed!\n", __func__);
400 kmem_cache_free(bdev->bo_cache, bo);
401 isp_mmu_exit(&bdev->mmu);
402 return -EINVAL;
403 }
404
405 spin_lock_irqsave(&bdev->list_lock, flags);
406 list_add_tail(&bo->list, &bdev->entire_bo_list);
407 spin_unlock_irqrestore(&bdev->list_lock, flags);
408
409 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
410
411 return 0;
412 }
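/*
 * Minimal init sketch (hedged; the real call site is the hmm layer in
 * hmm.c, and the identifiers below are assumed from that context): the
 * device is initialised once with the ISP virtual address range it
 * manages, which creates a single free bo spanning the whole range.
 *
 *	struct hmm_bo_device bdev;
 *	int ret;
 *
 *	ret = hmm_bo_device_init(&bdev, &sh_mmu_mrfld,
 *				 ISP_VM_START, ISP_VM_SIZE);
 *	if (ret)
 *		return ret;
 */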
413
414 struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
415 unsigned int pgnr)
416 {
417 struct hmm_buffer_object *bo, *new_bo;
418 struct rb_root *root = &bdev->free_rbtree;
419
420 check_bodev_null_return(bdev, NULL);
421 var_equal_return(hmm_bo_device_inited(bdev), 0, NULL,
422 "hmm_bo_device not inited yet.\n");
423
424 if (pgnr == 0) {
425 dev_err(atomisp_dev, "0 size buffer is not allowed.\n");
426 return NULL;
427 }
428
429 mutex_lock(&bdev->rbtree_mutex);
430 bo = __bo_search_and_remove_from_free_rbtree(root->rb_node, pgnr);
431 if (!bo) {
432 mutex_unlock(&bdev->rbtree_mutex);
433 dev_err(atomisp_dev, "%s: Out of Memory! hmm_bo_alloc failed",
434 __func__);
435 return NULL;
436 }
437
438 if (bo->pgnr > pgnr) {
439 new_bo = __bo_break_up(bdev, bo, pgnr);
440 if (!new_bo) {
441 mutex_unlock(&bdev->rbtree_mutex);
442 dev_err(atomisp_dev, "%s: __bo_break_up failed!\n",
443 __func__);
444 return NULL;
445 }
446
447 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, new_bo);
448 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
449
450 mutex_unlock(&bdev->rbtree_mutex);
451 return new_bo;
452 }
453
454 __bo_insert_to_alloc_rbtree(&bdev->allocated_rbtree, bo);
455
456 mutex_unlock(&bdev->rbtree_mutex);
457 return bo;
458 }
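/*
 * Allocation/release pairing sketch (assumed caller; see hmm.c for the
 * real one): hmm_bo_alloc() only reserves ISP virtual address space,
 * backing pages and MMU mappings are set up separately.
 *
 *	struct hmm_buffer_object *bo;
 *
 *	bo = hmm_bo_alloc(&bo_device, size_to_pgnr_ceil(bytes));
 *	if (!bo)
 *		return 0;
 *	...
 *	hmm_bo_unref(bo);
 *
 * hmm_bo_unref() drops the kref taken by __bo_insert_to_alloc_rbtree()
 * and ends up in hmm_bo_release(), which returns the range to the free
 * rbtree.
 */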
459
460 void hmm_bo_release(struct hmm_buffer_object *bo)
461 {
462 struct hmm_bo_device *bdev = bo->bdev;
463 struct hmm_buffer_object *next_bo, *prev_bo;
464
465 mutex_lock(&bdev->rbtree_mutex);
466
467 /*
468 * FIX ME:
469 *
470  * how to destroy the bo when it is still MMAPED?
471  *
472  * ideally, this will not happen as hmm_bo_release
473  * will only be called when kref reaches 0, and in the mmap
474  * operation hmm_bo_ref will eventually be called.
475  * so, if this happens, something has gone wrong.
476 */
477 if (bo->status & HMM_BO_MMAPED) {
478 mutex_unlock(&bdev->rbtree_mutex);
479 dev_dbg(atomisp_dev, "destroy bo which is MMAPED, do nothing\n");
480 return;
481 }
482
483 if (bo->status & HMM_BO_BINDED) {
484                 dev_warn(atomisp_dev, "the bo is still bound, unbind it first...\n");
485 hmm_bo_unbind(bo);
486 }
487
488 if (bo->status & HMM_BO_PAGE_ALLOCED) {
489                 dev_warn(atomisp_dev, "the pages are not freed, free pages first\n");
490 hmm_bo_free_pages(bo);
491 }
492 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
493 dev_warn(atomisp_dev, "the vunmap is not done, do it...\n");
494 hmm_bo_vunmap(bo);
495 }
496
497 rb_erase(&bo->node, &bdev->allocated_rbtree);
498
499 prev_bo = list_entry(bo->list.prev, struct hmm_buffer_object, list);
500 next_bo = list_entry(bo->list.next, struct hmm_buffer_object, list);
501
502 if (bo->list.prev != &bdev->entire_bo_list &&
503 prev_bo->end == bo->start &&
504 (prev_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
505 __bo_take_off_handling(prev_bo);
506 bo = __bo_merge(prev_bo, bo);
507 }
508
509 if (bo->list.next != &bdev->entire_bo_list &&
510 next_bo->start == bo->end &&
511 (next_bo->status & HMM_BO_MASK) == HMM_BO_FREE) {
512 __bo_take_off_handling(next_bo);
513 bo = __bo_merge(bo, next_bo);
514 }
515
516 __bo_insert_to_free_rbtree(&bdev->free_rbtree, bo);
517
518 mutex_unlock(&bdev->rbtree_mutex);
519 return;
520 }
521
522 void hmm_bo_device_exit(struct hmm_bo_device *bdev)
523 {
524 struct hmm_buffer_object *bo;
525 unsigned long flags;
526
527 dev_dbg(atomisp_dev, "%s: entering!\n", __func__);
528
529 check_bodev_null_return_void(bdev);
530
531 /*
532          * release all allocated bos even if they are in use;
533          * all bos will be merged into one big bo
534 */
535 while (!RB_EMPTY_ROOT(&bdev->allocated_rbtree))
536 hmm_bo_release(
537 rbtree_node_to_hmm_bo(bdev->allocated_rbtree.rb_node));
538
539 dev_dbg(atomisp_dev, "%s: finished releasing all allocated bos!\n",
540 __func__);
541
542 /* free all bos to release all ISP virtual memory */
543 while (!list_empty(&bdev->entire_bo_list)) {
544 bo = list_to_hmm_bo(bdev->entire_bo_list.next);
545
546 spin_lock_irqsave(&bdev->list_lock, flags);
547 list_del(&bo->list);
548 spin_unlock_irqrestore(&bdev->list_lock, flags);
549
550 kmem_cache_free(bdev->bo_cache, bo);
551 }
552
553         dev_dbg(atomisp_dev, "%s: finished freeing all bos!\n", __func__);
554
555 kmem_cache_destroy(bdev->bo_cache);
556
557 isp_mmu_exit(&bdev->mmu);
558 }
559
560 int hmm_bo_device_inited(struct hmm_bo_device *bdev)
561 {
562 check_bodev_null_return(bdev, -EINVAL);
563
564 return bdev->flag == HMM_BO_DEVICE_INITED;
565 }
566
567 int hmm_bo_allocated(struct hmm_buffer_object *bo)
568 {
569 check_bo_null_return(bo, 0);
570
571 return bo->status & HMM_BO_ALLOCED;
572 }
573
574 struct hmm_buffer_object *hmm_bo_device_search_start(
575 struct hmm_bo_device *bdev, ia_css_ptr vaddr)
576 {
577 struct hmm_buffer_object *bo;
578
579 check_bodev_null_return(bdev, NULL);
580
581 mutex_lock(&bdev->rbtree_mutex);
582 bo = __bo_search_by_addr(&bdev->allocated_rbtree, vaddr);
583 if (!bo) {
584 mutex_unlock(&bdev->rbtree_mutex);
585 dev_err(atomisp_dev, "%s can not find bo with addr: 0x%x\n",
586 __func__, vaddr);
587 return NULL;
588 }
589 mutex_unlock(&bdev->rbtree_mutex);
590
591 return bo;
592 }
593
594 struct hmm_buffer_object *hmm_bo_device_search_in_range(
595 struct hmm_bo_device *bdev, unsigned int vaddr)
596 {
597 struct hmm_buffer_object *bo;
598
599 check_bodev_null_return(bdev, NULL);
600
601 mutex_lock(&bdev->rbtree_mutex);
602 bo = __bo_search_by_addr_in_range(&bdev->allocated_rbtree, vaddr);
603 if (!bo) {
604 mutex_unlock(&bdev->rbtree_mutex);
605 dev_err(atomisp_dev, "%s can not find bo contain addr: 0x%x\n",
606 __func__, vaddr);
607 return NULL;
608 }
609 mutex_unlock(&bdev->rbtree_mutex);
610
611 return bo;
612 }
613
614 struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
615 struct hmm_bo_device *bdev, const void *vaddr)
616 {
617 struct list_head *pos;
618 struct hmm_buffer_object *bo;
619 unsigned long flags;
620
621 check_bodev_null_return(bdev, NULL);
622
623 spin_lock_irqsave(&bdev->list_lock, flags);
624 list_for_each(pos, &bdev->entire_bo_list) {
625 bo = list_to_hmm_bo(pos);
626                 /* skip bo which has no vm_node allocated */
627 if ((bo->status & HMM_BO_MASK) == HMM_BO_FREE)
628 continue;
629 if (bo->vmap_addr == vaddr)
630 goto found;
631 }
632 spin_unlock_irqrestore(&bdev->list_lock, flags);
633 return NULL;
634 found:
635 spin_unlock_irqrestore(&bdev->list_lock, flags);
636 return bo;
637
638 }
639
640
641 static void free_private_bo_pages(struct hmm_buffer_object *bo,
642 struct hmm_pool *dypool,
643 struct hmm_pool *repool,
644 int free_pgnr)
645 {
646 int i, ret;
647
648 for (i = 0; i < free_pgnr; i++) {
649 switch (bo->page_obj[i].type) {
650 case HMM_PAGE_TYPE_RESERVED:
651 if (repool->pops
652 && repool->pops->pool_free_pages) {
653 repool->pops->pool_free_pages(repool->pool_info,
654 &bo->page_obj[i]);
655 hmm_mem_stat.res_cnt--;
656 }
657 break;
658 /*
659 * HMM_PAGE_TYPE_GENERAL indicates that pages are from system
660                  * memory, so when freeing them, they should be put into the dynamic
661 * pool.
662 */
663 case HMM_PAGE_TYPE_DYNAMIC:
664 case HMM_PAGE_TYPE_GENERAL:
665 if (dypool->pops
666 && dypool->pops->pool_inited
667 && dypool->pops->pool_inited(dypool->pool_info)) {
668 if (dypool->pops->pool_free_pages)
669 dypool->pops->pool_free_pages(
670 dypool->pool_info,
671 &bo->page_obj[i]);
672 break;
673 }
674
675 /*
676                          * if the dynamic memory pool doesn't exist, the pages need
677                          * to be freed back to the system directly.
678 */
679 default:
680 ret = set_pages_wb(bo->page_obj[i].page, 1);
681 if (ret)
682 dev_err(atomisp_dev,
683 "set page to WB err ...ret = %d\n",
684 ret);
685 /*
686                          * W/A: set_pages_wb occasionally returns -EFAULT,
687                          * indicating that the page address is not in the valid
688                          * range (0xffff880000000000~0xffffc7ffffffffff);
689                          * __free_pages would then panic. It is not known why the
690                          * page address becomes invalid; it may be memory corruption caused by lowmem.
691 */
692 if (!ret) {
693 __free_pages(bo->page_obj[i].page, 0);
694 hmm_mem_stat.sys_size--;
695 }
696 break;
697 }
698 }
699
700 return;
701 }
702
703 /* Allocate pages which will be used only by the ISP */
704 static int alloc_private_pages(struct hmm_buffer_object *bo,
705 int from_highmem,
706 bool cached,
707 struct hmm_pool *dypool,
708 struct hmm_pool *repool)
709 {
710 int ret;
711 unsigned int pgnr, order, blk_pgnr, alloc_pgnr;
712 struct page *pages;
713 gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */
714 int i, j;
715 int failure_number = 0;
716 bool reduce_order = false;
717 bool lack_mem = true;
718
719 if (from_highmem)
720 gfp |= __GFP_HIGHMEM;
721
722 pgnr = bo->pgnr;
723
724 bo->page_obj = kmalloc_array(pgnr, sizeof(struct hmm_page_object),
725 GFP_KERNEL);
726 if (unlikely(!bo->page_obj))
727 return -ENOMEM;
728
729 i = 0;
730 alloc_pgnr = 0;
731
732 /*
733 * get physical pages from dynamic pages pool.
734 */
735 if (dypool->pops && dypool->pops->pool_alloc_pages) {
736 alloc_pgnr = dypool->pops->pool_alloc_pages(dypool->pool_info,
737 bo->page_obj, pgnr,
738 cached);
739 hmm_mem_stat.dyc_size -= alloc_pgnr;
740
741 if (alloc_pgnr == pgnr)
742 return 0;
743 }
744
745 pgnr -= alloc_pgnr;
746 i += alloc_pgnr;
747
748 /*
749 * get physical pages from reserved pages pool for atomisp.
750 */
751 if (repool->pops && repool->pops->pool_alloc_pages) {
752 alloc_pgnr = repool->pops->pool_alloc_pages(repool->pool_info,
753 &bo->page_obj[i], pgnr,
754 cached);
755 hmm_mem_stat.res_cnt += alloc_pgnr;
756 if (alloc_pgnr == pgnr)
757 return 0;
758 }
759
760 pgnr -= alloc_pgnr;
761 i += alloc_pgnr;
762
763 while (pgnr) {
764 order = nr_to_order_bottom(pgnr);
765 /*
766                  * if we are short of memory, we set order to 0
767                  * every time.
768 */
769 if (lack_mem)
770 order = HMM_MIN_ORDER;
771 else if (order > HMM_MAX_ORDER)
772 order = HMM_MAX_ORDER;
773 retry:
774 /*
775 * When order > HMM_MIN_ORDER, for performance reasons we don't
776 * want alloc_pages() to sleep. In case it fails and fallbacks
777 * to HMM_MIN_ORDER or in case the requested order is originally
778 * the minimum value, we can allow alloc_pages() to sleep for
779 * robustness purpose.
780 *
781 * REVISIT: why __GFP_FS is necessary?
782 */
783 if (order == HMM_MIN_ORDER) {
784 gfp &= ~GFP_NOWAIT;
785 gfp |= __GFP_RECLAIM | __GFP_FS;
786 }
787
788 pages = alloc_pages(gfp, order);
789 if (unlikely(!pages)) {
790 /*
791                          * in the low memory case, if a page allocation fails,
792                          * we fall back and try whether an order=0 allocation
793                          * succeeds. if order=0 fails too, there is
794                          * no memory left.
795 */
796 if (order == HMM_MIN_ORDER) {
797 dev_err(atomisp_dev,
798 "%s: cannot allocate pages\n",
799 __func__);
800 goto cleanup;
801 }
802 order = HMM_MIN_ORDER;
803 failure_number++;
804 reduce_order = true;
805 /*
806                          * if allocation fails twice in a row, we assume we are
807                          * short of memory now.
808 */
809 if (failure_number == 2) {
810 lack_mem = true;
811 failure_number = 0;
812 }
813 goto retry;
814 } else {
815 blk_pgnr = order_to_nr(order);
816
817 if (!cached) {
818 /*
819 * set memory to uncacheable -- UC_MINUS
820 */
821 ret = set_pages_uc(pages, blk_pgnr);
822 if (ret) {
823 dev_err(atomisp_dev,
824 "set page uncacheable"
825 "failed.\n");
826
827 __free_pages(pages, order);
828
829 goto cleanup;
830 }
831 }
832
833 for (j = 0; j < blk_pgnr; j++) {
834 bo->page_obj[i].page = pages + j;
835 bo->page_obj[i++].type = HMM_PAGE_TYPE_GENERAL;
836 }
837
838 pgnr -= blk_pgnr;
839 hmm_mem_stat.sys_size += blk_pgnr;
840
841 /*
842 * if order is not reduced this time, clear
843 * failure_number.
844 */
845 if (reduce_order)
846 reduce_order = false;
847 else
848 failure_number = 0;
849 }
850 }
851
852 return 0;
853 cleanup:
854 alloc_pgnr = i;
855 free_private_bo_pages(bo, dypool, repool, alloc_pgnr);
856
857 kfree(bo->page_obj);
858
859 return -ENOMEM;
860 }
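/*
 * Note on the allocation strategy above (informal summary, added for
 * clarity): because lack_mem starts out true, every block is currently
 * requested at HMM_MIN_ORDER, where alloc_pages() is allowed to sleep
 * (GFP_NOWAIT cleared, __GFP_RECLAIM | __GFP_FS set).  If lack_mem were
 * false, nr_to_order_bottom(pgnr) would pick the largest fitting order
 * (capped at HMM_MAX_ORDER) with a non-sleeping gfp mask, a failed
 * high-order attempt would retry at HMM_MIN_ORDER, and two failures in
 * a row would set lack_mem so later blocks drop to the minimum order.
 */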
861
862 static void free_private_pages(struct hmm_buffer_object *bo,
863 struct hmm_pool *dypool,
864 struct hmm_pool *repool)
865 {
866 free_private_bo_pages(bo, dypool, repool, bo->pgnr);
867
868 kfree(bo->page_obj);
869 }
870
871 /*
872 * Hacked from kernel function __get_user_pages in mm/memory.c
873 *
874  * Handles buffers allocated by another kernel-space driver and mmapped into
875  * user space; the function ignores the VM_PFNMAP and VM_IO flags in the VMA.
876  *
877  * Gets physical pages from a user-space virtual address and fills the page list.
878 */
879 static int __get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
880 unsigned long start, int nr_pages,
881 unsigned int gup_flags, struct page **pages,
882 struct vm_area_struct **vmas)
883 {
884 int i, ret;
885 unsigned long vm_flags;
886
887 if (nr_pages <= 0)
888 return 0;
889
890 VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
891
892 /*
893 * Require read or write permissions.
894 * If FOLL_FORCE is set, we only require the "MAY" flags.
895 */
896 vm_flags = (gup_flags & FOLL_WRITE) ?
897 (VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
898 vm_flags &= (gup_flags & FOLL_FORCE) ?
899 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
900 i = 0;
901
902 do {
903 struct vm_area_struct *vma;
904
905 vma = find_vma(mm, start);
906 if (!vma) {
907 dev_err(atomisp_dev, "find_vma failed\n");
908 return i ? : -EFAULT;
909 }
910
911 if (is_vm_hugetlb_page(vma)) {
912 /*
913 i = follow_hugetlb_page(mm, vma, pages, vmas,
914 &start, &nr_pages, i, gup_flags);
915 */
916 continue;
917 }
918
919 do {
920 struct page *page;
921 unsigned long pfn;
922
923 /*
924 * If we have a pending SIGKILL, don't keep faulting
925 * pages and potentially allocating memory.
926 */
927 if (unlikely(fatal_signal_pending(current))) {
928 dev_err(atomisp_dev,
929 "fatal_signal_pending in %s\n",
930 __func__);
931 return i ? i : -ERESTARTSYS;
932 }
933
934 ret = follow_pfn(vma, start, &pfn);
935 if (ret) {
936 dev_err(atomisp_dev, "follow_pfn() failed\n");
937 return i ? : -EFAULT;
938 }
939
940 page = pfn_to_page(pfn);
941 if (IS_ERR(page))
942 return i ? i : PTR_ERR(page);
943 if (pages) {
944 pages[i] = page;
945 get_page(page);
946 flush_anon_page(vma, page, start);
947 flush_dcache_page(page);
948 }
949 if (vmas)
950 vmas[i] = vma;
951 i++;
952 start += PAGE_SIZE;
953 nr_pages--;
954 } while (nr_pages && start < vma->vm_end);
955 } while (nr_pages);
956
957 return i;
958 }
959
960 static int get_pfnmap_pages(struct task_struct *tsk, struct mm_struct *mm,
961 unsigned long start, int nr_pages, int write, int force,
962 struct page **pages, struct vm_area_struct **vmas)
963 {
964 int flags = FOLL_TOUCH;
965
966 if (pages)
967 flags |= FOLL_GET;
968 if (write)
969 flags |= FOLL_WRITE;
970 if (force)
971 flags |= FOLL_FORCE;
972
973 return __get_pfnmap_pages(tsk, mm, start, nr_pages, flags, pages, vmas);
974 }
975
976 /*
977  * Convert a user-space virtual address into a page list
978 */
979 static int alloc_user_pages(struct hmm_buffer_object *bo,
980 void *userptr, bool cached)
981 {
982 int page_nr;
983 int i;
984 struct vm_area_struct *vma;
985 struct page **pages;
986
987 pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL);
988 if (unlikely(!pages))
989 return -ENOMEM;
990
991 bo->page_obj = kmalloc_array(bo->pgnr, sizeof(struct hmm_page_object),
992 GFP_KERNEL);
993 if (unlikely(!bo->page_obj)) {
994 kfree(pages);
995 return -ENOMEM;
996 }
997
998 mutex_unlock(&bo->mutex);
999 down_read(&current->mm->mmap_sem);
1000 vma = find_vma(current->mm, (unsigned long)userptr);
1001 up_read(&current->mm->mmap_sem);
1002 if (vma == NULL) {
1003 dev_err(atomisp_dev, "find_vma failed\n");
1004 kfree(bo->page_obj);
1005 kfree(pages);
1006 mutex_lock(&bo->mutex);
1007 return -EFAULT;
1008 }
1009 mutex_lock(&bo->mutex);
1010 /*
1011          * Handle a frame buffer allocated by another kernel-space driver
1012          * and mapped into user space
1013 */
1014 if (vma->vm_flags & (VM_IO | VM_PFNMAP)) {
1015 page_nr = get_pfnmap_pages(current, current->mm,
1016 (unsigned long)userptr,
1017 (int)(bo->pgnr), 1, 0,
1018 pages, NULL);
1019 bo->mem_type = HMM_BO_MEM_TYPE_PFN;
1020 } else {
1021                 /* Handle a frame buffer allocated in user space */
1022 mutex_unlock(&bo->mutex);
1023 down_read(&current->mm->mmap_sem);
1024 page_nr = get_user_pages((unsigned long)userptr,
1025 (int)(bo->pgnr), 1, pages, NULL);
1026 up_read(&current->mm->mmap_sem);
1027 mutex_lock(&bo->mutex);
1028 bo->mem_type = HMM_BO_MEM_TYPE_USER;
1029 }
1030
1031 /* can be written by caller, not forced */
1032 if (page_nr != bo->pgnr) {
1033 dev_err(atomisp_dev,
1034 "get_user_pages err: bo->pgnr = %d, "
1035 "pgnr actually pinned = %d.\n",
1036 bo->pgnr, page_nr);
1037 goto out_of_mem;
1038 }
1039
1040 for (i = 0; i < bo->pgnr; i++) {
1041 bo->page_obj[i].page = pages[i];
1042 bo->page_obj[i].type = HMM_PAGE_TYPE_GENERAL;
1043 }
1044 hmm_mem_stat.usr_size += bo->pgnr;
1045 kfree(pages);
1046
1047 return 0;
1048
1049 out_of_mem:
1050 for (i = 0; i < page_nr; i++)
1051 put_page(pages[i]);
1052 kfree(pages);
1053 kfree(bo->page_obj);
1054
1055 return -ENOMEM;
1056 }
1057
1058 static void free_user_pages(struct hmm_buffer_object *bo)
1059 {
1060 int i;
1061
1062 for (i = 0; i < bo->pgnr; i++)
1063 put_page(bo->page_obj[i].page);
1064 hmm_mem_stat.usr_size -= bo->pgnr;
1065
1066 kfree(bo->page_obj);
1067 }
1068
1069 /*
1070 * allocate/free physical pages for the bo.
1071 *
1072  * type indicates where the pages come from. currently we have 3 types
1073  * of memory: HMM_BO_PRIVATE, HMM_BO_USER, HMM_BO_SHARE.
1074  *
1075  * from_highmem is only valid when type is HMM_BO_PRIVATE; if it is set,
1076  * the allocation will try highmem.
1077  *
1078  * userptr is only valid when type is HMM_BO_USER; it is the start
1079  * address of the buffer in the user-space task.
1080 *
1081 * from_highmem and userptr will both be ignored when type is
1082 * HMM_BO_SHARE.
1083 */
1084 int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
1085 enum hmm_bo_type type, int from_highmem,
1086 void *userptr, bool cached)
1087 {
1088 int ret = -EINVAL;
1089
1090 check_bo_null_return(bo, -EINVAL);
1091
1092 mutex_lock(&bo->mutex);
1093 check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1094
1095 /*
1096 * TO DO:
1097 * add HMM_BO_USER type
1098 */
1099 if (type == HMM_BO_PRIVATE) {
1100 ret = alloc_private_pages(bo, from_highmem,
1101 cached, &dynamic_pool, &reserved_pool);
1102 } else if (type == HMM_BO_USER) {
1103 ret = alloc_user_pages(bo, userptr, cached);
1104 } else {
1105 dev_err(atomisp_dev, "invalid buffer type.\n");
1106 ret = -EINVAL;
1107 }
1108 if (ret)
1109 goto alloc_err;
1110
1111 bo->type = type;
1112
1113 bo->status |= HMM_BO_PAGE_ALLOCED;
1114
1115 mutex_unlock(&bo->mutex);
1116
1117 return 0;
1118
1119 alloc_err:
1120 mutex_unlock(&bo->mutex);
1121 dev_err(atomisp_dev, "alloc pages err...\n");
1122 return ret;
1123 status_err:
1124 mutex_unlock(&bo->mutex);
1125 dev_err(atomisp_dev,
1126 "buffer object has already page allocated.\n");
1127 return -EINVAL;
1128 }
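/*
 * Usage sketch for the two supported types (hedged; the real call sites
 * live in hmm.c):
 *
 *	ISP-private memory:
 *		hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, cached);
 *
 *	wrapping an existing user buffer:
 *		hmm_bo_alloc_pages(bo, HMM_BO_USER, 0, userptr, cached);
 *
 * in both cases the pages are only allocated/pinned here; the ISP does
 * not see them until hmm_bo_bind() installs the MMU mappings.
 */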
1129
1130 /*
1131 * free physical pages of the bo.
1132 */
1133 void hmm_bo_free_pages(struct hmm_buffer_object *bo)
1134 {
1135 check_bo_null_return_void(bo);
1136
1137 mutex_lock(&bo->mutex);
1138
1139 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err2);
1140
1141 /* clear the flag anyway. */
1142 bo->status &= (~HMM_BO_PAGE_ALLOCED);
1143
1144 if (bo->type == HMM_BO_PRIVATE)
1145 free_private_pages(bo, &dynamic_pool, &reserved_pool);
1146 else if (bo->type == HMM_BO_USER)
1147 free_user_pages(bo);
1148 else
1149 dev_err(atomisp_dev, "invalid buffer type.\n");
1150 mutex_unlock(&bo->mutex);
1151
1152 return;
1153
1154 status_err2:
1155 mutex_unlock(&bo->mutex);
1156 dev_err(atomisp_dev,
1157 "buffer object not page allocated yet.\n");
1158 }
1159
1160 int hmm_bo_page_allocated(struct hmm_buffer_object *bo)
1161 {
1162 check_bo_null_return(bo, 0);
1163
1164 return bo->status & HMM_BO_PAGE_ALLOCED;
1165 }
1166
1167 /*
1168 * get physical page info of the bo.
1169 */
1170 int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
1171 struct hmm_page_object **page_obj, int *pgnr)
1172 {
1173 check_bo_null_return(bo, -EINVAL);
1174
1175 mutex_lock(&bo->mutex);
1176
1177 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1178
1179 *page_obj = bo->page_obj;
1180 *pgnr = bo->pgnr;
1181
1182 mutex_unlock(&bo->mutex);
1183
1184 return 0;
1185
1186 status_err:
1187 dev_err(atomisp_dev,
1188 "buffer object not page allocated yet.\n");
1189 mutex_unlock(&bo->mutex);
1190 return -EINVAL;
1191 }
1192
1193 /*
1194 * bind the physical pages to a virtual address space.
1195 */
1196 int hmm_bo_bind(struct hmm_buffer_object *bo)
1197 {
1198 int ret;
1199 unsigned int virt;
1200 struct hmm_bo_device *bdev;
1201 unsigned int i;
1202
1203 check_bo_null_return(bo, -EINVAL);
1204
1205 mutex_lock(&bo->mutex);
1206
1207 check_bo_status_yes_goto(bo,
1208 HMM_BO_PAGE_ALLOCED | HMM_BO_ALLOCED,
1209 status_err1);
1210
1211 check_bo_status_no_goto(bo, HMM_BO_BINDED, status_err2);
1212
1213 bdev = bo->bdev;
1214
1215 virt = bo->start;
1216
1217 for (i = 0; i < bo->pgnr; i++) {
1218 ret =
1219 isp_mmu_map(&bdev->mmu, virt,
1220 page_to_phys(bo->page_obj[i].page), 1);
1221 if (ret)
1222 goto map_err;
1223 virt += (1 << PAGE_SHIFT);
1224 }
1225
1226 /*
1227          * flush the TLB here.
1228          *
1229          * theoretically, we do not need to flush the TLB as we did not change
1230          * any existing address mappings, but for Silicon Hive's MMU there is
1231          * really a bug here. presumably, when fetching PTEs (page table entries)
1232          * into the TLB, the MMU fetches additional INVALID PTEs automatically
1233          * for performance reasons. e.g. if we only set up 1 page address mapping,
1234          * i.e. update 1 PTE, the MMU fetches 4 PTEs at a time,
1235          * so the additional 3 PTEs are invalid.
1236 */
1237 if (bo->start != 0x0)
1238 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1239 (bo->pgnr << PAGE_SHIFT));
1240
1241 bo->status |= HMM_BO_BINDED;
1242
1243 mutex_unlock(&bo->mutex);
1244
1245 return 0;
1246
1247 map_err:
1248 /* unbind the physical pages with related virtual address space */
1249 virt = bo->start;
1250 for ( ; i > 0; i--) {
1251 isp_mmu_unmap(&bdev->mmu, virt, 1);
1252 virt += pgnr_to_size(1);
1253 }
1254
1255 mutex_unlock(&bo->mutex);
1256 dev_err(atomisp_dev,
1257 "setup MMU address mapping failed.\n");
1258 return ret;
1259
1260 status_err2:
1261 mutex_unlock(&bo->mutex);
1262         dev_err(atomisp_dev, "buffer object already bound.\n");
1263 return -EINVAL;
1264 status_err1:
1265 mutex_unlock(&bo->mutex);
1266 dev_err(atomisp_dev,
1267 "buffer object vm_node or page not allocated.\n");
1268 return -EINVAL;
1269 }
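/*
 * Bind/unbind pairing sketch (illustrative only): once the pages exist,
 * binding publishes them at the bo's ISP virtual address (bo->start).
 *
 *	if (hmm_bo_bind(bo))
 *		goto err;
 *	...			ISP DMA uses addresses in [bo->start, bo->end)
 *	hmm_bo_unbind(bo);
 *	hmm_bo_free_pages(bo);
 *	hmm_bo_unref(bo);
 */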
1270
1271 /*
1272 * unbind the physical pages with related virtual address space.
1273 */
1274 void hmm_bo_unbind(struct hmm_buffer_object *bo)
1275 {
1276 unsigned int virt;
1277 struct hmm_bo_device *bdev;
1278 unsigned int i;
1279
1280 check_bo_null_return_void(bo);
1281
1282 mutex_lock(&bo->mutex);
1283
1284 check_bo_status_yes_goto(bo,
1285 HMM_BO_PAGE_ALLOCED |
1286 HMM_BO_ALLOCED |
1287 HMM_BO_BINDED, status_err);
1288
1289 bdev = bo->bdev;
1290
1291 virt = bo->start;
1292
1293 for (i = 0; i < bo->pgnr; i++) {
1294 isp_mmu_unmap(&bdev->mmu, virt, 1);
1295 virt += pgnr_to_size(1);
1296 }
1297
1298 /*
1299 * flush TLB as the address mapping has been removed and
1300 * related TLBs should be invalidated.
1301 */
1302 isp_mmu_flush_tlb_range(&bdev->mmu, bo->start,
1303 (bo->pgnr << PAGE_SHIFT));
1304
1305 bo->status &= (~HMM_BO_BINDED);
1306
1307 mutex_unlock(&bo->mutex);
1308
1309 return;
1310
1311 status_err:
1312 mutex_unlock(&bo->mutex);
1313 dev_err(atomisp_dev,
1314 "buffer vm or page not allocated or not binded yet.\n");
1315 }
1316
1317 int hmm_bo_binded(struct hmm_buffer_object *bo)
1318 {
1319 int ret;
1320
1321 check_bo_null_return(bo, 0);
1322
1323 mutex_lock(&bo->mutex);
1324
1325 ret = bo->status & HMM_BO_BINDED;
1326
1327 mutex_unlock(&bo->mutex);
1328
1329 return ret;
1330 }
1331
1332 void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached)
1333 {
1334 struct page **pages;
1335 int i;
1336
1337 check_bo_null_return(bo, NULL);
1338
1339 mutex_lock(&bo->mutex);
1340 if (((bo->status & HMM_BO_VMAPED) && !cached) ||
1341 ((bo->status & HMM_BO_VMAPED_CACHED) && cached)) {
1342 mutex_unlock(&bo->mutex);
1343 return bo->vmap_addr;
1344 }
1345
1346         /* the cached status needs to change, so vunmap first */
1347 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1348 vunmap(bo->vmap_addr);
1349 bo->vmap_addr = NULL;
1350 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1351 }
1352
1353 pages = kmalloc_array(bo->pgnr, sizeof(*pages), GFP_KERNEL);
1354 if (unlikely(!pages)) {
1355 mutex_unlock(&bo->mutex);
1356 return NULL;
1357 }
1358
1359 for (i = 0; i < bo->pgnr; i++)
1360 pages[i] = bo->page_obj[i].page;
1361
1362 bo->vmap_addr = vmap(pages, bo->pgnr, VM_MAP,
1363 cached ? PAGE_KERNEL : PAGE_KERNEL_NOCACHE);
1364 if (unlikely(!bo->vmap_addr)) {
1365 kfree(pages);
1366 mutex_unlock(&bo->mutex);
1367 dev_err(atomisp_dev, "vmap failed...\n");
1368 return NULL;
1369 }
1370 bo->status |= (cached ? HMM_BO_VMAPED_CACHED : HMM_BO_VMAPED);
1371
1372 kfree(pages);
1373
1374 mutex_unlock(&bo->mutex);
1375 return bo->vmap_addr;
1376 }
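/*
 * Kernel-mapping sketch (illustrative, assuming a page-allocated bo):
 * a cached vmap must be flushed before the ISP reads the data through
 * its own mapping.
 *
 *	void *va = hmm_bo_vmap(bo, true);	(cached mapping)
 *
 *	if (va) {
 *		memcpy(va, data, bytes);
 *		hmm_bo_flush_vmap(bo);		(clflush the whole range)
 *		hmm_bo_vunmap(bo);
 *	}
 */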
1377
1378 void hmm_bo_flush_vmap(struct hmm_buffer_object *bo)
1379 {
1380 check_bo_null_return_void(bo);
1381
1382 mutex_lock(&bo->mutex);
1383 if (!(bo->status & HMM_BO_VMAPED_CACHED) || !bo->vmap_addr) {
1384 mutex_unlock(&bo->mutex);
1385 return;
1386 }
1387
1388 clflush_cache_range(bo->vmap_addr, bo->pgnr * PAGE_SIZE);
1389 mutex_unlock(&bo->mutex);
1390 }
1391
1392 void hmm_bo_vunmap(struct hmm_buffer_object *bo)
1393 {
1394 check_bo_null_return_void(bo);
1395
1396 mutex_lock(&bo->mutex);
1397 if (bo->status & HMM_BO_VMAPED || bo->status & HMM_BO_VMAPED_CACHED) {
1398 vunmap(bo->vmap_addr);
1399 bo->vmap_addr = NULL;
1400 bo->status &= ~(HMM_BO_VMAPED | HMM_BO_VMAPED_CACHED);
1401 }
1402
1403 mutex_unlock(&bo->mutex);
1404 return;
1405 }
1406
1407 void hmm_bo_ref(struct hmm_buffer_object *bo)
1408 {
1409 check_bo_null_return_void(bo);
1410
1411 kref_get(&bo->kref);
1412 }
1413
1414 static void kref_hmm_bo_release(struct kref *kref)
1415 {
1416 if (!kref)
1417 return;
1418
1419 hmm_bo_release(kref_to_hmm_bo(kref));
1420 }
1421
1422 void hmm_bo_unref(struct hmm_buffer_object *bo)
1423 {
1424 check_bo_null_return_void(bo);
1425
1426 kref_put(&bo->kref, kref_hmm_bo_release);
1427 }
1428
1429 static void hmm_bo_vm_open(struct vm_area_struct *vma)
1430 {
1431 struct hmm_buffer_object *bo =
1432 (struct hmm_buffer_object *)vma->vm_private_data;
1433
1434 check_bo_null_return_void(bo);
1435
1436 hmm_bo_ref(bo);
1437
1438 mutex_lock(&bo->mutex);
1439
1440 bo->status |= HMM_BO_MMAPED;
1441
1442 bo->mmap_count++;
1443
1444 mutex_unlock(&bo->mutex);
1445 }
1446
1447 static void hmm_bo_vm_close(struct vm_area_struct *vma)
1448 {
1449 struct hmm_buffer_object *bo =
1450 (struct hmm_buffer_object *)vma->vm_private_data;
1451
1452 check_bo_null_return_void(bo);
1453
1454 hmm_bo_unref(bo);
1455
1456 mutex_lock(&bo->mutex);
1457
1458 bo->mmap_count--;
1459
1460 if (!bo->mmap_count) {
1461 bo->status &= (~HMM_BO_MMAPED);
1462 vma->vm_private_data = NULL;
1463 }
1464
1465 mutex_unlock(&bo->mutex);
1466 }
1467
1468 static const struct vm_operations_struct hmm_bo_vm_ops = {
1469 .open = hmm_bo_vm_open,
1470 .close = hmm_bo_vm_close,
1471 };
1472
1473 /*
1474 * mmap the bo to user space.
1475 */
1476 int hmm_bo_mmap(struct vm_area_struct *vma, struct hmm_buffer_object *bo)
1477 {
1478 unsigned int start, end;
1479 unsigned int virt;
1480 unsigned int pgnr, i;
1481 unsigned int pfn;
1482
1483 check_bo_null_return(bo, -EINVAL);
1484
1485 check_bo_status_yes_goto(bo, HMM_BO_PAGE_ALLOCED, status_err);
1486
1487 pgnr = bo->pgnr;
1488 start = vma->vm_start;
1489 end = vma->vm_end;
1490
1491 /*
1492          * check the vma's virtual address space size and the buffer
1493          * object's size; they must be the same.
1494 */
1495 if ((start + pgnr_to_size(pgnr)) != end) {
1496 dev_warn(atomisp_dev,
1497 "vma's address space size not equal"
1498 " to buffer object's size");
1499 return -EINVAL;
1500 }
1501
1502 virt = vma->vm_start;
1503 for (i = 0; i < pgnr; i++) {
1504 pfn = page_to_pfn(bo->page_obj[i].page);
1505 if (remap_pfn_range(vma, virt, pfn, PAGE_SIZE, PAGE_SHARED)) {
1506 dev_warn(atomisp_dev,
1507 "remap_pfn_range failed:"
1508 " virt = 0x%x, pfn = 0x%x,"
1509 " mapped_pgnr = %d\n", virt, pfn, 1);
1510 return -EINVAL;
1511 }
1512 virt += PAGE_SIZE;
1513 }
1514
1515 vma->vm_private_data = bo;
1516
1517 vma->vm_ops = &hmm_bo_vm_ops;
1518 vma->vm_flags |= VM_IO|VM_DONTEXPAND|VM_DONTDUMP;
1519
1520 /*
1521          * call hmm_bo_vm_open explicitly.
1522 */
1523 hmm_bo_vm_open(vma);
1524
1525 return 0;
1526
1527 status_err:
1528         dev_err(atomisp_dev, "buffer pages not allocated yet.\n");
1529 return -EINVAL;
1530 }
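/*
 * Sketch of how a driver mmap file operation could use hmm_bo_mmap()
 * (hypothetical wrapper, shown for illustration; the bo lookup by ISP
 * virtual address via vma->vm_pgoff is an assumption of this sketch):
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct hmm_buffer_object *bo;
 *
 *		bo = hmm_bo_device_search_start(&bo_device,
 *						vma->vm_pgoff << PAGE_SHIFT);
 *		if (!bo)
 *			return -EINVAL;
 *
 *		return hmm_bo_mmap(vma, bo);
 *	}
 *
 * hmm_bo_mmap() remaps each page, installs hmm_bo_vm_ops and takes a
 * reference through hmm_bo_vm_open().
 */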