/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */

#ifndef __HMM_BO_H__
#define __HMM_BO_H__

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include "mmu/isp_mmu.h"
#include "hmm/hmm_common.h"
#include "ia_css_types.h"

#define check_bodev_null_return(bdev, exp)	\
		check_null_return(bdev, exp, \
			"NULL hmm_bo_device.\n")

#define check_bodev_null_return_void(bdev)	\
		check_null_return_void(bdev, \
			"NULL hmm_bo_device.\n")

#define check_bo_status_yes_goto(bo, _status, label) \
	var_not_equal_goto((bo->status & (_status)), (_status), \
			label, \
			"HMM buffer status does not contain %s.\n", \
			#_status)

#define check_bo_status_no_goto(bo, _status, label) \
	var_equal_goto((bo->status & (_status)), (_status), \
			label, \
			"HMM buffer status contains %s.\n", \
			#_status)

#define rbtree_node_to_hmm_bo(root_node)	\
	container_of((root_node), struct hmm_buffer_object, node)

#define list_to_hmm_bo(list_ptr)	\
	list_entry((list_ptr), struct hmm_buffer_object, list)

#define kref_to_hmm_bo(kref_ptr)	\
	list_entry((kref_ptr), struct hmm_buffer_object, kref)

#define check_bo_null_return(bo, exp)	\
	check_null_return(bo, exp, "NULL hmm buffer object.\n")

#define check_bo_null_return_void(bo)	\
	check_null_return_void(bo, "NULL hmm buffer object.\n")

/* max/min order used when allocating physical pages for a bo */
#define HMM_MAX_ORDER		3
#define HMM_MIN_ORDER		0

#define ISP_VM_START	0x0
#define ISP_VM_SIZE	(0x7FFFFFFF)	/* 2G address space */
#define ISP_PTR_NULL	NULL

#define HMM_BO_DEVICE_INITED	0x1

enum hmm_bo_type {
	HMM_BO_PRIVATE,
	HMM_BO_SHARE,
	HMM_BO_USER,
#ifdef CONFIG_ION
	HMM_BO_ION,
#endif
	HMM_BO_LAST,
};

enum hmm_page_type {
	HMM_PAGE_TYPE_RESERVED,
	HMM_PAGE_TYPE_DYNAMIC,
	HMM_PAGE_TYPE_GENERAL,
};

/* buffer object status flags, kept in hmm_buffer_object.status */
#define HMM_BO_MASK		0x1
#define HMM_BO_FREE		0x0
#define HMM_BO_ALLOCED		0x1
#define HMM_BO_PAGE_ALLOCED	0x2
#define HMM_BO_BINDED		0x4
#define HMM_BO_MMAPED		0x8
#define HMM_BO_VMAPED		0x10
#define HMM_BO_VMAPED_CACHED	0x20
#define HMM_BO_ACTIVE		0x1000
/* memory types, kept in hmm_buffer_object.mem_type */
#define HMM_BO_MEM_TYPE_USER	0x1
#define HMM_BO_MEM_TYPE_PFN	0x2

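/*
 * Example (illustrative sketch only, not part of this header's API):
 * the status flags above are bits OR-ed into bo->status, so a state
 * test combines them with a bitwise AND, as the check_bo_status_*
 * macros do internally:
 *
 *	if ((bo->status & (HMM_BO_PAGE_ALLOCED | HMM_BO_BINDED)) ==
 *	    (HMM_BO_PAGE_ALLOCED | HMM_BO_BINDED))
 *		... bo has pages and they are bound to the ISP vm ...
 */
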
struct hmm_bo_device {
	struct isp_mmu		mmu;

	/* start/pgnr/size record the virtual memory range managed by
	 * this device */
	unsigned int start;
	unsigned int pgnr;
	unsigned int size;

	/* list lock is used to protect the entire_bo_list */
	spinlock_t	list_lock;
#ifdef CONFIG_ION
	struct ion_client	*iclient;
#endif
	int flag;

	/* linked list of all buffer objects */
	struct list_head entire_bo_list;
	/* rbtree maintaining all allocated vm */
	struct rb_root allocated_rbtree;
	/* rbtree maintaining all free vm */
	struct rb_root free_rbtree;
	struct mutex rbtree_mutex;
	struct kmem_cache *bo_cache;
};

struct hmm_page_object {
	struct page		*page;
	enum hmm_page_type	type;
};

struct hmm_buffer_object {
	struct hmm_bo_device	*bdev;
	struct list_head	list;
	struct kref		kref;

	/* mutex protecting this BO */
	struct mutex		mutex;
	enum hmm_bo_type	type;
	struct hmm_page_object	*page_obj;	/* physical pages */
	int		from_highmem;
	int		mmap_count;
#ifdef CONFIG_ION
	struct ion_handle	*ihandle;
#endif
	int		status;
	int		mem_type;
	void		*vmap_addr;	/* kernel virtual address by vmap */

	struct rb_node	node;
	unsigned int	start;
	unsigned int	end;
	unsigned int	pgnr;
	/*
	 * When a bo is inserted whose pgnr equals that of an existing
	 * bo node in the free_rbtree, it is chained onto that node via
	 * the "prev" and "next" pointers instead of being inserted into
	 * the free_rbtree directly. This guarantees that every node in
	 * the free_rbtree has a distinct pgnr. "prev" and "next" default
	 * to NULL; see the illustration after this struct definition.
	 */
	struct hmm_buffer_object	*prev;
	struct hmm_buffer_object	*next;
};

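/*
 * Illustration (informative only, inferred from the comment above):
 * two free bos with pgnr == 8 share one free_rbtree node; the second
 * hangs off the first through the prev/next chain rather than becoming
 * a second tree node with a duplicate key:
 *
 *	free_rbtree
 *	     |
 *	[bo A, pgnr 8] <--prev/next--> [bo B, pgnr 8]
 *	     |
 *	other nodes, each with a unique pgnr
 */
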
struct hmm_buffer_object *hmm_bo_alloc(struct hmm_bo_device *bdev,
				unsigned int pgnr);

void hmm_bo_release(struct hmm_buffer_object *bo);

int hmm_bo_device_init(struct hmm_bo_device *bdev,
			struct isp_mmu_client *mmu_driver,
			unsigned int vaddr_start, unsigned int size);

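/*
 * Example (illustrative sketch only; "my_bo_device" and "my_mmu_client"
 * are hypothetical names): initialize a bo device covering the whole
 * ISP virtual address space defined above:
 *
 *	static struct hmm_bo_device my_bo_device;
 *
 *	ret = hmm_bo_device_init(&my_bo_device, &my_mmu_client,
 *				 ISP_VM_START, ISP_VM_SIZE);
 *	if (ret)
 *		... handle failure ...
 */
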
/*
 * clean up all hmm_bo_device related things.
 */
void hmm_bo_device_exit(struct hmm_bo_device *bdev);

/*
 * whether the bo device is inited or not.
 */
int hmm_bo_device_inited(struct hmm_bo_device *bdev);

/*
 * increase the buffer object reference count.
 */
void hmm_bo_ref(struct hmm_buffer_object *bo);

/*
 * decrease the buffer object reference count. if the reference count
 * reaches 0, the release function of the buffer object will be called.
 *
 * this call is also used to release a hmm_buffer_object or the upper
 * level object it is embedded in. you need to call this function when
 * the object is no longer used.
 *
 * Note:
 *
 * users don't need to care about internal resource release of the
 * buffer object in the release callback; it will be handled
 * internally.
 *
 * this call will only release internal resources of the buffer
 * object, but will not free the buffer object itself, as the buffer
 * object can be either pre-allocated statically or allocated
 * dynamically. so users need to deal with the release of the buffer
 * object itself manually. the examples below show the normal way of
 * using a buffer object:
 *
 *	struct hmm_buffer_object *bo = hmm_bo_alloc(bdev, pgnr);
 *	......
 *	hmm_bo_unref(bo);
 *
 * or:
 *
 *	struct hmm_buffer_object bo;
 *
 *	hmm_bo_init(bdev, &bo, pgnr, NULL);
 *	...
 *	hmm_bo_unref(&bo);
 */
void hmm_bo_unref(struct hmm_buffer_object *bo);

/*
 * whether the bo has been allocated, i.e. whether its virtual address
 * space has been reserved on the bo device.
 */
int hmm_bo_allocated(struct hmm_buffer_object *bo);

/*
 * allocate/free physical pages for the bo. will try to allocate memory
 * from highmem if from_highmem is set, and type indicates whether the
 * pages will be allocated by the video driver (for shared buffers) or
 * by the ISP driver itself.
 */
int hmm_bo_alloc_pages(struct hmm_buffer_object *bo,
			enum hmm_bo_type type, int from_highmem,
			void *userptr, bool cached);
void hmm_bo_free_pages(struct hmm_buffer_object *bo);
int hmm_bo_page_allocated(struct hmm_buffer_object *bo);

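/*
 * Example (illustrative sketch only, assuming the usual kernel
 * convention that hmm_bo_alloc_pages() returns 0 on success): typical
 * lifecycle of a private, cached buffer, error handling elided:
 *
 *	struct hmm_buffer_object *bo;
 *
 *	bo = hmm_bo_alloc(bdev, pgnr);
 *	if (!bo)
 *		... handle allocation failure ...
 *	if (!hmm_bo_alloc_pages(bo, HMM_BO_PRIVATE, 0, NULL, true)) {
 *		... use the bo ...
 *		hmm_bo_free_pages(bo);
 *	}
 *	hmm_bo_unref(bo);
 */
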
/*
 * get physical page info of the bo.
 */
int hmm_bo_get_page_info(struct hmm_buffer_object *bo,
			struct hmm_page_object **page_obj, int *pgnr);

/*
 * bind/unbind the physical pages to a virtual address space.
 */
int hmm_bo_bind(struct hmm_buffer_object *bo);
void hmm_bo_unbind(struct hmm_buffer_object *bo);
int hmm_bo_binded(struct hmm_buffer_object *bo);

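/*
 * Example (illustrative sketch only, assuming the usual kernel
 * convention that hmm_bo_bind() returns 0 on success): pages must be
 * allocated before they can be bound to the ISP mmu:
 *
 *	if (hmm_bo_page_allocated(bo) && !hmm_bo_bind(bo)) {
 *		... the bo's pages are now mapped at bo->start ...
 *		hmm_bo_unbind(bo);
 *	}
 */
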
/*
 * vmap the buffer object's pages into a contiguous kernel virtual
 * address. if the buffer has already been vmapped, return the virtual
 * address directly.
 */
void *hmm_bo_vmap(struct hmm_buffer_object *bo, bool cached);

/*
 * flush the cache for the vmapped buffer object's pages.
 * if the buffer has not been vmapped, return directly.
 */
void hmm_bo_flush_vmap(struct hmm_buffer_object *bo);

/*
 * vunmap the buffer object's kernel virtual address.
 */
void hmm_bo_vunmap(struct hmm_buffer_object *bo);

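/*
 * Example (illustrative sketch only; "data" and "len" are hypothetical):
 * CPU access through a cached kernel mapping, flushing before handing
 * the buffer back to the ISP:
 *
 *	void *va = hmm_bo_vmap(bo, true);
 *
 *	if (va) {
 *		memcpy(va, data, len);	(hypothetical CPU write)
 *		hmm_bo_flush_vmap(bo);
 *		hmm_bo_vunmap(bo);
 *	}
 */
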
/*
 * mmap the bo's physical pages to a specific vma.
 *
 * the vma's address space size must be the same as the bo's size,
 * otherwise it will return -EINVAL.
 *
 * vma->vm_flags will be set to (VM_RESERVED | VM_IO).
 */
int hmm_bo_mmap(struct vm_area_struct *vma,
		struct hmm_buffer_object *bo);

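/*
 * Example (illustrative sketch only; how a driver locates the bo for a
 * given vma is driver specific and hypothetical here): called from a
 * driver's mmap file operation:
 *
 *	static int my_driver_mmap(struct file *file,
 *				  struct vm_area_struct *vma)
 *	{
 *		struct hmm_buffer_object *bo = ... look up bo for vma ...;
 *
 *		if (!bo)
 *			return -EINVAL;
 *		return hmm_bo_mmap(vma, bo);
 *	}
 */
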
extern struct hmm_pool	dynamic_pool;
extern struct hmm_pool	reserved_pool;

/*
 * find the buffer object by its virtual address vaddr.
 * return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_start(
		struct hmm_bo_device *bdev, ia_css_ptr vaddr);

/*
 * find the buffer object by a virtual address. the address does not
 * need to be the start address of a bo; any address within the range
 * of a bo will match it.
 * return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_in_range(
		struct hmm_bo_device *bdev, ia_css_ptr vaddr);

/*
 * find the buffer object with the kernel virtual address vaddr.
 * return NULL if no such buffer object is found.
 */
struct hmm_buffer_object *hmm_bo_device_search_vmap_start(
		struct hmm_bo_device *bdev, const void *vaddr);

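/*
 * Example (illustrative sketch only): translate an arbitrary ISP
 * virtual address back to its owning buffer object:
 *
 *	struct hmm_buffer_object *bo;
 *
 *	bo = hmm_bo_device_search_in_range(bdev, vaddr);
 *	if (bo)
 *		... vaddr falls inside the bo's vm range ...
 */
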
#endif /* __HMM_BO_H__ */