/*
 * Support for Medfield PNW Camera Imaging ISP subsystem.
 *
 * Copyright (c) 2010 Intel Corporation. All Rights Reserved.
 *
 * Copyright (c) 2010 Silicon Hive www.siliconhive.com.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */
/*
 * This file contains functions for ISP virtual address management in the
 * ISP driver.
 */
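/*
 * Typical usage, sketched here for illustration only (not taken from an
 * actual caller; vm_start, vm_size and pgnr below are placeholder names):
 *
 *	struct hmm_vm vm;
 *	struct hmm_vm_node *node;
 *
 *	hmm_vm_init(&vm, vm_start, vm_size);
 *	node = hmm_vm_alloc_node(&vm, pgnr);
 *	...	map pages at node->start through the ISP MMU	...
 *	hmm_vm_free_node(node);
 *	hmm_vm_clean(&vm);
 */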
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <asm/page.h>

#include "atomisp_internal.h"
#include "mmu/isp_mmu.h"
#include "hmm/hmm_vm.h"
#include "hmm/hmm_common.h"

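/* Return the first ISP virtual address past a node of @pgnr pages at @start. */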
static unsigned int vm_node_end(unsigned int start, unsigned int pgnr)
{
	return start + pgnr_to_size(pgnr);
}

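/* Return non-zero if @addr falls inside the address range covered by @node. */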
static int addr_in_vm_node(unsigned int addr,
			   struct hmm_vm_node *node)
{
	return (addr >= node->start) && (addr < (node->start + node->size));
}

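/*
 * Initialize the ISP virtual address space descriptor @vm to cover @size
 * bytes (rounded up to whole pages) starting at @start, and create the
 * slab cache used for hmm_vm_node allocations.
 *
 * Returns 0 on success or a negative value on failure.
 */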
int hmm_vm_init(struct hmm_vm *vm, unsigned int start,
		unsigned int size)
{
	if (!vm)
		return -1;

	vm->start = start;
	vm->pgnr = size_to_pgnr_ceil(size);
	vm->size = pgnr_to_size(vm->pgnr);

	INIT_LIST_HEAD(&vm->vm_node_list);
	spin_lock_init(&vm->lock);
	vm->cache = kmem_cache_create("atomisp_vm", sizeof(struct hmm_vm_node),
				      0, 0, NULL);

	return vm->cache != NULL ? 0 : -ENOMEM;
}

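/*
 * Tear down the virtual address space: detach the node list under the lock,
 * free every remaining node and destroy the slab cache.
 */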
void hmm_vm_clean(struct hmm_vm *vm)
{
	struct hmm_vm_node *node, *tmp;
	struct list_head new_head;

	if (!vm)
		return;

	spin_lock(&vm->lock);
	list_replace_init(&vm->vm_node_list, &new_head);
	spin_unlock(&vm->lock);

	list_for_each_entry_safe(node, tmp, &new_head, list) {
		list_del(&node->list);
		kmem_cache_free(vm->cache, node);
	}

	kmem_cache_destroy(vm->cache);
}

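/*
 * Allocate and initialize a node describing @pgnr pages; the start address
 * is filled in later by hmm_vm_alloc_node().
 */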
static struct hmm_vm_node *alloc_hmm_vm_node(unsigned int pgnr,
					     struct hmm_vm *vm)
{
	struct hmm_vm_node *node;

	node = kmem_cache_alloc(vm->cache, GFP_KERNEL);
	if (!node) {
		dev_err(atomisp_dev, "out of memory.\n");
		return NULL;
	}

	INIT_LIST_HEAD(&node->list);
	node->pgnr = pgnr;
	node->size = pgnr_to_size(pgnr);
	node->vm = vm;

	return node;
}

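/*
 * Reserve @pgnr pages of ISP virtual address space from @vm.
 *
 * The node list is kept sorted by start address; it is walked until a
 * sufficiently large gap between existing nodes is found, or the new node
 * is placed after the last one if enough space remains before the end of
 * the vm area.
 *
 * Returns the new node, or NULL if allocation fails or the address space
 * is exhausted.
 */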
struct hmm_vm_node *hmm_vm_alloc_node(struct hmm_vm *vm, unsigned int pgnr)
{
	struct list_head *head;
	struct hmm_vm_node *node, *cur, *next;
	unsigned int vm_start, vm_end;
	unsigned int addr;
	unsigned int size;

	if (!vm)
		return NULL;

	vm_start = vm->start;
	vm_end = vm_node_end(vm->start, vm->pgnr);
	size = pgnr_to_size(pgnr);

	addr = vm_start;
	head = &vm->vm_node_list;

	node = alloc_hmm_vm_node(pgnr, vm);
	if (!node) {
		dev_err(atomisp_dev, "no memory to allocate hmm vm node.\n");
		return NULL;
	}

	spin_lock(&vm->lock);
	/*
	 * If the list is empty the loop below is not executed and the new
	 * node is inserted right after the list head, at vm_start.
	 */
	list_for_each_entry(cur, head, list) {
		/* Leave a gap between vm areas so an overflow is not hidden */
		addr = PAGE_ALIGN(vm_node_end(cur->start, cur->pgnr) + 1);

		if (list_is_last(&cur->list, head)) {
			if (addr + size > vm_end) {
				/* The vm area has no space left */
				spin_unlock(&vm->lock);
				kmem_cache_free(vm->cache, node);
				dev_err(atomisp_dev,
					"not enough virtual address space.\n");
				return NULL;
			}

			/* There is still vm space left; add the new node at the tail */
			break;
		}

		next = list_entry(cur->list.next, struct hmm_vm_node, list);
		if ((next->start - addr) > size)
			break;
	}
	node->start = addr;
	node->vm = vm;
	list_add(&node->list, &cur->list);
	spin_unlock(&vm->lock);

	return node;
}

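/* Give the address range covered by @node back to the vm area and free it. */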
void hmm_vm_free_node(struct hmm_vm_node *node)
{
	struct hmm_vm *vm;

	if (!node)
		return;

	vm = node->vm;

	spin_lock(&vm->lock);
	list_del(&node->list);
	spin_unlock(&vm->lock);

	kmem_cache_free(vm->cache, node);
}

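/* Find the node whose range starts exactly at @addr, or NULL if none does. */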
struct hmm_vm_node *hmm_vm_find_node_start(struct hmm_vm *vm, unsigned int addr)
{
	struct hmm_vm_node *node;

	if (!vm)
		return NULL;

	spin_lock(&vm->lock);

	list_for_each_entry(node, &vm->vm_node_list, list) {
		if (node->start == addr) {
			spin_unlock(&vm->lock);
			return node;
		}
	}

	spin_unlock(&vm->lock);
	return NULL;
}

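/* Find the node whose range contains @addr, or NULL if no node covers it. */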
struct hmm_vm_node *hmm_vm_find_node_in_range(struct hmm_vm *vm,
					      unsigned int addr)
{
	struct hmm_vm_node *node;

	if (!vm)
		return NULL;

	spin_lock(&vm->lock);

	list_for_each_entry(node, &vm->vm_node_list, list) {
		if (addr_in_vm_node(addr, node)) {
			spin_unlock(&vm->lock);
			return node;
		}
	}

	spin_unlock(&vm->lock);
	return NULL;
}