]>
Commit | Line | Data |
---|---|---|
9b843757 PM |
1 | /* drivers/android/pmem.c |
2 | * | |
3 | * Copyright (C) 2007 Google, Inc. | |
4 | * | |
5 | * This software is licensed under the terms of the GNU General Public | |
6 | * License version 2, as published by the Free Software Foundation, and | |
7 | * may be copied, distributed, and modified under those terms. | |
8 | * | |
9 | * This program is distributed in the hope that it will be useful, | |
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
12 | * GNU General Public License for more details. | |
13 | * | |
14 | */ | |
15 | ||
16 | #include <linux/miscdevice.h> | |
17 | #include <linux/platform_device.h> | |
18 | #include <linux/fs.h> | |
19 | #include <linux/file.h> | |
20 | #include <linux/mm.h> | |
21 | #include <linux/list.h> | |
22 | #include <linux/debugfs.h> | |
23 | #include <linux/android_pmem.h> | |
24 | #include <linux/mempolicy.h> | |
25 | #include <linux/sched.h> | |
5a0e3ad6 | 26 | #include <linux/slab.h> |
ef079a76 NC |
27 | #include <linux/io.h> |
28 | #include <linux/uaccess.h> | |
9b843757 PM |
29 | #include <asm/cacheflush.h> |
30 | ||
31 | #define PMEM_MAX_DEVICES 10 | |
32 | #define PMEM_MAX_ORDER 128 | |
33 | #define PMEM_MIN_ALLOC PAGE_SIZE | |
34 | ||
35 | #define PMEM_DEBUG 1 | |
36 | ||
/* indicates that a reference to this file has been taken via get_pmem_file,
 * the file should not be released until put_pmem_file is called */
39 | #define PMEM_FLAGS_BUSY 0x1 | |
40 | /* indicates that this is a suballocation of a larger master range */ | |
eb450e89 | 41 | #define PMEM_FLAGS_CONNECTED (0x1 << 1) |
9b843757 | 42 | /* indicates this is a master and not a sub allocation and that it is mmaped */ |
eb450e89 | 43 | #define PMEM_FLAGS_MASTERMAP (0x1 << 2) |
9b843757 PM |
/* submap and unsubmap flags indicate:
 * 00: subregion has never been mmaped
 * 10: subregion has been mmaped, reference to the mm was taken
 * 11: subregion has been released, reference to the mm still held
 * 01: subregion has been released, reference to the mm has been released
 */
eb450e89 CC |
50 | #define PMEM_FLAGS_SUBMAP (0x1 << 3) |
51 | #define PMEM_FLAGS_UNSUBMAP (0x1 << 4) | |
9b843757 PM |
52 | |
53 | ||
/* per-open-file state; hangs off file->private_data for every opened
 * pmem device file */
struct pmem_data {
	/* in alloc mode: an index into the bitmap
	 * in no_alloc mode: the size of the allocation */
	int index;
	/* see the PMEM_FLAGS_* defines above for descriptions */
	unsigned int flags;
	/* protects this data field; if the mm mmap sem will be held at the
	 * same time as this sem, the mm sem must be taken first (as this is
	 * the order for vma_open and vma_close ops) */
	struct rw_semaphore sem;
	/* info about the mmaping process */
	struct vm_area_struct *vma;
	/* task struct of the mapping process */
	struct task_struct *task;
	/* process id of the mapping process */
	pid_t pid;
	/* file descriptor of the master */
	int master_fd;
	/* file struct of the master */
	struct file *master_file;
	/* a list of currently available regions if this is a suballocation */
	struct list_head region_list;
	/* a linked list of data so we can access them for debugging */
	struct list_head list;
#if PMEM_DEBUG
	/* debug-only balance counter for get_pmem_file/put_pmem_file */
	int ref;
#endif
};
82 | ||
/* one entry per PMEM_MIN_ALLOC unit of the region; the array of these
 * (pmem_info.bitmap) is the buddy allocator's bookkeeping */
struct pmem_bits {
	unsigned allocated:1;		/* 1 if allocated, 0 if free */
	unsigned order:7;		/* size of the region in pmem space */
};
87 | ||
/* list node wrapping a pmem_region so remapped sub-regions can be
 * tracked on pmem_data.region_list */
struct pmem_region_node {
	struct pmem_region region;
	struct list_head list;
};
92 | ||
93 | #define PMEM_DEBUG_MSGS 0 | |
94 | #if PMEM_DEBUG_MSGS | |
df16b962 | 95 | #define DLOG(fmt, args...) \ |
9b843757 PM |
96 | do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \ |
97 | ##args); } \ | |
98 | while (0) | |
99 | #else | |
100 | #define DLOG(x...) do {} while (0) | |
101 | #endif | |
102 | ||
/* per-device state; one entry of pmem[] per registered pmem region */
struct pmem_info {
	struct miscdevice dev;
	/* physical start address of the remapped pmem space */
	unsigned long base;
	/* virtual start address of the remapped pmem space */
	unsigned char __iomem *vbase;
	/* total size of the pmem space */
	unsigned long size;
	/* number of entries in the pmem space */
	unsigned long num_entries;
	/* pfn of the garbage page in memory */
	unsigned long garbage_pfn;
	/* index of the garbage page in the pmem space */
	int garbage_index;
	/* the bitmap for the region indicating which entries are allocated
	 * and which are free */
	struct pmem_bits *bitmap;
	/* indicates the region should not be managed with an allocator */
	unsigned no_allocator;
	/* indicates maps of this region should be cached; if a mix of
	 * cached and uncached is desired, set this and open the device with
	 * O_SYNC to get an uncached region */
	unsigned cached;
	unsigned buffered;
	/* in no_allocator mode the first mapper gets the whole space and sets
	 * this flag */
	unsigned allocated;
	/* for debugging, creates a list of pmem file structs; the
	 * data_list_sem should be taken before pmem_data->sem if both are
	 * needed */
	struct semaphore data_list_sem;
	struct list_head data_list;
	/* bitmap_sem protects the bitmap array:
	 * a write lock should be held when modifying entries in bitmap
	 * a read lock should be held when reading data from bits or
	 * dereferencing a pointer into bitmap
	 *
	 * pmem_data->sem protects the pmem data of a particular file
	 * Many of the functions that require the pmem_data->sem have a non-
	 * locking version for when the caller is already holding that sem.
	 *
	 * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:
	 * down(pmem_data->sem) => down(bitmap_sem)
	 */
	struct rw_semaphore bitmap_sem;

	/* optional board-specific hooks chained from the core handlers */
	long (*ioctl)(struct file *, unsigned int, unsigned long);
	int (*release)(struct inode *, struct file *);
};
152 | ||
153 | static struct pmem_info pmem[PMEM_MAX_DEVICES]; | |
154 | static int id_count; | |
155 | ||
/* buddy-allocator helpers over pmem[id].bitmap; index arithmetic is in
 * PMEM_MIN_ALLOC (page) units */
#define PMEM_IS_FREE(id, index) (!(pmem[id].bitmap[index].allocated))
#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order
#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index)))
#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index)))
#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC)
#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base)
#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC)
#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \
	PMEM_LEN(id, index))
/* fixed: PMEM_OFFSET takes a single argument; the old definition passed
 * (id, index) and could not compile anywhere it was expanded */
#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(index) + pmem[id].vbase)
#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \
	PMEM_LEN(id, index))
/* NOTE(review): PMEM_FLAGS_REVOKED is not defined in this file's visible
 * flag set -- presumably defined elsewhere; verify before using */
#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED)
#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \
	(!(data->flags & PMEM_FLAGS_UNSUBMAP)))
172 | ||
173 | static int pmem_release(struct inode *, struct file *); | |
174 | static int pmem_mmap(struct file *, struct vm_area_struct *); | |
175 | static int pmem_open(struct inode *, struct file *); | |
176 | static long pmem_ioctl(struct file *, unsigned int, unsigned long); | |
177 | ||
/* file operations for the pmem misc devices; there is intentionally no
 * read/write -- the device is used via open/mmap/ioctl only */
const struct file_operations pmem_fops = {
	.release = pmem_release,
	.mmap = pmem_mmap,
	.open = pmem_open,
	.unlocked_ioctl = pmem_ioctl,
	.llseek = noop_llseek,
};
185 | ||
186 | static int get_id(struct file *file) | |
187 | { | |
188 | return MINOR(file->f_dentry->d_inode->i_rdev); | |
189 | } | |
190 | ||
191 | static int is_pmem_file(struct file *file) | |
192 | { | |
193 | int id; | |
194 | ||
195 | if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode)) | |
196 | return 0; | |
197 | id = get_id(file); | |
198 | if (unlikely(id >= PMEM_MAX_DEVICES)) | |
199 | return 0; | |
200 | if (unlikely(file->f_dentry->d_inode->i_rdev != | |
201 | MKDEV(MISC_MAJOR, pmem[id].dev.minor))) | |
202 | return 0; | |
203 | return 1; | |
204 | } | |
205 | ||
206 | static int has_allocation(struct file *file) | |
207 | { | |
208 | struct pmem_data *data; | |
209 | /* check is_pmem_file first if not accessed via pmem_file_ops */ | |
210 | ||
211 | if (unlikely(!file->private_data)) | |
212 | return 0; | |
37157cc4 | 213 | data = file->private_data; |
9b843757 PM |
214 | if (unlikely(data->index < 0)) |
215 | return 0; | |
216 | return 1; | |
217 | } | |
218 | ||
219 | static int is_master_owner(struct file *file) | |
220 | { | |
221 | struct file *master_file; | |
222 | struct pmem_data *data; | |
223 | int put_needed, ret = 0; | |
224 | ||
225 | if (!is_pmem_file(file) || !has_allocation(file)) | |
226 | return 0; | |
37157cc4 | 227 | data = file->private_data; |
9b843757 PM |
228 | if (PMEM_FLAGS_MASTERMAP & data->flags) |
229 | return 1; | |
230 | master_file = fget_light(data->master_fd, &put_needed); | |
231 | if (master_file && data->master_file == master_file) | |
232 | ret = 1; | |
233 | fput_light(master_file, put_needed); | |
234 | return ret; | |
235 | } | |
236 | ||
/* Return the block at @index to the free pool, merging freed buddies
 * into larger blocks.  Caller must hold the write lock on
 * pmem[id].bitmap_sem (see pmem_release / pmem_mmap). */
static int pmem_free(int id, int index)
{
	int buddy, curr = index;
	DLOG("index %d\n", index);

	/* no_allocator mode has a single whole-region allocation */
	if (pmem[id].no_allocator) {
		pmem[id].allocated = 0;
		return 0;
	}
	/* clean up the bitmap, merging any buddies */
	pmem[id].bitmap[curr].allocated = 0;
	/* find a slot's buddy: Buddy# = Slot# ^ (1 << order)
	 * if the buddy is also free merge them
	 * repeat until the buddy is not free or end of the bitmap is reached
	 */
	do {
		buddy = PMEM_BUDDY_INDEX(id, curr);
		if (PMEM_IS_FREE(id, buddy) &&
				PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
			/* merge: both halves take the doubled order and the
			 * lower index becomes the merged block's head */
			PMEM_ORDER(id, buddy)++;
			PMEM_ORDER(id, curr)++;
			curr = min(buddy, curr);
		} else {
			break;
		}
	} while (curr < pmem[id].num_entries);

	return 0;
}
267 | ||
268 | static void pmem_revoke(struct file *file, struct pmem_data *data); | |
269 | ||
/* Final release of a pmem file: revoke any connected sub-files if this
 * was a master, free the backing allocation, drop the pinned task, and
 * tear down region bookkeeping.  Lock order: data_list_sem, then
 * data->sem, then bitmap_sem. */
static int pmem_release(struct inode *inode, struct file *file)
{
	struct pmem_data *data = file->private_data;
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	int id = get_id(file), ret = 0;


	down(&pmem[id].data_list_sem);
	/* if this file is a master, revoke all the memory in the connected
	 * files */
	if (PMEM_FLAGS_MASTERMAP & data->flags) {
		struct pmem_data *sub_data;
		list_for_each(elt, &pmem[id].data_list) {
			sub_data = list_entry(elt, struct pmem_data, list);
			down_read(&sub_data->sem);
			if (PMEM_IS_SUBMAP(sub_data) &&
			    file == sub_data->master_file) {
				/* drop the read lock before revoking */
				up_read(&sub_data->sem);
				pmem_revoke(file, sub_data);
			} else
				up_read(&sub_data->sem);
		}
	}
	list_del(&data->list);
	up(&pmem[id].data_list_sem);


	down_write(&data->sem);

	/* if it's not a connected file and it has an allocation, free it */
	if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) {
		down_write(&pmem[id].bitmap_sem);
		ret = pmem_free(id, data->index);
		up_write(&pmem[id].bitmap_sem);
	}

	/* if this file is a submap (mapped, connected file), downref the
	 * task struct */
	if (PMEM_FLAGS_SUBMAP & data->flags)
		if (data->task) {
			put_task_struct(data->task);
			data->task = NULL;
		}

	file->private_data = NULL;

	/* free the bookkeeping for any remapped sub-regions */
	list_for_each_safe(elt, elt2, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		list_del(elt);
		kfree(region_node);
	}
	BUG_ON(!list_empty(&data->region_list));

	up_write(&data->sem);
	kfree(data);
	/* chain to the board-specific release hook, if any */
	if (pmem[id].release)
		ret = pmem[id].release(inode, file);

	return ret;
}
331 | ||
332 | static int pmem_open(struct inode *inode, struct file *file) | |
333 | { | |
334 | struct pmem_data *data; | |
335 | int id = get_id(file); | |
336 | int ret = 0; | |
337 | ||
338 | DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file)); | |
339 | /* setup file->private_data to indicate its unmapped */ | |
340 | /* you can only open a pmem device one time */ | |
341 | if (file->private_data != NULL) | |
342 | return -1; | |
343 | data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL); | |
344 | if (!data) { | |
345 | printk("pmem: unable to allocate memory for pmem metadata."); | |
346 | return -1; | |
347 | } | |
348 | data->flags = 0; | |
349 | data->index = -1; | |
350 | data->task = NULL; | |
351 | data->vma = NULL; | |
352 | data->pid = 0; | |
353 | data->master_file = NULL; | |
354 | #if PMEM_DEBUG | |
355 | data->ref = 0; | |
356 | #endif | |
357 | INIT_LIST_HEAD(&data->region_list); | |
358 | init_rwsem(&data->sem); | |
359 | ||
360 | file->private_data = data; | |
361 | INIT_LIST_HEAD(&data->list); | |
362 | ||
363 | down(&pmem[id].data_list_sem); | |
364 | list_add(&data->list, &pmem[id].data_list); | |
365 | up(&pmem[id].data_list_sem); | |
366 | return ret; | |
367 | } | |
368 | ||
369 | static unsigned long pmem_order(unsigned long len) | |
370 | { | |
371 | int i; | |
372 | ||
373 | len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC; | |
374 | len--; | |
375 | for (i = 0; i < sizeof(len)*8; i++) | |
376 | if (len >> i == 0) | |
377 | break; | |
378 | return i; | |
379 | } | |
380 | ||
/* Find and mark allocated a bitmap block big enough for @len bytes.
 * Returns the block index, or -1 on failure.  Caller must hold the
 * write lock on pmem[id].bitmap_sem.  In no_allocator mode the whole
 * region is handed out once and the byte length is returned instead
 * (it is stored in pmem_data.index in that mode). */
static int pmem_allocate(int id, unsigned long len)
{
	int curr = 0;
	int end = pmem[id].num_entries;
	int best_fit = -1;
	unsigned long order = pmem_order(len);

	if (pmem[id].no_allocator) {
		DLOG("no allocator");
		if ((len > pmem[id].size) || pmem[id].allocated)
			return -1;
		pmem[id].allocated = 1;
		return len;
	}

	if (order > PMEM_MAX_ORDER)
		return -1;
	DLOG("order %lx\n", order);

	/* look through the bitmap:
	 * if you find a free slot of the correct order use it
	 * otherwise, use the best fit (smallest with size > order) slot
	 */
	while (curr < end) {
		if (PMEM_IS_FREE(id, curr)) {
			if (PMEM_ORDER(id, curr) == (unsigned char)order) {
				/* exact fit -- stop searching */
				best_fit = curr;
				break;
			}
			if (PMEM_ORDER(id, curr) > (unsigned char)order &&
			    (best_fit < 0 ||
			     PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit)))
				best_fit = curr;
		}
		/* stride forward by the current block's size */
		curr = PMEM_NEXT_INDEX(id, curr);
	}

	/* if best_fit < 0, there are no suitable slots,
	 * return an error
	 */
	if (best_fit < 0) {
		printk("pmem: no space left to allocate!\n");
		return -1;
	}

	/* now partition the best fit:
	 * split the slot into 2 buddies of order - 1
	 * repeat until the slot is of the correct order
	 */
	while (PMEM_ORDER(id, best_fit) > (unsigned char)order) {
		int buddy;
		PMEM_ORDER(id, best_fit) -= 1;
		buddy = PMEM_BUDDY_INDEX(id, best_fit);
		PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit);
	}
	pmem[id].bitmap[best_fit].allocated = 1;
	return best_fit;
}
442 | ||
/* Choose page protection for a pmem mapping: uncached devices (or files
 * opened with O_SYNC) get noncached, otherwise optionally ext-buffered,
 * otherwise @vma_prot is passed through unchanged. */
static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot)
{
	int id = get_id(file);
#ifdef pgprot_noncached
	if (pmem[id].cached == 0 || file->f_flags & O_SYNC)
		return pgprot_noncached(vma_prot);
#endif
#ifdef pgprot_ext_buffered
	/* NOTE(review): this `else if` only pairs with the `if` above when
	 * pgprot_noncached is also defined; with only pgprot_ext_buffered
	 * defined this is a dangling else -- verify config combinations */
	else if (pmem[id].buffered)
		return pgprot_ext_buffered(vma_prot);
#endif
	return vma_prot;
}
456 | ||
457 | static unsigned long pmem_start_addr(int id, struct pmem_data *data) | |
458 | { | |
459 | if (pmem[id].no_allocator) | |
460 | return PMEM_START_ADDR(id, 0); | |
461 | else | |
462 | return PMEM_START_ADDR(id, data->index); | |
463 | ||
464 | } | |
465 | ||
466 | static void *pmem_start_vaddr(int id, struct pmem_data *data) | |
467 | { | |
468 | return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase; | |
469 | } | |
470 | ||
471 | static unsigned long pmem_len(int id, struct pmem_data *data) | |
472 | { | |
473 | if (pmem[id].no_allocator) | |
474 | return data->index; | |
475 | else | |
476 | return PMEM_LEN(id, data->index); | |
477 | } | |
478 | ||
/* Back [offset, offset+len) of @vma with repeated mappings of the
 * device's single garbage page, so revoked or forked mappings fault in
 * harmless data instead of the real pmem contents. */
static int pmem_map_garbage(int id, struct vm_area_struct *vma,
			    struct pmem_data *data, unsigned long offset,
			    unsigned long len)
{
	int i, garbage_pages = len >> PAGE_SHIFT;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE;
	for (i = 0; i < garbage_pages; i++) {
		/* one insert per page; all pfns point at the same page */
		if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE),
				  pmem[id].garbage_pfn))
			return -EAGAIN;
	}
	return 0;
}
493 | ||
494 | static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma, | |
495 | struct pmem_data *data, unsigned long offset, | |
496 | unsigned long len) | |
497 | { | |
498 | int garbage_pages; | |
499 | DLOG("unmap offset %lx len %lx\n", offset, len); | |
500 | ||
501 | BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); | |
502 | ||
503 | garbage_pages = len >> PAGE_SHIFT; | |
504 | zap_page_range(vma, vma->vm_start + offset, len, NULL); | |
505 | pmem_map_garbage(id, vma, data, offset, len); | |
506 | return 0; | |
507 | } | |
508 | ||
/* Map the physical pmem range backing @data into @vma at @offset for
 * @len bytes.  vm_start, vm_end, offset and len must all be page
 * aligned. */
static int pmem_map_pfn_range(int id, struct vm_area_struct *vma,
			      struct pmem_data *data, unsigned long offset,
			      unsigned long len)
{
	DLOG("map offset %lx len %lx\n", offset, len);
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(len));
	BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset));

	if (io_remap_pfn_range(vma, vma->vm_start + offset,
		(pmem_start_addr(id, data) + offset) >> PAGE_SHIFT,
		len, vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
526 | ||
/* Zap existing ptes over [offset, offset+len) and re-establish fresh
 * pmem mappings.  Hold the mmap sem of the vma's mm when calling. */
static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma,
				struct pmem_data *data, unsigned long offset,
				unsigned long len)
{
	BUG_ON(!vma);
	zap_page_range(vma, vma->vm_start + offset, len, NULL);
	return pmem_map_pfn_range(id, vma, data, offset, len);
}
536 | ||
537 | static void pmem_vma_open(struct vm_area_struct *vma) | |
538 | { | |
539 | struct file *file = vma->vm_file; | |
540 | struct pmem_data *data = file->private_data; | |
541 | int id = get_id(file); | |
542 | /* this should never be called as we don't support copying pmem | |
543 | * ranges via fork */ | |
544 | BUG_ON(!has_allocation(file)); | |
545 | down_write(&data->sem); | |
546 | /* remap the garbage pages, forkers don't get access to the data */ | |
547 | pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end); | |
548 | up_write(&data->sem); | |
549 | } | |
550 | ||
/* vma close op: clear the back-pointer from the pmem data and, for a
 * sub-mapped connected file, raise UNSUBMAP to record that the mapping
 * is gone while the mm reference may still be held. */
static void pmem_vma_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct pmem_data *data = file->private_data;

	DLOG("current %u ppid %u file %p count %d\n", current->pid,
	     current->parent->pid, file, file_count(file));
	if (unlikely(!is_pmem_file(file) || !has_allocation(file))) {
		printk(KERN_WARNING "pmem: something is very wrong, you are "
		       "closing a vm backing an allocation that doesn't "
		       "exist!\n");
		return;
	}
	down_write(&data->sem);
	/* only touch state if this vma is the one we recorded at mmap time */
	if (data->vma == vma) {
		data->vma = NULL;
		if ((data->flags & PMEM_FLAGS_CONNECTED) &&
		    (data->flags & PMEM_FLAGS_SUBMAP))
			data->flags |= PMEM_FLAGS_UNSUBMAP;
	}
	/* the kernel is going to free this vma now anyway */
	up_write(&data->sem);
}
574 | ||
/* VMA callbacks for pmem mappings: fork/split goes through
 * pmem_vma_open, unmap through pmem_vma_close */
static struct vm_operations_struct vm_ops = {
	.open = pmem_vma_open,
	.close = pmem_vma_close,
};
579 | ||
/* mmap handler: allocates backing pmem on first map (if needed) and
 * maps it into the vma.  Connected (sub-mapped) files start out fully
 * backed by the garbage page with only their previously remapped
 * regions pointing at real pmem; master files get the whole range. */
static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct pmem_data *data;
	int index;
	unsigned long vma_size = vma->vm_end - vma->vm_start;
	int ret = 0, id = get_id(file);

	if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) {
#if PMEM_DEBUG
		printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned"
		       " and a multiple of pages_size.\n");
#endif
		return -EINVAL;
	}

	data = file->private_data;
	down_write(&data->sem);
	/* check this file isn't already mmaped; for submaps check this file
	 * has never been mmaped */
	if ((data->flags & PMEM_FLAGS_MASTERMAP) ||
	    (data->flags & PMEM_FLAGS_SUBMAP) ||
	    (data->flags & PMEM_FLAGS_UNSUBMAP)) {
#if PMEM_DEBUG
		printk(KERN_ERR "pmem: you can only mmap a pmem file once, "
		       "this file is already mmaped. %x\n", data->flags);
#endif
		ret = -EINVAL;
		goto error;
	}
	/* if file->private_data == unalloced, alloc */
	if (data && data->index == -1) {
		down_write(&pmem[id].bitmap_sem);
		index = pmem_allocate(id, vma->vm_end - vma->vm_start);
		up_write(&pmem[id].bitmap_sem);
		data->index = index;
	}
	/* either no space was available or an error occurred */
	if (!has_allocation(file)) {
		ret = -EINVAL;
		printk("pmem: could not find allocation for map.\n");
		goto error;
	}

	if (pmem_len(id, data) < vma_size) {
#if PMEM_DEBUG
		printk(KERN_WARNING "pmem: mmap size [%lu] does not match"
		       "size of backing region [%lu].\n", vma_size,
		       pmem_len(id, data));
#endif
		ret = -EINVAL;
		goto error;
	}

	vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT;
	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot);

	if (data->flags & PMEM_FLAGS_CONNECTED) {
		struct pmem_region_node *region_node;
		struct list_head *elt;
		/* garbage-back the whole vma, then punch in the real
		 * mappings for each previously remapped region */
		if (pmem_map_garbage(id, vma, data, 0, vma_size)) {
			printk("pmem: mmap failed in kernel!\n");
			ret = -EAGAIN;
			goto error;
		}
		list_for_each(elt, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
						 list);
			DLOG("remapping file: %p %lx %lx\n", file,
			     region_node->region.offset,
			     region_node->region.len);
			if (pmem_remap_pfn_range(id, vma, data,
						 region_node->region.offset,
						 region_node->region.len)) {
				ret = -EAGAIN;
				goto error;
			}
		}
		data->flags |= PMEM_FLAGS_SUBMAP;
		/* pin the task so pmem_lock_data_and_mm can find its mm */
		get_task_struct(current->group_leader);
		data->task = current->group_leader;
		data->vma = vma;
#if PMEM_DEBUG
		data->pid = current->pid;
#endif
		DLOG("submmapped file %p vma %p pid %u\n", file, vma,
		     current->pid);
	} else {
		if (pmem_map_pfn_range(id, vma, data, 0, vma_size)) {
			printk(KERN_INFO "pmem: mmap failed in kernel!\n");
			ret = -EAGAIN;
			goto error;
		}
		data->flags |= PMEM_FLAGS_MASTERMAP;
		data->pid = current->pid;
	}
	vma->vm_ops = &vm_ops;
error:
	up_write(&data->sem);
	return ret;
}
680 | ||
681 | /* the following are the api for accessing pmem regions by other drivers | |
682 | * from inside the kernel */ | |
683 | int get_pmem_user_addr(struct file *file, unsigned long *start, | |
684 | unsigned long *len) | |
685 | { | |
686 | struct pmem_data *data; | |
687 | if (!is_pmem_file(file) || !has_allocation(file)) { | |
688 | #if PMEM_DEBUG | |
689 | printk(KERN_INFO "pmem: requested pmem data from invalid" | |
690 | "file.\n"); | |
691 | #endif | |
692 | return -1; | |
693 | } | |
37157cc4 | 694 | data = file->private_data; |
9b843757 PM |
695 | down_read(&data->sem); |
696 | if (data->vma) { | |
697 | *start = data->vma->vm_start; | |
698 | *len = data->vma->vm_end - data->vma->vm_start; | |
699 | } else { | |
700 | *start = 0; | |
701 | *len = 0; | |
702 | } | |
703 | up_read(&data->sem); | |
704 | return 0; | |
705 | } | |
706 | ||
707 | int get_pmem_addr(struct file *file, unsigned long *start, | |
708 | unsigned long *vstart, unsigned long *len) | |
709 | { | |
710 | struct pmem_data *data; | |
711 | int id; | |
712 | ||
df16b962 | 713 | if (!is_pmem_file(file) || !has_allocation(file)) |
9b843757 | 714 | return -1; |
9b843757 | 715 | |
37157cc4 | 716 | data = file->private_data; |
9b843757 PM |
717 | if (data->index == -1) { |
718 | #if PMEM_DEBUG | |
719 | printk(KERN_INFO "pmem: requested pmem data from file with no " | |
720 | "allocation.\n"); | |
721 | return -1; | |
722 | #endif | |
723 | } | |
724 | id = get_id(file); | |
725 | ||
726 | down_read(&data->sem); | |
727 | *start = pmem_start_addr(id, data); | |
728 | *len = pmem_len(id, data); | |
729 | *vstart = (unsigned long)pmem_start_vaddr(id, data); | |
730 | up_read(&data->sem); | |
731 | #if PMEM_DEBUG | |
732 | down_write(&data->sem); | |
733 | data->ref++; | |
734 | up_write(&data->sem); | |
735 | #endif | |
736 | return 0; | |
737 | } | |
738 | ||
739 | int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, | |
740 | unsigned long *len, struct file **filp) | |
741 | { | |
742 | struct file *file; | |
743 | ||
744 | file = fget(fd); | |
745 | if (unlikely(file == NULL)) { | |
746 | printk(KERN_INFO "pmem: requested data from file descriptor " | |
747 | "that doesn't exist."); | |
748 | return -1; | |
749 | } | |
750 | ||
751 | if (get_pmem_addr(file, start, vstart, len)) | |
752 | goto end; | |
753 | ||
754 | if (filp) | |
755 | *filp = file; | |
756 | return 0; | |
757 | end: | |
758 | fput(file); | |
759 | return -1; | |
760 | } | |
761 | ||
/* Release a reference taken with get_pmem_file().  With PMEM_DEBUG the
 * per-file get/put balance is checked and an unbalanced put BUG()s. */
void put_pmem_file(struct file *file)
{
	struct pmem_data *data;
	int id;

	if (!is_pmem_file(file))
		return;
	id = get_id(file);
	data = file->private_data;
#if PMEM_DEBUG
	down_write(&data->sem);
	if (data->ref == 0) {
		printk("pmem: pmem_put > pmem_get %s (pid %d)\n",
		       pmem[id].dev.name, data->pid);
		BUG();
	}
	data->ref--;
	up_write(&data->sem);
#endif
	fput(file);
}
783 | ||
/* Flush the CPU cache for (part of) a cached pmem file.  For connected
 * files only the remapped region fully containing [offset, offset+len)
 * is flushed; for anything else the whole allocation is flushed.
 * Uncached devices are a no-op. */
void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
{
	struct pmem_data *data;
	int id;
	void *vaddr;
	struct pmem_region_node *region_node;
	struct list_head *elt;
	void *flush_start, *flush_end;

	if (!is_pmem_file(file) || !has_allocation(file))
		return;

	id = get_id(file);
	data = file->private_data;
	/* uncached mappings need no cache maintenance */
	if (!pmem[id].cached)
		return;

	down_read(&data->sem);
	vaddr = pmem_start_vaddr(id, data);
	/* if this isn't a submmapped file, flush the whole thing */
	if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
		dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
		goto end;
	}
	/* otherwise, flush the region of the file we are drawing */
	list_for_each(elt, &data->region_list) {
		region_node = list_entry(elt, struct pmem_region_node, list);
		if ((offset >= region_node->region.offset) &&
		    ((offset + len) <= (region_node->region.offset +
			region_node->region.len))) {
			flush_start = vaddr + region_node->region.offset;
			flush_end = flush_start + region_node->region.len;
			dmac_flush_range(flush_start, flush_end);
			break;
		}
	}
end:
	up_read(&data->sem);
}
823 | ||
/* PMEM_CONNECT backend: make @file a sub-allocation of the pmem file
 * behind fd @connect.  The sub file inherits the master's bitmap index
 * and records the master's fd/file for later ownership checks. */
static int pmem_connect(unsigned long connect, struct file *file)
{
	struct pmem_data *data = file->private_data;
	struct pmem_data *src_data;
	struct file *src_file;
	int ret = 0, put_needed;

	down_write(&data->sem);
	/* retrieve the src file and check it is a pmem file with an alloc */
	src_file = fget_light(connect, &put_needed);
	DLOG("connect %p to %p\n", file, src_file);
	if (!src_file) {
		printk(KERN_INFO "pmem: src file not found!\n");
		ret = -EINVAL;
		goto err_no_file;
	}
	if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) {
		printk(KERN_INFO "pmem: src file is not a pmem file or has no "
		       "alloc!\n");
		ret = -EINVAL;
		goto err_bad_file;
	}
	src_data = src_file->private_data;

	/* a file already carrying a different master's index can't connect */
	if (has_allocation(file) && (data->index != src_data->index)) {
		printk(KERN_INFO "pmem: file is already mapped but doesn't "
		       "match this src_file!\n");
		ret = -EINVAL;
		goto err_bad_file;
	}
	data->index = src_data->index;
	data->flags |= PMEM_FLAGS_CONNECTED;
	/* NOTE(review): master_fd is an int but @connect is unsigned long;
	 * presumably fd values always fit -- verify on 64-bit builds */
	data->master_fd = connect;
	data->master_file = src_file;

err_bad_file:
	fput_light(src_file, put_needed);
err_no_file:
	up_write(&data->sem);
	return ret;
}
865 | ||
866 | static void pmem_unlock_data_and_mm(struct pmem_data *data, | |
867 | struct mm_struct *mm) | |
868 | { | |
869 | up_write(&data->sem); | |
870 | if (mm != NULL) { | |
871 | up_write(&mm->mmap_sem); | |
872 | mmput(mm); | |
873 | } | |
874 | } | |
875 | ||
876 | static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data, | |
877 | struct mm_struct **locked_mm) | |
878 | { | |
879 | int ret = 0; | |
880 | struct mm_struct *mm = NULL; | |
881 | *locked_mm = NULL; | |
882 | lock_mm: | |
883 | down_read(&data->sem); | |
884 | if (PMEM_IS_SUBMAP(data)) { | |
885 | mm = get_task_mm(data->task); | |
886 | if (!mm) { | |
887 | #if PMEM_DEBUG | |
df16b962 | 888 | printk(KERN_DEBUG "pmem: can't remap task is gone!\n"); |
9b843757 PM |
889 | #endif |
890 | up_read(&data->sem); | |
891 | return -1; | |
892 | } | |
893 | } | |
894 | up_read(&data->sem); | |
895 | ||
896 | if (mm) | |
897 | down_write(&mm->mmap_sem); | |
898 | ||
899 | down_write(&data->sem); | |
900 | /* check that the file didn't get mmaped before we could take the | |
901 | * data sem, this should be safe b/c you can only submap each file | |
902 | * once */ | |
903 | if (PMEM_IS_SUBMAP(data) && !mm) { | |
904 | pmem_unlock_data_and_mm(data, mm); | |
905 | up_write(&data->sem); | |
906 | goto lock_mm; | |
907 | } | |
908 | /* now check that vma.mm is still there, it could have been | |
909 | * deleted by vma_close before we could get the data->sem */ | |
910 | if ((data->flags & PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) { | |
911 | /* might as well release this */ | |
912 | if (data->flags & PMEM_FLAGS_SUBMAP) { | |
913 | put_task_struct(data->task); | |
914 | data->task = NULL; | |
915 | /* lower the submap flag to show the mm is gone */ | |
916 | data->flags &= ~(PMEM_FLAGS_SUBMAP); | |
917 | } | |
918 | pmem_unlock_data_and_mm(data, mm); | |
919 | return -1; | |
920 | } | |
921 | *locked_mm = mm; | |
922 | return ret; | |
923 | } | |
924 | ||
925 | int pmem_remap(struct pmem_region *region, struct file *file, | |
926 | unsigned operation) | |
927 | { | |
928 | int ret; | |
929 | struct pmem_region_node *region_node; | |
930 | struct mm_struct *mm = NULL; | |
931 | struct list_head *elt, *elt2; | |
932 | int id = get_id(file); | |
37157cc4 | 933 | struct pmem_data *data = file->private_data; |
9b843757 PM |
934 | |
935 | /* pmem region must be aligned on a page boundry */ | |
936 | if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) || | |
937 | !PMEM_IS_PAGE_ALIGNED(region->len))) { | |
938 | #if PMEM_DEBUG | |
eb450e89 CC |
939 | printk(KERN_DEBUG "pmem: request for unaligned pmem " |
940 | "suballocation %lx %lx\n", region->offset, region->len); | |
9b843757 PM |
941 | #endif |
942 | return -EINVAL; | |
943 | } | |
944 | ||
945 | /* if userspace requests a region of len 0, there's nothing to do */ | |
946 | if (region->len == 0) | |
947 | return 0; | |
948 | ||
949 | /* lock the mm and data */ | |
950 | ret = pmem_lock_data_and_mm(file, data, &mm); | |
951 | if (ret) | |
952 | return 0; | |
953 | ||
954 | /* only the owner of the master file can remap the client fds | |
955 | * that back in it */ | |
956 | if (!is_master_owner(file)) { | |
957 | #if PMEM_DEBUG | |
958 | printk("pmem: remap requested from non-master process\n"); | |
959 | #endif | |
960 | ret = -EINVAL; | |
961 | goto err; | |
962 | } | |
963 | ||
964 | /* check that the requested range is within the src allocation */ | |
965 | if (unlikely((region->offset > pmem_len(id, data)) || | |
966 | (region->len > pmem_len(id, data)) || | |
967 | (region->offset + region->len > pmem_len(id, data)))) { | |
968 | #if PMEM_DEBUG | |
969 | printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n"); | |
970 | #endif | |
971 | ret = -EINVAL; | |
972 | goto err; | |
973 | } | |
974 | ||
975 | if (operation == PMEM_MAP) { | |
976 | region_node = kmalloc(sizeof(struct pmem_region_node), | |
977 | GFP_KERNEL); | |
978 | if (!region_node) { | |
979 | ret = -ENOMEM; | |
980 | #if PMEM_DEBUG | |
981 | printk(KERN_INFO "No space to allocate metadata!"); | |
982 | #endif | |
983 | goto err; | |
984 | } | |
985 | region_node->region = *region; | |
986 | list_add(®ion_node->list, &data->region_list); | |
987 | } else if (operation == PMEM_UNMAP) { | |
988 | int found = 0; | |
989 | list_for_each_safe(elt, elt2, &data->region_list) { | |
990 | region_node = list_entry(elt, struct pmem_region_node, | |
991 | list); | |
992 | if (region->len == 0 || | |
993 | (region_node->region.offset == region->offset && | |
994 | region_node->region.len == region->len)) { | |
995 | list_del(elt); | |
996 | kfree(region_node); | |
997 | found = 1; | |
998 | } | |
999 | } | |
1000 | if (!found) { | |
1001 | #if PMEM_DEBUG | |
1002 | printk("pmem: Unmap region does not map any mapped " | |
1003 | "region!"); | |
1004 | #endif | |
1005 | ret = -EINVAL; | |
1006 | goto err; | |
1007 | } | |
1008 | } | |
1009 | ||
1010 | if (data->vma && PMEM_IS_SUBMAP(data)) { | |
1011 | if (operation == PMEM_MAP) | |
1012 | ret = pmem_remap_pfn_range(id, data->vma, data, | |
1013 | region->offset, region->len); | |
1014 | else if (operation == PMEM_UNMAP) | |
1015 | ret = pmem_unmap_pfn_range(id, data->vma, data, | |
1016 | region->offset, region->len); | |
1017 | } | |
1018 | ||
1019 | err: | |
1020 | pmem_unlock_data_and_mm(data, mm); | |
1021 | return ret; | |
1022 | } | |
1023 | ||
/*
 * pmem_revoke - tear down every region this client file had mapped,
 * called when the master file goes away.  Clears master_file first so
 * the client no longer appears connected, then unmaps all recorded
 * regions and frees their metadata.
 */
static void pmem_revoke(struct file *file, struct pmem_data *data)
{
	struct pmem_region_node *region_node;
	struct list_head *elt, *elt2;
	struct mm_struct *mm = NULL;
	int id = get_id(file);
	int ret = 0;

	data->master_file = NULL;
	ret = pmem_lock_data_and_mm(file, data, &mm);
	/* if lock_data_and_mm fails either the task that mapped the fd, or
	 * the vma that mapped it have already gone away, nothing more
	 * needs to be done */
	if (ret)
		return;
	/* unmap everything */
	/* delete the regions and region list nothing is mapped any more */
	if (data->vma)
		list_for_each_safe(elt, elt2, &data->region_list) {
			region_node = list_entry(elt, struct pmem_region_node,
						 list);
			/* point the pages at the garbage page, not the pmem */
			pmem_unmap_pfn_range(id, data->vma, data,
					     region_node->region.offset,
					     region_node->region.len);
			list_del(elt);
			kfree(region_node);
		}
	/* delete the master file */
	pmem_unlock_data_and_mm(data, mm);
}
1054 | ||
1055 | static void pmem_get_size(struct pmem_region *region, struct file *file) | |
1056 | { | |
37157cc4 | 1057 | struct pmem_data *data = file->private_data; |
9b843757 PM |
1058 | int id = get_id(file); |
1059 | ||
1060 | if (!has_allocation(file)) { | |
1061 | region->offset = 0; | |
1062 | region->len = 0; | |
1063 | return; | |
1064 | } else { | |
1065 | region->offset = pmem_start_addr(id, data); | |
1066 | region->len = pmem_len(id, data); | |
1067 | } | |
1068 | DLOG("offset %lx len %lx\n", region->offset, region->len); | |
1069 | } | |
1070 | ||
1071 | ||
1072 | static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |
1073 | { | |
1074 | struct pmem_data *data; | |
1075 | int id = get_id(file); | |
1076 | ||
1077 | switch (cmd) { | |
1078 | case PMEM_GET_PHYS: | |
1079 | { | |
1080 | struct pmem_region region; | |
1081 | DLOG("get_phys\n"); | |
1082 | if (!has_allocation(file)) { | |
1083 | region.offset = 0; | |
1084 | region.len = 0; | |
1085 | } else { | |
37157cc4 | 1086 | data = file->private_data; |
9b843757 PM |
1087 | region.offset = pmem_start_addr(id, data); |
1088 | region.len = pmem_len(id, data); | |
1089 | } | |
eb450e89 CC |
1090 | printk(KERN_INFO "pmem: request for physical address " |
1091 | "of pmem region from process %d.\n", current->pid); | |
9b843757 PM |
1092 | if (copy_to_user((void __user *)arg, ®ion, |
1093 | sizeof(struct pmem_region))) | |
1094 | return -EFAULT; | |
1095 | break; | |
1096 | } | |
1097 | case PMEM_MAP: | |
1098 | { | |
1099 | struct pmem_region region; | |
1100 | if (copy_from_user(®ion, (void __user *)arg, | |
1101 | sizeof(struct pmem_region))) | |
1102 | return -EFAULT; | |
37157cc4 | 1103 | data = file->private_data; |
9b843757 PM |
1104 | return pmem_remap(®ion, file, PMEM_MAP); |
1105 | } | |
1106 | break; | |
1107 | case PMEM_UNMAP: | |
1108 | { | |
1109 | struct pmem_region region; | |
1110 | if (copy_from_user(®ion, (void __user *)arg, | |
1111 | sizeof(struct pmem_region))) | |
1112 | return -EFAULT; | |
37157cc4 | 1113 | data = file->private_data; |
9b843757 PM |
1114 | return pmem_remap(®ion, file, PMEM_UNMAP); |
1115 | break; | |
1116 | } | |
1117 | case PMEM_GET_SIZE: | |
1118 | { | |
1119 | struct pmem_region region; | |
1120 | DLOG("get_size\n"); | |
1121 | pmem_get_size(®ion, file); | |
1122 | if (copy_to_user((void __user *)arg, ®ion, | |
1123 | sizeof(struct pmem_region))) | |
1124 | return -EFAULT; | |
1125 | break; | |
1126 | } | |
1127 | case PMEM_GET_TOTAL_SIZE: | |
1128 | { | |
1129 | struct pmem_region region; | |
1130 | DLOG("get total size\n"); | |
1131 | region.offset = 0; | |
1132 | get_id(file); | |
1133 | region.len = pmem[id].size; | |
1134 | if (copy_to_user((void __user *)arg, ®ion, | |
1135 | sizeof(struct pmem_region))) | |
1136 | return -EFAULT; | |
1137 | break; | |
1138 | } | |
1139 | case PMEM_ALLOCATE: | |
1140 | { | |
1141 | if (has_allocation(file)) | |
1142 | return -EINVAL; | |
37157cc4 | 1143 | data = file->private_data; |
9b843757 PM |
1144 | data->index = pmem_allocate(id, arg); |
1145 | break; | |
1146 | } | |
1147 | case PMEM_CONNECT: | |
1148 | DLOG("connect\n"); | |
1149 | return pmem_connect(arg, file); | |
1150 | break; | |
1151 | default: | |
1152 | if (pmem[id].ioctl) | |
1153 | return pmem[id].ioctl(file, cmd, arg); | |
1154 | return -EINVAL; | |
1155 | } | |
1156 | return 0; | |
1157 | } | |
1158 | ||
1159 | #if PMEM_DEBUG | |
1160 | static ssize_t debug_open(struct inode *inode, struct file *file) | |
1161 | { | |
1162 | file->private_data = inode->i_private; | |
1163 | return 0; | |
1164 | } | |
1165 | ||
1166 | static ssize_t debug_read(struct file *file, char __user *buf, size_t count, | |
1167 | loff_t *ppos) | |
1168 | { | |
1169 | struct list_head *elt, *elt2; | |
1170 | struct pmem_data *data; | |
1171 | struct pmem_region_node *region_node; | |
1172 | int id = (int)file->private_data; | |
1173 | const int debug_bufmax = 4096; | |
1174 | static char buffer[4096]; | |
1175 | int n = 0; | |
1176 | ||
1177 | DLOG("debug open\n"); | |
1178 | n = scnprintf(buffer, debug_bufmax, | |
1179 | "pid #: mapped regions (offset, len) (offset,len)...\n"); | |
1180 | ||
1181 | down(&pmem[id].data_list_sem); | |
1182 | list_for_each(elt, &pmem[id].data_list) { | |
1183 | data = list_entry(elt, struct pmem_data, list); | |
1184 | down_read(&data->sem); | |
1185 | n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:", | |
1186 | data->pid); | |
1187 | list_for_each(elt2, &data->region_list) { | |
1188 | region_node = list_entry(elt2, struct pmem_region_node, | |
1189 | list); | |
1190 | n += scnprintf(buffer + n, debug_bufmax - n, | |
1191 | "(%lx,%lx) ", | |
1192 | region_node->region.offset, | |
1193 | region_node->region.len); | |
1194 | } | |
1195 | n += scnprintf(buffer + n, debug_bufmax - n, "\n"); | |
1196 | up_read(&data->sem); | |
1197 | } | |
1198 | up(&pmem[id].data_list_sem); | |
1199 | ||
1200 | n++; | |
1201 | buffer[n] = 0; | |
1202 | return simple_read_from_buffer(buf, count, ppos, buffer, n); | |
1203 | } | |
1204 | ||
1205 | static struct file_operations debug_fops = { | |
1206 | .read = debug_read, | |
1207 | .open = debug_open, | |
6038f373 | 1208 | .llseek = default_llseek, |
9b843757 PM |
1209 | }; |
1210 | #endif | |
1211 | ||
/* Dead code: pmem devices are registered per-region in pmem_setup()
 * with their own miscdevice; this single static device is unused.
 * Candidate for deletion. */
#if 0
static struct miscdevice pmem_dev = {
	.name = "pmem",
	.fops = &pmem_fops,
};
#endif
1218 | ||
1219 | int pmem_setup(struct android_pmem_platform_data *pdata, | |
1220 | long (*ioctl)(struct file *, unsigned int, unsigned long), | |
1221 | int (*release)(struct inode *, struct file *)) | |
1222 | { | |
1223 | int err = 0; | |
1224 | int i, index = 0; | |
1225 | int id = id_count; | |
1226 | id_count++; | |
1227 | ||
1228 | pmem[id].no_allocator = pdata->no_allocator; | |
1229 | pmem[id].cached = pdata->cached; | |
1230 | pmem[id].buffered = pdata->buffered; | |
1231 | pmem[id].base = pdata->start; | |
1232 | pmem[id].size = pdata->size; | |
1233 | pmem[id].ioctl = ioctl; | |
1234 | pmem[id].release = release; | |
1235 | init_rwsem(&pmem[id].bitmap_sem); | |
1236 | init_MUTEX(&pmem[id].data_list_sem); | |
1237 | INIT_LIST_HEAD(&pmem[id].data_list); | |
1238 | pmem[id].dev.name = pdata->name; | |
1239 | pmem[id].dev.minor = id; | |
1240 | pmem[id].dev.fops = &pmem_fops; | |
1241 | printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached); | |
1242 | ||
1243 | err = misc_register(&pmem[id].dev); | |
1244 | if (err) { | |
1245 | printk(KERN_ALERT "Unable to register pmem driver!\n"); | |
1246 | goto err_cant_register_device; | |
1247 | } | |
1248 | pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC; | |
1249 | ||
7a6cb0d5 | 1250 | pmem[id].bitmap = kcalloc(pmem[id].num_entries, |
9b843757 PM |
1251 | sizeof(struct pmem_bits), GFP_KERNEL); |
1252 | if (!pmem[id].bitmap) | |
1253 | goto err_no_mem_for_metadata; | |
1254 | ||
9b843757 PM |
1255 | for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) { |
1256 | if ((pmem[id].num_entries) & 1<<i) { | |
1257 | PMEM_ORDER(id, index) = i; | |
1258 | index = PMEM_NEXT_INDEX(id, index); | |
1259 | } | |
1260 | } | |
1261 | ||
1262 | if (pmem[id].cached) | |
1263 | pmem[id].vbase = ioremap_cached(pmem[id].base, | |
1264 | pmem[id].size); | |
1265 | #ifdef ioremap_ext_buffered | |
1266 | else if (pmem[id].buffered) | |
1267 | pmem[id].vbase = ioremap_ext_buffered(pmem[id].base, | |
1268 | pmem[id].size); | |
1269 | #endif | |
1270 | else | |
1271 | pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size); | |
1272 | ||
1273 | if (pmem[id].vbase == 0) | |
1274 | goto error_cant_remap; | |
1275 | ||
1276 | pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL)); | |
1277 | if (pmem[id].no_allocator) | |
1278 | pmem[id].allocated = 0; | |
1279 | ||
1280 | #if PMEM_DEBUG | |
1281 | debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id, | |
1282 | &debug_fops); | |
1283 | #endif | |
1284 | return 0; | |
1285 | error_cant_remap: | |
1286 | kfree(pmem[id].bitmap); | |
1287 | err_no_mem_for_metadata: | |
1288 | misc_deregister(&pmem[id].dev); | |
1289 | err_cant_register_device: | |
1290 | return -1; | |
1291 | } | |
1292 | ||
1293 | static int pmem_probe(struct platform_device *pdev) | |
1294 | { | |
1295 | struct android_pmem_platform_data *pdata; | |
1296 | ||
1297 | if (!pdev || !pdev->dev.platform_data) { | |
1298 | printk(KERN_ALERT "Unable to probe pmem!\n"); | |
1299 | return -1; | |
1300 | } | |
1301 | pdata = pdev->dev.platform_data; | |
1302 | return pmem_setup(pdata, NULL, NULL); | |
1303 | } | |
1304 | ||
1305 | ||
/*
 * pmem_remove - platform_driver remove: free the garbage page and
 * deregister the misc device.
 * NOTE(review): indexes pmem[] by pdev->id, while pmem_setup() assigns
 * slots from the global id_count -- confirm the platform device ids
 * match the registration order.
 */
static int pmem_remove(struct platform_device *pdev)
{
	int id = pdev->id;
	__free_page(pfn_to_page(pmem[id].garbage_pfn));
	misc_deregister(&pmem[id].dev);
	return 0;
}
1313 | ||
/* Platform driver binding for devices named "android_pmem". */
static struct platform_driver pmem_driver = {
	.probe = pmem_probe,
	.remove = pmem_remove,
	.driver = { .name = "android_pmem" }
};
1319 | ||
1320 | ||
/* Module init: register the platform driver; probe runs per device. */
static int __init pmem_init(void)
{
	return platform_driver_register(&pmem_driver);
}
1325 | ||
/* Module exit: unregister the driver; remove runs per bound device. */
static void __exit pmem_exit(void)
{
	platform_driver_unregister(&pmem_driver);
}
1330 | ||
/* Standard module entry/exit registration. */
module_init(pmem_init);
module_exit(pmem_exit);
1333 |