/* drivers/staging/android/ashmem.c
 *
 * Anonymous Shared Memory Subsystem, ashmem
 *
 * Copyright (C) 2008 Google, Inc.
 *
 * Robert Love <rlove@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt) "ashmem: " fmt

#include <linux/module.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/falloc.h>
#include <linux/miscdevice.h>
#include <linux/security.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <linux/personality.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include "ashmem.h"

#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/*
 * ashmem_area - anonymous shared memory area
 * Lifecycle: From our parent file's open() until its release()
 * Locking: Protected by `ashmem_mutex'
 * Big Note: Mappings do NOT pin this structure; it dies on close()
 */
struct ashmem_area {
	char name[ASHMEM_FULL_NAME_LEN]; /* optional name in /proc/pid/maps */
	struct list_head unpinned_list;	 /* list of all ashmem areas */
	struct file *file;		 /* the shmem-based backing file */
	size_t size;			 /* size of the mapping, in bytes */
	unsigned long prot_mask;	 /* allowed prot bits, as vm_flags */
};
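
/*
 * Typical lifetime of an ashmem_area, sketched from the handlers below
 * (an editor's illustration, not authoritative documentation): userspace
 * open()s /dev/ashmem, optionally names and sizes the region via the
 * ASHMEM_SET_NAME/ASHMEM_SET_SIZE ioctls, then mmap()s it; the backing
 * shmem file is allocated lazily on first mmap(). The area itself is
 * freed in ashmem_release(), even if mappings of the backing file (which
 * hold their own reference via get_file()) are still alive.
 */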

/*
 * ashmem_range - represents an interval of unpinned (evictable) pages
 * Lifecycle: From unpin to pin
 * Locking: Protected by `ashmem_mutex'
 */
struct ashmem_range {
	struct list_head lru;		/* entry in LRU list */
	struct list_head unpinned;	/* entry in its area's unpinned list */
	struct ashmem_area *asma;	/* associated area */
	size_t pgstart;			/* starting page, inclusive */
	size_t pgend;			/* ending page, inclusive */
	unsigned int purged;	/* ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED */
};

/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);

/* Count of pages on our LRU list, protected by ashmem_mutex */
static unsigned long lru_count;

/*
 * ashmem_mutex - protects the list of and each individual ashmem_area
 *
 * Lock Ordering: ashmem_mutex -> i_mutex -> i_alloc_sem
 */
static DEFINE_MUTEX(ashmem_mutex);

static struct kmem_cache *ashmem_area_cachep __read_mostly;
static struct kmem_cache *ashmem_range_cachep __read_mostly;
#define range_size(range) \
	((range)->pgend - (range)->pgstart + 1)

#define range_on_lru(range) \
	((range)->purged == ASHMEM_NOT_PURGED)

#define page_range_subsumes_range(range, start, end) \
	(((range)->pgstart >= (start)) && ((range)->pgend <= (end)))

#define page_range_subsumed_by_range(range, start, end) \
	(((range)->pgstart <= (start)) && ((range)->pgend >= (end)))

#define page_in_range(range, page) \
	(((range)->pgstart <= (page)) && ((range)->pgend >= (page)))

#define page_range_in_range(range, start, end) \
	(page_in_range(range, start) || page_in_range(range, end) || \
		page_range_subsumes_range(range, start, end))

#define range_before_page(range, page) \
	((range)->pgend < (page))
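
/*
 * Worked example for the interval helpers above (editor's illustration):
 * for a range spanning pages [2, 5],
 *	page_range_subsumes_range(range, 1, 6)    -> true ([2,5] inside [1,6])
 *	page_range_subsumed_by_range(range, 3, 4) -> true ([3,4] inside [2,5])
 *	page_range_in_range(range, 5, 9)          -> true (overlap at page 5)
 *	range_before_page(range, 6)               -> true (range ends at 5)
 * All endpoints are inclusive page indices.
 */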

#define PROT_MASK		(PROT_EXEC | PROT_READ | PROT_WRITE)

static inline void lru_add(struct ashmem_range *range)
{
	list_add_tail(&range->lru, &ashmem_lru_list);
	lru_count += range_size(range);
}

static inline void lru_del(struct ashmem_range *range)
{
	list_del(&range->lru);
	lru_count -= range_size(range);
}

/*
 * range_alloc - allocate and initialize a new ashmem_range structure
 *
 * 'asma' - associated ashmem_area
 * 'prev_range' - the previous ashmem_range in the sorted asma->unpinned list
 * 'purged' - initial purge value (ASHMEM_NOT_PURGED or ASHMEM_WAS_PURGED)
 * 'start' - starting page, inclusive
 * 'end' - ending page, inclusive
 *
 * Caller must hold ashmem_mutex.
 */
static int range_alloc(struct ashmem_area *asma,
		       struct ashmem_range *prev_range, unsigned int purged,
		       size_t start, size_t end)
{
	struct ashmem_range *range;

	range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
	if (unlikely(!range))
		return -ENOMEM;

	range->asma = asma;
	range->pgstart = start;
	range->pgend = end;
	range->purged = purged;

	list_add_tail(&range->unpinned, &prev_range->unpinned);

	if (range_on_lru(range))
		lru_add(range);

	return 0;
}

static void range_del(struct ashmem_range *range)
{
	list_del(&range->unpinned);
	if (range_on_lru(range))
		lru_del(range);
	kmem_cache_free(ashmem_range_cachep, range);
}

/*
 * range_shrink - shrinks a range
 *
 * Caller must hold ashmem_mutex.
 */
static inline void range_shrink(struct ashmem_range *range,
				size_t start, size_t end)
{
	size_t pre = range_size(range);

	range->pgstart = start;
	range->pgend = end;

	if (range_on_lru(range))
		lru_count -= pre - range_size(range);
}
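
/*
 * Accounting example (editor's illustration): shrinking an unpinned range
 * [0, 9] down to [0, 3] takes it from 10 pages to 4, so lru_count drops
 * by pre - range_size(range) = 10 - 4 = 6 evictable pages.
 */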

static int ashmem_open(struct inode *inode, struct file *file)
{
	struct ashmem_area *asma;
	int ret;

	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

	asma = kmem_cache_zalloc(ashmem_area_cachep, GFP_KERNEL);
	if (unlikely(!asma))
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

	return 0;
}

static int ashmem_release(struct inode *ignored, struct file *file)
{
	struct ashmem_area *asma = file->private_data;
	struct ashmem_range *range, *next;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned)
		range_del(range);
	mutex_unlock(&ashmem_mutex);

	if (asma->file)
		fput(asma->file);
	kmem_cache_free(ashmem_area_cachep, asma);

	return 0;
}

static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0)
		goto out;

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret < 0)
		goto out;

	/* Update backing file pos, since f_ops->read() doesn't */
	asma->file->f_pos = *pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0)
		goto out;

	/* Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_MAYREAD) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_MAYEXEC);
}

static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* user needs to SET_SIZE before mapping */
	if (unlikely(!asma->size)) {
		ret = -EINVAL;
		goto out;
	}

	/* requested protection bits must match our allowed protection mask */
	if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) &
		     calc_vm_prot_bits(PROT_MASK))) {
		ret = -EPERM;
		goto out;
	}
	vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask);

	if (!asma->file) {
		char *name = ASHMEM_NAME_DEF;
		struct file *vmfile;

		if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0')
			name = asma->name;

		/* ... and allocate the backing shmem file */
		vmfile = shmem_file_setup(name, asma->size, vma->vm_flags);
		if (unlikely(IS_ERR(vmfile))) {
			ret = PTR_ERR(vmfile);
			goto out;
		}
		asma->file = vmfile;
	}
	get_file(asma->file);

	/*
	 * XXX - Reworked to use shmem_zero_setup() instead of
	 * shmem_set_file while we're in staging. -jstultz
	 */
	if (vma->vm_flags & VM_SHARED) {
		ret = shmem_zero_setup(vma);
		if (ret) {
			fput(asma->file);
			goto out;
		}
	}

	if (vma->vm_file)
		fput(vma->vm_file);
	vma->vm_file = asma->file;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
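
/*
 * Userspace usage sketch (editor's illustration; error handling and the
 * exact header, <linux/ashmem.h> on Android, are assumptions):
 *
 *	int fd = open("/dev/ashmem", O_RDWR);
 *	ioctl(fd, ASHMEM_SET_NAME, "my-region");
 *	ioctl(fd, ASHMEM_SET_SIZE, 4096);
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 *
 * The fd can then be passed over a unix socket (or Android's binder) so
 * another process can mmap() the same region.
 */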

/*
 * ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
 *
 * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
 * many objects (pages) we have in total.
 *
 * 'gfp_mask' is the mask of the allocation that got us into this mess.
 *
 * Return value is the number of objects (pages) remaining, or -1 if we cannot
 * proceed without risk of deadlock (due to gfp_mask).
 *
 * We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
 * chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
 * pages freed.
 */
static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
{
	struct ashmem_range *range, *next;

	/* We might recurse into filesystem code, so bail out if necessary */
	if (sc->nr_to_scan && !(sc->gfp_mask & __GFP_FS))
		return -1;
	if (!sc->nr_to_scan)
		return lru_count;

	mutex_lock(&ashmem_mutex);
	list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
		loff_t start = range->pgstart * PAGE_SIZE;
		loff_t end = (range->pgend + 1) * PAGE_SIZE;

		do_fallocate(range->asma->file,
			     FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			     start, end - start);
		range->purged = ASHMEM_WAS_PURGED;
		lru_del(range);

		sc->nr_to_scan -= range_size(range);
		if (sc->nr_to_scan <= 0)
			break;
	}
	mutex_unlock(&ashmem_mutex);

	return lru_count;
}
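
/*
 * Calling-convention example (editor's illustration): the VM first calls
 * with nr_to_scan == 0 to ask how many pages are reclaimable, then calls
 * again with a real count. ASHMEM_PURGE_ALL_CACHES below drives the same
 * two-step sequence by hand:
 *
 *	struct shrink_control sc = { .gfp_mask = GFP_KERNEL,
 *				     .nr_to_scan = 0 };
 *	long total = ashmem_shrink(&ashmem_shrinker, &sc);	// query
 *	sc.nr_to_scan = total;
 *	ashmem_shrink(&ashmem_shrinker, &sc);			// purge all
 */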

static struct shrinker ashmem_shrinker = {
	.shrink = ashmem_shrink,
	.seeks = DEFAULT_SEEKS * 4,
};

static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* the user can only remove, not add, protection bits */
	if (unlikely((asma->prot_mask & prot) != prot)) {
		ret = -EINVAL;
		goto out;
	}

	/* does the application expect PROT_READ to imply PROT_EXEC? */
	if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
		prot |= PROT_EXEC;

	asma->prot_mask = prot;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
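
/*
 * Example (editor's illustration): a region starts with the full
 * PROT_MASK (read/write/exec). ASHMEM_SET_PROT_MASK with PROT_READ
 * succeeds and makes later writable mmap() attempts fail with -EPERM;
 * a subsequent call asking for PROT_READ | PROT_WRITE fails with
 * -EINVAL, since bits can only ever be dropped, never re-granted.
 */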

static int set_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* cannot change an existing mapping's name */
	if (unlikely(asma->file)) {
		ret = -EINVAL;
		goto out;
	}

	if (unlikely(copy_from_user(asma->name + ASHMEM_NAME_PREFIX_LEN,
				    name, ASHMEM_NAME_LEN)))
		ret = -EFAULT;
	asma->name[ASHMEM_FULL_NAME_LEN - 1] = '\0';

out:
	mutex_unlock(&ashmem_mutex);

	return ret;
}

static int get_name(struct ashmem_area *asma, void __user *name)
{
	int ret = 0;

	mutex_lock(&ashmem_mutex);
	if (asma->name[ASHMEM_NAME_PREFIX_LEN] != '\0') {
		size_t len;

		/*
		 * Copying only `len', instead of ASHMEM_NAME_LEN, bytes
		 * prevents us from revealing one user's stack to another.
		 */
		len = strlen(asma->name + ASHMEM_NAME_PREFIX_LEN) + 1;
		if (unlikely(copy_to_user(name,
				asma->name + ASHMEM_NAME_PREFIX_LEN, len)))
			ret = -EFAULT;
	} else {
		if (unlikely(copy_to_user(name, ASHMEM_NAME_DEF,
					  sizeof(ASHMEM_NAME_DEF))))
			ret = -EFAULT;
	}
	mutex_unlock(&ashmem_mutex);

	return ret;
}

/*
 * ashmem_pin - pin the given ashmem region, returning whether it was
 * previously purged (ASHMEM_WAS_PURGED) or not (ASHMEM_NOT_PURGED).
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	int ret = ASHMEM_NOT_PURGED;

	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* moved past last applicable page; we can short circuit */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to pin pages that span multiple ranges,
		 * or to pin pages that aren't even unpinned, so this is messy.
		 *
		 * Four cases:
		 * 1. The requested range subsumes an existing range, so we
		 *    just remove the entire matching range.
		 * 2. The requested range overlaps the start of an existing
		 *    range, so we just update that range.
		 * 3. The requested range overlaps the end of an existing
		 *    range, so we just update that range.
		 * 4. The requested range punches a hole in an existing range,
		 *    so we have to update one side of the range and then
		 *    create a new range for the other side.
		 */
		if (page_range_in_range(range, pgstart, pgend)) {
			ret |= range->purged;

			/* Case #1: Easy. Just nuke the whole thing. */
			if (page_range_subsumes_range(range, pgstart, pgend)) {
				range_del(range);
				continue;
			}

			/* Case #2: We overlap from the start, so adjust it */
			if (range->pgstart >= pgstart) {
				range_shrink(range, pgend + 1, range->pgend);
				continue;
			}

			/* Case #3: We overlap from the rear, so adjust it */
			if (range->pgend <= pgend) {
				range_shrink(range, range->pgstart,
					     pgstart - 1);
				continue;
			}

			/*
			 * Case #4: We eat a chunk out of the middle. A bit
			 * more complicated, we allocate a new range for the
			 * second half and adjust the first chunk's endpoint.
			 */
			range_alloc(asma, range, range->purged,
				    pgend + 1, range->pgend);
			range_shrink(range, range->pgstart, pgstart - 1);
			break;
		}
	}

	return ret;
}
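
/*
 * Worked example of case #4 (editor's illustration): with one unpinned
 * range covering pages [10, 20], pinning [12, 15] first allocates a new
 * unpinned range for the tail, [16, 20], then shrinks the original to
 * the head, [10, 11] -- the pinned hole [12, 15] leaves the list.
 */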

/*
 * ashmem_unpin - unpin the given range of pages. Returns zero on success.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
{
	struct ashmem_range *range, *next;
	unsigned int purged = ASHMEM_NOT_PURGED;

restart:
	list_for_each_entry_safe(range, next, &asma->unpinned_list, unpinned) {
		/* short circuit: this is our insertion point */
		if (range_before_page(range, pgstart))
			break;

		/*
		 * The user can ask us to unpin pages that are already entirely
		 * or partially pinned. We handle those two cases here.
		 */
		if (page_range_subsumed_by_range(range, pgstart, pgend))
			return 0;
		if (page_range_in_range(range, pgstart, pgend)) {
			pgstart = min_t(size_t, range->pgstart, pgstart);
			pgend = max_t(size_t, range->pgend, pgend);
			purged |= range->purged;
			range_del(range);
			goto restart;
		}
	}

	return range_alloc(asma, range, purged, pgstart, pgend);
}
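
/*
 * Coalescing example (editor's illustration): unpinning [8, 12] when
 * [5, 9] and [11, 14] are already unpinned widens the request to [5, 14],
 * deletes both old ranges, and inserts the single merged range -- the
 * unpinned list thus stays sorted and non-overlapping.
 */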

/*
 * ashmem_get_pin_status - Returns ASHMEM_IS_UNPINNED if _any_ pages in the
 * given interval are unpinned and ASHMEM_IS_PINNED otherwise.
 *
 * Caller must hold ashmem_mutex.
 */
static int ashmem_get_pin_status(struct ashmem_area *asma, size_t pgstart,
				 size_t pgend)
{
	struct ashmem_range *range;
	int ret = ASHMEM_IS_PINNED;

	list_for_each_entry(range, &asma->unpinned_list, unpinned) {
		if (range_before_page(range, pgstart))
			break;
		if (page_range_in_range(range, pgstart, pgend)) {
			ret = ASHMEM_IS_UNPINNED;
			break;
		}
	}

	return ret;
}

static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
			    void __user *p)
{
	struct ashmem_pin pin;
	size_t pgstart, pgend;
	int ret = -EINVAL;

	if (unlikely(!asma->file))
		return -EINVAL;

	if (unlikely(copy_from_user(&pin, p, sizeof(pin))))
		return -EFAULT;

	/* per custom, you can pass zero for len to mean "everything onward" */
	if (!pin.len)
		pin.len = PAGE_ALIGN(asma->size) - pin.offset;

	if (unlikely((pin.offset | pin.len) & ~PAGE_MASK))
		return -EINVAL;

	if (unlikely(((__u32)-1) - pin.offset < pin.len))
		return -EINVAL;

	if (unlikely(PAGE_ALIGN(asma->size) < pin.offset + pin.len))
		return -EINVAL;

	pgstart = pin.offset / PAGE_SIZE;
	pgend = pgstart + (pin.len / PAGE_SIZE) - 1;

	mutex_lock(&ashmem_mutex);

	switch (cmd) {
	case ASHMEM_PIN:
		ret = ashmem_pin(asma, pgstart, pgend);
		break;
	case ASHMEM_UNPIN:
		ret = ashmem_unpin(asma, pgstart, pgend);
		break;
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_get_pin_status(asma, pgstart, pgend);
		break;
	}

	mutex_unlock(&ashmem_mutex);

	return ret;
}
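
/*
 * Userspace sketch of the pin protocol (editor's illustration; the struct
 * and ioctls are from the ashmem UAPI, the rest is assumed):
 *
 *	struct ashmem_pin pin = { .offset = 0, .len = 0 };  // whole region
 *	ioctl(fd, ASHMEM_UNPIN, &pin);	// pages now reclaimable under pressure
 *	...
 *	if (ioctl(fd, ASHMEM_PIN, &pin) == ASHMEM_WAS_PURGED)
 *		regenerate_contents();	// hypothetical helper
 *
 * offset and len must be page-aligned; a len of zero means "from offset
 * to the end of the region".
 */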

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
	long ret = -ENOTTY;

	switch (cmd) {
	case ASHMEM_SET_NAME:
		ret = set_name(asma, (void __user *)arg);
		break;
	case ASHMEM_GET_NAME:
		ret = get_name(asma, (void __user *)arg);
		break;
	case ASHMEM_SET_SIZE:
		ret = -EINVAL;
		if (!asma->file) {
			ret = 0;
			asma->size = (size_t)arg;
		}
		break;
	case ASHMEM_GET_SIZE:
		ret = asma->size;
		break;
	case ASHMEM_SET_PROT_MASK:
		ret = set_prot_mask(asma, arg);
		break;
	case ASHMEM_GET_PROT_MASK:
		ret = asma->prot_mask;
		break;
	case ASHMEM_PIN:
	case ASHMEM_UNPIN:
	case ASHMEM_GET_PIN_STATUS:
		ret = ashmem_pin_unpin(asma, cmd, (void __user *)arg);
		break;
	case ASHMEM_PURGE_ALL_CACHES:
		ret = -EPERM;
		if (capable(CAP_SYS_ADMIN)) {
			struct shrink_control sc = {
				.gfp_mask = GFP_KERNEL,
				.nr_to_scan = 0,
			};
			ret = ashmem_shrink(&ashmem_shrinker, &sc);
			sc.nr_to_scan = ret;
			ashmem_shrink(&ashmem_shrinker, &sc);
		}
		break;
	}

	return ret;
}

static const struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
	.compat_ioctl = ashmem_ioctl,
};

static struct miscdevice ashmem_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ashmem",
	.fops = &ashmem_fops,
};

static int __init ashmem_init(void)
{
	int ret;

	ashmem_area_cachep = kmem_cache_create("ashmem_area_cache",
					       sizeof(struct ashmem_area),
					       0, 0, NULL);
	if (unlikely(!ashmem_area_cachep)) {
		pr_err("failed to create slab cache\n");
		return -ENOMEM;
	}

	ashmem_range_cachep = kmem_cache_create("ashmem_range_cache",
						sizeof(struct ashmem_range),
						0, 0, NULL);
	if (unlikely(!ashmem_range_cachep)) {
		pr_err("failed to create slab cache\n");
		ret = -ENOMEM;
		goto out_free_area_cache;
	}

	ret = misc_register(&ashmem_misc);
	if (unlikely(ret)) {
		pr_err("failed to register misc device!\n");
		goto out_free_range_cache;
	}

	register_shrinker(&ashmem_shrinker);

	pr_info("initialized\n");

	return 0;

	/* don't leak the earlier caches on a failed init */
out_free_range_cache:
	kmem_cache_destroy(ashmem_range_cachep);
out_free_area_cache:
	kmem_cache_destroy(ashmem_area_cachep);
	return ret;
}

static void __exit ashmem_exit(void)
{
	int ret;

	unregister_shrinker(&ashmem_shrinker);

	ret = misc_deregister(&ashmem_misc);
	if (unlikely(ret))
		pr_err("failed to unregister misc device!\n");

	kmem_cache_destroy(ashmem_range_cachep);
	kmem_cache_destroy(ashmem_area_cachep);

	pr_info("unloaded\n");
}

module_init(ashmem_init);
module_exit(ashmem_exit);

MODULE_LICENSE("GPL");