/*
 * linux/drivers/char/mem.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Added devfs support.
 *   Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/module.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR 4

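/*
 * Return the number of bytes that can be copied starting at 'start' without
 * crossing a page boundary, capped at 'size'.  Callers use this to split
 * transfers into page-sized (or smaller) chunks.
 */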
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

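/*
 * Fallback sanity checks, used when the architecture does not provide its
 * own: reads and writes of /dev/mem must fall entirely below high_memory,
 * and any pfn may be mmap()ed.  Architectures that define
 * ARCH_HAS_VALID_PHYS_ADDR_RANGE supply stricter versions of both helpers.
 */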
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif
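/*
 * With CONFIG_STRICT_DEVMEM, page_is_allowed() returns whatever
 * devmem_is_allowed() reports for the pfn: 0 denies access outright,
 * 1 permits normal access, and 2 means the page is restricted but reads
 * should succeed and see zeroes while writes are silently dropped (see
 * read_mem() and write_mem() below).  Without CONFIG_STRICT_DEVMEM
 * everything is allowed.
 */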

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
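/*
 * Typical userspace usage (illustrative sketch only): the file offset
 * selects the physical address, e.g.
 *
 *      int fd = open("/dev/mem", O_RDONLY);
 *      lseek(fd, phys_addr, SEEK_SET);
 *      read(fd, buf, len);
 *
 * subject to the valid_phys_addr_range() and page_is_allowed() checks below.
 */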
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                return -EFAULT;

                        remaining = copy_to_user(buf, ptr, sz);

                        unxlate_dev_mem_ptr(p, ptr);
                }

                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (secure_modules())
                return -EPERM;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

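        /*
         * Copy page-at-a-time.  If a later chunk faults after some bytes
         * have already been written, return the short count (like a normal
         * write()) rather than -EFAULT.
         */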
        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_DSYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

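/*
 * mmap() of /dev/mem maps physical memory directly into userspace.  The
 * (page-aligned) mmap offset is the physical address to map, which the VM
 * turns into vma->vm_pgoff; an illustrative sketch of mapping the page at
 * physical address 'phys':
 *
 *      mmap(NULL, PAGE_SIZE, PROT_READ, MAP_SHARED, fd, phys);
 *
 * subject to the range and permission checks below.
 */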
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                          &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
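/*
 * Low memory (below high_memory) is copied straight out of the direct
 * mapping; vmalloc and module space is read page-at-a-time through vread()
 * into a bounce page, since those addresses may not be physically
 * contiguous.
 */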
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}


static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (!pfn_valid(PFN_DOWN(p)))
                return -EIO;
        if (secure_modules())
                return -EPERM;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}

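/*
 * /dev/port gives byte access to the legacy I/O port space: the file offset
 * selects the port number and each byte transferred performs an inb() or
 * outb().
 */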
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (secure_modules())
                return -EPERM;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

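/*
 * Shared mappings of /dev/zero are backed by a shmem object, so they act
 * like anonymous shared memory; private mappings need no backing at all,
 * and their faults are satisfied as ordinary anonymous pages.
 */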
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                        unsigned long addr, unsigned long len,
                        unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
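                /* fall through: SEEK_CUR reuses the SEEK_SET range check */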
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek = memory_lseek,
        .read = read_mem,
        .write = write_mem,
        .mmap = mmap_mem,
        .open = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek = memory_lseek,
        .read = read_kmem,
        .write = write_kmem,
        .mmap = mmap_kmem,
        .open = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek = null_lseek,
        .read = read_null,
        .write = write_null,
        .read_iter = read_iter_null,
        .write_iter = write_iter_null,
        .splice_write = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek = memory_lseek,
        .read = read_port,
        .write = write_port,
        .open = open_port,
};

static const struct file_operations zero_fops = {
        .llseek = zero_lseek,
        .write = write_zero,
        .read_iter = read_iter_zero,
        .write_iter = write_iter_zero,
        .mmap = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek = full_lseek,
        .read_iter = read_iter_zero,
        .write = write_full,
};

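/*
 * Table of memory-class character devices: the array index is the minor
 * number under MEM_MAJOR.  Entries compiled out (and the unused minors)
 * leave a hole with a NULL fops pointer, which memory_open() rejects with
 * -ENXIO.
 */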
static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);