/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

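/*
 * Return how many of the next @size bytes can be copied without crossing
 * the page boundary that follows @start, i.e. the remainder of @start's
 * page clamped to @size.  With 4 KiB pages, start = 0x1ff0 and size = 64
 * gives 16.
 */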
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

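/*
 * Fallbacks for architectures that don't supply their own physical
 * address range checks: reads and writes must stay below high_memory,
 * and any pfn may be mmap()ed.
 */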
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

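/*
 * With CONFIG_STRICT_DEVMEM, devmem_is_allowed() arbitrates access per
 * page frame: 0 denies the access, 1 allows it, and (on architectures
 * that support it) 2 allows the access but reads back zeroes and
 * discards writes.  read_mem() and write_mem() below act on that value.
 */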
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

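/*
 * Default no-op: architectures whose xlate_dev_mem_ptr() sets up a
 * temporary mapping override this to tear it down again.
 */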
#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
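/*
 * Illustrative userspace sketch (not part of this file): dumping part of
 * the legacy BIOS area through /dev/mem, assuming STRICT_DEVMEM permits
 * that range:
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	lseek(fd, 0xF0000, SEEK_SET);
 *	read(fd, buf, 256);
 */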
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                return -EFAULT;

                        remaining = copy_to_user(buf, ptr, sz);

                        unxlate_dev_mem_ptr(p, ptr);
                }

                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

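/*
 * Write to *physical* memory; the counterpart of read_mem().  Refused
 * outright when the kernel is locked down.
 */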
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (kernel_is_locked_down())
                return -EPERM;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top of what the kernel knows about,
         * or through a file pointer that was marked O_DSYNC, will be
         * done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

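/*
 * Illustrative userspace sketch (not part of this file): mapping one
 * page of device registers through /dev/mem.  PHYS_BASE is a placeholder
 * for a real, page-aligned physical address:
 *
 *	void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, PHYS_BASE);
 */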
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                          &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}

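/*
 * Helper for write_kmem(): copy into the directly mapped low-memory
 * range, one page-sized chunk at a time.
 */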
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (kernel_is_locked_down())
                return -EPERM;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}

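/*
 * /dev/port: byte-wide access to the x86-style I/O port space.  The
 * file offset is the port number, so e.g. a one-byte read at offset
 * 0x70 issues inb(0x70).
 */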
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
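                /* fall through */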
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
        if (kernel_is_locked_down())
                return -EPERM;
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

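/*
 * /dev/zero and /dev/full reuse the null implementations above, and
 * /dev/mem and /dev/kmem share open_port()'s CAP_SYS_RAWIO and lockdown
 * checks.
 */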
#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

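/*
 * The array index is the device's minor number under character major 1
 * (see Documentation/admin-guide/devices.txt): /dev/mem is 1:1,
 * /dev/null is 1:3, /dev/zero is 1:5, and so on.
 */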
static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, 0 },
         [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk(KERN_ERR "unable to get major %d for memory devs\n",
                       MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);