1 /*
2 * linux/drivers/char/mem.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * Added devfs support.
7 * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9 */
10
11 #include <linux/mm.h>
12 #include <linux/miscdevice.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mman.h>
16 #include <linux/random.h>
17 #include <linux/init.h>
18 #include <linux/raw.h>
19 #include <linux/tty.h>
20 #include <linux/capability.h>
21 #include <linux/ptrace.h>
22 #include <linux/device.h>
23 #include <linux/highmem.h>
24 #include <linux/crash_dump.h>
25 #include <linux/backing-dev.h>
26 #include <linux/bootmem.h>
27 #include <linux/splice.h>
28 #include <linux/pfn.h>
29
30 #include <asm/uaccess.h>
31 #include <asm/io.h>
32
33 #ifdef CONFIG_IA64
34 # include <linux/efi.h>
35 #endif
36
37 /*
38 * Architectures vary in how they handle caching for addresses
39 * outside of main memory.
40 *
41 */
42 static inline int uncached_access(struct file *file, unsigned long addr)
43 {
44 #if defined(CONFIG_IA64)
45 /*
46 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
47 */
48 return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
49 #elif defined(CONFIG_MIPS)
50 {
51 extern int __uncached_access(struct file *file,
52 unsigned long addr);
53
54 return __uncached_access(file, addr);
55 }
56 #else
57 /*
58  * Accessing memory above the top of what the kernel knows about, or through a
59  * file pointer that was marked O_SYNC, will be done non-cached.
60 */
61 if (file->f_flags & O_SYNC)
62 return 1;
63 return addr >= __pa(high_memory);
64 #endif
65 }
66
67 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
68 static inline int valid_phys_addr_range(unsigned long addr, size_t count)
69 {
70 if (addr + count > __pa(high_memory))
71 return 0;
72
73 return 1;
74 }
75
76 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
77 {
78 return 1;
79 }
80 #endif
81
82 #ifdef CONFIG_NONPROMISC_DEVMEM
83 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
84 {
85 u64 from = ((u64)pfn) << PAGE_SHIFT;
86 u64 to = from + size;
87 u64 cursor = from;
88
89 while (cursor < to) {
90 if (!devmem_is_allowed(pfn)) {
91 printk(KERN_INFO
92 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
93 current->comm, from, to);
94 return 0;
95 }
96 cursor += PAGE_SIZE;
97 pfn++;
98 }
99 return 1;
100 }
101 #else
102 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
103 {
104 return 1;
105 }
106 #endif
107
108 void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
109 {
110 }
111
112 /*
113 * This function reads the *physical* memory. The f_pos points directly to the
114 * memory location.
115 */
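/*
 * Illustrative userspace sketch (not part of this driver): a read that ends up
 * in read_mem() below typically looks like this.  The 0xA0000 offset (legacy
 * VGA memory) is only an example and is still subject to
 * valid_phys_addr_range() and range_is_allowed().
 *
 *	char buf[4096];
 *	int fd = open("/dev/mem", O_RDONLY | O_SYNC);
 *	pread(fd, buf, sizeof(buf), 0xA0000);
 */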
116 static ssize_t read_mem(struct file * file, char __user * buf,
117 size_t count, loff_t *ppos)
118 {
119 unsigned long p = *ppos;
120 ssize_t read, sz;
121 char *ptr;
122
123 if (!valid_phys_addr_range(p, count))
124 return -EFAULT;
125 read = 0;
126 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
127 /* we don't have page 0 mapped on sparc and m68k.. */
128 if (p < PAGE_SIZE) {
129 sz = PAGE_SIZE - p;
130 if (sz > count)
131 sz = count;
132 if (sz > 0) {
133 if (clear_user(buf, sz))
134 return -EFAULT;
135 buf += sz;
136 p += sz;
137 count -= sz;
138 read += sz;
139 }
140 }
141 #endif
142
143 while (count > 0) {
144 /*
145 * Handle first page in case it's not aligned
146 */
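		/*
		 * Note: (-p & (PAGE_SIZE - 1)) is the distance from p to the
		 * next page boundary, so sz never spans two pages.
		 */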
147 if (-p & (PAGE_SIZE - 1))
148 sz = -p & (PAGE_SIZE - 1);
149 else
150 sz = PAGE_SIZE;
151
152 sz = min_t(unsigned long, sz, count);
153
154 if (!range_is_allowed(p >> PAGE_SHIFT, count))
155 return -EPERM;
156
157 /*
158 * On ia64 if a page has been mapped somewhere as
159 * uncached, then it must also be accessed uncached
160 * by the kernel or data corruption may occur
161 */
162 ptr = xlate_dev_mem_ptr(p);
163 if (!ptr)
164 return -EFAULT;
165
166 if (copy_to_user(buf, ptr, sz)) {
167 unxlate_dev_mem_ptr(p, ptr);
168 return -EFAULT;
169 }
170
171 unxlate_dev_mem_ptr(p, ptr);
172
173 buf += sz;
174 p += sz;
175 count -= sz;
176 read += sz;
177 }
178
179 *ppos += read;
180 return read;
181 }
182
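/*
 * Writes to *physical* memory, mirroring read_mem() above.  If a fault occurs
 * part-way through, the bytes already written are reported as a short write
 * instead of -EFAULT, matching usual write(2) semantics.
 */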
183 static ssize_t write_mem(struct file * file, const char __user * buf,
184 size_t count, loff_t *ppos)
185 {
186 unsigned long p = *ppos;
187 ssize_t written, sz;
188 unsigned long copied;
189 void *ptr;
190
191 if (!valid_phys_addr_range(p, count))
192 return -EFAULT;
193
194 written = 0;
195
196 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
197 /* we don't have page 0 mapped on sparc and m68k.. */
198 if (p < PAGE_SIZE) {
199 unsigned long sz = PAGE_SIZE - p;
200 if (sz > count)
201 sz = count;
202 /* Hmm. Do something? */
203 buf += sz;
204 p += sz;
205 count -= sz;
206 written += sz;
207 }
208 #endif
209
210 while (count > 0) {
211 /*
212 * Handle first page in case it's not aligned
213 */
214 if (-p & (PAGE_SIZE - 1))
215 sz = -p & (PAGE_SIZE - 1);
216 else
217 sz = PAGE_SIZE;
218
219 sz = min_t(unsigned long, sz, count);
220
221 if (!range_is_allowed(p >> PAGE_SHIFT, sz))
222 return -EPERM;
223
224 /*
225 * On ia64 if a page has been mapped somewhere as
226 * uncached, then it must also be accessed uncached
227 * by the kernel or data corruption may occur
228 */
229 ptr = xlate_dev_mem_ptr(p);
230 if (!ptr) {
231 if (written)
232 break;
233 return -EFAULT;
234 }
235
236 copied = copy_from_user(ptr, buf, sz);
237 if (copied) {
238 written += sz - copied;
239 unxlate_dev_mem_ptr(p, ptr);
240 if (written)
241 break;
242 return -EFAULT;
243 }
244
245 unxlate_dev_mem_ptr(p, ptr);
246
247 buf += sz;
248 p += sz;
249 count -= sz;
250 written += sz;
251 }
252
253 *ppos += written;
254 return written;
255 }
256
257 int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
258 unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
259 {
260 return 1;
261 }
262
263 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
264 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
265 unsigned long size, pgprot_t vma_prot)
266 {
267 #ifdef pgprot_noncached
268 unsigned long offset = pfn << PAGE_SHIFT;
269
270 if (uncached_access(file, offset))
271 return pgprot_noncached(vma_prot);
272 #endif
273 return vma_prot;
274 }
275 #endif
276
277 #ifndef CONFIG_MMU
278 static unsigned long get_unmapped_area_mem(struct file *file,
279 unsigned long addr,
280 unsigned long len,
281 unsigned long pgoff,
282 unsigned long flags)
283 {
284 if (!valid_mmap_phys_addr_range(pgoff, len))
285 return (unsigned long) -EINVAL;
286 return pgoff << PAGE_SHIFT;
287 }
288
289 /* can't do an in-place private mapping if there's no MMU */
290 static inline int private_mapping_ok(struct vm_area_struct *vma)
291 {
292 return vma->vm_flags & VM_MAYSHARE;
293 }
294 #else
295 #define get_unmapped_area_mem NULL
296
297 static inline int private_mapping_ok(struct vm_area_struct *vma)
298 {
299 return 1;
300 }
301 #endif
302
303 void __attribute__((weak))
304 map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
305 {
306 /* nothing. architectures can override. */
307 }
308
309 void __attribute__((weak))
310 unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
311 {
312 /* nothing. architectures can override. */
313 }
314
315 static void mmap_mem_open(struct vm_area_struct *vma)
316 {
317 map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
318 vma->vm_page_prot);
319 }
320
321 static void mmap_mem_close(struct vm_area_struct *vma)
322 {
323 unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
324 vma->vm_page_prot);
325 }
326
327 static struct vm_operations_struct mmap_mem_ops = {
328 .open = mmap_mem_open,
329 .close = mmap_mem_close
330 };
331
332 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
333 {
334 size_t size = vma->vm_end - vma->vm_start;
335
336 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
337 return -EINVAL;
338
339 if (!private_mapping_ok(vma))
340 return -ENOSYS;
341
342 if (!range_is_allowed(vma->vm_pgoff, size))
343 return -EPERM;
344
345 if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
346 &vma->vm_page_prot))
347 return -EINVAL;
348
349 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
350 size,
351 vma->vm_page_prot);
352
353 vma->vm_ops = &mmap_mem_ops;
354
355 /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
356 if (remap_pfn_range(vma,
357 vma->vm_start,
358 vma->vm_pgoff,
359 size,
360 vma->vm_page_prot)) {
361 unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
362 return -EAGAIN;
363 }
364 return 0;
365 }
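/*
 * Illustrative userspace sketch (not part of this driver): a typical mmap of
 * /dev/mem that is handled by mmap_mem() above.  The 0xA0000 offset is only an
 * example and remains subject to range_is_allowed() and
 * phys_mem_access_prot_allowed().
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, 0x10000, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0xA0000);
 */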
366
367 static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
368 {
369 unsigned long pfn;
370
371 /* Turn a kernel-virtual address into a physical page frame */
372 pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
373
374 /*
375 * RED-PEN: on some architectures there is more mapped memory
376 * than available in mem_map which pfn_valid checks
377 * for. Perhaps we should add a new macro here.
378 *
379 * RED-PEN: vmalloc is not supported right now.
380 */
381 if (!pfn_valid(pfn))
382 return -EIO;
383
384 vma->vm_pgoff = pfn;
385 return mmap_mem(file, vma);
386 }
387
388 #ifdef CONFIG_CRASH_DUMP
389 /*
390 * Read memory corresponding to the old kernel.
391 */
392 static ssize_t read_oldmem(struct file *file, char __user *buf,
393 size_t count, loff_t *ppos)
394 {
395 unsigned long pfn, offset;
396 size_t read = 0, csize;
397 int rc = 0;
398
399 while (count) {
400 pfn = *ppos / PAGE_SIZE;
401 if (pfn > saved_max_pfn)
402 return read;
403
404 offset = (unsigned long)(*ppos % PAGE_SIZE);
405 if (count > PAGE_SIZE - offset)
406 csize = PAGE_SIZE - offset;
407 else
408 csize = count;
409
410 rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
411 if (rc < 0)
412 return rc;
413 buf += csize;
414 *ppos += csize;
415 read += csize;
416 count -= csize;
417 }
418 return read;
419 }
420 #endif
421
422 extern long vread(char *buf, char *addr, unsigned long count);
423 extern long vwrite(char *buf, char *addr, unsigned long count);
424
425 /*
426 * This function reads the *virtual* memory as seen by the kernel.
427 */
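/*
 * Addresses below high_memory are copied straight out of the kernel's direct
 * mapping; anything above that is fetched from the vmalloc area via vread()
 * through a bounce page allocated below.
 */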
428 static ssize_t read_kmem(struct file *file, char __user *buf,
429 size_t count, loff_t *ppos)
430 {
431 unsigned long p = *ppos;
432 ssize_t low_count, read, sz;
433 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
434
435 read = 0;
436 if (p < (unsigned long) high_memory) {
437 low_count = count;
438 if (count > (unsigned long) high_memory - p)
439 low_count = (unsigned long) high_memory - p;
440
441 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
442 /* we don't have page 0 mapped on sparc and m68k.. */
443 if (p < PAGE_SIZE && low_count > 0) {
444 size_t tmp = PAGE_SIZE - p;
445 if (tmp > low_count) tmp = low_count;
446 if (clear_user(buf, tmp))
447 return -EFAULT;
448 buf += tmp;
449 p += tmp;
450 read += tmp;
451 low_count -= tmp;
452 count -= tmp;
453 }
454 #endif
455 while (low_count > 0) {
456 /*
457 * Handle first page in case it's not aligned
458 */
459 if (-p & (PAGE_SIZE - 1))
460 sz = -p & (PAGE_SIZE - 1);
461 else
462 sz = PAGE_SIZE;
463
464 sz = min_t(unsigned long, sz, low_count);
465
466 /*
467 * On ia64 if a page has been mapped somewhere as
468 * uncached, then it must also be accessed uncached
469 * by the kernel or data corruption may occur
470 */
471 kbuf = xlate_dev_kmem_ptr((char *)p);
472
473 if (copy_to_user(buf, kbuf, sz))
474 return -EFAULT;
475 buf += sz;
476 p += sz;
477 read += sz;
478 low_count -= sz;
479 count -= sz;
480 }
481 }
482
483 if (count > 0) {
484 kbuf = (char *)__get_free_page(GFP_KERNEL);
485 if (!kbuf)
486 return -ENOMEM;
487 while (count > 0) {
488 int len = count;
489
490 if (len > PAGE_SIZE)
491 len = PAGE_SIZE;
492 len = vread(kbuf, (char *)p, len);
493 if (!len)
494 break;
495 if (copy_to_user(buf, kbuf, len)) {
496 free_page((unsigned long)kbuf);
497 return -EFAULT;
498 }
499 count -= len;
500 buf += len;
501 read += len;
502 p += len;
503 }
504 free_page((unsigned long)kbuf);
505 }
506 *ppos = p;
507 return read;
508 }
509
510
511 static inline ssize_t
512 do_write_kmem(void *p, unsigned long realp, const char __user * buf,
513 size_t count, loff_t *ppos)
514 {
515 ssize_t written, sz;
516 unsigned long copied;
517
518 written = 0;
519 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
520 /* we don't have page 0 mapped on sparc and m68k.. */
521 if (realp < PAGE_SIZE) {
522 unsigned long sz = PAGE_SIZE - realp;
523 if (sz > count)
524 sz = count;
525 /* Hmm. Do something? */
526 buf += sz;
527 p += sz;
528 realp += sz;
529 count -= sz;
530 written += sz;
531 }
532 #endif
533
534 while (count > 0) {
535 char *ptr;
536 /*
537 * Handle first page in case it's not aligned
538 */
539 if (-realp & (PAGE_SIZE - 1))
540 sz = -realp & (PAGE_SIZE - 1);
541 else
542 sz = PAGE_SIZE;
543
544 sz = min_t(unsigned long, sz, count);
545
546 /*
547 * On ia64 if a page has been mapped somewhere as
548 * uncached, then it must also be accessed uncached
549 * by the kernel or data corruption may occur
550 */
551 ptr = xlate_dev_kmem_ptr(p);
552
553 copied = copy_from_user(ptr, buf, sz);
554 if (copied) {
555 written += sz - copied;
556 if (written)
557 break;
558 return -EFAULT;
559 }
560 buf += sz;
561 p += sz;
562 realp += sz;
563 count -= sz;
564 written += sz;
565 }
566
567 *ppos += written;
568 return written;
569 }
570
571
572 /*
573 * This function writes to the *virtual* memory as seen by the kernel.
574 */
575 static ssize_t write_kmem(struct file * file, const char __user * buf,
576 size_t count, loff_t *ppos)
577 {
578 unsigned long p = *ppos;
579 ssize_t wrote = 0;
580 ssize_t virtr = 0;
581 ssize_t written;
582 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
583
584 if (p < (unsigned long) high_memory) {
585
586 wrote = count;
587 if (count > (unsigned long) high_memory - p)
588 wrote = (unsigned long) high_memory - p;
589
590 written = do_write_kmem((void*)p, p, buf, wrote, ppos);
591 if (written != wrote)
592 return written;
593 wrote = written;
594 p += wrote;
595 buf += wrote;
596 count -= wrote;
597 }
598
599 if (count > 0) {
600 kbuf = (char *)__get_free_page(GFP_KERNEL);
601 if (!kbuf)
602 return wrote ? wrote : -ENOMEM;
603 while (count > 0) {
604 int len = count;
605
606 if (len > PAGE_SIZE)
607 len = PAGE_SIZE;
608 if (len) {
609 written = copy_from_user(kbuf, buf, len);
610 if (written) {
611 if (wrote + virtr)
612 break;
613 free_page((unsigned long)kbuf);
614 return -EFAULT;
615 }
616 }
617 len = vwrite(kbuf, (char *)p, len);
618 count -= len;
619 buf += len;
620 virtr += len;
621 p += len;
622 }
623 free_page((unsigned long)kbuf);
624 }
625
626 *ppos = p;
627 return virtr + wrote;
628 }
629
630 #ifdef CONFIG_DEVPORT
631 static ssize_t read_port(struct file * file, char __user * buf,
632 size_t count, loff_t *ppos)
633 {
634 unsigned long i = *ppos;
635 char __user *tmp = buf;
636
637 if (!access_ok(VERIFY_WRITE, buf, count))
638 return -EFAULT;
639 while (count-- > 0 && i < 65536) {
640 if (__put_user(inb(i),tmp) < 0)
641 return -EFAULT;
642 i++;
643 tmp++;
644 }
645 *ppos = i;
646 return tmp-buf;
647 }
648
649 static ssize_t write_port(struct file * file, const char __user * buf,
650 size_t count, loff_t *ppos)
651 {
652 unsigned long i = *ppos;
653 const char __user * tmp = buf;
654
655 if (!access_ok(VERIFY_READ,buf,count))
656 return -EFAULT;
657 while (count-- > 0 && i < 65536) {
658 char c;
659 if (__get_user(c, tmp)) {
660 if (tmp > buf)
661 break;
662 return -EFAULT;
663 }
664 outb(c,i);
665 i++;
666 tmp++;
667 }
668 *ppos = i;
669 return tmp-buf;
670 }
671 #endif
672
673 static ssize_t read_null(struct file * file, char __user * buf,
674 size_t count, loff_t *ppos)
675 {
676 return 0;
677 }
678
679 static ssize_t write_null(struct file * file, const char __user * buf,
680 size_t count, loff_t *ppos)
681 {
682 return count;
683 }
684
685 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
686 struct splice_desc *sd)
687 {
688 return sd->len;
689 }
690
691 static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
692 loff_t *ppos, size_t len, unsigned int flags)
693 {
694 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
695 }
696
697 static ssize_t read_zero(struct file * file, char __user * buf,
698 size_t count, loff_t *ppos)
699 {
700 size_t written;
701
702 if (!count)
703 return 0;
704
705 if (!access_ok(VERIFY_WRITE, buf, count))
706 return -EFAULT;
707
708 written = 0;
709 while (count) {
710 unsigned long unwritten;
711 size_t chunk = count;
712
713 if (chunk > PAGE_SIZE)
714 chunk = PAGE_SIZE; /* Just for latency reasons */
715 unwritten = clear_user(buf, chunk);
716 written += chunk - unwritten;
717 if (unwritten)
718 break;
719 buf += chunk;
720 count -= chunk;
721 cond_resched();
722 }
723 return written ? written : -EFAULT;
724 }
725
726 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
727 {
728 #ifndef CONFIG_MMU
729 return -ENOSYS;
730 #endif
731 if (vma->vm_flags & VM_SHARED)
732 return shmem_zero_setup(vma);
733 return 0;
734 }
735
736 static ssize_t write_full(struct file * file, const char __user * buf,
737 size_t count, loff_t *ppos)
738 {
739 return -ENOSPC;
740 }
741
742 /*
743 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
744 * can fopen() both devices with "a" now. This was previously impossible.
745 * -- SRB.
746 */
747
748 static loff_t null_lseek(struct file * file, loff_t offset, int orig)
749 {
750 return file->f_pos = 0;
751 }
752
753 /*
754 * The memory devices use the full 32/64 bits of the offset, and so we cannot
755 * check against negative addresses: they are ok. The return value is weird,
756 * though, in that case (0).
757 *
758 * Also note that seeking relative to the "end of file" isn't supported:
759 * it has no meaning, so it returns -EINVAL.
760 */
761 static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
762 {
763 loff_t ret;
764
765 mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
766 switch (orig) {
767 case 0:
768 file->f_pos = offset;
769 ret = file->f_pos;
770 force_successful_syscall_return();
771 break;
772 case 1:
773 file->f_pos += offset;
774 ret = file->f_pos;
775 force_successful_syscall_return();
776 break;
777 default:
778 ret = -EINVAL;
779 }
780 mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
781 return ret;
782 }
783
784 static int open_port(struct inode * inode, struct file * filp)
785 {
786 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
787 }
788
789 #define zero_lseek null_lseek
790 #define full_lseek null_lseek
791 #define write_zero write_null
792 #define read_full read_zero
793 #define open_mem open_port
794 #define open_kmem open_mem
795 #define open_oldmem open_mem
796
797 static const struct file_operations mem_fops = {
798 .llseek = memory_lseek,
799 .read = read_mem,
800 .write = write_mem,
801 .mmap = mmap_mem,
802 .open = open_mem,
803 .get_unmapped_area = get_unmapped_area_mem,
804 };
805
806 static const struct file_operations kmem_fops = {
807 .llseek = memory_lseek,
808 .read = read_kmem,
809 .write = write_kmem,
810 .mmap = mmap_kmem,
811 .open = open_kmem,
812 .get_unmapped_area = get_unmapped_area_mem,
813 };
814
815 static const struct file_operations null_fops = {
816 .llseek = null_lseek,
817 .read = read_null,
818 .write = write_null,
819 .splice_write = splice_write_null,
820 };
821
822 #ifdef CONFIG_DEVPORT
823 static const struct file_operations port_fops = {
824 .llseek = memory_lseek,
825 .read = read_port,
826 .write = write_port,
827 .open = open_port,
828 };
829 #endif
830
831 static const struct file_operations zero_fops = {
832 .llseek = zero_lseek,
833 .read = read_zero,
834 .write = write_zero,
835 .mmap = mmap_zero,
836 };
837
838 /*
839 * capabilities for /dev/zero
840 * - permits private mappings, "copies" are taken of the source of zeros
841 */
842 static struct backing_dev_info zero_bdi = {
843 .capabilities = BDI_CAP_MAP_COPY,
844 };
845
846 static const struct file_operations full_fops = {
847 .llseek = full_lseek,
848 .read = read_full,
849 .write = write_full,
850 };
851
852 #ifdef CONFIG_CRASH_DUMP
853 static const struct file_operations oldmem_fops = {
854 .read = read_oldmem,
855 .open = open_oldmem,
856 };
857 #endif
858
859 static ssize_t kmsg_write(struct file * file, const char __user * buf,
860 size_t count, loff_t *ppos)
861 {
862 char *tmp;
863 ssize_t ret;
864
865 tmp = kmalloc(count + 1, GFP_KERNEL);
866 if (tmp == NULL)
867 return -ENOMEM;
868 ret = -EFAULT;
869 if (!copy_from_user(tmp, buf, count)) {
870 tmp[count] = 0;
871 ret = printk("%s", tmp);
872 if (ret > count)
873 /* printk can add a prefix */
874 ret = count;
875 }
876 kfree(tmp);
877 return ret;
878 }
879
880 static const struct file_operations kmsg_fops = {
881 .write = kmsg_write,
882 };
883
884 static int memory_open(struct inode * inode, struct file * filp)
885 {
886 switch (iminor(inode)) {
887 case 1:
888 filp->f_op = &mem_fops;
889 filp->f_mapping->backing_dev_info =
890 &directly_mappable_cdev_bdi;
891 break;
892 case 2:
893 filp->f_op = &kmem_fops;
894 filp->f_mapping->backing_dev_info =
895 &directly_mappable_cdev_bdi;
896 break;
897 case 3:
898 filp->f_op = &null_fops;
899 break;
900 #ifdef CONFIG_DEVPORT
901 case 4:
902 filp->f_op = &port_fops;
903 break;
904 #endif
905 case 5:
906 filp->f_mapping->backing_dev_info = &zero_bdi;
907 filp->f_op = &zero_fops;
908 break;
909 case 7:
910 filp->f_op = &full_fops;
911 break;
912 case 8:
913 filp->f_op = &random_fops;
914 break;
915 case 9:
916 filp->f_op = &urandom_fops;
917 break;
918 case 11:
919 filp->f_op = &kmsg_fops;
920 break;
921 #ifdef CONFIG_CRASH_DUMP
922 case 12:
923 filp->f_op = &oldmem_fops;
924 break;
925 #endif
926 default:
927 return -ENXIO;
928 }
929 if (filp->f_op && filp->f_op->open)
930 return filp->f_op->open(inode,filp);
931 return 0;
932 }
933
934 static const struct file_operations memory_fops = {
935 .open = memory_open, /* just a selector for the real open */
936 };
937
938 static const struct {
939 unsigned int minor;
940 char *name;
941 umode_t mode;
942 const struct file_operations *fops;
943 } devlist[] = { /* list of minor devices */
944 {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
945 {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
946 {3, "null", S_IRUGO | S_IWUGO, &null_fops},
947 #ifdef CONFIG_DEVPORT
948 {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
949 #endif
950 {5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
951 {7, "full", S_IRUGO | S_IWUGO, &full_fops},
952 {8, "random", S_IRUGO | S_IWUSR, &random_fops},
953 {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
954 {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops},
955 #ifdef CONFIG_CRASH_DUMP
956 {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
957 #endif
958 };
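/*
 * All of these hang off MEM_MAJOR (1), so e.g. /dev/null is character device
 * 1:3 and /dev/zero is 1:5 ("mknod /dev/null c 1 3" by hand, or normally
 * created by udev from the class devices registered below).
 */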
959
960 static struct class *mem_class;
961
962 static int __init chr_dev_init(void)
963 {
964 int i;
965 int err;
966
967 err = bdi_init(&zero_bdi);
968 if (err)
969 return err;
970
971 if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
972 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
973
974 mem_class = class_create(THIS_MODULE, "mem");
975 for (i = 0; i < ARRAY_SIZE(devlist); i++)
976 device_create(mem_class, NULL,
977 MKDEV(MEM_MAJOR, devlist[i].minor),
978 devlist[i].name);
979
980 return 0;
981 }
982
983 fs_initcall(chr_dev_init);