/*
 * linux/drivers/char/mem.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Added devfs support.
 *   Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__) && !defined(__arch_um__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability))
		&& addr >= __pa(high_memory);
#elif defined(__x86_64__) && !defined(__arch_um__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruption.
	 * But it is only available for root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/*
	 * Same behaviour as i386: PAT is always set to cached and the
	 * MTRRs control the caching behaviour.
	 * Hopefully a full PAT implementation will fix that soon.
	 */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
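
/*
 * Illustrative sketch, not part of the driver: userspace asks for an
 * uncached view of physical memory by opening /dev/mem with O_SYNC,
 * which uncached_access() above honours on most architectures. The
 * phys_addr variable below is our own placeholder for a page-aligned
 * MMIO address, nothing this file defines:
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	volatile void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, fd, phys_addr);
 */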

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_NONPROMISC_DEVMEM
static inline int range_is_allowed(unsigned long from, unsigned long to)
{
	unsigned long cursor;

	cursor = from >> PAGE_SHIFT;
	while ((cursor << PAGE_SHIFT) < to) {
		if (!devmem_is_allowed(cursor)) {
			printk(KERN_INFO "Program %s tried to access /dev/mem "
				"between %lx->%lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long from, unsigned long to)
{
	return 1;
}
#endif
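
/*
 * devmem_is_allowed() is supplied per-architecture. As a rough sketch
 * of the policy the x86 patch in this series is understood to apply
 * (see arch/x86/mm/ for the authoritative version), the low 1MB stays
 * open for legacy users such as X and dmidecode, while RAM pages above
 * it are refused and non-RAM (device) pages remain mappable:
 *
 *	int devmem_is_allowed(unsigned long pagenr)
 *	{
 *		if (pagenr <= 256)
 *			return 1;
 *		if (!page_is_ram(pagenr))
 *			return 1;
 *		return 0;
 *	}
 */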

/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		if (!range_is_allowed(p, p + count))
			return -EPERM;
		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;
		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
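
/*
 * Usage sketch (userspace, illustrative only): the file offset is the
 * physical address, so dumping the legacy BIOS area is a plain pread():
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	char buf[4096];
 *	pread(fd, buf, sizeof(buf), 0xF0000);
 *
 * With CONFIG_NONPROMISC_DEVMEM enabled, reads of pages that
 * devmem_is_allowed() rejects fail with -EPERM instead.
 */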

static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		if (!range_is_allowed(p, p + sz))
			return -EPERM;
		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
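
/*
 * Usage sketch (userspace, illustrative only): mmap() on /dev/mem also
 * interprets the offset as a physical address. FB_PHYS here is a
 * made-up name for some framebuffer or MMIO aperture base:
 *
 *	off_t FB_PHYS = 0xE0000000;
 *	void *fb = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, FB_PHYS);
 */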

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif
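
/*
 * Usage sketch: after a crash kernel boots, capture tooling can read
 * the old kernel's image sequentially through this interface, e.g.
 *
 *	dd if=/dev/oldmem of=/var/crash/vmcore bs=4096
 *
 * The destination path is only an example; exact tooling is a matter
 * of distribution policy.
 */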

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count)
				tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}

static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user *buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {
		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void *)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
#endif
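
/*
 * Usage sketch (userspace, illustrative only): /dev/port exposes the
 * x86 I/O port space byte-wise, with the file offset as the port
 * number. Reading the CMOS seconds register, for example, means
 * writing the register index to port 0x70 and reading port 0x71:
 *
 *	int fd = open("/dev/port", O_RDWR);
 *	unsigned char idx = 0x00, sec;
 *	pwrite(fd, &idx, 1, 0x70);
 *	pread(fd, &sec, 1, 0x71);
 */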

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
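
/*
 * Usage sketch: a private mapping of /dev/zero is the classic way to
 * get zero-filled anonymous memory without MAP_ANONYMOUS; shared
 * mappings take the shmem_zero_setup() path above instead:
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE, fd, 0);
 */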

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value in that
 * case (0) is odd, though.
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	case 1:
		file->f_pos += offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}
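
/*
 * Usage sketch: everything written to /dev/kmsg is fed to printk() and
 * so ends up in the kernel log buffer, which makes a shell one-liner
 * enough for testing:
 *
 *	echo "hello from userspace" > /dev/kmsg
 */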

static const struct file_operations kmsg_fops = {
	.write = kmsg_write,
};

static int memory_open(struct inode *inode, struct file *filp)
{
	switch (iminor(inode)) {
	case 1:
		filp->f_op = &mem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;
		break;
	case 2:
		filp->f_op = &kmem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;
		break;
	case 3:
		filp->f_op = &null_fops;
		break;
#ifdef CONFIG_DEVPORT
	case 4:
		filp->f_op = &port_fops;
		break;
#endif
	case 5:
		filp->f_mapping->backing_dev_info = &zero_bdi;
		filp->f_op = &zero_fops;
		break;
	case 7:
		filp->f_op = &full_fops;
		break;
	case 8:
		filp->f_op = &random_fops;
		break;
	case 9:
		filp->f_op = &urandom_fops;
		break;
	case 11:
		filp->f_op = &kmsg_fops;
		break;
#ifdef CONFIG_CRASH_DUMP
	case 12:
		filp->f_op = &oldmem_fops;
		break;
#endif
	default:
		return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode, filp);
	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int minor;
	char *name;
	umode_t mode;
	const struct file_operations *fops;
} devlist[] = { /* list of minor devices */
	{1,  "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2,  "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3,  "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
	{4,  "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5,  "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7,  "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8,  "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9,  "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11, "kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem",  S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
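
/*
 * For illustration only: udev (or a static /dev) typically materialises
 * the table above as character nodes under MEM_MAJOR (1), e.g.
 *
 *	crw-r----- root kmem 1, 1 /dev/mem
 *	crw-rw-rw- root root 1, 3 /dev/null
 *	crw-rw-rw- root root 1, 5 /dev/zero
 *
 * Final ownership and modes are distribution policy, not set here.
 */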

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk(KERN_ERR "unable to get major %d for memory devs\n",
		       MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create(mem_class, NULL,
			      MKDEV(MEM_MAJOR, devlist[i].minor),
			      devlist[i].name);

	return 0;
}

fs_initcall(chr_dev_init);