/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}
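
/*
 * Worked example, assuming 4 KiB pages: for start == 0x1ff0 and
 * size == 64, the offset within the page is 0xff0, leaving
 * 0x1000 - 0xff0 == 16 bytes in the current page, so the helper
 * returns min(16, 64) == 16 and the caller loops page by page.
 */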

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
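
/*
 * Note: with CONFIG_STRICT_DEVMEM the walk above consults the
 * architecture's devmem_is_allowed() for every page in the range; on
 * x86, for instance, that typically restricts /dev/mem to the low
 * 1 MiB and to non-RAM (device) ranges.
 */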

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
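
/*
 * Typical userspace use (a sketch, not part of this file): open
 * /dev/mem read-only and pread() at the physical address of interest,
 * e.g.
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	pread(fd, buf, len, phys_addr);
 *
 * subject to valid_phys_addr_range() and the CONFIG_STRICT_DEVMEM
 * policy above.
 */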

static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accesses to memory above the top of what the kernel knows about,
	 * or through a file pointer that was opened O_DSYNC, are done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
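
/*
 * Typical userspace use (a sketch): map a physical range through
 * /dev/mem, e.g.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, phys_addr);
 *
 * where phys_addr must be page-aligned; the mmap offset becomes
 * vm_pgoff above after the usual byte-offset-to-page conversion.
 */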

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}

static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		void *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
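	/* GNU "?:" shorthand: returns virtr + wrote if non-zero, else err */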
	return virtr + wrote ? : err;
}

static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
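
/*
 * /dev/port usage sketch: the file offset selects the I/O port, so a
 * one-byte read of port 0x70 looks like
 *
 *	int fd = open("/dev/port", O_RDONLY);
 *	unsigned char v;
 *	pread(fd, &v, 1, 0x70);
 *
 * which reaches inb(0x70) via read_port() above (CAP_SYS_RAWIO is
 * required; see open_port() below).
 */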

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);

	iov_iter_advance(from, count);
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}
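
/*
 * Reads of /dev/zero are served entirely by iov_iter_zero(), a page at
 * a time so that large reads stay responsive to signals; for example
 * (a sketch)
 *
 *	dd if=/dev/zero of=/dev/null bs=4096 count=1024
 *
 * zero-fills 4 MiB through this path.
 */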

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
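		/* fall through */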
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}
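
/*
 * So, for example, positioning /dev/mem at a physical address before
 * reading is simply (a sketch)
 *
 *	lseek(fd, phys_addr, SEEK_SET);
 *	read(fd, buf, len);
 *
 * with the overflow check above keeping f_pos out of the range that
 * userland would mistake for a negative errno.
 */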

static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem
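
/*
 * The aliases above let the devices share implementations: writes to
 * /dev/zero are discarded exactly like /dev/null, and opening /dev/mem
 * or /dev/kmem requires CAP_SYS_RAWIO just like /dev/port.
 */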

static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.read_iter	= read_iter_null,
	.write_iter	= write_iter_null,
	.splice_write	= splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.write_iter	= write_iter_zero,
	.mmap		= mmap_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read_iter	= read_iter_zero,
	.write		= write_full,
};

static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	 [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, 0 },
#endif
	 [5] = { "zero", 0666, &zero_fops, 0 },
	 [7] = { "full", 0666, &full_fops, 0 },
	 [8] = { "random", 0666, &random_fops, 0 },
	 [9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
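
/*
 * The array index doubles as the device minor under MEM_MAJOR (1), so
 * the table corresponds to the classic nodes /dev/mem (1,1), /dev/kmem
 * (1,2), /dev/null (1,3), /dev/port (1,4), /dev/zero (1,5), /dev/full
 * (1,7), /dev/random (1,8), /dev/urandom (1,9) and /dev/kmsg (1,11);
 * e.g. "mknod /dev/null c 1 3" recreates one such node by hand.
 */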

static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}
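
/*
 * Example of the dispatch above: opening /dev/full arrives here with
 * minor 7, so memory_open() swaps in full_fops; full_fops has no
 * ->open, so the open succeeds immediately, while minor 1 (/dev/mem)
 * falls through to open_mem() and its CAP_SYS_RAWIO check.
 */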

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk(KERN_ERR "unable to get major %d for memory devs\n",
		       MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);