Linux 3.10.107
drivers/char/mem.c
/*
 * linux/drivers/char/mem.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Added devfs support.
 * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/aio.h>

#include <asm/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR 4

static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}
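
/*
 * Worked example (4 KiB pages): for start = 0x1ffc and size = 64,
 * start & (PAGE_SIZE - 1) is 0xffc, so sz = 0x1000 - 0xffc = 4 and only
 * the 4 bytes up to the next page boundary are handled in this pass;
 * the callers loop to cover the remainder one page at a time.
 */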

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				return -EFAULT;

			remaining = copy_to_user(buf, ptr, sz);

			unxlate_dev_mem_ptr(p, ptr);
		}

		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
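
/*
 * Illustrative userspace sketch (hypothetical, guarded out since it is
 * not part of this kernel file): reading one page of physical memory
 * through the read_mem() path above with pread() on /dev/mem. The
 * physical address 0x90000 is an arbitrary example; it must pass
 * valid_phys_addr_range() and page_is_allowed() on the running system,
 * and the caller needs CAP_SYS_RAWIO for open_mem().
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char page[4096];
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	/* read_mem() splits the request with size_inside_page() anyway */
	if (pread(fd, page, sizeof(page), 0x90000) < 0)
		perror("pread");
	close(fd);
	return 0;
}
#endif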

static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows
	 * about, or through a file pointer that was marked O_DSYNC,
	 * will be done non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
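
/*
 * Illustrative userspace sketch (hypothetical, guarded out): mapping a
 * page of physical address space through mmap_mem() above. mmap()'s
 * offset argument is the physical address, which the kernel turns into
 * vm_pgoff. The base 0xfe000000 is a placeholder MMIO address; a real
 * one must satisfy valid_mmap_phys_addr_range() and range_is_allowed().
 */
#if 0
#define _FILE_OFFSET_BITS 64	/* so the offset fits on 32-bit builds */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	off_t phys = 0xfe000000;	/* placeholder MMIO base */
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	volatile uint32_t *regs;

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd, phys);
	if (regs == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	printf("first register word: 0x%08x\n", regs[0]);
	munmap((void *)regs, 4096);
	close(fd);
	return 0;
}
#endif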

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}


static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((char *)p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	/* GNU "?:": return the total byte count if nonzero, else the error */
	return virtr + wrote ? : err;
}
#endif

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif
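
/*
 * Illustrative userspace sketch (hypothetical, guarded out): on
 * /dev/port the file position selects the I/O port, so lseek() + read()
 * ends up as an inb() in read_port() above. Port 0x80 (the POST
 * diagnostic port) is used only as a conventionally harmless example;
 * open_port() requires CAP_SYS_RAWIO.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char val;
	int fd = open("/dev/port", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/port");
		return 1;
	}
	/* seek to the port number, then read one byte from it */
	if (lseek(fd, 0x80, SEEK_SET) < 0 || read(fd, &val, 1) != 1) {
		perror("read port 0x80");
		close(fd);
		return 1;
	}
	printf("port 0x80 = 0x%02x\n", val);
	close(fd);
	return 0;
}
#endif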

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	return 0;
}

static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
			      unsigned long nr_segs, loff_t pos)
{
	return iov_length(iov, nr_segs);
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
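
/*
 * Illustrative userspace sketch (hypothetical, guarded out): splicing a
 * pipe into /dev/null exercises splice_write_null() above, which drains
 * the pipe without copying the data anywhere.
 */
#if 0
#define _GNU_SOURCE	/* for splice(2) */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int pipefd[2];
	int null_fd = open("/dev/null", O_WRONLY);
	ssize_t n;

	if (null_fd < 0 || pipe(pipefd) < 0)
		return 1;
	if (write(pipefd[1], "discard me", 10) != 10)
		return 1;
	/* consumes 10 bytes from the pipe, "writing" them to /dev/null */
	n = splice(pipefd[0], NULL, null_fd, NULL, 10, 0);
	printf("spliced %zd bytes\n", n);
	return 0;
}
#endif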

static ssize_t read_zero(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
			     unsigned long nr_segs, loff_t pos)
{
	size_t written = 0;
	unsigned long i;
	ssize_t ret;

	for (i = 0; i < nr_segs; i++) {
		ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
				&pos);
		if (ret < 0)
			break;
		written += ret;
	}

	return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
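
/*
 * Illustrative userspace sketch (hypothetical, guarded out): a
 * MAP_SHARED mapping of /dev/zero goes through shmem_zero_setup() in
 * mmap_zero() above, yielding zero-filled shared memory, while a
 * MAP_PRIVATE mapping behaves like ordinary anonymous memory.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	char *p;

	if (fd < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);	/* the mapping stays valid after close */
	if (p == MAP_FAILED)
		return 1;
	p[0] = 'x';	/* shmem-backed, visible to children over fork() */
	printf("p[0] = %c, p[1] = %d\n", p[0], p[1]);
	munmap(p, 4096);
	return 0;
}
#endif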

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file_inode(file)->i_mutex);
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= ~0xFFFULL) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file_inode(file)->i_mutex);
	return ret;
}
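
/*
 * Example: lseek(fd, 0x100000, SEEK_SET) on /dev/mem positions f_pos at
 * physical address 0x100000. Offsets at or above ~0xFFFULL
 * (0xFFFFFFFFFFFFF000) are refused with -EOVERFLOW so a huge f_pos can
 * never be mistaken for a negative errno in the syscall return path.
 */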

static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek null_lseek
#define full_lseek null_lseek
#define write_zero write_null
#define read_full read_zero
#define aio_write_zero aio_write_null
#define open_mem open_port
#define open_kmem open_mem
#define open_oldmem open_mem

static const struct file_operations mem_fops = {
	.llseek = memory_lseek,
	.read = read_mem,
	.write = write_mem,
	.mmap = mmap_mem,
	.open = open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek = memory_lseek,
	.read = read_kmem,
	.write = write_kmem,
	.mmap = mmap_kmem,
	.open = open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek = null_lseek,
	.read = read_null,
	.write = write_null,
	.aio_read = aio_read_null,
	.aio_write = aio_write_null,
	.splice_write = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek = memory_lseek,
	.read = read_port,
	.write = write_port,
	.open = open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek = zero_lseek,
	.read = read_zero,
	.write = write_zero,
	.aio_read = aio_read_zero,
	.aio_write = aio_write_zero,
	.mmap = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 * - no writeback happens
 */
static struct backing_dev_info zero_bdi = {
	.name = "char/mem",
	.capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static const struct file_operations full_fops = {
	.llseek = full_lseek,
	.read = read_full,
	.write = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read = read_oldmem,
	.open = open_oldmem,
	.llseek = default_llseek,
};
#endif

static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	[1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	[2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	[3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	[4] = { "port", 0, &port_fops, NULL },
#endif
	[5] = { "zero", 0666, &zero_fops, &zero_bdi },
	[7] = { "full", 0666, &full_fops, NULL },
	[8] = { "random", 0666, &random_fops, NULL },
	[9] = { "urandom", 0666, &urandom_fops, NULL },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, NULL },
#endif
#ifdef CONFIG_CRASH_DUMP
	[12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
};
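
/*
 * The devlist[] index doubles as the minor number under MEM_MAJOR (1),
 * so e.g. "mknod /dev/null c 1 3" corresponds to devlist[3] above.
 * Holes in the array (0, 6, 10) have a NULL ->fops, which memory_open()
 * below rejects with -ENXIO.
 */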

static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	/* Is /dev/mem or /dev/kmem ? */
	if (dev->dev_info == &directly_mappable_cdev_bdi)
		filp->f_mode |= FMODE_UNSIGNED_OFFSET;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk(KERN_ERR "unable to get major %d for memory devs\n",
		       MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);