/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/aio.h>

#ifdef CONFIG_KNOX_KAP
#include <linux/knox_kap.h>
#endif

#ifdef CONFIG_MST_LDO
#include <linux/mst_ctrl.h>
#endif

#include <asm/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR 4

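/*
 * Return the number of bytes from @start to the end of the page containing
 * it, clamped to @size, so callers can walk a buffer one page at a time.
 */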
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

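/*
 * With CONFIG_STRICT_DEVMEM the architecture's devmem_is_allowed() decides,
 * page by page, whether /dev/mem and /dev/kmem may touch a frame; without
 * it, every physical page is permitted.
 */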
#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)
#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif
#endif

#ifdef CONFIG_DEVMEM
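/*
 * Weak default: most architectures need no cleanup after xlate_dev_mem_ptr().
 * Architectures that set up a temporary mapping for /dev/mem accesses
 * override this to tear that mapping down again.
 */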
void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                return -EFAULT;

                        remaining = copy_to_user(buf, ptr, sz);

                        unxlate_dev_mem_ptr(p, ptr);
                }

                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}
#endif /* CONFIG_DEVMEM */

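/*
 * Helpers shared by the /dev/mem and /dev/kmem mmap() paths: pick a page
 * protection (cached vs. uncached) for the physical range being mapped and
 * let the architecture veto the mapping.
 */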
#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM)

int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top the kernel knows about or through
         * a file pointer that was marked O_DSYNC will be done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                          &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}
#endif /* CONFIG_DEVMEM || CONFIG_DEVKMEM */

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif

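/*
 * /dev/kmem works on kernel *virtual* addresses. Addresses below
 * high_memory are copied straight out of the direct mapping; anything
 * above that is bounced through a temporary page and vread()/vwrite()
 * so vmalloc and module mappings are handled safely.
 */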
#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}


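/*
 * Write to the kernel's direct-mapped (lowmem) range; mirrors write_mem()
 * but translates addresses with xlate_dev_kmem_ptr().
 */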
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                             size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((char *)p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}
#endif

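/*
 * /dev/port: byte-wide access to the system's I/O port space (ports
 * 0-65535) via inb()/outb(); the file offset is the port number.
 */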
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif

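/*
 * Trivial devices: /dev/null discards writes and returns EOF on read,
 * /dev/zero returns zero bytes, and /dev/full always fails writes with
 * -ENOSPC.
 */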
static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        return 0;
}

static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        return iov_length(iov, nr_segs);
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        size_t written;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        written = 0;
        while (count) {
                unsigned long unwritten;
                size_t chunk = count;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                unwritten = __clear_user(buf, chunk);
                written += chunk - unwritten;
                if (unwritten)
                        break;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                buf += chunk;
                count -= chunk;
                cond_resched();
        }
        return written ? written : -EFAULT;
}

static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        size_t written = 0;
        unsigned long i;
        ssize_t ret;

        for (i = 0; i < nr_segs; i++) {
                ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
                                &pos);
                if (ret < 0)
                        break;
                written += ret;
        }

        return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file_inode(file)->i_mutex);
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= ~0xFFFULL) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&file_inode(file)->i_mutex);
        return ret;
}

#endif

#if defined(CONFIG_DEVMEM) || defined(CONFIG_DEVKMEM) || defined(CONFIG_DEVPORT)
static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
#endif

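/*
 * Several of the devices share implementations: /dev/zero and /dev/full
 * reuse the null/zero handlers above, and /dev/mem, /dev/kmem and
 * /dev/oldmem all take the CAP_SYS_RAWIO check from open_port().
 */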
#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define aio_write_zero  aio_write_null
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

#ifdef CONFIG_DEVMEM
static const struct file_operations mem_fops = {
        .llseek = memory_lseek,
        .read = read_mem,
        .write = write_mem,
        .mmap = mmap_mem,
        .open = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
};
#endif

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
        .llseek = memory_lseek,
        .read = read_kmem,
        .write = write_kmem,
        .mmap = mmap_kmem,
        .open = open_kmem,
        .get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
        .llseek = null_lseek,
        .read = read_null,
        .write = write_null,
        .aio_read = aio_read_null,
        .aio_write = aio_write_null,
        .splice_write = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
        .llseek = memory_lseek,
        .read = read_port,
        .write = write_port,
        .open = open_port,
};
#endif

static const struct file_operations zero_fops = {
        .llseek = zero_lseek,
        .read = read_zero,
        .write = write_zero,
        .aio_read = aio_read_zero,
        .aio_write = aio_write_zero,
        .mmap = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 * - no writeback happens
 */
static struct backing_dev_info zero_bdi = {
        .name = "char/mem",
        .capabilities = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static const struct file_operations full_fops = {
        .llseek = full_lseek,
        .read = read_full,
        .write = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
        .read = read_oldmem,
        .open = open_oldmem,
        .llseek = default_llseek,
};
#endif

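/*
 * Table of minor devices under MEM_MAJOR: the array index is the minor
 * number (1 = /dev/mem, 3 = /dev/null, 5 = /dev/zero, ...); entries left
 * out by the config options have no fops and stay disabled.
 */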
static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        struct backing_dev_info *dev_info;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
         [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, NULL },
#endif
         [5] = { "zero", 0666, &zero_fops, &zero_bdi },
         [7] = { "full", 0666, &full_fops, NULL },
         [8] = { "random", 0666, &random_fops, NULL },
         [9] = { "urandom", 0666, &urandom_fops, NULL },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, NULL },
#endif
#ifdef CONFIG_CRASH_DUMP
        [12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
#ifdef CONFIG_KNOX_KAP
        [13] = { "knox_kap", 0666, &knox_kap_fops, NULL },
#endif
#ifdef CONFIG_MST_LDO
        [14] = { "mst_ctrl", 0666, &mst_ctrl_fops, NULL },
#endif
};

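/*
 * Common open() for the whole major: look the minor number up in devlist,
 * install that device's fops and backing_dev_info, then call the device's
 * own open() if it has one.
 */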
static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        if (dev->dev_info)
                filp->f_mapping->backing_dev_info = dev->dev_info;

        /* Is /dev/mem or /dev/kmem ? */
        if (dev->dev_info == &directly_mappable_cdev_bdi)
                filp->f_mode |= FMODE_UNSIGNED_OFFSET;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

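/*
 * Register the MEM_MAJOR character device, create the "mem" class and a
 * /dev node for every populated devlist entry, then hand off to tty_init().
 */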
static int __init chr_dev_init(void)
{
        int minor;
        int err;

        err = bdi_init(&zero_bdi);
        if (err)
                return err;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);
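
/*
 * Illustrative only (not part of this driver): userspace typically uses
 * /dev/mem by mmap()ing a page-aligned physical address, along these lines.
 * The address below is a made-up example.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			  fd, 0x10000000);	// hypothetical register base
 *	// ... access regs, then munmap(regs, 4096); close(fd);
 *
 * On kernels built with CONFIG_STRICT_DEVMEM the mapping is refused with
 * -EPERM unless devmem_is_allowed() permits the target pages.
 */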