/*
 * linux/drivers/char/mem.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Added devfs support.
 *   Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
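/*
 * With CONFIG_STRICT_DEVMEM the requested range is checked one page at a
 * time: if devmem_is_allowed() rejects any page, the whole access is
 * refused and the offending process is logged. Without the option, every
 * range is allowed (see the stub in the #else branch below).
 */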
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
				"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

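/*
 * Default no-op for architectures that need nothing done to undo a
 * xlate_dev_mem_ptr() mapping; the weak definition can be overridden
 * per architecture.
 */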
void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to
 * the memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, sz)) {
			unxlate_dev_mem_ptr(p, ptr);
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			unxlate_dev_mem_ptr(p, ptr);
			if (written)
				break;
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

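/*
 * Weak hook that lets an architecture veto or adjust the page protection
 * used for a physical-memory mapping; the default accepts every request
 * unchanged.
 */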
int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

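/*
 * mmap of /dev/mem: validate the physical range, apply the STRICT_DEVMEM
 * policy and the architecture hooks above, then map the pages directly
 * with remap_pfn_range() using a (possibly uncached) protection.
 */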
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

#ifdef CONFIG_DEVKMEM
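/*
 * mmap of /dev/kmem: the offset is a kernel-virtual address, so turn it
 * into a page frame number first and then reuse the /dev/mem path above.
 */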
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count) tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}


static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#endif

#ifdef CONFIG_DEVPORT
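/*
 * /dev/port: the file offset is the I/O port number; reads and writes go
 * one byte at a time through inb()/outb() for ports 0-65535.
 */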
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

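/*
 * read_zero() fills the user buffer a page-sized chunk at a time with
 * __clear_user(), checking for pending signals between chunks so that
 * very large reads stay interruptible.
 */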
static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	case 1:
		file->f_pos += offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

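/*
 * Writes to /dev/kmsg are copied into a temporary kernel buffer,
 * NUL-terminated and handed to printk(); the return value is clamped to
 * the user-supplied count because printk() may prepend a prefix.
 */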
static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static const struct file_operations kmsg_fops = {
	.write = kmsg_write,
};

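/*
 * The index into devlist[] is the minor number under MEM_MAJOR (1), so
 * e.g. /dev/null is char 1:3 and /dev/zero is char 1:5; slots that are
 * left out or compiled out have no fops and are rejected in memory_open().
 */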
static const struct memdev {
	const char *name;
	mode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
	[11] = { "kmsg", 0, &kmsg_fops, NULL },
#ifdef CONFIG_CRASH_DUMP
	[12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
};

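/*
 * Opening a node under MEM_MAJOR dispatches on the minor number: the
 * matching devlist entry supplies the file_operations and, if present,
 * the backing_dev_info, and its own open() is then chained to.
 */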
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
};

static char *mem_devnode(struct device *dev, mode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

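/*
 * chr_dev_init() registers the MEM_MAJOR character device, creates the
 * "mem" class and instantiates a device for every populated devlist entry
 * so that the /dev nodes (mem, null, zero, ...) get created.
 */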
static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;
		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return 0;
}

fs_initcall(chr_dev_init);