/*
 * linux/drivers/char/mem.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Added devfs support.
 * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

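/*
 * For illustration only (a hedged userspace sketch, not part of this
 * driver): opening /dev/mem with O_SYNC is how a user program asks the
 * non-IA64 branch above for a non-cached mapping.  The 0x80000000 base
 * address is a made-up example device address.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	volatile void *regs = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, fd, 0x80000000);
 */
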
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
				"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

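/*
 * devmem_is_allowed() is provided per-architecture.  As a hedged,
 * purely hypothetical sketch of the per-pfn contract the loop above
 * relies on (not taken from any real port), an implementation that
 * only permits the first megabyte might look like:
 *
 *	int devmem_is_allowed(unsigned long pfn)
 *	{
 *		return pfn < (0x100000 >> PAGE_SHIFT);
 *	}
 */
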
/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, sz)) {
			unxlate_dev_mem_ptr(p, ptr);
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
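
/*
 * Hedged userspace sketch (not part of this file): reading four bytes
 * of physical memory through read_mem() above.  The 0xf0000 offset is
 * only an illustrative address (the legacy BIOS area on a PC).
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	unsigned char sig[4];
 *	int fd = open("/dev/mem", O_RDONLY);
 *	pread(fd, sig, sizeof(sig), 0xf0000);
 */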

static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			unxlate_dev_mem_ptr(p, ptr);
			if (written)
				break;
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}
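
/*
 * The matching userspace write path, again only a hedged sketch with a
 * made-up scratch offset:
 *
 *	unsigned int val = 0xdeadbeef;
 *	int fd = open("/dev/mem", O_RDWR);
 *	pwrite(fd, &val, sizeof(val), 0x1000);
 *
 * Note the partial-write semantics above: once some bytes have been
 * written, a fault ends the loop and the short count is returned
 * instead of -EFAULT.
 */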

int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

void __attribute__((weak))
map_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

void __attribute__((weak))
unmap_devmem(unsigned long pfn, unsigned long len, pgprot_t prot)
{
	/* nothing. architectures can override. */
}

static void mmap_mem_open(struct vm_area_struct *vma)
{
	map_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
		   vma->vm_page_prot);
}

static void mmap_mem_close(struct vm_area_struct *vma)
{
	unmap_devmem(vma->vm_pgoff, vma->vm_end - vma->vm_start,
		     vma->vm_page_prot);
}

static struct vm_operations_struct mmap_mem_ops = {
	.open = mmap_mem_open,
	.close = mmap_mem_close,
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		unmap_devmem(vma->vm_pgoff, size, vma->vm_page_prot);
		return -EAGAIN;
	}
	return 0;
}
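
/*
 * Worked example of the pfn plumbing above: an mmap() of /dev/mem at
 * file offset 0x20000 arrives here with vma->vm_pgoff == 0x20
 * (0x20000 >> PAGE_SHIFT with 4 KiB pages), and remap_pfn_range()
 * then maps physical pages starting at page frame 0x20.
 */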

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif
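
/*
 * The conversion above first rebuilds the kernel-virtual address
 * (vm_pgoff << PAGE_SHIFT), translates it with __pa(), then shifts the
 * result back down to a page frame number.  For example, on a
 * hypothetical 32-bit layout with PAGE_OFFSET at 0xc0000000, a caller
 * mapping offset 0xc0100000 ends up with pfn 0x100.
 */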

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif
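
/*
 * copy_oldmem_page() is supplied by each architecture.  A hedged,
 * hypothetical sketch of the contract (not taken from any real port),
 * mapping the old kernel's page and copying csize bytes from offset:
 *
 *	ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 *				 size_t csize, unsigned long offset,
 *				 int userbuf)
 *	{
 *		void *vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
 *		ssize_t ret = csize;
 *
 *		if (!vaddr)
 *			return -ENOMEM;
 *		if (userbuf) {
 *			if (copy_to_user(buf, vaddr + offset, csize))
 *				ret = -EFAULT;
 *		} else
 *			memcpy(buf, vaddr + offset, csize);
 *		iounmap(vaddr);
 *		return ret;
 *	}
 */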

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count)
				tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}
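
/*
 * Hedged userspace sketch (illustrative only): /dev/kmem is seeked to
 * a kernel *virtual* address, for instance one taken from System.map.
 * The address below is a placeholder 32-bit kernel address, positive
 * when held in a 64-bit off_t.
 *
 *	long value;
 *	int fd = open("/dev/kmem", O_RDONLY);
 *	lseek(fd, 0xc0100000UL, SEEK_SET);
 *	read(fd, &value, sizeof(value));
 */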


static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#endif

#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif
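
/*
 * Hedged userspace sketch: the file offset of /dev/port selects the
 * I/O port, so reading one byte at offset 0x60 polls the legacy PC
 * keyboard controller data port (illustrative only):
 *
 *	unsigned char b;
 *	int fd = open("/dev/port", O_RDONLY);
 *	pread(fd, &b, 1, 0x60);
 */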

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}
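
/*
 * Hedged sketch of the splice path above: given pipefd[] from pipe(),
 * draining the pipe into /dev/null discards the data without ever
 * copying it through userspace.
 *
 *	int null = open("/dev/null", O_WRONLY);
 *	splice(pipefd[0], NULL, null, NULL, 65536, 0);
 */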

static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
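
/*
 * Hedged userspace sketch: a MAP_SHARED mapping of /dev/zero is turned
 * into anonymous shared memory by shmem_zero_setup(), so the pages
 * stay shared across fork():
 *
 *	void *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, open("/dev/zero", O_RDWR), 0);
 */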

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably,
 * you can fopen() both devices with "a" now.  This was previously
 * impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we
 * cannot check against negative addresses: they are ok.  The return
 * value is weird, though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	case 1:
		file->f_pos += offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek = memory_lseek,
	.read = read_mem,
	.write = write_mem,
	.mmap = mmap_mem,
	.open = open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek = memory_lseek,
	.read = read_kmem,
	.write = write_kmem,
	.mmap = mmap_kmem,
	.open = open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek = null_lseek,
	.read = read_null,
	.write = write_null,
	.splice_write = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek = memory_lseek,
	.read = read_port,
	.write = write_port,
	.open = open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek = zero_lseek,
	.read = read_zero,
	.write = write_zero,
	.mmap = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.capabilities = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek = full_lseek,
	.read = read_full,
	.write = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read = read_oldmem,
	.open = open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}
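
/*
 * Hedged userspace sketch: a write to /dev/kmsg is passed straight to
 * printk(), so the message lands in the kernel log; the "<4>" prefix
 * is an optional log-level marker that printk() understands.
 *
 *	int fd = open("/dev/kmsg", O_WRONLY);
 *	write(fd, "<4>hello from userspace\n", 24);
 */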

static const struct file_operations kmsg_fops = {
	.write = kmsg_write,
};

static int memory_open(struct inode * inode, struct file * filp)
{
	int ret = 0;

	lock_kernel();
	switch (iminor(inode)) {
	case 1:
		filp->f_op = &mem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;
		break;
#ifdef CONFIG_DEVKMEM
	case 2:
		filp->f_op = &kmem_fops;
		filp->f_mapping->backing_dev_info =
			&directly_mappable_cdev_bdi;
		break;
#endif
	case 3:
		filp->f_op = &null_fops;
		break;
#ifdef CONFIG_DEVPORT
	case 4:
		filp->f_op = &port_fops;
		break;
#endif
	case 5:
		filp->f_mapping->backing_dev_info = &zero_bdi;
		filp->f_op = &zero_fops;
		break;
	case 7:
		filp->f_op = &full_fops;
		break;
	case 8:
		filp->f_op = &random_fops;
		break;
	case 9:
		filp->f_op = &urandom_fops;
		break;
	case 11:
		filp->f_op = &kmsg_fops;
		break;
#ifdef CONFIG_CRASH_DUMP
	case 12:
		filp->f_op = &oldmem_fops;
		break;
#endif
	default:
		unlock_kernel();
		return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		ret = filp->f_op->open(inode, filp);
	unlock_kernel();
	return ret;
}

static const struct file_operations memory_fops = {
	.open = memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int minor;
	char *name;
	umode_t mode;
	const struct file_operations *fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
#ifdef CONFIG_DEVKMEM
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
#endif
	{3, "null",    S_IRUGO | S_IWUGO, &null_fops},
#ifdef CONFIG_DEVPORT
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO, &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO, &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR, &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
	{11, "kmsg",   S_IRUGO | S_IWUSR, &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
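
/*
 * Hedged userspace sketch: the minors in devlist[] are exactly what a
 * static device node encodes, e.g. recreating /dev/null (major 1,
 * minor 3) and /dev/zero (major 1, minor 5) by hand:
 *
 *	#include <sys/stat.h>
 *	#include <sys/sysmacros.h>
 *
 *	mknod("/dev/null", S_IFCHR | 0666, makedev(1, 3));
 *	mknod("/dev/zero", S_IFCHR | 0666, makedev(1, 5));
 */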

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++)
		device_create_drvdata(mem_class, NULL,
				      MKDEV(MEM_MAJOR, devlist[i].minor),
				      NULL, devlist[i].name);

	return 0;
}

fs_initcall(chr_dev_init);