spi: Remove BKL from spidev_open
[GitHub/mt8127/android_kernel_alcatel_ttab.git] / drivers / char / mem.c
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/smp_lock.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_SYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

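/*
 * Default checks used when the architecture does not supply its own
 * ARCH_HAS_VALID_PHYS_ADDR_RANGE implementation: plain reads/writes of
 * /dev/mem must stay below high_memory, while mmap() of any page frame
 * is accepted here and left to the stricter checks in mmap_mem().
 */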
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

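/*
 * With CONFIG_STRICT_DEVMEM, every page of a /dev/mem access is vetted
 * through the architecture's devmem_is_allowed() before it is touched;
 * without it, range_is_allowed() degenerates to "always allowed".
 */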
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
		"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		if (copy_to_user(buf, ptr, sz)) {
			unxlate_dev_mem_ptr(p, ptr);
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}

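/*
 * This function writes to the *physical* memory, mirroring read_mem():
 * f_pos is again the physical address being written.
 */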
static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			unxlate_dev_mem_ptr(p, ptr);
			if (written)
				break;
			return -EFAULT;
		}

		unxlate_dev_mem_ptr(p, ptr);

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

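/*
 * Weak default: any physical mapping is permitted and vma_prot is left
 * untouched.  Architectures can override this to reject or re-protect
 * mappings of particular physical ranges.
 */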
int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	unsigned long offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

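/*
 * On no-MMU systems /dev/mem can only be mapped directly: the "mapping"
 * is simply the physical address itself, and private (copy-on-write)
 * mappings cannot be emulated, so only shared mappings are accepted.
 */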
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem	NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

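/*
 * mmap() of /dev/mem: validate the requested physical range, apply the
 * STRICT_DEVMEM and architecture policy hooks, pick cached/uncached page
 * protections, and then establish the mapping with remap_pfn_range().
 */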
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

#ifdef CONFIG_DEVKMEM
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	unsigned long pfn, offset;
	size_t read = 0, csize;
	int rc = 0;

	while (count) {
		pfn = *ppos / PAGE_SIZE;
		if (pfn > saved_max_pfn)
			return read;

		offset = (unsigned long)(*ppos % PAGE_SIZE);
		if (count > PAGE_SIZE - offset)
			csize = PAGE_SIZE - offset;
		else
			csize = count;

		rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
		if (rc < 0)
			return rc;
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
	return read;
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count) tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}

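/*
 * Helper for write_kmem(): copies user data into the kernel's directly
 * mapped low-memory region.  'p' is the kernel virtual destination and
 * 'realp' the same address as an integer, used for the page-zero and
 * page-alignment arithmetic.
 */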
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					if (wrote + virtr)
						break;
					free_page((unsigned long)kbuf);
					return -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}
#endif

#ifdef CONFIG_DEVPORT
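/*
 * /dev/port gives byte-wide access to the I/O port space: the file
 * offset is the port number, so reading one byte at offset 0x60 returns
 * inb(0x60), and writing a byte there performs the matching outb().
 */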
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i),tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ,buf,count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c,i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

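/*
 * splice() support for /dev/null: pipe_to_null() just reports each pipe
 * buffer as fully consumed, so spliced data is discarded without being
 * copied anywhere.
 */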
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	size_t written;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	written = 0;
	while (count) {
		unsigned long unwritten;
		size_t chunk = count;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		unwritten = __clear_user(buf, chunk);
		written += chunk - unwritten;
		if (unwritten)
			break;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		buf += chunk;
		count -= chunk;
		cond_resched();
	}
	return written ? written : -EFAULT;
}

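/*
 * mmap() of /dev/zero: a shared mapping is turned into a shmem-backed
 * object via shmem_zero_setup(); a private mapping needs no backing at
 * all and behaves like ordinary anonymous memory.  Without an MMU the
 * operation is not supported.
 */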
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
	switch (orig) {
	case 0:
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	case 1:
		file->f_pos += offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
	return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static const struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
	.get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
	.get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.splice_write	= splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
	.name		= "char/mem",
	.capabilities	= BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

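/*
 * Writes to /dev/kmsg are injected into the kernel log: the user buffer
 * is copied, NUL-terminated and handed to printk().
 */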
static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	ssize_t ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
		if (ret > count)
			/* printk can add a prefix */
			ret = count;
	}
	kfree(tmp);
	return ret;
}

static const struct file_operations kmsg_fops = {
	.write = kmsg_write,
};

static const struct memdev {
	const char *name;
	mode_t mode;
	const struct file_operations *fops;
	struct backing_dev_info *dev_info;
} devlist[] = {
	 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
	 [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, NULL },
#endif
	 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
	 [7] = { "full", 0666, &full_fops, NULL },
	 [8] = { "random", 0666, &random_fops, NULL },
	 [9] = { "urandom", 0666, &urandom_fops, NULL },
	[11] = { "kmsg", 0, &kmsg_fops, NULL },
#ifdef CONFIG_CRASH_DUMP
	[12] = { "oldmem", 0, &oldmem_fops, NULL },
#endif
};
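/*
 * The index into devlist[] is the character-device minor under MEM_MAJOR
 * (major 1), so for example "null" is device (1, 3) and "zero" is (1, 5);
 * the unused slots ([0], [6], [10]) simply have no entry and are rejected
 * by memory_open().
 */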

static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;
	int ret = -ENXIO;

	lock_kernel();

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		goto out;

	dev = &devlist[minor];
	if (!dev->fops)
		goto out;

	filp->f_op = dev->fops;
	if (dev->dev_info)
		filp->f_mapping->backing_dev_info = dev->dev_info;

	if (dev->fops->open)
		ret = dev->fops->open(inode, filp);
	else
		ret = 0;
out:
	unlock_kernel();
	return ret;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
};

static char *mem_devnode(struct device *dev, mode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

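/*
 * Register character major MEM_MAJOR, create the "mem" class so udev can
 * populate /dev, and instantiate one device node per populated devlist[]
 * slot.  The zero_bdi backing_dev_info is initialised first because
 * /dev/zero mappings rely on it.
 */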
static int __init chr_dev_init(void)
{
	int minor;
	int err;

	err = bdi_init(&zero_bdi);
	if (err)
		return err;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;
		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return 0;
}

fs_initcall(chr_dev_init);