/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/pipe_fs_i.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return !(test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
                 test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability))
                && addr >= __pa(high_memory);
#elif defined(__x86_64__)
        /*
         * This is broken because it can generate memory type aliases,
         * which can cause cache corruption.
         * But it is only available for root and we have to be bug-to-bug
         * compatible with i386.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        /* Same behaviour as i386: PAT is always set to cached and the MTRRs
           control the caching behaviour.
           Hopefully a full PAT implementation will fix that soon. */
        return 0;
#elif defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_SYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
        if (addr + count > __pa(high_memory))
                return 0;

        return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

                if (copy_to_user(buf, ptr, sz))
                        return -EFAULT;
                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

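/*
 * This function writes to the *physical* memory. As with read_mem(), the
 * f_pos points directly to the memory location.
 */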
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

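/*
 * Fallback for architectures that do not supply their own
 * phys_mem_access_prot(): mark the mapping non-cacheable whenever
 * uncached_access() says the range should not be cached.
 */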
#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        unsigned long offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

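/*
 * Without an MMU, mmap of /dev/mem can only be a direct window onto
 * physical memory: the "unmapped area" for a given offset is simply
 * that physical address, and private (copy-on-write) mappings are not
 * possible.
 */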
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem   NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

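/*
 * mmap of /dev/mem: remap the requested physical range directly into
 * the caller's address space.
 */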
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}

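/*
 * mmap of /dev/kmem: convert the kernel-virtual offset into a physical
 * page frame and fall through to mmap_mem().
 */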
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > low_count)
                                tmp = low_count;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read += tmp;
                        low_count -= tmp;
                        count -= tmp;
                }
#endif
                while (low_count > 0) {
                        /*
                         * Handle first page in case it's not aligned
                         */
                        if (-p & (PAGE_SIZE - 1))
                                sz = -p & (PAGE_SIZE - 1);
                        else
                                sz = PAGE_SIZE;

                        sz = min_t(unsigned long, sz, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        read += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}

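/*
 * Write 'count' bytes from userspace to the kernel address 'p'; 'realp'
 * is the untranslated address used for the page-zero and alignment
 * checks.  Helper for write_kmem() below.
 */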
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user *buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-realp & (PAGE_SIZE - 1))
                        sz = -realp & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_kmem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        ssize_t written;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {

                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                written = do_write_kmem((void *)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written;
                wrote = written;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        if (wrote + virtr)
                                                break;
                                        free_page((unsigned long)kbuf);
                                        return -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}

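/*
 * /dev/port: byte-wise access to the x86-style I/O port space.  The file
 * position is the port number; reads use inb() and writes use outb().
 */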
#if (defined(CONFIG_ISA) || defined(CONFIG_PCI)) && !defined(__mc68000__)
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp - buf;
}
#endif

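/* /dev/null: reads return EOF, writes succeed but the data is discarded. */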
static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

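/* Splicing into /dev/null simply consumes the pipe buffers. */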
static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user *buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct *vma;
        unsigned long addr = (unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
                        break;
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                zap_page_range(vma, addr, count, NULL);
                if (zeromap_page_range(vma, addr, count, PAGE_COPY))
                        break;

                size -= count;
                buf += count;
                addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                cond_resched();
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;
out_up:
        up_read(&mm->mmap_sem);
        return size;
}

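/*
 * /dev/zero read: fill the user buffer with zeroes, using the
 * page-aligned MMU trick above for large requests.
 */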
static ssize_t read_zero(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE*4) {
                unsigned long partial;

                /* How much left of the page? */
                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out;
                left -= partial;
                buf += partial;
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out;
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out:
        return written ? written : -EFAULT;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
        int err;

        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        err = zeromap_page_range(vma, vma->vm_start,
                        vma->vm_end - vma->vm_start, vma->vm_page_prot);
        BUG_ON(err == -EEXIST);
        return err;
}
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file *file, char *buf,
                         size_t count, loff_t *ppos)
{
        size_t todo = count;

        while (todo) {
                size_t chunk = todo;

                if (chunk > 4096)
                        chunk = 4096;   /* Just for latency reasons */
                if (clear_user(buf, chunk))
                        return -EFAULT;
                buf += chunk;
                todo -= chunk;
                cond_resched();
        }
        return count;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
        return -ENOSYS;
}
#endif /* CONFIG_MMU */

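/* /dev/full: writes always fail with ENOSPC; reads behave like /dev/zero. */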
static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
        switch (orig) {
        case 0:
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        case 1:
                file->f_pos += offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
        return ret;
}

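/*
 * Opening /dev/port requires CAP_SYS_RAWIO; the defines below reuse this
 * check for /dev/mem, /dev/kmem and /dev/oldmem.
 */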
static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
        .get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .splice_write   = splice_write_null,
};

#if (defined(CONFIG_ISA) || defined(CONFIG_PCI)) && !defined(__mc68000__)
static const struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
        .read   = read_oldmem,
        .open   = open_oldmem,
};
#endif

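/*
 * /dev/kmsg: writing injects a NUL-terminated copy of the message into
 * the kernel log via printk().
 */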
static ssize_t kmsg_write(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
                if (ret > count)
                        /* printk can add a prefix */
                        ret = count;
        }
        kfree(tmp);
        return ret;
}

static const struct file_operations kmsg_fops = {
        .write = kmsg_write,
};

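/*
 * The one-and-only open() for the memory devices: dispatch on the minor
 * number to install the real file_operations for the node being opened.
 */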
static int memory_open(struct inode *inode, struct file *filp)
{
        switch (iminor(inode)) {
        case 1:
                filp->f_op = &mem_fops;
                filp->f_mapping->backing_dev_info =
                        &directly_mappable_cdev_bdi;
                break;
        case 2:
                filp->f_op = &kmem_fops;
                filp->f_mapping->backing_dev_info =
                        &directly_mappable_cdev_bdi;
                break;
        case 3:
                filp->f_op = &null_fops;
                break;
#if (defined(CONFIG_ISA) || defined(CONFIG_PCI)) && !defined(__mc68000__)
        case 4:
                filp->f_op = &port_fops;
                break;
#endif
        case 5:
                filp->f_mapping->backing_dev_info = &zero_bdi;
                filp->f_op = &zero_fops;
                break;
        case 7:
                filp->f_op = &full_fops;
                break;
        case 8:
                filp->f_op = &random_fops;
                break;
        case 9:
                filp->f_op = &urandom_fops;
                break;
        case 11:
                filp->f_op = &kmsg_fops;
                break;
#ifdef CONFIG_CRASH_DUMP
        case 12:
                filp->f_op = &oldmem_fops;
                break;
#endif
        default:
                return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode, filp);
        return 0;
}

static const struct file_operations memory_fops = {
        .open   = memory_open, /* just a selector for the real open */
};

static const struct {
        unsigned int minor;
        char *name;
        umode_t mode;
        const struct file_operations *fops;
} devlist[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3, "null",    S_IRUGO | S_IWUGO, &null_fops},
#if (defined(CONFIG_ISA) || defined(CONFIG_PCI)) && !defined(__mc68000__)
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5, "zero",    S_IRUGO | S_IWUGO, &zero_fops},
        {7, "full",    S_IRUGO | S_IWUGO, &full_fops},
        {8, "random",  S_IRUGO | S_IWUSR, &random_fops},
        {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
        {11, "kmsg",   S_IRUGO | S_IWUSR, &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;

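/*
 * Register character major MEM_MAJOR ("mem") and create a "mem" class
 * device for each entry in devlist[].
 */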
static int __init chr_dev_init(void)
{
        int i;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++)
                device_create(mem_class, NULL,
                              MKDEV(MEM_MAJOR, devlist[i].minor),
                              devlist[i].name);

        return 0;
}

fs_initcall(chr_dev_init);