[PATCH] kdump: Accessing dump file in linear raw format (/dev/oldmem)
drivers/char/mem.c

/*
 * linux/drivers/char/mem.c
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 *
 * Added devfs support.
 *   Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#if defined(CONFIG_S390_TAPE) && defined(CONFIG_S390_TAPE_CHAR)
extern void tapechar_init(void);
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
	/*
	 * On the PPro and successors, the MTRRs are used to set
	 * memory types for physical addresses outside main memory,
	 * so blindly setting PCD or PWT on those pages is wrong.
	 * For Pentiums and earlier, the surround logic should disable
	 * caching for the high addresses through the KEN pin, but
	 * we maintain the tradition of paranoia in this code.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
		  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
	  && addr >= __pa(high_memory);
#elif defined(__x86_64__)
	/*
	 * This is broken because it can generate memory type aliases,
	 * which can cause cache corruptions
	 * But it is only available for root and we have to be bug-to-bug
	 * compatible with i386.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	/* same behaviour as i386. PAT always set to cached and MTRRs control the
	   caching behaviour.
	   Hopefully a full PAT implementation will fix that soon. */
	return 0;
#elif defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
	/*
	 * Accessing memory above the highest address the kernel knows about, or
	 * through a file pointer that was marked O_SYNC, will be done non-cached.
	 */
	if (file->f_flags & O_SYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
{
	unsigned long end_mem;

	end_mem = __pa(high_memory);
	if (addr >= end_mem)
		return 0;

	if (*count > end_mem - addr)
		*count = end_mem - addr;

	return 1;
}
#endif

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
			size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t read, sz;
	char *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		if (copy_to_user(buf, ptr, sz))
			return -EFAULT;
		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
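
/*
 * Illustrative user-space sketch (an assumption added for clarity, not part
 * of this driver): /dev/mem is addressed by physical address, so a reader
 * seeks to the physical address and reads.  The address below is made up.
 *
 *	int fd = open("/dev/mem", O_RDONLY | O_SYNC);
 *	unsigned char page[4096];
 *	if (fd >= 0 && lseek(fd, 0x9f000, SEEK_SET) != (off_t)-1)
 *		read(fd, page, sizeof(page));
 */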

static ssize_t write_mem(struct file * file, const char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (!valid_phys_addr_range(p, &count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - p;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-p & (PAGE_SIZE - 1))
			sz = -p & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_mem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
#if defined(__HAVE_PHYS_MEM_ACCESS_PROT)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;

	vma->vm_page_prot = phys_mem_access_prot(file, offset,
						 vma->vm_end - vma->vm_start,
						 vma->vm_page_prot);
#elif defined(pgprot_noncached)
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	int uncached;

	uncached = uncached_access(file, offset);
	if (uncached)
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif

	/* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    vma->vm_end-vma->vm_start,
			    vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
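
/*
 * Illustrative user-space sketch (an assumption, not part of this driver):
 * the mmap() offset on /dev/mem is the page-aligned physical address to map,
 * which mmap_mem() above hands to remap_pfn_range() as a page frame number.
 *
 *	void *va = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0x9f000);
 */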

static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
	unsigned long long val;
	/*
	 * RED-PEN: on some architectures there is more mapped memory
	 * than available in mem_map which pfn_valid checks
	 * for. Perhaps should add a new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(vma->vm_pgoff))
		return -EIO;
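	/*
	 * The mmap offset on /dev/kmem is a kernel virtual address; convert
	 * it to the corresponding physical page frame and let mmap_mem()
	 * do the actual mapping.
	 */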
	val = (u64)vma->vm_pgoff << PAGE_SHIFT;
	vma->vm_pgoff = __pa(val) >> PAGE_SHIFT;
	return mmap_mem(file, vma);
}

#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 * If we are reading from the reserved section, which is
 * actually used by the current kernel, we just return zeroes.
 * If we are reading from the first 640k, we read from the
 * backed-up copy of that area instead.
 */
static ssize_t read_oldmem(struct file * file, char * buf,
				size_t count, loff_t *ppos)
{
	unsigned long pfn;
	unsigned backup_start, backup_end, relocate_start;
	size_t read = 0, csize;

	backup_start = CRASH_BACKUP_BASE / PAGE_SIZE;
	backup_end = backup_start + (CRASH_BACKUP_SIZE / PAGE_SIZE);
	relocate_start = (CRASH_BACKUP_BASE + CRASH_BACKUP_SIZE) / PAGE_SIZE;

	while (count) {
		pfn = *ppos / PAGE_SIZE;

		csize = (count > PAGE_SIZE) ? PAGE_SIZE : count;

		/* Perform translation (see comment above) */
		if ((pfn >= backup_start) && (pfn < backup_end)) {
			if (clear_user(buf, csize)) {
				read = -EFAULT;
				goto done;
			}

			goto copy_done;
		} else if (pfn < (CRASH_RELOCATE_SIZE / PAGE_SIZE))
			pfn += relocate_start;

		if (pfn > saved_max_pfn) {
			read = 0;
			goto done;
		}

		if (copy_oldmem_page(pfn, buf, csize, 1)) {
			read = -EFAULT;
			goto done;
		}

copy_done:
		buf += csize;
		*ppos += csize;
		read += csize;
		count -= csize;
	}
done:
	return read;
}
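
/*
 * Illustrative user-space sketch (an assumption, not part of this driver):
 * /dev/oldmem exposes the crashed kernel's memory linearly by physical
 * address, so a dump tool can walk it page by page; a read past
 * saved_max_pfn returns 0 (end of file).  dump_fd below is an assumed
 * descriptor for the output file.
 *
 *	int fd = open("/dev/oldmem", O_RDONLY);
 *	char page[4096];
 *	ssize_t n;
 *	while ((n = read(fd, page, sizeof(page))) > 0)
 *		write(dump_fd, page, n);
 */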
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long) high_memory - p)
			low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			size_t tmp = PAGE_SIZE - p;
			if (tmp > low_count) tmp = low_count;
			if (clear_user(buf, tmp))
				return -EFAULT;
			buf += tmp;
			p += tmp;
			read += tmp;
			low_count -= tmp;
			count -= tmp;
		}
#endif
		while (low_count > 0) {
			/*
			 * Handle first page in case it's not aligned
			 */
			if (-p & (PAGE_SIZE - 1))
				sz = -p & (PAGE_SIZE - 1);
			else
				sz = PAGE_SIZE;

			sz = min_t(unsigned long, sz, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((char *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			len = vread(kbuf, (char *)p, len);
			if (!len)
				break;
			if (copy_to_user(buf, kbuf, len)) {
				free_page((unsigned long)kbuf);
				return -EFAULT;
			}
			count -= len;
			buf += len;
			read += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read;
}


static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
	      size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (realp < PAGE_SIZE) {
		unsigned long sz = PAGE_SIZE - realp;
		if (sz > count)
			sz = count;
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		char *ptr;
		/*
		 * Handle first page in case it's not aligned
		 */
		if (-realp & (PAGE_SIZE - 1))
			sz = -realp & (PAGE_SIZE - 1);
		else
			sz = PAGE_SIZE;

		sz = min_t(unsigned long, sz, count);

		/*
		 * On ia64 if a page has been mapped somewhere as
		 * uncached, then it must also be accessed uncached
		 * by the kernel or data corruption may occur
		 */
		ptr = xlate_dev_kmem_ptr(p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			ssize_t ret;

			ret = written + (sz - copied);
			if (ret)
				return ret;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		realp += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	ssize_t written;
	char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

	if (p < (unsigned long) high_memory) {

		wrote = count;
		if (count > (unsigned long) high_memory - p)
			wrote = (unsigned long) high_memory - p;

		written = do_write_kmem((void*)p, p, buf, wrote, ppos);
		if (written != wrote)
			return written;
		wrote = written;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			int len = count;

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;
			if (len) {
				written = copy_from_user(kbuf, buf, len);
				if (written) {
					ssize_t ret;

					free_page((unsigned long)kbuf);
					ret = wrote + virtr + (len - written);
					return ret ? ret : -EFAULT;
				}
			}
			len = vwrite(kbuf, (char *)p, len);
			count -= len;
			buf += len;
			virtr += len;
			p += len;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote;
}

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static ssize_t read_port(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i),tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user * tmp = buf;

	if (!access_ok(VERIFY_READ,buf,count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;
		if (__get_user(c, tmp))
			return -EFAULT;
		outb(c,i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
	struct mm_struct *mm;
	struct vm_area_struct * vma;
	unsigned long addr = (unsigned long)buf;

	mm = current->mm;
	/* Oops, this was forgotten before. -ben */
	down_read(&mm->mmap_sem);

	/* For private mappings, just map in zero pages. */
	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
		unsigned long count;

		if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
			goto out_up;
		if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
			break;
		count = vma->vm_end - addr;
		if (count > size)
			count = size;

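		/*
		 * Drop whatever is currently mapped in this range and map the
		 * zero page there copy-on-write instead, so the "read" is
		 * satisfied without copying any data.
		 */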
		zap_page_range(vma, addr, count, NULL);
		zeromap_page_range(vma, addr, count, PAGE_COPY);

		size -= count;
		buf += count;
		addr += count;
		if (size == 0)
			goto out_up;
	}

	up_read(&mm->mmap_sem);

	/* The shared case is hard. Let's do the conventional zeroing. */
	do {
		unsigned long unwritten = clear_user(buf, PAGE_SIZE);
		if (unwritten)
			return size + unwritten - PAGE_SIZE;
		cond_resched();
		buf += PAGE_SIZE;
		size -= PAGE_SIZE;
	} while (size);

	return size;
out_up:
	up_read(&mm->mmap_sem);
	return size;
}

static ssize_t read_zero(struct file * file, char __user * buf,
			 size_t count, loff_t *ppos)
{
	unsigned long left, unwritten, written = 0;

	if (!count)
		return 0;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	left = count;

	/* do we want to be clever? Arbitrary cut-off */
	if (count >= PAGE_SIZE*4) {
		unsigned long partial;

		/* How much left of the page? */
		partial = (PAGE_SIZE-1) & -(unsigned long) buf;
		unwritten = clear_user(buf, partial);
		written = partial - unwritten;
		if (unwritten)
			goto out;
		left -= partial;
		buf += partial;
		unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
		written += (left & PAGE_MASK) - unwritten;
		if (unwritten)
			goto out;
		buf += left & PAGE_MASK;
		left &= ~PAGE_MASK;
	}
	unwritten = clear_user(buf, left);
	written += left - unwritten;
out:
	return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot))
		return -EAGAIN;
	return 0;
}
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
			 size_t count, loff_t *ppos)
{
	size_t todo = count;

	while (todo) {
		size_t chunk = todo;

		if (chunk > 4096)
			chunk = 4096;	/* Just for latency reasons */
		if (clear_user(buf, chunk))
			return -EFAULT;
		buf += chunk;
		todo -= chunk;
		cond_resched();
	}
	return count;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
	return -ENOSYS;
}
#endif /* CONFIG_MMU */

static ssize_t write_full(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */

static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}
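
/*
 * Illustrative example (an assumption about C library behaviour, not part of
 * this driver): fopen("/dev/null", "a") has the C library seek towards the
 * "end" of the device; null_lseek() simply reports offset 0, so the
 * append-mode open succeeds instead of failing.
 */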

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
	loff_t ret;

	down(&file->f_dentry->d_inode->i_sem);
	switch (orig) {
		case 0:
			file->f_pos = offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		case 1:
			file->f_pos += offset;
			ret = file->f_pos;
			force_successful_syscall_return();
			break;
		default:
			ret = -EINVAL;
	}
	up(&file->f_dentry->d_inode->i_sem);
	return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define read_full	read_zero
#define open_mem	open_port
#define open_kmem	open_mem
#define open_oldmem	open_mem

static struct file_operations mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
};

static struct file_operations kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
};

static struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
};

#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
static struct file_operations port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};
#endif

static struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.read		= read_zero,
	.write		= write_zero,
	.mmap		= mmap_zero,
};

static struct backing_dev_info zero_bdi = {
	.capabilities	= BDI_CAP_MAP_COPY,
};

static struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read		= read_full,
	.write		= write_full,
};

#ifdef CONFIG_CRASH_DUMP
static struct file_operations oldmem_fops = {
	.read	= read_oldmem,
	.open	= open_oldmem,
};
#endif

static ssize_t kmsg_write(struct file * file, const char __user * buf,
			  size_t count, loff_t *ppos)
{
	char *tmp;
	int ret;

	tmp = kmalloc(count + 1, GFP_KERNEL);
	if (tmp == NULL)
		return -ENOMEM;
	ret = -EFAULT;
	if (!copy_from_user(tmp, buf, count)) {
		tmp[count] = 0;
		ret = printk("%s", tmp);
	}
	kfree(tmp);
	return ret;
}
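
/*
 * Illustrative usage (not part of this driver): writing to the kmsg minor
 * injects the text into the kernel log via printk(), e.g. from a shell:
 *
 *	echo "hello from user space" > /dev/kmsg
 */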

static struct file_operations kmsg_fops = {
	.write =	kmsg_write,
};

static int memory_open(struct inode * inode, struct file * filp)
{
	switch (iminor(inode)) {
		case 1:
			filp->f_op = &mem_fops;
			break;
		case 2:
			filp->f_op = &kmem_fops;
			break;
		case 3:
			filp->f_op = &null_fops;
			break;
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
		case 4:
			filp->f_op = &port_fops;
			break;
#endif
		case 5:
			filp->f_mapping->backing_dev_info = &zero_bdi;
			filp->f_op = &zero_fops;
			break;
		case 7:
			filp->f_op = &full_fops;
			break;
		case 8:
			filp->f_op = &random_fops;
			break;
		case 9:
			filp->f_op = &urandom_fops;
			break;
		case 11:
			filp->f_op = &kmsg_fops;
			break;
#ifdef CONFIG_CRASH_DUMP
		case 12:
			filp->f_op = &oldmem_fops;
			break;
#endif
		default:
			return -ENXIO;
	}
	if (filp->f_op && filp->f_op->open)
		return filp->f_op->open(inode,filp);
	return 0;
}

static struct file_operations memory_fops = {
	.open		= memory_open,	/* just a selector for the real open */
};

static const struct {
	unsigned int		minor;
	char			*name;
	umode_t			mode;
	struct file_operations	*fops;
} devlist[] = { /* list of minor devices */
	{1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
	{2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
	{3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if (defined(CONFIG_ISA) || !defined(__mc68000__)) && (!defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI))
	{4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
	{5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
	{7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
	{8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
	{9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
	{11,"kmsg",    S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
	{12,"oldmem",  S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int i;

	if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	for (i = 0; i < ARRAY_SIZE(devlist); i++) {
		class_device_create(mem_class, MKDEV(MEM_MAJOR, devlist[i].minor),
					NULL, devlist[i].name);
		devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
				S_IFCHR | devlist[i].mode, devlist[i].name);
	}

	return 0;
}

fs_initcall(chr_dev_init);