Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * linux/drivers/char/mem.c | |
3 | * | |
4 | * Copyright (C) 1991, 1992 Linus Torvalds | |
5 | * | |
6 | * Added devfs support. | |
7 | * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu> | |
8 | * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com> | |
9 | */ | |
10 | ||
11 | #include <linux/config.h> | |
12 | #include <linux/mm.h> | |
13 | #include <linux/miscdevice.h> | |
14 | #include <linux/slab.h> | |
15 | #include <linux/vmalloc.h> | |
16 | #include <linux/mman.h> | |
17 | #include <linux/random.h> | |
18 | #include <linux/init.h> | |
19 | #include <linux/raw.h> | |
20 | #include <linux/tty.h> | |
21 | #include <linux/capability.h> | |
22 | #include <linux/smp_lock.h> | |
23 | #include <linux/devfs_fs_kernel.h> | |
24 | #include <linux/ptrace.h> | |
25 | #include <linux/device.h> | |
50b1fdbd VG |
26 | #include <linux/highmem.h> |
27 | #include <linux/crash_dump.h> | |
1da177e4 | 28 | #include <linux/backing-dev.h> |
315c215c | 29 | #include <linux/bootmem.h> |
1da177e4 LT |
30 | |
31 | #include <asm/uaccess.h> | |
32 | #include <asm/io.h> | |
33 | ||
34 | #ifdef CONFIG_IA64 | |
35 | # include <linux/efi.h> | |
36 | #endif | |
37 | ||
1da177e4 LT |
38 | /* |
39 | * Architectures vary in how they handle caching for addresses | |
40 | * outside of main memory. | |
41 | * | |
42 | */ | |
43 | static inline int uncached_access(struct file *file, unsigned long addr) | |
44 | { | |
45 | #if defined(__i386__) | |
46 | /* | |
47 | * On the PPro and successors, the MTRRs are used to set | |
48 | * memory types for physical addresses outside main memory, | |
49 | * so blindly setting PCD or PWT on those pages is wrong. | |
50 | * For Pentiums and earlier, the surround logic should disable | |
51 | * caching for the high addresses through the KEN pin, but | |
52 | * we maintain the tradition of paranoia in this code. | |
53 | */ | |
54 | if (file->f_flags & O_SYNC) | |
55 | return 1; | |
56 | return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) || | |
57 | test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) || | |
58 | test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) || | |
59 | test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) ) | |
60 | && addr >= __pa(high_memory); | |
61 | #elif defined(__x86_64__) | |
62 | /* | |
63 | * This is broken because it can generate memory type aliases, | |
64 | * which can cause cache corruptions. | |
65 | * But it is only available for root and we have to be bug-to-bug | |
66 | * compatible with i386. | |
67 | */ | |
68 | if (file->f_flags & O_SYNC) | |
69 | return 1; | |
70 | /* same behaviour as i386. PAT always set to cached and MTRRs control the | |
71 | caching behaviour. | |
72 | Hopefully a full PAT implementation will fix that soon. */ | |
73 | return 0; | |
74 | #elif defined(CONFIG_IA64) | |
75 | /* | |
76 | * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases. | |
77 | */ | |
78 | return !(efi_mem_attributes(addr) & EFI_MEMORY_WB); | |
79 | #else | |
80 | /* | |
81 | * Accessing memory above the top of memory the kernel knows about, or through a file pointer |
82 | * that was marked O_SYNC will be done non-cached. | |
83 | */ | |
84 | if (file->f_flags & O_SYNC) | |
85 | return 1; | |
86 | return addr >= __pa(high_memory); | |
87 | #endif | |
88 | } | |
89 | ||
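The uncached-versus-cached decision above is driven entirely by how userspace opens the device. As a minimal illustration (a hypothetical userspace sketch, not part of mem.c; the legacy VGA window at 0xA0000 is only an example address), opening /dev/mem with O_SYNC and mmap()ing it goes through uncached_access() via phys_mem_access_prot() further down and, on x86, yields an uncached mapping:

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* O_SYNC is what makes uncached_access() report this file as uncacheable */
	int fd = open("/dev/mem", O_RDWR | O_SYNC);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	/* map one page of the legacy VGA window; 0xA0000 is only an example */
	volatile unsigned char *vga = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
					   MAP_SHARED, fd, 0xA0000);
	if (vga == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}
	unsigned char c = vga[0];	/* load through the (uncached) mapping */
	(void)c;
	munmap((void *)vga, 4096);
	close(fd);
	return 0;
}
```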
90 | #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE | |
91 | static inline int valid_phys_addr_range(unsigned long addr, size_t *count) | |
92 | { | |
93 | unsigned long end_mem; | |
94 | ||
95 | end_mem = __pa(high_memory); | |
96 | if (addr >= end_mem) | |
97 | return 0; | |
98 | ||
99 | if (*count > end_mem - addr) | |
100 | *count = end_mem - addr; | |
101 | ||
102 | return 1; | |
103 | } | |
80851ef2 BH |
104 | |
105 | static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t *size) | |
106 | { | |
107 | return 1; | |
108 | } | |
1da177e4 LT |
109 | #endif |
110 | ||
111 | /* | |
112 | * This function reads the *physical* memory. The f_pos points directly to the |
113 | * memory location. | |
114 | */ | |
115 | static ssize_t read_mem(struct file * file, char __user * buf, | |
116 | size_t count, loff_t *ppos) | |
117 | { | |
118 | unsigned long p = *ppos; | |
119 | ssize_t read, sz; | |
120 | char *ptr; | |
121 | ||
122 | if (!valid_phys_addr_range(p, &count)) | |
123 | return -EFAULT; | |
124 | read = 0; | |
125 | #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED | |
126 | /* we don't have page 0 mapped on sparc and m68k.. */ | |
127 | if (p < PAGE_SIZE) { | |
128 | sz = PAGE_SIZE - p; | |
129 | if (sz > count) | |
130 | sz = count; | |
131 | if (sz > 0) { | |
132 | if (clear_user(buf, sz)) | |
133 | return -EFAULT; | |
134 | buf += sz; | |
135 | p += sz; | |
136 | count -= sz; | |
137 | read += sz; | |
138 | } | |
139 | } | |
140 | #endif | |
141 | ||
142 | while (count > 0) { | |
143 | /* | |
144 | * Handle first page in case it's not aligned | |
145 | */ | |
146 | if (-p & (PAGE_SIZE - 1)) | |
147 | sz = -p & (PAGE_SIZE - 1); | |
148 | else | |
149 | sz = PAGE_SIZE; | |
150 | ||
151 | sz = min_t(unsigned long, sz, count); | |
152 | ||
153 | /* | |
154 | * On ia64 if a page has been mapped somewhere as | |
155 | * uncached, then it must also be accessed uncached | |
156 | * by the kernel or data corruption may occur | |
157 | */ | |
158 | ptr = xlate_dev_mem_ptr(p); | |
159 | ||
160 | if (copy_to_user(buf, ptr, sz)) | |
161 | return -EFAULT; | |
162 | buf += sz; | |
163 | p += sz; | |
164 | count -= sz; | |
165 | read += sz; | |
166 | } | |
167 | ||
168 | *ppos += read; | |
169 | return read; | |
170 | } | |
171 | ||
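For reference, a minimal userspace sketch of this read path (not part of mem.c; the BIOS-shadow address 0xF0000 is only an example): the file position is the physical address, so a plain lseek()+read() is serviced by read_mem() above.

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[256];
	int fd = open("/dev/mem", O_RDONLY);
	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	/* f_pos is interpreted as a physical address by read_mem() */
	if (lseek(fd, 0xF0000, SEEK_SET) == (off_t)-1) {
		perror("lseek");
		close(fd);
		return 1;
	}
	ssize_t n = read(fd, buf, sizeof(buf));
	printf("read %zd bytes of physical memory\n", n);
	close(fd);
	return 0;
}
```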
172 | static ssize_t write_mem(struct file * file, const char __user * buf, | |
173 | size_t count, loff_t *ppos) | |
174 | { | |
175 | unsigned long p = *ppos; | |
176 | ssize_t written, sz; | |
177 | unsigned long copied; | |
178 | void *ptr; | |
179 | ||
180 | if (!valid_phys_addr_range(p, &count)) | |
181 | return -EFAULT; | |
182 | ||
183 | written = 0; | |
184 | ||
185 | #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED | |
186 | /* we don't have page 0 mapped on sparc and m68k.. */ | |
187 | if (p < PAGE_SIZE) { | |
188 | unsigned long sz = PAGE_SIZE - p; | |
189 | if (sz > count) | |
190 | sz = count; | |
191 | /* Hmm. Do something? */ | |
192 | buf += sz; | |
193 | p += sz; | |
194 | count -= sz; | |
195 | written += sz; | |
196 | } | |
197 | #endif | |
198 | ||
199 | while (count > 0) { | |
200 | /* | |
201 | * Handle first page in case it's not aligned | |
202 | */ | |
203 | if (-p & (PAGE_SIZE - 1)) | |
204 | sz = -p & (PAGE_SIZE - 1); | |
205 | else | |
206 | sz = PAGE_SIZE; | |
207 | ||
208 | sz = min_t(unsigned long, sz, count); | |
209 | ||
210 | /* | |
211 | * On ia64 if a page has been mapped somewhere as | |
212 | * uncached, then it must also be accessed uncached | |
213 | * by the kernel or data corruption may occur | |
214 | */ | |
215 | ptr = xlate_dev_mem_ptr(p); | |
216 | ||
217 | copied = copy_from_user(ptr, buf, sz); | |
218 | if (copied) { | |
c654d60e JB |
219 | written += sz - copied; |
220 | if (written) | |
221 | break; | |
1da177e4 LT |
222 | return -EFAULT; |
223 | } | |
224 | buf += sz; | |
225 | p += sz; | |
226 | count -= sz; | |
227 | written += sz; | |
228 | } | |
229 | ||
230 | *ppos += written; | |
231 | return written; | |
232 | } | |
233 | ||
44ac8413 BH |
234 | #ifndef __HAVE_PHYS_MEM_ACCESS_PROT |
235 | static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | |
236 | unsigned long size, pgprot_t vma_prot) | |
237 | { | |
238 | #ifdef pgprot_noncached | |
239 | unsigned long offset = pfn << PAGE_SHIFT; | |
240 | ||
241 | if (uncached_access(file, offset)) | |
242 | return pgprot_noncached(vma_prot); | |
243 | #endif | |
244 | return vma_prot; | |
245 | } | |
246 | #endif | |
247 | ||
1da177e4 LT |
248 | static int mmap_mem(struct file * file, struct vm_area_struct * vma) |
249 | { | |
80851ef2 BH |
250 | size_t size = vma->vm_end - vma->vm_start; |
251 | ||
252 | if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, &size)) | |
253 | return -EINVAL; | |
254 | ||
8b150478 | 255 | vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, |
80851ef2 | 256 | size, |
1da177e4 | 257 | vma->vm_page_prot); |
1da177e4 LT |
258 | |
259 | /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */ | |
260 | if (remap_pfn_range(vma, | |
261 | vma->vm_start, | |
262 | vma->vm_pgoff, | |
80851ef2 | 263 | size, |
1da177e4 LT |
264 | vma->vm_page_prot)) |
265 | return -EAGAIN; | |
266 | return 0; | |
267 | } | |
268 | ||
269 | static int mmap_kmem(struct file * file, struct vm_area_struct * vma) | |
270 | { | |
4bb82551 LT |
271 | unsigned long pfn; |
272 | ||
273 | /* Turn a kernel-virtual address into a physical page frame */ | |
274 | pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT; | |
275 | ||
1da177e4 LT |
276 | /* |
277 | * RED-PEN: on some architectures there is more mapped memory | |
278 | * than available in mem_map which pfn_valid checks | |
279 | * for. Perhaps should add a new macro here. | |
280 | * | |
281 | * RED-PEN: vmalloc is not supported right now. | |
282 | */ | |
4bb82551 | 283 | if (!pfn_valid(pfn)) |
1da177e4 | 284 | return -EIO; |
4bb82551 LT |
285 | |
286 | vma->vm_pgoff = pfn; | |
1da177e4 LT |
287 | return mmap_mem(file, vma); |
288 | } | |
289 | ||
50b1fdbd VG |
290 | #ifdef CONFIG_CRASH_DUMP |
291 | /* | |
292 | * Read memory corresponding to the old kernel. | |
50b1fdbd | 293 | */ |
315c215c | 294 | static ssize_t read_oldmem(struct file *file, char __user *buf, |
50b1fdbd VG |
295 | size_t count, loff_t *ppos) |
296 | { | |
315c215c VG |
297 | unsigned long pfn, offset; |
298 | size_t read = 0, csize; | |
299 | int rc = 0; | |
50b1fdbd | 300 | |
72414d3f | 301 | while (count) { |
50b1fdbd | 302 | pfn = *ppos / PAGE_SIZE; |
315c215c VG |
303 | if (pfn > saved_max_pfn) |
304 | return read; | |
50b1fdbd | 305 | |
315c215c VG |
306 | offset = (unsigned long)(*ppos % PAGE_SIZE); |
307 | if (count > PAGE_SIZE - offset) | |
308 | csize = PAGE_SIZE - offset; | |
309 | else | |
310 | csize = count; | |
50b1fdbd | 311 | |
315c215c VG |
312 | rc = copy_oldmem_page(pfn, buf, csize, offset, 1); |
313 | if (rc < 0) | |
314 | return rc; | |
50b1fdbd VG |
315 | buf += csize; |
316 | *ppos += csize; | |
317 | read += csize; | |
318 | count -= csize; | |
319 | } | |
50b1fdbd VG |
320 | return read; |
321 | } | |
322 | #endif | |
323 | ||
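A dump-capture tool running in the kdump kernel would drive read_oldmem() roughly like the hedged sketch below (not part of mem.c; the 4 KB page size, the output file name, and the minimal error handling are placeholder assumptions):

```c
#include <fcntl.h>
#include <unistd.h>

#define PAGE_SZ 4096	/* assumed page size for this sketch */

int main(void)
{
	char page[PAGE_SZ];
	int out = open("vmcore.raw", O_WRONLY | O_CREAT | O_TRUNC, 0600);
	int fd = open("/dev/oldmem", O_RDONLY);

	if (fd < 0 || out < 0)
		return 1;
	for (;;) {
		/* read_oldmem() turns *ppos into an old-kernel pfn and copies
		 * via copy_oldmem_page() until it passes saved_max_pfn */
		ssize_t n = read(fd, page, PAGE_SZ);
		if (n <= 0)
			break;
		write(out, page, n);
	}
	close(fd);
	close(out);
	return 0;
}
```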
1da177e4 LT |
324 | extern long vread(char *buf, char *addr, unsigned long count); |
325 | extern long vwrite(char *buf, char *addr, unsigned long count); | |
326 | ||
327 | /* | |
328 | * This function reads the *virtual* memory as seen by the kernel. | |
329 | */ | |
330 | static ssize_t read_kmem(struct file *file, char __user *buf, | |
331 | size_t count, loff_t *ppos) | |
332 | { | |
333 | unsigned long p = *ppos; | |
334 | ssize_t low_count, read, sz; | |
335 | char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ | |
336 | ||
337 | read = 0; | |
338 | if (p < (unsigned long) high_memory) { | |
339 | low_count = count; | |
340 | if (count > (unsigned long) high_memory - p) | |
341 | low_count = (unsigned long) high_memory - p; | |
342 | ||
343 | #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED | |
344 | /* we don't have page 0 mapped on sparc and m68k.. */ | |
345 | if (p < PAGE_SIZE && low_count > 0) { | |
346 | size_t tmp = PAGE_SIZE - p; | |
347 | if (tmp > low_count) tmp = low_count; | |
348 | if (clear_user(buf, tmp)) | |
349 | return -EFAULT; | |
350 | buf += tmp; | |
351 | p += tmp; | |
352 | read += tmp; | |
353 | low_count -= tmp; | |
354 | count -= tmp; | |
355 | } | |
356 | #endif | |
357 | while (low_count > 0) { | |
358 | /* | |
359 | * Handle first page in case it's not aligned | |
360 | */ | |
361 | if (-p & (PAGE_SIZE - 1)) | |
362 | sz = -p & (PAGE_SIZE - 1); | |
363 | else | |
364 | sz = PAGE_SIZE; | |
365 | ||
366 | sz = min_t(unsigned long, sz, low_count); | |
367 | ||
368 | /* | |
369 | * On ia64 if a page has been mapped somewhere as | |
370 | * uncached, then it must also be accessed uncached | |
371 | * by the kernel or data corruption may occur | |
372 | */ | |
373 | kbuf = xlate_dev_kmem_ptr((char *)p); | |
374 | ||
375 | if (copy_to_user(buf, kbuf, sz)) | |
376 | return -EFAULT; | |
377 | buf += sz; | |
378 | p += sz; | |
379 | read += sz; | |
380 | low_count -= sz; | |
381 | count -= sz; | |
382 | } | |
383 | } | |
384 | ||
385 | if (count > 0) { | |
386 | kbuf = (char *)__get_free_page(GFP_KERNEL); | |
387 | if (!kbuf) | |
388 | return -ENOMEM; | |
389 | while (count > 0) { | |
390 | int len = count; | |
391 | ||
392 | if (len > PAGE_SIZE) | |
393 | len = PAGE_SIZE; | |
394 | len = vread(kbuf, (char *)p, len); | |
395 | if (!len) | |
396 | break; | |
397 | if (copy_to_user(buf, kbuf, len)) { | |
398 | free_page((unsigned long)kbuf); | |
399 | return -EFAULT; | |
400 | } | |
401 | count -= len; | |
402 | buf += len; | |
403 | read += len; | |
404 | p += len; | |
405 | } | |
406 | free_page((unsigned long)kbuf); | |
407 | } | |
408 | *ppos = p; | |
409 | return read; | |
410 | } | |
411 | ||
412 | ||
413 | static inline ssize_t | |
414 | do_write_kmem(void *p, unsigned long realp, const char __user * buf, | |
415 | size_t count, loff_t *ppos) | |
416 | { | |
417 | ssize_t written, sz; | |
418 | unsigned long copied; | |
419 | ||
420 | written = 0; | |
421 | #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED | |
422 | /* we don't have page 0 mapped on sparc and m68k.. */ | |
423 | if (realp < PAGE_SIZE) { | |
424 | unsigned long sz = PAGE_SIZE - realp; | |
425 | if (sz > count) | |
426 | sz = count; | |
427 | /* Hmm. Do something? */ | |
428 | buf += sz; | |
429 | p += sz; | |
430 | realp += sz; | |
431 | count -= sz; | |
432 | written += sz; | |
433 | } | |
434 | #endif | |
435 | ||
436 | while (count > 0) { | |
437 | char *ptr; | |
438 | /* | |
439 | * Handle first page in case it's not aligned | |
440 | */ | |
441 | if (-realp & (PAGE_SIZE - 1)) | |
442 | sz = -realp & (PAGE_SIZE - 1); | |
443 | else | |
444 | sz = PAGE_SIZE; | |
445 | ||
446 | sz = min_t(unsigned long, sz, count); | |
447 | ||
448 | /* | |
449 | * On ia64 if a page has been mapped somewhere as | |
450 | * uncached, then it must also be accessed uncached | |
451 | * by the kernel or data corruption may occur | |
452 | */ | |
453 | ptr = xlate_dev_kmem_ptr(p); | |
454 | ||
455 | copied = copy_from_user(ptr, buf, sz); | |
456 | if (copied) { | |
c654d60e JB |
457 | written += sz - copied; |
458 | if (written) | |
459 | break; | |
1da177e4 LT |
460 | return -EFAULT; |
461 | } | |
462 | buf += sz; | |
463 | p += sz; | |
464 | realp += sz; | |
465 | count -= sz; | |
466 | written += sz; | |
467 | } | |
468 | ||
469 | *ppos += written; | |
470 | return written; | |
471 | } | |
472 | ||
473 | ||
474 | /* | |
475 | * This function writes to the *virtual* memory as seen by the kernel. | |
476 | */ | |
477 | static ssize_t write_kmem(struct file * file, const char __user * buf, | |
478 | size_t count, loff_t *ppos) | |
479 | { | |
480 | unsigned long p = *ppos; | |
481 | ssize_t wrote = 0; | |
482 | ssize_t virtr = 0; | |
483 | ssize_t written; | |
484 | char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ | |
485 | ||
486 | if (p < (unsigned long) high_memory) { | |
487 | ||
488 | wrote = count; | |
489 | if (count > (unsigned long) high_memory - p) | |
490 | wrote = (unsigned long) high_memory - p; | |
491 | ||
492 | written = do_write_kmem((void*)p, p, buf, wrote, ppos); | |
493 | if (written != wrote) | |
494 | return written; | |
495 | wrote = written; | |
496 | p += wrote; | |
497 | buf += wrote; | |
498 | count -= wrote; | |
499 | } | |
500 | ||
501 | if (count > 0) { | |
502 | kbuf = (char *)__get_free_page(GFP_KERNEL); | |
503 | if (!kbuf) | |
504 | return wrote ? wrote : -ENOMEM; | |
505 | while (count > 0) { | |
506 | int len = count; | |
507 | ||
508 | if (len > PAGE_SIZE) | |
509 | len = PAGE_SIZE; | |
510 | if (len) { | |
511 | written = copy_from_user(kbuf, buf, len); | |
512 | if (written) { | |
c654d60e JB |
513 | if (wrote + virtr) |
514 | break; | |
1da177e4 | 515 | free_page((unsigned long)kbuf); |
c654d60e | 516 | return -EFAULT; |
1da177e4 LT |
517 | } |
518 | } | |
519 | len = vwrite(kbuf, (char *)p, len); | |
520 | count -= len; | |
521 | buf += len; | |
522 | virtr += len; | |
523 | p += len; | |
524 | } | |
525 | free_page((unsigned long)kbuf); | |
526 | } | |
527 | ||
528 | *ppos = p; | |
529 | return virtr + wrote; | |
530 | } | |
531 | ||
ee2cdece | 532 | #if defined(CONFIG_ISA) || !defined(__mc68000__) |
1da177e4 LT |
533 | static ssize_t read_port(struct file * file, char __user * buf, |
534 | size_t count, loff_t *ppos) | |
535 | { | |
536 | unsigned long i = *ppos; | |
537 | char __user *tmp = buf; | |
538 | ||
539 | if (!access_ok(VERIFY_WRITE, buf, count)) | |
540 | return -EFAULT; | |
541 | while (count-- > 0 && i < 65536) { | |
542 | if (__put_user(inb(i),tmp) < 0) | |
543 | return -EFAULT; | |
544 | i++; | |
545 | tmp++; | |
546 | } | |
547 | *ppos = i; | |
548 | return tmp-buf; | |
549 | } | |
550 | ||
551 | static ssize_t write_port(struct file * file, const char __user * buf, | |
552 | size_t count, loff_t *ppos) | |
553 | { | |
554 | unsigned long i = *ppos; | |
555 | const char __user * tmp = buf; | |
556 | ||
557 | if (!access_ok(VERIFY_READ,buf,count)) | |
558 | return -EFAULT; | |
559 | while (count-- > 0 && i < 65536) { | |
560 | char c; | |
c654d60e JB |
561 | if (__get_user(c, tmp)) { |
562 | if (tmp > buf) | |
563 | break; | |
1da177e4 | 564 | return -EFAULT; |
c654d60e | 565 | } |
1da177e4 LT |
566 | outb(c,i); |
567 | i++; | |
568 | tmp++; | |
569 | } | |
570 | *ppos = i; | |
571 | return tmp-buf; | |
572 | } | |
573 | #endif | |
574 | ||
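Userspace reaches these handlers through /dev/port, where the file offset selects the I/O port number. A hypothetical sketch (not part of mem.c; port 0x64, the PC keyboard-controller status register, is only an example, and the caller needs CAP_SYS_RAWIO to get past open_port()):

```c
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char status;
	int fd = open("/dev/port", O_RDONLY);	/* open_port() checks CAP_SYS_RAWIO */
	if (fd < 0) {
		perror("open /dev/port");
		return 1;
	}
	/* seek to the port number, then read one byte: read_port() does inb(0x64) */
	if (lseek(fd, 0x64, SEEK_SET) == (off_t)-1 || read(fd, &status, 1) != 1) {
		perror("port read");
		close(fd);
		return 1;
	}
	printf("status register: 0x%02x\n", status);
	close(fd);
	return 0;
}
```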
575 | static ssize_t read_null(struct file * file, char __user * buf, | |
576 | size_t count, loff_t *ppos) | |
577 | { | |
578 | return 0; | |
579 | } | |
580 | ||
581 | static ssize_t write_null(struct file * file, const char __user * buf, | |
582 | size_t count, loff_t *ppos) | |
583 | { | |
584 | return count; | |
585 | } | |
586 | ||
587 | #ifdef CONFIG_MMU | |
588 | /* | |
589 | * For fun, we are using the MMU for this. | |
590 | */ | |
591 | static inline size_t read_zero_pagealigned(char __user * buf, size_t size) | |
592 | { | |
593 | struct mm_struct *mm; | |
594 | struct vm_area_struct * vma; | |
595 | unsigned long addr=(unsigned long)buf; | |
596 | ||
597 | mm = current->mm; | |
598 | /* Oops, this was forgotten before. -ben */ | |
599 | down_read(&mm->mmap_sem); | |
600 | ||
601 | /* For private mappings, just map in zero pages. */ | |
602 | for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) { | |
603 | unsigned long count; | |
604 | ||
605 | if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0) | |
606 | goto out_up; | |
6aab341e | 607 | if (vma->vm_flags & (VM_SHARED | VM_HUGETLB)) |
1da177e4 LT |
608 | break; |
609 | count = vma->vm_end - addr; | |
610 | if (count > size) | |
611 | count = size; | |
612 | ||
613 | zap_page_range(vma, addr, count, NULL); | |
614 | zeromap_page_range(vma, addr, count, PAGE_COPY); | |
615 | ||
616 | size -= count; | |
617 | buf += count; | |
618 | addr += count; | |
619 | if (size == 0) | |
620 | goto out_up; | |
621 | } | |
622 | ||
623 | up_read(&mm->mmap_sem); | |
624 | ||
625 | /* The shared case is hard. Let's do the conventional zeroing. */ | |
626 | do { | |
627 | unsigned long unwritten = clear_user(buf, PAGE_SIZE); | |
628 | if (unwritten) | |
629 | return size + unwritten - PAGE_SIZE; | |
630 | cond_resched(); | |
631 | buf += PAGE_SIZE; | |
632 | size -= PAGE_SIZE; | |
633 | } while (size); | |
634 | ||
635 | return size; | |
636 | out_up: | |
637 | up_read(&mm->mmap_sem); | |
638 | return size; | |
639 | } | |
640 | ||
641 | static ssize_t read_zero(struct file * file, char __user * buf, | |
642 | size_t count, loff_t *ppos) | |
643 | { | |
644 | unsigned long left, unwritten, written = 0; | |
645 | ||
646 | if (!count) | |
647 | return 0; | |
648 | ||
649 | if (!access_ok(VERIFY_WRITE, buf, count)) | |
650 | return -EFAULT; | |
651 | ||
652 | left = count; | |
653 | ||
654 | /* do we want to be clever? Arbitrary cut-off */ | |
655 | if (count >= PAGE_SIZE*4) { | |
656 | unsigned long partial; | |
657 | ||
658 | /* How much left of the page? */ | |
659 | partial = (PAGE_SIZE-1) & -(unsigned long) buf; | |
660 | unwritten = clear_user(buf, partial); | |
661 | written = partial - unwritten; | |
662 | if (unwritten) | |
663 | goto out; | |
664 | left -= partial; | |
665 | buf += partial; | |
666 | unwritten = read_zero_pagealigned(buf, left & PAGE_MASK); | |
667 | written += (left & PAGE_MASK) - unwritten; | |
668 | if (unwritten) | |
669 | goto out; | |
670 | buf += left & PAGE_MASK; | |
671 | left &= ~PAGE_MASK; | |
672 | } | |
673 | unwritten = clear_user(buf, left); | |
674 | written += left - unwritten; | |
675 | out: | |
676 | return written ? written : -EFAULT; | |
677 | } | |
678 | ||
679 | static int mmap_zero(struct file * file, struct vm_area_struct * vma) | |
680 | { | |
681 | if (vma->vm_flags & VM_SHARED) | |
682 | return shmem_zero_setup(vma); | |
683 | if (zeromap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start, vma->vm_page_prot)) | |
684 | return -EAGAIN; | |
685 | return 0; | |
686 | } | |
687 | #else /* CONFIG_MMU */ | |
688 | static ssize_t read_zero(struct file * file, char * buf, | |
689 | size_t count, loff_t *ppos) | |
690 | { | |
691 | size_t todo = count; | |
692 | ||
693 | while (todo) { | |
694 | size_t chunk = todo; | |
695 | ||
696 | if (chunk > 4096) | |
697 | chunk = 4096; /* Just for latency reasons */ | |
698 | if (clear_user(buf, chunk)) | |
699 | return -EFAULT; | |
700 | buf += chunk; | |
701 | todo -= chunk; | |
702 | cond_resched(); | |
703 | } | |
704 | return count; | |
705 | } | |
706 | ||
707 | static int mmap_zero(struct file * file, struct vm_area_struct * vma) | |
708 | { | |
709 | return -ENOSYS; | |
710 | } | |
711 | #endif /* CONFIG_MMU */ | |
712 | ||
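Both halves of the /dev/zero implementation map onto familiar userspace idioms. A hedged sketch (not part of mem.c; the sizes are arbitrary examples): a MAP_PRIVATE mapping of /dev/zero is the classic pre-MAP_ANONYMOUS way to get zero-filled copy-on-write memory (mmap_zero()), and a read of several pages can take the read_zero_pagealigned() remapping path instead of clear_user().

```c
#include <fcntl.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/zero", O_RDWR);
	if (fd < 0)
		return 1;

	/* private /dev/zero mapping: zero pages now, copy-on-write on first store */
	char *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
	if (p == MAP_FAILED) {
		close(fd);
		return 1;
	}
	p[0] = 1;	/* the first store faults in a real page */

	/* a read of four or more pages into private writable memory may be
	 * satisfied by remapping zero pages rather than clearing byte by byte */
	char *buf = malloc(16 * 4096);
	if (buf)
		read(fd, buf, 16 * 4096);

	free(buf);
	munmap(p, 1 << 20);
	close(fd);
	return 0;
}
```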
713 | static ssize_t write_full(struct file * file, const char __user * buf, | |
714 | size_t count, loff_t *ppos) | |
715 | { | |
716 | return -ENOSPC; | |
717 | } | |
718 | ||
719 | /* | |
720 | * Special lseek() function for /dev/null and /dev/zero. Most notably, you | |
721 | * can fopen() both devices with "a" now. This was previously impossible. | |
722 | * -- SRB. | |
723 | */ | |
724 | ||
725 | static loff_t null_lseek(struct file * file, loff_t offset, int orig) | |
726 | { | |
727 | return file->f_pos = 0; | |
728 | } | |
729 | ||
730 | /* | |
731 | * The memory devices use the full 32/64 bits of the offset, and so we cannot | |
732 | * check against negative addresses: they are ok. The return value is weird, | |
733 | * though, in that case (0). | |
734 | * | |
735 | * also note that seeking relative to the "end of file" isn't supported: | |
736 | * it has no meaning, so it returns -EINVAL. | |
737 | */ | |
738 | static loff_t memory_lseek(struct file * file, loff_t offset, int orig) | |
739 | { | |
740 | loff_t ret; | |
741 | ||
1b1dcc1b | 742 | mutex_lock(&file->f_dentry->d_inode->i_mutex); |
1da177e4 LT |
743 | switch (orig) { |
744 | case 0: | |
745 | file->f_pos = offset; | |
746 | ret = file->f_pos; | |
747 | force_successful_syscall_return(); | |
748 | break; | |
749 | case 1: | |
750 | file->f_pos += offset; | |
751 | ret = file->f_pos; | |
752 | force_successful_syscall_return(); | |
753 | break; | |
754 | default: | |
755 | ret = -EINVAL; | |
756 | } | |
1b1dcc1b | 757 | mutex_unlock(&file->f_dentry->d_inode->i_mutex); |
1da177e4 LT |
758 | return ret; |
759 | } | |
760 | ||
761 | static int open_port(struct inode * inode, struct file * filp) | |
762 | { | |
763 | return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; | |
764 | } | |
765 | ||
766 | #define zero_lseek null_lseek | |
767 | #define full_lseek null_lseek | |
768 | #define write_zero write_null | |
769 | #define read_full read_zero | |
770 | #define open_mem open_port | |
771 | #define open_kmem open_mem | |
50b1fdbd | 772 | #define open_oldmem open_mem |
1da177e4 LT |
773 | |
774 | static struct file_operations mem_fops = { | |
775 | .llseek = memory_lseek, | |
776 | .read = read_mem, | |
777 | .write = write_mem, | |
778 | .mmap = mmap_mem, | |
779 | .open = open_mem, | |
780 | }; | |
781 | ||
782 | static struct file_operations kmem_fops = { | |
783 | .llseek = memory_lseek, | |
784 | .read = read_kmem, | |
785 | .write = write_kmem, | |
786 | .mmap = mmap_kmem, | |
787 | .open = open_kmem, | |
788 | }; | |
789 | ||
790 | static struct file_operations null_fops = { | |
791 | .llseek = null_lseek, | |
792 | .read = read_null, | |
793 | .write = write_null, | |
794 | }; | |
795 | ||
ee2cdece | 796 | #if defined(CONFIG_ISA) || !defined(__mc68000__) |
1da177e4 LT |
797 | static struct file_operations port_fops = { |
798 | .llseek = memory_lseek, | |
799 | .read = read_port, | |
800 | .write = write_port, | |
801 | .open = open_port, | |
802 | }; | |
803 | #endif | |
804 | ||
805 | static struct file_operations zero_fops = { | |
806 | .llseek = zero_lseek, | |
807 | .read = read_zero, | |
808 | .write = write_zero, | |
809 | .mmap = mmap_zero, | |
810 | }; | |
811 | ||
812 | static struct backing_dev_info zero_bdi = { | |
813 | .capabilities = BDI_CAP_MAP_COPY, | |
814 | }; | |
815 | ||
816 | static struct file_operations full_fops = { | |
817 | .llseek = full_lseek, | |
818 | .read = read_full, | |
819 | .write = write_full, | |
820 | }; | |
821 | ||
50b1fdbd VG |
822 | #ifdef CONFIG_CRASH_DUMP |
823 | static struct file_operations oldmem_fops = { | |
824 | .read = read_oldmem, | |
825 | .open = open_oldmem, | |
826 | }; | |
827 | #endif | |
828 | ||
1da177e4 LT |
829 | static ssize_t kmsg_write(struct file * file, const char __user * buf, |
830 | size_t count, loff_t *ppos) | |
831 | { | |
832 | char *tmp; | |
cd140a5c | 833 | ssize_t ret; |
1da177e4 LT |
834 | |
835 | tmp = kmalloc(count + 1, GFP_KERNEL); | |
836 | if (tmp == NULL) | |
837 | return -ENOMEM; | |
838 | ret = -EFAULT; | |
839 | if (!copy_from_user(tmp, buf, count)) { | |
840 | tmp[count] = 0; | |
841 | ret = printk("%s", tmp); | |
cd140a5c GC |
842 | if (ret > count) |
843 | /* printk can add a prefix */ | |
844 | ret = count; | |
1da177e4 LT |
845 | } |
846 | kfree(tmp); | |
847 | return ret; | |
848 | } | |
849 | ||
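kmsg_write() simply funnels a userspace buffer into printk(), which makes /dev/kmsg (minor 11) a convenient way for test scripts to drop markers into the kernel log. A minimal sketch (not part of mem.c; the message text is arbitrary):

```c
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "test-harness: run started\n";
	int fd = open("/dev/kmsg", O_WRONLY);
	if (fd < 0)
		return 1;
	/* kmsg_write() copies the buffer, NUL-terminates it and printk()s it */
	ssize_t n = write(fd, msg, strlen(msg));
	close(fd);
	return n < 0 ? 1 : 0;
}
```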
850 | static struct file_operations kmsg_fops = { | |
851 | .write = kmsg_write, | |
852 | }; | |
853 | ||
854 | static int memory_open(struct inode * inode, struct file * filp) | |
855 | { | |
856 | switch (iminor(inode)) { | |
857 | case 1: | |
858 | filp->f_op = &mem_fops; | |
859 | break; | |
860 | case 2: | |
861 | filp->f_op = &kmem_fops; | |
862 | break; | |
863 | case 3: | |
864 | filp->f_op = &null_fops; | |
865 | break; | |
ee2cdece | 866 | #if defined(CONFIG_ISA) || !defined(__mc68000__) |
1da177e4 LT |
867 | case 4: |
868 | filp->f_op = &port_fops; | |
869 | break; | |
870 | #endif | |
871 | case 5: | |
872 | filp->f_mapping->backing_dev_info = &zero_bdi; | |
873 | filp->f_op = &zero_fops; | |
874 | break; | |
875 | case 7: | |
876 | filp->f_op = &full_fops; | |
877 | break; | |
878 | case 8: | |
879 | filp->f_op = &random_fops; | |
880 | break; | |
881 | case 9: | |
882 | filp->f_op = &urandom_fops; | |
883 | break; | |
884 | case 11: | |
885 | filp->f_op = &kmsg_fops; | |
886 | break; | |
50b1fdbd VG |
887 | #ifdef CONFIG_CRASH_DUMP |
888 | case 12: | |
889 | filp->f_op = &oldmem_fops; | |
890 | break; | |
891 | #endif | |
1da177e4 LT |
892 | default: |
893 | return -ENXIO; | |
894 | } | |
895 | if (filp->f_op && filp->f_op->open) | |
896 | return filp->f_op->open(inode,filp); | |
897 | return 0; | |
898 | } | |
899 | ||
900 | static struct file_operations memory_fops = { | |
901 | .open = memory_open, /* just a selector for the real open */ | |
902 | }; | |
903 | ||
904 | static const struct { | |
905 | unsigned int minor; | |
906 | char *name; | |
907 | umode_t mode; | |
908 | struct file_operations *fops; | |
909 | } devlist[] = { /* list of minor devices */ | |
910 | {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, | |
911 | {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, | |
912 | {3, "null", S_IRUGO | S_IWUGO, &null_fops}, | |
ee2cdece | 913 | #if defined(CONFIG_ISA) || !defined(__mc68000__) |
1da177e4 LT |
914 | {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops}, |
915 | #endif | |
916 | {5, "zero", S_IRUGO | S_IWUGO, &zero_fops}, | |
917 | {7, "full", S_IRUGO | S_IWUGO, &full_fops}, | |
918 | {8, "random", S_IRUGO | S_IWUSR, &random_fops}, | |
919 | {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops}, | |
920 | {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops}, | |
50b1fdbd VG |
921 | #ifdef CONFIG_CRASH_DUMP |
922 | {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops}, | |
923 | #endif | |
1da177e4 LT |
924 | }; |
925 | ||
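Each devlist entry pairs a minor number with the fops and default mode used both by memory_open() above and by the devfs/class registration in chr_dev_init() below. If a node has to be created by hand, mknod(2) with MEM_MAJOR (1) and the minor from this table is enough; a hedged sketch (the /tmp path is only an example):

```c
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

int main(void)
{
	/* devlist entry {5, "zero", S_IRUGO | S_IWUGO, &zero_fops} => char 1:5, mode 0666 */
	return mknod("/tmp/zero", S_IFCHR | 0666, makedev(1, 5)) ? 1 : 0;
}
```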
ca8eca68 | 926 | static struct class *mem_class; |
1da177e4 LT |
927 | |
928 | static int __init chr_dev_init(void) | |
929 | { | |
930 | int i; | |
931 | ||
932 | if (register_chrdev(MEM_MAJOR,"mem",&memory_fops)) | |
933 | printk("unable to get major %d for memory devs\n", MEM_MAJOR); | |
934 | ||
ca8eca68 | 935 | mem_class = class_create(THIS_MODULE, "mem"); |
1da177e4 | 936 | for (i = 0; i < ARRAY_SIZE(devlist); i++) { |
53f46542 GKH |
937 | class_device_create(mem_class, NULL, |
938 | MKDEV(MEM_MAJOR, devlist[i].minor), | |
1da177e4 LT |
939 | NULL, devlist[i].name); |
940 | devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor), | |
941 | S_IFCHR | devlist[i].mode, devlist[i].name); | |
942 | } | |
943 | ||
944 | return 0; | |
945 | } | |
946 | ||
947 | fs_initcall(chr_dev_init); |