/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmaping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/smp_lock.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif
/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(__i386__)
        /*
         * On the PPro and successors, the MTRRs are used to set
         * memory types for physical addresses outside main memory,
         * so blindly setting PCD or PWT on those pages is wrong.
         * For Pentiums and earlier, the surround logic should disable
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return !( test_bit(X86_FEATURE_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_K6_MTRR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CYRIX_ARR, boot_cpu_data.x86_capability) ||
                  test_bit(X86_FEATURE_CENTAUR_MCR, boot_cpu_data.x86_capability) )
          && addr >= __pa(high_memory);
#elif defined(__x86_64__)
        /*
         * This is broken because it can generate memory type aliases,
         * which can cause cache corruptions.
         * But it is only available for root and we have to be bug-to-bug
         * compatible with i386.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        /* Same behaviour as i386: PAT is always set to cached and the MTRRs
           control the caching behaviour.
           Hopefully a full PAT implementation will fix that soon. */
        return 0;
#elif defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_SYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
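/*
 * Illustrative userspace view of the O_SYNC hook above: a tool that
 * needs an uncached mapping of a device aperture would do roughly
 *
 *      fd = open("/dev/mem", O_RDWR | O_SYNC);
 *      p  = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, phys);
 *
 * and phys_mem_access_prot() below then applies pgprot_noncached() on
 * the architectures that honour O_SYNC here (ia64 decides by EFI
 * memory attributes instead).
 */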
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t *count)
{
        unsigned long end_mem;

        end_mem = __pa(high_memory);
        if (addr >= end_mem)
                return 0;

        if (*count > end_mem - addr)
                *count = end_mem - addr;

        return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t *size)
{
        return 1;
}
#endif
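/*
 * Note that valid_phys_addr_range() clamps *count instead of failing:
 * a read that starts below __pa(high_memory) but would run past it is
 * silently shortened, so callers observe a short read rather than
 * -EFAULT.
 */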
/*
 * This function reads the *physical* memory. The f_pos points directly
 * to the memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, &count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

                if (copy_to_user(buf, ptr, sz))
                        return -EFAULT;
                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}
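/*
 * The "-p & (PAGE_SIZE - 1)" idiom above yields the distance from p to
 * the next page boundary.  Worked example with 4 KiB pages: p = 0x12345
 * gives -p & 0xfff = 0xcbb (3259 bytes up to 0x13000); a page-aligned p
 * yields 0, and the else branch then uses a full PAGE_SIZE chunk.
 */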
static ssize_t write_mem(struct file * file, const char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, &count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                written += sz;
                buf += sz;
                p += sz;
                count -= sz;
        }

        *ppos += written;
        return written;
}
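/*
 * Fault handling above follows the usual write(2) contract: if
 * copy_from_user() faults after some progress, the short byte count is
 * returned; only a fault before any byte is written gives -EFAULT.
 */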
#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        unsigned long offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, &size))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
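/*
 * vm_pgoff arrives as the mmap() offset in pages, so mapping physical
 * address phys from userspace looks roughly like:
 *
 *      fd = open("/dev/mem", O_RDWR);
 *      p  = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                fd, phys);             (phys must be page aligned)
 *
 * remap_pfn_range() installs all the PTEs up front, so no page faults
 * are taken when the mapping is touched.
 */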
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}
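/*
 * Note the offset convention difference: for /dev/kmem the mmap()
 * offset is a *kernel virtual* address in pages, which is why it is
 * pushed through __pa() above before mmap_mem() treats it as physical.
 */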
#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif
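/*
 * /dev/oldmem is read by crash-dump tooling running in the kdump
 * capture kernel: it walks the crashed kernel's page frames one
 * PAGE_SIZE chunk at a time, stopping at saved_max_pfn.  The final
 * argument of copy_oldmem_page() selects a copy to a user buffer.
 */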
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > low_count)
                                tmp = low_count;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read += tmp;
                        low_count -= tmp;
                        count -= tmp;
                }
#endif
                while (low_count > 0) {
                        /*
                         * Handle first page in case it's not aligned
                         */
                        if (-p & (PAGE_SIZE - 1))
                                sz = -p & (PAGE_SIZE - 1);
                        else
                                sz = PAGE_SIZE;

                        sz = min_t(unsigned long, sz, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        read += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}
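/*
 * read_kmem() is really two readers: below high_memory the direct
 * mapping is copied out as-is, while vmalloc space has no linear
 * mapping and is staged through a bounce page filled by vread().
 * The bounce page is also what keeps vmlist_lock from being held
 * across a (possibly faulting) copy_to_user().
 */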
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-realp & (PAGE_SIZE - 1))
                        sz = -realp & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_kmem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                written += sz;
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
        }

        *ppos += written;
        return written;
}
/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        ssize_t written;
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {
                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                written = do_write_kmem((void*)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        if (wrote + virtr)
                                                break;
                                        free_page((unsigned long)kbuf);
                                        return -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}
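/*
 * The final return combines both phases: "wrote" bytes landed in the
 * direct mapping via do_write_kmem(), "virtr" bytes went to vmalloc
 * space via vwrite().  A fault is reported as -EFAULT only when no
 * progress at all was made.
 */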
#if defined(CONFIG_ISA) || !defined(__mc68000__)
static ssize_t read_port(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
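/*
 * /dev/port is byte-wide programmed I/O with the file offset acting as
 * the port number, so reading the i8042 status port is roughly:
 *
 *      lseek(fd, 0x64, SEEK_SET);
 *      read(fd, &status, 1);           (performs inb(0x64))
 *
 * Port numbers stop at 65536, hence the bound on i above.
 */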
static ssize_t write_port(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user * tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif
static ssize_t read_null(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}
#ifdef CONFIG_MMU
/*
 * For fun, we are using the MMU for this.
 */
static inline size_t read_zero_pagealigned(char __user * buf, size_t size)
{
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long addr = (unsigned long)buf;

        mm = current->mm;
        /* Oops, this was forgotten before. -ben */
        down_read(&mm->mmap_sem);

        /* For private mappings, just map in zero pages. */
        for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
                unsigned long count;

                if (vma->vm_start > addr || (vma->vm_flags & VM_WRITE) == 0)
                        goto out_up;
                if (vma->vm_flags & (VM_SHARED | VM_HUGETLB))
                        break;
                count = vma->vm_end - addr;
                if (count > size)
                        count = size;

                zap_page_range(vma, addr, count, NULL);
                zeromap_page_range(vma, addr, count, PAGE_COPY);

                size -= count;
                buf += count;
                addr += count;
                if (size == 0)
                        goto out_up;
        }

        up_read(&mm->mmap_sem);

        /* The shared case is hard. Let's do the conventional zeroing. */
        do {
                unsigned long unwritten = clear_user(buf, PAGE_SIZE);
                if (unwritten)
                        return size + unwritten - PAGE_SIZE;
                cond_resched();
                buf += PAGE_SIZE;
                size -= PAGE_SIZE;
        } while (size);

        return size;
out_up:
        up_read(&mm->mmap_sem);
        return size;
}
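/*
 * The MMU trick above: rather than copying zeroes, private writable
 * pages in the caller's buffer are unmapped (zap_page_range) and
 * replaced by copy-on-write zero-page mappings (zeromap_page_range),
 * so a large read from /dev/zero costs page-table updates instead of
 * data transfer.  Shared and hugetlb VMAs fall back to clear_user().
 */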
static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long left, unwritten, written = 0;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        left = count;

        /* do we want to be clever? Arbitrary cut-off */
        if (count >= PAGE_SIZE*4) {
                unsigned long partial;

                /* How much left of the page? */
                partial = (PAGE_SIZE-1) & -(unsigned long) buf;
                unwritten = clear_user(buf, partial);
                written = partial - unwritten;
                if (unwritten)
                        goto out;
                left -= partial;
                buf += partial;
                unwritten = read_zero_pagealigned(buf, left & PAGE_MASK);
                written += (left & PAGE_MASK) - unwritten;
                if (unwritten)
                        goto out;
                buf += left & PAGE_MASK;
                left &= ~PAGE_MASK;
        }
        unwritten = clear_user(buf, left);
        written += left - unwritten;
out:
        return written ? written : -EFAULT;
}
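/*
 * Worked example of the three-way split (4 KiB pages): for
 * buf = 0x8010c35 and count = 5 pages, the unaligned head is
 * partial = 0x3cb bytes (up to 0x8011000), the middle 4 whole pages go
 * through the page-aligned MMU path, and the 0xc35-byte tail is
 * cleared directly.  Requests below PAGE_SIZE*4 skip the cleverness.
 */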
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        if (zeromap_page_range(vma, vma->vm_start,
                               vma->vm_end - vma->vm_start, vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}
#else /* CONFIG_MMU */
static ssize_t read_zero(struct file * file, char * buf,
                         size_t count, loff_t *ppos)
{
        size_t todo = count;

        while (todo) {
                size_t chunk = todo;

                if (chunk > 4096)
                        chunk = 4096;   /* Just for latency reasons */
                if (clear_user(buf, chunk))
                        return -EFAULT;
                buf += chunk;
                todo -= chunk;
                cond_resched();
        }
        return count;
}
static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
        return -ENOSYS;
}
#endif /* CONFIG_MMU */
static ssize_t write_full(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}
/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably,
 * you can fopen() both devices with "a" now.  This was previously
 * impossible.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}
/*
 * The memory devices use the full 32/64 bits of the offset, and so we
 * cannot check against negative addresses: they are ok. The return
 * value is weird, though, in that case (0).
 *
 * Also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file->f_dentry->d_inode->i_mutex);
        switch (orig) {
                case 0:
                        file->f_pos = offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                case 1:
                        file->f_pos += offset;
                        ret = file->f_pos;
                        force_successful_syscall_return();
                        break;
                default:
                        ret = -EINVAL;
        }
        mutex_unlock(&file->f_dentry->d_inode->i_mutex);
        return ret;
}
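/*
 * force_successful_syscall_return() is needed because a successful
 * seek to a very high offset can leave a value in the syscall return
 * register that looks like a small negative errno; it tells
 * architectures that flag errors out of band not to report this
 * lseek() as failed.
 */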
static int open_port(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}
#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem
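/*
 * The aliases encode shared semantics: /dev/full reads as an endless
 * zero stream (read_full == read_zero) while every write fails with
 * -ENOSPC, and opening /dev/mem, /dev/kmem or /dev/oldmem requires
 * CAP_SYS_RAWIO exactly like /dev/port does.
 */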
static struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
};

static struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
};

static struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
};

#if defined(CONFIG_ISA) || !defined(__mc68000__)
static struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

static struct backing_dev_info zero_bdi = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

static struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static struct file_operations oldmem_fops = {
        .read           = read_oldmem,
        .open           = open_oldmem,
};
#endif
static ssize_t kmsg_write(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
                if (ret > count)
                        /* printk can add a prefix */
                        ret = count;
        }
        kfree(tmp);
        return ret;
}

static struct file_operations kmsg_fops = {
        .write          = kmsg_write,
};
static int memory_open(struct inode * inode, struct file * filp)
{
        switch (iminor(inode)) {
                case 1:
                        filp->f_op = &mem_fops;
                        break;
                case 2:
                        filp->f_op = &kmem_fops;
                        break;
                case 3:
                        filp->f_op = &null_fops;
                        break;
#if defined(CONFIG_ISA) || !defined(__mc68000__)
                case 4:
                        filp->f_op = &port_fops;
                        break;
#endif
                case 5:
                        filp->f_mapping->backing_dev_info = &zero_bdi;
                        filp->f_op = &zero_fops;
                        break;
                case 7:
                        filp->f_op = &full_fops;
                        break;
                case 8:
                        filp->f_op = &random_fops;
                        break;
                case 9:
                        filp->f_op = &urandom_fops;
                        break;
                case 11:
                        filp->f_op = &kmsg_fops;
                        break;
#ifdef CONFIG_CRASH_DUMP
                case 12:
                        filp->f_op = &oldmem_fops;
                        break;
#endif
                default:
                        return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode, filp);
        return 0;
}

static struct file_operations memory_fops = {
        .open           = memory_open,  /* just a selector for the real open */
};
static const struct {
        unsigned int            minor;
        char                    *name;
        umode_t                 mode;
        struct file_operations  *fops;
} devlist[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#if defined(CONFIG_ISA) || !defined(__mc68000__)
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};
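/*
 * These minor numbers are fixed ABI (MEM_MAJOR is 1), so the nodes can
 * also be created by hand, e.g.:
 *
 *      mknod /dev/null c 1 3
 *      mknod /dev/zero c 1 5
 */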
static struct class *mem_class;
static int __init chr_dev_init(void)
{
        int i;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        for (i = 0; i < ARRAY_SIZE(devlist); i++) {
                class_device_create(mem_class, NULL,
                                    MKDEV(MEM_MAJOR, devlist[i].minor),
                                    NULL, devlist[i].name);
                devfs_mk_cdev(MKDEV(MEM_MAJOR, devlist[i].minor),
                              S_IFCHR | devlist[i].mode, devlist[i].name);
        }

        return 0;
}

fs_initcall(chr_dev_init);
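/*
 * fs_initcall() runs at initcall level 5, one level ahead of plain
 * device_initcall() drivers, so the memory device nodes are available
 * early in boot.
 */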