x86: PAT phys_mem_access_prot_allowed for dev/mem mmap
drivers/char/mem.c
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/crash_dump.h>
#include <linux/backing-dev.h>
#include <linux/bootmem.h>
#include <linux/splice.h>
#include <linux/pfn.h>

#include <asm/uaccess.h>
#include <asm/io.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
static inline int uncached_access(struct file *file, unsigned long addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_SYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_SYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_SYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}

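/*
 * Default physical address range checks, used unless the architecture
 * defines ARCH_HAS_VALID_PHYS_ADDR_RANGE and supplies its own versions:
 * reads and writes must stay within the memory the kernel knows about
 * (below high_memory), while mmap of any physical offset is permitted.
 */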
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(unsigned long addr, size_t count)
{
        if (addr + count > __pa(high_memory))
                return 0;

        return 1;
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

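/*
 * With CONFIG_NONPROMISC_DEVMEM, the architecture provides
 * devmem_is_allowed() to decide, page by page, which physical frames
 * user space may touch through /dev/mem; range_is_allowed() walks the
 * requested range one PFN at a time and logs any rejected access.
 * Without it, every range is allowed.
 */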
#ifdef CONFIG_NONPROMISC_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

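/*
 * xlate_dev_mem_ptr() gives the kernel a virtual mapping for a physical
 * address with the right cache attributes; architectures that need to
 * tear such a mapping down again override this weak no-op counterpart.
 */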
void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file * file, char __user * buf,
                        size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /* check only the chunk about to be read, as write_mem() does */
                if (!range_is_allowed(p >> PAGE_SHIFT, sz))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr)
                        return -EFAULT;

                if (copy_to_user(buf, ptr, sz)) {
                        unxlate_dev_mem_ptr(p, ptr);
                        return -EFAULT;
                }

                unxlate_dev_mem_ptr(p, ptr);

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

static ssize_t write_mem(struct file * file, const char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - p;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-p & (PAGE_SIZE - 1))
                        sz = -p & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, sz))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr) {
                        if (written)
                                break;
                        return -EFAULT;
                }

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        unxlate_dev_mem_ptr(p, ptr);
                        if (written)
                                break;
                        return -EFAULT;
                }

                unxlate_dev_mem_ptr(p, ptr);

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

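/*
 * Weak default hook: architectures may override this to veto a /dev/mem
 * mapping or adjust the protections in *vma_prot.  On x86 the PAT code
 * overrides it so that /dev/mem mmap cannot set up conflicting cache
 * attributes for the same physical pages.
 */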
int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT
static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        unsigned long offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem   NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

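/*
 * mmap of /dev/mem: validate the physical range, refuse private
 * mappings on nommu, apply the strict-devmem filter, let the
 * architecture veto or adjust the protections, and only then set up
 * the page tables with remap_pfn_range().
 */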
static int mmap_mem(struct file * file, struct vm_area_struct * vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                          &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot))
                return -EAGAIN;
        return 0;
}

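/*
 * For illustration only (not part of this file): a minimal user-space
 * sketch of the path above, mapping one page of physical memory at a
 * hypothetical page-aligned offset PHYS_ADDR through /dev/mem:
 *
 *      int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *      void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, PHYS_ADDR);
 *
 * O_SYNC makes uncached_access() request a non-cached mapping on most
 * architectures; the offset passed to mmap() becomes vm_pgoff once the
 * kernel shifts it right by PAGE_SHIFT.
 */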
static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory
         * than available in mem_map which pfn_valid checks
         * for. Perhaps should add a new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

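/*
 * /dev/oldmem (minor 12) exists only in a crash-dump capture kernel:
 * it lets tools read the crashed kernel's memory, one page per
 * copy_oldmem_page() call, up to saved_max_pfn.
 */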
#ifdef CONFIG_CRASH_DUMP
/*
 * Read memory corresponding to the old kernel.
 */
static ssize_t read_oldmem(struct file *file, char __user *buf,
                           size_t count, loff_t *ppos)
{
        unsigned long pfn, offset;
        size_t read = 0, csize;
        int rc = 0;

        while (count) {
                pfn = *ppos / PAGE_SIZE;
                if (pfn > saved_max_pfn)
                        return read;

                offset = (unsigned long)(*ppos % PAGE_SIZE);
                if (count > PAGE_SIZE - offset)
                        csize = PAGE_SIZE - offset;
                else
                        csize = count;

                rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
                if (rc < 0)
                        return rc;
                buf += csize;
                *ppos += csize;
                read += csize;
                count -= csize;
        }
        return read;
}
#endif

extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long) high_memory - p)
                        low_count = (unsigned long) high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        size_t tmp = PAGE_SIZE - p;
                        if (tmp > low_count)
                                tmp = low_count;
                        if (clear_user(buf, tmp))
                                return -EFAULT;
                        buf += tmp;
                        p += tmp;
                        read += tmp;
                        low_count -= tmp;
                        count -= tmp;
                }
#endif
                while (low_count > 0) {
                        /*
                         * Handle first page in case it's not aligned
                         */
                        if (-p & (PAGE_SIZE - 1))
                                sz = -p & (PAGE_SIZE - 1);
                        else
                                sz = PAGE_SIZE;

                        sz = min_t(unsigned long, sz, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        len = vread(kbuf, (char *)p, len);
                        if (!len)
                                break;
                        if (copy_to_user(buf, kbuf, len)) {
                                free_page((unsigned long)kbuf);
                                return -EFAULT;
                        }
                        count -= len;
                        buf += len;
                        read += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read;
}

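/*
 * Helper for write_kmem(): p is the pointer actually written through,
 * realp the corresponding address used for page-alignment arithmetic
 * and the page-zero check; for the low-memory path below the two carry
 * the same value.
 */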
static inline ssize_t
do_write_kmem(void *p, unsigned long realp, const char __user * buf,
              size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (realp < PAGE_SIZE) {
                unsigned long sz = PAGE_SIZE - realp;
                if (sz > count)
                        sz = count;
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;
                /*
                 * Handle first page in case it's not aligned
                 */
                if (-realp & (PAGE_SIZE - 1))
                        sz = -realp & (PAGE_SIZE - 1);
                else
                        sz = PAGE_SIZE;

                sz = min_t(unsigned long, sz, count);

                /*
                 * On ia64 if a page has been mapped somewhere as
                 * uncached, then it must also be accessed uncached
                 * by the kernel or data corruption may occur
                 */
                ptr = xlate_dev_kmem_ptr(p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                realp += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}


/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        ssize_t written;
        char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */

        if (p < (unsigned long) high_memory) {

                wrote = count;
                if (count > (unsigned long) high_memory - p)
                        wrote = (unsigned long) high_memory - p;

                written = do_write_kmem((void*)p, p, buf, wrote, ppos);
                if (written != wrote)
                        return written;
                wrote = written;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        int len = count;

                        if (len > PAGE_SIZE)
                                len = PAGE_SIZE;
                        if (len) {
                                written = copy_from_user(kbuf, buf, len);
                                if (written) {
                                        if (wrote + virtr)
                                                break;
                                        free_page((unsigned long)kbuf);
                                        return -EFAULT;
                                }
                        }
                        len = vwrite(kbuf, (char *)p, len);
                        count -= len;
                        buf += len;
                        virtr += len;
                        p += len;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote;
}

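/*
 * /dev/port gives byte-wise access to the I/O port space: f_pos selects
 * the port, and each byte is transferred with a single inb()/outb(),
 * stopping at port 65535.
 */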
#ifdef CONFIG_DEVPORT
static ssize_t read_port(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user * tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif

static ssize_t read_null(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_zero(struct file * file, char __user * buf,
                         size_t count, loff_t *ppos)
{
        size_t written;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        written = 0;
        while (count) {
                unsigned long unwritten;
                size_t chunk = count;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                unwritten = clear_user(buf, chunk);
                written += chunk - unwritten;
                if (unwritten)
                        break;
                buf += chunk;
                count -= chunk;
                cond_resched();
        }
        return written ? written : -EFAULT;
}

static int mmap_zero(struct file * file, struct vm_area_struct * vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static ssize_t write_full(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file * file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
        switch (orig) {
        case 0:
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        case 1:
                file->f_pos += offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
        return ret;
}

static int open_port(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define open_mem        open_port
#define open_kmem       open_mem
#define open_oldmem     open_mem

static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
        .get_unmapped_area = get_unmapped_area_mem,
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .splice_write   = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .mmap           = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 */
static struct backing_dev_info zero_bdi = {
        .capabilities   = BDI_CAP_MAP_COPY,
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

#ifdef CONFIG_CRASH_DUMP
static const struct file_operations oldmem_fops = {
        .read           = read_oldmem,
        .open           = open_oldmem,
};
#endif

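/*
 * /dev/kmsg: data written here is fed straight to printk(), so user
 * space (e.g. a logging daemon or a debugging script) can inject
 * messages into the kernel log buffer.
 */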
static ssize_t kmsg_write(struct file * file, const char __user * buf,
                          size_t count, loff_t *ppos)
{
        char *tmp;
        ssize_t ret;

        tmp = kmalloc(count + 1, GFP_KERNEL);
        if (tmp == NULL)
                return -ENOMEM;
        ret = -EFAULT;
        if (!copy_from_user(tmp, buf, count)) {
                tmp[count] = 0;
                ret = printk("%s", tmp);
                if (ret > count)
                        /* printk can add a prefix */
                        ret = count;
        }
        kfree(tmp);
        return ret;
}

static const struct file_operations kmsg_fops = {
        .write          = kmsg_write,
};

static int memory_open(struct inode * inode, struct file * filp)
{
        switch (iminor(inode)) {
        case 1:
                filp->f_op = &mem_fops;
                filp->f_mapping->backing_dev_info =
                        &directly_mappable_cdev_bdi;
                break;
        case 2:
                filp->f_op = &kmem_fops;
                filp->f_mapping->backing_dev_info =
                        &directly_mappable_cdev_bdi;
                break;
        case 3:
                filp->f_op = &null_fops;
                break;
#ifdef CONFIG_DEVPORT
        case 4:
                filp->f_op = &port_fops;
                break;
#endif
        case 5:
                filp->f_mapping->backing_dev_info = &zero_bdi;
                filp->f_op = &zero_fops;
                break;
        case 7:
                filp->f_op = &full_fops;
                break;
        case 8:
                filp->f_op = &random_fops;
                break;
        case 9:
                filp->f_op = &urandom_fops;
                break;
        case 11:
                filp->f_op = &kmsg_fops;
                break;
#ifdef CONFIG_CRASH_DUMP
        case 12:
                filp->f_op = &oldmem_fops;
                break;
#endif
        default:
                return -ENXIO;
        }
        if (filp->f_op && filp->f_op->open)
                return filp->f_op->open(inode, filp);
        return 0;
}

static const struct file_operations memory_fops = {
        .open           = memory_open, /* just a selector for the real open */
};

static const struct {
        unsigned int            minor;
        char                    *name;
        umode_t                 mode;
        const struct file_operations *fops;
} devlist[] = { /* list of minor devices */
        {1, "mem",     S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
        {2, "kmem",    S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
        {3, "null",    S_IRUGO | S_IWUGO,           &null_fops},
#ifdef CONFIG_DEVPORT
        {4, "port",    S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
#endif
        {5, "zero",    S_IRUGO | S_IWUGO,           &zero_fops},
        {7, "full",    S_IRUGO | S_IWUGO,           &full_fops},
        {8, "random",  S_IRUGO | S_IWUSR,           &random_fops},
        {9, "urandom", S_IRUGO | S_IWUSR,           &urandom_fops},
        {11, "kmsg",   S_IRUGO | S_IWUSR,           &kmsg_fops},
#ifdef CONFIG_CRASH_DUMP
        {12, "oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
#endif
};

926
ca8eca68 927static struct class *mem_class;
1da177e4
LT
928
929static int __init chr_dev_init(void)
930{
931 int i;
e0bf68dd
PZ
932 int err;
933
934 err = bdi_init(&zero_bdi);
935 if (err)
936 return err;
1da177e4
LT
937
938 if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
939 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
940
ca8eca68 941 mem_class = class_create(THIS_MODULE, "mem");
7c69ef79 942 for (i = 0; i < ARRAY_SIZE(devlist); i++)
ebf644c4
GKH
943 device_create(mem_class, NULL,
944 MKDEV(MEM_MAJOR, devlist[i].minor),
945 devlist[i].name);
946
1da177e4
LT
947 return 0;
948}
949
950fs_initcall(chr_dev_init);