1 /*
2 * linux/drivers/char/mem.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * Added devfs support.
7 * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9 */
10
11 #include <linux/mm.h>
12 #include <linux/miscdevice.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mman.h>
16 #include <linux/random.h>
17 #include <linux/init.h>
18 #include <linux/raw.h>
19 #include <linux/tty.h>
20 #include <linux/capability.h>
21 #include <linux/ptrace.h>
22 #include <linux/device.h>
23 #include <linux/highmem.h>
24 #include <linux/crash_dump.h>
25 #include <linux/backing-dev.h>
26 #include <linux/bootmem.h>
27 #include <linux/splice.h>
28 #include <linux/pfn.h>
29 #include <linux/smp_lock.h>
30
31 #include <asm/uaccess.h>
32 #include <asm/io.h>
33
34 #ifdef CONFIG_IA64
35 # include <linux/efi.h>
36 #endif
37
38 /*
39 * Architectures vary in how they handle caching for addresses
40 * outside of main memory.
41 *
42 */
43 static inline int uncached_access(struct file *file, unsigned long addr)
44 {
45 #if defined(CONFIG_IA64)
46 /*
47 * On ia64, we ignore O_SYNC because we cannot tolerate memory attribute aliases.
48 */
49 return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
50 #elif defined(CONFIG_MIPS)
51 {
52 extern int __uncached_access(struct file *file,
53 unsigned long addr);
54
55 return __uncached_access(file, addr);
56 }
57 #else
58 /*
59 * Accessing memory above the top of memory the kernel knows about, or through
60 * a file pointer that was marked O_SYNC, will be done non-cached.
61 */
62 if (file->f_flags & O_SYNC)
63 return 1;
64 return addr >= __pa(high_memory);
65 #endif
66 }
67
68 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
69 static inline int valid_phys_addr_range(unsigned long addr, size_t count)
70 {
71 if (addr + count > __pa(high_memory))
72 return 0;
73
74 return 1;
75 }
76
77 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
78 {
79 return 1;
80 }
81 #endif
82
83 #ifdef CONFIG_STRICT_DEVMEM
84 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
85 {
86 u64 from = ((u64)pfn) << PAGE_SHIFT;
87 u64 to = from + size;
88 u64 cursor = from;
89
90 while (cursor < to) {
91 if (!devmem_is_allowed(pfn)) {
92 printk(KERN_INFO
93 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
94 current->comm, from, to);
95 return 0;
96 }
97 cursor += PAGE_SIZE;
98 pfn++;
99 }
100 return 1;
101 }
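/*
 * devmem_is_allowed() is supplied by the architecture. On x86, for example,
 * it is believed to permit the low 1MB and pages that are not System RAM and
 * to reject everything else; other architectures apply their own policy.
 */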
102 #else
103 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
104 {
105 return 1;
106 }
107 #endif
108
109 void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
110 {
111 }
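/*
 * The weak no-op above is only the default. Architectures whose
 * xlate_dev_mem_ptr() sets up a temporary mapping (x86 with PAT, for
 * instance) are expected to override unxlate_dev_mem_ptr() to tear that
 * mapping down again.
 */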
112
113 /*
114 * This function reads the *physical* memory. The f_pos points directly to the
115 * memory location.
116 */
117 static ssize_t read_mem(struct file * file, char __user * buf,
118 size_t count, loff_t *ppos)
119 {
120 unsigned long p = *ppos;
121 ssize_t read, sz;
122 char *ptr;
123
124 if (!valid_phys_addr_range(p, count))
125 return -EFAULT;
126 read = 0;
127 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
128 /* we don't have page 0 mapped on sparc and m68k.. */
129 if (p < PAGE_SIZE) {
130 sz = PAGE_SIZE - p;
131 if (sz > count)
132 sz = count;
133 if (sz > 0) {
134 if (clear_user(buf, sz))
135 return -EFAULT;
136 buf += sz;
137 p += sz;
138 count -= sz;
139 read += sz;
140 }
141 }
142 #endif
143
144 while (count > 0) {
145 /*
146 * Handle first page in case it's not aligned
147 */
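/*
 * -p & (PAGE_SIZE - 1) is the number of bytes from p up to the next page
 * boundary (zero when p is already aligned), so the first copy stops at a
 * page boundary and every later chunk starts on one.
 */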
148 if (-p & (PAGE_SIZE - 1))
149 sz = -p & (PAGE_SIZE - 1);
150 else
151 sz = PAGE_SIZE;
152
153 sz = min_t(unsigned long, sz, count);
154
155 if (!range_is_allowed(p >> PAGE_SHIFT, sz))
156 return -EPERM;
157
158 /*
159 * On ia64 if a page has been mapped somewhere as
160 * uncached, then it must also be accessed uncached
161 * by the kernel or data corruption may occur
162 */
163 ptr = xlate_dev_mem_ptr(p);
164 if (!ptr)
165 return -EFAULT;
166
167 if (copy_to_user(buf, ptr, sz)) {
168 unxlate_dev_mem_ptr(p, ptr);
169 return -EFAULT;
170 }
171
172 unxlate_dev_mem_ptr(p, ptr);
173
174 buf += sz;
175 p += sz;
176 count -= sz;
177 read += sz;
178 }
179
180 *ppos += read;
181 return read;
182 }
183
184 static ssize_t write_mem(struct file * file, const char __user * buf,
185 size_t count, loff_t *ppos)
186 {
187 unsigned long p = *ppos;
188 ssize_t written, sz;
189 unsigned long copied;
190 void *ptr;
191
192 if (!valid_phys_addr_range(p, count))
193 return -EFAULT;
194
195 written = 0;
196
197 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
198 /* we don't have page 0 mapped on sparc and m68k.. */
199 if (p < PAGE_SIZE) {
200 unsigned long sz = PAGE_SIZE - p;
201 if (sz > count)
202 sz = count;
203 /* Hmm. Do something? */
204 buf += sz;
205 p += sz;
206 count -= sz;
207 written += sz;
208 }
209 #endif
210
211 while (count > 0) {
212 /*
213 * Handle first page in case it's not aligned
214 */
215 if (-p & (PAGE_SIZE - 1))
216 sz = -p & (PAGE_SIZE - 1);
217 else
218 sz = PAGE_SIZE;
219
220 sz = min_t(unsigned long, sz, count);
221
222 if (!range_is_allowed(p >> PAGE_SHIFT, sz))
223 return -EPERM;
224
225 /*
226 * On ia64 if a page has been mapped somewhere as
227 * uncached, then it must also be accessed uncached
228 * by the kernel or data corruption may occur
229 */
230 ptr = xlate_dev_mem_ptr(p);
231 if (!ptr) {
232 if (written)
233 break;
234 return -EFAULT;
235 }
236
237 copied = copy_from_user(ptr, buf, sz);
238 if (copied) {
239 written += sz - copied;
240 unxlate_dev_mem_ptr(p, ptr);
241 if (written)
242 break;
243 return -EFAULT;
244 }
245
246 unxlate_dev_mem_ptr(p, ptr);
247
248 buf += sz;
249 p += sz;
250 count -= sz;
251 written += sz;
252 }
253
254 *ppos += written;
255 return written;
256 }
257
258 int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
259 unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
260 {
261 return 1;
262 }
263
264 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
265 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
266 unsigned long size, pgprot_t vma_prot)
267 {
268 #ifdef pgprot_noncached
269 unsigned long offset = pfn << PAGE_SHIFT;
270
271 if (uncached_access(file, offset))
272 return pgprot_noncached(vma_prot);
273 #endif
274 return vma_prot;
275 }
276 #endif
277
278 #ifndef CONFIG_MMU
279 static unsigned long get_unmapped_area_mem(struct file *file,
280 unsigned long addr,
281 unsigned long len,
282 unsigned long pgoff,
283 unsigned long flags)
284 {
285 if (!valid_mmap_phys_addr_range(pgoff, len))
286 return (unsigned long) -EINVAL;
287 return pgoff << PAGE_SHIFT;
288 }
289
290 /* can't do an in-place private mapping if there's no MMU */
291 static inline int private_mapping_ok(struct vm_area_struct *vma)
292 {
293 return vma->vm_flags & VM_MAYSHARE;
294 }
295 #else
296 #define get_unmapped_area_mem NULL
297
298 static inline int private_mapping_ok(struct vm_area_struct *vma)
299 {
300 return 1;
301 }
302 #endif
303
304 static struct vm_operations_struct mmap_mem_ops = {
305 #ifdef CONFIG_HAVE_IOREMAP_PROT
306 .access = generic_access_phys
307 #endif
308 };
309
310 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
311 {
312 size_t size = vma->vm_end - vma->vm_start;
313
314 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
315 return -EINVAL;
316
317 if (!private_mapping_ok(vma))
318 return -ENOSYS;
319
320 if (!range_is_allowed(vma->vm_pgoff, size))
321 return -EPERM;
322
323 if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
324 &vma->vm_page_prot))
325 return -EINVAL;
326
327 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
328 size,
329 vma->vm_page_prot);
330
331 vma->vm_ops = &mmap_mem_ops;
332
333 /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
334 if (remap_pfn_range(vma,
335 vma->vm_start,
336 vma->vm_pgoff,
337 size,
338 vma->vm_page_prot)) {
339 return -EAGAIN;
340 }
341 return 0;
342 }
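/*
 * Illustrative userspace sketch (not part of this driver) of how mmap_mem()
 * is typically reached: map one page of physical memory through /dev/mem.
 * Opening requires CAP_SYS_RAWIO (see open_port() below), the mmap() fails
 * with EPERM when CONFIG_STRICT_DEVMEM disallows the range, and O_SYNC
 * requests an uncached mapping on architectures that honour it.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
 *	... read from p, then munmap(p, 4096) and close(fd) ...
 */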
343
344 #ifdef CONFIG_DEVKMEM
345 static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
346 {
347 unsigned long pfn;
348
349 /* Turn a kernel-virtual address into a physical page frame */
350 pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
351
352 /*
353 * RED-PEN: on some architectures there is more mapped memory
354 * than available in mem_map which pfn_valid checks
355 * for. Perhaps should add a new macro here.
356 *
357 * RED-PEN: vmalloc is not supported right now.
358 */
359 if (!pfn_valid(pfn))
360 return -EIO;
361
362 vma->vm_pgoff = pfn;
363 return mmap_mem(file, vma);
364 }
365 #endif
366
367 #ifdef CONFIG_CRASH_DUMP
368 /*
369 * Read memory corresponding to the old kernel.
370 */
371 static ssize_t read_oldmem(struct file *file, char __user *buf,
372 size_t count, loff_t *ppos)
373 {
374 unsigned long pfn, offset;
375 size_t read = 0, csize;
376 int rc = 0;
377
378 while (count) {
379 pfn = *ppos / PAGE_SIZE;
380 if (pfn > saved_max_pfn)
381 return read;
382
383 offset = (unsigned long)(*ppos % PAGE_SIZE);
384 if (count > PAGE_SIZE - offset)
385 csize = PAGE_SIZE - offset;
386 else
387 csize = count;
388
389 rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
390 if (rc < 0)
391 return rc;
392 buf += csize;
393 *ppos += csize;
394 read += csize;
395 count -= csize;
396 }
397 return read;
398 }
399 #endif
400
401 #ifdef CONFIG_DEVKMEM
402 /*
403 * This function reads the *virtual* memory as seen by the kernel.
404 */
405 static ssize_t read_kmem(struct file *file, char __user *buf,
406 size_t count, loff_t *ppos)
407 {
408 unsigned long p = *ppos;
409 ssize_t low_count, read, sz;
410 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
411
412 read = 0;
413 if (p < (unsigned long) high_memory) {
414 low_count = count;
415 if (count > (unsigned long) high_memory - p)
416 low_count = (unsigned long) high_memory - p;
417
418 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
419 /* we don't have page 0 mapped on sparc and m68k.. */
420 if (p < PAGE_SIZE && low_count > 0) {
421 size_t tmp = PAGE_SIZE - p;
422 if (tmp > low_count) tmp = low_count;
423 if (clear_user(buf, tmp))
424 return -EFAULT;
425 buf += tmp;
426 p += tmp;
427 read += tmp;
428 low_count -= tmp;
429 count -= tmp;
430 }
431 #endif
432 while (low_count > 0) {
433 /*
434 * Handle first page in case it's not aligned
435 */
436 if (-p & (PAGE_SIZE - 1))
437 sz = -p & (PAGE_SIZE - 1);
438 else
439 sz = PAGE_SIZE;
440
441 sz = min_t(unsigned long, sz, low_count);
442
443 /*
444 * On ia64 if a page has been mapped somewhere as
445 * uncached, then it must also be accessed uncached
446 * by the kernel or data corruption may occur
447 */
448 kbuf = xlate_dev_kmem_ptr((char *)p);
449
450 if (copy_to_user(buf, kbuf, sz))
451 return -EFAULT;
452 buf += sz;
453 p += sz;
454 read += sz;
455 low_count -= sz;
456 count -= sz;
457 }
458 }
459
460 if (count > 0) {
461 kbuf = (char *)__get_free_page(GFP_KERNEL);
462 if (!kbuf)
463 return -ENOMEM;
464 while (count > 0) {
465 int len = count;
466
467 if (len > PAGE_SIZE)
468 len = PAGE_SIZE;
469 len = vread(kbuf, (char *)p, len);
470 if (!len)
471 break;
472 if (copy_to_user(buf, kbuf, len)) {
473 free_page((unsigned long)kbuf);
474 return -EFAULT;
475 }
476 count -= len;
477 buf += len;
478 read += len;
479 p += len;
480 }
481 free_page((unsigned long)kbuf);
482 }
483 *ppos = p;
484 return read;
485 }
486
487
488 static inline ssize_t
489 do_write_kmem(void *p, unsigned long realp, const char __user * buf,
490 size_t count, loff_t *ppos)
491 {
492 ssize_t written, sz;
493 unsigned long copied;
494
495 written = 0;
496 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
497 /* we don't have page 0 mapped on sparc and m68k.. */
498 if (realp < PAGE_SIZE) {
499 unsigned long sz = PAGE_SIZE - realp;
500 if (sz > count)
501 sz = count;
502 /* Hmm. Do something? */
503 buf += sz;
504 p += sz;
505 realp += sz;
506 count -= sz;
507 written += sz;
508 }
509 #endif
510
511 while (count > 0) {
512 char *ptr;
513 /*
514 * Handle first page in case it's not aligned
515 */
516 if (-realp & (PAGE_SIZE - 1))
517 sz = -realp & (PAGE_SIZE - 1);
518 else
519 sz = PAGE_SIZE;
520
521 sz = min_t(unsigned long, sz, count);
522
523 /*
524 * On ia64 if a page has been mapped somewhere as
525 * uncached, then it must also be accessed uncached
526 * by the kernel or data corruption may occur
527 */
528 ptr = xlate_dev_kmem_ptr(p);
529
530 copied = copy_from_user(ptr, buf, sz);
531 if (copied) {
532 written += sz - copied;
533 if (written)
534 break;
535 return -EFAULT;
536 }
537 buf += sz;
538 p += sz;
539 realp += sz;
540 count -= sz;
541 written += sz;
542 }
543
544 *ppos += written;
545 return written;
546 }
547
548
549 /*
550 * This function writes to the *virtual* memory as seen by the kernel.
551 */
552 static ssize_t write_kmem(struct file * file, const char __user * buf,
553 size_t count, loff_t *ppos)
554 {
555 unsigned long p = *ppos;
556 ssize_t wrote = 0;
557 ssize_t virtr = 0;
558 ssize_t written;
559 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
560
561 if (p < (unsigned long) high_memory) {
562
563 wrote = count;
564 if (count > (unsigned long) high_memory - p)
565 wrote = (unsigned long) high_memory - p;
566
567 written = do_write_kmem((void*)p, p, buf, wrote, ppos);
568 if (written != wrote)
569 return written;
570 wrote = written;
571 p += wrote;
572 buf += wrote;
573 count -= wrote;
574 }
575
576 if (count > 0) {
577 kbuf = (char *)__get_free_page(GFP_KERNEL);
578 if (!kbuf)
579 return wrote ? wrote : -ENOMEM;
580 while (count > 0) {
581 int len = count;
582
583 if (len > PAGE_SIZE)
584 len = PAGE_SIZE;
585 if (len) {
586 written = copy_from_user(kbuf, buf, len);
587 if (written) {
588 if (wrote + virtr)
589 break;
590 free_page((unsigned long)kbuf);
591 return -EFAULT;
592 }
593 }
594 len = vwrite(kbuf, (char *)p, len);
595 count -= len;
596 buf += len;
597 virtr += len;
598 p += len;
599 }
600 free_page((unsigned long)kbuf);
601 }
602
603 *ppos = p;
604 return virtr + wrote;
605 }
606 #endif
607
608 #ifdef CONFIG_DEVPORT
609 static ssize_t read_port(struct file * file, char __user * buf,
610 size_t count, loff_t *ppos)
611 {
612 unsigned long i = *ppos;
613 char __user *tmp = buf;
614
615 if (!access_ok(VERIFY_WRITE, buf, count))
616 return -EFAULT;
617 while (count-- > 0 && i < 65536) {
618 if (__put_user(inb(i),tmp) < 0)
619 return -EFAULT;
620 i++;
621 tmp++;
622 }
623 *ppos = i;
624 return tmp-buf;
625 }
626
627 static ssize_t write_port(struct file * file, const char __user * buf,
628 size_t count, loff_t *ppos)
629 {
630 unsigned long i = *ppos;
631 const char __user * tmp = buf;
632
633 if (!access_ok(VERIFY_READ,buf,count))
634 return -EFAULT;
635 while (count-- > 0 && i < 65536) {
636 char c;
637 if (__get_user(c, tmp)) {
638 if (tmp > buf)
639 break;
640 return -EFAULT;
641 }
642 outb(c,i);
643 i++;
644 tmp++;
645 }
646 *ppos = i;
647 return tmp-buf;
648 }
649 #endif
650
651 static ssize_t read_null(struct file * file, char __user * buf,
652 size_t count, loff_t *ppos)
653 {
654 return 0;
655 }
656
657 static ssize_t write_null(struct file * file, const char __user * buf,
658 size_t count, loff_t *ppos)
659 {
660 return count;
661 }
662
663 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
664 struct splice_desc *sd)
665 {
666 return sd->len;
667 }
668
669 static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
670 loff_t *ppos, size_t len, unsigned int flags)
671 {
672 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
673 }
674
675 static ssize_t read_zero(struct file * file, char __user * buf,
676 size_t count, loff_t *ppos)
677 {
678 size_t written;
679
680 if (!count)
681 return 0;
682
683 if (!access_ok(VERIFY_WRITE, buf, count))
684 return -EFAULT;
685
686 written = 0;
687 while (count) {
688 unsigned long unwritten;
689 size_t chunk = count;
690
691 if (chunk > PAGE_SIZE)
692 chunk = PAGE_SIZE; /* Just for latency reasons */
693 unwritten = clear_user(buf, chunk);
694 written += chunk - unwritten;
695 if (unwritten)
696 break;
697 buf += chunk;
698 count -= chunk;
699 cond_resched();
700 }
701 return written ? written : -EFAULT;
702 }
703
704 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
705 {
706 #ifndef CONFIG_MMU
707 return -ENOSYS;
708 #endif
709 if (vma->vm_flags & VM_SHARED)
710 return shmem_zero_setup(vma);
711 return 0;
712 }
713
714 static ssize_t write_full(struct file * file, const char __user * buf,
715 size_t count, loff_t *ppos)
716 {
717 return -ENOSPC;
718 }
719
720 /*
721 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
722 * can fopen() both devices with "a" now. This was previously impossible.
723 * -- SRB.
724 */
725
726 static loff_t null_lseek(struct file * file, loff_t offset, int orig)
727 {
728 return file->f_pos = 0;
729 }
730
731 /*
732 * The memory devices use the full 32/64 bits of the offset, and so we cannot
733 * check against negative addresses: they are ok. The return value is weird,
734 * though, in that case (0).
735 *
736 * also note that seeking relative to the "end of file" isn't supported:
737 * it has no meaning, so it returns -EINVAL.
738 */
739 static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
740 {
741 loff_t ret;
742
743 mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
744 switch (orig) {
745 case 0:
746 file->f_pos = offset;
747 ret = file->f_pos;
748 force_successful_syscall_return();
749 break;
750 case 1:
751 file->f_pos += offset;
752 ret = file->f_pos;
753 force_successful_syscall_return();
754 break;
755 default:
756 ret = -EINVAL;
757 }
758 mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
759 return ret;
760 }
761
762 static int open_port(struct inode * inode, struct file * filp)
763 {
764 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
765 }
766
767 #define zero_lseek null_lseek
768 #define full_lseek null_lseek
769 #define write_zero write_null
770 #define read_full read_zero
771 #define open_mem open_port
772 #define open_kmem open_mem
773 #define open_oldmem open_mem
774
775 static const struct file_operations mem_fops = {
776 .llseek = memory_lseek,
777 .read = read_mem,
778 .write = write_mem,
779 .mmap = mmap_mem,
780 .open = open_mem,
781 .get_unmapped_area = get_unmapped_area_mem,
782 };
783
784 #ifdef CONFIG_DEVKMEM
785 static const struct file_operations kmem_fops = {
786 .llseek = memory_lseek,
787 .read = read_kmem,
788 .write = write_kmem,
789 .mmap = mmap_kmem,
790 .open = open_kmem,
791 .get_unmapped_area = get_unmapped_area_mem,
792 };
793 #endif
794
795 static const struct file_operations null_fops = {
796 .llseek = null_lseek,
797 .read = read_null,
798 .write = write_null,
799 .splice_write = splice_write_null,
800 };
801
802 #ifdef CONFIG_DEVPORT
803 static const struct file_operations port_fops = {
804 .llseek = memory_lseek,
805 .read = read_port,
806 .write = write_port,
807 .open = open_port,
808 };
809 #endif
810
811 static const struct file_operations zero_fops = {
812 .llseek = zero_lseek,
813 .read = read_zero,
814 .write = write_zero,
815 .mmap = mmap_zero,
816 };
817
818 /*
819 * capabilities for /dev/zero
820 * - permits private mappings, "copies" are taken of the source of zeros
821 */
822 static struct backing_dev_info zero_bdi = {
823 .capabilities = BDI_CAP_MAP_COPY,
824 };
825
826 static const struct file_operations full_fops = {
827 .llseek = full_lseek,
828 .read = read_full,
829 .write = write_full,
830 };
831
832 #ifdef CONFIG_CRASH_DUMP
833 static const struct file_operations oldmem_fops = {
834 .read = read_oldmem,
835 .open = open_oldmem,
836 };
837 #endif
838
839 static ssize_t kmsg_write(struct file * file, const char __user * buf,
840 size_t count, loff_t *ppos)
841 {
842 char *tmp;
843 ssize_t ret;
844
845 tmp = kmalloc(count + 1, GFP_KERNEL);
846 if (tmp == NULL)
847 return -ENOMEM;
848 ret = -EFAULT;
849 if (!copy_from_user(tmp, buf, count)) {
850 tmp[count] = 0;
851 ret = printk("%s", tmp);
852 if (ret > count)
853 /* printk can add a prefix */
854 ret = count;
855 }
856 kfree(tmp);
857 return ret;
858 }
859
860 static const struct file_operations kmsg_fops = {
861 .write = kmsg_write,
862 };
863
864 static int memory_open(struct inode * inode, struct file * filp)
865 {
866 int ret = 0;
867
868 lock_kernel();
869 switch (iminor(inode)) {
870 case 1:
871 filp->f_op = &mem_fops;
872 filp->f_mapping->backing_dev_info =
873 &directly_mappable_cdev_bdi;
874 break;
875 #ifdef CONFIG_DEVKMEM
876 case 2:
877 filp->f_op = &kmem_fops;
878 filp->f_mapping->backing_dev_info =
879 &directly_mappable_cdev_bdi;
880 break;
881 #endif
882 case 3:
883 filp->f_op = &null_fops;
884 break;
885 #ifdef CONFIG_DEVPORT
886 case 4:
887 filp->f_op = &port_fops;
888 break;
889 #endif
890 case 5:
891 filp->f_mapping->backing_dev_info = &zero_bdi;
892 filp->f_op = &zero_fops;
893 break;
894 case 7:
895 filp->f_op = &full_fops;
896 break;
897 case 8:
898 filp->f_op = &random_fops;
899 break;
900 case 9:
901 filp->f_op = &urandom_fops;
902 break;
903 case 11:
904 filp->f_op = &kmsg_fops;
905 break;
906 #ifdef CONFIG_CRASH_DUMP
907 case 12:
908 filp->f_op = &oldmem_fops;
909 break;
910 #endif
911 default:
912 unlock_kernel();
913 return -ENXIO;
914 }
915 if (filp->f_op && filp->f_op->open)
916 ret = filp->f_op->open(inode,filp);
917 unlock_kernel();
918 return ret;
919 }
920
921 static const struct file_operations memory_fops = {
922 .open = memory_open, /* just a selector for the real open */
923 };
924
925 static const struct {
926 unsigned int minor;
927 char *name;
928 umode_t mode;
929 const struct file_operations *fops;
930 } devlist[] = { /* list of minor devices */
931 {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops},
932 #ifdef CONFIG_DEVKMEM
933 {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops},
934 #endif
935 {3, "null", S_IRUGO | S_IWUGO, &null_fops},
936 #ifdef CONFIG_DEVPORT
937 {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops},
938 #endif
939 {5, "zero", S_IRUGO | S_IWUGO, &zero_fops},
940 {7, "full", S_IRUGO | S_IWUGO, &full_fops},
941 {8, "random", S_IRUGO | S_IWUSR, &random_fops},
942 {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
943 {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops},
944 #ifdef CONFIG_CRASH_DUMP
945 {12,"oldmem", S_IRUSR | S_IWUSR | S_IRGRP, &oldmem_fops},
946 #endif
947 };
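/*
 * The minor numbers above are the fixed assignments for character major 1
 * (MEM_MAJOR) listed in Documentation/devices.txt: 1 = /dev/mem,
 * 2 = /dev/kmem, 3 = /dev/null, 4 = /dev/port, 5 = /dev/zero, 7 = /dev/full,
 * 8 = /dev/random, 9 = /dev/urandom, 11 = /dev/kmsg and 12 = /dev/oldmem.
 */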
948
949 static struct class *mem_class;
950
951 static int __init chr_dev_init(void)
952 {
953 int i;
954 int err;
955
956 err = bdi_init(&zero_bdi);
957 if (err)
958 return err;
959
960 if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
961 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
962
963 mem_class = class_create(THIS_MODULE, "mem");
964 for (i = 0; i < ARRAY_SIZE(devlist); i++)
965 device_create(mem_class, NULL,
966 MKDEV(MEM_MAJOR, devlist[i].minor), NULL,
967 devlist[i].name);
968
969 return 0;
970 }
971
972 fs_initcall(chr_dev_init);