1 /*
2 * linux/drivers/char/mem.c
3 *
4 * Copyright (C) 1991, 1992 Linus Torvalds
5 *
6 * Added devfs support.
7 * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
8 * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
9 */
10
11 #include <linux/mm.h>
12 #include <linux/miscdevice.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/mman.h>
16 #include <linux/random.h>
17 #include <linux/init.h>
18 #include <linux/raw.h>
19 #include <linux/tty.h>
20 #include <linux/capability.h>
21 #include <linux/ptrace.h>
22 #include <linux/device.h>
23 #include <linux/highmem.h>
24 #include <linux/crash_dump.h>
25 #include <linux/backing-dev.h>
26 #include <linux/bootmem.h>
27 #include <linux/splice.h>
28 #include <linux/pfn.h>
29
30 #include <asm/uaccess.h>
31 #include <asm/io.h>
32
33 #ifdef CONFIG_IA64
34 # include <linux/efi.h>
35 #endif
36
37 static inline unsigned long size_inside_page(unsigned long start,
38 unsigned long size)
39 {
40 unsigned long sz;
41
42 sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));
43
44 return min(sz, size);
45 }
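/*
 * Worked example (illustrative, not part of the original source): with 4 KiB
 * pages, size_inside_page(0x1ff0, 0x100) returns 0x10, because only 16 bytes
 * remain before the 0x2000 page boundary, while size_inside_page(0x2000, 0x100)
 * returns the full 0x100 since the start address is already page-aligned.
 */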
46
47 /*
48 * Architectures vary in how they handle caching for addresses
49 * outside of main memory.
50 *
51 */
52 static inline int uncached_access(struct file *file, unsigned long addr)
53 {
54 #if defined(CONFIG_IA64)
55 /*
56 * On ia64, we ignore O_DSYNC because we cannot tolerate memory attribute aliases.
57 */
58 return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
59 #elif defined(CONFIG_MIPS)
60 {
61 extern int __uncached_access(struct file *file,
62 unsigned long addr);
63
64 return __uncached_access(file, addr);
65 }
66 #else
67 /*
68 * Accessing memory above the top of memory that the kernel knows about, or
69 * through a file pointer that was marked O_DSYNC, will be done non-cached.
70 */
71 if (file->f_flags & O_DSYNC)
72 return 1;
73 return addr >= __pa(high_memory);
74 #endif
75 }
76
77 #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
78 static inline int valid_phys_addr_range(unsigned long addr, size_t count)
79 {
80 if (addr + count > __pa(high_memory))
81 return 0;
82
83 return 1;
84 }
85
86 static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
87 {
88 return 1;
89 }
90 #endif
91
92 #ifdef CONFIG_STRICT_DEVMEM
93 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
94 {
95 u64 from = ((u64)pfn) << PAGE_SHIFT;
96 u64 to = from + size;
97 u64 cursor = from;
98
99 while (cursor < to) {
100 if (!devmem_is_allowed(pfn)) {
101 printk(KERN_INFO
102 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
103 current->comm, from, to);
104 return 0;
105 }
106 cursor += PAGE_SIZE;
107 pfn++;
108 }
109 return 1;
110 }
111 #else
112 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
113 {
114 return 1;
115 }
116 #endif
117
118 void __attribute__((weak)) unxlate_dev_mem_ptr(unsigned long phys, void *addr)
119 {
120 }
121
122 /*
123 * This function reads *physical* memory. The f_pos points directly to the
124 * memory location.
125 */
126 static ssize_t read_mem(struct file * file, char __user * buf,
127 size_t count, loff_t *ppos)
128 {
129 unsigned long p = *ppos;
130 ssize_t read, sz;
131 char *ptr;
132
133 if (!valid_phys_addr_range(p, count))
134 return -EFAULT;
135 read = 0;
136 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
137 /* we don't have page 0 mapped on sparc and m68k.. */
138 if (p < PAGE_SIZE) {
139 sz = size_inside_page(p, count);
140 if (sz > 0) {
141 if (clear_user(buf, sz))
142 return -EFAULT;
143 buf += sz;
144 p += sz;
145 count -= sz;
146 read += sz;
147 }
148 }
149 #endif
150
151 while (count > 0) {
152 unsigned long remaining;
153
154 sz = size_inside_page(p, count);
155
156 if (!range_is_allowed(p >> PAGE_SHIFT, count))
157 return -EPERM;
158
159 /*
160 * On ia64 if a page has been mapped somewhere as
161 * uncached, then it must also be accessed uncached
162 * by the kernel or data corruption may occur
163 */
164 ptr = xlate_dev_mem_ptr(p);
165 if (!ptr)
166 return -EFAULT;
167
168 remaining = copy_to_user(buf, ptr, sz);
169 unxlate_dev_mem_ptr(p, ptr);
170 if (remaining)
171 return -EFAULT;
172
173 buf += sz;
174 p += sz;
175 count -= sz;
176 read += sz;
177 }
178
179 *ppos += read;
180 return read;
181 }
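/*
 * Illustrative user-space sketch (not part of the original driver): reading a
 * few bytes of physical memory through /dev/mem via read_mem() above.  The
 * physical address is a hypothetical example; CONFIG_STRICT_DEVMEM and
 * valid_phys_addr_range() may still reject a given range.  Needs <fcntl.h>
 * and <unistd.h>.
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	unsigned char data[16];
 *	off_t phys = 0x000f0000;                 hypothetical physical address
 *	if (fd >= 0 && lseek(fd, phys, SEEK_SET) == phys)
 *		read(fd, data, sizeof(data));
 */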
182
183 static ssize_t write_mem(struct file * file, const char __user * buf,
184 size_t count, loff_t *ppos)
185 {
186 unsigned long p = *ppos;
187 ssize_t written, sz;
188 unsigned long copied;
189 void *ptr;
190
191 if (!valid_phys_addr_range(p, count))
192 return -EFAULT;
193
194 written = 0;
195
196 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
197 /* we don't have page 0 mapped on sparc and m68k.. */
198 if (p < PAGE_SIZE) {
199 sz = size_inside_page(p, count);
200 /* Hmm. Do something? */
201 buf += sz;
202 p += sz;
203 count -= sz;
204 written += sz;
205 }
206 #endif
207
208 while (count > 0) {
209 sz = size_inside_page(p, count);
210
211 if (!range_is_allowed(p >> PAGE_SHIFT, sz))
212 return -EPERM;
213
214 /*
215 * On ia64 if a page has been mapped somewhere as
216 * uncached, then it must also be accessed uncached
217 * by the kernel or data corruption may occur
218 */
219 ptr = xlate_dev_mem_ptr(p);
220 if (!ptr) {
221 if (written)
222 break;
223 return -EFAULT;
224 }
225
226 copied = copy_from_user(ptr, buf, sz);
227 unxlate_dev_mem_ptr(p, ptr);
228 if (copied) {
229 written += sz - copied;
230 if (written)
231 break;
232 return -EFAULT;
233 }
234
235 buf += sz;
236 p += sz;
237 count -= sz;
238 written += sz;
239 }
240
241 *ppos += written;
242 return written;
243 }
244
245 int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
246 unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
247 {
248 return 1;
249 }
250
251 #ifndef __HAVE_PHYS_MEM_ACCESS_PROT
252 static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
253 unsigned long size, pgprot_t vma_prot)
254 {
255 #ifdef pgprot_noncached
256 unsigned long offset = pfn << PAGE_SHIFT;
257
258 if (uncached_access(file, offset))
259 return pgprot_noncached(vma_prot);
260 #endif
261 return vma_prot;
262 }
263 #endif
264
265 #ifndef CONFIG_MMU
266 static unsigned long get_unmapped_area_mem(struct file *file,
267 unsigned long addr,
268 unsigned long len,
269 unsigned long pgoff,
270 unsigned long flags)
271 {
272 if (!valid_mmap_phys_addr_range(pgoff, len))
273 return (unsigned long) -EINVAL;
274 return pgoff << PAGE_SHIFT;
275 }
276
277 /* can't do an in-place private mapping if there's no MMU */
278 static inline int private_mapping_ok(struct vm_area_struct *vma)
279 {
280 return vma->vm_flags & VM_MAYSHARE;
281 }
282 #else
283 #define get_unmapped_area_mem NULL
284
285 static inline int private_mapping_ok(struct vm_area_struct *vma)
286 {
287 return 1;
288 }
289 #endif
290
291 static const struct vm_operations_struct mmap_mem_ops = {
292 #ifdef CONFIG_HAVE_IOREMAP_PROT
293 .access = generic_access_phys
294 #endif
295 };
296
297 static int mmap_mem(struct file * file, struct vm_area_struct * vma)
298 {
299 size_t size = vma->vm_end - vma->vm_start;
300
301 if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
302 return -EINVAL;
303
304 if (!private_mapping_ok(vma))
305 return -ENOSYS;
306
307 if (!range_is_allowed(vma->vm_pgoff, size))
308 return -EPERM;
309
310 if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
311 &vma->vm_page_prot))
312 return -EINVAL;
313
314 vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
315 size,
316 vma->vm_page_prot);
317
318 vma->vm_ops = &mmap_mem_ops;
319
320 /* Remap-pfn-range will mark the range VM_IO and VM_RESERVED */
321 if (remap_pfn_range(vma,
322 vma->vm_start,
323 vma->vm_pgoff,
324 size,
325 vma->vm_page_prot)) {
326 return -EAGAIN;
327 }
328 return 0;
329 }
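/*
 * Illustrative user-space sketch (not part of the original driver): mapping a
 * page of physical memory with mmap_mem() above.  The mmap() offset is the
 * physical address and must be page-aligned; opening with O_SYNC (which
 * implies O_DSYNC) requests an uncached mapping on the generic path in
 * phys_mem_access_prot().  The address is a hypothetical example.
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		       fd, 0xfed00000);          hypothetical MMIO page
 */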
330
331 #ifdef CONFIG_DEVKMEM
332 static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
333 {
334 unsigned long pfn;
335
336 /* Turn a kernel-virtual address into a physical page frame */
337 pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
338
339 /*
340 * RED-PEN: on some architectures there is more mapped memory
341 * than available in mem_map which pfn_valid checks
342 * for. Perhaps we should add a new macro here.
343 *
344 * RED-PEN: vmalloc is not supported right now.
345 */
346 if (!pfn_valid(pfn))
347 return -EIO;
348
349 vma->vm_pgoff = pfn;
350 return mmap_mem(file, vma);
351 }
352 #endif
353
354 #ifdef CONFIG_CRASH_DUMP
355 /*
356 * Read memory corresponding to the old kernel.
357 */
358 static ssize_t read_oldmem(struct file *file, char __user *buf,
359 size_t count, loff_t *ppos)
360 {
361 unsigned long pfn, offset;
362 size_t read = 0, csize;
363 int rc = 0;
364
365 while (count) {
366 pfn = *ppos / PAGE_SIZE;
367 if (pfn > saved_max_pfn)
368 return read;
369
370 offset = (unsigned long)(*ppos % PAGE_SIZE);
371 if (count > PAGE_SIZE - offset)
372 csize = PAGE_SIZE - offset;
373 else
374 csize = count;
375
376 rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
377 if (rc < 0)
378 return rc;
379 buf += csize;
380 *ppos += csize;
381 read += csize;
382 count -= csize;
383 }
384 return read;
385 }
386 #endif
387
388 #ifdef CONFIG_DEVKMEM
389 /*
390 * This function reads the *virtual* memory as seen by the kernel.
391 */
392 static ssize_t read_kmem(struct file *file, char __user *buf,
393 size_t count, loff_t *ppos)
394 {
395 unsigned long p = *ppos;
396 ssize_t low_count, read, sz;
397 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
398
399 read = 0;
400 if (p < (unsigned long) high_memory) {
401 low_count = count;
402 if (count > (unsigned long) high_memory - p)
403 low_count = (unsigned long) high_memory - p;
404
405 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
406 /* we don't have page 0 mapped on sparc and m68k.. */
407 if (p < PAGE_SIZE && low_count > 0) {
408 sz = size_inside_page(p, low_count);
409 if (clear_user(buf, sz))
410 return -EFAULT;
411 buf += sz;
412 p += sz;
413 read += sz;
414 low_count -= sz;
415 count -= sz;
416 }
417 #endif
418 while (low_count > 0) {
419 sz = size_inside_page(p, low_count);
420
421 /*
422 * On ia64 if a page has been mapped somewhere as
423 * uncached, then it must also be accessed uncached
424 * by the kernel or data corruption may occur
425 */
426 kbuf = xlate_dev_kmem_ptr((char *)p);
427
428 if (copy_to_user(buf, kbuf, sz))
429 return -EFAULT;
430 buf += sz;
431 p += sz;
432 read += sz;
433 low_count -= sz;
434 count -= sz;
435 }
436 }
437
438 if (count > 0) {
439 kbuf = (char *)__get_free_page(GFP_KERNEL);
440 if (!kbuf)
441 return -ENOMEM;
442 while (count > 0) {
443 sz = size_inside_page(p, count);
444 sz = vread(kbuf, (char *)p, sz);
445 if (!sz)
446 break;
447 if (copy_to_user(buf, kbuf, sz)) {
448 free_page((unsigned long)kbuf);
449 return -EFAULT;
450 }
451 count -= sz;
452 buf += sz;
453 read += sz;
454 p += sz;
455 }
456 free_page((unsigned long)kbuf);
457 }
458 *ppos = p;
459 return read;
460 }
461
462
463 static inline ssize_t
464 do_write_kmem(unsigned long p, const char __user *buf,
465 size_t count, loff_t *ppos)
466 {
467 ssize_t written, sz;
468 unsigned long copied;
469
470 written = 0;
471 #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
472 /* we don't have page 0 mapped on sparc and m68k.. */
473 if (p < PAGE_SIZE) {
474 sz = size_inside_page(p, count);
475 /* Hmm. Do something? */
476 buf += sz;
477 p += sz;
478 count -= sz;
479 written += sz;
480 }
481 #endif
482
483 while (count > 0) {
484 char *ptr;
485
486 sz = size_inside_page(p, count);
487
488 /*
489 * On ia64 if a page has been mapped somewhere as
490 * uncached, then it must also be accessed uncached
491 * by the kernel or data corruption may occur
492 */
493 ptr = xlate_dev_kmem_ptr((char *)p);
494
495 copied = copy_from_user(ptr, buf, sz);
496 if (copied) {
497 written += sz - copied;
498 if (written)
499 break;
500 return -EFAULT;
501 }
502 buf += sz;
503 p += sz;
504 count -= sz;
505 written += sz;
506 }
507
508 *ppos += written;
509 return written;
510 }
511
512
513 /*
514 * This function writes to the *virtual* memory as seen by the kernel.
515 */
516 static ssize_t write_kmem(struct file * file, const char __user * buf,
517 size_t count, loff_t *ppos)
518 {
519 unsigned long p = *ppos;
520 ssize_t wrote = 0;
521 ssize_t virtr = 0;
522 char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
523
524 if (p < (unsigned long) high_memory) {
525 unsigned long to_write = min_t(unsigned long, count,
526 (unsigned long)high_memory - p);
527 wrote = do_write_kmem(p, buf, to_write, ppos);
528 if (wrote != to_write)
529 return wrote;
530 p += wrote;
531 buf += wrote;
532 count -= wrote;
533 }
534
535 if (count > 0) {
536 kbuf = (char *)__get_free_page(GFP_KERNEL);
537 if (!kbuf)
538 return wrote ? wrote : -ENOMEM;
539 while (count > 0) {
540 unsigned long sz = size_inside_page(p, count);
541 unsigned long n;
542
543 n = copy_from_user(kbuf, buf, sz);
544 if (n) {
545 if (wrote + virtr)
546 break;
547 free_page((unsigned long)kbuf);
548 return -EFAULT;
549 }
550 sz = vwrite(kbuf, (char *)p, sz);
551 count -= sz;
552 buf += sz;
553 virtr += sz;
554 p += sz;
555 }
556 free_page((unsigned long)kbuf);
557 }
558
559 *ppos = p;
560 return virtr + wrote;
561 }
562 #endif
563
564 #ifdef CONFIG_DEVPORT
565 static ssize_t read_port(struct file * file, char __user * buf,
566 size_t count, loff_t *ppos)
567 {
568 unsigned long i = *ppos;
569 char __user *tmp = buf;
570
571 if (!access_ok(VERIFY_WRITE, buf, count))
572 return -EFAULT;
573 while (count-- > 0 && i < 65536) {
574 if (__put_user(inb(i),tmp) < 0)
575 return -EFAULT;
576 i++;
577 tmp++;
578 }
579 *ppos = i;
580 return tmp-buf;
581 }
582
583 static ssize_t write_port(struct file * file, const char __user * buf,
584 size_t count, loff_t *ppos)
585 {
586 unsigned long i = *ppos;
587 const char __user * tmp = buf;
588
589 if (!access_ok(VERIFY_READ,buf,count))
590 return -EFAULT;
591 while (count-- > 0 && i < 65536) {
592 char c;
593 if (__get_user(c, tmp)) {
594 if (tmp > buf)
595 break;
596 return -EFAULT;
597 }
598 outb(c,i);
599 i++;
600 tmp++;
601 }
602 *ppos = i;
603 return tmp-buf;
604 }
605 #endif
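/*
 * Illustrative sketch (not part of the original driver): /dev/port exposes
 * x86-style I/O ports one byte at a time, with the file offset used as the
 * port number, and open_port() below requires CAP_SYS_RAWIO.  Reading port
 * 0x80 (a hypothetical example) from user space:
 *
 *	int fd = open("/dev/port", O_RDONLY);
 *	unsigned char val;
 *	pread(fd, &val, 1, 0x80);
 */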
606
607 static ssize_t read_null(struct file * file, char __user * buf,
608 size_t count, loff_t *ppos)
609 {
610 return 0;
611 }
612
613 static ssize_t write_null(struct file * file, const char __user * buf,
614 size_t count, loff_t *ppos)
615 {
616 return count;
617 }
618
619 static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
620 struct splice_desc *sd)
621 {
622 return sd->len;
623 }
624
625 static ssize_t splice_write_null(struct pipe_inode_info *pipe,struct file *out,
626 loff_t *ppos, size_t len, unsigned int flags)
627 {
628 return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
629 }
630
631 static ssize_t read_zero(struct file * file, char __user * buf,
632 size_t count, loff_t *ppos)
633 {
634 size_t written;
635
636 if (!count)
637 return 0;
638
639 if (!access_ok(VERIFY_WRITE, buf, count))
640 return -EFAULT;
641
642 written = 0;
643 while (count) {
644 unsigned long unwritten;
645 size_t chunk = count;
646
647 if (chunk > PAGE_SIZE)
648 chunk = PAGE_SIZE; /* Just for latency reasons */
649 unwritten = __clear_user(buf, chunk);
650 written += chunk - unwritten;
651 if (unwritten)
652 break;
653 if (signal_pending(current))
654 return written ? written : -ERESTARTSYS;
655 buf += chunk;
656 count -= chunk;
657 cond_resched();
658 }
659 return written ? written : -EFAULT;
660 }
661
662 static int mmap_zero(struct file * file, struct vm_area_struct * vma)
663 {
664 #ifndef CONFIG_MMU
665 return -ENOSYS;
666 #endif
667 if (vma->vm_flags & VM_SHARED)
668 return shmem_zero_setup(vma);
669 return 0;
670 }
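/*
 * Illustrative sketch (not part of the original driver): before MAP_ANONYMOUS
 * was widespread, a private mapping of /dev/zero was the usual way to obtain
 * zero-filled memory, and mmap_zero() above still supports it.
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	void *p = mmap(NULL, 1 << 20, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE, fd, 0);
 */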
671
672 static ssize_t write_full(struct file * file, const char __user * buf,
673 size_t count, loff_t *ppos)
674 {
675 return -ENOSPC;
676 }
677
678 /*
679 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
680 * can fopen() both devices with "a" now. This was previously impossible.
681 * -- SRB.
682 */
683
684 static loff_t null_lseek(struct file * file, loff_t offset, int orig)
685 {
686 return file->f_pos = 0;
687 }
688
689 /*
690 * The memory devices use the full 32/64 bits of the offset, and so we cannot
691 * check against negative addresses: they are ok. The return value is weird,
692 * though, in that case (0).
693 *
694 * also note that seeking relative to the "end of file" isn't supported:
695 * it has no meaning, so it returns -EINVAL.
696 */
697 static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
698 {
699 loff_t ret;
700
701 mutex_lock(&file->f_path.dentry->d_inode->i_mutex);
702 switch (orig) {
703 case 0:
704 file->f_pos = offset;
705 ret = file->f_pos;
706 force_successful_syscall_return();
707 break;
708 case 1:
709 file->f_pos += offset;
710 ret = file->f_pos;
711 force_successful_syscall_return();
712 break;
713 default:
714 ret = -EINVAL;
715 }
716 mutex_unlock(&file->f_path.dentry->d_inode->i_mutex);
717 return ret;
718 }
719
720 static int open_port(struct inode * inode, struct file * filp)
721 {
722 return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
723 }
724
725 #define zero_lseek null_lseek
726 #define full_lseek null_lseek
727 #define write_zero write_null
728 #define read_full read_zero
729 #define open_mem open_port
730 #define open_kmem open_mem
731 #define open_oldmem open_mem
732
733 static const struct file_operations mem_fops = {
734 .llseek = memory_lseek,
735 .read = read_mem,
736 .write = write_mem,
737 .mmap = mmap_mem,
738 .open = open_mem,
739 .get_unmapped_area = get_unmapped_area_mem,
740 };
741
742 #ifdef CONFIG_DEVKMEM
743 static const struct file_operations kmem_fops = {
744 .llseek = memory_lseek,
745 .read = read_kmem,
746 .write = write_kmem,
747 .mmap = mmap_kmem,
748 .open = open_kmem,
749 .get_unmapped_area = get_unmapped_area_mem,
750 };
751 #endif
752
753 static const struct file_operations null_fops = {
754 .llseek = null_lseek,
755 .read = read_null,
756 .write = write_null,
757 .splice_write = splice_write_null,
758 };
759
760 #ifdef CONFIG_DEVPORT
761 static const struct file_operations port_fops = {
762 .llseek = memory_lseek,
763 .read = read_port,
764 .write = write_port,
765 .open = open_port,
766 };
767 #endif
768
769 static const struct file_operations zero_fops = {
770 .llseek = zero_lseek,
771 .read = read_zero,
772 .write = write_zero,
773 .mmap = mmap_zero,
774 };
775
776 /*
777 * capabilities for /dev/zero
778 * - permits private mappings, "copies" are taken of the source of zeros
779 */
780 static struct backing_dev_info zero_bdi = {
781 .name = "char/mem",
782 .capabilities = BDI_CAP_MAP_COPY,
783 };
784
785 static const struct file_operations full_fops = {
786 .llseek = full_lseek,
787 .read = read_full,
788 .write = write_full,
789 };
790
791 #ifdef CONFIG_CRASH_DUMP
792 static const struct file_operations oldmem_fops = {
793 .read = read_oldmem,
794 .open = open_oldmem,
795 };
796 #endif
797
798 static ssize_t kmsg_write(struct file * file, const char __user * buf,
799 size_t count, loff_t *ppos)
800 {
801 char *tmp;
802 ssize_t ret;
803
804 tmp = kmalloc(count + 1, GFP_KERNEL);
805 if (tmp == NULL)
806 return -ENOMEM;
807 ret = -EFAULT;
808 if (!copy_from_user(tmp, buf, count)) {
809 tmp[count] = 0;
810 ret = printk("%s", tmp);
811 if (ret > count)
812 /* printk can add a prefix */
813 ret = count;
814 }
815 kfree(tmp);
816 return ret;
817 }
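/*
 * Illustrative sketch (not part of the original driver): a write to /dev/kmsg
 * is handed to printk() by kmsg_write() above, so the text shows up in the
 * kernel log (dmesg).
 *
 *	int fd = open("/dev/kmsg", O_WRONLY);
 *	write(fd, "hello from user space\n", 22);
 */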
818
819 static const struct file_operations kmsg_fops = {
820 .write = kmsg_write,
821 };
822
823 static const struct memdev {
824 const char *name;
825 mode_t mode;
826 const struct file_operations *fops;
827 struct backing_dev_info *dev_info;
828 } devlist[] = {
829 [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
830 #ifdef CONFIG_DEVKMEM
831 [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
832 #endif
833 [3] = { "null", 0666, &null_fops, NULL },
834 #ifdef CONFIG_DEVPORT
835 [4] = { "port", 0, &port_fops, NULL },
836 #endif
837 [5] = { "zero", 0666, &zero_fops, &zero_bdi },
838 [7] = { "full", 0666, &full_fops, NULL },
839 [8] = { "random", 0666, &random_fops, NULL },
840 [9] = { "urandom", 0666, &urandom_fops, NULL },
841 [11] = { "kmsg", 0, &kmsg_fops, NULL },
842 #ifdef CONFIG_CRASH_DUMP
843 [12] = { "oldmem", 0, &oldmem_fops, NULL },
844 #endif
845 };
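/*
 * Illustrative note (not part of the original source): the array index is the
 * minor number under character major MEM_MAJOR (1), matching the classic
 * device nodes, e.g. /dev/null is 1:3, /dev/zero is 1:5 and /dev/urandom
 * is 1:9.
 */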
846
847 static int memory_open(struct inode *inode, struct file *filp)
848 {
849 int minor;
850 const struct memdev *dev;
851
852 minor = iminor(inode);
853 if (minor >= ARRAY_SIZE(devlist))
854 return -ENXIO;
855
856 dev = &devlist[minor];
857 if (!dev->fops)
858 return -ENXIO;
859
860 filp->f_op = dev->fops;
861 if (dev->dev_info)
862 filp->f_mapping->backing_dev_info = dev->dev_info;
863
864 if (dev->fops->open)
865 return dev->fops->open(inode, filp);
866
867 return 0;
868 }
869
870 static const struct file_operations memory_fops = {
871 .open = memory_open,
872 };
873
874 static char *mem_devnode(struct device *dev, mode_t *mode)
875 {
876 if (mode && devlist[MINOR(dev->devt)].mode)
877 *mode = devlist[MINOR(dev->devt)].mode;
878 return NULL;
879 }
880
881 static struct class *mem_class;
882
883 static int __init chr_dev_init(void)
884 {
885 int minor;
886 int err;
887
888 err = bdi_init(&zero_bdi);
889 if (err)
890 return err;
891
892 if (register_chrdev(MEM_MAJOR,"mem",&memory_fops))
893 printk("unable to get major %d for memory devs\n", MEM_MAJOR);
894
895 mem_class = class_create(THIS_MODULE, "mem");
896 mem_class->devnode = mem_devnode;
897 for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
898 if (!devlist[minor].name)
899 continue;
900 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
901 NULL, devlist[minor].name);
902 }
903
904 return 0;
905 }
906
907 fs_initcall(chr_dev_init);