/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}
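
/*
 * Worked example (editor's illustration, not referenced by the code):
 * with PAGE_SIZE == 4096, size_inside_page(0x1ff0, 64) computes
 * sz = 4096 - (0x1ff0 & 0xfff) = 16 and returns min(16, 64) = 16, so a
 * caller stepping through a range in size_inside_page() chunks never
 * crosses a page boundary within a single copy.
 */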

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn)) {
			printk(KERN_INFO
				"Program %s tried to access /dev/mem between %Lx->%Lx.\n",
				current->comm, from, to);
			return 0;
		}
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif
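
/*
 * Editor's note: the actual policy lives in each architecture's
 * devmem_is_allowed(). On x86, for instance, it is understood to allow
 * the low 1MiB and non-System-RAM pages (device mappings) while
 * rejecting ordinary kernel RAM, so a CONFIG_STRICT_DEVMEM kernel still
 * permits legacy BIOS/VGA accesses but not arbitrary RAM snooping.
 */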

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif
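
/*
 * Editor's note: the empty __weak body above is only the default; an
 * architecture that creates a temporary mapping in xlate_dev_mem_ptr()
 * supplies a strong definition to tear it down, which the linker
 * prefers over the weak one. A minimal arch-side sketch (hypothetical,
 * for illustration only):
 *
 *	void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
 *	{
 *		iounmap((void __iomem *)addr);
 *	}
 */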

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	while (count > 0) {
		unsigned long remaining;

		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, count))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr)
			return -EFAULT;

		remaining = copy_to_user(buf, ptr, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (remaining)
			return -EFAULT;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
	}

	*ppos += read;
	return read;
}
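
/*
 * Userspace sketch (editor's illustration, not part of this driver):
 * reading the first 16 bytes of physical memory through the interface
 * above, assuming the range is permitted by the STRICT_DEVMEM policy
 * and the caller may open /dev/mem:
 *
 *	unsigned char b[16];
 *	int fd = open("/dev/mem", O_RDONLY);
 *
 *	if (fd >= 0) {
 *		ssize_t n = pread(fd, b, sizeof(b), 0);
 *		(on success n == 16 and b holds physical bytes 0x0-0xf)
 *		close(fd);
 *	}
 */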

static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		sz = size_inside_page(p, count);

		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
			return -EPERM;

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_mem_ptr(p);
		if (!ptr) {
			if (written)
				break;
			return -EFAULT;
		}

		copied = copy_from_user(ptr, buf, sz);
		unxlate_dev_mem_ptr(p, ptr);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
					  &vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}
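
/*
 * Userspace sketch (editor's illustration): mapping one page of
 * physical memory; PHYS_ADDR is a hypothetical page-aligned address
 * that the range checks above accept (e.g. a device BAR). The file
 * offset passed to mmap() is the physical address, which arrives here
 * as vma->vm_pgoff and is handed to remap_pfn_range():
 *
 *	int fd = open("/dev/mem", O_RDWR | O_SYNC);
 *	volatile uint32_t *reg = mmap(NULL, 4096,
 *				      PROT_READ | PROT_WRITE,
 *				      MAP_SHARED, fd, PHYS_ADDR);
 */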

static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}

static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		void *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}

static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp - buf;
}
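
/*
 * Userspace sketch (editor's illustration): /dev/port exposes I/O port
 * space one byte at a time, with the file offset as the port number.
 * Writing to the classic x86 POST diagnostic port 0x80 (requires the
 * CAP_SYS_RAWIO check in open_port() below to pass; inherently
 * machine-specific):
 *
 *	unsigned char v = 0xaa;
 *	int fd = open("/dev/port", O_WRONLY);
 *
 *	if (fd >= 0) {
 *		lseek(fd, 0x80, SEEK_SET);
 *		write(fd, &v, 1);	(ends up as outb(0xaa, 0x80))
 *		close(fd);
 *	}
 */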

static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);

	iov_iter_advance(from, count);
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	return 0;
}
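
/*
 * Userspace sketch (editor's illustration): a MAP_SHARED mapping of
 * /dev/zero is backed by shmem via shmem_zero_setup() above, so the
 * zero-filled pages are shared across fork() (the "shared /dev/zero
 * mmapping" feature noted in the file header); MAP_PRIVATE behaves
 * like plain anonymous memory:
 *
 *	int fd = open("/dev/zero", O_RDWR);
 *	char *shared = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, 0);
 */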

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero. Most notably, you
 * can fopen() both devices with "a" now. This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
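		/* fall through - the updated offset is handled as SEEK_SET */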
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if (IS_ERR_VALUE((unsigned long long)offset)) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem

static const struct file_operations __maybe_unused mem_fops = {
	.llseek = memory_lseek,
	.read = read_mem,
	.write = write_mem,
	.mmap = mmap_mem,
	.open = open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
	.llseek = memory_lseek,
	.read = read_kmem,
	.write = write_kmem,
	.mmap = mmap_kmem,
	.open = open_kmem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
	.llseek = null_lseek,
	.read = read_null,
	.write = write_null,
	.read_iter = read_iter_null,
	.write_iter = write_iter_null,
	.splice_write = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
	.llseek = memory_lseek,
	.read = read_port,
	.write = write_port,
	.open = open_port,
};

static const struct file_operations zero_fops = {
	.llseek = zero_lseek,
	.write = write_zero,
	.read_iter = read_iter_zero,
	.write_iter = write_iter_zero,
	.mmap = mmap_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
	.llseek = full_lseek,
	.read_iter = read_iter_zero,
	.write = write_full,
};

static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	[1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
	[2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	[3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
	[4] = { "port", 0, &port_fops, 0 },
#endif
	[5] = { "zero", 0666, &zero_fops, 0 },
	[7] = { "full", 0666, &full_fops, 0 },
	[8] = { "random", 0666, &random_fops, 0 },
	[9] = { "urandom", 0666, &urandom_fops, 0 },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};
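
/*
 * Editor's note: the devlist[] index doubles as the minor number under
 * MEM_MAJOR (1), giving the conventional nodes mem=1:1, kmem=1:2,
 * null=1:3, port=1:4, zero=1:5, full=1:7, random=1:8, urandom=1:9 and
 * kmsg=1:11. chr_dev_init() below registers them as class devices, so
 * udev normally creates the nodes; by hand it would be, e.g.:
 *
 *	mknod /dev/null c 1 3
 *	chmod 666 /dev/null
 */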

static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk(KERN_ERR "unable to get major %d for memory devs\n",
		       MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);