/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>

#include "kernfs-internal.h"
/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file.  kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);
struct kernfs_open_node {
	atomic_t		refcnt;
	atomic_t		event;
	wait_queue_head_t	poll;
	struct list_head	files; /* goes through kernfs_open_file.list */
};
static struct kernfs_open_file *kernfs_of(struct file *file)
{
	return ((struct seq_file *)file->private_data)->private;
}
/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
	if (kn->flags & KERNFS_LOCKDEP)
		lockdep_assert_held(kn);
	return kn->attr.ops;
}
static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops;

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn))
		return ERR_PTR(-ENODEV);

	ops = kernfs_ops(of->kn);
	if (ops->seq_start) {
		return ops->seq_start(sf, ppos);
	} else {
		/*
		 * The same behavior and code as single_open().  Returns
		 * !NULL if pos is at the beginning; otherwise, NULL.
		 */
		return NULL + !*ppos;
	}
}
static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_next) {
		return ops->seq_next(sf, v, ppos);
	} else {
		/*
		 * The same behavior and code as single_open(), always
		 * terminate after the initial read.
		 */
		++*ppos;
		return NULL;
	}
}
static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;
	const struct kernfs_ops *ops = kernfs_ops(of->kn);

	if (ops->seq_stop)
		ops->seq_stop(sf, v);

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);
}
static int kernfs_seq_show(struct seq_file *sf, void *v)
{
	struct kernfs_open_file *of = sf->private;

	of->event = atomic_read(&of->kn->attr.open->event);

	return of->kn->attr.ops->seq_show(sf, v);
}
static const struct seq_operations kernfs_seq_ops = {
	.start = kernfs_seq_start,
	.next = kernfs_seq_next,
	.stop = kernfs_seq_stop,
	.show = kernfs_seq_show,
};
/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in read(2) call should be passed to the read callback making
 * it difficult to use seq_file.  Implement simplistic custom buffering for
 * bin files.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
				       char __user *user_buf, size_t count,
				       loff_t *ppos)
{
	ssize_t len = min_t(size_t, count, PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		len = -ENODEV;
		mutex_unlock(&of->mutex);
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->read)
		len = ops->read(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len < 0)
		goto out_free;

	if (copy_to_user(user_buf, buf, len)) {
		len = -EFAULT;
		goto out_free;
	}

	*ppos += len;

 out_free:
	kfree(buf);
	return len;
}
/**
 * kernfs_fop_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: buffer to read into
 * @count: number of bytes
 * @ppos: starting offset
 */
static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
			       size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);

	if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
		return seq_read(file, user_buf, count, ppos);
	else
		return kernfs_file_direct_read(of, user_buf, count, ppos);
}
/**
 * kernfs_fop_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing partial
 * writes, so we don't support them.  We expect the entire buffer to come
 * on the first write.  Hint: if you're writing a value, first read the
 * file, modify only the value you're changing, then write the entire
 * buffer back (see the usage sketch after this function).
 */
static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
				size_t count, loff_t *ppos)
{
	struct kernfs_open_file *of = kernfs_of(file);
	ssize_t len = min_t(size_t, count, PAGE_SIZE);
	const struct kernfs_ops *ops;
	char *buf;

	buf = kmalloc(len + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, user_buf, len)) {
		len = -EFAULT;
		goto out_free;
	}
	buf[len] = '\0';	/* guarantee string termination */

	/*
	 * @of->mutex nests outside active ref and is just to ensure that
	 * the ops aren't called concurrently for the same open file.
	 */
	mutex_lock(&of->mutex);
	if (!kernfs_get_active(of->kn)) {
		mutex_unlock(&of->mutex);
		len = -ENODEV;
		goto out_free;
	}

	ops = kernfs_ops(of->kn);
	if (ops->write)
		len = ops->write(of, buf, len, *ppos);
	else
		len = -EINVAL;

	kernfs_put_active(of->kn);
	mutex_unlock(&of->mutex);

	if (len > 0)
		*ppos += len;

out_free:
	kfree(buf);
	return len;
}
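/*
 * Illustrative userspace sketch of the read-modify-write pattern suggested
 * above.  This is an assumption-laden example, not part of the kernel
 * proper: the attribute path is made up, update_value_in() is a
 * hypothetical helper, and error handling is omitted.
 *
 *	char buf[4096];
 *	ssize_t n;
 *	int fd = open("/sys/devices/example/attr", O_RDWR);
 *
 *	n = read(fd, buf, sizeof(buf) - 1);	// read the whole file first
 *	buf[n] = '\0';
 *	update_value_in(buf);			// edit only the field being changed
 *	lseek(fd, 0, SEEK_SET);
 *	write(fd, buf, strlen(buf));		// write the entire buffer back in one write(2)
 *	close(fd);
 */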
static void kernfs_vma_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);

	if (!of->vm_ops)
		return;

	if (!kernfs_get_active(of->kn))
		return;

	if (of->vm_ops->open)
		of->vm_ops->open(vma);

	kernfs_put_active(of->kn);
}
static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = VM_FAULT_SIGBUS;
	if (of->vm_ops->fault)
		ret = of->vm_ops->fault(vma, vmf);

	kernfs_put_active(of->kn);
	return ret;
}
static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
				   struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return VM_FAULT_SIGBUS;

	if (!kernfs_get_active(of->kn))
		return VM_FAULT_SIGBUS;

	ret = 0;
	if (of->vm_ops->page_mkwrite)
		ret = of->vm_ops->page_mkwrite(vma, vmf);
	else
		file_update_time(file);

	kernfs_put_active(of->kn);
	return ret;
}
static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
			     void *buf, int len, int write)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return -EINVAL;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = -EINVAL;
	if (of->vm_ops->access)
		ret = of->vm_ops->access(vma, addr, buf, len, write);

	kernfs_put_active(of->kn);
	return ret;
}
#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
				 struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!kernfs_get_active(of->kn))
		return -EINVAL;

	ret = 0;
	if (of->vm_ops->set_policy)
		ret = of->vm_ops->set_policy(vma, new);

	kernfs_put_active(of->kn);
	return ret;
}
static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
					       unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	struct mempolicy *pol;

	if (!of->vm_ops)
		return vma->vm_policy;

	if (!kernfs_get_active(of->kn))
		return vma->vm_policy;

	pol = vma->vm_policy;
	if (of->vm_ops->get_policy)
		pol = of->vm_ops->get_policy(vma, addr);

	kernfs_put_active(of->kn);
	return pol;
}
static int kernfs_vma_migrate(struct vm_area_struct *vma,
			      const nodemask_t *from, const nodemask_t *to,
			      unsigned long flags)
{
	struct file *file = vma->vm_file;
	struct kernfs_open_file *of = kernfs_of(file);
	int ret;

	if (!of->vm_ops)
		return 0;

	if (!kernfs_get_active(of->kn))
		return 0;

	ret = 0;
	if (of->vm_ops->migrate)
		ret = of->vm_ops->migrate(vma, from, to, flags);

	kernfs_put_active(of->kn);
	return ret;
}
#endif	/* CONFIG_NUMA */
static const struct vm_operations_struct kernfs_vm_ops = {
	.open		= kernfs_vma_open,
	.fault		= kernfs_vma_fault,
	.page_mkwrite	= kernfs_vma_page_mkwrite,
	.access		= kernfs_vma_access,
#ifdef CONFIG_NUMA
	.set_policy	= kernfs_vma_set_policy,
	.get_policy	= kernfs_vma_get_policy,
	.migrate	= kernfs_vma_migrate,
#endif
};
static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct kernfs_open_file *of = kernfs_of(file);
	const struct kernfs_ops *ops;
	int rc;

	/*
	 * mmap path and of->mutex are prone to triggering spurious lockdep
	 * warnings and we don't want to add spurious locking dependency
	 * between the two.  Check whether mmap is actually implemented
	 * without grabbing @of->mutex by testing HAS_MMAP flag.  See the
	 * comment in kernfs_fop_open() for more details.
	 */
	if (!(of->kn->flags & KERNFS_HAS_MMAP))
		return -ENODEV;

	mutex_lock(&of->mutex);

	rc = -ENODEV;
	if (!kernfs_get_active(of->kn))
		goto out_unlock;

	ops = kernfs_ops(of->kn);
	rc = ops->mmap(of, vma);
	if (rc)
		goto out_put;

	/*
	 * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
	 * to satisfy versions of X which crash if the mmap fails: that
	 * substitutes a new vm_file, and we don't then want bin_vm_ops.
	 */
	if (vma->vm_file != file)
		goto out_put;

	rc = -EINVAL;
	if (of->mmapped && of->vm_ops != vma->vm_ops)
		goto out_put;

	/*
	 * It is not possible to successfully wrap close.
	 * So error if someone is trying to use close.
	 */
	rc = -EINVAL;
	if (vma->vm_ops && vma->vm_ops->close)
		goto out_put;

	rc = 0;
	of->mmapped = true;
	of->vm_ops = vma->vm_ops;
	vma->vm_ops = &kernfs_vm_ops;
out_put:
	kernfs_put_active(of->kn);
out_unlock:
	mutex_unlock(&of->mutex);

	return rc;
}
/**
 * kernfs_get_open_node - get or create kernfs_open_node
 * @kn: target kernfs_node
 * @of: kernfs_open_file for this instance of open
 *
 * If @kn->attr.open exists, increment its reference count; otherwise,
 * create one.  @of is chained to the files list.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
				struct kernfs_open_file *of)
{
	struct kernfs_open_node *on, *new_on = NULL;

 retry:
	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irq(&kernfs_open_node_lock);

	if (!kn->attr.open && new_on) {
		kn->attr.open = new_on;
		new_on = NULL;
	}

	on = kn->attr.open;
	if (on) {
		atomic_inc(&on->refcnt);
		list_add_tail(&of->list, &on->files);
	}

	spin_unlock_irq(&kernfs_open_node_lock);
	mutex_unlock(&kernfs_open_file_mutex);

	if (on) {
		kfree(new_on);
		return 0;
	}

	/* not there, initialize a new one and retry */
	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
	if (!new_on)
		return -ENOMEM;

	atomic_set(&new_on->refcnt, 0);
	atomic_set(&new_on->event, 1);
	init_waitqueue_head(&new_on->poll);
	INIT_LIST_HEAD(&new_on->files);
	goto retry;
}
/**
 * kernfs_put_open_node - put kernfs_open_node
 * @kn: target kernfs_node
 * @of: associated kernfs_open_file
 *
 * Put @kn->attr.open and unlink @of from the files list.  If
 * reference count reaches zero, disassociate and free it.
 *
 * LOCKING:
 * None.
 */
static void kernfs_put_open_node(struct kernfs_node *kn,
				 struct kernfs_open_file *of)
{
	struct kernfs_open_node *on = kn->attr.open;
	unsigned long flags;

	mutex_lock(&kernfs_open_file_mutex);
	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	if (of)
		list_del(&of->list);

	if (atomic_dec_and_test(&on->refcnt))
		kn->attr.open = NULL;
	else
		on = NULL;

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
	mutex_unlock(&kernfs_open_file_mutex);

	kfree(on);
}
static int kernfs_fop_open(struct inode *inode, struct file *file)
{
	struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
	const struct kernfs_ops *ops;
	struct kernfs_open_file *of;
	bool has_read, has_write, has_mmap;
	int error = -EACCES;

	if (!kernfs_get_active(kn))
		return -ENODEV;

	ops = kernfs_ops(kn);

	has_read = ops->seq_show || ops->read || ops->mmap;
	has_write = ops->write || ops->mmap;
	has_mmap = ops->mmap;

	/* check perms and supported operations */
	if ((file->f_mode & FMODE_WRITE) &&
	    (!(inode->i_mode & S_IWUGO) || !has_write))
		goto err_out;

	if ((file->f_mode & FMODE_READ) &&
	    (!(inode->i_mode & S_IRUGO) || !has_read))
		goto err_out;

	/* allocate a kernfs_open_file for the file */
	error = -ENOMEM;
	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
	if (!of)
		goto err_out;

	/*
	 * The following is done to give a different lockdep key to
	 * @of->mutex for files which implement mmap.  This is a rather
	 * crude way to avoid false positive lockdep warning around
	 * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
	 * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
	 * which mm->mmap_sem nests, while holding @of->mutex.  As each
	 * open file has a separate mutex, it's okay as long as those don't
	 * happen on the same file.  At this point, we can't easily give
	 * each file a separate locking class.  Let's differentiate on
	 * whether the file has mmap or not for now.
	 *
	 * Both paths of the branch look the same.  They're supposed to
	 * look that way and give @of->mutex different static lockdep keys.
	 */
	if (has_mmap)
		mutex_init(&of->mutex);
	else
		mutex_init(&of->mutex);

	of->kn = kn;
	of->file = file;

	/*
	 * Always instantiate seq_file even if read access doesn't use
	 * seq_file or is not requested.  This unifies private data access
	 * and readable regular files are the vast majority anyway.
	 */
	if (ops->seq_show)
		error = seq_open(file, &kernfs_seq_ops);
	else
		error = seq_open(file, NULL);
	if (error)
		goto err_free;

	((struct seq_file *)file->private_data)->private = of;

	/* seq_file clears PWRITE unconditionally, restore it if WRITE */
	if (file->f_mode & FMODE_WRITE)
		file->f_mode |= FMODE_PWRITE;

	/* make sure we have open node struct */
	error = kernfs_get_open_node(kn, of);
	if (error)
		goto err_close;

	/* open succeeded, put active references */
	kernfs_put_active(kn);
	return 0;

err_close:
	seq_release(inode, file);
err_free:
	kfree(of);
err_out:
	kernfs_put_active(kn);
	return error;
}
static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_file *of = kernfs_of(filp);

	kernfs_put_open_node(kn, of);
	seq_release(inode, filp);
	kfree(of);

	return 0;
}
void kernfs_unmap_bin_file(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	struct kernfs_open_file *of;

	if (!(kn->flags & KERNFS_HAS_MMAP))
		return;

	spin_lock_irq(&kernfs_open_node_lock);
	on = kn->attr.open;
	if (on)
		atomic_inc(&on->refcnt);
	spin_unlock_irq(&kernfs_open_node_lock);
	if (!on)
		return;

	mutex_lock(&kernfs_open_file_mutex);
	list_for_each_entry(of, &on->files, list) {
		struct inode *inode = file_inode(of->file);
		unmap_mapping_range(inode->i_mapping, 0, 0, 1);
	}
	mutex_unlock(&kernfs_open_file_mutex);

	kernfs_put_open_node(kn, NULL);
}
/*
 * Kernfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (neither 'poll' nor 'select' return
 * an appropriate error code).  When in doubt, set a suitable timeout value.
 * A userspace usage sketch follows kernfs_fop_poll() below.
 */
static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
{
	struct kernfs_open_file *of = kernfs_of(filp);
	struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
	struct kernfs_open_node *on = kn->attr.open;

	/* need parent for the kobj, grab both */
	if (!kernfs_get_active(kn))
		goto trigger;

	poll_wait(filp, &on->poll, wait);

	kernfs_put_active(kn);

	if (of->event != atomic_read(&on->event))
		goto trigger;

	return DEFAULT_POLLMASK;

 trigger:
	return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}
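/*
 * Illustrative userspace sketch of the poll protocol described above.
 * This is an assumption-laden example, not part of the kernel proper:
 * the attribute path is made up and error handling is omitted.
 *
 *	char buf[4096];
 *	struct pollfd pfd;
 *	int fd = open("/sys/devices/example/attr", O_RDONLY);
 *
 *	read(fd, buf, sizeof(buf));		// must read before polling
 *	pfd.fd = fd;
 *	pfd.events = POLLPRI | POLLERR;
 *	poll(&pfd, 1, 5000);			// timeout in case poll isn't supported
 *	lseek(fd, 0, SEEK_SET);			// seek to 0 and read the new value
 *	read(fd, buf, sizeof(buf));
 *
 * The kernel-side counterpart is a kernfs_notify() call on the node whose
 * value changed, which is what wakes the poller up.
 */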
/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up.
 */
void kernfs_notify(struct kernfs_node *kn)
{
	struct kernfs_open_node *on;
	unsigned long flags;

	spin_lock_irqsave(&kernfs_open_node_lock, flags);

	if (!WARN_ON(kernfs_type(kn) != KERNFS_FILE)) {
		on = kn->attr.open;
		if (on) {
			atomic_inc(&on->event);
			wake_up_interruptible(&on->poll);
		}
	}

	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);
const struct file_operations kernfs_file_fops = {
	.read		= kernfs_fop_read,
	.write		= kernfs_fop_write,
	.llseek		= generic_file_llseek,
	.mmap		= kernfs_fop_mmap,
	.open		= kernfs_fop_open,
	.release	= kernfs_fop_release,
	.poll		= kernfs_fop_poll,
};
/**
 * kernfs_create_file_ns_key - create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 * An illustrative caller-side sketch follows the function below.
 */
struct kernfs_node *kernfs_create_file_ns_key(struct kernfs_node *parent,
					      const char *name,
					      umode_t mode, loff_t size,
					      const struct kernfs_ops *ops,
					      void *priv, const void *ns,
					      struct lock_class_key *key)
{
	struct kernfs_addrm_cxt acxt;
	struct kernfs_node *kn;
	int rc;

	kn = kernfs_new_node(kernfs_root(parent), name,
			     (mode & S_IALLUGO) | S_IFREG, KERNFS_FILE);
	if (!kn)
		return ERR_PTR(-ENOMEM);

	kn->attr.ops = ops;
	kn->attr.size = size;
	kn->ns = ns;
	kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (key) {
		lockdep_init_map(&kn->dep_map, "s_active", key, 0);
		kn->flags |= KERNFS_LOCKDEP;
	}
#endif

	/*
	 * kn->attr.ops is accessible only while holding active ref.  We
	 * need to know whether some ops are implemented outside active
	 * ref.  Cache their existence in flags.
	 */
	if (ops->seq_show)
		kn->flags |= KERNFS_HAS_SEQ_SHOW;
	if (ops->mmap)
		kn->flags |= KERNFS_HAS_MMAP;

	kernfs_addrm_start(&acxt);
	rc = kernfs_add_one(&acxt, kn, parent);
	kernfs_addrm_finish(&acxt);

	if (rc) {
		kernfs_put(kn);
		return ERR_PTR(rc);
	}
	return kn;
}
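/*
 * Illustrative caller-side sketch (an assumption-laden example, not part of
 * this file): publishing a read-only, seq_show-backed attribute under an
 * existing @parent node.  The names foo_seq_show and foo_ops are
 * hypothetical.
 *
 *	static int foo_seq_show(struct seq_file *sf, void *v)
 *	{
 *		seq_puts(sf, "hello\n");
 *		return 0;
 *	}
 *
 *	static const struct kernfs_ops foo_ops = {
 *		.seq_show	= foo_seq_show,
 *	};
 *
 *	kn = kernfs_create_file_ns_key(parent, "foo", 0444, 0,
 *				       &foo_ops, NULL, NULL, NULL);
 */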