/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>

#define MQUEUE_MAGIC	0x19800202
#define DIRENT_SIZE	20
#define FILENT_SIZE	80

/* e_wait_q indices and ext_wait_queue states (all referenced below) */
#define SEND		0
#define RECV		1

#define STATE_NONE	0
#define STATE_PENDING	1
#define STATE_READY	2

#define DFLT_QUEUESMAX	256		/* max number of message queues */
#define DFLT_MSGMAX	10		/* max number of messages in each queue */
#define HARD_MSGMAX	(131072/sizeof(void *))
#define DFLT_MSGSIZEMAX	8192		/* max message size */
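
/*
 * Sketch of the per-user memory accounting these defaults feed into (see
 * mqueue_get_inode() below): each queue is charged
 *
 *	mq_bytes = mq_maxmsg * sizeof(struct msg_msg *)
 *		 + mq_maxmsg * mq_msgsize
 *
 * against RLIMIT_MSGQUEUE.  With the defaults above on a 64-bit machine
 * that is 10 * 8 + 10 * 8192 = 82000 bytes per queue.
 */
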
struct ext_wait_queue {		/* queue of sleeping tasks */
	struct task_struct *task;
	struct list_head list;
	struct msg_msg *msg;	/* ptr of loaded message */
	int state;		/* one of STATE_* values */
};

struct mqueue_inode_info {
	spinlock_t lock;
	struct inode vfs_inode;
	wait_queue_head_t wait_q;

	struct msg_msg **messages;
	struct mq_attr attr;

	struct sigevent notify;
	struct pid *notify_owner;
	struct user_struct *user;	/* user who created, for accounting */
	struct sock *notify_sock;
	struct sk_buff *notify_cookie;

	/* for tasks waiting for free space and messages, respectively */
	struct ext_wait_queue e_wait_q[2];

	unsigned long qsize;	/* size of queue in memory (sum of all msgs) */
};
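
/*
 * Locking note (descriptive, derived from the code below): info->lock
 * serializes all per-queue state -- messages[], attr.mq_curmsgs, qsize,
 * both e_wait_q[] lists and the notify* fields -- while the global mq_lock
 * only guards queues_count and the per-user mq_bytes accounting.
 */
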
static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static spinlock_t mq_lock;
static struct kmem_cache *mqueue_inode_cachep;
static struct vfsmount *mqueue_mnt;

static unsigned int queues_count;
static unsigned int queues_max	= DFLT_QUEUESMAX;
static unsigned int msg_max	= DFLT_MSGMAX;
static unsigned int msgsize_max	= DFLT_MSGSIZEMAX;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
	return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

static struct inode *mqueue_get_inode(struct super_block *sb, int mode,
							struct mq_attr *attr)
{
	struct inode *inode;

	inode = new_inode(sb);
	if (inode) {
		inode->i_mode = mode;
		inode->i_uid = current->fsuid;
		inode->i_gid = current->fsgid;
		inode->i_mtime = inode->i_ctime = inode->i_atime =
				CURRENT_TIME;

		if (S_ISREG(mode)) {
			struct mqueue_inode_info *info;
			struct task_struct *p = current;
			struct user_struct *u = p->user;
			unsigned long mq_bytes, mq_msg_tblsz;

			inode->i_fop = &mqueue_file_operations;
			inode->i_size = FILENT_SIZE;
			/* mqueue specific info */
			info = MQUEUE_I(inode);
			spin_lock_init(&info->lock);
			init_waitqueue_head(&info->wait_q);
			INIT_LIST_HEAD(&info->e_wait_q[0].list);
			INIT_LIST_HEAD(&info->e_wait_q[1].list);
			info->messages = NULL;
			info->notify_owner = NULL;
			info->user = NULL;	/* set when all is ok */
			memset(&info->attr, 0, sizeof(info->attr));
			info->attr.mq_maxmsg = DFLT_MSGMAX;
			info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
			if (attr) {
				info->attr.mq_maxmsg = attr->mq_maxmsg;
				info->attr.mq_msgsize = attr->mq_msgsize;
			}
			mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *);
			mq_bytes = (mq_msg_tblsz +
				(info->attr.mq_maxmsg * info->attr.mq_msgsize));

			spin_lock(&mq_lock);
			if (u->mq_bytes + mq_bytes < u->mq_bytes ||
			    u->mq_bytes + mq_bytes >
			    p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
				spin_unlock(&mq_lock);
				goto out_inode;
			}
			u->mq_bytes += mq_bytes;
			spin_unlock(&mq_lock);

			info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
			if (!info->messages) {
				spin_lock(&mq_lock);
				u->mq_bytes -= mq_bytes;
				spin_unlock(&mq_lock);
				goto out_inode;
			}

			info->user = get_uid(u);
		} else if (S_ISDIR(mode)) {
			/* Some things misbehave if size == 0 on a directory */
			inode->i_size = 2 * DIRENT_SIZE;
			inode->i_op = &mqueue_dir_inode_operations;
			inode->i_fop = &simple_dir_operations;
		}
	}
	return inode;

out_inode:
	make_bad_inode(inode);
	iput(inode);
	return NULL;
}
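
/*
 * Callers (see below): mqueue_fill_super() passes S_IFDIR for the
 * filesystem root, while mqueue_create() passes the queue's own mode
 * (S_IFREG is added by vfs_create()) and may pass attr == NULL, in which
 * case the DFLT_MSGMAX/DFLT_MSGSIZEMAX defaults above apply.
 */
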
static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = MQUEUE_MAGIC;
	sb->s_op = &mqueue_super_ops;

	inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
	if (!inode)
		return -ENOMEM;

	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root) {
		iput(inode);
		return -ENOMEM;
	}

	return 0;
}

static int mqueue_get_sb(struct file_system_type *fs_type,
			 int flags, const char *dev_name,
			 void *data, struct vfsmount *mnt)
{
	return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt);
}

static void init_once(struct kmem_cache *cachep, void *foo)
{
	struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

	inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
	struct mqueue_inode_info *ei;

	ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
	if (!ei)
		return NULL;
	return &ei->vfs_inode;
}

static void mqueue_destroy_inode(struct inode *inode)
{
	kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_delete_inode(struct inode *inode)
{
	struct mqueue_inode_info *info;
	struct user_struct *user;
	unsigned long mq_bytes;
	int i;

	if (S_ISDIR(inode->i_mode)) {
		clear_inode(inode);
		return;
	}
	info = MQUEUE_I(inode);
	spin_lock(&info->lock);
	for (i = 0; i < info->attr.mq_curmsgs; i++)
		free_msg(info->messages[i]);
	kfree(info->messages);
	spin_unlock(&info->lock);

	clear_inode(inode);

	mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) +
		   (info->attr.mq_maxmsg * info->attr.mq_msgsize));
	user = info->user;
	if (user) {
		spin_lock(&mq_lock);
		user->mq_bytes -= mq_bytes;
		queues_count--;
		spin_unlock(&mq_lock);
		free_uid(user);
	}
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
				int mode, struct nameidata *nd)
{
	struct inode *inode;
	struct mq_attr *attr = dentry->d_fsdata;
	int error;

	spin_lock(&mq_lock);
	if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) {
		error = -ENOSPC;
		goto out_lock;
	}
	queues_count++;
	spin_unlock(&mq_lock);

	inode = mqueue_get_inode(dir->i_sb, mode, attr);
	if (!inode) {
		error = -ENOMEM;
		spin_lock(&mq_lock);
		queues_count--;
		goto out_lock;
	}

	dir->i_size += DIRENT_SIZE;
	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

	d_instantiate(dentry, inode);
	dget(dentry);
	return 0;
out_lock:
	spin_unlock(&mq_lock);
	return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = dentry->d_inode;

	dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
	dir->i_size -= DIRENT_SIZE;
	drop_nlink(inode);
	dput(dentry);
	return 0;
}

/*
 * This is the routine used when reading from a queue file.  To avoid doing
 * some sort of mq_receive here, we allow reading only the queue size and
 * the notification info (the only values that are interesting from the
 * user's point of view and aren't accessible through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
				size_t count, loff_t *off)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	char buffer[FILENT_SIZE];
	size_t slen;
	loff_t o;

	spin_lock(&info->lock);
	snprintf(buffer, sizeof(buffer),
			"QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
			info->qsize,
			info->notify_owner ? info->notify.sigev_notify : 0,
			(info->notify_owner &&
			 info->notify.sigev_notify == SIGEV_SIGNAL) ?
				info->notify.sigev_signo : 0,
			pid_nr(info->notify_owner));
	spin_unlock(&info->lock);
	buffer[sizeof(buffer)-1] = '\0';
	slen = strlen(buffer)+1;

	o = *off;
	if (o > slen)
		return 0;

	if (o + count > slen)
		count = slen - o;

	if (copy_to_user(u_data, buffer + o, count))
		return -EFAULT;

	*off = o + count;
	filp->f_path.dentry->d_inode->i_atime =
		filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME;
	return count;
}
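
/*
 * Example (illustrative values): with the mqueue filesystem mounted --
 * commonly at /dev/mqueue -- "cat /dev/mqueue/myq" returns one line in
 * the format built above, e.g.
 *
 *	QSIZE:129     NOTIFY:2     SIGNO:0     NOTIFY_PID:8260
 */
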
static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);

	spin_lock(&info->lock);
	if (task_tgid(current) == info->notify_owner)
		remove_notification(info);
	spin_unlock(&info->lock);

	return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
	struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode);
	int retval = 0;

	poll_wait(filp, &info->wait_q, poll_tab);

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs)
		retval = POLLIN | POLLRDNORM;

	if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
		retval |= POLLOUT | POLLWRNORM;
	spin_unlock(&info->lock);

	return retval;
}

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
			struct ext_wait_queue *ewp)
{
	struct ext_wait_queue *walk;

	ewp->task = current;

	list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
		if (walk->task->static_prio <= current->static_prio) {
			list_add_tail(&ewp->list, &walk->list);
			return;
		}
	}
	list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts current task to sleep. Caller must hold queue lock. After return
 * the lock is no longer held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
			long timeout, struct ext_wait_queue *ewp)
{
	int retval;
	signed long time;

	wq_add(info, sr, ewp);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);

		spin_unlock(&info->lock);
		time = schedule_timeout(timeout);

		while (ewp->state == STATE_PENDING)
			cpu_relax();

		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out;
		}
		spin_lock(&info->lock);
		if (ewp->state == STATE_READY) {
			retval = 0;
			goto out_unlock;
		}
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}
		if (time == 0) {
			retval = -ETIMEDOUT;
			break;
		}
	}
	list_del(&ewp->list);
out_unlock:
	spin_unlock(&info->lock);
out:
	return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
		struct mqueue_inode_info *info, int sr)
{
	struct list_head *ptr;

	ptr = info->e_wait_q[sr].list.prev;
	if (ptr == &info->e_wait_q[sr].list)
		return NULL;
	return list_entry(ptr, struct ext_wait_queue, list);
}

/* Auxiliary functions to manipulate messages' list */
static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
{
	int k;

	k = info->attr.mq_curmsgs - 1;
	while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
		info->messages[k + 1] = info->messages[k];
		k--;
	}
	info->attr.mq_curmsgs++;
	info->qsize += ptr->m_ts;
	info->messages[k + 1] = ptr;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
	info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
	return info->messages[info->attr.mq_curmsgs];
}
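
/*
 * Illustration of the two helpers above: messages[] is kept sorted by
 * ascending priority (m_type), so after inserting priorities 1, 5 and 3
 * the array holds {1, 3, 5}; msg_get() pops from the tail and therefore
 * returns the priority-5 message first, as POSIX requires.  Equal
 * priorities keep FIFO order because msg_insert() places a new message
 * below existing entries of the same priority.
 */
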
static inline void set_cookie(struct sk_buff *skb, char code)
{
	((char *)skb->data)[NOTIFY_COOKIE_LEN - 1] = code;
}

/*
 * This function exists only to keep the overly long sys_mq_timedsend()
 * readable by splitting out the notification step.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
	/* notification
	 * invoked when there is a registered process, no process is waiting
	 * synchronously for a message AND the state of the queue changed from
	 * empty to not empty. Here we are sure that no one is waiting
	 * synchronously. */
	if (info->notify_owner &&
	    info->attr.mq_curmsgs == 1) {
		struct siginfo sig_i;
		switch (info->notify.sigev_notify) {
		case SIGEV_NONE:
			break;
		case SIGEV_SIGNAL:
			sig_i.si_signo = info->notify.sigev_signo;
			sig_i.si_errno = 0;
			sig_i.si_code = SI_MESGQ;
			sig_i.si_value = info->notify.sigev_value;
			sig_i.si_pid = current->tgid;
			sig_i.si_uid = current->uid;

			kill_pid_info(info->notify.sigev_signo,
				      &sig_i, info->notify_owner);
			break;
		case SIGEV_THREAD:
			set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
			netlink_sendskb(info->notify_sock, info->notify_cookie);
			break;
		}
		/* after notification unregisters process */
		put_pid(info->notify_owner);
		info->notify_owner = NULL;
	}
	wake_up(&info->wait_q);
}

static long prepare_timeout(const struct timespec __user *u_arg)
{
	struct timespec ts, nowts;
	long timeout;

	if (u_arg) {
		if (unlikely(copy_from_user(&ts, u_arg,
						sizeof(struct timespec))))
			return -EFAULT;

		if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0
			|| ts.tv_nsec >= NSEC_PER_SEC))
			return -EINVAL;
		nowts = CURRENT_TIME;
		/* first subtract as jiffies can't be too big */
		ts.tv_sec -= nowts.tv_sec;
		if (ts.tv_nsec < nowts.tv_nsec) {
			ts.tv_nsec += NSEC_PER_SEC;
			ts.tv_sec--;
		}
		ts.tv_nsec -= nowts.tv_nsec;

		timeout = timespec_to_jiffies(&ts) + 1;
	} else
		return MAX_SCHEDULE_TIMEOUT;

	return timeout;
}
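
/*
 * Illustration: an absolute timeout 1.5 s in the future becomes roughly
 * timespec_to_jiffies() of the remaining {1, 500000000} plus 1 relative
 * jiffies; u_arg == NULL means "block forever" (MAX_SCHEDULE_TIMEOUT),
 * and a bad pointer or malformed timespec yields a negative value that
 * the callers below reject via their "timeout < 0" checks.
 */
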
static void remove_notification(struct mqueue_inode_info *info)
{
	if (info->notify_owner != NULL &&
	    info->notify.sigev_notify == SIGEV_THREAD) {
		set_cookie(info->notify_cookie, NOTIFY_REMOVED);
		netlink_sendskb(info->notify_sock, info->notify_cookie);
	}
	put_pid(info->notify_owner);
	info->notify_owner = NULL;
}

static int mq_attr_ok(struct mq_attr *attr)
{
	if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
		return 0;
	if (capable(CAP_SYS_RESOURCE)) {
		if (attr->mq_maxmsg > HARD_MSGMAX)
			return 0;
	} else {
		if (attr->mq_maxmsg > msg_max ||
				attr->mq_msgsize > msgsize_max)
			return 0;
	}
	/* check for overflow */
	if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
		return 0;
	if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) +
	    (attr->mq_maxmsg * sizeof(struct msg_msg *)) <
	    (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize))
		return 0;
	return 1;
}

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct dentry *dir, struct dentry *dentry,
			int oflag, mode_t mode, struct mq_attr __user *u_attr)
{
	struct mq_attr attr;
	int ret;

	if (u_attr) {
		ret = -EFAULT;
		if (copy_from_user(&attr, u_attr, sizeof(attr)))
			goto out;
		ret = -EINVAL;
		if (!mq_attr_ok(&attr))
			goto out;
		/* store for use during create */
		dentry->d_fsdata = &attr;
	}

	mode &= ~current->fs->umask;
	ret = vfs_create(dir->d_inode, dentry, mode, NULL);
	dentry->d_fsdata = NULL;
	if (ret)
		goto out;

	return dentry_open(dentry, mqueue_mnt, oflag);

out:
	dput(dentry);
	mntput(mqueue_mnt);
	return ERR_PTR(ret);
}
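
/*
 * Illustrative user-space path into do_create() via sys_mq_open() below
 * (names and values are examples only):
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 8192 };
 *	mqd_t q = mq_open("/myq", O_CREAT | O_EXCL | O_RDWR, 0600, &attr);
 *
 * Passing a NULL attribute pointer is also valid and falls back to the
 * DFLT_* values in mqueue_get_inode().
 */
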
/* Opens existing queue */
static struct file *do_open(struct dentry *dentry, int oflag)
{
	static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
					MAY_READ | MAY_WRITE };

	if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EINVAL);
	}

	if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) {
		dput(dentry);
		mntput(mqueue_mnt);
		return ERR_PTR(-EACCES);
	}

	return dentry_open(dentry, mqueue_mnt, oflag);
}

asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode,
				struct mq_attr __user *u_attr)
{
	struct dentry *dentry;
	struct file *filp;
	char *name;
	int fd, error;

	error = audit_mq_open(oflag, mode, u_attr);
	if (error != 0)
		return error;

	if (IS_ERR(name = getname(u_name)))
		return PTR_ERR(name);

	fd = get_unused_fd();
	if (fd < 0)
		goto out_putname;

	mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		error = PTR_ERR(dentry);
		goto out_err;
	}
	mntget(mqueue_mnt);

	if (oflag & O_CREAT) {
		if (dentry->d_inode) {	/* entry already exists */
			audit_inode(name, dentry->d_inode);
			error = -EEXIST;
			if (oflag & O_EXCL)
				goto out;
			filp = do_open(dentry, oflag);
		} else {
			filp = do_create(mqueue_mnt->mnt_root, dentry,
						oflag, mode, u_attr);
		}
	} else {
		error = -ENOENT;
		if (!dentry->d_inode)
			goto out;
		audit_inode(name, dentry->d_inode);
		filp = do_open(dentry, oflag);
	}

	if (IS_ERR(filp)) {
		error = PTR_ERR(filp);
		goto out_putfd;
	}

	set_close_on_exec(fd, 1);
	fd_install(fd, filp);
	goto out_upsem;

out:
	dput(dentry);
	mntput(mqueue_mnt);
out_putfd:
	put_unused_fd(fd);
out_err:
	fd = error;
out_upsem:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
out_putname:
	putname(name);
	return fd;
}

asmlinkage long sys_mq_unlink(const char __user *u_name)
{
	int err;
	char *name;
	struct dentry *dentry;
	struct inode *inode = NULL;

	name = getname(u_name);
	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex,
			I_MUTEX_PARENT);
	dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name));
	if (IS_ERR(dentry)) {
		err = PTR_ERR(dentry);
		goto out_unlock;
	}

	if (!dentry->d_inode) {
		err = -ENOENT;
		goto out_err;
	}

	inode = dentry->d_inode;
	if (inode)
		atomic_inc(&inode->i_count);

	err = vfs_unlink(dentry->d_parent->d_inode, dentry);
out_err:
	dput(dentry);

out_unlock:
	mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex);
	putname(name);
	if (inode)
		iput(inode);

	return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message array. If there is a waiting receiver, then it
 * bypasses the message array and directly hands the message over to the
 * receiver.
 * The receiver accepts the message and returns without grabbing the queue
 * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
 * are necessary. The same algorithm is used for sysv semaphores, see
 * ipc/sem.c for more details.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct mqueue_inode_info *info,
				  struct msg_msg *message,
				  struct ext_wait_queue *receiver)
{
	receiver->msg = message;
	list_del(&receiver->list);
	receiver->state = STATE_PENDING;
	wake_up_process(receiver->task);
	smp_wmb();
	receiver->state = STATE_READY;
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and put it on the queue (we have one free place for sure). */
static inline void pipelined_receive(struct mqueue_inode_info *info)
{
	struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

	if (!sender) {
		/* for poll */
		wake_up_interruptible(&info->wait_q);
		return;
	}
	msg_insert(sender->msg, info);
	list_del(&sender->list);
	sender->state = STATE_PENDING;
	wake_up_process(sender->task);
	smp_wmb();
	sender->state = STATE_READY;
}
long sys_mq_timedsend(mqd_t mqdes
, const char __user
*u_msg_ptr
,
807 size_t msg_len
, unsigned int msg_prio
,
808 const struct timespec __user
*u_abs_timeout
)
812 struct ext_wait_queue wait
;
813 struct ext_wait_queue
*receiver
;
814 struct msg_msg
*msg_ptr
;
815 struct mqueue_inode_info
*info
;
819 ret
= audit_mq_timedsend(mqdes
, msg_len
, msg_prio
, u_abs_timeout
);
823 if (unlikely(msg_prio
>= (unsigned long) MQ_PRIO_MAX
))
826 timeout
= prepare_timeout(u_abs_timeout
);
833 inode
= filp
->f_path
.dentry
->d_inode
;
834 if (unlikely(filp
->f_op
!= &mqueue_file_operations
))
836 info
= MQUEUE_I(inode
);
837 audit_inode(NULL
, inode
);
839 if (unlikely(!(filp
->f_mode
& FMODE_WRITE
)))
842 if (unlikely(msg_len
> info
->attr
.mq_msgsize
)) {
847 /* First try to allocate memory, before doing anything with
848 * existing queues. */
849 msg_ptr
= load_msg(u_msg_ptr
, msg_len
);
850 if (IS_ERR(msg_ptr
)) {
851 ret
= PTR_ERR(msg_ptr
);
854 msg_ptr
->m_ts
= msg_len
;
855 msg_ptr
->m_type
= msg_prio
;
857 spin_lock(&info
->lock
);
859 if (info
->attr
.mq_curmsgs
== info
->attr
.mq_maxmsg
) {
860 if (filp
->f_flags
& O_NONBLOCK
) {
861 spin_unlock(&info
->lock
);
863 } else if (unlikely(timeout
< 0)) {
864 spin_unlock(&info
->lock
);
868 wait
.msg
= (void *) msg_ptr
;
869 wait
.state
= STATE_NONE
;
870 ret
= wq_sleep(info
, SEND
, timeout
, &wait
);
875 receiver
= wq_get_first_waiter(info
, RECV
);
877 pipelined_send(info
, msg_ptr
, receiver
);
879 /* adds message to the queue */
880 msg_insert(msg_ptr
, info
);
883 inode
->i_atime
= inode
->i_mtime
= inode
->i_ctime
=
885 spin_unlock(&info
->lock
);
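
/*
 * Illustrative user-space counterpart (glibc implements mq_send() as a
 * timed send with no timeout; values are examples only):
 *
 *	char payload[] = "hello";
 *	mq_send(q, payload, sizeof(payload), 5);	// priority 5
 *	struct timespec abs = { .tv_sec = time(NULL) + 2 };
 *	mq_timedsend(q, payload, sizeof(payload), 5, &abs);
 */
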
asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
	size_t msg_len, unsigned int __user *u_msg_prio,
	const struct timespec __user *u_abs_timeout)
{
	long timeout;
	ssize_t ret;
	struct msg_msg *msg_ptr;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;
	struct ext_wait_queue wait;

	ret = audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout);
	if (ret != 0)
		return ret;

	timeout = prepare_timeout(u_abs_timeout);

	ret = -EBADF;
	filp = fget(mqdes);
	if (unlikely(!filp))
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);
	audit_inode(NULL, inode);

	if (unlikely(!(filp->f_mode & FMODE_READ)))
		goto out_fput;

	/* check whether the receive buffer is big enough */
	if (unlikely(msg_len < info->attr.mq_msgsize)) {
		ret = -EMSGSIZE;
		goto out_fput;
	}

	spin_lock(&info->lock);
	if (info->attr.mq_curmsgs == 0) {
		if (filp->f_flags & O_NONBLOCK) {
			spin_unlock(&info->lock);
			ret = -EAGAIN;
			msg_ptr = NULL;
		} else if (unlikely(timeout < 0)) {
			spin_unlock(&info->lock);
			ret = timeout;
			msg_ptr = NULL;
		} else {
			wait.state = STATE_NONE;
			ret = wq_sleep(info, RECV, timeout, &wait);
			msg_ptr = wait.msg;
		}
	} else {
		msg_ptr = msg_get(info);

		inode->i_atime = inode->i_mtime = inode->i_ctime =
				CURRENT_TIME;

		/* There is now free space in queue. */
		pipelined_receive(info);
		spin_unlock(&info->lock);
		ret = 0;
	}
	if (ret == 0) {
		ret = msg_ptr->m_ts;

		if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
				store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
			ret = -EFAULT;
		}
		free_msg(msg_ptr);
	}
out_fput:
	fput(filp);
out:
	return ret;
}
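
/*
 * Illustrative user-space counterpart: POSIX (and the EMSGSIZE check
 * above) requires the receive buffer to be at least mq_msgsize bytes:
 *
 *	struct mq_attr a;
 *	mq_getattr(q, &a);
 *	char *buf = malloc(a.mq_msgsize);
 *	unsigned int prio;
 *	ssize_t n = mq_receive(q, buf, a.mq_msgsize, &prio);
 */
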
/*
 * Notes: if the caller asks us to deregister (by passing a NULL pointer)
 * but is not currently the owner of the notification, the request is
 * silently discarded.  This case is not explicitly defined by POSIX.
 */
asmlinkage long sys_mq_notify(mqd_t mqdes,
				const struct sigevent __user *u_notification)
{
	int ret;
	struct file *filp;
	struct sock *sock;
	struct inode *inode;
	struct sigevent notification;
	struct mqueue_inode_info *info;
	struct sk_buff *nc;

	ret = audit_mq_notify(mqdes, u_notification);
	if (ret != 0)
		return ret;

	nc = NULL;
	sock = NULL;
	if (u_notification != NULL) {
		if (copy_from_user(&notification, u_notification,
					sizeof(struct sigevent)))
			return -EFAULT;

		if (unlikely(notification.sigev_notify != SIGEV_NONE &&
			     notification.sigev_notify != SIGEV_SIGNAL &&
			     notification.sigev_notify != SIGEV_THREAD))
			return -EINVAL;

		if (notification.sigev_notify == SIGEV_SIGNAL &&
			!valid_signal(notification.sigev_signo)) {
			return -EINVAL;
		}

		if (notification.sigev_notify == SIGEV_THREAD) {
			/* create the notify skb */
			nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
			ret = -ENOMEM;
			if (!nc)
				goto out;
			ret = -EFAULT;
			if (copy_from_user(nc->data,
					notification.sigev_value.sival_ptr,
					NOTIFY_COOKIE_LEN)) {
				goto out;
			}

			/* TODO: add a header? */
			skb_put(nc, NOTIFY_COOKIE_LEN);
			/* and attach it to the socket */
retry:
			filp = fget(notification.sigev_signo);
			ret = -EBADF;
			if (!filp)
				goto out;
			sock = netlink_getsockbyfilp(filp);
			fput(filp);
			if (IS_ERR(sock)) {
				ret = PTR_ERR(sock);
				sock = NULL;
				goto out;
			}

			ret = netlink_attachskb(sock, nc, 0,
					MAX_SCHEDULE_TIMEOUT, NULL);
			if (ret == 1)
				goto retry;
			if (ret) {
				sock = NULL;
				nc = NULL;
				goto out;
			}
		}
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	ret = 0;
	spin_lock(&info->lock);
	if (u_notification == NULL) {
		if (info->notify_owner == task_tgid(current)) {
			remove_notification(info);
			inode->i_atime = inode->i_ctime = CURRENT_TIME;
		}
	} else if (info->notify_owner != NULL) {
		ret = -EBUSY;
	} else {
		switch (notification.sigev_notify) {
		case SIGEV_NONE:
			info->notify.sigev_notify = SIGEV_NONE;
			break;
		case SIGEV_THREAD:
			info->notify_sock = sock;
			info->notify_cookie = nc;
			sock = NULL;
			nc = NULL;
			info->notify.sigev_notify = SIGEV_THREAD;
			break;
		case SIGEV_SIGNAL:
			info->notify.sigev_signo = notification.sigev_signo;
			info->notify.sigev_value = notification.sigev_value;
			info->notify.sigev_notify = SIGEV_SIGNAL;
			break;
		}

		info->notify_owner = get_pid(task_tgid(current));
		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	if (sock) {
		netlink_detachskb(sock, nc);
	} else if (nc) {
		dev_kfree_skb(nc);
	}
	return ret;
}
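
/*
 * Illustrative user-space registration for the SIGEV_SIGNAL case handled
 * above (only one process at a time may own the registration):
 *
 *	struct sigevent sev = {
 *		.sigev_notify = SIGEV_SIGNAL,
 *		.sigev_signo  = SIGUSR1,
 *	};
 *	mq_notify(q, &sev);	// register
 *	mq_notify(q, NULL);	// deregister
 */
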
asmlinkage long sys_mq_getsetattr(mqd_t mqdes,
			const struct mq_attr __user *u_mqstat,
			struct mq_attr __user *u_omqstat)
{
	int ret;
	struct mq_attr mqstat, omqstat;
	struct file *filp;
	struct inode *inode;
	struct mqueue_inode_info *info;

	if (u_mqstat != NULL) {
		if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
			return -EFAULT;
		if (mqstat.mq_flags & (~O_NONBLOCK))
			return -EINVAL;
	}

	ret = -EBADF;
	filp = fget(mqdes);
	if (!filp)
		goto out;

	inode = filp->f_path.dentry->d_inode;
	if (unlikely(filp->f_op != &mqueue_file_operations))
		goto out_fput;
	info = MQUEUE_I(inode);

	spin_lock(&info->lock);

	omqstat = info->attr;
	omqstat.mq_flags = filp->f_flags & O_NONBLOCK;
	if (u_mqstat) {
		ret = audit_mq_getsetattr(mqdes, &mqstat);
		if (ret)
			goto out_unlock;
		if (mqstat.mq_flags & O_NONBLOCK)
			filp->f_flags |= O_NONBLOCK;
		else
			filp->f_flags &= ~O_NONBLOCK;

		inode->i_atime = inode->i_ctime = CURRENT_TIME;
	}

	spin_unlock(&info->lock);

	ret = 0;
	if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
						sizeof(struct mq_attr)))
		ret = -EFAULT;
	goto out_fput;

out_unlock:
	spin_unlock(&info->lock);
out_fput:
	fput(filp);
out:
	return ret;
}
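
/*
 * Illustrative user-space counterpart (glibc maps mq_getattr()/mq_setattr()
 * onto this syscall):
 *
 *	struct mq_attr a;
 *	mq_getattr(q, &a);		// u_mqstat == NULL, read only
 *	a.mq_flags = O_NONBLOCK;
 *	mq_setattr(q, &a, NULL);	// only O_NONBLOCK may be changed
 */
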
static const struct inode_operations mqueue_dir_inode_operations = {
	.lookup		= simple_lookup,
	.create		= mqueue_create,
	.unlink		= mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
	.flush		= mqueue_flush_file,
	.poll		= mqueue_poll_file,
	.read		= mqueue_read_file,
};

static struct super_operations mqueue_super_ops = {
	.alloc_inode	= mqueue_alloc_inode,
	.destroy_inode	= mqueue_destroy_inode,
	.statfs		= simple_statfs,
	.delete_inode	= mqueue_delete_inode,
	.drop_inode	= generic_delete_inode,
};

static struct file_system_type mqueue_fs_type = {
	.name		= "mqueue",
	.get_sb		= mqueue_get_sb,
	.kill_sb	= kill_litter_super,
};

static int msg_max_limit_min = DFLT_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;

static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX;
static int msg_maxsize_limit_max = INT_MAX;

static ctl_table mq_sysctls[] = {
	{
		.procname	= "queues_max",
		.data		= &queues_max,
		.maxlen		= sizeof(int),
		.proc_handler	= &proc_dointvec,
	},
	{
		.procname	= "msg_max",
		.data		= &msg_max,
		.maxlen		= sizeof(int),
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_max_limit_min,
		.extra2		= &msg_max_limit_max,
	},
	{
		.procname	= "msgsize_max",
		.data		= &msgsize_max,
		.maxlen		= sizeof(int),
		.proc_handler	= &proc_dointvec_minmax,
		.extra1		= &msg_maxsize_limit_min,
		.extra2		= &msg_maxsize_limit_max,
	},
	{ }
};

static ctl_table mq_sysctl_dir[] = {
	{
		.procname	= "mqueue",
		.child		= mq_sysctls,
	},
	{ }
};

static ctl_table mq_sysctl_root[] = {
	{
		/* parent directory entry ("fs" in mainline; details elided here) */
		.child		= mq_sysctl_dir,
	},
	{ }
};
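
/*
 * Illustration (assumes the usual mainline parenting of mq_sysctl_root
 * under "fs"): the tables above surface as fs.mqueue.queues_max,
 * fs.mqueue.msg_max and fs.mqueue.msgsize_max, e.g.
 *
 *	sysctl -w fs.mqueue.msg_max=64
 */
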
static int __init init_mqueue_fs(void)
{
	int error;

	mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
				sizeof(struct mqueue_inode_info), 0,
				SLAB_HWCACHE_ALIGN, init_once);
	if (mqueue_inode_cachep == NULL)
		return -ENOMEM;

	/* ignore failures - they are not fatal */
	mq_sysctl_table = register_sysctl_table(mq_sysctl_root);

	error = register_filesystem(&mqueue_fs_type);
	if (error)
		goto out_sysctl;

	if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) {
		error = PTR_ERR(mqueue_mnt);
		goto out_filesystem;
	}

	/* internal initialization - not common for vfs */
	queues_count = 0;
	spin_lock_init(&mq_lock);

	return 0;

out_filesystem:
	unregister_filesystem(&mqueue_fs_type);
out_sysctl:
	if (mq_sysctl_table)
		unregister_sysctl_table(mq_sysctl_table);
	kmem_cache_destroy(mqueue_inode_cachep);
	return error;
}

__initcall(init_mqueue_fs);