/*
 * Copyright (C) 1992 Krishna Balasubramanian
 *
 * Removed all the remaining kerneld mess
 * Catch the -EFAULT stuff properly
 * Use GFP_KERNEL for messages as in 1.2
 * Fixed up the unchecked user space derefs
 * Copyright (C) 1998 Alan Cox & Andi Kleen
 *
 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * mostly rewritten, threaded and wake-one semantics added
 * MSGMAX limit removed, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * Pavel Emelianov <xemul@openvz.org>
 */
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/msg.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/list.h>
#include <linux/security.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>

#include <asm/current.h>
#include <asm/uaccess.h>
#include "util.h"
/*
 * one msg_receiver structure for each sleeping receiver:
 */
struct msg_receiver {
	struct list_head	r_list;
	struct task_struct	*r_tsk;

	int			r_mode;
	long			r_msgtype;
	long			r_maxsize;

	struct msg_msg		*volatile r_msg;
};
/* one msg_sender for each sleeping sender */
struct msg_sender {
	struct list_head	list;
	struct task_struct	*tsk;
};
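
/*
 * Search modes for msgrcv(): convert_mode() below picks one of these from
 * the msgtyp argument, and testmsg() applies it to each queued message.
 */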
#define SEARCH_ANY		1
#define SEARCH_EQUAL		2
#define SEARCH_NOTEQUAL		3
#define SEARCH_LESSEQUAL	4
static struct ipc_ids init_msg_ids;

#define msg_ids(ns)	(*((ns)->ids[IPC_MSG_IDS]))

#define msg_unlock(msq)		ipc_unlock(&(msq)->q_perm)
#define msg_buildid(id, seq)	ipc_buildid(id, seq)
static void freeque(struct ipc_namespace *, struct msg_queue *);
static int newque(struct ipc_namespace *, struct ipc_params *);
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
#endif
static void __msg_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
{
	ns->ids[IPC_MSG_IDS] = ids;
	ns->msg_ctlmax = MSGMAX;
	ns->msg_ctlmnb = MSGMNB;
	ns->msg_ctlmni = MSGMNI;
	atomic_set(&ns->msg_bytes, 0);
	atomic_set(&ns->msg_hdrs, 0);
	ipc_init_ids(ids);
}
int msg_init_ns(struct ipc_namespace *ns)
{
	struct ipc_ids *ids;

	ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
	if (ids == NULL)
		return -ENOMEM;

	__msg_init_ns(ns, ids);
	return 0;
}
void msg_exit_ns(struct ipc_namespace *ns)
{
	struct msg_queue *msq;
	struct kern_ipc_perm *perm;
	int next_id;
	int total, in_use;

	down_write(&msg_ids(ns).rw_mutex);

	in_use = msg_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		perm = idr_find(&msg_ids(ns).ipcs_idr, next_id);
		if (perm == NULL)
			continue;
		ipc_lock_by_ptr(perm);
		msq = container_of(perm, struct msg_queue, q_perm);
		freeque(ns, msq);
		total++;
	}

	up_write(&msg_ids(ns).rw_mutex);

	kfree(ns->ids[IPC_MSG_IDS]);
	ns->ids[IPC_MSG_IDS] = NULL;
}
void __init msg_init(void)
{
	__msg_init_ns(&init_ipc_ns, &init_msg_ids);
	ipc_init_proc_interface("sysvipc/msg",
				"       key      msqid perms      cbytes       qnum lspid lrpid   uid   gid  cuid  cgid      stime      rtime      ctime\n",
				IPC_MSG_IDS, sysvipc_msg_proc_show);
}
/*
 * This routine is called in the paths where the rw_mutex is held to protect
 * access to the idr tree.
 */
static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}
/*
 * msg_lock_(check_) routines are called in the paths where the rw_mutex
 * is not held.
 */
static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}
static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct msg_queue *)ipcp;

	return container_of(ipcp, struct msg_queue, q_perm);
}
static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
	ipc_rmid(&msg_ids(ns), &s->q_perm);
}
/**
 * newque - Create a new msg queue
 * @ns: namespace
 * @params: ptr to the structure that contains the key and msgflg
 *
 * Called with msg_ids.rw_mutex held (writer)
 */
static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
	struct msg_queue *msq;
	int id, retval;
	key_t key = params->key;
	int msgflg = params->flg;

	msq = ipc_rcu_alloc(sizeof(*msq));
	if (!msq)
		return -ENOMEM;

	msq->q_perm.mode = msgflg & S_IRWXUGO;
	msq->q_perm.key = key;

	msq->q_perm.security = NULL;
	retval = security_msg_queue_alloc(msq);
	if (retval) {
		ipc_rcu_putref(msq);
		return retval;
	}

	/*
	 * ipc_addid() locks msq
	 */
	id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
	if (id < 0) {
		security_msg_queue_free(msq);
		ipc_rcu_putref(msq);
		return id;
	}

	msq->q_perm.id = msg_buildid(id, msq->q_perm.seq);
	msq->q_stime = msq->q_rtime = 0;
	msq->q_ctime = get_seconds();
	msq->q_cbytes = msq->q_qnum = 0;
	msq->q_qbytes = ns->msg_ctlmnb;
	msq->q_lspid = msq->q_lrpid = 0;
	INIT_LIST_HEAD(&msq->q_messages);
	INIT_LIST_HEAD(&msq->q_receivers);
	INIT_LIST_HEAD(&msq->q_senders);

	msg_unlock(msq);

	return msq->q_perm.id;
}
static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
	mss->tsk = current;
	current->state = TASK_INTERRUPTIBLE;
	list_add_tail(&mss->list, &msq->q_senders);
}
static inline void ss_del(struct msg_sender *mss)
{
	if (mss->list.next != NULL)
		list_del(&mss->list);
}
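
/*
 * ss_del() pairs with ss_wakeup(): when ss_wakeup() runs with kill set, it
 * marks each entry by clearing list.next before waking the sender, so the
 * NULL check above keeps the woken sender from unlinking a dead entry.
 */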
static void ss_wakeup(struct list_head *h, int kill)
{
	struct list_head *tmp;

	tmp = h->next;
	while (tmp != h) {
		struct msg_sender *mss;

		mss = list_entry(tmp, struct msg_sender, list);
		tmp = tmp->next;
		if (kill)
			mss->list.next = NULL;
		wake_up_process(mss->tsk);
	}
}
static void expunge_all(struct msg_queue *msq, int res)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		msr->r_msg = NULL;
		wake_up_process(msr->r_tsk);
		smp_mb();
		msr->r_msg = ERR_PTR(res);
	}
}
/*
 * freeque() wakes up waiters on the sender and receiver waiting queue,
 * removes the message queue from message queue ID IDR, and cleans up all the
 * messages associated with this queue.
 *
 * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held
 * before freeque() is called. msg_ids.rw_mutex remains locked on exit.
 */
static void freeque(struct ipc_namespace *ns, struct msg_queue *msq)
{
	struct list_head *tmp;

	expunge_all(msq, -EIDRM);
	ss_wakeup(&msq->q_senders, 1);
	msg_rmid(ns, msq);
	msg_unlock(msq);

	tmp = msq->q_messages.next;
	while (tmp != &msq->q_messages) {
		struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);

		tmp = tmp->next;
		atomic_dec(&ns->msg_hdrs);
		free_msg(msg);
	}
	atomic_sub(msq->q_cbytes, &ns->msg_bytes);
	security_msg_queue_free(msq);
	ipc_rcu_putref(msq);
}
/*
 * Called with msg_ids.rw_mutex and ipcp locked.
 */
static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
	struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);

	return security_msg_queue_associate(msq, msgflg);
}
asmlinkage long sys_msgget(key_t key, int msgflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops msg_ops;
	struct ipc_params msg_params;

	ns = current->nsproxy->ipc_ns;

	msg_ops.getnew = newque;
	msg_ops.associate = msg_security;
	msg_ops.more_checks = NULL;

	msg_params.key = key;
	msg_params.flg = msgflg;

	return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
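
/*
 * A minimal user-space sketch of the syscalls implemented in this file
 * (illustrative only, error handling elided):
 *
 *	struct msgbuf { long mtype; char mtext[64]; } m = { 1, "ping" };
 *	int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
 *	msgsnd(id, &m, sizeof(m.mtext), 0);
 *	msgrcv(id, &m, sizeof(m.mtext), 1, 0);
 *	msgctl(id, IPC_RMID, NULL);
 */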
static inline unsigned long
copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	{
		struct msqid_ds out;

		memset(&out, 0, sizeof(out));

		ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);

		out.msg_stime = in->msg_stime;
		out.msg_rtime = in->msg_rtime;
		out.msg_ctime = in->msg_ctime;

		if (in->msg_cbytes > USHRT_MAX)
			out.msg_cbytes = USHRT_MAX;
		else
			out.msg_cbytes = in->msg_cbytes;
		out.msg_lcbytes = in->msg_cbytes;

		if (in->msg_qnum > USHRT_MAX)
			out.msg_qnum = USHRT_MAX;
		else
			out.msg_qnum = in->msg_qnum;

		if (in->msg_qbytes > USHRT_MAX)
			out.msg_qbytes = USHRT_MAX;
		else
			out.msg_qbytes = in->msg_qbytes;
		out.msg_lqbytes = in->msg_qbytes;

		out.msg_lspid = in->msg_lspid;
		out.msg_lrpid = in->msg_lrpid;

		return copy_to_user(buf, &out, sizeof(out));
	}
	default:
		return -EINVAL;
	}
}
struct msq_setbuf {
	unsigned long	qbytes;
	uid_t		uid;
	gid_t		gid;
	mode_t		mode;
};
static inline unsigned long
copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
	{
		struct msqid64_ds tbuf;

		if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
			return -EFAULT;

		out->qbytes = tbuf.msg_qbytes;
		out->uid = tbuf.msg_perm.uid;
		out->gid = tbuf.msg_perm.gid;
		out->mode = tbuf.msg_perm.mode;

		return 0;
	}
	case IPC_OLD:
	{
		struct msqid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->uid = tbuf_old.msg_perm.uid;
		out->gid = tbuf_old.msg_perm.gid;
		out->mode = tbuf_old.msg_perm.mode;

		if (tbuf_old.msg_qbytes == 0)
			out->qbytes = tbuf_old.msg_lqbytes;
		else
			out->qbytes = tbuf_old.msg_qbytes;

		return 0;
	}
	default:
		return -EINVAL;
	}
}
asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
{
	struct kern_ipc_perm *ipcp;
	struct msq_setbuf uninitialized_var(setbuf);
	struct msg_queue *msq;
	int err, version;
	struct ipc_namespace *ns;

	if (msqid < 0 || cmd < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO:
	case MSG_INFO:
	{
		struct msginfo msginfo;
		int max_id;

		if (!buf)
			return -EFAULT;
		/*
		 * We must not return kernel stack data: due to padding,
		 * it is not enough to set all member fields.
		 */
		err = security_msg_queue_msgctl(NULL, cmd);
		if (err)
			return err;

		memset(&msginfo, 0, sizeof(msginfo));
		msginfo.msgmni = ns->msg_ctlmni;
		msginfo.msgmax = ns->msg_ctlmax;
		msginfo.msgmnb = ns->msg_ctlmnb;
		msginfo.msgssz = MSGSSZ;
		msginfo.msgseg = MSGSEG;
		down_read(&msg_ids(ns).rw_mutex);
		if (cmd == MSG_INFO) {
			msginfo.msgpool = msg_ids(ns).in_use;
			msginfo.msgmap = atomic_read(&ns->msg_hdrs);
			msginfo.msgtql = atomic_read(&ns->msg_bytes);
		} else {
			msginfo.msgmap = MSGMAP;
			msginfo.msgpool = MSGPOOL;
			msginfo.msgtql = MSGTQL;
		}
		max_id = ipc_get_maxid(&msg_ids(ns));
		up_read(&msg_ids(ns).rw_mutex);
		if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
			return -EFAULT;
		return (max_id < 0) ? 0 : max_id;
	}
	case MSG_STAT:	/* msqid is an index rather than a msg queue id */
	case IPC_STAT:
	{
		struct msqid64_ds tbuf;
		int success_return;

		if (!buf)
			return -EFAULT;

		if (cmd == MSG_STAT) {
			msq = msg_lock(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = msq->q_perm.id;
		} else {
			msq = msg_lock_check(ns, msqid);
			if (IS_ERR(msq))
				return PTR_ERR(msq);
			success_return = 0;
		}
		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		err = security_msg_queue_msgctl(msq, cmd);
		if (err)
			goto out_unlock;

		memset(&tbuf, 0, sizeof(tbuf));

		kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
		tbuf.msg_stime  = msq->q_stime;
		tbuf.msg_rtime  = msq->q_rtime;
		tbuf.msg_ctime  = msq->q_ctime;
		tbuf.msg_cbytes = msq->q_cbytes;
		tbuf.msg_qnum   = msq->q_qnum;
		tbuf.msg_qbytes = msq->q_qbytes;
		tbuf.msg_lspid  = msq->q_lspid;
		tbuf.msg_lrpid  = msq->q_lrpid;
		msg_unlock(msq);
		if (copy_msqid_to_user(buf, &tbuf, version))
			return -EFAULT;
		return success_return;
	}
	case IPC_SET:
		if (!buf)
			return -EFAULT;
		if (copy_msqid_from_user(&setbuf, buf, version))
			return -EFAULT;
		break;
	case IPC_RMID:
		break;
	default:
		return -EINVAL;
	}
	down_write(&msg_ids(ns).rw_mutex);
	msq = msg_lock_check_down(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_up;
	}

	ipcp = &msq->q_perm;

	err = audit_ipc_obj(ipcp);
	if (err)
		goto out_unlock_up;
	if (cmd == IPC_SET) {
		err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
					 setbuf.mode);
		if (err)
			goto out_unlock_up;
	}

	err = -EPERM;
	if (current->euid != ipcp->cuid &&
	    current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
		/* We _could_ check for CAP_CHOWN above, but we don't */
		goto out_unlock_up;

	err = security_msg_queue_msgctl(msq, cmd);
	if (err)
		goto out_unlock_up;

	switch (cmd) {
	case IPC_SET:
	{
		err = -EPERM;
		if (setbuf.qbytes > ns->msg_ctlmnb &&
		    !capable(CAP_SYS_RESOURCE))
			goto out_unlock_up;

		msq->q_qbytes = setbuf.qbytes;

		ipcp->uid = setbuf.uid;
		ipcp->gid = setbuf.gid;
		ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
			     (S_IRWXUGO & setbuf.mode);
		msq->q_ctime = get_seconds();
		/* sleeping receivers might be excluded by
		 * stricter permissions.
		 */
		expunge_all(msq, -EAGAIN);
		/* sleeping senders might be able to send
		 * due to a larger queue size.
		 */
		ss_wakeup(&msq->q_senders, 0);
		msg_unlock(msq);
		break;
	}
	case IPC_RMID:
		freeque(ns, msq);
		break;
	}
	err = 0;
out_up:
	up_write(&msg_ids(ns).rw_mutex);
	return err;
out_unlock_up:
	msg_unlock(msq);
	goto out_up;
out_unlock:
	msg_unlock(msq);
	return err;
}
static int testmsg(struct msg_msg *msg, long type, int mode)
{
	switch (mode) {
	case SEARCH_ANY:
		return 1;
	case SEARCH_LESSEQUAL:
		if (msg->m_type <= type)
			return 1;
		break;
	case SEARCH_EQUAL:
		if (msg->m_type == type)
			return 1;
		break;
	case SEARCH_NOTEQUAL:
		if (msg->m_type != type)
			return 1;
		break;
	}
	return 0;
}
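
/*
 * For example, with type == 5: SEARCH_LESSEQUAL matches any m_type <= 5,
 * SEARCH_EQUAL only m_type == 5, SEARCH_NOTEQUAL any m_type != 5, and
 * SEARCH_ANY matches unconditionally.
 */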
static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
	struct list_head *tmp;

	tmp = msq->q_receivers.next;
	while (tmp != &msq->q_receivers) {
		struct msg_receiver *msr;

		msr = list_entry(tmp, struct msg_receiver, r_list);
		tmp = tmp->next;
		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
					       msr->r_msgtype, msr->r_mode)) {

			list_del(&msr->r_list);
			if (msr->r_maxsize < msg->m_ts) {
				msr->r_msg = NULL;
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = ERR_PTR(-E2BIG);
			} else {
				msr->r_msg = NULL;
				msq->q_lrpid = task_pid_vnr(msr->r_tsk);
				msq->q_rtime = get_seconds();
				wake_up_process(msr->r_tsk);
				smp_mb();
				msr->r_msg = msg;

				return 1;
			}
		}
	}
	return 0;
}
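
/*
 * The r_msg handshake above pairs with the busy-wait loop in do_msgrcv():
 * r_msg is set to NULL before wake_up_process(), and its final value (the
 * message, or an error pointer) is only stored after the smp_mb(), so a
 * receiver that sees a non-NULL r_msg may consume it without the queue lock.
 */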
long do_msgsnd(int msqid, long mtype, void __user *mtext,
		size_t msgsz, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int err;
	struct ipc_namespace *ns;

	ns = current->nsproxy->ipc_ns;

	if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
		return -EINVAL;
	if (mtype < 1)
		return -EINVAL;

	msg = load_msg(mtext, msgsz);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->m_type = mtype;
	msg->m_ts = msgsz;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq)) {
		err = PTR_ERR(msq);
		goto out_free;
	}

	for (;;) {
		struct msg_sender s;

		err = -EACCES;
		if (ipcperms(&msq->q_perm, S_IWUGO))
			goto out_unlock_free;

		err = security_msg_queue_msgsnd(msq, msg, msgflg);
		if (err)
			goto out_unlock_free;
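
		/*
		 * The capacity check below bounds both the byte total and the
		 * message count by q_qbytes, so even zero-length messages
		 * cannot be queued without limit.
		 */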
		if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
				1 + msq->q_qnum <= msq->q_qbytes) {
			break;
		}

		/* queue full, wait: */
		if (msgflg & IPC_NOWAIT) {
			err = -EAGAIN;
			goto out_unlock_free;
		}
		ss_add(msq, &s);
		ipc_rcu_getref(msq);
		msg_unlock(msq);
		schedule();

		ipc_lock_by_ptr(&msq->q_perm);
		ipc_rcu_putref(msq);
		if (msq->q_perm.deleted) {
			err = -EIDRM;
			goto out_unlock_free;
		}
		ss_del(&s);

		if (signal_pending(current)) {
			err = -ERESTARTNOHAND;
			goto out_unlock_free;
		}
	}

	msq->q_lspid = task_tgid_vnr(current);
	msq->q_stime = get_seconds();

	if (!pipelined_send(msq, msg)) {
		/* no one is waiting for this message, enqueue it */
		list_add_tail(&msg->m_list, &msq->q_messages);
		msq->q_cbytes += msgsz;
		msq->q_qnum++;
		atomic_add(msgsz, &ns->msg_bytes);
		atomic_inc(&ns->msg_hdrs);
	}

	err = 0;
	msg = NULL;

out_unlock_free:
	msg_unlock(msq);
out_free:
	if (msg != NULL)
		free_msg(msg);
	return err;
}
asmlinkage long
sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
{
	long mtype;

	if (get_user(mtype, &msgp->mtype))
		return -EFAULT;
	return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg);
}
static inline int convert_mode(long *msgtyp, int msgflg)
{
	/*
	 * find message of correct type.
	 * msgtyp = 0 => get first.
	 * msgtyp > 0 => get first message of matching type.
	 * msgtyp < 0 => get message with least type that is <= abs(msgtyp).
	 */
	if (*msgtyp == 0)
		return SEARCH_ANY;
	if (*msgtyp < 0) {
		*msgtyp = -*msgtyp;
		return SEARCH_LESSEQUAL;
	}
	if (msgflg & MSG_EXCEPT)
		return SEARCH_NOTEQUAL;
	return SEARCH_EQUAL;
}
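
/*
 * For example: msgtyp == 0 yields SEARCH_ANY; msgtyp == -5 is rewritten to 5
 * with SEARCH_LESSEQUAL (match any type <= 5; the scan in do_msgrcv() then
 * narrows to the lowest such type); msgtyp == 5 yields SEARCH_EQUAL, or
 * SEARCH_NOTEQUAL when MSG_EXCEPT is set.
 */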
long do_msgrcv(int msqid, long *pmtype, void __user *mtext,
		size_t msgsz, long msgtyp, int msgflg)
{
	struct msg_queue *msq;
	struct msg_msg *msg;
	int mode;
	struct ipc_namespace *ns;

	if (msqid < 0 || (long) msgsz < 0)
		return -EINVAL;
	mode = convert_mode(&msgtyp, msgflg);
	ns = current->nsproxy->ipc_ns;

	msq = msg_lock_check(ns, msqid);
	if (IS_ERR(msq))
		return PTR_ERR(msq);

	for (;;) {
		struct msg_receiver msr_d;
		struct list_head *tmp;

		msg = ERR_PTR(-EACCES);
		if (ipcperms(&msq->q_perm, S_IRUGO))
			goto out_unlock;

		msg = ERR_PTR(-EAGAIN);
		tmp = msq->q_messages.next;
		while (tmp != &msq->q_messages) {
			struct msg_msg *walk_msg;

			walk_msg = list_entry(tmp, struct msg_msg, m_list);
			if (testmsg(walk_msg, msgtyp, mode) &&
			    !security_msg_queue_msgrcv(msq, walk_msg, current,
						       msgtyp, mode)) {

				msg = walk_msg;
				if (mode == SEARCH_LESSEQUAL &&
						walk_msg->m_type != 1)
					msgtyp = walk_msg->m_type - 1;
				else
					break;
			}
			tmp = tmp->next;
		}
		if (!IS_ERR(msg)) {
			/*
			 * Found a suitable message.
			 * Unlink it from the queue.
			 */
			if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
				msg = ERR_PTR(-E2BIG);
				goto out_unlock;
			}
			list_del(&msg->m_list);
			msq->q_qnum--;
			msq->q_rtime = get_seconds();
			msq->q_lrpid = task_tgid_vnr(current);
			msq->q_cbytes -= msg->m_ts;
			atomic_sub(msg->m_ts, &ns->msg_bytes);
			atomic_dec(&ns->msg_hdrs);
			ss_wakeup(&msq->q_senders, 0);
			msg_unlock(msq);
			break;
		}
		/* No message waiting. Wait for a message. */
		if (msgflg & IPC_NOWAIT) {
			msg = ERR_PTR(-ENOMSG);
			goto out_unlock;
		}
		list_add_tail(&msr_d.r_list, &msq->q_receivers);
		msr_d.r_tsk = current;
		msr_d.r_msgtype = msgtyp;
		msr_d.r_mode = mode;
		if (msgflg & MSG_NOERROR)
			msr_d.r_maxsize = INT_MAX;
		else
			msr_d.r_maxsize = msgsz;
		msr_d.r_msg = ERR_PTR(-EAGAIN);
		current->state = TASK_INTERRUPTIBLE;
		msg_unlock(msq);

		schedule();
		/* Lockless receive, part 1:
		 * Disable preemption. We don't hold a reference to the queue
		 * and getting a reference would defeat the idea of a lockless
		 * operation, thus the code relies on rcu to guarantee the
		 * existence of msq:
		 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
		 * Thus if r_msg is -EAGAIN, then the queue is not yet
		 * destroyed. rcu_read_lock() prevents preemption between
		 * reading r_msg and the spin_lock() inside ipc_lock_by_ptr().
		 */
		rcu_read_lock();

		/* Lockless receive, part 2:
		 * Wait until pipelined_send or expunge_all are outside of
		 * wake_up_process(). There is a race with exit(), see
		 * ipc/mqueue.c for the details.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		while (msg == NULL) {
			cpu_relax();
			msg = (struct msg_msg *)msr_d.r_msg;
		}
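
		/*
		 * This spin is short-lived: r_msg is NULL only between the
		 * waker's r_msg = NULL store and its final r_msg store, a
		 * window spanning little more than wake_up_process().
		 */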
		/* Lockless receive, part 3:
		 * If there is a message or an error then accept it without
		 * locking.
		 */
		if (msg != ERR_PTR(-EAGAIN)) {
			rcu_read_unlock();
			break;
		}

		/* Lockless receive, part 3:
		 * Acquire the queue spinlock.
		 */
		ipc_lock_by_ptr(&msq->q_perm);
		rcu_read_unlock();

		/* Lockless receive, part 4:
		 * Repeat test after acquiring the spinlock.
		 */
		msg = (struct msg_msg *)msr_d.r_msg;
		if (msg != ERR_PTR(-EAGAIN))
			goto out_unlock;

		list_del(&msr_d.r_list);
		if (signal_pending(current)) {
			msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
			msg_unlock(msq);
			break;
		}
	}
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
	*pmtype = msg->m_type;
	if (store_msg(mtext, msg, msgsz))
		msgsz = -EFAULT;

	free_msg(msg);

	return msgsz;
}
asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
			   long msgtyp, int msgflg)
{
	long err, mtype;

	err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg);
	if (err < 0)
		goto out;

	if (put_user(mtype, &msgp->mtype))
		err = -EFAULT;
out:
	return err;
}
#ifdef CONFIG_PROC_FS
static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
	struct msg_queue *msq = it;

	/* field order follows the header line printed by msg_init() */
	return seq_printf(s,
			"%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
			msq->q_perm.key,
			msq->q_perm.id,
			msq->q_perm.mode,
			msq->q_cbytes,
			msq->q_qnum,
			msq->q_lspid,
			msq->q_lrpid,
			msq->q_perm.uid,
			msq->q_perm.gid,
			msq->q_perm.cuid,
			msq->q_perm.cgid,
			msq->q_stime,
			msq->q_rtime,
			msq->q_ctime);
}
#endif