[PATCH] Fix ipc entries removal
1 /*
2 * linux/ipc/msg.c
3 * Copyright (C) 1992 Krishna Balasubramanian
4 *
5 * Removed all the remaining kerneld mess
6 * Catch the -EFAULT stuff properly
7 * Use GFP_KERNEL for messages as in 1.2
8 * Fixed up the unchecked user space derefs
9 * Copyright (C) 1998 Alan Cox & Andi Kleen
10 *
11 * /proc/sysvipc/msg support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
12 *
13 * mostly rewritten, threaded and wake-one semantics added
14 * MSGMAX limit removed, sysctl's added
15 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
16 *
17 * support for audit of ipc object properties and permission changes
18 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
19 *
20 * namespaces support
21 * OpenVZ, SWsoft Inc.
22 * Pavel Emelianov <xemul@openvz.org>
23 */
24
25 #include <linux/capability.h>
26 #include <linux/slab.h>
27 #include <linux/msg.h>
28 #include <linux/spinlock.h>
29 #include <linux/init.h>
30 #include <linux/proc_fs.h>
31 #include <linux/list.h>
32 #include <linux/security.h>
33 #include <linux/sched.h>
34 #include <linux/syscalls.h>
35 #include <linux/audit.h>
36 #include <linux/seq_file.h>
37 #include <linux/mutex.h>
38 #include <linux/nsproxy.h>
39
40 #include <asm/current.h>
41 #include <asm/uaccess.h>
42 #include "util.h"
43
44 /*
45 * one msg_receiver structure for each sleeping receiver:
46 */
47 struct msg_receiver {
48 struct list_head r_list;
49 struct task_struct *r_tsk;
50
51 int r_mode;
52 long r_msgtype;
53 long r_maxsize;
54
55 volatile struct msg_msg *r_msg;
56 };
57
58 /* one msg_sender for each sleeping sender */
59 struct msg_sender {
60 struct list_head list;
61 struct task_struct *tsk;
62 };
63
64 #define SEARCH_ANY 1
65 #define SEARCH_EQUAL 2
66 #define SEARCH_NOTEQUAL 3
67 #define SEARCH_LESSEQUAL 4
68
69 static atomic_t msg_bytes = ATOMIC_INIT(0);
70 static atomic_t msg_hdrs = ATOMIC_INIT(0);
71
72 static struct ipc_ids init_msg_ids;
73
74 #define msg_ids(ns) (*((ns)->ids[IPC_MSG_IDS]))
75
76 #define msg_lock(ns, id) ((struct msg_queue*)ipc_lock(&msg_ids(ns), id))
77 #define msg_unlock(msq) ipc_unlock(&(msq)->q_perm)
78 #define msg_rmid(ns, id) ((struct msg_queue*)ipc_rmid(&msg_ids(ns), id))
79 #define msg_checkid(ns, msq, msgid) \
80 ipc_checkid(&msg_ids(ns), &msq->q_perm, msgid)
81 #define msg_buildid(ns, id, seq) \
82 ipc_buildid(&msg_ids(ns), id, seq)
83
84 static void freeque (struct ipc_namespace *ns, struct msg_queue *msq, int id);
85 static int newque (struct ipc_namespace *ns, key_t key, int msgflg);
86 #ifdef CONFIG_PROC_FS
87 static int sysvipc_msg_proc_show(struct seq_file *s, void *it);
88 #endif
89
90 static void __ipc_init __msg_init_ns(struct ipc_namespace *ns, struct ipc_ids *ids)
91 {
92 ns->ids[IPC_MSG_IDS] = ids;
93 ns->msg_ctlmax = MSGMAX;
94 ns->msg_ctlmnb = MSGMNB;
95 ns->msg_ctlmni = MSGMNI;
96 ipc_init_ids(ids, ns->msg_ctlmni);
97 }
98
99 #ifdef CONFIG_IPC_NS
100 int msg_init_ns(struct ipc_namespace *ns)
101 {
102 struct ipc_ids *ids;
103
104 ids = kmalloc(sizeof(struct ipc_ids), GFP_KERNEL);
105 if (ids == NULL)
106 return -ENOMEM;
107
108 __msg_init_ns(ns, ids);
109 return 0;
110 }
111
112 void msg_exit_ns(struct ipc_namespace *ns)
113 {
114 int i;
115 struct msg_queue *msq;
116
117 mutex_lock(&msg_ids(ns).mutex);
118 for (i = 0; i <= msg_ids(ns).max_id; i++) {
119 msq = msg_lock(ns, i);
120 if (msq == NULL)
121 continue;
122
123 freeque(ns, msq, i);
124 }
125 mutex_unlock(&msg_ids(ns).mutex);
126
127 ipc_fini_ids(ns->ids[IPC_MSG_IDS]);
128 kfree(ns->ids[IPC_MSG_IDS]);
129 ns->ids[IPC_MSG_IDS] = NULL;
130 }
131 #endif
132
133 void __init msg_init(void)
134 {
135 __msg_init_ns(&init_ipc_ns, &init_msg_ids);
136 ipc_init_proc_interface("sysvipc/msg",
137 " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
138 IPC_MSG_IDS, sysvipc_msg_proc_show);
139 }
140
141 static int newque (struct ipc_namespace *ns, key_t key, int msgflg)
142 {
143 struct msg_queue *msq;
144 int id, retval;
145
146 msq = ipc_rcu_alloc(sizeof(*msq));
147 if (!msq)
148 return -ENOMEM;
149
150 msq->q_perm.mode = msgflg & S_IRWXUGO;
151 msq->q_perm.key = key;
152
153 msq->q_perm.security = NULL;
154 retval = security_msg_queue_alloc(msq);
155 if (retval) {
156 ipc_rcu_putref(msq);
157 return retval;
158 }
159
160 id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
161 if (id == -1) {
162 security_msg_queue_free(msq);
163 ipc_rcu_putref(msq);
164 return -ENOSPC;
165 }
166
167 msq->q_id = msg_buildid(ns, id, msq->q_perm.seq);
168 msq->q_stime = msq->q_rtime = 0;
169 msq->q_ctime = get_seconds();
170 msq->q_cbytes = msq->q_qnum = 0;
171 msq->q_qbytes = ns->msg_ctlmnb;
172 msq->q_lspid = msq->q_lrpid = 0;
173 INIT_LIST_HEAD(&msq->q_messages);
174 INIT_LIST_HEAD(&msq->q_receivers);
175 INIT_LIST_HEAD(&msq->q_senders);
176 msg_unlock(msq);
177
178 return msq->q_id;
179 }
180
181 static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
182 {
183 mss->tsk = current;
184 current->state = TASK_INTERRUPTIBLE;
185 list_add_tail(&mss->list, &msq->q_senders);
186 }
187
188 static inline void ss_del(struct msg_sender *mss)
189 {
190 if (mss->list.next != NULL)
191 list_del(&mss->list);
192 }
193
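/*
 * ss_wakeup() walks the list of sleeping senders and wakes each of them.
 * When kill is non-zero (queue teardown in freeque()), the entry's
 * list.next is cleared before the wakeup, so the woken sender's ss_del()
 * above finds NULL and skips the list_del() for a queue that is going away.
 */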
194 static void ss_wakeup(struct list_head *h, int kill)
195 {
196 struct list_head *tmp;
197
198 tmp = h->next;
199 while (tmp != h) {
200 struct msg_sender *mss;
201
202 mss = list_entry(tmp, struct msg_sender, list);
203 tmp = tmp->next;
204 if (kill)
205 mss->list.next = NULL;
206 wake_up_process(mss->tsk);
207 }
208 }
209
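/*
 * expunge_all() wakes every sleeping receiver and hands it an error code.
 * r_msg is deliberately published in two steps: it is first set to NULL so
 * that the receiver's busy-wait loop in sys_msgrcv() keeps spinning, and
 * the final ERR_PTR(res) is stored only after wake_up_process() returns
 * (ordered by the smp_mb()).  That way the receiver cannot consume the
 * result and exit while it is still being woken; see the "Lockless
 * receive" comments in sys_msgrcv() and ipc/mqueue.c.
 */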
210 static void expunge_all(struct msg_queue *msq, int res)
211 {
212 struct list_head *tmp;
213
214 tmp = msq->q_receivers.next;
215 while (tmp != &msq->q_receivers) {
216 struct msg_receiver *msr;
217
218 msr = list_entry(tmp, struct msg_receiver, r_list);
219 tmp = tmp->next;
220 msr->r_msg = NULL;
221 wake_up_process(msr->r_tsk);
222 smp_mb();
223 msr->r_msg = ERR_PTR(res);
224 }
225 }
226
227 /*
228 * freeque() wakes up waiters on the sender and receiver waiting queues,
229 * removes the message queue from the message queue ID
230 * array, and cleans up all the messages associated with this queue.
231 *
232 * msg_ids.mutex and the spinlock for this message queue are held
233 * before freeque() is called. msg_ids.mutex remains locked on exit.
234 */
235 static void freeque(struct ipc_namespace *ns, struct msg_queue *msq, int id)
236 {
237 struct list_head *tmp;
238
239 expunge_all(msq, -EIDRM);
240 ss_wakeup(&msq->q_senders, 1);
241 msq = msg_rmid(ns, id);
242 msg_unlock(msq);
243
244 tmp = msq->q_messages.next;
245 while (tmp != &msq->q_messages) {
246 struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list);
247
248 tmp = tmp->next;
249 atomic_dec(&msg_hdrs);
250 free_msg(msg);
251 }
252 atomic_sub(msq->q_cbytes, &msg_bytes);
253 security_msg_queue_free(msq);
254 ipc_rcu_putref(msq);
255 }
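/*
 * A minimal sketch of the calling sequence required by the comment above,
 * assuming the caller has already looked up the queue id; this mirrors the
 * msg_exit_ns() and IPC_RMID paths in this file and is only an illustration:
 *
 *	mutex_lock(&msg_ids(ns).mutex);
 *	msq = msg_lock(ns, id);
 *	if (msq != NULL)
 *		freeque(ns, msq, id);	(drops the queue spinlock itself)
 *	mutex_unlock(&msg_ids(ns).mutex);
 */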
256
257 asmlinkage long sys_msgget(key_t key, int msgflg)
258 {
259 struct msg_queue *msq;
260 int id, ret = -EPERM;
261 struct ipc_namespace *ns;
262
263 ns = current->nsproxy->ipc_ns;
264
265 mutex_lock(&msg_ids(ns).mutex);
266 if (key == IPC_PRIVATE)
267 ret = newque(ns, key, msgflg);
268 else if ((id = ipc_findkey(&msg_ids(ns), key)) == -1) { /* key not used */
269 if (!(msgflg & IPC_CREAT))
270 ret = -ENOENT;
271 else
272 ret = newque(ns, key, msgflg);
273 } else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) {
274 ret = -EEXIST;
275 } else {
276 msq = msg_lock(ns, id);
277 BUG_ON(msq == NULL);
278 if (ipcperms(&msq->q_perm, msgflg))
279 ret = -EACCES;
280 else {
281 int qid = msg_buildid(ns, id, msq->q_perm.seq);
282
283 ret = security_msg_queue_associate(msq, msgflg);
284 if (!ret)
285 ret = qid;
286 }
287 msg_unlock(msq);
288 }
289 mutex_unlock(&msg_ids(ns).mutex);
290
291 return ret;
292 }
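/*
 * A minimal user-space sketch of the key/flag combinations handled above
 * (illustration only, not part of this file; 0x1234 is an arbitrary key):
 *
 *	#include <sys/types.h>
 *	#include <sys/ipc.h>
 *	#include <sys/msg.h>
 *
 *	int qid;
 *
 *	qid = msgget(IPC_PRIVATE, 0600);		(always a new queue)
 *	qid = msgget(0x1234, IPC_CREAT | 0600);		(create if the key is unused)
 *	qid = msgget(0x1234, IPC_CREAT | IPC_EXCL | 0600);
 *							(fails with EEXIST if it exists)
 */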
293
294 static inline unsigned long
295 copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
296 {
297 switch(version) {
298 case IPC_64:
299 return copy_to_user(buf, in, sizeof(*in));
300 case IPC_OLD:
301 {
302 struct msqid_ds out;
303
304 memset(&out, 0, sizeof(out));
305
306 ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);
307
308 out.msg_stime = in->msg_stime;
309 out.msg_rtime = in->msg_rtime;
310 out.msg_ctime = in->msg_ctime;
311
312 if (in->msg_cbytes > USHRT_MAX)
313 out.msg_cbytes = USHRT_MAX;
314 else
315 out.msg_cbytes = in->msg_cbytes;
316 out.msg_lcbytes = in->msg_cbytes;
317
318 if (in->msg_qnum > USHRT_MAX)
319 out.msg_qnum = USHRT_MAX;
320 else
321 out.msg_qnum = in->msg_qnum;
322
323 if (in->msg_qbytes > USHRT_MAX)
324 out.msg_qbytes = USHRT_MAX;
325 else
326 out.msg_qbytes = in->msg_qbytes;
327 out.msg_lqbytes = in->msg_qbytes;
328
329 out.msg_lspid = in->msg_lspid;
330 out.msg_lrpid = in->msg_lrpid;
331
332 return copy_to_user(buf, &out, sizeof(out));
333 }
334 default:
335 return -EINVAL;
336 }
337 }
338
339 struct msq_setbuf {
340 unsigned long qbytes;
341 uid_t uid;
342 gid_t gid;
343 mode_t mode;
344 };
345
346 static inline unsigned long
347 copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version)
348 {
349 switch(version) {
350 case IPC_64:
351 {
352 struct msqid64_ds tbuf;
353
354 if (copy_from_user(&tbuf, buf, sizeof(tbuf)))
355 return -EFAULT;
356
357 out->qbytes = tbuf.msg_qbytes;
358 out->uid = tbuf.msg_perm.uid;
359 out->gid = tbuf.msg_perm.gid;
360 out->mode = tbuf.msg_perm.mode;
361
362 return 0;
363 }
364 case IPC_OLD:
365 {
366 struct msqid_ds tbuf_old;
367
368 if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
369 return -EFAULT;
370
371 out->uid = tbuf_old.msg_perm.uid;
372 out->gid = tbuf_old.msg_perm.gid;
373 out->mode = tbuf_old.msg_perm.mode;
374
375 if (tbuf_old.msg_qbytes == 0)
376 out->qbytes = tbuf_old.msg_lqbytes;
377 else
378 out->qbytes = tbuf_old.msg_qbytes;
379
380 return 0;
381 }
382 default:
383 return -EINVAL;
384 }
385 }
386
387 asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf)
388 {
389 struct kern_ipc_perm *ipcp;
390 struct msq_setbuf setbuf;
391 struct msg_queue *msq;
392 int err, version;
393 struct ipc_namespace *ns;
394
395 if (msqid < 0 || cmd < 0)
396 return -EINVAL;
397
398 version = ipc_parse_version(&cmd);
399 ns = current->nsproxy->ipc_ns;
400
401 switch (cmd) {
402 case IPC_INFO:
403 case MSG_INFO:
404 {
405 struct msginfo msginfo;
406 int max_id;
407
408 if (!buf)
409 return -EFAULT;
410 /*
411 * We must not return kernel stack data:
412 * due to padding, it's not enough
413 * to set all member fields.
414 */
415 err = security_msg_queue_msgctl(NULL, cmd);
416 if (err)
417 return err;
418
419 memset(&msginfo, 0, sizeof(msginfo));
420 msginfo.msgmni = ns->msg_ctlmni;
421 msginfo.msgmax = ns->msg_ctlmax;
422 msginfo.msgmnb = ns->msg_ctlmnb;
423 msginfo.msgssz = MSGSSZ;
424 msginfo.msgseg = MSGSEG;
425 mutex_lock(&msg_ids(ns).mutex);
426 if (cmd == MSG_INFO) {
427 msginfo.msgpool = msg_ids(ns).in_use;
428 msginfo.msgmap = atomic_read(&msg_hdrs);
429 msginfo.msgtql = atomic_read(&msg_bytes);
430 } else {
431 msginfo.msgmap = MSGMAP;
432 msginfo.msgpool = MSGPOOL;
433 msginfo.msgtql = MSGTQL;
434 }
435 max_id = msg_ids(ns).max_id;
436 mutex_unlock(&msg_ids(ns).mutex);
437 if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
438 return -EFAULT;
439 return (max_id < 0) ? 0 : max_id;
440 }
441 case MSG_STAT:
442 case IPC_STAT:
443 {
444 struct msqid64_ds tbuf;
445 int success_return;
446
447 if (!buf)
448 return -EFAULT;
449 if (cmd == MSG_STAT && msqid >= msg_ids(ns).entries->size)
450 return -EINVAL;
451
452 memset(&tbuf, 0, sizeof(tbuf));
453
454 msq = msg_lock(ns, msqid);
455 if (msq == NULL)
456 return -EINVAL;
457
458 if (cmd == MSG_STAT) {
459 success_return = msg_buildid(ns, msqid, msq->q_perm.seq);
460 } else {
461 err = -EIDRM;
462 if (msg_checkid(ns, msq, msqid))
463 goto out_unlock;
464 success_return = 0;
465 }
466 err = -EACCES;
467 if (ipcperms(&msq->q_perm, S_IRUGO))
468 goto out_unlock;
469
470 err = security_msg_queue_msgctl(msq, cmd);
471 if (err)
472 goto out_unlock;
473
474 kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
475 tbuf.msg_stime = msq->q_stime;
476 tbuf.msg_rtime = msq->q_rtime;
477 tbuf.msg_ctime = msq->q_ctime;
478 tbuf.msg_cbytes = msq->q_cbytes;
479 tbuf.msg_qnum = msq->q_qnum;
480 tbuf.msg_qbytes = msq->q_qbytes;
481 tbuf.msg_lspid = msq->q_lspid;
482 tbuf.msg_lrpid = msq->q_lrpid;
483 msg_unlock(msq);
484 if (copy_msqid_to_user(buf, &tbuf, version))
485 return -EFAULT;
486 return success_return;
487 }
488 case IPC_SET:
489 if (!buf)
490 return -EFAULT;
491 if (copy_msqid_from_user(&setbuf, buf, version))
492 return -EFAULT;
493 break;
494 case IPC_RMID:
495 break;
496 default:
497 return -EINVAL;
498 }
499
500 mutex_lock(&msg_ids(ns).mutex);
501 msq = msg_lock(ns, msqid);
502 err = -EINVAL;
503 if (msq == NULL)
504 goto out_up;
505
506 err = -EIDRM;
507 if (msg_checkid(ns, msq, msqid))
508 goto out_unlock_up;
509 ipcp = &msq->q_perm;
510
511 err = audit_ipc_obj(ipcp);
512 if (err)
513 goto out_unlock_up;
514 if (cmd==IPC_SET) {
515 err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid,
516 setbuf.mode);
517 if (err)
518 goto out_unlock_up;
519 }
520
521 err = -EPERM;
522 if (current->euid != ipcp->cuid &&
523 current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN))
524 /* We _could_ check for CAP_CHOWN above, but we don't */
525 goto out_unlock_up;
526
527 err = security_msg_queue_msgctl(msq, cmd);
528 if (err)
529 goto out_unlock_up;
530
531 switch (cmd) {
532 case IPC_SET:
533 {
534 err = -EPERM;
535 if (setbuf.qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE))
536 goto out_unlock_up;
537
538 msq->q_qbytes = setbuf.qbytes;
539
540 ipcp->uid = setbuf.uid;
541 ipcp->gid = setbuf.gid;
542 ipcp->mode = (ipcp->mode & ~S_IRWXUGO) |
543 (S_IRWXUGO & setbuf.mode);
544 msq->q_ctime = get_seconds();
545 /* sleeping receivers might be excluded by
546 * stricter permissions.
547 */
548 expunge_all(msq, -EAGAIN);
549 /* sleeping senders might be able to send
550 * due to a larger queue size.
551 */
552 ss_wakeup(&msq->q_senders, 0);
553 msg_unlock(msq);
554 break;
555 }
556 case IPC_RMID:
557 freeque(ns, msq, msqid);
558 break;
559 }
560 err = 0;
561 out_up:
562 mutex_unlock(&msg_ids(ns).mutex);
563 return err;
564 out_unlock_up:
565 msg_unlock(msq);
566 goto out_up;
567 out_unlock:
568 msg_unlock(msq);
569 return err;
570 }
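/*
 * A minimal user-space sketch of the IPC_STAT/IPC_SET/IPC_RMID handling
 * above (illustration only; "qid" is an assumed, already-created queue):
 *
 *	struct msqid_ds ds;
 *
 *	if (msgctl(qid, IPC_STAT, &ds) == 0) {
 *		ds.msg_qbytes = 32768;		(raising it above msg_ctlmnb
 *						 needs CAP_SYS_RESOURCE, see above)
 *		msgctl(qid, IPC_SET, &ds);
 *	}
 *	msgctl(qid, IPC_RMID, NULL);		(freeque() wakes any sleepers)
 */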
571
572 static int testmsg(struct msg_msg *msg, long type, int mode)
573 {
574 switch(mode)
575 {
576 case SEARCH_ANY:
577 return 1;
578 case SEARCH_LESSEQUAL:
579 if (msg->m_type <=type)
580 return 1;
581 break;
582 case SEARCH_EQUAL:
583 if (msg->m_type == type)
584 return 1;
585 break;
586 case SEARCH_NOTEQUAL:
587 if (msg->m_type != type)
588 return 1;
589 break;
590 }
591 return 0;
592 }
593
594 static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
595 {
596 struct list_head *tmp;
597
598 tmp = msq->q_receivers.next;
599 while (tmp != &msq->q_receivers) {
600 struct msg_receiver *msr;
601
602 msr = list_entry(tmp, struct msg_receiver, r_list);
603 tmp = tmp->next;
604 if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
605 !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
606 msr->r_msgtype, msr->r_mode)) {
607
608 list_del(&msr->r_list);
609 if (msr->r_maxsize < msg->m_ts) {
610 msr->r_msg = NULL;
611 wake_up_process(msr->r_tsk);
612 smp_mb();
613 msr->r_msg = ERR_PTR(-E2BIG);
614 } else {
615 msr->r_msg = NULL;
616 msq->q_lrpid = msr->r_tsk->pid;
617 msq->q_rtime = get_seconds();
618 wake_up_process(msr->r_tsk);
619 smp_mb();
620 msr->r_msg = msg;
621
622 return 1;
623 }
624 }
625 }
626 return 0;
627 }
628
629 asmlinkage long
630 sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg)
631 {
632 struct msg_queue *msq;
633 struct msg_msg *msg;
634 long mtype;
635 int err;
636 struct ipc_namespace *ns;
637
638 ns = current->nsproxy->ipc_ns;
639
640 if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0)
641 return -EINVAL;
642 if (get_user(mtype, &msgp->mtype))
643 return -EFAULT;
644 if (mtype < 1)
645 return -EINVAL;
646
647 msg = load_msg(msgp->mtext, msgsz);
648 if (IS_ERR(msg))
649 return PTR_ERR(msg);
650
651 msg->m_type = mtype;
652 msg->m_ts = msgsz;
653
654 msq = msg_lock(ns, msqid);
655 err = -EINVAL;
656 if (msq == NULL)
657 goto out_free;
658
659 err= -EIDRM;
660 if (msg_checkid(ns, msq, msqid))
661 goto out_unlock_free;
662
663 for (;;) {
664 struct msg_sender s;
665
666 err = -EACCES;
667 if (ipcperms(&msq->q_perm, S_IWUGO))
668 goto out_unlock_free;
669
670 err = security_msg_queue_msgsnd(msq, msg, msgflg);
671 if (err)
672 goto out_unlock_free;
673
674 if (msgsz + msq->q_cbytes <= msq->q_qbytes &&
675 1 + msq->q_qnum <= msq->q_qbytes) {
676 break;
677 }
678
679 /* queue full, wait: */
680 if (msgflg & IPC_NOWAIT) {
681 err = -EAGAIN;
682 goto out_unlock_free;
683 }
684 ss_add(msq, &s);
685 ipc_rcu_getref(msq);
686 msg_unlock(msq);
687 schedule();
688
689 ipc_lock_by_ptr(&msq->q_perm);
690 ipc_rcu_putref(msq);
691 if (msq->q_perm.deleted) {
692 err = -EIDRM;
693 goto out_unlock_free;
694 }
695 ss_del(&s);
696
697 if (signal_pending(current)) {
698 err = -ERESTARTNOHAND;
699 goto out_unlock_free;
700 }
701 }
702
703 msq->q_lspid = current->tgid;
704 msq->q_stime = get_seconds();
705
706 if (!pipelined_send(msq, msg)) {
707 /* no one is waiting for this message, enqueue it */
708 list_add_tail(&msg->m_list, &msq->q_messages);
709 msq->q_cbytes += msgsz;
710 msq->q_qnum++;
711 atomic_add(msgsz, &msg_bytes);
712 atomic_inc(&msg_hdrs);
713 }
714
715 err = 0;
716 msg = NULL;
717
718 out_unlock_free:
719 msg_unlock(msq);
720 out_free:
721 if (msg != NULL)
722 free_msg(msg);
723 return err;
724 }
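/*
 * A minimal user-space sketch of the send path above (illustration only;
 * "qid" is an assumed queue id).  mtype must be >= 1 and msgsz counts only
 * the mtext bytes, not the leading long:
 *
 *	struct { long mtype; char mtext[64]; } m = { 1, "hello" };
 *
 *	msgsnd(qid, &m, sizeof(m.mtext), 0);		(blocks while the queue is full)
 *	msgsnd(qid, &m, sizeof(m.mtext), IPC_NOWAIT);	(fails with EAGAIN instead)
 */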
725
726 static inline int convert_mode(long *msgtyp, int msgflg)
727 {
728 /*
729 * find message of correct type.
730 * msgtyp = 0 => get first.
731 * msgtyp > 0 => get first message of matching type.
732 * msgtyp < 0 => get the first message of the lowest type that is <= abs(msgtyp).
733 */
734 if (*msgtyp == 0)
735 return SEARCH_ANY;
736 if (*msgtyp < 0) {
737 *msgtyp = -*msgtyp;
738 return SEARCH_LESSEQUAL;
739 }
740 if (msgflg & MSG_EXCEPT)
741 return SEARCH_NOTEQUAL;
742 return SEARCH_EQUAL;
743 }
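/*
 * A minimal user-space sketch of the msgtyp values mapped above
 * (illustration only; "qid" and "buf" are assumed; MSG_EXCEPT is
 * Linux-specific):
 *
 *	msgrcv(qid, &buf, sizeof(buf.mtext), 0, 0);		(first message, any type)
 *	msgrcv(qid, &buf, sizeof(buf.mtext), 7, 0);		(first message of type 7)
 *	msgrcv(qid, &buf, sizeof(buf.mtext), -7, 0);		(lowest type <= 7)
 *	msgrcv(qid, &buf, sizeof(buf.mtext), 7, MSG_EXCEPT);	(first message of type != 7)
 */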
744
745 asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz,
746 long msgtyp, int msgflg)
747 {
748 struct msg_queue *msq;
749 struct msg_msg *msg;
750 int mode;
751 struct ipc_namespace *ns;
752
753 if (msqid < 0 || (long) msgsz < 0)
754 return -EINVAL;
755 mode = convert_mode(&msgtyp, msgflg);
756 ns = current->nsproxy->ipc_ns;
757
758 msq = msg_lock(ns, msqid);
759 if (msq == NULL)
760 return -EINVAL;
761
762 msg = ERR_PTR(-EIDRM);
763 if (msg_checkid(ns, msq, msqid))
764 goto out_unlock;
765
766 for (;;) {
767 struct msg_receiver msr_d;
768 struct list_head *tmp;
769
770 msg = ERR_PTR(-EACCES);
771 if (ipcperms(&msq->q_perm, S_IRUGO))
772 goto out_unlock;
773
774 msg = ERR_PTR(-EAGAIN);
775 tmp = msq->q_messages.next;
776 while (tmp != &msq->q_messages) {
777 struct msg_msg *walk_msg;
778
779 walk_msg = list_entry(tmp, struct msg_msg, m_list);
780 if (testmsg(walk_msg, msgtyp, mode) &&
781 !security_msg_queue_msgrcv(msq, walk_msg, current,
782 msgtyp, mode)) {
783
784 msg = walk_msg;
785 if (mode == SEARCH_LESSEQUAL &&
786 walk_msg->m_type != 1) {
787 msg = walk_msg;
788 msgtyp = walk_msg->m_type - 1;
789 } else {
790 msg = walk_msg;
791 break;
792 }
793 }
794 tmp = tmp->next;
795 }
796 if (!IS_ERR(msg)) {
797 /*
798 * Found a suitable message.
799 * Unlink it from the queue.
800 */
801 if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
802 msg = ERR_PTR(-E2BIG);
803 goto out_unlock;
804 }
805 list_del(&msg->m_list);
806 msq->q_qnum--;
807 msq->q_rtime = get_seconds();
808 msq->q_lrpid = current->tgid;
809 msq->q_cbytes -= msg->m_ts;
810 atomic_sub(msg->m_ts, &msg_bytes);
811 atomic_dec(&msg_hdrs);
812 ss_wakeup(&msq->q_senders, 0);
813 msg_unlock(msq);
814 break;
815 }
816 /* No message waiting. Wait for a message */
817 if (msgflg & IPC_NOWAIT) {
818 msg = ERR_PTR(-ENOMSG);
819 goto out_unlock;
820 }
821 list_add_tail(&msr_d.r_list, &msq->q_receivers);
822 msr_d.r_tsk = current;
823 msr_d.r_msgtype = msgtyp;
824 msr_d.r_mode = mode;
825 if (msgflg & MSG_NOERROR)
826 msr_d.r_maxsize = INT_MAX;
827 else
828 msr_d.r_maxsize = msgsz;
829 msr_d.r_msg = ERR_PTR(-EAGAIN);
830 current->state = TASK_INTERRUPTIBLE;
831 msg_unlock(msq);
832
833 schedule();
834
835 /* Lockless receive, part 1:
836 * Disable preemption. We don't hold a reference to the queue
837 * and getting a reference would defeat the idea of a lockless
838 * operation, thus the code relies on rcu to guarantee the
839 * existence of msq:
840 * Prior to destruction, expunge_all(-EIDRM) changes r_msg.
841 * Thus if r_msg is -EAGAIN, then the queue is not yet destroyed.
842 * rcu_read_lock() prevents preemption between reading r_msg
843 * and the spin_lock() inside ipc_lock_by_ptr().
844 */
845 rcu_read_lock();
846
847 /* Lockless receive, part 2:
848 * Wait until pipelined_send or expunge_all are outside of
849 * wake_up_process(). There is a race with exit(), see
850 * ipc/mqueue.c for the details.
851 */
852 msg = (struct msg_msg*)msr_d.r_msg;
853 while (msg == NULL) {
854 cpu_relax();
855 msg = (struct msg_msg *)msr_d.r_msg;
856 }
857
858 /* Lockless receive, part 3:
859 * If there is a message or an error then accept it without
860 * locking.
861 */
862 if (msg != ERR_PTR(-EAGAIN)) {
863 rcu_read_unlock();
864 break;
865 }
866
867 /* Lockless receive, part 4:
868 * Acquire the queue spinlock.
869 */
870 ipc_lock_by_ptr(&msq->q_perm);
871 rcu_read_unlock();
872
873 /* Lockless receive, part 5:
874 * Repeat test after acquiring the spinlock.
875 */
876 msg = (struct msg_msg*)msr_d.r_msg;
877 if (msg != ERR_PTR(-EAGAIN))
878 goto out_unlock;
879
880 list_del(&msr_d.r_list);
881 if (signal_pending(current)) {
882 msg = ERR_PTR(-ERESTARTNOHAND);
883 out_unlock:
884 msg_unlock(msq);
885 break;
886 }
887 }
888 if (IS_ERR(msg))
889 return PTR_ERR(msg);
890
891 msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz;
892 if (put_user (msg->m_type, &msgp->mtype) ||
893 store_msg(msgp->mtext, msg, msgsz)) {
894 msgsz = -EFAULT;
895 }
896 free_msg(msg);
897
898 return msgsz;
899 }
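/*
 * A minimal user-space sketch of the size handling above (illustration
 * only; "qid" is an assumed queue id holding a message larger than 8 bytes):
 *
 *	struct { long mtype; char mtext[8]; } small;
 *
 *	msgrcv(qid, &small, sizeof(small.mtext), 0, 0);			(fails with E2BIG)
 *	msgrcv(qid, &small, sizeof(small.mtext), 0, MSG_NOERROR);	(truncates to 8 bytes)
 */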
900
901 #ifdef CONFIG_PROC_FS
902 static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
903 {
904 struct msg_queue *msq = it;
905
906 return seq_printf(s,
907 "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
908 msq->q_perm.key,
909 msq->q_id,
910 msq->q_perm.mode,
911 msq->q_cbytes,
912 msq->q_qnum,
913 msq->q_lspid,
914 msq->q_lrpid,
915 msq->q_perm.uid,
916 msq->q_perm.gid,
917 msq->q_perm.cuid,
918 msq->q_perm.cgid,
919 msq->q_stime,
920 msq->q_rtime,
921 msq->q_ctime);
922 }
923 #endif