2 * linux/fs/9p/trans_fd.c
4 * Fd transport layer. Includes deprecated socket layer.
6 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
7 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
8 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
9 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2
13 * as published by the Free Software Foundation.
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to:
22 * Free Software Foundation
23 * 51 Franklin Street, Fifth Floor
24 * Boston, MA 02111-1301 USA
29 #include <linux/module.h>
30 #include <linux/net.h>
31 #include <linux/ipv6.h>
32 #include <linux/kthread.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
36 #include <linux/uaccess.h>
37 #include <linux/inet.h>
38 #include <linux/idr.h>
39 #include <linux/file.h>
40 #include <linux/parser.h>
41 #include <net/9p/9p.h>
42 #include <net/9p/client.h>
43 #include <net/9p/transport.h>
46 #define MAX_SOCK_BUF (64*1024)
48 #define MAXPOLLWADDR 2
51 * struct p9_fd_opts - per-transport options
52 * @rfd: file descriptor for reading (trans=fd)
53 * @wfd: file descriptor for writing (trans=fd)
54 * @port: port to connect to (trans=tcp)
65 * struct p9_trans_fd - transport state
66 * @rd: reference to file to read from
67 * @wr: reference of file to write to
68 * @conn: connection state reference
79 * Option Parsing (code inspired by NFS code)
80 * - a little lazy - parse all fd-transport options
84 /* Options that take integer arguments */
85 Opt_port
, Opt_rfdno
, Opt_wfdno
, Opt_err
,
88 static const match_table_t tokens
= {
89 {Opt_port
, "port=%u"},
90 {Opt_rfdno
, "rfdno=%u"},
91 {Opt_wfdno
, "wfdno=%u"},
96 Rworksched
= 1, /* read work scheduled or running */
97 Rpending
= 2, /* can read */
98 Wworksched
= 4, /* write work scheduled or running */
99 Wpending
= 8, /* can write */
109 typedef void (*p9_conn_req_callback
)(struct p9_req
*req
, void *a
);
112 * struct p9_req - fd mux encoding of an rpc transaction
113 * @lock: protects req_list
114 * @tag: numeric tag for rpc transaction
115 * @tcall: request &p9_fcall structure
116 * @rcall: response &p9_fcall structure
118 * @cb: callback for when response is received
119 * @cba: argument to pass to callback
120 * @flush: flag to indicate RPC has been flushed
121 * @req_list: list link for higher level objects to chain requests
122 * @m: connection this request was issued on
123 * @wqueue: wait queue that client is blocked on for this rpc
130 struct p9_fcall
*tcall
;
131 struct p9_fcall
*rcall
;
133 p9_conn_req_callback cb
;
136 struct list_head req_list
;
138 wait_queue_head_t wqueue
;
141 struct p9_poll_wait
{
142 struct p9_conn
*conn
;
144 wait_queue_head_t
*wait_addr
;
148 * struct p9_conn - fd mux connection state information
149 * @lock: protects mux_list (?)
150 * @mux_list: list link for mux to manage multiple connections (?)
151 * @client: reference to client instance for this connection
152 * @tagpool: id accounting for transactions
154 * @req_list: accounting for requests which have been sent
155 * @unsent_req_list: accounting for requests that haven't been sent
156 * @rcall: current response &p9_fcall structure
157 * @rpos: read position in current frame
158 * @rbuf: current read buffer
159 * @wpos: write position for current frame
160 * @wsize: amount of data to write for current frame
161 * @wbuf: current write buffer
162 * @poll_wait: array of wait_q's for various worker threads
165 * @rq: current read work
166 * @wq: current write work
172 spinlock_t lock
; /* protect lock structure */
173 struct list_head mux_list
;
174 struct p9_client
*client
;
175 struct p9_idpool
*tagpool
;
177 struct list_head req_list
;
178 struct list_head unsent_req_list
;
179 struct p9_fcall
*rcall
;
185 struct list_head poll_pending_link
;
186 struct p9_poll_wait poll_wait
[MAXPOLLWADDR
];
188 struct work_struct rq
;
189 struct work_struct wq
;
190 unsigned long wsched
;
193 static DEFINE_SPINLOCK(p9_poll_lock
);
194 static LIST_HEAD(p9_poll_pending_list
);
195 static struct workqueue_struct
*p9_mux_wq
;
196 static struct task_struct
*p9_poll_task
;
198 static u16
p9_mux_get_tag(struct p9_conn
*m
)
202 tag
= p9_idpool_get(m
->tagpool
);
209 static void p9_mux_put_tag(struct p9_conn
*m
, u16 tag
)
211 if (tag
!= P9_NOTAG
&& p9_idpool_check(tag
, m
->tagpool
))
212 p9_idpool_put(tag
, m
->tagpool
);
215 static void p9_mux_poll_stop(struct p9_conn
*m
)
220 for (i
= 0; i
< ARRAY_SIZE(m
->poll_wait
); i
++) {
221 struct p9_poll_wait
*pwait
= &m
->poll_wait
[i
];
223 if (pwait
->wait_addr
) {
224 remove_wait_queue(pwait
->wait_addr
, &pwait
->wait
);
225 pwait
->wait_addr
= NULL
;
229 spin_lock_irqsave(&p9_poll_lock
, flags
);
230 list_del_init(&m
->poll_pending_link
);
231 spin_unlock_irqrestore(&p9_poll_lock
, flags
);
235 * p9_conn_cancel - cancel all pending requests with error
241 void p9_conn_cancel(struct p9_conn
*m
, int err
)
243 struct p9_req
*req
, *rtmp
;
244 LIST_HEAD(cancel_list
);
246 P9_DPRINTK(P9_DEBUG_ERROR
, "mux %p err %d\n", m
, err
);
249 list_for_each_entry_safe(req
, rtmp
, &m
->req_list
, req_list
) {
250 list_move(&req
->req_list
, &cancel_list
);
252 list_for_each_entry_safe(req
, rtmp
, &m
->unsent_req_list
, req_list
) {
253 list_move(&req
->req_list
, &cancel_list
);
255 spin_unlock(&m
->lock
);
257 list_for_each_entry_safe(req
, rtmp
, &cancel_list
, req_list
) {
258 list_del(&req
->req_list
);
263 (*req
->cb
) (req
, req
->cba
);
269 static void process_request(struct p9_conn
*m
, struct p9_req
*req
)
272 struct p9_str
*ename
;
274 if (!req
->err
&& req
->rcall
->id
== P9_RERROR
) {
275 ecode
= req
->rcall
->params
.rerror
.errno
;
276 ename
= &req
->rcall
->params
.rerror
.error
;
278 P9_DPRINTK(P9_DEBUG_MUX
, "Rerror %.*s\n", ename
->len
,
285 req
->err
= p9_errstr2errno(ename
->str
, ename
->len
);
287 /* string match failed */
289 PRINT_FCALL_ERROR("unknown error", req
->rcall
);
290 req
->err
= -ESERVERFAULT
;
293 } else if (req
->tcall
&& req
->rcall
->id
!= req
->tcall
->id
+ 1) {
294 P9_DPRINTK(P9_DEBUG_ERROR
,
295 "fcall mismatch: expected %d, got %d\n",
296 req
->tcall
->id
+ 1, req
->rcall
->id
);
303 p9_fd_poll(struct p9_client
*client
, struct poll_table_struct
*pt
)
306 struct p9_trans_fd
*ts
= NULL
;
308 if (client
&& client
->status
== Connected
)
314 if (!ts
->rd
->f_op
|| !ts
->rd
->f_op
->poll
)
317 if (!ts
->wr
->f_op
|| !ts
->wr
->f_op
->poll
)
320 ret
= ts
->rd
->f_op
->poll(ts
->rd
, pt
);
324 if (ts
->rd
!= ts
->wr
) {
325 n
= ts
->wr
->f_op
->poll(ts
->wr
, pt
);
328 ret
= (ret
& ~POLLOUT
) | (n
& ~POLLIN
);
335 * p9_fd_read- read from a fd
336 * @client: client instance
337 * @v: buffer to receive data into
338 * @len: size of receive buffer
342 static int p9_fd_read(struct p9_client
*client
, void *v
, int len
)
345 struct p9_trans_fd
*ts
= NULL
;
347 if (client
&& client
->status
!= Disconnected
)
353 if (!(ts
->rd
->f_flags
& O_NONBLOCK
))
354 P9_DPRINTK(P9_DEBUG_ERROR
, "blocking read ...\n");
356 ret
= kernel_read(ts
->rd
, ts
->rd
->f_pos
, v
, len
);
357 if (ret
<= 0 && ret
!= -ERESTARTSYS
&& ret
!= -EAGAIN
)
358 client
->status
= Disconnected
;
363 * p9_read_work - called when there is some data to be read from a transport
364 * @work: container of work to be done
368 static void p9_read_work(struct work_struct
*work
)
372 struct p9_req
*req
, *rptr
, *rreq
;
373 struct p9_fcall
*rcall
;
376 m
= container_of(work
, struct p9_conn
, rq
);
382 P9_DPRINTK(P9_DEBUG_MUX
, "start mux %p pos %d\n", m
, m
->rpos
);
386 kmalloc(sizeof(struct p9_fcall
) + m
->client
->msize
,
393 m
->rbuf
= (char *)m
->rcall
+ sizeof(struct p9_fcall
);
397 clear_bit(Rpending
, &m
->wsched
);
398 err
= p9_fd_read(m
->client
, m
->rbuf
+ m
->rpos
,
399 m
->client
->msize
- m
->rpos
);
400 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p got %d bytes\n", m
, err
);
401 if (err
== -EAGAIN
) {
402 clear_bit(Rworksched
, &m
->wsched
);
410 while (m
->rpos
> 4) {
411 n
= le32_to_cpu(*(__le32
*) m
->rbuf
);
412 if (n
>= m
->client
->msize
) {
413 P9_DPRINTK(P9_DEBUG_ERROR
,
414 "requested packet size too big: %d\n", n
);
423 p9_deserialize_fcall(m
->rbuf
, n
, m
->rcall
, m
->client
->dotu
);
427 #ifdef CONFIG_NET_9P_DEBUG
428 if ((p9_debug_level
&P9_DEBUG_FCALL
) == P9_DEBUG_FCALL
) {
431 p9_printfcall(buf
, sizeof(buf
), m
->rcall
,
433 printk(KERN_NOTICE
">>> %p %s\n", m
, buf
);
440 m
->rcall
= kmalloc(sizeof(struct p9_fcall
) +
441 m
->client
->msize
, GFP_KERNEL
);
447 m
->rbuf
= (char *)m
->rcall
+ sizeof(struct p9_fcall
);
448 memmove(m
->rbuf
, rbuf
+ n
, m
->rpos
- n
);
456 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p fcall id %d tag %d\n", m
,
457 rcall
->id
, rcall
->tag
);
461 list_for_each_entry_safe(rreq
, rptr
, &m
->req_list
, req_list
) {
462 if (rreq
->tag
== rcall
->tag
) {
464 if (req
->flush
!= Flushing
)
465 list_del(&req
->req_list
);
469 spin_unlock(&m
->lock
);
473 process_request(m
, req
);
475 if (req
->flush
!= Flushing
) {
477 (*req
->cb
) (req
, req
->cba
);
482 if (err
>= 0 && rcall
->id
!= P9_RFLUSH
)
483 P9_DPRINTK(P9_DEBUG_ERROR
,
484 "unexpected response mux %p id %d tag %d\n",
485 m
, rcall
->id
, rcall
->tag
);
490 if (!list_empty(&m
->req_list
)) {
491 if (test_and_clear_bit(Rpending
, &m
->wsched
))
494 n
= p9_fd_poll(m
->client
, NULL
);
497 P9_DPRINTK(P9_DEBUG_MUX
, "schedule read work %p\n", m
);
498 queue_work(p9_mux_wq
, &m
->rq
);
500 clear_bit(Rworksched
, &m
->wsched
);
502 clear_bit(Rworksched
, &m
->wsched
);
507 p9_conn_cancel(m
, err
);
508 clear_bit(Rworksched
, &m
->wsched
);
512 * p9_fd_write - write to a socket
513 * @client: client instance
514 * @v: buffer to send data from
515 * @len: size of send buffer
519 static int p9_fd_write(struct p9_client
*client
, void *v
, int len
)
523 struct p9_trans_fd
*ts
= NULL
;
525 if (client
&& client
->status
!= Disconnected
)
531 if (!(ts
->wr
->f_flags
& O_NONBLOCK
))
532 P9_DPRINTK(P9_DEBUG_ERROR
, "blocking write ...\n");
536 /* The cast to a user pointer is valid due to the set_fs() */
537 ret
= vfs_write(ts
->wr
, (void __user
*)v
, len
, &ts
->wr
->f_pos
);
540 if (ret
<= 0 && ret
!= -ERESTARTSYS
&& ret
!= -EAGAIN
)
541 client
->status
= Disconnected
;
546 * p9_write_work - called when a transport can send some data
547 * @work: container for work to be done
551 static void p9_write_work(struct work_struct
*work
)
557 m
= container_of(work
, struct p9_conn
, wq
);
560 clear_bit(Wworksched
, &m
->wsched
);
565 if (list_empty(&m
->unsent_req_list
)) {
566 clear_bit(Wworksched
, &m
->wsched
);
572 req
= list_entry(m
->unsent_req_list
.next
, struct p9_req
,
574 list_move_tail(&req
->req_list
, &m
->req_list
);
575 if (req
->err
== ERREQFLUSH
)
578 m
->wbuf
= req
->tcall
->sdata
;
579 m
->wsize
= req
->tcall
->size
;
581 spin_unlock(&m
->lock
);
584 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p pos %d size %d\n", m
, m
->wpos
,
586 clear_bit(Wpending
, &m
->wsched
);
587 err
= p9_fd_write(m
->client
, m
->wbuf
+ m
->wpos
, m
->wsize
- m
->wpos
);
588 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p sent %d bytes\n", m
, err
);
589 if (err
== -EAGAIN
) {
590 clear_bit(Wworksched
, &m
->wsched
);
602 if (m
->wpos
== m
->wsize
)
603 m
->wpos
= m
->wsize
= 0;
605 if (m
->wsize
== 0 && !list_empty(&m
->unsent_req_list
)) {
606 if (test_and_clear_bit(Wpending
, &m
->wsched
))
609 n
= p9_fd_poll(m
->client
, NULL
);
612 P9_DPRINTK(P9_DEBUG_MUX
, "schedule write work %p\n", m
);
613 queue_work(p9_mux_wq
, &m
->wq
);
615 clear_bit(Wworksched
, &m
->wsched
);
617 clear_bit(Wworksched
, &m
->wsched
);
622 p9_conn_cancel(m
, err
);
623 clear_bit(Wworksched
, &m
->wsched
);
626 static int p9_pollwake(wait_queue_t
*wait
, unsigned mode
, int sync
, void *key
)
628 struct p9_poll_wait
*pwait
=
629 container_of(wait
, struct p9_poll_wait
, wait
);
630 struct p9_conn
*m
= pwait
->conn
;
632 DECLARE_WAITQUEUE(dummy_wait
, p9_poll_task
);
634 spin_lock_irqsave(&p9_poll_lock
, flags
);
635 if (list_empty(&m
->poll_pending_link
))
636 list_add_tail(&m
->poll_pending_link
, &p9_poll_pending_list
);
637 spin_unlock_irqrestore(&p9_poll_lock
, flags
);
639 /* perform the default wake up operation */
640 return default_wake_function(&dummy_wait
, mode
, sync
, key
);
644 * p9_pollwait - add poll task to the wait queue
645 * @filp: file pointer being polled
646 * @wait_address: wait_q to block on
649 * called by files poll operation to add v9fs-poll task to files wait queue
653 p9_pollwait(struct file
*filp
, wait_queue_head_t
*wait_address
, poll_table
*p
)
655 struct p9_conn
*m
= container_of(p
, struct p9_conn
, pt
);
656 struct p9_poll_wait
*pwait
= NULL
;
659 for (i
= 0; i
< ARRAY_SIZE(m
->poll_wait
); i
++) {
660 if (m
->poll_wait
[i
].wait_addr
== NULL
) {
661 pwait
= &m
->poll_wait
[i
];
667 P9_DPRINTK(P9_DEBUG_ERROR
, "not enough wait_address slots\n");
672 P9_DPRINTK(P9_DEBUG_ERROR
, "no wait_address\n");
673 pwait
->wait_addr
= ERR_PTR(-EIO
);
678 pwait
->wait_addr
= wait_address
;
679 init_waitqueue_func_entry(&pwait
->wait
, p9_pollwake
);
680 add_wait_queue(wait_address
, &pwait
->wait
);
684 * p9_conn_create - allocate and initialize the per-session mux data
685 * @client: client instance
687 * Note: Creates the polling task if this is the first session.
690 static struct p9_conn
*p9_conn_create(struct p9_client
*client
)
695 P9_DPRINTK(P9_DEBUG_MUX
, "client %p msize %d\n", client
, client
->msize
);
696 m
= kzalloc(sizeof(struct p9_conn
), GFP_KERNEL
);
698 return ERR_PTR(-ENOMEM
);
700 spin_lock_init(&m
->lock
);
701 INIT_LIST_HEAD(&m
->mux_list
);
703 m
->tagpool
= p9_idpool_create();
704 if (IS_ERR(m
->tagpool
)) {
706 return ERR_PTR(-ENOMEM
);
709 INIT_LIST_HEAD(&m
->req_list
);
710 INIT_LIST_HEAD(&m
->unsent_req_list
);
711 INIT_WORK(&m
->rq
, p9_read_work
);
712 INIT_WORK(&m
->wq
, p9_write_work
);
713 INIT_LIST_HEAD(&m
->poll_pending_link
);
714 init_poll_funcptr(&m
->pt
, p9_pollwait
);
716 n
= p9_fd_poll(client
, &m
->pt
);
718 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p can read\n", m
);
719 set_bit(Rpending
, &m
->wsched
);
723 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p can write\n", m
);
724 set_bit(Wpending
, &m
->wsched
);
727 for (i
= 0; i
< ARRAY_SIZE(m
->poll_wait
); i
++) {
728 if (IS_ERR(m
->poll_wait
[i
].wait_addr
)) {
731 /* return the error code */
732 return (void *)m
->poll_wait
[i
].wait_addr
;
740 * p9_poll_mux - polls a mux and schedules read or write works if necessary
741 * @m: connection to poll
745 static void p9_poll_mux(struct p9_conn
*m
)
752 n
= p9_fd_poll(m
->client
, NULL
);
753 if (n
< 0 || n
& (POLLERR
| POLLHUP
| POLLNVAL
)) {
754 P9_DPRINTK(P9_DEBUG_MUX
, "error mux %p err %d\n", m
, n
);
757 p9_conn_cancel(m
, n
);
761 set_bit(Rpending
, &m
->wsched
);
762 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p can read\n", m
);
763 if (!test_and_set_bit(Rworksched
, &m
->wsched
)) {
764 P9_DPRINTK(P9_DEBUG_MUX
, "schedule read work %p\n", m
);
765 queue_work(p9_mux_wq
, &m
->rq
);
770 set_bit(Wpending
, &m
->wsched
);
771 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p can write\n", m
);
772 if ((m
->wsize
|| !list_empty(&m
->unsent_req_list
))
773 && !test_and_set_bit(Wworksched
, &m
->wsched
)) {
774 P9_DPRINTK(P9_DEBUG_MUX
, "schedule write work %p\n", m
);
775 queue_work(p9_mux_wq
, &m
->wq
);
781 * p9_send_request - send 9P request
782 * The function can sleep until the request is scheduled for sending.
783 * The function can be interrupted. Return from the function is not
784 * a guarantee that the request is sent successfully. Can return errors
785 * that can be retrieved by PTR_ERR macros.
788 * @tc: request to be sent
789 * @cb: callback function to call when response is received
790 * @cba: parameter to pass to the callback function
794 static struct p9_req
*p9_send_request(struct p9_conn
*m
,
796 p9_conn_req_callback cb
, void *cba
)
801 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p task %p tcall %p id %d\n", m
, current
,
804 return ERR_PTR(m
->err
);
806 req
= kmalloc(sizeof(struct p9_req
), GFP_KERNEL
);
808 return ERR_PTR(-ENOMEM
);
810 if (tc
->id
== P9_TVERSION
)
813 n
= p9_mux_get_tag(m
);
817 return ERR_PTR(-ENOMEM
);
822 #ifdef CONFIG_NET_9P_DEBUG
823 if ((p9_debug_level
&P9_DEBUG_FCALL
) == P9_DEBUG_FCALL
) {
826 p9_printfcall(buf
, sizeof(buf
), tc
, m
->client
->dotu
);
827 printk(KERN_NOTICE
"<<< %p %s\n", m
, buf
);
831 spin_lock_init(&req
->lock
);
833 init_waitqueue_head(&req
->wqueue
);
843 list_add_tail(&req
->req_list
, &m
->unsent_req_list
);
844 spin_unlock(&m
->lock
);
846 if (test_and_clear_bit(Wpending
, &m
->wsched
))
849 n
= p9_fd_poll(m
->client
, NULL
);
851 if (n
& POLLOUT
&& !test_and_set_bit(Wworksched
, &m
->wsched
))
852 queue_work(p9_mux_wq
, &m
->wq
);
857 static void p9_mux_free_request(struct p9_conn
*m
, struct p9_req
*req
)
859 p9_mux_put_tag(m
, req
->tag
);
863 static void p9_mux_flush_cb(struct p9_req
*freq
, void *a
)
867 struct p9_req
*req
, *rreq
, *rptr
;
870 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p tc %p rc %p err %d oldtag %d\n", m
,
871 freq
->tcall
, freq
->rcall
, freq
->err
,
872 freq
->tcall
->params
.tflush
.oldtag
);
875 tag
= freq
->tcall
->params
.tflush
.oldtag
;
877 list_for_each_entry_safe(rreq
, rptr
, &m
->req_list
, req_list
) {
878 if (rreq
->tag
== tag
) {
880 list_del(&req
->req_list
);
884 spin_unlock(&m
->lock
);
887 spin_lock(&req
->lock
);
888 req
->flush
= Flushed
;
889 spin_unlock(&req
->lock
);
892 (*req
->cb
) (req
, req
->cba
);
899 p9_mux_free_request(m
, freq
);
903 p9_mux_flush_request(struct p9_conn
*m
, struct p9_req
*req
)
906 struct p9_req
*rreq
, *rptr
;
908 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p req %p tag %d\n", m
, req
, req
->tag
);
910 /* if a response was received for a request, do nothing */
911 spin_lock(&req
->lock
);
912 if (req
->rcall
|| req
->err
) {
913 spin_unlock(&req
->lock
);
914 P9_DPRINTK(P9_DEBUG_MUX
,
915 "mux %p req %p response already received\n", m
, req
);
919 req
->flush
= Flushing
;
920 spin_unlock(&req
->lock
);
923 /* if the request is not sent yet, just remove it from the list */
924 list_for_each_entry_safe(rreq
, rptr
, &m
->unsent_req_list
, req_list
) {
925 if (rreq
->tag
== req
->tag
) {
926 P9_DPRINTK(P9_DEBUG_MUX
,
927 "mux %p req %p request is not sent yet\n", m
, req
);
928 list_del(&rreq
->req_list
);
929 req
->flush
= Flushed
;
930 spin_unlock(&m
->lock
);
932 (*req
->cb
) (req
, req
->cba
);
936 spin_unlock(&m
->lock
);
938 clear_thread_flag(TIF_SIGPENDING
);
939 fc
= p9_create_tflush(req
->tag
);
940 p9_send_request(m
, fc
, p9_mux_flush_cb
, m
);
944 static void p9_conn_rpc_cb(struct p9_req
*req
, void *a
)
946 P9_DPRINTK(P9_DEBUG_MUX
, "req %p arg %p\n", req
, a
);
948 if (req
->flush
!= None
&& !req
->err
)
949 req
->err
= -ERESTARTSYS
;
951 wake_up(&req
->wqueue
);
955 * p9_fd_rpc- sends 9P request and waits until a response is available.
956 * The function can be interrupted.
957 * @client: client instance
958 * @tc: request to be sent
959 * @rc: pointer where a pointer to the response is stored
964 p9_fd_rpc(struct p9_client
*client
, struct p9_fcall
*tc
, struct p9_fcall
**rc
)
966 struct p9_trans_fd
*p
= client
->trans
;
967 struct p9_conn
*m
= p
->conn
;
976 if (signal_pending(current
)) {
978 clear_thread_flag(TIF_SIGPENDING
);
981 req
= p9_send_request(m
, tc
, p9_conn_rpc_cb
, NULL
);
984 P9_DPRINTK(P9_DEBUG_MUX
, "error %d\n", err
);
988 err
= wait_event_interruptible(req
->wqueue
, req
->rcall
!= NULL
||
993 if (err
== -ERESTARTSYS
&& client
->status
== Connected
995 if (p9_mux_flush_request(m
, req
)) {
996 /* wait until we get response of the flush message */
998 clear_thread_flag(TIF_SIGPENDING
);
999 err
= wait_event_interruptible(req
->wqueue
,
1000 req
->rcall
|| req
->err
);
1001 } while (!req
->rcall
&& !req
->err
&&
1002 err
== -ERESTARTSYS
&&
1003 client
->status
== Connected
&& !m
->err
);
1011 spin_lock_irqsave(¤t
->sighand
->siglock
, flags
);
1012 recalc_sigpending();
1013 spin_unlock_irqrestore(¤t
->sighand
->siglock
, flags
);
1021 p9_mux_free_request(m
, req
);
1029 * parse_options - parse mount options into session structure
1030 * @options: options string passed from mount
1031 * @opts: transport-specific structure to parse options into
1033 * Returns 0 upon success, -ERRNO upon failure
1036 static int parse_opts(char *params
, struct p9_fd_opts
*opts
)
1039 substring_t args
[MAX_OPT_ARGS
];
1044 opts
->port
= P9_PORT
;
1051 options
= kstrdup(params
, GFP_KERNEL
);
1053 P9_DPRINTK(P9_DEBUG_ERROR
,
1054 "failed to allocate copy of option string\n");
1058 while ((p
= strsep(&options
, ",")) != NULL
) {
1063 token
= match_token(p
, tokens
, args
);
1064 r
= match_int(&args
[0], &option
);
1066 P9_DPRINTK(P9_DEBUG_ERROR
,
1067 "integer field, but no integer?\n");
1073 opts
->port
= option
;
1089 static int p9_fd_open(struct p9_client
*client
, int rfd
, int wfd
)
1091 struct p9_trans_fd
*ts
= kmalloc(sizeof(struct p9_trans_fd
),
1098 if (!ts
->rd
|| !ts
->wr
) {
1108 client
->status
= Connected
;
1113 static int p9_socket_open(struct p9_client
*client
, struct socket
*csocket
)
1117 csocket
->sk
->sk_allocation
= GFP_NOIO
;
1118 fd
= sock_map_fd(csocket
, 0);
1120 P9_EPRINTK(KERN_ERR
, "p9_socket_open: failed to map fd\n");
1124 ret
= p9_fd_open(client
, fd
, fd
);
1126 P9_EPRINTK(KERN_ERR
, "p9_socket_open: failed to open fd\n");
1127 sockfd_put(csocket
);
1131 ((struct p9_trans_fd
*)client
->trans
)->rd
->f_flags
|= O_NONBLOCK
;
1137 * p9_mux_destroy - cancels all pending requests and frees mux resources
1138 * @m: mux to destroy
1142 static void p9_conn_destroy(struct p9_conn
*m
)
1144 P9_DPRINTK(P9_DEBUG_MUX
, "mux %p prev %p next %p\n", m
,
1145 m
->mux_list
.prev
, m
->mux_list
.next
);
1147 p9_mux_poll_stop(m
);
1148 cancel_work_sync(&m
->rq
);
1149 cancel_work_sync(&m
->wq
);
1151 p9_conn_cancel(m
, -ECONNRESET
);
1154 p9_idpool_destroy(m
->tagpool
);
1159 * p9_fd_close - shutdown file descriptor transport
1160 * @client: client instance
1164 static void p9_fd_close(struct p9_client
*client
)
1166 struct p9_trans_fd
*ts
;
1175 client
->status
= Disconnected
;
1177 p9_conn_destroy(ts
->conn
);
/*
 * valid_ipaddr4 - validate a dotted-quad IPv4 address string
 * @buf: NUL-terminated candidate address
 *
 * Returns 0 when @buf parses as four dot-separated integers each in
 * [0, 255], -EINVAL otherwise.
 * (stolen from NFS - maybe should be made a generic function?)
 *
 * Fix: the octet range check previously tested only the upper bound,
 * so negative components (e.g. "-1.2.3.4", which %d happily parses)
 * were accepted and passed on to in_aton().
 */
static inline int valid_ipaddr4(const char *buf)
{
	int rc, count, in[4];

	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
	if (rc != 4)
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		/* each octet must lie in [0, 255] */
		if (in[count] < 0 || in[count] > 255)
			return -EINVAL;
	}
	return 0;
}
1205 p9_fd_create_tcp(struct p9_client
*client
, const char *addr
, char *args
)
1208 struct socket
*csocket
;
1209 struct sockaddr_in sin_server
;
1210 struct p9_fd_opts opts
;
1211 struct p9_trans_fd
*p
= NULL
; /* this gets allocated in p9_fd_open */
1213 err
= parse_opts(args
, &opts
);
1217 if (valid_ipaddr4(addr
) < 0)
1222 sin_server
.sin_family
= AF_INET
;
1223 sin_server
.sin_addr
.s_addr
= in_aton(addr
);
1224 sin_server
.sin_port
= htons(opts
.port
);
1225 sock_create_kern(PF_INET
, SOCK_STREAM
, IPPROTO_TCP
, &csocket
);
1228 P9_EPRINTK(KERN_ERR
, "p9_trans_tcp: problem creating socket\n");
1233 err
= csocket
->ops
->connect(csocket
,
1234 (struct sockaddr
*)&sin_server
,
1235 sizeof(struct sockaddr_in
), 0);
1237 P9_EPRINTK(KERN_ERR
,
1238 "p9_trans_tcp: problem connecting socket to %s\n",
1243 err
= p9_socket_open(client
, csocket
);
1247 p
= (struct p9_trans_fd
*) client
->trans
;
1248 p
->conn
= p9_conn_create(client
);
1249 if (IS_ERR(p
->conn
)) {
1250 err
= PTR_ERR(p
->conn
);
1259 sock_release(csocket
);
1267 p9_fd_create_unix(struct p9_client
*client
, const char *addr
, char *args
)
1270 struct socket
*csocket
;
1271 struct sockaddr_un sun_server
;
1272 struct p9_trans_fd
*p
= NULL
; /* this gets allocated in p9_fd_open */
1276 if (strlen(addr
) > UNIX_PATH_MAX
) {
1277 P9_EPRINTK(KERN_ERR
, "p9_trans_unix: address too long: %s\n",
1279 err
= -ENAMETOOLONG
;
1283 sun_server
.sun_family
= PF_UNIX
;
1284 strcpy(sun_server
.sun_path
, addr
);
1285 sock_create_kern(PF_UNIX
, SOCK_STREAM
, 0, &csocket
);
1286 err
= csocket
->ops
->connect(csocket
, (struct sockaddr
*)&sun_server
,
1287 sizeof(struct sockaddr_un
) - 1, 0);
1289 P9_EPRINTK(KERN_ERR
,
1290 "p9_trans_unix: problem connecting socket: %s: %d\n",
1295 err
= p9_socket_open(client
, csocket
);
1299 p
= (struct p9_trans_fd
*) client
->trans
;
1300 p
->conn
= p9_conn_create(client
);
1301 if (IS_ERR(p
->conn
)) {
1302 err
= PTR_ERR(p
->conn
);
1311 sock_release(csocket
);
1318 p9_fd_create(struct p9_client
*client
, const char *addr
, char *args
)
1321 struct p9_fd_opts opts
;
1322 struct p9_trans_fd
*p
= NULL
; /* this get allocated in p9_fd_open */
1324 parse_opts(args
, &opts
);
1326 if (opts
.rfd
== ~0 || opts
.wfd
== ~0) {
1327 printk(KERN_ERR
"v9fs: Insufficient options for proto=fd\n");
1328 return -ENOPROTOOPT
;
1331 err
= p9_fd_open(client
, opts
.rfd
, opts
.wfd
);
1335 p
= (struct p9_trans_fd
*) client
->trans
;
1336 p
->conn
= p9_conn_create(client
);
1337 if (IS_ERR(p
->conn
)) {
1338 err
= PTR_ERR(p
->conn
);
1350 static struct p9_trans_module p9_tcp_trans
= {
1352 .maxsize
= MAX_SOCK_BUF
,
1354 .create
= p9_fd_create_tcp
,
1355 .close
= p9_fd_close
,
1357 .owner
= THIS_MODULE
,
1360 static struct p9_trans_module p9_unix_trans
= {
1362 .maxsize
= MAX_SOCK_BUF
,
1364 .create
= p9_fd_create_unix
,
1365 .close
= p9_fd_close
,
1367 .owner
= THIS_MODULE
,
1370 static struct p9_trans_module p9_fd_trans
= {
1372 .maxsize
= MAX_SOCK_BUF
,
1374 .create
= p9_fd_create
,
1375 .close
= p9_fd_close
,
1377 .owner
= THIS_MODULE
,
1381 * p9_poll_proc - poll worker thread
1382 * @a: thread state and arguments
1384 * polls all v9fs transports for new events and queues the appropriate
1385 * work to the work queue
1389 static int p9_poll_proc(void *a
)
1391 unsigned long flags
;
1393 P9_DPRINTK(P9_DEBUG_MUX
, "start %p\n", current
);
1395 spin_lock_irqsave(&p9_poll_lock
, flags
);
1396 while (!list_empty(&p9_poll_pending_list
)) {
1397 struct p9_conn
*conn
= list_first_entry(&p9_poll_pending_list
,
1400 list_del_init(&conn
->poll_pending_link
);
1401 spin_unlock_irqrestore(&p9_poll_lock
, flags
);
1405 spin_lock_irqsave(&p9_poll_lock
, flags
);
1407 spin_unlock_irqrestore(&p9_poll_lock
, flags
);
1409 set_current_state(TASK_INTERRUPTIBLE
);
1410 if (list_empty(&p9_poll_pending_list
)) {
1411 P9_DPRINTK(P9_DEBUG_MUX
, "sleeping...\n");
1414 __set_current_state(TASK_RUNNING
);
1416 if (!kthread_should_stop())
1419 P9_DPRINTK(P9_DEBUG_MUX
, "finish\n");
1423 int p9_trans_fd_init(void)
1425 p9_mux_wq
= create_workqueue("v9fs");
1427 printk(KERN_WARNING
"v9fs: mux: creating workqueue failed\n");
1431 p9_poll_task
= kthread_run(p9_poll_proc
, NULL
, "v9fs-poll");
1432 if (IS_ERR(p9_poll_task
)) {
1433 destroy_workqueue(p9_mux_wq
);
1434 printk(KERN_WARNING
"v9fs: mux: creating poll task failed\n");
1435 return PTR_ERR(p9_poll_task
);
1438 v9fs_register_trans(&p9_tcp_trans
);
1439 v9fs_register_trans(&p9_unix_trans
);
1440 v9fs_register_trans(&p9_fd_trans
);
1445 void p9_trans_fd_exit(void)
1447 kthread_stop(p9_poll_task
);
1448 v9fs_unregister_trans(&p9_tcp_trans
);
1449 v9fs_unregister_trans(&p9_unix_trans
);
1450 v9fs_unregister_trans(&p9_fd_trans
);
1452 destroy_workqueue(p9_mux_wq
);