/*
 * VMware vSockets Driver
 *
 * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

/* Implementation notes:
 *
 * - There are two kinds of sockets: those created by user action (such as
 * calling socket(2)) and those created by incoming connection request packets.
 *
 * - There are two "global" tables, one for bound sockets (sockets that have
 * specified an address that they are responsible for) and one for connected
 * sockets (sockets that have established a connection with another socket).
 * These tables are "global" in that all sockets on the system are placed
 * within them.  Note, though, that the bound table contains an extra entry
 * for a list of unbound sockets, and SOCK_DGRAM sockets will always remain in
 * that list.  The bound table is used solely for lookup of sockets when
 * packets are received, and that is not necessary for SOCK_DGRAM sockets
 * since we create a datagram handle for each and need not perform a lookup.
 * Keeping SOCK_DGRAM sockets out of the bound hash buckets reduces the chance
 * of collisions when looking up SOCK_STREAM sockets and prevents us from
 * having to check the socket type in the hash table lookups.
 *
 * - Sockets created by user action will either be "client" sockets that
 * initiate a connection or "server" sockets that listen for connections; we
 * do not support simultaneous connects (two "client" sockets connecting).
 *
 * - "Server" sockets are referred to as listener sockets throughout this
 * implementation because they are in the SS_LISTEN state.  When a connection
 * request is received (the second kind of socket mentioned above), we create
 * a new socket and refer to it as a pending socket.  These pending sockets
 * are placed on the pending connection list of the listener socket.  When
 * future packets are received for the address the listener socket is bound
 * to, we check if the source of the packet is from one that has an existing
 * pending connection.  If it does, we process the packet for the pending
 * socket.  When that socket reaches the connected state, it is removed from
 * the listener socket's pending list and enqueued in the listener socket's
 * accept queue.  Callers of accept(2) will accept connected sockets from the
 * listener socket's accept queue.  If the socket cannot be accepted for some
 * reason then it is marked rejected.  Once the connection is accepted, it is
 * owned by the user process and the responsibility for cleanup falls with
 * that user process.
 *
 * - It is possible that these pending sockets will never reach the connected
 * state; in fact, we may never receive another packet after the connection
 * request.  Because of this, we must schedule a cleanup function to run in
 * the future, after some amount of time passes where a connection should have
 * been established.  This function ensures that the socket is off all lists
 * so it cannot be retrieved, then drops all references to the socket so it is
 * cleaned up (sock_put() -> sk_free() -> our sk_destruct implementation).
 * Note this function will also clean up rejected sockets, those that reach
 * the connected state but leave it before they have been accepted.
 *
 * - Sockets created by user action will be cleaned up when the user process
 * calls close(2), causing our release implementation to be called.  Our
 * release implementation will perform some cleanup then drop the last
 * reference so our sk_destruct implementation is invoked.  Our sk_destruct
 * implementation will perform additional cleanup that's common for both types
 * of sockets.
 *
 * - A socket's reference count is what ensures that the structure won't be
 * freed.  Each entry in a list (such as the "global" bound and connected
 * tables and the listener socket's pending list and connected queue) ensures
 * a reference.  When we defer work until process context and pass a socket as
 * our argument, we must ensure the reference count is increased to ensure the
 * socket isn't freed before the function is run; the deferred function will
 * then drop the reference.
 */

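/* Illustrative sketch (not part of the driver): the reference-counting rule
 * described above means code that defers work on a socket takes a reference
 * first and lets the deferred function drop it.  "some_deferred_fn" is a
 * made-up name used only for illustration:
 *
 *	sock_hold(sk);
 *	INIT_DELAYED_WORK(&vsk->dwork, some_deferred_fn);
 *	schedule_delayed_work(&vsk->dwork, timeout);
 *
 *	static void some_deferred_fn(struct work_struct *work)
 *	{
 *		struct vsock_sock *vsk = container_of(work, struct vsock_sock,
 *						      dwork.work);
 *		...
 *		sock_put(sk_vsock(vsk));  /- drop the deferred reference -/
 *	}
 *
 * vsock_pending_work() and vsock_connect_timeout() below follow this pattern.
 */
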
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/cred.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/smp.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <net/sock.h>

#include "af_vsock.h"

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr);
static void vsock_sk_destruct(struct sock *sk);
static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

/* Protocol family. */
static struct proto vsock_proto = {
	.name = "AF_VSOCK",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct vsock_sock),
};

/* The default peer timeout indicates how long we will wait for a peer
 * response to a control message.
 */
#define VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ)

#define SS_LISTEN 255

static const struct vsock_transport *transport;
static DEFINE_MUTEX(vsock_register_mutex);

/* Get the ID of the local context.  This is transport dependent. */
int vm_sockets_get_local_cid(void)
{
	return transport->get_local_cid();
}
EXPORT_SYMBOL_GPL(vm_sockets_get_local_cid);

/* Each bound VSocket is stored in the bind hash table and each connected
 * VSocket is stored in the connected hash table.
 *
 * Unbound sockets are all put on the same list attached to the end of the
 * hash table (vsock_unbound_sockets).  Bound sockets are added to the hash
 * table in the bucket that their local address hashes to
 * (vsock_bound_sockets(addr) represents the list that addr hashes to).
 *
 * Specifically, we initialize the vsock_bind_table array to a size of
 * VSOCK_HASH_SIZE + 1 so that vsock_bind_table[0] through
 * vsock_bind_table[VSOCK_HASH_SIZE - 1] are for bound sockets and
 * vsock_bind_table[VSOCK_HASH_SIZE] is for unbound sockets.  The hash
 * function mods with VSOCK_HASH_SIZE - 1 to ensure this.
 */
#define VSOCK_HASH_SIZE		251
#define MAX_PORT_RETRIES	24

#define VSOCK_HASH(addr)	((addr)->svm_port % (VSOCK_HASH_SIZE - 1))
#define vsock_bound_sockets(addr) (&vsock_bind_table[VSOCK_HASH(addr)])
#define vsock_unbound_sockets	  (&vsock_bind_table[VSOCK_HASH_SIZE])
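/* Worked example (illustrative only): with VSOCK_HASH_SIZE == 251 the hash
 * mods with 250, so a socket bound to port 1024 hashes to bucket
 * 1024 % 250 == 24 and lives in vsock_bind_table[24], while every unbound
 * socket sits in the extra bucket vsock_bind_table[251].
 */
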
/* XXX This can probably be implemented in a better way. */
#define VSOCK_CONN_HASH(src, dst)	\
	(((src)->svm_cid ^ (dst)->svm_port) % (VSOCK_HASH_SIZE - 1))
#define vsock_connected_sockets(src, dst)	\
	(&vsock_connected_table[VSOCK_CONN_HASH(src, dst)])
#define vsock_connected_sockets_vsk(vsk)	\
	vsock_connected_sockets(&(vsk)->remote_addr, &(vsk)->local_addr)

static struct list_head vsock_bind_table[VSOCK_HASH_SIZE + 1];
static struct list_head vsock_connected_table[VSOCK_HASH_SIZE];
static DEFINE_SPINLOCK(vsock_table_lock);

static void vsock_init_tables(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock_bind_table); i++)
		INIT_LIST_HEAD(&vsock_bind_table[i]);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++)
		INIT_LIST_HEAD(&vsock_connected_table[i]);
}

static void __vsock_insert_bound(struct list_head *list,
				 struct vsock_sock *vsk)
{
	list_add(&vsk->bound_table, list);
}

static void __vsock_insert_connected(struct list_head *list,
				     struct vsock_sock *vsk)
{
	list_add(&vsk->connected_table, list);
}

static void __vsock_remove_bound(struct vsock_sock *vsk)
{
	list_del_init(&vsk->bound_table);
}

static void __vsock_remove_connected(struct vsock_sock *vsk)
{
	list_del_init(&vsk->connected_table);
}

static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table)
		if (addr->svm_port == vsk->local_addr.svm_port)
			return sk_vsock(vsk);

	return NULL;
}

static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,
						  struct sockaddr_vm *dst)
{
	struct vsock_sock *vsk;

	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),
			    connected_table) {
		if (vsock_addr_equals_addr(src, &vsk->remote_addr) &&
		    dst->svm_port == vsk->local_addr.svm_port) {
			return sk_vsock(vsk);
		}
	}

	return NULL;
}

static bool __vsock_in_bound_table(struct vsock_sock *vsk)
{
	return !list_empty(&vsk->bound_table);
}

static bool __vsock_in_connected_table(struct vsock_sock *vsk)
{
	return !list_empty(&vsk->connected_table);
}

static void vsock_insert_unbound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_bound(vsock_unbound_sockets, vsk);
	spin_unlock_bh(&vsock_table_lock);
}

void vsock_insert_connected(struct vsock_sock *vsk)
{
	struct list_head *list = vsock_connected_sockets(
		&vsk->remote_addr, &vsk->local_addr);

	spin_lock_bh(&vsock_table_lock);
	__vsock_insert_connected(list, vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_insert_connected);

void vsock_remove_bound(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_remove_bound(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_bound);

void vsock_remove_connected(struct vsock_sock *vsk)
{
	spin_lock_bh(&vsock_table_lock);
	__vsock_remove_connected(vsk);
	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_remove_connected);

struct sock *vsock_find_bound_socket(struct sockaddr_vm *addr)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_bound_socket(addr);
	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_bound_socket);

struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
					 struct sockaddr_vm *dst)
{
	struct sock *sk;

	spin_lock_bh(&vsock_table_lock);
	sk = __vsock_find_connected_socket(src, dst);
	spin_unlock_bh(&vsock_table_lock);

	return sk;
}
EXPORT_SYMBOL_GPL(vsock_find_connected_socket);

static bool vsock_in_bound_table(struct vsock_sock *vsk)
{
	bool ret;

	spin_lock_bh(&vsock_table_lock);
	ret = __vsock_in_bound_table(vsk);
	spin_unlock_bh(&vsock_table_lock);

	return ret;
}

static bool vsock_in_connected_table(struct vsock_sock *vsk)
{
	bool ret;

	spin_lock_bh(&vsock_table_lock);
	ret = __vsock_in_connected_table(vsk);
	spin_unlock_bh(&vsock_table_lock);

	return ret;
}

void vsock_for_each_connected_socket(void (*fn)(struct sock *sk))
{
	int i;

	spin_lock_bh(&vsock_table_lock);

	for (i = 0; i < ARRAY_SIZE(vsock_connected_table); i++) {
		struct vsock_sock *vsk;

		list_for_each_entry(vsk, &vsock_connected_table[i],
				    connected_table)
			fn(sk_vsock(vsk));
	}

	spin_unlock_bh(&vsock_table_lock);
}
EXPORT_SYMBOL_GPL(vsock_for_each_connected_socket);

void vsock_add_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vpending;

	vlistener = vsock_sk(listener);
	vpending = vsock_sk(pending);

	list_add_tail(&vpending->pending_links, &vlistener->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_add_pending);

void vsock_remove_pending(struct sock *listener, struct sock *pending)
{
	struct vsock_sock *vpending = vsock_sk(pending);

	list_del_init(&vpending->pending_links);
}
EXPORT_SYMBOL_GPL(vsock_remove_pending);

void vsock_enqueue_accept(struct sock *listener, struct sock *connected)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);
	vconnected = vsock_sk(connected);

	sock_hold(connected);
	list_add_tail(&vconnected->accept_queue, &vlistener->accept_queue);
}
EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
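/* Remove the first socket from the listener's accept queue and return it, or
 * return NULL if the queue is empty.  The reference that was taken when the
 * socket was enqueued is transferred to the caller, which is expected to drop
 * it with sock_put() once it is done with the socket.
 */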
static struct sock *vsock_dequeue_accept(struct sock *listener)
{
	struct vsock_sock *vlistener;
	struct vsock_sock *vconnected;

	vlistener = vsock_sk(listener);

	if (list_empty(&vlistener->accept_queue))
		return NULL;

	vconnected = list_entry(vlistener->accept_queue.next,
				struct vsock_sock, accept_queue);

	list_del_init(&vconnected->accept_queue);

	/* The caller will need a reference on the connected socket so we let
	 * it call sock_put().
	 */

	return sk_vsock(vconnected);
}

static bool vsock_is_accept_queue_empty(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return list_empty(&vsk->accept_queue);
}

static bool vsock_is_pending(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	return !list_empty(&vsk->pending_links);
}

static int vsock_send_shutdown(struct sock *sk, int mode)
{
	return transport->shutdown(vsock_sk(sk), mode);
}

void vsock_pending_work(struct work_struct *work)
{
	struct sock *sk;
	struct sock *listener;
	struct vsock_sock *vsk;
	bool cleanup;

	vsk = container_of(work, struct vsock_sock, dwork.work);
	sk = sk_vsock(vsk);
	listener = vsk->listener;
	cleanup = true;

	lock_sock(listener);
	lock_sock(sk);

	if (vsock_is_pending(sk)) {
		vsock_remove_pending(listener, sk);
	} else if (!vsk->rejected) {
		/* We are not on the pending list and accept() did not reject
		 * us, so we must have been accepted by our user process.  We
		 * just need to drop our references to the sockets and be on
		 * our way.
		 */
		cleanup = false;
		goto out;
	}

	listener->sk_ack_backlog--;

	/* We need to remove ourself from the global connected sockets list so
	 * incoming packets can't find this socket, and to reduce the reference
	 * count.
	 */
	if (vsock_in_connected_table(vsk))
		vsock_remove_connected(vsk);

	sk->sk_state = SS_FREE;

out:
	release_sock(sk);
	release_sock(listener);
	if (cleanup)
		sock_put(sk);

	sock_put(sk);
	sock_put(listener);
}
EXPORT_SYMBOL_GPL(vsock_pending_work);

/**** SOCKET OPERATIONS ****/

static int __vsock_bind_stream(struct vsock_sock *vsk,
			       struct sockaddr_vm *addr)
{
	static u32 port = LAST_RESERVED_PORT + 1;
	struct sockaddr_vm new_addr;

	vsock_addr_init(&new_addr, addr->svm_cid, addr->svm_port);

	if (addr->svm_port == VMADDR_PORT_ANY) {
		bool found = false;
		unsigned int i;

		for (i = 0; i < MAX_PORT_RETRIES; i++) {
			if (port <= LAST_RESERVED_PORT)
				port = LAST_RESERVED_PORT + 1;

			new_addr.svm_port = port++;

			if (!__vsock_find_bound_socket(&new_addr)) {
				found = true;
				break;
			}
		}

		if (!found)
			return -EADDRNOTAVAIL;
	} else {
		/* If port is in reserved range, ensure caller
		 * has necessary privileges.
		 */
		if (addr->svm_port <= LAST_RESERVED_PORT &&
		    !capable(CAP_NET_BIND_SERVICE)) {
			return -EACCES;
		}

		if (__vsock_find_bound_socket(&new_addr))
			return -EADDRINUSE;
	}

	vsock_addr_init(&vsk->local_addr, new_addr.svm_cid, new_addr.svm_port);

	/* Remove stream sockets from the unbound list and add them to the
	 * hash table for easy lookup by their address.  The unbound list is
	 * simply an extra entry at the end of the hash table, a trick used by
	 * AF_UNIX.
	 */
	__vsock_remove_bound(vsk);
	__vsock_insert_bound(vsock_bound_sockets(&vsk->local_addr), vsk);

	return 0;
}

static int __vsock_bind_dgram(struct vsock_sock *vsk,
			      struct sockaddr_vm *addr)
{
	return transport->dgram_bind(vsk, addr);
}

static int __vsock_bind(struct sock *sk, struct sockaddr_vm *addr)
{
	struct vsock_sock *vsk = vsock_sk(sk);
	u32 cid;
	int retval;

	/* First ensure this socket isn't already bound. */
	if (vsock_addr_bound(&vsk->local_addr))
		return -EINVAL;

	/* Now bind to the provided address or select appropriate values if
	 * none are provided (VMADDR_CID_ANY and VMADDR_PORT_ANY).  Note that
	 * like AF_INET prevents binding to a non-local IP address (in most
	 * cases), we only allow binding to the local CID.
	 */
	cid = transport->get_local_cid();
	if (addr->svm_cid != cid && addr->svm_cid != VMADDR_CID_ANY)
		return -EADDRNOTAVAIL;

	switch (sk->sk_socket->type) {
	case SOCK_STREAM:
		spin_lock_bh(&vsock_table_lock);
		retval = __vsock_bind_stream(vsk, addr);
		spin_unlock_bh(&vsock_table_lock);
		break;

	case SOCK_DGRAM:
		retval = __vsock_bind_dgram(vsk, addr);
		break;

	default:
		retval = -EINVAL;
		break;
	}

	return retval;
}

struct sock *__vsock_create(struct net *net,
			    struct socket *sock,
			    struct sock *parent,
			    gfp_t priority,
			    unsigned short type)
{
	struct sock *sk;
	struct vsock_sock *psk;
	struct vsock_sock *vsk;

	sk = sk_alloc(net, AF_VSOCK, priority, &vsock_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);

	/* sk->sk_type is normally set in sock_init_data, but only if sock is
	 * non-NULL.  We make sure that our sockets always have a type by
	 * setting it here if needed.
	 */
	if (!sock)
		sk->sk_type = type;

	vsk = vsock_sk(sk);
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	sk->sk_destruct = vsock_sk_destruct;
	sk->sk_backlog_rcv = vsock_queue_rcv_skb;
	sock_reset_flag(sk, SOCK_DONE);

	INIT_LIST_HEAD(&vsk->bound_table);
	INIT_LIST_HEAD(&vsk->connected_table);
	vsk->listener = NULL;
	INIT_LIST_HEAD(&vsk->pending_links);
	INIT_LIST_HEAD(&vsk->accept_queue);
	vsk->rejected = false;
	vsk->sent_request = false;
	vsk->ignore_connecting_rst = false;
	vsk->peer_shutdown = 0;

	psk = parent ? vsock_sk(parent) : NULL;
	if (parent) {
		vsk->trusted = psk->trusted;
		vsk->owner = get_cred(psk->owner);
		vsk->connect_timeout = psk->connect_timeout;
	} else {
		vsk->trusted = capable(CAP_NET_ADMIN);
		vsk->owner = get_current_cred();
		vsk->connect_timeout = VSOCK_DEFAULT_CONNECT_TIMEOUT;
	}

	if (transport->init(vsk, psk) < 0) {
		sk_free(sk);
		return NULL;
	}

	if (sock)
		vsock_insert_unbound(vsk);

	return sk;
}
EXPORT_SYMBOL_GPL(__vsock_create);

static void __vsock_release(struct sock *sk)
{
	if (sk) {
		struct sk_buff *skb;
		struct sock *pending;
		struct vsock_sock *vsk;

		vsk = vsock_sk(sk);
		pending = NULL;	/* Compiler warning. */

		if (vsock_in_bound_table(vsk))
			vsock_remove_bound(vsk);

		if (vsock_in_connected_table(vsk))
			vsock_remove_connected(vsk);

		transport->release(vsk);

		lock_sock(sk);
		sock_orphan(sk);
		sk->sk_shutdown = SHUTDOWN_MASK;

		while ((skb = skb_dequeue(&sk->sk_receive_queue)))
			kfree_skb(skb);

		/* Clean up any sockets that never were accepted. */
		while ((pending = vsock_dequeue_accept(sk)) != NULL) {
			__vsock_release(pending);
			sock_put(pending);
		}

		release_sock(sk);
		sock_put(sk);
	}
}

static void vsock_sk_destruct(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	transport->destruct(vsk);

	/* When clearing these addresses, there's no need to set the family and
	 * possibly register the address family with the kernel.
	 */
	vsock_addr_init(&vsk->local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
	vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);

	put_cred(vsk->owner);
}

static int vsock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = sock_queue_rcv_skb(sk, skb);
	if (err)
		kfree_skb(skb);

	return err;
}

s64 vsock_stream_has_data(struct vsock_sock *vsk)
{
	return transport->stream_has_data(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_data);

s64 vsock_stream_has_space(struct vsock_sock *vsk)
{
	return transport->stream_has_space(vsk);
}
EXPORT_SYMBOL_GPL(vsock_stream_has_space);

static int vsock_release(struct socket *sock)
{
	__vsock_release(sock->sk);
	sock->sk = NULL;
	sock->state = SS_FREE;

	return 0;
}

static int
vsock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
{
	int err;
	struct sock *sk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;

	if (vsock_addr_cast(addr, addr_len, &vm_addr) != 0)
		return -EINVAL;

	lock_sock(sk);
	err = __vsock_bind(sk, vm_addr);
	release_sock(sk);

	return err;
}

static int vsock_getname(struct socket *sock,
			 struct sockaddr *addr, int *addr_len, int peer)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *vm_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	lock_sock(sk);

	if (peer) {
		if (sock->state != SS_CONNECTED) {
			err = -ENOTCONN;
			goto out;
		}
		vm_addr = &vsk->remote_addr;
	} else {
		vm_addr = &vsk->local_addr;
	}

	/* sys_getsockname() and sys_getpeername() pass us a
	 * MAX_SOCK_ADDR-sized buffer and don't set addr_len.  Unfortunately
	 * that macro is defined in socket.c instead of .h, so we hardcode its
	 * value here.
	 */
	BUILD_BUG_ON(sizeof(*vm_addr) > 128);
	memcpy(addr, vm_addr, sizeof(*vm_addr));
	*addr_len = sizeof(*vm_addr);

out:
	release_sock(sk);
	return err;
}

static int vsock_shutdown(struct socket *sock, int mode)
{
	int err;
	struct sock *sk;

	/* User level uses SHUT_RD (0) and SHUT_WR (1), but the kernel uses
	 * RCV_SHUTDOWN (1) and SEND_SHUTDOWN (2), so we must increment mode
	 * here like the other address families do.  Note also that the
	 * increment makes SHUT_RDWR (2) into RCV_SHUTDOWN | SEND_SHUTDOWN (3),
	 * which is what we want.
	 */
	mode++;

	if ((mode & ~SHUTDOWN_MASK) || !mode)
		return -EINVAL;

	/* If this is a STREAM socket and it is not connected then bail out
	 * immediately.  If it is a DGRAM socket then we must first kick the
	 * socket so that it wakes up from any sleeping calls, for example
	 * recv(), and then afterwards return the error.
	 */
	sk = sock->sk;
	if (sock->state == SS_UNCONNECTED) {
		err = -ENOTCONN;
		if (sk->sk_type == SOCK_STREAM)
			return err;
	} else {
		sock->state = SS_DISCONNECTING;
		err = 0;
	}

	/* Receive and send shutdowns are treated alike. */
	mode = mode & (RCV_SHUTDOWN | SEND_SHUTDOWN);
	if (mode) {
		lock_sock(sk);
		sk->sk_shutdown |= mode;
		sk->sk_state_change(sk);
		release_sock(sk);

		if (sk->sk_type == SOCK_STREAM) {
			sock_reset_flag(sk, SOCK_DONE);
			vsock_send_shutdown(sk, mode);
		}
	}

	return err;
}

static unsigned int vsock_poll(struct file *file, struct socket *sock,
			       poll_table *wait)
{
	struct sock *sk;
	unsigned int mask;
	struct vsock_sock *vsk;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	poll_wait(file, sk_sleep(sk), wait);
	mask = 0;

	if (sk->sk_err)
		/* Signify that there has been an error on this socket. */
		mask |= POLLERR;

	/* INET sockets treat local write shutdown and peer write shutdown as
	 * a case of POLLHUP set.
	 */
	if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
	    ((sk->sk_shutdown & SEND_SHUTDOWN) &&
	     (vsk->peer_shutdown & SEND_SHUTDOWN))) {
		mask |= POLLHUP;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN ||
	    vsk->peer_shutdown & SEND_SHUTDOWN) {
		mask |= POLLRDHUP;
	}

	if (sock->type == SOCK_DGRAM) {
		/* For datagram sockets we can read if there is something in
		 * the queue and write as long as the socket isn't shutdown
		 * for sending.
		 */
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
		    (sk->sk_shutdown & RCV_SHUTDOWN)) {
			mask |= POLLIN | POLLRDNORM;
		}

		if (!(sk->sk_shutdown & SEND_SHUTDOWN))
			mask |= POLLOUT | POLLWRNORM | POLLWRBAND;

	} else if (sock->type == SOCK_STREAM) {
		/* Listening sockets that have connections in their accept
		 * queue can be read.
		 */
		if (sk->sk_state == SS_LISTEN
		    && !vsock_is_accept_queue_empty(sk))
			mask |= POLLIN | POLLRDNORM;

		/* If there is something in the queue then we can read. */
		if (transport->stream_is_active(vsk) &&
		    !(sk->sk_shutdown & RCV_SHUTDOWN)) {
			bool data_ready_now = false;
			int ret = transport->notify_poll_in(
					vsk, 1, &data_ready_now);
			if (ret < 0) {
				mask |= POLLERR;
			} else {
				if (data_ready_now)
					mask |= POLLIN | POLLRDNORM;
			}
		}

		/* Sockets whose connections have been closed, reset, or
		 * terminated should also be considered read, and we check the
		 * shutdown flag for that.
		 */
		if (sk->sk_shutdown & RCV_SHUTDOWN ||
		    vsk->peer_shutdown & SEND_SHUTDOWN) {
			mask |= POLLIN | POLLRDNORM;
		}

		/* Connected sockets that can produce data can be written. */
		if (sk->sk_state == SS_CONNECTED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
				bool space_avail_now = false;
				int ret = transport->notify_poll_out(
						vsk, 1, &space_avail_now);
				if (ret < 0) {
					mask |= POLLERR;
				} else {
					if (space_avail_now)
						/* Remove POLLWRBAND since INET
						 * sockets are not setting it.
						 */
						mask |= POLLOUT | POLLWRNORM;
				}
			}
		}

		/* Simulate INET socket poll behaviors, which sets
		 * POLLOUT|POLLWRNORM when peer is closed and nothing to read,
		 * but local send is not shutdown.
		 */
		if (sk->sk_state == SS_UNCONNECTED) {
			if (!(sk->sk_shutdown & SEND_SHUTDOWN))
				mask |= POLLOUT | POLLWRNORM;
		}
	}

	return mask;
}

static int vsock_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	/* For now, MSG_DONTWAIT is always assumed... */
	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	if (!vsock_addr_bound(&vsk->local_addr)) {
		struct sockaddr_vm local_addr;

		vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
		err = __vsock_bind(sk, &local_addr);
		if (err != 0)
			goto out;
	}

	/* If the provided message contains an address, use that.  Otherwise
	 * fall back on the socket's remote handle (if it has been connected).
	 */
	if (msg->msg_name &&
	    vsock_addr_cast(msg->msg_name, msg->msg_namelen,
			    &remote_addr) == 0) {
		/* Ensure this address is of the right type and is a valid
		 * destination.
		 */
		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		if (!vsock_addr_bound(remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else if (sock->state == SS_CONNECTED) {
		remote_addr = &vsk->remote_addr;

		if (remote_addr->svm_cid == VMADDR_CID_ANY)
			remote_addr->svm_cid = transport->get_local_cid();

		/* XXX Should connect() or this function ensure remote_addr is
		 * bound?
		 */
		if (!vsock_addr_bound(&vsk->remote_addr)) {
			err = -EINVAL;
			goto out;
		}
	} else {
		err = -EINVAL;
		goto out;
	}

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	err = transport->dgram_enqueue(vsk, remote_addr, msg->msg_iov, len);

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_connect(struct socket *sock,
			       struct sockaddr *addr, int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;

	sk = sock->sk;
	vsk = vsock_sk(sk);

	err = vsock_addr_cast(addr, addr_len, &remote_addr);
	if (err == -EAFNOSUPPORT && remote_addr->svm_family == AF_UNSPEC) {
		lock_sock(sk);
		vsock_addr_init(&vsk->remote_addr, VMADDR_CID_ANY,
				VMADDR_PORT_ANY);
		sock->state = SS_UNCONNECTED;
		release_sock(sk);
		return 0;
	} else if (err != 0)
		return -EINVAL;

	lock_sock(sk);

	if (!vsock_addr_bound(&vsk->local_addr)) {
		struct sockaddr_vm local_addr;

		vsock_addr_init(&local_addr, VMADDR_CID_ANY, VMADDR_PORT_ANY);
		err = __vsock_bind(sk, &local_addr);
		if (err != 0)
			goto out;
	}

	if (!transport->dgram_allow(remote_addr->svm_cid,
				    remote_addr->svm_port)) {
		err = -EINVAL;
		goto out;
	}

	memcpy(&vsk->remote_addr, remote_addr, sizeof(vsk->remote_addr));
	sock->state = SS_CONNECTED;

out:
	release_sock(sk);
	return err;
}

static int vsock_dgram_recvmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len, int flags)
{
	return transport->dgram_dequeue(kiocb, vsock_sk(sock->sk), msg, len,
					flags);
}

static const struct proto_ops vsock_dgram_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_dgram_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = sock_no_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = sock_no_setsockopt,
	.getsockopt = sock_no_getsockopt,
	.sendmsg = vsock_dgram_sendmsg,
	.recvmsg = vsock_dgram_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
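/* Illustrative userspace sketch (not part of this file): sending a datagram
 * to an arbitrary example CID/port pair.  The sendto() below reaches
 * vsock_dgram_sendmsg() via vsock_dgram_ops:
 *
 *	int fd = socket(AF_VSOCK, SOCK_DGRAM, 0);
 *	struct sockaddr_vm dst = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = 2,
 *		.svm_port = 9999,
 *	};
 *	sendto(fd, buf, buf_len, 0, (struct sockaddr *)&dst, sizeof(dst));
 */
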
static void vsock_connect_timeout(struct work_struct *work)
{
	struct sock *sk;
	struct vsock_sock *vsk;

	vsk = container_of(work, struct vsock_sock, dwork.work);
	sk = sk_vsock(vsk);

	lock_sock(sk);
	if (sk->sk_state == SS_CONNECTING &&
	    (sk->sk_shutdown != SHUTDOWN_MASK)) {
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ETIMEDOUT;
		sk->sk_error_report(sk);
	}
	release_sock(sk);

	sock_put(sk);
}

static int vsock_stream_connect(struct socket *sock, struct sockaddr *addr,
				int addr_len, int flags)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	struct sockaddr_vm *remote_addr;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	/* XXX AF_UNSPEC should make us disconnect like AF_INET. */
	switch (sock->state) {
	case SS_CONNECTED:
		err = -EISCONN;
		goto out;
	case SS_DISCONNECTING:
		err = -EINVAL;
		goto out;
	case SS_CONNECTING:
		/* This continues on so we can move sock into the SS_CONNECTED
		 * state once the connection has completed (at which point err
		 * will be set to zero also).  Otherwise, we will either wait
		 * for the connection or return -EALREADY should this be a
		 * non-blocking call.
		 */
		err = -EALREADY;
		break;
	default:
		if ((sk->sk_state == SS_LISTEN) ||
		    vsock_addr_cast(addr, addr_len, &remote_addr) != 0) {
			err = -EINVAL;
			goto out;
		}

		/* The hypervisor and well-known contexts do not have socket
		 * endpoints.
		 */
		if (!transport->stream_allow(remote_addr->svm_cid,
					     remote_addr->svm_port)) {
			err = -ENETUNREACH;
			goto out;
		}

		/* Set the remote address that we are connecting to. */
		memcpy(&vsk->remote_addr, remote_addr,
		       sizeof(vsk->remote_addr));

		/* Autobind this socket to the local address if necessary. */
		if (!vsock_addr_bound(&vsk->local_addr)) {
			struct sockaddr_vm local_addr;

			vsock_addr_init(&local_addr, VMADDR_CID_ANY,
					VMADDR_PORT_ANY);
			err = __vsock_bind(sk, &local_addr);
			if (err != 0)
				goto out;
		}

		sk->sk_state = SS_CONNECTING;

		err = transport->connect(vsk);
		if (err < 0)
			goto out;

		/* Mark sock as connecting and set the error code to in
		 * progress in case this is a non-blocking connect.
		 */
		sock->state = SS_CONNECTING;
		err = -EINPROGRESS;
	}

	/* The receive path will handle all communication until we are able to
	 * enter the connected state.  Here we wait for the connection to be
	 * completed or a notification of an error.
	 */
	timeout = vsk->connect_timeout;
	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	while (sk->sk_state != SS_CONNECTED && sk->sk_err == 0) {
		if (flags & O_NONBLOCK) {
			/* If we're not going to block, we schedule a timeout
			 * function to generate a timeout on the connection
			 * attempt, in case the peer doesn't respond in a
			 * timely manner.  We hold on to the socket until the
			 * timeout fires.
			 */
			sock_hold(sk);
			INIT_DELAYED_WORK(&vsk->dwork,
					  vsock_connect_timeout);
			schedule_delayed_work(&vsk->dwork, timeout);

			/* Skip ahead to preserve error code set above. */
			goto out_wait;
		}

		release_sock(sk);
		timeout = schedule_timeout(timeout);
		lock_sock(sk);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			goto out_wait_error;
		} else if (timeout == 0) {
			err = -ETIMEDOUT;
			goto out_wait_error;
		}

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}

	if (sk->sk_err) {
		err = -sk->sk_err;
		goto out_wait_error;
	} else
		err = 0;

out_wait:
	finish_wait(sk_sleep(sk), &wait);
out:
	release_sock(sk);
	return err;

out_wait_error:
	sk->sk_state = SS_UNCONNECTED;
	sock->state = SS_UNCONNECTED;
	goto out_wait;
}

static int vsock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct sock *listener;
	int err;
	struct sock *connected;
	struct vsock_sock *vconnected;
	long timeout;
	DEFINE_WAIT(wait);

	err = 0;
	listener = sock->sk;

	lock_sock(listener);

	if (sock->type != SOCK_STREAM) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (listener->sk_state != SS_LISTEN) {
		err = -EINVAL;
		goto out;
	}

	/* Wait for children sockets to appear; these are the new sockets
	 * created upon connection establishment.
	 */
	timeout = sock_sndtimeo(listener, flags & O_NONBLOCK);
	prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);

	while ((connected = vsock_dequeue_accept(listener)) == NULL &&
	       listener->sk_err == 0) {
		release_sock(listener);
		timeout = schedule_timeout(timeout);
		lock_sock(listener);

		if (signal_pending(current)) {
			err = sock_intr_errno(timeout);
			goto out_wait;
		} else if (timeout == 0) {
			err = -EAGAIN;
			goto out_wait;
		}

		prepare_to_wait(sk_sleep(listener), &wait, TASK_INTERRUPTIBLE);
	}

	if (listener->sk_err)
		err = -listener->sk_err;

	if (connected) {
		listener->sk_ack_backlog--;

		lock_sock(connected);
		vconnected = vsock_sk(connected);

		/* If the listener socket has received an error, then we should
		 * reject this socket and return.  Note that we simply mark the
		 * socket rejected, drop our reference, and let the cleanup
		 * function handle the cleanup; the fact that we found it in
		 * the listener's accept queue guarantees that the cleanup
		 * function hasn't run yet.
		 */
		if (err) {
			vconnected->rejected = true;
			release_sock(connected);
			sock_put(connected);
			goto out_wait;
		}

		newsock->state = SS_CONNECTED;
		sock_graft(connected, newsock);
		release_sock(connected);
		sock_put(connected);
	}

out_wait:
	finish_wait(sk_sleep(listener), &wait);
out:
	release_sock(listener);
	return err;
}

static int vsock_listen(struct socket *sock, int backlog)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;

	sk = sock->sk;

	lock_sock(sk);

	if (sock->type != SOCK_STREAM) {
		err = -EOPNOTSUPP;
		goto out;
	}

	if (sock->state != SS_UNCONNECTED) {
		err = -EINVAL;
		goto out;
	}

	vsk = vsock_sk(sk);

	if (!vsock_addr_bound(&vsk->local_addr)) {
		err = -EINVAL;
		goto out;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_state = SS_LISTEN;

	err = 0;

out:
	release_sock(sk);
	return err;
}

static int vsock_stream_setsockopt(struct socket *sock,
				   int level,
				   int optname,
				   char __user *optval,
				   unsigned int optlen)
{
	int err;
	struct sock *sk;
	struct vsock_sock *vsk;
	u64 val;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

#define COPY_IN(_v)						\
	do {							\
		if (optlen < sizeof(_v)) {			\
			err = -EINVAL;				\
			goto exit;				\
		}						\
		if (copy_from_user(&_v, optval, sizeof(_v)) != 0) {	\
			err = -EFAULT;				\
			goto exit;				\
		}						\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	lock_sock(sk);

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		COPY_IN(val);
		transport->set_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		COPY_IN(val);
		transport->set_max_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		COPY_IN(val);
		transport->set_min_buffer_size(vsk, val);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
		struct timeval tv;
		COPY_IN(tv);
		if (tv.tv_sec >= 0 && tv.tv_usec < USEC_PER_SEC &&
		    tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / HZ - 1)) {
			vsk->connect_timeout = tv.tv_sec * HZ +
			    DIV_ROUND_UP(tv.tv_usec, (1000000 / HZ));
			if (vsk->connect_timeout == 0)
				vsk->connect_timeout =
				    VSOCK_DEFAULT_CONNECT_TIMEOUT;
		} else {
			err = -ERANGE;
		}
		break;
	}

	default:
		err = -ENOPROTOOPT;
		break;
	}

#undef COPY_IN

exit:
	release_sock(sk);
	return err;
}
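/* Worked example of the conversion above (illustrative, assuming HZ == 250):
 * a userspace timeout of { .tv_sec = 2, .tv_usec = 500000 } becomes
 * 2 * 250 + DIV_ROUND_UP(500000, 1000000 / 250) = 500 + 125 = 625 jiffies,
 * i.e. 2.5 seconds.  A value that rounds to 0 jiffies falls back to
 * VSOCK_DEFAULT_CONNECT_TIMEOUT (2 * HZ).
 */
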
static int vsock_stream_getsockopt(struct socket *sock,
				   int level, int optname,
				   char __user *optval,
				   int __user *optlen)
{
	int err;
	int len;
	struct sock *sk;
	struct vsock_sock *vsk;
	u64 val;

	if (level != AF_VSOCK)
		return -ENOPROTOOPT;

	err = get_user(len, optlen);
	if (err != 0)
		return err;

#define COPY_OUT(_v)				\
	do {					\
		if (len < sizeof(_v))		\
			return -EINVAL;		\
						\
		len = sizeof(_v);		\
		if (copy_to_user(optval, &_v, len) != 0)	\
			return -EFAULT;		\
	} while (0)

	err = 0;
	sk = sock->sk;
	vsk = vsock_sk(sk);

	switch (optname) {
	case SO_VM_SOCKETS_BUFFER_SIZE:
		val = transport->get_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_BUFFER_MAX_SIZE:
		val = transport->get_max_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_BUFFER_MIN_SIZE:
		val = transport->get_min_buffer_size(vsk);
		COPY_OUT(val);
		break;

	case SO_VM_SOCKETS_CONNECT_TIMEOUT: {
		struct timeval tv;
		tv.tv_sec = vsk->connect_timeout / HZ;
		tv.tv_usec =
		    (vsk->connect_timeout -
		     tv.tv_sec * HZ) * (1000000 / HZ);
		COPY_OUT(tv);
		break;
	}
	default:
		return -ENOPROTOOPT;
	}

	err = put_user(len, optlen);
	if (err != 0)
		return -EFAULT;

#undef COPY_OUT

	return 0;
}

static int vsock_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
				struct msghdr *msg, size_t len)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	ssize_t total_written;
	long timeout;
	int err;
	struct vsock_transport_send_notify_data send_data;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	total_written = 0;
	err = 0;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	/* Callers should not provide a destination with stream sockets. */
	if (msg->msg_namelen) {
		err = sk->sk_state == SS_CONNECTED ? -EISCONN : -EOPNOTSUPP;
		goto out;
	}

	/* Send data only if both sides are not shutdown in the direction. */
	if (sk->sk_shutdown & SEND_SHUTDOWN ||
	    vsk->peer_shutdown & RCV_SHUTDOWN) {
		err = -EPIPE;
		goto out;
	}

	if (sk->sk_state != SS_CONNECTED ||
	    !vsock_addr_bound(&vsk->local_addr)) {
		err = -ENOTCONN;
		goto out;
	}

	if (!vsock_addr_bound(&vsk->remote_addr)) {
		err = -EDESTADDRREQ;
		goto out;
	}

	/* Wait for room in the produce queue to enqueue our user's data. */
	timeout = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	err = transport->notify_send_init(vsk, &send_data);
	if (err < 0)
		goto out;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	while (total_written < len) {
		ssize_t written;

		while (vsock_stream_has_space(vsk) == 0 &&
		       sk->sk_err == 0 &&
		       !(sk->sk_shutdown & SEND_SHUTDOWN) &&
		       !(vsk->peer_shutdown & RCV_SHUTDOWN)) {

			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				goto out_wait;
			}

			err = transport->notify_send_pre_block(vsk, &send_data);
			if (err < 0)
				goto out_wait;

			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);
			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				goto out_wait;
			} else if (timeout == 0) {
				err = -EAGAIN;
				goto out_wait;
			}

			prepare_to_wait(sk_sleep(sk), &wait,
					TASK_INTERRUPTIBLE);
		}

		/* These checks occur both as part of and after the loop
		 * conditional since we need to check before and after
		 * sleeping.
		 */
		if (sk->sk_err) {
			err = -sk->sk_err;
			goto out_wait;
		} else if ((sk->sk_shutdown & SEND_SHUTDOWN) ||
			   (vsk->peer_shutdown & RCV_SHUTDOWN)) {
			err = -EPIPE;
			goto out_wait;
		}

		err = transport->notify_send_pre_enqueue(vsk, &send_data);
		if (err < 0)
			goto out_wait;

		/* Note that enqueue will only write as many bytes as are free
		 * in the produce queue, so we don't need to ensure len is
		 * smaller than the queue size.  It is the caller's
		 * responsibility to check how many bytes we were able to send.
		 */
		written = transport->stream_enqueue(
				vsk, msg->msg_iov,
				len - total_written);
		if (written < 0) {
			err = -ENOMEM;
			goto out_wait;
		}

		total_written += written;

		err = transport->notify_send_post_enqueue(
				vsk, written, &send_data);
		if (err < 0)
			goto out_wait;
	}

out_wait:
	if (total_written > 0)
		err = total_written;
	finish_wait(sk_sleep(sk), &wait);
out:
	release_sock(sk);
	return err;
}

static int
vsock_stream_recvmsg(struct kiocb *kiocb,
		     struct socket *sock,
		     struct msghdr *msg, size_t len, int flags)
{
	struct sock *sk;
	struct vsock_sock *vsk;
	int err;
	size_t target;
	ssize_t copied;
	long timeout;
	struct vsock_transport_recv_notify_data recv_data;

	DEFINE_WAIT(wait);

	sk = sock->sk;
	vsk = vsock_sk(sk);
	err = 0;

	msg->msg_namelen = 0;

	lock_sock(sk);

	if (sk->sk_state != SS_CONNECTED) {
		/* Recvmsg is supposed to return 0 if a peer performs an
		 * orderly shutdown.  Differentiate between that case and when
		 * a peer has not connected or a local shutdown occurred with
		 * the SOCK_DONE flag.
		 */
		if (sock_flag(sk, SOCK_DONE))
			err = 0;
		else
			err = -ENOTCONN;

		goto out;
	}

	if (flags & MSG_OOB) {
		err = -EOPNOTSUPP;
		goto out;
	}

	/* We don't check peer_shutdown flag here since peer may actually shut
	 * down, but there can be data in the queue that a local socket can
	 * receive.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		err = 0;
		goto out;
	}

	/* It is valid on Linux to pass in a zero-length receive buffer.  This
	 * is not an error.  We may as well bail out now.
	 */
	if (!len) {
		err = 0;
		goto out;
	}

	/* We must not copy less than target bytes into the user's buffer
	 * before returning successfully, so we wait for the consume queue to
	 * have that much data to consume before dequeueing.  Note that this
	 * makes it impossible to handle cases where target is greater than
	 * the queue size.
	 */
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	if (target >= transport->stream_rcvhiwat(vsk)) {
		err = -ENOMEM;
		goto out;
	}
	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	copied = 0;

	err = transport->notify_recv_init(vsk, target, &recv_data);
	if (err < 0)
		goto out;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);

	while (1) {
		s64 ready = vsock_stream_has_data(vsk);

		if (ready < 0) {
			/* Invalid queue pair content. XXX This should be
			 * changed to a connection reset in a later change.
			 */
			err = -ENOMEM;
			goto out_wait;
		} else if (ready > 0) {
			ssize_t read;

			err = transport->notify_recv_pre_dequeue(
					vsk, target, &recv_data);
			if (err < 0)
				break;

			read = transport->stream_dequeue(
					vsk, msg->msg_iov,
					len - copied, flags);
			if (read < 0) {
				err = -ENOMEM;
				break;
			}

			copied += read;

			err = transport->notify_recv_post_dequeue(
					vsk, target, read,
					!(flags & MSG_PEEK), &recv_data);
			if (err < 0)
				goto out_wait;

			if (read >= target || flags & MSG_PEEK)
				break;

			target -= read;
		} else {
			if (sk->sk_err != 0 || (sk->sk_shutdown & RCV_SHUTDOWN)
			    || (vsk->peer_shutdown & SEND_SHUTDOWN)) {
				break;
			}

			/* Don't wait for non-blocking sockets. */
			if (timeout == 0) {
				err = -EAGAIN;
				break;
			}

			err = transport->notify_recv_pre_block(
					vsk, target, &recv_data);
			if (err < 0)
				break;

			release_sock(sk);
			timeout = schedule_timeout(timeout);
			lock_sock(sk);

			if (signal_pending(current)) {
				err = sock_intr_errno(timeout);
				break;
			} else if (timeout == 0) {
				err = -EAGAIN;
				break;
			}

			prepare_to_wait(sk_sleep(sk), &wait,
					TASK_INTERRUPTIBLE);
		}
	}

	if (sk->sk_err)
		err = -sk->sk_err;
	else if (sk->sk_shutdown & RCV_SHUTDOWN)
		err = 0;

	if (copied > 0) {
		/* We only do these additional bookkeeping/notification steps
		 * if we actually copied something out of the queue pair
		 * instead of just peeking ahead.
		 */
		if (!(flags & MSG_PEEK)) {
			/* If the other side has shutdown for sending and there
			 * is nothing more to read, then modify the socket
			 * state.
			 */
			if (vsk->peer_shutdown & SEND_SHUTDOWN) {
				if (vsock_stream_has_data(vsk) <= 0) {
					sk->sk_state = SS_UNCONNECTED;
					sock_set_flag(sk, SOCK_DONE);
					sk->sk_state_change(sk);
				}
			}
		}
		err = copied;
	}

out_wait:
	finish_wait(sk_sleep(sk), &wait);
out:
	release_sock(sk);
	return err;
}

static const struct proto_ops vsock_stream_ops = {
	.family = PF_VSOCK,
	.owner = THIS_MODULE,
	.release = vsock_release,
	.bind = vsock_bind,
	.connect = vsock_stream_connect,
	.socketpair = sock_no_socketpair,
	.accept = vsock_accept,
	.getname = vsock_getname,
	.poll = vsock_poll,
	.ioctl = sock_no_ioctl,
	.listen = vsock_listen,
	.shutdown = vsock_shutdown,
	.setsockopt = vsock_stream_setsockopt,
	.getsockopt = vsock_stream_getsockopt,
	.sendmsg = vsock_stream_sendmsg,
	.recvmsg = vsock_stream_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
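/* Illustrative userspace sketch (not part of this file): connecting a stream
 * socket to CID 2 (the host) on an arbitrary example port 1234.  The
 * connect() below lands in vsock_stream_connect() via vsock_stream_ops:
 *
 *	int fd = socket(AF_VSOCK, SOCK_STREAM, 0);
 *	struct sockaddr_vm sa = {
 *		.svm_family = AF_VSOCK,
 *		.svm_cid = 2,
 *		.svm_port = 1234,
 *	};
 *	connect(fd, (struct sockaddr *)&sa, sizeof(sa));
 */
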
static int vsock_create(struct net *net, struct socket *sock,
			int protocol, int kern)
{
	if (!sock)
		return -EINVAL;

	if (protocol && protocol != PF_VSOCK)
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_DGRAM:
		sock->ops = &vsock_dgram_ops;
		break;
	case SOCK_STREAM:
		sock->ops = &vsock_stream_ops;
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	sock->state = SS_UNCONNECTED;

	return __vsock_create(net, sock, NULL, GFP_KERNEL, 0) ? 0 : -ENOMEM;
}

static const struct net_proto_family vsock_family_ops = {
	.family = AF_VSOCK,
	.create = vsock_create,
	.owner = THIS_MODULE,
};

static long vsock_dev_do_ioctl(struct file *filp,
			       unsigned int cmd, void __user *ptr)
{
	u32 __user *p = ptr;
	int retval = 0;

	switch (cmd) {
	case IOCTL_VM_SOCKETS_GET_LOCAL_CID:
		if (put_user(transport->get_local_cid(), p) != 0)
			retval = -EFAULT;
		break;

	default:
		pr_err("Unknown ioctl %d\n", cmd);
		retval = -EINVAL;
	}

	return retval;
}

static long vsock_dev_ioctl(struct file *filp,
			    unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, (void __user *)arg);
}

#ifdef CONFIG_COMPAT
static long vsock_dev_compat_ioctl(struct file *filp,
				   unsigned int cmd, unsigned long arg)
{
	return vsock_dev_do_ioctl(filp, cmd, compat_ptr(arg));
}
#endif

static const struct file_operations vsock_device_ops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = vsock_dev_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = vsock_dev_compat_ioctl,
#endif
	.open = nonseekable_open,
};

static struct miscdevice vsock_device = {
	.name = "vsock",
	.fops = &vsock_device_ops,
};
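/* Illustrative userspace sketch (not part of this file): the misc device is
 * registered as /dev/vsock, and the local CID can be queried with the ioctl
 * handled above:
 *
 *	int fd = open("/dev/vsock", O_RDONLY);
 *	unsigned int cid;
 *	ioctl(fd, IOCTL_VM_SOCKETS_GET_LOCAL_CID, &cid);
 */
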
static int __vsock_core_init(void)
{
	int err;

	vsock_init_tables();

	vsock_device.minor = MISC_DYNAMIC_MINOR;
	err = misc_register(&vsock_device);
	if (err) {
		pr_err("Failed to register misc device\n");
		return -ENOENT;
	}

	err = proto_register(&vsock_proto, 1);	/* we want our slab */
	if (err) {
		pr_err("Cannot register vsock protocol\n");
		goto err_misc_deregister;
	}

	err = sock_register(&vsock_family_ops);
	if (err) {
		pr_err("could not register af_vsock (%d) address family: %d\n",
		       AF_VSOCK, err);
		goto err_unregister_proto;
	}

	return 0;

err_unregister_proto:
	proto_unregister(&vsock_proto);
err_misc_deregister:
	misc_deregister(&vsock_device);
	return err;
}

int vsock_core_init(const struct vsock_transport *t)
{
	int retval = mutex_lock_interruptible(&vsock_register_mutex);
	if (retval)
		return retval;

	if (transport) {
		retval = -EBUSY;
		goto out;
	}

	transport = t;
	retval = __vsock_core_init();
	if (retval)
		transport = NULL;

out:
	mutex_unlock(&vsock_register_mutex);
	return retval;
}
EXPORT_SYMBOL_GPL(vsock_core_init);
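/* Illustrative sketch (not part of this file): a transport module registers
 * its vsock_transport callbacks at module init and tears them down at exit.
 * "my_transport" and its callbacks are made-up names; the VMCI transport
 * follows this pattern:
 *
 *	static const struct vsock_transport my_transport = {
 *		.get_local_cid = my_get_local_cid,
 *		/- ... remaining callbacks ... -/
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return vsock_core_init(&my_transport);
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		vsock_core_exit();
 *	}
 */
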
void vsock_core_exit(void)
{
	mutex_lock(&vsock_register_mutex);

	misc_deregister(&vsock_device);
	sock_unregister(AF_VSOCK);
	proto_unregister(&vsock_proto);

	/* We do not want the assignment below re-ordered. */
	mb();
	transport = NULL;

	mutex_unlock(&vsock_register_mutex);
}
EXPORT_SYMBOL_GPL(vsock_core_exit);

MODULE_AUTHOR("VMware, Inc.");
MODULE_DESCRIPTION("VMware Virtual Socket Family");
MODULE_VERSION("1.0.0.0-k");
MODULE_LICENSE("GPL v2");