/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up.
 *		Alan Cox	:	New fields for options.
 *		Pauline Middelink:	identd support.
 *		Alan Cox	:	Eliminate low level recv/recvfrom.
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops.
 *		Arnaldo C. Melo	:	Removed net_pinfo and tp_pinfo and made
 *					protinfo a plain void pointer, as the
 *					protocol-specific parts were moved to
 *					their respective headers, and ipv4/v6, etc.
 *					now use private slab caches for their socks.
 *		Pedro Hortas	:	New flags field for socket options.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <asm/atomic.h>
#include <net/checksum.h>
/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */
/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline void __attribute__ ((format (printf, 2, 3)))
SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
#endif
/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;
/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 *	@skc_refcnt: reference count
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_u16hashes: two u16 hash values used by UDP lookup tables
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/*
	 * first fields are not copied in sock_copy()
	 */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node skc_nulls_node;
	};
	atomic_t		skc_refcnt;
	int			skc_tx_queue_mapping;

	union {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_nulls_node skc_portaddr_node;
	};
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net		*skc_net;
#endif
};
/**
 *	struct sock - network layer representation of sockets
 *	@__sk_common: shared layout with inet_timewait_sock
 *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 *	@sk_lock: synchronizer
 *	@sk_rcvbuf: size of receive buffer in bytes
 *	@sk_sleep: sock wait queue
 *	@sk_dst_cache: destination cache
 *	@sk_dst_lock: destination cache lock
 *	@sk_policy: flow policy
 *	@sk_rmem_alloc: receive queue bytes committed
 *	@sk_receive_queue: incoming packets
 *	@sk_wmem_alloc: transmit queue bytes committed
 *	@sk_write_queue: Packet sending queue
 *	@sk_async_wait_queue: DMA copied packets
 *	@sk_omem_alloc: "o" is "option" or "other"
 *	@sk_wmem_queued: persistent queue size
 *	@sk_forward_alloc: space allocated forward
 *	@sk_allocation: allocation mode
 *	@sk_sndbuf: size of send buffer in bytes
 *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 *	@sk_no_check: %SO_NO_CHECK setting, whether or not checksumming is disabled
 *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 *	@sk_gso_max_size: Maximum GSO segment size to build
 *	@sk_lingertime: %SO_LINGER l_linger setting
 *	@sk_backlog: always used with the per-socket spinlock held
 *	@sk_callback_lock: used with the callbacks in the end of this struct
 *	@sk_error_queue: rarely used
 *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
 *			  IPV6_ADDRFORM for instance)
 *	@sk_err: last error
 *	@sk_err_soft: errors that don't cause failure but are the cause of a
 *		      persistent failure not just 'timed out'
 *	@sk_drops: raw/udp drops counter
 *	@sk_ack_backlog: current listen backlog
 *	@sk_max_ack_backlog: listen backlog set in listen()
 *	@sk_priority: %SO_PRIORITY setting
 *	@sk_type: socket type (%SOCK_STREAM, etc)
 *	@sk_protocol: which protocol this socket belongs in this network family
 *	@sk_peercred: %SO_PEERCRED setting
 *	@sk_rcvlowat: %SO_RCVLOWAT setting
 *	@sk_rcvtimeo: %SO_RCVTIMEO setting
 *	@sk_sndtimeo: %SO_SNDTIMEO setting
 *	@sk_rxhash: flow hash received from netif layer
 *	@sk_filter: socket filtering instructions
 *	@sk_protinfo: private area, net family specific, when not using slab
 *	@sk_timer: sock cleanup timer
 *	@sk_stamp: time stamp of last packet received
 *	@sk_socket: Identd and reporting IO signals
 *	@sk_user_data: RPC layer private data
 *	@sk_sndmsg_page: cached page for sendmsg
 *	@sk_sndmsg_off: cached offset for sendmsg
 *	@sk_send_head: front of stuff to transmit
 *	@sk_security: used by security modules
 *	@sk_mark: generic packet mark
 *	@sk_write_pending: a write to stream socket waits to start
 *	@sk_state_change: callback to indicate change in the state of the sock
 *	@sk_data_ready: callback to indicate there is data to be processed
 *	@sk_write_space: callback to indicate there is buffer sending space available
 *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 *	@sk_backlog_rcv: callback to process the backlog
 *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_copy_start		__sk_common.skc_hash
#define sk_hash			__sk_common.skc_hash
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_shutdown  : 2,
				sk_no_check  : 2,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
	kmemcheck_bitfield_end(flags);
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
		int len;
		int limit;
	} sk_backlog;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	spinlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_route_caps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
#ifdef CONFIG_RPS
	__u32			sk_rxhash;
#endif
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	atomic_t		sk_drops;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;
	/* XXX 4 bytes hole on 64 bit */
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};
/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}
/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return 1;
	}
	return 0;
}

static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
{
	int rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_head_rcu(&sk->sk_node, list);
}

static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_rcu(__sk, node, list) \
	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)
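
/*
 * Illustrative sketch (not part of the original header): a protocol
 * lookup typically walks one hash chain with sk_for_each() while the
 * table lock is held, and takes a reference with sock_hold() before
 * the socket escapes the locked region. my_lookup() and my_table_lock
 * are hypothetical names.
 *
 *	static struct sock *my_lookup(struct hlist_head *chain,
 *				      unsigned int hash)
 *	{
 *		struct sock *sk;
 *		struct hlist_node *node;
 *
 *		spin_lock(&my_table_lock);
 *		sk_for_each(sk, node, chain) {
 *			if (sk->sk_hash == hash) {
 *				sock_hold(sk);	// ref taken under the chain lock
 *				spin_unlock(&my_table_lock);
 *				return sk;
 *			}
 *		}
 *		spin_unlock(&my_table_lock);
 *		return NULL;
 *	}
 */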
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
	SOCK_FASYNC, /* fasync() active */
};
static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}
/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}
/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}
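
/*
 * Illustrative sketch (not from the original header): the canonical
 * receive pattern built on the helpers above. If a user context owns
 * the socket, the skb goes to the backlog and is processed later by
 * release_sock(); otherwise it is handled immediately. my_rcv() and
 * my_do_rcv() are hypothetical protocol handlers.
 *
 *	static int my_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		int rc = 0;
 *
 *		bh_lock_sock(sk);
 *		if (!sock_owned_by_user(sk)) {
 *			rc = my_do_rcv(sk, skb);	// process immediately
 *		} else if (sk_add_backlog(sk, skb)) {
 *			kfree_skb(skb);			// backlog limit hit: drop
 *			rc = -ENOBUFS;
 *		}
 *		bh_unlock_sock(sk);
 *		return rc;
 *	}
 */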
static inline void sock_rps_record_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_record_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_reset_flow(const struct sock *sk)
{
#ifdef CONFIG_RPS
	struct rps_sock_flow_table *sock_flow_table;

	rcu_read_lock();
	sock_flow_table = rcu_dereference(rps_sock_flow_table);
	rps_reset_sock_flow(sock_flow_table, sk->sk_rxhash);
	rcu_read_unlock();
#endif
}

static inline void sock_rps_save_rxhash(struct sock *sk, u32 rxhash)
{
#ifdef CONFIG_RPS
	if (unlikely(sk->sk_rxhash != rxhash)) {
		sock_rps_reset_flow(sk);
		sk->sk_rxhash = rxhash;
	}
#endif
}
#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
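
/*
 * Illustrative sketch (not from the original header): a typical wait
 * loop around sk_wait_event(). The macro drops the socket lock while
 * sleeping and retakes it before rechecking the condition, mirroring
 * what sk_wait_data() does. my_wait_for_data() is hypothetical.
 *
 *	static int my_wait_for_data(struct sock *sk, long *timeo)
 *	{
 *		DEFINE_WAIT(wait);
 *		int done;
 *
 *		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
 *		done = sk_wait_event(sk, timeo,
 *				     !skb_queue_empty(&sk->sk_receive_queue));
 *		finish_wait(sk_sleep(sk), &wait);
 *		return done;
 *	}
 */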
extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;

	struct kmem_cache	*slab;
	unsigned int		obj_size;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);
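
/*
 * Illustrative sketch (not from the original header): a minimal
 * protocol block as a module might register it. All my_* names are
 * hypothetical; .name and .obj_size drive the per-socket slab cache
 * that proto_register() creates when alloc_slab is nonzero.
 *
 *	static struct proto my_proto = {
 *		.name	  = "MYPROTO",
 *		.owner	  = THIS_MODULE,
 *		.close	  = my_close,
 *		.sendmsg  = my_sendmsg,
 *		.recvmsg  = my_recvmsg,
 *		.hash	  = my_hash,
 *		.unhash	  = my_unhash,
 *		.obj_size = sizeof(struct my_sock),
 *	};
 *
 *	// module init:  proto_register(&my_proto, 1);
 *	// module exit:  proto_unregister(&my_proto);
 */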
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */
#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif

/* With per-bucket locks this operation is not atomic, so that
 * this version is not worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}
/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Ports 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8
/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}
struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}
/*
 * Functions for memory accounting
 */
extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
extern void __sk_mem_reclaim(struct sock *sk);

#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}
static inline int sk_has_account(struct sock *sk)
{
	/* return true if protocol supports memory accounting */
	return !!sk->sk_prot->memory_allocated;
}

static inline int sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline int sk_rmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;
}

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}
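
/*
 * Illustrative sketch (not from the original header): how a protocol's
 * transmit path typically drives the accounting helpers above. The skb
 * is charged against sk_forward_alloc once __sk_mem_schedule() has
 * reserved enough quanta. my_queue_for_tx() is hypothetical.
 *
 *	static int my_queue_for_tx(struct sock *sk, struct sk_buff *skb)
 *	{
 *		if (!sk_wmem_schedule(sk, skb->truesize))
 *			return -ENOMEM;			// over memory limits
 *
 *		sk_mem_charge(sk, skb->truesize);	// consume reservation
 *		sk->sk_wmem_queued += skb->truesize;
 *		__skb_queue_tail(&sk->sk_write_queue, skb);
 *		return 0;
 *	}
 */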
/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
			(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)
extern void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void release_sock(struct sock *sk);

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
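
/*
 * Illustrative sketch (not from the original header): process context
 * takes the socket lock around any state change; release_sock() then
 * runs whatever the softirq side queued to the backlog while the lock
 * was owned. my_set_rcvlowat() is hypothetical.
 *
 *	static void my_set_rcvlowat(struct sock *sk, int val)
 *	{
 *		lock_sock(sk);		// may sleep; excludes BH via backlog
 *		sk->sk_rcvlowat = val;	// safe: we own the socket
 *		release_sock(sk);	// processes backlogged skbs
 *	}
 */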
extern struct sock		*sk_alloc(struct net *net, int family,
					  gfp_t priority,
					  struct proto *prot);
extern void			sk_free(struct sock *sk);
extern void			sk_release_kernel(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						unsigned int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int                      sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int                      sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int                      sock_no_socketpair(struct socket *,
						   struct socket *);
extern int                      sock_no_accept(struct socket *,
					       struct socket *, int);
extern int                      sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int             sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int                      sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int                      sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, unsigned int);
extern int                      sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int                      sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						struct page *page,
						int offset, size_t size,
						int flags);
/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen);

extern void sk_common_release(struct sock *sk);
/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}
/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   to the time when it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   when they sit in queue. Otherwise, packets will leak to hole, when
 *   socket is looked up by one cpu and unhashed by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they are immune as well.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}
extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
			  const int nested);

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk->sk_tx_queue_mapping;
}

static inline bool sk_tx_queue_recorded(const struct sock *sk)
{
	return (sk && sk->sk_tx_queue_mapping >= 0);
}

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk_tx_queue_clear(sk);
	sk->sk_socket = sock;
}

static inline wait_queue_head_t *sk_sleep(struct sock *sk)
{
	return sk->sk_sleep;
}
/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_sleep = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk_set_socket(sk, parent);
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);
static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return rcu_dereference_check(sk->sk_dst_cache, rcu_read_lock_held() ||
						       sock_owned_by_user(sk) ||
						       lockdep_is_held(&sk->sk_lock.slock));
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(sk->sk_dst_cache);
	if (dst)
		dst_hold(dst);
	rcu_read_unlock();
	return dst;
}
extern void sk_reset_txq(struct sock *sk);

static inline void dst_negative_advice(struct sock *sk)
{
	struct dst_entry *ndst, *dst = __sk_dst_get(sk);

	if (dst && dst->ops->negative_advice) {
		ndst = dst->ops->negative_advice(dst);

		if (ndst != dst) {
			rcu_assign_pointer(sk->sk_dst_cache, ndst);
			sk_reset_txq(sk);
		}
	}
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	/*
	 * This can be called while sk is owned by the caller only,
	 * with no state that can be checked in a rcu_dereference_check() cond
	 */
	old_dst = rcu_dereference_raw(sk->sk_dst_cache);
	rcu_assign_pointer(sk->sk_dst_cache, dst);
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	spin_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	spin_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	__sk_dst_set(sk, NULL);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	spin_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	spin_unlock(&sk->sk_dst_lock);
}
extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);
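
/*
 * Illustrative sketch (not from the original header): an output path
 * typically validates the cached route before use and re-routes when
 * the cache has been invalidated. my_route_output() and the cookie
 * value 0 are hypothetical.
 *
 *	static struct dst_entry *my_get_route(struct sock *sk)
 *	{
 *		struct dst_entry *dst = __sk_dst_check(sk, 0);
 *
 *		if (dst == NULL) {
 *			dst = my_route_output(sk);	// fresh lookup
 *			if (dst == NULL)
 *				return NULL;
 *			sk_setup_caps(sk, dst);		// cache dst, set sk_route_caps
 *		}
 *		return dst;
 *	}
 */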
static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from,
						      page_address(page) + off,
						      copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	     += copy;
	skb->data_len	     += copy;
	skb->truesize	     += copy;
	sk->sk_wmem_queued   += copy;
	sk_mem_charge(sk, copy);
	return 0;
}
/**
 * sk_wmem_alloc_get - returns write allocations
 * @sk: socket
 *
 * Returns sk_wmem_alloc minus initial offset of one
 */
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) - 1;
}

/**
 * sk_rmem_alloc_get - returns read allocations
 * @sk: socket
 *
 * Returns sk_rmem_alloc
 */
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}

/**
 * sk_has_allocations - check if allocations are outstanding
 * @sk: socket
 *
 * Returns true if socket has write or read allocations
 */
static inline int sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}
/**
 * sk_has_sleeper - check if there are any waiting processes
 * @sk: socket
 *
 * Returns true if socket has waiting processes
 *
 * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory
 * barrier call. They were added due to the race found within the tcp code.
 *
 * Consider following tcp code paths:
 *
 * CPU1                     CPU2
 *
 * sys_select               receive packet
 *   ...                    ...
 *   __add_wait_queue       update tp->rcv_nxt
 *   ...                    ...
 *   tp->rcv_nxt check      sock_def_readable
 *   ...                    {
 *   schedule               ...
 *                          if (sk_sleep(sk) && waitqueue_active(sk_sleep(sk)))
 *                              wake_up_interruptible(sk_sleep(sk))
 *                          ...
 *                          }
 *
 * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
 * in its cache, and so does the tp->rcv_nxt update on CPU2 side. CPU1
 * could then end up calling schedule and sleep forever if there is no more
 * data on the socket.
 *
 * The sk_has_sleeper is always called right after a call to read_lock, so we
 * can use smp_mb__after_lock barrier.
 */
static inline int sk_has_sleeper(struct sock *sk)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier is paired in the sock_poll_wait.
	 */
	smp_mb__after_lock();
	return sk_sleep(sk) && waitqueue_active(sk_sleep(sk));
}
/**
 * sock_poll_wait - place memory barrier behind the poll_wait call.
 * @filp:          file
 * @wait_address:  socket wait queue
 * @p:             poll_table
 *
 * See the comments in the sk_has_sleeper function.
 */
static inline void sock_poll_wait(struct file *filp,
		wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && wait_address) {
		poll_wait(filp, wait_address, p);
		/*
		 * We need to be sure we are in sync with the
		 * socket flags modification.
		 *
		 * This memory barrier is paired in the sk_has_sleeper.
		 */
		smp_mb();
	}
}
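
/*
 * Illustrative sketch (not from the original header): a protocol's
 * poll() implementation registers on the socket wait queue through
 * sock_poll_wait() and then computes the ready mask. my_poll() is
 * hypothetical; error and hangup checks are elided.
 *
 *	static unsigned int my_poll(struct file *file, struct socket *sock,
 *				    poll_table *wait)
 *	{
 *		struct sock *sk = sock->sk;
 *		unsigned int mask = 0;
 *
 *		sock_poll_wait(file, sk_sleep(sk), wait);
 *		if (!skb_queue_empty(&sk->sk_receive_queue))
 *			mask |= POLLIN | POLLRDNORM;
 *		if (sock_writeable(sk))
 *			mask |= POLLOUT | POLLWRNORM;
 *		return mask;
 *	}
 */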
/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	/*
	 * We used to take a refcount on sk, but following operation
	 * is enough to guarantee sk_free() won't free this sock until
	 * all in-flight packets are completed
	 */
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}
extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast sk->sk_rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}
/*
 *	Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}
static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC))
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}
/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}
extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
				  struct sk_buff *skb);

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	/*
	 * generate control messages if
	 * - receive time stamping in software requested (SOCK_RCVTSTAMP
	 *   or SOCK_TIMESTAMPING_RX_SOFTWARE)
	 * - software time stamp available and wanted
	 *   (SOCK_TIMESTAMPING_SOFTWARE)
	 * - hardware time stamps available and wanted
	 *   (SOCK_TIMESTAMPING_SYS_HARDWARE or
	 *    SOCK_TIMESTAMPING_RAW_HARDWARE)
	 */
	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
	    (hwtstamps->hwtstamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
	    (hwtstamps->syststamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;
}

extern void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
				   struct sk_buff *skb);
/**
 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @msg:	outgoing packet
 * @sk:		socket sending this packet
 * @shtx:	filled with instructions for time stamping
 *
 * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if
 * parameters are invalid.
 */
extern int sock_tx_timestamp(struct msghdr *msg,
			     struct sock *sk,
			     union skb_shared_tx *shtx);
/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether DMA operations copied this data early
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif
static inline
struct net *sock_net(const struct sock *sk)
{
#ifdef CONFIG_NET_NS
	return sk->sk_net;
#else
	return &init_net;
#endif
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
#ifdef CONFIG_NET_NS
	sk->sk_net = net;
#endif
}

/*
 * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace.
 * They should not hold a reference to a namespace in order to allow
 * the namespace to be stopped.
 * Sockets after sk_change_net should be released using sk_release_kernel
 */
static inline void sk_change_net(struct sock *sk, struct net *net)
{
	put_net(sock_net(sk));
	sock_net_set(sk, hold_net(net));
}
static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (unlikely(skb->sk)) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}
extern void sock_enable_timestamp(struct sock *sk, int flag);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);

/*
 *	Enable debug/info messages
 */
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
	do { if (net_msg_warn) printk(fmt,##args); } while (0)

#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_msg_warn && net_ratelimit()) printk(fmt,##args); } while(0)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern void sk_init(void);

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */