/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink	:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo	:	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline void __attribute__ ((format (printf, 2, 3)))
SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
#endif

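/*
 * Usage sketch (illustrative only; example_rcv is a made-up function):
 * SOCK_DEBUG() only prints when the socket has %SO_DEBUG set, so calls
 * like this are cheap enough to leave in protocol receive paths:
 *
 *	static int example_rcv(struct sock *sk, struct sk_buff *skb)
 *	{
 *		SOCK_DEBUG(sk, "example_rcv: sk %p, %u bytes\n", sk, skb->len);
 *		...
 *	}
 */
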
/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

/**
 *	struct sock_common - minimal network layer representation of sockets
 *	@skc_node: main hash linkage for various protocol lookup tables
 *	@skc_nulls_node: main hash linkage for UDP/UDP-Lite protocol
 *	@skc_refcnt: reference count
 *	@skc_tx_queue_mapping: tx queue number for this connection
 *	@skc_hash: hash value used with various protocol lookup tables
 *	@skc_family: network address family
 *	@skc_state: Connection state
 *	@skc_reuse: %SO_REUSEADDR setting
 *	@skc_bound_dev_if: bound device index if != 0
 *	@skc_bind_node: bind hash linkage for various protocol lookup tables
 *	@skc_prot: protocol handlers inside a network family
 *	@skc_net: reference to the network namespace of this socket
 *
 *	This is the minimal network layer representation of sockets, the header
 *	for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/*
	 * first fields are not copied in sock_copy()
	 */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node	skc_nulls_node;
	};
	atomic_t		skc_refcnt;
	int			skc_tx_queue_mapping;

	unsigned int		skc_hash;
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	struct hlist_node	skc_bind_node;
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net		*skc_net;
#endif
};

/**
 *	struct sock - network layer representation of sockets
 *	@__sk_common: shared layout with inet_timewait_sock
 *	@sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 *	@sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 *	@sk_lock: synchronizer
 *	@sk_rcvbuf: size of receive buffer in bytes
 *	@sk_sleep: sock wait queue
 *	@sk_dst_cache: destination cache
 *	@sk_dst_lock: destination cache lock
 *	@sk_policy: flow policy
 *	@sk_rmem_alloc: receive queue bytes committed
 *	@sk_receive_queue: incoming packets
 *	@sk_wmem_alloc: transmit queue bytes committed
 *	@sk_write_queue: Packet sending queue
 *	@sk_async_wait_queue: DMA copied packets
 *	@sk_omem_alloc: "o" is "option" or "other"
 *	@sk_wmem_queued: persistent queue size
 *	@sk_forward_alloc: space allocated forward
 *	@sk_allocation: allocation mode
 *	@sk_sndbuf: size of send buffer in bytes
 *	@sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *		   %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 *	@sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 *	@sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 *	@sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 *	@sk_gso_max_size: Maximum GSO segment size to build
 *	@sk_lingertime: %SO_LINGER l_linger setting
 *	@sk_backlog: always used with the per-socket spinlock held
 *	@sk_callback_lock: used with the callbacks in the end of this struct
 *	@sk_error_queue: rarely used
 *	@sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
 *			  IPV6_ADDRFORM for instance)
 *	@sk_err: last error
 *	@sk_err_soft: errors that don't cause failure but are the cause of a
 *		      persistent failure not just 'timed out'
 *	@sk_drops: raw/udp drops counter
 *	@sk_ack_backlog: current listen backlog
 *	@sk_max_ack_backlog: listen backlog set in listen()
 *	@sk_priority: %SO_PRIORITY setting
 *	@sk_type: socket type (%SOCK_STREAM, etc)
 *	@sk_protocol: which protocol this socket belongs in this network family
 *	@sk_peercred: %SO_PEERCRED setting
 *	@sk_rcvlowat: %SO_RCVLOWAT setting
 *	@sk_rcvtimeo: %SO_RCVTIMEO setting
 *	@sk_sndtimeo: %SO_SNDTIMEO setting
 *	@sk_filter: socket filtering instructions
 *	@sk_protinfo: private area, net family specific, when not using slab
 *	@sk_timer: sock cleanup timer
 *	@sk_stamp: time stamp of last packet received
 *	@sk_socket: Identd and reporting IO signals
 *	@sk_user_data: RPC layer private data
 *	@sk_sndmsg_page: cached page for sendmsg
 *	@sk_sndmsg_off: cached offset for sendmsg
 *	@sk_send_head: front of stuff to transmit
 *	@sk_security: used by security modules
 *	@sk_mark: generic packet mark
 *	@sk_write_pending: a write to stream socket waits to start
 *	@sk_state_change: callback to indicate change in the state of the sock
 *	@sk_data_ready: callback to indicate there is data to be processed
 *	@sk_write_space: callback to indicate there is buffer space available
 *			 for sending
 *	@sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 *	@sk_backlog_rcv: callback to process the backlog
 *	@sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_copy_start		__sk_common.skc_hash
#define sk_hash			__sk_common.skc_hash
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_shutdown  : 2,
				sk_no_check  : 2,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
	kmemcheck_bitfield_end(flags);
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
	} sk_backlog;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_route_caps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	atomic_t		sk_drops;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;
	/* XXX 4 bytes hole on 64 bit */
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk has ALREADY been grabbed, e.g. it has been found in a hash
   table or a list and the lookup is made under a lock preventing hash
   table modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, e.g. it is true in the context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return 1;
	}
	return 0;
}

static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
{
	int rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)

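/*
 * Lookup sketch (illustrative only; example_lookup and the hash match
 * are made up): the sk_for_each() iterators walk one hash chain and
 * rely on the caller holding the lock that protects that chain:
 *
 *	static struct sock *example_lookup(struct hlist_head *head,
 *					   unsigned int hash)
 *	{
 *		struct sock *sk;
 *		struct hlist_node *node;
 *
 *		sk_for_each(sk, node, head)
 *			if (sk->sk_hash == hash)
 *				return sk;
 *		return NULL;
 *	}
 */
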
/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

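/*
 * Usage sketch (illustrative): sock_set_flag() and sock_reset_flag()
 * use the non-atomic bitops, so callers are expected to own the socket
 * lock (or the socket must not yet be visible to other contexts):
 *
 *	lock_sock(sk);
 *	if (!sock_flag(sk, SOCK_DEAD))
 *		sock_set_flag(sk, SOCK_LINGER);
 *	release_sock(sk);
 */
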
static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

/* The per-socket spinlock must be held here. */
static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;
}

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}

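/*
 * Receive-path sketch (illustrative; example_do_rcv is made up,
 * modeled on what protocols such as TCP do): in BH context, packets
 * are processed directly unless a process owns the socket, in which
 * case they go to the backlog and are replayed through
 * sk_backlog_rcv() when release_sock() runs:
 *
 *	bh_lock_sock(sk);
 *	if (!sock_owned_by_user(sk))
 *		example_do_rcv(sk, skb);
 *	else
 *		sk_add_backlog(sk, skb);
 *	bh_unlock_sock(sk);
 */
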
#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})

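/*
 * Wait sketch (illustrative): sk_wait_event() releases the socket
 * lock, sleeps until woken or the timeout expires, re-takes the lock
 * and re-evaluates the condition; callers wrap it in a prepared wait:
 *
 *	DEFINE_WAIT(wait);
 *
 *	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
 *	sk_wait_event(sk, &timeo,
 *		      !skb_queue_empty(&sk->sk_receive_queue));
 *	finish_wait(sk->sk_sleep, &wait);
 */
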
extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					long timeout);
	int			(*connect)(struct sock *sk,
					struct sockaddr *uaddr,
					int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
};

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

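/*
 * Registration sketch (illustrative; example_prot and example_init are
 * made up): a protocol fills in a struct proto and registers it; with
 * alloc_slab != 0 the core creates a private slab cache of obj_size
 * bytes for this protocol's sockets:
 *
 *	static struct proto example_prot = {
 *		.name	  = "EXAMPLE",
 *		.owner	  = THIS_MODULE,
 *		.obj_size = sizeof(struct sock),
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return proto_register(&example_prot, 1);
 *	}
 */
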
#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */


#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif


/* With per-bucket locks this operation is not atomic, so this
 * version is no worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

/*
 * Functions for memory accounting
 */
extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
extern void __sk_mem_reclaim(struct sock *sk);

#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

static inline int sk_has_account(struct sock *sk)
{
	/* return true if protocol supports memory accounting */
	return !!sk->sk_prot->memory_allocated;
}

static inline int sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline int sk_rmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;
}

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}

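/*
 * Accounting sketch (illustrative): a typical transmit path first asks
 * the accounting layer for the memory and only then charges it to the
 * socket; sk_wmem_free_skb() above is the matching release step:
 *
 *	if (!sk_wmem_schedule(sk, skb->truesize))
 *		return -ENOBUFS;
 *	sk->sk_wmem_queued += skb->truesize;
 *	sk_mem_charge(sk, skb->truesize);
 */
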
/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
			(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

extern void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void release_sock(struct sock *sk);

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))

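/*
 * Locking sketch (illustrative): process context takes the sleeping
 * lock, BH context only the spinlock half; the backlog machinery
 * described above sock_owned_by_user() ties the two together:
 *
 *	lock_sock(sk);		(process context)
 *	...
 *	release_sock(sk);
 *
 *	bh_lock_sock(sk);	(softirq context)
 *	...
 *	bh_unlock_sock(sk);
 */
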
extern struct sock		*sk_alloc(struct net *net, int family,
					  gfp_t priority,
					  struct proto *prot);
extern void			sk_free(struct sock *sk);
extern void			sk_release_kernel(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						unsigned int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, unsigned int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						struct page *page,
						int offset, size_t size,
						int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   by the time it is called, the socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   while they sit in queue. Otherwise, packets may leak when a socket
 *   is looked up by one CPU while unhashing is done by another CPU.
 *   This is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they avoid this race as well.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}

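/*
 * Reference sketch (illustrative; example_lookup and chain_lock are
 * made up): a lookup takes a reference under the chain lock before
 * dropping it, and the caller releases the reference with sock_put():
 *
 *	spin_lock(&chain_lock);
 *	sk = example_lookup(head, hash);
 *	if (sk)
 *		sock_hold(sk);
 *	spin_unlock(&chain_lock);
 *	...
 *	if (sk)
 *		sock_put(sk);
 */
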
extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
			  const int nested);

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk->sk_tx_queue_mapping;
}

static inline bool sk_tx_queue_recorded(const struct sock *sk)
{
	return (sk && sk->sk_tx_queue_mapping >= 0);
}

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk_tx_queue_clear(sk);
	sk->sk_socket = sock;
}

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that the parent inode holds a reference count on this struct sock;
 * we do not release it in this function, because the protocol
 * probably wants some additional cleanups or even to continue
 * working with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_sleep = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk_set_socket(sk, parent);
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);

static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from,
						      page_address(page) + off,
						      copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	   += copy;
	skb->data_len	   += copy;
	skb->truesize	   += copy;
	sk->sk_wmem_queued += copy;
	sk_mem_charge(sk, copy);
	return 0;
}

/**
 * sk_wmem_alloc_get - returns write allocations
 * @sk: socket
 *
 * Returns sk_wmem_alloc minus initial offset of one
 */
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) - 1;
}

/**
 * sk_rmem_alloc_get - returns read allocations
 * @sk: socket
 *
 * Returns sk_rmem_alloc
 */
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}

/**
 * sk_has_allocations - check if allocations are outstanding
 * @sk: socket
 *
 * Returns true if socket has write or read allocations
 */
static inline int sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}

/**
 * sk_has_sleeper - check if there are any waiting processes
 * @sk: socket
 *
 * Returns true if socket has waiting processes
 *
 * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory
 * barrier call. They were added due to the race found within the tcp code.
 *
 * Consider following tcp code paths:
 *
 * CPU1                             CPU2
 *
 * sys_select                       receive packet
 *   ...                            ...
 *   __add_wait_queue               update tp->rcv_nxt
 *   ...                            ...
 *   tp->rcv_nxt check              sock_def_readable
 *   ...                            {
 *   schedule                         ...
 *                                    if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
 *                                        wake_up_interruptible(sk->sk_sleep)
 *                                    ...
 *                                  }
 *
 * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
 * in its cache, and so does the tp->rcv_nxt update on CPU2 side. CPU1
 * could then end up calling schedule and sleeping forever if no more
 * data arrives on the socket.
 *
 * The sk_has_sleeper is always called right after a call to read_lock, so we
 * can use smp_mb__after_lock barrier.
 */
static inline int sk_has_sleeper(struct sock *sk)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier is paired in the sock_poll_wait.
	 */
	smp_mb__after_lock();
	return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
}

/**
 * sock_poll_wait - place memory barrier behind the poll_wait call.
 * @filp:          file
 * @wait_address:  socket wait queue
 * @p:             poll_table
 *
 * See the comments in the sk_has_sleeper function.
 */
static inline void sock_poll_wait(struct file *filp,
		wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && wait_address) {
		poll_wait(filp, wait_address, p);
		/*
		 * We need to be sure we are in sync with the
		 * socket flags modification.
		 *
		 * This memory barrier is paired in the sk_has_sleeper.
		 */
		smp_mb();
	}
}

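/*
 * Poll sketch (illustrative; example_poll is made up, modeled on
 * datagram_poll-style implementations): a protocol's poll() goes
 * through sock_poll_wait() so that its barrier pairs with the
 * sk_has_sleeper() check on the wakeup side:
 *
 *	static unsigned int example_poll(struct file *file,
 *					 struct socket *sock,
 *					 poll_table *wait)
 *	{
 *		struct sock *sk = sock->sk;
 *		unsigned int mask = 0;
 *
 *		sock_poll_wait(file, sk->sk_sleep, wait);
 *		if (!skb_queue_empty(&sk->sk_receive_queue))
 *			mask |= POLLIN | POLLRDNORM;
 *		if (sock_writeable(sk))
 *			mask |= POLLOUT | POLLWRNORM;
 *		return mask;
 *	}
 */
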
/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	/*
	 * We used to take a refcount on sk, but the following operation
	 * is enough to guarantee sk_free() won't free this sock until
	 * all in-flight packets are completed
	 */
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

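/*
 * Usage sketch (illustrative): blocking paths typically check for a
 * pending error before sleeping and report it instead of waiting;
 * sock_error() already returns the negated errno:
 *
 *	err = sock_error(sk);
 *	if (err)
 *		return err;
 */
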
static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC))
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

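/*
 * Timeout sketch (illustrative): a blocking receive derives its
 * timeout from the socket, sleeps via sk_wait_data(), and converts an
 * interrupting signal into the right errno:
 *
 *	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
 *
 *	while (skb_queue_empty(&sk->sk_receive_queue)) {
 *		if (!timeo)
 *			return -EAGAIN;
 *		if (signal_pending(current))
 *			return sock_intr_errno(timeo);
 *		sk_wait_data(sk, &timeo);
 *	}
 */
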
extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
				  struct sk_buff *skb);

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	/*
	 * generate control messages if
	 * - receive time stamping in software requested (SOCK_RCVTSTAMP
	 *   or SOCK_TIMESTAMPING_RX_SOFTWARE)
	 * - software time stamp available and wanted
	 *   (SOCK_TIMESTAMPING_SOFTWARE)
	 * - hardware time stamps available and wanted
	 *   (SOCK_TIMESTAMPING_SYS_HARDWARE or
	 *   SOCK_TIMESTAMPING_RAW_HARDWARE)
	 */
	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
	    (hwtstamps->hwtstamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
	    (hwtstamps->syststamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;
}

extern void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
				   struct sk_buff *skb);

/**
 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @msg:	outgoing packet
 * @sk:		socket sending this packet
 * @shtx:	filled with instructions for time stamping
 *
 * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if
 * parameters are invalid.
 */
extern int sock_tx_timestamp(struct msghdr *msg,
			     struct sock *sk,
			     union skb_shared_tx *shtx);


/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether DMA operations copied this data early
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif

static inline
struct net *sock_net(const struct sock *sk)
{
#ifdef CONFIG_NET_NS
	return sk->sk_net;
#else
	return &init_net;
#endif
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
#ifdef CONFIG_NET_NS
	sk->sk_net = net;
#endif
}

/*
 * Kernel sockets, e.g. rtnl or icmp_socket, are a part of a namespace.
 * They should not hold a reference to a namespace in order to allow
 * the namespace to be stopped.
 * Sockets after sk_change_net should be released using sk_release_kernel.
 */
static inline void sk_change_net(struct sock *sk, struct net *net)
{
	put_net(sock_net(sk));
	sock_net_set(sk, hold_net(net));
}

static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (unlikely(skb->sk)) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}

extern void sock_enable_timestamp(struct sock *sk, int flag);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);

/*
 *	Enable debug/info messages
 */
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
	do { if (net_msg_warn) printk(fmt, ##args); } while (0)

#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_msg_warn && net_ratelimit()) printk(fmt, ##args); } while (0)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern void sk_init(void);

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */