/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the AF_INET socket handler.
 *
 * Version:	@(#)sock.h	1.0.4	05/13/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche <flla@stud.uni-sb.de>
 *
 * Fixes:
 *		Alan Cox	:	Volatiles in skbuff pointers. See
 *					skbuff comments. May be overdone,
 *					better to prove they can be removed
 *					than the reverse.
 *		Alan Cox	:	Added a zapped field for tcp to note
 *					a socket is reset and must stay shut up
 *		Alan Cox	:	New fields for options
 *		Pauline Middelink:	identd support
 *		Alan Cox	:	Eliminate low level recv/recvfrom
 *		David S. Miller	:	New socket lookup architecture.
 *		Steve Whitehouse:	Default routines for sock_ops
 *		Arnaldo C. Melo	:	removed net_pinfo, tp_pinfo and made
 *					protinfo be just a void pointer, as the
 *					protocol specific parts were moved to
 *					respective headers and ipv4/v6, etc now
 *					use private slabcaches for its socks
 *		Pedro Hortas	:	New flags field for socket options
 *
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _SOCK_H
#define _SOCK_H

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/list_nulls.h>
#include <linux/timer.h>
#include <linux/cache.h>
#include <linux/module.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>	/* struct sk_buff */
#include <linux/mm.h>
#include <linux/security.h>
#include <linux/slab.h>

#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/poll.h>

#include <asm/atomic.h>
#include <net/dst.h>
#include <net/checksum.h>

/*
 * This structure really needs to be cleaned up.
 * Most of it is for TCP, and not used by any of
 * the other protocols.
 */

/* Define this to get the SOCK_DBG debugging facility. */
#define SOCK_DEBUGGING
#ifdef SOCK_DEBUGGING
#define SOCK_DEBUG(sk, msg...) do { if ((sk) && sock_flag((sk), SOCK_DBG)) \
					printk(KERN_DEBUG msg); } while (0)
#else
/* Validate arguments and do nothing */
static inline void __attribute__ ((format (printf, 2, 3)))
SOCK_DEBUG(struct sock *sk, const char *msg, ...)
{
}
#endif
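
/*
 * Illustrative only: a minimal sketch of how a protocol might use
 * SOCK_DEBUG() in its receive path. The function and message below are
 * hypothetical, not part of this header; output appears only when the
 * socket has SOCK_DBG set (via SO_DEBUG) and SOCK_DEBUGGING is defined.
 */
#if 0
static int example_rcv(struct sock *sk, struct sk_buff *skb)
{
	/* compiles down to argument validation when debugging is off */
	SOCK_DEBUG(sk, "example_rcv: sk=%p len=%u\n", sk, skb->len);
	return 0;
}
#endif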

/* This is the per-socket lock.  The spinlock provides a synchronization
 * between user contexts and software interrupt processing, whereas the
 * mini-semaphore synchronizes multiple users amongst themselves.
 */
typedef struct {
	spinlock_t		slock;
	int			owned;
	wait_queue_head_t	wq;
	/*
	 * We express the mutex-alike socket_lock semantics
	 * to the lock validator by explicitly managing
	 * the slock as a lock variant (in addition to
	 * the slock itself):
	 */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif
} socket_lock_t;

struct sock;
struct proto;
struct net;

/**
 * struct sock_common - minimal network layer representation of sockets
 * @skc_node: main hash linkage for various protocol lookup tables
 * @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
 * @skc_refcnt: reference count
 * @skc_tx_queue_mapping: tx queue number for this connection
 * @skc_hash: hash value used with various protocol lookup tables
 * @skc_u16hashes: two u16 hash values used by UDP lookup tables
 * @skc_family: network address family
 * @skc_state: Connection state
 * @skc_reuse: %SO_REUSEADDR setting
 * @skc_bound_dev_if: bound device index if != 0
 * @skc_bind_node: bind hash linkage for various protocol lookup tables
 * @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
 * @skc_prot: protocol handlers inside a network family
 * @skc_net: reference to the network namespace of this socket
 *
 * This is the minimal network layer representation of sockets, the header
 * for struct sock and struct inet_timewait_sock.
 */
struct sock_common {
	/*
	 * first fields are not copied in sock_copy()
	 */
	union {
		struct hlist_node	skc_node;
		struct hlist_nulls_node	skc_nulls_node;
	};
	atomic_t		skc_refcnt;
	int			skc_tx_queue_mapping;

	union {
		unsigned int	skc_hash;
		__u16		skc_u16hashes[2];
	};
	unsigned short		skc_family;
	volatile unsigned char	skc_state;
	unsigned char		skc_reuse;
	int			skc_bound_dev_if;
	union {
		struct hlist_node	skc_bind_node;
		struct hlist_nulls_node	skc_portaddr_node;
	};
	struct proto		*skc_prot;
#ifdef CONFIG_NET_NS
	struct net		*skc_net;
#endif
};

/**
 * struct sock - network layer representation of sockets
 * @__sk_common: shared layout with inet_timewait_sock
 * @sk_shutdown: mask of %SEND_SHUTDOWN and/or %RCV_SHUTDOWN
 * @sk_userlocks: %SO_SNDBUF and %SO_RCVBUF settings
 * @sk_lock: synchronizer
 * @sk_rcvbuf: size of receive buffer in bytes
 * @sk_sleep: sock wait queue
 * @sk_dst_cache: destination cache
 * @sk_dst_lock: destination cache lock
 * @sk_policy: flow policy
 * @sk_rmem_alloc: receive queue bytes committed
 * @sk_receive_queue: incoming packets
 * @sk_wmem_alloc: transmit queue bytes committed
 * @sk_write_queue: Packet sending queue
 * @sk_async_wait_queue: DMA copied packets
 * @sk_omem_alloc: "o" is "option" or "other"
 * @sk_wmem_queued: persistent queue size
 * @sk_forward_alloc: space allocated forward
 * @sk_allocation: allocation mode
 * @sk_sndbuf: size of send buffer in bytes
 * @sk_flags: %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
 *	      %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
 * @sk_no_check: %SO_NO_CHECK setting, whether or not to checksum packets
 * @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
 * @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
 * @sk_gso_max_size: Maximum GSO segment size to build
 * @sk_lingertime: %SO_LINGER l_linger setting
 * @sk_backlog: always used with the per-socket spinlock held
 * @sk_callback_lock: used with the callbacks in the end of this struct
 * @sk_error_queue: rarely used
 * @sk_prot_creator: sk_prot of original sock creator (see ipv6_setsockopt,
 *		     IPV6_ADDRFORM for instance)
 * @sk_err: last error
 * @sk_err_soft: errors that don't cause failure but are the cause of a
 *		 persistent failure not just 'timed out'
 * @sk_drops: raw/udp drops counter
 * @sk_ack_backlog: current listen backlog
 * @sk_max_ack_backlog: listen backlog set in listen()
 * @sk_priority: %SO_PRIORITY setting
 * @sk_type: socket type (%SOCK_STREAM, etc)
 * @sk_protocol: which protocol this socket belongs in this network family
 * @sk_peercred: %SO_PEERCRED setting
 * @sk_rcvlowat: %SO_RCVLOWAT setting
 * @sk_rcvtimeo: %SO_RCVTIMEO setting
 * @sk_sndtimeo: %SO_SNDTIMEO setting
 * @sk_filter: socket filtering instructions
 * @sk_protinfo: private area, net family specific, when not using slab
 * @sk_timer: sock cleanup timer
 * @sk_stamp: time stamp of last packet received
 * @sk_socket: Identd and reporting IO signals
 * @sk_user_data: RPC layer private data
 * @sk_sndmsg_page: cached page for sendmsg
 * @sk_sndmsg_off: cached offset for sendmsg
 * @sk_send_head: front of stuff to transmit
 * @sk_security: used by security modules
 * @sk_mark: generic packet mark
 * @sk_write_pending: a write to stream socket waits to start
 * @sk_state_change: callback to indicate change in the state of the sock
 * @sk_data_ready: callback to indicate there is data to be processed
 * @sk_write_space: callback to indicate there is buffer space available
 *		    for sending
 * @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
 * @sk_backlog_rcv: callback to process the backlog
 * @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
 */
struct sock {
	/*
	 * Now struct inet_timewait_sock also uses sock_common, so please just
	 * don't add anything before this first member (__sk_common) --acme
	 */
	struct sock_common	__sk_common;
#define sk_node			__sk_common.skc_node
#define sk_nulls_node		__sk_common.skc_nulls_node
#define sk_refcnt		__sk_common.skc_refcnt
#define sk_tx_queue_mapping	__sk_common.skc_tx_queue_mapping

#define sk_copy_start		__sk_common.skc_hash
#define sk_hash			__sk_common.skc_hash
#define sk_family		__sk_common.skc_family
#define sk_state		__sk_common.skc_state
#define sk_reuse		__sk_common.skc_reuse
#define sk_bound_dev_if		__sk_common.skc_bound_dev_if
#define sk_bind_node		__sk_common.skc_bind_node
#define sk_prot			__sk_common.skc_prot
#define sk_net			__sk_common.skc_net
	kmemcheck_bitfield_begin(flags);
	unsigned int		sk_shutdown  : 2,
				sk_no_check  : 2,
				sk_userlocks : 4,
				sk_protocol  : 8,
				sk_type      : 16;
	kmemcheck_bitfield_end(flags);
	int			sk_rcvbuf;
	socket_lock_t		sk_lock;
	/*
	 * The backlog queue is special, it is always used with
	 * the per-socket spinlock held and requires low latency
	 * access. Therefore we special case its implementation.
	 */
	struct {
		struct sk_buff *head;
		struct sk_buff *tail;
		int len;
		int limit;
	} sk_backlog;
	wait_queue_head_t	*sk_sleep;
	struct dst_entry	*sk_dst_cache;
#ifdef CONFIG_XFRM
	struct xfrm_policy	*sk_policy[2];
#endif
	rwlock_t		sk_dst_lock;
	atomic_t		sk_rmem_alloc;
	atomic_t		sk_wmem_alloc;
	atomic_t		sk_omem_alloc;
	int			sk_sndbuf;
	struct sk_buff_head	sk_receive_queue;
	struct sk_buff_head	sk_write_queue;
#ifdef CONFIG_NET_DMA
	struct sk_buff_head	sk_async_wait_queue;
#endif
	int			sk_wmem_queued;
	int			sk_forward_alloc;
	gfp_t			sk_allocation;
	int			sk_route_caps;
	int			sk_gso_type;
	unsigned int		sk_gso_max_size;
	int			sk_rcvlowat;
	unsigned long		sk_flags;
	unsigned long		sk_lingertime;
	struct sk_buff_head	sk_error_queue;
	struct proto		*sk_prot_creator;
	rwlock_t		sk_callback_lock;
	int			sk_err,
				sk_err_soft;
	atomic_t		sk_drops;
	unsigned short		sk_ack_backlog;
	unsigned short		sk_max_ack_backlog;
	__u32			sk_priority;
	struct ucred		sk_peercred;
	long			sk_rcvtimeo;
	long			sk_sndtimeo;
	struct sk_filter	*sk_filter;
	void			*sk_protinfo;
	struct timer_list	sk_timer;
	ktime_t			sk_stamp;
	struct socket		*sk_socket;
	void			*sk_user_data;
	struct page		*sk_sndmsg_page;
	struct sk_buff		*sk_send_head;
	__u32			sk_sndmsg_off;
	int			sk_write_pending;
#ifdef CONFIG_SECURITY
	void			*sk_security;
#endif
	__u32			sk_mark;
	/* XXX 4 bytes hole on 64 bit */
	void			(*sk_state_change)(struct sock *sk);
	void			(*sk_data_ready)(struct sock *sk, int bytes);
	void			(*sk_write_space)(struct sock *sk);
	void			(*sk_error_report)(struct sock *sk);
	int			(*sk_backlog_rcv)(struct sock *sk,
						  struct sk_buff *skb);
	void			(*sk_destruct)(struct sock *sk);
};

/*
 * Hashed lists helper routines
 */
static inline struct sock *sk_entry(const struct hlist_node *node)
{
	return hlist_entry(node, struct sock, sk_node);
}

static inline struct sock *__sk_head(const struct hlist_head *head)
{
	return hlist_entry(head->first, struct sock, sk_node);
}

static inline struct sock *sk_head(const struct hlist_head *head)
{
	return hlist_empty(head) ? NULL : __sk_head(head);
}

static inline struct sock *__sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_entry(head->first, struct sock, sk_nulls_node);
}

static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
{
	return hlist_nulls_empty(head) ? NULL : __sk_nulls_head(head);
}

static inline struct sock *sk_next(const struct sock *sk)
{
	return sk->sk_node.next ?
		hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
}

static inline struct sock *sk_nulls_next(const struct sock *sk)
{
	return (!is_a_nulls(sk->sk_nulls_node.next)) ?
		hlist_nulls_entry(sk->sk_nulls_node.next,
				  struct sock, sk_nulls_node) :
		NULL;
}

static inline int sk_unhashed(const struct sock *sk)
{
	return hlist_unhashed(&sk->sk_node);
}

static inline int sk_hashed(const struct sock *sk)
{
	return !sk_unhashed(sk);
}

static __inline__ void sk_node_init(struct hlist_node *node)
{
	node->pprev = NULL;
}

static __inline__ void sk_nulls_node_init(struct hlist_nulls_node *node)
{
	node->pprev = NULL;
}

static __inline__ void __sk_del_node(struct sock *sk)
{
	__hlist_del(&sk->sk_node);
}

/* NB: equivalent to hlist_del_init_rcu */
static __inline__ int __sk_del_node_init(struct sock *sk)
{
	if (sk_hashed(sk)) {
		__sk_del_node(sk);
		sk_node_init(&sk->sk_node);
		return 1;
	}
	return 0;
}

/* Grab socket reference count. This operation is valid only
   when sk is ALREADY grabbed f.e. it is found in hash table
   or a list and the lookup is made under lock preventing hash table
   modifications.
 */

static inline void sock_hold(struct sock *sk)
{
	atomic_inc(&sk->sk_refcnt);
}

/* Ungrab socket in the context, which assumes that socket refcnt
   cannot hit zero, f.e. it is true in context of any socketcall.
 */
static inline void __sock_put(struct sock *sk)
{
	atomic_dec(&sk->sk_refcnt);
}

static __inline__ int sk_del_node_init(struct sock *sk)
{
	int rc = __sk_del_node_init(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}
#define sk_del_node_init_rcu(sk)	sk_del_node_init(sk)

static __inline__ int __sk_nulls_del_node_init_rcu(struct sock *sk)
{
	if (sk_hashed(sk)) {
		hlist_nulls_del_init_rcu(&sk->sk_nulls_node);
		return 1;
	}
	return 0;
}

static __inline__ int sk_nulls_del_node_init_rcu(struct sock *sk)
{
	int rc = __sk_nulls_del_node_init_rcu(sk);

	if (rc) {
		/* paranoid for a while -acme */
		WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
		__sock_put(sk);
	}
	return rc;
}

static __inline__ void __sk_add_node(struct sock *sk, struct hlist_head *list)
{
	hlist_add_head(&sk->sk_node, list);
}

static __inline__ void sk_add_node(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	__sk_add_node(sk, list);
}

static __inline__ void sk_add_node_rcu(struct sock *sk, struct hlist_head *list)
{
	sock_hold(sk);
	hlist_add_head_rcu(&sk->sk_node, list);
}

static __inline__ void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}

static __inline__ void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
	sock_hold(sk);
	__sk_nulls_add_node_rcu(sk, list);
}

static __inline__ void __sk_del_bind_node(struct sock *sk)
{
	__hlist_del(&sk->sk_bind_node);
}

static __inline__ void sk_add_bind_node(struct sock *sk,
					struct hlist_head *list)
{
	hlist_add_head(&sk->sk_bind_node, list);
}

#define sk_for_each(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_node)
#define sk_for_each_rcu(__sk, node, list) \
	hlist_for_each_entry_rcu(__sk, node, list, sk_node)
#define sk_nulls_for_each(__sk, node, list) \
	hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
#define sk_nulls_for_each_rcu(__sk, node, list) \
	hlist_nulls_for_each_entry_rcu(__sk, node, list, sk_nulls_node)
#define sk_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_from(__sk, node, sk_node)
#define sk_nulls_for_each_from(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_nulls_node; 1; })) \
		hlist_nulls_for_each_entry_from(__sk, node, sk_nulls_node)
#define sk_for_each_continue(__sk, node) \
	if (__sk && ({ node = &(__sk)->sk_node; 1; })) \
		hlist_for_each_entry_continue(__sk, node, sk_node)
#define sk_for_each_safe(__sk, node, tmp, list) \
	hlist_for_each_entry_safe(__sk, node, tmp, list, sk_node)
#define sk_for_each_bound(__sk, node, list) \
	hlist_for_each_entry(__sk, node, list, sk_bind_node)
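
/*
 * Illustrative only: walking one bucket of a protocol hash table with
 * sk_for_each(). The bucket and the match condition are hypothetical;
 * the point is that the lock held over the walk is what makes the
 * sock_hold() on a found entry legal (see the comment above sock_hold).
 */
#if 0
static struct sock *example_lookup(struct hlist_head *bucket,
				   unsigned int hash)
{
	struct sock *sk;
	struct hlist_node *node;

	/* caller holds the lock protecting this bucket */
	sk_for_each(sk, node, bucket) {
		if (sk->sk_hash == hash) {	/* hypothetical match test */
			sock_hold(sk);		/* keep sk alive past the lock */
			return sk;
		}
	}
	return NULL;
}
#endif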

/* Sock flags */
enum sock_flags {
	SOCK_DEAD,
	SOCK_DONE,
	SOCK_URGINLINE,
	SOCK_KEEPOPEN,
	SOCK_LINGER,
	SOCK_DESTROY,
	SOCK_BROADCAST,
	SOCK_TIMESTAMP,
	SOCK_ZAPPED,
	SOCK_USE_WRITE_QUEUE, /* whether to call sk->sk_write_space in sock_wfree */
	SOCK_DBG, /* %SO_DEBUG setting */
	SOCK_RCVTSTAMP, /* %SO_TIMESTAMP setting */
	SOCK_RCVTSTAMPNS, /* %SO_TIMESTAMPNS setting */
	SOCK_LOCALROUTE, /* route locally only, %SO_DONTROUTE setting */
	SOCK_QUEUE_SHRUNK, /* write queue has been shrunk recently */
	SOCK_TIMESTAMPING_TX_HARDWARE,  /* %SOF_TIMESTAMPING_TX_HARDWARE */
	SOCK_TIMESTAMPING_TX_SOFTWARE,  /* %SOF_TIMESTAMPING_TX_SOFTWARE */
	SOCK_TIMESTAMPING_RX_HARDWARE,  /* %SOF_TIMESTAMPING_RX_HARDWARE */
	SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
	SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
	SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
	SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
	SOCK_FASYNC, /* fasync() active */
	SOCK_RXQ_OVFL,
};

static inline void sock_copy_flags(struct sock *nsk, struct sock *osk)
{
	nsk->sk_flags = osk->sk_flags;
}

static inline void sock_set_flag(struct sock *sk, enum sock_flags flag)
{
	__set_bit(flag, &sk->sk_flags);
}

static inline void sock_reset_flag(struct sock *sk, enum sock_flags flag)
{
	__clear_bit(flag, &sk->sk_flags);
}

static inline int sock_flag(struct sock *sk, enum sock_flags flag)
{
	return test_bit(flag, &sk->sk_flags);
}

static inline void sk_acceptq_removed(struct sock *sk)
{
	sk->sk_ack_backlog--;
}

static inline void sk_acceptq_added(struct sock *sk)
{
	sk->sk_ack_backlog++;
}

static inline int sk_acceptq_is_full(struct sock *sk)
{
	return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
}

/*
 * Compute minimal free write space needed to queue new packets.
 */
static inline int sk_stream_min_wspace(struct sock *sk)
{
	return sk->sk_wmem_queued >> 1;
}

static inline int sk_stream_wspace(struct sock *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

extern void sk_stream_write_space(struct sock *sk);

static inline int sk_stream_memory_free(struct sock *sk)
{
	return sk->sk_wmem_queued < sk->sk_sndbuf;
}

/* OOB backlog add */
static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (!sk->sk_backlog.tail) {
		sk->sk_backlog.head = sk->sk_backlog.tail = skb;
	} else {
		sk->sk_backlog.tail->next = skb;
		sk->sk_backlog.tail = skb;
	}
	skb->next = NULL;
}

/* The per-socket spinlock must be held here. */
static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
		return -ENOBUFS;

	__sk_add_backlog(sk, skb);
	sk->sk_backlog.len += skb->truesize;
	return 0;
}

static inline int sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	return sk->sk_backlog_rcv(sk, skb);
}
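
/*
 * Illustrative only: the usual pattern protocols follow with the
 * backlog. In BH context, if a user context owns the socket lock, the
 * skb is queued with sk_add_backlog(); release_sock() later replays the
 * backlog through sk_backlog_rcv(). The function name is hypothetical;
 * bh_lock_sock() and sock_owned_by_user() are defined further below.
 */
#if 0
static int example_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	int rc = 0;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk))
		rc = sk_backlog_rcv(sk, skb);	/* process immediately */
	else if (sk_add_backlog(sk, skb))
		rc = -ENOBUFS;			/* backlog full: drop */
	bh_unlock_sock(sk);
	return rc;
}
#endif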

#define sk_wait_event(__sk, __timeo, __condition)			\
	({	int __rc;						\
		release_sock(__sk);					\
		__rc = __condition;					\
		if (!__rc) {						\
			*(__timeo) = schedule_timeout(*(__timeo));	\
		}							\
		lock_sock(__sk);					\
		__rc = __condition;					\
		__rc;							\
	})
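
/*
 * Illustrative only: a blocking wait built on sk_wait_event(), in the
 * style of sk_wait_data(). Assumes the caller already holds the socket
 * lock; the macro drops it while sleeping and re-tests the condition
 * after re-acquiring it. Hypothetical function name.
 */
#if 0
static int example_wait_for_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT(wait);
	int rc;

	prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE);
	rc = sk_wait_event(sk, timeo,
			   !skb_queue_empty(&sk->sk_receive_queue));
	finish_wait(sk->sk_sleep, &wait);
	return rc;	/* non-zero if data arrived before the timeout */
}
#endif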

extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
extern int sk_stream_error(struct sock *sk, int flags, int err);
extern void sk_stream_kill_queues(struct sock *sk);

extern int sk_wait_data(struct sock *sk, long *timeo);

struct request_sock_ops;
struct timewait_sock_ops;
struct inet_hashinfo;
struct raw_hashinfo;

/* Networking protocol blocks we attach to sockets.
 * socket layer -> transport layer interface
 * transport -> network interface is defined by struct inet_proto
 */
struct proto {
	void			(*close)(struct sock *sk,
					 long timeout);
	int			(*connect)(struct sock *sk,
					   struct sockaddr *uaddr,
					   int addr_len);
	int			(*disconnect)(struct sock *sk, int flags);

	struct sock *		(*accept) (struct sock *sk, int flags, int *err);

	int			(*ioctl)(struct sock *sk, int cmd,
					 unsigned long arg);
	int			(*init)(struct sock *sk);
	void			(*destroy)(struct sock *sk);
	void			(*shutdown)(struct sock *sk, int how);
	int			(*setsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*getsockopt)(struct sock *sk, int level,
					int optname, char __user *optval,
					int __user *option);
#ifdef CONFIG_COMPAT
	int			(*compat_setsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					unsigned int optlen);
	int			(*compat_getsockopt)(struct sock *sk,
					int level,
					int optname, char __user *optval,
					int __user *option);
#endif
	int			(*sendmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg, size_t len);
	int			(*recvmsg)(struct kiocb *iocb, struct sock *sk,
					   struct msghdr *msg,
					   size_t len, int noblock, int flags,
					   int *addr_len);
	int			(*sendpage)(struct sock *sk, struct page *page,
					int offset, size_t size, int flags);
	int			(*bind)(struct sock *sk,
					struct sockaddr *uaddr, int addr_len);

	int			(*backlog_rcv) (struct sock *sk,
						struct sk_buff *skb);

	/* Keeping track of sk's, looking them up, and port selection methods. */
	void			(*hash)(struct sock *sk);
	void			(*unhash)(struct sock *sk);
	int			(*get_port)(struct sock *sk, unsigned short snum);

	/* Keeping track of sockets in use */
#ifdef CONFIG_PROC_FS
	unsigned int		inuse_idx;
#endif

	/* Memory pressure */
	void			(*enter_memory_pressure)(struct sock *sk);
	atomic_t		*memory_allocated;	/* Current allocated memory. */
	struct percpu_counter	*sockets_allocated;	/* Current number of sockets. */
	/*
	 * Pressure flag: try to collapse.
	 * Technical note: it is used by multiple contexts non atomically.
	 * All the __sk_mem_schedule() is of this nature: accounting
	 * is strict, actions are advisory and have some latency.
	 */
	int			*memory_pressure;
	int			*sysctl_mem;
	int			*sysctl_wmem;
	int			*sysctl_rmem;
	int			max_header;

	struct kmem_cache	*slab;
	unsigned int		obj_size;
	int			slab_flags;

	struct percpu_counter	*orphan_count;

	struct request_sock_ops	*rsk_prot;
	struct timewait_sock_ops *twsk_prot;

	union {
		struct inet_hashinfo	*hashinfo;
		struct udp_table	*udp_table;
		struct raw_hashinfo	*raw_hash;
	} h;

	struct module		*owner;

	char			name[32];

	struct list_head	node;
#ifdef SOCK_REFCNT_DEBUG
	atomic_t		socks;
#endif
};
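
/*
 * Illustrative only: the skeleton of registering a protocol with the
 * socket layer, loosely modeled on what protocols such as UDP do. All
 * names here are hypothetical; a real struct proto fills in many more
 * of the methods declared above and usually a larger obj_size.
 */
#if 0
static struct proto example_proto = {
	.name	  = "EXAMPLE",
	.owner	  = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

static int __init example_init(void)
{
	/* alloc_slab != 0 asks proto_register() to create a kmem cache
	 * of obj_size bytes from which sockets are allocated */
	return proto_register(&example_proto, 1);
}

static void __exit example_exit(void)
{
	proto_unregister(&example_proto);
}
#endif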

extern int proto_register(struct proto *prot, int alloc_slab);
extern void proto_unregister(struct proto *prot);

#ifdef SOCK_REFCNT_DEBUG
static inline void sk_refcnt_debug_inc(struct sock *sk)
{
	atomic_inc(&sk->sk_prot->socks);
}

static inline void sk_refcnt_debug_dec(struct sock *sk)
{
	atomic_dec(&sk->sk_prot->socks);
	printk(KERN_DEBUG "%s socket %p released, %d are still alive\n",
	       sk->sk_prot->name, sk, atomic_read(&sk->sk_prot->socks));
}

static inline void sk_refcnt_debug_release(const struct sock *sk)
{
	if (atomic_read(&sk->sk_refcnt) != 1)
		printk(KERN_DEBUG "Destruction of the %s socket %p delayed, refcnt=%d\n",
		       sk->sk_prot->name, sk, atomic_read(&sk->sk_refcnt));
}
#else /* SOCK_REFCNT_DEBUG */
#define sk_refcnt_debug_inc(sk) do { } while (0)
#define sk_refcnt_debug_dec(sk) do { } while (0)
#define sk_refcnt_debug_release(sk) do { } while (0)
#endif /* SOCK_REFCNT_DEBUG */


#ifdef CONFIG_PROC_FS
/* Called with local bh disabled */
extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
#else
static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
		int inc)
{
}
#endif


/* With per-bucket locks this operation is not atomic, so this
 * unlocked version is no worse.
 */
static inline void __sk_prot_rehash(struct sock *sk)
{
	sk->sk_prot->unhash(sk);
	sk->sk_prot->hash(sk);
}

/* About 10 seconds */
#define SOCK_DESTROY_TIME (10*HZ)

/* Sockets 0-1023 can't be bound to unless you are superuser */
#define PROT_SOCK	1024

#define SHUTDOWN_MASK	3
#define RCV_SHUTDOWN	1
#define SEND_SHUTDOWN	2

#define SOCK_SNDBUF_LOCK	1
#define SOCK_RCVBUF_LOCK	2
#define SOCK_BINDADDR_LOCK	4
#define SOCK_BINDPORT_LOCK	8

/* sock_iocb: used to kick off async processing of socket ios */
struct sock_iocb {
	struct list_head	list;

	int			flags;
	int			size;
	struct socket		*sock;
	struct sock		*sk;
	struct scm_cookie	*scm;
	struct msghdr		*msg, async_msg;
	struct kiocb		*kiocb;
};

static inline struct sock_iocb *kiocb_to_siocb(struct kiocb *iocb)
{
	return (struct sock_iocb *)iocb->private;
}

static inline struct kiocb *siocb_to_kiocb(struct sock_iocb *si)
{
	return si->kiocb;
}

struct socket_alloc {
	struct socket socket;
	struct inode vfs_inode;
};

static inline struct socket *SOCKET_I(struct inode *inode)
{
	return &container_of(inode, struct socket_alloc, vfs_inode)->socket;
}

static inline struct inode *SOCK_INODE(struct socket *socket)
{
	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
}

/*
 * Functions for memory accounting
 */
extern int __sk_mem_schedule(struct sock *sk, int size, int kind);
extern void __sk_mem_reclaim(struct sock *sk);

#define SK_MEM_QUANTUM ((int)PAGE_SIZE)
#define SK_MEM_QUANTUM_SHIFT ilog2(SK_MEM_QUANTUM)
#define SK_MEM_SEND	0
#define SK_MEM_RECV	1

static inline int sk_mem_pages(int amt)
{
	return (amt + SK_MEM_QUANTUM - 1) >> SK_MEM_QUANTUM_SHIFT;
}

static inline int sk_has_account(struct sock *sk)
{
	/* return true if protocol supports memory accounting */
	return !!sk->sk_prot->memory_allocated;
}

static inline int sk_wmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_SEND);
}

static inline int sk_rmem_schedule(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return 1;
	return size <= sk->sk_forward_alloc ||
		__sk_mem_schedule(sk, size, SK_MEM_RECV);
}

static inline void sk_mem_reclaim(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc >= SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_reclaim_partial(struct sock *sk)
{
	if (!sk_has_account(sk))
		return;
	if (sk->sk_forward_alloc > SK_MEM_QUANTUM)
		__sk_mem_reclaim(sk);
}

static inline void sk_mem_charge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc -= size;
}

static inline void sk_mem_uncharge(struct sock *sk, int size)
{
	if (!sk_has_account(sk))
		return;
	sk->sk_forward_alloc += size;
}

static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
	sk->sk_wmem_queued -= skb->truesize;
	sk_mem_uncharge(sk, skb->truesize);
	__kfree_skb(skb);
}
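
/*
 * Illustrative only: how the accounting helpers above compose on the
 * send side. A protocol first schedules (reserves) forward-allocated
 * space against its limits, then charges each queued skb; freeing the
 * skb returns the space. Hypothetical function name.
 */
#if 0
static int example_queue_for_send(struct sock *sk, struct sk_buff *skb)
{
	if (!sk_wmem_schedule(sk, skb->truesize))
		return -ENOBUFS;		/* over protocol memory limits */

	sk_mem_charge(sk, skb->truesize);	/* consume sk_forward_alloc */
	sk->sk_wmem_queued += skb->truesize;
	skb_queue_tail(&sk->sk_write_queue, skb);
	return 0;
}
#endif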

/* Used by processes to "lock" a socket state, so that
 * interrupts and bottom half handlers won't change it
 * from under us. It essentially blocks any incoming
 * packets, so that we won't get any new data or any
 * packets that change the state of the socket.
 *
 * While locked, BH processing will add new packets to
 * the backlog queue.  This queue is processed by the
 * owner of the socket lock right before it is released.
 *
 * Since ~2.3.5 it is also exclusive sleep lock serializing
 * accesses from user process context.
 */
#define sock_owned_by_user(sk)	((sk)->sk_lock.owned)

/*
 * Macro so as to not evaluate some arguments when
 * lockdep is not enabled.
 *
 * Mark both the sk_lock and the sk_lock.slock as a
 * per-address-family lock class.
 */
#define sock_lock_init_class_and_name(sk, sname, skey, name, key)	\
do {									\
	sk->sk_lock.owned = 0;						\
	init_waitqueue_head(&sk->sk_lock.wq);				\
	spin_lock_init(&(sk)->sk_lock.slock);				\
	debug_check_no_locks_freed((void *)&(sk)->sk_lock,		\
			sizeof((sk)->sk_lock));				\
	lockdep_set_class_and_name(&(sk)->sk_lock.slock,		\
			(skey), (sname));				\
	lockdep_init_map(&(sk)->sk_lock.dep_map, (name), (key), 0);	\
} while (0)

extern void lock_sock_nested(struct sock *sk, int subclass);

static inline void lock_sock(struct sock *sk)
{
	lock_sock_nested(sk, 0);
}

extern void release_sock(struct sock *sk);

/* BH context may only use the following locking interface. */
#define bh_lock_sock(__sk)	spin_lock(&((__sk)->sk_lock.slock))
#define bh_lock_sock_nested(__sk) \
				spin_lock_nested(&((__sk)->sk_lock.slock), \
				SINGLE_DEPTH_NESTING)
#define bh_unlock_sock(__sk)	spin_unlock(&((__sk)->sk_lock.slock))
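
/*
 * Illustrative only: the two lock levels in use. Process context takes
 * the sleeping lock with lock_sock(); softirq context takes only the
 * spinlock with bh_lock_sock() and must check sock_owned_by_user()
 * (see the backlog example earlier). Hypothetical function name.
 */
#if 0
static void example_process_context_op(struct sock *sk)
{
	lock_sock(sk);		/* may sleep; excludes other users */
	/* ... modify socket state safely here ... */
	release_sock(sk);	/* also replays any backlogged packets */
}
#endif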

extern struct sock		*sk_alloc(struct net *net, int family,
					  gfp_t priority,
					  struct proto *prot);
extern void			sk_free(struct sock *sk);
extern void			sk_release_kernel(struct sock *sk);
extern struct sock		*sk_clone(const struct sock *sk,
					  const gfp_t priority);

extern struct sk_buff		*sock_wmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern struct sk_buff		*sock_rmalloc(struct sock *sk,
					      unsigned long size, int force,
					      gfp_t priority);
extern void			sock_wfree(struct sk_buff *skb);
extern void			sock_rfree(struct sk_buff *skb);

extern int			sock_setsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						unsigned int optlen);

extern int			sock_getsockopt(struct socket *sock, int level,
						int op, char __user *optval,
						int __user *optlen);
extern struct sk_buff		*sock_alloc_send_skb(struct sock *sk,
						     unsigned long size,
						     int noblock,
						     int *errcode);
extern struct sk_buff		*sock_alloc_send_pskb(struct sock *sk,
						      unsigned long header_len,
						      unsigned long data_len,
						      int noblock,
						      int *errcode);
extern void *sock_kmalloc(struct sock *sk, int size,
			  gfp_t priority);
extern void sock_kfree_s(struct sock *sk, void *mem, int size);
extern void sk_send_sigurg(struct sock *sk);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * does not implement a particular function.
 */
extern int			sock_no_bind(struct socket *,
					     struct sockaddr *, int);
extern int			sock_no_connect(struct socket *,
						struct sockaddr *, int, int);
extern int			sock_no_socketpair(struct socket *,
						   struct socket *);
extern int			sock_no_accept(struct socket *,
					       struct socket *, int);
extern int			sock_no_getname(struct socket *,
						struct sockaddr *, int *, int);
extern unsigned int		sock_no_poll(struct file *, struct socket *,
					     struct poll_table_struct *);
extern int			sock_no_ioctl(struct socket *, unsigned int,
					      unsigned long);
extern int			sock_no_listen(struct socket *, int);
extern int			sock_no_shutdown(struct socket *, int);
extern int			sock_no_getsockopt(struct socket *, int , int,
						   char __user *, int __user *);
extern int			sock_no_setsockopt(struct socket *, int, int,
						   char __user *, unsigned int);
extern int			sock_no_sendmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t);
extern int			sock_no_recvmsg(struct kiocb *, struct socket *,
						struct msghdr *, size_t, int);
extern int			sock_no_mmap(struct file *file,
					     struct socket *sock,
					     struct vm_area_struct *vma);
extern ssize_t			sock_no_sendpage(struct socket *sock,
						 struct page *page,
						 int offset, size_t size,
						 int flags);

/*
 * Functions to fill in entries in struct proto_ops when a protocol
 * uses the inet style.
 */
extern int sock_common_getsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, int __user *optlen);
extern int sock_common_recvmsg(struct kiocb *iocb, struct socket *sock,
			       struct msghdr *msg, size_t size, int flags);
extern int sock_common_setsockopt(struct socket *sock, int level, int optname,
				  char __user *optval, unsigned int optlen);
extern int compat_sock_common_getsockopt(struct socket *sock, int level,
		int optname, char __user *optval, int __user *optlen);
extern int compat_sock_common_setsockopt(struct socket *sock, int level,
		int optname, char __user *optval, unsigned int optlen);

extern void sk_common_release(struct sock *sk);

/*
 *	Default socket callbacks and setup code
 */

/* Initialise core socket variables */
extern void sock_init_data(struct socket *sock, struct sock *sk);

/**
 *	sk_filter_release - release a socket filter
 *	@fp: filter to remove
 *
 *	Remove a filter from a socket and release its resources.
 */

static inline void sk_filter_release(struct sk_filter *fp)
{
	if (atomic_dec_and_test(&fp->refcnt))
		kfree(fp);
}

static inline void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
{
	unsigned int size = sk_filter_len(fp);

	atomic_sub(size, &sk->sk_omem_alloc);
	sk_filter_release(fp);
}

static inline void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
{
	atomic_inc(&fp->refcnt);
	atomic_add(sk_filter_len(fp), &sk->sk_omem_alloc);
}

/*
 * Socket reference counting postulates.
 *
 * * Each user of socket SHOULD hold a reference count.
 * * Each access point to socket (a hash table bucket, reference from a list,
 *   running timer, skb in flight) MUST hold a reference count.
 * * When reference count hits 0, it means it will never increase back.
 * * When reference count hits 0, it means that no references from
 *   outside exist to this socket and current process on current CPU
 *   is last user and may/should destroy this socket.
 * * sk_free is called from any context: process, BH, IRQ. When
 *   it is called, socket has no references from outside -> sk_free
 *   may release descendant resources allocated by the socket, but
 *   by the time it is called, socket is NOT referenced by any
 *   hash tables, lists etc.
 * * Packets, delivered from outside (from network or from another process)
 *   and enqueued on receive/error queues SHOULD NOT grab reference count,
 *   while they sit in queue. Otherwise, packets will leak when the
 *   socket is looked up by one cpu and unhashed by another CPU.
 *   It is true for udp/raw, netlink (leak to receive and error queues), tcp
 *   (leak to backlog). Packet socket does all the processing inside
 *   BR_NETPROTO_LOCK, so it does not have this race condition. UNIX sockets
 *   use a separate SMP lock, so they are protected as well.
 */

/* Ungrab socket and destroy it, if it was the last reference. */
static inline void sock_put(struct sock *sk)
{
	if (atomic_dec_and_test(&sk->sk_refcnt))
		sk_free(sk);
}
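
/*
 * Illustrative only: the hold/put discipline around an unlocked use.
 * The function below is hypothetical; the point is that the reference
 * taken under the table lock (see the sock_hold() comment earlier)
 * keeps the sock alive after the lock is dropped, and the final
 * sock_put() may free it.
 */
#if 0
static void example_use_after_lookup(struct sock *sk)
{
	sock_hold(sk);		/* legal: sk was found in a locked table */
	/* ... use sk without the table lock held ... */
	sock_put(sk);		/* drops our reference; frees on last put */
}
#endif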

extern int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
			  const int nested);

static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
	sk->sk_tx_queue_mapping = tx_queue;
}

static inline void sk_tx_queue_clear(struct sock *sk)
{
	sk->sk_tx_queue_mapping = -1;
}

static inline int sk_tx_queue_get(const struct sock *sk)
{
	return sk->sk_tx_queue_mapping;
}

static inline bool sk_tx_queue_recorded(const struct sock *sk)
{
	return (sk && sk->sk_tx_queue_mapping >= 0);
}

static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
	sk_tx_queue_clear(sk);
	sk->sk_socket = sock;
}

/* Detach socket from process context.
 * Announce socket dead, detach it from wait queue and inode.
 * Note that parent inode held reference count on this struct sock,
 * we do not release it in this function, because protocol
 * probably wants some additional cleanups or even continuing
 * to work with this socket (TCP).
 */
static inline void sock_orphan(struct sock *sk)
{
	write_lock_bh(&sk->sk_callback_lock);
	sock_set_flag(sk, SOCK_DEAD);
	sk_set_socket(sk, NULL);
	sk->sk_sleep = NULL;
	write_unlock_bh(&sk->sk_callback_lock);
}

static inline void sock_graft(struct sock *sk, struct socket *parent)
{
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_sleep = &parent->wait;
	parent->sk = sk;
	sk_set_socket(sk, parent);
	security_sock_graft(sk, parent);
	write_unlock_bh(&sk->sk_callback_lock);
}

extern int sock_i_uid(struct sock *sk);
extern unsigned long sock_i_ino(struct sock *sk);

static inline struct dst_entry *
__sk_dst_get(struct sock *sk)
{
	return sk->sk_dst_cache;
}

static inline struct dst_entry *
sk_dst_get(struct sock *sk)
{
	struct dst_entry *dst;

	read_lock(&sk->sk_dst_lock);
	dst = sk->sk_dst_cache;
	if (dst)
		dst_hold(dst);
	read_unlock(&sk->sk_dst_lock);
	return dst;
}

static inline void
__sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = dst;
	dst_release(old_dst);
}

static inline void
sk_dst_set(struct sock *sk, struct dst_entry *dst)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_set(sk, dst);
	write_unlock(&sk->sk_dst_lock);
}

static inline void
__sk_dst_reset(struct sock *sk)
{
	struct dst_entry *old_dst;

	sk_tx_queue_clear(sk);
	old_dst = sk->sk_dst_cache;
	sk->sk_dst_cache = NULL;
	dst_release(old_dst);
}

static inline void
sk_dst_reset(struct sock *sk)
{
	write_lock(&sk->sk_dst_lock);
	__sk_dst_reset(sk);
	write_unlock(&sk->sk_dst_lock);
}

extern struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);

extern struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
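
/*
 * Illustrative only: the usual transmit-path pattern around the cached
 * route. sk_dst_check() returns the cached dst only while the cookie
 * says it is still valid; on NULL the caller must perform a fresh
 * route lookup and install the result with sk_dst_set(). The function
 * name below is hypothetical.
 */
#if 0
static struct dst_entry *example_get_route(struct sock *sk, u32 cookie)
{
	struct dst_entry *dst = sk_dst_check(sk, cookie);

	if (!dst) {
		/* cached route was invalidated: re-route here and
		 * install the new dst with sk_dst_set(sk, dst) */
	}
	return dst;
}
#endif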

static inline int sk_can_gso(const struct sock *sk)
{
	return net_gso_ok(sk->sk_route_caps, sk->sk_gso_type);
}

extern void sk_setup_caps(struct sock *sk, struct dst_entry *dst);

static inline int skb_copy_to_page(struct sock *sk, char __user *from,
				   struct sk_buff *skb, struct page *page,
				   int off, int copy)
{
	if (skb->ip_summed == CHECKSUM_NONE) {
		int err = 0;
		__wsum csum = csum_and_copy_from_user(from,
						      page_address(page) + off,
						      copy, 0, &err);
		if (err)
			return err;
		skb->csum = csum_block_add(skb->csum, csum, skb->len);
	} else if (copy_from_user(page_address(page) + off, from, copy))
		return -EFAULT;

	skb->len	   += copy;
	skb->data_len	   += copy;
	skb->truesize	   += copy;
	sk->sk_wmem_queued += copy;
	sk_mem_charge(sk, copy);
	return 0;
}

/**
 * sk_wmem_alloc_get - returns write allocations
 * @sk: socket
 *
 * Returns sk_wmem_alloc minus initial offset of one
 */
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) - 1;
}

/**
 * sk_rmem_alloc_get - returns read allocations
 * @sk: socket
 *
 * Returns sk_rmem_alloc
 */
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
	return atomic_read(&sk->sk_rmem_alloc);
}

/**
 * sk_has_allocations - check if allocations are outstanding
 * @sk: socket
 *
 * Returns true if socket has write or read allocations
 */
static inline int sk_has_allocations(const struct sock *sk)
{
	return sk_wmem_alloc_get(sk) || sk_rmem_alloc_get(sk);
}

/**
 * sk_has_sleeper - check if there are any waiting processes
 * @sk: socket
 *
 * Returns true if socket has waiting processes
 *
 * The purpose of the sk_has_sleeper and sock_poll_wait is to wrap the memory
 * barrier call. They were added due to the race found within the tcp code.
 *
 * Consider following tcp code paths:
 *
 * CPU1                             CPU2
 *
 * sys_select                       receive packet
 *   ...                            ...
 *   __add_wait_queue               update tp->rcv_nxt
 *   ...                            ...
 *   tp->rcv_nxt check              sock_def_readable
 *   ...                            {
 *   schedule                       ...
 *                                  if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
 *                                      wake_up_interruptible(sk->sk_sleep)
 *                                  ...
 *                                  }
 *
 * The race for tcp fires when the __add_wait_queue changes done by CPU1 stay
 * in its cache, and so does the tp->rcv_nxt update on CPU2 side. CPU1
 * could then end up calling schedule and sleep forever if no more
 * data arrives on the socket.
 *
 * The sk_has_sleeper is always called right after a call to read_lock, so we
 * can use smp_mb__after_lock barrier.
 */
static inline int sk_has_sleeper(struct sock *sk)
{
	/*
	 * We need to be sure we are in sync with the
	 * add_wait_queue modifications to the wait queue.
	 *
	 * This memory barrier is paired in the sock_poll_wait.
	 */
	smp_mb__after_lock();
	return sk->sk_sleep && waitqueue_active(sk->sk_sleep);
}

/**
 * sock_poll_wait - place memory barrier behind the poll_wait call.
 * @filp:          file
 * @wait_address:  socket wait queue
 * @p:             poll_table
 *
 * See the comments in the sk_has_sleeper function.
 */
static inline void sock_poll_wait(struct file *filp,
		wait_queue_head_t *wait_address, poll_table *p)
{
	if (p && wait_address) {
		poll_wait(filp, wait_address, p);
		/*
		 * We need to be sure we are in sync with the
		 * socket flags modification.
		 *
		 * This memory barrier is paired in the sk_has_sleeper.
		 */
		smp_mb();
	}
}
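
/*
 * Illustrative only: a minimal poll method built on sock_poll_wait(),
 * sketching how it pairs with sk_has_sleeper() in the wakeup path.
 * Hypothetical function; real implementations report many more events.
 */
#if 0
static unsigned int example_poll(struct file *file, struct socket *sock,
				 poll_table *wait)
{
	struct sock *sk = sock->sk;
	unsigned int mask = 0;

	/* registers on sk_sleep and issues the barrier paired with
	 * the smp_mb__after_lock() in sk_has_sleeper() */
	sock_poll_wait(file, sk->sk_sleep, wait);
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;
	if (sock_writeable(sk))
		mask |= POLLOUT | POLLWRNORM;
	return mask;
}
#endif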

/*
 *	Queue a received datagram if it will fit. Stream and sequenced
 *	protocols can't normally use this as they need to fit buffers in
 *	and play with them.
 *
 *	Inlined as it's very short and called for pretty much every
 *	packet ever received.
 */

static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_wfree;
	/*
	 * We used to take a refcount on sk, but following operation
	 * is enough to guarantee sk_free() wont free this sock until
	 * all in-flight packets are completed
	 */
	atomic_add(skb->truesize, &sk->sk_wmem_alloc);
}

static inline void skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb_orphan(skb);
	skb->sk = sk;
	skb->destructor = sock_rfree;
	atomic_add(skb->truesize, &sk->sk_rmem_alloc);
	sk_mem_charge(sk, skb->truesize);
}

extern void sk_reset_timer(struct sock *sk, struct timer_list *timer,
			   unsigned long expires);

extern void sk_stop_timer(struct sock *sk, struct timer_list *timer);

extern int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb);

static inline int sock_queue_err_skb(struct sock *sk, struct sk_buff *skb)
{
	/* Cast skb->rcvbuf to unsigned... It's pointless, but reduces
	   number of warnings when compiling with -W --ANK
	 */
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
	    (unsigned)sk->sk_rcvbuf)
		return -ENOMEM;
	skb_set_owner_r(skb, sk);
	skb_queue_tail(&sk->sk_error_queue, skb);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb->len);
	return 0;
}

/*
 *	Recover an error report and clear atomically
 */

static inline int sock_error(struct sock *sk)
{
	int err;
	if (likely(!sk->sk_err))
		return 0;
	err = xchg(&sk->sk_err, 0);
	return -err;
}

static inline unsigned long sock_wspace(struct sock *sk)
{
	int amt = 0;

	if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
		amt = sk->sk_sndbuf - atomic_read(&sk->sk_wmem_alloc);
		if (amt < 0)
			amt = 0;
	}
	return amt;
}

static inline void sk_wake_async(struct sock *sk, int how, int band)
{
	if (sock_flag(sk, SOCK_FASYNC))
		sock_wake_async(sk->sk_socket, how, band);
}

#define SOCK_MIN_SNDBUF 2048
#define SOCK_MIN_RCVBUF 256

static inline void sk_stream_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sk_sndbuf = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1);
		sk->sk_sndbuf = max(sk->sk_sndbuf, SOCK_MIN_SNDBUF);
	}
}

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);

static inline struct page *sk_stream_alloc_page(struct sock *sk)
{
	struct page *page = NULL;

	page = alloc_pages(sk->sk_allocation, 0);
	if (!page) {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return page;
}

/*
 *	Default write policy as shown to user space via poll/select/SIGIO
 */
static inline int sock_writeable(const struct sock *sk)
{
	return atomic_read(&sk->sk_wmem_alloc) < (sk->sk_sndbuf >> 1);
}

static inline gfp_t gfp_any(void)
{
	return in_softirq() ? GFP_ATOMIC : GFP_KERNEL;
}

static inline long sock_rcvtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_rcvtimeo;
}

static inline long sock_sndtimeo(const struct sock *sk, int noblock)
{
	return noblock ? 0 : sk->sk_sndtimeo;
}

static inline int sock_rcvlowat(const struct sock *sk, int waitall, int len)
{
	return (waitall ? len : min_t(int, sk->sk_rcvlowat, len)) ? : 1;
}

/* Alas, with timeout socket operations are not restartable.
 * Compare this to poll().
 */
static inline int sock_intr_errno(long timeo)
{
	return timeo == MAX_SCHEDULE_TIMEOUT ? -ERESTARTSYS : -EINTR;
}

extern void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
	struct sk_buff *skb);

static __inline__ void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
	ktime_t kt = skb->tstamp;
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);

	/*
	 * generate control messages if
	 * - receive time stamping in software requested (SOCK_RCVTSTAMP
	 *   or SOCK_TIMESTAMPING_RX_SOFTWARE)
	 * - software time stamp available and wanted
	 *   (SOCK_TIMESTAMPING_SOFTWARE)
	 * - hardware time stamps available and wanted
	 *   (SOCK_TIMESTAMPING_SYS_HARDWARE or
	 *   SOCK_TIMESTAMPING_RAW_HARDWARE)
	 */
	if (sock_flag(sk, SOCK_RCVTSTAMP) ||
	    sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
	    (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
	    (hwtstamps->hwtstamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
	    (hwtstamps->syststamp.tv64 &&
	     sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
		__sock_recv_timestamp(msg, sk, skb);
	else
		sk->sk_stamp = kt;
}

extern void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, struct sk_buff *skb);

/**
 * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped
 * @msg:	outgoing packet
 * @sk:		socket sending this packet
 * @shtx:	filled with instructions for time stamping
 *
 * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if
 * parameters are invalid.
 */
extern int sock_tx_timestamp(struct msghdr *msg,
			     struct sock *sk,
			     union skb_shared_tx *shtx);


/**
 * sk_eat_skb - Release a skb if it is no longer needed
 * @sk: socket to eat this skb from
 * @skb: socket buffer to eat
 * @copied_early: flag indicating whether DMA operations copied this data early
 *
 * This routine must be called with interrupts disabled or with the socket
 * locked so that the sk_buff queue operation is ok.
 */
#ifdef CONFIG_NET_DMA
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}
#else
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb, int copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	__kfree_skb(skb);
}
#endif

static inline
struct net *sock_net(const struct sock *sk)
{
#ifdef CONFIG_NET_NS
	return sk->sk_net;
#else
	return &init_net;
#endif
}

static inline
void sock_net_set(struct sock *sk, struct net *net)
{
#ifdef CONFIG_NET_NS
	sk->sk_net = net;
#endif
}

/*
 * Kernel sockets, f.e. rtnl or icmp_socket, are a part of a namespace.
 * They should not hold a reference to a namespace, in order to allow
 * the namespace to be stopped.
 * Sockets after sk_change_net should be released using sk_release_kernel
 */
static inline void sk_change_net(struct sock *sk, struct net *net)
{
	put_net(sock_net(sk));
	sock_net_set(sk, hold_net(net));
}

static inline struct sock *skb_steal_sock(struct sk_buff *skb)
{
	if (unlikely(skb->sk)) {
		struct sock *sk = skb->sk;

		skb->destructor = NULL;
		skb->sk = NULL;
		return sk;
	}
	return NULL;
}

extern void sock_enable_timestamp(struct sock *sk, int flag);
extern int sock_get_timestamp(struct sock *, struct timeval __user *);
extern int sock_get_timestampns(struct sock *, struct timespec __user *);

/*
 *	Enable debug/info messages
 */
extern int net_msg_warn;
#define NETDEBUG(fmt, args...) \
	do { if (net_msg_warn) printk(fmt, ##args); } while (0)

#define LIMIT_NETDEBUG(fmt, args...) \
	do { if (net_msg_warn && net_ratelimit()) printk(fmt, ##args); } while (0)

extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;

extern void sk_init(void);

extern int sysctl_optmem_max;

extern __u32 sysctl_wmem_default;
extern __u32 sysctl_rmem_default;

#endif	/* _SOCK_H */