#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
+#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
+#include <net/tcp_states.h>
+
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
#include <linux/ipv6.h>
#endif
#include <linux/seq_file.h>
-/* This is for all connections with a full identity, no wildcards.
- * New scheme, half the table is for TIME_WAIT, the other half is
- * for the rest. I'll experiment with dynamic table growth later.
- */
-struct tcp_ehash_bucket {
- rwlock_t lock;
- struct hlist_head chain;
-} __attribute__((__aligned__(8)));
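
The removed layout comment is worth unpacking: the ehash array was allocated at twice tcp_ehash_size, so a connection hashing to bucket h kept its TIME_WAIT shadow at bucket h + tcp_ehash_size. A minimal userspace sketch of that indexing (EHASH_SIZE, the helper names, and the demo hash value are all illustrative, not kernel code):

/* Models the old split-table indexing in plain C. EHASH_SIZE is the
 * number of buckets in *each* half; the kernel allocated one array
 * of 2 * tcp_ehash_size buckets.
 */
#include <stdio.h>

#define EHASH_SIZE 256	/* assumed power of two, as in the kernel */

static unsigned int ehash_bucket(unsigned int hash)
{
	return hash & (EHASH_SIZE - 1);		/* established half */
}

static unsigned int ehash_tw_bucket(unsigned int hash)
{
	/* the TIME_WAIT twin lives one table-length away */
	return ehash_bucket(hash) + EHASH_SIZE;
}

int main(void)
{
	unsigned int h = 0xdeadbeef;

	printf("established bucket: %u\n", ehash_bucket(h));
	printf("time-wait bucket:   %u\n", ehash_tw_bucket(h));
	return 0;
}
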
-
-/* This is for listening sockets, thus all sockets which possess wildcards. */
-#define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
-
-/* There are a few simple rules, which allow for local port reuse by
- * an application. In essence:
- *
- * 1) Sockets bound to different interfaces may share a local port.
- * Failing that, goto test 2.
- * 2) If all sockets have sk->sk_reuse set, and none of them are in
- * TCP_LISTEN state, the port may be shared.
- * Failing that, goto test 3.
- * 3) If all sockets are bound to a specific inet_sk(sk)->rcv_saddr local
- * address, and none of them are the same, the port may be
- * shared.
- * Failing this, the port cannot be shared.
- *
- * The interesting point, is test #2. This is what an FTP server does
- * all day. To optimize this case we use a specific flag bit defined
- * below. As we add sockets to a bind bucket list, we perform a
- * check of: (newsk->sk_reuse && (newsk->sk_state != TCP_LISTEN))
- * As long as all sockets added to a bind bucket pass this test,
- * the flag bit will be set.
- * The resulting situation is that tcp_v[46]_verify_bind() can just check
- * for this flag bit, if it is set and the socket trying to bind has
- * sk->sk_reuse set, we don't even have to walk the owners list at all,
- * we return that it is ok to bind this socket to the requested local port.
- *
- * Sounds like a lot of work, but it is worth it. In a more naive
- * implementation (ie. current FreeBSD etc.) the entire list of ports
- * must be walked for each data port opened by an ftp server. Needless
- * to say, this does not scale at all. With a couple thousand FTP
- * users logged onto your box, isn't it nice to know that new data
- * ports are created in O(1) time? I thought so. ;-) -DaveM
- */
-struct tcp_bind_bucket {
- unsigned short port;
- signed short fastreuse;
- struct hlist_node node;
- struct hlist_head owners;
-};
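
The fastreuse optimisation described in the comment above is easy to get wrong, so here is a standalone sketch of the invariant it maintains: the flag is seeded by the first owner of a bind bucket and can only be cleared, never re-set, as owners accumulate. The fake_* types and helpers are simplified stand-ins, not the kernel's structures:

#include <stdbool.h>
#include <stdio.h>

struct fake_sock {
	bool reuse;	/* SO_REUSEADDR set */
	bool listening;	/* in TCP_LISTEN */
};

struct fake_bind_bucket {
	int owners;	/* sockets bound to this port */
	int fastreuse;	/* 1 while every owner passed the reuse test */
};

static bool sk_reusable(const struct fake_sock *sk)
{
	return sk->reuse && !sk->listening;
}

/* O(1) fast path: no walk of the owner list needed */
static bool may_share_port(const struct fake_bind_bucket *tb,
			   const struct fake_sock *sk)
{
	return tb->owners == 0 || (tb->fastreuse && sk_reusable(sk));
}

/* Models a successful bind, however it was validated (the real code
 * falls back to walking the owners for rules 1 and 3).
 */
static void add_owner(struct fake_bind_bucket *tb, const struct fake_sock *sk)
{
	if (tb->owners == 0)
		tb->fastreuse = sk_reusable(sk);	/* first owner seeds it */
	else if (tb->fastreuse && !sk_reusable(sk))
		tb->fastreuse = 0;			/* one bad apple clears it */
	tb->owners++;
}

int main(void)
{
	struct fake_bind_bucket tb = { 0, 0 };
	struct fake_sock ftp_data = { .reuse = true, .listening = false };
	struct fake_sock listener = { .reuse = true, .listening = true };

	add_owner(&tb, &ftp_data);
	printf("second data socket may fast-bind: %d\n",
	       may_share_port(&tb, &ftp_data));		/* 1 */
	add_owner(&tb, &listener);
	printf("after a listener joins:           %d\n",
	       may_share_port(&tb, &ftp_data));		/* 0 */
	return 0;
}
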
-
-#define tb_for_each(tb, node, head) hlist_for_each_entry(tb, node, head, node)
-
-struct tcp_bind_hashbucket {
- spinlock_t lock;
- struct hlist_head chain;
-};
-
-static inline struct tcp_bind_bucket *__tb_head(struct tcp_bind_hashbucket *head)
-{
- return hlist_entry(head->chain.first, struct tcp_bind_bucket, node);
-}
-
-static inline struct tcp_bind_bucket *tb_head(struct tcp_bind_hashbucket *head)
-{
- return hlist_empty(&head->chain) ? NULL : __tb_head(head);
-}
-
-extern struct tcp_hashinfo {
- /* This is for sockets with full identity only. Sockets here will
- * always be without wildcards and will have the following invariant:
- *
- * TCP_ESTABLISHED <= sk->sk_state < TCP_CLOSE
- *
- * First half of the table is for sockets not in TIME_WAIT, second half
- * is for TIME_WAIT sockets only.
- */
- struct tcp_ehash_bucket *__tcp_ehash;
-
- /* Ok, let's try this, I give up, we do need a local binding
- * TCP hash as well as the others for fast bind/connect.
- */
- struct tcp_bind_hashbucket *__tcp_bhash;
-
- int __tcp_bhash_size;
- int __tcp_ehash_size;
-
- /* All sockets in TCP_LISTEN state will be in here. This is the only
- * table where wildcard'd TCP sockets can exist. Hash function here
- * is just local port number.
- */
- struct hlist_head __tcp_listening_hash[TCP_LHTABLE_SIZE];
-
- /* All the above members are written once at bootup and
- * never written again _or_ are predominantly read-access.
- *
- * Now align to a new cache line as all the following members
- * are often dirty.
- */
- rwlock_t __tcp_lhash_lock ____cacheline_aligned;
- atomic_t __tcp_lhash_users;
- wait_queue_head_t __tcp_lhash_wait;
- spinlock_t __tcp_portalloc_lock;
-} tcp_hashinfo;
-
-#define tcp_ehash (tcp_hashinfo.__tcp_ehash)
-#define tcp_bhash (tcp_hashinfo.__tcp_bhash)
-#define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size)
-#define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size)
-#define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
-#define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock)
-#define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users)
-#define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait)
-#define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
-
-extern kmem_cache_t *tcp_bucket_cachep;
-extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
- unsigned short snum);
-extern void tcp_bucket_destroy(struct tcp_bind_bucket *tb);
-extern void tcp_bucket_unlock(struct sock *sk);
-extern int tcp_port_rover;
-
-/* These are AF independent. */
-static __inline__ int tcp_bhashfn(__u16 lport)
-{
- return (lport & (tcp_bhash_size - 1));
-}
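
tcp_bhashfn (like the inet_bhashfn that supersedes it in <net/inet_hashtables.h>) relies on the table size being a power of two, which turns the modulo into a mask. A quick userspace check of that equivalence (names here are illustrative):

#include <assert.h>
#include <stdio.h>

static int bhashfn(unsigned short lport, int bhash_size)
{
	return lport & (bhash_size - 1);	/* valid only for powers of two */
}

int main(void)
{
	const int bhash_size = 512;		/* assumed power of two */
	unsigned short port;

	for (port = 1024; port < 1034; port++)
		assert(bhashfn(port, bhash_size) == port % bhash_size);
	printf("port 8080 -> bucket %d\n", bhashfn(8080, bhash_size));
	return 0;
}
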
-
-extern void tcp_bind_hash(struct sock *sk, struct tcp_bind_bucket *tb,
- unsigned short snum);
+extern struct inet_hashinfo tcp_hashinfo;
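
For orientation, the per-protocol fields deleted above map onto the generalised struct inet_hashinfo. Going by the 2.6-era header, the correspondence is roughly as follows; treat it as an approximation and consult <net/inet_hashtables.h> for the authoritative layout:

/* Approximate field mapping onto the generalised structure:
 *
 *   tcp_hashinfo.__tcp_ehash          -> tcp_hashinfo.ehash
 *   tcp_hashinfo.__tcp_bhash          -> tcp_hashinfo.bhash
 *   tcp_hashinfo.__tcp_bhash_size     -> tcp_hashinfo.bhash_size
 *   tcp_hashinfo.__tcp_ehash_size     -> tcp_hashinfo.ehash_size
 *   tcp_hashinfo.__tcp_listening_hash -> tcp_hashinfo.listening_hash
 *   tcp_hashinfo.__tcp_lhash_lock     -> tcp_hashinfo.lhash_lock
 *   tcp_hashinfo.__tcp_lhash_users    -> tcp_hashinfo.lhash_users
 *   tcp_hashinfo.__tcp_lhash_wait     -> tcp_hashinfo.lhash_wait
 *   tcp_hashinfo.__tcp_portalloc_lock -> tcp_hashinfo.portalloc_lock
 */
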
#if (BITS_PER_LONG == 64)
#define TCP_ADDRCMP_ALIGN_BYTES 8
__u32 tw_ts_recent;
long tw_ts_recent_stamp;
unsigned long tw_ttd;
- struct tcp_bind_bucket *tw_tb;
+ struct inet_bind_bucket *tw_tb;
struct hlist_node tw_death_node;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr tw_v6_daddr;
static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
{
if (atomic_dec_and_test(&tw->tw_refcnt)) {
-#ifdef INET_REFCNT_DEBUG
+#ifdef SOCK_REFCNT_DEBUG
printk(KERN_DEBUG "tw_bucket %p released\n", tw);
#endif
kmem_cache_free(tcp_timewait_cachep, tw);
ipv6_addr_equal(&inet6_sk(__sk)->rcv_saddr, (__daddr)) && \
(!((__sk)->sk_bound_dev_if) || ((__sk)->sk_bound_dev_if == (__dif))))
-/* These can have wildcards, don't try too hard. */
-static __inline__ int tcp_lhashfn(unsigned short num)
-{
- return num & (TCP_LHTABLE_SIZE - 1);
-}
-
-static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
-{
- return tcp_lhashfn(inet_sk(sk)->num);
-}
-
#define MAX_TCP_HEADER (128 + MAX_HEADER)
/*
#define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val)
#define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val)
-extern void tcp_put_port(struct sock *sk);
-extern void tcp_inherit_port(struct sock *sk, struct sock *child);
-
extern void tcp_v4_err(struct sk_buff *skb, u32);
extern void tcp_shutdown (struct sock *sk, int how);
* TCP v4 functions exported for the inet6 API
*/
-extern int tcp_v4_rebuild_header(struct sock *sk);
-
-extern int tcp_v4_build_header(struct sock *sk,
- struct sk_buff *skb);
-
extern void tcp_v4_send_check(struct sock *sk,
struct tcphdr *th, int len,
struct sk_buff *skb);
TCP_INC_STATS(TCP_MIB_ESTABRESETS);
sk->sk_prot->unhash(sk);
- if (tcp_sk(sk)->bind_hash &&
+ if (inet_sk(sk)->bind_hash &&
!(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
- tcp_put_port(sk);
+ inet_put_port(&tcp_hashinfo, sk);
/* fall through */
default:
if (oldstate==TCP_ESTABLISHED)
extern void tcp_enter_memory_pressure(void);
-extern void tcp_listen_wlock(void);
-
-/* - We may sleep inside this lock.
- * - If sleeping is not required (or called from BH),
- * use plain read_(un)lock(&tcp_lhash_lock).
- */
-
-static inline void tcp_listen_lock(void)
-{
- /* read_lock synchronizes with candidates to writers */
- read_lock(&tcp_lhash_lock);
- atomic_inc(&tcp_lhash_users);
- read_unlock(&tcp_lhash_lock);
-}
-
-static inline void tcp_listen_unlock(void)
-{
- if (atomic_dec_and_test(&tcp_lhash_users))
- wake_up(&tcp_lhash_wait);
-}
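
The removed reader side only makes sense next to its writer: tcp_listen_wlock() takes the write lock and then sleeps until every reader counted in tcp_lhash_users has drained, which is why the comment above warns that the lock may sleep. A sketch of it, from memory of the 2.6 tree (the real definition lived in net/ipv4/tcp_ipv4.c; this is approximate, not verbatim):

/* Writer side: readers bump tcp_lhash_users under a transient
 * read_lock; the writer holds tcp_lhash_lock for writing and waits
 * for that count to reach zero before touching the listening hash.
 */
void tcp_listen_wlock(void)
{
	write_lock(&tcp_lhash_lock);

	if (atomic_read(&tcp_lhash_users)) {
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait_exclusive(&tcp_lhash_wait, &wait,
						  TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&tcp_lhash_users))
				break;
			/* drop the lock so readers can finish and wake us */
			write_unlock_bh(&tcp_lhash_lock);
			schedule();
			write_lock_bh(&tcp_lhash_lock);
		}

		finish_wait(&tcp_lhash_wait, &wait);
	}
}
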
-
static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
return 1;
}
-static inline void tcp_v4_setup_caps(struct sock *sk, struct dst_entry *dst)
-{
- sk->sk_route_caps = dst->dev->features;
- if (sk->sk_route_caps & NETIF_F_TSO) {
- if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
- sk->sk_route_caps &= ~NETIF_F_TSO;
- }
-}
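
tcp_v4_setup_caps() loses its TCP prefix rather than disappearing: if memory serves, the same logic reappears as the generic sk_setup_caps() in <net/sock.h>. A commented sketch of that successor (approximate, from memory) makes the removed body's intent explicit: inherit the route device's offload features, but drop TSO when the socket opted out or the path needs extra headers:

/* Sketch of the generic successor, not a verbatim copy: */
static inline void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
{
	__sk_dst_set(sk, dst);				/* remember the route */
	sk->sk_route_caps = dst->dev->features;		/* inherit offloads */
	if (sk->sk_route_caps & NETIF_F_TSO) {
		/* no TSO for opted-out sockets (SOCK_NO_LARGESEND) or
		 * encapsulated paths (dst->header_len, e.g. IPsec)
		 */
		if (sock_flag(sk, SOCK_NO_LARGESEND) || dst->header_len)
			sk->sk_route_caps &= ~NETIF_F_TSO;
	}
}
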
-
#define TCP_CHECK_TIMER(sk) do { } while (0)
static inline int tcp_use_frto(const struct sock *sk)