Merge tag 'please-pull-misc-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git...
[deliverable/linux.git] / net / ipv4 / tcp_ipv4.c
index d2a5763e5abcf47b8b81242e682ac790bb03c90b..3708de2a66833cf1d4a221a2b6ce3923bde978c4 100644 (file)
@@ -320,7 +320,7 @@ void tcp_req_err(struct sock *sk, u32 seq, bool abort)
         * an established socket here.
         */
        if (seq != tcp_rsk(req)->snt_isn) {
-               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
        } else if (abort) {
                /*
                 * Still in SYN_RECV, just remove it silently.
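This hunk is the first instance of the rename applied throughout the commit: the old *_BH statistics macros, which required bottom halves to be disabled already, become the double-underscore variants, while the plain names (NET_INC_STATS() etc.) are reserved for the preemption-safe form. A simplified sketch of the convention, condensed from what lands in include/net/snmp.h and include/net/ip.h (the real macros go through SNMP_INC_STATS()):

        /* Caller runs with BHs disabled (softirq receive path). */
        #define __NET_INC_STATS(net, field) \
                __this_cpu_inc((net)->mib.net_statistics->mibs[field])

        /* Safe from any context: this_cpu_inc() protects itself. */
        #define NET_INC_STATS(net, field) \
                this_cpu_inc((net)->mib.net_statistics->mibs[field])

tcp_req_err() is only reached from the ICMP receive path in softirq, so it keeps the cheaper double-underscore form; the same reasoning applies to the tcp_v4_err() hunks that follow.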
@@ -372,7 +372,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
                                       th->dest, iph->saddr, ntohs(th->source),
                                       inet_iif(icmp_skb));
        if (!sk) {
-               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
+               __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
@@ -396,13 +396,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
         */
        if (sock_owned_by_user(sk)) {
                if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
-                       NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
+                       __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
        }
        if (sk->sk_state == TCP_CLOSE)
                goto out;
 
        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-               NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+               __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }
 
@@ -413,7 +413,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
-               NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+               __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }
 
@@ -692,13 +692,15 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
                     offsetof(struct inet_timewait_sock, tw_bound_dev_if));
 
        arg.tos = ip_hdr(skb)->tos;
+       local_bh_disable();
        ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);
 
-       TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
-       TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
+       __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+       __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
+       local_bh_enable();
 
 #ifdef CONFIG_TCP_MD5SIG
 out:
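ip_send_unicast_reply() dereferences a per-cpu control socket (net->ipv4.tcp_sk), and the statistics bumps use the double-underscore forms; both require bottom halves to be off. Since tcp_v4_send_reset() can now be reached from process context, the sequence is explicitly bracketed with local_bh_disable()/local_bh_enable(), and the next hunk applies the same bracketing to tcp_v4_send_ack(). A condensed sketch of the pattern, using a hypothetical per-cpu slot (reply_sk and use_percpu_sk are illustrative, not from this file):

        static DEFINE_PER_CPU(struct sock *, reply_sk);

        static void use_percpu_sk(void)
        {
                struct sock *sk;

                local_bh_disable();             /* pin the CPU and keep   */
                sk = __this_cpu_read(reply_sk); /* softirq users of the   */
                                                /* same slot away         */
                /* ... build and send the reply on sk ... */
                local_bh_enable();
        }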
@@ -774,12 +776,14 @@ static void tcp_v4_send_ack(struct net *net,
        if (oif)
                arg.bound_dev_if = oif;
        arg.tos = tos;
+       local_bh_disable();
        ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);
 
-       TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
+       __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
+       local_bh_enable();
 }
 
 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
@@ -1151,12 +1155,12 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
                return false;
 
        if (hash_expected && !hash_location) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
                return true;
        }
 
        if (!hash_expected && hash_location) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
                return true;
        }
 
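Note the plain NET_INC_STATS() (no leading underscores) in this hunk, and likewise in the LISTENOVERFLOWS and csum_err hunks below: these counters sit on paths that can also execute in process context, for instance tcp_v4_do_rcv() running under the socket lock while the backlog is drained, so they take the preemption-safe variant. The distinction matters on architectures without a single-instruction per-cpu increment; an illustrative comparison (ctr is a hypothetical counter, not from this file):

        static DEFINE_PER_CPU(unsigned long, ctr);

        static void bump(void)
        {
                __this_cpu_inc(ctr);  /* raw read-modify-write: only valid if
                                       * the caller is already non-preemptible,
                                       * else the task could migrate between
                                       * the read and the write              */
                this_cpu_inc(ctr);    /* protects itself; safe anywhere      */
        }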
@@ -1342,7 +1346,7 @@ struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
        return newsk;
 
 exit_overflow:
-       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+       NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 exit_nonewsk:
        dst_release(dst);
 exit:
@@ -1432,8 +1436,8 @@ discard:
        return 0;
 
 csum_err:
-       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
-       TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
+       TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
+       TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
        goto discard;
 }
 EXPORT_SYMBOL(tcp_v4_do_rcv);
@@ -1506,16 +1510,16 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 
        __skb_queue_tail(&tp->ucopy.prequeue, skb);
        tp->ucopy.memory += skb->truesize;
-       if (tp->ucopy.memory > sk->sk_rcvbuf) {
+       if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
+           tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
                struct sk_buff *skb1;
 
                BUG_ON(sock_owned_by_user(sk));
+               __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
+                               skb_queue_len(&tp->ucopy.prequeue));
 
-               while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
+               while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                        sk_backlog_rcv(sk, skb1);
-                       NET_INC_STATS_BH(sock_net(sk),
-                                        LINUX_MIB_TCPPREQUEUEDROPPED);
-               }
 
                tp->ucopy.memory = 0;
        } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
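Two changes to the prequeue here. First, the flush condition is widened: the prequeue is drained not only on memory pressure (now measured as ucopy.memory plus sk_rmem_alloc against sk_rcvbuf) but also once it holds 32 packets, capping how many segments can pile up before a flush. Second, the TCPPREQUEUEDROPPED accounting is hoisted out of the drain loop into a single batched __NET_ADD_STATS() call. The transformation in isolation (names as in the hunk above, modulo the macro rename):

        /* before: one counter update per dequeued skb */
        while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
                sk_backlog_rcv(sk, skb1);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED);
        }

        /* after: one batched update, then a bare drain loop */
        __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
                        skb_queue_len(&tp->ucopy.prequeue));
        while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                sk_backlog_rcv(sk, skb1);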
@@ -1547,14 +1551,14 @@ int tcp_v4_rcv(struct sk_buff *skb)
                goto discard_it;
 
        /* Count it even if it's bad */
-       TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
+       __TCP_INC_STATS(net, TCP_MIB_INSEGS);
 
        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                goto discard_it;
 
-       th = tcp_hdr(skb);
+       th = (const struct tcphdr *)skb->data;
 
-       if (th->doff < sizeof(struct tcphdr) / 4)
+       if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
                goto bad_packet;
        if (!pskb_may_pull(skb, th->doff * 4))
                goto discard_it;
@@ -1567,7 +1571,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
        if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
                goto csum_error;
 
-       th = tcp_hdr(skb);
+       th = (const struct tcphdr *)skb->data;
        iph = ip_hdr(skb);
        /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
         * barrier() makes sure compiler wont play fool^Waliasing games.
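Two related tweaks in the header parsing above. The cast (const struct tcphdr *)skb->data replaces tcp_hdr(): tcp_hdr() resolves to skb->head + skb->transport_header, and on entry to tcp_v4_rcv() the transport header is exactly skb->data, so the direct load is equivalent here and avoids the offset arithmetic. The pointer still has to be re-read after the second pskb_may_pull(), since pulling can reallocate the skb head and move skb->data. The equivalence being relied on (sketch, valid only at this point in the receive path):

        th = tcp_hdr(skb);                      /* skb->head + offset     */
        th = (const struct tcphdr *)skb->data;  /* direct, no offset math */

The added unlikely() on the doff sanity check simply marks the malformed-header branch as cold for the compiler.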
@@ -1629,7 +1633,7 @@ process:
                }
        }
        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
-               NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
+               __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                goto discard_and_relse;
        }
 
@@ -1662,7 +1666,7 @@ process:
        } else if (unlikely(sk_add_backlog(sk, skb,
                                           sk->sk_rcvbuf + sk->sk_sndbuf))) {
                bh_unlock_sock(sk);
-               NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
+               __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
                goto discard_and_relse;
        }
        bh_unlock_sock(sk);
@@ -1679,9 +1683,9 @@ no_tcp_socket:
 
        if (tcp_checksum_complete(skb)) {
 csum_error:
-               TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
+               __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
 bad_packet:
-               TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
+               __TCP_INC_STATS(net, TCP_MIB_INERRS);
        } else {
                tcp_v4_send_reset(NULL, skb);
        }
@@ -1835,7 +1839,9 @@ void tcp_v4_destroy_sock(struct sock *sk)
        tcp_free_fastopen_req(tp);
        tcp_saved_syn_free(tp);
 
+       local_bh_disable();
        sk_sockets_allocated_dec(sk);
+       local_bh_enable();
 
        if (mem_cgroup_sockets_enabled && sk->sk_memcg)
                sock_release_memcg(sk);
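Finally, sk_sockets_allocated_dec() updates a per-cpu counter (via percpu_counter) that is also modified from softirq elsewhere in the stack. percpu_counter updates disable preemption but not bottom halves, so with tcp_v4_destroy_sock() running in process context a softirq firing mid-update on the same CPU could corrupt the counter; hence the local_bh_disable()/local_bh_enable() pair. The general pattern, as a standalone hypothetical sketch (my_counter is illustrative, not from this file):

        static DEFINE_PER_CPU(long, my_counter);

        /* Process-context update of a per-cpu value that softirq code
         * also touches: disabling BHs makes the read-modify-write
         * atomic with respect to softirqs on this CPU.
         */
        static void my_counter_dec_process_ctx(void)
        {
                local_bh_disable();
                __this_cpu_dec(my_counter);
                local_bh_enable();
        }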