[TCP]: Remove superfluous FLAG_DATA_SACKED
[deliverable/linux.git] / net / ipv4 / tcp_input.c
index 5e01ac2c003cffa94fb59d5196cfce8aa2460dc8..871110842809d0c61dcc30eb1084b4763c8b927a 100644 (file)
@@ -1118,12 +1118,11 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
  * highest SACK block). Also calculate the lowest snd_nxt among the remaining
  * retransmitted skbs to avoid some costly processing per ACKs.
  */
-static int tcp_mark_lost_retrans(struct sock *sk)
+static void tcp_mark_lost_retrans(struct sock *sk)
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
-       int flag = 0;
        int cnt = 0;
        u32 new_low_seq = tp->snd_nxt;
        u32 received_upto = TCP_SKB_CB(tp->highest_sack)->end_seq;
@@ -1131,7 +1130,7 @@ static int tcp_mark_lost_retrans(struct sock *sk)
        if (!tcp_is_fack(tp) || !tp->retrans_out ||
            !after(received_upto, tp->lost_retrans_low) ||
            icsk->icsk_ca_state != TCP_CA_Recovery)
-               return flag;
+               return;
 
        tcp_for_write_queue(skb, sk) {
                u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
@@ -1159,9 +1158,8 @@ static int tcp_mark_lost_retrans(struct sock *sk)
                        if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
                                tp->lost_out += tcp_skb_pcount(skb);
                                TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
-                               flag |= FLAG_DATA_SACKED;
-                               NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
                        }
+                       NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
                } else {
                        if (before(ack_seq, new_low_seq))
                                new_low_seq = ack_seq;
@@ -1171,8 +1169,6 @@ static int tcp_mark_lost_retrans(struct sock *sk)
 
        if (tp->retrans_out)
                tp->lost_retrans_low = new_low_seq;
-
-       return flag;
 }
 
 static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
@@ -1240,6 +1236,181 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
        return in_sack;
 }
 
+/* Apply SACK/D-SACK tagging to a single skb and update the scoreboard
+ * counters (sacked_out, lost_out, retrans_out), the reordering metric
+ * via *reord, and the lost/retransmit hint fields.  dup_sack != 0 means
+ * the covering SACK block was a D-SACK.  Returns a FLAG_* bitmask
+ * (FLAG_DATA_SACKED and/or FLAG_ONLY_ORIG_SACKED) describing what the
+ * caller should account for.
+ */
+static int tcp_sacktag_one(struct sk_buff *skb, struct tcp_sock *tp,
+                          int *reord, int dup_sack, int fack_count)
+{
+       u8 sacked = TCP_SKB_CB(skb)->sacked;
+       int flag = 0;
+
+       /* Account D-SACK for retransmitted packet. */
+       if (dup_sack && (sacked & TCPCB_RETRANS)) {
+               if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+                       tp->undo_retrans--;
+               if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una) &&
+                   (sacked & TCPCB_SACKED_ACKED))
+                       *reord = min(fack_count, *reord);
+       }
+
+       /* Nothing to do; acked frame is about to be dropped (was ACKed). */
+       if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+               return flag;
+
+       if (!(sacked & TCPCB_SACKED_ACKED)) {
+               if (sacked & TCPCB_SACKED_RETRANS) {
+                       /* If the segment is not tagged as lost,
+                        * we do not clear RETRANS, believing
+                        * that retransmission is still in flight.
+                        */
+                       if (sacked & TCPCB_LOST) {
+                               TCP_SKB_CB(skb)->sacked &=
+                                       ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
+                               tp->lost_out -= tcp_skb_pcount(skb);
+                               tp->retrans_out -= tcp_skb_pcount(skb);
+
+                               /* clear lost hint */
+                               tp->retransmit_skb_hint = NULL;
+                       }
+               } else {
+                       if (!(sacked & TCPCB_RETRANS)) {
+                               /* New sack for not retransmitted frame,
+                                * which was in hole. It is reordering.
+                                */
+                               if (before(TCP_SKB_CB(skb)->seq,
+                                          tcp_highest_sack_seq(tp)))
+                                       *reord = min(fack_count, *reord);
+
+                               /* SACK enhanced F-RTO (RFC4138; Appendix B) */
+                               if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
+                                       flag |= FLAG_ONLY_ORIG_SACKED;
+                       }
+
+                       if (sacked & TCPCB_LOST) {
+                               TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
+                               tp->lost_out -= tcp_skb_pcount(skb);
+
+                               /* clear lost hint */
+                               tp->retransmit_skb_hint = NULL;
+                       }
+               }
+
+               TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
+               flag |= FLAG_DATA_SACKED;
+               tp->sacked_out += tcp_skb_pcount(skb);
+
+               fack_count += tcp_skb_pcount(skb);
+
+               /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
+               if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
+                   before(TCP_SKB_CB(skb)->seq,
+                          TCP_SKB_CB(tp->lost_skb_hint)->seq))
+                       tp->lost_cnt_hint += tcp_skb_pcount(skb);
+
+               if (fack_count > tp->fackets_out)
+                       tp->fackets_out = fack_count;
+
+               if (after(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
+                       tp->highest_sack = skb;
+
+       } else {
+               if (dup_sack && (sacked & TCPCB_RETRANS))
+                       *reord = min(fack_count, *reord);
+       }
+
+       /* D-SACK. We can detect redundant retransmission in S|R and plain R
+        * frames and clear it. undo_retrans is decreased above, L|R frames
+        * are accounted above as well.
+        */
+       if (dup_sack && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)) {
+               TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
+               tp->retrans_out -= tcp_skb_pcount(skb);
+               tp->retransmit_skb_hint = NULL;
+       }
+
+       return flag;
+}
+
+/* Walk the write queue starting at skb and run tcp_sacktag_one() on
+ * every skb that tcp_match_skb_to_sack() places inside
+ * [start_seq, end_seq).  A pending D-SACK block in next_dup (may be
+ * NULL) upgrades matching skbs to dup_sack handling.  The walk stops at
+ * the send head or once past end_seq (queue is in sequence order).
+ * Accumulates into the caller's *fack_count, *reord and *flag, and
+ * returns the first skb left unprocessed so the caller can resume there.
+ */
+static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
+                                       struct tcp_sack_block *next_dup,
+                                       u32 start_seq, u32 end_seq,
+                                       int dup_sack_in, int *fack_count,
+                                       int *reord, int *flag)
+{
+       struct tcp_sock *tp = tcp_sk(sk);
+
+       tcp_for_write_queue_from(skb, sk) {
+               int in_sack = 0;
+               int dup_sack = dup_sack_in;
+
+               if (skb == tcp_send_head(sk))
+                       break;
+
+               /* queue is in-order => we can short-circuit the walk early */
+               if (!before(TCP_SKB_CB(skb)->seq, end_seq))
+                       break;
+
+               if ((next_dup != NULL) &&
+                   before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) {
+                       in_sack = tcp_match_skb_to_sack(sk, skb,
+                                                       next_dup->start_seq,
+                                                       next_dup->end_seq);
+                       if (in_sack > 0)
+                               dup_sack = 1;
+               }
+
+               if (in_sack <= 0)
+                       in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
+               if (unlikely(in_sack < 0))
+                       break;
+
+               if (in_sack)
+                       *flag |= tcp_sacktag_one(skb, tp, reord, dup_sack, *fack_count);
+
+               *fack_count += tcp_skb_pcount(skb);
+       }
+       return skb;
+}
+
+/* Avoid all extra work that is being done by sacktag while walking in
+ * a normal way: simply advance skb along the write queue until it
+ * reaches skip_to_seq (or the send head), without touching any
+ * SACK state.  Returns the first skb whose end_seq is not before
+ * skip_to_seq.
+ */
+static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk,
+                                       u32 skip_to_seq)
+{
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
+
+               if (!before(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
+                       break;
+       }
+       return skb;
+}
+
+/* If the pending D-SACK block (next_dup, may be NULL) begins before
+ * skip_to_seq, process it now: skip forward to its start and tag its
+ * range with dup_sack forced on, folding results into the caller's
+ * fack_count/reord/flag accumulators.  Returns the (possibly advanced)
+ * skb from which the main walk should continue.
+ */
+static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb,
+                                               struct sock *sk,
+                                               struct tcp_sack_block *next_dup,
+                                               u32 skip_to_seq,
+                                               int *fack_count, int *reord,
+                                               int *flag)
+{
+       if (next_dup == NULL)
+               return skb;
+
+       if (before(next_dup->start_seq, skip_to_seq)) {
+               skb = tcp_sacktag_skip(skb, sk, next_dup->start_seq);
+               tcp_sacktag_walk(skb, sk, NULL,
+                                next_dup->start_seq, next_dup->end_seq,
+                                1, fack_count, reord, flag);
+       }
+
+       return skb;
+}
+
+/* True while the cache cursor still points within tp->recv_sack_cache[]
+ * (i.e. there are cached SACK blocks left to compare against).
+ */
+static int tcp_sack_cache_ok(struct tcp_sock *tp, struct tcp_sack_block *cache)
+{
+       return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
+}
+
 static int
 tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 {
@@ -1247,16 +1418,18 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        struct tcp_sock *tp = tcp_sk(sk);
        unsigned char *ptr = (skb_transport_header(ack_skb) +
                              TCP_SKB_CB(ack_skb)->sacked);
-       struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
-       struct sk_buff *cached_skb;
+       struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2);
+       struct tcp_sack_block sp[4];
+       struct tcp_sack_block *cache;
+       struct sk_buff *skb;
        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
+       int used_sacks;
        int reord = tp->packets_out;
        int flag = 0;
        int found_dup_sack = 0;
-       int cached_fack_count;
-       int i;
+       int fack_count;
+       int i, j;
        int first_sack_index;
-       int force_one_sack;
 
        if (!tp->sacked_out) {
                if (WARN_ON(tp->fackets_out))
@@ -1264,7 +1437,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                tp->highest_sack = tcp_write_queue_head(sk);
        }
 
-       found_dup_sack = tcp_check_dsack(tp, ack_skb, sp,
+       found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire,
                                         num_sacks, prior_snd_una);
        if (found_dup_sack)
                flag |= FLAG_DSACKING_ACK;
@@ -1279,78 +1452,17 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        if (!tp->packets_out)
                goto out;
 
-       /* SACK fastpath:
-        * if the only SACK change is the increase of the end_seq of
-        * the first block then only apply that SACK block
-        * and use retrans queue hinting otherwise slowpath */
-       force_one_sack = 1;
-       for (i = 0; i < num_sacks; i++) {
-               __be32 start_seq = sp[i].start_seq;
-               __be32 end_seq = sp[i].end_seq;
-
-               if (i == 0) {
-                       if (tp->recv_sack_cache[i].start_seq != start_seq)
-                               force_one_sack = 0;
-               } else {
-                       if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
-                           (tp->recv_sack_cache[i].end_seq != end_seq))
-                               force_one_sack = 0;
-               }
-               tp->recv_sack_cache[i].start_seq = start_seq;
-               tp->recv_sack_cache[i].end_seq = end_seq;
-       }
-       /* Clear the rest of the cache sack blocks so they won't match mistakenly. */
-       for (; i < ARRAY_SIZE(tp->recv_sack_cache); i++) {
-               tp->recv_sack_cache[i].start_seq = 0;
-               tp->recv_sack_cache[i].end_seq = 0;
-       }
-
+       used_sacks = 0;
        first_sack_index = 0;
-       if (force_one_sack)
-               num_sacks = 1;
-       else {
-               int j;
-               tp->fastpath_skb_hint = NULL;
-
-               /* order SACK blocks to allow in order walk of the retrans queue */
-               for (i = num_sacks-1; i > 0; i--) {
-                       for (j = 0; j < i; j++){
-                               if (after(ntohl(sp[j].start_seq),
-                                         ntohl(sp[j+1].start_seq))){
-                                       struct tcp_sack_block_wire tmp;
-
-                                       tmp = sp[j];
-                                       sp[j] = sp[j+1];
-                                       sp[j+1] = tmp;
-
-                                       /* Track where the first SACK block goes to */
-                                       if (j == first_sack_index)
-                                               first_sack_index = j+1;
-                               }
-
-                       }
-               }
-       }
-
-       /* Use SACK fastpath hint if valid */
-       cached_skb = tp->fastpath_skb_hint;
-       cached_fack_count = tp->fastpath_cnt_hint;
-       if (!cached_skb) {
-               cached_skb = tcp_write_queue_head(sk);
-               cached_fack_count = 0;
-       }
-
        for (i = 0; i < num_sacks; i++) {
-               struct sk_buff *skb;
-               __u32 start_seq = ntohl(sp->start_seq);
-               __u32 end_seq = ntohl(sp->end_seq);
-               int fack_count;
-               int dup_sack = (found_dup_sack && (i == first_sack_index));
-               int next_dup = (found_dup_sack && (i+1 == first_sack_index));
+               int dup_sack = !i && found_dup_sack;
 
-               sp++;
+               sp[used_sacks].start_seq = ntohl(get_unaligned(&sp_wire[i].start_seq));
+               sp[used_sacks].end_seq = ntohl(get_unaligned(&sp_wire[i].end_seq));
 
-               if (!tcp_is_sackblock_valid(tp, dup_sack, start_seq, end_seq)) {
+               if (!tcp_is_sackblock_valid(tp, dup_sack,
+                                           sp[used_sacks].start_seq,
+                                           sp[used_sacks].end_seq)) {
                        if (dup_sack) {
                                if (!tp->undo_marker)
                                        NET_INC_STATS_BH(LINUX_MIB_TCPDSACKIGNOREDNOUNDO);
@@ -1359,166 +1471,135 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                        } else {
                                /* Don't count olds caused by ACK reordering */
                                if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) &&
-                                   !after(end_seq, tp->snd_una))
+                                   !after(sp[used_sacks].end_seq, tp->snd_una))
                                        continue;
                                NET_INC_STATS_BH(LINUX_MIB_TCPSACKDISCARD);
                        }
+                       if (i == 0)
+                               first_sack_index = -1;
                        continue;
                }
 
-               skb = cached_skb;
-               fack_count = cached_fack_count;
+               /* Ignore very old stuff early */
+               if (!after(sp[used_sacks].end_seq, prior_snd_una))
+                       continue;
 
-               /* Event "B" in the comment above. */
-               if (after(end_seq, tp->high_seq))
-                       flag |= FLAG_DATA_LOST;
+               used_sacks++;
+       }
 
-               tcp_for_write_queue_from(skb, sk) {
-                       int in_sack = 0;
-                       u8 sacked;
+       /* order SACK blocks to allow in order walk of the retrans queue */
+       for (i = used_sacks - 1; i > 0; i--) {
+               for (j = 0; j < i; j++){
+                       if (after(sp[j].start_seq, sp[j+1].start_seq)) {
+                               struct tcp_sack_block tmp;
 
-                       if (skb == tcp_send_head(sk))
-                               break;
+                               tmp = sp[j];
+                               sp[j] = sp[j+1];
+                               sp[j+1] = tmp;
 
-                       cached_skb = skb;
-                       cached_fack_count = fack_count;
-                       if (i == first_sack_index) {
-                               tp->fastpath_skb_hint = skb;
-                               tp->fastpath_cnt_hint = fack_count;
+                               /* Track where the first SACK block goes to */
+                               if (j == first_sack_index)
+                                       first_sack_index = j+1;
                        }
+               }
+       }
 
-                       /* The retransmission queue is always in order, so
-                        * we can short-circuit the walk early.
-                        */
-                       if (!before(TCP_SKB_CB(skb)->seq, end_seq))
-                               break;
+       skb = tcp_write_queue_head(sk);
+       fack_count = 0;
+       i = 0;
 
-                       dup_sack = (found_dup_sack && (i == first_sack_index));
+       if (!tp->sacked_out) {
+               /* It's already past, so skip checking against it */
+               cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache);
+       } else {
+               cache = tp->recv_sack_cache;
+               /* Skip empty blocks at the head of the cache */
+               while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq &&
+                      !cache->end_seq)
+                       cache++;
+       }
 
-                       /* Due to sorting DSACK may reside within this SACK block! */
-                       if (next_dup) {
-                               u32 dup_start = ntohl(sp->start_seq);
-                               u32 dup_end = ntohl(sp->end_seq);
+       while (i < used_sacks) {
+               u32 start_seq = sp[i].start_seq;
+               u32 end_seq = sp[i].end_seq;
+               int dup_sack = (found_dup_sack && (i == first_sack_index));
+               struct tcp_sack_block *next_dup = NULL;
 
-                               if (before(TCP_SKB_CB(skb)->seq, dup_end)) {
-                                       in_sack = tcp_match_skb_to_sack(sk, skb, dup_start, dup_end);
-                                       if (in_sack > 0)
-                                               dup_sack = 1;
-                               }
-                       }
+               if (found_dup_sack && ((i + 1) == first_sack_index))
+                       next_dup = &sp[i + 1];
 
-                       /* DSACK info lost if out-of-mem, try SACK still */
-                       if (in_sack <= 0)
-                               in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq);
-                       if (unlikely(in_sack < 0))
-                               break;
+               /* Event "B" in the comment above. */
+               if (after(end_seq, tp->high_seq))
+                       flag |= FLAG_DATA_LOST;
 
-                       if (!in_sack) {
-                               fack_count += tcp_skb_pcount(skb);
-                               continue;
+               /* Skip too early cached blocks */
+               while (tcp_sack_cache_ok(tp, cache) &&
+                      !before(start_seq, cache->end_seq))
+                       cache++;
+
+               /* Can skip some work by looking recv_sack_cache? */
+               if (tcp_sack_cache_ok(tp, cache) && !dup_sack &&
+                   after(end_seq, cache->start_seq)) {
+
+                       /* Head todo? */
+                       if (before(start_seq, cache->start_seq)) {
+                               skb = tcp_sacktag_skip(skb, sk, start_seq);
+                               skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq,
+                                                      cache->start_seq, dup_sack,
+                                                      &fack_count, &reord, &flag);
                        }
 
-                       sacked = TCP_SKB_CB(skb)->sacked;
-
-                       /* Account D-SACK for retransmitted packet. */
-                       if (dup_sack && (sacked & TCPCB_RETRANS)) {
-                               if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
-                                       tp->undo_retrans--;
-                               if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una) &&
-                                   (sacked & TCPCB_SACKED_ACKED))
-                                       reord = min(fack_count, reord);
-                       }
+                       /* Rest of the block already fully processed? */
+                       if (!after(end_seq, cache->end_seq))
+                               goto advance_sp;
 
+                       skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, cache->end_seq,
+                                                      &fack_count, &reord, &flag);
 
-                       /* Nothing to do; acked frame is about to be dropped (was ACKed). */
-                       if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) {
-                               fack_count += tcp_skb_pcount(skb);
-                               continue;
+                       /* ...tail remains todo... */
+                       if (TCP_SKB_CB(tp->highest_sack)->end_seq == cache->end_seq) {
+                               /* ...but better entrypoint exists! */
+                               skb = tcp_write_queue_next(sk, tp->highest_sack);
+                               fack_count = tp->fackets_out;
+                               cache++;
+                               goto walk;
                        }
 
-                       if (!(sacked&TCPCB_SACKED_ACKED)) {
-                               if (sacked & TCPCB_SACKED_RETRANS) {
-                                       /* If the segment is not tagged as lost,
-                                        * we do not clear RETRANS, believing
-                                        * that retransmission is still in flight.
-                                        */
-                                       if (sacked & TCPCB_LOST) {
-                                               TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
-                                               tp->lost_out -= tcp_skb_pcount(skb);
-                                               tp->retrans_out -= tcp_skb_pcount(skb);
-
-                                               /* clear lost hint */
-                                               tp->retransmit_skb_hint = NULL;
-                                       }
-                               } else {
-                                       if (!(sacked & TCPCB_RETRANS)) {
-                                               /* New sack for not retransmitted frame,
-                                                * which was in hole. It is reordering.
-                                                */
-                                               if (before(TCP_SKB_CB(skb)->seq,
-                                                          tcp_highest_sack_seq(tp)))
-                                                       reord = min(fack_count, reord);
-
-                                               /* SACK enhanced F-RTO (RFC4138; Appendix B) */
-                                               if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
-                                                       flag |= FLAG_ONLY_ORIG_SACKED;
-                                       }
-
-                                       if (sacked & TCPCB_LOST) {
-                                               TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
-                                               tp->lost_out -= tcp_skb_pcount(skb);
-
-                                               /* clear lost hint */
-                                               tp->retransmit_skb_hint = NULL;
-                                       }
-                               }
-
-                               TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_ACKED;
-                               flag |= FLAG_DATA_SACKED;
-                               tp->sacked_out += tcp_skb_pcount(skb);
-
-                               fack_count += tcp_skb_pcount(skb);
-
-                               /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
-                               if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
-                                   before(TCP_SKB_CB(skb)->seq,
-                                          TCP_SKB_CB(tp->lost_skb_hint)->seq))
-                                       tp->lost_cnt_hint += tcp_skb_pcount(skb);
-
-                               if (fack_count > tp->fackets_out)
-                                       tp->fackets_out = fack_count;
-
-                               if (after(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
-                                       tp->highest_sack = skb;
-
-                       } else {
-                               if (dup_sack && (sacked&TCPCB_RETRANS))
-                                       reord = min(fack_count, reord);
-
-                               fack_count += tcp_skb_pcount(skb);
-                       }
+                       skb = tcp_sacktag_skip(skb, sk, cache->end_seq);
+                       /* Check overlap against next cached too (past this one already) */
+                       cache++;
+                       continue;
+               }
 
-                       /* D-SACK. We can detect redundant retransmission
-                        * in S|R and plain R frames and clear it.
-                        * undo_retrans is decreased above, L|R frames
-                        * are accounted above as well.
-                        */
-                       if (dup_sack &&
-                           (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS)) {
-                               TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
-                               tp->retrans_out -= tcp_skb_pcount(skb);
-                               tp->retransmit_skb_hint = NULL;
-                       }
+               if (tp->sacked_out && !before(start_seq, tcp_highest_sack_seq(tp))) {
+                       skb = tcp_write_queue_next(sk, tp->highest_sack);
+                       fack_count = tp->fackets_out;
                }
+               skb = tcp_sacktag_skip(skb, sk, start_seq);
+
+walk:
+               skb = tcp_sacktag_walk(skb, sk, next_dup, start_seq, end_seq,
+                                      dup_sack, &fack_count, &reord, &flag);
 
+advance_sp:
                /* SACK enhanced FRTO (RFC4138, Appendix B): Clearing correct
                 * due to in-order walk
                 */
                if (after(end_seq, tp->frto_highmark))
                        flag &= ~FLAG_ONLY_ORIG_SACKED;
+
+               i++;
+       }
+
+       /* Clear the head of the cache sack blocks so we can skip it next time */
+       for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) {
+               tp->recv_sack_cache[i].start_seq = 0;
+               tp->recv_sack_cache[i].end_seq = 0;
        }
+       for (j = 0; j < used_sacks; j++)
+               tp->recv_sack_cache[i++] = sp[j];
 
-       flag |= tcp_mark_lost_retrans(sk);
+       tcp_mark_lost_retrans(sk);
 
        tcp_verify_left_out(tp);
 
@@ -2804,9 +2885,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, s32 *seq_rtt_p,
                }
 
                tp->fackets_out -= min(pkts_acked, tp->fackets_out);
-               /* hint's skb might be NULL but we don't need to care */
-               tp->fastpath_cnt_hint -= min_t(u32, pkts_acked,
-                                              tp->fastpath_cnt_hint);
+
                if (ca_ops->pkts_acked) {
                        s32 rtt_us = -1;
 
@@ -3512,9 +3591,9 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
                /* Do not send POLL_HUP for half duplex close. */
                if (sk->sk_shutdown == SHUTDOWN_MASK ||
                    sk->sk_state == TCP_CLOSE)
-                       sk_wake_async(sk, 1, POLL_HUP);
+                       sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
                else
-                       sk_wake_async(sk, 1, POLL_IN);
+                       sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
        }
 }
 
@@ -4873,7 +4952,7 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 
                if (!sock_flag(sk, SOCK_DEAD)) {
                        sk->sk_state_change(sk);
-                       sk_wake_async(sk, 0, POLL_OUT);
+                       sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
                }
 
                if (sk->sk_write_pending ||
@@ -5103,9 +5182,9 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                                 * are not waked up, because sk->sk_sleep ==
                                 * NULL and sk->sk_socket == NULL.
                                 */
-                               if (sk->sk_socket) {
-                                       sk_wake_async(sk,0,POLL_OUT);
-                               }
+                               if (sk->sk_socket)
+                                       sk_wake_async(sk,
+                                                       SOCK_WAKE_IO, POLL_OUT);
 
                                tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
                                tp->snd_wnd = ntohs(th->window) <<
This page took 0.032576 seconds and 5 git commands to generate.