* highest SACK block). Also calculate the lowest snd_nxt among the remaining
* retransmitted skbs to avoid some costly processing per ACK.
*/
-static int tcp_mark_lost_retrans(struct sock *sk)
+static void tcp_mark_lost_retrans(struct sock *sk)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- int flag = 0;
int cnt = 0;
u32 new_low_seq = tp->snd_nxt;
u32 received_upto = TCP_SKB_CB(tp->highest_sack)->end_seq;
if (!tcp_is_fack(tp) || !tp->retrans_out ||
!after(received_upto, tp->lost_retrans_low) ||
icsk->icsk_ca_state != TCP_CA_Recovery)
- return flag;
+ return;
tcp_for_write_queue(skb, sk) {
u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) {
tp->lost_out += tcp_skb_pcount(skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
- flag |= FLAG_DATA_SACKED;
- NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
}
+ NET_INC_STATS_BH(LINUX_MIB_TCPLOSTRETRANSMIT);
} else {
if (before(ack_seq, new_low_seq))
new_low_seq = ack_seq;
if (tp->retrans_out)
tp->lost_retrans_low = new_low_seq;
-
- return flag;
}
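
Note on the hunk above: tcp_mark_lost_retrans() no longer reports
FLAG_DATA_SACKED to its caller, and LINUX_MIB_TCPLOSTRETRANSMIT is now
bumped for every lost retransmission it detects, not only for skbs that
were newly marked TCPCB_LOST. All of the sequence tests in this function
(and in the hunks below) rely on the kernel's wrap-safe comparison
helpers; for reference, from include/net/tcp.h (unchanged by this series):

static inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1 - seq2) < 0;
}
#define after(seq2, seq1)	before(seq1, seq2)

Casting the difference to a signed 32-bit value keeps the comparison
correct across sequence-number wraparound, as long as the two values are
less than 2^31 apart.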
if (skb == tcp_send_head(sk))
break;
- if (before(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
+ if (!before(TCP_SKB_CB(skb)->end_seq, skip_to_seq))
break;
}
return skb;
}
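
The one-character change above fixes an inverted test in the SACK walk's
skip helper (tcp_sacktag_skip() in this tree): the old "before(...)" test
broke out of the loop at the first skb still wholly below skip_to_seq,
i.e. immediately, so the helper never skipped anything. A minimal
userspace sketch of the fixed loop; the array, demo_skip() and main() are
illustrative stand-ins, not kernel code:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

/* mirrors the kernel's wrap-safe before() */
static int before(u32 seq1, u32 seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

/* stand-in for tcp_sacktag_skip(): walk until end_seq reaches skip_to_seq */
static size_t demo_skip(const u32 *end_seq, size_t n, u32 skip_to_seq)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (!before(end_seq[i], skip_to_seq))	/* the fixed test */
			break;
	return i;
}

int main(void)
{
	u32 q[] = { 1000, 2000, 3000, 4000 };	/* per-skb end_seq values */

	/* Prints 2: the first two skbs end below 2500 and are skipped.
	 * With the old "before(...)" test the loop stopped at index 0.
	 */
	printf("skip to 2500 -> index %zu\n", demo_skip(q, 4, 2500));
	return 0;
}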
/* Rest of the block already fully processed? */
- if (!after(end_seq, cache->end_seq)) {
- skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, cache->end_seq,
- &fack_count, &reord, &flag);
+ if (!after(end_seq, cache->end_seq))
goto advance_sp;
- }
+
+ skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, cache->end_seq,
+ &fack_count, &reord, &flag);
/* ...tail remains todo... */
if (TCP_SKB_CB(tp->highest_sack)->end_seq == cache->end_seq) {
- /* ...but better entrypoint exists! Check that DSACKs are
- * properly accounted while skipping here
- */
- tcp_maybe_skipping_dsack(skb, sk, next_dup, cache->end_seq,
- &fack_count, &reord, &flag);
-
+ /* ...but better entrypoint exists! */
skb = tcp_write_queue_next(sk, tp->highest_sack);
fack_count = tp->fackets_out;
cache++;
continue;
}
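
Net effect of the two hunks above: the DSACK-aware skip over the cached
block is now done exactly once, up front, for both the fully-processed
case and the tail-remains case, instead of once in the fully-processed
branch and a second, return-value-discarding call in the
better-entrypoint branch. A compact sketch of the two cases being
distinguished; the enum and classify() are inventions of this annotation,
not kernel code:

#include <stdint.h>

typedef uint32_t u32;
#define before(seq1, seq2)	((int32_t)((seq1) - (seq2)) < 0)
#define after(seq2, seq1)	before(seq1, seq2)

enum sack_cache_case {
	CACHE_COVERS_BLOCK,	/* skip with DSACK accounting, advance_sp */
	TAIL_REMAINS,		/* skip with DSACK accounting, keep tagging */
};

static enum sack_cache_case classify(u32 end_seq, u32 cache_end_seq)
{
	return !after(end_seq, cache_end_seq) ? CACHE_COVERS_BLOCK
					      : TAIL_REMAINS;
}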
- if (!before(start_seq, tcp_highest_sack_seq(tp))) {
+ if (tp->sacked_out && !before(start_seq, tcp_highest_sack_seq(tp))) {
skb = tcp_write_queue_next(sk, tp->highest_sack);
fack_count = tp->fackets_out;
}
for (j = 0; j < used_sacks; j++)
tp->recv_sack_cache[i++] = sp[j];
- flag |= tcp_mark_lost_retrans(sk);
+ tcp_mark_lost_retrans(sk);
tcp_verify_left_out(tp);
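
With tcp_mark_lost_retrans() made void, its call site above stops
accumulating flags, and tcp_verify_left_out() immediately sanity-checks
the sacked_out/lost_out bookkeeping the function may have touched.
Paraphrased from this era's tree (tcp_left_out() in include/net/tcp.h,
the WARN_ON wrapper in tcp_input.c):

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)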
/* Do not send POLL_HUP for half duplex close. */
if (sk->sk_shutdown == SHUTDOWN_MASK ||
sk->sk_state == TCP_CLOSE)
- sk_wake_async(sk, 1, POLL_HUP);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
else
- sk_wake_async(sk, 1, POLL_IN);
+ sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
}
}
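
This and the remaining hunks below replace sock_wake_async()'s bare
numeric "how" argument with named constants: reader wakeups (POLL_HUP,
POLL_IN) become SOCK_WAKE_WAITD and write-readiness wakeups become
SOCK_WAKE_IO. Paraphrased from the enum this series adds to
include/linux/net.h; the comments mapping each value to the old magic
number are this annotation's, not the kernel's:

enum {
	SOCK_WAKE_IO,		/* was "how" == 0: ordinary I/O readiness */
	SOCK_WAKE_WAITD,	/* was "how" == 1: wake a data-waiting reader */
	SOCK_WAKE_SPACE,	/* was "how" == 2: write space became available */
	SOCK_WAKE_URG,		/* was "how" == 3: urgent data, SIGURG */
};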
if (!sock_flag(sk, SOCK_DEAD)) {
sk->sk_state_change(sk);
- sk_wake_async(sk, 0, POLL_OUT);
+ sk_wake_async(sk, SOCK_WAKE_IO, POLL_OUT);
}
if (sk->sk_write_pending ||
* are not waked up, because sk->sk_sleep ==
* NULL and sk->sk_socket == NULL.
*/
- if (sk->sk_socket) {
- sk_wake_async(sk,0,POLL_OUT);
- }
+ if (sk->sk_socket)
+ sk_wake_async(sk,
+ SOCK_WAKE_IO, POLL_OUT);
tp->snd_una = TCP_SKB_CB(skb)->ack_seq;
tp->snd_wnd = ntohs(th->window) <<