[SK_BUFF]: Introduce skb_transport_header(skb)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cb715eadf8f59bb48b689608c6519d180744dd10..9c3b4c7a50ad1a1dd82e914a4c219d837a1fa816 100644
@@ -140,7 +140,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
                 *
                 * "len" is invariant segment length, including TCP header.
                 */
-               len += skb->data - skb->h.raw;
+               len += skb->data - skb_transport_header(skb);
                if (len >= TCP_MIN_RCVMSS + sizeof(struct tcphdr) ||
                    /* If PSH is not set, packet should be
                     * full sized, provided peer TCP is not badly broken.
@@ -148,7 +148,7 @@ static void tcp_measure_rcv_mss(struct sock *sk,
                     * to handle super-low mtu links fairly.
                     */
                    (len >= TCP_MIN_MSS + sizeof(struct tcphdr) &&
-                    !(tcp_flag_word(skb->h.th)&TCP_REMNANT))) {
+                    !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) {
                        /* Subtract also invariant (if peer is RFC compliant),
                         * tcp header plus fixed timestamp option length.
                         * Resulting "len" is MSS free of SACK jitter.
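The two hunks above show the pattern repeated throughout this commit: direct pokes at the old sk_buff header union (skb->h.raw for the raw transport pointer, skb->h.th for the TCP header) become accessor calls. At this point in the series the new helpers are most likely still thin wrappers over the old field; a minimal sketch, assuming the union has not yet been removed:

        /* include/linux/skbuff.h, approximately */
        static inline unsigned char *skb_transport_header(const struct sk_buff *skb)
        {
                return skb->h.raw;
        }

        /* include/linux/tcp.h, approximately */
        static inline struct tcphdr *tcp_hdr(const struct sk_buff *skb)
        {
                return (struct tcphdr *)skb_transport_header(skb);
        }

Funneling every access through one helper is what later allows the header pointer to change representation without touching callers like tcp_measure_rcv_mss() again.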
@@ -578,7 +578,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
         * does not matter how to _calculate_ it. Seems, it was trap
         * that VJ failed to avoid. 8)
         */
-       if(m == 0)
+       if (m == 0)
                m = 1;
        if (tp->srtt != 0) {
                m -= (tp->srtt >> 3);   /* m is now error in rtt est */
@@ -940,7 +940,8 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 {
        const struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
-       unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
+       unsigned char *ptr = (skb_transport_header(ack_skb) +
+                             TCP_SKB_CB(ack_skb)->sacked);
        struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
        struct sk_buff *cached_skb;
        int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
@@ -1044,7 +1045,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        cached_skb = tp->fastpath_skb_hint;
        cached_fack_count = tp->fastpath_cnt_hint;
        if (!cached_skb) {
-               cached_skb = sk->sk_write_queue.next;
+               cached_skb = tcp_write_queue_head(sk);
                cached_fack_count = 0;
        }
 
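tcp_write_queue_head() replaces open-coded peeks at sk->sk_write_queue, and tcp_send_head() replaces direct reads of sk->sk_send_head. A sketch of the helpers assumed here, from the write-queue abstraction this commit builds on (the exact bodies are an assumption):

        static inline struct sk_buff *tcp_write_queue_head(struct sock *sk)
        {
                struct sk_buff *skb = sk->sk_write_queue.next;

                /* Reaching the list head itself means the queue is empty. */
                if (skb == (struct sk_buff *)&sk->sk_write_queue)
                        return NULL;
                return skb;
        }

        static inline struct sk_buff *tcp_send_head(struct sock *sk)
        {
                return sk->sk_send_head;        /* first not-yet-sent skb */
        }

Note that tcp_write_queue_head() returning NULL on an empty queue is what the (skb = tcp_write_queue_head(sk)) != NULL test in tcp_check_sack_reneging() further down relies on.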
@@ -1061,10 +1062,13 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
                if (after(end_seq, tp->high_seq))
                        flag |= FLAG_DATA_LOST;
 
-               sk_stream_for_retrans_queue_from(skb, sk) {
+               tcp_for_write_queue_from(skb, sk) {
                        int in_sack, pcount;
                        u8 sacked;
 
+                       if (skb == tcp_send_head(sk))
+                               break;
+
                        cached_skb = skb;
                        cached_fack_count = fack_count;
                        if (i == first_sack_index) {
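The recurring three-line addition, if (skb == tcp_send_head(sk)) break;, compensates for a semantic difference between the old and new iterators: sk_stream_for_retrans_queue() stopped at sk->sk_send_head inside its loop condition, while tcp_for_write_queue() walks the entire write queue. A sketch of both, reconstructed from the usage here rather than quoted:

        /* old iterator: stops at the send head implicitly */
        #define sk_stream_for_retrans_queue(skb, sk)                            \
                for (skb = (sk)->sk_write_queue.next;                           \
                     (skb != (sk)->sk_send_head) &&                             \
                     (skb != (struct sk_buff *)&(sk)->sk_write_queue);          \
                     skb = skb->next)

        /* new iterators: callers break at tcp_send_head() themselves */
        #define tcp_for_write_queue(skb, sk)                                    \
                for (skb = (sk)->sk_write_queue.next;                           \
                     (skb != (struct sk_buff *)&(sk)->sk_write_queue);          \
                     skb = skb->next)

        #define tcp_for_write_queue_from(skb, sk)                               \
                for (; (skb != (struct sk_buff *)&(sk)->sk_write_queue);        \
                     skb = skb->next)

Making the stop condition explicit at each call site costs a few lines per loop but removes the iterator's hidden dependency on sk_send_head.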
@@ -1213,7 +1217,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
        if (lost_retrans && icsk->icsk_ca_state == TCP_CA_Recovery) {
                struct sk_buff *skb;
 
-               sk_stream_for_retrans_queue(skb, sk) {
+               tcp_for_write_queue(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        if (after(TCP_SKB_CB(skb)->seq, lost_retrans))
                                break;
                        if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
@@ -1266,8 +1272,8 @@ int tcp_use_frto(struct sock *sk)
        const struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
 
-       if (!sysctl_tcp_frto || !sk->sk_send_head ||
-               after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
+       if (!sysctl_tcp_frto || !tcp_send_head(sk) ||
+               after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
                      tp->snd_una + tp->snd_wnd))
                return 0;
 
@@ -1278,8 +1284,11 @@ int tcp_use_frto(struct sock *sk)
        if (tp->retrans_out > 1)
                return 0;
 
-       skb = skb_peek(&sk->sk_write_queue)->next;      /* Skips head */
-       sk_stream_for_retrans_queue_from(skb, sk) {
+       skb = tcp_write_queue_head(sk);
+       skb = tcp_write_queue_next(sk, skb);    /* Skips head */
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
                        return 0;
                /* Short-circuit when first non-SACKed skb has been checked */
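The old one-liner skb_peek(&sk->sk_write_queue)->next becomes two helper calls in tcp_use_frto(). tcp_write_queue_next() is presumably nothing more than a link dereference; a sketch:

        static inline struct sk_buff *tcp_write_queue_next(struct sock *sk,
                                                           struct sk_buff *skb)
        {
                return skb->next;
        }

The indirection is the point: if the queue representation ever changes, callers written against the helper need no further edits.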
@@ -1343,7 +1352,7 @@ void tcp_enter_frto(struct sock *sk)
        tp->undo_marker = tp->snd_una;
        tp->undo_retrans = 0;
 
-       skb = skb_peek(&sk->sk_write_queue);
+       skb = tcp_write_queue_head(sk);
        if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
                TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                tp->retrans_out -= tcp_skb_pcount(skb);
@@ -1380,7 +1389,9 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
        tp->fackets_out = 0;
        tp->retrans_out = 0;
 
-       sk_stream_for_retrans_queue(skb, sk) {
+       tcp_for_write_queue(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                cnt += tcp_skb_pcount(skb);
                /*
                 * Count the retransmission made on RTO correctly (only when
@@ -1468,7 +1479,9 @@ void tcp_enter_loss(struct sock *sk, int how)
        if (!how)
                tp->undo_marker = tp->snd_una;
 
-       sk_stream_for_retrans_queue(skb, sk) {
+       tcp_for_write_queue(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                cnt += tcp_skb_pcount(skb);
                if (TCP_SKB_CB(skb)->sacked&TCPCB_RETRANS)
                        tp->undo_marker = 0;
@@ -1503,14 +1516,14 @@ static int tcp_check_sack_reneging(struct sock *sk)
         * receiver _host_ is heavily congested (or buggy).
         * Do processing similar to RTO timeout.
         */
-       if ((skb = skb_peek(&sk->sk_write_queue)) != NULL &&
+       if ((skb = tcp_write_queue_head(sk)) != NULL &&
            (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
                struct inet_connection_sock *icsk = inet_csk(sk);
                NET_INC_STATS_BH(LINUX_MIB_TCPSACKRENEGING);
 
                tcp_enter_loss(sk, 1);
                icsk->icsk_retransmits++;
-               tcp_retransmit_skb(sk, skb_peek(&sk->sk_write_queue));
+               tcp_retransmit_skb(sk, tcp_write_queue_head(sk));
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          icsk->icsk_rto, TCP_RTO_MAX);
                return 1;
@@ -1531,7 +1544,7 @@ static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
 static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
 {
        return tp->packets_out &&
-              tcp_skb_timedout(sk, skb_peek(&sk->sk_write_queue));
+              tcp_skb_timedout(sk, tcp_write_queue_head(sk));
 }
 
 /* Linux NewReno/SACK/FACK/ECN state machine.
@@ -1726,11 +1739,13 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
                skb = tp->lost_skb_hint;
                cnt = tp->lost_cnt_hint;
        } else {
-               skb = sk->sk_write_queue.next;
+               skb = tcp_write_queue_head(sk);
                cnt = 0;
        }
 
-       sk_stream_for_retrans_queue_from(skb, sk) {
+       tcp_for_write_queue_from(skb, sk) {
+               if (skb == tcp_send_head(sk))
+                       break;
                /* TODO: do this better */
                /* this is not the most efficient way to do this... */
                tp->lost_skb_hint = skb;
@@ -1744,12 +1759,11 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
 
                        /* clear xmit_retransmit_queue hints
                         *  if this is beyond hint */
-                       if(tp->retransmit_skb_hint != NULL &&
-                          before(TCP_SKB_CB(skb)->seq,
-                                 TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) {
-
+                       if (tp->retransmit_skb_hint != NULL &&
+                           before(TCP_SKB_CB(skb)->seq,
+                                  TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
                                tp->retransmit_skb_hint = NULL;
-                       }
+
                }
        }
        tcp_sync_left_out(tp);
@@ -1777,9 +1791,11 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
                struct sk_buff *skb;
 
                skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
-                       : sk->sk_write_queue.next;
+                       : tcp_write_queue_head(sk);
 
-               sk_stream_for_retrans_queue_from(skb, sk) {
+               tcp_for_write_queue_from(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        if (!tcp_skb_timedout(sk, skb))
                                break;
 
@@ -1970,7 +1986,9 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
 {
        if (tcp_may_undo(tp)) {
                struct sk_buff *skb;
-               sk_stream_for_retrans_queue(skb, sk) {
+               tcp_for_write_queue(skb, sk) {
+                       if (skb == tcp_send_head(sk))
+                               break;
                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
                }
 
@@ -2382,8 +2400,8 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                = icsk->icsk_ca_ops->rtt_sample;
        struct timeval tv = { .tv_sec = 0, .tv_usec = 0 };
 
-       while ((skb = skb_peek(&sk->sk_write_queue)) &&
-              skb != sk->sk_send_head) {
+       while ((skb = tcp_write_queue_head(sk)) &&
+              skb != tcp_send_head(sk)) {
                struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
                __u8 sacked = scb->sacked;
 
@@ -2423,7 +2441,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 
                if (sacked) {
                        if (sacked & TCPCB_RETRANS) {
-                               if(sacked & TCPCB_SACKED_RETRANS)
+                               if (sacked & TCPCB_SACKED_RETRANS)
                                        tp->retrans_out -= tcp_skb_pcount(skb);
                                acked |= FLAG_RETRANS_DATA_ACKED;
                                seq_rtt = -1;
@@ -2446,7 +2464,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
                }
                tcp_dec_pcount_approx(&tp->fackets_out, skb);
                tcp_packets_out_dec(tp, skb);
-               __skb_unlink(skb, &sk->sk_write_queue);
+               tcp_unlink_write_queue(skb, sk);
                sk_stream_free_skb(sk, skb);
                clear_all_retrans_hints(tp);
        }
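tcp_unlink_write_queue() wraps the raw list unlink so that write-queue bookkeeping stays centralized; at this stage it is, as far as can be told from the call sites, a direct alias:

        static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
        {
                __skb_unlink(skb, &sk->sk_write_queue);
        }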
@@ -2495,7 +2513,7 @@ static void tcp_ack_probe(struct sock *sk)
 
        /* Was it a usable window open? */
 
-       if (!after(TCP_SKB_CB(sk->sk_send_head)->end_seq,
+       if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq,
                   tp->snd_una + tp->snd_wnd)) {
                icsk->icsk_backoff = 0;
                inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
@@ -2542,9 +2560,9 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
                                 struct sk_buff *skb, u32 ack, u32 ack_seq)
 {
        int flag = 0;
-       u32 nwin = ntohs(skb->h.th->window);
+       u32 nwin = ntohs(tcp_hdr(skb)->window);
 
-       if (likely(!skb->h.th->syn))
+       if (likely(!tcp_hdr(skb)->syn))
                nwin <<= tp->rx_opt.snd_wscale;
 
        if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
@@ -2587,14 +2605,15 @@ static void tcp_conservative_spur_to_response(struct tcp_sock *tp)
  */
 static void tcp_ratehalving_spur_to_response(struct sock *sk)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
        tcp_enter_cwr(sk, 0);
-       tp->high_seq = tp->frto_highmark;       /* Smoother w/o this? - ij */
 }
 
-static void tcp_undo_spur_to_response(struct sock *sk)
+static void tcp_undo_spur_to_response(struct sock *sk, int flag)
 {
-       tcp_undo_cwr(sk, 1);
+       if (flag&FLAG_ECE)
+               tcp_ratehalving_spur_to_response(sk);
+       else
+               tcp_undo_cwr(sk, 1);
 }
 
 /* F-RTO spurious RTO detection algorithm (RFC4138)
@@ -2681,7 +2700,7 @@ static int tcp_process_frto(struct sock *sk, u32 prior_snd_una, int flag)
        } else /* frto_counter == 2 */ {
                switch (sysctl_tcp_frto_response) {
                case 2:
-                       tcp_undo_spur_to_response(sk);
+                       tcp_undo_spur_to_response(sk, flag);
                        break;
                case 1:
                        tcp_conservative_spur_to_response(tp);
@@ -2748,7 +2767,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
                if (TCP_SKB_CB(skb)->sacked)
                        flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
 
-               if (TCP_ECN_rcv_ecn_echo(tp, skb->h.th))
+               if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
                        flag |= FLAG_ECE;
 
                tcp_ca_event(sk, CA_EVENT_SLOW_ACK);
@@ -2794,7 +2813,7 @@ no_queue:
         * being used to time the probes, and is probably far higher than
         * it needs to be for normal retransmission.
         */
-       if (sk->sk_send_head)
+       if (tcp_send_head(sk))
                tcp_ack_probe(sk);
        return 1;
 
@@ -2815,13 +2834,13 @@ uninteresting_ack:
 void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx, int estab)
 {
        unsigned char *ptr;
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        int length=(th->doff*4)-sizeof(struct tcphdr);
 
        ptr = (unsigned char *)(th + 1);
        opt_rx->saw_tstamp = 0;
 
-       while(length>0) {
+       while (length > 0) {
                int opcode=*ptr++;
                int opsize;
 
@@ -2837,9 +2856,9 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        return;
                                if (opsize > length)
                                        return; /* don't parse partial options */
-                               switch(opcode) {
+                               switch (opcode) {
                                case TCPOPT_MSS:
-                                       if(opsize==TCPOLEN_MSS && th->syn && !estab) {
+                                       if (opsize==TCPOLEN_MSS && th->syn && !estab) {
                                                u16 in_mss = ntohs(get_unaligned((__be16 *)ptr));
                                                if (in_mss) {
                                                        if (opt_rx->user_mss && opt_rx->user_mss < in_mss)
@@ -2849,12 +2868,12 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        }
                                        break;
                                case TCPOPT_WINDOW:
-                                       if(opsize==TCPOLEN_WINDOW && th->syn && !estab)
+                                       if (opsize==TCPOLEN_WINDOW && th->syn && !estab)
                                                if (sysctl_tcp_window_scaling) {
                                                        __u8 snd_wscale = *(__u8 *) ptr;
                                                        opt_rx->wscale_ok = 1;
                                                        if (snd_wscale > 14) {
-                                                               if(net_ratelimit())
+                                                               if (net_ratelimit())
                                                                        printk(KERN_INFO "tcp_parse_options: Illegal window "
                                                                               "scaling value %d >14 received.\n",
                                                                               snd_wscale);
@@ -2864,7 +2883,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                                }
                                        break;
                                case TCPOPT_TIMESTAMP:
-                                       if(opsize==TCPOLEN_TIMESTAMP) {
+                                       if (opsize==TCPOLEN_TIMESTAMP) {
                                                if ((estab && opt_rx->tstamp_ok) ||
                                                    (!estab && sysctl_tcp_timestamps)) {
                                                        opt_rx->saw_tstamp = 1;
@@ -2874,7 +2893,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        }
                                        break;
                                case TCPOPT_SACK_PERM:
-                                       if(opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
+                                       if (opsize==TCPOLEN_SACK_PERM && th->syn && !estab) {
                                                if (sysctl_tcp_sack) {
                                                        opt_rx->sack_ok = 1;
                                                        tcp_sack_reset(opt_rx);
@@ -2883,7 +2902,7 @@ void tcp_parse_options(struct sk_buff *skb, struct tcp_options_received *opt_rx,
                                        break;
 
                                case TCPOPT_SACK:
-                                       if((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
+                                       if ((opsize >= (TCPOLEN_SACK_BASE + TCPOLEN_SACK_PERBLOCK)) &&
                                           !((opsize - TCPOLEN_SACK_BASE) % TCPOLEN_SACK_PERBLOCK) &&
                                           opt_rx->sack_ok) {
                                                TCP_SKB_CB(skb)->sacked = (ptr - 2) - (unsigned char *)th;
@@ -2932,7 +2951,7 @@ static int tcp_fast_parse_options(struct sk_buff *skb, struct tcphdr *th,
 static inline void tcp_store_ts_recent(struct tcp_sock *tp)
 {
        tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
-       tp->rx_opt.ts_recent_stamp = xtime.tv_sec;
+       tp->rx_opt.ts_recent_stamp = get_seconds();
 }
 
 static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
@@ -2945,8 +2964,8 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
                 * Not only, also it occurs for expired timestamps.
                 */
 
-               if((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
-                  xtime.tv_sec >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
+               if ((s32)(tp->rx_opt.rcv_tsval - tp->rx_opt.ts_recent) >= 0 ||
+                  get_seconds() >= tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS)
                        tcp_store_ts_recent(tp);
        }
 }
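get_seconds() replaces direct reads of xtime.tv_sec in the PAWS timestamp-aging checks (here, in tcp_store_ts_recent() above, and in tcp_paws_discard() below). The helper is presumably a trivial wrapper at this point, something like:

        /* kernel/timer.c, roughly: coarse wall-clock seconds */
        unsigned long get_seconds(void)
        {
                return xtime.tv_sec;
        }

The gain is layering rather than behavior: timekeeping internals such as xtime stop leaking into TCP, so the clock representation can change underneath without touching this file.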
@@ -2977,7 +2996,7 @@ static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
 static int tcp_disordered_ack(const struct sock *sk, const struct sk_buff *skb)
 {
        struct tcp_sock *tp = tcp_sk(sk);
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        u32 seq = TCP_SKB_CB(skb)->seq;
        u32 ack = TCP_SKB_CB(skb)->ack_seq;
 
@@ -2998,7 +3017,7 @@ static inline int tcp_paws_discard(const struct sock *sk, const struct sk_buff *
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        return ((s32)(tp->rx_opt.ts_recent - tp->rx_opt.rcv_tsval) > TCP_PAWS_WINDOW &&
-               xtime.tv_sec < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
+               get_seconds() < tp->rx_opt.ts_recent_stamp + TCP_PAWS_24DAYS &&
                !tcp_disordered_ack(sk, skb));
 }
 
@@ -3204,7 +3223,7 @@ static void tcp_sack_maybe_coalesce(struct tcp_sock *tp)
                         */
                        tp->rx_opt.num_sacks--;
                        tp->rx_opt.eff_sacks = min(tp->rx_opt.num_sacks + tp->rx_opt.dsack, 4 - tp->rx_opt.tstamp_ok);
-                       for(i=this_sack; i < tp->rx_opt.num_sacks; i++)
+                       for (i=this_sack; i < tp->rx_opt.num_sacks; i++)
                                sp[i] = sp[i+1];
                        continue;
                }
@@ -3257,7 +3276,7 @@ static void tcp_sack_new_ofo_skb(struct sock *sk, u32 seq, u32 end_seq)
                tp->rx_opt.num_sacks--;
                sp--;
        }
-       for(; this_sack > 0; this_sack--, sp--)
+       for (; this_sack > 0; this_sack--, sp--)
                *sp = *(sp-1);
 
 new_sack:
@@ -3283,7 +3302,7 @@ static void tcp_sack_remove(struct tcp_sock *tp)
                return;
        }
 
-       for(this_sack = 0; this_sack < num_sacks; ) {
+       for (this_sack = 0; this_sack < num_sacks; ) {
                /* Check if the start of the sack is covered by RCV.NXT. */
                if (!before(tp->rcv_nxt, sp->start_seq)) {
                        int i;
@@ -3339,8 +3358,8 @@ static void tcp_ofo_queue(struct sock *sk)
                __skb_unlink(skb, &tp->out_of_order_queue);
                __skb_queue_tail(&sk->sk_receive_queue, skb);
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               if(skb->h.th->fin)
-                       tcp_fin(skb, sk, skb->h.th);
+               if (tcp_hdr(skb)->fin)
+                       tcp_fin(skb, sk, tcp_hdr(skb));
        }
 }
 
@@ -3348,7 +3367,7 @@ static int tcp_prune_queue(struct sock *sk);
 
 static void tcp_data_queue(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcphdr *th = skb->h.th;
+       struct tcphdr *th = tcp_hdr(skb);
        struct tcp_sock *tp = tcp_sk(sk);
        int eaten = -1;
 
@@ -3405,9 +3424,9 @@ queue_and_out:
                        __skb_queue_tail(&sk->sk_receive_queue, skb);
                }
                tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
-               if(skb->len)
+               if (skb->len)
                        tcp_event_data_recv(sk, tp, skb);
-               if(th->fin)
+               if (th->fin)
                        tcp_fin(skb, sk, th);
 
                if (!skb_queue_empty(&tp->out_of_order_queue)) {
@@ -3587,7 +3606,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                 * - bloated or contains data before "start" or
                 *   overlaps to the next one.
                 */
-               if (!skb->h.th->syn && !skb->h.th->fin &&
+               if (!tcp_hdr(skb)->syn && !tcp_hdr(skb)->fin &&
                    (tcp_win_from_space(skb->truesize) > skb->len ||
                     before(TCP_SKB_CB(skb)->seq, start) ||
                     (skb->next != tail &&
@@ -3598,7 +3617,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                start = TCP_SKB_CB(skb)->end_seq;
                skb = skb->next;
        }
-       if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+       if (skb == tail || tcp_hdr(skb)->syn || tcp_hdr(skb)->fin)
                return;
 
        while (before(start, end)) {
@@ -3614,11 +3633,14 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                nskb = alloc_skb(copy+header, GFP_ATOMIC);
                if (!nskb)
                        return;
+
+               skb_set_mac_header(nskb, skb_mac_header(skb) - skb->head);
+               skb_set_network_header(nskb, (skb_network_header(skb) -
+                                             skb->head));
+               skb_set_transport_header(nskb, (skb_transport_header(skb) -
+                                               skb->head));
                skb_reserve(nskb, header);
                memcpy(nskb->head, skb->head, header);
-               nskb->nh.raw = nskb->head + (skb->nh.raw-skb->head);
-               nskb->h.raw = nskb->head + (skb->h.raw-skb->head);
-               nskb->mac.raw = nskb->head + (skb->mac.raw-skb->head);
                memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
                TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(nskb)->end_seq = start;
                __skb_insert(nskb, skb->prev, skb, list);
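In tcp_collapse() the three header pointers of the new skb were previously recomputed by raw pointer arithmetic against skb->head; they now go through the skb_set_*_header() setters. Assuming the setters of this era take an offset relative to skb->data, a sketch of one of them:

        static inline void skb_set_transport_header(struct sk_buff *skb,
                                                    const int offset)
        {
                skb->h.raw = skb->data + offset;        /* still the union field here */
        }

The offsets passed in are computed against the old skb's head, and the calls are made before skb_reserve(nskb, header), while nskb->data still equals nskb->head, so data-relative and head-relative offsets coincide.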
@@ -3644,7 +3666,9 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
                                __kfree_skb(skb);
                                NET_INC_STATS_BH(LINUX_MIB_TCPRCVCOLLAPSED);
                                skb = next;
-                               if (skb == tail || skb->h.th->syn || skb->h.th->fin)
+                               if (skb == tail ||
+                                   tcp_hdr(skb)->syn ||
+                                   tcp_hdr(skb)->fin)
                                        return;
                        }
                }
@@ -4051,7 +4075,7 @@ static int tcp_dma_try_early_copy(struct sock *sk, struct sk_buff *skb, int hlen
                tcp_rcv_space_adjust(sk);
 
                if ((tp->ucopy.len == 0) ||
-                   (tcp_flag_word(skb->h.th) & TCP_FLAG_PSH) ||
+                   (tcp_flag_word(tcp_hdr(skb)) & TCP_FLAG_PSH) ||
                    (atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1))) {
                        tp->ucopy.wakeup = 1;
                        sk->sk_data_ready(sk, 0);
@@ -4304,7 +4328,7 @@ slow_path:
                goto discard;
        }
 
-       if(th->rst) {
+       if (th->rst) {
                tcp_reset(sk);
                goto discard;
        }
@@ -4319,7 +4343,7 @@ slow_path:
        }
 
 step5:
-       if(th->ack)
+       if (th->ack)
                tcp_ack(sk, skb, FLAG_SLOWPATH);
 
        tcp_rcv_rtt_measure_ts(sk, skb);
@@ -4607,13 +4631,13 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
                goto discard;
 
        case TCP_LISTEN:
-               if(th->ack)
+               if (th->ack)
                        return 1;
 
-               if(th->rst)
+               if (th->rst)
                        goto discard;
 
-               if(th->syn) {
+               if (th->syn) {
                        if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
                                return 1;
 
@@ -4669,7 +4693,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        }
 
        /* step 2: check RST bit */
-       if(th->rst) {
+       if (th->rst) {
                tcp_reset(sk);
                goto discard;
        }
@@ -4692,7 +4716,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
        if (th->ack) {
                int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH);
 
-               switch(sk->sk_state) {
+               switch (sk->sk_state) {
                case TCP_SYN_RECV:
                        if (acceptable) {
                                tp->copied_seq = tp->rcv_nxt;