tp->mss_cache = TCP_MSS_DEFAULT;
tp->reordering = sysctl_tcp_reordering;
+ tcp_enable_early_retrans(tp);
icsk->icsk_ca_ops = &tcp_init_congestion_ops;
sk->sk_state = TCP_CLOSE;
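/*
 * For reference, a sketch of the early-retransmit toggles used in this
 * patch. These are assumed helpers in include/net/tcp.h; the exact enable
 * conditions are reconstructed, not taken from this diff:
 */
static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
	/* Assumption: early retransmit is only armed when the reordering
	 * threshold is at its default and thin-stream dupacks are off. */
	tp->do_early_retrans = sysctl_tcp_early_retrans &&
		!sysctl_tcp_thin_dupack && sysctl_tcp_reordering == 3;
}

static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = 0;
}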
while (psize > 0) {
struct sk_buff *skb = tcp_write_queue_tail(sk);
struct page *page = pages[poffset / PAGE_SIZE];
- int copy, i, can_coalesce;
+ int copy, i;
int offset = poffset % PAGE_SIZE;
int size = min_t(size_t, psize, PAGE_SIZE - offset);
+ bool can_coalesce;
if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
return tmp;
}
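/*
 * can_coalesce becomes a bool because it stores the result of
 * skb_can_coalesce(). A sketch of that helper as it reads in
 * include/linux/skbuff.h of this era (reproduced from memory, so treat
 * the body as an approximation):
 */
static inline bool skb_can_coalesce(struct sk_buff *skb, int i,
				    const struct page *page, int off)
{
	if (i) {
		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		/* Merge only if the new data is on the same page and starts
		 * exactly where the last fragment ends. */
		return page == skb_frag_page(frag) &&
		       off == frag->page_offset + skb_frag_size(frag);
	}
	return false;
}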
-static int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size)
-{
- struct sk_buff *skb;
- struct tcp_skb_cb *cb;
- struct tcphdr *th;
-
- skb = alloc_skb(size + sizeof(*th), sk->sk_allocation);
- if (!skb)
- goto err;
-
- th = (struct tcphdr *)skb_put(skb, sizeof(*th));
- skb_reset_transport_header(skb);
- memset(th, 0, sizeof(*th));
-
- if (memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size))
- goto err_free;
-
- cb = TCP_SKB_CB(skb);
-
- TCP_SKB_CB(skb)->seq = tcp_sk(sk)->rcv_nxt;
- TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + size;
- TCP_SKB_CB(skb)->ack_seq = tcp_sk(sk)->snd_una - 1;
-
- tcp_queue_rcv(sk, skb, sizeof(*th));
-
- return size;
-
-err_free:
- kfree_skb(skb);
-err:
- return -ENOMEM;
-}
-
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
size_t size)
{
break;
}
if (tcp_hdr(skb)->fin) {
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
++seq;
break;
}
- sk_eat_skb(sk, skb, 0);
+ sk_eat_skb(sk, skb, false);
if (!desc->count)
break;
tp->copied_seq = seq;
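/*
 * sk_eat_skb() now takes a bool for its copied_early argument. A sketch of
 * the CONFIG_NET_DMA variant being called here (assumed shape of the
 * include/net/sock.h helper; verify against your tree):
 */
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb,
			      bool copied_early)
{
	__skb_unlink(skb, &sk->sk_receive_queue);
	if (!copied_early)
		__kfree_skb(skb);
	else
		/* The DMA engine may still be copying out of this skb, so
		 * park it until the async copy completes. */
		__skb_queue_tail(&sk->sk_async_wait_queue, skb);
}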
int target; /* Read at least this many bytes */
long timeo;
struct task_struct *user_recv = NULL;
- int copied_early = 0;
+ bool copied_early = false;
struct sk_buff *skb;
u32 urg_hole = 0;
}
if ((flags & MSG_PEEK) &&
(peek_seq - copied - urg_hole != tp->copied_seq)) {
- if (net_ratelimit())
- printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
- current->comm, task_pid_nr(current));
+ net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
+ current->comm,
+ task_pid_nr(current));
peek_seq = tp->copied_seq;
}
continue;
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
if ((offset + used) == skb->len)
- copied_early = 1;
+ copied_early = true;
} else
#endif
goto found_fin_ok;
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, copied_early);
- copied_early = 0;
+ copied_early = false;
}
continue;
++*seq;
if (!(flags & MSG_PEEK)) {
sk_eat_skb(sk, skb, copied_early);
- copied_early = 0;
+ copied_early = false;
}
break;
} while (len > 0);
too_many_orphans = tcp_too_many_orphans(sk, shift);
out_of_socket_memory = tcp_out_of_memory(sk);
- if (too_many_orphans && net_ratelimit())
- pr_info("too many orphaned sockets\n");
- if (out_of_socket_memory && net_ratelimit())
- pr_info("out of memory -- consider tuning tcp_mem\n");
+ if (too_many_orphans)
+ net_info_ratelimited("too many orphaned sockets\n");
+ if (out_of_socket_memory)
+ net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
return too_many_orphans || out_of_socket_memory;
}
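/*
 * The conversions above depend on the net_<level>_ratelimited() helpers,
 * which fold the net_ratelimit() check into the printout. Their likely
 * shape in include/linux/net.h (reproduced from memory):
 */
#define net_ratelimited_function(function, ...)		\
do {							\
	if (net_ratelimit())				\
		function(__VA_ARGS__);			\
} while (0)

#define net_dbg_ratelimited(fmt, ...)			\
	net_ratelimited_function(pr_debug, fmt, ##__VA_ARGS__)
#define net_info_ratelimited(fmt, ...)			\
	net_ratelimited_function(pr_info, fmt, ##__VA_ARGS__)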
((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
}
-static int tcp_repair_options_est(struct tcp_sock *tp, char __user *optbuf, unsigned int len)
+static int tcp_repair_options_est(struct tcp_sock *tp,
+ struct tcp_repair_opt __user *optbuf, unsigned int len)
{
- /*
- * Options are stored in CODE:VALUE form where CODE is 8bit and VALUE
- * fits the respective TCPOLEN_ size
- */
+ struct tcp_repair_opt opt;
- while (len > 0) {
- u8 opcode;
-
- if (get_user(opcode, optbuf))
+ while (len >= sizeof(opt)) {
+ if (copy_from_user(&opt, optbuf, sizeof(opt)))
return -EFAULT;
optbuf++;
- len--;
-
- switch (opcode) {
- case TCPOPT_MSS: {
- u16 in_mss;
+ len -= sizeof(opt);
- if (len < sizeof(in_mss))
- return -ENODATA;
- if (get_user(in_mss, optbuf))
- return -EFAULT;
-
- tp->rx_opt.mss_clamp = in_mss;
-
- optbuf += sizeof(in_mss);
- len -= sizeof(in_mss);
+ switch (opt.opt_code) {
+ case TCPOPT_MSS:
+ tp->rx_opt.mss_clamp = opt.opt_val;
break;
- }
- case TCPOPT_WINDOW: {
- u8 wscale;
-
- if (len < sizeof(wscale))
- return -ENODATA;
- if (get_user(wscale, optbuf))
- return -EFAULT;
-
- if (wscale > 14)
+ case TCPOPT_WINDOW:
+ if (opt.opt_val > 14)
return -EFBIG;
- tp->rx_opt.snd_wscale = wscale;
-
- optbuf += sizeof(wscale);
- len -= sizeof(wscale);
+ tp->rx_opt.snd_wscale = opt.opt_val;
break;
- }
case TCPOPT_SACK_PERM:
+ if (opt.opt_val != 0)
+ return -EINVAL;
+
tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
if (sysctl_tcp_fack)
tcp_enable_fack(tp);
break;
case TCPOPT_TIMESTAMP:
+ if (opt.opt_val != 0)
+ return -EINVAL;
+
tp->rx_opt.tstamp_ok = 1;
break;
}
err = -EINVAL;
- else
+ else {
tp->thin_dupack = val;
+ if (tp->thin_dupack)
+ tcp_disable_early_retrans(tp);
+ }
break;
case TCP_REPAIR_OPTIONS:
if (!tp->repair)
err = -EINVAL;
else if (sk->sk_state == TCP_ESTABLISHED)
- err = tcp_repair_options_est(tp, optval, optlen);
+ err = tcp_repair_options_est(tp,
+ (struct tcp_repair_opt __user *)optval,
+ optlen);
else
err = -EPERM;
break;
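/*
 * Hypothetical userspace sketch: with the fixed-size record format, a
 * checkpoint/restore tool passes an array of struct tcp_repair_opt (two
 * __u32 fields, opt_code and opt_val). The TCPOPT_* kind values are the
 * standard TCP option numbers (MSS = 2, WINDOW = 3, SACK_PERM = 4,
 * TIMESTAMP = 8), which userspace is assumed to define for itself:
 */
struct tcp_repair_opt opts[] = {
	{ TCPOPT_MSS,       1460 },	/* becomes rx_opt.mss_clamp */
	{ TCPOPT_WINDOW,    7 },	/* snd_wscale; values > 14 get -EFBIG */
	{ TCPOPT_SACK_PERM, 0 },	/* opt_val must be 0, else -EINVAL */
	{ TCPOPT_TIMESTAMP, 0 },	/* opt_val must be 0, else -EINVAL */
};

if (setsockopt(fd, SOL_TCP, TCP_REPAIR_OPTIONS, opts, sizeof(opts)) < 0)
	perror("TCP_REPAIR_OPTIONS");

/* The socket must already be in repair mode and TCP_ESTABLISHED, otherwise
 * the kernel path above returns -EINVAL or -EPERM. */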
{
struct sk_buff *skb = NULL;
unsigned long limit;
- int max_share, cnt;
+ int max_rshare, max_wshare, cnt;
unsigned int i;
unsigned long jiffy = jiffies;
tcp_init_mem(&init_net);
/* Set per-socket limits to no more than 1/128 the pressure threshold */
limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
- max_share = min(4UL*1024*1024, limit);
+ max_wshare = min(4UL*1024*1024, limit);
+ max_rshare = min(6UL*1024*1024, limit);
sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
sysctl_tcp_wmem[1] = 16*1024;
- sysctl_tcp_wmem[2] = max(64*1024, max_share);
+ sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
sysctl_tcp_rmem[1] = 87380;
- sysctl_tcp_rmem[2] = max(87380, max_share);
+ sysctl_tcp_rmem[2] = max(87380, max_rshare);
pr_info("Hash tables configured (established %u bind %u)\n",
tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
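/*
 * Worked example of the limit math above, with hypothetical numbers: on a
 * box with 4 KiB pages and ~1M free buffer pages, limit =
 * nr_free_buffer_pages() << (12 - 7) = pages * 32 bytes, i.e. 1/128 of the
 * ~4 GiB of buffer memory, or 32 MiB. max_wshare = min(4 MiB, 32 MiB) =
 * 4 MiB and max_rshare = min(6 MiB, 32 MiB) = 6 MiB, so tcp_wmem[2] ends
 * up at 4 MiB and tcp_rmem[2] at 6 MiB on such a machine.
 */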