Merge branches 'pm-cpufreq' and 'pm-devfreq'
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bdaef7f..d48d557 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1966,12 +1966,14 @@ static int tcp_mtu_probe(struct sock *sk)
 	len = 0;
 	tcp_for_write_queue_from_safe(skb, next, sk) {
 		copy = min_t(int, skb->len, probe_size - len);
-		if (nskb->ip_summed)
+		if (nskb->ip_summed) {
 			skb_copy_bits(skb, 0, skb_put(nskb, copy), copy);
-		else
-			nskb->csum = skb_copy_and_csum_bits(skb, 0,
-							    skb_put(nskb, copy),
-							    copy, nskb->csum);
+		} else {
+			__wsum csum = skb_copy_and_csum_bits(skb, 0,
+							     skb_put(nskb, copy),
+							     copy, 0);
+			nskb->csum = csum_block_add(nskb->csum, csum, len);
+		}
 
 		if (skb->len <= copy) {
 			/* We've eaten all the data from this skb.
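Note on the hunk above: tcp_mtu_probe() coalesces several write-queue skbs into one probe skb, so each fragment's checksum has to be folded into nskb->csum at the offset where the fragment actually lands. Seeding skb_copy_and_csum_bits() with the running nskb->csum, as the old code did, goes wrong whenever the bytes already in nskb total an odd length, because the new fragment's 16-bit word pairing is then shifted by one byte; csum_block_add() compensates by byte-rotating the partial sum at odd offsets. The userspace toy below illustrates this. It is a sketch, not the kernel's csum_partial()/csum_block_add(): toy_csum(), toy_block_add(), and all the byte values are invented, and the folding is simplified to 16 bits.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Fold a 32-bit accumulator down to 16 bits (one's-complement carry). */
static uint32_t fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

/* Toy stand-in for csum_partial(): one's-complement sum over buf,
 * big-endian word pairing, odd tail padded with a zero byte. */
static uint32_t toy_csum(const uint8_t *buf, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)buf[i] << 8) | buf[i + 1];
	if (i < len)
		sum += (uint32_t)buf[i] << 8;
	return fold(sum);
}

/* Toy stand-in for csum_block_add(): a block starting at an odd offset
 * has its byte pairs shifted by one, which for a folded 16-bit sum is
 * exactly a byte swap. */
static uint32_t toy_block_add(uint32_t csum, uint32_t csum2, size_t offset)
{
	if (offset & 1)
		csum2 = ((csum2 & 0xff) << 8) | (csum2 >> 8);
	return fold(csum + csum2);
}

int main(void)
{
	const uint8_t frag1[] = { 0x12, 0x34, 0x56 };	/* odd length */
	const uint8_t frag2[] = { 0x9a, 0xbc, 0xde, 0xf0 };
	uint8_t all[sizeof(frag1) + sizeof(frag2)];
	uint32_t c1, c2;

	memcpy(all, frag1, sizeof(frag1));
	memcpy(all + sizeof(frag1), frag2, sizeof(frag2));

	c1 = toy_csum(frag1, sizeof(frag1), 0);
	c2 = toy_csum(frag2, sizeof(frag2), 0);

	/* Old pattern: seed the second copy with the running checksum. */
	printf("naive seed: %04x\n", toy_csum(frag2, sizeof(frag2), c1));
	/* New pattern: combine at the fragment's real offset. */
	printf("block_add : %04x\n", toy_block_add(c1, c2, sizeof(frag1)));
	/* Reference: checksum over the fragments laid out contiguously. */
	printf("whole     : %04x\n", toy_csum(all, sizeof(all), 0));
	return 0;
}

With these values the naive seeding prints a1e1 while both the offset-aware combine and the contiguous reference print 15ae, showing why the seed had to be replaced by csum_block_add() at offset len.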
@@ -2605,7 +2607,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 	 * copying overhead: fragmentation, tunneling, mangling etc.
 	 */
 	if (atomic_read(&sk->sk_wmem_alloc) >
-	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+		  sk->sk_sndbuf))
 		return -EAGAIN;
 
 	if (skb_still_in_host_queue(sk, skb))
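Note on the hunk above: sk_wmem_queued and sk_sndbuf are plain ints, so on a socket with a very large write queue the expression sk_wmem_queued + (sk_wmem_queued >> 2) can exceed INT_MAX. With plain min() the wrapped negative value wins the comparison, the "wmem_alloc too big" test then fires on every attempt, and the flow stalls on -EAGAIN; min_t(u32, ...) performs the comparison in unsigned 32-bit space instead. A userspace sketch with made-up values (the wraparound is modeled via unsigned arithmetic to avoid signed-overflow UB):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical socket: ~1.9 GB queued for write, huge SO_SNDBUF.
	 * In the kernel both fields are ints. */
	int wmem_queued = 0x78000000;
	int sndbuf = INT_MAX;

	/* What the limit should be, computed without overflow: */
	long long exact = (long long)wmem_queued + (wmem_queued >> 2);
	printf("exact limit %lld exceeds INT_MAX (%d)\n", exact, INT_MAX);

	/* Plain min() evaluates the sum as int, wrapping negative, so
	 * min() picks the negative value and the test always trips. */
	int wrapped = (int)((unsigned)wmem_queued +
			    ((unsigned)wmem_queued >> 2));
	printf("int math wraps to %d\n", wrapped);

	/* min_t(u32, ...) casts both sides to u32 before comparing: */
	uint32_t a = (uint32_t)wmem_queued + ((uint32_t)wmem_queued >> 2);
	uint32_t b = (uint32_t)sndbuf;
	printf("min_t(u32, ...) = %u\n", a < b ? a : b);
	return 0;
}

Here the wrapped int is about -1.78e9, so the old test compared sk_wmem_alloc against a negative bound, while the u32 version correctly clamps the limit to sk_sndbuf.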
@@ -2830,7 +2833,7 @@ begin_fwd:
 		if (tcp_retransmit_skb(sk, skb, segs))
 			return;
 
-		NET_INC_STATS(sock_net(sk), mib_idx);
+		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
 
 		if (tcp_in_cwnd_reduction(sk))
 			tp->prr_out += tcp_skb_pcount(skb);
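Note on the hunk above: one skb on the retransmit queue can cover several MSS-sized segments, which is what tcp_skb_pcount() reports (the very next statement already uses it for prr_out). Bumping the mib_idx counter once per skb therefore under-counts retransmitted segments; NET_ADD_STATS credits the full pcount. A minimal model of the difference (toy_skb and its pcounts are invented for illustration):

#include <stdio.h>

/* Toy model of the retransmit loop: each skb may stand for several
 * MSS-sized segments, as tcp_skb_pcount() reports in the kernel. */
struct toy_skb { int pcount; };

int main(void)
{
	struct toy_skb retransmitted[] = { {1}, {3}, {2} };
	long inc = 0, add = 0;
	unsigned i;

	for (i = 0; i < sizeof(retransmitted) / sizeof(retransmitted[0]); i++) {
		inc += 1;				/* NET_INC_STATS: once per skb */
		add += retransmitted[i].pcount;		/* NET_ADD_STATS: per segment */
	}
	printf("per-skb: %ld, per-segment: %ld\n", inc, add);	/* 3 vs 6 */
	return 0;
}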
@@ -3567,6 +3570,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 	if (!res) {
 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+		if (unlikely(tcp_passive_fastopen(sk)))
+			tcp_sk(sk)->total_retrans++;
 	}
 	return res;
 }
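Note on the hunk above: a passive Fast Open server retransmits its SYNACK from tcp_rtx_synack() while the child socket already exists in TCP_SYN_RECV (the condition tcp_passive_fastopen() checks), and that SYNACK may carry data accepted from the first SYN. As this hunk appears to intend, such retransmits were already counted in the SNMP MIBs but not in tp->total_retrans, the per-connection counter that getsockopt(TCP_INFO) exposes as tcpi_total_retrans, so the increment makes the socket-level accounting consistent with the MIB-level one.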