Merge tag 'iio-fixes-for-4.9a' of git://git.kernel.org/pub/scm/linux/kernel/git/jic23...
[cascardo/linux.git] / net/ipv4/tcp_ipv4.c
index 7158d4f..bd5e8d1 100644
@@ -1175,6 +1175,7 @@ static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
                                      NULL, skb);
 
        if (genhash || memcmp(hash_location, newhash, 16) != 0) {
+               NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
                net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
                                     &iph->saddr, ntohs(th->source),
                                     &iph->daddr, ntohs(th->dest),
@@ -1195,7 +1196,6 @@ static void tcp_v4_init_req(struct request_sock *req,
 
        sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
        sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
-       ireq->no_srccheck = inet_sk(sk_listener)->transparent;
        ireq->opt = tcp_v4_save_options(skb);
 }
 
@@ -1537,6 +1537,34 @@ bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(tcp_prequeue);
 
+bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
+{
+       u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
+
+       /* Only socket owner can try to collapse/prune rx queues
+        * to reduce memory overhead, so add a little headroom here.
+        * Few socket backlogs are likely to be non-empty at the same time.
+        */
+       limit += 64*1024;
+
+       /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
+        * we can fix skb->truesize to its real value to avoid future drops.
+        * This is valid because skb is not yet charged to the socket.
+        * It has been noticed that pure SACK packets were sometimes dropped
+        * (if cooked by drivers without the copybreak feature).
+        */
+       if (!skb->data_len)
+               skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+
+       if (unlikely(sk_add_backlog(sk, skb, limit))) {
+               bh_unlock_sock(sk);
+               __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
+               return true;
+       }
+       return false;
+}
+EXPORT_SYMBOL(tcp_add_backlog);
+
 /*
  *     From tcp_input.c
  */
@@ -1608,6 +1636,7 @@ process:
 
                sk = req->rsk_listener;
                if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
+                       sk_drops_add(sk, skb);
                        reqsk_put(req);
                        goto discard_it;
                }
@@ -1666,10 +1695,7 @@ process:
        if (!sock_owned_by_user(sk)) {
                if (!tcp_prequeue(sk, skb))
                        ret = tcp_v4_do_rcv(sk, skb);
-       } else if (unlikely(sk_add_backlog(sk, skb,
-                                          sk->sk_rcvbuf + sk->sk_sndbuf))) {
-               bh_unlock_sock(sk);
-               __NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
+       } else if (tcp_add_backlog(sk, skb)) {
                goto discard_and_relse;
        }
        bh_unlock_sock(sk);
@@ -1818,7 +1844,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
        tcp_write_queue_purge(sk);
 
        /* Cleans up our, hopefully empty, out_of_order_queue. */
-       __skb_queue_purge(&tp->out_of_order_queue);
+       skb_rbtree_purge(&tp->out_of_order_queue);
 
 #ifdef CONFIG_TCP_MD5SIG
        /* Clean up the MD5 key list, if any */
@@ -1845,9 +1871,6 @@ void tcp_v4_destroy_sock(struct sock *sk)
        local_bh_disable();
        sk_sockets_allocated_dec(sk);
        local_bh_enable();
-
-       if (mem_cgroup_sockets_enabled && sk->sk_memcg)
-               sock_release_memcg(sk);
 }
 EXPORT_SYMBOL(tcp_v4_destroy_sock);
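
The hunks above replace the open-coded sk_add_backlog() limit with tcp_add_backlog(), which admits a packet only while the socket backlog stays within sk_rcvbuf + sk_sndbuf plus 64 KB of headroom, and which trims skb->truesize for fully pulled skbs so small pure-ACK/SACK packets are not over-charged. As a rough illustration, the standalone userspace sketch below models just that admission decision; the struct, function name, and buffer sizes are hypothetical stand-ins, not kernel code.

/* Userspace model of the backlog admission check -- illustration only. */
#include <stdbool.h>
#include <stdio.h>

struct fake_sock {                      /* hypothetical stand-in for struct sock */
        unsigned int sk_rcvbuf;         /* receive buffer size, bytes */
        unsigned int sk_sndbuf;         /* send buffer size, bytes */
        unsigned int backlog;           /* truesize bytes already queued */
};

/* Admit a packet only while the backlog stays within rcvbuf + sndbuf + 64KB
 * of headroom; a false return corresponds to the TCPBACKLOGDROP path.
 */
static bool backlog_admit(struct fake_sock *sk, unsigned int truesize)
{
        unsigned int limit = sk->sk_rcvbuf + sk->sk_sndbuf + 64 * 1024;

        if (sk->backlog + truesize > limit)
                return false;
        sk->backlog += truesize;
        return true;
}

int main(void)
{
        /* Example buffer sizes only. */
        struct fake_sock sk = { .sk_rcvbuf = 87380, .sk_sndbuf = 16384, .backlog = 0 };
        unsigned int admitted = 0;

        /* Feed 2KB-truesize packets until the limit trips. */
        while (backlog_admit(&sk, 2048))
                admitted++;

        printf("admitted %u packets before hitting the backlog limit\n", admitted);
        return 0;
}

The 64 KB of extra headroom mirrors the comment in the new helper: only the socket owner can collapse or prune the receive queues, so packets arriving while the owner holds the socket lock get a little extra room instead of being dropped immediately.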