net: relax rcvbuf limits
author	Eric Dumazet <eric.dumazet@gmail.com>
Wed, 21 Dec 2011 07:11:44 +0000 (07:11 +0000)
committer	David S. Miller <davem@davemloft.net>
Fri, 23 Dec 2011 07:15:14 +0000 (02:15 -0500)
skb->truesize might be big even for a small packet.

It's even bigger after commit 87fb4b7b533 (net: more accurate skb
truesize), especially with a big MTU.

We should allow queueing at least one packet per receiver, even with a
low RCVBUF setting.

Reported-by: Michal Simek <monstr@monstr.eu>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
include/net/sock.h
net/core/sock.c
net/packet/af_packet.c

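For context, here is a minimal userspace-style sketch (not kernel code) of the admission check before and after this patch. The rcvbuf and truesize numbers are illustrative assumptions; the point is that the old check charges the incoming skb's truesize up front, so a single large-truesize packet can be refused even when the receive queue is empty, while the new check only compares what is already queued against the limit.

/*
 * Minimal sketch, not kernel code: why the old check could drop the very
 * first packet.  The truesize and rcvbuf values are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

/* Old check: charge the incoming skb's truesize before admitting it. */
static bool old_check_drops(unsigned int rmem_alloc, unsigned int truesize,
                            unsigned int rcvbuf)
{
    return rmem_alloc + truesize >= rcvbuf;
}

/* New check: compare only what is already queued against the limit. */
static bool new_check_drops(unsigned int rmem_alloc, unsigned int rcvbuf)
{
    return rmem_alloc >= rcvbuf;
}

int main(void)
{
    unsigned int rcvbuf   = 2304;  /* a small SO_RCVBUF-style limit (example value) */
    unsigned int truesize = 4352;  /* one big-MTU skb (example value) */

    /* Receive queue is empty; the first packet arrives. */
    printf("old check drops first packet: %d\n",
           old_check_drops(0, truesize, rcvbuf));   /* prints 1: dropped */
    printf("new check drops first packet: %d\n",
           new_check_drops(0, rcvbuf));             /* prints 0: queued */
    return 0;
}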
index abb6e0f0c3c3e59b79f23fc72d2e9d8d389e3774..32e39371fba627ebfaab66713e645889ccb7ba47 100644 (file)
@@ -637,12 +637,14 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
 
 /*
  * Take into account size of receive queue and backlog queue
+ * Do not take into account this skb truesize,
+ * to allow even a single big packet to come.
  */
 static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
 {
        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
-       return qsize + skb->truesize > sk->sk_rcvbuf;
+       return qsize > sk->sk_rcvbuf;
 }
 
 /* The per-socket spinlock must be held here. */
index 4ed7b1d12f5ecde5b8c2119c0d4cfaaa765ff470..b23f174ab84c3b6c72302834d8e13695d22a1c6c 100644 (file)
@@ -288,11 +288,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        unsigned long flags;
        struct sk_buff_head *list = &sk->sk_receive_queue;
 
-       /* Cast sk->rcvbuf to unsigned... It's pointless, but reduces
-          number of warnings when compiling with -W --ANK
-        */
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf) {
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
                atomic_inc(&sk->sk_drops);
                trace_sock_rcvqueue_full(sk, skb);
                return -ENOMEM;
index 82a6f34d39d012fb35d9a0d490503fcc2048e6e2..3891702b81df8fef757677dc779f4bddd2e31789 100644 (file)
@@ -1630,8 +1630,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
        if (snaplen > res)
                snaplen = res;
 
-       if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
-           (unsigned)sk->sk_rcvbuf)
+       if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
                goto drop_n_acct;
 
        if (skb_shared(skb)) {
@@ -1762,8 +1761,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
        if (po->tp_version <= TPACKET_V2) {
                if (macoff + snaplen > po->rx_ring.frame_size) {
                        if (po->copy_thresh &&
-                               atomic_read(&sk->sk_rmem_alloc) + skb->truesize
-                               < (unsigned)sk->sk_rcvbuf) {
+                           atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
                                if (skb_shared(skb)) {
                                        copy_skb = skb_clone(skb, GFP_ATOMIC);
                                } else {
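
The reported case amounts to a receiver with a very small receive buffer facing large packets. A hypothetical reproducer sketch follows; the socket type, port, and sizes are assumptions for illustration only, not taken from the report.

/*
 * Hypothetical reproducer sketch (illustrative values only).  A datagram
 * receiver asks for a tiny SO_RCVBUF; the kernel clamps very small values
 * up to its minimum, but with a big-MTU sender one skb's truesize could
 * still exceed that limit.  Before this patch such a datagram could be
 * dropped in sock_queue_rcv_skb() even with an empty queue; after it,
 * the first packet is always queued.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>

int main(void)
{
    int fd = socket(AF_INET, SOCK_DGRAM, 0);
    int rcvbuf = 256;                          /* deliberately tiny request */

    setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcvbuf, sizeof(rcvbuf));

    struct sockaddr_in addr;
    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(12345);              /* example port */
    addr.sin_addr.s_addr = htonl(INADDR_ANY);
    bind(fd, (struct sockaddr *)&addr, sizeof(addr));

    char buf[65536];
    /* With a jumbo-frame sender, this datagram could previously be dropped
     * by the rcvbuf check even though the queue was empty. */
    ssize_t n = recv(fd, buf, sizeof(buf), 0);
    printf("received %zd bytes\n", n);
    return 0;
}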