[kernel] r19775 - in dists/squeeze-security/linux-2.6/debian: . patches/bugfix/all patches/debian patches/series

Dann Frazier dannf at alioth.debian.org
Wed Jan 23 21:05:05 UTC 2013


Author: dannf
Date: Wed Jan 23 21:05:04 2013
New Revision: 19775

Log:
net: sk_add_backlog() take rmem_alloc into account (CVE-2010-4805)

Added:
   dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch
   dists/squeeze-security/linux-2.6/debian/patches/debian/net-Avoid-ABI-change-from-limit-for-socket-backlog-2.patch
Modified:
   dists/squeeze-security/linux-2.6/debian/changelog
   dists/squeeze-security/linux-2.6/debian/patches/series/47squeeze1

Modified: dists/squeeze-security/linux-2.6/debian/changelog
==============================================================================
--- dists/squeeze-security/linux-2.6/debian/changelog	Wed Jan 23 14:39:09 2013	(r19774)
+++ dists/squeeze-security/linux-2.6/debian/changelog	Wed Jan 23 21:05:04 2013	(r19775)
@@ -7,6 +7,7 @@
   * exec: use -ELOOP for max recursion depth (CVE-2012-4530)
   * ext4: Fix max file size and logical block counting of extent format file
     (CVE-2011-2695)
+  * net: sk_add_backlog() take rmem_alloc into account (CVE-2010-4805)
 
  -- dann frazier <dannf at debian.org>  Mon, 22 Oct 2012 20:34:13 -0500
 

Added: dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch	Wed Jan 23 21:05:04 2013	(r19775)
@@ -0,0 +1,136 @@
+commit c377411f2494a931ff7facdbb3a6839b1266bcf6
+Author: Eric Dumazet <eric.dumazet at gmail.com>
+Date:   Tue Apr 27 15:13:20 2010 -0700
+
+    net: sk_add_backlog() take rmem_alloc into account
+    
+    The current socket backlog limit is not enough to really stop DDoS
+    attacks, because the user thread spends a long time processing a full
+    backlog each round, and may end up spinning on the socket lock.
+    
+    We should count the backlog size and the receive_queue size (aka
+    rmem_alloc) together to pace writers, and let the user thread run
+    without being slowed down too much.
+    
+    Introduce a sk_rcvqueues_full() helper, to avoid taking the socket
+    lock in stress situations.
+    
+    Under huge stress from a multiqueue/RPS-enabled NIC, a single-flow UDP
+    receiver can now process ~200,000 pps (instead of ~100 pps before the
+    patch) on an 8-core machine.
+    
+    Signed-off-by: Eric Dumazet <eric.dumazet at gmail.com>
+    Signed-off-by: David S. Miller <davem at davemloft.net>
+    [dannf: backported to Debian's 2.6.32]
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index d04a1ab..1f6d6aa 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -243,7 +243,6 @@ struct sock {
+ 		struct sk_buff *head;
+ 		struct sk_buff *tail;
+ 		int len;
+-		int limit;
+ 	} sk_backlog;
+ 	wait_queue_head_t	*sk_sleep;
+ 	struct dst_entry	*sk_dst_cache;
+@@ -575,10 +574,20 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+ 	skb->next = NULL;
+ }
+ 
++/*
++ * Take into account size of receive queue and backlog queue
++ */
++static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
++{
++	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
++
++	return qsize + skb->truesize > sk->sk_rcvbuf;
++}
++
+ /* The per-socket spinlock must be held here. */
+ static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
+ {
+-	if (sk->sk_backlog.len >= max(sk->sk_backlog.limit, sk->sk_rcvbuf << 1))
++	if (sk_rcvqueues_full(sk, skb))
+ 		return -ENOBUFS;
+ 
+ 	sk_add_backlog(sk, skb);
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 273e1e9..755a614 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -323,6 +323,10 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
+ 
+ 	skb->dev = NULL;
+ 
++	if (sk_rcvqueues_full(sk, skb)) {
++		atomic_inc(&sk->sk_drops);
++		goto discard_and_relse;
++	}
+ 	if (nested)
+ 		bh_lock_sock_nested(sk);
+ 	else
+@@ -1863,7 +1867,6 @@ void sock_init_data(struct socket *sock, struct sock *sk)
+ 	sk->sk_allocation	=	GFP_KERNEL;
+ 	sk->sk_rcvbuf		=	sysctl_rmem_default;
+ 	sk->sk_sndbuf		=	sysctl_wmem_default;
+-	sk->sk_backlog.limit	=	sk->sk_rcvbuf << 1;
+ 	sk->sk_state		=	TCP_CLOSE;
+ 	sk_set_socket(sk, sock);
+ 
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index f66a23b..571da81 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -1183,6 +1183,10 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ 			goto drop;
+ 	}
+ 
++
++	if (sk_rcvqueues_full(sk, skb))
++		goto drop;
++
+ 	rc = 0;
+ 
+ 	bh_lock_sock(sk);
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 4ae5ee3..7d2e94e 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -480,6 +480,10 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
+ 			bh_unlock_sock(sk2);
+ 		}
+ 	}
++	if (sk_rcvqueues_full(sk, skb)) {
++		kfree_skb(skb);
++		goto out;
++	}
+ 	bh_lock_sock(sk);
+ 	if (!sock_owned_by_user(sk))
+ 		udpv6_queue_rcv_skb(sk, skb);
+@@ -602,6 +606,10 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
+ 
+ 	/* deliver */
+ 
++	if (sk_rcvqueues_full(sk, skb)) {
++		sock_put(sk);
++		goto discard;
++	}
+ 	bh_lock_sock(sk);
+ 	if (!sock_owned_by_user(sk))
+ 		udpv6_queue_rcv_skb(sk, skb);
+diff --git a/net/sctp/socket.c b/net/sctp/socket.c
+index 374dfe5..3a95fcb 100644
+--- a/net/sctp/socket.c
++++ b/net/sctp/socket.c
+@@ -3719,9 +3719,6 @@ SCTP_STATIC int sctp_init_sock(struct sock *sk)
+ 	SCTP_DBG_OBJCNT_INC(sock);
+ 	percpu_counter_inc(&sctp_sockets_allocated);
+ 
+-	/* Set socket backlog limit. */
+-	sk->sk_backlog.limit = sysctl_sctp_rmem[1];
+-
+ 	local_bh_disable();
+ 	sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
+ 	local_bh_enable();

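The heart of the fix is visible in the sk_rcvqueues_full() hunk above:
senders now charge both the backlog and the data already sitting in the
receive queue (sk_rmem_alloc) against sk_rcvbuf before taking the socket
lock, whereas the old code compared sk_backlog.len alone against a separate
per-socket limit. A minimal standalone sketch of the same accounting
(toy_sock and rcvqueues_full() are illustrative names, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for the few struct sock fields the check reads. */
struct toy_sock {
	unsigned int backlog_len; /* bytes queued in the backlog */
	unsigned int rmem_alloc;  /* bytes queued in the receive queue */
	unsigned int rcvbuf;      /* receive buffer limit (SO_RCVBUF) */
};

/* Mirrors sk_rcvqueues_full(): both queues count against rcvbuf, so a
 * flood cannot park unbounded data behind a busy socket lock. */
static bool rcvqueues_full(const struct toy_sock *sk, unsigned int truesize)
{
	unsigned int qsize = sk->backlog_len + sk->rmem_alloc;

	return qsize + truesize > sk->rcvbuf;
}

int main(void)
{
	struct toy_sock sk = {
		.backlog_len = 60000,
		.rmem_alloc  = 50000,
		.rcvbuf      = 110000,
	};

	/* A 1500-byte packet would push the combined queues past rcvbuf,
	 * so the caller drops it without ever taking the socket lock. */
	printf("drop: %d\n", rcvqueues_full(&sk, 1500));
	return 0;
}

Doing the check before bh_lock_sock() is the point of the helper: under a
flood, the drop happens with no contention on the lock the user thread is
holding while it drains its queues.
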
Added: dists/squeeze-security/linux-2.6/debian/patches/debian/net-Avoid-ABI-change-from-limit-for-socket-backlog-2.patch
==============================================================================
--- /dev/null	00:00:00 1970	(empty, because file is newly added)
+++ dists/squeeze-security/linux-2.6/debian/patches/debian/net-Avoid-ABI-change-from-limit-for-socket-backlog-2.patch	Wed Jan 23 21:05:04 2013	(r19775)
@@ -0,0 +1,74 @@
+From: Ben Hutchings <ben at decadent.org.uk>
+Date: Thu, 9 Sep 2010 03:46:50 +0100
+Subject: [PATCH 8/8] net: Avoid ABI change from limit for socket backlog
+
+Move the new fields to the end of struct sock and hide them from genksyms.
+---
+ include/net/sock.h |   10 ++++++----
+ net/core/sock.c    |    6 +++---
+ net/sctp/socket.c  |    2 +-
+ 3 files changed, 10 insertions(+), 8 deletions(-)
+ [dannf: Adjusted to apply on top of bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch]
+
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 1f6d6aa..e5a0d8c 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -242,7 +242,6 @@ struct sock {
+ 	struct {
+ 		struct sk_buff *head;
+ 		struct sk_buff *tail;
+-		int len;
+ 	} sk_backlog;
+ 	wait_queue_head_t	*sk_sleep;
+ 	struct dst_entry	*sk_dst_cache;
+@@ -302,6 +301,9 @@ struct sock {
+   	int			(*sk_backlog_rcv)(struct sock *sk,
+ 						  struct sk_buff *skb);  
+ 	void                    (*sk_destruct)(struct sock *sk);
++#ifndef __GENKSYMS__
++	int                     sk_backlog_len;
++#endif
+ };
+ 
+ /*
+@@ -579,7 +581,7 @@ static inline void sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+  */
+ static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb)
+ {
+-	unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
++	unsigned int qsize = sk->sk_backlog_len + atomic_read(&sk->sk_rmem_alloc);
+ 
+ 	return qsize + skb->truesize > sk->sk_rcvbuf;
+ }
+@@ -591,7 +593,7 @@ static inline int sk_add_backlog_limited(struct sock *sk, struct sk_buff *skb)
+ 		return -ENOBUFS;
+ 
+ 	sk_add_backlog(sk, skb);
+-	sk->sk_backlog.len += skb->truesize;
++	sk->sk_backlog_len += skb->truesize;
+ 	return 0;
+ }
+ 
+diff --git a/net/core/sock.c b/net/core/sock.c
+index 755a614..188a326 100644
+--- a/net/core/sock.c
++++ b/net/core/sock.c
+@@ -1122,7 +1122,7 @@ struct sock *sk_clone(const struct sock *sk, const gfp_t priority)
+ 		sock_lock_init(newsk);
+ 		bh_lock_sock(newsk);
+ 		newsk->sk_backlog.head	= newsk->sk_backlog.tail = NULL;
+-		newsk->sk_backlog.len = 0;
++		newsk->sk_backlog_len = 0;
+ 
+ 		atomic_set(&newsk->sk_rmem_alloc, 0);
+ 		/*
+@@ -1534,7 +1534,7 @@ static void __release_sock(struct sock *sk)
+ 	 * Doing the zeroing here guarantee we can not loop forever
+ 	 * while a wild producer attempts to flood us.
+ 	 */
+-	sk->sk_backlog.len = 0;
++	sk->sk_backlog_len = 0;
+ }
+ 
+ /**

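Stable Debian releases must not change the kernel module ABI, and genksyms
computes the symbol-version checksums from the layouts of exported
structures. The patch above therefore drops the counter from its original
position, appends it to the end of struct sock as sk_backlog_len, and hides
it behind #ifndef __GENKSYMS__; genksyms defines that macro while it runs,
so the guarded field never enters the checksum. A compilable sketch of the
pattern, with a made-up struct widget standing in for struct sock:

#include <stdio.h>

/* Illustrative pattern only; struct widget and its members are invented.
 * Normal builds see the full structure, while the genksyms pass (which
 * defines __GENKSYMS__) sees the old layout and so emits the old CRC. */
struct widget {
	int a;                             /* existing members keep their offsets */
	void (*handler)(struct widget *w);
#ifndef __GENKSYMS__
	int new_counter;                   /* appended field, hidden from genksyms */
#endif
};

int main(void)
{
	printf("sizeof(struct widget) = %zu\n", sizeof(struct widget));
	return 0;
}

The residual cost is that the structure grows, which is only safe while the
kernel itself controls every allocation of it; that judgment call is why
such fixups live under patches/debian/ rather than patches/bugfix/all/.
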
Modified: dists/squeeze-security/linux-2.6/debian/patches/series/47squeeze1
==============================================================================
--- dists/squeeze-security/linux-2.6/debian/patches/series/47squeeze1	Wed Jan 23 14:39:09 2013	(r19774)
+++ dists/squeeze-security/linux-2.6/debian/patches/series/47squeeze1	Wed Jan 23 21:05:04 2013	(r19775)
@@ -9,3 +9,6 @@
 + bugfix/all/exec-do-not-leave-bprm-interp-on-stack.patch
 + bugfix/all/exec-use-ELOOP-for-max-recursion-depth.patch
 + bugfix/all/ext4-Fix-max-file-size-and-logical-block-counting-of-extent-format-file.patch
+- debian/net-Avoid-ABI-change-from-limit-for-socket-backlog.patch
++ bugfix/all/net-sk_add_backlog-take-remem_alloc-into-account.patch
++ debian/net-Avoid-ABI-change-from-limit-for-socket-backlog-2.patch


