Diffstat (limited to 'net/ipv4')
-rw-r--r--   net/ipv4/af_inet.c       | 14
-rw-r--r--   net/ipv4/route.c         | 26
-rw-r--r--   net/ipv4/tcp.c           | 13
-rw-r--r--   net/ipv4/tcp_ipv4.c      |  4
-rw-r--r--   net/ipv4/tcp_minisocks.c |  2
5 files changed, 42 insertions, 17 deletions
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 8d157157bf8e..318d4674faa1 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1106,7 +1106,15 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
 	int ihl;
 	int id;
 
-	if (!pskb_may_pull(skb, sizeof(*iph)))
+	if (unlikely(skb_shinfo(skb)->gso_type &
+		     ~(SKB_GSO_TCPV4 |
+		       SKB_GSO_UDP |
+		       SKB_GSO_DODGY |
+		       SKB_GSO_TCP_ECN |
+		       0)))
+		goto out;
+
+	if (unlikely(!pskb_may_pull(skb, sizeof(*iph))))
 		goto out;
 
 	iph = skb->nh.iph;
@@ -1114,7 +1122,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
 	if (ihl < sizeof(*iph))
 		goto out;
 
-	if (!pskb_may_pull(skb, ihl))
+	if (unlikely(!pskb_may_pull(skb, ihl)))
 		goto out;
 
 	skb->h.raw = __skb_pull(skb, ihl);
@@ -1125,7 +1133,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features)
 
 	rcu_read_lock();
 	ops = rcu_dereference(inet_protos[proto]);
-	if (ops && ops->gso_segment)
+	if (likely(ops && ops->gso_segment))
 		segs = ops->gso_segment(skb, features);
 	rcu_read_unlock();
 
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index da44fabf4dc5..2dc6dbb28467 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -205,21 +205,27 @@ __u8 ip_tos2prio[16] = {
 struct rt_hash_bucket {
 	struct rtable	*chain;
 };
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) || \
+	defined(CONFIG_PROVE_LOCKING)
 /*
  * Instead of using one spinlock for each rt_hash_bucket, we use a table of spinlocks
  * The size of this table is a power of two and depends on the number of CPUS.
+ * (on lockdep we have a quite big spinlock_t, so keep the size down there)
  */
-#if NR_CPUS >= 32
-#define RT_HASH_LOCK_SZ	4096
-#elif NR_CPUS >= 16
-#define RT_HASH_LOCK_SZ	2048
-#elif NR_CPUS >= 8
-#define RT_HASH_LOCK_SZ	1024
-#elif NR_CPUS >= 4
-#define RT_HASH_LOCK_SZ	512
+#ifdef CONFIG_LOCKDEP
+# define RT_HASH_LOCK_SZ	256
 #else
-#define RT_HASH_LOCK_SZ	256
+# if NR_CPUS >= 32
+#  define RT_HASH_LOCK_SZ	4096
+# elif NR_CPUS >= 16
+#  define RT_HASH_LOCK_SZ	2048
+# elif NR_CPUS >= 8
+#  define RT_HASH_LOCK_SZ	1024
+# elif NR_CPUS >= 4
+#  define RT_HASH_LOCK_SZ	512
+# else
+#  define RT_HASH_LOCK_SZ	256
+# endif
 #endif
 
 static spinlock_t	*rt_hash_locks;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 804458712d88..f6a2d9223d07 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2170,8 +2170,19 @@ struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
 
 	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
 		/* Packet is from an untrusted source, reset gso_segs. */
-		int mss = skb_shinfo(skb)->gso_size;
+		int type = skb_shinfo(skb)->gso_type;
+		int mss;
+
+		if (unlikely(type &
+			     ~(SKB_GSO_TCPV4 |
+			       SKB_GSO_DODGY |
+			       SKB_GSO_TCP_ECN |
+			       SKB_GSO_TCPV6 |
+			       0) ||
+			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
+			goto out;
+
+		mss = skb_shinfo(skb)->gso_size;
 
 		skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
 
 		segs = NULL;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 8355b729fa95..5a886e6efbbe 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -90,7 +90,7 @@ static struct socket *tcp_socket;
 void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
 
 struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
-	.lhash_lock	= RW_LOCK_UNLOCKED,
+	.lhash_lock	= __RW_LOCK_UNLOCKED(tcp_hashinfo.lhash_lock),
 	.lhash_users	= ATOMIC_INIT(0),
 	.lhash_wait	= __WAIT_QUEUE_HEAD_INITIALIZER(tcp_hashinfo.lhash_wait),
 };
@@ -1090,7 +1090,7 @@ process:
 
 	skb->dev = NULL;
 
-	bh_lock_sock(sk);
+	bh_lock_sock_nested(sk);
 	ret = 0;
 	if (!sock_owned_by_user(sk)) {
 #ifdef CONFIG_NET_DMA
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index e0851697ad5e..0ccb7cb22b15 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -40,7 +40,7 @@ int sysctl_tcp_abort_on_overflow;
 struct inet_timewait_death_row tcp_death_row = {
 	.sysctl_max_tw_buckets = NR_FILE * 2,
 	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
-	.death_lock	= SPIN_LOCK_UNLOCKED,
+	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
 	.hashinfo	= &tcp_hashinfo,
 	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
 					    (unsigned long)&tcp_death_row),
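
Note: the gso_type checks added in inet_gso_segment() and tcp_tso_segment() above both use the same bitmask whitelist pattern: clear the accepted bits from the packet's gso_type and bail out if anything is left over. The following is only a minimal, self-contained C sketch of that pattern; the flag values are illustrative stand-ins, not the kernel's SKB_GSO_* definitions.

	/*
	 * Sketch of the whitelist check used in the diff above: any bit
	 * outside the accepted set causes the packet to be rejected.
	 * Flag values are illustrative, not the kernel's SKB_GSO_* bits.
	 */
	#include <stdio.h>

	#define GSO_TCPV4	(1 << 0)
	#define GSO_UDP		(1 << 1)
	#define GSO_DODGY	(1 << 2)
	#define GSO_TCP_ECN	(1 << 3)

	static int gso_type_ok(unsigned int type)
	{
		/* True only if every set bit is within the accepted mask. */
		return !(type & ~(GSO_TCPV4 | GSO_UDP | GSO_DODGY | GSO_TCP_ECN));
	}

	int main(void)
	{
		printf("%d\n", gso_type_ok(GSO_TCPV4 | GSO_TCP_ECN));	/* 1: known bits only */
		printf("%d\n", gso_type_ok(GSO_TCPV4 | (1 << 7)));	/* 0: unknown bit set */
		return 0;
	}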
