Diffstat (limited to 'net')
62 files changed, 323 insertions, 295 deletions
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 7a36878241da..4f99bb86af5c 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -462,7 +462,8 @@ int vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... */ - if (veth->h_vlan_proto != htons(ETH_P_8021Q)) { + if (veth->h_vlan_proto != htons(ETH_P_8021Q) || + VLAN_DEV_INFO(dev)->flags & VLAN_FLAG_REORDER_HDR) { int orig_headroom = skb_headroom(skb); unsigned short veth_TCI; diff --git a/net/bridge/br.c b/net/bridge/br.c index 93867bb6cc97..a90182873120 100644 --- a/net/bridge/br.c +++ b/net/bridge/br.c @@ -39,7 +39,7 @@ static int __init br_init(void) err = br_fdb_init(); if (err) - goto err_out1; + goto err_out; err = br_netfilter_init(); if (err) @@ -65,6 +65,8 @@ err_out3: err_out2: br_netfilter_fini(); err_out1: + br_fdb_fini(); +err_out: llc_sap_put(br_stp_sap); return err; } diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 3cedd4eeeed6..0ee79a726d91 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -122,6 +122,7 @@ static inline int is_link_local(const unsigned char *dest) struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) { const unsigned char *dest = eth_hdr(skb)->h_dest; + int (*rhook)(struct sk_buff *skb); if (!is_valid_ether_addr(eth_hdr(skb)->h_source)) goto drop; @@ -147,9 +148,9 @@ struct sk_buff *br_handle_frame(struct net_bridge_port *p, struct sk_buff *skb) switch (p->state) { case BR_STATE_FORWARDING: - - if (br_should_route_hook) { - if (br_should_route_hook(skb)) + rhook = rcu_dereference(br_should_route_hook); + if (rhook != NULL) { + if (rhook(skb)) return skb; dest = eth_hdr(skb)->h_dest; } diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c index 392d877040d3..6436d30a550e 100644 --- a/net/bridge/netfilter/ebt_among.c +++ b/net/bridge/netfilter/ebt_among.c @@ -187,7 +187,7 @@ static int ebt_among_check(const char *tablename, unsigned int hookmask, if (datalen != EBT_ALIGN(expected_length)) { printk(KERN_WARNING - "ebtables: among: wrong size: %d" + "ebtables: among: wrong size: %d " "against expected %d, rounded to %Zd\n", datalen, expected_length, EBT_ALIGN(expected_length)); diff --git a/net/bridge/netfilter/ebtable_broute.c b/net/bridge/netfilter/ebtable_broute.c index e44519ebf1d2..be6f18681053 100644 --- a/net/bridge/netfilter/ebtable_broute.c +++ b/net/bridge/netfilter/ebtable_broute.c @@ -70,13 +70,13 @@ static int __init ebtable_broute_init(void) if (ret < 0) return ret; /* see br_input.c */ - br_should_route_hook = ebt_broute; + rcu_assign_pointer(br_should_route_hook, ebt_broute); return ret; } static void __exit ebtable_broute_fini(void) { - br_should_route_hook = NULL; + rcu_assign_pointer(br_should_route_hook, NULL); synchronize_net(); ebt_unregister_table(&broute_table); } diff --git a/net/core/pktgen.c b/net/core/pktgen.c index de33f36947e9..285ec3ed9b37 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2463,8 +2463,6 @@ static int pktgen_output_ipsec(struct sk_buff *skb, struct pktgen_dev *pkt_dev) x->curlft.bytes +=skb->len; x->curlft.packets++; - spin_unlock(&x->lock); - error: spin_unlock(&x->lock); return err; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 32d5826b7177..5b4ce9b4dd20 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -275,12 +275,11 @@ static void skb_release_data(struct sk_buff *skb) /* * Free an skbuff by memory without cleaning the state. 
*/ -void kfree_skbmem(struct sk_buff *skb) +static void kfree_skbmem(struct sk_buff *skb) { struct sk_buff *other; atomic_t *fclone_ref; - skb_release_data(skb); switch (skb->fclone) { case SKB_FCLONE_UNAVAILABLE: kmem_cache_free(skbuff_head_cache, skb); @@ -307,16 +306,8 @@ void kfree_skbmem(struct sk_buff *skb) } } -/** - * __kfree_skb - private function - * @skb: buffer - * - * Free an sk_buff. Release anything attached to the buffer. - * Clean the state. This is an internal helper function. Users should - * always call kfree_skb - */ - -void __kfree_skb(struct sk_buff *skb) +/* Free everything but the sk_buff shell. */ +static void skb_release_all(struct sk_buff *skb) { dst_release(skb->dst); #ifdef CONFIG_XFRM @@ -340,7 +331,21 @@ void __kfree_skb(struct sk_buff *skb) skb->tc_verd = 0; #endif #endif + skb_release_data(skb); +} + +/** + * __kfree_skb - private function + * @skb: buffer + * + * Free an sk_buff. Release anything attached to the buffer. + * Clean the state. This is an internal helper function. Users should + * always call kfree_skb + */ +void __kfree_skb(struct sk_buff *skb) +{ + skb_release_all(skb); kfree_skbmem(skb); } @@ -441,7 +446,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb) */ struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src) { - skb_release_data(dst); + skb_release_all(dst); return __skb_clone(dst, src); } EXPORT_SYMBOL_GPL(skb_morph); diff --git a/net/dccp/ccids/lib/loss_interval.c b/net/dccp/ccids/lib/loss_interval.c index 40ad428a27f5..d26b88dbbb45 100644 --- a/net/dccp/ccids/lib/loss_interval.c +++ b/net/dccp/ccids/lib/loss_interval.c @@ -166,7 +166,7 @@ static u32 dccp_li_calc_first_li(struct sock *sk, } if (unlikely(interval == 0)) { - DCCP_WARN("%s(%p), Could not find a win_count interval > 0." + DCCP_WARN("%s(%p), Could not find a win_count interval > 0. 
" "Defaulting to 1\n", dccp_role(sk), sk); interval = 1; } diff --git a/net/decnet/dn_dev.c b/net/decnet/dn_dev.c index 66e266fb5908..3bc82dc83b38 100644 --- a/net/decnet/dn_dev.c +++ b/net/decnet/dn_dev.c @@ -651,16 +651,18 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg) struct dn_dev *dn_db; struct ifaddrmsg *ifm; struct dn_ifaddr *ifa, **ifap; - int err = -EADDRNOTAVAIL; + int err; err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy); if (err < 0) goto errout; + err = -ENODEV; ifm = nlmsg_data(nlh); if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL) goto errout; + err = -EADDRNOTAVAIL; for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) { if (tb[IFA_LOCAL] && nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2)) diff --git a/net/ieee80211/ieee80211_crypt_ccmp.c b/net/ieee80211/ieee80211_crypt_ccmp.c index c6d760d9fbbe..208bf35b5546 100644 --- a/net/ieee80211/ieee80211_crypt_ccmp.c +++ b/net/ieee80211/ieee80211_crypt_ccmp.c @@ -338,7 +338,7 @@ static int ieee80211_ccmp_decrypt(struct sk_buff *skb, int hdr_len, void *priv) pos += 8; if (ccmp_replay_check(pn, key->rx_pn)) { - if (net_ratelimit()) { + if (ieee80211_ratelimit_debug(IEEE80211_DL_DROP)) { IEEE80211_DEBUG_DROP("CCMP: replay detected: STA=%s " "previous PN %02x%02x%02x%02x%02x%02x " "received PN %02x%02x%02x%02x%02x%02x\n", diff --git a/net/ieee80211/ieee80211_crypt_tkip.c b/net/ieee80211/ieee80211_crypt_tkip.c index 58b22619ab15..8e146949fc6f 100644 --- a/net/ieee80211/ieee80211_crypt_tkip.c +++ b/net/ieee80211/ieee80211_crypt_tkip.c @@ -464,7 +464,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) pos += 8; if (tkip_replay_check(iv32, iv16, tkey->rx_iv32, tkey->rx_iv16)) { - if (net_ratelimit()) { + if (ieee80211_ratelimit_debug(IEEE80211_DL_DROP)) { IEEE80211_DEBUG_DROP("TKIP: replay detected: STA=%s" " previous TSC %08x%04x received TSC " "%08x%04x\n", print_mac(mac, hdr->addr2), @@ -504,7 +504,7 @@ static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) * it needs to be recalculated for the next packet. 
*/ tkey->rx_phase1_done = 0; } - if (net_ratelimit()) { + if (ieee80211_ratelimit_debug(IEEE80211_DL_DROP)) { IEEE80211_DEBUG_DROP("TKIP: ICV error detected: STA=" "%s\n", print_mac(mac, hdr->addr2)); } diff --git a/net/ieee80211/ieee80211_tx.c b/net/ieee80211/ieee80211_tx.c index a4c3c51140a3..6d06f1385e28 100644 --- a/net/ieee80211/ieee80211_tx.c +++ b/net/ieee80211/ieee80211_tx.c @@ -144,7 +144,8 @@ static int ieee80211_copy_snap(u8 * data, u16 h_proto) snap->oui[1] = oui[1]; snap->oui[2] = oui[2]; - *(u16 *) (data + SNAP_SIZE) = htons(h_proto); + h_proto = htons(h_proto); + memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16)); return SNAP_SIZE + sizeof(u16); } diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c index 36d6798947b5..b3f366a33a5c 100644 --- a/net/ipv4/arp.c +++ b/net/ipv4/arp.c @@ -111,12 +111,8 @@ #include <net/tcp.h> #include <net/sock.h> #include <net/arp.h> -#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) #include <net/ax25.h> -#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE) #include <net/netrom.h> -#endif -#endif #if defined(CONFIG_ATM_CLIP) || defined(CONFIG_ATM_CLIP_MODULE) #include <net/atmclip.h> struct neigh_table *clip_tbl_hook; @@ -731,20 +727,10 @@ static int arp_process(struct sk_buff *skb) htons(dev_type) != arp->ar_hrd) goto out; break; -#ifdef CONFIG_NET_ETHERNET case ARPHRD_ETHER: -#endif -#ifdef CONFIG_TR case ARPHRD_IEEE802_TR: -#endif -#ifdef CONFIG_FDDI case ARPHRD_FDDI: -#endif -#ifdef CONFIG_NET_FC case ARPHRD_IEEE802: -#endif -#if defined(CONFIG_NET_ETHERNET) || defined(CONFIG_TR) || \ - defined(CONFIG_FDDI) || defined(CONFIG_NET_FC) /* * ETHERNET, Token Ring and Fibre Channel (which are IEEE 802 * devices, according to RFC 2625) devices will accept ARP @@ -759,21 +745,16 @@ static int arp_process(struct sk_buff *skb) arp->ar_pro != htons(ETH_P_IP)) goto out; break; -#endif -#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE) case ARPHRD_AX25: if (arp->ar_pro != htons(AX25_P_IP) || arp->ar_hrd != htons(ARPHRD_AX25)) goto out; break; -#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE) case ARPHRD_NETROM: if (arp->ar_pro != htons(AX25_P_IP) || arp->ar_hrd != htons(ARPHRD_NETROM)) goto out; break; -#endif -#endif } /* Understand only these message types */ @@ -828,7 +809,8 @@ static int arp_process(struct sk_buff *skb) if (arp->ar_op == htons(ARPOP_REQUEST) && inet_addr_type(tip) == RTN_LOCAL && !arp_ignore(in_dev,dev,sip,tip)) - arp_send(ARPOP_REPLY,ETH_P_ARP,tip,dev,tip,sha,dev->dev_addr,dev->dev_addr); + arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, + dev->dev_addr, sha); goto out; } diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index b0170732b5e9..e468e7a7aac4 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -51,6 +51,29 @@ static struct sock *idiagnl; #define INET_DIAG_PUT(skb, attrtype, attrlen) \ RTA_DATA(__RTA_PUT(skb, attrtype, attrlen)) +static DEFINE_MUTEX(inet_diag_table_mutex); + +static const struct inet_diag_handler *inet_diag_lock_handler(int type) +{ +#ifdef CONFIG_KMOD + if (!inet_diag_table[type]) + request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, + NETLINK_INET_DIAG, type); +#endif + + mutex_lock(&inet_diag_table_mutex); + if (!inet_diag_table[type]) + return ERR_PTR(-ENOENT); + + return inet_diag_table[type]; +} + +static inline void inet_diag_unlock_handler( + const struct inet_diag_handler *handler) +{ + mutex_unlock(&inet_diag_table_mutex); +} + static int inet_csk_diag_fill(struct sock *sk, struct sk_buff *skb, int ext, u32 pid, u32 seq, u16 nlmsg_flags, @@ -235,9 
+258,12 @@ static int inet_diag_get_exact(struct sk_buff *in_skb, struct inet_hashinfo *hashinfo; const struct inet_diag_handler *handler; - handler = inet_diag_table[nlh->nlmsg_type]; - BUG_ON(handler == NULL); + handler = inet_diag_lock_handler(nlh->nlmsg_type); + if (!handler) + return -ENOENT; + hashinfo = handler->idiag_hashinfo; + err = -EINVAL; if (req->idiag_family == AF_INET) { sk = inet_lookup(hashinfo, req->id.idiag_dst[0], @@ -255,11 +281,12 @@ static int inet_diag_get_exact(struct sk_buff *in_skb, } #endif else { - return -EINVAL; + goto unlock; } + err = -ENOENT; if (sk == NULL) - return -ENOENT; + goto unlock; err = -ESTALE; if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE || @@ -296,6 +323,8 @@ out: else sock_put(sk); } +unlock: + inet_diag_unlock_handler(handler); return err; } @@ -678,8 +707,10 @@ static int inet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) const struct inet_diag_handler *handler; struct inet_hashinfo *hashinfo; - handler = inet_diag_table[cb->nlh->nlmsg_type]; - BUG_ON(handler == NULL); + handler = inet_diag_lock_handler(cb->nlh->nlmsg_type); + if (!handler) + goto no_handler; + hashinfo = handler->idiag_hashinfo; s_i = cb->args[1]; @@ -743,7 +774,7 @@ skip_listen_ht: } if (!(r->idiag_states & ~(TCPF_LISTEN | TCPF_SYN_RECV))) - return skb->len; + goto unlock; for (i = s_i; i < hashinfo->ehash_size; i++) { struct inet_ehash_bucket *head = &hashinfo->ehash[i]; @@ -805,6 +836,9 @@ next_dying: done: cb->args[1] = i; cb->args[2] = num; +unlock: + inet_diag_unlock_handler(handler); +no_handler: return skb->len; } @@ -816,15 +850,6 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) nlmsg_len(nlh) < hdrlen) return -EINVAL; -#ifdef CONFIG_KMOD - if (inet_diag_table[nlh->nlmsg_type] == NULL) - request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, - NETLINK_INET_DIAG, nlh->nlmsg_type); -#endif - - if (inet_diag_table[nlh->nlmsg_type] == NULL) - return -ENOENT; - if (nlh->nlmsg_flags & NLM_F_DUMP) { if (nlmsg_attrlen(nlh, hdrlen)) { struct nlattr *attr; @@ -853,8 +878,6 @@ static void inet_diag_rcv(struct sk_buff *skb) mutex_unlock(&inet_diag_mutex); } -static DEFINE_SPINLOCK(inet_diag_register_lock); - int inet_diag_register(const struct inet_diag_handler *h) { const __u16 type = h->idiag_type; @@ -863,13 +886,13 @@ int inet_diag_register(const struct inet_diag_handler *h) if (type >= INET_DIAG_GETSOCK_MAX) goto out; - spin_lock(&inet_diag_register_lock); + mutex_lock(&inet_diag_table_mutex); err = -EEXIST; if (inet_diag_table[type] == NULL) { inet_diag_table[type] = h; err = 0; } - spin_unlock(&inet_diag_register_lock); + mutex_unlock(&inet_diag_table_mutex); out: return err; } @@ -882,11 +905,9 @@ void inet_diag_unregister(const struct inet_diag_handler *h) if (type >= INET_DIAG_GETSOCK_MAX) return; - spin_lock(&inet_diag_register_lock); + mutex_lock(&inet_diag_table_mutex); inet_diag_table[type] = NULL; - spin_unlock(&inet_diag_register_lock); - - synchronize_rcu(); + mutex_unlock(&inet_diag_table_mutex); } EXPORT_SYMBOL_GPL(inet_diag_unregister); diff --git a/net/ipv4/inet_lro.c b/net/ipv4/inet_lro.c index ac3b1d3dba2e..9a96c277393d 100644 --- a/net/ipv4/inet_lro.c +++ b/net/ipv4/inet_lro.c @@ -401,10 +401,11 @@ static struct sk_buff *lro_gen_skb(struct net_lro_mgr *lro_mgr, int data_len = len; int hdr_len = min(len, hlen); - skb = netdev_alloc_skb(lro_mgr->dev, hlen); + skb = netdev_alloc_skb(lro_mgr->dev, hlen + lro_mgr->frag_align_pad); if (!skb) return NULL; + skb_reserve(skb, lro_mgr->frag_align_pad); skb->len = 
len; skb->data_len = len - hdr_len; skb->truesize += true_size; diff --git a/net/ipv4/ipvs/ip_vs_core.c b/net/ipv4/ipvs/ip_vs_core.c index 20c884a57721..8fba20256f52 100644 --- a/net/ipv4/ipvs/ip_vs_core.c +++ b/net/ipv4/ipvs/ip_vs_core.c @@ -637,7 +637,7 @@ static int ip_vs_out_icmp(struct sk_buff *skb, int *related) verdict = NF_DROP; if (IP_VS_FWD_METHOD(cp) != 0) { - IP_VS_ERR("shouldn't reach here, because the box is on the" + IP_VS_ERR("shouldn't reach here, because the box is on the " "half connection in the tun/dr module.\n"); } diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index b64cf45a9ead..693d92490c11 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c @@ -1424,7 +1424,6 @@ proc_do_sync_threshold(ctl_table *table, int write, struct file *filp, static struct ctl_table vs_vars[] = { { - .ctl_name = NET_IPV4_VS_AMEMTHRESH, .procname = "amemthresh", .data = &sysctl_ip_vs_amemthresh, .maxlen = sizeof(int), @@ -1433,7 +1432,6 @@ static struct ctl_table vs_vars[] = { }, #ifdef CONFIG_IP_VS_DEBUG { - .ctl_name = NET_IPV4_VS_DEBUG_LEVEL, .procname = "debug_level", .data = &sysctl_ip_vs_debug_level, .maxlen = sizeof(int), @@ -1442,7 +1440,6 @@ static struct ctl_table vs_vars[] = { }, #endif { - .ctl_name = NET_IPV4_VS_AMDROPRATE, .procname = "am_droprate", .data = &sysctl_ip_vs_am_droprate, .maxlen = sizeof(int), @@ -1450,7 +1447,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec, }, { - .ctl_name = NET_IPV4_VS_DROP_ENTRY, .procname = "drop_entry", .data = &sysctl_ip_vs_drop_entry, .maxlen = sizeof(int), @@ -1458,7 +1454,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_do_defense_mode, }, { - .ctl_name = NET_IPV4_VS_DROP_PACKET, .procname = "drop_packet", .data = &sysctl_ip_vs_drop_packet, .maxlen = sizeof(int), @@ -1466,7 +1461,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_do_defense_mode, }, { - .ctl_name = NET_IPV4_VS_SECURE_TCP, .procname = "secure_tcp", .data = &sysctl_ip_vs_secure_tcp, .maxlen = sizeof(int), @@ -1475,7 +1469,6 @@ static struct ctl_table vs_vars[] = { }, #if 0 { - .ctl_name = NET_IPV4_VS_TO_ES, .procname = "timeout_established", .data = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED], .maxlen = sizeof(int), @@ -1483,7 +1476,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec_jiffies, }, { - .ctl_name = NET_IPV4_VS_TO_SS, .procname = "timeout_synsent", .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT], .maxlen = sizeof(int), @@ -1491,7 +1483,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec_jiffies, }, { - .ctl_name = NET_IPV4_VS_TO_SR, .procname = "timeout_synrecv", .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV], .maxlen = sizeof(int), @@ -1499,7 +1490,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec_jiffies, }, { - .ctl_name = NET_IPV4_VS_TO_FW, .procname = "timeout_finwait", .data = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT], .maxlen = sizeof(int), @@ -1507,7 +1497,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec_jiffies, }, { - .ctl_name = NET_IPV4_VS_TO_TW, .procname = "timeout_timewait", .data = &vs_timeout_table_dos.timeout[IP_VS_S_TIME_WAIT], .maxlen = sizeof(int), @@ -1515,7 +1504,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec_jiffies, }, { - .ctl_name = NET_IPV4_VS_TO_CL, .procname = "timeout_close", .data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE], .maxlen = sizeof(int), @@ -1523,7 +1511,6 @@ static struct 
ctl_table vs_vars[] = { .proc_handler = &proc_dointvec_jiffies, }, { - .ctl_name = NET_IPV4_VS_TO_CW, .procname = "timeout_closewait", .data = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT], .maxlen = sizeof(int), @@ -1531,7 +1518,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec_jiffies, }, { - .ctl_name = NET_IPV4_VS_TO_LA, .procname = "timeout_lastack", .data = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK], .maxlen = sizeof(int), @@ -1539,7 +1525,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec_jiffies, }, { - .ctl_name = NET_IPV4_VS_TO_LI, .procname = "timeout_listen", .data = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN], .maxlen = sizeof(int), @@ -1547,7 +1532,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec_jiffies, }, { - .ctl_name = NET_IPV4_VS_TO_SA, .procname = "timeout_synack", .data = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK], .maxlen = sizeof(int), @@ -1555,7 +1539,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec_jiffies, }, { - .ctl_name = NET_IPV4_VS_TO_UDP, .procname = "timeout_udp", .data = &vs_timeout_table_dos.timeout[IP_VS_S_UDP], .maxlen = sizeof(int), @@ -1563,7 +1546,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec_jiffies, }, { - .ctl_name = NET_IPV4_VS_TO_ICMP, .procname = "timeout_icmp", .data = &vs_timeout_table_dos.timeout[IP_VS_S_ICMP], .maxlen = sizeof(int), @@ -1572,7 +1554,6 @@ static struct ctl_table vs_vars[] = { }, #endif { - .ctl_name = NET_IPV4_VS_CACHE_BYPASS, .procname = "cache_bypass", .data = &sysctl_ip_vs_cache_bypass, .maxlen = sizeof(int), @@ -1580,7 +1561,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec, }, { - .ctl_name = NET_IPV4_VS_EXPIRE_NODEST_CONN, .procname = "expire_nodest_conn", .data = &sysctl_ip_vs_expire_nodest_conn, .maxlen = sizeof(int), @@ -1588,7 +1568,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec, }, { - .ctl_name = NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE, .procname = "expire_quiescent_template", .data = &sysctl_ip_vs_expire_quiescent_template, .maxlen = sizeof(int), @@ -1596,7 +1575,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_dointvec, }, { - .ctl_name = NET_IPV4_VS_SYNC_THRESHOLD, .procname = "sync_threshold", .data = &sysctl_ip_vs_sync_threshold, .maxlen = sizeof(sysctl_ip_vs_sync_threshold), @@ -1604,7 +1582,6 @@ static struct ctl_table vs_vars[] = { .proc_handler = &proc_do_sync_threshold, }, { - .ctl_name = NET_IPV4_VS_NAT_ICMP_SEND, .procname = "nat_icmp_send", .data = &sysctl_ip_vs_nat_icmp_send, .maxlen = sizeof(int), @@ -1616,7 +1593,6 @@ static struct ctl_table vs_vars[] = { static ctl_table vs_table[] = { { - .ctl_name = NET_IPV4_VS, .procname = "vs", .mode = 0555, .child = vs_vars diff --git a/net/ipv4/ipvs/ip_vs_lblc.c b/net/ipv4/ipvs/ip_vs_lblc.c index 052f4ed59174..ad89644ef5d2 100644 --- a/net/ipv4/ipvs/ip_vs_lblc.c +++ b/net/ipv4/ipvs/ip_vs_lblc.c @@ -114,7 +114,6 @@ struct ip_vs_lblc_table { static ctl_table vs_vars_table[] = { { - .ctl_name = NET_IPV4_VS_LBLC_EXPIRE, .procname = "lblc_expiration", .data = &sysctl_ip_vs_lblc_expiration, .maxlen = sizeof(int), @@ -126,7 +125,6 @@ static ctl_table vs_vars_table[] = { static ctl_table vs_table[] = { { - .ctl_name = NET_IPV4_VS, .procname = "vs", .mode = 0555, .child = vs_vars_table @@ -582,9 +580,14 @@ static struct ip_vs_scheduler ip_vs_lblc_scheduler = static int __init ip_vs_lblc_init(void) { + int ret; + INIT_LIST_HEAD(&ip_vs_lblc_scheduler.n_list); 
sysctl_header = register_sysctl_table(lblc_root_table); - return register_ip_vs_scheduler(&ip_vs_lblc_scheduler); + ret = register_ip_vs_scheduler(&ip_vs_lblc_scheduler); + if (ret) + unregister_sysctl_table(sysctl_header); + return ret; } diff --git a/net/ipv4/ipvs/ip_vs_lblcr.c b/net/ipv4/ipvs/ip_vs_lblcr.c index 427b593c1069..2a5ed85a3352 100644 --- a/net/ipv4/ipvs/ip_vs_lblcr.c +++ b/net/ipv4/ipvs/ip_vs_lblcr.c @@ -302,7 +302,6 @@ struct ip_vs_lblcr_table { static ctl_table vs_vars_table[] = { { - .ctl_name = NET_IPV4_VS_LBLCR_EXPIRE, .procname = "lblcr_expiration", .data = &sysctl_ip_vs_lblcr_expiration, .maxlen = sizeof(int), @@ -314,7 +313,6 @@ static ctl_table vs_vars_table[] = { static ctl_table vs_table[] = { { - .ctl_name = NET_IPV4_VS, .procname = "vs", .mode = 0555, .child = vs_vars_table @@ -771,9 +769,14 @@ static struct ip_vs_scheduler ip_vs_lblcr_scheduler = static int __init ip_vs_lblcr_init(void) { + int ret; + INIT_LIST_HEAD(&ip_vs_lblcr_scheduler.n_list); sysctl_header = register_sysctl_table(lblcr_root_table); - return register_ip_vs_scheduler(&ip_vs_lblcr_scheduler); + ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler); + if (ret) + unregister_sysctl_table(sysctl_header); + return ret; } diff --git a/net/ipv4/ipvs/ip_vs_proto.c b/net/ipv4/ipvs/ip_vs_proto.c index e844ddb82b9a..c0e11ec8f0f9 100644 --- a/net/ipv4/ipvs/ip_vs_proto.c +++ b/net/ipv4/ipvs/ip_vs_proto.c @@ -45,7 +45,7 @@ static struct ip_vs_protocol *ip_vs_proto_table[IP_VS_PROTO_TAB_SIZE]; /* * register an ipvs protocol */ -static int register_ip_vs_protocol(struct ip_vs_protocol *pp) +static int __used register_ip_vs_protocol(struct ip_vs_protocol *pp) { unsigned hash = IP_VS_PROTO_HASH(pp->protocol); diff --git a/net/ipv4/ipvs/ip_vs_sched.c b/net/ipv4/ipvs/ip_vs_sched.c index 1602304abbf9..432235861908 100644 --- a/net/ipv4/ipvs/ip_vs_sched.c +++ b/net/ipv4/ipvs/ip_vs_sched.c @@ -183,19 +183,6 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) /* increase the module use count */ ip_vs_use_count_inc(); - /* - * Make sure that the scheduler with this name doesn't exist - * in the scheduler list. - */ - sched = ip_vs_sched_getbyname(scheduler->name); - if (sched) { - ip_vs_scheduler_put(sched); - ip_vs_use_count_dec(); - IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler " - "already existed in the system\n", scheduler->name); - return -EINVAL; - } - write_lock_bh(&__ip_vs_sched_lock); if (scheduler->n_list.next != &scheduler->n_list) { @@ -207,6 +194,20 @@ int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler) } /* + * Make sure that the scheduler with this name doesn't exist + * in the scheduler list. 
+ */ + list_for_each_entry(sched, &ip_vs_schedulers, n_list) { + if (strcmp(scheduler->name, sched->name) == 0) { + write_unlock_bh(&__ip_vs_sched_lock); + ip_vs_use_count_dec(); + IP_VS_ERR("register_ip_vs_scheduler(): [%s] scheduler " + "already existed in the system\n", + scheduler->name); + return -EINVAL; + } + } + /* * Add it into the d-linked scheduler list */ list_add(&scheduler->n_list, &ip_vs_schedulers); diff --git a/net/ipv4/netfilter/iptable_raw.c b/net/ipv4/netfilter/iptable_raw.c index 5de6e57ac55c..f8678651250f 100644 --- a/net/ipv4/netfilter/iptable_raw.c +++ b/net/ipv4/netfilter/iptable_raw.c @@ -66,7 +66,7 @@ ipt_local_hook(unsigned int hook, if (skb->len < sizeof(struct iphdr) || ip_hdrlen(skb) < sizeof(struct iphdr)) { if (net_ratelimit()) - printk("iptable_raw: ignoring short SOCK_RAW" + printk("iptable_raw: ignoring short SOCK_RAW " "packet.\n"); return NF_ACCEPT; } diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c index 70e7997ea284..86b465b176ba 100644 --- a/net/ipv4/netfilter/nf_nat_core.c +++ b/net/ipv4/netfilter/nf_nat_core.c @@ -607,13 +607,10 @@ static void nf_nat_move_storage(struct nf_conn *conntrack, void *old) struct nf_conn_nat *new_nat = nf_ct_ext_find(conntrack, NF_CT_EXT_NAT); struct nf_conn_nat *old_nat = (struct nf_conn_nat *)old; struct nf_conn *ct = old_nat->ct; - unsigned int srchash; - if (!(ct->status & IPS_NAT_DONE_MASK)) + if (!ct || !(ct->status & IPS_NAT_DONE_MASK)) return; - srchash = hash_by_src(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple); - write_lock_bh(&nf_nat_lock); hlist_replace_rcu(&old_nat->bysource, &new_nat->bysource); new_nat->ct = ct; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 1bff9ed349ff..c426dec6d579 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -2888,18 +2888,14 @@ static int ip_rt_acct_read(char *buffer, char **start, off_t offset, offset /= sizeof(u32); if (length > 0) { - u32 *src = ((u32 *) IP_RT_ACCT_CPU(0)) + offset; u32 *dst = (u32 *) buffer; - /* Copy first cpu. 
*/ *start = buffer; - memcpy(dst, src, length); + memset(dst, 0, length); - /* Add the other cpus in, one int at a time */ for_each_possible_cpu(i) { unsigned int j; - - src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset; + u32 *src = ((u32 *) IP_RT_ACCT_CPU(i)) + offset; for (j = 0; j < length/4; j++) dst[j] += src[j]; diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index ffddd2b45352..bec6fe880657 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -191,7 +191,7 @@ static int sysctl_tcp_congestion_control(ctl_table *table, int __user *name, tcp_get_default_congestion_control(val); ret = sysctl_string(&tbl, name, nlen, oldval, oldlenp, newval, newlen); - if (ret == 0 && newval && newlen) + if (ret == 1 && newval && newlen) ret = tcp_set_default_congestion_control(val); return ret; } diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c index 64f1cbaf96e8..5aa5f5496d6d 100644 --- a/net/ipv4/tcp_illinois.c +++ b/net/ipv4/tcp_illinois.c @@ -298,7 +298,7 @@ static u32 tcp_illinois_ssthresh(struct sock *sk) struct illinois *ca = inet_csk_ca(sk); /* Multiplicative decrease */ - return max((tp->snd_cwnd * ca->beta) >> BETA_SHIFT, 2U); + return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); } diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0f0c1c9829a1..b9e429d2d1de 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3003,17 +3003,13 @@ static int tcp_process_frto(struct sock *sk, int flag) } if (tp->frto_counter == 1) { - /* Sending of the next skb must be allowed or no F-RTO */ - if (!tcp_send_head(sk) || - after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, - tp->snd_una + tp->snd_wnd)) { - tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), - flag); - return 1; - } - + /* tcp_may_send_now needs to see updated state */ tp->snd_cwnd = tcp_packets_in_flight(tp) + 2; tp->frto_counter = 2; + + if (!tcp_may_send_now(sk)) + tcp_enter_frto_loss(sk, 2, flag); + return 1; } else { switch (sysctl_tcp_frto_response) { @@ -3069,6 +3065,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) } prior_fackets = tp->fackets_out; + prior_in_flight = tcp_packets_in_flight(tp); if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. @@ -3108,8 +3105,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) if (!prior_packets) goto no_queue; - prior_in_flight = tcp_packets_in_flight(tp); - /* See if we can take anything off of the retransmit queue. 
*/ flag |= tcp_clean_rtx_queue(sk, &seq_rtt, prior_fackets); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index e566f3c67677..652c32368ccc 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -900,8 +900,7 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, sizeof(*keys) * md5sig->entries4); /* Free old key list, and reference new one */ - if (md5sig->keys4) - kfree(md5sig->keys4); + kfree(md5sig->keys4); md5sig->keys4 = keys; md5sig->alloced4++; } @@ -939,10 +938,10 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) tp->md5sig_info->alloced4 = 0; } else if (tp->md5sig_info->entries4 != i) { /* Need to do some manipulation */ - memcpy(&tp->md5sig_info->keys4[i], - &tp->md5sig_info->keys4[i+1], - (tp->md5sig_info->entries4 - i) * - sizeof(struct tcp4_md5sig_key)); + memmove(&tp->md5sig_info->keys4[i], + &tp->md5sig_info->keys4[i+1], + (tp->md5sig_info->entries4 - i) * + sizeof(struct tcp4_md5sig_key)); } tcp_free_md5sig_pool(); return 0; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 324b4207254a..f4c1eef89af0 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1162,8 +1162,7 @@ int tcp_may_send_now(struct sock *sk) return (skb && tcp_snd_test(sk, skb, tcp_current_mss(sk, 1), (tcp_skb_is_last(sk, skb) ? - TCP_NAGLE_PUSH : - tp->nonagle))); + tp->nonagle : TCP_NAGLE_PUSH))); } /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet @@ -1295,6 +1294,7 @@ static int tcp_mtu_probe(struct sock *sk) struct sk_buff *skb, *nskb, *next; int len; int probe_size; + int size_needed; unsigned int pif; int copy; int mss_now; @@ -1313,27 +1313,20 @@ static int tcp_mtu_probe(struct sock *sk) /* Very simple search strategy: just double the MSS. */ mss_now = tcp_current_mss(sk, 0); probe_size = 2*tp->mss_cache; + size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache; if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) { /* TODO: set timer for probe_converge_event */ return -1; } /* Have enough data in the send queue to probe? */ - len = 0; - if ((skb = tcp_send_head(sk)) == NULL) - return -1; - while ((len += skb->len) < probe_size && !tcp_skb_is_last(sk, skb)) - skb = tcp_write_queue_next(sk, skb); - if (len < probe_size) + if (tp->write_seq - tp->snd_nxt < size_needed) return -1; - /* Receive window check. */ - if (after(TCP_SKB_CB(skb)->seq + probe_size, tp->snd_una + tp->snd_wnd)) { - if (tp->snd_wnd < probe_size) - return -1; - else - return 0; - } + if (tp->snd_wnd < size_needed) + return -1; + if (after(tp->snd_nxt + size_needed, tp->snd_una + tp->snd_wnd)) + return 0; /* Do we need to wait to drain cwnd? 
*/ pif = tcp_packets_in_flight(tp); @@ -1352,7 +1345,6 @@ static int tcp_mtu_probe(struct sock *sk) skb = tcp_send_head(sk); tcp_insert_write_queue_before(nskb, skb, sk); - tcp_advance_send_head(sk, skb); TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 1bd8d818f8e9..e8c347579da9 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -967,7 +967,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev, if (unlikely(score.addr_type == IPV6_ADDR_ANY || score.addr_type & IPV6_ADDR_MULTICAST)) { LIMIT_NETDEBUG(KERN_DEBUG - "ADDRCONF: unspecified / multicast address" + "ADDRCONF: unspecified / multicast address " "assigned as unicast address on %s", dev->name); continue; @@ -2293,6 +2293,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, break; } + if (!idev && dev->mtu >= IPV6_MIN_MTU) + idev = ipv6_add_dev(dev); + if (idev) idev->if_flags |= IF_READY; } else { @@ -2357,12 +2360,18 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event, break; case NETDEV_CHANGEMTU: - if ( idev && dev->mtu >= IPV6_MIN_MTU) { + if (idev && dev->mtu >= IPV6_MIN_MTU) { rt6_mtu_change(dev, dev->mtu); idev->cnf.mtu6 = dev->mtu; break; } + if (!idev && dev->mtu >= IPV6_MIN_MTU) { + idev = ipv6_add_dev(dev); + if (idev) + break; + } + /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */ case NETDEV_DOWN: diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 3aad861975a0..93980c3b83e6 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -581,7 +581,10 @@ static int tcp_v6_md5_do_add(struct sock *sk, struct in6_addr *peer, } sk->sk_route_caps &= ~NETIF_F_GSO_MASK; } - tcp_alloc_md5sig_pool(); + if (tcp_alloc_md5sig_pool() == NULL) { + kfree(newkey); + return -ENOMEM; + } if (tp->md5sig_info->alloced6 == tp->md5sig_info->entries6) { keys = kmalloc((sizeof (tp->md5sig_info->keys6[0]) * (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); @@ -634,10 +637,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer) kfree(tp->md5sig_info->keys6); tp->md5sig_info->keys6 = NULL; tp->md5sig_info->alloced6 = 0; - - tcp_free_md5sig_pool(); - - return 0; } else { /* shrink the database */ if (tp->md5sig_info->entries6 != i) @@ -646,6 +645,8 @@ static int tcp_v6_md5_do_del(struct sock *sk, struct in6_addr *peer) (tp->md5sig_info->entries6 - i) * sizeof (tp->md5sig_info->keys6[0])); } + tcp_free_md5sig_pool(); + return 0; } } return -ENOENT; diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 1120b150e211..be627e1f04d8 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -1245,6 +1245,7 @@ static void ircomm_tty_flow_indication(void *instance, void *sap, self->flow = cmd; } +#ifdef CONFIG_PROC_FS static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf) { int ret=0; @@ -1354,7 +1355,6 @@ static int ircomm_tty_line_info(struct ircomm_tty_cb *self, char *buf) * * */ -#ifdef CONFIG_PROC_FS static int ircomm_tty_read_proc(char *buf, char **start, off_t offset, int len, int *eof, void *unused) { diff --git a/net/irda/iriap.c b/net/irda/iriap.c index dc5e34a01620..a86a5d83786b 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -928,7 +928,7 @@ void iriap_call_indication(struct iriap_cb *self, struct sk_buff *skb) opcode = fp[0]; if (~opcode & 0x80) { - IRDA_WARNING("%s: IrIAS multiframe commands or results" + IRDA_WARNING("%s: IrIAS multiframe commands 
or results " "is not implemented yet!\n", __FUNCTION__); return; } diff --git a/net/irda/irlan/irlan_eth.c b/net/irda/irlan/irlan_eth.c index 7f9c8542e5fc..c68220773d28 100644 --- a/net/irda/irlan/irlan_eth.c +++ b/net/irda/irlan/irlan_eth.c @@ -296,6 +296,7 @@ void irlan_eth_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) */ void irlan_eth_send_gratuitous_arp(struct net_device *dev) { +#ifdef CONFIG_INET struct in_device *in_dev; /* @@ -303,7 +304,6 @@ void irlan_eth_send_gratuitous_arp(struct net_device *dev) * is useful if we have changed access points on the same * subnet. */ -#ifdef CONFIG_INET IRDA_DEBUG(4, "IrLAN: Sending gratuitous ARP\n"); rcu_read_lock(); in_dev = __in_dev_get_rcu(dev); diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index a2f5a6ea3895..7698f6c459d6 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -97,7 +97,7 @@ struct iucv_irq_list { struct iucv_irq_data data; }; -static struct iucv_irq_data *iucv_irq_data; +static struct iucv_irq_data *iucv_irq_data[NR_CPUS]; static cpumask_t iucv_buffer_cpumask = CPU_MASK_NONE; static cpumask_t iucv_irq_cpumask = CPU_MASK_NONE; @@ -277,7 +277,7 @@ union iucv_param { /* * Anchor for per-cpu IUCV command parameter block. */ -static union iucv_param *iucv_param; +static union iucv_param *iucv_param[NR_CPUS]; /** * iucv_call_b2f0 @@ -356,7 +356,7 @@ static void iucv_allow_cpu(void *data) * 0x10 - Flag to allow priority message completion interrupts * 0x08 - Flag to allow IUCV control interrupts */ - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[cpu]; memset(parm, 0, sizeof(union iucv_param)); parm->set_mask.ipmask = 0xf8; iucv_call_b2f0(IUCV_SETMASK, parm); @@ -377,7 +377,7 @@ static void iucv_block_cpu(void *data) union iucv_param *parm; /* Disable all iucv interrupts. */ - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[cpu]; memset(parm, 0, sizeof(union iucv_param)); iucv_call_b2f0(IUCV_SETMASK, parm); @@ -401,9 +401,9 @@ static void iucv_declare_cpu(void *data) return; /* Declare interrupt buffer. */ - parm = percpu_ptr(iucv_param, cpu); + parm = iucv_param[cpu]; memset(parm, 0, sizeof(union iucv_param)); - parm->db.ipbfadr1 = virt_to_phys(percpu_ptr(iucv_irq_data, cpu)); + parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); if (rc) { char *err = "Unknown"; @@ -458,7 +458,7 @@ static void iucv_retrieve_cpu(void *data) iucv_block_cpu(NULL); /* Retrieve interrupt buffer. */ - parm = percpu_ptr(iucv_param, cpu); + parm = iucv_param[cpu]; iucv_call_b2f0(IUCV_RETRIEVE_BUFFER, parm); /* Clear indication that an iucv buffer exists for this cpu. 
*/ @@ -558,22 +558,23 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self, switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - if (!percpu_populate(iucv_irq_data, - sizeof(struct iucv_irq_data), - GFP_KERNEL|GFP_DMA, cpu)) + iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), + GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); + if (!iucv_irq_data[cpu]) return NOTIFY_BAD; - if (!percpu_populate(iucv_param, sizeof(union iucv_param), - GFP_KERNEL|GFP_DMA, cpu)) { - percpu_depopulate(iucv_irq_data, cpu); + iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), + GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); + if (!iucv_param[cpu]) return NOTIFY_BAD; - } break; case CPU_UP_CANCELED: case CPU_UP_CANCELED_FROZEN: case CPU_DEAD: case CPU_DEAD_FROZEN: - percpu_depopulate(iucv_param, cpu); - percpu_depopulate(iucv_irq_data, cpu); + kfree(iucv_param[cpu]); + iucv_param[cpu] = NULL; + kfree(iucv_irq_data[cpu]); + iucv_irq_data[cpu] = NULL; break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: @@ -612,7 +613,7 @@ static int iucv_sever_pathid(u16 pathid, u8 userdata[16]) { union iucv_param *parm; - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (userdata) memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); @@ -755,7 +756,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, local_bh_disable(); /* Prepare parameter block. */ - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); parm->ctrl.ippathid = path->pathid; parm->ctrl.ipmsglim = path->msglim; @@ -799,7 +800,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, BUG_ON(in_atomic()); spin_lock_bh(&iucv_table_lock); iucv_cleanup_queue(); - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); parm->ctrl.ipmsglim = path->msglim; parm->ctrl.ipflags1 = path->flags; @@ -854,7 +855,7 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]) int rc; local_bh_disable(); - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (userdata) memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); @@ -881,7 +882,7 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16]) int rc; local_bh_disable(); - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (userdata) memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); @@ -936,7 +937,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, int rc; local_bh_disable(); - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); parm->purge.ippathid = path->pathid; parm->purge.ipmsgid = msg->id; @@ -1003,7 +1004,7 @@ int iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, } local_bh_disable(); - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); parm->db.ipbfadr1 = (u32)(addr_t) buffer; parm->db.ipbfln1f = (u32) size; @@ -1040,7 +1041,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg) int rc; local_bh_disable(); - parm = percpu_ptr(iucv_param, 
smp_processor_id()); + parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); parm->db.ippathid = path->pathid; parm->db.ipmsgid = msg->id; @@ -1074,7 +1075,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, int rc; local_bh_disable(); - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (flags & IUCV_IPRMDATA) { parm->dpl.ippathid = path->pathid; @@ -1118,7 +1119,7 @@ int iucv_message_send(struct iucv_path *path, struct iucv_message *msg, int rc; local_bh_disable(); - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (flags & IUCV_IPRMDATA) { /* Message of 8 bytes can be placed into the parameter list. */ @@ -1172,7 +1173,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, int rc; local_bh_disable(); - parm = percpu_ptr(iucv_param, smp_processor_id()); + parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); if (flags & IUCV_IPRMDATA) { parm->dpl.ippathid = path->pathid; @@ -1559,7 +1560,7 @@ static void iucv_external_interrupt(u16 code) struct iucv_irq_data *p; struct iucv_irq_list *work; - p = percpu_ptr(iucv_irq_data, smp_processor_id()); + p = iucv_irq_data[smp_processor_id()]; if (p->ippathid >= iucv_max_pathid) { printk(KERN_WARNING "iucv_do_int: Got interrupt with " "pathid %d > max_connections (%ld)\n", @@ -1598,6 +1599,7 @@ static void iucv_external_interrupt(u16 code) static int __init iucv_init(void) { int rc; + int cpu; if (!MACHINE_IS_VM) { rc = -EPROTONOSUPPORT; @@ -1617,19 +1619,23 @@ static int __init iucv_init(void) rc = PTR_ERR(iucv_root); goto out_bus; } - /* Note: GFP_DMA used to get memory below 2G */ - iucv_irq_data = percpu_alloc(sizeof(struct iucv_irq_data), - GFP_KERNEL|GFP_DMA); - if (!iucv_irq_data) { - rc = -ENOMEM; - goto out_root; - } - /* Allocate parameter blocks. */ - iucv_param = percpu_alloc(sizeof(union iucv_param), - GFP_KERNEL|GFP_DMA); - if (!iucv_param) { - rc = -ENOMEM; - goto out_extint; + + for_each_online_cpu(cpu) { + /* Note: GFP_DMA used to get memory below 2G */ + iucv_irq_data[cpu] = kmalloc_node(sizeof(struct iucv_irq_data), + GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); + if (!iucv_irq_data[cpu]) { + rc = -ENOMEM; + goto out_free; + } + + /* Allocate parameter blocks. 
*/ + iucv_param[cpu] = kmalloc_node(sizeof(union iucv_param), + GFP_KERNEL|GFP_DMA, cpu_to_node(cpu)); + if (!iucv_param[cpu]) { + rc = -ENOMEM; + goto out_free; + } } register_hotcpu_notifier(&iucv_cpu_notifier); ASCEBC(iucv_error_no_listener, 16); @@ -1638,9 +1644,13 @@ static int __init iucv_init(void) iucv_available = 1; return 0; -out_extint: - percpu_free(iucv_irq_data); -out_root: +out_free: + for_each_possible_cpu(cpu) { + kfree(iucv_param[cpu]); + iucv_param[cpu] = NULL; + kfree(iucv_irq_data[cpu]); + iucv_irq_data[cpu] = NULL; + } s390_root_dev_unregister(iucv_root); out_bus: bus_unregister(&iucv_bus); @@ -1658,6 +1668,7 @@ out: static void __exit iucv_exit(void) { struct iucv_irq_list *p, *n; + int cpu; spin_lock_irq(&iucv_queue_lock); list_for_each_entry_safe(p, n, &iucv_task_queue, list) @@ -1666,8 +1677,12 @@ static void __exit iucv_exit(void) kfree(p); spin_unlock_irq(&iucv_queue_lock); unregister_hotcpu_notifier(&iucv_cpu_notifier); - percpu_free(iucv_param); - percpu_free(iucv_irq_data); + for_each_possible_cpu(cpu) { + kfree(iucv_param[cpu]); + iucv_param[cpu] = NULL; + kfree(iucv_irq_data[cpu]); + iucv_irq_data[cpu] = NULL; + } s390_root_dev_unregister(iucv_root); bus_unregister(&iucv_bus); unregister_external_interrupt(0x4000, iucv_external_interrupt); diff --git a/net/key/af_key.c b/net/key/af_key.c index 10c89d47f685..878039b9557d 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1015,9 +1015,7 @@ static inline struct sk_buff *pfkey_xfrm_state2msg(struct xfrm_state *x) { struct sk_buff *skb; - spin_lock_bh(&x->lock); skb = __pfkey_xfrm_state2msg(x, 1, 3); - spin_unlock_bh(&x->lock); return skb; } @@ -1552,7 +1550,7 @@ static int pfkey_get(struct sock *sk, struct sk_buff *skb, struct sadb_msg *hdr, out_hdr = (struct sadb_msg *) out_skb->data; out_hdr->sadb_msg_version = hdr->sadb_msg_version; - out_hdr->sadb_msg_type = SADB_DUMP; + out_hdr->sadb_msg_type = SADB_GET; out_hdr->sadb_msg_satype = pfkey_proto2satype(proto); out_hdr->sadb_msg_errno = 0; out_hdr->sadb_msg_reserved = 0; diff --git a/net/mac80211/ieee80211.c b/net/mac80211/ieee80211.c index e0ee65a969bc..505af1f067ab 100644 --- a/net/mac80211/ieee80211.c +++ b/net/mac80211/ieee80211.c @@ -216,6 +216,7 @@ static int ieee80211_open(struct net_device *dev) res = local->ops->start(local_to_hw(local)); if (res) return res; + ieee80211_hw_config(local); } switch (sdata->type) { @@ -232,7 +233,6 @@ static int ieee80211_open(struct net_device *dev) netif_tx_unlock_bh(local->mdev); local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; - ieee80211_hw_config(local); } break; case IEEE80211_IF_TYPE_STA: @@ -267,6 +267,17 @@ static int ieee80211_open(struct net_device *dev) tasklet_enable(&local->tasklet); } + /* + * set_multicast_list will be invoked by the networking core + * which will check whether any increments here were done in + * error and sync them down to the hardware as filter flags. + */ + if (sdata->flags & IEEE80211_SDATA_ALLMULTI) + atomic_inc(&local->iff_allmultis); + + if (sdata->flags & IEEE80211_SDATA_PROMISC) + atomic_inc(&local->iff_promiscs); + local->open_count++; netif_start_queue(dev); @@ -284,6 +295,18 @@ static int ieee80211_stop(struct net_device *dev) netif_stop_queue(dev); + /* + * Don't count this interface for promisc/allmulti while it + * is down. dev_mc_unsync() will invoke set_multicast_list + * on the master interface which will sync these down to the + * hardware as filter flags. 
+ */ + if (sdata->flags & IEEE80211_SDATA_ALLMULTI) + atomic_dec(&local->iff_allmultis); + + if (sdata->flags & IEEE80211_SDATA_PROMISC) + atomic_dec(&local->iff_promiscs); + dev_mc_unsync(local->mdev, dev); /* down all dependent devices, that is VLANs */ @@ -311,8 +334,7 @@ static int ieee80211_stop(struct net_device *dev) ieee80211_configure_filter(local); netif_tx_unlock_bh(local->mdev); - local->hw.conf.flags |= IEEE80211_CONF_RADIOTAP; - ieee80211_hw_config(local); + local->hw.conf.flags &= ~IEEE80211_CONF_RADIOTAP; } break; case IEEE80211_IF_TYPE_STA: @@ -334,6 +356,11 @@ static int ieee80211_stop(struct net_device *dev) cancel_delayed_work(&local->scan_work); } flush_workqueue(local->hw.workqueue); + + sdata->u.sta.flags &= ~IEEE80211_STA_PRIVACY_INVOKED; + kfree(sdata->u.sta.extra_ie); + sdata->u.sta.extra_ie = NULL; + sdata->u.sta.extra_ie_len = 0; /* fall through */ default: conf.if_id = dev->ifindex; @@ -366,8 +393,8 @@ static void ieee80211_set_multicast_list(struct net_device *dev) allmulti = !!(dev->flags & IFF_ALLMULTI); promisc = !!(dev->flags & IFF_PROMISC); - sdata_allmulti = sdata->flags & IEEE80211_SDATA_ALLMULTI; - sdata_promisc = sdata->flags & IEEE80211_SDATA_PROMISC; + sdata_allmulti = !!(sdata->flags & IEEE80211_SDATA_ALLMULTI); + sdata_promisc = !!(sdata->flags & IEEE80211_SDATA_PROMISC); if (allmulti != sdata_allmulti) { if (dev->flags & IFF_ALLMULTI) diff --git a/net/mac80211/ieee80211_sta.c b/net/mac80211/ieee80211_sta.c index 015b3f879aa9..16afd24d4f6b 100644 --- a/net/mac80211/ieee80211_sta.c +++ b/net/mac80211/ieee80211_sta.c @@ -2647,7 +2647,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw) local->sta_scanning = 0; if (ieee80211_hw_config(local)) - printk(KERN_DEBUG "%s: failed to restore operational" + printk(KERN_DEBUG "%s: failed to restore operational " "channel after scan\n", dev->name); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 428a9fcf57d6..00f908d9275e 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -997,7 +997,7 @@ ieee80211_rx_h_drop_unencrypted(struct ieee80211_txrx_data *rx) if (unlikely(!(rx->fc & IEEE80211_FCTL_PROTECTED) && (rx->fc & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA && (rx->fc & IEEE80211_FCTL_STYPE) != IEEE80211_STYPE_NULLFUNC && - rx->sdata->drop_unencrypted && + (rx->key || rx->sdata->drop_unencrypted) && (rx->sdata->eapol == 0 || !ieee80211_is_eapol(rx->skb)))) { if (net_ratelimit()) printk(KERN_DEBUG "%s: RX non-WEP frame, but expected " diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c index 9bf0e1cc530a..b5f3413403bd 100644 --- a/net/mac80211/wep.c +++ b/net/mac80211/wep.c @@ -265,7 +265,8 @@ int ieee80211_wep_decrypt(struct ieee80211_local *local, struct sk_buff *skb, if (ieee80211_wep_decrypt_data(local->wep_rx_tfm, rc4key, klen, skb->data + hdrlen + WEP_IV_LEN, len)) { - printk(KERN_DEBUG "WEP decrypt failed (ICV)\n"); + if (net_ratelimit()) + printk(KERN_DEBUG "WEP decrypt failed (ICV)\n"); ret = -1; } diff --git a/net/netfilter/xt_CONNMARK.c b/net/netfilter/xt_CONNMARK.c index 856793e8db7a..0621ca7de3b0 100644 --- a/net/netfilter/xt_CONNMARK.c +++ b/net/netfilter/xt_CONNMARK.c @@ -86,11 +86,6 @@ checkentry(const char *tablename, { const struct xt_connmark_target_info *matchinfo = targinfo; - if (nf_ct_l3proto_try_module_get(target->family) < 0) { - printk(KERN_WARNING "can't load conntrack support for " - "proto=%d\n", target->family); - return false; - } if (matchinfo->mode == XT_CONNMARK_RESTORE) { if (strcmp(tablename, "mangle") != 0) { printk(KERN_WARNING "CONNMARK: restore 
can only be " @@ -103,6 +98,11 @@ checkentry(const char *tablename, printk(KERN_WARNING "CONNMARK: Only supports 32bit mark\n"); return false; } + if (nf_ct_l3proto_try_module_get(target->family) < 0) { + printk(KERN_WARNING "can't load conntrack support for " + "proto=%d\n", target->family); + return false; + } return true; } diff --git a/net/netfilter/xt_CONNSECMARK.c b/net/netfilter/xt_CONNSECMARK.c index 021b5c8d20e2..d8feba9bdb48 100644 --- a/net/netfilter/xt_CONNSECMARK.c +++ b/net/netfilter/xt_CONNSECMARK.c @@ -90,11 +90,6 @@ static bool checkentry(const char *tablename, const void *entry, { const struct xt_connsecmark_target_info *info = targinfo; - if (nf_ct_l3proto_try_module_get(target->family) < 0) { - printk(KERN_WARNING "can't load conntrack support for " - "proto=%d\n", target->family); - return false; - } switch (info->mode) { case CONNSECMARK_SAVE: case CONNSECMARK_RESTORE: @@ -105,6 +100,11 @@ static bool checkentry(const char *tablename, const void *entry, return false; } + if (nf_ct_l3proto_try_module_get(target->family) < 0) { + printk(KERN_WARNING "can't load conntrack support for " + "proto=%d\n", target->family); + return false; + } return true; } diff --git a/net/netfilter/xt_TCPMSS.c b/net/netfilter/xt_TCPMSS.c index 07435a602b11..8e76d1f52fbe 100644 --- a/net/netfilter/xt_TCPMSS.c +++ b/net/netfilter/xt_TCPMSS.c @@ -174,10 +174,8 @@ xt_tcpmss_target6(struct sk_buff *skb, nexthdr = ipv6h->nexthdr; tcphoff = ipv6_skip_exthdr(skb, sizeof(*ipv6h), &nexthdr); - if (tcphoff < 0) { - WARN_ON(1); + if (tcphoff < 0) return NF_DROP; - } ret = tcpmss_mangle_packet(skb, targinfo, tcphoff, sizeof(*ipv6h) + sizeof(struct tcphdr)); if (ret < 0) diff --git a/net/rfkill/rfkill.c b/net/rfkill/rfkill.c index 73d60a307129..4469a7be006c 100644 --- a/net/rfkill/rfkill.c +++ b/net/rfkill/rfkill.c @@ -60,11 +60,7 @@ static void rfkill_led_trigger(struct rfkill *rfkill, static int rfkill_toggle_radio(struct rfkill *rfkill, enum rfkill_state state) { - int retval; - - retval = mutex_lock_interruptible(&rfkill->mutex); - if (retval) - return retval; + int retval = 0; if (state != rfkill->state) { retval = rfkill->toggle_radio(rfkill->data, state); @@ -74,7 +70,6 @@ static int rfkill_toggle_radio(struct rfkill *rfkill, } } - mutex_unlock(&rfkill->mutex); return retval; } @@ -158,12 +153,13 @@ static ssize_t rfkill_state_store(struct device *dev, if (!capable(CAP_NET_ADMIN)) return -EPERM; + if (mutex_lock_interruptible(&rfkill->mutex)) + return -ERESTARTSYS; error = rfkill_toggle_radio(rfkill, state ? RFKILL_STATE_ON : RFKILL_STATE_OFF); - if (error) - return error; + mutex_unlock(&rfkill->mutex); - return count; + return error ? 
error : count; } static ssize_t rfkill_claim_show(struct device *dev, diff --git a/net/rose/rose_dev.c b/net/rose/rose_dev.c index 1b6741f1d746..12cfcf09556b 100644 --- a/net/rose/rose_dev.c +++ b/net/rose/rose_dev.c @@ -55,13 +55,13 @@ static int rose_header(struct sk_buff *skb, struct net_device *dev, static int rose_rebuild_header(struct sk_buff *skb) { +#ifdef CONFIG_INET struct net_device *dev = skb->dev; struct net_device_stats *stats = netdev_priv(dev); unsigned char *bp = (unsigned char *)skb->data; struct sk_buff *skbn; unsigned int len; -#ifdef CONFIG_INET if (arp_find(bp + 7, skb)) { return 1; } diff --git a/net/rxrpc/Kconfig b/net/rxrpc/Kconfig index e662f1d07664..0d3103c4f11c 100644 --- a/net/rxrpc/Kconfig +++ b/net/rxrpc/Kconfig @@ -5,6 +5,7 @@ config AF_RXRPC tristate "RxRPC session sockets" depends on INET && EXPERIMENTAL + select CRYPTO select KEYS help Say Y or M here to include support for RxRPC session sockets (just diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig index 8210f549c492..5390bc792159 100644 --- a/net/sctp/Kconfig +++ b/net/sctp/Kconfig @@ -6,9 +6,9 @@ menuconfig IP_SCTP tristate "The SCTP Protocol (EXPERIMENTAL)" depends on INET && EXPERIMENTAL depends on IPV6 || IPV6=n - select CRYPTO if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5 - select CRYPTO_HMAC if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5 - select CRYPTO_SHA1 if SCTP_HMAC_SHA1 + select CRYPTO + select CRYPTO_HMAC + select CRYPTO_SHA1 select CRYPTO_MD5 if SCTP_HMAC_MD5 ---help--- Stream Control Transmission Protocol diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 6d5fa6bb371b..97e6ebd14500 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -54,11 +54,13 @@ static struct sctp_hmac sctp_hmac_list[SCTP_AUTH_NUM_HMACS] = { /* id 2 is reserved as well */ .hmac_id = SCTP_AUTH_HMAC_ID_RESERVED_2, }, +#if defined (CONFIG_CRYPTO_SHA256) || defined (CONFIG_CRYPTO_SHA256_MODULE) { .hmac_id = SCTP_AUTH_HMAC_ID_SHA256, .hmac_name="hmac(sha256)", .hmac_len = SCTP_SHA256_SIG_SIZE, } +#endif }; @@ -631,7 +633,7 @@ static int __sctp_auth_cid(sctp_cid_t chunk, struct sctp_chunks_param *param) int found = 0; int i; - if (!param) + if (!param || param->param_hdr.length == 0) return 0; len = ntohs(param->param_hdr.length) - sizeof(sctp_paramhdr_t); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 5a9783c38de1..f4876291bb5e 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -77,6 +77,8 @@ static int sctp_process_param(struct sctp_association *asoc, union sctp_params param, const union sctp_addr *peer_addr, gfp_t gfp); +static void *sctp_addto_param(struct sctp_chunk *chunk, int len, + const void *data); /* What was the inbound interface for this chunk? 
*/ int sctp_chunk_iif(const struct sctp_chunk *chunk) @@ -207,11 +209,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types); chunksize += sizeof(ecap_param); - if (sctp_prsctp_enable) { - chunksize += sizeof(prsctp_param); - extensions[num_ext] = SCTP_CID_FWD_TSN; - num_ext += 1; - } + /* ADDIP: Section 4.2.7: * An implementation supporting this extension [ADDIP] MUST list * the ASCONF,the ASCONF-ACK, and the AUTH chunks in its INIT and @@ -243,7 +241,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, if (auth_chunks->length) chunksize += ntohs(auth_chunks->length); else - auth_hmacs = NULL; + auth_chunks = NULL; extensions[num_ext] = SCTP_CID_AUTH; num_ext += 1; @@ -297,7 +295,7 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, htons(sizeof(sctp_supported_ext_param_t) + num_ext); sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), &ext_param); - sctp_addto_chunk(retval, num_ext, extensions); + sctp_addto_param(retval, num_ext, extensions); } if (sctp_prsctp_enable) @@ -371,20 +369,12 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, if (asoc->peer.ecn_capable) chunksize += sizeof(ecap_param); - /* Tell peer that we'll do PR-SCTP only if peer advertised. */ - if (asoc->peer.prsctp_capable) { - chunksize += sizeof(prsctp_param); - extensions[num_ext] = SCTP_CID_FWD_TSN; - num_ext += 1; - } - if (sctp_addip_enable) { extensions[num_ext] = SCTP_CID_ASCONF; extensions[num_ext+1] = SCTP_CID_ASCONF_ACK; num_ext += 2; } - chunksize += sizeof(ext_param) + num_ext; chunksize += sizeof(aiparam); if (asoc->peer.auth_capable) { @@ -407,6 +397,9 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, num_ext += 1; } + if (num_ext) + chunksize += sizeof(sctp_supported_ext_param_t) + num_ext; + /* Now allocate and fill out the chunk. */ retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); if (!retval) @@ -428,7 +421,7 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, htons(sizeof(sctp_supported_ext_param_t) + num_ext); sctp_addto_chunk(retval, sizeof(sctp_supported_ext_param_t), &ext_param); - sctp_addto_chunk(retval, num_ext, extensions); + sctp_addto_param(retval, num_ext, extensions); } if (asoc->peer.prsctp_capable) sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 5ebbe808d801..5fb84778846d 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -959,7 +959,7 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep, { struct sctp_transport *transport = (struct sctp_transport *) arg; - if (asoc->overall_error_count >= asoc->max_retrans) { + if (asoc->overall_error_count > asoc->max_retrans) { sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ETIMEDOUT)); /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ @@ -1146,7 +1146,7 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, /* Check if the timestamp looks valid. 
 	if (time_after(hbinfo->sent_at, jiffies) ||
 	    time_after(jiffies, hbinfo->sent_at + max_interval)) {
-		SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp"
+		SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp "
 				"received for transport: %p\n",
 				__FUNCTION__, link);
 		return SCTP_DISPOSITION_DISCARD;
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 53995af9ca4b..a6e57d1c2eb6 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -540,7 +540,7 @@ gss_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
 	p = gss_fill_context(p, end, ctx, gss_msg->auth->mech);
 	if (IS_ERR(p)) {
 		err = PTR_ERR(p);
-		gss_msg->msg.errno = (err == -EACCES) ? -EACCES : -EAGAIN;
+		gss_msg->msg.errno = (err == -EAGAIN) ? -EAGAIN : -EACCES;
 		goto err_release_msg;
 	}
 	gss_msg->ctx = gss_get_ctx(ctx);
@@ -967,7 +967,7 @@ gss_validate(struct rpc_task *task, __be32 *p)
 	if (maj_stat == GSS_S_CONTEXT_EXPIRED)
 		clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
 	if (maj_stat) {
-		dprintk("RPC: %5u gss_validate: gss_verify_mic returned"
+		dprintk("RPC: %5u gss_validate: gss_verify_mic returned "
 				"error 0x%08x\n", task->tk_pid, maj_stat);
 		goto out_bad;
 	}
diff --git a/net/sunrpc/auth_gss/gss_krb5_mech.c b/net/sunrpc/auth_gss/gss_krb5_mech.c
index 9843eacef11d..60c3dba545d7 100644
--- a/net/sunrpc/auth_gss/gss_krb5_mech.c
+++ b/net/sunrpc/auth_gss/gss_krb5_mech.c
@@ -147,13 +147,17 @@ gss_import_sec_context_kerberos(const void *p,
 	p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
 	if (IS_ERR(p))
 		goto out_err_free_ctx;
-	if (tmp != SGN_ALG_DES_MAC_MD5)
+	if (tmp != SGN_ALG_DES_MAC_MD5) {
+		p = ERR_PTR(-ENOSYS);
 		goto out_err_free_ctx;
+	}
 	p = simple_get_bytes(p, end, &tmp, sizeof(tmp));
 	if (IS_ERR(p))
 		goto out_err_free_ctx;
-	if (tmp != SEAL_ALG_DES)
+	if (tmp != SEAL_ALG_DES) {
+		p = ERR_PTR(-ENOSYS);
 		goto out_err_free_ctx;
+	}
 	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
 	if (IS_ERR(p))
 		goto out_err_free_ctx;
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index 1c6eda5077c1..dedcbd6108f4 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -83,6 +83,7 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
 	u32		seq_send;
 
 	dprintk("RPC: gss_krb5_seal\n");
+	BUG_ON(ctx == NULL);
 
 	now = get_seconds();
 
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 18f0a8dcc095..c59f3ca2b41b 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -280,7 +280,7 @@ rpc_pipe_poll(struct file *filp, struct poll_table_struct *wait)
 	mask = POLLOUT | POLLWRNORM;
 	if (rpci->ops == NULL)
 		mask |= POLLERR | POLLHUP;
-	if (!list_empty(&rpci->pipe))
+	if (filp->private_data || !list_empty(&rpci->pipe))
 		mask |= POLLIN | POLLRDNORM;
 	return mask;
 }
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 282a9a2ec90c..cd641c8634f0 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -62,7 +62,7 @@ static inline void do_xprt_reserve(struct rpc_task *);
 static void xprt_connect_status(struct rpc_task *task);
 static int __xprt_get_cong(struct rpc_xprt *, struct rpc_task *);
 
-static spinlock_t xprt_list_lock = SPIN_LOCK_UNLOCKED;
+static DEFINE_SPINLOCK(xprt_list_lock);
 static LIST_HEAD(xprt_list);
 
 /*
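
The net/sunrpc/xprt.c hunk above replaces the deprecated SPIN_LOCK_UNLOCKED initializer with DEFINE_SPINLOCK(), which declares and statically initializes the lock in one step and stays correct with the lock-debugging/lockdep variants of spinlock_t, which want per-lock state that a single shared initializer cannot provide. A minimal kernel-style sketch of the pattern (editor's illustration, not part of the patch; my_lock, my_list and my_add are invented names):

    #include <linux/spinlock.h>
    #include <linux/list.h>

    /* Preferred over: static spinlock_t my_lock = SPIN_LOCK_UNLOCKED; */
    static DEFINE_SPINLOCK(my_lock);
    static LIST_HEAD(my_list);

    static void my_add(struct list_head *entry)
    {
            spin_lock(&my_lock);            /* serialize updates to my_list */
            list_add_tail(entry, &my_list);
            spin_unlock(&my_lock);
    }
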
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 1afeb3eb8e4c..6f2112dd9f78 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -89,7 +89,7 @@ static struct ctl_table_header *sunrpc_table_header;
 
 static ctl_table xr_tunables_table[] = {
 	{
-		.ctl_name = CTL_SLOTTABLE_RDMA,
+		.ctl_name = CTL_UNNUMBERED,
 		.procname = "rdma_slot_table_entries",
 		.data = &xprt_rdma_slot_table_entries,
 		.maxlen = sizeof(unsigned int),
@@ -100,7 +100,7 @@ static ctl_table xr_tunables_table[] = {
 		.extra2 = &max_slot_table_size
 	},
 	{
-		.ctl_name = CTL_RDMA_MAXINLINEREAD,
+		.ctl_name = CTL_UNNUMBERED,
 		.procname = "rdma_max_inline_read",
 		.data = &xprt_rdma_max_inline_read,
 		.maxlen = sizeof(unsigned int),
@@ -109,7 +109,7 @@ static ctl_table xr_tunables_table[] = {
 		.strategy = &sysctl_intvec,
 	},
 	{
-		.ctl_name = CTL_RDMA_MAXINLINEWRITE,
+		.ctl_name = CTL_UNNUMBERED,
 		.procname = "rdma_max_inline_write",
 		.data = &xprt_rdma_max_inline_write,
 		.maxlen = sizeof(unsigned int),
@@ -118,7 +118,7 @@ static ctl_table xr_tunables_table[] = {
 		.strategy = &sysctl_intvec,
 	},
 	{
-		.ctl_name = CTL_RDMA_WRITEPADDING,
+		.ctl_name = CTL_UNNUMBERED,
 		.procname = "rdma_inline_write_padding",
 		.data = &xprt_rdma_inline_write_padding,
 		.maxlen = sizeof(unsigned int),
@@ -129,7 +129,7 @@ static ctl_table xr_tunables_table[] = {
 		.extra2 = &max_padding,
 	},
 	{
-		.ctl_name = CTL_RDMA_MEMREG,
+		.ctl_name = CTL_UNNUMBERED,
 		.procname = "rdma_memreg_strategy",
 		.data = &xprt_rdma_memreg_strategy,
 		.maxlen = sizeof(unsigned int),
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 02298f529dad..2f630a512ab7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1828,7 +1828,7 @@ static struct rpc_xprt *xs_setup_xprt(struct xprt_create *args,
  * @args: rpc transport creation arguments
  *
  */
-struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
+static struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
 {
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
@@ -1894,7 +1894,7 @@ struct rpc_xprt *xs_setup_udp(struct xprt_create *args)
  * @args: rpc transport creation arguments
  *
  */
-struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
+static struct rpc_xprt *xs_setup_tcp(struct xprt_create *args)
 {
 	struct sockaddr *addr = args->dstaddr;
 	struct rpc_xprt *xprt;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index e835da8fc091..060bba4567d2 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1637,8 +1637,15 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
 	mutex_lock(&u->readlock);
 
 	skb = skb_recv_datagram(sk, flags, noblock, &err);
-	if (!skb)
+	if (!skb) {
+		unix_state_lock(sk);
+		/* Signal EOF on disconnected non-blocking SEQPACKET socket. */
+		if (sk->sk_type == SOCK_SEQPACKET && err == -EAGAIN &&
+		    (sk->sk_shutdown & RCV_SHUTDOWN))
+			err = 0;
+		unix_state_unlock(sk);
 		goto out_unlock;
+	}
 
 	wake_up_interruptible_sync(&u->peer_wait);
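
The af_unix.c hunk above makes recvmsg() on a non-blocking AF_UNIX SOCK_SEQPACKET socket report end-of-file (a return value of 0) once the peer has gone away and the receive side is shut down, instead of failing with EAGAIN indefinitely. A small userspace sketch of the intended behavior (editor's illustration, not part of the patch; on a kernel with this change the program should print "EOF"):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <unistd.h>
    #include <sys/socket.h>

    int main(void)
    {
            int sv[2];
            char buf[64];
            ssize_t n;

            if (socketpair(AF_UNIX, SOCK_SEQPACKET, 0, sv) < 0) {
                    perror("socketpair");
                    return 1;
            }
            close(sv[1]);   /* peer disconnects; RCV_SHUTDOWN is set on sv[0] */

            n = recv(sv[0], buf, sizeof(buf), MSG_DONTWAIT);
            if (n == 0)
                    printf("EOF\n");
            else if (n < 0)
                    printf("recv: %s (EAGAIN was the pre-fix behavior)\n",
                           strerror(errno));
            return 0;
    }
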
diff --git a/net/wireless/wext.c b/net/wireless/wext.c
index 85e5f9dd0d8e..47e80cc2077c 100644
--- a/net/wireless/wext.c
+++ b/net/wireless/wext.c
@@ -1094,7 +1094,7 @@ int wext_handle_ioctl(struct net *net, struct ifreq *ifr, unsigned int cmd,
 	rtnl_lock();
 	ret = wireless_process_ioctl(net, ifr, cmd);
 	rtnl_unlock();
-	if (IW_IS_GET(cmd) && copy_to_user(arg, ifr, sizeof(struct ifreq)))
+	if (IW_IS_GET(cmd) && copy_to_user(arg, ifr, sizeof(struct iwreq)))
 		return -EFAULT;
 	return ret;
 }
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index b702bd8a3893..9a4cf2e45a15 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1344,6 +1344,7 @@ restart:
 	xfrm_nr += pols[0]->xfrm_nr;
 
 	switch (policy->action) {
+	default:
 	case XFRM_POLICY_BLOCK:
 		/* Prohibit the flow */
 		err = -EPERM;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 224b44e31a07..cf43c49eab37 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -552,7 +552,7 @@ int __xfrm_state_delete(struct xfrm_state *x)
 		 * The xfrm_state_alloc call gives a reference, and that
 		 * is what we are dropping here.
 		 */
-		__xfrm_state_put(x);
+		xfrm_state_put(x);
 		err = 0;
 	}
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index d41588d101d0..e75dbdcb08a4 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -507,7 +507,6 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
 				    struct xfrm_usersa_info *p,
 				    struct sk_buff *skb)
 {
-	spin_lock_bh(&x->lock);
 	copy_to_user_state(x, p);
 
 	if (x->coaddr)
@@ -515,7 +514,6 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
 	if (x->lastused)
 		NLA_PUT_U64(skb, XFRMA_LASTUSED, x->lastused);
 
-	spin_unlock_bh(&x->lock);
 	if (x->aalg)
 		NLA_PUT(skb, XFRMA_ALG_AUTH, alg_len(x->aalg), x->aalg);
