Diffstat (limited to 'net')
37 files changed, 157 insertions, 86 deletions
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 2d7b73242958..9a1cb5079a7a 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -5053,7 +5053,7 @@ void br_multicast_uninit_stats(struct net_bridge *br) free_percpu(br->mcast_stats); } -/* noinline for https://bugs.llvm.org/show_bug.cgi?id=45802#c9 */ +/* noinline for https://llvm.org/pr45802#c9 */ static noinline_for_stack void mcast_stats_add_dir(u64 *dst, u64 *src) { dst[BR_MCAST_DIR_RX] += src[BR_MCAST_DIR_RX]; diff --git a/net/core/dev.c b/net/core/dev.c index 0766a245816b..9a67003e49db 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -2245,7 +2245,7 @@ void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev) rcu_read_lock(); again: list_for_each_entry_rcu(ptype, ptype_list, list) { - if (ptype->ignore_outgoing) + if (READ_ONCE(ptype->ignore_outgoing)) continue; /* Never send packets back to the socket @@ -6743,6 +6743,8 @@ static int napi_threaded_poll(void *data) void *have; while (!napi_thread_wait(napi)) { + unsigned long last_qs = jiffies; + for (;;) { bool repoll = false; @@ -6767,6 +6769,7 @@ static int napi_threaded_poll(void *data) if (!repoll) break; + rcu_softirq_qs_periodic(last_qs); cond_resched(); } } @@ -11665,11 +11668,12 @@ static void __init net_dev_struct_check(void) /* TXRX read-mostly hotpath */ CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, lstats); + CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, state); CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags); CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len); CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features); CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr); - CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 38); + CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 46); /* RX read-mostly hotpath */ CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific); diff --git a/net/core/sock.c b/net/core/sock.c index 43bf3818c19e..0963689a5950 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -482,7 +482,7 @@ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) unsigned long flags; struct sk_buff_head *list = &sk->sk_receive_queue; - if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { + if (atomic_read(&sk->sk_rmem_alloc) >= READ_ONCE(sk->sk_rcvbuf)) { atomic_inc(&sk->sk_drops); trace_sock_rcvqueue_full(sk, skb); return -ENOMEM; @@ -552,7 +552,7 @@ int __sk_receive_skb(struct sock *sk, struct sk_buff *skb, skb->dev = NULL; - if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) { + if (sk_rcvqueues_full(sk, READ_ONCE(sk->sk_rcvbuf))) { atomic_inc(&sk->sk_drops); goto discard_and_relse; } diff --git a/net/dccp/ccids/Kconfig b/net/dccp/ccids/Kconfig index a3eeb84d16f9..e3d388c33d25 100644 --- a/net/dccp/ccids/Kconfig +++ b/net/dccp/ccids/Kconfig @@ -13,7 +13,7 @@ config IP_DCCP_CCID2_DEBUG config IP_DCCP_CCID3 bool "CCID-3 (TCP-Friendly)" - def_bool y if (IP_DCCP = y || IP_DCCP = m) + default IP_DCCP = y || IP_DCCP = m help CCID-3 denotes TCP-Friendly Rate Control (TFRC), an equation-based rate-controlled congestion control mechanism. 
TFRC is designed to diff --git a/net/devlink/port.c b/net/devlink/port.c index 4b2d46ccfe48..118d130d2afd 100644 --- a/net/devlink/port.c +++ b/net/devlink/port.c @@ -889,7 +889,7 @@ int devlink_nl_port_new_doit(struct sk_buff *skb, struct genl_info *info) err = -ENOMEM; goto err_out_port_del; } - err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_NEW, + err = devlink_nl_port_fill(msg, devlink_port, DEVLINK_CMD_PORT_NEW, info->snd_portid, info->snd_seq, 0, NULL); if (WARN_ON_ONCE(err)) goto err_out_msg_free; diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c index cb83c8feb746..9756e657bab9 100644 --- a/net/hsr/hsr_main.c +++ b/net/hsr/hsr_main.c @@ -148,14 +148,21 @@ static struct notifier_block hsr_nb = { static int __init hsr_init(void) { - int res; + int err; BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN); - register_netdevice_notifier(&hsr_nb); - res = hsr_netlink_init(); + err = register_netdevice_notifier(&hsr_nb); + if (err) + return err; + + err = hsr_netlink_init(); + if (err) { + unregister_netdevice_notifier(&hsr_nb); + return err; + } - return res; + return 0; } static void __exit hsr_exit(void) diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c index e5742f2a2d52..1b6457f357bd 100644 --- a/net/hsr/hsr_slave.c +++ b/net/hsr/hsr_slave.c @@ -220,7 +220,8 @@ void hsr_del_port(struct hsr_port *port) netdev_update_features(master->dev); dev_set_mtu(master->dev, hsr_get_max_mtu(hsr)); netdev_rx_handler_unregister(port->dev); - dev_set_promiscuity(port->dev, -1); + if (!port->hsr->fwd_offloaded) + dev_set_promiscuity(port->dev, -1); netdev_upper_dev_unlink(port->dev, master->dev); } diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 4dd9e5040672..d33d12421814 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -95,7 +95,7 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, __alignof__(struct scatterlist)); } -static void esp_ssg_unref(struct xfrm_state *x, void *tmp) +static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) { struct crypto_aead *aead = x->data; int extralen = 0; @@ -114,7 +114,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp) */ if (req->src != req->dst) for (sg = sg_next(req->src); sg; sg = sg_next(sg)) - put_page(sg_page(sg)); + skb_page_unref(skb, sg_page(sg), false); } #ifdef CONFIG_INET_ESPINTCP @@ -260,7 +260,7 @@ static void esp_output_done(void *data, int err) } tmp = ESP_SKB_CB(skb)->tmp; - esp_ssg_unref(x, tmp); + esp_ssg_unref(x, tmp, skb); kfree(tmp); if (xo && (xo->flags & XFRM_DEV_RESUME)) { @@ -639,7 +639,7 @@ int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info * } if (sg != dsg) - esp_ssg_unref(x, tmp); + esp_ssg_unref(x, tmp, skb); if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP) err = esp_output_tail_tcp(x, skb); diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c index cbb2b4bb0dfa..3757fd93523f 100644 --- a/net/ipv4/gre_demux.c +++ b/net/ipv4/gre_demux.c @@ -217,5 +217,5 @@ module_init(gre_init); module_exit(gre_exit); MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver"); -MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)"); +MODULE_AUTHOR("D. 
Kozlov <xeb@mail.ru>"); MODULE_LICENSE("GPL"); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 7d8090f109ef..c038e28e2f1e 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -771,6 +771,20 @@ void inet_csk_clear_xmit_timers(struct sock *sk) } EXPORT_SYMBOL(inet_csk_clear_xmit_timers); +void inet_csk_clear_xmit_timers_sync(struct sock *sk) +{ + struct inet_connection_sock *icsk = inet_csk(sk); + + /* ongoing timer handlers need to acquire socket lock. */ + sock_not_owned_by_me(sk); + + icsk->icsk_pending = icsk->icsk_ack.pending = 0; + + sk_stop_timer_sync(sk, &icsk->icsk_retransmit_timer); + sk_stop_timer_sync(sk, &icsk->icsk_delack_timer); + sk_stop_timer_sync(sk, &sk->sk_timer); +} + void inet_csk_delete_keepalive_timer(struct sock *sk) { sk_stop_timer(sk, &sk->sk_timer); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 33f93dc730a3..1fe794967211 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -1458,7 +1458,6 @@ struct sk_buff *__ip_make_skb(struct sock *sk, skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority); skb->mark = cork->mark; skb->tstamp = cork->transmit_time; - skb->mono_delivery_time = !!skb->tstamp; /* * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec * on dst refcount diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 74928a9d1aa4..535856b0f0ed 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -768,8 +768,10 @@ static int nh_grp_hw_stats_update(struct nexthop *nh, bool *hw_stats_used) struct net *net = nh->net; int err; - if (nexthop_notifiers_is_empty(net)) + if (nexthop_notifiers_is_empty(net)) { + *hw_stats_used = false; return 0; + } err = nh_notifier_grp_hw_stats_init(&info, nh); if (err) diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 42ac434cfcfa..dcb11f22cbf2 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -357,10 +357,10 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, goto error; skb_reserve(skb, hlen); + skb->protocol = htons(ETH_P_IP); skb->priority = READ_ONCE(sk->sk_priority); skb->mark = sockc->mark; skb->tstamp = sockc->transmit_time; - skb->mono_delivery_time = !!skb->tstamp; skb_dst_set(skb, &rt->dst); *rtp = NULL; diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 7972ad3d7c73..500f665f98cb 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -474,6 +474,9 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb) ireq->wscale_ok, &rcv_wscale, dst_metric(&rt->dst, RTAX_INITRWND)); + /* req->syncookie is set true only if ACK is validated + * by BPF kfunc, then, rcv_wscale is already configured. 
+ */ if (!req->syncookie) ireq->rcv_wscale = rcv_wscale; ireq->ecn_ok &= cookie_ecn_ok(net, &rt->dst); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index d20b62d52171..e767721b3a58 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -2931,6 +2931,8 @@ void tcp_close(struct sock *sk, long timeout) lock_sock(sk); __tcp_close(sk, timeout); release_sock(sk); + if (!sk->sk_net_refcnt) + inet_csk_clear_xmit_timers_sync(sk); sock_put(sk); } EXPORT_SYMBOL(tcp_close); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 247bd4d8ee45..92db9b474f2b 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -5416,10 +5416,11 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb, err = 0; if (fillargs.ifindex) { - err = -ENODEV; dev = dev_get_by_index_rcu(tgt_net, fillargs.ifindex); - if (!dev) + if (!dev) { + err = -ENODEV; goto done; + } idev = __in6_dev_get(dev); if (idev) err = in6_dump_addrs(idev, skb, cb, diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 6e6efe026cdc..7371886d4f9f 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -112,7 +112,7 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead, __alignof__(struct scatterlist)); } -static void esp_ssg_unref(struct xfrm_state *x, void *tmp) +static void esp_ssg_unref(struct xfrm_state *x, void *tmp, struct sk_buff *skb) { struct crypto_aead *aead = x->data; int extralen = 0; @@ -131,7 +131,7 @@ static void esp_ssg_unref(struct xfrm_state *x, void *tmp) */ if (req->src != req->dst) for (sg = sg_next(req->src); sg; sg = sg_next(sg)) - put_page(sg_page(sg)); + skb_page_unref(skb, sg_page(sg), false); } #ifdef CONFIG_INET6_ESPINTCP @@ -294,7 +294,7 @@ static void esp_output_done(void *data, int err) } tmp = ESP_SKB_CB(skb)->tmp; - esp_ssg_unref(x, tmp); + esp_ssg_unref(x, tmp, skb); kfree(tmp); esp_output_encap_csum(skb); @@ -677,7 +677,7 @@ int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info } if (sg != dsg) - esp_ssg_unref(x, tmp); + esp_ssg_unref(x, tmp, skb); if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP) err = esp_output_tail_tcp(x, skb); diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c index 5e97e0aa8e07..ca7e77e84283 100644 --- a/net/ipv6/ip6_gre.c +++ b/net/ipv6/ip6_gre.c @@ -2405,7 +2405,7 @@ static void __exit ip6gre_fini(void) module_init(ip6gre_init); module_exit(ip6gre_fini); MODULE_LICENSE("GPL"); -MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)"); +MODULE_AUTHOR("D. 
Kozlov <xeb@mail.ru>"); MODULE_DESCRIPTION("GRE over IPv6 tunneling device"); MODULE_ALIAS_RTNL_LINK("ip6gre"); MODULE_ALIAS_RTNL_LINK("ip6gretap"); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 02eeca5492cd..b9dd3a66e423 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -1925,7 +1925,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, skb->priority = READ_ONCE(sk->sk_priority); skb->mark = cork->base.mark; skb->tstamp = cork->base.transmit_time; - skb->mono_delivery_time = !!skb->tstamp; + ip6_cork_steal_dst(skb, cork); IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); if (proto == IPPROTO_ICMPV6) { diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index ca49e6617afa..0d896ca7b589 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -622,7 +622,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, skb->priority = READ_ONCE(sk->sk_priority); skb->mark = sockc->mark; skb->tstamp = sockc->transmit_time; - skb->mono_delivery_time = !!skb->tstamp; + skb_put(skb, length); skb_reset_network_header(skb); iph = ipv6_hdr(skb); diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index 8bad0a44a0a6..6d8286c299c9 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -258,6 +258,9 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb) ireq->wscale_ok, &rcv_wscale, dst_metric(dst, RTAX_INITRWND)); + /* req->syncookie is set true only if ACK is validated + * by BPF kfunc, then, rcv_wscale is already configured. + */ if (!req->syncookie) ireq->rcv_wscale = rcv_wscale; ireq->ecn_ok &= cookie_ecn_ok(net, dst); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index 4aa1c72e6c49..7c8c3adcac6e 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1060,12 +1060,12 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg, int i; /* skip iucv_array lying in the headroom */ - iba[0].address = (u32)virt_to_phys(skb->data); + iba[0].address = virt_to_dma32(skb->data); iba[0].length = (u32)skb_headlen(skb); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - iba[i + 1].address = (u32)virt_to_phys(skb_frag_address(frag)); + iba[i + 1].address = virt_to_dma32(skb_frag_address(frag)); iba[i + 1].length = (u32)skb_frag_size(frag); } err = pr_iucv->message_send(iucv->path, &txmsg, @@ -1161,12 +1161,12 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb, struct iucv_array *iba = (struct iucv_array *)skb->head; int i; - iba[0].address = (u32)virt_to_phys(skb->data); + iba[0].address = virt_to_dma32(skb->data); iba[0].length = (u32)skb_headlen(skb); for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - iba[i + 1].address = (u32)virt_to_phys(skb_frag_address(frag)); + iba[i + 1].address = virt_to_dma32(skb_frag_address(frag)); iba[i + 1].length = (u32)skb_frag_size(frag); } rc = pr_iucv->message_receive(path, msg, diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 5b56ae6612dd..a4ab615ca3e3 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -210,7 +210,7 @@ struct iucv_cmd_dpl { u8 iprmmsg[8]; u32 ipsrccls; u32 ipmsgtag; - u32 ipbfadr2; + dma32_t ipbfadr2; u32 ipbfln2f; u32 res; } __attribute__ ((packed,aligned(8))); @@ -226,11 +226,11 @@ struct iucv_cmd_db { u8 iprcode; u32 ipmsgid; u32 iptrgcls; - u32 ipbfadr1; + dma32_t ipbfadr1; u32 ipbfln1f; u32 ipsrccls; u32 ipmsgtag; - u32 ipbfadr2; + dma32_t ipbfadr2; u32 ipbfln2f; u32 res; } __attribute__ ((packed,aligned(8))); @@ -432,7 
+432,7 @@ static void iucv_declare_cpu(void *data) /* Declare interrupt buffer. */ parm = iucv_param_irq[cpu]; memset(parm, 0, sizeof(union iucv_param)); - parm->db.ipbfadr1 = virt_to_phys(iucv_irq_data[cpu]); + parm->db.ipbfadr1 = virt_to_dma32(iucv_irq_data[cpu]); rc = iucv_call_b2f0(IUCV_DECLARE_BUFFER, parm); if (rc) { char *err = "Unknown"; @@ -1081,8 +1081,7 @@ static int iucv_message_receive_iprmdata(struct iucv_path *path, size = (size < 8) ? size : 8; for (array = buffer; size > 0; array++) { copy = min_t(size_t, size, array->length); - memcpy((u8 *)(addr_t) array->address, - rmmsg, copy); + memcpy(dma32_to_virt(array->address), rmmsg, copy); rmmsg += copy; size -= copy; } @@ -1124,7 +1123,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, parm = iucv_param[smp_processor_id()]; memset(parm, 0, sizeof(union iucv_param)); - parm->db.ipbfadr1 = (u32)virt_to_phys(buffer); + parm->db.ipbfadr1 = virt_to_dma32(buffer); parm->db.ipbfln1f = (u32) size; parm->db.ipmsgid = msg->id; parm->db.ippathid = path->pathid; @@ -1242,7 +1241,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, parm->dpl.iptrgcls = msg->class; memcpy(parm->dpl.iprmmsg, reply, min_t(size_t, size, 8)); } else { - parm->db.ipbfadr1 = (u32)virt_to_phys(reply); + parm->db.ipbfadr1 = virt_to_dma32(reply); parm->db.ipbfln1f = (u32) size; parm->db.ippathid = path->pathid; parm->db.ipflags1 = flags; @@ -1294,7 +1293,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, parm->dpl.ipmsgtag = msg->tag; memcpy(parm->dpl.iprmmsg, buffer, 8); } else { - parm->db.ipbfadr1 = (u32)virt_to_phys(buffer); + parm->db.ipbfadr1 = virt_to_dma32(buffer); parm->db.ipbfln1f = (u32) size; parm->db.ippathid = path->pathid; parm->db.ipflags1 = flags | IUCV_IPNORPY; @@ -1379,7 +1378,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, parm->dpl.iptrgcls = msg->class; parm->dpl.ipsrccls = srccls; parm->dpl.ipmsgtag = msg->tag; - parm->dpl.ipbfadr2 = (u32)virt_to_phys(answer); + parm->dpl.ipbfadr2 = virt_to_dma32(answer); parm->dpl.ipbfln2f = (u32) asize; memcpy(parm->dpl.iprmmsg, buffer, 8); } else { @@ -1388,9 +1387,9 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, parm->db.iptrgcls = msg->class; parm->db.ipsrccls = srccls; parm->db.ipmsgtag = msg->tag; - parm->db.ipbfadr1 = (u32)virt_to_phys(buffer); + parm->db.ipbfadr1 = virt_to_dma32(buffer); parm->db.ipbfln1f = (u32) size; - parm->db.ipbfadr2 = (u32)virt_to_phys(answer); + parm->db.ipbfadr2 = virt_to_dma32(answer); parm->db.ipbfln2f = (u32) asize; } rc = iucv_call_b2f0(IUCV_SEND, parm); @@ -1904,6 +1903,6 @@ static void __exit iucv_exit(void) subsys_initcall(iucv_init); module_exit(iucv_exit); -MODULE_AUTHOR("(C) 2001 IBM Corp. by Fritz Elfert (felfert@millenux.com)"); +MODULE_AUTHOR("(C) 2001 IBM Corp. 
by Fritz Elfert <felfert@millenux.com>"); MODULE_DESCRIPTION("Linux for S/390 IUCV lowlevel driver"); MODULE_LICENSE("GPL"); diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c index 45d1e6a157fc..34ab659f541e 100644 --- a/net/mpls/mpls_gso.c +++ b/net/mpls/mpls_gso.c @@ -109,5 +109,5 @@ module_init(mpls_gso_init); module_exit(mpls_gso_exit); MODULE_DESCRIPTION("MPLS GSO support"); -MODULE_AUTHOR("Simon Horman (horms@verge.net.au)"); +MODULE_AUTHOR("Simon Horman <horms@verge.net.au>"); MODULE_LICENSE("GPL"); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index e93f905e60b6..5fa3d3540c93 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -1213,7 +1213,7 @@ static int nf_tables_updtable(struct nft_ctx *ctx) if (flags & ~NFT_TABLE_F_MASK) return -EOPNOTSUPP; - if (flags == ctx->table->flags) + if (flags == (ctx->table->flags & NFT_TABLE_F_MASK)) return 0; if ((nft_table_has_owner(ctx->table) && @@ -2631,19 +2631,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, } } - if (nla[NFTA_CHAIN_COUNTERS]) { - if (!nft_is_base_chain(chain)) { - err = -EOPNOTSUPP; - goto err_hooks; - } - - stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]); - if (IS_ERR(stats)) { - err = PTR_ERR(stats); - goto err_hooks; - } - } - if (!(table->flags & NFT_TABLE_F_DORMANT) && nft_is_base_chain(chain) && !list_empty(&hook.list)) { @@ -2658,6 +2645,20 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, } unregister = true; + + if (nla[NFTA_CHAIN_COUNTERS]) { + if (!nft_is_base_chain(chain)) { + err = -EOPNOTSUPP; + goto err_hooks; + } + + stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]); + if (IS_ERR(stats)) { + err = PTR_ERR(stats); + goto err_hooks; + } + } + err = -ENOMEM; trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, sizeof(struct nft_trans_chain)); diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index c0ceea068936..df8de5090246 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -2329,8 +2329,6 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx, if (m) { rcu_barrier(); - nft_set_pipapo_match_destroy(ctx, set, m); - for_each_possible_cpu(cpu) pipapo_free_scratch(m, cpu); free_percpu(m->scratch); @@ -2342,8 +2340,7 @@ static void nft_pipapo_destroy(const struct nft_ctx *ctx, if (priv->clone) { m = priv->clone; - if (priv->dirty) - nft_set_pipapo_match_destroy(ctx, set, m); + nft_set_pipapo_match_destroy(ctx, set, m); for_each_possible_cpu(cpu) pipapo_free_scratch(priv->clone, cpu); diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index cdad47b140fa..0d26c8ec9993 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -1516,6 +1516,11 @@ static void nci_rx_work(struct work_struct *work) nfc_send_to_raw_sock(ndev->nfc_dev, skb, RAW_PAYLOAD_NCI, NFC_DIRECTION_RX); + if (!nci_plen(skb->data)) { + kfree_skb(skb); + break; + } + /* Process frame */ switch (nci_mt(skb->data)) { case NCI_MT_RSP_PKT: diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 61270826b9ac..18f616f487ea 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2057,7 +2057,7 @@ retry: skb->priority = READ_ONCE(sk->sk_priority); skb->mark = READ_ONCE(sk->sk_mark); skb->tstamp = sockc.transmit_time; - skb->mono_delivery_time = !!skb->tstamp; + skb_setup_tx_timestamp(skb, sockc.tsflags); if (unlikely(extra_len == 4)) @@ -2586,7 +2586,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, skb->priority = 
READ_ONCE(po->sk.sk_priority); skb->mark = READ_ONCE(po->sk.sk_mark); skb->tstamp = sockc->transmit_time; - skb->mono_delivery_time = !!skb->tstamp; skb_setup_tx_timestamp(skb, sockc->tsflags); skb_zcopy_set_nouarg(skb, ph.raw); @@ -3065,7 +3064,6 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb->priority = READ_ONCE(sk->sk_priority); skb->mark = sockc.mark; skb->tstamp = sockc.transmit_time; - skb->mono_delivery_time = !!skb->tstamp; if (unlikely(extra_len == 4)) skb->no_fcs = 1; @@ -4000,7 +3998,7 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, if (val < 0 || val > 1) return -EINVAL; - po->prot_hook.ignore_outgoing = !!val; + WRITE_ONCE(po->prot_hook.ignore_outgoing, !!val); return 0; } case PACKET_TX_HAS_OFF: @@ -4134,7 +4132,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname, 0); break; case PACKET_IGNORE_OUTGOING: - val = po->prot_hook.ignore_outgoing; + val = READ_ONCE(po->prot_hook.ignore_outgoing); break; case PACKET_ROLLOVER_STATS: if (!po->rollover) diff --git a/net/rds/send.c b/net/rds/send.c index 2899def23865..09a280110654 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -103,13 +103,12 @@ EXPORT_SYMBOL_GPL(rds_send_path_reset); static int acquire_in_xmit(struct rds_conn_path *cp) { - return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0; + return test_and_set_bit_lock(RDS_IN_XMIT, &cp->cp_flags) == 0; } static void release_in_xmit(struct rds_conn_path *cp) { - clear_bit(RDS_IN_XMIT, &cp->cp_flags); - smp_mb__after_atomic(); + clear_bit_unlock(RDS_IN_XMIT, &cp->cp_flags); /* * We don't use wait_on_bit()/wake_up_bit() because our waking is in a * hot path and finding waiters is very rare. We don't want to walk diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c index 5b595773e59b..358cf304f4c9 100644 --- a/net/sched/sch_fq_pie.c +++ b/net/sched/sch_fq_pie.c @@ -10,6 +10,7 @@ */ #include <linux/jhash.h> +#include <linux/module.h> #include <linux/sizes.h> #include <linux/vmalloc.h> #include <net/pkt_cls.h> @@ -563,6 +564,7 @@ static struct Qdisc_ops fq_pie_qdisc_ops __read_mostly = { .dump_stats = fq_pie_dump_stats, .owner = THIS_MODULE, }; +MODULE_ALIAS_NET_SCH("fq_pie"); static int __init fq_pie_module_init(void) { diff --git a/net/socket.c b/net/socket.c index 7e9c8fc9a5b4..e5f3af49a8b6 100644 --- a/net/socket.c +++ b/net/socket.c @@ -2600,9 +2600,9 @@ out: return err; } -int sendmsg_copy_msghdr(struct msghdr *msg, - struct user_msghdr __user *umsg, unsigned flags, - struct iovec **iov) +static int sendmsg_copy_msghdr(struct msghdr *msg, + struct user_msghdr __user *umsg, unsigned flags, + struct iovec **iov) { int err; @@ -2753,10 +2753,10 @@ SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, return __sys_sendmmsg(fd, mmsg, vlen, flags, true); } -int recvmsg_copy_msghdr(struct msghdr *msg, - struct user_msghdr __user *umsg, unsigned flags, - struct sockaddr __user **uaddr, - struct iovec **iov) +static int recvmsg_copy_msghdr(struct msghdr *msg, + struct user_msghdr __user *umsg, unsigned flags, + struct sockaddr __user **uaddr, + struct iovec **iov) { ssize_t err; diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c index d435bffc6199..97ff11973c49 100644 --- a/net/sunrpc/addr.c +++ b/net/sunrpc/addr.c @@ -284,10 +284,10 @@ char *rpc_sockaddr2uaddr(const struct sockaddr *sap, gfp_t gfp_flags) } if (snprintf(portbuf, sizeof(portbuf), - ".%u.%u", port >> 8, port & 0xff) > (int)sizeof(portbuf)) + ".%u.%u", port >> 8, port & 0xff) >= (int)sizeof(portbuf)) 
return NULL; - if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) > sizeof(addrbuf)) + if (strlcat(addrbuf, portbuf, sizeof(addrbuf)) >= sizeof(addrbuf)) return NULL; return kstrdup(addrbuf, gfp_flags); diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index cda0935a68c9..28f3749f6dc6 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -405,7 +405,7 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, clnt->cl_maxproc = version->nrprocs; clnt->cl_prog = args->prognumber ? : program->number; clnt->cl_vers = version->number; - clnt->cl_stats = program->stats; + clnt->cl_stats = args->stats ? : program->stats; clnt->cl_metrics = rpc_alloc_iostats(clnt); rpc_init_pipe_dir_head(&clnt->cl_pipedir_objects); err = -ENOMEM; @@ -691,6 +691,7 @@ struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt) .version = clnt->cl_vers, .authflavor = clnt->cl_auth->au_flavor, .cred = clnt->cl_cred, + .stats = clnt->cl_stats, }; return __rpc_clone_client(&args, clnt); } @@ -713,6 +714,7 @@ rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor) .version = clnt->cl_vers, .authflavor = flavor, .cred = clnt->cl_cred, + .stats = clnt->cl_stats, }; return __rpc_clone_client(&args, clnt); } @@ -1068,6 +1070,7 @@ struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old, .version = vers, .authflavor = old->cl_auth->au_flavor, .cred = old->cl_cred, + .stats = old->cl_stats, }; struct rpc_clnt *clnt; int err; diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index dcc2b4f49e77..910a5d850d04 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1490,7 +1490,7 @@ int register_rpc_pipefs(void) rpc_inode_cachep = kmem_cache_create("rpc_inode_cache", sizeof(struct rpc_inode), 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD|SLAB_ACCOUNT), + SLAB_ACCOUNT), init_once); if (!rpc_inode_cachep) return -ENOMEM; diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c index af13fdfa6672..09f245cda526 100644 --- a/net/sunrpc/xprt.c +++ b/net/sunrpc/xprt.c @@ -1398,6 +1398,12 @@ xprt_request_dequeue_transmit_locked(struct rpc_task *task) if (!test_and_clear_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) return; if (!list_empty(&req->rq_xmit)) { + struct rpc_xprt *xprt = req->rq_xprt; + + if (list_is_first(&req->rq_xmit, &xprt->xmit_queue) && + xprt->ops->abort_send_request) + xprt->ops->abort_send_request(req); + list_del(&req->rq_xmit); if (!list_empty(&req->rq_xmit2)) { struct rpc_rqst *next = list_first_entry(&req->rq_xmit2, @@ -1541,6 +1547,9 @@ xprt_request_transmit(struct rpc_rqst *req, struct rpc_task *snd_task) int is_retrans = RPC_WAS_SENT(task); int status; + if (test_bit(XPRT_CLOSE_WAIT, &xprt->state)) + return -ENOTCONN; + if (!req->rq_bytes_sent) { if (xprt_request_data_received(task)) { status = 0; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index d92c13e78a56..bb9b747d58a1 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -62,6 +62,7 @@ #include "sunrpc.h" static void xs_close(struct rpc_xprt *xprt); +static void xs_reset_srcport(struct sock_xprt *transport); static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock); static void xs_tcp_set_socket_timeouts(struct rpc_xprt *xprt, struct socket *sock); @@ -883,6 +884,17 @@ static int xs_stream_prepare_request(struct rpc_rqst *req, struct xdr_buf *buf) return xdr_alloc_bvec(buf, rpc_task_gfp_mask()); } +static void xs_stream_abort_send_request(struct rpc_rqst *req) +{ + struct rpc_xprt *xprt = req->rq_xprt; + struct sock_xprt *transport = + 
container_of(xprt, struct sock_xprt, xprt); + + if (transport->xmit.offset != 0 && + !test_bit(XPRT_CLOSE_WAIT, &xprt->state)) + xprt_force_disconnect(xprt); +} + /* * Determine if the previous message in the stream was aborted before it * could complete transmission. @@ -1565,8 +1577,10 @@ static void xs_tcp_state_change(struct sock *sk) break; case TCP_CLOSE: if (test_and_clear_bit(XPRT_SOCK_CONNECTING, - &transport->sock_state)) + &transport->sock_state)) { + xs_reset_srcport(transport); xprt_clear_connecting(xprt); + } clear_bit(XPRT_CLOSING, &xprt->state); /* Trigger the socket release */ xs_run_error_worker(transport, XPRT_SOCK_WAKE_DISCONNECT); @@ -1722,6 +1736,11 @@ static void xs_set_port(struct rpc_xprt *xprt, unsigned short port) xs_update_peer_port(xprt); } +static void xs_reset_srcport(struct sock_xprt *transport) +{ + transport->srcport = 0; +} + static void xs_set_srcport(struct sock_xprt *transport, struct socket *sock) { if (transport->srcport == 0 && transport->xprt.reuseport) @@ -3012,6 +3031,7 @@ static const struct rpc_xprt_ops xs_local_ops = { .buf_free = rpc_free, .prepare_request = xs_stream_prepare_request, .send_request = xs_local_send_request, + .abort_send_request = xs_stream_abort_send_request, .wait_for_reply_request = xprt_wait_for_reply_request_def, .close = xs_close, .destroy = xs_destroy, @@ -3059,6 +3079,7 @@ static const struct rpc_xprt_ops xs_tcp_ops = { .buf_free = rpc_free, .prepare_request = xs_stream_prepare_request, .send_request = xs_tcp_send_request, + .abort_send_request = xs_stream_abort_send_request, .wait_for_reply_request = xprt_wait_for_reply_request_def, .close = xs_tcp_shutdown, .destroy = xs_destroy, diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 653e51ae3964..6346690d5c69 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -407,7 +407,8 @@ bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x) struct xfrm_dst *xdst = (struct xfrm_dst *)dst; struct net_device *dev = x->xso.dev; - if (!x->type_offload) + if (!x->type_offload || + (x->xso.type == XFRM_DEV_OFFLOAD_UNSPECIFIED && x->encap)) return false; if (x->xso.type == XFRM_DEV_OFFLOAD_PACKET || |
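Several of the hunks above (dev_queue_xmit_nit() in net/core/dev.c, the sk_rcvbuf reads in net/core/sock.c, and the PACKET_IGNORE_OUTGOING get/setsockopt paths in net/packet/af_packet.c) apply the same annotation pattern: a field that is written under a lock but read locklessly on a fast path gets a WRITE_ONCE()/READ_ONCE() pair so the lockless accesses are documented and cannot be torn or fused by the compiler. A minimal kernel-style sketch of that pattern follows; the struct and function names are illustrative only and are not taken from the patches above.

/* Kernel-style sketch; READ_ONCE()/WRITE_ONCE() come from <linux/compiler.h>. */
struct example_hook {
	int ignore_outgoing;	/* written under a lock, read locklessly */
};

/* Writer side: still serialized by the caller's lock. */
static void example_set_ignore_outgoing(struct example_hook *hook, int val)
{
	/* Pairs with the lockless READ_ONCE() in example_should_skip(). */
	WRITE_ONCE(hook->ignore_outgoing, !!val);
}

/* Reader side: hot path, no lock taken. */
static bool example_should_skip(const struct example_hook *hook)
{
	return READ_ONCE(hook->ignore_outgoing);
}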
