Diffstat (limited to 'net')
-rw-r--r--   net/batman-adv/originator.c    |  14
-rw-r--r--   net/bluetooth/hci_conn.c       |   7
-rw-r--r--   net/bluetooth/hci_event.c      |  11
-rw-r--r--   net/bluetooth/hci_sync.c       |  21
-rw-r--r--   net/bluetooth/iso.c            |  10
-rw-r--r--   net/bluetooth/l2cap_core.c     |   4
-rw-r--r--   net/bluetooth/mgmt.c           |  26
-rw-r--r--   net/bluetooth/rfcomm/tty.c     |  26
-rw-r--r--   net/core/devmem.c              |  27
-rw-r--r--   net/core/filter.c              |   3
-rw-r--r--   net/ipv4/tcp_input.c           |  21
-rw-r--r--   net/mac80211/cfg.c             |   3
-rw-r--r--   net/mac80211/key.c             |  11
-rw-r--r--   net/mptcp/mib.c                |   1
-rw-r--r--   net/mptcp/mib.h                |   1
-rw-r--r--   net/mptcp/protocol.c           |  83
-rw-r--r--   net/mptcp/protocol.h           |   2
-rw-r--r--   net/netfilter/nft_connlimit.c  |   2
-rw-r--r--   net/netfilter/nft_ct.c         |  30
-rw-r--r--   net/sctp/input.c               |   2
-rw-r--r--   net/tls/tls_device.c           |   4
-rw-r--r--   net/wireless/nl80211.c         |   3
22 files changed, 217 insertions, 95 deletions
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index c84420cb410d..a662408ad867 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -763,11 +763,16 @@ int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
 
 	bat_priv = netdev_priv(mesh_iface);
 	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+	if (!primary_if) {
 		ret = -ENOENT;
 		goto out_put_mesh_iface;
 	}
 
+	if (primary_if->if_status != BATADV_IF_ACTIVE) {
+		ret = -ENOENT;
+		goto out_put_primary_if;
+	}
+
 	hard_iface = batadv_netlink_get_hardif(bat_priv, cb);
 	if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) {
 		ret = PTR_ERR(hard_iface);
@@ -1327,11 +1332,16 @@ int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
 
 	bat_priv = netdev_priv(mesh_iface);
 	primary_if = batadv_primary_if_get_selected(bat_priv);
-	if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
+	if (!primary_if) {
 		ret = -ENOENT;
 		goto out_put_mesh_iface;
 	}
 
+	if (primary_if->if_status != BATADV_IF_ACTIVE) {
+		ret = -ENOENT;
+		goto out_put_primary_if;
+	}
+
 	hard_iface = batadv_netlink_get_hardif(bat_priv, cb);
 	if (IS_ERR(hard_iface) && PTR_ERR(hard_iface) != -ENONET) {
 		ret = PTR_ERR(hard_iface);
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 111f0e37b672..c5dedf39a129 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -843,6 +843,13 @@ static void bis_cleanup(struct hci_conn *conn)
 		if (bis)
 			return;
 
+		bis = hci_conn_hash_lookup_big_state(hdev,
+						     conn->iso_qos.bcast.big,
+						     BT_OPEN,
+						     HCI_ROLE_MASTER);
+		if (bis)
+			return;
+
 		hci_le_terminate_big(hdev, conn);
 	} else {
 		hci_le_big_terminate(hdev, conn->iso_qos.bcast.big,
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index d790b0d4eb9a..d37db364acf7 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -1607,8 +1607,10 @@ static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data,
 
 		hci_dev_set_flag(hdev, HCI_LE_ADV);
 
-		if (adv && !adv->periodic)
+		if (adv)
 			adv->enabled = true;
+		else if (!set->handle)
+			hci_dev_set_flag(hdev, HCI_LE_ADV_0);
 
 		conn = hci_lookup_le_connect(hdev);
 		if (conn)
@@ -1619,6 +1621,8 @@
 		if (cp->num_of_sets) {
 			if (adv)
 				adv->enabled = false;
+			else if (!set->handle)
+				hci_dev_clear_flag(hdev, HCI_LE_ADV_0);
 
 			/* If just one instance was disabled check if there are
 			 * any other instance enabled before clearing HCI_LE_ADV
@@ -3959,8 +3963,11 @@ static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data,
 		hci_dev_set_flag(hdev, HCI_LE_PER_ADV);
 
 		if (adv)
-			adv->enabled = true;
+			adv->periodic_enabled = true;
 	} else {
+		if (adv)
+			adv->periodic_enabled = false;
+
 		/* If just one instance was disabled check if there are
 		 * any other instance enabled before clearing HCI_LE_PER_ADV.
 		 * The current periodic adv instance will be marked as
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index eefdb6134ca5..73fc41b68b68 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -863,11 +863,17 @@ bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
 {
 	struct hci_cmd_sync_work_entry *entry;
 
-	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
-	if (!entry)
+	mutex_lock(&hdev->cmd_sync_work_lock);
+
+	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+	if (!entry) {
+		mutex_unlock(&hdev->cmd_sync_work_lock);
 		return false;
+	}
 
-	hci_cmd_sync_cancel_entry(hdev, entry);
+	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+
+	mutex_unlock(&hdev->cmd_sync_work_lock);
 
 	return true;
 }
@@ -1601,7 +1607,7 @@ int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
 
 	/* If periodic advertising already disabled there is nothing to do. */
 	adv = hci_find_adv_instance(hdev, instance);
-	if (!adv || !adv->periodic || !adv->enabled)
+	if (!adv || !adv->periodic_enabled)
 		return 0;
 
 	memset(&cp, 0, sizeof(cp));
@@ -1666,7 +1672,7 @@ static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
 
 	/* If periodic advertising already enabled there is nothing to do. */
 	adv = hci_find_adv_instance(hdev, instance);
-	if (adv && adv->periodic && adv->enabled)
+	if (adv && adv->periodic_enabled)
 		return 0;
 
 	memset(&cp, 0, sizeof(cp));
@@ -2600,9 +2606,8 @@ static int hci_resume_advertising_sync(struct hci_dev *hdev)
 		/* If current advertising instance is set to instance 0x00
 		 * then we need to re-enable it.
 		 */
-		if (!hdev->cur_adv_instance)
-			err = hci_enable_ext_advertising_sync(hdev,
-							      hdev->cur_adv_instance);
+		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_ADV_0))
+			err = hci_enable_ext_advertising_sync(hdev, 0x00);
 	} else {
 		/* Schedule for most recent instance to be restarted and begin
 		 * the software rotation loop
diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c
index 9b263d061e05..3d98cb6291da 100644
--- a/net/bluetooth/iso.c
+++ b/net/bluetooth/iso.c
@@ -2032,7 +2032,7 @@ static void iso_conn_ready(struct iso_conn *conn)
 		 */
 		if (!bacmp(&hcon->dst, BDADDR_ANY)) {
 			bacpy(&hcon->dst, &iso_pi(parent)->dst);
-			hcon->dst_type = iso_pi(parent)->dst_type;
+			hcon->dst_type = le_addr_type(iso_pi(parent)->dst_type);
 		}
 
 		if (test_bit(HCI_CONN_PA_SYNC, &hcon->flags)) {
@@ -2046,7 +2046,13 @@
 		}
 
 		bacpy(&iso_pi(sk)->dst, &hcon->dst);
-		iso_pi(sk)->dst_type = hcon->dst_type;
+
+		/* Convert from HCI to three-value type */
+		if (hcon->dst_type == ADDR_LE_DEV_PUBLIC)
+			iso_pi(sk)->dst_type = BDADDR_LE_PUBLIC;
+		else
+			iso_pi(sk)->dst_type = BDADDR_LE_RANDOM;
+
 		iso_pi(sk)->sync_handle = iso_pi(parent)->sync_handle;
 		memcpy(iso_pi(sk)->base, iso_pi(parent)->base, iso_pi(parent)->base_len);
 		iso_pi(sk)->base_len = iso_pi(parent)->base_len;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 805c752ac0a9..d08320380ad6 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -282,7 +282,7 @@ static void __set_retrans_timer(struct l2cap_chan *chan)
 	if (!delayed_work_pending(&chan->monitor_timer) &&
 	    chan->retrans_timeout) {
 		l2cap_set_timer(chan, &chan->retrans_timer,
-				secs_to_jiffies(chan->retrans_timeout));
+				msecs_to_jiffies(chan->retrans_timeout));
 	}
 }
 
@@ -291,7 +291,7 @@ static void __set_monitor_timer(struct l2cap_chan *chan)
 	__clear_retrans_timer(chan);
 
 	if (chan->monitor_timeout) {
 		l2cap_set_timer(chan, &chan->monitor_timer,
-				secs_to_jiffies(chan->monitor_timeout));
+				msecs_to_jiffies(chan->monitor_timeout));
 	}
 }
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index a3d16eece0d2..24e335e3a727 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -2175,19 +2175,24 @@ static void set_mesh_complete(struct hci_dev *hdev, void *data, int err)
 	sk = cmd->sk;
 
 	if (status) {
+		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER,
+				status);
 		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, true,
 				     cmd_status_rsp, &status);
-		return;
+		goto done;
 	}
 
-	mgmt_pending_remove(cmd);
 	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0);
+
+done:
+	mgmt_pending_free(cmd);
 }
 
 static int set_mesh_sync(struct hci_dev *hdev, void *data)
 {
 	struct mgmt_pending_cmd *cmd = data;
-	struct mgmt_cp_set_mesh cp;
+	DEFINE_FLEX(struct mgmt_cp_set_mesh, cp, ad_types, num_ad_types,
+		    sizeof(hdev->mesh_ad_types));
 	size_t len;
 
 	mutex_lock(&hdev->mgmt_pending_lock);
@@ -2197,27 +2202,26 @@ static int set_mesh_sync(struct hci_dev *hdev, void *data)
 		return -ECANCELED;
 	}
 
-	memcpy(&cp, cmd->param, sizeof(cp));
+	len = cmd->param_len;
+	memcpy(cp, cmd->param, min(__struct_size(cp), len));
 
 	mutex_unlock(&hdev->mgmt_pending_lock);
 
-	len = cmd->param_len;
-
 	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types));
 
-	if (cp.enable)
+	if (cp->enable)
 		hci_dev_set_flag(hdev, HCI_MESH);
 	else
 		hci_dev_clear_flag(hdev, HCI_MESH);
 
-	hdev->le_scan_interval = __le16_to_cpu(cp.period);
-	hdev->le_scan_window = __le16_to_cpu(cp.window);
+	hdev->le_scan_interval = __le16_to_cpu(cp->period);
+	hdev->le_scan_window = __le16_to_cpu(cp->window);
 
-	len -= sizeof(cp);
+	len -= sizeof(struct mgmt_cp_set_mesh);
 
 	/* If filters don't fit, forward all adv pkts */
 	if (len <= sizeof(hdev->mesh_ad_types))
-		memcpy(hdev->mesh_ad_types, cp.ad_types, len);
+		memcpy(hdev->mesh_ad_types, cp->ad_types, len);
 
 	hci_update_passive_scan_sync(hdev);
 	return 0;
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index 376ce6de84be..b783526ab588 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -643,8 +643,8 @@ static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig)
 		tty_port_tty_hangup(&dev->port, true);
 
 	dev->modem_status =
-		((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) |
-		((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) |
+		((v24_sig & RFCOMM_V24_RTC) ? TIOCM_DSR : 0) |
+		((v24_sig & RFCOMM_V24_RTR) ? TIOCM_CTS : 0) |
 		((v24_sig & RFCOMM_V24_IC)  ? TIOCM_RI : 0) |
 		((v24_sig & RFCOMM_V24_DV)  ? TIOCM_CD : 0);
 }
@@ -1055,10 +1055,14 @@ static void rfcomm_tty_hangup(struct tty_struct *tty)
 static int rfcomm_tty_tiocmget(struct tty_struct *tty)
 {
 	struct rfcomm_dev *dev = tty->driver_data;
+	struct rfcomm_dlc *dlc = dev->dlc;
+	u8 v24_sig;
 
 	BT_DBG("tty %p dev %p", tty, dev);
 
-	return dev->modem_status;
+	rfcomm_dlc_get_modem_status(dlc, &v24_sig);
+
+	return (v24_sig & (TIOCM_DTR | TIOCM_RTS)) | dev->modem_status;
 }
 
 static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
@@ -1071,23 +1075,15 @@ static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigne
 
 	rfcomm_dlc_get_modem_status(dlc, &v24_sig);
 
-	if (set & TIOCM_DSR || set & TIOCM_DTR)
+	if (set & TIOCM_DTR)
 		v24_sig |= RFCOMM_V24_RTC;
-	if (set & TIOCM_RTS || set & TIOCM_CTS)
+	if (set & TIOCM_RTS)
 		v24_sig |= RFCOMM_V24_RTR;
-	if (set & TIOCM_RI)
-		v24_sig |= RFCOMM_V24_IC;
-	if (set & TIOCM_CD)
-		v24_sig |= RFCOMM_V24_DV;
-	if (clear & TIOCM_DSR || clear & TIOCM_DTR)
+	if (clear & TIOCM_DTR)
 		v24_sig &= ~RFCOMM_V24_RTC;
-	if (clear & TIOCM_RTS || clear & TIOCM_CTS)
+	if (clear & TIOCM_RTS)
 		v24_sig &= ~RFCOMM_V24_RTR;
-	if (clear & TIOCM_RI)
-		v24_sig &= ~RFCOMM_V24_IC;
-	if (clear & TIOCM_CD)
-		v24_sig &= ~RFCOMM_V24_DV;
 
 	rfcomm_dlc_set_modem_status(dlc, v24_sig);
diff --git a/net/core/devmem.c b/net/core/devmem.c
index d9de31a6cc7f..1d04754bc756 100644
--- a/net/core/devmem.c
+++ b/net/core/devmem.c
@@ -17,6 +17,7 @@
 #include <net/page_pool/helpers.h>
 #include <net/page_pool/memory_provider.h>
 #include <net/sock.h>
+#include <net/tcp.h>
 #include <trace/events/page_pool.h>
 
 #include "devmem.h"
@@ -357,7 +358,8 @@ struct net_devmem_dmabuf_binding *net_devmem_get_binding(struct sock *sk,
 							 unsigned int dmabuf_id)
 {
 	struct net_devmem_dmabuf_binding *binding;
-	struct dst_entry *dst = __sk_dst_get(sk);
+	struct net_device *dst_dev;
+	struct dst_entry *dst;
 	int err = 0;
 
 	binding = net_devmem_lookup_dmabuf(dmabuf_id);
@@ -366,16 +368,35 @@
 		goto out_err;
 	}
 
+	rcu_read_lock();
+	dst = __sk_dst_get(sk);
+	/* If dst is NULL (route expired), attempt to rebuild it. */
+	if (unlikely(!dst)) {
+		if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk)) {
+			err = -EHOSTUNREACH;
+			goto out_unlock;
+		}
+		dst = __sk_dst_get(sk);
+		if (unlikely(!dst)) {
+			err = -ENODEV;
+			goto out_unlock;
+		}
+	}
+
 	/* The dma-addrs in this binding are only reachable to the corresponding
 	 * net_device.
 	 */
-	if (!dst || !dst->dev || dst->dev->ifindex != binding->dev->ifindex) {
+	dst_dev = dst_dev_rcu(dst);
+	if (unlikely(!dst_dev) || unlikely(dst_dev != binding->dev)) {
 		err = -ENODEV;
-		goto out_err;
+		goto out_unlock;
 	}
+	rcu_read_unlock();
 
 	return binding;
 
+out_unlock:
+	rcu_read_unlock();
 out_err:
 	if (binding)
 		net_devmem_dmabuf_binding_put(binding);
diff --git a/net/core/filter.c b/net/core/filter.c
index 76628df1fc82..fa06c5a08e22 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3877,7 +3877,8 @@ static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
 	u32 new_len = skb->len + head_room;
 	int ret;
 
-	if (unlikely(flags || (!skb_is_gso(skb) && new_len > max_len) ||
+	if (unlikely(flags || (int)head_room < 0 ||
+		     (!skb_is_gso(skb) && new_len > max_len) ||
 		     new_len < skb->len))
 		return -EINVAL;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 31ea5af49f2d..e4a979b75cc6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -891,18 +891,27 @@ static inline void tcp_rcv_rtt_measure_ts(struct sock *sk,
 	}
 }
 
-void tcp_rcvbuf_grow(struct sock *sk)
+void tcp_rcvbuf_grow(struct sock *sk, u32 newval)
 {
 	const struct net *net = sock_net(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	int rcvwin, rcvbuf, cap;
+	u32 rcvwin, rcvbuf, cap, oldval;
+	u64 grow;
+
+	oldval = tp->rcvq_space.space;
+	tp->rcvq_space.space = newval;
 
 	if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
 	    (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
 		return;
 
+	/* DRS is always one RTT late. */
+	rcvwin = newval << 1;
+
 	/* slow start: allow the sender to double its rate. */
-	rcvwin = tp->rcvq_space.space << 1;
+	grow = (u64)rcvwin * (newval - oldval);
+	do_div(grow, oldval);
+	rcvwin += grow << 1;
 
 	if (!RB_EMPTY_ROOT(&tp->out_of_order_queue))
 		rcvwin += TCP_SKB_CB(tp->ooo_last_skb)->end_seq - tp->rcv_nxt;
@@ -943,9 +952,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 
 	trace_tcp_rcvbuf_grow(sk, time);
 
-	tp->rcvq_space.space = copied;
-
-	tcp_rcvbuf_grow(sk);
+	tcp_rcvbuf_grow(sk, copied);
 
 new_measure:
 	tp->rcvq_space.seq = tp->copied_seq;
@@ -5270,7 +5277,7 @@ end:
 	}
 	/* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
 	if (sk->sk_socket)
-		tcp_rcvbuf_grow(sk);
+		tcp_rcvbuf_grow(sk, tp->rcvq_space.space);
 }
 
 static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb,
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index d9aca1c3c097..c52b0456039d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1876,6 +1876,9 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev,
 	link_conf->nontransmitted = false;
 	link_conf->ema_ap = false;
 	link_conf->bssid_indicator = 0;
+	link_conf->fils_discovery.min_interval = 0;
+	link_conf->fils_discovery.max_interval = 0;
+	link_conf->unsol_bcast_probe_resp_interval = 0;
 
 	__sta_info_flush(sdata, true, link_id, NULL);
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index b14e9cd9713f..d5da7ccea66e 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -508,11 +508,16 @@ static int ieee80211_key_replace(struct ieee80211_sub_if_data *sdata,
 				ret = ieee80211_key_enable_hw_accel(new);
 		}
 	} else {
-		if (!new->local->wowlan)
+		if (!new->local->wowlan) {
 			ret = ieee80211_key_enable_hw_accel(new);
-		else if (link_id < 0 || !sdata->vif.active_links ||
-			 BIT(link_id) & sdata->vif.active_links)
+		} else if (link_id < 0 || !sdata->vif.active_links ||
+			 BIT(link_id) & sdata->vif.active_links) {
 			new->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
+			if (!(new->conf.flags & (IEEE80211_KEY_FLAG_GENERATE_MMIC |
+						 IEEE80211_KEY_FLAG_PUT_MIC_SPACE |
+						 IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
+				decrease_tailroom_need_count(sdata, 1);
+		}
 	}
 
 	if (ret)
diff --git a/net/mptcp/mib.c b/net/mptcp/mib.c
index 6003e47c770a..171643815076 100644
--- a/net/mptcp/mib.c
+++ b/net/mptcp/mib.c
@@ -85,6 +85,7 @@ static const struct snmp_mib mptcp_snmp_list[] = {
 	SNMP_MIB_ITEM("DssFallback", MPTCP_MIB_DSSFALLBACK),
 	SNMP_MIB_ITEM("SimultConnectFallback", MPTCP_MIB_SIMULTCONNFALLBACK),
 	SNMP_MIB_ITEM("FallbackFailed", MPTCP_MIB_FALLBACKFAILED),
+	SNMP_MIB_ITEM("WinProbe", MPTCP_MIB_WINPROBE),
 };
 
 /* mptcp_mib_alloc - allocate percpu mib counters
diff --git a/net/mptcp/mib.h b/net/mptcp/mib.h
index 309bac6fea32..a1d3e9369fbb 100644
--- a/net/mptcp/mib.h
+++ b/net/mptcp/mib.h
@@ -88,6 +88,7 @@ enum linux_mptcp_mib_field {
 	MPTCP_MIB_DSSFALLBACK,		/* Bad or missing DSS */
 	MPTCP_MIB_SIMULTCONNFALLBACK,	/* Simultaneous connect */
 	MPTCP_MIB_FALLBACKFAILED,	/* Can't fallback due to msk status */
+	MPTCP_MIB_WINPROBE,		/* MPTCP-level zero window probe */
 	__MPTCP_MIB_MAX
 };
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 0292162a14ee..2d6b8de35c44 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -194,17 +194,26 @@ static bool mptcp_ooo_try_coalesce(struct mptcp_sock *msk, struct sk_buff *to,
  * - mptcp does not maintain a msk-level window clamp
  * - returns true when  the receive buffer is actually updated
  */
-static bool mptcp_rcvbuf_grow(struct sock *sk)
+static bool mptcp_rcvbuf_grow(struct sock *sk, u32 newval)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	const struct net *net = sock_net(sk);
-	int rcvwin, rcvbuf, cap;
+	u32 rcvwin, rcvbuf, cap, oldval;
+	u64 grow;
+	oldval = msk->rcvq_space.space;
+	msk->rcvq_space.space = newval;
 
 	if (!READ_ONCE(net->ipv4.sysctl_tcp_moderate_rcvbuf) ||
 	    (sk->sk_userlocks & SOCK_RCVBUF_LOCK))
 		return false;
 
-	rcvwin = msk->rcvq_space.space << 1;
+	/* DRS is always one RTT late. */
+	rcvwin = newval << 1;
+
+	/* slow start: allow the sender to double its rate. */
+	grow = (u64)rcvwin * (newval - oldval);
+	do_div(grow, oldval);
+	rcvwin += grow << 1;
 
 	if (!RB_EMPTY_ROOT(&msk->out_of_order_queue))
 		rcvwin += MPTCP_SKB_CB(msk->ooo_last_skb)->end_seq - msk->ack_seq;
@@ -334,7 +343,7 @@ end:
 	skb_set_owner_r(skb, sk);
 	/* do not grow rcvbuf for not-yet-accepted or orphaned sockets. */
 	if (sk->sk_socket)
-		mptcp_rcvbuf_grow(sk);
+		mptcp_rcvbuf_grow(sk, msk->rcvq_space.space);
 }
 
 static void mptcp_init_skb(struct sock *ssk, struct sk_buff *skb, int offset,
@@ -998,7 +1007,7 @@ static void __mptcp_clean_una(struct sock *sk)
 			if (WARN_ON_ONCE(!msk->recovery))
 				break;
 
-			WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+			msk->first_pending = mptcp_send_next(sk);
 		}
 
 		dfrag_clear(sk, dfrag);
@@ -1290,7 +1299,12 @@ alloc_skb:
 	if (copy == 0) {
 		u64 snd_una = READ_ONCE(msk->snd_una);
 
-		if (snd_una != msk->snd_nxt || tcp_write_queue_tail(ssk)) {
+		/* No need for zero probe if there are any data pending
+		 * either at the msk or ssk level; skb is the current write
+		 * queue tail and can be empty at this point.
+		 */
+		if (snd_una != msk->snd_nxt || skb->len ||
+		    skb != tcp_send_head(ssk)) {
 			tcp_remove_empty_skb(ssk);
 			return 0;
 		}
@@ -1341,6 +1355,7 @@ alloc_skb:
 		 mpext->dsn64);
 
 	if (zero_window_probe) {
+		MPTCP_INC_STATS(sock_net(ssk), MPTCP_MIB_WINPROBE);
 		mptcp_subflow_ctx(ssk)->rel_write_seq += copy;
 		mpext->frozen = 1;
 		if (READ_ONCE(msk->csum_enabled))
@@ -1543,7 +1558,7 @@ static int __subflow_push_pending(struct sock *sk, struct sock *ssk,
 			mptcp_update_post_push(msk, dfrag, ret);
 		}
 
-		WRITE_ONCE(msk->first_pending, mptcp_send_next(sk));
+		msk->first_pending = mptcp_send_next(sk);
 
 		if (msk->snd_burst <= 0 ||
 		    !sk_stream_memory_free(ssk) ||
@@ -1903,7 +1918,7 @@ static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
 			get_page(dfrag->page);
 			list_add_tail(&dfrag->list, &msk->rtx_queue);
 			if (!msk->first_pending)
-				WRITE_ONCE(msk->first_pending, dfrag);
+				msk->first_pending = dfrag;
 		}
 		pr_debug("msk=%p dfrag at seq=%llu len=%u sent=%u new=%d\n", msk,
 			 dfrag->data_seq, dfrag->data_len, dfrag->already_sent,
@@ -1936,22 +1951,36 @@ do_error:
 
 static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied);
 
-static int __mptcp_recvmsg_mskq(struct sock *sk,
-				struct msghdr *msg,
-				size_t len, int flags,
+static int __mptcp_recvmsg_mskq(struct sock *sk, struct msghdr *msg,
+				size_t len, int flags, int copied_total,
 				struct scm_timestamping_internal *tss,
 				int *cmsg_flags)
 {
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct sk_buff *skb, *tmp;
+	int total_data_len = 0;
 	int copied = 0;
 
 	skb_queue_walk_safe(&sk->sk_receive_queue, skb, tmp) {
-		u32 offset = MPTCP_SKB_CB(skb)->offset;
+		u32 delta, offset = MPTCP_SKB_CB(skb)->offset;
 		u32 data_len = skb->len - offset;
-		u32 count = min_t(size_t, len - copied, data_len);
+		u32 count;
 		int err;
 
+		if (flags & MSG_PEEK) {
+			/* skip already peeked skbs */
+			if (total_data_len + data_len <= copied_total) {
+				total_data_len += data_len;
+				continue;
+			}
+
+			/* skip the already peeked data in the current skb */
+			delta = copied_total - total_data_len;
+			offset += delta;
+			data_len -= delta;
+		}
+
+		count = min_t(size_t, len - copied, data_len);
 		if (!(flags & MSG_TRUNC)) {
 			err = skb_copy_datagram_msg(skb, offset, msg, count);
 			if (unlikely(err < 0)) {
@@ -1968,16 +1997,14 @@ static int __mptcp_recvmsg_mskq(struct sock *sk,
 
 		copied += count;
 
-		if (count < data_len) {
-			if (!(flags & MSG_PEEK)) {
+		if (!(flags & MSG_PEEK)) {
+			msk->bytes_consumed += count;
+			if (count < data_len) {
 				MPTCP_SKB_CB(skb)->offset += count;
 				MPTCP_SKB_CB(skb)->map_seq += count;
-				msk->bytes_consumed += count;
+				break;
 			}
-			break;
-		}
 
-		if (!(flags & MSG_PEEK)) {
 			/* avoid the indirect call, we know the destructor is sock_rfree */
 			skb->destructor = NULL;
 			skb->sk = NULL;
@@ -1985,7 +2012,6 @@
 			sk_mem_uncharge(sk, skb->truesize);
 			__skb_unlink(skb, &sk->sk_receive_queue);
 			skb_attempt_defer_free(skb);
-			msk->bytes_consumed += count;
 		}
 
 		if (copied >= len)
@@ -2049,9 +2075,7 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
 	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
 		goto new_measure;
 
-	msk->rcvq_space.space = msk->rcvq_space.copied;
-	if (mptcp_rcvbuf_grow(sk)) {
-
+	if (mptcp_rcvbuf_grow(sk, msk->rcvq_space.copied)) {
 		/* Make subflows follow along.  If we do not do this, we
 		 * get drops at subflow level if skbs can't be moved to
 		 * the mptcp rx queue fast enough (announced rcv_win can
@@ -2063,8 +2087,9 @@
 
 			ssk = mptcp_subflow_tcp_sock(subflow);
 			slow = lock_sock_fast(ssk);
-			tcp_sk(ssk)->rcvq_space.space = msk->rcvq_space.copied;
-			tcp_rcvbuf_grow(ssk);
+			/* subflows can be added before tcp_init_transfer() */
+			if (tcp_sk(ssk)->rcvq_space.space)
+				tcp_rcvbuf_grow(ssk, msk->rcvq_space.copied);
 			unlock_sock_fast(ssk, slow);
 		}
 	}
@@ -2183,7 +2208,8 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
 	while (copied < len) {
 		int err, bytes_read;
 
-		bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags, &tss, &cmsg_flags);
+		bytes_read = __mptcp_recvmsg_mskq(sk, msg, len - copied, flags,
+						  copied, &tss, &cmsg_flags);
 		if (unlikely(bytes_read < 0)) {
 			if (!copied)
 				copied = bytes_read;
@@ -2874,7 +2900,7 @@ static void __mptcp_clear_xmit(struct sock *sk)
 	struct mptcp_sock *msk = mptcp_sk(sk);
 	struct mptcp_data_frag *dtmp, *dfrag;
 
-	WRITE_ONCE(msk->first_pending, NULL);
+	msk->first_pending = NULL;
 	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
 		dfrag_clear(sk, dfrag);
 }
@@ -3414,9 +3440,6 @@ void __mptcp_data_acked(struct sock *sk)
 
 void __mptcp_check_push(struct sock *sk, struct sock *ssk)
 {
-	if (!mptcp_send_head(sk))
-		return;
-
 	if (!sock_owned_by_user(sk))
 		__mptcp_subflow_push_pending(sk, ssk, false);
 	else
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 52f9cfa4ce95..379a88e14e8d 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -414,7 +414,7 @@ static inline struct mptcp_data_frag *mptcp_send_head(const struct sock *sk)
 {
 	const struct mptcp_sock *msk = mptcp_sk(sk);
 
-	return READ_ONCE(msk->first_pending);
+	return msk->first_pending;
 }
 
 static inline struct mptcp_data_frag *mptcp_send_next(struct sock *sk)
diff --git a/net/netfilter/nft_connlimit.c b/net/netfilter/nft_connlimit.c
index 92b984fa8175..fc35a11cdca2 100644
--- a/net/netfilter/nft_connlimit.c
+++ b/net/netfilter/nft_connlimit.c
@@ -48,7 +48,7 @@ static inline void nft_connlimit_do_eval(struct nft_connlimit *priv,
 		return;
 	}
 
-	count = priv->list->count;
+	count = READ_ONCE(priv->list->count);
 
 	if ((count > priv->limit) ^ priv->invert) {
 		regs->verdict.code = NFT_BREAK;
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index d526e69a2a2b..6f2ae7cad731 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -22,6 +22,7 @@
 #include <net/netfilter/nf_conntrack_timeout.h>
 #include <net/netfilter/nf_conntrack_l4proto.h>
 #include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
 
 struct nft_ct_helper_obj  {
 	struct nf_conntrack_helper *helper4;
@@ -379,6 +380,14 @@ static bool nft_ct_tmpl_alloc_pcpu(void)
 }
 #endif
 
+static void __nft_ct_get_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
+{
+#ifdef CONFIG_NF_CONNTRACK_LABELS
+	if (priv->key == NFT_CT_LABELS)
+		nf_connlabels_put(ctx->net);
+#endif
+}
+
 static int nft_ct_get_init(const struct nft_ctx *ctx,
 			   const struct nft_expr *expr,
 			   const struct nlattr * const tb[])
@@ -413,6 +422,10 @@ static int nft_ct_get_init(const struct nft_ctx *ctx,
 		if (tb[NFTA_CT_DIRECTION] != NULL)
 			return -EINVAL;
 		len = NF_CT_LABELS_MAX_SIZE;
+
+		err = nf_connlabels_get(ctx->net, (len * BITS_PER_BYTE) - 1);
+		if (err)
+			return err;
 		break;
 #endif
 	case NFT_CT_HELPER:
@@ -494,7 +507,8 @@
 		case IP_CT_DIR_REPLY:
 			break;
 		default:
-			return -EINVAL;
+			err = -EINVAL;
+			goto err;
 		}
 	}
@@ -502,11 +516,11 @@
 	err = nft_parse_register_store(ctx, tb[NFTA_CT_DREG], &priv->dreg, NULL,
 				       NFT_DATA_VALUE, len);
 	if (err < 0)
-		return err;
+		goto err;
 
 	err = nf_ct_netns_get(ctx->net, ctx->family);
 	if (err < 0)
-		return err;
+		goto err;
 
 	if (priv->key == NFT_CT_BYTES ||
 	    priv->key == NFT_CT_PKTS  ||
@@ -514,6 +528,9 @@
 		nf_ct_set_acct(ctx->net, true);
 
 	return 0;
+err:
+	__nft_ct_get_destroy(ctx, priv);
+	return err;
 }
 
 static void __nft_ct_set_destroy(const struct nft_ctx *ctx, struct nft_ct *priv)
@@ -626,6 +643,9 @@ err1:
 static void nft_ct_get_destroy(const struct nft_ctx *ctx,
 			       const struct nft_expr *expr)
 {
+	struct nft_ct *priv = nft_expr_priv(expr);
+
+	__nft_ct_get_destroy(ctx, priv);
 	nf_ct_netns_put(ctx->net, ctx->family);
 }
@@ -1173,6 +1193,10 @@ static void nft_ct_helper_obj_eval(struct nft_object *obj,
 	if (help) {
 		rcu_assign_pointer(help->helper, to_assign);
 		set_bit(IPS_HELPER_BIT, &ct->status);
+
+		if ((ct->status & IPS_NAT_MASK) && !nfct_seqadj(ct))
+			if (!nfct_seqadj_ext_add(ct))
+				regs->verdict.code = NF_DROP;
 	}
 }
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 7e99894778d4..e119e460ccde 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -190,7 +190,7 @@ int sctp_rcv(struct sk_buff *skb)
 		goto discard_release;
 	nf_reset_ct(skb);
 
-	if (sk_filter(sk, skb))
+	if (sk_filter(sk, skb) || skb->len < sizeof(struct sctp_chunkhdr))
 		goto discard_release;
 
 	/* Create an SCTP packet structure. */
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index a64ae15b1a60..71734411ff4c 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -723,8 +723,10 @@ tls_device_rx_resync_async(struct tls_offload_resync_async *resync_async,
 		/* shouldn't get to wraparound:
 		 * too long in async stage, something bad happened
 		 */
-		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX))
+		if (WARN_ON_ONCE(resync_async->rcd_delta == USHRT_MAX)) {
+			tls_offload_rx_resync_async_request_cancel(resync_async);
 			return false;
+		}
 
 		/* asynchronous stage: log all headers seq such that
 		 * req_seq <= seq <= end_seq, and wait for real resync request
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 346dfd2bd987..03d07b54359a 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4136,8 +4136,7 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
 			rdev->wiphy.txq_quantum = old_txq_quantum;
 		}
 
-		if (old_rts_threshold)
-			kfree(old_radio_rts_threshold);
+		kfree(old_radio_rts_threshold);
 
 		return result;
 	}
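The tcp_input.c and mptcp/protocol.c hunks above move receive-buffer autotuning to a shared growth rule: the window target starts at twice the newly measured space (DRS runs one RTT behind) plus twice its relative growth since the previous sample. The following stand-alone user-space sketch is not taken from the patch; the helper name rcvwin_target() is invented for illustration, and do_div() is replaced by a plain 64-bit division.

#include <stdint.h>
#include <stdio.h>

/* Sketch of the growth arithmetic used by the reworked tcp_rcvbuf_grow()
 * and mptcp_rcvbuf_grow(): double the new measurement, then add twice the
 * relative growth versus the previous measurement.
 */
static uint32_t rcvwin_target(uint32_t oldval, uint32_t newval)
{
	uint64_t grow;
	uint32_t rcvwin;

	/* DRS is always one RTT late. */
	rcvwin = newval << 1;

	/* slow start: allow the sender to double its rate. */
	grow = (uint64_t)rcvwin * (newval - oldval);
	grow /= oldval;			/* stands in for do_div(grow, oldval) */
	rcvwin += (uint32_t)(grow << 1);

	return rcvwin;
}

int main(void)
{
	/* measured space grew from 64 KiB to 96 KiB over one RTT */
	printf("%u\n", (unsigned)rcvwin_target(64 * 1024, 96 * 1024));	/* 393216 (384 KiB) */
	return 0;
}

In the kernel functions this target is then further increased for out-of-order data and clamped against the sysctl limits before sk_rcvbuf is updated; the sketch only covers the slow-start growth step.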
