| author | Tvrtko Ursulin <tvrtko.ursulin@intel.com> | 2022-02-25 13:44:44 +0000 |
|---|---|---|
| committer | Tvrtko Ursulin <tvrtko.ursulin@intel.com> | 2022-02-25 13:44:44 +0000 |
| commit | db927686e43ffebfc5d1693c1cb4fd74f462d99b (patch) | |
| tree | 0cbd82aea0825efc1cb10bad4af664ed1bed9b83 /net/ipv4 | |
| parent | b8986c889e7ac26c57cb548f8f344456fa925a2f (diff) | |
| parent | 54f43c17d681f6d9523fcfaeefc9df77993802e1 (diff) | |
Merge drm/drm-next into drm-intel-gt-next
Matt needed some buddy allocator changes for landing DG2 small BAR
support patches.
Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Diffstat (limited to 'net/ipv4')
| -rw-r--r-- | net/ipv4/ipmr.c | 2 |
| -rw-r--r-- | net/ipv4/netfilter/Kconfig | 4 |
| -rw-r--r-- | net/ipv4/tcp.c | 30 |
| -rw-r--r-- | net/ipv4/tcp_input.c | 2 |
4 files changed, 23 insertions, 15 deletions
```diff
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 07274619b9ea..29bbe2b08ae9 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -256,7 +256,9 @@ static int __net_init ipmr_rules_init(struct net *net)
 	return 0;
 
 err2:
+	rtnl_lock();
 	ipmr_free_table(mrt);
+	rtnl_unlock();
 err1:
 	fib_rules_unregister(ops);
 	return err;
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 67087f95579f..aab384126f61 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -58,10 +58,6 @@ config NF_TABLES_ARP
 
 endif # NF_TABLES
 
-config NF_FLOW_TABLE_IPV4
-	tristate
-	select NF_FLOW_TABLE_INET
-
 config NF_DUP_IPV4
 	tristate "Netfilter IPv4 packet duplication to alternate destination"
 	depends on !NF_CONNTRACK || NF_CONNTRACK
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 78e81465f5f3..02cb275e5487 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -937,6 +937,22 @@ void tcp_remove_empty_skb(struct sock *sk)
 	}
 }
 
+/* skb changing from pure zc to mixed, must charge zc */
+static int tcp_downgrade_zcopy_pure(struct sock *sk, struct sk_buff *skb)
+{
+	if (unlikely(skb_zcopy_pure(skb))) {
+		u32 extra = skb->truesize -
+			    SKB_TRUESIZE(skb_end_offset(skb));
+
+		if (!sk_wmem_schedule(sk, extra))
+			return -ENOMEM;
+
+		sk_mem_charge(sk, extra);
+		skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
+	}
+	return 0;
+}
+
 static struct sk_buff *tcp_build_frag(struct sock *sk, int size_goal, int flags,
 				      struct page *page, int offset, size_t *size)
 {
@@ -972,7 +988,7 @@ new_segment:
 			tcp_mark_push(tp, skb);
 			goto new_segment;
 		}
-		if (!sk_wmem_schedule(sk, copy))
+		if (tcp_downgrade_zcopy_pure(sk, skb) || !sk_wmem_schedule(sk, copy))
 			return NULL;
 
 		if (can_coalesce) {
@@ -1320,16 +1336,8 @@ new_segment:
 
 		copy = min_t(int, copy, pfrag->size - pfrag->offset);
 
-		/* skb changing from pure zc to mixed, must charge zc */
-		if (unlikely(skb_zcopy_pure(skb))) {
-			if (!sk_wmem_schedule(sk, skb->data_len))
-				goto wait_for_space;
-
-			sk_mem_charge(sk, skb->data_len);
-			skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY;
-		}
-
-		if (!sk_wmem_schedule(sk, copy))
+		if (tcp_downgrade_zcopy_pure(sk, skb) ||
+		    !sk_wmem_schedule(sk, copy))
 			goto wait_for_space;
 
 		err = skb_copy_to_page_nocache(sk, &msg->msg_iter, skb,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index dc49a3d551eb..bfe4112e000c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1660,6 +1660,8 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
 	    (mss != tcp_skb_seglen(skb)))
 		goto out;
 
+	if (!tcp_skb_can_collapse(prev, skb))
+		goto out;
 	len = skb->len;
 	pcount = tcp_skb_pcount(skb);
 	if (tcp_skb_shift(prev, skb, pcount, len))
```
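The tcp.c hunks replace two open-coded accounting sequences with the new tcp_downgrade_zcopy_pure() helper: when a pure-zerocopy skb is about to gain copied data, the memory that the pure-zerocopy path had left uncharged (everything beyond the base truesize of the linear area) must be charged to the socket before SKBFL_PURE_ZEROCOPY is cleared. Below is a minimal user-space sketch of that accounting; the structs, MOCK_SKB_TRUESIZE, its overhead constant, and all the values are invented stand-ins, not kernel API:

```c
/* Hedged sketch: user-space mock of the "downgrade pure zerocopy" charge. */
#include <stdio.h>
#include <stdbool.h>

struct mock_skb {
	unsigned int truesize;    /* total memory attributed to this skb */
	unsigned int end_offset;  /* stand-in for skb_end_offset(): linear buffer size */
	bool zcopy_pure;          /* stand-in for the SKBFL_PURE_ZEROCOPY flag */
};

struct mock_sock {
	long wmem_budget;   /* stand-in for what sk_wmem_schedule() checks */
	long mem_charged;   /* stand-in for sk_mem_charge() accounting */
};

/* Simplified stand-in for SKB_TRUESIZE(X): X plus fixed struct overheads. */
#define MOCK_SKB_OVERHEAD 320u
#define MOCK_SKB_TRUESIZE(x) ((x) + MOCK_SKB_OVERHEAD)

/* Mirrors the shape of the new helper: charge only the memory the
 * pure-zerocopy path had skipped, then clear the pure-zc flag. */
static int mock_downgrade_zcopy_pure(struct mock_sock *sk, struct mock_skb *skb)
{
	if (skb->zcopy_pure) {
		unsigned int extra = skb->truesize -
				     MOCK_SKB_TRUESIZE(skb->end_offset);

		if (sk->wmem_budget < (long)extra)
			return -1; /* -ENOMEM in the kernel */

		sk->wmem_budget -= extra;
		sk->mem_charged += extra;
		skb->zcopy_pure = false;
	}
	return 0;
}

int main(void)
{
	struct mock_sock sk = { .wmem_budget = 4096, .mem_charged = 0 };
	struct mock_skb skb = { .truesize = 2048, .end_offset = 256,
				.zcopy_pure = true };

	if (mock_downgrade_zcopy_pure(&sk, &skb) == 0)
		printf("charged %ld, budget left %ld\n",
		       sk.mem_charged, sk.wmem_budget);
	return 0;
}
```

With the sample numbers, 2048 - (256 + 320) = 1472 bytes get charged. This mirrors how the helper now charges skb->truesize minus SKB_TRUESIZE(skb_end_offset(skb)) rather than the skb->data_len that the removed open-coded sequence used.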

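The ipmr.c hunk takes the RTNL lock around ipmr_free_table() in the error unwind path, since that free routine expects to run under RTNL. A minimal sketch of the same pattern, assuming a pthread mutex as a stand-in for RTNL and invented names throughout:

```c
/* Hedged sketch: a teardown helper whose contract is "call with the lock
 * held" forces its error-path callers to take that lock too. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for RTNL */

struct table { int id; };

/* Like ipmr_free_table(): must be called with cfg_lock held
 * (the kernel version would ASSERT_RTNL()). */
static void free_table(struct table *t)
{
	free(t);
}

/* Mirrors the shape of ipmr_rules_init(): allocate, then unwind on a
 * later failure, honouring the locking contract of the free routine. */
static int rules_init(struct table **out, int fail_setup)
{
	struct table *t = malloc(sizeof(*t));

	if (!t)
		return -1;

	if (fail_setup) {
		pthread_mutex_lock(&cfg_lock);   /* the added rtnl_lock() */
		free_table(t);
		pthread_mutex_unlock(&cfg_lock); /* the added rtnl_unlock() */
		return -1;
	}

	*out = t;
	return 0;
}

int main(void)
{
	struct table *t = NULL;

	/* Simulate the err2: unwind path. */
	if (rules_init(&t, 1) != 0)
		printf("init failed; table freed under cfg_lock\n");
	return 0;
}
```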