author    Ingo Molnar <mingo@kernel.org>  2019-05-28 23:16:22 +0200
committer Ingo Molnar <mingo@kernel.org>  2019-05-28 23:16:22 +0200
commit    849e96f30068d4f6f8352715e02a10533a46deba (patch)
tree      def395835d94b580b1b6225b4441b8b820bcba87 /include
parent    5322ea58a06da2e69c5ef36a9b4d4b9255edd423 (diff)
parent    a7350998a25ac10cdca5b33dee1d343a74debbfe (diff)
Merge tag 'perf-urgent-for-mingo-5.2-20190528' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/urgent
Pull perf/urgent fixes:

BPF:

  Jiri Olsa:

  - Fix up determination of the end of the kernel map, to avoid having BPF
    programs, which sit after the kernel headers and just before module text,
    mixed up into the kernel map.

tools UAPI header copies:

  Arnaldo Carvalho de Melo:

  - Update copies of files related to the new fspick, fsmount, fsconfig,
    fsopen, move_mount and open_tree syscalls.

  - Sync cpufeatures.h, sched.h, fs.h, drm.h, i915_drm.h and kvm.h headers.

Namespaces:

  Namhyung Kim:

  - Add missing byte-swap ops for namespace events when processing records
    from perf.data files that could have been recorded on an arch with a
    different endianness.

  - Fix access to the thread namespaces list by using the namespaces_lock.

perf data:

  Shawn Landden:

  - Fix 'strncat may truncate' build failure with recent gcc.

s/390:

  Thomas Richter:

  - Fix s390 missing module symbol and warning for non-root users in
    'perf record'.

arm64:

  Vitaly Chikunov:

  - Fix mksyscalltbl when system kernel headers are ahead of the kernel.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bpf.h            |  1
-rw-r--r--  include/linux/mlx5/eswitch.h   |  6
-rw-r--r--  include/linux/of_net.h         |  2
-rw-r--r--  include/linux/rhashtable.h     | 58
-rw-r--r--  include/linux/skbuff.h         |  9
-rw-r--r--  include/net/flow_offload.h     |  2
-rw-r--r--  include/net/ip6_fib.h          |  3
-rw-r--r--  include/net/sock.h             |  2
-rw-r--r--  include/uapi/linux/btf.h       |  2
9 files changed, 49 insertions(+), 36 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 59631dd0777c..4fb3aa2dc975 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -36,6 +36,7 @@ struct bpf_map_ops {
void (*map_free)(struct bpf_map *map);
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
void (*map_release_uref)(struct bpf_map *map);
+ void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
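The new map_lookup_elem_sys_only hook lets a map type serve lookups arriving via the bpf(2) syscall separately from lookups made by eBPF programs, e.g. so that a syscall-side read of an LRU map does not refresh the element's LRU position. A minimal sketch of how a map type might wire the hook; the reduced struct and the dummy_* names are hypothetical:

#include <stddef.h>

struct bpf_map;

struct bpf_map_ops_sketch {
	void *(*map_lookup_elem)(struct bpf_map *map, void *key);
	void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
};

/* Program-side lookup: may update internal state (e.g. LRU position). */
static void *dummy_lookup(struct bpf_map *map, void *key)
{
	/* ... find the element and refresh its LRU reference ... */
	return NULL;
}

/* Syscall-side lookup: read-only, no effect on eviction bookkeeping. */
static void *dummy_lookup_sys_only(struct bpf_map *map, void *key)
{
	/* ... find the element without touching LRU bookkeeping ... */
	return NULL;
}

static const struct bpf_map_ops_sketch dummy_map_ops = {
	.map_lookup_elem          = dummy_lookup,
	.map_lookup_elem_sys_only = dummy_lookup_sys_only,
};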
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index 0ca77dd1429c..cf226c190329 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -51,13 +51,13 @@ void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
u8 rep_type);
void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type);
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
- int vport,
+ u16 vport_num,
u8 rep_type);
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
- int vport);
+ u16 vport_num);
void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type);
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw);
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw,
- int vport, u32 sqn);
+ u16 vport_num, u32 sqn);
#endif
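Vport numbers fit in 16 bits on the device side, so the prototypes above switch from a signed int to u16. A self-contained illustration of the implicit-conversion surprise the old signed convention invited:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int vport = -1;                 /* old signed convention */
	uint16_t vport_num = vport;     /* silently becomes 65535 */

	printf("int %d -> u16 %u\n", vport, vport_num);
	return 0;
}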
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 9cd72aab76fe..0f0346e6829c 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -22,7 +22,7 @@ static inline int of_get_phy_mode(struct device_node *np)
static inline const void *of_get_mac_address(struct device_node *np)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
static inline struct net_device *of_find_net_device_by_node(struct device_node *np)
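Returning ERR_PTR(-ENODEV) from the !CONFIG_OF stub matches the real of_get_mac_address(), which reports failure via ERR_PTR() as well, so a caller can use a single IS_ERR() check in both configurations. A kernel-context sketch of the caller pattern; the surrounding probe code and variables are hypothetical:

/* Kernel-context sketch; np and ndev come from a hypothetical probe. */
const void *mac = of_get_mac_address(np);

if (IS_ERR(mac))
	eth_hw_addr_random(ndev);            /* no usable address in the DT */
else
	ether_addr_copy(ndev->dev_addr, mac);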
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index f7714d3b46bd..9f8bc06d4136 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -84,7 +84,7 @@ struct bucket_table {
struct lockdep_map dep_map;
- struct rhash_lock_head __rcu *buckets[] ____cacheline_aligned_in_smp;
+ struct rhash_lock_head *buckets[] ____cacheline_aligned_in_smp;
};
/*
@@ -261,13 +261,13 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void *arg);
void rhashtable_destroy(struct rhashtable *ht);
-struct rhash_lock_head __rcu **rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash);
-struct rhash_lock_head __rcu **__rht_bucket_nested(const struct bucket_table *tbl,
- unsigned int hash);
-struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
- struct bucket_table *tbl,
- unsigned int hash);
+struct rhash_lock_head **rht_bucket_nested(const struct bucket_table *tbl,
+ unsigned int hash);
+struct rhash_lock_head **__rht_bucket_nested(const struct bucket_table *tbl,
+ unsigned int hash);
+struct rhash_lock_head **rht_bucket_nested_insert(struct rhashtable *ht,
+ struct bucket_table *tbl,
+ unsigned int hash);
#define rht_dereference(p, ht) \
rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
@@ -284,21 +284,21 @@ struct rhash_lock_head __rcu **rht_bucket_nested_insert(struct rhashtable *ht,
#define rht_entry(tpos, pos, member) \
({ tpos = container_of(pos, typeof(*tpos), member); 1; })
-static inline struct rhash_lock_head __rcu *const *rht_bucket(
+static inline struct rhash_lock_head *const *rht_bucket(
const struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
-static inline struct rhash_lock_head __rcu **rht_bucket_var(
+static inline struct rhash_lock_head **rht_bucket_var(
struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? __rht_bucket_nested(tbl, hash) :
&tbl->buckets[hash];
}
-static inline struct rhash_lock_head __rcu **rht_bucket_insert(
+static inline struct rhash_lock_head **rht_bucket_insert(
struct rhashtable *ht, struct bucket_table *tbl, unsigned int hash)
{
return unlikely(tbl->nest) ? rht_bucket_nested_insert(ht, tbl, hash) :
@@ -349,6 +349,12 @@ static inline void rht_unlock(struct bucket_table *tbl,
local_bh_enable();
}
+static inline struct rhash_head __rcu *__rht_ptr(
+ struct rhash_lock_head *const *bkt)
+{
+ return (struct rhash_head __rcu *)((unsigned long)*bkt & ~BIT(0));
+}
+
/*
* Where 'bkt' is a bucket and might be locked:
* rht_ptr() dereferences that pointer and clears the lock bit.
@@ -356,30 +362,30 @@ static inline void rht_unlock(struct bucket_table *tbl,
* access is guaranteed, such as when destroying the table.
*/
static inline struct rhash_head *rht_ptr(
- struct rhash_lock_head __rcu * const *bkt,
+ struct rhash_lock_head *const *bkt,
struct bucket_table *tbl,
unsigned int hash)
{
- const struct rhash_lock_head *p =
- rht_dereference_bucket_rcu(*bkt, tbl, hash);
+ struct rhash_head __rcu *p = __rht_ptr(bkt);
- if ((((unsigned long)p) & ~BIT(0)) == 0)
+ if (!p)
return RHT_NULLS_MARKER(bkt);
- return (void *)(((unsigned long)p) & ~BIT(0));
+
+ return rht_dereference_bucket_rcu(p, tbl, hash);
}
static inline struct rhash_head *rht_ptr_exclusive(
- struct rhash_lock_head __rcu * const *bkt)
+ struct rhash_lock_head *const *bkt)
{
- const struct rhash_lock_head *p =
- rcu_dereference_protected(*bkt, 1);
+ struct rhash_head __rcu *p = __rht_ptr(bkt);
if (!p)
return RHT_NULLS_MARKER(bkt);
- return (void *)(((unsigned long)p) & ~BIT(0));
+
+ return rcu_dereference_protected(p, 1);
}
-static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
+static inline void rht_assign_locked(struct rhash_lock_head **bkt,
struct rhash_head *obj)
{
struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
@@ -390,7 +396,7 @@ static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
}
static inline void rht_assign_unlock(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bkt,
+ struct rhash_lock_head **bkt,
struct rhash_head *obj)
{
struct rhash_head __rcu **p = (struct rhash_head __rcu **)bkt;
@@ -587,7 +593,7 @@ static inline struct rhash_head *__rhashtable_lookup(
.ht = ht,
.key = key,
};
- struct rhash_lock_head __rcu * const *bkt;
+ struct rhash_lock_head *const *bkt;
struct bucket_table *tbl;
struct rhash_head *he;
unsigned int hash;
@@ -703,7 +709,7 @@ static inline void *__rhashtable_insert_fast(
.ht = ht,
.key = key,
};
- struct rhash_lock_head __rcu **bkt;
+ struct rhash_lock_head **bkt;
struct rhash_head __rcu **pprev;
struct bucket_table *tbl;
struct rhash_head *head;
@@ -989,7 +995,7 @@ static inline int __rhashtable_remove_fast_one(
struct rhash_head *obj, const struct rhashtable_params params,
bool rhlist)
{
- struct rhash_lock_head __rcu **bkt;
+ struct rhash_lock_head **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
unsigned int hash;
@@ -1141,7 +1147,7 @@ static inline int __rhashtable_replace_fast(
struct rhash_head *obj_old, struct rhash_head *obj_new,
const struct rhashtable_params params)
{
- struct rhash_lock_head __rcu **bkt;
+ struct rhash_lock_head **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
unsigned int hash;
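The __rcu markings move around above because bit 0 of each bucket pointer now doubles as the bucket lock, so the raw bucket word is not a plain RCU pointer: __rht_ptr() strips the lock bit before the value is dereferenced. A self-contained userspace illustration of the pointer-tagging idea (not the kernel's actual locking):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct node { int value; };

/* Pointers to naturally aligned objects have bit 0 clear, so that bit
 * can carry a flag (here: "locked") and be masked off before use. */
#define LOCK_BIT 1UL

static uintptr_t tag_locked(struct node *p) { return (uintptr_t)p | LOCK_BIT; }
static struct node *untag(uintptr_t v)      { return (struct node *)(v & ~LOCK_BIT); }
static int is_locked(uintptr_t v)           { return v & LOCK_BIT; }

int main(void)
{
	struct node n = { .value = 42 };
	uintptr_t bucket = tag_locked(&n);

	assert(is_locked(bucket));
	printf("value = %d\n", untag(bucket)->value);
	return 0;
}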
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 6d58fa8a65fd..2ee5e63195c0 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1434,10 +1434,12 @@ static inline void skb_zcopy_clear(struct sk_buff *skb, bool zerocopy)
struct ubuf_info *uarg = skb_zcopy(skb);
if (uarg) {
- if (uarg->callback == sock_zerocopy_callback) {
+ if (skb_zcopy_is_nouarg(skb)) {
+ /* no notification callback */
+ } else if (uarg->callback == sock_zerocopy_callback) {
uarg->zerocopy = uarg->zerocopy && zerocopy;
sock_zerocopy_put(uarg);
- } else if (!skb_zcopy_is_nouarg(skb)) {
+ } else {
uarg->callback(uarg, zerocopy);
}
@@ -2691,7 +2693,8 @@ static inline int skb_orphan_frags(struct sk_buff *skb, gfp_t gfp_mask)
{
if (likely(!skb_zcopy(skb)))
return 0;
- if (skb_uarg(skb)->callback == sock_zerocopy_callback)
+ if (!skb_zcopy_is_nouarg(skb) &&
+ skb_uarg(skb)->callback == sock_zerocopy_callback)
return 0;
return skb_copy_ubufs(skb, gfp_mask);
}
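Both hunks reorder the tests so that skb_zcopy_is_nouarg() is checked before uarg->callback is read: with the nouarg flag set, the stored uarg is an opaque cookie rather than a real struct ubuf_info, so dereferencing it first was a latent bug. A reduced model of the rule:

#include <stdbool.h>

/* Reduced model: when the "nouarg" flag is set, the stored pointer
 * is an opaque cookie and must never be dereferenced. */
struct ubuf_info { void (*callback)(struct ubuf_info *, bool); };

struct fake_skb {
	struct ubuf_info *uarg;   /* real object, or a cookie */
	bool zcopy_nouarg;        /* true: uarg is only a cookie */
};

static void zcopy_clear(struct fake_skb *skb, bool zerocopy)
{
	if (!skb->uarg)
		return;
	if (skb->zcopy_nouarg)
		return;                               /* cookie: hands off */
	skb->uarg->callback(skb->uarg, zerocopy);     /* real object: notify */
}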
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 6200900434e1..a2df99f9b196 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -71,6 +71,8 @@ void flow_rule_match_eth_addrs(const struct flow_rule *rule,
struct flow_match_eth_addrs *out);
void flow_rule_match_vlan(const struct flow_rule *rule,
struct flow_match_vlan *out);
+void flow_rule_match_cvlan(const struct flow_rule *rule,
+ struct flow_match_vlan *out);
void flow_rule_match_ipv4_addrs(const struct flow_rule *rule,
struct flow_match_ipv4_addrs *out);
void flow_rule_match_ipv6_addrs(const struct flow_rule *rule,
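flow_rule_match_cvlan() fills the same struct flow_match_vlan as flow_rule_match_vlan(), but from the inner (customer) VLAN tag of a QinQ rule. A hedged driver-side sketch; parse_vlans() is a hypothetical helper:

/* Kernel-context sketch; parse_vlans() is hypothetical. */
static void parse_vlans(const struct flow_rule *rule)
{
	struct flow_match_vlan match;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		flow_rule_match_vlan(rule, &match);    /* outer tag */
		/* ... use match.key->vlan_id, match.mask->vlan_id ... */
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		flow_rule_match_cvlan(rule, &match);   /* inner tag */
		/* ... */
	}
}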
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 40105738e2f6..525f701653ca 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -167,7 +167,8 @@ struct fib6_info {
dst_nocount:1,
dst_nopolicy:1,
dst_host:1,
- unused:3;
+ fib6_destroying:1,
+ unused:2;
struct fib6_nh fib6_nh;
struct rcu_head rcu;
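The new fib6_destroying flag, which marks a fib6_info whose teardown has started, is carved out of the previously unused bits, so the bitfield still packs into the same storage. A self-contained check that the layout change is size-neutral:

#include <stdio.h>

/* Reduced layout check: taking one flag out of 'unused' keeps the
 * bitfield the same width, so the struct does not grow. */
struct before { unsigned dst_host:1, unused:3; };
struct after  { unsigned dst_host:1, fib6_destroying:1, unused:2; };

int main(void)
{
	printf("before=%zu after=%zu\n",
	       sizeof(struct before), sizeof(struct after));
	return 0;
}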
diff --git a/include/net/sock.h b/include/net/sock.h
index 4d208c0f9c14..0680fa988497 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1473,7 +1473,7 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
sk->sk_wmem_queued -= skb->truesize;
sk_mem_uncharge(sk, skb->truesize);
- if (!sk->sk_tx_skb_cache) {
+ if (!sk->sk_tx_skb_cache && !skb_cloned(skb)) {
skb_zcopy_clear(skb, true);
sk->sk_tx_skb_cache = skb;
return;
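skb_cloned() is true while a clone still shares the skb's data and skb_shared_info, so such a buffer must not be recycled through sk_tx_skb_cache and handed out as if it were exclusively owned; hence the extra guard. A kernel-context restatement of the condition, where tx_cache_reusable() is a hypothetical helper:

/* Hypothetical helper restating the guard above: only cache an skb
 * if the slot is free and no clone shares its data. */
static inline bool tx_cache_reusable(const struct sock *sk,
				     const struct sk_buff *skb)
{
	return !sk->sk_tx_skb_cache && !skb_cloned(skb);
}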
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 9310652ca4f9..63ae4a39e58b 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -83,7 +83,7 @@ struct btf_type {
* is the 32 bits arrangement:
*/
#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
-#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16)
+#define BTF_INT_OFFSET(VAL) (((VAL) & 0x00ff0000) >> 16)
#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
/* Attributes stored in the BTF_INT_ENCODING */
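The one-character move in BTF_INT_OFFSET matters because '&' binds more loosely than the arithmetic operators, so an unparenthesized VAL groups incorrectly whenever the macro argument is an expression. A self-contained demonstration:

#include <stdio.h>

#define BTF_INT_OFFSET_OLD(VAL) (((VAL & 0x00ff0000)) >> 16)  /* buggy */
#define BTF_INT_OFFSET_NEW(VAL) (((VAL) & 0x00ff0000) >> 16)  /* fixed */

int main(void)
{
	unsigned a = 0x00ff0000, b = 0x01010000;

	/* old: a + (b & 0x00ff0000) == 0x01ff0000 >> 16 -> 511
	 * new: (a + b) & 0x00ff0000 == 0x00000000 >> 16 -> 0   */
	printf("old=%u new=%u\n",
	       BTF_INT_OFFSET_OLD(a + b), BTF_INT_OFFSET_NEW(a + b));
	return 0;
}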