Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/tlb.h             |   4
-rw-r--r--  include/linux/cgroup.h                |   1
-rw-r--r--  include/linux/gfp.h                   |  18
-rw-r--r--  include/linux/license.h               |   2
-rw-r--r--  include/linux/mlx5/fs.h               |  12
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h         |  74
-rw-r--r--  include/linux/mlx5/vport.h            |   2
-rw-r--r--  include/linux/mm.h                    |  29
-rw-r--r--  include/linux/mmc/mmc.h               |   2
-rw-r--r--  include/linux/netdevice.h             |   1
-rw-r--r--  include/linux/pgtable.h               |  18
-rw-r--r--  include/linux/rhashtable.h            |  61
-rw-r--r--  include/linux/skbuff.h                |   1
-rw-r--r--  include/linux/soc/mediatek/mtk_wed.h  |   8
-rw-r--r--  include/linux/stmmac.h                |   1
-rw-r--r--  include/net/act_api.h                 |  10
-rw-r--r--  include/net/af_rxrpc.h                |   2
-rw-r--r--  include/net/bluetooth/hci.h           |  12
-rw-r--r--  include/net/cfg802154.h               |  18
-rw-r--r--  include/net/devlink.h                 |  39
-rw-r--r--  include/net/mana/gdma.h               |   9
-rw-r--r--  include/net/nl802154.h                |  43
-rw-r--r--  include/net/ping.h                    |   3
-rw-r--r--  include/net/sock.h                    |   6
-rw-r--r--  include/net/tc_wrapper.h              | 251
-rw-r--r--  include/trace/events/fscache.h        |   2
-rw-r--r--  include/trace/events/rxrpc.h          | 486
-rw-r--r--  include/uapi/linux/devlink.h          |  13
-rw-r--r--  include/uapi/linux/ethtool_netlink.h  |  14
-rw-r--r--  include/uapi/linux/net_tstamp.h       |   3
-rw-r--r--  include/uapi/linux/openvswitch.h      |  14
31 files changed, 983 insertions, 176 deletions
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 492dce43236e..cab7cfebf40b 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -222,12 +222,16 @@ extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
#define tlb_needs_table_invalidate() (true)
#endif
+void tlb_remove_table_sync_one(void);
+
#else
#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif
+static inline void tlb_remove_table_sync_one(void) { }
+
#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 528bd44b59e2..2b7d077de7ef 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -68,6 +68,7 @@ struct css_task_iter {
struct list_head iters_node; /* css_set->task_iters */
};
+extern struct file_system_type cgroup_fs_type;
extern struct cgroup_root cgrp_dfl_root;
extern struct css_set init_css_set;
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index ef4aea3b356e..65a78773dcca 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -210,6 +210,20 @@ alloc_pages_bulk_array_node(gfp_t gfp, int nid, unsigned long nr_pages, struct p
return __alloc_pages_bulk(gfp, nid, NULL, nr_pages, NULL, page_array);
}
+static inline void warn_if_node_offline(int this_node, gfp_t gfp_mask)
+{
+ gfp_t warn_gfp = gfp_mask & (__GFP_THISNODE|__GFP_NOWARN);
+
+ if (warn_gfp != (__GFP_THISNODE|__GFP_NOWARN))
+ return;
+
+ if (node_online(this_node))
+ return;
+
+ pr_warn("%pGg allocation from offline node %d\n", &gfp_mask, this_node);
+ dump_stack();
+}
+
/*
* Allocate pages, preferring the node given as nid. The node must be valid and
* online. For more general interface, see alloc_pages_node().
@@ -218,7 +232,7 @@ static inline struct page *
__alloc_pages_node(int nid, gfp_t gfp_mask, unsigned int order)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON((gfp_mask & __GFP_THISNODE) && !node_online(nid));
+ warn_if_node_offline(nid, gfp_mask);
return __alloc_pages(gfp_mask, order, nid, NULL);
}
@@ -227,7 +241,7 @@ static inline
struct folio *__folio_alloc_node(gfp_t gfp, unsigned int order, int nid)
{
VM_BUG_ON(nid < 0 || nid >= MAX_NUMNODES);
- VM_WARN_ON((gfp & __GFP_THISNODE) && !node_online(nid));
+ warn_if_node_offline(nid, gfp);
return __folio_alloc(gfp, order, nid, NULL);
}
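
The new helper only warns for requests that combine __GFP_THISNODE with __GFP_NOWARN, i.e. node-strict allocations that would otherwise fail silently. A minimal sketch of a call that would now trigger the report if the node is offline (the node id is hypothetical):

	/* Node-strict, warning-suppressed request: if node 2 is offline,
	 * warn_if_node_offline() prints the gfp mask and a stack trace
	 * instead of letting the failure go unnoticed.
	 */
	struct page *page = __alloc_pages_node(2, GFP_KERNEL | __GFP_THISNODE |
						  __GFP_NOWARN, 0);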
diff --git a/include/linux/license.h b/include/linux/license.h
index ad937f57f2cb..7cce390f120b 100644
--- a/include/linux/license.h
+++ b/include/linux/license.h
@@ -2,8 +2,6 @@
#ifndef __LICENSE_H
#define __LICENSE_H
-#include <linux/string.h>
-
static inline int license_is_gpl_compatible(const char *license)
{
return (strcmp(license, "GPL") == 0
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index c7a91981cd5a..ba6958b49a8e 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -50,6 +50,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_PORT,
MLX5_FLOW_DESTINATION_TYPE_COUNTER,
MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM,
+ MLX5_FLOW_DESTINATION_TYPE_RANGE,
};
enum {
@@ -143,6 +144,10 @@ enum {
MLX5_FLOW_DEST_VPORT_REFORMAT_ID = BIT(1),
};
+enum mlx5_flow_dest_range_field {
+ MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN = 0,
+};
+
struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
@@ -156,6 +161,13 @@ struct mlx5_flow_destination {
struct mlx5_pkt_reformat *pkt_reformat;
u8 flags;
} vport;
+ struct {
+ struct mlx5_flow_table *hit_ft;
+ struct mlx5_flow_table *miss_ft;
+ enum mlx5_flow_dest_range_field field;
+ u32 min;
+ u32 max;
+ } range;
u32 sampler_id;
};
};
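
A hedged sketch of how a steering destination of the new range type might be filled in; hit_ft, miss_ft and the packet-length bounds are hypothetical, and the exact inclusivity of min/max is left to the driver:

	struct mlx5_flow_destination dest = {
		.type          = MLX5_FLOW_DESTINATION_TYPE_RANGE,
		.range.field   = MLX5_FLOW_DEST_RANGE_FIELD_PKT_LEN,
		.range.min     = 0,
		.range.max     = 256,
		.range.hit_ft  = hit_ft,	/* taken when the field is in range */
		.range.miss_ft = miss_ft,	/* taken otherwise */
	};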
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 300b56ea5ff4..152d2d7f8743 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -68,6 +68,7 @@ enum {
MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2,
MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
MLX5_SET_HCA_CAP_OP_MOD_ROCE = 0x4,
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2 = 0x20,
MLX5_SET_HCA_CAP_OP_MODE_PORT_SELECTION = 0x25,
};
@@ -1880,7 +1881,10 @@ struct mlx5_ifc_cmd_hca_cap_bits {
};
struct mlx5_ifc_cmd_hca_cap_2_bits {
- u8 reserved_at_0[0xa0];
+ u8 reserved_at_0[0x80];
+
+ u8 migratable[0x1];
+ u8 reserved_at_81[0x1f];
u8 max_reformat_insert_size[0x8];
u8 max_reformat_insert_offset[0x8];
@@ -6109,6 +6113,38 @@ struct mlx5_ifc_match_definer_format_32_bits {
u8 inner_dmac_15_0[0x10];
};
+enum {
+ MLX5_IFC_DEFINER_FORMAT_ID_SELECT = 61,
+};
+
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_UNUSED 0x0
+#define MLX5_IFC_DEFINER_FORMAT_OFFSET_OUTER_ETH_PKT_LEN 0x48
+#define MLX5_IFC_DEFINER_DW_SELECTORS_NUM 9
+#define MLX5_IFC_DEFINER_BYTE_SELECTORS_NUM 8
+
+struct mlx5_ifc_match_definer_match_mask_bits {
+ u8 reserved_at_1c0[5][0x20];
+ u8 match_dw_8[0x20];
+ u8 match_dw_7[0x20];
+ u8 match_dw_6[0x20];
+ u8 match_dw_5[0x20];
+ u8 match_dw_4[0x20];
+ u8 match_dw_3[0x20];
+ u8 match_dw_2[0x20];
+ u8 match_dw_1[0x20];
+ u8 match_dw_0[0x20];
+
+ u8 match_byte_7[0x8];
+ u8 match_byte_6[0x8];
+ u8 match_byte_5[0x8];
+ u8 match_byte_4[0x8];
+
+ u8 match_byte_3[0x8];
+ u8 match_byte_2[0x8];
+ u8 match_byte_1[0x8];
+ u8 match_byte_0[0x8];
+};
+
struct mlx5_ifc_match_definer_bits {
u8 modify_field_select[0x40];
@@ -6117,9 +6153,41 @@ struct mlx5_ifc_match_definer_bits {
u8 reserved_at_80[0x10];
u8 format_id[0x10];
- u8 reserved_at_a0[0x160];
+ u8 reserved_at_a0[0x60];
- u8 match_mask[16][0x20];
+ u8 format_select_dw3[0x8];
+ u8 format_select_dw2[0x8];
+ u8 format_select_dw1[0x8];
+ u8 format_select_dw0[0x8];
+
+ u8 format_select_dw7[0x8];
+ u8 format_select_dw6[0x8];
+ u8 format_select_dw5[0x8];
+ u8 format_select_dw4[0x8];
+
+ u8 reserved_at_100[0x18];
+ u8 format_select_dw8[0x8];
+
+ u8 reserved_at_120[0x20];
+
+ u8 format_select_byte3[0x8];
+ u8 format_select_byte2[0x8];
+ u8 format_select_byte1[0x8];
+ u8 format_select_byte0[0x8];
+
+ u8 format_select_byte7[0x8];
+ u8 format_select_byte6[0x8];
+ u8 format_select_byte5[0x8];
+ u8 format_select_byte4[0x8];
+
+ u8 reserved_at_180[0x40];
+
+ union {
+ struct {
+ u8 match_mask[16][0x20];
+ };
+ struct mlx5_ifc_match_definer_match_mask_bits match_mask_format;
+ };
};
struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index aad53cb72f17..7f31432f44c2 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -132,4 +132,6 @@ int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev,
int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev);
u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev);
+int mlx5_vport_get_other_func_cap(struct mlx5_core_dev *dev, u16 function_id, void *out,
+ u16 opmod);
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8bbcccbc5565..974ccca609d2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1852,6 +1852,25 @@ static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodem
__show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
}
+/*
+ * Parameter block passed down to zap_pte_range in exceptional cases.
+ */
+struct zap_details {
+ struct folio *single_folio; /* Locked folio to be unmapped */
+ bool even_cows; /* Zap COWed private pages too? */
+ zap_flags_t zap_flags; /* Extra flags for zapping */
+};
+
+/*
+ * Whether to drop the pte markers, for example, the uffd-wp information for
+ * file-backed memory. This should only be specified when we will completely
+ * drop the page in the mm, either by truncation or unmapping of the vma. By
+ * default, the flag is not set.
+ */
+#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
+/* Set in unmap_vmas() to indicate a final unmap call. Only used by hugetlb */
+#define ZAP_FLAG_UNMAP ((__force zap_flags_t) BIT(1))
+
#ifdef CONFIG_MMU
extern bool can_do_mlock(void);
#else
@@ -1869,6 +1888,8 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
+void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+ unsigned long size, struct zap_details *details);
void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
struct vm_area_struct *start_vma, unsigned long start,
unsigned long end);
@@ -3467,12 +3488,4 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
}
#endif
-/*
- * Whether to drop the pte markers, for example, the uffd-wp information for
- * file-backed memory. This should only be specified when we will completely
- * drop the page in the mm, either by truncation or unmapping of the vma. By
- * default, the flag is not set.
- */
-#define ZAP_FLAG_DROP_MARKER ((__force zap_flags_t) BIT(0))
-
#endif /* _LINUX_MM_H */
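
For reference, a minimal sketch of a caller of the newly exported zap_page_range_single(); passing a NULL details pointer keeps the default behaviour, while the flags shown correspond to the truncate/unmap cases described in the comments above (vma, address and size are assumed to be in hand):

	struct zap_details details = {
		.even_cows = true,                 /* also zap COWed private pages */
		.zap_flags = ZAP_FLAG_DROP_MARKER, /* drop pte markers, e.g. uffd-wp */
	};

	zap_page_range_single(vma, address, size, &details);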
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 9c50bc40f8ff..6f7993803ee7 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -451,7 +451,7 @@ static inline bool mmc_ready_for_data(u32 status)
#define MMC_SECURE_TRIM1_ARG 0x80000001
#define MMC_SECURE_TRIM2_ARG 0x80008000
#define MMC_SECURE_ARGS 0x80000000
-#define MMC_TRIM_ARGS 0x00008001
+#define MMC_TRIM_OR_DISCARD_ARGS 0x00008003
#define mmc_driver_type_mask(n) (1 << (n))
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 29ae964e3b89..2287cb8eb9e4 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -78,6 +78,7 @@ struct xdp_buff;
void synchronize_net(void);
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
+void netdev_sw_irq_coalesce_default_on(struct net_device *dev);
/* Backlog congestion levels */
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index a108b60a6962..5f0d7d0b9471 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -165,6 +165,13 @@ static inline pte_t *virt_to_kpte(unsigned long vaddr)
return pmd_none(*pmd) ? NULL : pte_offset_kernel(pmd, vaddr);
}
+#ifndef pmd_young
+static inline int pmd_young(pmd_t pmd)
+{
+ return 0;
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
@@ -260,6 +267,17 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif
+#ifndef arch_has_hw_nonleaf_pmd_young
+/*
+ * Return whether the accessed bit in non-leaf PMD entries is supported on the
+ * local CPU.
+ */
+static inline bool arch_has_hw_nonleaf_pmd_young(void)
+{
+ return IS_ENABLED(CONFIG_ARCH_HAS_NONLEAF_PMD_YOUNG);
+}
+#endif
+
#ifndef arch_has_hw_pte_young
/*
* Return whether the accessed bit is supported on the local CPU.
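
The two additions are meant to be used together by page-table walkers; a sketch of the intended guard (pmd is assumed to be a non-leaf entry the walker is looking at):

	/* Only trust the accessed bit of a non-leaf PMD when the CPU sets it
	 * in hardware; on other architectures pmd_young() falls back to 0.
	 */
	if (arch_has_hw_nonleaf_pmd_young() && pmd_young(pmd)) {
		/* the page table under this PMD was accessed recently */
	}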
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 68dab3e08aad..5b5357c0bd8c 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -323,29 +323,36 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
* When we write to a bucket without unlocking, we use rht_assign_locked().
*/
-static inline void rht_lock(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bkt)
+static inline unsigned long rht_lock(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bkt)
{
- local_bh_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
bit_spin_lock(0, (unsigned long *)bkt);
lock_map_acquire(&tbl->dep_map);
+ return flags;
}
-static inline void rht_lock_nested(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bucket,
- unsigned int subclass)
+static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
+ struct rhash_lock_head __rcu **bucket,
+ unsigned int subclass)
{
- local_bh_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
bit_spin_lock(0, (unsigned long *)bucket);
lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+ return flags;
}
static inline void rht_unlock(struct bucket_table *tbl,
- struct rhash_lock_head __rcu **bkt)
+ struct rhash_lock_head __rcu **bkt,
+ unsigned long flags)
{
lock_map_release(&tbl->dep_map);
bit_spin_unlock(0, (unsigned long *)bkt);
- local_bh_enable();
+ local_irq_restore(flags);
}
static inline struct rhash_head *__rht_ptr(
@@ -393,7 +400,8 @@ static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
static inline void rht_assign_unlock(struct bucket_table *tbl,
struct rhash_lock_head __rcu **bkt,
- struct rhash_head *obj)
+ struct rhash_head *obj,
+ unsigned long flags)
{
if (rht_is_a_nulls(obj))
obj = NULL;
@@ -401,7 +409,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
rcu_assign_pointer(*bkt, (void *)obj);
preempt_enable();
__release(bitlock);
- local_bh_enable();
+ local_irq_restore(flags);
}
/**
@@ -706,6 +714,7 @@ static inline void *__rhashtable_insert_fast(
struct rhash_head __rcu **pprev;
struct bucket_table *tbl;
struct rhash_head *head;
+ unsigned long flags;
unsigned int hash;
int elasticity;
void *data;
@@ -720,11 +729,11 @@ static inline void *__rhashtable_insert_fast(
if (!bkt)
goto out;
pprev = NULL;
- rht_lock(tbl, bkt);
+ flags = rht_lock(tbl, bkt);
if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
rcu_read_unlock();
return rhashtable_insert_slow(ht, key, obj);
}
@@ -756,9 +765,9 @@ slow_path:
RCU_INIT_POINTER(list->rhead.next, head);
if (pprev) {
rcu_assign_pointer(*pprev, obj);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
} else
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, flags);
data = NULL;
goto out;
}
@@ -785,7 +794,7 @@ slow_path:
}
atomic_inc(&ht->nelems);
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, flags);
if (rht_grow_above_75(ht, tbl))
schedule_work(&ht->run_work);
@@ -797,7 +806,7 @@ out:
return data;
out_unlock:
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
goto out;
}
@@ -991,6 +1000,7 @@ static inline int __rhashtable_remove_fast_one(
struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
+ unsigned long flags;
unsigned int hash;
int err = -ENOENT;
@@ -999,7 +1009,7 @@ static inline int __rhashtable_remove_fast_one(
if (!bkt)
return -ENOENT;
pprev = NULL;
- rht_lock(tbl, bkt);
+ flags = rht_lock(tbl, bkt);
rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
struct rhlist_head *list;
@@ -1043,14 +1053,14 @@ static inline int __rhashtable_remove_fast_one(
if (pprev) {
rcu_assign_pointer(*pprev, obj);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
} else {
- rht_assign_unlock(tbl, bkt, obj);
+ rht_assign_unlock(tbl, bkt, obj, flags);
}
goto unlocked;
}
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
unlocked:
if (err > 0) {
atomic_dec(&ht->nelems);
@@ -1143,6 +1153,7 @@ static inline int __rhashtable_replace_fast(
struct rhash_lock_head __rcu **bkt;
struct rhash_head __rcu **pprev;
struct rhash_head *he;
+ unsigned long flags;
unsigned int hash;
int err = -ENOENT;
@@ -1158,7 +1169,7 @@ static inline int __rhashtable_replace_fast(
return -ENOENT;
pprev = NULL;
- rht_lock(tbl, bkt);
+ flags = rht_lock(tbl, bkt);
rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
if (he != obj_old) {
@@ -1169,15 +1180,15 @@ static inline int __rhashtable_replace_fast(
rcu_assign_pointer(obj_new->next, obj_old->next);
if (pprev) {
rcu_assign_pointer(*pprev, obj_new);
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
} else {
- rht_assign_unlock(tbl, bkt, obj_new);
+ rht_assign_unlock(tbl, bkt, obj_new, flags);
}
err = 0;
goto unlocked;
}
- rht_unlock(tbl, bkt);
+ rht_unlock(tbl, bkt, flags);
unlocked:
return err;
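
Bucket locking now saves and restores IRQ state rather than only disabling bottom halves, so every locker carries the returned flags. The pattern the inline helpers above follow is sketched below (head_changed and new_head stand in for the insert/remove logic):

	unsigned long flags;

	flags = rht_lock(tbl, bkt);	/* local_irq_save() + bit_spin_lock() */
	/* ... walk or modify the bucket chain ... */
	if (head_changed)
		rht_assign_unlock(tbl, bkt, new_head, flags);	/* publish + unlock */
	else
		rht_unlock(tbl, bkt, flags);			/* unlock only */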
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4e464a27adaf..4c8492401a10 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1255,6 +1255,7 @@ struct sk_buff *build_skb_around(struct sk_buff *skb,
void skb_attempt_defer_free(struct sk_buff *skb);
struct sk_buff *napi_build_skb(void *data, unsigned int frag_size);
+struct sk_buff *slab_build_skb(void *data);
/**
* alloc_skb - allocate a network buffer
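
slab_build_skb() is a counterpart to build_skb() for buffers obtained from kmalloc() rather than the page allocator; a hedged sketch of a receive path using it (buffer size and error handling are illustrative only):

	void *buf = kmalloc(256, GFP_ATOMIC);
	struct sk_buff *skb = buf ? slab_build_skb(buf) : NULL;

	if (!skb)
		kfree(buf);	/* kfree(NULL) is a no-op */
	/* on success the skb owns buf and releases it via kfree_skb() */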
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index beb190449704..a0746d4aec20 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -160,7 +160,7 @@ struct mtk_wed_ops {
int (*tx_ring_setup)(struct mtk_wed_device *dev, int ring,
void __iomem *regs, bool reset);
int (*rx_ring_setup)(struct mtk_wed_device *dev, int ring,
- void __iomem *regs);
+ void __iomem *regs, bool reset);
int (*txfree_ring_setup)(struct mtk_wed_device *dev,
void __iomem *regs);
int (*msg_update)(struct mtk_wed_device *dev, int cmd_id,
@@ -228,8 +228,8 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
(_dev)->ops->irq_get(_dev, _mask)
#define mtk_wed_device_irq_set_mask(_dev, _mask) \
(_dev)->ops->irq_set_mask(_dev, _mask)
-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) \
- (_dev)->ops->rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) \
+ (_dev)->ops->rx_ring_setup(_dev, _ring, _regs, _reset)
#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) \
(_dev)->ops->ppe_check(_dev, _skb, _reason, _hash)
#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) \
@@ -249,7 +249,7 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
#define mtk_wed_device_reg_write(_dev, _reg, _val) do {} while (0)
#define mtk_wed_device_irq_get(_dev, _mask) 0
#define mtk_wed_device_irq_set_mask(_dev, _mask) do {} while (0)
-#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_rx_ring_setup(_dev, _ring, _regs, _reset) -ENODEV
#define mtk_wed_device_ppe_check(_dev, _skb, _reason, _hash) do {} while (0)
#define mtk_wed_device_update_msg(_dev, _id, _msg, _len) -ENODEV
#define mtk_wed_device_stop(_dev) do {} while (0)
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index fb2e88614f5d..83ca2e8eb6b5 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -271,5 +271,6 @@ struct plat_stmmacenet_data {
int msi_tx_base_vec;
bool use_phy_wol;
bool sph_disable;
+ bool serdes_up_after_phy_linkup;
};
#endif
diff --git a/include/net/act_api.h b/include/net/act_api.h
index c94ea1a306e0..2a6f443f0ef6 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -101,11 +101,6 @@ static inline enum flow_action_hw_stats tc_act_hw_stats(u8 hw_stats)
return hw_stats;
}
-#ifdef CONFIG_NET_CLS_ACT
-
-#define ACT_P_CREATED 1
-#define ACT_P_DELETED 1
-
typedef void (*tc_action_priv_destructor)(void *priv);
struct tc_action_ops {
@@ -140,6 +135,11 @@ struct tc_action_ops {
struct netlink_ext_ack *extack);
};
+#ifdef CONFIG_NET_CLS_ACT
+
+#define ACT_P_CREATED 1
+#define ACT_P_DELETED 1
+
struct tc_action_net {
struct tcf_idrinfo *idrinfo;
const struct tc_action_ops *ops;
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index b69ca695935c..d5a5ae926380 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -66,10 +66,10 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
-bool rxrpc_kernel_call_is_complete(struct rxrpc_call *);
void rxrpc_kernel_set_max_life(struct socket *, struct rxrpc_call *,
unsigned long);
int rxrpc_sock_set_min_security_level(struct sock *sk, unsigned int val);
+int rxrpc_sock_set_security_keyring(struct sock *, struct key *);
#endif /* _NET_RXRPC_H */
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index e004ba04a9ae..684f1cd28730 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -228,6 +228,17 @@ enum {
*/
HCI_QUIRK_VALID_LE_STATES,
+ /* When this quirk is set, then erroneous data reporting
+ * is ignored. This is mainly due to the fact that the HCI
+ * Read Default Erroneous Data Reporting command is advertised,
+ * but not supported; these controllers often reply with unknown
+ * command and tend to lock up randomly, needing a hard reset.
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ */
+ HCI_QUIRK_BROKEN_ERR_DATA_REPORTING,
+
/*
* When this quirk is set, then the hci_suspend_notifier is not
* registered. This is intended for devices which drop completely
@@ -1424,7 +1435,6 @@ struct hci_std_codecs_v2 {
} __packed;
struct hci_vnd_codec_v2 {
- __u8 id;
__le16 cid;
__le16 vid;
__u8 transport;
diff --git a/include/net/cfg802154.h b/include/net/cfg802154.h
index e1481f9cf049..d09c393d229f 100644
--- a/include/net/cfg802154.h
+++ b/include/net/cfg802154.h
@@ -260,6 +260,24 @@ struct ieee802154_addr {
};
};
+/**
+ * struct ieee802154_coord_desc - Coordinator descriptor
+ * @addr: PAN ID and coordinator address
+ * @page: page this coordinator is using
+ * @channel: channel this coordinator is using
+ * @superframe_spec: SuperFrame specification as received
+ * @link_quality: link quality indicator at which the beacon was received
+ * @gts_permit: the coordinator accepts GTS requests
+ */
+struct ieee802154_coord_desc {
+ struct ieee802154_addr addr;
+ u8 page;
+ u8 channel;
+ u16 superframe_spec;
+ u8 link_quality;
+ bool gts_permit;
+};
+
struct ieee802154_llsec_key_id {
u8 mode;
u8 id;
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 5f6eca5e4a40..0f376a28b9c4 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -1452,6 +1452,45 @@ struct devlink_ops {
const u8 *hw_addr, int hw_addr_len,
struct netlink_ext_ack *extack);
/**
+ * @port_fn_roce_get: Port function's roce get function.
+ *
+ * Query RoCE state of a function managed by the devlink port.
+ * Return -EOPNOTSUPP if port function RoCE handling is not supported.
+ */
+ int (*port_fn_roce_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ /**
+ * @port_fn_roce_set: Port function's roce set function.
+ *
+ * Enable/Disable the RoCE state of a function managed by the devlink
+ * port.
+ * Return -EOPNOTSUPP if port function RoCE handling is not supported.
+ */
+ int (*port_fn_roce_set)(struct devlink_port *devlink_port,
+ bool enable, struct netlink_ext_ack *extack);
+ /**
+ * @port_fn_migratable_get: Port function's migratable get function.
+ *
+ * Query migratable state of a function managed by the devlink port.
+ * Return -EOPNOTSUPP if port function migratable handling is not
+ * supported.
+ */
+ int (*port_fn_migratable_get)(struct devlink_port *devlink_port,
+ bool *is_enable,
+ struct netlink_ext_ack *extack);
+ /**
+ * @port_fn_migratable_set: Port function's migratable set function.
+ *
+ * Enable/Disable migratable state of a function managed by the devlink
+ * port.
+ * Return -EOPNOTSUPP if port function migratable handling is not
+ * supported.
+ */
+ int (*port_fn_migratable_set)(struct devlink_port *devlink_port,
+ bool enable,
+ struct netlink_ext_ack *extack);
+ /**
* port_new() - Add a new port function of a specified flavor
* @devlink: Devlink instance
* @attrs: attributes of the new port
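
A driver exposing the new per-port-function capabilities implements the corresponding ops; a minimal sketch under the assumption of a hypothetical foo driver with its own port state helpers:

	static int foo_port_fn_roce_get(struct devlink_port *port, bool *is_enable,
					struct netlink_ext_ack *extack)
	{
		*is_enable = foo_port_roce_enabled(port);	/* hypothetical helper */
		return 0;
	}

	static int foo_port_fn_roce_set(struct devlink_port *port, bool enable,
					struct netlink_ext_ack *extack)
	{
		return foo_port_set_roce(port, enable);		/* hypothetical helper */
	}

	static const struct devlink_ops foo_devlink_ops = {
		.port_fn_roce_get = foo_port_fn_roce_get,
		.port_fn_roce_set = foo_port_fn_roce_set,
	};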
diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h
index 28d0687bf7da..d80c78506f19 100644
--- a/include/net/mana/gdma.h
+++ b/include/net/mana/gdma.h
@@ -522,7 +522,14 @@ enum {
#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0)
-#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT
+/* Advertise to the NIC firmware: the NAPI work_done variable race is fixed,
+ * so the driver is able to reliably support features like busy_poll.
+ */
+#define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2)
+
+#define GDMA_DRV_CAP_FLAGS1 \
+ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \
+ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX)
#define GDMA_DRV_CAP_FLAGS2 0
diff --git a/include/net/nl802154.h b/include/net/nl802154.h
index f5850b569c52..b79a89d5207c 100644
--- a/include/net/nl802154.h
+++ b/include/net/nl802154.h
@@ -72,6 +72,8 @@ enum nl802154_commands {
NL802154_CMD_NEW_SEC_LEVEL,
NL802154_CMD_DEL_SEC_LEVEL,
+ NL802154_CMD_SCAN_EVENT,
+
/* add new commands above here */
/* used to define NL802154_CMD_MAX below */
@@ -131,6 +133,8 @@ enum nl802154_attrs {
NL802154_ATTR_PID,
NL802154_ATTR_NETNS_FD,
+ NL802154_ATTR_COORDINATOR,
+
/* add attributes here, update the policy in nl802154.c */
#ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL
@@ -217,6 +221,45 @@ enum nl802154_wpan_phy_capability_attr {
};
/**
+ * enum nl802154_coord - Netlink attributes for a coord
+ *
+ * @__NL802154_COORD_INVALID: invalid
+ * @NL802154_COORD_PANID: PANID of the coordinator (2 bytes)
+ * @NL802154_COORD_ADDR: coordinator address, (8 bytes or 2 bytes)
+ * @NL802154_COORD_CHANNEL: channel number, related to @NL802154_COORD_PAGE (u8)
+ * @NL802154_COORD_PAGE: channel page, related to @NL802154_COORD_CHANNEL (u8)
+ * @NL802154_COORD_PREAMBLE_CODE: Preamble code used when the beacon was received,
+ * this is PHY dependent and optional (u8)
+ * @NL802154_COORD_MEAN_PRF: Mean PRF used when the beacon was received,
+ * this is PHY dependent and optional (u8)
+ * @NL802154_COORD_SUPERFRAME_SPEC: superframe specification of the PAN (u16)
+ * @NL802154_COORD_LINK_QUALITY: signal quality of beacon in unspecified units,
+ * scaled to 0..255 (u8)
+ * @NL802154_COORD_GTS_PERMIT: set to true if GTS is permitted on this PAN
+ * @NL802154_COORD_PAYLOAD_DATA: binary data containing the raw data from the
+ * frame payload, (only if beacon or probe response had data)
+ * @NL802154_COORD_PAD: attribute used for padding for 64-bit alignment
+ * @NL802154_COORD_MAX: highest coordinator attribute
+ */
+enum nl802154_coord {
+ __NL802154_COORD_INVALID,
+ NL802154_COORD_PANID,
+ NL802154_COORD_ADDR,
+ NL802154_COORD_CHANNEL,
+ NL802154_COORD_PAGE,
+ NL802154_COORD_PREAMBLE_CODE,
+ NL802154_COORD_MEAN_PRF,
+ NL802154_COORD_SUPERFRAME_SPEC,
+ NL802154_COORD_LINK_QUALITY,
+ NL802154_COORD_GTS_PERMIT,
+ NL802154_COORD_PAYLOAD_DATA,
+ NL802154_COORD_PAD,
+
+ /* keep last */
+ NL802154_COORD_MAX,
+};
+
+/**
* enum nl802154_cca_modes - cca modes
*
* @__NL802154_CCA_INVALID: cca mode number 0 is reserved
diff --git a/include/net/ping.h b/include/net/ping.h
index e4ff3911cbf5..9233ad3de0ad 100644
--- a/include/net/ping.h
+++ b/include/net/ping.h
@@ -16,9 +16,6 @@
#define PING_HTABLE_SIZE 64
#define PING_HTABLE_MASK (PING_HTABLE_SIZE-1)
-#define ping_portaddr_for_each_entry(__sk, node, list) \
- hlist_nulls_for_each_entry(__sk, node, list, sk_nulls_node)
-
/*
* gid_t is either uint or ushort. We want to pass it to
* proc_dointvec_minmax(), so it must not be larger than MAX_INT
diff --git a/include/net/sock.h b/include/net/sock.h
index 6d207e7c4ad0..ecea3dcc2217 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -503,10 +503,10 @@ struct sock {
#if BITS_PER_LONG==32
seqlock_t sk_stamp_seq;
#endif
- u16 sk_tsflags;
- u8 sk_shutdown;
atomic_t sk_tskey;
atomic_t sk_zckey;
+ u32 sk_tsflags;
+ u8 sk_shutdown;
u8 sk_clockid;
u8 sk_txtime_deadline_mode : 1,
@@ -1899,7 +1899,7 @@ static inline void sock_replace_proto(struct sock *sk, struct proto *proto)
struct sockcm_cookie {
u64 transmit_time;
u32 mark;
- u16 tsflags;
+ u32 tsflags;
};
static inline void sockcm_init(struct sockcm_cookie *sockc,
diff --git a/include/net/tc_wrapper.h b/include/net/tc_wrapper.h
new file mode 100644
index 000000000000..ceed2fc089ff
--- /dev/null
+++ b/include/net/tc_wrapper.h
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __NET_TC_WRAPPER_H
+#define __NET_TC_WRAPPER_H
+
+#include <net/pkt_cls.h>
+
+#if IS_ENABLED(CONFIG_RETPOLINE)
+
+#include <linux/cpufeature.h>
+#include <linux/static_key.h>
+#include <linux/indirect_call_wrapper.h>
+
+#define TC_INDIRECT_SCOPE
+
+extern struct static_key_false tc_skip_wrapper;
+
+/* TC Actions */
+#ifdef CONFIG_NET_CLS_ACT
+
+#define TC_INDIRECT_ACTION_DECLARE(fname) \
+ INDIRECT_CALLABLE_DECLARE(int fname(struct sk_buff *skb, \
+ const struct tc_action *a, \
+ struct tcf_result *res))
+
+TC_INDIRECT_ACTION_DECLARE(tcf_bpf_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_connmark_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_csum_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_ct_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_ctinfo_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_gact_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_gate_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_ife_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_ipt_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_mirred_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_mpls_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_nat_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_pedit_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_police_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_sample_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_simp_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_skbedit_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_skbmod_act);
+TC_INDIRECT_ACTION_DECLARE(tcf_vlan_act);
+TC_INDIRECT_ACTION_DECLARE(tunnel_key_act);
+
+static inline int tc_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ if (static_branch_likely(&tc_skip_wrapper))
+ goto skip;
+
+#if IS_BUILTIN(CONFIG_NET_ACT_GACT)
+ if (a->ops->act == tcf_gact_act)
+ return tcf_gact_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_MIRRED)
+ if (a->ops->act == tcf_mirred_act)
+ return tcf_mirred_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_PEDIT)
+ if (a->ops->act == tcf_pedit_act)
+ return tcf_pedit_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SKBEDIT)
+ if (a->ops->act == tcf_skbedit_act)
+ return tcf_skbedit_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SKBMOD)
+ if (a->ops->act == tcf_skbmod_act)
+ return tcf_skbmod_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_POLICE)
+ if (a->ops->act == tcf_police_act)
+ return tcf_police_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_BPF)
+ if (a->ops->act == tcf_bpf_act)
+ return tcf_bpf_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CONNMARK)
+ if (a->ops->act == tcf_connmark_act)
+ return tcf_connmark_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CSUM)
+ if (a->ops->act == tcf_csum_act)
+ return tcf_csum_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CT)
+ if (a->ops->act == tcf_ct_act)
+ return tcf_ct_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_CTINFO)
+ if (a->ops->act == tcf_ctinfo_act)
+ return tcf_ctinfo_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_GATE)
+ if (a->ops->act == tcf_gate_act)
+ return tcf_gate_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_MPLS)
+ if (a->ops->act == tcf_mpls_act)
+ return tcf_mpls_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_NAT)
+ if (a->ops->act == tcf_nat_act)
+ return tcf_nat_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_TUNNEL_KEY)
+ if (a->ops->act == tunnel_key_act)
+ return tunnel_key_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_VLAN)
+ if (a->ops->act == tcf_vlan_act)
+ return tcf_vlan_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_IFE)
+ if (a->ops->act == tcf_ife_act)
+ return tcf_ife_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_IPT)
+ if (a->ops->act == tcf_ipt_act)
+ return tcf_ipt_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SIMP)
+ if (a->ops->act == tcf_simp_act)
+ return tcf_simp_act(skb, a, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_ACT_SAMPLE)
+ if (a->ops->act == tcf_sample_act)
+ return tcf_sample_act(skb, a, res);
+#endif
+
+skip:
+ return a->ops->act(skb, a, res);
+}
+
+#endif /* CONFIG_NET_CLS_ACT */
+
+/* TC Filters */
+#ifdef CONFIG_NET_CLS
+
+#define TC_INDIRECT_FILTER_DECLARE(fname) \
+ INDIRECT_CALLABLE_DECLARE(int fname(struct sk_buff *skb, \
+ const struct tcf_proto *tp, \
+ struct tcf_result *res))
+
+TC_INDIRECT_FILTER_DECLARE(basic_classify);
+TC_INDIRECT_FILTER_DECLARE(cls_bpf_classify);
+TC_INDIRECT_FILTER_DECLARE(cls_cgroup_classify);
+TC_INDIRECT_FILTER_DECLARE(fl_classify);
+TC_INDIRECT_FILTER_DECLARE(flow_classify);
+TC_INDIRECT_FILTER_DECLARE(fw_classify);
+TC_INDIRECT_FILTER_DECLARE(mall_classify);
+TC_INDIRECT_FILTER_DECLARE(route4_classify);
+TC_INDIRECT_FILTER_DECLARE(rsvp_classify);
+TC_INDIRECT_FILTER_DECLARE(rsvp6_classify);
+TC_INDIRECT_FILTER_DECLARE(tcindex_classify);
+TC_INDIRECT_FILTER_DECLARE(u32_classify);
+
+static inline int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res)
+{
+ if (static_branch_likely(&tc_skip_wrapper))
+ goto skip;
+
+#if IS_BUILTIN(CONFIG_NET_CLS_BPF)
+ if (tp->classify == cls_bpf_classify)
+ return cls_bpf_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_U32)
+ if (tp->classify == u32_classify)
+ return u32_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_FLOWER)
+ if (tp->classify == fl_classify)
+ return fl_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_FW)
+ if (tp->classify == fw_classify)
+ return fw_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_MATCHALL)
+ if (tp->classify == mall_classify)
+ return mall_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_BASIC)
+ if (tp->classify == basic_classify)
+ return basic_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_CGROUP)
+ if (tp->classify == cls_cgroup_classify)
+ return cls_cgroup_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_FLOW)
+ if (tp->classify == flow_classify)
+ return flow_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_ROUTE4)
+ if (tp->classify == route4_classify)
+ return route4_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_RSVP)
+ if (tp->classify == rsvp_classify)
+ return rsvp_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_RSVP6)
+ if (tp->classify == rsvp6_classify)
+ return rsvp6_classify(skb, tp, res);
+#endif
+#if IS_BUILTIN(CONFIG_NET_CLS_TCINDEX)
+ if (tp->classify == tcindex_classify)
+ return tcindex_classify(skb, tp, res);
+#endif
+
+skip:
+ return tp->classify(skb, tp, res);
+}
+
+static inline void tc_wrapper_init(void)
+{
+#ifdef CONFIG_X86
+ if (!cpu_feature_enabled(X86_FEATURE_RETPOLINE))
+ static_branch_enable(&tc_skip_wrapper);
+#endif
+}
+
+#endif /* CONFIG_NET_CLS */
+
+#else
+
+#define TC_INDIRECT_SCOPE static
+
+static inline int tc_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ return a->ops->act(skb, a, res);
+}
+
+static inline int tc_classify(struct sk_buff *skb, const struct tcf_proto *tp,
+ struct tcf_result *res)
+{
+ return tp->classify(skb, tp, res);
+}
+
+static inline void tc_wrapper_init(void)
+{
+}
+
+#endif
+
+#endif /* __NET_TC_WRAPPER_H */
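
The wrapper replaces the raw indirect calls on the TC fast path: handlers are declared with TC_INDIRECT_SCOPE so built-in ones keep external linkage and can be compared against directly, and callers switch to the helpers. A sketch of the intended call sites:

	/* once at init time: skip the wrapper when retpolines are not active */
	tc_wrapper_init();

	/* classifier fast path: instead of tp->classify(skb, tp, res) */
	ret = tc_classify(skb, tp, res);

	/* action fast path: instead of a->ops->act(skb, a, res) */
	ret = tc_act(skb, a, res);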
diff --git a/include/trace/events/fscache.h b/include/trace/events/fscache.h
index c078c48a8e6d..a6190aa1b406 100644
--- a/include/trace/events/fscache.h
+++ b/include/trace/events/fscache.h
@@ -66,6 +66,7 @@ enum fscache_cookie_trace {
fscache_cookie_put_work,
fscache_cookie_see_active,
fscache_cookie_see_lru_discard,
+ fscache_cookie_see_lru_discard_clear,
fscache_cookie_see_lru_do_one,
fscache_cookie_see_relinquish,
fscache_cookie_see_withdraw,
@@ -149,6 +150,7 @@ enum fscache_access_trace {
EM(fscache_cookie_put_work, "PQ work ") \
EM(fscache_cookie_see_active, "- activ") \
EM(fscache_cookie_see_lru_discard, "- x-lru") \
+ EM(fscache_cookie_see_lru_discard_clear,"- lrudc") \
EM(fscache_cookie_see_lru_do_one, "- lrudo") \
EM(fscache_cookie_see_relinquish, "- x-rlq") \
EM(fscache_cookie_see_withdraw, "- x-wth") \
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index b9886d1df825..049b52e7aa6a 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -16,44 +16,121 @@
/*
* Declare tracing information enums and their string mappings for display.
*/
+#define rxrpc_call_poke_traces \
+ EM(rxrpc_call_poke_error, "Error") \
+ EM(rxrpc_call_poke_idle, "Idle") \
+ EM(rxrpc_call_poke_start, "Start") \
+ EM(rxrpc_call_poke_timer, "Timer") \
+ E_(rxrpc_call_poke_timer_now, "Timer-now")
+
#define rxrpc_skb_traces \
- EM(rxrpc_skb_ack, "ACK") \
- EM(rxrpc_skb_cleaned, "CLN") \
- EM(rxrpc_skb_cloned_jumbo, "CLJ") \
- EM(rxrpc_skb_freed, "FRE") \
- EM(rxrpc_skb_got, "GOT") \
- EM(rxrpc_skb_lost, "*L*") \
- EM(rxrpc_skb_new, "NEW") \
- EM(rxrpc_skb_purged, "PUR") \
- EM(rxrpc_skb_received, "RCV") \
- EM(rxrpc_skb_rotated, "ROT") \
- EM(rxrpc_skb_seen, "SEE") \
- EM(rxrpc_skb_unshared, "UNS") \
- E_(rxrpc_skb_unshared_nomem, "US0")
+ EM(rxrpc_skb_eaten_by_unshare, "ETN unshare ") \
+ EM(rxrpc_skb_eaten_by_unshare_nomem, "ETN unshar-nm") \
+ EM(rxrpc_skb_get_conn_work, "GET conn-work") \
+ EM(rxrpc_skb_get_local_work, "GET locl-work") \
+ EM(rxrpc_skb_get_reject_work, "GET rej-work ") \
+ EM(rxrpc_skb_get_to_recvmsg, "GET to-recv ") \
+ EM(rxrpc_skb_get_to_recvmsg_oos, "GET to-recv-o") \
+ EM(rxrpc_skb_new_encap_rcv, "NEW encap-rcv") \
+ EM(rxrpc_skb_new_error_report, "NEW error-rpt") \
+ EM(rxrpc_skb_new_jumbo_subpacket, "NEW jumbo-sub") \
+ EM(rxrpc_skb_new_unshared, "NEW unshared ") \
+ EM(rxrpc_skb_put_conn_work, "PUT conn-work") \
+ EM(rxrpc_skb_put_error_report, "PUT error-rep") \
+ EM(rxrpc_skb_put_input, "PUT input ") \
+ EM(rxrpc_skb_put_jumbo_subpacket, "PUT jumbo-sub") \
+ EM(rxrpc_skb_put_purge, "PUT purge ") \
+ EM(rxrpc_skb_put_rotate, "PUT rotate ") \
+ EM(rxrpc_skb_put_unknown, "PUT unknown ") \
+ EM(rxrpc_skb_see_conn_work, "SEE conn-work") \
+ EM(rxrpc_skb_see_recvmsg, "SEE recvmsg ") \
+ EM(rxrpc_skb_see_reject, "SEE reject ") \
+ EM(rxrpc_skb_see_rotate, "SEE rotate ") \
+ E_(rxrpc_skb_see_version, "SEE version ")
#define rxrpc_local_traces \
- EM(rxrpc_local_got, "GOT") \
- EM(rxrpc_local_new, "NEW") \
- EM(rxrpc_local_processing, "PRO") \
- EM(rxrpc_local_put, "PUT") \
- EM(rxrpc_local_queued, "QUE") \
- E_(rxrpc_local_tx_ack, "TAK")
+ EM(rxrpc_local_free, "FREE ") \
+ EM(rxrpc_local_get_call, "GET call ") \
+ EM(rxrpc_local_get_client_conn, "GET conn-cln") \
+ EM(rxrpc_local_get_for_use, "GET for-use ") \
+ EM(rxrpc_local_get_peer, "GET peer ") \
+ EM(rxrpc_local_get_prealloc_conn, "GET conn-pre") \
+ EM(rxrpc_local_new, "NEW ") \
+ EM(rxrpc_local_put_bind, "PUT bind ") \
+ EM(rxrpc_local_put_call, "PUT call ") \
+ EM(rxrpc_local_put_for_use, "PUT for-use ") \
+ EM(rxrpc_local_put_kill_conn, "PUT conn-kil") \
+ EM(rxrpc_local_put_peer, "PUT peer ") \
+ EM(rxrpc_local_put_prealloc_conn, "PUT conn-pre") \
+ EM(rxrpc_local_put_release_sock, "PUT rel-sock") \
+ EM(rxrpc_local_stop, "STOP ") \
+ EM(rxrpc_local_stopped, "STOPPED ") \
+ EM(rxrpc_local_unuse_bind, "UNU bind ") \
+ EM(rxrpc_local_unuse_conn_work, "UNU conn-wrk") \
+ EM(rxrpc_local_unuse_peer_keepalive, "UNU peer-kpa") \
+ EM(rxrpc_local_unuse_release_sock, "UNU rel-sock") \
+ EM(rxrpc_local_use_conn_work, "USE conn-wrk") \
+ EM(rxrpc_local_use_lookup, "USE lookup ") \
+ E_(rxrpc_local_use_peer_keepalive, "USE peer-kpa")
#define rxrpc_peer_traces \
- EM(rxrpc_peer_got, "GOT") \
- EM(rxrpc_peer_new, "NEW") \
- EM(rxrpc_peer_processing, "PRO") \
- E_(rxrpc_peer_put, "PUT")
+ EM(rxrpc_peer_free, "FREE ") \
+ EM(rxrpc_peer_get_accept, "GET accept ") \
+ EM(rxrpc_peer_get_activate_call, "GET act-call") \
+ EM(rxrpc_peer_get_bundle, "GET bundle ") \
+ EM(rxrpc_peer_get_client_conn, "GET cln-conn") \
+ EM(rxrpc_peer_get_input, "GET input ") \
+ EM(rxrpc_peer_get_input_error, "GET inpt-err") \
+ EM(rxrpc_peer_get_keepalive, "GET keepaliv") \
+ EM(rxrpc_peer_get_lookup_client, "GET look-cln") \
+ EM(rxrpc_peer_get_service_conn, "GET srv-conn") \
+ EM(rxrpc_peer_new_client, "NEW client ") \
+ EM(rxrpc_peer_new_prealloc, "NEW prealloc") \
+ EM(rxrpc_peer_put_bundle, "PUT bundle ") \
+ EM(rxrpc_peer_put_call, "PUT call ") \
+ EM(rxrpc_peer_put_conn, "PUT conn ") \
+ EM(rxrpc_peer_put_discard_tmp, "PUT disc-tmp") \
+ EM(rxrpc_peer_put_input, "PUT input ") \
+ EM(rxrpc_peer_put_input_error, "PUT inpt-err") \
+ E_(rxrpc_peer_put_keepalive, "PUT keepaliv")
+
+#define rxrpc_bundle_traces \
+ EM(rxrpc_bundle_free, "FREE ") \
+ EM(rxrpc_bundle_get_client_call, "GET clt-call") \
+ EM(rxrpc_bundle_get_client_conn, "GET clt-conn") \
+ EM(rxrpc_bundle_get_service_conn, "GET svc-conn") \
+ EM(rxrpc_bundle_put_conn, "PUT conn ") \
+ EM(rxrpc_bundle_put_discard, "PUT discard ") \
+ E_(rxrpc_bundle_new, "NEW ")
#define rxrpc_conn_traces \
- EM(rxrpc_conn_got, "GOT") \
- EM(rxrpc_conn_new_client, "NWc") \
- EM(rxrpc_conn_new_service, "NWs") \
- EM(rxrpc_conn_put_client, "PTc") \
- EM(rxrpc_conn_put_service, "PTs") \
- EM(rxrpc_conn_queued, "QUE") \
- EM(rxrpc_conn_reap_service, "RPs") \
- E_(rxrpc_conn_seen, "SEE")
+ EM(rxrpc_conn_free, "FREE ") \
+ EM(rxrpc_conn_get_activate_call, "GET act-call") \
+ EM(rxrpc_conn_get_call_input, "GET inp-call") \
+ EM(rxrpc_conn_get_conn_input, "GET inp-conn") \
+ EM(rxrpc_conn_get_idle, "GET idle ") \
+ EM(rxrpc_conn_get_poke, "GET poke ") \
+ EM(rxrpc_conn_get_service_conn, "GET svc-conn") \
+ EM(rxrpc_conn_new_client, "NEW client ") \
+ EM(rxrpc_conn_new_service, "NEW service ") \
+ EM(rxrpc_conn_put_call, "PUT call ") \
+ EM(rxrpc_conn_put_call_input, "PUT inp-call") \
+ EM(rxrpc_conn_put_conn_input, "PUT inp-conn") \
+ EM(rxrpc_conn_put_discard, "PUT discard ") \
+ EM(rxrpc_conn_put_discard_idle, "PUT disc-idl") \
+ EM(rxrpc_conn_put_local_dead, "PUT loc-dead") \
+ EM(rxrpc_conn_put_noreuse, "PUT noreuse ") \
+ EM(rxrpc_conn_put_poke, "PUT poke ") \
+ EM(rxrpc_conn_put_service_reaped, "PUT svc-reap") \
+ EM(rxrpc_conn_put_unbundle, "PUT unbundle") \
+ EM(rxrpc_conn_put_unidle, "PUT unidle ") \
+ EM(rxrpc_conn_queue_challenge, "QUE chall ") \
+ EM(rxrpc_conn_queue_retry_work, "QUE retry-wk") \
+ EM(rxrpc_conn_queue_rx_work, "QUE rx-work ") \
+ EM(rxrpc_conn_queue_timer, "QUE timer ") \
+ EM(rxrpc_conn_see_new_service_conn, "SEE new-svc ") \
+ EM(rxrpc_conn_see_reap_service, "SEE reap-svc") \
+ E_(rxrpc_conn_see_work, "SEE work ")
#define rxrpc_client_traces \
EM(rxrpc_client_activate_chans, "Activa") \
@@ -71,26 +148,36 @@
E_(rxrpc_client_to_idle, "->Idle")
#define rxrpc_call_traces \
- EM(rxrpc_call_connected, "CON") \
- EM(rxrpc_call_error, "*E*") \
- EM(rxrpc_call_got, "GOT") \
- EM(rxrpc_call_got_kernel, "Gke") \
- EM(rxrpc_call_got_timer, "GTM") \
- EM(rxrpc_call_got_tx, "Gtx") \
- EM(rxrpc_call_got_userid, "Gus") \
- EM(rxrpc_call_new_client, "NWc") \
- EM(rxrpc_call_new_service, "NWs") \
- EM(rxrpc_call_put, "PUT") \
- EM(rxrpc_call_put_kernel, "Pke") \
- EM(rxrpc_call_put_noqueue, "PnQ") \
- EM(rxrpc_call_put_notimer, "PnT") \
- EM(rxrpc_call_put_timer, "PTM") \
- EM(rxrpc_call_put_tx, "Ptx") \
- EM(rxrpc_call_put_userid, "Pus") \
- EM(rxrpc_call_queued, "QUE") \
- EM(rxrpc_call_queued_ref, "QUR") \
- EM(rxrpc_call_release, "RLS") \
- E_(rxrpc_call_seen, "SEE")
+ EM(rxrpc_call_get_input, "GET input ") \
+ EM(rxrpc_call_get_kernel_service, "GET krnl-srv") \
+ EM(rxrpc_call_get_notify_socket, "GET notify ") \
+ EM(rxrpc_call_get_poke, "GET poke ") \
+ EM(rxrpc_call_get_recvmsg, "GET recvmsg ") \
+ EM(rxrpc_call_get_release_sock, "GET rel-sock") \
+ EM(rxrpc_call_get_sendmsg, "GET sendmsg ") \
+ EM(rxrpc_call_get_userid, "GET user-id ") \
+ EM(rxrpc_call_new_client, "NEW client ") \
+ EM(rxrpc_call_new_prealloc_service, "NEW prealloc") \
+ EM(rxrpc_call_put_discard_prealloc, "PUT disc-pre") \
+ EM(rxrpc_call_put_discard_error, "PUT disc-err") \
+ EM(rxrpc_call_put_input, "PUT input ") \
+ EM(rxrpc_call_put_kernel, "PUT kernel ") \
+ EM(rxrpc_call_put_poke, "PUT poke ") \
+ EM(rxrpc_call_put_recvmsg, "PUT recvmsg ") \
+ EM(rxrpc_call_put_release_sock, "PUT rls-sock") \
+ EM(rxrpc_call_put_release_sock_tba, "PUT rls-sk-a") \
+ EM(rxrpc_call_put_sendmsg, "PUT sendmsg ") \
+ EM(rxrpc_call_put_unnotify, "PUT unnotify") \
+ EM(rxrpc_call_put_userid_exists, "PUT u-exists") \
+ EM(rxrpc_call_see_accept, "SEE accept ") \
+ EM(rxrpc_call_see_activate_client, "SEE act-clnt") \
+ EM(rxrpc_call_see_connect_failed, "SEE con-fail") \
+ EM(rxrpc_call_see_connected, "SEE connect ") \
+ EM(rxrpc_call_see_distribute_error, "SEE dist-err") \
+ EM(rxrpc_call_see_input, "SEE input ") \
+ EM(rxrpc_call_see_release, "SEE release ") \
+ EM(rxrpc_call_see_userid_exists, "SEE u-exists") \
+ E_(rxrpc_call_see_zap, "SEE zap ")
#define rxrpc_txqueue_traces \
EM(rxrpc_txqueue_await_reply, "AWR") \
@@ -179,6 +266,7 @@
EM(rxrpc_propose_ack_respond_to_ping, "Rsp2Png") \
EM(rxrpc_propose_ack_retry_tx, "RetryTx") \
EM(rxrpc_propose_ack_rotate_rx, "RxAck ") \
+ EM(rxrpc_propose_ack_rx_idle, "RxIdle ") \
E_(rxrpc_propose_ack_terminal_ack, "ClTerm ")
#define rxrpc_congest_modes \
@@ -273,6 +361,7 @@
EM(rxrpc_txbuf_put_rotated, "PUT ROTATED") \
EM(rxrpc_txbuf_put_send_aborted, "PUT SEND-X ") \
EM(rxrpc_txbuf_put_trans, "PUT TRANS ") \
+ EM(rxrpc_txbuf_see_out_of_step, "OUT-OF-STEP") \
EM(rxrpc_txbuf_see_send_more, "SEE SEND+ ") \
E_(rxrpc_txbuf_see_unacked, "SEE UNACKED")
@@ -287,6 +376,8 @@
#define EM(a, b) a,
#define E_(a, b) a
+enum rxrpc_bundle_trace { rxrpc_bundle_traces } __mode(byte);
+enum rxrpc_call_poke_trace { rxrpc_call_poke_traces } __mode(byte);
enum rxrpc_call_trace { rxrpc_call_traces } __mode(byte);
enum rxrpc_client_trace { rxrpc_client_traces } __mode(byte);
enum rxrpc_congest_change { rxrpc_congest_changes } __mode(byte);
@@ -316,6 +407,8 @@ enum rxrpc_txqueue_trace { rxrpc_txqueue_traces } __mode(byte);
#define EM(a, b) TRACE_DEFINE_ENUM(a);
#define E_(a, b) TRACE_DEFINE_ENUM(a);
+rxrpc_bundle_traces;
+rxrpc_call_poke_traces;
rxrpc_call_traces;
rxrpc_client_traces;
rxrpc_congest_changes;
@@ -345,83 +438,98 @@ rxrpc_txqueue_traces;
TRACE_EVENT(rxrpc_local,
TP_PROTO(unsigned int local_debug_id, enum rxrpc_local_trace op,
- int usage, const void *where),
+ int ref, int usage),
- TP_ARGS(local_debug_id, op, usage, where),
+ TP_ARGS(local_debug_id, op, ref, usage),
TP_STRUCT__entry(
__field(unsigned int, local )
__field(int, op )
+ __field(int, ref )
__field(int, usage )
- __field(const void *, where )
),
TP_fast_assign(
__entry->local = local_debug_id;
__entry->op = op;
+ __entry->ref = ref;
__entry->usage = usage;
- __entry->where = where;
),
- TP_printk("L=%08x %s u=%d sp=%pSR",
+ TP_printk("L=%08x %s r=%d u=%d",
__entry->local,
__print_symbolic(__entry->op, rxrpc_local_traces),
- __entry->usage,
- __entry->where)
+ __entry->ref,
+ __entry->usage)
);
TRACE_EVENT(rxrpc_peer,
- TP_PROTO(unsigned int peer_debug_id, enum rxrpc_peer_trace op,
- int usage, const void *where),
+ TP_PROTO(unsigned int peer_debug_id, int ref, enum rxrpc_peer_trace why),
- TP_ARGS(peer_debug_id, op, usage, where),
+ TP_ARGS(peer_debug_id, ref, why),
TP_STRUCT__entry(
__field(unsigned int, peer )
- __field(int, op )
- __field(int, usage )
- __field(const void *, where )
+ __field(int, ref )
+ __field(int, why )
),
TP_fast_assign(
__entry->peer = peer_debug_id;
- __entry->op = op;
- __entry->usage = usage;
- __entry->where = where;
+ __entry->ref = ref;
+ __entry->why = why;
),
- TP_printk("P=%08x %s u=%d sp=%pSR",
+ TP_printk("P=%08x %s r=%d",
__entry->peer,
- __print_symbolic(__entry->op, rxrpc_peer_traces),
- __entry->usage,
- __entry->where)
+ __print_symbolic(__entry->why, rxrpc_peer_traces),
+ __entry->ref)
+ );
+
+TRACE_EVENT(rxrpc_bundle,
+ TP_PROTO(unsigned int bundle_debug_id, int ref, enum rxrpc_bundle_trace why),
+
+ TP_ARGS(bundle_debug_id, ref, why),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, bundle )
+ __field(int, ref )
+ __field(int, why )
+ ),
+
+ TP_fast_assign(
+ __entry->bundle = bundle_debug_id;
+ __entry->ref = ref;
+ __entry->why = why;
+ ),
+
+ TP_printk("CB=%08x %s r=%d",
+ __entry->bundle,
+ __print_symbolic(__entry->why, rxrpc_bundle_traces),
+ __entry->ref)
);
TRACE_EVENT(rxrpc_conn,
- TP_PROTO(unsigned int conn_debug_id, enum rxrpc_conn_trace op,
- int usage, const void *where),
+ TP_PROTO(unsigned int conn_debug_id, int ref, enum rxrpc_conn_trace why),
- TP_ARGS(conn_debug_id, op, usage, where),
+ TP_ARGS(conn_debug_id, ref, why),
TP_STRUCT__entry(
__field(unsigned int, conn )
- __field(int, op )
- __field(int, usage )
- __field(const void *, where )
+ __field(int, ref )
+ __field(int, why )
),
TP_fast_assign(
__entry->conn = conn_debug_id;
- __entry->op = op;
- __entry->usage = usage;
- __entry->where = where;
+ __entry->ref = ref;
+ __entry->why = why;
),
- TP_printk("C=%08x %s u=%d sp=%pSR",
+ TP_printk("C=%08x %s r=%d",
__entry->conn,
- __print_symbolic(__entry->op, rxrpc_conn_traces),
- __entry->usage,
- __entry->where)
+ __print_symbolic(__entry->why, rxrpc_conn_traces),
+ __entry->ref)
);
TRACE_EVENT(rxrpc_client,
@@ -455,63 +563,57 @@ TRACE_EVENT(rxrpc_client,
);
TRACE_EVENT(rxrpc_call,
- TP_PROTO(unsigned int call_debug_id, enum rxrpc_call_trace op,
- int usage, const void *where, const void *aux),
+ TP_PROTO(unsigned int call_debug_id, int ref, unsigned long aux,
+ enum rxrpc_call_trace why),
- TP_ARGS(call_debug_id, op, usage, where, aux),
+ TP_ARGS(call_debug_id, ref, aux, why),
TP_STRUCT__entry(
__field(unsigned int, call )
- __field(int, op )
- __field(int, usage )
- __field(const void *, where )
- __field(const void *, aux )
+ __field(int, ref )
+ __field(int, why )
+ __field(unsigned long, aux )
),
TP_fast_assign(
__entry->call = call_debug_id;
- __entry->op = op;
- __entry->usage = usage;
- __entry->where = where;
+ __entry->ref = ref;
+ __entry->why = why;
__entry->aux = aux;
),
- TP_printk("c=%08x %s u=%d sp=%pSR a=%p",
+ TP_printk("c=%08x %s r=%d a=%lx",
__entry->call,
- __print_symbolic(__entry->op, rxrpc_call_traces),
- __entry->usage,
- __entry->where,
+ __print_symbolic(__entry->why, rxrpc_call_traces),
+ __entry->ref,
__entry->aux)
);
TRACE_EVENT(rxrpc_skb,
- TP_PROTO(struct sk_buff *skb, enum rxrpc_skb_trace op,
- int usage, int mod_count, const void *where),
+ TP_PROTO(struct sk_buff *skb, int usage, int mod_count,
+ enum rxrpc_skb_trace why),
- TP_ARGS(skb, op, usage, mod_count, where),
+ TP_ARGS(skb, usage, mod_count, why),
TP_STRUCT__entry(
__field(struct sk_buff *, skb )
- __field(enum rxrpc_skb_trace, op )
__field(int, usage )
__field(int, mod_count )
- __field(const void *, where )
+ __field(enum rxrpc_skb_trace, why )
),
TP_fast_assign(
__entry->skb = skb;
- __entry->op = op;
__entry->usage = usage;
__entry->mod_count = mod_count;
- __entry->where = where;
+ __entry->why = why;
),
- TP_printk("s=%p Rx %s u=%d m=%d p=%pSR",
+ TP_printk("s=%p Rx %s u=%d m=%d",
__entry->skb,
- __print_symbolic(__entry->op, rxrpc_skb_traces),
+ __print_symbolic(__entry->why, rxrpc_skb_traces),
__entry->usage,
- __entry->mod_count,
- __entry->where)
+ __entry->mod_count)
);
TRACE_EVENT(rxrpc_rx_packet,
@@ -623,6 +725,7 @@ TRACE_EVENT(rxrpc_txqueue,
__field(rxrpc_seq_t, acks_hard_ack )
__field(rxrpc_seq_t, tx_bottom )
__field(rxrpc_seq_t, tx_top )
+ __field(rxrpc_seq_t, tx_prepared )
__field(int, tx_winsize )
),
@@ -632,16 +735,18 @@ TRACE_EVENT(rxrpc_txqueue,
__entry->acks_hard_ack = call->acks_hard_ack;
__entry->tx_bottom = call->tx_bottom;
__entry->tx_top = call->tx_top;
+ __entry->tx_prepared = call->tx_prepared;
__entry->tx_winsize = call->tx_winsize;
),
- TP_printk("c=%08x %s f=%08x h=%08x n=%u/%u/%u",
+ TP_printk("c=%08x %s f=%08x h=%08x n=%u/%u/%u/%u",
__entry->call,
__print_symbolic(__entry->why, rxrpc_txqueue_traces),
__entry->tx_bottom,
__entry->acks_hard_ack,
__entry->tx_top - __entry->tx_bottom,
__entry->tx_top - __entry->acks_hard_ack,
+ __entry->tx_prepared - __entry->tx_bottom,
__entry->tx_winsize)
);
@@ -733,6 +838,66 @@ TRACE_EVENT(rxrpc_rx_abort,
__entry->abort_code)
);
+TRACE_EVENT(rxrpc_rx_challenge,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
+ u32 version, u32 nonce, u32 min_level),
+
+ TP_ARGS(conn, serial, version, nonce, min_level),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn )
+ __field(rxrpc_serial_t, serial )
+ __field(u32, version )
+ __field(u32, nonce )
+ __field(u32, min_level )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->serial = serial;
+ __entry->version = version;
+ __entry->nonce = nonce;
+ __entry->min_level = min_level;
+ ),
+
+ TP_printk("C=%08x CHALLENGE %08x v=%x n=%x ml=%x",
+ __entry->conn,
+ __entry->serial,
+ __entry->version,
+ __entry->nonce,
+ __entry->min_level)
+ );
+
+TRACE_EVENT(rxrpc_rx_response,
+ TP_PROTO(struct rxrpc_connection *conn, rxrpc_serial_t serial,
+ u32 version, u32 kvno, u32 ticket_len),
+
+ TP_ARGS(conn, serial, version, kvno, ticket_len),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, conn )
+ __field(rxrpc_serial_t, serial )
+ __field(u32, version )
+ __field(u32, kvno )
+ __field(u32, ticket_len )
+ ),
+
+ TP_fast_assign(
+ __entry->conn = conn->debug_id;
+ __entry->serial = serial;
+ __entry->version = version;
+ __entry->kvno = kvno;
+ __entry->ticket_len = ticket_len;
+ ),
+
+ TP_printk("C=%08x RESPONSE %08x v=%x kvno=%x tl=%x",
+ __entry->conn,
+ __entry->serial,
+ __entry->version,
+ __entry->kvno,
+ __entry->ticket_len)
+ );
+
TRACE_EVENT(rxrpc_rx_rwind_change,
TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
u32 rwind, bool wake),
@@ -1278,6 +1443,44 @@ TRACE_EVENT(rxrpc_congest,
__entry->sum.retrans_timeo ? " rTxTo" : "")
);
+TRACE_EVENT(rxrpc_reset_cwnd,
+ TP_PROTO(struct rxrpc_call *call, ktime_t now),
+
+ TP_ARGS(call, now),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(enum rxrpc_congest_mode, mode )
+ __field(unsigned short, cwnd )
+ __field(unsigned short, extra )
+ __field(rxrpc_seq_t, hard_ack )
+ __field(rxrpc_seq_t, prepared )
+ __field(ktime_t, since_last_tx )
+ __field(bool, has_data )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call->debug_id;
+ __entry->mode = call->cong_mode;
+ __entry->cwnd = call->cong_cwnd;
+ __entry->extra = call->cong_extra;
+ __entry->hard_ack = call->acks_hard_ack;
+ __entry->prepared = call->tx_prepared - call->tx_bottom;
+ __entry->since_last_tx = ktime_sub(now, call->tx_last_sent);
+ __entry->has_data = !list_empty(&call->tx_sendmsg);
+ ),
+
+ TP_printk("c=%08x q=%08x %s cw=%u+%u pr=%u tm=%llu d=%u",
+ __entry->call,
+ __entry->hard_ack,
+ __print_symbolic(__entry->mode, rxrpc_congest_modes),
+ __entry->cwnd,
+ __entry->extra,
+ __entry->prepared,
+ ktime_to_ns(__entry->since_last_tx),
+ __entry->has_data)
+ );
+
TRACE_EVENT(rxrpc_disconnect_call,
TP_PROTO(struct rxrpc_call *call),
@@ -1352,6 +1555,7 @@ TRACE_EVENT(rxrpc_connect_call,
__field(unsigned long, user_call_ID )
__field(u32, cid )
__field(u32, call_id )
+ __field_struct(struct sockaddr_rxrpc, srx )
),
TP_fast_assign(
@@ -1359,33 +1563,42 @@ TRACE_EVENT(rxrpc_connect_call,
__entry->user_call_ID = call->user_call_ID;
__entry->cid = call->cid;
__entry->call_id = call->call_id;
+ __entry->srx = call->dest_srx;
),
- TP_printk("c=%08x u=%p %08x:%08x",
+ TP_printk("c=%08x u=%p %08x:%08x dst=%pISp",
__entry->call,
(void *)__entry->user_call_ID,
__entry->cid,
- __entry->call_id)
+ __entry->call_id,
+ &__entry->srx.transport)
);
TRACE_EVENT(rxrpc_resend,
- TP_PROTO(struct rxrpc_call *call),
+ TP_PROTO(struct rxrpc_call *call, struct sk_buff *ack),
- TP_ARGS(call),
+ TP_ARGS(call, ack),
TP_STRUCT__entry(
__field(unsigned int, call )
__field(rxrpc_seq_t, seq )
+ __field(rxrpc_seq_t, transmitted )
+ __field(rxrpc_serial_t, ack_serial )
),
TP_fast_assign(
+ struct rxrpc_skb_priv *sp = ack ? rxrpc_skb(ack) : NULL;
__entry->call = call->debug_id;
__entry->seq = call->acks_hard_ack;
+ __entry->transmitted = call->tx_transmitted;
+ __entry->ack_serial = sp ? sp->hdr.serial : 0;
),
- TP_printk("c=%08x q=%x",
+ TP_printk("c=%08x r=%x q=%x tq=%x",
__entry->call,
- __entry->seq)
+ __entry->ack_serial,
+ __entry->seq,
+ __entry->transmitted)
);
TRACE_EVENT(rxrpc_rx_icmp,
@@ -1586,6 +1799,47 @@ TRACE_EVENT(rxrpc_txbuf,
__entry->ref)
);
+TRACE_EVENT(rxrpc_poke_call,
+ TP_PROTO(struct rxrpc_call *call, bool busy,
+ enum rxrpc_call_poke_trace what),
+
+ TP_ARGS(call, busy, what),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_debug_id )
+ __field(bool, busy )
+ __field(enum rxrpc_call_poke_trace, what )
+ ),
+
+ TP_fast_assign(
+ __entry->call_debug_id = call->debug_id;
+ __entry->busy = busy;
+ __entry->what = what;
+ ),
+
+ TP_printk("c=%08x %s%s",
+ __entry->call_debug_id,
+ __print_symbolic(__entry->what, rxrpc_call_poke_traces),
+ __entry->busy ? "!" : "")
+ );
+
+TRACE_EVENT(rxrpc_call_poked,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call_debug_id )
+ ),
+
+ TP_fast_assign(
+ __entry->call_debug_id = call->debug_id;
+ ),
+
+ TP_printk("c=%08x",
+ __entry->call_debug_id)
+ );
+
#undef EM
#undef E_
#endif /* _TRACE_RXRPC_H */
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 70191d96af89..3782d4219ac9 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -658,11 +658,24 @@ enum devlink_resource_unit {
DEVLINK_RESOURCE_UNIT_ENTRY,
};
+enum devlink_port_fn_attr_cap {
+ DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT,
+ DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT,
+
+ /* Add new caps above */
+ __DEVLINK_PORT_FN_ATTR_CAPS_MAX,
+};
+
+#define DEVLINK_PORT_FN_CAP_ROCE _BITUL(DEVLINK_PORT_FN_ATTR_CAP_ROCE_BIT)
+#define DEVLINK_PORT_FN_CAP_MIGRATABLE \
+ _BITUL(DEVLINK_PORT_FN_ATTR_CAP_MIGRATABLE_BIT)
+
enum devlink_port_function_attr {
DEVLINK_PORT_FUNCTION_ATTR_UNSPEC,
DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR, /* binary */
DEVLINK_PORT_FN_ATTR_STATE, /* u8 */
DEVLINK_PORT_FN_ATTR_OPSTATE, /* u8 */
+ DEVLINK_PORT_FN_ATTR_CAPS, /* bitfield32 */
__DEVLINK_PORT_FUNCTION_ATTR_MAX,
DEVLINK_PORT_FUNCTION_ATTR_MAX = __DEVLINK_PORT_FUNCTION_ATTR_MAX - 1
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
index aaf7c6963d61..5799a9db034e 100644
--- a/include/uapi/linux/ethtool_netlink.h
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -51,6 +51,7 @@ enum {
ETHTOOL_MSG_MODULE_SET,
ETHTOOL_MSG_PSE_GET,
ETHTOOL_MSG_PSE_SET,
+ ETHTOOL_MSG_RSS_GET,
/* add new constants above here */
__ETHTOOL_MSG_USER_CNT,
@@ -97,6 +98,7 @@ enum {
ETHTOOL_MSG_MODULE_GET_REPLY,
ETHTOOL_MSG_MODULE_NTF,
ETHTOOL_MSG_PSE_GET_REPLY,
+ ETHTOOL_MSG_RSS_GET_REPLY,
/* add new constants above here */
__ETHTOOL_MSG_KERNEL_CNT,
@@ -880,6 +882,18 @@ enum {
ETHTOOL_A_PSE_MAX = (__ETHTOOL_A_PSE_CNT - 1)
};
+enum {
+ ETHTOOL_A_RSS_UNSPEC,
+ ETHTOOL_A_RSS_HEADER,
+ ETHTOOL_A_RSS_CONTEXT, /* u32 */
+ ETHTOOL_A_RSS_HFUNC, /* u32 */
+ ETHTOOL_A_RSS_INDIR, /* binary */
+ ETHTOOL_A_RSS_HKEY, /* binary */
+
+ __ETHTOOL_A_RSS_CNT,
+ ETHTOOL_A_RSS_MAX = (__ETHTOOL_A_RSS_CNT - 1),
+};
+
/* generic netlink info */
#define ETHTOOL_GENL_NAME "ethtool"
#define ETHTOOL_GENL_VERSION 1
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 55501e5e7ac8..a2c66b3d7f0f 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -31,8 +31,9 @@ enum {
SOF_TIMESTAMPING_OPT_PKTINFO = (1<<13),
SOF_TIMESTAMPING_OPT_TX_SWHW = (1<<14),
SOF_TIMESTAMPING_BIND_PHC = (1 << 15),
+ SOF_TIMESTAMPING_OPT_ID_TCP = (1 << 16),
- SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_BIND_PHC,
+ SOF_TIMESTAMPING_LAST = SOF_TIMESTAMPING_OPT_ID_TCP,
SOF_TIMESTAMPING_MASK = (SOF_TIMESTAMPING_LAST - 1) |
SOF_TIMESTAMPING_LAST
};
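
SOF_TIMESTAMPING_OPT_ID_TCP refines how SOF_TIMESTAMPING_OPT_ID derives its identifier on TCP sockets; a user-space sketch of enabling it together with the base options (error handling omitted):

	unsigned int val = SOF_TIMESTAMPING_TX_SOFTWARE |
			   SOF_TIMESTAMPING_OPT_ID |
			   SOF_TIMESTAMPING_OPT_ID_TCP;

	setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));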
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 94066f87e9ee..c5d62ee82567 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -277,11 +277,25 @@ enum ovs_vport_attr {
OVS_VPORT_ATTR_PAD,
OVS_VPORT_ATTR_IFINDEX,
OVS_VPORT_ATTR_NETNSID,
+ OVS_VPORT_ATTR_UPCALL_STATS,
__OVS_VPORT_ATTR_MAX
};
#define OVS_VPORT_ATTR_MAX (__OVS_VPORT_ATTR_MAX - 1)
+/**
+ * enum ovs_vport_upcall_attr - attributes for %OVS_VPORT_UPCALL* commands
+ * @OVS_VPORT_UPCALL_ATTR_SUCCESS: 64-bit count of packets whose upcall succeeded.
+ * @OVS_VPORT_UPCALL_ATTR_FAIL: 64-bit count of packets whose upcall failed.
+ */
+enum ovs_vport_upcall_attr {
+ OVS_VPORT_UPCALL_ATTR_SUCCESS,
+ OVS_VPORT_UPCALL_ATTR_FAIL,
+ __OVS_VPORT_UPCALL_ATTR_MAX
+};
+
+#define OVS_VPORT_UPCALL_ATTR_MAX (__OVS_VPORT_UPCALL_ATTR_MAX - 1)
+
enum {
OVS_VXLAN_EXT_UNSPEC,
OVS_VXLAN_EXT_GBP, /* Flag or __u32 */