author | Jakub Kicinski <kuba@kernel.org> | 2022-07-29 21:39:06 -0700
committer | Jakub Kicinski <kuba@kernel.org> | 2022-07-29 21:39:07 -0700
commit | 63757225a93353bc2ce4499af5501eabdbbf23f9 (patch)
tree | 8c7e565333e6ad7cdfb5e5be2a54d861c78fbd96
parent | 84a8d931ab213250932959c4f929a7e7e670b8a7 (diff)
parent | 069448b2fd0a40b5252915dc10ae561c4b5c8c5a (diff)
Merge tag 'mlx5-updates-2022-07-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux
Saeed Mahameed says:
====================
mlx5-updates-2022-07-28
Misc updates to mlx5 driver:
1) Gal fixes a wrong use of skb_tcp_all_headers() on encapsulated skbs.
2) Roi adds support for offloading standalone police actions.
3) Lama did some refactoring to minimize code coupling with the
mlx5e_priv "god object" in some of the flows, and converted some of the
objects to pointers to save memory when those objects aren't needed
(a brief sketch of this pattern follows the diffstat below).
This is part one of a two-part series.
* tag 'mlx5-updates-2022-07-28' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
net/mlx5e: Move mlx5e_init_l2_addr to en_main
net/mlx5e: Split en_fs ndo's and move to en_main
net/mlx5e: Separate mlx5e_set_rx_mode_work and move caller to en_main
net/mlx5e: Add mdev to flow_steering struct
net/mlx5e: Report flow steering errors with mdev err report API
net/mlx5e: Convert mlx5e_flow_steering member of mlx5e_priv to pointer
net/mlx5e: Allocate VLAN and TC for featured profiles only
net/mlx5e: Make mlx5e_tc_table private
net/mlx5e: Convert mlx5e_tc_table member of mlx5e_flow_steering to pointer
net/mlx5e: TC, Support tc action api for police
net/mlx5e: TC, Separate get/update/replace meter functions
net/mlx5e: Add red and green counters for metering
net/mlx5e: TC, Allocate post meter ft per rule
net/mlx5: DR, Add support for flow metering ASO
net/mlx5e: Fix wrong use of skb_tcp_all_headers() with encapsulation
====================
Link: https://lore.kernel.org/r/20220728205728.143074-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
30 files changed, 1265 insertions, 511 deletions
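
Item 3 above is the core of this series: struct mlx5e_priv now holds a
struct mlx5e_flow_steering *fs pointer that is returned by
mlx5e_fs_init(profile, mdev, state_destroy) and released with
mlx5e_fs_cleanup(), and the VLAN and TC tables inside it are allocated only
for profiles that declare the new MLX5E_PROFILE_FEATURE_FS_VLAN /
MLX5E_PROFILE_FEATURE_FS_TC features. The following self-contained C sketch
only illustrates that allocation pattern in miniature; every name in it
(dev_priv, flow_steering, fs_init, fs_cleanup, the FEATURE_* flags) is a
hypothetical stand-in, not mlx5 code.

/* Hypothetical illustration of the "embedded member -> pointer" refactor
 * described in item 3 of the summary; none of these names belong to mlx5. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct vlan_table { unsigned long active[64]; }; /* stand-in for VLAN state */
struct tc_table   { int n_rules; };              /* stand-in for TC state   */

/* The flow-steering context is a separately allocated object ... */
struct flow_steering {
	bool state_destroy;
	struct vlan_table *vlan; /* allocated only when the profile needs it */
	struct tc_table *tc;     /* likewise a pointer, may stay NULL        */
};

/* ... and the "god object" keeps only a pointer to it. */
struct dev_priv {
	struct flow_steering *fs;
};

enum profile_feature {
	FEATURE_FS_VLAN = 1 << 0,
	FEATURE_FS_TC   = 1 << 1,
};

static struct flow_steering *fs_init(unsigned int features, bool state_destroy)
{
	struct flow_steering *fs = calloc(1, sizeof(*fs));

	if (!fs)
		return NULL;
	fs->state_destroy = state_destroy;
	if (features & FEATURE_FS_VLAN) {
		fs->vlan = calloc(1, sizeof(*fs->vlan));
		if (!fs->vlan)
			goto err_vlan;
	}
	if (features & FEATURE_FS_TC) {
		fs->tc = calloc(1, sizeof(*fs->tc));
		if (!fs->tc)
			goto err_tc;
	}
	return fs;

err_tc:
	free(fs->vlan);
err_vlan:
	free(fs);
	return NULL;
}

static void fs_cleanup(struct flow_steering *fs)
{
	if (!fs)
		return;
	free(fs->tc);
	free(fs->vlan);
	free(fs);
}

int main(void)
{
	struct dev_priv priv = { 0 };

	/* A profile that does not use TC simply skips that allocation. */
	priv.fs = fs_init(FEATURE_FS_VLAN, true);
	if (!priv.fs)
		return 1;

	/* Callers now go through priv.fs-> instead of an embedded priv.fs. member. */
	printf("vlan table %s, tc table %s\n",
	       priv.fs->vlan ? "allocated" : "absent",
	       priv.fs->tc ? "allocated" : "absent");

	fs_cleanup(priv.fs);
	priv.fs = NULL;
	return 0;
}

In the diff below the same idea shows up as the en.h change from
"struct mlx5e_flow_steering fs" to "struct mlx5e_flow_steering *fs" and the
wholesale priv->fs. to priv->fs-> conversions.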
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b07228f69b91..bea4a1329474 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -921,7 +921,7 @@ struct mlx5e_priv { struct mlx5e_rx_res *rx_res; u32 *tx_rates; - struct mlx5e_flow_steering fs; + struct mlx5e_flow_steering *fs; struct workqueue_struct *wq; struct work_struct update_carrier_work; @@ -987,6 +987,8 @@ enum mlx5e_profile_feature { MLX5E_PROFILE_FEATURE_PTP_RX, MLX5E_PROFILE_FEATURE_PTP_TX, MLX5E_PROFILE_FEATURE_QOS_HTB, + MLX5E_PROFILE_FEATURE_FS_VLAN, + MLX5E_PROFILE_FEATURE_FS_TC, }; struct mlx5e_profile { @@ -1022,7 +1024,6 @@ void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); -void mlx5e_init_l2_addr(struct mlx5e_priv *priv); int mlx5e_self_test_num(struct mlx5e_priv *priv); int mlx5e_self_test_fill_strings(struct mlx5e_priv *priv, u8 *data); void mlx5e_self_test(struct net_device *ndev, struct ethtool_test *etest, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 6e3a90a959e9..9b8cdf2e68ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -15,29 +15,6 @@ enum { MLX5E_TC_MISS_LEVEL, }; -struct mlx5e_tc_table { - /* Protects the dynamic assignment of the t parameter - * which is the nic tc root table. - */ - struct mutex t_lock; - struct mlx5_flow_table *t; - struct mlx5_flow_table *miss_t; - struct mlx5_fs_chains *chains; - struct mlx5e_post_act *post_act; - - struct rhashtable ht; - - struct mod_hdr_tbl mod_hdr; - struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */ - DECLARE_HASHTABLE(hairpin_tbl, 8); - - struct notifier_block netdevice_nb; - struct netdev_net_notifier netdevice_nn; - - struct mlx5_tc_ct_priv *ct; - struct mapping_ctx *mapping; -}; - struct mlx5e_flow_table { int num_groups; struct mlx5_flow_table *t; @@ -160,16 +137,20 @@ static inline int mlx5e_arfs_disable(struct mlx5e_priv *priv) { return -EOPNOTSU struct mlx5e_accel_fs_tcp; #endif +struct mlx5e_profile; struct mlx5e_fs_udp; struct mlx5e_fs_any; struct mlx5e_ptp_fs; struct mlx5e_flow_steering { + bool state_destroy; + bool vlan_strip_disable; + struct mlx5_core_dev *mdev; struct mlx5_flow_namespace *ns; #ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_steering ethtool; #endif - struct mlx5e_tc_table tc; + struct mlx5e_tc_table *tc; struct mlx5e_promisc_table promisc; struct mlx5e_vlan_table *vlan; struct mlx5e_l2_table l2; @@ -200,13 +181,22 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); -int mlx5e_fs_init(struct mlx5e_priv *priv); -void mlx5e_fs_cleanup(struct mlx5e_priv *priv); +struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile, + struct mlx5_core_dev *mdev, + bool state_destroy); +void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs); int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num); void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv); int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num); void mlx5e_remove_mac_trap(struct mlx5e_priv *priv); - +void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering 
*fs, struct net_device *netdev); +int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs, + struct net_device *netdev, + __be16 proto, u16 vid); +int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs, + struct net_device *netdev, + __be16 proto, u16 vid); +void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev); #endif /* __MLX5E_FLOW_STEER_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c index 7aa25a5e29d7..e153d6119e02 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c @@ -94,7 +94,7 @@ mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv, if (!spec) return ERR_PTR(-ENOMEM); - fs_udp = priv->fs.udp; + fs_udp = priv->fs->udp; ft = fs_udp->tables[type].t; fs_udp_set_dport_flow(spec, type, d_port); @@ -121,10 +121,10 @@ static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type typ struct mlx5e_fs_udp *fs_udp; int err; - fs_udp = priv->fs.udp; + fs_udp = priv->fs->udp; fs_udp_t = &fs_udp->tables[type]; - dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type)); + dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_udp2tt(type)); rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -208,7 +208,7 @@ out: static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type) { - struct mlx5e_flow_table *ft = &priv->fs.udp->tables[type]; + struct mlx5e_flow_table *ft = &priv->fs->udp->tables[type]; struct mlx5_flow_table_attr ft_attr = {}; int err; @@ -218,7 +218,7 @@ static int fs_udp_create_table(struct mlx5e_priv *priv, enum fs_udp_type type) ft_attr.level = MLX5E_FS_TT_UDP_FT_LEVEL; ft_attr.prio = MLX5E_NIC_PRIO; - ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); + ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_priv *priv) for (i = 0; i < FS_UDP_NUM_TYPES; i++) { /* Modify ttc rules destination to point back to the indir TIRs */ - err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i)); + err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_udp2tt(i)); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", @@ -278,10 +278,10 @@ static int fs_udp_enable(struct mlx5e_priv *priv) dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; for (i = 0; i < FS_UDP_NUM_TYPES; i++) { - dest.ft = priv->fs.udp->tables[i].t; + dest.ft = priv->fs->udp->tables[i].t; /* Modify ttc rules destination to point on the accel_fs FTs */ - err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest); + err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_udp2tt(i), &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] destination to accel failed, err(%d)\n", @@ -294,7 +294,7 @@ static int fs_udp_enable(struct mlx5e_priv *priv) void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv) { - struct mlx5e_fs_udp *fs_udp = priv->fs.udp; + struct mlx5e_fs_udp *fs_udp = priv->fs->udp; int i; if (!fs_udp) @@ -309,20 +309,20 @@ void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv) fs_udp_destroy_table(fs_udp, i); kfree(fs_udp); - priv->fs.udp = NULL; + priv->fs->udp = NULL; } int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv) { int i, err; - if (priv->fs.udp) { - priv->fs.udp->ref_cnt++; + if (priv->fs->udp) { + 
priv->fs->udp->ref_cnt++; return 0; } - priv->fs.udp = kzalloc(sizeof(*priv->fs.udp), GFP_KERNEL); - if (!priv->fs.udp) + priv->fs->udp = kzalloc(sizeof(*priv->fs->udp), GFP_KERNEL); + if (!priv->fs->udp) return -ENOMEM; for (i = 0; i < FS_UDP_NUM_TYPES; i++) { @@ -335,16 +335,16 @@ int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv) if (err) goto err_destroy_tables; - priv->fs.udp->ref_cnt = 1; + priv->fs->udp->ref_cnt = 1; return 0; err_destroy_tables: while (--i >= 0) - fs_udp_destroy_table(priv->fs.udp, i); + fs_udp_destroy_table(priv->fs->udp, i); - kfree(priv->fs.udp); - priv->fs.udp = NULL; + kfree(priv->fs->udp); + priv->fs->udp = NULL; return err; } @@ -371,7 +371,7 @@ mlx5e_fs_tt_redirect_any_add_rule(struct mlx5e_priv *priv, if (!spec) return ERR_PTR(-ENOMEM); - fs_any = priv->fs.any; + fs_any = priv->fs->any; ft = fs_any->table.t; fs_any_set_ethertype_flow(spec, ether_type); @@ -398,10 +398,10 @@ static int fs_any_add_default_rule(struct mlx5e_priv *priv) struct mlx5e_fs_any *fs_any; int err; - fs_any = priv->fs.any; + fs_any = priv->fs->any; fs_any_t = &fs_any->table; - dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY); + dest = mlx5_ttc_get_default_dest(priv->fs->ttc, MLX5_TT_ANY); rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -474,7 +474,7 @@ err: static int fs_any_create_table(struct mlx5e_priv *priv) { - struct mlx5e_flow_table *ft = &priv->fs.any->table; + struct mlx5e_flow_table *ft = &priv->fs->any->table; struct mlx5_flow_table_attr ft_attr = {}; int err; @@ -484,7 +484,7 @@ static int fs_any_create_table(struct mlx5e_priv *priv) ft_attr.level = MLX5E_FS_TT_ANY_FT_LEVEL; ft_attr.prio = MLX5E_NIC_PRIO; - ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); + ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -514,7 +514,7 @@ static int fs_any_disable(struct mlx5e_priv *priv) int err; /* Modify ttc rules destination to point back to the indir TIRs */ - err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY); + err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, MLX5_TT_ANY); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", @@ -530,10 +530,10 @@ static int fs_any_enable(struct mlx5e_priv *priv) int err; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fs.any->table.t; + dest.ft = priv->fs->any->table.t; /* Modify ttc rules destination to point on the accel_fs FTs */ - err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest); + err = mlx5_ttc_fwd_dest(priv->fs->ttc, MLX5_TT_ANY, &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] destination to accel failed, err(%d)\n", @@ -555,7 +555,7 @@ static void fs_any_destroy_table(struct mlx5e_fs_any *fs_any) void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv) { - struct mlx5e_fs_any *fs_any = priv->fs.any; + struct mlx5e_fs_any *fs_any = priv->fs->any; if (!fs_any) return; @@ -568,20 +568,20 @@ void mlx5e_fs_tt_redirect_any_destroy(struct mlx5e_priv *priv) fs_any_destroy_table(fs_any); kfree(fs_any); - priv->fs.any = NULL; + priv->fs->any = NULL; } int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv) { int err; - if (priv->fs.any) { - priv->fs.any->ref_cnt++; + if (priv->fs->any) { + priv->fs->any->ref_cnt++; return 0; } - priv->fs.any = kzalloc(sizeof(*priv->fs.any), GFP_KERNEL); - if (!priv->fs.any) + priv->fs->any = kzalloc(sizeof(*priv->fs->any), GFP_KERNEL); + if (!priv->fs->any) 
return -ENOMEM; err = fs_any_create_table(priv); @@ -592,14 +592,14 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_priv *priv) if (err) goto err_destroy_table; - priv->fs.any->ref_cnt = 1; + priv->fs->any->ref_cnt = 1; return 0; err_destroy_table: - fs_any_destroy_table(priv->fs.any); + fs_any_destroy_table(priv->fs->any); - kfree(priv->fs.any); - priv->fs.any = NULL; + kfree(priv->fs->any); + priv->fs->any = NULL; return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index 78ad96cf4222..903de88bab53 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -624,7 +624,7 @@ static int mlx5e_ptp_set_state(struct mlx5e_ptp *c, struct mlx5e_params *params) static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv) { - struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs; + struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs; if (!ptp_fs->valid) return; @@ -641,7 +641,7 @@ static void mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv) static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv) { u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res); - struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs; + struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs; struct mlx5_flow_handle *rule; int err; @@ -808,13 +808,13 @@ int mlx5e_ptp_alloc_rx_fs(struct mlx5e_priv *priv) if (!ptp_fs) return -ENOMEM; - priv->fs.ptp_fs = ptp_fs; + priv->fs->ptp_fs = ptp_fs; return 0; } void mlx5e_ptp_free_rx_fs(struct mlx5e_priv *priv) { - struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs; + struct mlx5e_ptp_fs *ptp_fs = priv->fs->ptp_fs; if (!mlx5e_profile_feature_cap(priv->profile, PTP_RX)) return; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 86fa0bdbee36..fac7e3ff2674 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -21,6 +21,7 @@ #include "en/tc/sample.h" #include "en_accel/ipsec_rxtx.h" #include "en/tc/int_port.h" +#include "en/tc/act/act.h" struct mlx5e_rep_indr_block_priv { struct net_device *netdev; @@ -511,6 +512,120 @@ mlx5e_rep_indr_setup_block(struct net_device *netdev, struct Qdisc *sch, return 0; } +static int +mlx5e_rep_indr_replace_act(struct mlx5e_rep_priv *rpriv, + struct flow_offload_action *fl_act) + +{ + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + enum mlx5_flow_namespace_type ns_type; + struct flow_action_entry *action; + struct mlx5e_tc_act *act; + bool add = false; + int i; + + /* There is no use case currently for more than one action (e.g. pedit). + * when there will be, need to handle cleaning multiple actions on err. + */ + if (!flow_offload_has_one_action(&fl_act->action)) + return -EOPNOTSUPP; + + if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS) + ns_type = MLX5_FLOW_NAMESPACE_FDB; + else + ns_type = MLX5_FLOW_NAMESPACE_KERNEL; + + flow_action_for_each(i, action, &fl_act->action) { + act = mlx5e_tc_act_get(action->id, ns_type); + if (!act) + continue; + + if (!act->offload_action) + continue; + + if (!act->offload_action(priv, fl_act, action)) + add = true; + } + + return add ? 
0 : -EOPNOTSUPP; +} + +static int +mlx5e_rep_indr_destroy_act(struct mlx5e_rep_priv *rpriv, + struct flow_offload_action *fl_act) +{ + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + enum mlx5_flow_namespace_type ns_type; + struct mlx5e_tc_act *act; + + if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS) + ns_type = MLX5_FLOW_NAMESPACE_FDB; + else + ns_type = MLX5_FLOW_NAMESPACE_KERNEL; + + act = mlx5e_tc_act_get(fl_act->id, ns_type); + if (!act || !act->destroy_action) + return -EOPNOTSUPP; + + return act->destroy_action(priv, fl_act); +} + +static int +mlx5e_rep_indr_stats_act(struct mlx5e_rep_priv *rpriv, + struct flow_offload_action *fl_act) + +{ + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + enum mlx5_flow_namespace_type ns_type; + struct mlx5e_tc_act *act; + + if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS) + ns_type = MLX5_FLOW_NAMESPACE_FDB; + else + ns_type = MLX5_FLOW_NAMESPACE_KERNEL; + + act = mlx5e_tc_act_get(fl_act->id, ns_type); + if (!act || !act->stats_action) + return -EOPNOTSUPP; + + return act->stats_action(priv, fl_act); +} + +static int +mlx5e_rep_indr_setup_act(struct mlx5e_rep_priv *rpriv, + struct flow_offload_action *fl_act) +{ + switch (fl_act->command) { + case FLOW_ACT_REPLACE: + return mlx5e_rep_indr_replace_act(rpriv, fl_act); + case FLOW_ACT_DESTROY: + return mlx5e_rep_indr_destroy_act(rpriv, fl_act); + case FLOW_ACT_STATS: + return mlx5e_rep_indr_stats_act(rpriv, fl_act); + default: + return -EOPNOTSUPP; + } +} + +static int +mlx5e_rep_indr_no_dev_setup(struct mlx5e_rep_priv *rpriv, + enum tc_setup_type type, + void *data) +{ + if (!data) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_ACT: + return mlx5e_rep_indr_setup_act(rpriv, data); + default: + return -EOPNOTSUPP; + } +} + static int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void *cb_priv, enum tc_setup_type type, void *type_data, @@ -518,7 +633,7 @@ int mlx5e_rep_indr_setup_cb(struct net_device *netdev, struct Qdisc *sch, void * void (*cleanup)(struct flow_block_cb *block_cb)) { if (!netdev) - return -EOPNOTSUPP; + return mlx5e_rep_indr_no_dev_setup(cb_priv, type, data); switch (type) { case TC_SETUP_BLOCK: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h index 095ff8ef80e2..e1570ff056ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.h @@ -50,6 +50,16 @@ struct mlx5e_tc_act { bool (*is_multi_table_act)(struct mlx5e_priv *priv, const struct flow_action_entry *act, struct mlx5_flow_attr *attr); + + int (*offload_action)(struct mlx5e_priv *priv, + struct flow_offload_action *fl_act, + struct flow_action_entry *act); + + int (*destroy_action)(struct mlx5e_priv *priv, + struct flow_offload_action *fl_act); + + int (*stats_action)(struct mlx5e_priv *priv, + struct flow_offload_action *fl_act); }; struct mlx5e_tc_flow_action { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c index 4726bcb46eec..69949ab830b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/goto.c @@ -21,7 +21,7 @@ validate_goto_chain(struct mlx5e_priv *priv, u32 max_chain; esw = priv->mdev->priv.eswitch; - chains = is_esw ? 
esw_chains(esw) : mlx5e_nic_chains(priv); + chains = is_esw ? esw_chains(esw) : mlx5e_nic_chains(priv->fs->tc); max_chain = mlx5_chains_get_chain_range(chains); reformat_and_fwd = is_esw ? MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) : diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c index 4bd9c04a49e3..37522352e4b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c @@ -24,14 +24,9 @@ tc_act_can_offload_police(struct mlx5e_tc_act_parse_state *parse_state, } static int -tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state, - const struct flow_action_entry *act, - struct mlx5e_priv *priv, - struct mlx5_flow_attr *attr) +fill_meter_params_from_act(const struct flow_action_entry *act, + struct mlx5e_flow_meter_params *params) { - struct mlx5e_flow_meter_params *params; - - params = &attr->meter_attr.params; params->index = act->hw_index; if (act->police.rate_bytes_ps) { params->mode = MLX5_RATE_LIMIT_BPS; @@ -46,6 +41,21 @@ tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state, return -EOPNOTSUPP; } + return 0; +} + +static int +tc_act_parse_police(struct mlx5e_tc_act_parse_state *parse_state, + const struct flow_action_entry *act, + struct mlx5e_priv *priv, + struct mlx5_flow_attr *attr) +{ + int err; + + err = fill_meter_params_from_act(act, &attr->meter_attr.params); + if (err) + return err; + attr->action |= MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO; attr->exe_aso_type = MLX5_EXE_ASO_FLOW_METER; @@ -60,8 +70,84 @@ tc_act_is_multi_table_act_police(struct mlx5e_priv *priv, return true; } +static int +tc_act_police_offload(struct mlx5e_priv *priv, + struct flow_offload_action *fl_act, + struct flow_action_entry *act) +{ + struct mlx5e_flow_meter_params params = {}; + struct mlx5e_flow_meter_handle *meter; + int err = 0; + + err = fill_meter_params_from_act(act, ¶ms); + if (err) + return err; + + meter = mlx5e_tc_meter_get(priv->mdev, ¶ms); + if (IS_ERR(meter) && PTR_ERR(meter) == -ENOENT) { + meter = mlx5e_tc_meter_replace(priv->mdev, ¶ms); + } else if (!IS_ERR(meter)) { + err = mlx5e_tc_meter_update(meter, ¶ms); + mlx5e_tc_meter_put(meter); + } + + if (IS_ERR(meter)) { + NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter"); + mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index); + err = PTR_ERR(meter); + } + + return err; +} + +static int +tc_act_police_destroy(struct mlx5e_priv *priv, + struct flow_offload_action *fl_act) +{ + struct mlx5e_flow_meter_params params = {}; + struct mlx5e_flow_meter_handle *meter; + + params.index = fl_act->index; + meter = mlx5e_tc_meter_get(priv->mdev, ¶ms); + if (IS_ERR(meter)) { + NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter"); + mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index); + return PTR_ERR(meter); + } + /* first put for the get and second for cleanup */ + mlx5e_tc_meter_put(meter); + mlx5e_tc_meter_put(meter); + return 0; +} + +static int +tc_act_police_stats(struct mlx5e_priv *priv, + struct flow_offload_action *fl_act) +{ + struct mlx5e_flow_meter_params params = {}; + struct mlx5e_flow_meter_handle *meter; + u64 bytes, packets, drops, lastuse; + + params.index = fl_act->index; + meter = mlx5e_tc_meter_get(priv->mdev, ¶ms); + if (IS_ERR(meter)) { + NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter"); + mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", 
params.index); + return PTR_ERR(meter); + } + + mlx5e_tc_meter_get_stats(meter, &bytes, &packets, &drops, &lastuse); + flow_stats_update(&fl_act->stats, bytes, packets, drops, lastuse, + FLOW_ACTION_HW_STATS_DELAYED); + mlx5e_tc_meter_put(meter); + return 0; +} + struct mlx5e_tc_act mlx5e_tc_act_police = { .can_offload = tc_act_can_offload_police, .parse_action = tc_act_parse_police, .is_multi_table_act = tc_act_is_multi_table_act_police, + .offload_action = tc_act_police_offload, + .destroy_action = tc_act_police_destroy, + .stats_action = tc_act_police_stats, }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c index ca33f673396f..a53e205f4a89 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.c @@ -6,7 +6,6 @@ #include "en/tc/post_act.h" #include "meter.h" #include "en/tc_priv.h" -#include "post_meter.h" #define MLX5_START_COLOR_SHIFT 28 #define MLX5_METER_MODE_SHIFT 24 @@ -47,8 +46,6 @@ struct mlx5e_flow_meters { struct mlx5_core_dev *mdev; struct mlx5e_post_act *post_act; - - struct mlx5e_post_meter_priv *post_meter; }; static void @@ -243,6 +240,7 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters) struct mlx5_core_dev *mdev = flow_meters->mdev; struct mlx5e_flow_meter_aso_obj *meters_obj; struct mlx5e_flow_meter_handle *meter; + struct mlx5_fc *counter; int err, pos, total; u32 id; @@ -250,6 +248,20 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters) if (!meter) return ERR_PTR(-ENOMEM); + counter = mlx5_fc_create(mdev, true); + if (IS_ERR(counter)) { + err = PTR_ERR(counter); + goto err_red_counter; + } + meter->red_counter = counter; + + counter = mlx5_fc_create(mdev, true); + if (IS_ERR(counter)) { + err = PTR_ERR(counter); + goto err_green_counter; + } + meter->green_counter = counter; + meters_obj = list_first_entry_or_null(&flow_meters->partial_list, struct mlx5e_flow_meter_aso_obj, entry); @@ -295,6 +307,10 @@ __mlx5e_flow_meter_alloc(struct mlx5e_flow_meters *flow_meters) err_mem: mlx5e_flow_meter_destroy_aso_obj(mdev, id); err_create: + mlx5_fc_destroy(mdev, meter->green_counter); +err_green_counter: + mlx5_fc_destroy(mdev, meter->red_counter); +err_red_counter: kfree(meter); return ERR_PTR(err); } @@ -307,6 +323,9 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter) struct mlx5e_flow_meter_aso_obj *meters_obj; int n, pos; + mlx5_fc_destroy(mdev, meter->green_counter); + mlx5_fc_destroy(mdev, meter->red_counter); + meters_obj = meter->meters_obj; pos = (meter->obj_id - meters_obj->base_id) * 2 + meter->idx; bitmap_clear(meters_obj->meters_map, pos, 1); @@ -325,75 +344,155 @@ __mlx5e_flow_meter_free(struct mlx5e_flow_meter_handle *meter) kfree(meter); } +static struct mlx5e_flow_meter_handle * +__mlx5e_tc_meter_get(struct mlx5e_flow_meters *flow_meters, u32 index) +{ + struct mlx5e_flow_meter_handle *meter; + + hash_for_each_possible(flow_meters->hashtbl, meter, hlist, index) + if (meter->params.index == index) + goto add_ref; + + return ERR_PTR(-ENOENT); + +add_ref: + meter->refcnt++; + + return meter; +} + struct mlx5e_flow_meter_handle * mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params) { struct mlx5e_flow_meters *flow_meters; struct mlx5e_flow_meter_handle *meter; - int err; flow_meters = mlx5e_get_flow_meters(mdev); if (!flow_meters) return ERR_PTR(-EOPNOTSUPP); mutex_lock(&flow_meters->sync_lock); - hash_for_each_possible(flow_meters->hashtbl, 
meter, hlist, params->index) - if (meter->params.index == params->index) - goto add_ref; + meter = __mlx5e_tc_meter_get(flow_meters, params->index); + mutex_unlock(&flow_meters->sync_lock); - meter = __mlx5e_flow_meter_alloc(flow_meters); - if (IS_ERR(meter)) { - err = PTR_ERR(meter); - goto err_alloc; + return meter; +} + +static void +__mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter) +{ + if (--meter->refcnt == 0) { + hash_del(&meter->hlist); + __mlx5e_flow_meter_free(meter); } +} + +void +mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter) +{ + struct mlx5e_flow_meters *flow_meters = meter->flow_meters; + + mutex_lock(&flow_meters->sync_lock); + __mlx5e_tc_meter_put(meter); + mutex_unlock(&flow_meters->sync_lock); +} + +static struct mlx5e_flow_meter_handle * +mlx5e_tc_meter_alloc(struct mlx5e_flow_meters *flow_meters, + struct mlx5e_flow_meter_params *params) +{ + struct mlx5e_flow_meter_handle *meter; + + meter = __mlx5e_flow_meter_alloc(flow_meters); + if (IS_ERR(meter)) + return meter; hash_add(flow_meters->hashtbl, &meter->hlist, params->index); meter->params.index = params->index; - -add_ref: meter->refcnt++; + return meter; +} + +static int +__mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter, + struct mlx5e_flow_meter_params *params) +{ + struct mlx5_core_dev *mdev = meter->flow_meters->mdev; + int err = 0; + if (meter->params.mode != params->mode || meter->params.rate != params->rate || meter->params.burst != params->burst) { err = mlx5e_tc_meter_modify(mdev, meter, params); if (err) - goto err_update; + goto out; meter->params.mode = params->mode; meter->params.rate = params->rate; meter->params.burst = params->burst; } - mutex_unlock(&flow_meters->sync_lock); - return meter; +out: + return err; +} -err_update: - if (--meter->refcnt == 0) { - hash_del(&meter->hlist); - __mlx5e_flow_meter_free(meter); - } -err_alloc: +int +mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter, + struct mlx5e_flow_meter_params *params) +{ + struct mlx5_core_dev *mdev = meter->flow_meters->mdev; + struct mlx5e_flow_meters *flow_meters; + int err; + + flow_meters = mlx5e_get_flow_meters(mdev); + if (!flow_meters) + return -EOPNOTSUPP; + + mutex_lock(&flow_meters->sync_lock); + err = __mlx5e_tc_meter_update(meter, params); mutex_unlock(&flow_meters->sync_lock); - return ERR_PTR(err); + return err; } -void -mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter) +struct mlx5e_flow_meter_handle * +mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params) { - struct mlx5e_flow_meters *flow_meters = meter->flow_meters; + struct mlx5e_flow_meters *flow_meters; + struct mlx5e_flow_meter_handle *meter; + int err; + + flow_meters = mlx5e_get_flow_meters(mdev); + if (!flow_meters) + return ERR_PTR(-EOPNOTSUPP); mutex_lock(&flow_meters->sync_lock); - if (--meter->refcnt == 0) { - hash_del(&meter->hlist); - __mlx5e_flow_meter_free(meter); + meter = __mlx5e_tc_meter_get(flow_meters, params->index); + if (IS_ERR(meter)) { + meter = mlx5e_tc_meter_alloc(flow_meters, params); + if (IS_ERR(meter)) { + err = PTR_ERR(meter); + goto err_get; + } } + + err = __mlx5e_tc_meter_update(meter, params); + if (err) + goto err_update; + mutex_unlock(&flow_meters->sync_lock); + return meter; + +err_update: + __mlx5e_tc_meter_put(meter); +err_get: + mutex_unlock(&flow_meters->sync_lock); + return ERR_PTR(err); } -struct mlx5_flow_table * -mlx5e_tc_meter_get_post_meter_ft(struct mlx5e_flow_meters *flow_meters) +enum mlx5_flow_namespace_type 
+mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters) { - return mlx5e_post_meter_get_ft(flow_meters->post_meter); + return flow_meters->ns_type; } struct mlx5e_flow_meters * @@ -432,12 +531,6 @@ mlx5e_flow_meters_init(struct mlx5e_priv *priv, goto err_sq; } - flow_meters->post_meter = mlx5e_post_meter_init(priv, ns_type, post_act); - if (IS_ERR(flow_meters->post_meter)) { - err = PTR_ERR(flow_meters->post_meter); - goto err_post_meter; - } - mutex_init(&flow_meters->sync_lock); INIT_LIST_HEAD(&flow_meters->partial_list); INIT_LIST_HEAD(&flow_meters->full_list); @@ -451,8 +544,6 @@ mlx5e_flow_meters_init(struct mlx5e_priv *priv, return flow_meters; -err_post_meter: - mlx5_aso_destroy(flow_meters->aso); err_sq: mlx5_core_dealloc_pd(mdev, flow_meters->pdn); err_out: @@ -466,9 +557,23 @@ mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters) if (IS_ERR_OR_NULL(flow_meters)) return; - mlx5e_post_meter_cleanup(flow_meters->post_meter); mlx5_aso_destroy(flow_meters->aso); mlx5_core_dealloc_pd(flow_meters->mdev, flow_meters->pdn); - kfree(flow_meters); } + +void +mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter, + u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse) +{ + u64 bytes1, packets1, lastuse1; + u64 bytes2, packets2, lastuse2; + + mlx5_fc_query_cached(meter->green_counter, &bytes1, &packets1, &lastuse1); + mlx5_fc_query_cached(meter->red_counter, &bytes2, &packets2, &lastuse2); + + *bytes = bytes1 + bytes2; + *packets = packets1 + packets2; + *drops = packets2; + *lastuse = max_t(u64, lastuse1, lastuse2); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h index 78885db5dc7d..6de6e8a16327 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/meter.h @@ -4,6 +4,7 @@ #ifndef __MLX5_EN_FLOW_METER_H__ #define __MLX5_EN_FLOW_METER_H__ +struct mlx5e_post_meter_priv; struct mlx5e_flow_meter_aso_obj; struct mlx5e_flow_meters; struct mlx5_flow_attr; @@ -30,11 +31,15 @@ struct mlx5e_flow_meter_handle { int refcnt; struct hlist_node hlist; struct mlx5e_flow_meter_params params; + + struct mlx5_fc *green_counter; + struct mlx5_fc *red_counter; }; struct mlx5e_meter_attr { struct mlx5e_flow_meter_params params; struct mlx5e_flow_meter_handle *meter; + struct mlx5e_post_meter_priv *post_meter; }; int @@ -46,9 +51,14 @@ struct mlx5e_flow_meter_handle * mlx5e_tc_meter_get(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params); void mlx5e_tc_meter_put(struct mlx5e_flow_meter_handle *meter); +int +mlx5e_tc_meter_update(struct mlx5e_flow_meter_handle *meter, + struct mlx5e_flow_meter_params *params); +struct mlx5e_flow_meter_handle * +mlx5e_tc_meter_replace(struct mlx5_core_dev *mdev, struct mlx5e_flow_meter_params *params); -struct mlx5_flow_table * -mlx5e_tc_meter_get_post_meter_ft(struct mlx5e_flow_meters *flow_meters); +enum mlx5_flow_namespace_type +mlx5e_tc_meter_get_namespace(struct mlx5e_flow_meters *flow_meters); struct mlx5e_flow_meters * mlx5e_flow_meters_init(struct mlx5e_priv *priv, @@ -57,4 +67,8 @@ mlx5e_flow_meters_init(struct mlx5e_priv *priv, void mlx5e_flow_meters_cleanup(struct mlx5e_flow_meters *flow_meters); +void +mlx5e_tc_meter_get_stats(struct mlx5e_flow_meter_handle *meter, + u64 *bytes, u64 *packets, u64 *drops, u64 *lastuse); + #endif /* __MLX5_EN_FLOW_METER_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c 
b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c index efa20356764e..8b77e822810e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.c @@ -84,9 +84,11 @@ mlx5e_post_meter_fg_create(struct mlx5e_priv *priv, static int mlx5e_post_meter_rules_create(struct mlx5e_priv *priv, struct mlx5e_post_meter_priv *post_meter, - struct mlx5e_post_act *post_act) + struct mlx5e_post_act *post_act, + struct mlx5_fc *green_counter, + struct mlx5_fc *red_counter) { - struct mlx5_flow_destination dest = {}; + struct mlx5_flow_destination dest[2] = {}; struct mlx5_flow_act flow_act = {}; struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; @@ -98,10 +100,13 @@ mlx5e_post_meter_rules_create(struct mlx5e_priv *priv, mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG, MLX5_FLOW_METER_COLOR_RED, MLX5_PACKET_COLOR_MASK); - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[0].counter_id = mlx5_fc_id(red_counter); - rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, NULL, 0); + rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 1); if (IS_ERR(rule)) { mlx5_core_warn(priv->mdev, "Failed to create post_meter flow drop rule\n"); err = PTR_ERR(rule); @@ -111,11 +116,14 @@ mlx5e_post_meter_rules_create(struct mlx5e_priv *priv, mlx5e_tc_match_to_reg_match(spec, PACKET_COLOR_TO_REG, MLX5_FLOW_METER_COLOR_GREEN, MLX5_PACKET_COLOR_MASK); - flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = mlx5e_tc_post_act_get_ft(post_act); - - rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, &dest, 1); + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; + dest[0].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest[0].ft = mlx5e_tc_post_act_get_ft(post_act); + dest[1].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dest[1].counter_id = mlx5_fc_id(green_counter); + + rule = mlx5_add_flow_rules(post_meter->ft, spec, &flow_act, dest, 2); if (IS_ERR(rule)) { mlx5_core_warn(priv->mdev, "Failed to create post_meter flow fwd rule\n"); err = PTR_ERR(rule); @@ -155,7 +163,9 @@ mlx5e_post_meter_table_destroy(struct mlx5e_post_meter_priv *post_meter) struct mlx5e_post_meter_priv * mlx5e_post_meter_init(struct mlx5e_priv *priv, enum mlx5_flow_namespace_type ns_type, - struct mlx5e_post_act *post_act) + struct mlx5e_post_act *post_act, + struct mlx5_fc *green_counter, + struct mlx5_fc *red_counter) { struct mlx5e_post_meter_priv *post_meter; int err; @@ -172,7 +182,8 @@ mlx5e_post_meter_init(struct mlx5e_priv *priv, if (err) goto err_fg; - err = mlx5e_post_meter_rules_create(priv, post_meter, post_act); + err = mlx5e_post_meter_rules_create(priv, post_meter, post_act, green_counter, + red_counter); if (err) goto err_rules; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h index c74f3cbd810d..34d0e4b9fc7a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_meter.h @@ -20,7 +20,9 @@ mlx5e_post_meter_get_ft(struct mlx5e_post_meter_priv *post_meter); struct mlx5e_post_meter_priv * mlx5e_post_meter_init(struct mlx5e_priv *priv, enum mlx5_flow_namespace_type ns_type, - struct 
mlx5e_post_act *post_act); + struct mlx5e_post_act *post_act, + struct mlx5_fc *green_counter, + struct mlx5_fc *red_counter); void mlx5e_post_meter_cleanup(struct mlx5e_post_meter_priv *post_meter); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index d2bdfd6872bc..10c9a8a79d00 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -11,7 +11,6 @@ #define MLX5E_TC_MAX_SPLITS 1 -#define mlx5e_nic_chains(priv) ((priv)->fs.tc.chains) enum { MLX5E_TC_FLOW_FLAG_INGRESS = MLX5E_TC_FLAG_INGRESS_BIT, @@ -44,6 +43,8 @@ struct mlx5e_tc_flow_parse_attr { struct mlx5e_tc_act_parse_state parse_state; }; +struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc); + /* Helper struct for accessing a struct containing list_head array. * Containing struct * |- Helper array diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c index 3ae6067c7e6b..20a4f1e585af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c @@ -86,7 +86,7 @@ struct mlx5_flow_handle *mlx5e_accel_fs_add_sk(struct mlx5e_priv *priv, if (!spec) return ERR_PTR(-ENOMEM); - fs_tcp = priv->fs.accel_tcp; + fs_tcp = priv->fs->accel_tcp; spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; @@ -158,10 +158,10 @@ static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv, struct mlx5_flow_handle *rule; int err = 0; - fs_tcp = priv->fs.accel_tcp; + fs_tcp = priv->fs->accel_tcp; accel_fs_t = &fs_tcp->tables[type]; - dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type)); + dest = mlx5_ttc_get_default_dest(priv->fs->ttc, fs_accel2tt(type)); rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -267,7 +267,7 @@ out: static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_type type) { - struct mlx5e_flow_table *ft = &priv->fs.accel_tcp->tables[type]; + struct mlx5e_flow_table *ft = &priv->fs->accel_tcp->tables[type]; struct mlx5_flow_table_attr ft_attr = {}; int err; @@ -277,7 +277,7 @@ static int accel_fs_tcp_create_table(struct mlx5e_priv *priv, enum accel_fs_tcp_ ft_attr.level = MLX5E_ACCEL_FS_TCP_FT_LEVEL; ft_attr.prio = MLX5E_NIC_PRIO; - ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); + ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -307,7 +307,7 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv) for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) { /* Modify ttc rules destination to point back to the indir TIRs */ - err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i)); + err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_accel2tt(i)); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", @@ -326,10 +326,10 @@ static int accel_fs_tcp_enable(struct mlx5e_priv *priv) dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) { - dest.ft = priv->fs.accel_tcp->tables[i].t; + dest.ft = priv->fs->accel_tcp->tables[i].t; /* Modify ttc rules destination to point on the accel_fs FTs */ - err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest); + err = mlx5_ttc_fwd_dest(priv->fs->ttc, fs_accel2tt(i), &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] destination to accel 
failed, err(%d)\n", @@ -344,7 +344,7 @@ static void accel_fs_tcp_destroy_table(struct mlx5e_priv *priv, int i) { struct mlx5e_accel_fs_tcp *fs_tcp; - fs_tcp = priv->fs.accel_tcp; + fs_tcp = priv->fs->accel_tcp; if (IS_ERR_OR_NULL(fs_tcp->tables[i].t)) return; @@ -357,7 +357,7 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) { int i; - if (!priv->fs.accel_tcp) + if (!priv->fs->accel_tcp) return; accel_fs_tcp_disable(priv); @@ -365,8 +365,8 @@ void mlx5e_accel_fs_tcp_destroy(struct mlx5e_priv *priv) for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) accel_fs_tcp_destroy_table(priv, i); - kfree(priv->fs.accel_tcp); - priv->fs.accel_tcp = NULL; + kfree(priv->fs->accel_tcp); + priv->fs->accel_tcp = NULL; } int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) @@ -376,8 +376,8 @@ int mlx5e_accel_fs_tcp_create(struct mlx5e_priv *priv) if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version)) return -EOPNOTSUPP; - priv->fs.accel_tcp = kzalloc(sizeof(*priv->fs.accel_tcp), GFP_KERNEL); - if (!priv->fs.accel_tcp) + priv->fs->accel_tcp = kzalloc(sizeof(*priv->fs->accel_tcp), GFP_KERNEL); + if (!priv->fs->accel_tcp) return -ENOMEM; for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) { @@ -396,7 +396,7 @@ err_destroy_tables: while (--i >= 0) accel_fs_tcp_destroy_table(priv, i); - kfree(priv->fs.accel_tcp); - priv->fs.accel_tcp = NULL; + kfree(priv->fs->accel_tcp); + priv->fs->accel_tcp = NULL; return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 8315e8f603d7..f8113fd23265 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -184,13 +184,13 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type) fs_prot = &accel_esp->fs_prot[type]; fs_prot->default_dest = - mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type)); + mlx5_ttc_get_default_dest(priv->fs->ttc, fs_esp2tt(type)); ft_attr.max_fte = 1; ft_attr.autogroup.max_num_groups = 1; ft_attr.level = MLX5E_ACCEL_FS_ESP_FT_ERR_LEVEL; ft_attr.prio = MLX5E_NIC_PRIO; - ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); + ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr); if (IS_ERR(ft)) return PTR_ERR(ft); @@ -205,7 +205,7 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type) ft_attr.prio = MLX5E_NIC_PRIO; ft_attr.autogroup.num_reserved_entries = 1; ft_attr.autogroup.max_num_groups = 1; - ft = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); + ft = mlx5_create_auto_grouped_flow_table(priv->fs->ns, &ft_attr); if (IS_ERR(ft)) { err = PTR_ERR(ft); goto err_fs_ft; @@ -249,7 +249,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type) /* connect */ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = fs_prot->ft; - mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest); + mlx5_ttc_fwd_dest(priv->fs->ttc, fs_esp2tt(type), &dest); skip: fs_prot->refcnt++; @@ -271,7 +271,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type) goto out; /* disconnect */ - mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type)); + mlx5_ttc_fwd_default_dest(priv->fs->ttc, fs_esp2tt(type)); /* remove FT */ rx_destroy(priv, type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 49cca6bd49a1..cd7f245dcf14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -120,7 +120,7 @@ static int arfs_disable(struct mlx5e_priv *priv) for (i = 0; i < ARFS_NUM_TYPES; i++) { /* Modify ttc rules destination back to their default */ - err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i)); + err = mlx5_ttc_fwd_default_dest(priv->fs->ttc, arfs_get_tt(i)); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", @@ -147,9 +147,9 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv) dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; for (i = 0; i < ARFS_NUM_TYPES; i++) { - dest.ft = priv->fs.arfs->arfs_tables[i].ft.t; + dest.ft = priv->fs->arfs->arfs_tables[i].ft.t; /* Modify ttc rules destination to point on the aRFS FTs */ - err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest); + err = mlx5_ttc_fwd_dest(priv->fs->ttc, arfs_get_tt(i), &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] dest to arfs, failed err(%d)\n", @@ -172,10 +172,10 @@ static void _mlx5e_cleanup_tables(struct mlx5e_priv *priv) int i; arfs_del_rules(priv); - destroy_workqueue(priv->fs.arfs->wq); + destroy_workqueue(priv->fs->arfs->wq); for (i = 0; i < ARFS_NUM_TYPES; i++) { - if (!IS_ERR_OR_NULL(priv->fs.arfs->arfs_tables[i].ft.t)) - arfs_destroy_table(&priv->fs.arfs->arfs_tables[i]); + if (!IS_ERR_OR_NULL(priv->fs->arfs->arfs_tables[i].ft.t)) + arfs_destroy_table(&priv->fs->arfs->arfs_tables[i]); } } @@ -185,13 +185,13 @@ void mlx5e_arfs_destroy_tables(struct mlx5e_priv *priv) return; _mlx5e_cleanup_tables(priv); - kvfree(priv->fs.arfs); + kvfree(priv->fs->arfs); } static int arfs_add_default_rule(struct mlx5e_priv *priv, enum arfs_type type) { - struct arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type]; + struct arfs_table *arfs_t = &priv->fs->arfs->arfs_tables[type]; struct mlx5_flow_destination dest = {}; MLX5_DECLARE_FLOW_ACT(flow_act); enum mlx5_traffic_types tt; @@ -321,7 +321,7 @@ out: static int arfs_create_table(struct mlx5e_priv *priv, enum arfs_type type) { - struct mlx5e_arfs_tables *arfs = priv->fs.arfs; + struct mlx5e_arfs_tables *arfs = priv->fs->arfs; struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft; struct mlx5_flow_table_attr ft_attr = {}; int err; @@ -332,7 +332,7 @@ static int arfs_create_table(struct mlx5e_priv *priv, ft_attr.level = MLX5E_ARFS_FT_LEVEL; ft_attr.prio = MLX5E_NIC_PRIO; - ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); + ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -361,14 +361,14 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) if (!(priv->netdev->hw_features & NETIF_F_NTUPLE)) return 0; - priv->fs.arfs = kvzalloc(sizeof(*priv->fs.arfs), GFP_KERNEL); - if (!priv->fs.arfs) + priv->fs->arfs = kvzalloc(sizeof(*priv->fs->arfs), GFP_KERNEL); + if (!priv->fs->arfs) return -ENOMEM; - spin_lock_init(&priv->fs.arfs->arfs_lock); - INIT_LIST_HEAD(&priv->fs.arfs->rules); - priv->fs.arfs->wq = create_singlethread_workqueue("mlx5e_arfs"); - if (!priv->fs.arfs->wq) + spin_lock_init(&priv->fs->arfs->arfs_lock); + INIT_LIST_HEAD(&priv->fs->arfs->rules); + priv->fs->arfs->wq = create_singlethread_workqueue("mlx5e_arfs"); + if (!priv->fs->arfs->wq) goto err; for (i = 0; i < ARFS_NUM_TYPES; i++) { @@ -381,7 +381,7 @@ int mlx5e_arfs_create_tables(struct mlx5e_priv *priv) err_des: _mlx5e_cleanup_tables(priv); err: - kvfree(priv->fs.arfs); + kvfree(priv->fs->arfs); return err; } @@ -396,8 +396,8 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) int i; 
int j; - spin_lock_bh(&priv->fs.arfs->arfs_lock); - mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs->arfs_tables, i, j) { + spin_lock_bh(&priv->fs->arfs->arfs_lock); + mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs->arfs->arfs_tables, i, j) { if (!work_pending(&arfs_rule->arfs_work) && rps_may_expire_flow(priv->netdev, arfs_rule->rxq, arfs_rule->flow_id, @@ -408,7 +408,7 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv) break; } } - spin_unlock_bh(&priv->fs.arfs->arfs_lock); + spin_unlock_bh(&priv->fs->arfs->arfs_lock); hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) { if (arfs_rule->rule) mlx5_del_flow_rules(arfs_rule->rule); @@ -425,12 +425,12 @@ static void arfs_del_rules(struct mlx5e_priv *priv) int i; int j; - spin_lock_bh(&priv->fs.arfs->arfs_lock); - mlx5e_for_each_arfs_rule(rule, htmp, priv->fs.arfs->arfs_tables, i, j) { + spin_lock_bh(&priv->fs->arfs->arfs_lock); + mlx5e_for_each_arfs_rule(rule, htmp, priv->fs->arfs->arfs_tables, i, j) { hlist_del_init(&rule->hlist); hlist_add_head(&rule->hlist, &del_list); } - spin_unlock_bh(&priv->fs.arfs->arfs_lock); + spin_unlock_bh(&priv->fs->arfs->arfs_lock); hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) { cancel_work_sync(&rule->arfs_work); @@ -474,7 +474,7 @@ static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs, static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, struct arfs_rule *arfs_rule) { - struct mlx5e_arfs_tables *arfs = priv->fs.arfs; + struct mlx5e_arfs_tables *arfs = priv->fs->arfs; struct arfs_tuple *tuple = &arfs_rule->tuple; struct mlx5_flow_handle *rule = NULL; struct mlx5_flow_destination dest = {}; @@ -592,9 +592,9 @@ static void arfs_handle_work(struct work_struct *work) mutex_lock(&priv->state_lock); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - spin_lock_bh(&priv->fs.arfs->arfs_lock); + spin_lock_bh(&priv->fs->arfs->arfs_lock); hlist_del(&arfs_rule->hlist); - spin_unlock_bh(&priv->fs.arfs->arfs_lock); + spin_unlock_bh(&priv->fs->arfs->arfs_lock); mutex_unlock(&priv->state_lock); kfree(arfs_rule); @@ -647,7 +647,7 @@ static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv, tuple->dst_port = fk->ports.dst; rule->flow_id = flow_id; - rule->filter_id = priv->fs.arfs->last_filter_id++ % RPS_NO_FILTER; + rule->filter_id = priv->fs->arfs->last_filter_id++ % RPS_NO_FILTER; hlist_add_head(&rule->hlist, arfs_hash_bucket(arfs_t, tuple->src_port, @@ -691,7 +691,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_arfs_tables *arfs = priv->fs.arfs; + struct mlx5e_arfs_tables *arfs = priv->fs->arfs; struct arfs_table *arfs_t; struct arfs_rule *arfs_rule; struct flow_keys fk; @@ -725,7 +725,7 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, return -ENOMEM; } } - queue_work(priv->fs.arfs->wq, &arfs_rule->arfs_work); + queue_work(priv->fs->arfs->wq, &arfs_rule->arfs_work); spin_unlock_bh(&arfs->arfs_lock); return arfs_rule->filter_id; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index d2f0773f95c6..e2a9b9be5c1f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -37,13 +37,13 @@ #include <linux/mlx5/fs.h> #include <linux/mlx5/mpfs.h> #include "en.h" -#include "en_rep.h" +#include "en_tc.h" #include "lib/mpfs.h" #include "en/ptp.h" -static int 
mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, +static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs, struct mlx5e_l2_rule *ai, int type); -static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, +static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs, struct mlx5e_l2_rule *ai); enum { @@ -132,9 +132,8 @@ struct mlx5_flow_table *mlx5e_vlan_get_flowtable(struct mlx5e_vlan_table *vlan) return vlan->ft.t; } -static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) +static int mlx5e_vport_context_update_vlans(struct mlx5e_flow_steering *fs) { - struct net_device *ndev = priv->netdev; int max_list_size; int list_size; u16 *vlans; @@ -143,15 +142,15 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) int i; list_size = 0; - for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) + for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID) list_size++; - max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list); + max_list_size = 1 << MLX5_CAP_GEN(fs->mdev, log_max_vlan_list); if (list_size > max_list_size) { - netdev_warn(ndev, - "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n", - list_size, max_list_size); + mlx5_core_warn(fs->mdev, + "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n", + list_size, max_list_size); list_size = max_list_size; } @@ -160,16 +159,16 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv) return -ENOMEM; i = 0; - for_each_set_bit(vlan, priv->fs.vlan->active_cvlans, VLAN_N_VID) { + for_each_set_bit(vlan, fs->vlan->active_cvlans, VLAN_N_VID) { if (i >= list_size) break; vlans[i++] = vlan; } - err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size); + err = mlx5_modify_nic_vport_vlans(fs->mdev, vlans, list_size); if (err) - netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n", - err); + mlx5_core_err(fs->mdev, "Failed to modify vport vlans list err(%d)\n", + err); kvfree(vlans); return err; @@ -183,18 +182,18 @@ enum mlx5e_vlan_rule_type { MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, }; -static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, +static int __mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs, enum mlx5e_vlan_rule_type rule_type, u16 vid, struct mlx5_flow_spec *spec) { - struct mlx5_flow_table *ft = priv->fs.vlan->ft.t; + struct mlx5_flow_table *ft = fs->vlan->ft.t; struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle **rule_p; MLX5_DECLARE_FLOW_ACT(flow_act); int err = 0; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fs.l2.ft.t; + dest.ft = fs->l2.ft.t; spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; @@ -204,24 +203,24 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, * disabled in match value means both S & C tags * don't exist (untagged of both) */ - rule_p = &priv->fs.vlan->untagged_rule; + rule_p = &fs->vlan->untagged_rule; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); break; case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: - rule_p = &priv->fs.vlan->any_cvlan_rule; + rule_p = &fs->vlan->any_cvlan_rule; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); break; case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: - rule_p = &priv->fs.vlan->any_svlan_rule; + rule_p = &fs->vlan->any_svlan_rule; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.svlan_tag); MLX5_SET(fte_match_param, 
spec->match_value, outer_headers.svlan_tag, 1); break; case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: - rule_p = &priv->fs.vlan->active_svlans_rule[vid]; + rule_p = &fs->vlan->active_svlans_rule[vid]; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.svlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1); @@ -231,7 +230,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, vid); break; default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */ - rule_p = &priv->fs.vlan->active_cvlans_rule[vid]; + rule_p = &fs->vlan->active_cvlans_rule[vid]; MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1); @@ -250,13 +249,13 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv, if (IS_ERR(*rule_p)) { err = PTR_ERR(*rule_p); *rule_p = NULL; - netdev_err(priv->netdev, "%s: add rule failed\n", __func__); + mlx5_core_err(fs->mdev, "%s: add rule failed\n", __func__); } return err; } -static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, +static int mlx5e_add_vlan_rule(struct mlx5e_flow_steering *fs, enum mlx5e_vlan_rule_type rule_type, u16 vid) { struct mlx5_flow_spec *spec; @@ -267,68 +266,68 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv, return -ENOMEM; if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID) - mlx5e_vport_context_update_vlans(priv); + mlx5e_vport_context_update_vlans(fs); - err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec); + err = __mlx5e_add_vlan_rule(fs, rule_type, vid, spec); kvfree(spec); return err; } -static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv, - enum mlx5e_vlan_rule_type rule_type, u16 vid) +static void mlx5e_fs_del_vlan_rule(struct mlx5e_flow_steering *fs, + enum mlx5e_vlan_rule_type rule_type, u16 vid) { switch (rule_type) { case MLX5E_VLAN_RULE_TYPE_UNTAGGED: - if (priv->fs.vlan->untagged_rule) { - mlx5_del_flow_rules(priv->fs.vlan->untagged_rule); - priv->fs.vlan->untagged_rule = NULL; + if (fs->vlan->untagged_rule) { + mlx5_del_flow_rules(fs->vlan->untagged_rule); + fs->vlan->untagged_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID: - if (priv->fs.vlan->any_cvlan_rule) { - mlx5_del_flow_rules(priv->fs.vlan->any_cvlan_rule); - priv->fs.vlan->any_cvlan_rule = NULL; + if (fs->vlan->any_cvlan_rule) { + mlx5_del_flow_rules(fs->vlan->any_cvlan_rule); + fs->vlan->any_cvlan_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID: - if (priv->fs.vlan->any_svlan_rule) { - mlx5_del_flow_rules(priv->fs.vlan->any_svlan_rule); - priv->fs.vlan->any_svlan_rule = NULL; + if (fs->vlan->any_svlan_rule) { + mlx5_del_flow_rules(fs->vlan->any_svlan_rule); + fs->vlan->any_svlan_rule = NULL; } break; case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID: - if (priv->fs.vlan->active_svlans_rule[vid]) { - mlx5_del_flow_rules(priv->fs.vlan->active_svlans_rule[vid]); - priv->fs.vlan->active_svlans_rule[vid] = NULL; + if (fs->vlan->active_svlans_rule[vid]) { + mlx5_del_flow_rules(fs->vlan->active_svlans_rule[vid]); + fs->vlan->active_svlans_rule[vid] = NULL; } break; case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID: - if (priv->fs.vlan->active_cvlans_rule[vid]) { - mlx5_del_flow_rules(priv->fs.vlan->active_cvlans_rule[vid]); - priv->fs.vlan->active_cvlans_rule[vid] = NULL; + if (fs->vlan->active_cvlans_rule[vid]) { + mlx5_del_flow_rules(fs->vlan->active_cvlans_rule[vid]); + fs->vlan->active_cvlans_rule[vid] = NULL; } - mlx5e_vport_context_update_vlans(priv); + mlx5e_vport_context_update_vlans(fs); break; } } -static void 
mlx5e_del_any_vid_rules(struct mlx5e_priv *priv) +static void mlx5e_fs_del_any_vid_rules(struct mlx5e_flow_steering *fs) { - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); + mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); } -static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv) +static int mlx5e_fs_add_any_vid_rules(struct mlx5e_flow_steering *fs) { int err; - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); if (err) return err; - return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); + return mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0); } static struct mlx5_flow_handle * @@ -354,101 +353,101 @@ mlx5e_add_trap_rule(struct mlx5_flow_table *ft, int trap_id, int tir_num) int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num) { - struct mlx5_flow_table *ft = priv->fs.vlan->ft.t; + struct mlx5_flow_table *ft = priv->fs->vlan->ft.t; struct mlx5_flow_handle *rule; int err; rule = mlx5e_add_trap_rule(ft, trap_id, tir_num); if (IS_ERR(rule)) { err = PTR_ERR(rule); - priv->fs.vlan->trap_rule = NULL; - netdev_err(priv->netdev, "%s: add VLAN trap rule failed, err %d\n", - __func__, err); + priv->fs->vlan->trap_rule = NULL; + mlx5_core_err(priv->fs->mdev, "%s: add VLAN trap rule failed, err %d\n", + __func__, err); return err; } - priv->fs.vlan->trap_rule = rule; + priv->fs->vlan->trap_rule = rule; return 0; } void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv) { - if (priv->fs.vlan->trap_rule) { - mlx5_del_flow_rules(priv->fs.vlan->trap_rule); - priv->fs.vlan->trap_rule = NULL; + if (priv->fs->vlan->trap_rule) { + mlx5_del_flow_rules(priv->fs->vlan->trap_rule); + priv->fs->vlan->trap_rule = NULL; } } int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num) { - struct mlx5_flow_table *ft = priv->fs.l2.ft.t; + struct mlx5_flow_table *ft = priv->fs->l2.ft.t; struct mlx5_flow_handle *rule; int err; rule = mlx5e_add_trap_rule(ft, trap_id, tir_num); if (IS_ERR(rule)) { err = PTR_ERR(rule); - priv->fs.l2.trap_rule = NULL; - netdev_err(priv->netdev, "%s: add MAC trap rule failed, err %d\n", - __func__, err); + priv->fs->l2.trap_rule = NULL; + mlx5_core_err(priv->fs->mdev, "%s: add MAC trap rule failed, err %d\n", + __func__, err); return err; } - priv->fs.l2.trap_rule = rule; + priv->fs->l2.trap_rule = rule; return 0; } void mlx5e_remove_mac_trap(struct mlx5e_priv *priv) { - if (priv->fs.l2.trap_rule) { - mlx5_del_flow_rules(priv->fs.l2.trap_rule); - priv->fs.l2.trap_rule = NULL; + if (priv->fs->l2.trap_rule) { + mlx5_del_flow_rules(priv->fs->l2.trap_rule); + priv->fs->l2.trap_rule = NULL; } } void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv) { - if (!priv->fs.vlan->cvlan_filter_disabled) + if (!priv->fs->vlan->cvlan_filter_disabled) return; - priv->fs.vlan->cvlan_filter_disabled = false; + priv->fs->vlan->cvlan_filter_disabled = false; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); } void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv) { - if (priv->fs.vlan->cvlan_filter_disabled) + if (priv->fs->vlan->cvlan_filter_disabled) return; - priv->fs.vlan->cvlan_filter_disabled = true; + priv->fs->vlan->cvlan_filter_disabled = 
true; if (priv->netdev->flags & IFF_PROMISC) return; - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); + mlx5e_add_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0); } -static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid) +static int mlx5e_vlan_rx_add_cvid(struct mlx5e_flow_steering *fs, u16 vid) { int err; - set_bit(vid, priv->fs.vlan->active_cvlans); + set_bit(vid, fs->vlan->active_cvlans); - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); if (err) - clear_bit(vid, priv->fs.vlan->active_cvlans); + clear_bit(vid, fs->vlan->active_cvlans); return err; } -static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid) +static int mlx5e_vlan_rx_add_svid(struct mlx5e_flow_steering *fs, + struct net_device *netdev, u16 vid) { - struct net_device *netdev = priv->netdev; int err; - set_bit(vid, priv->fs.vlan->active_svlans); + set_bit(vid, fs->vlan->active_svlans); - err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + err = mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); if (err) { - clear_bit(vid, priv->fs.vlan->active_svlans); + clear_bit(vid, fs->vlan->active_svlans); return err; } @@ -457,86 +456,91 @@ static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid) return err; } -int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +int mlx5e_fs_vlan_rx_add_vid(struct mlx5e_flow_steering *fs, + struct net_device *netdev, + __be16 proto, u16 vid) { - struct mlx5e_priv *priv = netdev_priv(dev); - if (mlx5e_is_uplink_rep(priv)) - return 0; /* no vlan table for uplink rep */ + if (!fs->vlan) { + mlx5_core_err(fs->mdev, "Vlan doesn't exist\n"); + return -EINVAL; + } if (be16_to_cpu(proto) == ETH_P_8021Q) - return mlx5e_vlan_rx_add_cvid(priv, vid); + return mlx5e_vlan_rx_add_cvid(fs, vid); else if (be16_to_cpu(proto) == ETH_P_8021AD) - return mlx5e_vlan_rx_add_svid(priv, vid); + return mlx5e_vlan_rx_add_svid(fs, netdev, vid); return -EOPNOTSUPP; } -int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +int mlx5e_fs_vlan_rx_kill_vid(struct mlx5e_flow_steering *fs, + struct net_device *netdev, + __be16 proto, u16 vid) { - struct mlx5e_priv *priv = netdev_priv(dev); - - if (mlx5e_is_uplink_rep(priv)) - return 0; /* no vlan table for uplink rep */ + if (!fs->vlan) { + mlx5_core_err(fs->mdev, "Vlan doesn't exist\n"); + return -EINVAL; + } if (be16_to_cpu(proto) == ETH_P_8021Q) { - clear_bit(vid, priv->fs.vlan->active_cvlans); - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); + clear_bit(vid, fs->vlan->active_cvlans); + mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid); } else if (be16_to_cpu(proto) == ETH_P_8021AD) { - clear_bit(vid, priv->fs.vlan->active_svlans); - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); - netdev_update_features(dev); + clear_bit(vid, fs->vlan->active_svlans); + mlx5e_fs_del_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid); + netdev_update_features(netdev); } return 0; } -static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv) +static void mlx5e_fs_add_vlan_rules(struct mlx5e_flow_steering *fs) { int i; - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) { - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); + for_each_set_bit(i, 
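Note on the ndo split above: the fs-level mlx5e_fs_vlan_rx_add_vid() / mlx5e_fs_vlan_rx_kill_vid() helpers now fail with -EINVAL (reported through mlx5_core_err()) whenever no VLAN table was allocated for the profile, while the old "no vlan table for uplink rep" early return is not lost; it moves into thin ndo wrappers in en_main.c, visible further down in this diff.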
fs->vlan->active_cvlans, VLAN_N_VID) { + mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); } - for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID) - mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); + for_each_set_bit(i, fs->vlan->active_svlans, VLAN_N_VID) + mlx5e_add_vlan_rule(fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); - if (priv->fs.vlan->cvlan_filter_disabled) - mlx5e_add_any_vid_rules(priv); + if (fs->vlan->cvlan_filter_disabled) + mlx5e_fs_add_any_vid_rules(fs); } static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv) { int i; - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); + mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0); - for_each_set_bit(i, priv->fs.vlan->active_cvlans, VLAN_N_VID) { - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); + for_each_set_bit(i, priv->fs->vlan->active_cvlans, VLAN_N_VID) { + mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i); } - for_each_set_bit(i, priv->fs.vlan->active_svlans, VLAN_N_VID) - mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); + for_each_set_bit(i, priv->fs->vlan->active_svlans, VLAN_N_VID) + mlx5e_fs_del_vlan_rule(priv->fs, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i); - WARN_ON_ONCE(!(test_bit(MLX5E_STATE_DESTROYING, &priv->state))); + WARN_ON_ONCE(priv->fs->state_destroy); mlx5e_remove_vlan_trap(priv); /* must be called after DESTROY bit is set and * set_rx_mode is called and flushed */ - if (priv->fs.vlan->cvlan_filter_disabled) - mlx5e_del_any_vid_rules(priv); + if (priv->fs->vlan->cvlan_filter_disabled) + mlx5e_fs_del_any_vid_rules(priv->fs); } #define mlx5e_for_each_hash_node(hn, tmp, hash, i) \ for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \ hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist) -static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, +static void mlx5e_execute_l2_action(struct mlx5e_flow_steering *fs, struct mlx5e_l2_hash_node *hn) { u8 action = hn->action; @@ -547,9 +551,9 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, switch (action) { case MLX5E_ACTION_ADD: - mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH); + mlx5e_add_l2_flow_rule(fs, &hn->ai, MLX5E_FULLMATCH); if (!is_multicast_ether_addr(mac_addr)) { - l2_err = mlx5_mpfs_add_mac(priv->mdev, mac_addr); + l2_err = mlx5_mpfs_add_mac(fs->mdev, mac_addr); hn->mpfs = !l2_err; } hn->action = MLX5E_ACTION_NONE; @@ -557,52 +561,50 @@ static void mlx5e_execute_l2_action(struct mlx5e_priv *priv, case MLX5E_ACTION_DEL: if (!is_multicast_ether_addr(mac_addr) && hn->mpfs) - l2_err = mlx5_mpfs_del_mac(priv->mdev, mac_addr); - mlx5e_del_l2_flow_rule(priv, &hn->ai); + l2_err = mlx5_mpfs_del_mac(fs->mdev, mac_addr); + mlx5e_del_l2_flow_rule(fs, &hn->ai); mlx5e_del_l2_from_hash(hn); break; } if (l2_err) - netdev_warn(priv->netdev, "MPFS, failed to %s mac %pM, err(%d)\n", - action == MLX5E_ACTION_ADD ? "add" : "del", mac_addr, l2_err); + mlx5_core_warn(fs->mdev, "MPFS, failed to %s mac %pM, err(%d)\n", + action == MLX5E_ACTION_ADD ? 
"add" : "del", mac_addr, l2_err); } -static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv) +static void mlx5e_sync_netdev_addr(struct mlx5e_flow_steering *fs, + struct net_device *netdev) { - struct net_device *netdev = priv->netdev; struct netdev_hw_addr *ha; netif_addr_lock_bh(netdev); - mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, - priv->netdev->dev_addr); - + mlx5e_add_l2_to_hash(fs->l2.netdev_uc, netdev->dev_addr); netdev_for_each_uc_addr(ha, netdev) - mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr); + mlx5e_add_l2_to_hash(fs->l2.netdev_uc, ha->addr); netdev_for_each_mc_addr(ha, netdev) - mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr); + mlx5e_add_l2_to_hash(fs->l2.netdev_mc, ha->addr); netif_addr_unlock_bh(netdev); } -static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, +static void mlx5e_fill_addr_array(struct mlx5e_flow_steering *fs, int list_type, + struct net_device *ndev, u8 addr_array[][ETH_ALEN], int size) { bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); - struct net_device *ndev = priv->netdev; struct mlx5e_l2_hash_node *hn; struct hlist_head *addr_list; struct hlist_node *tmp; int i = 0; int hi; - addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc; + addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc; if (is_uc) /* Make sure our own address is pushed first */ ether_addr_copy(addr_array[i++], ndev->dev_addr); - else if (priv->fs.l2.broadcast_enabled) + else if (fs->l2.broadcast_enabled) ether_addr_copy(addr_array[i++], ndev->broadcast); mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) { @@ -614,7 +616,8 @@ static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type, } } -static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, +static void mlx5e_vport_context_update_addr_list(struct mlx5e_flow_steering *fs, + struct net_device *netdev, int list_type) { bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC); @@ -627,19 +630,19 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, int err; int hi; - size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0); + size = is_uc ? 0 : (fs->l2.broadcast_enabled ? 1 : 0); max_size = is_uc ? - 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) : - 1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list); + 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_uc_list) : + 1 << MLX5_CAP_GEN(fs->mdev, log_max_current_mc_list); - addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc; + addr_list = is_uc ? fs->l2.netdev_uc : fs->l2.netdev_mc; mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) size++; if (size > max_size) { - netdev_warn(priv->netdev, - "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n", - is_uc ? "UC" : "MC", size, max_size); + mlx5_core_warn(fs->mdev, + "mdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n", + is_uc ? "UC" : "MC", size, max_size); size = max_size; } @@ -649,65 +652,67 @@ static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv, err = -ENOMEM; goto out; } - mlx5e_fill_addr_array(priv, list_type, addr_array, size); + mlx5e_fill_addr_array(fs, list_type, netdev, addr_array, size); } - err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size); + err = mlx5_modify_nic_vport_mac_list(fs->mdev, list_type, addr_array, size); out: if (err) - netdev_err(priv->netdev, - "Failed to modify vport %s list err(%d)\n", - is_uc ? 
"UC" : "MC", err); + mlx5_core_err(fs->mdev, + "Failed to modify vport %s list err(%d)\n", + is_uc ? "UC" : "MC", err); kfree(addr_array); } -static void mlx5e_vport_context_update(struct mlx5e_priv *priv) +static void mlx5e_vport_context_update(struct mlx5e_flow_steering *fs, + struct net_device *netdev) { - struct mlx5e_l2_table *ea = &priv->fs.l2; + struct mlx5e_l2_table *ea = &fs->l2; - mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC); - mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC); - mlx5_modify_nic_vport_promisc(priv->mdev, 0, + mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_UC); + mlx5e_vport_context_update_addr_list(fs, netdev, MLX5_NVPRT_LIST_TYPE_MC); + mlx5_modify_nic_vport_promisc(fs->mdev, 0, ea->allmulti_enabled, ea->promisc_enabled); } -static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv) +static void mlx5e_apply_netdev_addr(struct mlx5e_flow_steering *fs) { struct mlx5e_l2_hash_node *hn; struct hlist_node *tmp; int i; - mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i) - mlx5e_execute_l2_action(priv, hn); + mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i) + mlx5e_execute_l2_action(fs, hn); - mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i) - mlx5e_execute_l2_action(priv, hn); + mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i) + mlx5e_execute_l2_action(fs, hn); } -static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv) +static void mlx5e_handle_netdev_addr(struct mlx5e_flow_steering *fs, + struct net_device *netdev) { struct mlx5e_l2_hash_node *hn; struct hlist_node *tmp; int i; - mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i) + mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_uc, i) hn->action = MLX5E_ACTION_DEL; - mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i) + mlx5e_for_each_hash_node(hn, tmp, fs->l2.netdev_mc, i) hn->action = MLX5E_ACTION_DEL; - if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state)) - mlx5e_sync_netdev_addr(priv); + if (fs->state_destroy) + mlx5e_sync_netdev_addr(fs, netdev); - mlx5e_apply_netdev_addr(priv); + mlx5e_apply_netdev_addr(fs); } #define MLX5E_PROMISC_GROUP0_SIZE BIT(0) #define MLX5E_PROMISC_TABLE_SIZE MLX5E_PROMISC_GROUP0_SIZE -static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv) +static int mlx5e_add_promisc_rule(struct mlx5e_flow_steering *fs) { - struct mlx5_flow_table *ft = priv->fs.promisc.ft.t; + struct mlx5_flow_table *ft = fs->promisc.ft.t; struct mlx5_flow_destination dest = {}; struct mlx5_flow_handle **rule_p; MLX5_DECLARE_FLOW_ACT(flow_act); @@ -718,22 +723,22 @@ static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv) if (!spec) return -ENOMEM; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc); + dest.ft = mlx5_get_ttc_flow_table(fs->ttc); - rule_p = &priv->fs.promisc.rule; + rule_p = &fs->promisc.rule; *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(*rule_p)) { err = PTR_ERR(*rule_p); *rule_p = NULL; - netdev_err(priv->netdev, "%s: add promiscuous rule failed\n", __func__); + mlx5_core_err(fs->mdev, "%s: add promiscuous rule failed\n", __func__); } kvfree(spec); return err; } -static int mlx5e_create_promisc_table(struct mlx5e_priv *priv) +static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs) { - struct mlx5e_flow_table *ft = &priv->fs.promisc.ft; + struct mlx5e_flow_table *ft = &fs->promisc.ft; struct mlx5_flow_table_attr ft_attr = {}; int err; @@ -742,14 +747,14 @@ static int 
mlx5e_create_promisc_table(struct mlx5e_priv *priv) ft_attr.level = MLX5E_PROMISC_FT_LEVEL; ft_attr.prio = MLX5E_NIC_PRIO; - ft->t = mlx5_create_auto_grouped_flow_table(priv->fs.ns, &ft_attr); + ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); - netdev_err(priv->netdev, "fail to create promisc table err=%d\n", err); + mlx5_core_err(fs->mdev, "fail to create promisc table err=%d\n", err); return err; } - err = mlx5e_add_promisc_rule(priv); + err = mlx5e_add_promisc_rule(fs); if (err) goto err_destroy_promisc_table; @@ -762,34 +767,31 @@ err_destroy_promisc_table: return err; } -static void mlx5e_del_promisc_rule(struct mlx5e_priv *priv) +static void mlx5e_del_promisc_rule(struct mlx5e_flow_steering *fs) { - if (WARN(!priv->fs.promisc.rule, "Trying to remove non-existing promiscuous rule")) + if (WARN(!fs->promisc.rule, "Trying to remove non-existing promiscuous rule")) return; - mlx5_del_flow_rules(priv->fs.promisc.rule); - priv->fs.promisc.rule = NULL; + mlx5_del_flow_rules(fs->promisc.rule); + fs->promisc.rule = NULL; } -static void mlx5e_destroy_promisc_table(struct mlx5e_priv *priv) +static void mlx5e_destroy_promisc_table(struct mlx5e_flow_steering *fs) { - if (WARN(!priv->fs.promisc.ft.t, "Trying to remove non-existing promiscuous table")) + if (WARN(!fs->promisc.ft.t, "Trying to remove non-existing promiscuous table")) return; - mlx5e_del_promisc_rule(priv); - mlx5_destroy_flow_table(priv->fs.promisc.ft.t); - priv->fs.promisc.ft.t = NULL; + mlx5e_del_promisc_rule(fs); + mlx5_destroy_flow_table(fs->promisc.ft.t); + fs->promisc.ft.t = NULL; } -void mlx5e_set_rx_mode_work(struct work_struct *work) +void mlx5e_fs_set_rx_mode_work(struct mlx5e_flow_steering *fs, + struct net_device *netdev) { - struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, - set_rx_mode_work); + struct mlx5e_l2_table *ea = &fs->l2; - struct mlx5e_l2_table *ea = &priv->fs.l2; - struct net_device *ndev = priv->netdev; - - bool rx_mode_enable = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); - bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC); - bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI); + bool rx_mode_enable = fs->state_destroy; + bool promisc_enabled = rx_mode_enable && (netdev->flags & IFF_PROMISC); + bool allmulti_enabled = rx_mode_enable && (netdev->flags & IFF_ALLMULTI); bool broadcast_enabled = rx_mode_enable; bool enable_promisc = !ea->promisc_enabled && promisc_enabled; @@ -801,32 +803,32 @@ void mlx5e_set_rx_mode_work(struct work_struct *work) int err; if (enable_promisc) { - err = mlx5e_create_promisc_table(priv); + err = mlx5e_create_promisc_table(fs); if (err) enable_promisc = false; - if (!priv->channels.params.vlan_strip_disable && !err) - netdev_warn_once(ndev, - "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n"); + if (!fs->vlan_strip_disable && !err) + mlx5_core_warn_once(fs->mdev, + "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n"); } if (enable_allmulti) - mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI); + mlx5e_add_l2_flow_rule(fs, &ea->allmulti, MLX5E_ALLMULTI); if (enable_broadcast) - mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH); + mlx5e_add_l2_flow_rule(fs, &ea->broadcast, MLX5E_FULLMATCH); - mlx5e_handle_netdev_addr(priv); + mlx5e_handle_netdev_addr(fs, netdev); if (disable_broadcast) - mlx5e_del_l2_flow_rule(priv, &ea->broadcast); + mlx5e_del_l2_flow_rule(fs, &ea->broadcast); if (disable_allmulti) - 
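A naming note that helps when reading mlx5e_fs_set_rx_mode_work() above: fs->state_destroy is initialized to !test_bit(MLX5E_STATE_DESTROYING, ...) (see the en_main.c hunks below), so it is true while the netdev is live and false once teardown starts; rx_mode_enable = fs->state_destroy therefore keeps the original semantics. The work item itself stays delta-based: it compares the desired promisc/allmulti/broadcast state against what is cached in the mlx5e_l2_table and only adds or removes the rules that actually changed.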
mlx5e_del_l2_flow_rule(priv, &ea->allmulti); + mlx5e_del_l2_flow_rule(fs, &ea->allmulti); if (disable_promisc) - mlx5e_destroy_promisc_table(priv); + mlx5e_destroy_promisc_table(fs); ea->promisc_enabled = promisc_enabled; ea->allmulti_enabled = allmulti_enabled; ea->broadcast_enabled = broadcast_enabled; - mlx5e_vport_context_update(priv); + mlx5e_vport_context_update(fs, netdev); } static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft) @@ -841,9 +843,9 @@ static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft) ft->num_groups = 0; } -void mlx5e_init_l2_addr(struct mlx5e_priv *priv) +void mlx5e_fs_init_l2_addr(struct mlx5e_flow_steering *fs, struct net_device *netdev) { - ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast); + ether_addr_copy(fs->l2.broadcast.addr, netdev->broadcast); } void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) @@ -861,7 +863,7 @@ static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv, int tt; memset(ttc_params, 0, sizeof(*ttc_params)); - ttc_params->ns = mlx5_get_flow_namespace(priv->mdev, + ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL); ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL; ft_attr->prio = MLX5E_NIC_PRIO; @@ -884,7 +886,7 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv, int tt; memset(ttc_params, 0, sizeof(*ttc_params)); - ttc_params->ns = mlx5_get_flow_namespace(priv->mdev, + ttc_params->ns = mlx5_get_flow_namespace(priv->fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL); ft_attr->level = MLX5E_TTC_FT_LEVEL; ft_attr->prio = MLX5E_NIC_PRIO; @@ -898,18 +900,18 @@ void mlx5e_set_ttc_params(struct mlx5e_priv *priv, } ttc_params->inner_ttc = tunnel; - if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->mdev)) + if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->fs->mdev)) return; for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) { ttc_params->tunnel_dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; ttc_params->tunnel_dests[tt].ft = - mlx5_get_ttc_flow_table(priv->fs.inner_ttc); + mlx5_get_ttc_flow_table(priv->fs->inner_ttc); } } -static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, +static void mlx5e_del_l2_flow_rule(struct mlx5e_flow_steering *fs, struct mlx5e_l2_rule *ai) { if (!IS_ERR_OR_NULL(ai->rule)) { @@ -918,10 +920,10 @@ static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, } } -static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, +static int mlx5e_add_l2_flow_rule(struct mlx5e_flow_steering *fs, struct mlx5e_l2_rule *ai, int type) { - struct mlx5_flow_table *ft = priv->fs.l2.ft.t; + struct mlx5_flow_table *ft = fs->l2.ft.t; struct mlx5_flow_destination dest = {}; MLX5_DECLARE_FLOW_ACT(flow_act); struct mlx5_flow_spec *spec; @@ -939,7 +941,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, outer_headers.dmac_47_16); dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc); + dest.ft = mlx5_get_ttc_flow_table(fs->ttc); switch (type) { case MLX5E_FULLMATCH: @@ -957,8 +959,8 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(ai->rule)) { - netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n", - __func__, mv_dmac); + mlx5_core_err(fs->mdev, "%s: add l2 rule(mac:%pM) failed\n", + __func__, mv_dmac); err = PTR_ERR(ai->rule); ai->rule = NULL; } @@ -1044,12 +1046,12 @@ err_destroy_groups: static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv) { - mlx5e_destroy_flow_table(&priv->fs.l2.ft); + 
mlx5e_destroy_flow_table(&priv->fs->l2.ft); } static int mlx5e_create_l2_table(struct mlx5e_priv *priv) { - struct mlx5e_l2_table *l2_table = &priv->fs.l2; + struct mlx5e_l2_table *l2_table = &priv->fs->l2; struct mlx5e_flow_table *ft = &l2_table->ft; struct mlx5_flow_table_attr ft_attr = {}; int err; @@ -1060,7 +1062,7 @@ static int mlx5e_create_l2_table(struct mlx5e_priv *priv) ft_attr.level = MLX5E_L2_FT_LEVEL; ft_attr.prio = MLX5E_NIC_PRIO; - ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); + ft->t = mlx5_create_flow_table(priv->fs->ns, &ft_attr); if (IS_ERR(ft->t)) { err = PTR_ERR(ft->t); ft->t = NULL; @@ -1180,20 +1182,20 @@ static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft) return err; } -static int mlx5e_create_vlan_table(struct mlx5e_priv *priv) +static int mlx5e_fs_create_vlan_table(struct mlx5e_flow_steering *fs) { struct mlx5_flow_table_attr ft_attr = {}; struct mlx5e_flow_table *ft; int err; - ft = &priv->fs.vlan->ft; + ft = &fs->vlan->ft; ft->num_groups = 0; ft_attr.max_fte = MLX5E_VLAN_TABLE_SIZE; ft_attr.level = MLX5E_VLAN_FT_LEVEL; ft_attr.prio = MLX5E_NIC_PRIO; - ft->t = mlx5_create_flow_table(priv->fs.ns, &ft_attr); + ft->t = mlx5_create_flow_table(fs->ns, &ft_attr); if (IS_ERR(ft->t)) return PTR_ERR(ft->t); @@ -1207,7 +1209,7 @@ static int mlx5e_create_vlan_table(struct mlx5e_priv *priv) if (err) goto err_free_g; - mlx5e_add_vlan_rules(priv); + mlx5e_fs_add_vlan_rules(fs); return 0; @@ -1222,33 +1224,33 @@ err_destroy_vlan_table: static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv) { mlx5e_del_vlan_rules(priv); - mlx5e_destroy_flow_table(&priv->fs.vlan->ft); + mlx5e_destroy_flow_table(&priv->fs->vlan->ft); } static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv) { - if (!mlx5_tunnel_inner_ft_supported(priv->mdev)) + if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev)) return; - mlx5_destroy_ttc_table(priv->fs.inner_ttc); + mlx5_destroy_ttc_table(priv->fs->inner_ttc); } void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) { - mlx5_destroy_ttc_table(priv->fs.ttc); + mlx5_destroy_ttc_table(priv->fs->ttc); } static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv) { struct ttc_params ttc_params = {}; - if (!mlx5_tunnel_inner_ft_supported(priv->mdev)) + if (!mlx5_tunnel_inner_ft_supported(priv->fs->mdev)) return 0; mlx5e_set_inner_ttc_params(priv, &ttc_params); - priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev, - &ttc_params); - if (IS_ERR(priv->fs.inner_ttc)) - return PTR_ERR(priv->fs.inner_ttc); + priv->fs->inner_ttc = mlx5_create_inner_ttc_table(priv->fs->mdev, + &ttc_params); + if (IS_ERR(priv->fs->inner_ttc)) + return PTR_ERR(priv->fs->inner_ttc); return 0; } @@ -1257,9 +1259,9 @@ int mlx5e_create_ttc_table(struct mlx5e_priv *priv) struct ttc_params ttc_params = {}; mlx5e_set_ttc_params(priv, &ttc_params, true); - priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params); - if (IS_ERR(priv->fs.ttc)) - return PTR_ERR(priv->fs.ttc); + priv->fs->ttc = mlx5_create_ttc_table(priv->fs->mdev, &ttc_params); + if (IS_ERR(priv->fs->ttc)) + return PTR_ERR(priv->fs->ttc); return 0; } @@ -1267,45 +1269,44 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) { int err; - priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, + priv->fs->ns = mlx5_get_flow_namespace(priv->fs->mdev, MLX5_FLOW_NAMESPACE_KERNEL); - if (!priv->fs.ns) + if (!priv->fs->ns) return -EOPNOTSUPP; err = mlx5e_arfs_create_tables(priv); if (err) { - netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n", - err); + 
mlx5_core_err(priv->fs->mdev, "Failed to create arfs tables, err=%d\n", + err); priv->netdev->hw_features &= ~NETIF_F_NTUPLE; } err = mlx5e_create_inner_ttc_table(priv); if (err) { - netdev_err(priv->netdev, - "Failed to create inner ttc table, err=%d\n", - err); + mlx5_core_err(priv->fs->mdev, + "Failed to create inner ttc table, err=%d\n", err); goto err_destroy_arfs_tables; } err = mlx5e_create_ttc_table(priv); if (err) { - netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", - err); + mlx5_core_err(priv->fs->mdev, "Failed to create ttc table, err=%d\n", + err); goto err_destroy_inner_ttc_table; } err = mlx5e_create_l2_table(priv); if (err) { - netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n", - err); + mlx5_core_err(priv->fs->mdev, "Failed to create l2 table, err=%d\n", + err); goto err_destroy_ttc_table; } - err = mlx5e_create_vlan_table(priv); + err = mlx5e_fs_create_vlan_table(priv->fs); if (err) { - netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n", - err); + mlx5_core_err(priv->fs->mdev, "Failed to create vlan table, err=%d\n", + err); goto err_destroy_l2_table; } @@ -1342,16 +1343,69 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) mlx5e_ethtool_cleanup_steering(priv); } -int mlx5e_fs_init(struct mlx5e_priv *priv) +static int mlx5e_fs_vlan_alloc(struct mlx5e_flow_steering *fs) +{ + fs->vlan = kvzalloc(sizeof(*fs->vlan), GFP_KERNEL); + if (!fs->vlan) + return -ENOMEM; + return 0; +} + +static void mlx5e_fs_vlan_free(struct mlx5e_flow_steering *fs) { - priv->fs.vlan = kvzalloc(sizeof(*priv->fs.vlan), GFP_KERNEL); - if (!priv->fs.vlan) + kvfree(fs->vlan); +} + +static int mlx5e_fs_tc_alloc(struct mlx5e_flow_steering *fs) +{ + fs->tc = mlx5e_tc_table_alloc(); + if (IS_ERR(fs->tc)) return -ENOMEM; return 0; } -void mlx5e_fs_cleanup(struct mlx5e_priv *priv) +static void mlx5e_fs_tc_free(struct mlx5e_flow_steering *fs) +{ + mlx5e_tc_table_free(fs->tc); +} + +struct mlx5e_flow_steering *mlx5e_fs_init(const struct mlx5e_profile *profile, + struct mlx5_core_dev *mdev, + bool state_destroy) +{ + struct mlx5e_flow_steering *fs; + int err; + + fs = kvzalloc(sizeof(*fs), GFP_KERNEL); + if (!fs) + goto err; + + fs->mdev = mdev; + fs->state_destroy = state_destroy; + if (mlx5e_profile_feature_cap(profile, FS_VLAN)) { + err = mlx5e_fs_vlan_alloc(fs); + if (err) + goto err_free_fs; + } + + if (mlx5e_profile_feature_cap(profile, FS_TC)) { + err = mlx5e_fs_tc_alloc(fs); + if (err) + goto err_free_vlan; + } + + return fs; +err_free_fs: + kvfree(fs); +err_free_vlan: + mlx5e_fs_vlan_free(fs); +err: + return NULL; +} + +void mlx5e_fs_cleanup(struct mlx5e_flow_steering *fs) { - kvfree(priv->fs.vlan); - priv->fs.vlan = NULL; + mlx5e_fs_tc_free(fs); + mlx5e_fs_vlan_free(fs); + kvfree(fs); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index 9466202fd97b..3e4bc7836ef4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -81,18 +81,18 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv, case UDP_V6_FLOW: max_tuples = ETHTOOL_NUM_L3_L4_FTS; prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples); - eth_ft = &priv->fs.ethtool.l3_l4_ft[prio]; + eth_ft = &priv->fs->ethtool.l3_l4_ft[prio]; break; case IP_USER_FLOW: case IPV6_USER_FLOW: max_tuples = ETHTOOL_NUM_L3_L4_FTS; prio = MLX5E_ETHTOOL_L3_L4_PRIO + (max_tuples - num_tuples); - eth_ft = 
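One thing worth flagging in the new mlx5e_fs_init() shown above: the unwind labels are laid out so that a failed mlx5e_fs_vlan_alloc() jumps to err_free_fs, kvfree()s fs and then falls through into mlx5e_fs_vlan_free(fs) on the just-freed object, while a failed mlx5e_fs_tc_alloc() frees the VLAN table but never kvfree()s fs itself. Keeping the same goto targets, reordering the labels restores a correct unwind; a sketch of the function tail only:

	if (mlx5e_profile_feature_cap(profile, FS_TC)) {
		err = mlx5e_fs_tc_alloc(fs);
		if (err)
			goto err_free_vlan;
	}

	return fs;

err_free_vlan:
	mlx5e_fs_vlan_free(fs);	/* kvfree(NULL) is a no-op if FS_VLAN was not enabled */
err_free_fs:
	kvfree(fs);
err:
	return NULL;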
&priv->fs.ethtool.l3_l4_ft[prio]; + eth_ft = &priv->fs->ethtool.l3_l4_ft[prio]; break; case ETHER_FLOW: max_tuples = ETHTOOL_NUM_L2_FTS; prio = max_tuples - num_tuples; - eth_ft = &priv->fs.ethtool.l2_ft[prio]; + eth_ft = &priv->fs->ethtool.l2_ft[prio]; prio += MLX5E_ETHTOOL_L2_PRIO; break; default: @@ -383,14 +383,14 @@ static void add_rule_to_list(struct mlx5e_priv *priv, struct mlx5e_ethtool_rule *rule) { struct mlx5e_ethtool_rule *iter; - struct list_head *head = &priv->fs.ethtool.rules; + struct list_head *head = &priv->fs->ethtool.rules; - list_for_each_entry(iter, &priv->fs.ethtool.rules, list) { + list_for_each_entry(iter, &priv->fs->ethtool.rules, list) { if (iter->flow_spec.location > rule->flow_spec.location) break; head = &iter->list; } - priv->fs.ethtool.tot_num_rules++; + priv->fs->ethtool.tot_num_rules++; list_add(&rule->list, head); } @@ -507,7 +507,7 @@ static void del_ethtool_rule(struct mlx5e_priv *priv, if (eth_rule->rss) mlx5e_rss_refcnt_dec(eth_rule->rss); list_del(ð_rule->list); - priv->fs.ethtool.tot_num_rules--; + priv->fs->ethtool.tot_num_rules--; put_flow_table(eth_rule->eth_ft); kfree(eth_rule); } @@ -517,7 +517,7 @@ static struct mlx5e_ethtool_rule *find_ethtool_rule(struct mlx5e_priv *priv, { struct mlx5e_ethtool_rule *iter; - list_for_each_entry(iter, &priv->fs.ethtool.rules, list) { + list_for_each_entry(iter, &priv->fs->ethtool.rules, list) { if (iter->flow_spec.location == location) return iter; } @@ -788,7 +788,7 @@ mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, if (location < 0 || location >= MAX_NUM_OF_ETHTOOL_RULES) return -EINVAL; - list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) { + list_for_each_entry(eth_rule, &priv->fs->ethtool.rules, list) { int index; if (eth_rule->flow_spec.location != location) @@ -831,13 +831,13 @@ void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv) struct mlx5e_ethtool_rule *iter; struct mlx5e_ethtool_rule *temp; - list_for_each_entry_safe(iter, temp, &priv->fs.ethtool.rules, list) + list_for_each_entry_safe(iter, temp, &priv->fs->ethtool.rules, list) del_ethtool_rule(priv, iter); } void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) { - INIT_LIST_HEAD(&priv->fs.ethtool.rules); + INIT_LIST_HEAD(&priv->fs->ethtool.rules); } static int flow_type_to_traffic_type(u32 flow_type) @@ -963,7 +963,7 @@ int mlx5e_ethtool_get_rxnfc(struct mlx5e_priv *priv, switch (info->cmd) { case ETHTOOL_GRXCLSRLCNT: - info->rule_cnt = priv->fs.ethtool.tot_num_rules; + info->rule_cnt = priv->fs->ethtool.tot_num_rules; break; case ETHTOOL_GRXCLSRULE: err = mlx5e_ethtool_get_flow(priv, info, info->fs.location); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 24ddd438c066..d858667736a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3778,20 +3778,45 @@ static int set_feature_rx_vlan(struct net_device *netdev, bool enable) mutex_lock(&priv->state_lock); + priv->fs->vlan_strip_disable = !enable; priv->channels.params.vlan_strip_disable = !enable; + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) goto unlock; err = mlx5e_modify_channels_vsd(&priv->channels, !enable); - if (err) + if (err) { + priv->fs->vlan_strip_disable = enable; priv->channels.params.vlan_strip_disable = enable; - + } unlock: mutex_unlock(&priv->state_lock); return err; } +int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct 
mlx5e_flow_steering *fs = priv->fs; + + if (mlx5e_is_uplink_rep(priv)) + return 0; /* no vlan table for uplink rep */ + + return mlx5e_fs_vlan_rx_add_vid(fs, dev, proto, vid); +} + +int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_flow_steering *fs = priv->fs; + + if (mlx5e_is_uplink_rep(priv)) + return 0; /* no vlan table for uplink rep */ + + return mlx5e_fs_vlan_rx_kill_vid(fs, dev, proto, vid); +} + #ifdef CONFIG_MLX5_EN_ARFS static int set_feature_arfs(struct net_device *netdev, bool enable) { @@ -3889,8 +3914,8 @@ static netdev_features_t mlx5e_fix_features(struct net_device *netdev, mutex_lock(&priv->state_lock); params = &priv->channels.params; - if (!priv->fs.vlan || - !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs.vlan), VLAN_N_VID)) { + if (!priv->fs->vlan || + !bitmap_empty(mlx5e_vlan_get_active_svlans(priv->fs->vlan), VLAN_N_VID)) { /* HW strips the outer C-tag header, this is a problem * for S-tag traffic. */ @@ -5013,6 +5038,7 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_flow_steering *fs; int err; mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu); @@ -5020,11 +5046,14 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5e_timestamp_init(priv); - err = mlx5e_fs_init(priv); - if (err) { + fs = mlx5e_fs_init(priv->profile, mdev, + !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); + if (!fs) { + err = -ENOMEM; mlx5_core_err(mdev, "FS initialization failed, %d\n", err); return err; } + priv->fs = fs; err = mlx5e_ipsec_init(priv); if (err) @@ -5043,7 +5072,7 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) mlx5e_health_destroy_reporters(priv); mlx5e_ktls_cleanup(priv); mlx5e_ipsec_cleanup(priv); - mlx5e_fs_cleanup(priv); + mlx5e_fs_cleanup(priv->fs); } static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) @@ -5166,7 +5195,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) struct net_device *netdev = priv->netdev; struct mlx5_core_dev *mdev = priv->mdev; - mlx5e_init_l2_addr(priv); + mlx5e_fs_init_l2_addr(priv->fs, netdev); /* Marking the link as currently not needed by the Driver */ if (!netif_running(netdev)) @@ -5251,7 +5280,9 @@ static const struct mlx5e_profile mlx5e_nic_profile = { .stats_grps_num = mlx5e_nic_stats_grps_num, .features = BIT(MLX5E_PROFILE_FEATURE_PTP_RX) | BIT(MLX5E_PROFILE_FEATURE_PTP_TX) | - BIT(MLX5E_PROFILE_FEATURE_QOS_HTB), + BIT(MLX5E_PROFILE_FEATURE_QOS_HTB) | + BIT(MLX5E_PROFILE_FEATURE_FS_VLAN) | + BIT(MLX5E_PROFILE_FEATURE_FS_TC), }; static int mlx5e_profile_max_num_channels(struct mlx5_core_dev *mdev, @@ -5301,6 +5332,14 @@ int mlx5e_get_pf_num_tirs(struct mlx5_core_dev *mdev) + mlx5e_profile_max_num_channels(mdev, &mlx5e_nic_profile); } +void mlx5e_set_rx_mode_work(struct work_struct *work) +{ + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + set_rx_mode_work); + + return mlx5e_fs_set_rx_mode_work(priv->fs, priv->netdev); +} + /* mlx5e generic netdev management API (move to en_common.c) */ int mlx5e_priv_init(struct mlx5e_priv *priv, const struct mlx5e_profile *profile, @@ -5478,6 +5517,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) int err; clear_bit(MLX5E_STATE_DESTROYING, &priv->state); + if (priv->fs) + priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); /* max number of channels may have changed */ max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile); @@ -5537,6 
+5578,8 @@ err_cleanup_tx: out: mlx5e_reset_channels(priv->netdev); set_bit(MLX5E_STATE_DESTROYING, &priv->state); + if (priv->fs) + priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); cancel_work_sync(&priv->update_stats_work); return err; } @@ -5546,6 +5589,8 @@ void mlx5e_detach_netdev(struct mlx5e_priv *priv) const struct mlx5e_profile *profile = priv->profile; set_bit(MLX5E_STATE_DESTROYING, &priv->state); + if (priv->fs) + priv->fs->state_destroy = !test_bit(MLX5E_STATE_DESTROYING, &priv->state); if (profile->disable) profile->disable(priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index ae90b06d21e2..4c1599de652c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -718,6 +718,7 @@ static int mlx5e_init_ul_rep(struct mlx5_core_dev *mdev, static void mlx5e_cleanup_rep(struct mlx5e_priv *priv) { + mlx5e_fs_cleanup(priv->fs); mlx5e_ipsec_cleanup(priv); } @@ -728,8 +729,8 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv) struct ttc_params ttc_params = {}; int err; - priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, - MLX5_FLOW_NAMESPACE_KERNEL); + priv->fs->ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_KERNEL); /* The inner_ttc in the ttc params is intentionally not set */ mlx5e_set_ttc_params(priv, &ttc_params, false); @@ -738,9 +739,9 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv) /* To give uplik rep TTC a lower level for chaining from root ft */ ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1; - priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params); - if (IS_ERR(priv->fs.ttc)) { - err = PTR_ERR(priv->fs.ttc); + priv->fs->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params); + if (IS_ERR(priv->fs->ttc)) { + err = PTR_ERR(priv->fs->ttc); netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err); return err; @@ -760,7 +761,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv) /* non uplik reps will skip any bypass tables and go directly to * their own ttc */ - rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc); + rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs->ttc); return 0; } @@ -835,11 +836,20 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; int err; - priv->rx_res = mlx5e_rx_res_alloc(); - if (!priv->rx_res) + priv->fs = mlx5e_fs_init(priv->profile, mdev, + !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); + if (!priv->fs) { + netdev_err(priv->netdev, "FS allocation failed\n"); return -ENOMEM; + } + + priv->rx_res = mlx5e_rx_res_alloc(); + if (!priv->rx_res) { + err = -ENOMEM; + goto err_free_fs; + } - mlx5e_init_l2_addr(priv); + mlx5e_fs_init_l2_addr(priv->fs, priv->netdev); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); if (err) { @@ -873,13 +883,15 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) err_destroy_root_ft: mlx5e_destroy_rep_root_ft(priv); err_destroy_ttc_table: - mlx5_destroy_ttc_table(priv->fs.ttc); + mlx5_destroy_ttc_table(priv->fs->ttc); err_destroy_rx_res: mlx5e_rx_res_destroy(priv->rx_res); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_rx_res_free(priv->rx_res); priv->rx_res = NULL; +err_free_fs: + mlx5e_fs_cleanup(priv->fs); return err; } @@ -888,7 +900,7 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) mlx5e_ethtool_cleanup_steering(priv); rep_vport_rx_rule_destroy(priv); mlx5e_destroy_rep_root_ft(priv); - 
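The attach/detach paths above now mirror the DESTROYING bit into priv->fs->state_destroy at three separate call sites. A small helper would keep the open-coded copies from drifting; hypothetical, not part of this series:

	/* Hypothetical helper, not part of this series. */
	static void mlx5e_fs_sync_state_destroy(struct mlx5e_priv *priv)
	{
		if (priv->fs)
			priv->fs->state_destroy =
				!test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	}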
mlx5_destroy_ttc_table(priv->fs.ttc); + mlx5_destroy_ttc_table(priv->fs->ttc); mlx5e_rx_res_destroy(priv->rx_res); mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_rx_res_free(priv->rx_res); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 2e12280a936f..f154bda668ad 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -71,6 +71,30 @@ #define MLX5E_TC_TABLE_NUM_GROUPS 4 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18) +struct mlx5e_tc_table { + /* Protects the dynamic assignment of the t parameter + * which is the nic tc root table. + */ + struct mutex t_lock; + struct mlx5e_priv *priv; + struct mlx5_flow_table *t; + struct mlx5_flow_table *miss_t; + struct mlx5_fs_chains *chains; + struct mlx5e_post_act *post_act; + + struct rhashtable ht; + + struct mod_hdr_tbl mod_hdr; + struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */ + DECLARE_HASHTABLE(hairpin_tbl, 8); + + struct notifier_block netdevice_nb; + struct netdev_net_notifier netdevice_nn; + + struct mlx5_tc_ct_priv *ct; + struct mapping_ctx *mapping; +}; + struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { [CHAIN_TO_REG] = { .mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0, @@ -108,6 +132,24 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { [PACKET_COLOR_TO_REG] = packet_color_to_reg, }; +struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) +{ + struct mlx5e_tc_table *tc; + + tc = kvzalloc(sizeof(*tc), GFP_KERNEL); + return tc ? tc : ERR_PTR(-ENOMEM); +} + +void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) +{ + kvfree(tc); +} + +struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc) +{ + return tc->chains; +} + /* To avoid false lock dependency warning set the tc_ht lock * class different than the lock class of the ht being used when deleting * last flow from a group and then deleting a group, we get into del_sw_flow_group() @@ -280,7 +322,7 @@ get_ct_priv(struct mlx5e_priv *priv) return uplink_priv->ct_priv; } - return priv->fs.tc.ct; + return priv->fs->tc->ct; } static struct mlx5e_tc_psample * @@ -314,7 +356,7 @@ get_post_action(struct mlx5e_priv *priv) return uplink_priv->post_act; } - return priv->fs.tc.post_act; + return priv->fs->tc->post_act; } struct mlx5_flow_handle * @@ -356,19 +398,42 @@ static int mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr) { + struct mlx5e_post_act *post_act = get_post_action(priv); + struct mlx5e_post_meter_priv *post_meter; + enum mlx5_flow_namespace_type ns_type; struct mlx5e_flow_meter_handle *meter; - meter = mlx5e_tc_meter_get(priv->mdev, &attr->meter_attr.params); + meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params); if (IS_ERR(meter)) { mlx5_core_err(priv->mdev, "Failed to get flow meter\n"); return PTR_ERR(meter); } + ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters); + post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, meter->green_counter, + meter->red_counter); + if (IS_ERR(post_meter)) { + mlx5_core_err(priv->mdev, "Failed to init post meter\n"); + goto err_meter_init; + } + attr->meter_attr.meter = meter; - attr->dest_ft = mlx5e_tc_meter_get_post_meter_ft(meter->flow_meters); + attr->meter_attr.post_meter = post_meter; + attr->dest_ft = mlx5e_post_meter_get_ft(post_meter); attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; return 0; + +err_meter_init: + mlx5e_tc_meter_put(meter); + return PTR_ERR(post_meter); +} + +static void 
+mlx5e_tc_del_flow_meter(struct mlx5_flow_attr *attr) +{ + mlx5e_post_meter_cleanup(attr->meter_attr.post_meter); + mlx5e_tc_meter_put(attr->meter_attr.meter); } struct mlx5_flow_handle * @@ -428,7 +493,7 @@ mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv, mlx5_eswitch_del_offloaded_rule(esw, rule, attr); if (attr->meter_attr.meter) - mlx5e_tc_meter_put(attr->meter_attr.meter); + mlx5e_tc_del_flow_meter(attr); } int @@ -546,7 +611,7 @@ get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ? &esw->offloads.mod_hdr : - &priv->fs.tc.mod_hdr; + &priv->fs->tc->mod_hdr; } static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv, @@ -764,7 +829,7 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp) netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n", hp->num_channels, - mlx5_get_ttc_flow_table(priv->fs.ttc)->id); + mlx5_get_ttc_flow_table(priv->fs->ttc)->id); return 0; @@ -854,7 +919,7 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv, struct mlx5e_hairpin_entry *hpe; u32 hash_key = hash_hairpin_info(peer_vhca_id, prio); - hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe, + hash_for_each_possible(priv->fs->tc->hairpin_tbl, hpe, hairpin_hlist, hash_key) { if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) { refcount_inc(&hpe->refcnt); @@ -869,10 +934,10 @@ static void mlx5e_hairpin_put(struct mlx5e_priv *priv, struct mlx5e_hairpin_entry *hpe) { /* no more hairpin flows for us, release the hairpin pair */ - if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock)) + if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs->tc->hairpin_tbl_lock)) return; hash_del(&hpe->hairpin_hlist); - mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); + mutex_unlock(&priv->fs->tc->hairpin_tbl_lock); if (!IS_ERR_OR_NULL(hpe->hp)) { netdev_dbg(priv->netdev, "del hairpin: peer %s\n", @@ -956,10 +1021,10 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, if (err) return err; - mutex_lock(&priv->fs.tc.hairpin_tbl_lock); + mutex_lock(&priv->fs->tc->hairpin_tbl_lock); hpe = mlx5e_hairpin_get(priv, peer_id, match_prio); if (hpe) { - mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); + mutex_unlock(&priv->fs->tc->hairpin_tbl_lock); wait_for_completion(&hpe->res_ready); if (IS_ERR(hpe->hp)) { @@ -971,7 +1036,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, hpe = kzalloc(sizeof(*hpe), GFP_KERNEL); if (!hpe) { - mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); + mutex_unlock(&priv->fs->tc->hairpin_tbl_lock); return -ENOMEM; } @@ -983,9 +1048,9 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, refcount_set(&hpe->refcnt, 1); init_completion(&hpe->res_ready); - hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, + hash_add(priv->fs->tc->hairpin_tbl, &hpe->hairpin_hlist, hash_hairpin_info(peer_id, match_prio)); - mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); + mutex_unlock(&priv->fs->tc->hairpin_tbl_lock); params.log_data_size = 16; params.log_data_size = min_t(u8, params.log_data_size, @@ -1061,10 +1126,10 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, struct mlx5_flow_attr *attr) { struct mlx5_flow_context *flow_context = &spec->flow_context; - struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv); struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr; - struct mlx5e_tc_table *tc = &priv->fs.tc; + struct mlx5e_tc_table *tc = priv->fs->tc; struct mlx5_flow_destination dest[2] = {}; + struct mlx5_fs_chains 
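On the metering changes above: the per-rule post-meter flow table is built from the meter's green/red counters (mlx5e_post_meter_init()), so the meter has to be acquired first via mlx5e_tc_meter_replace(), and mlx5e_tc_del_flow_meter() symmetrically tears down the post-meter table before dropping the meter reference that owns those counters. attr->dest_ft now points at that per-rule post-meter table rather than the shared table previously exposed through the meter object, which is what gives each offloaded rule its own red/green accounting.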
*nic_chains; struct mlx5_flow_act flow_act = { .action = attr->action, .flags = FLOW_ACT_NO_APPEND, @@ -1073,6 +1138,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, struct mlx5_flow_table *ft; int dest_ix = 0; + nic_chains = mlx5e_nic_chains(tc); flow_context->flags |= FLOW_CONTEXT_HAS_TAG; flow_context->flow_tag = nic_attr->flow_tag; @@ -1097,7 +1163,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, if (IS_ERR(dest[dest_ix].ft)) return ERR_CAST(dest[dest_ix].ft); } else { - dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan); + dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs->vlan); } dest_ix++; } @@ -1125,7 +1191,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv, mutex_unlock(&tc->t_lock); netdev_err(priv->netdev, "Failed to create tc offload table\n"); - rule = ERR_CAST(priv->fs.tc.t); + rule = ERR_CAST(priv->fs->tc->t); goto err_ft_get; } } @@ -1227,7 +1293,7 @@ void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv, struct mlx5_flow_handle *rule, struct mlx5_flow_attr *attr) { - struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv); + struct mlx5_fs_chains *nic_chains = mlx5e_nic_chains(priv->fs->tc); mlx5_del_flow_rules(rule); @@ -1244,7 +1310,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow) { struct mlx5_flow_attr *attr = flow->attr; - struct mlx5e_tc_table *tc = &priv->fs.tc; + struct mlx5e_tc_table *tc = priv->fs->tc; flow_flag_clear(flow, OFFLOADED); @@ -1256,13 +1322,13 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, /* Remove root table if no rules are left to avoid * extra steering hops. */ - mutex_lock(&priv->fs.tc.t_lock); + mutex_lock(&priv->fs->tc->t_lock); if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) && !IS_ERR_OR_NULL(tc->t)) { - mlx5_chains_put_table(mlx5e_nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL); - priv->fs.tc.t = NULL; + mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL); + priv->fs->tc->t = NULL; } - mutex_unlock(&priv->fs.tc.t_lock); + mutex_unlock(&priv->fs->tc->t_lock); if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); @@ -3998,7 +4064,7 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, rpriv = priv->ppriv; return &rpriv->tc_ht; } else /* NIC offload */ - return &priv->fs.tc.ht; + return &priv->fs->tc->ht; } static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow) @@ -4717,11 +4783,11 @@ static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv, peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id); - mutex_lock(&priv->fs.tc.hairpin_tbl_lock); - hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) + mutex_lock(&priv->fs->tc->hairpin_tbl_lock); + hash_for_each(priv->fs->tc->hairpin_tbl, bkt, hpe, hairpin_hlist) if (refcount_inc_not_zero(&hpe->refcnt)) list_add(&hpe->dead_peer_wait_list, &init_wait_list); - mutex_unlock(&priv->fs.tc.hairpin_tbl_lock); + mutex_unlock(&priv->fs->tc->hairpin_tbl_lock); list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) { wait_for_completion(&hpe->res_ready); @@ -4736,7 +4802,6 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *ndev = netdev_notifier_info_to_dev(ptr); - struct mlx5e_flow_steering *fs; struct mlx5e_priv *peer_priv; struct mlx5e_tc_table *tc; struct mlx5e_priv *priv; @@ -4747,8 +4812,7 @@ static int mlx5e_tc_netdev_event(struct notifier_block *this, return NOTIFY_DONE; tc = container_of(this, struct mlx5e_tc_table, 
netdevice_nb); - fs = container_of(tc, struct mlx5e_flow_steering, tc); - priv = container_of(fs, struct mlx5e_priv, fs); + priv = tc->priv; peer_priv = netdev_priv(ndev); if (priv == peer_priv || !(priv->netdev->features & NETIF_F_HW_TC)) @@ -4777,7 +4841,7 @@ static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev) static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv) { - struct mlx5_flow_table **ft = &priv->fs.tc.miss_t; + struct mlx5_flow_table **ft = &priv->fs->tc->miss_t; struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_namespace *ns; int err = 0; @@ -4799,12 +4863,12 @@ static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv) static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv) { - mlx5_destroy_flow_table(priv->fs.tc.miss_t); + mlx5_destroy_flow_table(priv->fs->tc->miss_t); } int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { - struct mlx5e_tc_table *tc = &priv->fs.tc; + struct mlx5e_tc_table *tc = priv->fs->tc; struct mlx5_core_dev *dev = priv->mdev; struct mapping_ctx *chains_mapping; struct mlx5_chains_attr attr = {}; @@ -4815,6 +4879,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) mutex_init(&tc->t_lock); mutex_init(&tc->hairpin_tbl_lock); hash_init(tc->hairpin_tbl); + tc->priv = priv; err = rhashtable_init(&tc->ht, &tc_ht_params); if (err) @@ -4844,7 +4909,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) attr.ns = MLX5_FLOW_NAMESPACE_KERNEL; attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev); attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS; - attr.default_ft = priv->fs.tc.miss_t; + attr.default_ft = priv->fs->tc->miss_t; attr.mapping = chains_mapping; tc->chains = mlx5_chains_create(dev, &attr); @@ -4854,7 +4919,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) } tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL); - tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr, + tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr, MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act); tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; @@ -4893,7 +4958,7 @@ static void _mlx5e_tc_del_flow(void *ptr, void *arg) void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) { - struct mlx5e_tc_table *tc = &priv->fs.tc; + struct mlx5e_tc_table *tc = priv->fs->tc; if (tc->netdevice_nb.notifier_call) unregister_netdevice_notifier_dev_net(priv->netdev, @@ -5098,7 +5163,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) u32 chain = 0, chain_tag, reg_b, zone_restore_id; struct mlx5e_priv *priv = netdev_priv(skb->dev); - struct mlx5e_tc_table *tc = &priv->fs.tc; + struct mlx5e_tc_table *tc = priv->fs->tc; struct mlx5_mapped_obj mapped_obj; struct tc_skb_ext *tc_skb_ext; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 517f2252b5ff..6ce1ab6b86b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -356,6 +356,8 @@ mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv) #endif #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) +struct mlx5e_tc_table *mlx5e_tc_table_alloc(void); +void mlx5e_tc_table_free(struct mlx5e_tc_table *tc); static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe) { #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) @@ -376,6 +378,8 @@ static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe) bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb); #else /* CONFIG_MLX5_CLS_ACT */ +static 
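The netdevice notifier above used to climb back to the mlx5e_priv structurally (container_of from tc to mlx5e_flow_steering, then from fs to priv). With both tc and fs now separately allocated objects, that chain no longer exists, which is why mlx5e_tc_nic_init() stores an explicit back-pointer (tc->priv = priv) and the handler simply dereferences it.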
inline struct mlx5e_tc_table *mlx5e_tc_table_alloc(void) { return NULL; } +static inline void mlx5e_tc_table_free(struct mlx5e_tc_table *tc) {} static inline bool mlx5e_cqe_regb_chain(struct mlx5_cqe64 *cqe) { return false; } static inline bool diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index dc1e01e93d5a..27f791feb517 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -152,7 +152,7 @@ mlx5e_tx_get_gso_ihs(struct mlx5e_txqsq *sq, struct sk_buff *skb, int *hopbyhop) *hopbyhop = 0; if (skb->encapsulation) { - ihs = skb_tcp_all_headers(skb); + ihs = skb_inner_tcp_all_headers(skb); stats->tso_inner_packets++; stats->tso_inner_bytes += skb->len - ihs; } else { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 0a99a020a3b2..c02b7b08fb4c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -322,10 +322,10 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) { int err; - priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, + priv->fs->ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); - if (!priv->fs.ns) + if (!priv->fs->ns) return -EINVAL; err = mlx5e_arfs_create_tables(priv); @@ -364,9 +364,18 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) struct mlx5_core_dev *mdev = priv->mdev; int err; - priv->rx_res = mlx5e_rx_res_alloc(); - if (!priv->rx_res) + priv->fs = mlx5e_fs_init(priv->profile, mdev, + !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); + if (!priv->fs) { + netdev_err(priv->netdev, "FS allocation failed\n"); return -ENOMEM; + } + + priv->rx_res = mlx5e_rx_res_alloc(); + if (!priv->rx_res) { + err = -ENOMEM; + goto err_free_fs; + } mlx5e_create_q_counters(priv); @@ -397,6 +406,8 @@ err_destroy_q_counters: mlx5e_destroy_q_counters(priv); mlx5e_rx_res_free(priv->rx_res); priv->rx_res = NULL; +err_free_fs: + mlx5e_fs_cleanup(priv->fs); return err; } @@ -408,6 +419,7 @@ static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) mlx5e_destroy_q_counters(priv); mlx5e_rx_res_free(priv->rx_res); priv->rx_res = NULL; + mlx5e_fs_cleanup(priv->fs); } /* The stats groups order is opposite to the update_stats() order calls */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 1383550f44c1..b1dfad274a39 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -22,6 +22,7 @@ enum dr_action_valid_state { DR_ACTION_STATE_PUSH_VLAN, DR_ACTION_STATE_NON_TERM, DR_ACTION_STATE_TERM, + DR_ACTION_STATE_ASO, DR_ACTION_STATE_MAX, }; @@ -42,6 +43,7 @@ static const char * const action_type_to_str[] = { [DR_ACTION_TYP_SAMPLER] = "DR_ACTION_TYP_SAMPLER", [DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR", [DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR", + [DR_ACTION_TYP_ASO_FLOW_METER] = "DR_ACTION_TYP_ASO_FLOW_METER", [DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN", }; @@ -71,6 +73,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_DECAP] = { 
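The en_tx.c hunk above switches the GSO inline-header computation to the inner TCP headers when the skb is encapsulated. A minimal sketch of that selection, assuming the standard skb helpers and leaving out the driver's hop-by-hop handling and TSO statistics:

	/* Sketch only; mlx5e_tx_get_gso_ihs() additionally handles the IPv6
	 * hop-by-hop case and updates the sq TSO counters.
	 */
	static int gso_inline_header_size(const struct sk_buff *skb)
	{
		return skb->encapsulation ? skb_inner_tcp_all_headers(skb) :
					    skb_tcp_all_headers(skb);
	}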
[DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -85,6 +88,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -93,6 +97,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_MODIFY_HDR] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -105,6 +110,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_POP_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -118,6 +124,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_PUSH_VLAN] = { [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, @@ -128,6 +135,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_NON_TERM] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -145,6 +153,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, + }, + [DR_ACTION_STATE_ASO] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_TERM] = { [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, @@ -163,18 +178,21 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_DECAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_MODIFY_HDR] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ 
-185,6 +203,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_POP_VLAN] = { [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, @@ -196,6 +215,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_PUSH_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -206,6 +226,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_NON_TERM] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -219,6 +240,16 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, + }, + [DR_ACTION_STATE_ASO] = { + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO, + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_TERM] = { [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, @@ -240,6 +271,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_DECAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -253,6 +285,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -261,6 +294,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_MODIFY_HDR] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -272,6 +306,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_POP_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -284,6 +319,7 @@ 
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_PUSH_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -296,6 +332,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_NON_TERM] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -312,6 +349,13 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, + }, + [DR_ACTION_STATE_ASO] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_TERM] = { [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, @@ -331,6 +375,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_DECAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -338,6 +383,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -345,6 +391,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_MODIFY_HDR] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -356,6 +403,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_POP_VLAN] = { [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, @@ -368,6 +416,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_PUSH_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -379,6 +428,7 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + 
[DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_NON_TERM] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -393,6 +443,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_ASO_FLOW_METER] = DR_ACTION_STATE_ASO, + }, + [DR_ACTION_STATE_ASO] = { + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_ASO, }, [DR_ACTION_STATE_TERM] = { [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, @@ -738,6 +799,12 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, attr.reformat.param_0 = action->reformat->param_0; attr.reformat.param_1 = action->reformat->param_1; break; + case DR_ACTION_TYP_ASO_FLOW_METER: + attr.aso_flow_meter.obj_id = action->aso->obj_id; + attr.aso_flow_meter.offset = action->aso->offset; + attr.aso_flow_meter.dest_reg_id = action->aso->dest_reg_id; + attr.aso_flow_meter.init_color = action->aso->init_color; + break; default: mlx5dr_err(dmn, "Unsupported action type %d\n", action_type); return -EINVAL; @@ -798,6 +865,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = { [DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat), [DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat), [DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler), + [DR_ACTION_TYP_ASO_FLOW_METER] = sizeof(struct mlx5dr_action_aso_flow_meter), }; static struct mlx5dr_action * @@ -1830,6 +1898,34 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *dmn, return action; } +struct mlx5dr_action * +mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, u32 obj_id, + u8 dest_reg_id, u8 aso_type, + u8 init_color, u8 meter_id) +{ + struct mlx5dr_action *action; + + if (aso_type != MLX5_EXE_ASO_FLOW_METER) + return NULL; + + if (init_color > MLX5_FLOW_METER_COLOR_UNDEFINED) + return NULL; + + action = dr_action_create_generic(DR_ACTION_TYP_ASO_FLOW_METER); + if (!action) + return NULL; + + action->aso->obj_id = obj_id; + action->aso->offset = meter_id; + action->aso->dest_reg_id = dest_reg_id; + action->aso->init_color = init_color; + action->aso->dmn = dmn; + + refcount_inc(&dmn->refcount); + + return action; +} + int mlx5dr_action_destroy(struct mlx5dr_action *action) { if (WARN_ON_ONCE(refcount_read(&action->refcount) > 1)) @@ -1881,6 +1977,9 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) case DR_ACTION_TYP_SAMPLER: refcount_dec(&action->sampler->dmn->refcount); break; + case DR_ACTION_TYP_ASO_FLOW_METER: + refcount_dec(&action->aso->dmn->refcount); + break; default: break; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index fcb962c6db2e..ee677a5c76be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -89,6 +89,7 @@ enum dr_ste_v1_action_id { DR_STE_V1_ACTION_ID_QUEUE_ID_SEL = 0x0d, DR_STE_V1_ACTION_ID_ACCELERATED_LIST = 0x0e, DR_STE_V1_ACTION_ID_MODIFY_LIST = 0x0f, + DR_STE_V1_ACTION_ID_ASO = 0x12, 
DR_STE_V1_ACTION_ID_TRAILER = 0x13, DR_STE_V1_ACTION_ID_COUNTER_ID = 0x14, DR_STE_V1_ACTION_ID_MAX = 0x21, @@ -129,6 +130,10 @@ enum { DR_STE_V1_ACTION_MDFY_FLD_REGISTER_0_1 = 0x91, }; +enum dr_ste_v1_aso_ctx_type { + DR_STE_V1_ASO_CTX_TYPE_POLICERS = 0x2, +}; + static const struct mlx5dr_ste_action_modify_field dr_ste_v1_action_modify_field_arr[] = { [MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16] = { .hw_field = DR_STE_V1_ACTION_MDFY_FLD_SRC_L2_OUT_0, .start = 0, .end = 31, @@ -494,6 +499,27 @@ static void dr_ste_v1_set_rewrite_actions(u8 *hw_ste_p, dr_ste_v1_set_reparse(hw_ste_p); } +static void dr_ste_v1_set_aso_flow_meter(u8 *d_action, + u32 object_id, + u32 offset, + u8 dest_reg_id, + u8 init_color) +{ + MLX5_SET(ste_double_action_aso_v1, d_action, action_id, + DR_STE_V1_ACTION_ID_ASO); + MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_number, + object_id + (offset / MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ)); + /* Convert reg_c index to HW 64bit index */ + MLX5_SET(ste_double_action_aso_v1, d_action, dest_reg_id, + (dest_reg_id - 1) / 2); + MLX5_SET(ste_double_action_aso_v1, d_action, aso_context_type, + DR_STE_V1_ASO_CTX_TYPE_POLICERS); + MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.line_id, + offset % MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ); + MLX5_SET(ste_double_action_aso_v1, d_action, flow_meter.initial_color, + init_color); +} + static void dr_ste_v1_arr_init_next_match(u8 **last_ste, u32 *added_stes, u16 gvmi) @@ -629,6 +655,21 @@ void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, action += DR_STE_ACTION_SINGLE_SZ; } + if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) { + if (action_sz < DR_STE_ACTION_DOUBLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_aso_flow_meter(action, + attr->aso_flow_meter.obj_id, + attr->aso_flow_meter.offset, + attr->aso_flow_meter.dest_reg_id, + attr->aso_flow_meter.init_color); + action_sz -= DR_STE_ACTION_DOUBLE_SZ; + action += DR_STE_ACTION_DOUBLE_SZ; + } + dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi); dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); } @@ -802,6 +843,21 @@ void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action += DR_STE_ACTION_SINGLE_SZ; } + if (action_type_set[DR_ACTION_TYP_ASO_FLOW_METER]) { + if (action_sz < DR_STE_ACTION_DOUBLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_aso_flow_meter(action, + attr->aso_flow_meter.obj_id, + attr->aso_flow_meter.offset, + attr->aso_flow_meter.dest_reg_id, + attr->aso_flow_meter.init_color); + action_sz -= DR_STE_ACTION_DOUBLE_SZ; + action += DR_STE_ACTION_DOUBLE_SZ; + } + dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi); dr_ste_v1_set_hit_addr(last_ste, attr->final_icm_addr, 1); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 98320e3945ad..feed227944b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -127,6 +127,7 @@ enum mlx5dr_action_type { DR_ACTION_TYP_INSERT_HDR, DR_ACTION_TYP_REMOVE_HDR, DR_ACTION_TYP_SAMPLER, + DR_ACTION_TYP_ASO_FLOW_METER, DR_ACTION_TYP_MAX, }; @@ -271,6 +272,13 @@ struct mlx5dr_ste_actions_attr { int count; u32 headers[MLX5DR_MAX_VLANS]; } vlans; + + 
struct { + u32 obj_id; + u32 offset; + u8 dest_reg_id; + u8 init_color; + } aso_flow_meter; }; void mlx5dr_ste_set_actions_rx(struct mlx5dr_ste_ctx *ste_ctx, @@ -1035,6 +1043,14 @@ struct mlx5dr_rule_action_member { struct list_head list; }; +struct mlx5dr_action_aso_flow_meter { + struct mlx5dr_domain *dmn; + u32 obj_id; + u32 offset; + u8 dest_reg_id; + u8 init_color; +}; + struct mlx5dr_action { enum mlx5dr_action_type action_type; refcount_t refcount; @@ -1049,6 +1065,7 @@ struct mlx5dr_action { struct mlx5dr_action_vport *vport; struct mlx5dr_action_push_vlan *push_vlan; struct mlx5dr_action_flow_tag *flow_tag; + struct mlx5dr_action_aso_flow_meter *aso; }; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 6a9abba92df6..75672663359d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -500,6 +500,27 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } } + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) { + if (fte->action.exe_aso.type != MLX5_EXE_ASO_FLOW_METER) { + err = -EOPNOTSUPP; + goto free_actions; + } + + tmp_action = + mlx5dr_action_create_aso(domain, + fte->action.exe_aso.object_id, + fte->action.exe_aso.return_reg_id, + fte->action.exe_aso.type, + fte->action.exe_aso.flow_meter.init_color, + fte->action.exe_aso.flow_meter.meter_idx); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + params.match_sz = match_sz; params.match_buf = (u64 *)fte->val; if (num_term_actions == 1) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h index 9604b2091358..fb078fa0f0cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h @@ -574,4 +574,30 @@ struct mlx5_ifc_dr_action_hw_copy_bits { u8 reserved_at_38[0x8]; }; +enum { + MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ = 2, +}; + +struct mlx5_ifc_ste_aso_flow_meter_action_bits { + u8 reserved_at_0[0xc]; + u8 action[0x1]; + u8 initial_color[0x2]; + u8 line_id[0x1]; +}; + +struct mlx5_ifc_ste_double_action_aso_v1_bits { + u8 action_id[0x8]; + u8 aso_context_number[0x18]; + + u8 dest_reg_id[0x2]; + u8 change_ordering_tag[0x1]; + u8 aso_check_ordering[0x1]; + u8 aso_context_type[0x4]; + u8 reserved_at_28[0x8]; + union { + u8 aso_fields[0x10]; + struct mlx5_ifc_ste_aso_flow_meter_action_bits flow_meter; + }; +}; + #endif /* MLX5_IFC_DR_H */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index 7626c85643b1..ecffc2cbe6fe 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -131,6 +131,14 @@ struct mlx5dr_action *mlx5dr_action_create_pop_vlan(void); struct mlx5dr_action * mlx5dr_action_create_push_vlan(struct mlx5dr_domain *domain, __be32 vlan_hdr); +struct mlx5dr_action * +mlx5dr_action_create_aso(struct mlx5dr_domain *dmn, + u32 obj_id, + u8 return_reg_id, + u8 aso_type, + u8 init_color, + u8 meter_id); + int mlx5dr_action_destroy(struct mlx5dr_action *action); static inline bool |
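Side note on the mlx5i_init_rx()/mlx5i_cleanup_rx() hunks above: with mlx5e_flow_steering now a separately allocated object, the IPoIB RX init allocates it before anything else, unwinds through it on any later failure (the new err_free_fs label), and releases it last in cleanup. The snippet below is only a compiler-checkable illustration of that goto-unwind ordering with stub allocations and hypothetical names; it is not driver code and does not use the real mlx5e_fs_init()/mlx5e_fs_cleanup() symbols.

```c
#include <errno.h>
#include <stdlib.h>

/* Stand-ins for priv->fs and priv->rx_res; names are illustrative only. */
struct fs_obj { int dummy; };
struct rx_res { int dummy; };

struct priv {
	struct fs_obj *fs;
	struct rx_res *rx_res;
};

static int init_rx(struct priv *priv)
{
	int err;

	priv->fs = calloc(1, sizeof(*priv->fs));          /* mlx5e_fs_init() analogue: first */
	if (!priv->fs)
		return -ENOMEM;

	priv->rx_res = calloc(1, sizeof(*priv->rx_res));  /* mlx5e_rx_res_alloc() analogue */
	if (!priv->rx_res) {
		err = -ENOMEM;
		goto err_free_fs;                         /* later failures unwind through fs */
	}
	return 0;

err_free_fs:
	free(priv->fs);                                   /* mlx5e_fs_cleanup() analogue */
	priv->fs = NULL;
	return err;
}

static void cleanup_rx(struct priv *priv)
{
	free(priv->rx_res);                               /* teardown mirrors init in reverse */
	priv->rx_res = NULL;
	free(priv->fs);                                   /* fs goes last, as in mlx5i_cleanup_rx() */
	priv->fs = NULL;
}

int main(void)
{
	struct priv p = { 0 };

	if (!init_rx(&p))
		cleanup_rx(&p);
	return 0;
}
```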
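The next_action_state[] additions in dr_action.c slot the new DR_ACTION_STATE_ASO into the existing action-ordering tables: from most states an ASO flow meter action moves the walk into the ASO state, and from there only a counter, a terminating destination (drop, FT, QP, vport) or a follow-up rewrite/encap/push-VLAN is accepted. The sketch below is a much-reduced, self-contained model of that table-driven validation; the state and type names are illustrative, not the driver's full enums, and the end-in-terminator check is part of this simplified model only.

```c
#include <stdbool.h>
#include <stdio.h>

/* Reduced state/type sets for illustration; dr_action.c covers many more
 * action types and separate tables per domain (NIC RX, NIC TX, FDB).
 */
enum state { ST_ERR, ST_NO_ACTION, ST_NON_TERM, ST_ASO, ST_TERM, ST_MAX };
enum type  { TYP_CTR, TYP_ASO_FLOW_METER, TYP_FT, TYP_MAX };

/* Unset entries default to ST_ERR (0), i.e. "this action is not allowed here". */
static const enum state next_state[ST_MAX][TYP_MAX] = {
	[ST_NO_ACTION] = { [TYP_CTR] = ST_NON_TERM, [TYP_ASO_FLOW_METER] = ST_ASO, [TYP_FT] = ST_TERM },
	[ST_NON_TERM]  = { [TYP_CTR] = ST_NON_TERM, [TYP_ASO_FLOW_METER] = ST_ASO, [TYP_FT] = ST_TERM },
	[ST_ASO]       = { [TYP_CTR] = ST_ASO,      [TYP_FT] = ST_TERM },
	[ST_TERM]      = { [TYP_CTR] = ST_TERM },
};

/* Walk an action list and reject any transition the table does not allow. */
static bool actions_are_ordered(const enum type *actions, int n)
{
	enum state state = ST_NO_ACTION;

	for (int i = 0; i < n; i++) {
		enum state next = next_state[state][actions[i]];

		if (next == ST_ERR)
			return false;       /* no table entry for this (state, action) pair */
		state = next;
	}
	return state == ST_TERM;            /* in this model, a rule must end on a terminator */
}

int main(void)
{
	enum type ok[]  = { TYP_CTR, TYP_ASO_FLOW_METER, TYP_FT };
	enum type bad[] = { TYP_FT, TYP_ASO_FLOW_METER };  /* nothing may follow the terminator */

	printf("ok=%d bad=%d\n", actions_are_ordered(ok, 3), actions_are_ordered(bad, 2));
	return 0;
}
```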
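Finally, the dr_ste_v1_set_aso_flow_meter() hunk shows how the STE double action is derived from the parameters that fs_dr.c passes down (object_id, meter_idx as the offset, return_reg_id as the destination register): two meters share one ASO object (MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ = 2), so the offset selects both the ASO context number and the line within it, and the reg_c index is folded onto a 64-bit register index. The standalone sketch below reproduces only that arithmetic in plain C with hypothetical helper names; it is not part of the driver and does not touch the real MLX5_SET() field layout.

```c
#include <stdio.h>

#define ASO_FLOW_METER_NUM_PER_OBJ 2u  /* mirrors MLX5DR_ASO_FLOW_METER_NUM_PER_OBJ */

/* Hypothetical plain-C view of the three derived STE fields. */
struct aso_flow_meter_fields {
	unsigned int aso_context_number;  /* which ASO object holds the meter */
	unsigned int line_id;             /* which of the two meters inside that object */
	unsigned int hw_dest_reg_id;      /* 64-bit register index derived from the reg_c index */
};

/* Same arithmetic as dr_ste_v1_set_aso_flow_meter(): two meters per object,
 * and the 32-bit reg_c index converted to a 64-bit register index.
 */
static struct aso_flow_meter_fields
encode_aso_flow_meter(unsigned int obj_id, unsigned int offset, unsigned int dest_reg_id)
{
	return (struct aso_flow_meter_fields){
		.aso_context_number = obj_id + offset / ASO_FLOW_METER_NUM_PER_OBJ,
		.line_id = offset % ASO_FLOW_METER_NUM_PER_OBJ,
		.hw_dest_reg_id = (dest_reg_id - 1) / 2,
	};
}

int main(void)
{
	/* e.g. meter_idx 5 on ASO object 0x10, color reported through reg_c_5 */
	struct aso_flow_meter_fields f = encode_aso_flow_meter(0x10, 5, 5);

	printf("aso_context_number=%u line_id=%u hw_dest_reg_id=%u\n",
	       f.aso_context_number, f.line_id, f.hw_dest_reg_id);
	return 0;
}
```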