Merge branch 'net-mlx5-misc-changes-2025-09-28'

Tariq Toukan says:

====================
net/mlx5: misc changes 2025-09-28

This series contains misc enhancements to the mlx5 driver.

v1: https://lore.kernel.org/1758531671-819655-1-git-send-email-tariqt@nvidia.com
====================

Link: https://patch.msgid.link/1759094723-843774-1-git-send-email-tariqt@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
This commit is contained in:
Jakub Kicinski
2025-09-30 17:21:17 -07:00
14 changed files with 1033 additions and 1313 deletions

View File

@@ -75,15 +75,14 @@ struct mlx5e_rss {
struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS];
struct mlx5e_rqt rqt;
struct mlx5_core_dev *mdev; /* primary */
u32 drop_rqn;
bool inner_ft_support;
struct mlx5e_rss_params params;
bool enabled;
refcount_t refcnt;
};
bool mlx5e_rss_get_inner_ft_support(struct mlx5e_rss *rss)
{
return rss->inner_ft_support;
return rss->params.inner_ft_support;
}
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels)
@@ -91,7 +90,7 @@ void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_ch
rss->indir.actual_table_size = mlx5e_rqt_size(rss->mdev, num_channels);
}
int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir,
u32 actual_table_size, u32 max_table_size)
{
indir->table = kvmalloc_array(max_table_size, sizeof(*indir->table), GFP_KERNEL);
@@ -139,7 +138,8 @@ static struct mlx5e_rss *mlx5e_rss_init_copy(const struct mlx5e_rss *from)
if (!rss)
return ERR_PTR(-ENOMEM);
err = mlx5e_rss_params_indir_init(&rss->indir, from->mdev, from->indir.actual_table_size,
err = mlx5e_rss_params_indir_init(&rss->indir,
from->indir.actual_table_size,
from->indir.max_table_size);
if (err)
goto err_free_rss;
@@ -192,11 +192,12 @@ mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt)
return rss_tt;
}
static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner)
static int
mlx5e_rss_create_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *pkt_merge_param,
bool inner)
{
bool rss_inner = rss->params.inner_ft_support;
struct mlx5e_rss_params_traffic_type rss_tt;
struct mlx5e_tir_builder *builder;
struct mlx5e_tir **tir_p;
@@ -204,7 +205,7 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
u32 rqtn;
int err;
if (inner && !rss->inner_ft_support) {
if (inner && !rss_inner) {
mlx5e_rss_warn(rss->mdev,
"Cannot create inner indirect TIR[%d], RSS inner FT is not supported.\n",
tt);
@@ -227,8 +228,8 @@ static int mlx5e_rss_create_tir(struct mlx5e_rss *rss,
rqtn = mlx5e_rqt_get_rqtn(&rss->rqt);
mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn,
rqtn, rss->inner_ft_support);
mlx5e_tir_builder_build_packet_merge(builder, init_pkt_merge_param);
rqtn, rss_inner);
mlx5e_tir_builder_build_packet_merge(builder, pkt_merge_param);
rss_tt = mlx5e_rss_get_tt_config(rss, tt);
mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner);
@@ -264,15 +265,16 @@ static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types
*tir_p = NULL;
}
static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner)
static int
mlx5e_rss_create_tirs(struct mlx5e_rss *rss,
const struct mlx5e_packet_merge_param *pkt_merge_param,
bool inner)
{
enum mlx5_traffic_types tt, max_tt;
int err;
for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
err = mlx5e_rss_create_tir(rss, tt, pkt_merge_param, inner);
if (err)
goto err_destroy_tirs;
}
@@ -335,7 +337,7 @@ static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss)
tt, err);
}
if (!rss->inner_ft_support)
if (!rss->params.inner_ft_support)
continue;
err = mlx5e_rss_update_tir(rss, tt, true);
@@ -355,14 +357,16 @@ static int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss)
refcount_set(&rss->refcnt, 1);
return mlx5e_rqt_init_direct(&rss->rqt, rss->mdev, true,
rss->drop_rqn, rss->indir.max_table_size);
rss->params.drop_rqn,
rss->indir.max_table_size);
}
struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
enum mlx5e_rss_init_type type, unsigned int nch,
unsigned int max_nch)
struct mlx5e_rss *
mlx5e_rss_init(struct mlx5_core_dev *mdev,
const struct mlx5e_rss_params *params,
const struct mlx5e_rss_init_params *init_params)
{
u32 rqt_max_size, rqt_size;
struct mlx5e_rss *rss;
int err;
@@ -370,29 +374,31 @@ struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_suppo
if (!rss)
return ERR_PTR(-ENOMEM);
err = mlx5e_rss_params_indir_init(&rss->indir, mdev,
mlx5e_rqt_size(mdev, nch),
mlx5e_rqt_size(mdev, max_nch));
rqt_size = mlx5e_rqt_size(mdev, init_params->nch);
rqt_max_size = mlx5e_rqt_size(mdev, init_params->max_nch);
err = mlx5e_rss_params_indir_init(&rss->indir, rqt_size, rqt_max_size);
if (err)
goto err_free_rss;
rss->mdev = mdev;
rss->inner_ft_support = inner_ft_support;
rss->drop_rqn = drop_rqn;
rss->params = *params;
err = mlx5e_rss_init_no_tirs(rss);
if (err)
goto err_free_indir;
if (type == MLX5E_RSS_INIT_NO_TIRS)
if (init_params->type == MLX5E_RSS_INIT_NO_TIRS)
goto out;
err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, false);
err = mlx5e_rss_create_tirs(rss, init_params->pkt_merge_param,
false);
if (err)
goto err_destroy_rqt;
if (inner_ft_support) {
err = mlx5e_rss_create_tirs(rss, init_pkt_merge_param, true);
if (params->inner_ft_support) {
err = mlx5e_rss_create_tirs(rss,
init_params->pkt_merge_param,
true);
if (err)
goto err_destroy_tirs;
}
@@ -418,7 +424,7 @@ int mlx5e_rss_cleanup(struct mlx5e_rss *rss)
mlx5e_rss_destroy_tirs(rss, false);
if (rss->inner_ft_support)
if (rss->params.inner_ft_support)
mlx5e_rss_destroy_tirs(rss, true);
mlx5e_rqt_destroy(&rss->rqt);
@@ -448,7 +454,7 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
{
struct mlx5e_tir *tir;
WARN_ON(inner && !rss->inner_ft_support);
WARN_ON(inner && !rss->params.inner_ft_support);
tir = rss_get_tir(rss, tt, inner);
WARN_ON(!tir);
@@ -468,10 +474,10 @@ bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool
/* Fill the "tirn" output parameter.
* Create the requested TIR if it's its first usage.
*/
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn)
int
mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *pkt_merge_param,
bool inner, u32 *tirn)
{
struct mlx5e_tir *tir;
@@ -479,7 +485,7 @@ int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
if (!tir) { /* TIR doesn't exist, create one */
int err;
err = mlx5e_rss_create_tir(rss, tt, init_pkt_merge_param, inner);
err = mlx5e_rss_create_tir(rss, tt, pkt_merge_param, inner);
if (err)
return err;
tir = rss_get_tir(rss, tt, inner);
@@ -512,10 +518,11 @@ void mlx5e_rss_disable(struct mlx5e_rss *rss)
int err;
rss->enabled = false;
err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn, NULL);
err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->params.drop_rqn, NULL);
if (err)
mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n",
mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err);
mlx5e_rqt_get_rqtn(&rss->rqt),
rss->params.drop_rqn, err);
}
int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
@@ -548,7 +555,7 @@ int mlx5e_rss_packet_merge_set_param(struct mlx5e_rss *rss,
}
inner_tir:
if (!rss->inner_ft_support)
if (!rss->params.inner_ft_support)
continue;
tir = rss_get_tir(rss, tt, true);
@@ -681,7 +688,7 @@ int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
return err;
}
if (!(rss->inner_ft_support))
if (!(rss->params.inner_ft_support))
return 0;
err = mlx5e_rss_update_tir(rss, tt, true);

View File

@@ -13,19 +13,31 @@ enum mlx5e_rss_init_type {
MLX5E_RSS_INIT_TIRS
};
/* One-shot parameters consumed only during mlx5e_rss_init(). */
struct mlx5e_rss_init_params {
/* MLX5E_RSS_INIT_NO_TIRS skips TIR creation at init time. */
enum mlx5e_rss_init_type type;
/* Packet-merge configuration applied to the TIRs created at init. */
const struct mlx5e_packet_merge_param *pkt_merge_param;
/* Initial channel count; determines the actual RQT size. */
unsigned int nch;
/* Maximum channel count; determines the maximum RQT size. */
unsigned int max_nch;
};
/* Persistent RSS parameters, copied into struct mlx5e_rss at init
 * and consulted for the object's whole lifetime.
 */
struct mlx5e_rss_params {
/* Whether inner (tunneled) flow-table TIRs are supported/created. */
bool inner_ft_support;
/* RQN the RQT is redirected to when RSS is disabled. */
u32 drop_rqn;
};
struct mlx5e_rss_params_traffic_type
mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt);
struct mlx5e_rss;
int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir, struct mlx5_core_dev *mdev,
int mlx5e_rss_params_indir_init(struct mlx5e_rss_params_indir *indir,
u32 actual_table_size, u32 max_table_size);
void mlx5e_rss_params_indir_cleanup(struct mlx5e_rss_params_indir *indir);
void mlx5e_rss_params_indir_modify_actual_size(struct mlx5e_rss *rss, u32 num_channels);
struct mlx5e_rss *mlx5e_rss_init(struct mlx5_core_dev *mdev, bool inner_ft_support, u32 drop_rqn,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
enum mlx5e_rss_init_type type, unsigned int nch,
unsigned int max_nch);
struct mlx5e_rss *
mlx5e_rss_init(struct mlx5_core_dev *mdev,
const struct mlx5e_rss_params *params,
const struct mlx5e_rss_init_params *init_params);
int mlx5e_rss_cleanup(struct mlx5e_rss *rss);
void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss);
@@ -37,10 +49,10 @@ u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
bool inner);
bool mlx5e_rss_valid_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, bool inner);
u32 mlx5e_rss_get_rqtn(struct mlx5e_rss *rss);
int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss,
enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
bool inner, u32 *tirn);
int
mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt,
const struct mlx5e_packet_merge_param *pkt_merge_param,
bool inner, u32 *tirn);
void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, u32 *vhca_ids, unsigned int num_rqns);
void mlx5e_rss_disable(struct mlx5e_rss *rss);

View File

@@ -54,17 +54,30 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_rss_init_params init_params;
struct mlx5e_rss_params rss_params;
struct mlx5e_rss *rss;
if (WARN_ON(res->rss[0]))
return -EINVAL;
rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
&res->pkt_merge_param, MLX5E_RSS_INIT_TIRS, init_nch, res->max_nch);
init_params = (struct mlx5e_rss_init_params) {
.type = MLX5E_RSS_INIT_TIRS,
.pkt_merge_param = &res->pkt_merge_param,
.nch = init_nch,
.max_nch = res->max_nch,
};
rss_params = (struct mlx5e_rss_params) {
.inner_ft_support = inner_ft_support,
.drop_rqn = res->drop_rqn,
};
rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
if (IS_ERR(rss))
return PTR_ERR(rss);
mlx5e_rss_set_indir_uniform(rss, init_nch);
mlx5e_rss_set_indir_uniform(rss, init_params.nch);
res->rss[0] = rss;
@@ -74,18 +87,30 @@ static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res,
int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 rss_idx, unsigned int init_nch)
{
bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT;
struct mlx5e_rss_init_params init_params;
struct mlx5e_rss_params rss_params;
struct mlx5e_rss *rss;
if (WARN_ON_ONCE(res->rss[rss_idx]))
return -ENOSPC;
rss = mlx5e_rss_init(res->mdev, inner_ft_support, res->drop_rqn,
&res->pkt_merge_param, MLX5E_RSS_INIT_NO_TIRS, init_nch,
res->max_nch);
init_params = (struct mlx5e_rss_init_params) {
.type = MLX5E_RSS_INIT_NO_TIRS,
.pkt_merge_param = &res->pkt_merge_param,
.nch = init_nch,
.max_nch = res->max_nch,
};
rss_params = (struct mlx5e_rss_params) {
.inner_ft_support = inner_ft_support,
.drop_rqn = res->drop_rqn,
};
rss = mlx5e_rss_init(res->mdev, &rss_params, &init_params);
if (IS_ERR(rss))
return PTR_ERR(rss);
mlx5e_rss_set_indir_uniform(rss, init_nch);
mlx5e_rss_set_indir_uniform(rss, init_params.nch);
if (res->rss_active) {
u32 *vhca_ids = get_vhca_ids(res, 0);
@@ -438,7 +463,7 @@ static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res)
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
unsigned int max_nch, u32 drop_rqn,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
const struct mlx5e_packet_merge_param *pkt_merge_param,
unsigned int init_nch)
{
bool multi_vhca = features & MLX5E_RX_RES_FEATURE_MULTI_VHCA;
@@ -454,7 +479,7 @@ mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features featu
res->max_nch = max_nch;
res->drop_rqn = drop_rqn;
res->pkt_merge_param = *init_pkt_merge_param;
res->pkt_merge_param = *pkt_merge_param;
init_rwsem(&res->pkt_merge_param_sem);
err = mlx5e_rx_res_rss_init_def(res, init_nch);

View File

@@ -27,7 +27,7 @@ enum mlx5e_rx_res_features {
struct mlx5e_rx_res *
mlx5e_rx_res_create(struct mlx5_core_dev *mdev, enum mlx5e_rx_res_features features,
unsigned int max_nch, u32 drop_rqn,
const struct mlx5e_packet_merge_param *init_pkt_merge_param,
const struct mlx5e_packet_merge_param *pkt_merge_param,
unsigned int init_nch);
void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res);

View File

@@ -1494,7 +1494,8 @@ static int mlx5e_get_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *
}
static int mlx5e_rxfh_hfunc_check(struct mlx5e_priv *priv,
const struct ethtool_rxfh_param *rxfh)
const struct ethtool_rxfh_param *rxfh,
struct netlink_ext_ack *extack)
{
unsigned int count;
@@ -1504,8 +1505,10 @@ static int mlx5e_rxfh_hfunc_check(struct mlx5e_priv *priv,
unsigned int xor8_max_channels = mlx5e_rqt_max_num_channels_allowed_for_xor8();
if (count > xor8_max_channels) {
netdev_err(priv->netdev, "%s: Cannot set RSS hash function to XOR, current number of channels (%d) exceeds the maximum allowed for XOR8 RSS hfunc (%d)\n",
__func__, count, xor8_max_channels);
NL_SET_ERR_MSG_FMT_MOD(
extack,
"Number of channels (%u) exceeds the max for XOR8 RSS (%u)",
count, xor8_max_channels);
return -EINVAL;
}
}
@@ -1524,7 +1527,7 @@ static int mlx5e_set_rxfh(struct net_device *dev,
mutex_lock(&priv->state_lock);
err = mlx5e_rxfh_hfunc_check(priv, rxfh);
err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
if (err)
goto unlock;
@@ -1550,7 +1553,7 @@ static int mlx5e_create_rxfh_context(struct net_device *dev,
mutex_lock(&priv->state_lock);
err = mlx5e_rxfh_hfunc_check(priv, rxfh);
err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
if (err)
goto unlock;
@@ -1590,7 +1593,7 @@ static int mlx5e_modify_rxfh_context(struct net_device *dev,
mutex_lock(&priv->state_lock);
err = mlx5e_rxfh_hfunc_check(priv, rxfh);
err = mlx5e_rxfh_hfunc_check(priv, rxfh, extack);
if (err)
goto unlock;

View File

@@ -758,11 +758,11 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
struct mlx5e_priv *priv = hp->func_priv;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rss_params_indir indir;
u32 rqt_size;
int err;
err = mlx5e_rss_params_indir_init(&indir, mdev,
mlx5e_rqt_size(mdev, hp->num_channels),
mlx5e_rqt_size(mdev, hp->num_channels));
rqt_size = mlx5e_rqt_size(mdev, hp->num_channels);
err = mlx5e_rss_params_indir_init(&indir, rqt_size, rqt_size);
if (err)
return err;

View File

@@ -971,8 +971,9 @@ esw_qos_vport_tc_enable(struct mlx5_vport *vport, enum sched_node_type type,
max_level = 1 << MLX5_CAP_QOS(vport_node->esw->dev,
log_esw_max_sched_depth);
if (new_level > max_level) {
NL_SET_ERR_MSG_MOD(extack,
"TC arbitration on leafs is not supported beyond max scheduling depth");
NL_SET_ERR_MSG_FMT_MOD(extack,
"TC arbitration on leafs is not supported beyond max depth %d",
max_level);
return -EOPNOTSUPP;
}
}
@@ -1444,8 +1445,9 @@ static int esw_qos_node_enable_tc_arbitration(struct mlx5_esw_sched_node *node,
new_level = node->level + 1;
max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
if (new_level > max_level) {
NL_SET_ERR_MSG_MOD(extack,
"TC arbitration on nodes is not supported beyond max scheduling depth");
NL_SET_ERR_MSG_FMT_MOD(extack,
"TC arbitration on nodes is not supported beyond max depth %d",
max_level);
return -EOPNOTSUPP;
}
@@ -1997,8 +1999,9 @@ mlx5_esw_qos_node_validate_set_parent(struct mlx5_esw_sched_node *node,
max_level = 1 << MLX5_CAP_QOS(node->esw->dev, log_esw_max_sched_depth);
if (new_level > max_level) {
NL_SET_ERR_MSG_MOD(extack,
"Node hierarchy depth exceeds the maximum supported level");
NL_SET_ERR_MSG_FMT_MOD(extack,
"Node hierarchy depth %d exceeds the maximum supported level %d",
new_level, max_level);
return -EOPNOTSUPP;
}

View File

@@ -3774,6 +3774,29 @@ void mlx5_eswitch_unblock_mode(struct mlx5_core_dev *dev)
up_write(&esw->mode_lock);
}
/* Returns false only when uplink netdev exists and its netns is different from
 * devlink's netns. True for all others so entering switchdev mode is allowed.
 */
static bool mlx5_devlink_netdev_netns_immutable_set(struct devlink *devlink,
bool immutable)
{
struct mlx5_core_dev *mdev = devlink_priv(devlink);
struct net_device *netdev;
bool ret;
/* No uplink netdev registered: nothing to mark, allow the transition. */
netdev = mlx5_uplink_netdev_get(mdev);
if (!netdev)
return true;
/* RTNL held while flipping netns_immutable and comparing namespaces —
 * presumably so the flag update and the netns check are consistent
 * against concurrent netns moves; NOTE(review): confirm locking rule.
 */
rtnl_lock();
netdev->netns_immutable = immutable;
ret = net_eq(dev_net(netdev), devlink_net(devlink));
rtnl_unlock();
/* Drop the reference taken by mlx5_uplink_netdev_get(). */
mlx5_uplink_netdev_put(mdev, netdev);
return ret;
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
struct netlink_ext_ack *extack)
{
@@ -3816,6 +3839,14 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
esw->eswitch_operation_in_progress = true;
up_write(&esw->mode_lock);
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV &&
!mlx5_devlink_netdev_netns_immutable_set(devlink, true)) {
NL_SET_ERR_MSG_MOD(extack,
"Can't change E-Switch mode to switchdev when netdev net namespace has diverged from the devlink's.");
err = -EINVAL;
goto skip;
}
if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
esw->dev->priv.flags |= MLX5_PRIV_FLAGS_SWITCH_LEGACY;
mlx5_eswitch_disable_locked(esw);
@@ -3834,6 +3865,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
}
skip:
if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && err)
mlx5_devlink_netdev_netns_immutable_set(devlink, false);
down_write(&esw->mode_lock);
esw->eswitch_operation_in_progress = false;
unlock:

View File

@@ -51,9 +51,6 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
u8 size_log_rx, u8 size_log_tx,
struct mlx5hws_matcher_attr *attr)
{
struct mlx5hws_bwc_matcher *first_matcher =
bwc_matcher->complex_first_bwc_matcher;
memset(attr, 0, sizeof(*attr));
attr->priority = priority;
@@ -66,9 +63,6 @@ static void hws_bwc_matcher_init_attr(struct mlx5hws_bwc_matcher *bwc_matcher,
attr->size[MLX5HWS_MATCHER_SIZE_TYPE_TX].rule.num_log = size_log_tx;
attr->resizable = true;
attr->max_num_of_at_attach = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
attr->isolated_matcher_end_ft_id =
first_matcher ? first_matcher->matcher->end_ft_id : 0;
}
static int
@@ -171,10 +165,16 @@ hws_bwc_matcher_move_all_simple(struct mlx5hws_bwc_matcher *bwc_matcher)
static int hws_bwc_matcher_move_all(struct mlx5hws_bwc_matcher *bwc_matcher)
{
if (!bwc_matcher->complex)
switch (bwc_matcher->matcher_type) {
case MLX5HWS_BWC_MATCHER_SIMPLE:
return hws_bwc_matcher_move_all_simple(bwc_matcher);
return mlx5hws_bwc_matcher_move_all_complex(bwc_matcher);
case MLX5HWS_BWC_MATCHER_COMPLEX_FIRST:
return mlx5hws_bwc_matcher_complex_move_first(bwc_matcher);
case MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER:
return mlx5hws_bwc_matcher_complex_move(bwc_matcher);
default:
return -EINVAL;
}
}
static int hws_bwc_matcher_move(struct mlx5hws_bwc_matcher *bwc_matcher)
@@ -249,6 +249,7 @@ int mlx5hws_bwc_matcher_create_simple(struct mlx5hws_bwc_matcher *bwc_matcher,
bwc_matcher->tx_size.size_log,
&attr);
bwc_matcher->matcher_type = MLX5HWS_BWC_MATCHER_SIMPLE;
bwc_matcher->priority = priority;
bwc_matcher->size_of_at_array = MLX5HWS_BWC_MATCHER_ATTACH_AT_NUM;
@@ -393,7 +394,7 @@ int mlx5hws_bwc_matcher_destroy(struct mlx5hws_bwc_matcher *bwc_matcher)
"BWC matcher destroy: matcher still has %u RX and %u TX rules\n",
rx_rules, tx_rules);
if (bwc_matcher->complex)
if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
mlx5hws_bwc_matcher_destroy_complex(bwc_matcher);
else
mlx5hws_bwc_matcher_destroy_simple(bwc_matcher);
@@ -651,7 +652,8 @@ int mlx5hws_bwc_rule_destroy_simple(struct mlx5hws_bwc_rule *bwc_rule)
int mlx5hws_bwc_rule_destroy(struct mlx5hws_bwc_rule *bwc_rule)
{
bool is_complex = !!bwc_rule->bwc_matcher->complex;
bool is_complex = bwc_rule->bwc_matcher->matcher_type ==
MLX5HWS_BWC_MATCHER_COMPLEX_FIRST;
int ret = 0;
if (is_complex)
@@ -1147,7 +1149,7 @@ mlx5hws_bwc_rule_create(struct mlx5hws_bwc_matcher *bwc_matcher,
bwc_queue_idx = hws_bwc_gen_queue_idx(ctx);
if (bwc_matcher->complex)
if (bwc_matcher->matcher_type == MLX5HWS_BWC_MATCHER_COMPLEX_FIRST)
ret = mlx5hws_bwc_rule_create_complex(bwc_rule,
params,
flow_source,
@@ -1216,10 +1218,9 @@ int mlx5hws_bwc_rule_action_update(struct mlx5hws_bwc_rule *bwc_rule,
return -EINVAL;
}
/* For complex rule, the update should happen on the second matcher */
if (bwc_rule->isolated_bwc_rule)
return hws_bwc_rule_action_update(bwc_rule->isolated_bwc_rule,
rule_actions);
else
return hws_bwc_rule_action_update(bwc_rule, rule_actions);
/* For complex rules, the update should happen on the last subrule. */
while (bwc_rule->next_subrule)
bwc_rule = bwc_rule->next_subrule;
return hws_bwc_rule_action_update(bwc_rule, rule_actions);
}

View File

@@ -18,6 +18,21 @@
#define MLX5HWS_BWC_POLLING_TIMEOUT 60
enum mlx5hws_bwc_matcher_type {
/* Standalone bwc matcher. */
MLX5HWS_BWC_MATCHER_SIMPLE,
/* The first matcher of a complex matcher. When rules are inserted into
* a matcher of this type, they are split into subrules and inserted
* into their corresponding submatchers.
*/
MLX5HWS_BWC_MATCHER_COMPLEX_FIRST,
/* A submatcher that is part of a complex matcher. For most purposes
* these are treated as simple matchers, except when it comes to moving
* rules during resize.
*/
MLX5HWS_BWC_MATCHER_COMPLEX_SUBMATCHER,
};
struct mlx5hws_bwc_matcher_complex_data;
struct mlx5hws_bwc_matcher_size {
@@ -31,9 +46,9 @@ struct mlx5hws_bwc_matcher {
struct mlx5hws_match_template *mt;
struct mlx5hws_action_template **at;
struct mlx5hws_bwc_matcher_complex_data *complex;
struct mlx5hws_bwc_matcher *complex_first_bwc_matcher;
u8 num_of_at;
u8 size_of_at_array;
enum mlx5hws_bwc_matcher_type matcher_type;
u32 priority;
struct mlx5hws_bwc_matcher_size rx_size;
struct mlx5hws_bwc_matcher_size tx_size;
@@ -43,8 +58,8 @@ struct mlx5hws_bwc_matcher {
struct mlx5hws_bwc_rule {
struct mlx5hws_bwc_matcher *bwc_matcher;
struct mlx5hws_rule *rule;
struct mlx5hws_bwc_rule *isolated_bwc_rule;
struct mlx5hws_bwc_complex_rule_hash_node *complex_hash_node;
struct mlx5hws_bwc_rule *next_subrule;
struct mlx5hws_bwc_complex_subrule_data *subrule_data;
u32 flow_source;
u16 bwc_queue_idx;
bool skip_rx;

View File

@@ -4,25 +4,60 @@
#ifndef HWS_BWC_COMPLEX_H_
#define HWS_BWC_COMPLEX_H_
struct mlx5hws_bwc_complex_rule_hash_node {
u32 match_buf[MLX5_ST_SZ_DW_MATCH_PARAM];
u32 tag;
#define MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS 4
/* A matcher can't contain two rules with the same match tag, but it is possible
* that two different complex rules' subrules have the same match tag. In that
* case, those subrules correspond to a single rule, and we need to refcount.
*/
struct mlx5hws_bwc_complex_subrule_data {
struct mlx5hws_rule_match_tag match_tag;
refcount_t refcount;
bool rtc_valid;
/* The chain_id is what glues individual subrules into larger complex
* rules. It is the value that this subrule writes to register C6, and
* that the next subrule matches against.
*/
u32 chain_id;
u32 rtc_0;
u32 rtc_1;
/* During rehash we iterate through all the subrules to move them. But
* two or more subrules can share the same physical rule in the
* submatcher, so we use `was_moved` to keep track if a given rule was
* already moved.
*/
bool was_moved;
struct rhash_head hash_node;
};
struct mlx5hws_bwc_complex_submatcher {
/* Isolated table that the matcher lives in. Not set for the first
* matcher, which lives in the original table.
*/
struct mlx5hws_table *tbl;
/* Match a rule with this action to go to `tbl`. This is set in all
* submatchers but the first.
*/
struct mlx5hws_action *action_tbl;
/* This submatcher's simple matcher. The first submatcher points to the
* outer (complex) matcher.
*/
struct mlx5hws_bwc_matcher *bwc_matcher;
struct rhashtable rules_hash;
struct ida chain_ida;
struct mutex hash_lock; /* Protect the hash and ida. */
};
struct mlx5hws_bwc_matcher_complex_data {
struct mlx5hws_table *isolated_tbl;
struct mlx5hws_bwc_matcher *isolated_bwc_matcher;
struct mlx5hws_bwc_complex_submatcher
submatchers[MLX5HWS_BWC_COMPLEX_MAX_SUBMATCHERS];
int num_submatchers;
/* Actions used by all but the last submatcher to point to the next
* submatcher in the chain. The last submatcher uses the action template
* from the complex matcher, to perform the actions that the user
* originally requested.
*/
struct mlx5hws_action *action_metadata;
struct mlx5hws_action *action_go_to_tbl;
struct mlx5hws_action *action_last;
struct rhashtable refcount_hash;
struct mutex hash_lock; /* Protect the refcount rhashtable */
struct ida metadata_ida;
};
bool mlx5hws_bwc_match_params_is_complex(struct mlx5hws_context *ctx,
@@ -37,7 +72,10 @@ int mlx5hws_bwc_matcher_create_complex(struct mlx5hws_bwc_matcher *bwc_matcher,
void mlx5hws_bwc_matcher_destroy_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
int mlx5hws_bwc_matcher_move_all_complex(struct mlx5hws_bwc_matcher *bwc_matcher);
int mlx5hws_bwc_matcher_complex_move(struct mlx5hws_bwc_matcher *bwc_matcher);
int
mlx5hws_bwc_matcher_complex_move_first(struct mlx5hws_bwc_matcher *bwc_matcher);
int mlx5hws_bwc_rule_create_complex(struct mlx5hws_bwc_rule *bwc_rule,
struct mlx5hws_match_parameters *params,

View File

@@ -1831,80 +1831,6 @@ err_free_fc:
return ret;
}
struct mlx5hws_definer_fc *
mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
u8 match_criteria_enable,
u32 *match_param,
int *fc_sz)
{
struct mlx5hws_definer_fc *compressed_fc = NULL;
struct mlx5hws_definer_conv_data cd = {0};
struct mlx5hws_definer_fc *fc;
int ret;
fc = hws_definer_alloc_fc(ctx, MLX5HWS_DEFINER_FNAME_MAX);
if (!fc)
return NULL;
cd.fc = fc;
cd.ctx = ctx;
if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_OUTER) {
ret = hws_definer_conv_outer(&cd, match_param);
if (ret)
goto err_free_fc;
}
if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_INNER) {
ret = hws_definer_conv_inner(&cd, match_param);
if (ret)
goto err_free_fc;
}
if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC) {
ret = hws_definer_conv_misc(&cd, match_param);
if (ret)
goto err_free_fc;
}
if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC2) {
ret = hws_definer_conv_misc2(&cd, match_param);
if (ret)
goto err_free_fc;
}
if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC3) {
ret = hws_definer_conv_misc3(&cd, match_param);
if (ret)
goto err_free_fc;
}
if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC4) {
ret = hws_definer_conv_misc4(&cd, match_param);
if (ret)
goto err_free_fc;
}
if (match_criteria_enable & MLX5HWS_DEFINER_MATCH_CRITERIA_MISC5) {
ret = hws_definer_conv_misc5(&cd, match_param);
if (ret)
goto err_free_fc;
}
/* Allocate fc array on mt */
compressed_fc = hws_definer_alloc_compressed_fc(fc);
if (!compressed_fc) {
mlx5hws_err(ctx,
"Convert to compressed fc: failed to set field copy to match template\n");
goto err_free_fc;
}
*fc_sz = hws_definer_get_fc_size(fc);
err_free_fc:
kfree(fc);
return compressed_fc;
}
static int
hws_definer_find_byte_in_tag(struct mlx5hws_definer *definer,
u32 hl_byte_off,
@@ -2067,7 +1993,7 @@ hws_definer_copy_sel_ctrl(struct mlx5hws_definer_sel_ctrl *ctrl,
static int
hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
struct mlx5hws_definer *definer,
u8 *hl)
u8 *hl, bool allow_jumbo)
{
struct mlx5hws_definer_sel_ctrl ctrl = {0};
bool found;
@@ -2084,6 +2010,9 @@ hws_definer_find_best_match_fit(struct mlx5hws_context *ctx,
return 0;
}
if (!allow_jumbo)
return -E2BIG;
/* Try to create a full/limited jumbo definer */
ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
DW_SELECTORS_MATCH;
@@ -2160,7 +2089,8 @@ int mlx5hws_definer_compare(struct mlx5hws_definer *definer_a,
int
mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
struct mlx5hws_match_template *mt,
struct mlx5hws_definer *match_definer)
struct mlx5hws_definer *match_definer,
bool allow_jumbo)
{
u8 *match_hl;
int ret;
@@ -2182,7 +2112,8 @@ mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
}
/* Find the match definer layout for header layout match union */
ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl);
ret = hws_definer_find_best_match_fit(ctx, match_definer, match_hl,
allow_jumbo);
if (ret) {
if (ret == -E2BIG)
mlx5hws_dbg(ctx,
@@ -2370,7 +2301,7 @@ int mlx5hws_definer_mt_init(struct mlx5hws_context *ctx,
struct mlx5hws_definer match_layout = {0};
int ret;
ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout);
ret = mlx5hws_definer_calc_layout(ctx, mt, &match_layout, true);
if (ret) {
mlx5hws_err(ctx, "Failed to calculate matcher definer layout\n");
return ret;

View File

@@ -823,13 +823,8 @@ void mlx5hws_definer_free(struct mlx5hws_context *ctx,
int mlx5hws_definer_calc_layout(struct mlx5hws_context *ctx,
struct mlx5hws_match_template *mt,
struct mlx5hws_definer *match_definer);
struct mlx5hws_definer_fc *
mlx5hws_definer_conv_match_params_to_compressed_fc(struct mlx5hws_context *ctx,
u8 match_criteria_enable,
u32 *match_param,
int *fc_sz);
struct mlx5hws_definer *match_definer,
bool allow_jumbo);
const char *mlx5hws_definer_fname_to_str(enum mlx5hws_definer_fname fname);