author	Saeed Mahameed <saeedm@nvidia.com>	2025-06-16 17:14:38 +0300
committer	Jakub Kicinski <kuba@kernel.org>	2025-06-17 18:34:12 -0700
commit	db3010bb5a0134644c45dc0df89e76e02553478c (patch)
tree	1a495012dd561f6372da872a9991e0d7a500f1c1
parent	d1668f119943aec7c10244e5481964fad24b7ba2 (diff)
net/mlx5e: Add support for UNREADABLE netmem page pools
On netdev_rx_queue_restart, a special type of page pool may be expected.
Declare support for UNREADABLE netmem iov pages in the pool params, but
only when the SHAMPO header/data split RQ mode is enabled, and set the
queue index in the page pool params struct.

SHAMPO mode requirement: without header split, RX needs to peek at the
data, so UNREADABLE_NETMEM cannot be used.

The patch also enables the use of a separate page pool for headers when
a memory provider is installed for the queue; otherwise the same common
page pool continues to be used.

Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Reviewed-by: Dragos Tatulea <dtatulea@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Reviewed-by: Mina Almasry <almasrymina@google.com>
Signed-off-by: Mark Bloch <mbloch@nvidia.com>
Link: https://patch.msgid.link/20250616141441.1243044-10-mbloch@nvidia.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
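For context, a minimal sketch (not taken from this patch) of how a driver
might build page pool params that opt into unreadable netmem only when
header/data split guarantees the CPU never reads payload pages. Field and
flag names follow include/net/page_pool/types.h; the helper name,
pool_size parameter, and hd_split flag are illustrative assumptions.

/* Hypothetical helper, for illustration only: builds a page pool whose
 * payload buffers may come from an unreadable memory provider (e.g.
 * devmem/io_uring zero-copy RX). Assumes the header/data split decision
 * is made elsewhere and passed in as hd_split.
 */
#include <net/page_pool/types.h>
#include <net/page_pool/helpers.h>

static struct page_pool *example_create_rx_pool(struct net_device *netdev,
						struct device *dma_dev,
						u32 pool_size, int queue_idx,
						bool hd_split)
{
	struct page_pool_params pp_params = {
		.order		= 0,
		.pool_size	= pool_size,
		.nid		= NUMA_NO_NODE,
		.dev		= dma_dev,
		.netdev		= netdev,
		.dma_dir	= DMA_FROM_DEVICE,
		.max_len	= PAGE_SIZE,
		.queue_idx	= queue_idx,	/* bind pool to one RX queue */
		.flags		= PP_FLAG_DMA_MAP,
	};

	/* Unreadable netmem is only safe when the driver never touches
	 * payload bytes, i.e. when headers land in separate, readable
	 * buffers (SHAMPO header/data split in mlx5e's case).
	 */
	if (hd_split)
		pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;

	return page_pool_create(&pp_params);
}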
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/en_main.c	9
1 file changed, 8 insertions(+), 1 deletion(-)
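On the consumer side, a sketch assuming the page_pool netmem API
(page_pool_alloc_netmems(), netmem_is_net_iov(), netmem_address()) as it
exists in this kernel cycle: buffers from a pool created with
PP_FLAG_ALLOW_UNREADABLE_NETMEM may be net_iovs with no kernel mapping,
so the driver may DMA into them but must never read them. The helper
name below is hypothetical.

/* Sketch, not from the patch: RX refill against a pool that may hand
 * back unreadable net_iov netmem. The DMA address is usable; a CPU
 * mapping may not exist.
 */
#include <net/page_pool/helpers.h>
#include <net/netmem.h>

static int example_refill_one(struct page_pool *pool, dma_addr_t *dma)
{
	netmem_ref netmem = page_pool_alloc_netmems(pool,
						    GFP_ATOMIC | __GFP_NOWARN);

	if (!netmem)
		return -ENOMEM;

	/* DMA address is valid even for unreadable netmem ... */
	*dma = page_pool_get_dma_addr_netmem(netmem);

	/* ... but net_iov-backed netmem has no CPU-addressable page:
	 * netmem_address() returns NULL for it, so never dereference.
	 */
	WARN_ON_ONCE(netmem_is_net_iov(netmem) && netmem_address(netmem));

	return 0;
}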
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5e649705e35f..a51e204bd364 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -749,7 +749,9 @@ static void mlx5e_rq_shampo_hd_info_free(struct mlx5e_rq *rq)
 
 static bool mlx5_rq_needs_separate_hd_pool(struct mlx5e_rq *rq)
 {
-	return false;
+	struct netdev_rx_queue *rxq = __netif_get_rx_queue(rq->netdev, rq->ix);
+
+	return !!rxq->mp_params.mp_ops;
 }
 
 static int mlx5_rq_shampo_alloc(struct mlx5_core_dev *mdev,
@@ -964,6 +966,11 @@ static int mlx5e_alloc_rq(struct mlx5e_params *params,
 		pp_params.netdev = rq->netdev;
 		pp_params.dma_dir = rq->buff.map_dir;
 		pp_params.max_len = PAGE_SIZE;
+		pp_params.queue_idx = rq->ix;
+
+		/* Shampo header data split allow for unreadable netmem */
+		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
+			pp_params.flags |= PP_FLAG_ALLOW_UNREADABLE_NETMEM;
 
 		/* page_pool can be used even when there is no rq->xdp_prog,
 		 * given page_pool does not handle DMA mapping there is no