		(pci_bw < 40000) && (pci_bw < link_speed));
}
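+/* Added descriptive comment: HW LRO default heuristic. Keep LRO enabled
+ * unless both the port speed and the PCI bandwidth are known (non-zero),
+ * the PCI bandwidth is at most 16000 and lower than the port speed, i.e.
+ * the PCI bus is the likely bottleneck. Units assumed to be Mb/s, matching
+ * the 40000 threshold used by cqe_compress_heuristic above.
+ */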
+static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
+{
+	return !(link_speed && pci_bw &&
+		 (pci_bw <= 16000) && (pci_bw < link_speed));
+}
+
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->rx_cq_period_mode = cq_period_mode;

...

	params->num_channels = max_channels;
	params->num_tc       = 1;

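+	/* Added descriptive comment: the port speed and PCI bandwidth are
+	 * queried once up front; both the CQE compression and HW LRO
+	 * defaults below depend on them.
+	 */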
+	mlx5e_get_max_linkspeed(mdev, &link_speed);
+	mlx5e_get_pci_bw(mdev, &pci_bw);
+	mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
+		      link_speed, pci_bw);
+
	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
-	    MLX5_CAP_GEN(mdev, vport_group_manager)) {
-		mlx5e_get_max_linkspeed(mdev, &link_speed);
-		mlx5e_get_pci_bw(mdev, &pci_bw);
-		mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
-			      link_speed, pci_bw);
+	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);
-	}
+
	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);

	/* RQ */
	/* HW LRO */
	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
-		params->lro_en = true;
+		params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */