 }

 static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
-                              struct mlx5_cmd_msg *msg)
+                              struct mlx5_cmd_msg *msg)
 {
         struct mlx5_cmd_mailbox *head = msg->next;
         struct mlx5_cmd_mailbox *next;
         u8 port_state;

         port_state = mlx5_query_vport_state(mdev,
-                                             MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
+                                             MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
+                                             0);

         if (port_state == VPORT_STATE_UP) {
                 netdev_info(priv->netdev, "Link up\n");
         /* set CQE compression */
         params->rx_cqe_compress_def = false;
         if (MLX5_CAP_GEN(mdev, cqe_compression) &&
-            MLX5_CAP_GEN(mdev, vport_group_manager))
+            MLX5_CAP_GEN(mdev, vport_group_manager))
                 params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);

         MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);
                 int fsz = skb_frag_size(frag);

                 dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
-                                             DMA_TO_DEVICE);
+                                             DMA_TO_DEVICE);
                 if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
                         return -ENOMEM;
         flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
         flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
-                                         &flow_act, &dest, 1);
+                                         &flow_act, &dest, 1);
         if (IS_ERR(flow_rule)) {
                 esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n",
                          PTR_ERR(flow_rule));
                 goto out;