From ca5b91d63192ceaa41a6145f8c923debb64c71fa Mon Sep 17 00:00:00 2001
From: Or Gerlitz
Date: Sun, 27 Nov 2016 16:51:36 +0200
Subject: [PATCH] IB/mlx5: Support RAW Ethernet when RoCE is disabled

In some environments, such as certain SRIOV VF configurations, RoCE is
not supported for mlx5 Ethernet ports. Currently, the driver will not
open an IB device on such a port. This is problematic, since we do want
user-space RAW Ethernet (RAW_PACKET QPs) functionality to remain
available. To that end, enhance the relevant driver flows such that we
do create a device instance in that case.

Signed-off-by: Or Gerlitz
Reviewed-by: Matan Barak
Signed-off-by: Leon Romanovsky
Signed-off-by: Doug Ledford
---
 drivers/infiniband/hw/mlx5/main.c | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index b5b7459e9af0..8b013f8b832a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2779,6 +2779,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
 			       struct ib_port_immutable *immutable)
 {
 	struct ib_port_attr attr;
+	struct mlx5_ib_dev *dev = to_mdev(ibdev);
+	enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num);
 	int err;
 
 	err = mlx5_ib_query_port(ibdev, port_num, &attr);
@@ -2788,7 +2790,8 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
 	immutable->pkey_tbl_len = attr.pkey_tbl_len;
 	immutable->gid_tbl_len = attr.gid_tbl_len;
 	immutable->core_cap_flags = get_core_cap_flags(ibdev);
-	immutable->max_mad_size = IB_MGMT_MAD_SIZE;
+	if ((ll == IB_LINK_LAYER_INFINIBAND) || MLX5_CAP_GEN(dev->mdev, roce))
+		immutable->max_mad_size = IB_MGMT_MAD_SIZE;
 
 	return 0;
 }
@@ -2873,9 +2876,11 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
 	if (err)
 		return err;
 
-	err = mlx5_nic_vport_enable_roce(dev->mdev);
-	if (err)
-		goto err_unregister_netdevice_notifier;
+	if (MLX5_CAP_GEN(dev->mdev, roce)) {
+		err = mlx5_nic_vport_enable_roce(dev->mdev);
+		if (err)
+			goto err_unregister_netdevice_notifier;
+	}
 
 	err = mlx5_eth_lag_init(dev);
 	if (err)
@@ -2884,7 +2889,8 @@ static int mlx5_enable_eth(struct mlx5_ib_dev *dev)
 	return 0;
 
 err_disable_roce:
-	mlx5_nic_vport_disable_roce(dev->mdev);
+	if (MLX5_CAP_GEN(dev->mdev, roce))
+		mlx5_nic_vport_disable_roce(dev->mdev);
 
 err_unregister_netdevice_notifier:
 	mlx5_remove_netdev_notifier(dev);
@@ -2894,7 +2900,8 @@ err_unregister_netdevice_notifier:
 static void mlx5_disable_eth(struct mlx5_ib_dev *dev)
 {
 	mlx5_eth_lag_cleanup(dev);
-	mlx5_nic_vport_disable_roce(dev->mdev);
+	if (MLX5_CAP_GEN(dev->mdev, roce))
+		mlx5_nic_vport_disable_roce(dev->mdev);
 }
 
 static void mlx5_ib_dealloc_q_counters(struct mlx5_ib_dev *dev)
@@ -3016,9 +3023,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	port_type_cap = MLX5_CAP_GEN(mdev, port_type);
 	ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap);
 
-	if ((ll == IB_LINK_LAYER_ETHERNET) && !MLX5_CAP_GEN(mdev, roce))
-		return NULL;
-
 	printk_once(KERN_INFO "%s", mlx5_version);
 
 	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
-- 
2.20.1
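
For context, the user-space capability this change preserves is creation of
RAW_PACKET QPs through libibverbs on a port whose IB device would previously
not have been registered. The sketch below is illustrative only and not part
of the patch: it assumes libibverbs development headers, opens the first RDMA
device it finds, and requires CAP_NET_RAW (raw packet QPs are privileged);
error handling is abbreviated.

/* Illustrative check: create a RAW_PACKET QP on the first RDMA device. */
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	int num;
	struct ibv_device **list = ibv_get_device_list(&num);

	if (!list || !num) {
		fprintf(stderr, "no RDMA devices found\n");
		return 1;
	}

	struct ibv_context *ctx = ibv_open_device(list[0]);
	if (!ctx) {
		fprintf(stderr, "failed to open %s\n",
			ibv_get_device_name(list[0]));
		return 1;
	}

	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	struct ibv_cq *cq = ibv_create_cq(ctx, 64, NULL, NULL, 0);

	/* RAW_PACKET QPs carry plain Ethernet frames and do not need RoCE. */
	struct ibv_qp_init_attr attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.cap = {
			.max_send_wr  = 64,
			.max_recv_wr  = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,
	};
	struct ibv_qp *qp = ibv_create_qp(pd, &attr);

	printf("RAW_PACKET QP %screated\n", qp ? "" : "NOT ");

	if (qp)
		ibv_destroy_qp(qp);
	ibv_destroy_cq(cq);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(list);
	return qp ? 0 : 1;
}

With the patch applied, this succeeds on a RoCE-less mlx5 VF because the
device instance is now created; without it, ibv_get_device_list() would not
list the port at all.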