 	if (listen_id_priv) {
 		cma_dev = listen_id_priv->cma_dev;
 		port = listen_id_priv->id.port_num;
-		gidp = rdma_protocol_iboe(cma_dev->device, port) ?
+		gidp = rdma_protocol_roce(cma_dev->device, port) ?
 		       &iboe_gid : &gid;
 		ret = cma_validate_port(cma_dev->device, port, gidp,

 			    listen_id_priv->id.port_num == port)
 				continue;
-			gidp = rdma_protocol_iboe(cma_dev->device, port) ?
+			gidp = rdma_protocol_roce(cma_dev->device, port) ?
 			       &iboe_gid : &gid;
 			ret = cma_validate_port(cma_dev->device, port, gidp,

 	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
-	if (rdma_protocol_iboe(id_priv->id.device, id_priv->id.port_num)) {
+	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
 		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
 		if (ret)

 	atomic_inc(&id_priv->refcount);
 	if (rdma_cap_ib_sa(id->device, id->port_num))
 		ret = cma_resolve_ib_route(id_priv, timeout_ms);
-	else if (rdma_protocol_iboe(id->device, id->port_num))
+	else if (rdma_protocol_roce(id->device, id->port_num))
 		ret = cma_resolve_iboe_route(id_priv);
 	else if (rdma_protocol_iwarp(id->device, id->port_num))
 		ret = cma_resolve_iw_route(id_priv, timeout_ms);
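
The hunk above dispatches route resolution on the per-port protocol helpers. For illustration only (not part of the patch), here is a self-contained sketch of the same dispatch using the renamed helper; example_route_method() is a hypothetical name introduced for clarity:

#include <rdma/ib_verbs.h>

/*
 * Hypothetical sketch, not from the patch: choose a route resolution
 * strategy from the port's protocol, mirroring the rdma_resolve_route()
 * branches shown in the hunk above.
 */
static const char *example_route_method(struct ib_device *device, u8 port_num)
{
	if (rdma_cap_ib_sa(device, port_num))
		return "ib";	/* SA path record lookup (cma_resolve_ib_route) */
	else if (rdma_protocol_roce(device, port_num))
		return "iboe";	/* RoCE/IBoE route (cma_resolve_iboe_route) */
	else if (rdma_protocol_iwarp(device, port_num))
		return "iw";	/* iWARP route (cma_resolve_iw_route) */
	return "unsupported";
}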

 	list_add(&mc->list, &id_priv->mc_list);
 	spin_unlock(&id_priv->lock);
-	if (rdma_protocol_iboe(id->device, id->port_num)) {
+	if (rdma_protocol_roce(id->device, id->port_num)) {
 		kref_init(&mc->mcref);
 		ret = cma_iboe_join_multicast(id_priv, mc);
 	} else if (rdma_cap_ib_mcast(id->device, id->port_num))

 			if (rdma_cap_ib_mcast(id->device, id->port_num)) {
 				ib_sa_free_multicast(mc->multicast.ib);
 				kfree(mc);
-			} else if (rdma_protocol_iboe(id->device, id->port_num))
+			} else if (rdma_protocol_roce(id->device, id->port_num))
 				kref_put(&mc->mcref, release_mc);
 			return;

 	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
 		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
-	else if (rdma_protocol_iboe(ctx->cm_id->device, ctx->cm_id->port_num))
+	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
 		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
 	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
 		ucma_copy_iw_route(&resp, &ctx->cm_id->route);

 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
 }

-static inline bool rdma_protocol_iboe(struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_roce(struct ib_device *device, u8 port_num)
 {
 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
 }

 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
 }

-static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num)
+static inline bool rdma_ib_or_roce(struct ib_device *device, u8 port_num)
 {
 	return device->port_immutable[port_num].core_cap_flags &
 		(RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);