static void cma_cancel_route(struct rdma_id_private *id_priv)
{
- if (rdma_protocol_ib(id_priv->id.device, id_priv->id.port_num)) {
+ if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) {
if (id_priv->query)
ib_sa_cancel_query(id_priv->query_id, id_priv->query);
}
return -EINVAL;
atomic_inc(&id_priv->refcount);
- if (rdma_protocol_ib(id->device, id->port_num))
+ if (rdma_cap_ib_sa(id->device, id->port_num))
ret = cma_resolve_ib_route(id_priv, timeout_ms);
else if (rdma_protocol_iboe(id->device, id->port_num))
ret = cma_resolve_iboe_route(id_priv);
struct ib_sa_port *port =
&sa_dev->port[event->element.port_num - sa_dev->start_port];
- if (WARN_ON(!rdma_protocol_ib(handler->device, port->port_num)))
+ if (WARN_ON(!rdma_cap_ib_sa(handler->device, port->port_num)))
return;
spin_lock_irqsave(&port->ah_lock, flags);
for (i = 0; i <= e - s; ++i) {
spin_lock_init(&sa_dev->port[i].ah_lock);
- if (!rdma_protocol_ib(device, i + 1))
+ if (!rdma_cap_ib_sa(device, i + 1))
continue;
sa_dev->port[i].sm_ah = NULL;
goto err;
for (i = 0; i <= e - s; ++i) {
- if (rdma_protocol_ib(device, i + 1))
+ if (rdma_cap_ib_sa(device, i + 1))
update_sm_ah(&sa_dev->port[i].update_task);
}
err:
while (--i >= 0) {
- if (rdma_protocol_ib(device, i + 1))
+ if (rdma_cap_ib_sa(device, i + 1))
ib_unregister_mad_agent(sa_dev->port[i].agent);
}
free:
flush_workqueue(ib_wq);
for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
- if (rdma_protocol_ib(device, i + 1)) {
+ if (rdma_cap_ib_sa(device, i + 1)) {
ib_unregister_mad_agent(sa_dev->port[i].agent);
if (sa_dev->port[i].sm_ah)
kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid;
resp.port_num = ctx->cm_id->port_num;
- if (rdma_protocol_ib(ctx->cm_id->device, ctx->cm_id->port_num))
+ if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
ucma_copy_ib_route(&resp, &ctx->cm_id->route);
else if (rdma_protocol_iboe(ctx->cm_id->device, ctx->cm_id->port_num))
ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
return rdma_protocol_iwarp(device, port_num);
}
+/**
+ * rdma_cap_ib_sa - Check if the port of a device supports InfiniBand
+ * Subnet Administration.
+ *
+ * @device: Device to be checked
+ * @port_num: Port number of the device
+ *
+ * Return false when the port of the device does not support InfiniBand
+ * Subnet Administration.
+ */
+static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num)
+{
+ return rdma_protocol_ib(device, port_num);
+}
+
int ib_query_gid(struct ib_device *device,
u8 port_num, int index, union ib_gid *gid);
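
For context, the call sites converted above all follow the same pattern: SA-specific work (queries, MAD agents, path-record route resolution) is gated behind the new capability helper rather than a raw protocol/link-layer test. A minimal, hypothetical consumer-side sketch of that pattern is shown below; want_sa_on_port() is a placeholder name and is not part of this patch, and only the rdma_cap_ib_sa() call reflects the interface actually being introduced.

#include <rdma/ib_verbs.h>

/*
 * Hypothetical sketch (not part of the patch): decide per port whether
 * SA-related state should be set up at all.  The helper hides the
 * "which transports have a Subnet Administrator" question, so callers
 * such as cma.c and sa_query.c above no longer reason about the link
 * layer directly.
 */
static bool want_sa_on_port(struct ib_device *device, u8 port_num)
{
	/* Only InfiniBand fabrics carry a Subnet Administrator. */
	if (!rdma_cap_ib_sa(device, port_num))
		return false;

	/* Additional, caller-specific conditions would go here. */
	return true;
}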