struct ib_agent_port_private *entry;
list_for_each_entry(entry, &ib_agent_port_list, port_list) {
- if (entry->agent[0]->device == device &&
- entry->agent[0]->port_num == port_num)
+ if (entry->agent[1]->device == device &&
+ entry->agent[1]->port_num == port_num)
return entry;
}
return NULL;
}
goto error1;
}
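The lookup above switches from agent[0] to agent[1] because, after this change, the SMI agent (slot 0) is registered only on ports whose link layer is InfiniBand, while the GSI agent (slot 1) exists on every port and is therefore the reliable key. The predicate the whole series repeats can be condensed as below; port_has_smi is a hypothetical name, not a helper the patch adds:

	/* Sketch only: QP0 (SMI) exists solely on IB link-layer ports;
	 * IBoE/RoCE ports carry QP1 (GSI) alone. */
	static inline int port_has_smi(struct ib_device *device, u8 port_num)
	{
		return rdma_port_get_link_layer(device, port_num) ==
		       IB_LINK_LAYER_INFINIBAND;
	}
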
- /* Obtain send only MAD agent for SMI QP */
- port_priv->agent[0] = ib_register_mad_agent(device, port_num,
- IB_QPT_SMI, NULL, 0,
- &agent_send_handler,
- NULL, NULL);
- if (IS_ERR(port_priv->agent[0])) {
- ret = PTR_ERR(port_priv->agent[0]);
- goto error2;
+ if (rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND) {
+ /* Obtain send only MAD agent for SMI QP */
+ port_priv->agent[0] = ib_register_mad_agent(device, port_num,
+ IB_QPT_SMI, NULL, 0,
+ &agent_send_handler,
+ NULL, NULL);
+ if (IS_ERR(port_priv->agent[0])) {
+ ret = PTR_ERR(port_priv->agent[0]);
+ goto error2;
+ }
}
/* Obtain send only MAD agent for GSI QP */

return 0;

error3:
- ib_unregister_mad_agent(port_priv->agent[0]);
+ if (port_priv->agent[0])
+ ib_unregister_mad_agent(port_priv->agent[0]);
error2:
kfree(port_priv);
error1:

spin_unlock_irqrestore(&ib_agent_port_list_lock, flags);
ib_unregister_mad_agent(port_priv->agent[1]);
- ib_unregister_mad_agent(port_priv->agent[0]);
+ if (port_priv->agent[0])
+ ib_unregister_mad_agent(port_priv->agent[0]);
+
kfree(port_priv);
return 0;
}
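Both the error unwind in ib_agent_port_open and the normal path in ib_agent_port_close can now encounter a port that never had an SMI agent. Provided port_priv is zero-initialized at allocation (a kzalloc, as elsewhere in this series), agent[0] stays NULL whenever the SMI registration was skipped, and the teardown reduces to this sketch:

	ib_unregister_mad_agent(port_priv->agent[1]);	/* GSI: always present */
	if (port_priv->agent[0])			/* SMI: NULL on IBoE ports */
		ib_unregister_mad_agent(port_priv->agent[0]);
	kfree(port_priv);
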
struct ib_mad_private *recv;
struct ib_mad_list_head *mad_list;
+ if (!qp_info->qp)
+ return;
+
while (!list_empty(&qp_info->recv_queue.list)) {
mad_list = list_entry(qp_info->recv_queue.list.next,

for (i = 0; i < IB_MAD_QPS_CORE; i++) {
qp = port_priv->qp_info[i].qp;
+ if (!qp)
+ continue;
+
/*
* PKey index for QP1 is irrelevant but
* one is needed for the Reset to Init transition
}

for (i = 0; i < IB_MAD_QPS_CORE; i++) {
+ if (!port_priv->qp_info[i].qp)
+ continue;
+
ret = ib_mad_post_receive_mads(&port_priv->qp_info[i], NULL);
if (ret) {
printk(KERN_ERR PFX "Couldn't post receive WRs\n");
static void destroy_mad_qp(struct ib_mad_qp_info *qp_info)
{
+ if (!qp_info->qp)
+ return;
+
ib_destroy_qp(qp_info->qp);
kfree(qp_info->snoop_table);
}
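Since create_mad_qp is never called for qp_info[0] on an IBoE port, qp_info->qp can legitimately be NULL, and the hunks above add the same guard at each consumer: cleanup_recv_queue, the Reset-to-Init QP transitions, the receive-posting loop, and destroy_mad_qp. Condensed into one sketch (the function name is illustrative, not from the patch):

	static void each_live_mad_qp(struct ib_mad_port_private *port_priv)
	{
		int i;

		for (i = 0; i < IB_MAD_QPS_CORE; i++) {
			if (!port_priv->qp_info[i].qp)
				continue;	/* SMI slot never created here */
			/* ... operate on port_priv->qp_info[i] ... */
		}
	}
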
struct ib_mad_port_private *port_priv;
unsigned long flags;
char name[sizeof "ib_mad123"];
+ int has_smi;
/* Create new device info */
port_priv = kzalloc(sizeof *port_priv, GFP_KERNEL);
init_mad_qp(port_priv, &port_priv->qp_info[0]);
init_mad_qp(port_priv, &port_priv->qp_info[1]);
- cq_size = (mad_sendq_size + mad_recvq_size) * 2;
+ cq_size = mad_sendq_size + mad_recvq_size;
+ has_smi = rdma_port_get_link_layer(device, port_num) == IB_LINK_LAYER_INFINIBAND;
+ if (has_smi)
+ cq_size *= 2;
+
port_priv->cq = ib_create_cq(port_priv->device,
ib_mad_thread_completion_handler,
NULL, port_priv, cq_size, 0);

goto error5;
}
- ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
- if (ret)
- goto error6;
+ if (has_smi) {
+ ret = create_mad_qp(&port_priv->qp_info[0], IB_QPT_SMI);
+ if (ret)
+ goto error6;
+ }
ret = create_mad_qp(&port_priv->qp_info[1], IB_QPT_GSI);
if (ret)
goto error7;
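The cq_size computation above now follows the QP count: each MAD QP contributes one send queue and one receive queue worth of completion entries, and an IBoE port drives only the GSI QP. Assuming the usual module-parameter defaults of mad_sendq_size = 128 and mad_recvq_size = 512, the arithmetic works out as:

	cq_size = mad_sendq_size + mad_recvq_size;	/* 640: one QP's worth */
	if (has_smi)
		cq_size *= 2;				/* 1280: SMI + GSI on an IB port */
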
int index;
dev = container_of(handler, struct mcast_device, event_handler);
+ if (rdma_port_get_link_layer(dev->device, event->element.port_num) !=
+ IB_LINK_LAYER_INFINIBAND)
+ return;
+
index = event->element.port_num - dev->start_port;
switch (event->event) {

struct mcast_device *dev;
struct mcast_port *port;
int i;
+ int count = 0;
if (rdma_node_get_transport(device->node_type) != RDMA_TRANSPORT_IB)
return;
}
for (i = 0; i <= dev->end_port - dev->start_port; i++) {
+ if (rdma_port_get_link_layer(device, dev->start_port + i) !=
+ IB_LINK_LAYER_INFINIBAND)
+ continue;
port = &dev->port[i];
port->dev = dev;
port->port_num = dev->start_port + i;
port->table = RB_ROOT;
init_completion(&port->comp);
atomic_set(&port->refcount, 1);
+ ++count;
+ }
+
+ if (!count) {
+ kfree(dev);
+ return;
}
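Only ports that pass the link-layer test are initialized and take their initial reference, and count records how many did; if none did (a pure RoCE device), nothing has been registered yet, so the plain kfree(dev) and early return are safe. The filter amounts to the sketch below, where mcast_ib_port_count is an illustrative name, not a helper the patch introduces:

	static int mcast_ib_port_count(struct mcast_device *dev,
				       struct ib_device *device)
	{
		int i, count = 0;

		for (i = 0; i <= dev->end_port - dev->start_port; i++)
			if (rdma_port_get_link_layer(device, dev->start_port + i) ==
			    IB_LINK_LAYER_INFINIBAND)
				count++;
		return count;	/* zero means: free dev, register nothing */
	}
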
dev->device = device;

flush_workqueue(mcast_wq);
for (i = 0; i <= dev->end_port - dev->start_port; i++) {
- port = &dev->port[i];
- deref_port(port);
- wait_for_completion(&port->comp);
+ if (rdma_port_get_link_layer(device, dev->start_port + i) ==
+ IB_LINK_LAYER_INFINIBAND) {
+ port = &dev->port[i];
+ deref_port(port);
+ wait_for_completion(&port->comp);
+ }
}
kfree(dev);
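The removal loop repeats the link-layer test because only initialized ports hold the initial reference set by atomic_set(&port->refcount, 1); calling deref_port on a port that was never set up would decrement an uninitialized refcount and then wait forever on an uninitialized completion. The idiom it relies on (deref_port in multicast.c is implemented along these lines):

	static void deref_port(struct mcast_port *port)
	{
		if (atomic_dec_and_test(&port->refcount))
			complete(&port->comp);	/* last user wakes the remover */
	}
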
struct ib_sa_port *port =
&sa_dev->port[event->element.port_num - sa_dev->start_port];
+ if (rdma_port_get_link_layer(handler->device, port->port_num) != IB_LINK_LAYER_INFINIBAND)
+ return;
+
spin_lock_irqsave(&port->ah_lock, flags);
if (port->sm_ah)
kref_put(&port->sm_ah->ref, free_sm_ah);

e = device->phys_port_cnt;
}
- sa_dev = kmalloc(sizeof *sa_dev +
+ sa_dev = kzalloc(sizeof *sa_dev +
(e - s + 1) * sizeof (struct ib_sa_port),
GFP_KERNEL);
if (!sa_dev)
sa_dev->end_port = e;

for (i = 0; i <= e - s; ++i) {
+ spin_lock_init(&sa_dev->port[i].ah_lock);
+ if (rdma_port_get_link_layer(device, i + 1) != IB_LINK_LAYER_INFINIBAND)
+ continue;
+
sa_dev->port[i].sm_ah = NULL;
sa_dev->port[i].port_num = i + s;
- spin_lock_init(&sa_dev->port[i].ah_lock);
sa_dev->port[i].agent =
ib_register_mad_agent(device, i + s, IB_QPT_GSI,
goto err;

for (i = 0; i <= e - s; ++i)
- update_sm_ah(&sa_dev->port[i].update_task);
+ if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+ update_sm_ah(&sa_dev->port[i].update_task);
return;

err:
while (--i >= 0)
- ib_unregister_mad_agent(sa_dev->port[i].agent);
+ if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND)
+ ib_unregister_mad_agent(sa_dev->port[i].agent);
kfree(sa_dev);

flush_scheduled_work();

for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) {
- ib_unregister_mad_agent(sa_dev->port[i].agent);
- if (sa_dev->port[i].sm_ah)
- kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+ if (rdma_port_get_link_layer(device, i + 1) == IB_LINK_LAYER_INFINIBAND) {
+ ib_unregister_mad_agent(sa_dev->port[i].agent);
+ if (sa_dev->port[i].sm_ah)
+ kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah);
+ }
}
kfree(sa_dev);
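Across sa_query.c, the kmalloc-to-kzalloc switch is what makes the error and removal paths above safe: for a port skipped by the link-layer test, agent and sm_ah remain NULL, and every later loop repeats the same test before touching them, while ah_lock is now initialized unconditionally so that any taker finds a valid lock. Condensed (a sketch of the resulting pattern, not the literal code):

	for (i = 0; i <= e - s; ++i) {
		spin_lock_init(&sa_dev->port[i].ah_lock);	/* every port */
		if (rdma_port_get_link_layer(device, i + 1) !=
		    IB_LINK_LAYER_INFINIBAND)
			continue;	/* port[i] stays zeroed from kzalloc */
		/* ... set port_num, register the MAD agent ... */
	}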