/* constants */
enum {
-	IPOIB_PACKET_SIZE = 2048,
-	IPOIB_BUF_SIZE = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
+	IPOIB_PACKET_SIZE         = 2048,
+	IPOIB_BUF_SIZE            = IPOIB_PACKET_SIZE + IB_GRH_BYTES,
-	IPOIB_ENCAP_LEN = 4,
+	IPOIB_ENCAP_LEN           = 4,
-	IPOIB_CM_MTU = 0x10000 - 0x10, /* padding to align header to 16 */
-	IPOIB_CM_BUF_SIZE = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
-	IPOIB_CM_HEAD_SIZE = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
-	IPOIB_CM_RX_SG = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
-	IPOIB_RX_RING_SIZE = 128,
-	IPOIB_TX_RING_SIZE = 64,
+	IPOIB_CM_MTU              = 0x10000 - 0x10, /* padding to align header to 16 */
+	IPOIB_CM_BUF_SIZE         = IPOIB_CM_MTU + IPOIB_ENCAP_LEN,
+	IPOIB_CM_HEAD_SIZE        = IPOIB_CM_BUF_SIZE % PAGE_SIZE,
+	IPOIB_CM_RX_SG            = ALIGN(IPOIB_CM_BUF_SIZE, PAGE_SIZE) / PAGE_SIZE,
+	IPOIB_RX_RING_SIZE        = 128,
+	IPOIB_TX_RING_SIZE        = 64,
	IPOIB_MAX_QUEUE_SIZE      = 8192,
	IPOIB_MIN_QUEUE_SIZE      = 2,
-	IPOIB_NUM_WC = 4,
+	IPOIB_NUM_WC              = 4,
	IPOIB_MAX_PATH_REC_QUEUE  = 3,
-	IPOIB_MAX_MCAST_QUEUE = 3,
-
-	IPOIB_FLAG_OPER_UP = 0,
-	IPOIB_FLAG_INITIALIZED = 1,
-	IPOIB_FLAG_ADMIN_UP = 2,
-	IPOIB_PKEY_ASSIGNED = 3,
-	IPOIB_PKEY_STOP = 4,
-	IPOIB_FLAG_SUBINTERFACE = 5,
-	IPOIB_MCAST_RUN = 6,
-	IPOIB_STOP_REAPER = 7,
-	IPOIB_MCAST_STARTED = 8,
-	IPOIB_FLAG_ADMIN_CM = 9,
+	IPOIB_MAX_MCAST_QUEUE     = 3,
+
+	IPOIB_FLAG_OPER_UP        = 0,
+	IPOIB_FLAG_INITIALIZED    = 1,
+	IPOIB_FLAG_ADMIN_UP       = 2,
+	IPOIB_PKEY_ASSIGNED       = 3,
+	IPOIB_PKEY_STOP           = 4,
+	IPOIB_FLAG_SUBINTERFACE   = 5,
+	IPOIB_MCAST_RUN           = 6,
+	IPOIB_STOP_REAPER         = 7,
+	IPOIB_MCAST_STARTED       = 8,
+	IPOIB_FLAG_ADMIN_CM       = 9,
	IPOIB_FLAG_UMCAST         = 10,
	IPOIB_MAX_BACKOFF_SECONDS = 16,
-	IPOIB_MCAST_FLAG_FOUND = 0, /* used in set_multicast_list */
+	IPOIB_MCAST_FLAG_FOUND    = 0, /* used in set_multicast_list */
	IPOIB_MCAST_FLAG_SENDONLY = 1,
-	IPOIB_MCAST_FLAG_BUSY = 2, /* joining or already joined */
+	IPOIB_MCAST_FLAG_BUSY     = 2, /* joining or already joined */
	IPOIB_MCAST_FLAG_ATTACHED = 3,
};
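
/*
 * A worked example of the connected-mode buffer geometry above, assuming
 * a 4 KB PAGE_SIZE (the values scale with the actual page size):
 *
 *	IPOIB_CM_MTU       = 0x10000 - 0x10            = 65520
 *	IPOIB_CM_BUF_SIZE  = 65520 + 4                 = 65524
 *	IPOIB_CM_HEAD_SIZE = 65524 % 4096              = 4084
 *	IPOIB_CM_RX_SG     = ALIGN(65524, 4096) / 4096 = 16
 *
 * so each CM receive buffer is a 4084-byte head plus fifteen page-sized
 * fragments (4084 + 15 * 4096 = 65524 bytes).
 */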
struct ipoib_mcast {
struct ib_sa_mcmember_rec mcmember;
struct ib_sa_multicast *mc;
-	struct ipoib_ah *ah;
+	struct ipoib_ah        *ah;
struct rb_node rb_node;
struct list_head list;
};
struct ipoib_cm_rx {
-	struct ib_cm_id *id;
-	struct ib_qp *qp;
-	struct list_head list;
-	struct net_device *dev;
-	unsigned long jiffies;
-	enum ipoib_cm_state state;
+	struct ib_cm_id        *id;
+	struct ib_qp           *qp;
+	struct list_head        list;
+	struct net_device      *dev;
+	unsigned long           jiffies;
+	enum ipoib_cm_state     state;
};
struct ipoib_cm_tx {
-	struct ib_cm_id *id;
-	struct ib_qp *qp;
+	struct ib_cm_id     *id;
+	struct ib_qp        *qp;
	struct list_head     list;
	struct net_device   *dev;
	struct ipoib_neigh  *neigh;
	struct ipoib_path   *path;
	struct ipoib_tx_buf *tx_ring;
-	unsigned tx_head;
-	unsigned tx_tail;
-	unsigned long flags;
-	u32 mtu;
-	struct ib_wc ibwc[IPOIB_NUM_WC];
+	unsigned             tx_head;
+	unsigned             tx_tail;
+	unsigned long        flags;
+	u32                  mtu;
+	struct ib_wc         ibwc[IPOIB_NUM_WC];
};
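
/*
 * tx_head and tx_tail above are free-running counters rather than array
 * indices. A minimal sketch of the usual ring arithmetic, assuming
 * ipoib_sendq_size is a power of two (the same mask idiom appears in the
 * post_send() hunk below):
 *
 *	unsigned slot        = tx->tx_head & (ipoib_sendq_size - 1);
 *	unsigned outstanding = tx->tx_head - tx->tx_tail;
 *
 * Unsigned wraparound keeps the subtraction correct even after the
 * counters overflow.
 */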
struct ipoib_cm_rx_buf {
};
struct ipoib_cm_dev_priv {
-	struct ib_srq *srq;
+	struct ib_srq          *srq;
	struct ipoib_cm_rx_buf *srq_ring;
-	struct ib_cm_id *id;
-	struct list_head passive_ids; /* state: LIVE */
-	struct list_head rx_error_list; /* state: ERROR */
-	struct list_head rx_flush_list; /* state: FLUSH, drain not started */
-	struct list_head rx_drain_list; /* state: FLUSH, drain started */
-	struct list_head rx_reap_list; /* state: FLUSH, drain done */
+	struct ib_cm_id        *id;
+	struct list_head        passive_ids;   /* state: LIVE */
+	struct list_head        rx_error_list; /* state: ERROR */
+	struct list_head        rx_flush_list; /* state: FLUSH, drain not started */
+	struct list_head        rx_drain_list; /* state: FLUSH, drain started */
+	struct list_head        rx_reap_list;  /* state: FLUSH, drain done */
	struct work_struct      start_task;
	struct work_struct      reap_task;
	struct work_struct      skb_task;
	struct work_struct      rx_reap_task;
	struct delayed_work     stale_task;
	struct sk_buff_head     skb_queue;
-	struct list_head start_list;
-	struct list_head reap_list;
-	struct ib_wc ibwc[IPOIB_NUM_WC];
-	struct ib_sge rx_sge[IPOIB_CM_RX_SG];
+	struct list_head        start_list;
+	struct list_head        reap_list;
+	struct ib_wc            ibwc[IPOIB_NUM_WC];
+	struct ib_sge           rx_sge[IPOIB_CM_RX_SG];
	struct ib_recv_wr       rx_wr;
};
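
/*
 * A minimal sketch (not the driver's exact code) of how rx_sge and rx_wr
 * fit the constants above: sg entry 0 covers the IPOIB_CM_HEAD_SIZE head
 * and each remaining entry covers one full page, IPOIB_CM_RX_SG entries
 * in all. Per-buffer DMA addresses would still be filled in before each
 * post; the function name here is illustrative only.
 */
static inline void ipoib_cm_sketch_init_rx_wr(struct ipoib_cm_dev_priv *cm,
					      u32 lkey)
{
	int i;

	cm->rx_sge[0].lkey   = lkey;
	cm->rx_sge[0].length = IPOIB_CM_HEAD_SIZE;
	for (i = 1; i < IPOIB_CM_RX_SG; ++i) {
		cm->rx_sge[i].lkey   = lkey;
		cm->rx_sge[i].length = PAGE_SIZE;
	}

	cm->rx_wr.next    = NULL;
	cm->rx_wr.sg_list = cm->rx_sge;
	cm->rx_wr.num_sge = IPOIB_CM_RX_SG;
}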
struct work_struct pkey_event_task;
struct ib_device *ca;
-	u8 port;
-	u16 pkey;
-	u16 pkey_index;
-	struct ib_pd *pd;
-	struct ib_mr *mr;
-	struct ib_cq *cq;
-	struct ib_qp *qp;
-	u32 qkey;
+	u8                port;
+	u16               pkey;
+	u16               pkey_index;
+	struct ib_pd     *pd;
+	struct ib_mr     *mr;
+	struct ib_cq     *cq;
+	struct ib_qp     *qp;
+	u32               qkey;
	union ib_gid local_gid;
-	u16 local_lid;
+	u16          local_lid;
unsigned int admin_mtu;
unsigned int mcast_mtu;
struct ipoib_rx_buf *rx_ring;
-	spinlock_t tx_lock;
+	spinlock_t           tx_lock;
	struct ipoib_tx_buf *tx_ring;
-	unsigned tx_head;
-	unsigned tx_tail;
-	struct ib_sge tx_sge;
+	unsigned             tx_head;
+	unsigned             tx_tail;
+	struct ib_sge        tx_sge;
	struct ib_send_wr    tx_wr;
-	unsigned tx_outstanding;
+	unsigned             tx_outstanding;
	struct ib_wc         ibwc[IPOIB_NUM_WC];
struct ipoib_ah {
struct net_device *dev;
-	struct ib_ah *ah;
+	struct ib_ah      *ah;
	struct list_head   list;
-	struct kref ref;
-	unsigned last_send;
+	struct kref        ref;
+	unsigned           last_send;
};
struct ipoib_path {
	struct list_head      neigh_list;
-	int query_id;
+	int                   query_id;
	struct ib_sa_query   *query;
	struct completion     done;
-	struct rb_node rb_node;
+	struct rb_node        rb_node;
	struct list_head      list;
};
#ifdef CONFIG_INFINIBAND_IPOIB_CM
struct ipoib_cm_tx *cm;
#endif
-	union ib_gid dgid;
+	union ib_gid        dgid;
	struct sk_buff_head queue;
	struct neighbour   *neighbour;
#ifdef CONFIG_INFINIBAND_IPOIB_CM
-#define IPOIB_FLAGS_RC 0x80
-#define IPOIB_FLAGS_UC 0x40
+#define IPOIB_FLAGS_RC          0x80
+#define IPOIB_FLAGS_UC          0x40
/* We don't support UC connections at the moment */
#define IPOIB_CM_SUPPORTED(ha) (ha[0] & (IPOIB_FLAGS_RC))
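
/*
 * For illustration: ha[0] is the first byte of the 20-byte IPoIB hardware
 * address, so a peer advertising e.g. ha[0] = 0x80 is offering a
 * connected-mode (RC) path; the UC flag is defined above but, per the
 * comment, deliberately not matched.
 */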
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
struct ipoib_neigh *neigh);
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx);
-void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
+void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
unsigned int mtu);
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc);
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc);
return 0;
}
-static inline void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
+static inline void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
unsigned int mtu)
{
dev_kfree_skb_any(skb);
extern int ipoib_debug_level;
#define ipoib_dbg(priv, format, arg...)                 \
-	do { \
+	do {                                            \
		if (ipoib_debug_level > 0)              \
			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
	} while (0)
#define ipoib_dbg_mcast(priv, format, arg...)           \
-	do { \
+	do {                                            \
		if (mcast_debug_level > 0)              \
			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
	} while (0)
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
#define ipoib_dbg_data(priv, format, arg...)            \
-	do { \
+	do {                                            \
		if (data_debug_level > 0)               \
			ipoib_printk(KERN_DEBUG, priv, format , ## arg); \
	} while (0)
return NULL;
}
-static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv* priv)
+static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
struct ib_send_wr *bad_wr;
struct ipoib_cm_rx *p;
{
struct ib_send_wr *bad_wr;
-	priv->tx_sge.addr = addr;
-	priv->tx_sge.length = len;
+	priv->tx_sge.addr       = addr;
+	priv->tx_sge.length     = len;
-	priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM;
+	priv->tx_wr.wr_id       = wr_id | IPOIB_OP_CM;
return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr);
}
tx_req->mapping = addr;
if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
-			addr, skb->len))) {
+			       addr, skb->len))) {
ipoib_warn(priv, "post_send failed\n");
++dev->stats.tx_errors;
ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
.sq_sig_type = IB_SIGNAL_ALL_WR,
.qp_type = IB_QPT_RC,
.qp_context = tx
- };
+ };
return ib_create_qp(priv->pd, &attr);
}
data.qpn = cpu_to_be32(priv->qp->qp_num);
data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);
-	req.primary_path = pathrec;
-	req.alternate_path = NULL;
-	req.service_id = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
-	req.qp_num = qp->qp_num;
-	req.qp_type = qp->qp_type;
-	req.private_data = &data;
-	req.private_data_len = sizeof data;
-	req.flow_control = 0;
+	req.primary_path                = pathrec;
+	req.alternate_path              = NULL;
+	req.service_id                  = cpu_to_be64(IPOIB_CM_IETF_ID | qpn);
+	req.qp_num                      = qp->qp_num;
+	req.qp_type                     = qp->qp_type;
+	req.private_data                = &data;
+	req.private_data_len            = sizeof data;
+	req.flow_control                = 0;
-	req.starting_psn = 0; /* FIXME */
+	req.starting_psn                = 0; /* FIXME */
/*
* Pick some arbitrary defaults here; we could make these
* module parameters if anyone cared about setting them.
*/
-	req.responder_resources = 4;
-	req.remote_cm_response_timeout = 20;
-	req.local_cm_response_timeout = 20;
-	req.retry_count = 0; /* RFC draft warns against retries */
-	req.rnr_retry_count = 0; /* RFC draft warns against retries */
-	req.max_cm_retries = 15;
-	req.srq = 1;
+	req.responder_resources         = 4;
+	req.remote_cm_response_timeout  = 20;
+	req.local_cm_response_timeout   = 20;
+	req.retry_count                 = 0; /* RFC draft warns against retries */
+	req.rnr_retry_count             = 0; /* RFC draft warns against retries */
+	req.max_cm_retries              = 15;
+	req.srq                         = 1;
return ib_send_cm_req(id, &req);
}
spin_unlock_irq(&priv->tx_lock);
}
-void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
+void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
unsigned int mtu)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
}
-static ssize_t show_mode(struct device *d, struct device_attribute *attr,
+static ssize_t show_mode(struct device *d, struct device_attribute *attr,
char *buf)
{
struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));
INIT_LIST_HEAD(&path->neigh_list);
memcpy(path->pathrec.dgid.raw, gid, sizeof (union ib_gid));
-	path->pathrec.sgid = priv->local_gid;
-	path->pathrec.pkey = cpu_to_be16(priv->pkey);
+	path->pathrec.sgid          = priv->local_gid;
+	path->pathrec.pkey          = cpu_to_be16(priv->pkey);
	path->pathrec.numb_path     = 1;
	path->pathrec.traffic_class = priv->broadcast->mcmember.traffic_class;
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
-	dev->open = ipoib_open;
-	dev->stop = ipoib_stop;
-	dev->change_mtu = ipoib_change_mtu;
-	dev->hard_start_xmit = ipoib_start_xmit;
-	dev->tx_timeout = ipoib_timeout;
-	dev->header_ops = &ipoib_header_ops;
-	dev->set_multicast_list = ipoib_set_mcast_list;
-	dev->neigh_setup = ipoib_neigh_setup_dev;
+	dev->open                = ipoib_open;
+	dev->stop                = ipoib_stop;
+	dev->change_mtu          = ipoib_change_mtu;
+	dev->hard_start_xmit     = ipoib_start_xmit;
+	dev->tx_timeout          = ipoib_timeout;
+	dev->header_ops          = &ipoib_header_ops;
+	dev->set_multicast_list  = ipoib_set_mcast_list;
+	dev->neigh_setup         = ipoib_neigh_setup_dev;
netif_napi_add(dev, &priv->napi, ipoib_poll, 100);
-	dev->watchdog_timeo = HZ;
+	dev->watchdog_timeo      = HZ;
-	dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
+	dev->flags              |= IFF_BROADCAST | IFF_MULTICAST;
/*
* We add in INFINIBAND_ALEN to allow for the destination
* address "pseudoheader" for skbs without neighbour struct.
*/
-	dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
-	dev->addr_len = INFINIBAND_ALEN;
-	dev->type = ARPHRD_INFINIBAND;
-	dev->tx_queue_len = ipoib_sendq_size * 2;
-	dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
+	dev->hard_header_len     = IPOIB_ENCAP_LEN + INFINIBAND_ALEN;
+	dev->addr_len            = INFINIBAND_ALEN;
+	dev->type                = ARPHRD_INFINIBAND;
+	dev->tx_queue_len        = ipoib_sendq_size * 2;
+	dev->features            = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX;
/* MTU will be reset when mcast join happens */
-	dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
-	priv->mcast_mtu = priv->admin_mtu = dev->mtu;
+	dev->mtu                 = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN;
+	priv->mcast_mtu          = priv->admin_mtu = dev->mtu;
memcpy(dev->broadcast, ipv4_bcast_addr, INFINIBAND_ALEN);
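
/*
 * For reference, the resulting datagram-mode sizes from the constants at
 * the top of this patch: dev->mtu = 2048 - 4 = 2044 bytes, and
 * dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN = 4 + 20 = 24
 * bytes, the extra 20 being the destination-address pseudoheader noted
 * in the comment above.
 */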