* we'll print them and continue. We reuse the same message buffer as
* ipath_handle_errors() to avoid excessive stack usage.
*/
-void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
- size_t msgl)
+static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
+ size_t msgl)
{
ipath_err_t hwerrs;
u32 bits, ctrl;
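The comment above notes that the hardware-error handler formats into a message buffer owned by the caller rather than declaring its own, so only one large buffer exists on the whole error path. A minimal sketch of that pattern, with hypothetical names (the real buffer presumably lives alongside the device state):

/* Hedged sketch: one shared buffer threaded through the error path,
 * so no handler puts a large char array on its own stack frame. */
#define ERR_MSG_LEN 512
static char shared_err_msg[ERR_MSG_LEN];

static void handle_hwerrors(char *msg, size_t msgl)
{
	/* format into the caller's buffer, then log and continue */
	snprintf(msg, msgl, "hardware error: 0x%lx", 0UL);
}

static void handle_errors(void)
{
	handle_hwerrors(shared_err_msg, sizeof(shared_err_msg));
}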
* freeze mode), and enable hardware errors as errors (along with
* everything else) in errormask
*/
-void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
+static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
{
ipath_err_t val;
u64 extsval;
* ipath_pe_bringup_serdes - bring up the serdes
* @dd: the infinipath device
*/
-int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
+static int ipath_pe_bringup_serdes(struct ipath_devdata *dd)
{
u64 val, tmp, config1;
int ret = 0, change = 0;
* @dd: the infinipath device
* Called when driver is being unloaded
*/
-void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
+static void ipath_pe_quiet_serdes(struct ipath_devdata *dd)
{
u64 val = ipath_read_kreg64(dd, dd->ipath_kregs->kr_serdesconfig0);
* Allocate the next available QPN and put the QP into the hash table.
* The hash table holds a reference to the QP.
*/
-int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
- enum ib_qp_type type)
+static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
+ enum ib_qp_type type)
{
unsigned long flags;
u32 qpn;
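Per the kernel-doc above, allocation couples two steps: pick the next free QPN, then insert the QP into a hash table that takes its own reference so the receive path can look it up safely. A hedged sketch of the insert half; the field names (lock, max, table, next, refcount) are assumptions about the table layout, not taken from the driver:

/* Sketch: publish a QP in the lookup table under the table lock.
 * The table's reference keeps the QP alive for async lookups. */
static void qp_table_insert(struct ipath_qp_table *qpt, struct ipath_qp *qp,
			    u32 qpn)
{
	unsigned long flags;

	spin_lock_irqsave(&qpt->lock, flags);
	qp->ibqp.qp_num = qpn;
	atomic_inc(&qp->refcount);		/* reference held by the table */
	qp->next = qpt->table[qpn % qpt->max];
	qpt->table[qpn % qpt->max] = qp;	/* visible to readers from here on */
	spin_unlock_irqrestore(&qpt->lock, flags);
}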
* Remove the QP from the table so it can't be found asynchronously by
* the receive interrupt routine.
*/
-void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
+static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
{
struct ipath_qp *q, **qpp;
unsigned long flags;
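The removal side is the mirror image: walk the hash chain under the same lock and splice the QP out, after which the receive interrupt can no longer find it. A sketch using the q/qpp locals declared above (chain layout again an assumption):

/* Sketch: unlink qp from its hash chain so interrupt-context lookups
 * can no longer reach it; drop the table's reference afterwards. */
spin_lock_irqsave(&qpt->lock, flags);
for (qpp = &qpt->table[qp->ibqp.qp_num % qpt->max];
     (q = *qpp) != NULL; qpp = &q->next) {
	if (q == qp) {
		*qpp = qp->next;	/* splice out */
		qp->next = NULL;
		atomic_dec(&qp->refcount);
		break;
	}
}
spin_unlock_irqrestore(&qpt->lock, flags);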
qp->r_reuse_sge = 0;
}
+/**
+ * ipath_error_qp - put a QP into an error state
+ * @qp: the QP to put into an error state
+ *
+ * Flushes both send and receive work queues.
+ * QP r_rq.lock and s_lock should be held.
+ */
+static void ipath_error_qp(struct ipath_qp *qp)
+{
+ struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
+ struct ib_wc wc;
+
+ _VERBS_INFO("QP%d/%d in error state\n",
+ qp->ibqp.qp_num, qp->remote_qpn);
+
+ spin_lock(&dev->pending_lock);
+	/* XXX What if it's already removed by the timeout code? */
+ if (qp->timerwait.next != LIST_POISON1)
+ list_del(&qp->timerwait);
+ if (qp->piowait.next != LIST_POISON1)
+ list_del(&qp->piowait);
+ spin_unlock(&dev->pending_lock);
+
+ wc.status = IB_WC_WR_FLUSH_ERR;
+ wc.vendor_err = 0;
+ wc.byte_len = 0;
+ wc.imm_data = 0;
+ wc.qp_num = qp->ibqp.qp_num;
+ wc.src_qp = 0;
+ wc.wc_flags = 0;
+ wc.pkey_index = 0;
+ wc.slid = 0;
+ wc.sl = 0;
+ wc.dlid_path_bits = 0;
+ wc.port_num = 0;
+
+ while (qp->s_last != qp->s_head) {
+ struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
+
+ wc.wr_id = wqe->wr.wr_id;
+ wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
+ if (++qp->s_last >= qp->s_size)
+ qp->s_last = 0;
+ ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
+ }
+ qp->s_cur = qp->s_tail = qp->s_head;
+ qp->s_hdrwords = 0;
+ qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
+
+ wc.opcode = IB_WC_RECV;
+ while (qp->r_rq.tail != qp->r_rq.head) {
+ wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
+ if (++qp->r_rq.tail >= qp->r_rq.size)
+ qp->r_rq.tail = 0;
+ ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
+ }
+}
+
/**
* ipath_modify_qp - modify the attributes of a queue pair
* @ibqp: the queue pair whose attributes we're modifying
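Both drain loops in ipath_error_qp above follow the same circular-buffer walk: consume from the tail toward the head, wrapping at the queue size, and post a flush completion for each entry. Reduced to its skeleton (types and names illustrative, not the driver's):

/* Generic form of the drain loops above: retire every outstanding
 * entry between tail and head, wrapping modulo the ring size. */
static void drain_ring(struct entry *ring, u32 *tailp, u32 head, u32 size)
{
	u32 tail = *tailp;

	while (tail != head) {
		flush_entry(&ring[tail]);	/* e.g. post IB_WC_WR_FLUSH_ERR */
		if (++tail >= size)
			tail = 0;
	}
	*tailp = tail;	/* tail == head: the queue is now empty */
}

Leaving tail equal to head is exactly the empty state ipath_error_qp establishes for both the send and receive queues.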
qp->state = IB_QPS_SQE;
}
-/**
- * ipath_error_qp - put a QP into an error state
- * @qp: the QP to put into an error state
- *
- * Flushes both send and receive work queues.
- * QP r_rq.lock and s_lock should be held.
- */
-
-void ipath_error_qp(struct ipath_qp *qp)
-{
- struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
- struct ib_wc wc;
-
- _VERBS_INFO("QP%d/%d in error state\n",
- qp->ibqp.qp_num, qp->remote_qpn);
-
- spin_lock(&dev->pending_lock);
- /* XXX What if its already removed by the timeout code? */
- if (qp->timerwait.next != LIST_POISON1)
- list_del(&qp->timerwait);
- if (qp->piowait.next != LIST_POISON1)
- list_del(&qp->piowait);
- spin_unlock(&dev->pending_lock);
-
- wc.status = IB_WC_WR_FLUSH_ERR;
- wc.vendor_err = 0;
- wc.byte_len = 0;
- wc.imm_data = 0;
- wc.qp_num = qp->ibqp.qp_num;
- wc.src_qp = 0;
- wc.wc_flags = 0;
- wc.pkey_index = 0;
- wc.slid = 0;
- wc.sl = 0;
- wc.dlid_path_bits = 0;
- wc.port_num = 0;
-
- while (qp->s_last != qp->s_head) {
- struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
-
- wc.wr_id = wqe->wr.wr_id;
- wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
- if (++qp->s_last >= qp->s_size)
- qp->s_last = 0;
- ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
- }
- qp->s_cur = qp->s_tail = qp->s_head;
- qp->s_hdrwords = 0;
- qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-
- wc.opcode = IB_WC_RECV;
- while (qp->r_rq.tail != qp->r_rq.head) {
- wc.wr_id = get_rwqe_ptr(&qp->r_rq, qp->r_rq.tail)->wr_id;
- if (++qp->r_rq.tail >= qp->r_rq.size)
- qp->r_rq.tail = 0;
- ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
- }
-}
-
/**
* ipath_get_credit - flush the send work queue of a QP
* @qp: the qp whose send work queue to flush
/* Not static, because we don't want the compiler removing it */
const char ipath_verbs_version[] = "ipath_verbs " IPATH_IDSTR;
-unsigned int ib_ipath_qp_table_size = 251;
+static unsigned int ib_ipath_qp_table_size = 251;
module_param_named(qp_table_size, ib_ipath_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
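Making ib_ipath_qp_table_size static is safe here because module_param_named() binds the C variable by name within the same translation unit while exposing it to userspace under the separate name qp_table_size. A minimal sketch of the macro's shape, with example names:

/* Sketch: the C symbol can be static; only the first argument
 * (the user-visible parameter name) is exported. */
static unsigned int my_table_size = 251;
module_param_named(table_size, my_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(table_size, "example table size");

Loading the module with an override such as "modprobe ib_ipath qp_table_size=1021" therefore keeps working unchanged.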
/*
* System image GUID.
*/
-__be64 sys_image_guid;
+static __be64 sys_image_guid;
/**
* ipath_copy_sge - copy data to SGE memory
ib_dealloc_device(ibdev);
}
-int __init ipath_verbs_init(void)
+static int __init ipath_verbs_init(void)
{
return ipath_verbs_register(ipath_register_ib_device,
ipath_unregister_ib_device,
ipath_ib_timer);
}
-void __exit ipath_verbs_cleanup(void)
+static void __exit ipath_verbs_cleanup(void)
{
ipath_verbs_unregister();
}
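Likewise, ipath_verbs_init() and ipath_verbs_cleanup() can become static because the module_init()/module_exit() hooks (presumably declared elsewhere in this file) reference them by name from the same translation unit:

/* Sketch of the wiring that keeps these symbols reachable despite
 * being static; assumed to exist elsewhere in ipath_verbs.c. */
module_init(ipath_verbs_init);
module_exit(ipath_verbs_cleanup);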