#include "trace.h"
#include "sdma.h"
-#define BITS_PER_PAGE (PAGE_SIZE*BITS_PER_BYTE)
-#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
-
static unsigned int hfi1_qp_table_size = 256;
module_param_named(qp_table_size, hfi1_qp_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(qp_table_size, "QP table size");
unsigned seq);
static void iowait_wakeup(struct iowait *wait, int reason);
-static inline unsigned mk_qpn(struct hfi1_qpn_table *qpt,
- struct qpn_map *map, unsigned off)
+static inline unsigned mk_qpn(struct rvt_qpn_table *qpt,
+ struct rvt_qpn_map *map, unsigned off)
{
- return (map - qpt->map) * BITS_PER_PAGE + off;
+ return (map - qpt->map) * RVT_BITS_PER_PAGE + off;
}
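
(Aside: the map/offset split in mk_qpn() is plain page-granular arithmetic. A minimal user-space sketch, assuming 4 KiB pages so RVT_BITS_PER_PAGE works out to 4096 * 8 = 32768; the sk_* names are hypothetical stand-ins, not driver code:)

#include <assert.h>

#define SK_BITS_PER_PAGE 32768u	/* stand-in for RVT_BITS_PER_PAGE */

static unsigned sk_mk_qpn(unsigned map_idx, unsigned off)
{
	return map_idx * SK_BITS_PER_PAGE + off;
}

int main(void)
{
	/* bit 100 of map page 2 encodes QPN 2 * 32768 + 100 = 65636 */
	assert(sk_mk_qpn(2, 100) == 65636);
	return 0;
}
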
/*
32768 /* 1E */
};
-static void get_map_page(struct hfi1_qpn_table *qpt, struct qpn_map *map)
+static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map)
{
unsigned long page = get_zeroed_page(GFP_KERNEL);
* Allocate the next available QPN or
* zero/one for QP type IB_QPT_SMI/IB_QPT_GSI.
*/
-static int alloc_qpn(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt,
+static int alloc_qpn(struct hfi1_devdata *dd, struct rvt_qpn_table *qpt,
enum ib_qp_type type, u8 port)
{
u32 i, offset, max_scan, qpn;
- struct qpn_map *map;
+ struct rvt_qpn_map *map;
u32 ret;
if (type == IB_QPT_SMI || type == IB_QPT_GSI) {
}
qpn = qpt->last + qpt->incr;
- if (qpn >= QPN_MAX)
+ if (qpn >= RVT_QPN_MAX)
qpn = qpt->incr | ((qpt->last & 1) ^ 1);
/* offset carries bit 0 */
- offset = qpn & BITS_PER_PAGE_MASK;
- map = &qpt->map[qpn / BITS_PER_PAGE];
+ offset = qpn & RVT_BITS_PER_PAGE_MASK;
+ map = &qpt->map[qpn / RVT_BITS_PER_PAGE];
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
}
offset += qpt->incr;
/*
- * This qpn might be bogus if offset >= BITS_PER_PAGE.
- * That is OK. It gets re-assigned below
+ * This qpn might be bogus if offset >= RVT_BITS_PER_PAGE.
+ * That is OK. It gets re-assigned below.
*/
qpn = mk_qpn(qpt, map, offset);
- } while (offset < BITS_PER_PAGE && qpn < QPN_MAX);
+ } while (offset < RVT_BITS_PER_PAGE && qpn < RVT_QPN_MAX);
/*
* In order to keep the number of pages allocated to a
* minimum, we scan all the existing pages before increasing
* the size of the bitmap table.
*/
if (++i > max_scan) {
- if (qpt->nmaps == QPNMAP_ENTRIES)
+ if (qpt->nmaps == RVT_QPNMAP_ENTRIES)
break;
map = &qpt->map[qpt->nmaps++];
/* start at incr with current bit 0 */
return ret;
}
-static void free_qpn(struct hfi1_qpn_table *qpt, u32 qpn)
+static void free_qpn(struct rvt_qpn_table *qpt, u32 qpn)
{
- struct qpn_map *map;
+ struct rvt_qpn_map *map;
- map = qpt->map + qpn / BITS_PER_PAGE;
+ map = qpt->map + qpn / RVT_BITS_PER_PAGE;
if (map->page)
- clear_bit(qpn & BITS_PER_PAGE_MASK, map->page);
+ clear_bit(qpn & RVT_BITS_PER_PAGE_MASK, map->page);
}
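
Taken together, alloc_qpn() and free_qpn() implement a multi-page bitmap allocator: a QPN is claimed by setting its bit in the owning map page and released by clearing it. A hedged user-space sketch of that claim/release pairing, with plain bit operations standing in for the kernel's atomic test_and_set_bit()/clear_bit() (sk_* names are hypothetical):

#include <limits.h>

#define SK_BPL (sizeof(unsigned long) * CHAR_BIT)

/* Claim @qpn: returns 1 if it was free, 0 if already in use. */
static int sk_claim_qpn(unsigned long *bits, unsigned qpn)
{
	unsigned long mask = 1UL << (qpn % SK_BPL);
	unsigned long *word = &bits[qpn / SK_BPL];

	if (*word & mask)
		return 0;
	*word |= mask;	/* the driver uses test_and_set_bit() here */
	return 1;
}

static void sk_free_qpn(unsigned long *bits, unsigned qpn)
{
	bits[qpn / SK_BPL] &= ~(1UL << (qpn % SK_BPL));	/* cf. clear_bit() */
}
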
/*
unsigned long flags;
atomic_inc(&qp->refcount);
- spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
+ spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);
if (qp->ibqp.qp_num <= 1) {
rcu_assign_pointer(ibp->rvp.qp[qp->ibqp.qp_num], qp);
} else {
- u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);
+ u32 n = qpn_hash(dev->rdi.qp_dev, qp->ibqp.qp_num);
- qp->next = dev->qp_dev->qp_table[n];
- rcu_assign_pointer(dev->qp_dev->qp_table[n], qp);
+ qp->next = dev->rdi.qp_dev->qp_table[n];
+ rcu_assign_pointer(dev->rdi.qp_dev->qp_table[n], qp);
trace_hfi1_qpinsert(qp, n);
}
- spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
}
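
insert_qp() follows the standard RCU publish idiom: link the new QP to the current chain head, then rcu_assign_pointer() the head so a concurrent reader can never observe a half-initialized node. A minimal illustration using user-space RCU (liburcu, link with -lurcu); the sk_* types and table are hypothetical, and the real code additionally bumps qp->refcount and serializes on qpt_lock:

#include <urcu.h>	/* liburcu: rcu_assign_pointer() and friends */

struct sk_qp {
	unsigned qpn;
	struct sk_qp *next;
};

static struct sk_qp *sk_table[256];

/* Caller serializes writers, as insert_qp() does with qpt_lock held. */
static void sk_insert(unsigned bucket, struct sk_qp *qp)
{
	qp->next = sk_table[bucket];	/* plain read is safe under the lock */
	rcu_assign_pointer(sk_table[bucket], qp);	/* publish to readers */
}
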
/*
static void remove_qp(struct hfi1_ibdev *dev, struct rvt_qp *qp)
{
struct hfi1_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
- u32 n = qpn_hash(dev->qp_dev, qp->ibqp.qp_num);
+ u32 n = qpn_hash(dev->rdi.qp_dev, qp->ibqp.qp_num);
unsigned long flags;
int removed = 1;
- spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
+ spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);
if (rcu_dereference_protected(ibp->rvp.qp[0],
lockdep_is_held(
- &dev->qp_dev->qpt_lock)) == qp) {
+ &dev->rdi.qp_dev->qpt_lock)) == qp) {
RCU_INIT_POINTER(ibp->rvp.qp[0], NULL);
} else if (rcu_dereference_protected(ibp->rvp.qp[1],
- lockdep_is_held(&dev->qp_dev->qpt_lock)) == qp) {
+ lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)) == qp) {
RCU_INIT_POINTER(ibp->rvp.qp[1], NULL);
} else {
struct rvt_qp *q;
struct rvt_qp __rcu **qpp;
removed = 0;
- qpp = &dev->qp_dev->qp_table[n];
+ qpp = &dev->rdi.qp_dev->qp_table[n];
for (; (q = rcu_dereference_protected(*qpp,
- lockdep_is_held(&dev->qp_dev->qpt_lock)))
+ lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)))
!= NULL;
qpp = &q->next)
if (q == qp) {
RCU_INIT_POINTER(*qpp,
rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qp_dev->qpt_lock)));
+ lockdep_is_held(&dev->rdi.qp_dev->qpt_lock)));
removed = 1;
trace_hfi1_qpremove(qp, n);
break;
}
}
- spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
if (removed) {
synchronize_rcu();
if (atomic_dec_and_test(&qp->refcount))
unsigned long flags;
struct rvt_qp *qp;
unsigned n, qp_inuse = 0;
+ spinlock_t *l; /* useless pointer to shut up checkpatch */
for (n = 0; n < dd->num_pports; n++) {
struct hfi1_ibport *ibp = &dd->pport[n].ibport_data;
rcu_read_unlock();
}
- if (!dev->qp_dev)
+ if (!dev->rdi.qp_dev)
goto bail;
- spin_lock_irqsave(&dev->qp_dev->qpt_lock, flags);
- for (n = 0; n < dev->qp_dev->qp_table_size; n++) {
- qp = rcu_dereference_protected(dev->qp_dev->qp_table[n],
- lockdep_is_held(&dev->qp_dev->qpt_lock));
- RCU_INIT_POINTER(dev->qp_dev->qp_table[n], NULL);
+ spin_lock_irqsave(&dev->rdi.qp_dev->qpt_lock, flags);
+ for (n = 0; n < dev->rdi.qp_dev->qp_table_size; n++) {
+ l = &dev->rdi.qp_dev->qpt_lock;
+ qp = rcu_dereference_protected(dev->rdi.qp_dev->qp_table[n],
+ lockdep_is_held(l));
+ RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[n], NULL);
for (; qp; qp = rcu_dereference_protected(qp->next,
- lockdep_is_held(&dev->qp_dev->qpt_lock)))
+ lockdep_is_held(l)))
qp_inuse++;
}
- spin_unlock_irqrestore(&dev->qp_dev->qpt_lock, flags);
+ spin_unlock_irqrestore(&dev->rdi.qp_dev->qpt_lock, flags);
synchronize_rcu();
bail:
return qp_inuse;
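
The synchronize_rcu() above is what makes the returned qp_inuse count trustworthy: it waits out any reader still traversing a chain that was just unhooked. The reader side does not appear in this hunk, but it would look roughly like the liburcu sketch below (sk_qp/sk_table as in the earlier insert_qp sketch; taking a reference before rcu_read_unlock() is elided):

static struct sk_qp *sk_lookup(unsigned bucket, unsigned qpn)
{
	struct sk_qp *qp;

	rcu_read_lock();
	for (qp = rcu_dereference(sk_table[bucket]); qp;
	     qp = rcu_dereference(qp->next))
		if (qp->qpn == qpn)
			break;
	rcu_read_unlock();
	return qp;
}
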
qp->s_flags = RVT_S_SIGNAL_REQ_WR;
dev = to_idev(ibpd->device);
dd = dd_from_dev(dev);
- err = alloc_qpn(dd, &dev->qp_dev->qpn_table, init_attr->qp_type,
+ err = alloc_qpn(dd, &dev->rdi.qp_dev->qpn_table,
+ init_attr->qp_type,
init_attr->port_num);
if (err < 0) {
ret = ERR_PTR(err);
kref_put(&qp->ip->ref, rvt_release_mmap_info);
else
vfree(qp->r_rq.wq);
- free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
+ free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
bail_qp:
kfree(priv->s_hdr);
kfree(priv);
spin_unlock_irq(&qp->r_lock);
/* all users cleaned up, mark it available */
- free_qpn(&dev->qp_dev->qpn_table, qp->ibqp.qp_num);
+ free_qpn(&dev->rdi.qp_dev->qpn_table, qp->ibqp.qp_num);
spin_lock(&dev->n_qps_lock);
dev->n_qps_allocated--;
spin_unlock(&dev->n_qps_lock);
* init_qpn_table - initialize the QP number table for a device
* @qpt: the QPN table
*/
-static int init_qpn_table(struct hfi1_devdata *dd, struct hfi1_qpn_table *qpt)
+static int init_qpn_table(struct hfi1_devdata *dd, struct rvt_qpn_table *qpt)
{
u32 offset, qpn, i;
- struct qpn_map *map;
+ struct rvt_qpn_map *map;
int ret = 0;
spin_lock_init(&qpt->lock);
/* ensure we don't assign QPs from the KDETH 64K window */
qpn = kdeth_qp << 16;
- qpt->nmaps = qpn / BITS_PER_PAGE;
+ qpt->nmaps = qpn / RVT_BITS_PER_PAGE;
/* This should always be zero */
- offset = qpn & BITS_PER_PAGE_MASK;
+ offset = qpn & RVT_BITS_PER_PAGE_MASK;
map = &qpt->map[qpt->nmaps];
dd_dev_info(dd, "Reserving QPNs for KDETH window from 0x%x to 0x%x\n",
qpn, qpn + 65535);
}
set_bit(offset, map->page);
offset++;
- if (offset == BITS_PER_PAGE) {
+ if (offset == RVT_BITS_PER_PAGE) {
/* next page */
qpt->nmaps++;
map++;
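
Reserving the KDETH window amounts to pre-setting every bit in the covered range so alloc_qpn() can never hand those QPNs out; the loop above does this per map page, bumping nmaps at each page boundary. A flat-bitmap sketch of the same effect (hypothetical sk_* names, SK_BPL as in the earlier allocator sketch):

/* Mark QPNs [start, start + count) permanently in use. */
static void sk_reserve_range(unsigned long *bits, unsigned start,
			     unsigned count)
{
	unsigned qpn;

	for (qpn = start; qpn < start + count; qpn++)
		bits[qpn / SK_BPL] |= 1UL << (qpn % SK_BPL);
}

/* e.g. sk_reserve_range(bits, kdeth_qp << 16, 65536); */
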
* free_qpn_table - free the QP number table for a device
* @qpt: the QPN table
*/
-static void free_qpn_table(struct hfi1_qpn_table *qpt)
+static void free_qpn_table(struct rvt_qpn_table *qpt)
{
int i;
int ret = -ENOMEM;
/* allocate parent object */
- dev->qp_dev = kzalloc(sizeof(*dev->qp_dev), GFP_KERNEL);
- if (!dev->qp_dev)
+ dev->rdi.qp_dev = kzalloc(sizeof(*dev->rdi.qp_dev), GFP_KERNEL);
+ if (!dev->rdi.qp_dev)
goto nomem;
/* allocate hash table */
- dev->qp_dev->qp_table_size = hfi1_qp_table_size;
- dev->qp_dev->qp_table_bits = ilog2(hfi1_qp_table_size);
- dev->qp_dev->qp_table =
- kmalloc(dev->qp_dev->qp_table_size *
- sizeof(*dev->qp_dev->qp_table),
+ dev->rdi.qp_dev->qp_table_size = hfi1_qp_table_size;
+ dev->rdi.qp_dev->qp_table_bits = ilog2(hfi1_qp_table_size);
+ dev->rdi.qp_dev->qp_table =
+ kmalloc(dev->rdi.qp_dev->qp_table_size *
+ sizeof(*dev->rdi.qp_dev->qp_table),
GFP_KERNEL);
- if (!dev->qp_dev->qp_table)
+ if (!dev->rdi.qp_dev->qp_table)
goto nomem;
- for (i = 0; i < dev->qp_dev->qp_table_size; i++)
- RCU_INIT_POINTER(dev->qp_dev->qp_table[i], NULL);
- spin_lock_init(&dev->qp_dev->qpt_lock);
+ for (i = 0; i < dev->rdi.qp_dev->qp_table_size; i++)
+ RCU_INIT_POINTER(dev->rdi.qp_dev->qp_table[i], NULL);
+ spin_lock_init(&dev->rdi.qp_dev->qpt_lock);
/* initialize qpn map */
- ret = init_qpn_table(dd, &dev->qp_dev->qpn_table);
+ ret = init_qpn_table(dd, &dev->rdi.qp_dev->qpn_table);
if (ret)
goto nomem;
return ret;
nomem:
- if (dev->qp_dev) {
- kfree(dev->qp_dev->qp_table);
- free_qpn_table(&dev->qp_dev->qpn_table);
- kfree(dev->qp_dev);
+ if (dev->rdi.qp_dev) {
+ kfree(dev->rdi.qp_dev->qp_table);
+ free_qpn_table(&dev->rdi.qp_dev->qpn_table);
+ kfree(dev->rdi.qp_dev);
}
return ret;
}
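
The table size is kept a power of two so qpn_hash() can reduce a QPN to qp_table_bits bits cheaply; with the default hfi1_qp_table_size of 256, qp_table_bits = ilog2(256) = 8. qpn_hash() itself is not in this hunk; a plausible stand-in in the style of the kernel's hash_32() multiplicative hash (the constant is the kernel's 32-bit golden-ratio value, and table_bits is assumed to be at least 1):

#include <stdint.h>

/* Hypothetical stand-in for qpn_hash(): fold a QPN into table_bits bits. */
static unsigned sk_qpn_hash(uint32_t qpn, unsigned table_bits)
{
	return (uint32_t)(qpn * 0x61C88647u) >> (32 - table_bits);
}
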
if (qps_inuse)
dd_dev_err(dd, "QP memory leak! %u still in use\n",
qps_inuse);
- if (dev->qp_dev) {
- kfree(dev->qp_dev->qp_table);
- free_qpn_table(&dev->qp_dev->qpn_table);
- kfree(dev->qp_dev);
+ if (dev->rdi.qp_dev) {
+ kfree(dev->rdi.qp_dev->qp_table);
+ free_qpn_table(&dev->rdi.qp_dev->qpn_table);
+ kfree(dev->rdi.qp_dev);
}
}
*
* n = 0..iter->specials are the special qp indices
*
- * n = iter->specials..dev->qp_dev->qp_table_size+iter->specials are
+ * n = iter->specials..dev->rdi.qp_dev->qp_table_size+iter->specials are
* the potential hash bucket entries
*
*/
- for (; n < dev->qp_dev->qp_table_size + iter->specials; n++) {
+ for (; n < dev->rdi.qp_dev->qp_table_size + iter->specials; n++) {
if (pqp) {
qp = rcu_dereference(pqp->next);
} else {
qp = rcu_dereference(ibp->rvp.qp[1]);
} else {
qp = rcu_dereference(
- dev->qp_dev->qp_table[
+ dev->rdi.qp_dev->qp_table[
(n - iter->specials)]);
}
}
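
To make the iterator comment concrete, a worked example (assuming two ports, which presumably gives iter->specials == 4, with the default qp_table_size of 256):

/*
 *   n = 0..3    -> the per-port special QPs (QP0/QP1)
 *   n = 4..259  -> hash bucket (n - 4)
 */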