ah->type = MTHCA_AH_PCI_POOL;
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
ah->av = kmalloc(sizeof *ah->av, GFP_ATOMIC);
if (!ah->av)
return -ENOMEM;
int __devinit mthca_init_av_table(struct mthca_dev *dev)
{
int err;
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
return 0;
err = mthca_alloc_init(&dev->av_table.alloc,
void __devexit mthca_cleanup_av_table(struct mthca_dev *dev)
{
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
return;
if (dev->av_table.av_map)
mthca_dbg(dev, "FW version %012llx, max commands %d\n",
(unsigned long long) dev->fw_ver, dev->cmd.max_cmds);
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET);
MTHCA_GET(dev->fw.arbel.clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET);
MTHCA_GET(dev->fw.arbel.eq_arm_base, outbox, QUERY_FW_EQ_ARM_BASE_OFFSET);
mthca_dbg(dev, "Flags: %08x\n", dev_lim->flags);
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET);
dev_lim->hca.arbel.resize_srq = field & 1;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
/* TPT attributes */
MTHCA_PUT(inbox, param->mpt_base, INIT_HCA_MPT_BASE_OFFSET);
- if (dev->hca_type != ARBEL_NATIVE)
+ if (!mthca_is_memfree(dev))
MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET);
MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET);
MTHCA_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET);
MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET);
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET);
MTHCA_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET);
MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET);
{
u32 doorbell[2];
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
*cq->set_ci_db = cpu_to_be32(cq->cons_index);
wmb();
} else {
if (cq->cqn == -1)
return -ENOMEM;
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
cq->arm_sn = 1;
err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
cq_context->lkey = cpu_to_be32(cq->mr.ibmr.lkey);
cq_context->cqn = cpu_to_be32(cq->cqn);
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
cq_context->ci_db = cpu_to_be32(cq->set_ci_db_index);
cq_context->state_db = cpu_to_be32(cq->arm_db_index);
}
err_out_mailbox:
kfree(mailbox);
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
err_out_ci:
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
err_out_icm:
mthca_free_mr(dev, &cq->mr);
mthca_free_cq_buf(dev, cq);
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
mthca_table_put(dev, dev->cq_table.table, cq->cqn);
return container_of(ibdev, struct mthca_dev, ib_dev);
}
+static inline int mthca_is_memfree(struct mthca_dev *dev)
+{
+ return dev->hca_type == ARBEL_NATIVE;
+}
+
#endif /* MTHCA_DEV_H */
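/*
 * Editorial sketch, not part of the patch: the helper above lets call
 * sites ask "is this a mem-free HCA?" instead of comparing hca_type
 * against ARBEL_NATIVE directly, so supporting another mem-free chip
 * later only means updating mthca_is_memfree().  The function below is
 * a hypothetical illustration of the dispatch pattern used throughout
 * this diff.
 */
static const char *example_mode_name(struct mthca_dev *dev)
{
	/* mthca_is_memfree() returns nonzero only for mem-free HCAs */
	return mthca_is_memfree(dev) ? "mem-free (MT25208 native)" : "Tavor mode";
}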
static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci)
{
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
arbel_set_eq_ci(dev, eq, ci);
else
tavor_set_eq_ci(dev, eq, ci);
static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)
{
- if (dev->hca_type != ARBEL_NATIVE) {
+ if (!mthca_is_memfree(dev)) {
u32 doorbell[2];
doorbell[0] = cpu_to_be32(MTHCA_EQ_DB_DISARM_CQ | eqn);
MTHCA_EQ_OWNER_HW |
MTHCA_EQ_STATE_ARMED |
MTHCA_EQ_FLAG_TR);
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
eq_context->flags |= cpu_to_be32(MTHCA_EQ_STATE_ARBEL);
eq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
eq_context->arbel_pd = cpu_to_be32(dev->driver_pd.pd_num);
} else {
eq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
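/*
 * Editorial note: nent, the number of EQ entries, is a power of two
 * here, so ffs(nent) - 1 is log2(nent), stored in bits 31:24 of
 * logsize_usrpage.  In Tavor mode the low bits are then OR'ed with the
 * driver UAR index, while mem-free HCAs instead carry the protection
 * domain in the separate arbel_pd field.
 */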
mthca_base = pci_resource_start(dev->pdev, 0);
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
/*
* We assume that the EQ arm and EQ set CI registers
* fall within the first BAR. We can't trust the
static void __devexit mthca_unmap_eq_regs(struct mthca_dev *dev)
{
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
mthca_unmap_reg(dev, (pci_resource_len(dev->pdev, 0) - 1) &
dev->fw.arbel.eq_set_ci_base,
MTHCA_EQ_SET_CI_SIZE,
for (i = 0; i < MTHCA_NUM_EQ; ++i) {
err = request_irq(dev->eq_table.eq[i].msi_x_vector,
- dev->hca_type == ARBEL_NATIVE ?
+ mthca_is_memfree(dev) ?
mthca_arbel_msi_x_interrupt :
mthca_tavor_msi_x_interrupt,
0, eq_name[i], dev->eq_table.eq + i);
}
} else {
err = request_irq(dev->pdev->irq,
- dev->hca_type == ARBEL_NATIVE ?
+ mthca_is_memfree(dev) ?
mthca_arbel_interrupt :
mthca_tavor_interrupt,
SA_SHIRQ, DRV_NAME, dev);
dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status);
for (i = 0; i < MTHCA_EQ_CMD; ++i)
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
else
tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn);
static int __devinit mthca_init_hca(struct mthca_dev *mdev)
{
- if (mdev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(mdev))
return mthca_init_arbel(mdev);
else
return mthca_init_tavor(mdev);
mthca_CLOSE_HCA(mdev, 0, &status);
- if (mdev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(mdev)) {
mthca_free_icm_table(mdev, mdev->cq_table.table);
mthca_free_icm_table(mdev, mdev->qp_table.eqp_table);
mthca_free_icm_table(mdev, mdev->qp_table.qp_table);
mdev->pdev = pdev;
mdev->hca_type = id->driver_data;
- if (mdev->hca_type == ARBEL_NATIVE && !mthca_memfree_warned++)
+ if (mthca_is_memfree(mdev) && !mthca_memfree_warned++)
mthca_warn(mdev, "Warning: native MT25208 mode support is incomplete. "
"Your HCA may not work properly.\n");
{
int i;
- if (dev->hca_type != ARBEL_NATIVE)
+ if (!mthca_is_memfree(dev))
return 0;
dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
int i;
u8 status;
- if (dev->hca_type != ARBEL_NATIVE)
+ if (!mthca_is_memfree(dev))
return;
/*
if (seg == -1)
return -1;
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
seg + (1 << order) - 1)) {
mthca_buddy_free(buddy, seg, order);
{
mthca_buddy_free(buddy, seg, order);
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
mthca_table_put_range(dev, dev->mr_table.mtt_table, seg,
seg + (1 << order) - 1);
}
static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
return arbel_hw_index_to_key(ind);
else
return tavor_hw_index_to_key(ind);
static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
return arbel_key_to_hw_index(key);
else
return tavor_key_to_hw_index(key);
return -ENOMEM;
mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
if (err)
goto err_out_mpt_free;
return err;
err_out_table:
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
mthca_table_put(dev, dev->mr_table.mpt_table, key);
err_out_mpt_free:
return -ENOMEM;
mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
if (err)
goto err_out_mpt_free;
mthca_free_mtt(dev, mr->first_seg, mr->order, &dev->mr_table.mtt_buddy);
err_out_table:
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
mthca_table_put(dev, dev->mr_table.mpt_table, key);
err_out_mpt_free:
if (order >= 0)
mthca_free_mtt(dev, first_seg, order, buddy);
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
mthca_table_put(dev, dev->mr_table.mpt_table,
arbel_key_to_hw_index(lkey));
return -EINVAL;
/* For Arbel, all MTTs must fit in the same page. */
- if (dev->hca_type == ARBEL_NATIVE &&
+ if (mthca_is_memfree(dev) &&
mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
return -EINVAL;
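/*
 * Editorial note: each entry in mr->mem.arbel.mtts is 8 bytes, so with
 * 4 KB pages (PAGE_SIZE is architecture-dependent) the check above
 * limits one FMR to 4096 / 8 = 512 pages per mapping.
 */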
idx = key & (dev->limits.num_mpts - 1);
mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
if (err)
goto err_out_mpt_free;
mtt_seg = mr->first_seg * MTHCA_MTT_SEG_SIZE;
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
mr->first_seg);
BUG_ON(!mr->mem.arbel.mtts);
dev->mr_table.fmr_mtt_buddy);
err_out_table:
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
mthca_table_put(dev, dev->mr_table.mpt_table, key);
err_out_mpt_free:
if (err)
return err;
- if (dev->hca_type != ARBEL_NATIVE &&
+ if (!mthca_is_memfree(dev) &&
(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
dev->limits.fmr_reserved_mtts = 0;
else
profile[i].type = i;
profile[i].log_num = max(ffs(profile[i].num) - 1, 0);
profile[i].size *= profile[i].num;
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
profile[i].size = max(profile[i].size, (u64) PAGE_SIZE);
}
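/*
 * Editorial note: for mem-free HCAs each profile entry is rounded up
 * to at least one page, presumably because HCA context memory (ICM) is
 * mapped into the device at page granularity; ffs(num) - 1 again
 * computes log2 for the power-of-two resource counts.
 */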
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
mem_base = 0;
mem_avail = dev_lim->hca.arbel.max_icm_sz;
} else {
(unsigned long long) profile[i].size);
}
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
mthca_dbg(dev, "HCA context memory: reserving %d KB\n",
(int) (total_size >> 10));
else
* out of the MR pool. They don't use additional memory, but
* we assign them as part of the HCA profile anyway.
*/
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
dev->limits.fmr_reserved_mtts = 0;
else
dev->limits.fmr_reserved_mtts = request->fmr_reserved_mtts;
if (!mdev)
return 0;
- if (mdev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(mdev)) {
list_for_each_entry(fmr, fmr_list, list)
mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
dev->ib_dev.alloc_fmr = mthca_alloc_fmr;
dev->ib_dev.unmap_fmr = mthca_unmap_fmr;
dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr;
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr;
else
dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr;
dev->ib_dev.detach_mcast = mthca_multicast_detach;
dev->ib_dev.process_mad = mthca_process_mad;
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq;
dev->ib_dev.post_send = mthca_arbel_post_send;
dev->ib_dev.post_recv = mthca_arbel_post_receive;
else if (attr_mask & IB_QP_PATH_MTU)
qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
qp_context->rq_size_stride =
((ffs(qp->rq.max) - 1) << 3) | (qp->rq.wqe_shift - 4);
qp_context->sq_size_stride =
qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
qp_context->snd_db_index = cpu_to_be32(qp->sq.db_index);
}
qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);
if (attr_mask & IB_QP_QKEY) {
size += 2 * sizeof (struct mthca_data_seg);
break;
case UD:
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
size += sizeof (struct mthca_arbel_ud_seg);
else
size += sizeof (struct mthca_tavor_ud_seg);
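/*
 * Editorial note: the UD address-vector segment has a different layout
 * (and size) in the two firmware interfaces, hence the per-type sizeof
 * when computing the send WQE size.
 */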
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
int ret = 0;
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
if (ret)
return ret;
static void mthca_free_memfree(struct mthca_dev *dev,
struct mthca_qp *qp)
{
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
return ret;
}
- if (dev->hca_type == ARBEL_NATIVE) {
+ if (mthca_is_memfree(dev)) {
for (i = 0; i < qp->rq.max; ++i) {
wqe = get_recv_wqe(qp, i);
wqe->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
{
int i;
- if (dev->hca_type != ARBEL_NATIVE)
+ if (!mthca_is_memfree(dev))
return;
for (i = 0; 1 << i < qp->rq.max; ++i)
else
next = get_recv_wqe(qp, index);
- if (dev->hca_type == ARBEL_NATIVE)
+ if (mthca_is_memfree(dev))
*dbd = 1;
else
*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
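/*
 * Editorial note: for mem-free HCAs the doorbell-count (dbd) flag is
 * always reported as 1, while Tavor-mode HCAs recover it from the
 * MTHCA_NEXT_DBD bit of the following WQE.
 */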