Fix bug in MTT allocation in mem-free mode.
I misunderstood the MTT size value returned by the firmware -- it is
really the size of a single MTT entry, since mem-free mode does not
segment the MTT as the original firmware did. This meant that our MTT
addresses ended up being off by a factor of 8, so MTT allocations
could overlap, and we could overwrite and corrupt earlier memory
regions when writing new MTT entries.
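
To make the arithmetic concrete, here is a hypothetical sketch (the
helper names and the MTT_ENTRY_SIZE macro are illustrative only, not
driver code; the real computation is in the hunks below). With 8-byte
MTT entries packed 8 per 64-byte segment, the buggy code strides
segment addresses by the entry size, so consecutive segments sit 8
bytes apart instead of 64:

	/* Illustrative only: entry-size vs. segment-size stride. */
	#define MTT_ENTRY_SIZE	8	/* per-entry size the firmware reports */

	static u64 mtt_addr_buggy(u64 mtt_base, int seg)
	{
		/* 8-byte stride: each segment base lands inside the
		 * previous segment's eight 8-byte entries */
		return mtt_base + seg * MTT_ENTRY_SIZE;
	}

	static u64 mtt_addr_fixed(u64 mtt_base, int seg)
	{
		/* 64-byte stride: room for all 8 entries per segment */
		return mtt_base + seg * MTHCA_MTT_SEG_SIZE;
	}

Writing the eight entries of segment 0 with the buggy stride clobbers
the bases of segments 1 through 7.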
We fix this by always using our 64-byte MTT segment size,
MTHCA_MTT_SEG_SIZE. This also allows some simplification of the code,
since there's no reason to keep the MTT segment size in a variable --
we can always use our enum value directly.
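
For reference, a minimal sketch of the constant this relies on (the
exact enum block is an assumption; the patch depends only on the
64-byte value and the 8-byte entry size it implies):

	enum {
		MTHCA_MTT_SEG_SIZE = 64	/* 8 MTT entries * 8 bytes each */
	};

With this value, MTHCA_MTT_SEG_SIZE / 8 in the hunks below is the
number of entries per segment, and ffs(MTHCA_MTT_SEG_SIZE) - 7
evaluates to 0, which appears to be the log-encoded segment size that
INIT_HCA expects.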
Signed-off-by: Roland Dreier <roland@topspin.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
dev_lim->max_sg = min_t(int, field, dev_lim->max_sg);
MTHCA_GET(size, outbox, QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET);
- dev_lim->mtt_seg_sz = size;
MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
dev_lim->mpt_entry_sz = size;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
} else {
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET);
dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f);
- dev_lim->mtt_seg_sz = MTHCA_MTT_SEG_SIZE;
dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE;
}
int cqc_entry_sz;
int srq_entry_sz;
int uar_scratch_entry_sz;
- int mtt_seg_sz;
int mpt_entry_sz;
union {
struct {
int reserved_eqs;
int num_mpts;
int num_mtt_segs;
- int mtt_seg_size;
int reserved_mtts;
int reserved_mrws;
int reserved_uars;
}
mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base,
- dev_lim->mtt_seg_sz,
+ MTHCA_MTT_SEG_SIZE,
mdev->limits.num_mtt_segs,
mdev->limits.reserved_mtts, 1);
if (!mdev->mr_table.mtt_table) {
goto err_out_mpt_free;
}
- for (i = dev->limits.mtt_seg_size / 8, mr->order = 0;
+ for (i = MTHCA_MTT_SEG_SIZE / 8, mr->order = 0;
i < list_len;
i <<= 1, ++mr->order)
; /* nothing */
mtt_entry = MAILBOX_ALIGN(mailbox);
mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
- mr->first_seg * dev->limits.mtt_seg_size);
+ mr->first_seg * MTHCA_MTT_SEG_SIZE);
mtt_entry[1] = 0;
for (i = 0; i < list_len; ++i)
mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
memset(&mpt_entry->lkey, 0,
sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));
mpt_entry->mtt_seg = cpu_to_be64(dev->mr_table.mtt_base +
- mr->first_seg * dev->limits.mtt_seg_size);
+ mr->first_seg * MTHCA_MTT_SEG_SIZE);
if (0) {
mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
profile[MTHCA_RES_RDB].size = MTHCA_RDB_ENTRY_SIZE;
profile[MTHCA_RES_MCG].size = MTHCA_MGM_ENTRY_SIZE;
profile[MTHCA_RES_MPT].size = dev_lim->mpt_entry_sz;
- profile[MTHCA_RES_MTT].size = dev_lim->mtt_seg_sz;
+ profile[MTHCA_RES_MTT].size = MTHCA_MTT_SEG_SIZE;
profile[MTHCA_RES_UAR].size = dev_lim->uar_scratch_entry_sz;
profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE;
profile[MTHCA_RES_UARC].size = request->uarc_size;
break;
case MTHCA_RES_MTT:
dev->limits.num_mtt_segs = profile[i].num;
- dev->limits.mtt_seg_size = dev_lim->mtt_seg_sz;
dev->mr_table.mtt_base = profile[i].start;
init_hca->mtt_base = profile[i].start;
- init_hca->mtt_seg_sz = ffs(dev_lim->mtt_seg_sz) - 7;
+ init_hca->mtt_seg_sz = ffs(MTHCA_MTT_SEG_SIZE) - 7;
break;
case MTHCA_RES_UAR:
dev->limits.num_uars = profile[i].num;