Fix some issues around int variables used in data structures related
to memory registration.
Handle int overflow in mlx4_init_icm_table() by using a u64 intermediate
variable and changing the num_obj field of struct mlx4_icm_table to u32.
Change some more fields/variables to use u32 instead of int so that they
cannot go negative when bit 31 is set.
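A minimal standalone illustration (not the driver code, and assuming an
8-byte MTT entry size purely for concreteness) of the two problems the
u64 intermediate and the u32 types avoid:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t nobj = 1U << 31;  /* bit 31 set: too big for a signed 32-bit int */
            uint32_t obj_size = 8;     /* illustrative MTT entry size */

            /* a 32-bit product wraps: 2^31 * 8 = 2^34, which is 0 modulo 2^32 */
            uint32_t wrapped = nobj * obj_size;

            /* widening one operand to 64 bits first gives the intended value */
            uint64_t size = (uint64_t)nobj * obj_size;

            printf("wrapped=%u size=%llu\n", wrapped, (unsigned long long)size);
            return 0;
    }

The diff below applies the same idea: the table size is computed once as
a u64 and the per-chunk comparisons use that value.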
Also subtract log_mtts_per_seg from the exponent when computing
num_mtt, since it is added back later on in that same code path.
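A worked example of the exponent arithmetic, taking log_mtts_per_seg = 3
as an illustrative value:

    1UL << (31 - 3) = 2^28, and 2^28 << 3 = 2^31

so once the later code shifts the value back up by log_mtts_per_seg, the
total number of MTTs is capped at 2^31 rather than 2^34.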
This and the previous commit fix issues that prevented commit
db5a7a65c058 ("mlx4_core: Scale size of MTT table with system RAM")
from working.  Now that the number of MTTs is scaled with the size of
system RAM, we can map up to 8TB.
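For reference, the 8TB figure follows from the 2^31 MTT cap, assuming
each MTT entry maps one 4KB page:

    2^31 entries * 2^12 bytes per page = 2^43 bytes = 8TB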
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Jack Morgenstein <jackm@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
}
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
- u64 virt, int obj_size, int nobj, int reserved,
+ u64 virt, int obj_size, u32 nobj, int reserved,
int use_lowmem, int use_coherent)
{
int obj_per_chunk;
int num_icm;
unsigned chunk_size;
int i;
+ u64 size;
obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;
table->coherent = use_coherent;
mutex_init(&table->mutex);
+ size = (u64) nobj * obj_size;
for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
chunk_size = MLX4_TABLE_CHUNK_SIZE;
- if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
- chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);
+ if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > size)
+ chunk_size = PAGE_ALIGN(size -
+ i * MLX4_TABLE_CHUNK_SIZE);
table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
int start, int end);
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
- u64 virt, int obj_size, int nobj, int reserved,
+ u64 virt, int obj_size, u32 nobj, int reserved,
int use_lowmem, int use_coherent);
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle);
struct mlx4_buddy {
unsigned long **bits;
unsigned int *num_free;
- int max_order;
+ u32 max_order;
spinlock_t lock;
};
struct mlx4_icm_table {
u64 virt;
int num_icm;
- int num_obj;
+ u32 num_obj;
int obj_size;
int lowmem;
int coherent;
return err;
err = mlx4_buddy_init(&mr_table->mtt_buddy,
- ilog2(dev->caps.num_mtts /
+ ilog2((u32)dev->caps.num_mtts /
(1 << log_mtts_per_seg)));
if (err)
goto err_buddy;
mlx4_alloc_mtt_range(dev,
fls(dev->caps.reserved_mtts - 1));
if (priv->reserved_mtts < 0) {
- mlx4_warn(dev, "MTT table of order %d is too small.\n",
+ mlx4_warn(dev, "MTT table of order %u is too small.\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
goto err_reserve_mtts;
u64 size;
u64 start;
int type;
- int num;
+ u32 num;
int log_num;
};
si_meminfo(&si);
request->num_mtt =
roundup_pow_of_two(max_t(unsigned, request->num_mtt,
- min(1UL << 31,
+ min(1UL << (31 - log_mtts_per_seg),
si.totalram >> (log_mtts_per_seg - 1))));
profile[MLX4_RES_QP].size = dev_cap->qpc_entry_sz;