int err;
err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,
- PAGE_SIZE * 2, &buf->buf, GFP_KERNEL);
+ PAGE_SIZE * 2, &buf->buf);
if (err)
goto out;
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);
if (err)
goto err_mtt;
uar = &to_mucontext(context)->uar;
} else {
- err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL);
+ err = mlx4_db_alloc(dev->dev, &cq->db, 1);
if (err)
goto err_cq;
MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO,
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP,
- MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO,
/* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */
MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI,
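With MLX4_IB_QP_CREATE_USE_GFP_NOIO dropped from the flag set above, a consumer that still has to avoid I/O recursion during QP creation would scope that constraint at the call site instead of plumbing gfp flags through the driver. A minimal sketch, assuming the memalloc_noio scope API from <linux/sched/mm.h> and an ordinary verbs caller; the helper name is hypothetical and not part of this patch:

#include <linux/sched/mm.h>
#include <rdma/ib_verbs.h>

/* Sketch only: GFP_KERNEL allocations made inside the scope behave as
 * GFP_NOIO, so the driver no longer needs a per-call gfp argument.
 */
static struct ib_qp *create_qp_noio(struct ib_pd *pd,
				    struct ib_qp_init_attr *init_attr)
{
	unsigned int noio_flags;
	struct ib_qp *qp;

	noio_flags = memalloc_noio_save();
	qp = ib_create_qp(pd, init_attr);
	memalloc_noio_restore(noio_flags);

	return qp;
}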
static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
- struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp,
- gfp_t gfp)
+ struct ib_udata *udata, int sqpn,
+ struct mlx4_ib_qp **caller_qp)
{
int qpn;
int err;
if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI ||
(qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER |
MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) {
- sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp);
+ sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL);
if (!sqp)
return -ENOMEM;
qp = &sqp->qp;
qp->pri.vid = 0xFFFF;
qp->alt.vid = 0xFFFF;
} else {
- qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp);
+ qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL);
if (!qp)
return -ENOMEM;
qp->pri.vid = 0xFFFF;
goto err;
if (qp_has_rq(init_attr)) {
- err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp);
+ err = mlx4_db_alloc(dev->dev, &qp->db, 0);
if (err)
goto err;
}
if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size,
- &qp->buf, gfp)) {
+ &qp->buf)) {
memcpy(&init_attr->cap, &backup_cap,
sizeof(backup_cap));
err = set_kernel_sq_size(dev, &init_attr->cap, qp_type,
goto err_db;
if (mlx4_buf_alloc(dev->dev, qp->buf_size,
- PAGE_SIZE * 2, &qp->buf, gfp)) {
+ PAGE_SIZE * 2, &qp->buf)) {
err = -ENOMEM;
goto err_db;
}
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp);
+ err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);
if (err)
goto err_mtt;
qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64),
- gfp | __GFP_NOWARN);
+ GFP_KERNEL | __GFP_NOWARN);
if (!qp->sq.wrid)
qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64),
- gfp, PAGE_KERNEL);
+ GFP_KERNEL, PAGE_KERNEL);
qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64),
- gfp | __GFP_NOWARN);
+ GFP_KERNEL | __GFP_NOWARN);
if (!qp->rq.wrid)
qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64),
- gfp, PAGE_KERNEL);
+ GFP_KERNEL, PAGE_KERNEL);
if (!qp->sq.wrid || !qp->rq.wrid) {
err = -ENOMEM;
goto err_wrid;
if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
- err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp);
+ err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp);
if (err)
goto err_qpn;
int err;
int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK;
u16 xrcdn = 0;
- gfp_t gfp;
- gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ?
- GFP_NOIO : GFP_KERNEL;
/*
* We only support LSO, vendor flag1, and multicast loopback blocking,
* and only for kernel UD QPs.
MLX4_IB_SRIOV_TUNNEL_QP |
MLX4_IB_SRIOV_SQP |
MLX4_IB_QP_NETIF |
- MLX4_IB_QP_CREATE_ROCE_V2_GSI |
- MLX4_IB_QP_CREATE_USE_GFP_NOIO))
+ MLX4_IB_QP_CREATE_ROCE_V2_GSI))
return ERR_PTR(-EINVAL);
if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) {
return ERR_PTR(-EINVAL);
if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP |
- MLX4_IB_QP_CREATE_USE_GFP_NOIO |
MLX4_IB_QP_CREATE_ROCE_V2_GSI |
MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) &&
init_attr->qp_type != IB_QPT_UD) ||
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_RAW_PACKET:
- qp = kzalloc(sizeof *qp, gfp);
+ qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
qp->pri.vid = 0xFFFF;
case IB_QPT_UD:
{
err = create_qp_common(to_mdev(pd->device), pd, init_attr,
- udata, 0, &qp, gfp);
+ udata, 0, &qp);
if (err) {
kfree(qp);
return ERR_PTR(err);
}
err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata,
- sqpn,
- &qp, gfp);
+ sqpn, &qp);
if (err)
return ERR_PTR(err);
if (err)
goto err_mtt;
} else {
- err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL);
+ err = mlx4_db_alloc(dev->dev, &srq->db, 0);
if (err)
goto err_srq;
*srq->db.db = 0;
- if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf,
- GFP_KERNEL)) {
+ if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2,
+ &srq->buf)) {
err = -ENOMEM;
goto err_db;
}
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);
if (err)
goto err_mtt;
}
static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size,
- struct mlx4_buf *buf, gfp_t gfp)
+ struct mlx4_buf *buf)
{
dma_addr_t t;
buf->page_shift = get_order(size) + PAGE_SHIFT;
buf->direct.buf =
dma_zalloc_coherent(&dev->persist->pdev->dev,
- size, &t, gfp);
+ size, &t, GFP_KERNEL);
if (!buf->direct.buf)
return -ENOMEM;
* multiple pages, so we don't require too much contiguous memory.
*/
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf, gfp_t gfp)
+ struct mlx4_buf *buf)
{
if (size <= max_direct) {
- return mlx4_buf_direct_alloc(dev, size, buf, gfp);
+ return mlx4_buf_direct_alloc(dev, size, buf);
} else {
dma_addr_t t;
int i;
buf->npages = buf->nbufs;
buf->page_shift = PAGE_SHIFT;
buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- gfp);
+ GFP_KERNEL);
if (!buf->page_list)
return -ENOMEM;
for (i = 0; i < buf->nbufs; ++i) {
buf->page_list[i].buf =
dma_zalloc_coherent(&dev->persist->pdev->dev,
- PAGE_SIZE, &t, gfp);
+ PAGE_SIZE, &t, GFP_KERNEL);
if (!buf->page_list[i].buf)
goto err_free;
}
EXPORT_SYMBOL_GPL(mlx4_buf_free);
-static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device,
- gfp_t gfp)
+static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device)
{
struct mlx4_db_pgdir *pgdir;
- pgdir = kzalloc(sizeof *pgdir, gfp);
+ pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL);
if (!pgdir)
return NULL;
pgdir->bits[0] = pgdir->order0;
pgdir->bits[1] = pgdir->order1;
pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE,
- &pgdir->db_dma, gfp);
+ &pgdir->db_dma, GFP_KERNEL);
if (!pgdir->db_page) {
kfree(pgdir);
return NULL;
return 0;
}
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp)
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_db_pgdir *pgdir;
if (!mlx4_alloc_db_from_pgdir(pgdir, db, order))
goto out;
- pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp);
+ pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev);
if (!pgdir) {
ret = -ENOMEM;
goto out;
{
int err;
- err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL);
+ err = mlx4_db_alloc(dev, &wqres->db, 1);
if (err)
return err;
*wqres->db.db = 0;
- err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL);
+ err = mlx4_buf_direct_alloc(dev, size, &wqres->buf);
if (err)
goto err_db;
if (err)
goto err_buf;
- err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL);
+ err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
if (err)
goto err_mtt;
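Taken together, the alloc.c changes above leave every queue-buffer helper with a fixed GFP_KERNEL policy. A minimal sketch of a caller against the new signatures, following the same db/buf/mtt sequence as mlx4_alloc_hwq_res(); the helper name is hypothetical and error unwinding is trimmed:

#include <linux/mlx4/device.h>

/* Sketch only: none of the allocators below take a gfp_t argument any more. */
static int demo_alloc_wq(struct mlx4_dev *dev,
			 struct mlx4_hwq_resources *wqres, int size)
{
	int err;

	err = mlx4_db_alloc(dev, &wqres->db, 1);
	if (err)
		return err;

	err = mlx4_buf_alloc(dev, size, PAGE_SIZE * 2, &wqres->buf);
	if (err)
		return err;

	err = mlx4_mtt_init(dev, wqres->buf.npages, wqres->buf.page_shift,
			    &wqres->mtt);
	if (err)
		return err;

	return mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);
}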
if (*cqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &cq_table->table, *cqn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn);
if (err)
goto err_put;
return 0;
if (!context)
return -ENOMEM;
- err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, qpn, qp);
if (err) {
en_err(priv, "Failed to allocate qp #%x\n", qpn);
goto out;
en_err(priv, "Failed reserving drop qpn\n");
return err;
}
- err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp);
if (err) {
en_err(priv, "Failed allocating drop qp\n");
mlx4_qp_release_range(priv->mdev->dev, qpn, 1);
}
/* Configure RSS indirection qp */
- err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp,
- GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp);
if (err) {
en_err(priv, "Failed to allocate RSS indirection QP\n");
goto rss_err;
goto err_hwq_res;
}
- err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp, GFP_KERNEL);
+ err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp);
if (err) {
en_err(priv, "Failed allocating qp %d\n", ring->qpn);
goto err_reserve;
MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
- gfp_t gfp)
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj)
{
u32 i = (obj & (table->num_obj - 1)) /
(MLX4_TABLE_CHUNK_SIZE / table->obj_size);
}
table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
- (table->lowmem ? gfp : GFP_HIGHUSER) |
+ (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
__GFP_NOWARN, table->coherent);
if (!table->icm[i]) {
ret = -ENOMEM;
u32 i;
for (i = start; i <= end; i += inc) {
- err = mlx4_table_get(dev, table, i, GFP_KERNEL);
+ err = mlx4_table_get(dev, table, i);
if (err)
goto fail;
}
gfp_t gfp_mask, int coherent);
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
-int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
- gfp_t gfp);
+int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u32 start, u32 end);
void mlx4_cleanup_qp_table(struct mlx4_dev *dev);
void mlx4_cleanup_srq_table(struct mlx4_dev *dev);
void mlx4_cleanup_mcg_table(struct mlx4_dev *dev);
-int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp);
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn);
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn);
int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn);
void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn);
void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn);
int __mlx4_mpt_reserve(struct mlx4_dev *dev);
void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index);
-int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp);
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index);
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index);
u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order);
void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order);
__mlx4_mpt_release(dev, index);
}
-int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
+int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table;
- return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp);
+ return mlx4_table_get(dev, &mr_table->dmpt_table, index);
}
-static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp)
+static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index)
{
u64 param = 0;
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
- return __mlx4_mpt_alloc_icm(dev, index, gfp);
+ return __mlx4_mpt_alloc_icm(dev, index);
}
void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index)
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL);
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key));
if (err)
return err;
EXPORT_SYMBOL_GPL(mlx4_write_mtt);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- struct mlx4_buf *buf, gfp_t gfp)
+ struct mlx4_buf *buf)
{
u64 *page_list;
int err;
int i;
- page_list = kmalloc(buf->npages * sizeof *page_list,
- gfp);
+ page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL);
if (!page_list)
return -ENOMEM;
struct mlx4_mpt_entry *mpt_entry;
int err;
- err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL);
+ err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key));
if (err)
return err;
}
EXPORT_SYMBOL_GPL(mlx4_qp_release_range);
-int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
+int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
int err;
- err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->qp_table, qpn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->auxc_table, qpn);
if (err)
goto err_put_qp;
- err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->altc_table, qpn);
if (err)
goto err_put_auxc;
- err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn);
if (err)
goto err_put_altc;
- err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp);
+ err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn);
if (err)
goto err_put_rdmarc;
return err;
}
-static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp)
+static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn)
{
u64 param = 0;
MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
}
- return __mlx4_qp_alloc_icm(dev, qpn, gfp);
+ return __mlx4_qp_alloc_icm(dev, qpn);
}
void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn)
return qp;
}
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp)
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_qp_table *qp_table = &priv->qp_table;
qp->qpn = qpn;
- err = mlx4_qp_alloc_icm(dev, qpn, gfp);
+ err = mlx4_qp_alloc_icm(dev, qpn);
if (err)
return err;
return err;
if (!fw_reserved(dev, qpn)) {
- err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
+ err = __mlx4_qp_alloc_icm(dev, qpn);
if (err) {
res_abort_move(dev, slave, RES_QP, qpn);
return err;
if (err)
return err;
- err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
+ err = __mlx4_mpt_alloc_icm(dev, mpt->key);
if (err) {
res_abort_move(dev, slave, RES_MPT, id);
return err;
if (*srqn == -1)
return -ENOMEM;
- err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &srq_table->table, *srqn);
if (err)
goto err_out;
- err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL);
+ err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
if (err)
goto err_put;
return 0;
}
int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
- struct mlx4_buf *buf, gfp_t gfp);
+ struct mlx4_buf *buf);
void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf);
static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
{
int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
int start_index, int npages, u64 *page_list);
int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
- struct mlx4_buf *buf, gfp_t gfp);
+ struct mlx4_buf *buf);
-int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order,
- gfp_t gfp);
+int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order);
void mlx4_db_free(struct mlx4_dev *dev, struct mlx4_db *db);
int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres,
int *base, u8 flags);
void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt);
-int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp,
- gfp_t gfp);
+int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp);
void mlx4_qp_free(struct mlx4_dev *dev, struct mlx4_qp *qp);
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, u32 cqn, u16 xrcdn,