#include <linux/init.h>
#include <linux/errno.h>
#include <linux/mm.h>
+#include <linux/scatterlist.h>
#include <linux/mlx4/cmd.h>
enum {
 MLX4_TABLE_CHUNK_SIZE = 1 << 18
};
-void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm)
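+/*
+ * Free a chunk whose pages came from alloc_pages(): unmap the
+ * scatterlist if it was DMA-mapped, then release each page run.
+ */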
+static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
- struct mlx4_icm_chunk *chunk, *tmp;
int i;
- list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
- if (chunk->nsg > 0)
- pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
- PCI_DMA_BIDIRECTIONAL);
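+ /* nsg > 0 means pci_map_sg() succeeded for this chunk */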
+ if (chunk->nsg > 0)
+ pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
+ PCI_DMA_BIDIRECTIONAL);
+
+ for (i = 0; i < chunk->npages; ++i)
+ __free_pages(chunk->mem[i].page,
+ get_order(chunk->mem[i].length));
+}
- for (i = 0; i < chunk->npages; ++i)
- __free_pages(chunk->mem[i].page,
- get_order(chunk->mem[i].length));
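+/*
+ * Free a chunk backed by dma_alloc_coherent(): no unmap step is
+ * needed, and each scatterlist entry is one coherent buffer.
+ */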
+static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
+{
+ int i;
+
+ for (i = 0; i < chunk->npages; ++i)
+ dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
+ lowmem_page_address(chunk->mem[i].page),
+ sg_dma_address(&chunk->mem[i]));
+}
+
+void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
+{
+ struct mlx4_icm_chunk *chunk, *tmp;
+
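+ /* allow NULL so error paths can free unconditionally */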
+ if (!icm)
+ return;
+
+ list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
+ if (coherent)
+ mlx4_free_icm_coherent(dev, chunk);
+ else
+ mlx4_free_icm_pages(dev, chunk);
kfree(chunk);
}
kfree(icm);
}
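+/* Fill one scatterlist entry with a high-order page allocation */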
+static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
+{
+ mem->page = alloc_pages(gfp_mask, order);
+ if (!mem->page)
+ return -ENOMEM;
+
+ mem->length = PAGE_SIZE << order;
+ mem->offset = 0;
+ return 0;
+}
+
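+/*
+ * Fill one scatterlist entry with a coherent buffer; dma_alloc_coherent()
+ * writes the bus address straight into sg_dma_address(mem).
+ */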
+static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
+ int order, gfp_t gfp_mask)
+{
+ void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
+ &sg_dma_address(mem), gfp_mask);
+ if (!buf)
+ return -ENOMEM;
+
+ sg_set_buf(mem, buf, PAGE_SIZE << order);
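+ /* dma_alloc_coherent() returns page-aligned memory, so offset must be 0 */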
+ BUG_ON(mem->offset);
+ sg_dma_len(mem) = PAGE_SIZE << order;
+ return 0;
+}
+
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
- gfp_t gfp_mask)
+ gfp_t gfp_mask, int coherent)
{
struct mlx4_icm *icm;
struct mlx4_icm_chunk *chunk = NULL;
int cur_order;
+ int ret;
+
+ /* We use sg_set_buf for coherent allocs, which assumes low memory */
+ BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));
icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
if (!icm)
- return icm;
+ return NULL;
icm->refcount = 0;
INIT_LIST_HEAD(&icm->chunk_list);
while (1 << cur_order > npages)
--cur_order;
- chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
- if (chunk->mem[chunk->npages].page) {
- chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
- chunk->mem[chunk->npages].offset = 0;
+ if (coherent)
+ ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
+ &chunk->mem[chunk->npages],
+ cur_order, gfp_mask);
+ else
+ ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
+ cur_order, gfp_mask);
+
+ if (!ret) {
+ ++chunk->npages;
- if (++chunk->npages == MLX4_ICM_CHUNK_LEN) {
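+ /* a coherent piece is already DMA-mapped and occupies one sg entry */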
+ if (coherent)
+ ++chunk->nsg;
+ else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
}
}
- if (chunk) {
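+ /* Map the trailing, partially filled chunk; full chunks were mapped
+ * inside the loop, and coherent chunks are mapped at allocation time.
+ */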
+ if (!coherent && chunk) {
chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
chunk->npages,
PCI_DMA_BIDIRECTIONAL);
return icm;
fail:
- mlx4_free_icm(dev, icm);
+ mlx4_free_icm(dev, icm, coherent);
return NULL;
}
table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
- __GFP_NOWARN);
+ __GFP_NOWARN, table->coherent);
if (!table->icm[i]) {
ret = -ENOMEM;
goto out;
if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
(u64) i * MLX4_TABLE_CHUNK_SIZE)) {
- mlx4_free_icm(dev, table->icm[i]);
+ mlx4_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
ret = -ENOMEM;
goto out;
if (--table->icm[i]->refcount == 0) {
mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
- mlx4_free_icm(dev, table->icm[i]);
+ mlx4_free_icm(dev, table->icm[i], table->coherent);
table->icm[i] = NULL;
}
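+/*
+ * use_coherent selects dma_alloc_coherent() backing for all of this
+ * table's ICM chunks.
+ */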
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
u64 virt, int obj_size, int nobj, int reserved,
- int use_lowmem)
+ int use_lowmem, int use_coherent)
{
int obj_per_chunk;
int num_icm;
table->num_obj = nobj;
table->obj_size = obj_size;
table->lowmem = use_lowmem;
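+ /* record the allocation mode so table_get/put free chunks the same way */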
+ table->coherent = use_coherent;
mutex_init(&table->mutex);
for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
- __GFP_NOWARN);
+ __GFP_NOWARN, use_coherent);
if (!table->icm[i])
goto err;
if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
- mlx4_free_icm(dev, table->icm[i]);
+ mlx4_free_icm(dev, table->icm[i], use_coherent);
table->icm[i] = NULL;
goto err;
}
if (table->icm[i]) {
mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
- mlx4_free_icm(dev, table->icm[i]);
+ mlx4_free_icm(dev, table->icm[i], use_coherent);
}
return -ENOMEM;
if (table->icm[i]) {
mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
- mlx4_free_icm(dev, table->icm[i]);
+ mlx4_free_icm(dev, table->icm[i], table->coherent);
}
kfree(table->icm);
int err;
priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
- GFP_HIGHUSER | __GFP_NOWARN);
+ GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.fw_icm) {
mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
return -ENOMEM;
mlx4_UNMAP_FA(dev);
err_free:
- mlx4_free_icm(dev, priv->fw.fw_icm);
+ mlx4_free_icm(dev, priv->fw.fw_icm, 0);
return err;
}
((u64) (MLX4_CMPT_TYPE_QP *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
cmpt_entry_sz, dev->caps.num_qps,
- dev->caps.reserved_qps, 0);
+ dev->caps.reserved_qps, 0, 0);
if (err)
goto err;
((u64) (MLX4_CMPT_TYPE_SRQ *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
cmpt_entry_sz, dev->caps.num_srqs,
- dev->caps.reserved_srqs, 0);
+ dev->caps.reserved_srqs, 0, 0);
if (err)
goto err_qp;
((u64) (MLX4_CMPT_TYPE_CQ *
cmpt_entry_sz) << MLX4_CMPT_SHIFT),
cmpt_entry_sz, dev->caps.num_cqs,
- dev->caps.reserved_cqs, 0);
+ dev->caps.reserved_cqs, 0, 0);
if (err)
goto err_srq;
cmpt_entry_sz,
roundup_pow_of_two(MLX4_NUM_EQ +
dev->caps.reserved_eqs),
- MLX4_NUM_EQ + dev->caps.reserved_eqs, 0);
+ MLX4_NUM_EQ + dev->caps.reserved_eqs, 0, 0);
if (err)
goto err_cq;
(unsigned long long) aux_pages << 2);
priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
- GFP_HIGHUSER | __GFP_NOWARN);
+ GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.aux_icm) {
mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
return -ENOMEM;
init_hca->mtt_base,
dev->caps.mtt_entry_sz,
dev->caps.num_mtt_segs,
- dev->caps.reserved_mtts, 1);
+ dev->caps.reserved_mtts, 1, 0);
if (err) {
mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
goto err_unmap_eq;
init_hca->dmpt_base,
dev_cap->dmpt_entry_sz,
dev->caps.num_mpts,
- dev->caps.reserved_mrws, 1);
+ dev->caps.reserved_mrws, 1, 1);
if (err) {
mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
goto err_unmap_mtt;
init_hca->qpc_base,
dev_cap->qpc_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0);
+ dev->caps.reserved_qps, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
goto err_unmap_dmpt;
init_hca->auxc_base,
dev_cap->aux_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0);
+ dev->caps.reserved_qps, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
goto err_unmap_qp;
init_hca->altc_base,
dev_cap->altc_entry_sz,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0);
+ dev->caps.reserved_qps, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
goto err_unmap_auxc;
init_hca->rdmarc_base,
dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
dev->caps.num_qps,
- dev->caps.reserved_qps, 0);
+ dev->caps.reserved_qps, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
goto err_unmap_altc;
init_hca->cqc_base,
dev_cap->cqc_entry_sz,
dev->caps.num_cqs,
- dev->caps.reserved_cqs, 0);
+ dev->caps.reserved_cqs, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
goto err_unmap_rdmarc;
init_hca->srqc_base,
dev_cap->srq_entry_sz,
dev->caps.num_srqs,
- dev->caps.reserved_srqs, 0);
+ dev->caps.reserved_srqs, 0, 0);
if (err) {
mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
goto err_unmap_cq;
init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
dev->caps.num_mgms + dev->caps.num_amgms,
dev->caps.num_mgms + dev->caps.num_amgms,
- 0);
+ 0, 0);
if (err) {
mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
goto err_unmap_srq;
mlx4_UNMAP_ICM_AUX(dev);
err_free_aux:
- mlx4_free_icm(dev, priv->fw.aux_icm);
+ mlx4_free_icm(dev, priv->fw.aux_icm, 0);
return err;
}
mlx4_unmap_eq_icm(dev);
mlx4_UNMAP_ICM_AUX(dev);
- mlx4_free_icm(dev, priv->fw.aux_icm);
+ mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
static void mlx4_close_hca(struct mlx4_dev *dev)
mlx4_CLOSE_HCA(dev, 0);
mlx4_free_icms(dev);
mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm);
+ mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
}
static int __devinit mlx4_init_hca(struct mlx4_dev *dev)
err_stop_fw:
mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, priv->fw.fw_icm);
+ mlx4_free_icm(dev, priv->fw.fw_icm, 0);
return err;
}