msm_gem_prime.o \
msm_gem_shrinker.o \
msm_gem_submit.o \
+ msm_gem_vma.o \
msm_gpu.o \
msm_iommu.o \
msm_perf.o \
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
return ret;
}
- mmu = gpu->mmu;
+ mmu = gpu->aspace->mmu;
if (mmu) {
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"
{
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct device *dev = mdp4_kms->dev->dev;
- struct msm_mmu *mmu = mdp4_kms->mmu;
-
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
- }
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
if (mdp4_kms->blank_cursor_iova)
msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
+ }
+
if (mdp4_kms->rpm_enabled)
pm_runtime_disable(dev);
struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int irq, ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ config->iommu, "mdp4");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+
+ mdp4_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
-
- mdp4_kms->mmu = mmu;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
- mmu = NULL;
+ aspace = NULL;
}
- mdp4_kms->id = msm_register_mmu(dev, mmu);
+ mdp4_kms->id = msm_register_address_space(dev, aspace);
if (mdp4_kms->id < 0) {
ret = mdp4_kms->id;
dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
/* TODO: Chips that aren't apq8064 have a 200 Mhz max_clk */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(&platform_bus_type);
+ if (config.iommu) {
+ config.iommu->geometry.aperture_start = 0x1000;
+ config.iommu->geometry.aperture_end = 0xffffffff;
+ }
return &config;
}
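A note on the aperture values above: starting the aperture at 0x1000 rather
than 0 keeps iova 0 out of the allocator, so the vma code can treat a zero
vma->iova as "not mapped" (see msm_gem_unmap_vma() below). A rough sketch of
how these values become the page-granular drm_mm range inside
msm_gem_address_space_create(), assuming 4K pages (PAGE_SHIFT == 12);
the arithmetic is illustrative, not part of the patch:

	start = aperture_start >> PAGE_SHIFT;      /* 0x1000 >> 12 = page 1     */
	size  = (aperture_end >> PAGE_SHIFT) - 1;  /* 0xfffff - 1 = ~1M pages   */
	drm_mm_init(&aspace->mm, start, size);     /* ~4GB of iova, sans page 0 */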
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp_irq error_handler;
static struct mdp5_cfg_platform config = {};
config.iommu = iommu_domain_alloc(&platform_bus_type);
+ if (config.iommu) {
+ config.iommu->geometry.aperture_start = 0x1000;
+ config.iommu->geometry.aperture_end = 0xffffffff;
+ }
return &config;
}
#include <linux/of_irq.h>
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"
static void mdp5_kms_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_mmu *mmu = mdp5_kms->mmu;
+ struct msm_gem_address_space *aspace = mdp5_kms->aspace;
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
}
}
struct mdp5_kms *mdp5_kms;
struct mdp5_cfg *config;
struct msm_kms *kms;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int irq, i, ret;
/* priv->kms would have been populated by the MDP5 driver */
mdelay(16);
if (config->platform.iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
- dev_err(&pdev->dev, "failed to init iommu: %d\n", ret);
- iommu_domain_free(config->platform.iommu);
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ config->platform.iommu, "mdp5");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+ mdp5_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
dev_err(&pdev->dev, "failed to attach iommu: %d\n",
ret);
- mmu->funcs->destroy(mmu);
goto fail;
}
} else {
dev_info(&pdev->dev,
"no iommu, fallback to phys contig buffers for scanout\n");
- mmu = NULL;
+ aspace = NULL;
}
- mdp5_kms->mmu = mmu;
- mdp5_kms->id = msm_register_mmu(dev, mmu);
+ mdp5_kms->id = msm_register_address_space(dev, aspace);
if (mdp5_kms->id < 0) {
ret = mdp5_kms->id;
dev_err(&pdev->dev, "failed to register mdp5 iommu: %d\n", ret);
/* mapper-id used to request GEM buffer mapped for scanout: */
int id;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
.atomic_commit = msm_atomic_commit,
};
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
+int msm_register_address_space(struct drm_device *dev,
+ struct msm_gem_address_space *aspace)
{
struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_mmus++;
+ int idx = priv->num_aspaces++;
- if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
+ if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
return -EINVAL;
- priv->mmus[idx] = mmu;
+ priv->aspace[idx] = aspace;
return idx;
}
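The id returned here is the same index later used to select the per-object
vma (msm_obj->domain[id]) in msm_gem.c. A hypothetical driver-side sketch,
mirroring the mdp4/mdp5/gpu call sites in this patch (error paths trimmed;
"example" and the surrounding variables are illustrative):

	struct msm_gem_address_space *aspace;
	int id;

	aspace = msm_gem_address_space_create(&pdev->dev, domain, "example");
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);

	id = msm_register_address_space(dev, aspace); /* index into priv->aspace[] */
	if (id < 0)
		return id;

	/* later: resolve a BO's device address in that space */
	ret = msm_gem_get_iova(obj, id, &iova);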
struct msm_gem_submit;
struct msm_fence_context;
struct msm_fence_cb;
+struct msm_gem_address_space;
+struct msm_gem_vma;
#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
uint32_t pending_crtcs;
wait_queue_head_t pending_crtcs_event;
- /* registered MMUs: */
- unsigned int num_mmus;
- struct msm_mmu *mmus[NUM_DOMAINS];
+ /* Registered address spaces. Currently this is fixed per number of
+ * IOMMUs, i.e. one for the display block and one for the GPU block.
+ * Eventually, to do per-process GPU pagetables, we'll want one of
+ * these per process.
+ */
+ unsigned int num_aspaces;
+ struct msm_gem_address_space *aspace[NUM_DOMAINS];
unsigned int num_planes;
struct drm_plane *planes[8];
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool nonblock);
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+int msm_register_address_space(struct drm_device *dev,
+ struct msm_gem_address_space *aspace);
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, int npages);
+
+void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name);
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- struct msm_mmu *mmu = priv->mmus[id];
- if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = msm_obj->domain[id].iova;
- mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
- msm_obj->domain[id].iova = 0;
- }
+ msm_gem_unmap_vma(priv->aspace[id],
+ &msm_obj->domain[id], msm_obj->sgt);
}
}
return PTR_ERR(pages);
if (iommu_present(&platform_bus_type)) {
- struct msm_mmu *mmu = priv->mmus[id];
- uint32_t offset;
-
- if (WARN_ON(!mmu))
- return -EINVAL;
-
- offset = (uint32_t)mmap_offset(obj);
- ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
- obj->size, IOMMU_READ | IOMMU_WRITE);
- msm_obj->domain[id].iova = offset;
+ ret = msm_gem_map_vma(priv->aspace[id], &msm_obj->domain[id],
+ msm_obj->sgt, obj->size >> PAGE_SHIFT);
} else {
msm_obj->domain[id].iova = physaddr(obj);
}
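A worked example of the arithmetic in this path, assuming 4K pages and
illustrative numbers: a 64K BO asks msm_gem_map_vma() for
obj->size >> PAGE_SHIFT = 16 pages; if drm_mm places the node at page 0x100,
then:

	npages    = obj->size >> PAGE_SHIFT;        /* 0x10000 >> 12 = 16        */
	vma->iova = vma->node.start << PAGE_SHIFT;  /* 0x100 << 12  = 0x100000   */
	size      = npages << PAGE_SHIFT;           /* 16 << 12     = 64K mapped */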
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct reservation_object *robj = msm_obj->resv;
struct reservation_object_list *fobj;
+ struct msm_drm_private *priv = obj->dev->dev_private;
struct dma_fence *fence;
uint64_t off = drm_vma_node_start(&obj->vma_node);
const char *madv;
+ unsigned id;
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
break;
}
- seq_printf(m, "%08x: %c %2d (%2d) %08llx %p %zu%s\n",
+ seq_printf(m, "%08x: %c %2d (%2d) %08llx %p\t",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
obj->name, obj->refcount.refcount.counter,
- off, msm_obj->vaddr, obj->size, madv);
+ off, msm_obj->vaddr);
+
+ for (id = 0; id < priv->num_aspaces; id++)
+ seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+
+ seq_printf(m, " %zu%s\n", obj->size, madv);
rcu_read_lock();
fobj = rcu_dereference(robj->fence);
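With this change the debugfs dump prints one iova column per registered
address space, between the vaddr and the size. A purely hypothetical output
line, constructed from the format strings above (two address spaces, BO
mapped only in the first):

	00000002: A  1 ( 2) 00010000 ffffc90001234000	 00100000 00000000 65536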
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
- unsigned sz;
bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
if (WARN_ON(use_vram && !priv->vram.size))
return -EINVAL;
- sz = sizeof(*msm_obj);
- if (use_vram)
- sz += sizeof(struct drm_mm_node);
-
- msm_obj = kzalloc(sz, GFP_KERNEL);
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
if (use_vram)
- msm_obj->vram_node = (void *)&msm_obj[1];
+ msm_obj->vram_node = &msm_obj->domain[0].node;
msm_obj->flags = flags;
msm_obj->madv = MSM_MADV_WILLNEED;
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+struct msm_gem_address_space {
+ const char *name;
+ /* NOTE: the mm is managed at page granularity; node sizes and
+ * positions (mm_node->start) are both in units of pages:
+ */
+ struct drm_mm mm;
+ struct msm_mmu *mmu;
+};
+
+struct msm_gem_vma {
+ struct drm_mm_node node;
+ uint64_t iova;
+};
+
struct msm_gem_object {
struct drm_gem_object base;
struct sg_table *sgt;
void *vaddr;
- struct {
- // XXX
- uint32_t iova;
- } domain[NUM_DOMAINS];
+ struct msm_gem_vma domain[NUM_DOMAINS];
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
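To make the indirection concrete: each BO now carries one msm_gem_vma per
registered address space, indexed by the id handed out by
msm_register_address_space(). A hypothetical helper, for illustration only:

	static uint64_t example_bo_iova(struct msm_gem_object *msm_obj, int id)
	{
		/* domain[] is indexed by the registered aspace id;
		 * 0 means "not mapped in that space yet".
		 */
		return msm_obj->domain[id].iova;
	}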
--- /dev/null
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+void
+msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt)
+{
+ if (!vma->iova)
+ return;
+
+ if (aspace->mmu) {
+ unsigned size = vma->node.size << PAGE_SHIFT;
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
+ }
+
+ drm_mm_remove_node(&vma->node);
+
+ vma->iova = 0;
+}
+
+int
+msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
+{
+ int ret;
+
+ if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+ return 0;
+
+ ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages,
+ 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ return ret;
+
+ vma->iova = vma->node.start << PAGE_SHIFT;
+
+ if (aspace->mmu) {
+ unsigned size = npages << PAGE_SHIFT;
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ size, IOMMU_READ | IOMMU_WRITE);
+ }
+
+ return ret;
+}
+
+void
+msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
+{
+ drm_mm_takedown(&aspace->mm);
+ if (aspace->mmu)
+ aspace->mmu->funcs->destroy(aspace->mmu);
+ kfree(aspace);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name)
+{
+ struct msm_gem_address_space *aspace;
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ aspace->name = name;
+ aspace->mmu = msm_iommu_new(dev, domain);
+
+ drm_mm_init(&aspace->mm, (domain->geometry.aperture_start >> PAGE_SHIFT),
+ (domain->geometry.aperture_end >> PAGE_SHIFT) - 1);
+
+ return aspace;
+}
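Taken together, the new file gives address spaces a simple
create/map/unmap/destroy lifecycle. A hedged usage sketch, assuming a valid
device, iommu_domain, sg_table and page count (all names illustrative):

	struct msm_gem_address_space *aspace;
	struct msm_gem_vma vma = {};
	int ret;

	aspace = msm_gem_address_space_create(dev, domain, "example");
	if (IS_ERR(aspace))
		return PTR_ERR(aspace);

	ret = msm_gem_map_vma(aspace, &vma, sgt, npages);
	if (!ret) {
		/* vma.iova is now a page-aligned, device-visible address */
		msm_gem_unmap_vma(aspace, &vma, sgt); /* unmap before destroy */
	}
	msm_gem_address_space_destroy(aspace);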
*/
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
+ /* TODO 32b vs 64b address space.. */
+ iommu->geometry.aperture_start = 0x1000;
+ iommu->geometry.aperture_end = 0xffffffff;
+
dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
- if (IS_ERR(gpu->mmu)) {
- ret = PTR_ERR(gpu->mmu);
+ gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+ iommu, "gpu");
+ if (IS_ERR(gpu->aspace)) {
+ ret = PTR_ERR(gpu->aspace);
dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->mmu = NULL;
+ gpu->aspace = NULL;
iommu_domain_free(iommu);
goto fail;
}
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_mmu(drm, gpu->mmu);
+ gpu->id = msm_register_address_space(drm, gpu->aspace);
/* Create ringbuffer: */
msm_ringbuffer_destroy(gpu->rb);
}
- if (gpu->mmu)
- gpu->mmu->funcs->destroy(gpu->mmu);
+ if (gpu->aspace)
+ msm_gem_address_space_destroy(gpu->aspace);
if (gpu->fctx)
msm_fence_context_free(gpu->fctx);
void __iomem *mmio;
int irq;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int id;
/* Power Control: */