#include "drmP.h"
#include "nouveau_drv.h"
+#include "nouveau_vm.h"
-struct nvc0_gpuobj_node {
- struct nouveau_bo *vram;
- struct drm_mm_node *ramin;
- u32 align;
+struct nvc0_instmem_priv {
+ struct nouveau_gpuobj *bar1_pgd;
+ struct nouveau_channel *bar1;
+ struct nouveau_gpuobj *bar3_pgd;
+ struct nouveau_channel *bar3;
};
int
-nvc0_instmem_get(struct nouveau_gpuobj *gpuobj, u32 size, u32 align)
+nvc0_instmem_suspend(struct drm_device *dev)
{
- struct drm_device *dev = gpuobj->dev;
- struct nvc0_gpuobj_node *node = NULL;
- int ret;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- if (!node)
- return -ENOMEM;
- node->align = align;
-
- ret = nouveau_bo_new(dev, NULL, size, align, TTM_PL_FLAG_VRAM,
- 0, 0x0000, true, false, &node->vram);
- if (ret) {
- NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
- return ret;
- }
-
- ret = nouveau_bo_pin(node->vram, TTM_PL_FLAG_VRAM);
- if (ret) {
- NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
- nouveau_bo_ref(NULL, &node->vram);
- return ret;
- }
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
- gpuobj->vinst = node->vram->bo.mem.start << PAGE_SHIFT;
- gpuobj->size = node->vram->bo.mem.num_pages << PAGE_SHIFT;
- gpuobj->node = node;
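+ /* backing objects are (presumably) preserved by the common gpuobj
+ * suspend path now, so all that's left is flagging PRAMIN unusable */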
+ dev_priv->ramin_available = false;
return 0;
}
void
-nvc0_instmem_put(struct nouveau_gpuobj *gpuobj)
+nvc0_instmem_resume(struct drm_device *dev)
{
- struct nvc0_gpuobj_node *node;
-
- node = gpuobj->node;
- gpuobj->node = NULL;
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
- nouveau_bo_unpin(node->vram);
- nouveau_bo_ref(NULL, &node->vram);
- kfree(node);
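+ /* bind BAR1/BAR3 to their channels' instance blocks; the 0x100c80
+ * poke clears a vm-related bit whose exact meaning is undocumented */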
+ nv_mask(dev, 0x100c80, 0x00000001, 0x00000000);
+ nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12);
+ nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12);
+ dev_priv->ramin_available = true;
}
-int
-nvc0_instmem_map(struct nouveau_gpuobj *gpuobj)
+static void
+nvc0_channel_del(struct nouveau_channel **pchan)
{
- struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
- struct nvc0_gpuobj_node *node = gpuobj->node;
- struct drm_device *dev = gpuobj->dev;
- struct drm_mm_node *ramin = NULL;
- u32 pte, pte_end;
- u64 vram;
-
- do {
- if (drm_mm_pre_get(&dev_priv->ramin_heap))
- return -ENOMEM;
-
- spin_lock(&dev_priv->ramin_lock);
- ramin = drm_mm_search_free(&dev_priv->ramin_heap, gpuobj->size,
- node->align, 0);
- if (ramin == NULL) {
- spin_unlock(&dev_priv->ramin_lock);
- return -ENOMEM;
- }
-
- ramin = drm_mm_get_block_atomic(ramin, gpuobj->size, node->align);
- spin_unlock(&dev_priv->ramin_lock);
- } while (ramin == NULL);
-
- pte = (ramin->start >> 12) << 1;
- pte_end = ((ramin->size >> 12) << 1) + pte;
- vram = gpuobj->vinst;
-
- NV_DEBUG(dev, "pramin=0x%lx, pte=%d, pte_end=%d\n",
- ramin->start, pte, pte_end);
- NV_DEBUG(dev, "first vram page: 0x%010llx\n", gpuobj->vinst);
-
- while (pte < pte_end) {
- nv_wr32(dev, 0x702000 + (pte * 8), (vram >> 8) | 1);
- nv_wr32(dev, 0x702004 + (pte * 8), 0);
- vram += 4096;
- pte++;
- }
- dev_priv->engine.instmem.flush(dev);
+ struct nouveau_channel *chan;
- if (1) {
- u32 chan = nv_rd32(dev, 0x1700) << 16;
- nv_wr32(dev, 0x100cb8, (chan + 0x1000) >> 8);
- nv_wr32(dev, 0x100cbc, 0x80000005);
- }
+ chan = *pchan;
+ *pchan = NULL;
+ if (!chan)
+ return;
- node->ramin = ramin;
- gpuobj->pinst = ramin->start;
- return 0;
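+ /* drop the channel's vm reference and take down its suballocator
+ * before releasing the instance block itself */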
+ nouveau_vm_ref(NULL, &chan->vm, NULL);
+ if (chan->ramin_heap.free_stack.next)
+ drm_mm_takedown(&chan->ramin_heap);
+ nouveau_gpuobj_ref(NULL, &chan->ramin);
+ kfree(chan);
}
-void
-nvc0_instmem_unmap(struct nouveau_gpuobj *gpuobj)
+static int
+nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm,
+ struct nouveau_channel **pchan,
+ struct nouveau_gpuobj *pgd, u64 vm_size)
{
- struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
- struct nvc0_gpuobj_node *node = gpuobj->node;
- u32 pte, pte_end;
+ struct nouveau_channel *chan;
+ int ret;
- if (!node->ramin || !dev_priv->ramin_available)
- return;
+ chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ if (!chan)
+ return -ENOMEM;
+ chan->dev = dev;
- pte = (node->ramin->start >> 12) << 1;
- pte_end = ((node->ramin->size >> 12) << 1) + pte;
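+ /* allocate the channel's instance block */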
+ ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
+ }
- while (pte < pte_end) {
- nv_wr32(gpuobj->dev, 0x702000 + (pte * 8), 0);
- nv_wr32(gpuobj->dev, 0x702004 + (pte * 8), 0);
- pte++;
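+ /* suballocation heap inside the instance block; the first 4KiB is
+ * reserved for the channel control data written below */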
+ ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
}
- dev_priv->engine.instmem.flush(gpuobj->dev);
- spin_lock(&dev_priv->ramin_lock);
- drm_mm_put_block(node->ramin);
- node->ramin = NULL;
- spin_unlock(&dev_priv->ramin_lock);
-}
+ ret = nouveau_vm_ref(vm, &chan->vm, NULL);
+ if (ret) {
+ nvc0_channel_del(&chan);
+ return ret;
+ }
-void
-nvc0_instmem_flush(struct drm_device *dev)
-{
- nv_wr32(dev, 0x070000, 1);
- if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000))
- NV_ERROR(dev, "PRAMIN flush timeout\n");
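+ /* channel vm setup: page directory address at +0x200, vm limit
+ * at +0x208 */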
+ nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst));
+ nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst));
+ nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1));
+ nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1));
+
+ *pchan = chan;
+ return 0;
}
int
-nvc0_instmem_suspend(struct drm_device *dev)
+nvc0_instmem_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 *buf;
- int i;
+ struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+ struct pci_dev *pdev = dev->pdev;
+ struct nvc0_instmem_priv *priv;
+ struct nouveau_vm *vm = NULL;
+ int ret;
- dev_priv->susres.ramin_copy = vmalloc(65536);
- if (!dev_priv->susres.ramin_copy)
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
- buf = dev_priv->susres.ramin_copy;
-
- for (i = 0; i < 65536; i += 4)
- buf[i/4] = nv_rd32(dev, NV04_PRAMIN + i);
+ pinstmem->priv = priv;
+
+ /* BAR3 VM */
+ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0,
+ &dev_priv->bar3_vm);
+ if (ret)
+ goto error;
+
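+ /* flat page table covering the whole BAR3 aperture, one 8-byte PTE
+ * per 4KiB page; it can't be mapped through the vm it backs, hence
+ * DONT_MAP and the explicit nv50_instmem_map() below */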
+ ret = nouveau_gpuobj_new(dev, NULL,
+ (pci_resource_len(pdev, 3) >> 12) * 8, 0,
+ NVOBJ_FLAG_DONT_MAP |
+ NVOBJ_FLAG_ZERO_ALLOC,
+ &dev_priv->bar3_vm->pgt[0].obj[0]);
+ if (ret)
+ goto error;
+ dev_priv->bar3_vm->pgt[0].refcount[0] = 1;
+
+ nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]);
+
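+ /* page directory for the BAR3 channel */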
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
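+ /* dummy channel binding BAR3 accesses to the vm above */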
+ ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3,
+ priv->bar3_pgd, pci_resource_len(dev->pdev, 3));
+ if (ret)
+ goto error;
+
+ /* BAR1 VM */
+ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm);
+ if (ret)
+ goto error;
+
+ ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096,
+ NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd);
+ if (ret)
+ goto error;
+
+ ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd);
+ if (ret)
+ goto error;
+ nouveau_vm_ref(NULL, &vm, NULL);
+
+ ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1,
+ priv->bar1_pgd, pci_resource_len(dev->pdev, 1));
+ if (ret)
+ goto error;
+
+ nvc0_instmem_resume(dev);
return 0;
+error:
+ nvc0_instmem_takedown(dev);
+ return ret;
}
void
-nvc0_instmem_resume(struct drm_device *dev)
-{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- u32 *buf = dev_priv->susres.ramin_copy;
- u64 chan;
- int i;
-
- chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
- nv_wr32(dev, 0x001700, chan >> 16);
-
- for (i = 0; i < 65536; i += 4)
- nv_wr32(dev, NV04_PRAMIN + i, buf[i/4]);
- vfree(dev_priv->susres.ramin_copy);
- dev_priv->susres.ramin_copy = NULL;
-
- nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
-}
-
-int
-nvc0_instmem_init(struct drm_device *dev)
+nvc0_instmem_takedown(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
- u64 chan, pgt3, imem, lim3 = dev_priv->ramin_size - 1;
- int ret, i;
-
- dev_priv->ramin_rsvd_vram = 1 * 1024 * 1024;
- chan = dev_priv->vram_size - dev_priv->ramin_rsvd_vram;
- imem = 4096 + 4096 + 32768;
-
- nv_wr32(dev, 0x001700, chan >> 16);
-
- /* channel setup */
- nv_wr32(dev, 0x700200, lower_32_bits(chan + 0x1000));
- nv_wr32(dev, 0x700204, upper_32_bits(chan + 0x1000));
- nv_wr32(dev, 0x700208, lower_32_bits(lim3));
- nv_wr32(dev, 0x70020c, upper_32_bits(lim3));
-
- /* point pgd -> pgt */
- nv_wr32(dev, 0x701000, 0);
- nv_wr32(dev, 0x701004, ((chan + 0x2000) >> 8) | 1);
-
- /* point pgt -> physical vram for channel */
- pgt3 = 0x2000;
- for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4096, pgt3 += 8) {
- nv_wr32(dev, 0x700000 + pgt3, ((chan + i) >> 8) | 1);
- nv_wr32(dev, 0x700004 + pgt3, 0);
- }
+ struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
+ struct nouveau_vm *vm = NULL;
- /* clear rest of pgt */
- for (; i < dev_priv->ramin_size; i += 4096, pgt3 += 8) {
- nv_wr32(dev, 0x700000 + pgt3, 0);
- nv_wr32(dev, 0x700004 + pgt3, 0);
- }
+ nvc0_instmem_suspend(dev);
- /* point bar3 at the channel */
- nv_wr32(dev, 0x001714, 0xc0000000 | (chan >> 12));
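+ /* unbind BAR1/BAR3 before taking the channels down */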
+ nv_wr32(dev, 0x1704, 0x00000000);
+ nv_wr32(dev, 0x1714, 0x00000000);
- /* Global PRAMIN heap */
- ret = drm_mm_init(&dev_priv->ramin_heap, imem,
- dev_priv->ramin_size - imem);
- if (ret) {
- NV_ERROR(dev, "Failed to init RAMIN heap\n");
- return -ENOMEM;
- }
+ nvc0_channel_del(&priv->bar1);
+ nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);
- return 0;
-}
+ nvc0_channel_del(&priv->bar3);
+ nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
+ nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
+ nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
+ nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
+ nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);
-void
-nvc0_instmem_takedown(struct drm_device *dev)
-{
+ dev_priv->engine.instmem.priv = NULL;
+ kfree(priv);
}
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+#include "nouveau_drv.h"
+#include "nouveau_mm.h"
+
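+/* no tiled memtypes are supported on fermi yet, only linear */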
+bool
+nvc0_vram_flags_valid(struct drm_device *dev, u32 tile_flags)
+{
+ if (likely(!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK)))
+ return true;
+ return false;
+}
+
+int
+nvc0_vram_new(struct drm_device *dev, u64 size, u32 align, u32 ncmin,
+ u32 type, struct nouveau_vram **pvram)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ struct ttm_mem_type_manager *man = &bdev->man[TTM_PL_VRAM];
+ struct nouveau_mm *mm = man->priv;
+ struct nouveau_mm_node *r;
+ struct nouveau_vram *vram;
+ int ret;
+
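+ /* the mm allocator works in 4KiB page units */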
+ size >>= 12;
+ align >>= 12;
+ ncmin >>= 12;
+
+ vram = kzalloc(sizeof(*vram), GFP_KERNEL);
+ if (!vram)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&vram->regions);
+ vram->dev = dev_priv->dev;
+ vram->memtype = type;
+ vram->size = size;
+
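+ /* pull regions from the allocator until the request is satisfied;
+ * an allocation may span multiple discontiguous regions, each at
+ * least ncmin pages long */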
+ mutex_lock(&mm->mutex);
+ do {
+ ret = nouveau_mm_get(mm, 1, size, ncmin, align, &r);
+ if (ret) {
+ mutex_unlock(&mm->mutex);
+ nv50_vram_del(dev, &vram);
+ return ret;
+ }
+
+ list_add_tail(&r->rl_entry, &vram->regions);
+ size -= r->length;
+ } while (size);
+ mutex_unlock(&mm->mutex);
+
+ r = list_first_entry(&vram->regions, struct nouveau_mm_node, rl_entry);
+ vram->offset = (u64)r->offset << 12;
+ *pvram = vram;
+ return 0;
+}
+
+int
+nvc0_vram_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
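+ /* 0x10f20c appears to hold memory per partition in MiB and 0x121c74
+ * presumably the partition count; neither register is documented */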
+ dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
+ dev_priv->vram_size *= nv_rd32(dev, 0x121c74);
+ dev_priv->vram_rblock_size = 4096;
+ return 0;
+}