int nvkm_gpuobj_new(struct nvkm_object *, struct nvkm_object *, u32 size,
u32 align, u32 flags, struct nvkm_gpuobj **);
-int nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_gpuobj *,
+int nvkm_gpuobj_dup(struct nvkm_object *, struct nvkm_memory *,
struct nvkm_gpuobj **);
int nvkm_gpuobj_map(struct nvkm_gpuobj *, u32 acc, struct nvkm_vma *);
int nvkm_gpuobj_map_vm(struct nvkm_gpuobj *, struct nvkm_vm *, u32 access,
		       struct nvkm_vma *);

struct nvkm_mem;
struct nvkm_vm_pgt {
- struct nvkm_gpuobj *obj[2];
+ struct nvkm_memory *mem[2];
u32 refcount[2];
};
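
The two slots in nvkm_vm_pgt are indexed by page size: slot 0 holds the small-page (4KiB) table for a PDE and slot 1 the large-page table, each with its own refcount, as the mem[big] accesses further down show. A minimal stand-in sketch of the new layout, with the nvkm type assumed:

struct nvkm_memory;

/* sketch: one entry per PDE, one table per page size */
struct example_vm_pgt {
	struct nvkm_memory *mem[2];	/* [0] = small pages, [1] = big */
	u32 refcount[2];		/* users of each table */
};

static struct nvkm_memory *
example_pgt_mem(struct example_vm_pgt *vpgt, bool big)
{
	return vpgt->mem[big];	/* selected by mapping page size */
}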
	int  (*create)(struct nvkm_mmu *, u64 offset, u64 length,
		       u64 mm_offset, struct nvkm_vm **);
void (*map_pgt)(struct nvkm_gpuobj *pgd, u32 pde,
- struct nvkm_gpuobj *pgt[2]);
- void (*map)(struct nvkm_vma *, struct nvkm_gpuobj *,
+ struct nvkm_memory *pgt[2]);
+ void (*map)(struct nvkm_vma *, struct nvkm_memory *,
struct nvkm_mem *, u32 pte, u32 cnt,
u64 phys, u64 delta);
- void (*map_sg)(struct nvkm_vma *, struct nvkm_gpuobj *,
+ void (*map_sg)(struct nvkm_vma *, struct nvkm_memory *,
struct nvkm_mem *, u32 pte, u32 cnt, dma_addr_t *);
- void (*unmap)(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt);
+ void (*unmap)(struct nvkm_vma *, struct nvkm_memory *pgt,
+ u32 pte, u32 cnt);
void (*flush)(struct nvkm_vm *);
};
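
Note the unmap hook's new vma parameter. With page tables now bare nvkm_memory objects, a backend can no longer climb from the pgt back to its subdev (the old nvkm_mmu(pgt) upcast, removed in the nv44 hunk below); the vma supplies that path via vma->vm->mmu. A minimal sketch of a backend unmap under the new signature, assuming the nvkm_kmap()/nvkm_done() accessors and 8-byte PTEs:

static void
example_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt,
		 u32 pte, u32 cnt)
{
	struct nvkm_mmu *mmu = vma->vm->mmu;	/* back-pointer via vma */

	nvkm_kmap(pgt);
	while (cnt--) {				/* invalidate each PTE */
		nvkm_wo32(pgt, (pte << 3) + 0, 0x00000000);
		nvkm_wo32(pgt, (pte << 3) + 4, 0x00000000);
		pte++;
	}
	nvkm_done(pgt);
	(void)mmu;	/* real backends use this for flushes etc. */
}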
static void
nvkm_gpudup_dtor(struct nvkm_object *object)
{
struct nvkm_gpuobj *gpuobj = (void *)object;
- nvkm_object_ref(NULL, (struct nvkm_object **)&gpuobj->parent);
nvkm_object_destroy(&gpuobj->object);
}
};
int
-nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_gpuobj *base,
+nvkm_gpuobj_dup(struct nvkm_object *parent, struct nvkm_memory *base,
struct nvkm_gpuobj **pgpuobj)
{
struct nvkm_gpuobj *gpuobj;
if (ret)
return ret;
- nvkm_object_ref(nv_object(base), (struct nvkm_object **)&gpuobj->parent);
- gpuobj->addr = base->addr;
- gpuobj->size = base->size;
+ gpuobj->addr = nvkm_memory_addr(base);
+ gpuobj->size = nvkm_memory_size(base);
return 0;
}
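
With the new signature, callers hand nvkm_gpuobj_dup() a bare nvkm_memory and get back a gpuobj-shaped view of it; address and size are sampled through the accessors rather than copied from another gpuobj, and no object reference is taken on the source. A hypothetical caller, mirroring the dmaobj clone path below:

/* hypothetical helper, for illustration only */
static int
example_expose_pgt(struct nvkm_object *parent, struct nvkm_vm *vm,
		   struct nvkm_gpuobj **pgpuobj)
{
	struct nvkm_memory *pgt = vm->pgt[0].mem[0];
	return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
}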
if (dmaobj->clone) {
struct nv04_mmu *mmu = nv04_mmu(dmaobj);
- struct nvkm_gpuobj *pgt = mmu->vm->pgt[0].obj[0];
+ struct nvkm_memory *pgt = mmu->vm->pgt[0].mem[0];
if (!dmaobj->base.start)
return nvkm_gpuobj_dup(parent, pgt, pgpuobj);
nvkm_kmap(pgt);
nvkm_gpuobj_ref(NULL, &bar->bar[1].mem);
if (bar->bar[0].vm) {
- nvkm_gpuobj_ref(NULL, &bar->bar[0].vm->pgt[0].obj[0]);
+ nvkm_memory_del(&bar->bar[0].vm->pgt[0].mem[0]);
nvkm_vm_ref(NULL, &bar->bar[0].vm, bar->bar[0].pgd);
}
nvkm_gpuobj_ref(NULL, &bar->bar[0].pgd);
nvkm_vm_ref(NULL, &bar->bar1_vm, bar->pgd);
nvkm_gpuobj_ref(NULL, &bar->bar3);
if (bar->bar3_vm) {
- nvkm_gpuobj_ref(NULL, &bar->bar3_vm->pgt[0].obj[0]);
+ nvkm_memory_del(&bar->bar3_vm->pgt[0].mem[0]);
nvkm_vm_ref(NULL, &bar->bar3_vm, bar->pgd);
}
nvkm_gpuobj_ref(NULL, &bar->pgd);
u32 num = r->length >> bits;
while (num) {
- struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+ struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
end = (pte + num);
if (unlikely(end >= max))
struct scatterlist *sg;
for_each_sg(mem->sg->sgl, sg, mem->sg->nents, i) {
- struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+ struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
sglen = sg_dma_len(sg) >> PAGE_SHIFT;
end = pte + sglen;
u32 end, len;
while (num) {
- struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+ struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
end = (pte + num);
if (unlikely(end >= max))
u32 end, len;
while (num) {
- struct nvkm_gpuobj *pgt = vm->pgt[pde].obj[big];
+ struct nvkm_memory *pgt = vm->pgt[pde].mem[big];
end = (pte + num);
if (unlikely(end >= max))
end = max;
len = end - pte;
- mmu->unmap(pgt, pte, len);
+ mmu->unmap(vma, pgt, pte, len);
num -= len;
pte += len;
struct nvkm_mmu *mmu = vm->mmu;
struct nvkm_vm_pgd *vpgd;
struct nvkm_vm_pgt *vpgt;
- struct nvkm_gpuobj *pgt;
+ struct nvkm_memory *pgt;
u32 pde;
for (pde = fpde; pde <= lpde; pde++) {
if (--vpgt->refcount[big])
continue;
- pgt = vpgt->obj[big];
- vpgt->obj[big] = NULL;
+ pgt = vpgt->mem[big];
+ vpgt->mem[big] = NULL;
list_for_each_entry(vpgd, &vm->pgd_list, head) {
- mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+ mmu->map_pgt(vpgd->obj, pde, vpgt->mem);
}
- nvkm_gpuobj_ref(NULL, &pgt);
+ nvkm_memory_del(&pgt);
}
}
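
The teardown above follows a strict order once a slot's refcount drops to zero: the memory pointer is moved out of the slot, every attached page directory is rewritten (map_pgt now sees a NULL slot and writes an invalid PDE), and only then is the backing memory deleted. A self-contained sketch of that pattern, with the nvkm types replaced by stand-ins:

struct example_slot {
	void *mem;
	unsigned int refcount;
};

static void
example_slot_put(struct example_slot *s,
		 void (*rewrite_pdes)(void *pgt),	/* cf. mmu->map_pgt */
		 void (*del)(void *pgt))		/* cf. nvkm_memory_del */
{
	void *pgt;

	if (--s->refcount)
		return;
	pgt = s->mem;
	s->mem = NULL;		/* PDE rewrite below must see the slot empty */
	rewrite_pdes(s->mem);	/* passes NULL: clears the PDE in each PD */
	del(pgt);		/* finally release the backing memory */
}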
pgt_size = (1 << (mmu->pgt_bits + 12)) >> type;
pgt_size *= 8;
- ret = nvkm_gpuobj_new(nv_object(vm->mmu), NULL, pgt_size, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &vpgt->obj[big]);
+ ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+ pgt_size, 0x1000, true, &vpgt->mem[big]);
if (unlikely(ret))
return ret;
list_for_each_entry(vpgd, &vm->pgd_list, head) {
- mmu->map_pgt(vpgd->obj, pde, vpgt->obj);
+ mmu->map_pgt(vpgd->obj, pde, vpgt->mem);
}
vpgt->refcount[big]++;
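
For scale, a worked example of the pgt_size computation above, using a hypothetical pgt_bits of 15: one PDE then spans 1 << (15 + 12) = 128MiB, which with 4KiB pages (type = 12) is 32768 PTEs, i.e. a 256KiB table at 8 bytes per PTE; that is the size nvkm_memory_new() is asked to allocate, 0x1000-aligned and zeroed (the true argument).

/* worked sizing, hypothetical pgt_bits = 15, 4KiB pages */
static u32
example_pgt_bytes(u32 pgt_bits, u32 type)
{
	u32 ptes = (1u << (pgt_bits + 12)) >> type;	/* 32768 */
	return ptes * 8;				/* 262144 = 256KiB */
}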
nvkm_vm_boot(struct nvkm_vm *vm, u64 size)
{
struct nvkm_mmu *mmu = vm->mmu;
- struct nvkm_gpuobj *pgt;
+ struct nvkm_memory *pgt;
int ret;
- ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
- (size >> mmu->spg_shift) * 8, 0x1000,
- NVOBJ_FLAG_ZERO_ALLOC, &pgt);
+ ret = nvkm_memory_new(mmu->subdev.device, NVKM_MEM_TARGET_INST,
+ (size >> mmu->spg_shift) * 8, 0x1000, true, &pgt);
if (ret == 0) {
vm->pgt[0].refcount[0] = 1;
- vm->pgt[0].obj[0] = pgt;
- nvkm_memory_boot(pgt->memory, vm);
+ vm->pgt[0].mem[0] = pgt;
+ nvkm_memory_boot(pgt, vm);
}
return ret;
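
nvkm_vm_boot() pre-allocates a single flat small-page table covering the whole range and hands it straight to nvkm_memory_boot(), now without the old pgt->memory indirection. Worked sizing, with hypothetical numbers: a 16MiB range at a 12-bit small-page shift needs 16MiB >> 12 = 4096 PTEs, so the table allocated above is 4096 * 8 = 32KiB.

/* worked sizing for the boot page table, hypothetical inputs */
static u64
example_boot_pgt_bytes(u64 size, u8 spg_shift)
{
	return (size >> spg_shift) * 8;	/* 16MiB, shift 12 -> 32KiB */
}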
mutex_lock(&vm->mutex);
for (i = vm->fpde; i <= vm->lpde; i++)
- mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
+ mmu->map_pgt(pgd, i, vm->pgt[i - vm->fpde].mem);
list_add(&vpgd->head, &vm->pgd_list);
mutex_unlock(&vm->mutex);
return 0;
static void
-gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_gpuobj *pgt[2])
+gf100_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 index, struct nvkm_memory *pgt[2])
{
u32 pde[2] = { 0, 0 };
if (pgt[0])
- pde[1] = 0x00000001 | (pgt[0]->addr >> 8);
+ pde[1] = 0x00000001 | (nvkm_memory_addr(pgt[0]) >> 8);
if (pgt[1])
- pde[0] = 0x00000001 | (pgt[1]->addr >> 8);
+ pde[0] = 0x00000001 | (nvkm_memory_addr(pgt[1]) >> 8);
nvkm_kmap(pgd);
nvkm_wo32(pgd, (index * 8) + 0, pde[0]);
}
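
The gf100 PDE format visible above packs a present bit into bit 0 and the table address, shifted right by 8, into the upper bits; note that the small-page table (pgt[0]) lands in the second PDE word and the large-page table (pgt[1]) in the first. A sketch of the encoding, with a hypothetical table at 0x00200000 yielding 0x00002001:

/* gf100 PDE word as written above: present bit | (addr >> 8) */
static u32
example_gf100_pde(u64 pgt_addr)
{
	return 0x00000001 | (u32)(pgt_addr >> 8);	/* 0x200000 -> 0x2001 */
}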
static void
-gf100_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
u64 next = 1 << (vma->node->type - 8);
}
static void
-gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+gf100_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 7 : 5;
}
static void
-gf100_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+gf100_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
nvkm_kmap(pgt);
pte <<= 3;
******************************************************************************/
static void
-nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv04_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte = 0x00008 + (pte * 4);
}
static void
-nv04_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv04_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
pte = 0x00008 + (pte * 4);
nvkm_kmap(pgt);
struct nvkm_oclass *oclass, void *data, u32 size,
struct nvkm_object **pobject)
{
+ struct nvkm_device *device = (void *)parent;
struct nv04_mmu *mmu;
- struct nvkm_gpuobj *dma;
+ struct nvkm_memory *dma;
int ret;
ret = nvkm_mmu_create(parent, engine, oclass, "PCIGART",
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
(NV04_PDMA_SIZE / NV04_PDMA_PAGE) * 4 + 8,
- 16, NVOBJ_FLAG_ZERO_ALLOC,
- &mmu->vm->pgt[0].obj[0]);
- dma = mmu->vm->pgt[0].obj[0];
+ 16, true, &dma);
+ mmu->vm->pgt[0].mem[0] = dma;
mmu->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
{
struct nv04_mmu *mmu = (void *)object;
if (mmu->vm) {
- nvkm_gpuobj_ref(NULL, &mmu->vm->pgt[0].obj[0]);
+ nvkm_memory_del(&mmu->vm->pgt[0].mem[0]);
nvkm_vm_ref(NULL, &mmu->vm, NULL);
}
if (mmu->nullp) {
******************************************************************************/
static void
-nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv41_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
pte = pte * 4;
}
static void
-nv41_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv41_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
pte = pte * 4;
nvkm_kmap(pgt);
static void
nv41_vm_flush(struct nvkm_vm *vm)
{
- struct nv04_mmu *mmu = (void *)vm->mmu;
+ struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
struct nvkm_device *device = mmu->base.subdev.device;
mutex_lock(&nv_subdev(mmu)->mutex);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
- (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16,
- NVOBJ_FLAG_ZERO_ALLOC,
- &mmu->vm->pgt[0].obj[0]);
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
+ (NV41_GART_SIZE / NV41_GART_PAGE) * 4, 16, true,
+ &mmu->vm->pgt[0].mem[0]);
mmu->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
{
struct nv04_mmu *mmu = (void *)object;
struct nvkm_device *device = mmu->base.subdev.device;
- struct nvkm_gpuobj *dma = mmu->vm->pgt[0].obj[0];
+ struct nvkm_memory *dma = mmu->vm->pgt[0].mem[0];
int ret;
ret = nvkm_mmu_init(&mmu->base);
if (ret)
return ret;
- nvkm_wr32(device, 0x100800, dma->addr | 0x00000002);
+ nvkm_wr32(device, 0x100800, 0x00000002 | nvkm_memory_addr(dma));
nvkm_mask(device, 0x10008c, 0x00000100, 0x00000100);
nvkm_wr32(device, 0x100820, 0x00000000);
return 0;
******************************************************************************/
static void
-nv44_vm_fill(struct nvkm_gpuobj *pgt, dma_addr_t null,
+nv44_vm_fill(struct nvkm_memory *pgt, dma_addr_t null,
dma_addr_t *list, u32 pte, u32 cnt)
{
u32 base = (pte << 2) & ~0x0000000f;
}
static void
-nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv44_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
- struct nv04_mmu *mmu = (void *)vma->vm->mmu;
+ struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
u32 tmp[4];
int i;
}
static void
-nv44_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv44_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
- struct nv04_mmu *mmu = (void *)nvkm_mmu(pgt);
+ struct nv04_mmu *mmu = nv04_mmu(vma->vm->mmu);
nvkm_kmap(pgt);
if (pte & 3) {
static void
nv44_vm_flush(struct nvkm_vm *vm)
{
- struct nv04_mmu *mmu = (void *)vm->mmu;
+ struct nv04_mmu *mmu = nv04_mmu(vm->mmu);
struct nvkm_device *device = mmu->base.subdev.device;
nvkm_wr32(device, 0x100814, mmu->base.limit - NV44_GART_PAGE);
nvkm_wr32(device, 0x100808, 0x00000020);
if (ret)
return ret;
- ret = nvkm_gpuobj_new(nv_object(mmu), NULL,
+ ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST,
(NV44_GART_SIZE / NV44_GART_PAGE) * 4,
- 512 * 1024, NVOBJ_FLAG_ZERO_ALLOC,
- &mmu->vm->pgt[0].obj[0]);
+ 512 * 1024, true,
+ &mmu->vm->pgt[0].mem[0]);
mmu->vm->pgt[0].refcount[0] = 1;
if (ret)
return ret;
{
struct nv04_mmu *mmu = (void *)object;
struct nvkm_device *device = mmu->base.subdev.device;
- struct nvkm_gpuobj *gart = mmu->vm->pgt[0].obj[0];
+ struct nvkm_memory *gart = mmu->vm->pgt[0].mem[0];
u32 addr;
int ret;
* of 512KiB for this to work correctly
*/
addr = nvkm_rd32(device, 0x10020c);
- addr -= ((gart->addr >> 19) + 1) << 19;
+ addr -= ((nvkm_memory_addr(gart) >> 19) + 1) << 19;
nvkm_wr32(device, 0x100850, 0x80000000);
nvkm_wr32(device, 0x100818, mmu->null);
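
The subtraction above locates the 512KiB window the table must live in (per the truncated comment, the table may not exceed 512KiB). Worked arithmetic with a hypothetical table address: for nvkm_memory_addr(gart) = 0x30000, ((0x30000 >> 19) + 1) << 19 = 0x80000, the 512KiB boundary just above the table, which is what gets subtracted from the value read at 0x10020c.

/* worked example: 512KiB boundary above a hypothetical table address */
static u64
example_nv44_boundary(u64 gart_addr)
{
	return ((gart_addr >> 19) + 1) << 19;	/* 0x30000 -> 0x80000 */
}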
#include <core/gpuobj.h>
static void
-nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_gpuobj *pgt[2])
+nv50_vm_map_pgt(struct nvkm_gpuobj *pgd, u32 pde, struct nvkm_memory *pgt[2])
{
u64 phys = 0xdeadcafe00000000ULL;
u32 coverage = 0;
if (pgt[0]) {
- phys = 0x00000003 | pgt[0]->addr; /* present, 4KiB pages */
- coverage = (pgt[0]->size >> 3) << 12;
+ /* present, 4KiB pages */
+ phys = 0x00000003 | nvkm_memory_addr(pgt[0]);
+ coverage = (nvkm_memory_size(pgt[0]) >> 3) << 12;
} else
if (pgt[1]) {
- phys = 0x00000001 | pgt[1]->addr; /* present */
- coverage = (pgt[1]->size >> 3) << 16;
+ /* present, 64KiB pages */
+ phys = 0x00000001 | nvkm_memory_addr(pgt[1]);
+ coverage = (nvkm_memory_size(pgt[1]) >> 3) << 16;
}
if (phys & 1) {
}
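
nv50 PDEs carry a coverage field alongside the address, derived from the table size: each PTE is 8 bytes, so size >> 3 gives the entry count, shifted by the page size (12 for 4KiB, 16 for 64KiB) to get bytes of address space. Worked example: a hypothetical 32KiB small-page table holds 4096 PTEs and covers 16MiB; interpreted as a large-page table it would cover 256MiB.

/* coverage as computed above; big selects 64KiB pages */
static u64
example_nv50_coverage(u64 pgt_size, bool big)
{
	return (pgt_size >> 3) << (big ? 16 : 12);	/* 32KiB -> 16MiB / 256MiB */
}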
static void
-nv50_vm_map(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, u64 phys, u64 delta)
{
u32 comp = (mem->memtype & 0x180) >> 7;
}
static void
-nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_gpuobj *pgt,
+nv50_vm_map_sg(struct nvkm_vma *vma, struct nvkm_memory *pgt,
struct nvkm_mem *mem, u32 pte, u32 cnt, dma_addr_t *list)
{
u32 target = (vma->access & NV_MEM_ACCESS_NOSNOOP) ? 3 : 2;
}
static void
-nv50_vm_unmap(struct nvkm_gpuobj *pgt, u32 pte, u32 cnt)
+nv50_vm_unmap(struct nvkm_vma *vma, struct nvkm_memory *pgt, u32 pte, u32 cnt)
{
pte <<= 3;
nvkm_kmap(pgt);