void nouveau_vm_map_at(struct nouveau_vma *, u64 offset, struct nouveau_mem *);
void nouveau_vm_unmap(struct nouveau_vma *);
void nouveau_vm_unmap_at(struct nouveau_vma *, u64 offset, u64 length);
-void nouveau_vm_map_sg(struct nouveau_vma *, u64 offset, u64 length,
- struct nouveau_mem *);
-void nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
- struct nouveau_mem *mem);
#endif
vmm->flush(vm);
}
-void
-nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
-{
- nouveau_vm_map_at(vma, 0, node);
-}
-
-void
+static void
nouveau_vm_map_sg_table(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem)
{
vmm->flush(vm);
}
-void
+static void
nouveau_vm_map_sg(struct nouveau_vma *vma, u64 delta, u64 length,
struct nouveau_mem *mem)
{
vmm->flush(vm);
}
+void
+nouveau_vm_map(struct nouveau_vma *vma, struct nouveau_mem *node)
+{
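+	/* Pick the mapping backend from the node's backing storage:
+	 * an sg table, an array of DMA addresses, or contiguous VRAM.
+	 */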
+ if (node->sg)
+ nouveau_vm_map_sg_table(vma, 0, node->size << 12, node);
+ else
+ if (node->pages)
+ nouveau_vm_map_sg(vma, 0, node->size << 12, node);
+ else
+ nouveau_vm_map_at(vma, 0, node);
+}
+
void
nouveau_vm_unmap_at(struct nouveau_vma *vma, u64 delta, u64 length)
{
if (ret)
return ret;
- if (mem->mem_type == TTM_PL_VRAM)
- nouveau_vm_map(vma, node);
- else
- nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
-
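+	/* nouveau_vm_map() handles VRAM, sg-table and DMA-page
+	 * backed nodes alike.
+	 */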
+ nouveau_vm_map(vma, node);
return 0;
}
return;
list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
+ if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
+ (new_mem->mem_type == TTM_PL_VRAM ||
+ nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
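+			/* VRAM can always stay mapped; GART mappings
+			 * are only kept for small page BOs.
+			 */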
nouveau_vm_map(vma, new_mem->mm_node);
- } else
- if (new_mem && new_mem->mem_type == TTM_PL_TT &&
- nvbo->page_shift == vma->vm->vmm->spg_shift) {
- if (((struct nouveau_mem *)new_mem->mm_node)->sg)
- nouveau_vm_map_sg_table(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
- else
- nouveau_vm_map_sg(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
} else {
nouveau_vm_unmap(vma);
}
struct nouveau_vma *vma)
{
const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- struct nouveau_mem *node = nvbo->bo.mem.mm_node;
int ret;
ret = nouveau_vm_get(vm, size, nvbo->page_shift,
if (ret)
return ret;
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
+	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
- else if (nvbo->bo.mem.mem_type == TTM_PL_TT &&
- nvbo->page_shift == vma->vm->vmm->spg_shift) {
- if (node->sg)
- nouveau_vm_map_sg_table(vma, 0, size, node);
- else
- nouveau_vm_map_sg(vma, 0, size, node);
- }
list_add_tail(&vma->head, &nvbo->vma_list);
vma->refcount = 1;
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *node = mem->mm_node;
- u64 size = mem->num_pages << 12;
if (ttm->sg) {
- node->sg = ttm->sg;
- nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
+ node->sg = ttm->sg;
+ node->pages = NULL;
} else {
+ node->sg = NULL;
node->pages = nvbe->ttm.dma_address;
- nouveau_vm_map_sg(&node->vma[0], 0, size, node);
}
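+	/* node->size is measured in 4KiB GPU pages, so convert from
+	 * CPU pages before mapping.
+	 */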
+ node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+ nouveau_vm_map(&node->vma[0], node);
nvbe->node = node;
return 0;
}
/* noop: bound in move_notify() */
if (ttm->sg) {
- node->sg = ttm->sg;
- } else
+ node->sg = ttm->sg;
+ node->pages = NULL;
+ } else {
+ node->sg = NULL;
node->pages = nvbe->ttm.dma_address;
+ }
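+	/* node->size is in units of 4KiB GPU pages. */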
+ node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
return 0;
}