/* link all shadow bo */
struct list_head shadow_list;
struct mutex shadow_list_lock;
+ /* link all gtt */
+ spinlock_t gtt_list_lock;
+ struct list_head gtt_list;
+
};
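
The new pair tracks every ttm object currently bound into the GART. A spinlock (rather than a mutex, as used for shadow_list) fits the short critical sections in the bind/unbind paths; the point of the list, presumably, is to let the driver walk all live GTT bindings later, e.g. to rebuild the GART table after a reset (see the sketch after the bind hunk below).
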
bool amdgpu_device_is_px(struct drm_device *dev);
INIT_LIST_HEAD(&adev->shadow_list);
mutex_init(&adev->shadow_list_lock);
+ INIT_LIST_HEAD(&adev->gtt_list);
+ spin_lock_init(&adev->gtt_list_lock);
+
if (adev->asic_type >= CHIP_BONAIRE) {
adev->rmmio_base = pci_resource_start(adev->pdev, 5);
adev->rmmio_size = pci_resource_len(adev->pdev, 5);
} else {
adev->rmmio_base = pci_resource_start(adev->pdev, 2);
adev->rmmio_size = pci_resource_len(adev->pdev, 2);
}
+
adev->rmmio = ioremap(adev->rmmio_base, adev->rmmio_size);
if (adev->rmmio == NULL) {
return -ENOMEM;
}
spinlock_t guptasklock;
struct list_head guptasks;
atomic_t mmu_invalidations;
+ struct list_head list;
};
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
ttm->num_pages, (unsigned)gtt->offset);
return r;
}
+ spin_lock(&gtt->adev->gtt_list_lock);
+ list_add_tail(&gtt->list, &gtt->adev->gtt_list);
+ spin_unlock(&gtt->adev->gtt_list_lock);
return 0;
}
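
With bind now registering each ttm on the list, a consumer can walk it under gtt_list_lock. Below is a minimal sketch of such a walker, modeled on the amdgpu_ttm_recover_gart() helper that later kernels add to restore GART entries after a GPU reset; the function name here is illustrative and not part of this patch, while amdgpu_ttm_tt_pte_flags() and amdgpu_gart_bind() are the driver's existing helpers.

/*
 * Hypothetical consumer of the new list: after the GART table is
 * lost (e.g. across a GPU reset), re-write the entries of every
 * ttm that is still bound into GTT.
 */
static int amdgpu_ttm_rebind_gtt(struct amdgpu_device *adev)
{
	struct amdgpu_ttm_tt *gtt;
	struct ttm_mem_reg bo_mem;
	uint32_t flags;
	int r;

	bo_mem.mem_type = TTM_PL_TT;
	/* the lock blocks concurrent bind/unbind for the whole walk */
	spin_lock(&adev->gtt_list_lock);
	list_for_each_entry(gtt, &adev->gtt_list, list) {
		flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &bo_mem);
		r = amdgpu_gart_bind(adev, gtt->offset,
				     gtt->ttm.ttm.num_pages, gtt->ttm.ttm.pages,
				     gtt->ttm.dma_address, flags);
		if (r) {
			spin_unlock(&adev->gtt_list_lock);
			return r;
		}
	}
	spin_unlock(&adev->gtt_list_lock);
	return 0;
}
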
if (gtt->userptr)
amdgpu_ttm_tt_unpin_userptr(ttm);
+ spin_lock(&gtt->adev->gtt_list_lock);
+ list_del_init(&gtt->list);
+ spin_unlock(&gtt->adev->gtt_list_lock);
+
return 0;
}
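
Note the use of list_del_init() rather than list_del(): it leaves gtt->list as a valid empty list, so a repeated unbind, or a later list_del_init() on a never-rebound ttm, operates on sane pointers instead of LIST_POISON values.
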
kfree(gtt);
return NULL;
}
+ INIT_LIST_HEAD(&gtt->list);
return &gtt->ttm.ttm;
}
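
The matching INIT_LIST_HEAD() in amdgpu_ttm_tt_create() gives the node a defined state from creation, so the list_del_init() in the unbind path is safe even for a ttm that was never bound.
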