/*
* TTM backend functions.
*/
+struct amdgpu_ttm_gup_task_list {
+ struct list_head list;
+ struct task_struct *task;
+};
+
struct amdgpu_ttm_tt {
- struct ttm_dma_tt ttm;
- struct amdgpu_device *adev;
- u64 offset;
- uint64_t userptr;
- struct mm_struct *usermm;
- uint32_t userflags;
+ struct ttm_dma_tt ttm;
+ struct amdgpu_device *adev;
+ u64 offset;
+ uint64_t userptr;
+ struct mm_struct *usermm;
+ uint32_t userflags;
+ spinlock_t guptasklock;
+ struct list_head guptasks;
};
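
The two new fields work as a pair: guptasklock protects guptasks, a list of every task that is currently inside get_user_pages() for this TT. The hunks below register the caller on that list for the duration of the pin and consult it when deciding whether an address-range invalidation really affects this userptr; a runnable userspace sketch of the whole pattern follows the last hunk.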
/* prepare the sg table with the user pages */
unsigned num_pages = ttm->num_pages - pinned;
uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
struct page **pages = ttm->pages + pinned;
+ struct amdgpu_ttm_gup_task_list guptask;
+
+ guptask.task = current;
+ spin_lock(&gtt->guptasklock);
+ list_add(&guptask.list, &gtt->guptasks);
+ spin_unlock(&gtt->guptasklock);
r = get_user_pages(current, current->mm, userptr, num_pages,
write, 0, pages, NULL);
+
+ spin_lock(&gtt->guptasklock);
+ list_del(&guptask.list);
+ spin_unlock(&gtt->guptasklock);
+
if (r < 0)
goto release_pages;
gtt->userptr = addr;
gtt->usermm = current->mm;
gtt->userflags = flags;
+ spin_lock_init(&gtt->guptasklock);
+ INIT_LIST_HEAD(&gtt->guptasks);
+
return 0;
}
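
Note that the guptask entry lives on the caller's stack and is only published while get_user_pages() runs; list_add() and list_del() never sleep, so a plain spinlock is enough here and no allocation or reference counting is needed. The spin_lock_init()/INIT_LIST_HEAD() pair above runs when the userptr is first set up, before any task can take the gup path.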
unsigned long end)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ struct amdgpu_ttm_gup_task_list *entry;
unsigned long size;
- if (gtt == NULL)
- return false;
-
- if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+ if (gtt == NULL || !gtt->userptr)
return false;
size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
if (gtt->userptr > end || gtt->userptr + size <= start)
return false;
+ spin_lock(&gtt->guptasklock);
+ list_for_each_entry(entry, &gtt->guptasks, list) {
+ if (entry->task == current) {
+ spin_unlock(&gtt->guptasklock);
+ return false;
+ }
+ }
+ spin_unlock(&gtt->guptasklock);
+
return true;
}
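
To see the three pieces together outside the kernel, here is a minimal userspace sketch of the same pattern. All names are hypothetical, a pthread mutex stands in for the spinlock and pthread_self() for current: a task registers itself before the blocking pin, and the affect-check reports false for a range invalidation triggered by that task's own get_user_pages(), which is presumably what breaks the recursion through the MMU notifier.

/* Userspace sketch only, not amdgpu code. Build: cc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures above. */
struct gup_task {
	struct gup_task *next;
	pthread_t task;
};

struct user_tt {
	unsigned long userptr;		/* start of the user range */
	unsigned long size;		/* length in bytes */
	pthread_mutex_t guptasklock;	/* stands in for the spinlock */
	struct gup_task *guptasks;	/* tasks currently "in gup" */
};

/* Mirror of the list_add()/list_del() bracket around get_user_pages(). */
static void gup_enter(struct user_tt *tt, struct gup_task *entry)
{
	entry->task = pthread_self();
	pthread_mutex_lock(&tt->guptasklock);
	entry->next = tt->guptasks;
	tt->guptasks = entry;
	pthread_mutex_unlock(&tt->guptasklock);
}

static void gup_exit(struct user_tt *tt, struct gup_task *entry)
{
	struct gup_task **p;

	pthread_mutex_lock(&tt->guptasklock);
	for (p = &tt->guptasks; *p; p = &(*p)->next) {
		if (*p == entry) {
			*p = entry->next;
			break;
		}
	}
	pthread_mutex_unlock(&tt->guptasklock);
}

/*
 * Mirror of amdgpu_ttm_tt_affect_userptr(): the range overlap test
 * first, then "is the caller the one currently pinning these pages?".
 */
static bool tt_affect_userptr(struct user_tt *tt, unsigned long start,
			      unsigned long end)
{
	struct gup_task *e;

	if (tt->userptr > end || tt->userptr + tt->size <= start)
		return false;

	pthread_mutex_lock(&tt->guptasklock);
	for (e = tt->guptasks; e; e = e->next) {
		if (pthread_equal(e->task, pthread_self())) {
			pthread_mutex_unlock(&tt->guptasklock);
			return false;	/* our own gup triggered this */
		}
	}
	pthread_mutex_unlock(&tt->guptasklock);
	return true;
}

int main(void)
{
	struct user_tt tt = {
		.userptr = 0x1000, .size = 0x1000,
		.guptasklock = PTHREAD_MUTEX_INITIALIZER,
	};
	struct gup_task entry;

	printf("outside gup: %d\n", tt_affect_userptr(&tt, 0x1000, 0x2000));
	gup_enter(&tt, &entry);
	printf("inside gup:  %d\n", tt_affect_userptr(&tt, 0x1000, 0x2000));
	gup_exit(&tt, &entry);
	return 0;
}

While the task is registered the check prints 0 even though the ranges overlap, exactly mirroring the early-out in the list_for_each_entry() loop above.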