bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
			      uint32_t flags);
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
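+/* returns the mm_struct that created the userptr, or NULL */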
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm);
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
				  unsigned long end);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
	bool has_userptr = false;
	unsigned i;
+	int r;

	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
	if (!array)
		return -ENOMEM;

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry = &array[i];
		struct drm_gem_object *gobj;
+		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
-		if (!gobj)
+		if (!gobj) {
+			r = -ENOENT;
			goto error_free;
+		}
		entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_unreference_unlocked(gobj);
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
-		if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm))
+		usermm = amdgpu_ttm_tt_get_usermm(entry->robj->tbo.ttm);
+		if (usermm) {
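+			/* only the creating process may use a userptr BO */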
+			if (usermm != current->mm) {
+				r = -EPERM;
+				goto error_free;
+			}
			has_userptr = true;
+		}
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;
error_free:
	drm_free_large(array);
-	return -ENOENT;
+	return r;
}
struct amdgpu_bo_list *
	p->uf.bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf.offset = fence_data->offset;
-	if (amdgpu_ttm_tt_has_userptr(p->uf.bo->tbo.ttm)) {
+	if (amdgpu_ttm_tt_get_usermm(p->uf.bo->tbo.ttm)) {
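+		/* the user fence BO may not be a userptr */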
		drm_gem_object_unreference_unlocked(gobj);
		return -EINVAL;
	}
	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = lobj->robj;
+		struct mm_struct *usermm;
		uint32_t domain;

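+		/* reject userptr BOs not belonging to the current process */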
+		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
+		if (usermm && usermm != current->mm)
+			return -EPERM;
+
		if (bo->pin_count)
			continue;
		return -ENOENT;
	}
	robj = gem_to_amdgpu_bo(gobj);
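+	/* deny mmap of userptr BOs and BOs without CPU access */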
-	if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm) ||
+	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		drm_gem_object_unreference_unlocked(gobj);
		return -EPERM;
	}
		break;
	}
	case AMDGPU_GEM_OP_SET_PLACEMENT:
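+		/* the placement of a userptr BO cannot be changed */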
-		if (amdgpu_ttm_tt_has_userptr(robj->tbo.ttm)) {
+		if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm)) {
			r = -EPERM;
			amdgpu_bo_unreserve(robj);
			break;
	int r, i;
	unsigned fpfn, lpfn;

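+	/* userptr BOs can never be pinned */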
-	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return -EPERM;

	if (WARN_ON_ONCE(min_offset > max_offset))
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

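+	/* userptr BOs may not be exported via DMA-buf */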
-	if (amdgpu_ttm_tt_has_userptr(bo->tbo.ttm))
+	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return ERR_PTR(-EPERM);

	return drm_gem_prime_export(dev, gobj, flags);
	enum dma_data_direction direction = write ?
		DMA_BIDIRECTIONAL : DMA_TO_DEVICE;

-	if (current->mm != gtt->usermm)
-		return -EPERM;
-
	if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
		/* check that we only pin down anonymous memory
		   to prevent problems with writeback */
	return 0;
}
-bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
+struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
{
	struct amdgpu_ttm_tt *gtt = (void *)ttm;

	if (gtt == NULL)
-		return false;
+		return NULL;

-	return !!gtt->userptr;
+	return gtt->usermm;
}
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,