@@ ... @@ static void amdgpu_mn_destroy(struct work_struct *work)
 	struct amdgpu_mn_node *node, *next_node;
 	struct amdgpu_bo *bo, *next_bo;
 
-	down_write(&rmn->mm->mmap_sem);
 	mutex_lock(&adev->mn_lock);
+	down_write(&rmn->mm->mmap_sem);
 	hash_del(&rmn->node);
 	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
 					     it.rb) {
-
 		interval_tree_remove(&node->it, &rmn->objects);
 		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
 			bo->mn = NULL;
 		}
 		kfree(node);
 	}
-	mutex_unlock(&adev->mn_lock);
 	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&adev->mn_lock);
 	mmu_notifier_unregister_no_release(&rmn->mn, rmn->mm);
 	kfree(rmn);
 }
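
Taken together, the two swaps above flip the nesting in the teardown path: adev->mn_lock is now acquired before the task's mmap_sem, and the two are released in reverse order, so every path agrees on a single lock order. Below is a minimal userspace sketch of that invariant; lock_a, lock_b and teardown_path are illustrative stand-ins, not driver code:

#include <pthread.h>

/* lock_a plays the role of adev->mn_lock, lock_b of mmap_sem. */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

/* Every path locks lock_a before lock_b and unlocks in reverse order;
 * with one global order, an ABBA deadlock cannot form. */
static void teardown_path(void)
{
	pthread_mutex_lock(&lock_a);
	pthread_mutex_lock(&lock_b);
	/* tear down state guarded by both locks */
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
}

If any other path still locked mmap_sem first, two threads could each hold one lock while waiting for the other; that ABBA pattern is exactly what the reordering rules out.
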
@@ ... @@ static struct amdgpu_mn *amdgpu_mn_get(struct amdgpu_device *adev)
 	struct amdgpu_mn *rmn;
 	int r;
 
-	down_write(&mm->mmap_sem);
 	mutex_lock(&adev->mn_lock);
+	down_write(&mm->mmap_sem);
 
 	hash_for_each_possible(adev->mn_hash, rmn, node, (unsigned long)mm)
 		if (rmn->mm == mm)
 			goto release_locks;
 
@@ ... @@
 	hash_add(adev->mn_hash, &rmn->node, (unsigned long)mm);
 
 release_locks:
-	mutex_unlock(&adev->mn_lock);
 	up_write(&mm->mmap_sem);
+	mutex_unlock(&adev->mn_lock);
 
 	return rmn;
 
 free_rmn:
-	mutex_unlock(&adev->mn_lock);
 	up_write(&mm->mmap_sem);
+	mutex_unlock(&adev->mn_lock);
 	kfree(rmn);
 	return ERR_PTR(r);
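
Both exit labels now drop mmap_sem before mn_lock, so the success and error paths unwind in the mirror image of the acquisition order. A hedged sketch of that goto-unwind shape, with hypothetical names (struct obj, get_obj, lock_a, lock_b):

#include <pthread.h>
#include <stdlib.h>

struct obj { int refs; };

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static struct obj *get_obj(void)
{
	struct obj *o;

	pthread_mutex_lock(&lock_a);	/* outer lock first */
	pthread_mutex_lock(&lock_b);	/* inner lock second */

	o = calloc(1, sizeof(*o));
	if (o == NULL)
		goto fail;		/* error path unwinds identically */

	pthread_mutex_unlock(&lock_b);	/* inner lock released first */
	pthread_mutex_unlock(&lock_a);
	return o;

fail:
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}
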
@@ ... @@
 void amdgpu_mn_unregister(struct amdgpu_bo *bo)
 {
 	struct amdgpu_device *adev = bo->adev;
-	struct amdgpu_mn *rmn = bo->mn;
+	struct amdgpu_mn *rmn;
 	struct list_head *head;
 
-	if (rmn == NULL)
+	mutex_lock(&adev->mn_lock);
+
+	rmn = bo->mn;
+	if (rmn == NULL) {
+		mutex_unlock(&adev->mn_lock);
 		return;
+	}
 
 	down_write(&rmn->mm->mmap_sem);
-	mutex_lock(&adev->mn_lock);
 
 	/* save the next list entry for later */
 	head = bo->mn_list.next;
@@ ... @@
 		kfree(node);
 	}
 
-	mutex_unlock(&adev->mn_lock);
 	up_write(&rmn->mm->mmap_sem);
+	mutex_unlock(&adev->mn_lock);
 }
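
This last hunk changes behaviour, not just ordering: bo->mn is now read only while mn_lock is held, which closes the window in which a concurrent amdgpu_mn_destroy() could free the amdgpu_mn between the NULL check and the down_write(). A small sketch of that check-under-lock pattern, using illustrative names (shared, reg_lock, unregister_obj):

#include <pthread.h>
#include <stdlib.h>

struct obj { int dummy; };

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER;
static struct obj *shared;	/* like bo->mn: cleared and freed under reg_lock */

static void unregister_obj(void)
{
	struct obj *o;

	pthread_mutex_lock(&reg_lock);	/* take the lock first ... */
	o = shared;			/* ... then sample the pointer */
	if (o == NULL) {
		pthread_mutex_unlock(&reg_lock);
		return;			/* already torn down */
	}
	/* o stays valid here: any destroy path must also hold reg_lock
	 * before it can clear the pointer and free the object. */
	shared = NULL;
	pthread_mutex_unlock(&reg_lock);
	free(o);
}

Sampling the pointer before taking the lock, as the old code did, leaves a window where the check passes but the object is gone by the time it is used.
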