	spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}
+/**
+ * amdgpu_vm_prt_put - drop a PRT user
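+ * @adev: amdgpu device structure
+ *
+ * Disable the PRT state again once the last user is gone.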
+ */
+static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
+{
+	if (atomic_dec_return(&adev->vm_manager.num_prt_mappings) == 0)
+		amdgpu_vm_update_prt_state(adev);
+}
+
/**
 * amdgpu_vm_prt_cb - callback for updating the PRT status
 */
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct amdgpu_prt_cb *cb = container_of(_cb, struct amdgpu_prt_cb, cb);

-	if (atomic_dec_return(&cb->adev->vm_manager.num_prt_mappings) == 0)
-		amdgpu_vm_update_prt_state(cb->adev);
+	amdgpu_vm_prt_put(cb->adev);
	kfree(cb);
}
		struct amdgpu_prt_cb *cb = kmalloc(sizeof(struct amdgpu_prt_cb),
						   GFP_KERNEL);

-		cb->adev = adev;
-		if (!fence || dma_fence_add_callback(fence, &cb->cb,
-						     amdgpu_vm_prt_cb))
-			amdgpu_vm_prt_cb(fence, &cb->cb);
+		if (!cb) {
+			/* Last resort when we are OOM */
+			if (fence)
+				dma_fence_wait(fence, false);
+
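+			/* No callback was installed, so drop the PRT
+			 * reference directly; the fence, if any, has
+			 * already been waited on above.
+			 */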
+			amdgpu_vm_prt_put(adev);
+		} else {
+			cb->adev = adev;
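+			/* If there is no fence, or the fence has already
+			 * signaled so the callback cannot be installed,
+			 * run the callback directly to drop the reference.
+			 */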
+			if (!fence || dma_fence_add_callback(fence, &cb->cb,
+							     amdgpu_vm_prt_cb))
+				amdgpu_vm_prt_cb(fence, &cb->cb);
+		}
	}
	kfree(mapping);
}
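
For context, the put helper above presumably pairs with an increment wherever a PRT mapping is created. A minimal sketch of that counterpart, assuming the same num_prt_mappings counter (the name amdgpu_vm_prt_get is illustrative; the get side is not part of this hunk):

static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
	/* Hypothetical counterpart to amdgpu_vm_prt_put(): enable the
	 * PRT state when the first user shows up.
	 */
	if (atomic_inc_return(&adev->vm_manager.num_prt_mappings) == 1)
		amdgpu_vm_update_prt_state(adev);
}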