struct mutex bo_list_lock;
struct idr bo_list_handles;
struct amdgpu_ctx_mgr ctx_mgr;
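+ /* snapshot of adev->vram_lost_counter taken when this file was opened */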
+ u32 vram_lost_counter;
};
atomic64_t num_bytes_moved;
atomic64_t num_evictions;
atomic_t gpu_reset_counter;
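+ /* bumped by the reset path whenever VRAM contents are detected as lost */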
+ atomic_t vram_lost_counter;
/* data for buffer migration throttling */
struct {
extern const struct drm_ioctl_desc amdgpu_ioctls_kms[];
extern const int amdgpu_max_kms_ioctl;
+bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv);
int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags);
void amdgpu_driver_unload_kms(struct drm_device *dev);
void amdgpu_driver_lastclose_kms(struct drm_device *dev);
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_cs *cs = data;
struct amdgpu_cs_parser parser = {};
bool reserved_buffers = false;
if (!adev->accel_working)
return -EBUSY;
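+ /* fail command submission for clients opened before the last VRAM loss */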
+ if (amdgpu_kms_vram_lost(adev, fpriv))
+ return -ENODEV;
parser.adev = adev;
parser.filp = filp;
{
union drm_amdgpu_wait_cs *wait = data;
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
struct amdgpu_ring *ring = NULL;
struct amdgpu_ctx *ctx;
struct dma_fence *fence;
long r;
+ if (amdgpu_kms_vram_lost(adev, fpriv))
+ return -ENODEV;
r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
wait->in.ring, &ring);
if (r)
struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
union drm_amdgpu_wait_fences *wait = data;
uint32_t fence_count = wait->in.fence_count;
struct drm_amdgpu_fence *fences_user;
struct drm_amdgpu_fence *fences;
int r;
+ if (amdgpu_kms_vram_lost(adev, fpriv))
+ return -ENODEV;
/* Get the fences from userspace */
fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
GFP_KERNEL);
if (r)
goto out;
vram_lost = amdgpu_check_vram_lost(adev);
- if (vram_lost)
+ if (vram_lost) {
DRM_ERROR("VRAM is lost!\n");
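+ /* advance the device-wide counter so clients opened before this
+  * reset start failing the amdgpu_kms_vram_lost() check
+  */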
+ atomic_inc(&adev->vram_lost_counter);
+ }
r = amdgpu_ttm_recover_gart(adev);
if (r)
goto out;
args->operation);
return -EINVAL;
}
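+ /* only MAP and REPLACE need the lost-VRAM check; unmap and clear
+  * operations are still allowed after a loss
+  */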
+ if ((args->operation == AMDGPU_VA_OP_MAP) ||
+ (args->operation == AMDGPU_VA_OP_REPLACE)) {
+ if (amdgpu_kms_vram_lost(adev, fpriv))
+ return -ENODEV;
+ }
INIT_LIST_HEAD(&list);
if ((args->operation != AMDGPU_VA_OP_CLEAR) &&
static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
struct amdgpu_device *adev = dev->dev_private;
+ struct amdgpu_fpriv *fpriv = filp->driver_priv;
struct drm_amdgpu_info *info = data;
struct amdgpu_mode_info *minfo = &adev->mode_info;
void __user *out = (void __user *)(uintptr_t)info->return_pointer;
if (!info->return_size || !info->return_pointer)
return -EINVAL;
+ if (amdgpu_kms_vram_lost(adev, fpriv))
+ return -ENODEV;
switch (info->query) {
case AMDGPU_INFO_ACCEL_WORKING:
vga_switcheroo_process_delayed_switch();
}
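+/**
+ * amdgpu_kms_vram_lost - check whether this client predates a VRAM loss
+ *
+ * @adev: amdgpu device pointer
+ * @fpriv: per-file private data holding the counter snapshot
+ *
+ * Returns true if a GPU reset has declared VRAM lost since @fpriv was
+ * created, i.e. its snapshot no longer matches the device-wide counter.
+ */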
+bool amdgpu_kms_vram_lost(struct amdgpu_device *adev,
+ struct amdgpu_fpriv *fpriv)
+{
+ return fpriv->vram_lost_counter != atomic_read(&adev->vram_lost_counter);
+}
+
/**
* amdgpu_driver_open_kms - drm callback for open
*
amdgpu_ctx_mgr_init(&fpriv->ctx_mgr);
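+ /* record the current VRAM-loss epoch so later ioctls can detect a reset */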
+ fpriv->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
file_priv->driver_priv = fpriv;
out_suspend: