struct drm_mm mm;
} vram;
+ struct notifier_block vmap_notifier;
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
void msm_gem_put_vaddr(struct drm_gem_object *obj);
int msm_gem_madvise(struct drm_gem_object *obj, unsigned madv);
void msm_gem_purge(struct drm_gem_object *obj);
+void msm_gem_vunmap(struct drm_gem_object *obj);
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive);
void msm_gem_move_to_active(struct drm_gem_object *obj,
if (msm_obj->vaddr == NULL)
return ERR_PTR(-ENOMEM);
}
+ msm_obj->vmap_count++;
return msm_obj->vaddr;
}
void msm_gem_put_vaddr_locked(struct drm_gem_object *obj)
{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
- /* no-op for now */
+ WARN_ON(msm_obj->vmap_count < 1);
+ msm_obj->vmap_count--;
}
void msm_gem_put_vaddr(struct drm_gem_object *obj)
{
- /* no-op for now */
+ mutex_lock(&obj->dev->struct_mutex);
+ msm_gem_put_vaddr_locked(obj);
+ mutex_unlock(&obj->dev->struct_mutex);
}
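With the refcount in place, a typical caller pairs each get with a put; only once vmap_count drops to zero does the mapping become eligible for the vmap shrinker added below. A minimal usage sketch (illustrative only: example_cpu_fill is a hypothetical caller, and msm_gem_get_vaddr() is assumed to be the pre-existing unlocked get helper, not something this patch adds):

static int example_cpu_fill(struct drm_gem_object *obj, u8 val)
{
	/* takes a vmap_count reference, vmap'ing the BO on first use */
	void *vaddr = msm_gem_get_vaddr(obj);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);

	memset(vaddr, val, obj->size);

	/* drops the reference; the actual vunmap is deferred to the shrinker */
	msm_gem_put_vaddr(obj);
	return 0;
}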
/* Update madvise status, returns true if not purged, else
put_iova(obj);
- vunmap(msm_obj->vaddr);
- msm_obj->vaddr = NULL;
+ msm_gem_vunmap(obj);
put_pages(obj);
0, (loff_t)-1);
}
+void msm_gem_vunmap(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (!msm_obj->vaddr || WARN_ON(!is_vunmapable(msm_obj)))
+ return;
+
+ vunmap(msm_obj->vaddr);
+ msm_obj->vaddr = NULL;
+}
+
/* must be called before _move_to_active().. */
int msm_gem_sync_object(struct drm_gem_object *obj,
struct msm_fence_context *fctx, bool exclusive)
drm_prime_gem_destroy(obj, msm_obj->sgt);
} else {
- vunmap(msm_obj->vaddr);
+ msm_gem_vunmap(obj);
put_pages(obj);
}
*/
uint8_t madv;
+ /**
+ * count of active vmap'ings
+ */
+ uint8_t vmap_count;
+
/* And object is either:
* inactive - on priv->inactive_list
active - on one of the gpu's active_list.. well, at
!msm_obj->base.dma_buf && !msm_obj->base.import_attach;
}
+static inline bool is_vunmapable(struct msm_gem_object *msm_obj)
+{
+ return (msm_obj->vmap_count == 0) && msm_obj->vaddr;
+}
+
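Taken together, vaddr and vmap_count form a small state machine; a summary of the intended invariants (editorial sketch, not patch code):

/*
 * vaddr == NULL                   -> nothing mapped, vunmap is a no-op
 * vaddr != NULL, vmap_count > 0   -> in use; msm_gem_vunmap() WARNs and bails
 * vaddr != NULL, vmap_count == 0  -> is_vunmapable(); safe to reclaim
 */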
#define MAX_CMDS 4
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
return freed;
}
+static int
+msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
+{
+ struct msm_drm_private *priv =
+ container_of(nb, struct msm_drm_private, vmap_notifier);
+ struct drm_device *dev = priv->dev;
+ struct msm_gem_object *msm_obj;
+ unsigned unmapped = 0;
+ bool unlock;
+
+ if (!msm_gem_shrinker_lock(dev, &unlock))
+ return NOTIFY_DONE;
+
+ list_for_each_entry(msm_obj, &priv->inactive_list, mm_list) {
+ if (is_vunmapable(msm_obj)) {
+ msm_gem_vunmap(&msm_obj->base);
+ /* since we don't know any better, let's bail after a few
+ * and if necessary the shrinker will be invoked again.
+ * Seems better than unmapping *everything*.
+ */
+ if (++unmapped >= 15)
+ break;
+ }
+ }
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+
+ *(unsigned long *)ptr += unmapped;
+
+ if (unmapped > 0)
+ pr_info_ratelimited("Purging %u vmaps\n", unmapped);
+
+ return NOTIFY_DONE;
+}
+
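For context, this callback is driven from the vmalloc core: when a vmap address-space allocation fails even after purging lazily freed areas, the core walks the vmap purge notifier chain and retries if any callback reports progress through the counter passed via ptr (which is why the handler above adds unmapped to it). A simplified sketch of that call site (an assumption based on mm/vmalloc.c of the same era, not part of this patch):

/* in alloc_vmap_area(), after purging lazy vmap areas did not help: */
if (gfpflags_allow_blocking(gfp_mask)) {
	unsigned long freed = 0;

	blocking_notifier_call_chain(&vmap_notify_list, 0, &freed);
	if (freed > 0) {
		purged = 0;
		goto retry;	/* a notifier released some vmap space */
	}
}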
/**
* msm_gem_shrinker_init - Initialize msm shrinker
* @dev_priv: msm device
priv->shrinker.scan_objects = msm_gem_shrinker_scan;
priv->shrinker.seeks = DEFAULT_SEEKS;
WARN_ON(register_shrinker(&priv->shrinker));
+
+ priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
+ WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
}
/**
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
+ WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
unregister_shrinker(&priv->shrinker);
}