drm/i915: vfuncs for gtt_clear_range/insert_entries
author     Daniel Vetter <daniel.vetter@ffwll.ch>
           Thu, 24 Jan 2013 22:44:55 +0000 (14:44 -0800)
committer  Daniel Vetter <daniel.vetter@ffwll.ch>
           Thu, 31 Jan 2013 10:50:05 +0000 (11:50 +0100)
We have a few too many differences here, so finally take the prepared
abstraction and run with it. A few smaller changes are required to get
things into shape:

- move i915_cache_level up since we need it in the gtt funcs
- split up i915_ggtt_clear_range and move the two functions down to
  where the relevant insert_entries functions are
- adjustments to a few function parameter lists

Now we have 2 functions which deal with the gen6+ global gtt
(gen6_ggtt_ prefix) and 2 functions which deal with the legacy gtt
code in the intel-gtt.c fake agp driver (i915_ggtt_ prefix).
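
The practical upshot (see e.g. the i915_gem_gtt_unbind_object hunk
below) is that callers now dispatch through the pointers set up at
init time instead of branching on the gen, roughly:

	dev_priv->gtt.gtt_clear_range(dev,
				      obj->gtt_space->start >> PAGE_SHIFT,
				      obj->base.size >> PAGE_SHIFT);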

Init is still a bit of a mess, but honestly I don't care about that.

One thing I've thought about while deciding on the exact interfaces is
a flag parameter for ->clear_range: we could use it to decide between
writing invalid pte entries or scratch pte entries, in case we ever
get around to fixing all the bugs which currently prevent us from
filling the gtt with empty ptes for the truly unused ranges ...
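
Purely as an illustration of that idea (nothing in this patch
implements it, and the names below are made up), such a flag could
look roughly like:

	/* hypothetical sketch only -- not part of this patch */
	enum i915_ggtt_clear_mode {
		GGTT_CLEAR_SCRATCH,	/* point ptes at the scratch page */
		GGTT_CLEAR_INVALID,	/* write empty/invalid ptes */
	};

	void (*gtt_clear_range)(struct drm_device *dev,
				unsigned int first_entry,
				unsigned int num_entries,
				enum i915_ggtt_clear_mode mode);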

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
[bwidawsk: Moved functions to the gtt struct]
Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c

index bffe222e1616d258b0d343790746cc64e256df59..fb0815bebe0f657a3d633ca589dd952f0821517d 100644 (file)
@@ -365,6 +365,12 @@ struct intel_device_info {
        u8 has_llc:1;
 };
 
+enum i915_cache_level {
+       I915_CACHE_NONE = 0,
+       I915_CACHE_LLC,
+       I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
+};
+
 /* The Graphics Translation Table is the way in which GEN hardware translates a
  * Graphics Virtual Address into a Physical Address. In addition to the normal
  * collateral associated with any va->pa translations GEN hardware also has a
@@ -386,6 +392,15 @@ struct i915_gtt {
        bool do_idle_maps;
        dma_addr_t scratch_page_dma;
        struct page *scratch_page;
+
+       /* global gtt ops */
+       void (*gtt_clear_range)(struct drm_device *dev,
+                               unsigned int first_entry,
+                               unsigned int num_entries);
+       void (*gtt_insert_entries)(struct drm_device *dev,
+                                  struct sg_table *st,
+                                  unsigned int pg_start,
+                                  enum i915_cache_level cache_level);
 };
 
 #define I915_PPGTT_PD_ENTRIES 512
@@ -1023,12 +1038,6 @@ enum hdmi_force_audio {
        HDMI_AUDIO_ON,                  /* force turn on HDMI audio */
 };
 
-enum i915_cache_level {
-       I915_CACHE_NONE = 0,
-       I915_CACHE_LLC,
-       I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
-};
-
 #define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
 
 struct drm_i915_gem_object_ops {
index a0ba4a9e53c771832109f0b0771961a6ae29d39d..4712626f8dc1ea27dcaac5af65713402158a94bc 100644 (file)
@@ -367,40 +367,14 @@ static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
                dev_priv->mm.interruptible = interruptible;
 }
 
-static void i915_ggtt_clear_range(struct drm_device *dev,
-                                unsigned first_entry,
-                                unsigned num_entries)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       gtt_pte_t scratch_pte;
-       gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
-       const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
-       int i;
-
-       if (INTEL_INFO(dev)->gen < 6) {
-               intel_gtt_clear_range(first_entry, num_entries);
-               return;
-       }
-
-       if (WARN(num_entries > max_entries,
-                "First entry = %d; Num entries = %d (max=%d)\n",
-                first_entry, num_entries, max_entries))
-               num_entries = max_entries;
-
-       scratch_pte = pte_encode(dev, dev_priv->gtt.scratch_page_dma, I915_CACHE_LLC);
-       for (i = 0; i < num_entries; i++)
-               iowrite32(scratch_pte, &gtt_base[i]);
-       readl(gtt_base);
-}
-
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
 
        /* First fill our portion of the GTT with scratch pages */
-       i915_ggtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
-                             dev_priv->gtt.total / PAGE_SIZE);
+       dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
+                                     dev_priv->gtt.total / PAGE_SIZE);
 
        list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
                i915_gem_clflush_object(obj);
@@ -429,15 +403,13 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
  * within the global GTT as well as accessible by the GPU through the GMADR
  * mapped BAR (dev_priv->mm.gtt->gtt).
  */
-static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
-                                 enum i915_cache_level level)
+static void gen6_ggtt_insert_entries(struct drm_device *dev,
+                                    struct sg_table *st,
+                                    unsigned int first_entry,
+                                    enum i915_cache_level level)
 {
-       struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct sg_table *st = obj->pages;
        struct scatterlist *sg = st->sgl;
-       const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
-       const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
        gtt_pte_t __iomem *gtt_entries =
                (gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
        int unused, i = 0;
@@ -453,9 +425,6 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
                }
        }
 
-       BUG_ON(i > max_entries);
-       BUG_ON(i != obj->base.size / PAGE_SIZE);
-
        /* XXX: This serves as a posting read to make sure that the PTE has
         * actually been updated. There is some concern that even though
         * registers and PTEs are within the same BAR that they are potentially
@@ -473,28 +442,69 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
        POSTING_READ(GFX_FLSH_CNTL_GEN6);
 }
 
+static void gen6_ggtt_clear_range(struct drm_device *dev,
+                                 unsigned int first_entry,
+                                 unsigned int num_entries)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       gtt_pte_t scratch_pte;
+       gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
+       const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
+       int i;
+
+       if (WARN(num_entries > max_entries,
+                "First entry = %d; Num entries = %d (max=%d)\n",
+                first_entry, num_entries, max_entries))
+               num_entries = max_entries;
+
+       scratch_pte = pte_encode(dev, dev_priv->gtt.scratch_page_dma, I915_CACHE_LLC);
+       for (i = 0; i < num_entries; i++)
+               iowrite32(scratch_pte, &gtt_base[i]);
+       readl(gtt_base);
+}
+
+
+static void i915_ggtt_insert_entries(struct drm_device *dev,
+                                    struct sg_table *st,
+                                    unsigned int pg_start,
+                                    enum i915_cache_level cache_level)
+{
+       unsigned int flags = (cache_level == I915_CACHE_NONE) ?
+               AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
+
+       intel_gtt_insert_sg_entries(st, pg_start, flags);
+
+}
+
+static void i915_ggtt_clear_range(struct drm_device *dev,
+                                 unsigned int first_entry,
+                                 unsigned int num_entries)
+{
+       intel_gtt_clear_range(first_entry, num_entries);
+}
+
+
 void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
                              enum i915_cache_level cache_level)
 {
        struct drm_device *dev = obj->base.dev;
-       if (INTEL_INFO(dev)->gen < 6) {
-               unsigned int flags = (cache_level == I915_CACHE_NONE) ?
-                       AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
-               intel_gtt_insert_sg_entries(obj->pages,
-                                           obj->gtt_space->start >> PAGE_SHIFT,
-                                           flags);
-       } else {
-               gen6_ggtt_bind_object(obj, cache_level);
-       }
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
+                                        obj->gtt_space->start >> PAGE_SHIFT,
+                                        cache_level);
 
        obj->has_global_gtt_mapping = 1;
 }
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-       i915_ggtt_clear_range(obj->base.dev,
-                             obj->gtt_space->start >> PAGE_SHIFT,
-                             obj->base.size >> PAGE_SHIFT);
+       struct drm_device *dev = obj->base.dev;
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       dev_priv->gtt.gtt_clear_range(obj->base.dev,
+                                     obj->gtt_space->start >> PAGE_SHIFT,
+                                     obj->base.size >> PAGE_SHIFT);
 
        obj->has_global_gtt_mapping = 0;
 }
@@ -570,13 +580,12 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
                             hole_start, hole_end) {
                DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
                              hole_start, hole_end);
-               i915_ggtt_clear_range(dev,
-                                     hole_start / PAGE_SIZE,
-                                     (hole_end-hole_start) / PAGE_SIZE);
+               dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
+                                             (hole_end-hole_start) / PAGE_SIZE);
        }
 
        /* And finally clear the reserved guard page */
-       i915_ggtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
+       dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
 }
 
 static bool
@@ -718,6 +727,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
 
                dev_priv->gtt.do_idle_maps = needs_idle_maps(dev);
 
+               dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
+               dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
+
                return 0;
        }
 
@@ -771,6 +783,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
        DRM_DEBUG_DRIVER("GMADR size = %ldM\n", dev_priv->gtt.mappable_end >> 20);
        DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
 
+       dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
+       dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
+
        return 0;
 
 err_out: