fb_format_modifier));
}
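+/*
+ * Select the GGTT view used to map a framebuffer for scanout. For now this
+ * always picks the normal (unrotated) view; plane_state is ignored but is
+ * plumbed through so that rotated views can be selected here later.
+ */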
+static int
+intel_fill_fb_ggtt_view(struct i915_ggtt_view *view, struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state)
+{
+ *view = i915_ggtt_view_normal;
+
+ return 0;
+}
+
int
intel_pin_and_fence_fb_obj(struct drm_plane *plane,
struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state,
struct intel_engine_cs *pipelined)
{
struct drm_device *dev = fb->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_ggtt_view view;
u32 alignment;
int ret;
default:
MISSING_CASE(fb->modifier[0]);
return -EINVAL;
}
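+ /* Work out which GGTT view this plane needs before pinning the object. */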
+ ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ if (ret)
+ return ret;
+
/* Note that the w/a also requires 64 PTE of padding following the
* bo. We currently fill all unused PTE with the shadow page and so
* we should always have valid PTE following the scanout preventing
* the VT-d warning.
*/
if (need_vtd_wa(dev) && alignment < 256 * 1024)
alignment = 256 * 1024;

intel_runtime_pm_get(dev_priv);

dev_priv->mm.interruptible = false;
ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined,
- &i915_ggtt_view_normal);
+ &view);
if (ret)
goto err_interruptible;
ret = i915_gem_object_get_fence(obj);
if (ret)
goto err_unpin;

i915_gem_object_pin_fence(obj);

dev_priv->mm.interruptible = true;
intel_runtime_pm_put(dev_priv);
return 0;

err_unpin:
- i915_gem_object_unpin_from_display_plane(obj, &i915_ggtt_view_normal);
+ i915_gem_object_unpin_from_display_plane(obj, &view);
err_interruptible:
dev_priv->mm.interruptible = true;
intel_runtime_pm_put(dev_priv);
return ret;
}

-static void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
+static void intel_unpin_fb_obj(struct drm_framebuffer *fb,
+ const struct drm_plane_state *plane_state)
{
+ struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct i915_ggtt_view view;
+ int ret;
WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
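+ /* Recompute the view that was used at pin time so the matching VMA is unpinned. */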
+ ret = intel_fill_fb_ggtt_view(&view, fb, plane_state);
+ WARN_ONCE(ret, "Couldn't get view from plane state!");
+
i915_gem_object_unpin_fence(obj);
- i915_gem_object_unpin_from_display_plane(obj, &i915_ggtt_view_normal);
+ i915_gem_object_unpin_from_display_plane(obj, &view);
}
/* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
* is assumed to be a power-of-two. */