#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
-#include <linux/intel-gtt.h>
- static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined);
-static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
- int write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
++static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+ bool write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset,
uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
- bool interruptible);
-static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
- unsigned alignment);
-static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable);
+static void i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj);
+static int i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv);
-static void i915_gem_free_object_tail(struct drm_gem_object *obj);
-
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask);
+ struct drm_file *file);
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
-static void
-i915_gem_object_put_pages(struct drm_gem_object *obj);
+static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+ int nr_to_scan,
+ gfp_t gfp_mask);
-static LIST_HEAD(shrink_list);
-static DEFINE_SPINLOCK(shrink_list_lock);
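
Note: the global shrink_list/shrink_list_lock pair gives way to a per-device
shrinker built on i915_gem_inactive_shrink() declared above. A minimal sketch
of the registration this implies, assuming the hookup lives in the driver's
load path and that dev_priv->mm.inactive_shrinker is the new field (neither
is shown in this excerpt):

	/* replace the global shrink list with a per-device shrinker */
	dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
	dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
	register_shrinker(&dev_priv->mm.inactive_shrinker);
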
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
* therefore we must wait for any outstanding access to complete
* before clearing the fence.
*/
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- if (reg->gpu) {
- int ret;
-
- ret = i915_gem_object_flush_gpu_write_domain(obj);
- if (ret)
- return ret;
+ if (obj->fenced_gpu_access) {
- ret = i915_gem_object_flush_gpu_write_domain(obj, NULL);
- if (ret)
- return ret;
-
++ i915_gem_object_flush_gpu_write_domain(obj);
+ obj->fenced_gpu_access = false;
+ }
- ret = i915_gem_object_wait_rendering(obj, interruptible);
+ if (obj->last_fenced_seqno) {
+ ret = i915_do_wait_request(dev,
+ obj->last_fenced_seqno,
+ interruptible,
+ obj->last_fenced_ring);
if (ret)
return ret;
}
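
Note: the fence teardown now keys off three pieces of per-object state that
the old code lacked. Their definitions are not visible in this excerpt, so
the sketch below is an assumption inferred purely from how the fields are
used above:

	/* assumed per-object fence-tracking state (types inferred from use) */
	bool fenced_gpu_access;                     /* GPU used the fence */
	uint32_t last_fenced_seqno;                 /* request to wait for */
	struct intel_ring_buffer *last_fenced_ring; /* ring to wait on */
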
/** Flushes any GPU write domain for the object if it's dirty. */
--static int
- i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj,
- struct intel_ring_buffer *pipelined)
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
++static void
++i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- uint32_t old_write_domain;
+ struct drm_device *dev = obj->base.dev;
- if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
+ if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
++ return;
/* Queue the GPU write cache flushing we need. */
- old_write_domain = obj->write_domain;
- i915_gem_flush_ring(dev, NULL,
- to_intel_bo(obj)->ring,
- 0, obj->write_domain);
- BUG_ON(obj->write_domain);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
-
- return 0;
+ i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+ BUG_ON(obj->base.write_domain);
-
- if (pipelined && pipelined == obj->ring)
- return 0;
-
- return i915_gem_object_wait_rendering(obj, true);
}
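
Since the flush helper can no longer fail, callers that used to check its
return value now pair it with an explicit, failable wait; the set-domain
hunks below all follow this shape:

	i915_gem_object_flush_gpu_write_domain(obj);
	ret = i915_gem_object_wait_rendering(obj, true);
	if (ret)
		return ret;
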
/** Flushes the GTT write domain for the object if it's dirty. */
int ret;
/* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj, NULL);
- ret = i915_gem_object_flush_gpu_write_domain(obj);
-- if (ret != 0)
- return ret;
++ i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
return ret;
i915_gem_object_flush_cpu_write_domain(obj);
- if (write) {
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
- }
-
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
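
The old_*_domain snapshots taken above exist to feed the domain-change
tracepoint once the new values are written; a call like the one visible in
the removed hunk earlier presumably follows the update, now taking the
snapshots:

	trace_i915_gem_object_change_domain(obj,
					    old_read_domains,
					    old_write_domain);
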
int ret;
/* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj, pipelined);
- ret = i915_gem_object_flush_gpu_write_domain(obj);
-- if (ret)
-- return ret;
++ i915_gem_object_flush_gpu_write_domain(obj);
	/* Currently, we are always called from a non-interruptible context. */
if (!pipelined) {
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
- ret = i915_gem_object_flush_gpu_write_domain(obj);
-- if (ret != 0)
- return ret;
++ i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
return ret;
i915_gem_object_flush_gtt_write_domain(obj);
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
- if (write) {
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
- }
-
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* Flush the CPU cache if it's still invalid. */
- if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
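
The obj->foo to obj->base.foo renames running through this diff follow from
embedding the generic GEM object inside the driver-private one; roughly (a
sketch, private fields abridged):

	struct drm_i915_gem_object {
		struct drm_gem_object base;
		/* driver-private state: gtt_space, ring, fence tracking, ... */
	};

with to_intel_bo() (seen in a removed hunk above) recovering the containing
object from a struct drm_gem_object *.
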
/* It should now be out of any other write domains, and we can update
uint32_t old_read_domains;
int i, ret;
- if (offset == 0 && size == obj->size)
+ if (offset == 0 && size == obj->base.size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
- ret = i915_gem_object_flush_gpu_write_domain(obj);
-- if (ret != 0)
- return ret;
++ i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
return ret;
+
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */