drm/i915: Use drm_i915_gem_object as the preferred type
author	Chris Wilson <chris@chris-wilson.co.uk>
Mon, 8 Nov 2010 19:18:58 +0000 (19:18 +0000)
committer	Chris Wilson <chris@chris-wilson.co.uk>
Tue, 23 Nov 2010 20:19:10 +0000 (20:19 +0000)
A glorified s/obj_priv/obj/ with a net reduction of over 100 lines and
many characters!

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
16 files changed:
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_debug.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_tiling.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_trace.h
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_fb.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

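For orientation before the diff: the rename leans on struct drm_i915_gem_object
embedding the core GEM object as its first member, so the two pointer types
convert freely. Below is a standalone sketch of the idiom; the stub fields, the
local container_of fallback, and the footprint_* helpers are illustrative only,
while the embedding and to_intel_bo() mirror the driver headers.

#include <stddef.h>

struct drm_gem_object { size_t size; };  /* core GEM object (stub) */

struct drm_i915_gem_object {             /* driver type embeds the core one */
        struct drm_gem_object base;
        unsigned int gtt_offset;         /* i915-private state (abridged) */
};

/* i915_drv.h recovers the driver type from a core pointer via container_of: */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

/* Before the patch: helpers took the core type and upcast on entry. */
static size_t footprint_before(struct drm_gem_object *obj)
{
        struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        return obj->size + obj_priv->gtt_offset;  /* two pointers in play */
}

/* After the patch: helpers take the driver type; ->base reaches the core. */
static size_t footprint_after(struct drm_i915_gem_object *obj)
{
        return obj->base.size + obj->gtt_offset;  /* one pointer */
}
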
index 4fe49e0228ef10a946497c1e1b346925e007f835..1e8cd74d18d59400beb178afdd4b8e31b5b6a98e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -87,19 +87,19 @@ static int i915_capabilities(struct seq_file *m, void *data)
        return 0;
 }
 
-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 {
-       if (obj_priv->user_pin_count > 0)
+       if (obj->user_pin_count > 0)
                return "P";
-       else if (obj_priv->pin_count > 0)
+       else if (obj->pin_count > 0)
                return "p";
        else
                return " ";
 }
 
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 {
-    switch (obj_priv->tiling_mode) {
+    switch (obj->tiling_mode) {
     default:
     case I915_TILING_NONE: return " ";
     case I915_TILING_X: return "X";
@@ -140,7 +140,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        struct list_head *head;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        size_t total_obj_size, total_gtt_size;
        int count, ret;
 
@@ -175,12 +175,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
        }
 
        total_obj_size = total_gtt_size = count = 0;
-       list_for_each_entry(obj_priv, head, mm_list) {
+       list_for_each_entry(obj, head, mm_list) {
                seq_printf(m, "   ");
-               describe_obj(m, obj_priv);
+               describe_obj(m, obj);
                seq_printf(m, "\n");
-               total_obj_size += obj_priv->base.size;
-               total_gtt_size += obj_priv->gtt_space->size;
+               total_obj_size += obj->base.size;
+               total_gtt_size += obj->gtt_space->size;
                count++;
        }
        mutex_unlock(&dev->struct_mutex);
@@ -251,14 +251,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
                        seq_printf(m, "%d prepares\n", work->pending);
 
                        if (work->old_fb_obj) {
-                               struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
-                               if(obj_priv)
-                                       seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+                               struct drm_i915_gem_object *obj = work->old_fb_obj;
+                               if (obj)
+                                       seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
                        }
                        if (work->pending_flip_obj) {
-                               struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
-                               if(obj_priv)
-                                       seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+                               struct drm_i915_gem_object *obj = work->pending_flip_obj;
+                               if (obj)
+                                       seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
                        }
                }
                spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -421,17 +421,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
        seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
        seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
        for (i = 0; i < dev_priv->num_fence_regs; i++) {
-               struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+               struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
 
                seq_printf(m, "Fenced object[%2d] = ", i);
                if (obj == NULL)
                        seq_printf(m, "unused");
                else
-                       describe_obj(m, to_intel_bo(obj));
+                       describe_obj(m, obj);
                seq_printf(m, "\n");
        }
-       mutex_unlock(&dev->struct_mutex);
 
+       mutex_unlock(&dev->struct_mutex);
        return 0;
 }
 
@@ -465,14 +465,14 @@ static int i915_hws_info(struct seq_file *m, void *data)
 
 static void i915_dump_object(struct seq_file *m,
                             struct io_mapping *mapping,
-                            struct drm_i915_gem_object *obj_priv)
+                            struct drm_i915_gem_object *obj)
 {
        int page, page_count, i;
 
-       page_count = obj_priv->base.size / PAGE_SIZE;
+       page_count = obj->base.size / PAGE_SIZE;
        for (page = 0; page < page_count; page++) {
                u32 *mem = io_mapping_map_wc(mapping,
-                                            obj_priv->gtt_offset + page * PAGE_SIZE);
+                                            obj->gtt_offset + page * PAGE_SIZE);
                for (i = 0; i < PAGE_SIZE; i += 4)
                        seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
                io_mapping_unmap(mem);
@@ -484,25 +484,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;
 
-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-               obj = &obj_priv->base;
-               if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-                   seq_printf(m, "--- gtt_offset = 0x%08x\n",
-                              obj_priv->gtt_offset);
-                   i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+               if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+                   seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+                   i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
                }
        }
 
        mutex_unlock(&dev->struct_mutex);
-
        return 0;
 }
 
@@ -525,7 +521,7 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
        if (ret)
                return ret;
 
-       if (!ring->gem_object) {
+       if (!ring->obj) {
                seq_printf(m, "No ringbuffer setup\n");
        } else {
                u8 *virt = ring->virtual_start;
@@ -983,7 +979,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                   fb->base.height,
                   fb->base.depth,
                   fb->base.bits_per_pixel);
-       describe_obj(m, to_intel_bo(fb->obj));
+       describe_obj(m, fb->obj);
        seq_printf(m, "\n");
 
        list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -995,7 +991,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
                           fb->base.height,
                           fb->base.depth,
                           fb->base.bits_per_pixel);
-               describe_obj(m, to_intel_bo(fb->obj));
+               describe_obj(m, fb->obj);
                seq_printf(m, "\n");
        }
 
index 7084de7c4c550ec683d3075ae8b5cdb4da5ed0de..7960fd63ecb1ebc6400dff4c2b7765e12ccba143 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -157,7 +157,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
        }
 
        if (init->ring_size != 0) {
-               if (dev_priv->render_ring.gem_object != NULL) {
+               if (dev_priv->render_ring.obj != NULL) {
                        i915_dma_cleanup(dev);
                        DRM_ERROR("Client tried to initialize ringbuffer in "
                                  "GEM mode\n");
index dc371d987aa7a1aecfdb43713959555b6ff0a484..22d6388b331f04fc8bc17654fea709bbd46aa616 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -32,7 +32,6 @@
 
 #include "i915_reg.h"
 #include "intel_bios.h"
-#include "i915_trace.h"
 #include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>
 #include <linux/i2c.h>
@@ -90,7 +89,7 @@ struct drm_i915_gem_phys_object {
        int id;
        struct page **page_list;
        drm_dma_handle_t *handle;
-       struct drm_gem_object *cur_obj;
+       struct drm_i915_gem_object *cur_obj;
 };
 
 struct mem_block {
@@ -125,7 +124,7 @@ struct drm_i915_master_private {
 #define I915_FENCE_REG_NONE -1
 
 struct drm_i915_fence_reg {
-       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj;
        struct list_head lru_list;
        bool gpu;
 };
@@ -280,9 +279,9 @@ typedef struct drm_i915_private {
        uint32_t counter;
        unsigned int seqno_gfx_addr;
        drm_local_map_t hws_map;
-       struct drm_gem_object *seqno_obj;
-       struct drm_gem_object *pwrctx;
-       struct drm_gem_object *renderctx;
+       struct drm_i915_gem_object *seqno_obj;
+       struct drm_i915_gem_object *pwrctx;
+       struct drm_i915_gem_object *renderctx;
 
        struct resource mch_res;
 
@@ -690,14 +689,14 @@ typedef struct drm_i915_private {
        u8 fmax;
        u8 fstart;
 
-       u64 last_count1;
-       unsigned long last_time1;
-       u64 last_count2;
-       struct timespec last_time2;
-       unsigned long gfx_power;
-       int c_m;
-       int r_t;
-       u8 corr;
+       u64 last_count1;
+       unsigned long last_time1;
+       u64 last_count2;
+       struct timespec last_time2;
+       unsigned long gfx_power;
+       int c_m;
+       int r_t;
+       u8 corr;
        spinlock_t *mchdev_lock;
 
        enum no_fbc_reason no_fbc_reason;
@@ -711,7 +710,6 @@ typedef struct drm_i915_private {
        struct intel_fbdev *fbdev;
 } drm_i915_private_t;
 
-/** driver private structure attached to each drm_gem_object */
 struct drm_i915_gem_object {
        struct drm_gem_object base;
 
@@ -918,7 +916,7 @@ enum intel_chip_family {
 #define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
 #define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
 
-#define HAS_OVERLAY(dev)               (INTEL_INFO(dev)->has_overlay)
+#define HAS_OVERLAY(dev)               (INTEL_INFO(dev)->has_overlay)
 #define OVERLAY_NEEDS_PHYSICAL(dev)    (INTEL_INFO(dev)->overlay_needs_physical)
 
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
@@ -947,6 +945,8 @@ enum intel_chip_family {
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
 #define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
+#include "i915_trace.h"
+
 extern struct drm_ioctl_desc i915_ioctls[];
 extern int i915_max_ioctl;
 extern unsigned int i915_fbpercrtc;
@@ -1085,14 +1085,15 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv);
 void i915_gem_load(struct drm_device *dev);
 int i915_gem_init_object(struct drm_gem_object *obj);
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
-                                             size_t size);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+                                                 size_t size);
 void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
+int i915_gem_object_pin(struct drm_i915_gem_object *obj,
+                       uint32_t alignment,
                        bool map_and_fenceable);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
 /**
@@ -1104,14 +1105,14 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
        return (int32_t)(seq1 - seq2) >= 0;
 }
 
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+int i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
                                  bool interruptible);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+int i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
                                  bool interruptible);
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_gem_object *obj,
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
                               uint32_t read_domains,
                               uint32_t write_domain);
 int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
@@ -1131,23 +1132,23 @@ int i915_do_wait_request(struct drm_device *dev,
                         bool interruptible,
                         struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
+int i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
                                      int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+int i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
                                         bool pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
-                               struct drm_gem_object *obj,
+                               struct drm_i915_gem_object *obj,
                                int id,
                                int align);
 void i915_gem_detach_phys_object(struct drm_device *dev,
-                                struct drm_gem_object *obj);
+                                struct drm_i915_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 /* i915_gem_gtt.c */
 void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int i915_gem_gtt_bind_object(struct drm_gem_object *obj);
-void i915_gem_gtt_unbind_object(struct drm_gem_object *obj);
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
 
 /* i915_gem_evict.c */
 int i915_gem_evict_something(struct drm_device *dev, int min_size,
@@ -1157,19 +1158,20 @@ int i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
 
 /* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
                          const char *where, uint32_t mark);
 #if WATCH_LISTS
 int i915_verify_lists(struct drm_device *dev);
 #else
 #define i915_verify_lists(dev) 0
 #endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+                                    int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
                          const char *where, uint32_t mark);
 
 /* i915_debugfs.c */
@@ -1251,10 +1253,10 @@ extern void intel_display_print_error_state(struct seq_file *m,
  * In that case, we don't need to do it when GEM is initialized as nobody else
  * has access to the ring.
  */
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do {                        \
-       if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do {                     \
+       if (((drm_i915_private_t *)dev->dev_private)->render_ring.obj \
                        == NULL)                                        \
-               LOCK_TEST_WITH_RETURN(dev, file_priv);                  \
+               LOCK_TEST_WITH_RETURN(dev, file);                       \
 } while (0)
 
 
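The header changes above narrow the whole pin/fence/domain API to the driver
type. A hedged call-site sketch using only the post-patch prototypes declared
above; bind_for_scanout is a hypothetical helper and error handling is abridged.

/* Pin a buffer for scanout, then move it to the display plane. */
int bind_for_scanout(struct drm_i915_gem_object *obj)
{
        int ret;

        /* 4096 = GTT page alignment; true = map_and_fenceable */
        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret)
                return ret;

        ret = i915_gem_object_set_to_display_plane(obj, false /* pipelined */);
        if (ret)
                i915_gem_object_unpin(obj);
        return ret;
}
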
index 3cac366b3053ab794ff13b40360ebb31c24029a1..d196895527a65de566aeb1978b27ae98c54fdbad 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -41,29 +41,30 @@ struct change_domains {
        uint32_t flush_rings;
 };
 
-static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv);
-static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv);
+static uint32_t i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj);
+static uint32_t i915_gem_get_gtt_size(struct drm_i915_gem_object *obj);
 
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+static int i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj,
                                                  bool pipelined);
-static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
                                             int write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
                                                     uint64_t offset,
                                                     uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                                          bool interruptible);
-static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                                       unsigned alignment,
                                       bool map_and_fenceable);
-static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+static void i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj);
+static int i915_gem_phys_pwrite(struct drm_device *dev,
+                               struct drm_i915_gem_object *obj,
                                struct drm_i915_gem_pwrite *args,
-                               struct drm_file *file_priv);
-static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+                               struct drm_file *file);
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
 
 static int i915_gem_inactive_shrink(struct shrinker *shrinker,
                                    int nr_to_scan,
@@ -212,11 +213,9 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev)
 }
 
 static inline bool
-i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
 {
-       return obj_priv->gtt_space &&
-               !obj_priv->active &&
-               obj_priv->pin_count == 0;
+       return obj->gtt_space && !obj->active && obj->pin_count == 0;
 }
 
 int i915_gem_do_init(struct drm_device *dev,
@@ -244,7 +243,7 @@ int i915_gem_do_init(struct drm_device *dev,
 
 int
 i915_gem_init_ioctl(struct drm_device *dev, void *data,
-                   struct drm_file *file_priv)
+                   struct drm_file *file)
 {
        struct drm_i915_gem_init *args = data;
        int ret;
@@ -258,7 +257,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
 
 int
 i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
-                           struct drm_file *file_priv)
+                           struct drm_file *file)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_get_aperture *args = data;
@@ -280,10 +279,10 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
  */
 int
 i915_gem_create_ioctl(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv)
+                     struct drm_file *file)
 {
        struct drm_i915_gem_create *args = data;
-       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj;
        int ret;
        u32 handle;
 
@@ -294,29 +293,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
        if (obj == NULL)
                return -ENOMEM;
 
-       ret = drm_gem_handle_create(file_priv, obj, &handle);
+       ret = drm_gem_handle_create(file, &obj->base, &handle);
        if (ret) {
-               drm_gem_object_release(obj);
-               i915_gem_info_remove_obj(dev->dev_private, obj->size);
+               drm_gem_object_release(&obj->base);
+               i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
                kfree(obj);
                return ret;
        }
 
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
        trace_i915_gem_object_create(obj);
 
        args->handle = handle;
        return 0;
 }
 
-static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
+static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
-       drm_i915_private_t *dev_priv = obj->dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
 
        return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
-               obj_priv->tiling_mode != I915_TILING_NONE;
+               obj->tiling_mode != I915_TILING_NONE;
 }
 
 static inline void
@@ -392,12 +390,12 @@ slow_shmem_bit17_copy(struct page *gpu_page,
  * fault, it fails so we can fall back to i915_gem_shmem_pwrite_slow().
  */
 static int
-i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_fast(struct drm_device *dev,
+                         struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_pread *args,
-                         struct drm_file *file_priv)
+                         struct drm_file *file)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
+       struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        ssize_t remain;
        loff_t offset;
        char __user *user_data;
@@ -406,7 +404,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       obj_priv = to_intel_bo(obj);
        offset = args->offset;
 
        while (remain > 0) {
@@ -455,12 +452,12 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
  * and not take page faults.
  */
 static int
-i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_slow(struct drm_device *dev,
+                         struct drm_i915_gem_object *obj,
                          struct drm_i915_gem_pread *args,
-                         struct drm_file *file_priv)
+                         struct drm_file *file)
 {
-       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
@@ -506,7 +503,6 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       obj_priv = to_intel_bo(obj);
        offset = args->offset;
 
        while (remain > 0) {
@@ -575,11 +571,10 @@ out:
  */
 int
 i915_gem_pread_ioctl(struct drm_device *dev, void *data,
-                    struct drm_file *file_priv)
+                    struct drm_file *file)
 {
        struct drm_i915_gem_pread *args = data;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret = 0;
 
        if (args->size == 0)
@@ -599,15 +594,15 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
-       obj_priv = to_intel_bo(obj);
 
        /* Bounds check source.  */
-       if (args->offset > obj->size || args->size > obj->size - args->offset) {
+       if (args->offset > obj->base.size ||
+           args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }
@@ -620,12 +615,12 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 
        ret = -EFAULT;
        if (!i915_gem_object_needs_bit17_swizzle(obj))
-               ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+               ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
        if (ret == -EFAULT)
-               ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+               ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
 
 out:
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -680,11 +675,11 @@ slow_kernel_write(struct io_mapping *mapping,
  * user into the GTT, uncached.
  */
 static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+                        struct drm_i915_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
-                        struct drm_file *file_priv)
+                        struct drm_file *file)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t offset, page_base;
@@ -694,8 +689,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       obj_priv = to_intel_bo(obj);
-       offset = obj_priv->gtt_offset + args->offset;
+       offset = obj->gtt_offset + args->offset;
 
        while (remain > 0) {
                /* Operation in this page
@@ -735,11 +729,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
  * than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
  */
 static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_slow(struct drm_device *dev,
+                        struct drm_i915_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
-                        struct drm_file *file_priv)
+                        struct drm_file *file)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        drm_i915_private_t *dev_priv = dev->dev_private;
        ssize_t remain;
        loff_t gtt_page_base, offset;
@@ -780,8 +774,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
        if (ret)
                goto out_unpin_pages;
 
-       obj_priv = to_intel_bo(obj);
-       offset = obj_priv->gtt_offset + args->offset;
+       offset = obj->gtt_offset + args->offset;
 
        while (remain > 0) {
                /* Operation in this page
@@ -827,12 +820,12 @@ out_unpin_pages:
  * copy_from_user into the kmapped pages backing the object.
  */
 static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_fast(struct drm_device *dev,
+                          struct drm_i915_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
-                          struct drm_file *file_priv)
+                          struct drm_file *file)
 {
-       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        ssize_t remain;
        loff_t offset;
        char __user *user_data;
@@ -841,9 +834,8 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
        user_data = (char __user *) (uintptr_t) args->data_ptr;
        remain = args->size;
 
-       obj_priv = to_intel_bo(obj);
        offset = args->offset;
-       obj_priv->dirty = 1;
+       obj->dirty = 1;
 
        while (remain > 0) {
                struct page *page;
@@ -898,12 +890,12 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
  * struct_mutex is held.
  */
 static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_slow(struct drm_device *dev,
+                          struct drm_i915_gem_object *obj,
                           struct drm_i915_gem_pwrite *args,
-                          struct drm_file *file_priv)
+                          struct drm_file *file)
 {
-       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        struct mm_struct *mm = current->mm;
        struct page **user_pages;
        ssize_t remain;
@@ -947,9 +939,8 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 
        do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
-       obj_priv = to_intel_bo(obj);
        offset = args->offset;
-       obj_priv->dirty = 1;
+       obj->dirty = 1;
 
        while (remain > 0) {
                struct page *page;
@@ -1020,8 +1011,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
 {
        struct drm_i915_gem_pwrite *args = data;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        if (args->size == 0)
@@ -1041,15 +1031,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = drm_gem_object_lookup(dev, file, args->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
-       obj_priv = to_intel_bo(obj);
 
        /* Bounds check destination. */
-       if (args->offset > obj->size || args->size > obj->size - args->offset) {
+       if (args->offset > obj->base.size ||
+           args->size > obj->base.size - args->offset) {
                ret = -EINVAL;
                goto out;
        }
@@ -1060,11 +1050,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
-       if (obj_priv->phys_obj)
+       if (obj->phys_obj)
                ret = i915_gem_phys_pwrite(dev, obj, args, file);
-       else if (obj_priv->tiling_mode == I915_TILING_NONE &&
-                obj_priv->gtt_space &&
-                obj->write_domain != I915_GEM_DOMAIN_CPU) {
+       else if (obj->tiling_mode == I915_TILING_NONE &&
+                obj->gtt_space &&
+                obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                ret = i915_gem_object_pin(obj, 0, true);
                if (ret)
                        goto out;
@@ -1092,7 +1082,7 @@ out_unpin:
        }
 
 out:
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -1104,12 +1094,11 @@ unlock:
  */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
-                         struct drm_file *file_priv)
+                         struct drm_file *file)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_set_domain *args = data;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        uint32_t read_domains = args->read_domains;
        uint32_t write_domain = args->write_domain;
        int ret;
@@ -1134,12 +1123,11 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
-       obj_priv = to_intel_bo(obj);
 
        intel_mark_busy(dev, obj);
 
@@ -1149,9 +1137,9 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                /* Update the LRU on the fence for the CPU access that's
                 * about to occur.
                 */
-               if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+               if (obj->fence_reg != I915_FENCE_REG_NONE) {
                        struct drm_i915_fence_reg *reg =
-                               &dev_priv->fence_regs[obj_priv->fence_reg];
+                               &dev_priv->fence_regs[obj->fence_reg];
                        list_move_tail(&reg->lru_list,
                                       &dev_priv->mm.fence_list);
                }
@@ -1167,10 +1155,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        }
 
        /* Maintain LRU order of "inactive" objects */
-       if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
-               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+       if (ret == 0 && i915_gem_object_is_inactive(obj))
+               list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -1181,10 +1169,10 @@ unlock:
  */
 int
 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
-                     struct drm_file *file_priv)
+                        struct drm_file *file)
 {
        struct drm_i915_gem_sw_finish *args = data;
-       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj;
        int ret = 0;
 
        if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1194,17 +1182,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
 
        /* Pinned buffers may be scanout, so flush the cache */
-       if (to_intel_bo(obj)->pin_count)
+       if (obj->pin_count)
                i915_gem_object_flush_cpu_write_domain(obj);
 
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -1219,7 +1207,7 @@ unlock:
  */
 int
 i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
-                  struct drm_file *file_priv)
+                   struct drm_file *file)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_mmap *args = data;
@@ -1230,7 +1218,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        if (!(dev->driver->driver_features & DRIVER_GEM))
                return -ENODEV;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       obj = drm_gem_object_lookup(dev, file, args->handle);
        if (obj == NULL)
                return -ENOENT;
 
@@ -1273,10 +1261,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
  */
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 {
-       struct drm_gem_object *obj = vma->vm_private_data;
-       struct drm_device *dev = obj->dev;
+       struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        pgoff_t page_offset;
        unsigned long pfn;
        int ret = 0;
@@ -1288,17 +1275,17 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 
        /* Now bind it into the GTT if needed */
        mutex_lock(&dev->struct_mutex);
-       BUG_ON(obj_priv->pin_count && !obj_priv->pin_mappable);
+       BUG_ON(obj->pin_count && !obj->pin_mappable);
 
-       if (obj_priv->gtt_space) {
-               if (!obj_priv->map_and_fenceable) {
+       if (obj->gtt_space) {
+               if (!obj->map_and_fenceable) {
                        ret = i915_gem_object_unbind(obj);
                        if (ret)
                                goto unlock;
                }
        }
 
-       if (!obj_priv->gtt_space) {
+       if (!obj->gtt_space) {
                ret = i915_gem_object_bind_to_gtt(obj, 0, true);
                if (ret)
                        goto unlock;
@@ -1308,22 +1295,22 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        if (ret)
                goto unlock;
 
-       if (!obj_priv->fault_mappable) {
-               obj_priv->fault_mappable = true;
-               i915_gem_info_update_mappable(dev_priv, obj_priv, true);
+       if (!obj->fault_mappable) {
+               obj->fault_mappable = true;
+               i915_gem_info_update_mappable(dev_priv, obj, true);
        }
 
        /* Need a new fence register? */
-       if (obj_priv->tiling_mode != I915_TILING_NONE) {
+       if (obj->tiling_mode != I915_TILING_NONE) {
                ret = i915_gem_object_get_fence_reg(obj, true);
                if (ret)
                        goto unlock;
        }
 
-       if (i915_gem_object_is_inactive(obj_priv))
-               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+       if (i915_gem_object_is_inactive(obj))
+               list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-       pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+       pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
                page_offset;
 
        /* Finally, remap it using the new GTT offset */
@@ -1356,36 +1343,39 @@ unlock:
  * This routine allocates and attaches a fake offset for @obj.
  */
 static int
-i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        struct drm_gem_mm *mm = dev->mm_private;
        struct drm_map_list *list;
        struct drm_local_map *map;
        int ret = 0;
 
        /* Set the object up for mmap'ing */
-       list = &obj->map_list;
+       list = &obj->base.map_list;
        list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
        if (!list->map)
                return -ENOMEM;
 
        map = list->map;
        map->type = _DRM_GEM;
-       map->size = obj->size;
+       map->size = obj->base.size;
        map->handle = obj;
 
        /* Get a DRM GEM mmap offset allocated... */
        list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-                                                   obj->size / PAGE_SIZE, 0, 0);
+                                                   obj->base.size / PAGE_SIZE,
+                                                   0, 0);
        if (!list->file_offset_node) {
-               DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+               DRM_ERROR("failed to allocate offset for bo %d\n",
+                         obj->base.name);
                ret = -ENOSPC;
                goto out_free_list;
        }
 
        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-                                                 obj->size / PAGE_SIZE, 0);
+                                                 obj->base.size / PAGE_SIZE,
+                                                 0);
        if (!list->file_offset_node) {
                ret = -ENOMEM;
                goto out_free_list;
@@ -1424,29 +1414,28 @@ out_free_list:
  * fixup by i915_gem_fault().
  */
 void
-i915_gem_release_mmap(struct drm_gem_object *obj)
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-       if (unlikely(obj->map_list.map && dev->dev_mapping))
+       if (unlikely(obj->base.map_list.map && dev->dev_mapping))
                unmap_mapping_range(dev->dev_mapping,
-                                   (loff_t)obj->map_list.hash.key<<PAGE_SHIFT,
-                                   obj->size, 1);
+                                   (loff_t)obj->base.map_list.hash.key<<PAGE_SHIFT,
+                                   obj->base.size, 1);
 
-       if (obj_priv->fault_mappable) {
-               obj_priv->fault_mappable = false;
-               i915_gem_info_update_mappable(dev_priv, obj_priv, false);
+       if (obj->fault_mappable) {
+               obj->fault_mappable = false;
+               i915_gem_info_update_mappable(dev_priv, obj, false);
        }
 }
 
 static void
-i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        struct drm_gem_mm *mm = dev->mm_private;
-       struct drm_map_list *list = &obj->map_list;
+       struct drm_map_list *list = &obj->base.map_list;
 
        drm_ht_remove_item(&mm->offset_hash, &list->hash);
        drm_mm_put_block(list->file_offset_node);
@@ -1462,23 +1451,23 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
  * potential fence register mapping.
  */
 static uint32_t
-i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv)
+i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj_priv->base.dev;
+       struct drm_device *dev = obj->base.dev;
 
        /*
         * Minimum alignment is 4k (GTT page size), but might be greater
         * if a fence register is needed for the object.
         */
        if (INTEL_INFO(dev)->gen >= 4 ||
-           obj_priv->tiling_mode == I915_TILING_NONE)
+           obj->tiling_mode == I915_TILING_NONE)
                return 4096;
 
        /*
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
-       return i915_gem_get_gtt_size(obj_priv);
+       return i915_gem_get_gtt_size(obj);
 }
 
 /**
@@ -1490,16 +1479,16 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj_priv)
  * unfenced tiled surface requirements.
  */
 static uint32_t
-i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv)
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj_priv->base.dev;
+       struct drm_device *dev = obj->base.dev;
        int tile_height;
 
        /*
         * Minimum alignment is 4k (GTT page size) for sane hw.
         */
        if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
-           obj_priv->tiling_mode == I915_TILING_NONE)
+           obj->tiling_mode == I915_TILING_NONE)
                return 4096;
 
        /*
@@ -1508,18 +1497,18 @@ i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj_priv)
         * placed in a fenced gtt region).
         */
        if (IS_GEN2(dev) ||
-           (obj_priv->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+           (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
                tile_height = 32;
        else
                tile_height = 8;
 
-       return tile_height * obj_priv->stride * 2;
+       return tile_height * obj->stride * 2;
 }
 
 static uint32_t
-i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
+i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj_priv->base.dev;
+       struct drm_device *dev = obj->base.dev;
        uint32_t size;
 
        /*
@@ -1527,7 +1516,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
         * if a fence register is needed for the object.
         */
        if (INTEL_INFO(dev)->gen >= 4)
-               return obj_priv->base.size;
+               return obj->base.size;
 
        /*
         * Previous chips need to be aligned to the size of the smallest
@@ -1538,7 +1527,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
        else
                size = 512*1024;
 
-       while (size < obj_priv->base.size)
+       while (size < obj->base.size)
                size <<= 1;
 
        return size;
@@ -1548,7 +1537,7 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
  * i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
  * @dev: DRM device
  * @data: GTT mapping ioctl data
- * @file_priv: GEM object info
+ * @file: GEM object info
  *
  * Simply returns the fake offset to userspace so it can mmap it.
  * The mmap call will end up in drm_gem_mmap(), which will set things
@@ -1561,12 +1550,11 @@ i915_gem_get_gtt_size(struct drm_i915_gem_object *obj_priv)
  */
 int
 i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
-                       struct drm_file *file_priv)
+                       struct drm_file *file)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_mmap_gtt *args = data;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1576,44 +1564,42 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
-       obj_priv = to_intel_bo(obj);
 
-       if (obj->size > dev_priv->mm.gtt_mappable_end) {
+       if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
                ret = -E2BIG;
                goto unlock;
        }
 
-       if (obj_priv->madv != I915_MADV_WILLNEED) {
+       if (obj->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to mmap a purgeable buffer\n");
                ret = -EINVAL;
                goto out;
        }
 
-       if (!obj->map_list.map) {
+       if (!obj->base.map_list.map) {
                ret = i915_gem_create_mmap_offset(obj);
                if (ret)
                        goto out;
        }
 
-       args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
+       args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
 
 out:
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
 
 static int
-i915_gem_object_get_pages_gtt(struct drm_gem_object *obj,
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
                              gfp_t gfpmask)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page_count, i;
        struct address_space *mapping;
        struct inode *inode;
@@ -1622,13 +1608,13 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj,
        /* Get the list of pages out of our struct file.  They'll be pinned
         * at this point until we release them.
         */
-       page_count = obj->size / PAGE_SIZE;
-       BUG_ON(obj_priv->pages != NULL);
-       obj_priv->pages = drm_malloc_ab(page_count, sizeof(struct page *));
-       if (obj_priv->pages == NULL)
+       page_count = obj->base.size / PAGE_SIZE;
+       BUG_ON(obj->pages != NULL);
+       obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+       if (obj->pages == NULL)
                return -ENOMEM;
 
-       inode = obj->filp->f_path.dentry->d_inode;
+       inode = obj->base.filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;
        for (i = 0; i < page_count; i++) {
                page = read_cache_page_gfp(mapping, i,
@@ -1639,51 +1625,50 @@ i915_gem_object_get_pages_gtt(struct drm_gem_object *obj,
                if (IS_ERR(page))
                        goto err_pages;
 
-               obj_priv->pages[i] = page;
+               obj->pages[i] = page;
        }
 
-       if (obj_priv->tiling_mode != I915_TILING_NONE)
+       if (obj->tiling_mode != I915_TILING_NONE)
                i915_gem_object_do_bit_17_swizzle(obj);
 
        return 0;
 
 err_pages:
        while (i--)
-               page_cache_release(obj_priv->pages[i]);
+               page_cache_release(obj->pages[i]);
 
-       drm_free_large(obj_priv->pages);
-       obj_priv->pages = NULL;
+       drm_free_large(obj->pages);
+       obj->pages = NULL;
        return PTR_ERR(page);
 }
 
 static void
-i915_gem_object_put_pages_gtt(struct drm_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       int page_count = obj->size / PAGE_SIZE;
+       int page_count = obj->base.size / PAGE_SIZE;
        int i;
 
-       BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
+       BUG_ON(obj->madv == __I915_MADV_PURGED);
 
-       if (obj_priv->tiling_mode != I915_TILING_NONE)
+       if (obj->tiling_mode != I915_TILING_NONE)
                i915_gem_object_save_bit_17_swizzle(obj);
 
-       if (obj_priv->madv == I915_MADV_DONTNEED)
-               obj_priv->dirty = 0;
+       if (obj->madv == I915_MADV_DONTNEED)
+               obj->dirty = 0;
 
        for (i = 0; i < page_count; i++) {
-               if (obj_priv->dirty)
-                       set_page_dirty(obj_priv->pages[i]);
+               if (obj->dirty)
+                       set_page_dirty(obj->pages[i]);
 
-               if (obj_priv->madv == I915_MADV_WILLNEED)
-                       mark_page_accessed(obj_priv->pages[i]);
+               if (obj->madv == I915_MADV_WILLNEED)
+                       mark_page_accessed(obj->pages[i]);
 
-               page_cache_release(obj_priv->pages[i]);
+               page_cache_release(obj->pages[i]);
        }
-       obj_priv->dirty = 0;
+       obj->dirty = 0;
 
-       drm_free_large(obj_priv->pages);
-       obj_priv->pages = NULL;
+       drm_free_large(obj->pages);
+       obj->pages = NULL;
 }
 
 static uint32_t
@@ -1695,47 +1680,44 @@ i915_gem_next_request_seqno(struct drm_device *dev,
 }
 
 static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj,
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
                               struct intel_ring_buffer *ring)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
 
        BUG_ON(ring == NULL);
-       obj_priv->ring = ring;
+       obj->ring = ring;
 
        /* Add a reference if we're newly entering the active list. */
-       if (!obj_priv->active) {
-               drm_gem_object_reference(obj);
-               obj_priv->active = 1;
+       if (!obj->active) {
+               drm_gem_object_reference(&obj->base);
+               obj->active = 1;
        }
 
        /* Move from whatever list we were on to the tail of execution. */
-       list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
-       list_move_tail(&obj_priv->ring_list, &ring->active_list);
-       obj_priv->last_rendering_seqno = seqno;
+       list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+       list_move_tail(&obj->ring_list, &ring->active_list);
+       obj->last_rendering_seqno = seqno;
 }
 
 static void
-i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-       BUG_ON(!obj_priv->active);
-       list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
-       list_del_init(&obj_priv->ring_list);
-       obj_priv->last_rendering_seqno = 0;
+       BUG_ON(!obj->active);
+       list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
+       list_del_init(&obj->ring_list);
+       obj->last_rendering_seqno = 0;
 }
 
 /* Immediately discard the backing storage */
 static void
-i915_gem_object_truncate(struct drm_gem_object *obj)
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct inode *inode;
 
        /* Our goal here is to return as much of the memory as
@@ -1744,40 +1726,39 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
         * backing pages, *now*. Here we mirror the actions taken
         * when by shmem_delete_inode() to release the backing store.
         */
-       inode = obj->filp->f_path.dentry->d_inode;
+       inode = obj->base.filp->f_path.dentry->d_inode;
        truncate_inode_pages(inode->i_mapping, 0);
        if (inode->i_op->truncate_range)
                inode->i_op->truncate_range(inode, 0, (loff_t)-1);
 
-       obj_priv->madv = __I915_MADV_PURGED;
+       obj->madv = __I915_MADV_PURGED;
 }
 
 static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
 {
-       return obj_priv->madv == I915_MADV_DONTNEED;
+       return obj->madv == I915_MADV_DONTNEED;
 }
 
 static void
-i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
-       if (obj_priv->pin_count != 0)
-               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
+       if (obj->pin_count != 0)
+               list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
        else
-               list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-       list_del_init(&obj_priv->ring_list);
+               list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+       list_del_init(&obj->ring_list);
 
-       BUG_ON(!list_empty(&obj_priv->gpu_write_list));
+       BUG_ON(!list_empty(&obj->gpu_write_list));
 
-       obj_priv->last_rendering_seqno = 0;
-       obj_priv->ring = NULL;
-       if (obj_priv->active) {
-               obj_priv->active = 0;
-               drm_gem_object_unreference(obj);
+       obj->last_rendering_seqno = 0;
+       obj->ring = NULL;
+       if (obj->active) {
+               obj->active = 0;
+               drm_gem_object_unreference(&obj->base);
        }
        WARN_ON(i915_verify_lists(dev));
 }
@@ -1788,30 +1769,28 @@ i915_gem_process_flushing_list(struct drm_device *dev,
                               struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv, *next;
+       struct drm_i915_gem_object *obj, *next;
 
-       list_for_each_entry_safe(obj_priv, next,
+       list_for_each_entry_safe(obj, next,
                                 &ring->gpu_write_list,
                                 gpu_write_list) {
-               struct drm_gem_object *obj = &obj_priv->base;
+               if (obj->base.write_domain & flush_domains) {
+                       uint32_t old_write_domain = obj->base.write_domain;
 
-               if (obj->write_domain & flush_domains) {
-                       uint32_t old_write_domain = obj->write_domain;
-
-                       obj->write_domain = 0;
-                       list_del_init(&obj_priv->gpu_write_list);
+                       obj->base.write_domain = 0;
+                       list_del_init(&obj->gpu_write_list);
                        i915_gem_object_move_to_active(obj, ring);
 
                        /* update the fence lru list */
-                       if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
+                       if (obj->fence_reg != I915_FENCE_REG_NONE) {
                                struct drm_i915_fence_reg *reg =
-                                       &dev_priv->fence_regs[obj_priv->fence_reg];
+                                       &dev_priv->fence_regs[obj->fence_reg];
                                list_move_tail(&reg->lru_list,
                                                &dev_priv->mm.fence_list);
                        }
 
                        trace_i915_gem_object_change_domain(obj,
-                                                           obj->read_domains,
+                                                           obj->base.read_domains,
                                                            old_write_domain);
                }
        }
@@ -1912,22 +1891,22 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
        }
 
        while (!list_empty(&ring->active_list)) {
-               struct drm_i915_gem_object *obj_priv;
+               struct drm_i915_gem_object *obj;
 
-               obj_priv = list_first_entry(&ring->active_list,
-                                           struct drm_i915_gem_object,
-                                           ring_list);
+               obj = list_first_entry(&ring->active_list,
+                                      struct drm_i915_gem_object,
+                                      ring_list);
 
-               obj_priv->base.write_domain = 0;
-               list_del_init(&obj_priv->gpu_write_list);
-               i915_gem_object_move_to_inactive(&obj_priv->base);
+               obj->base.write_domain = 0;
+               list_del_init(&obj->gpu_write_list);
+               i915_gem_object_move_to_inactive(obj);
        }
 }
 
 void i915_gem_reset(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int i;
 
        i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
@@ -1939,23 +1918,23 @@ void i915_gem_reset(struct drm_device *dev)
         * lost bo to the inactive list.
         */
        while (!list_empty(&dev_priv->mm.flushing_list)) {
-               obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
-                                           struct drm_i915_gem_object,
-                                           mm_list);
+               obj = list_first_entry(&dev_priv->mm.flushing_list,
+                                      struct drm_i915_gem_object,
+                                      mm_list);
 
-               obj_priv->base.write_domain = 0;
-               list_del_init(&obj_priv->gpu_write_list);
-               i915_gem_object_move_to_inactive(&obj_priv->base);
+               obj->base.write_domain = 0;
+               list_del_init(&obj->gpu_write_list);
+               i915_gem_object_move_to_inactive(obj);
        }
 
        /* Move everything out of the GPU domains to ensure we do any
         * necessary invalidation upon reuse.
         */
-       list_for_each_entry(obj_priv,
+       list_for_each_entry(obj,
                            &dev_priv->mm.inactive_list,
                            mm_list)
        {
-               obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+               obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
        }
 
        /* The fence registers are invalidated so clear them out */
@@ -2008,18 +1987,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
         * by the ringbuffer to the flushing/inactive lists as appropriate.
         */
        while (!list_empty(&ring->active_list)) {
-               struct drm_gem_object *obj;
-               struct drm_i915_gem_object *obj_priv;
+               struct drm_i915_gem_object *obj;
 
-               obj_priv = list_first_entry(&ring->active_list,
-                                           struct drm_i915_gem_object,
-                                           ring_list);
+               obj = list_first_entry(&ring->active_list,
+                                      struct drm_i915_gem_object,
+                                      ring_list);
 
-               if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+               if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
                        break;
 
-               obj = &obj_priv->base;
-               if (obj->write_domain != 0)
+               if (obj->base.write_domain != 0)
                        i915_gem_object_move_to_flushing(obj);
                else
                        i915_gem_object_move_to_inactive(obj);
@@ -2040,17 +2017,17 @@ i915_gem_retire_requests(struct drm_device *dev)
        drm_i915_private_t *dev_priv = dev->dev_private;
 
        if (!list_empty(&dev_priv->mm.deferred_free_list)) {
-           struct drm_i915_gem_object *obj_priv, *tmp;
+           struct drm_i915_gem_object *obj, *next;
 
            /* We must be careful that during unbind() we do not
             * accidentally infinitely recurse into retire requests.
             * Currently:
             *   retire -> free -> unbind -> wait -> retire_ring
             */
-           list_for_each_entry_safe(obj_priv, tmp,
+           list_for_each_entry_safe(obj, next,
                                     &dev_priv->mm.deferred_free_list,
                                     mm_list)
-                   i915_gem_free_object_tail(&obj_priv->base);
+                   i915_gem_free_object_tail(obj);
        }
 
        i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
@@ -2175,7 +2152,6 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
 
 static void
 i915_gem_flush_ring(struct drm_device *dev,
-                   struct drm_file *file_priv,
                    struct intel_ring_buffer *ring,
                    uint32_t invalidate_domains,
                    uint32_t flush_domains)
@@ -2186,7 +2162,6 @@ i915_gem_flush_ring(struct drm_device *dev,
 
 static void
 i915_gem_flush(struct drm_device *dev,
-              struct drm_file *file_priv,
               uint32_t invalidate_domains,
               uint32_t flush_domains,
               uint32_t flush_rings)
@@ -2198,16 +2173,13 @@ i915_gem_flush(struct drm_device *dev,
 
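+       /* Fan the flush out to each ring selected in flush_rings. */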
        if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
                if (flush_rings & RING_RENDER)
-                       i915_gem_flush_ring(dev, file_priv,
-                                           &dev_priv->render_ring,
+                       i915_gem_flush_ring(dev, &dev_priv->render_ring,
                                            invalidate_domains, flush_domains);
                if (flush_rings & RING_BSD)
-                       i915_gem_flush_ring(dev, file_priv,
-                                           &dev_priv->bsd_ring,
+                       i915_gem_flush_ring(dev, &dev_priv->bsd_ring,
                                            invalidate_domains, flush_domains);
                if (flush_rings & RING_BLT)
-                       i915_gem_flush_ring(dev, file_priv,
-                                           &dev_priv->blt_ring,
+                       i915_gem_flush_ring(dev, &dev_priv->blt_ring,
                                            invalidate_domains, flush_domains);
        }
 }
@@ -2217,26 +2189,25 @@ i915_gem_flush(struct drm_device *dev,
  * safe to unbind from the GTT or access from the CPU.
  */
 static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool interruptible)
 {
-       struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct drm_device *dev = obj->base.dev;
        int ret;
 
        /* This function only exists to support waiting for existing rendering,
         * not for emitting required flushes.
         */
-       BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
+       BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
 
        /* If there is rendering queued on the buffer being evicted, wait for
         * it.
         */
-       if (obj_priv->active) {
+       if (obj->active) {
                ret = i915_do_wait_request(dev,
-                                          obj_priv->last_rendering_seqno,
+                                          obj->last_rendering_seqno,
                                           interruptible,
-                                          obj_priv->ring);
+                                          obj->ring);
                if (ret)
                        return ret;
        }
@@ -2248,17 +2219,16 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj,
  * Unbinds an object from the GTT aperture.
  */
 int
-i915_gem_object_unbind(struct drm_gem_object *obj)
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret = 0;
 
-       if (obj_priv->gtt_space == NULL)
+       if (obj->gtt_space == NULL)
                return 0;
 
-       if (obj_priv->pin_count != 0) {
+       if (obj->pin_count != 0) {
                DRM_ERROR("Attempting to unbind pinned buffer\n");
                return -EINVAL;
        }
@@ -2281,27 +2251,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
         */
        if (ret) {
                i915_gem_clflush_object(obj);
-               obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+               obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
 
        /* release the fence reg _after_ flushing */
-       if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+       if (obj->fence_reg != I915_FENCE_REG_NONE)
                i915_gem_clear_fence_reg(obj);
 
        i915_gem_gtt_unbind_object(obj);
 
        i915_gem_object_put_pages_gtt(obj);
 
-       i915_gem_info_remove_gtt(dev_priv, obj_priv);
-       list_del_init(&obj_priv->mm_list);
+       i915_gem_info_remove_gtt(dev_priv, obj);
+       list_del_init(&obj->mm_list);
        /* Avoid an unnecessary call to unbind on rebind. */
-       obj_priv->map_and_fenceable = true;
+       obj->map_and_fenceable = true;
 
-       drm_mm_put_block(obj_priv->gtt_space);
-       obj_priv->gtt_space = NULL;
-       obj_priv->gtt_offset = 0;
+       drm_mm_put_block(obj->gtt_space);
+       obj->gtt_space = NULL;
+       obj->gtt_offset = 0;
 
-       if (i915_gem_object_is_purgeable(obj_priv))
+       if (i915_gem_object_is_purgeable(obj))
                i915_gem_object_truncate(obj);
 
        trace_i915_gem_object_unbind(obj);
@@ -2315,7 +2285,7 @@ static int i915_ring_idle(struct drm_device *dev,
        if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
                return 0;
 
-       i915_gem_flush_ring(dev, NULL, ring,
+       i915_gem_flush_ring(dev, ring,
                            I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
        return i915_wait_request(dev,
                                 i915_gem_next_request_seqno(dev, ring),
@@ -2350,89 +2320,86 @@ i915_gpu_idle(struct drm_device *dev)
        return 0;
 }
 
-static void sandybridge_write_fence_reg(struct drm_gem_object *obj)
+static void sandybridge_write_fence_reg(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       u32 size = i915_gem_get_gtt_size(obj_priv);
-       int regnum = obj_priv->fence_reg;
+       u32 size = obj->gtt_space->size;
+       int regnum = obj->fence_reg;
        uint64_t val;
 
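+       /* The upper dword encodes the last 4K page of the range, the lower
+        * dword its start, and the pitch is given in 128-byte units.
+        */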
-       val = (uint64_t)((obj_priv->gtt_offset + size - 4096) &
+       val = (uint64_t)((obj->gtt_offset + size - 4096) &
                    0xfffff000) << 32;
-       val |= obj_priv->gtt_offset & 0xfffff000;
-       val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+       val |= obj->gtt_offset & 0xfffff000;
+       val |= (uint64_t)((obj->stride / 128) - 1) <<
                SANDYBRIDGE_FENCE_PITCH_SHIFT;
 
-       if (obj_priv->tiling_mode == I915_TILING_Y)
+       if (obj->tiling_mode == I915_TILING_Y)
                val |= 1 << I965_FENCE_TILING_Y_SHIFT;
        val |= I965_FENCE_REG_VALID;
 
        I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
 }
 
-static void i965_write_fence_reg(struct drm_gem_object *obj)
+static void i965_write_fence_reg(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       u32 size = i915_gem_get_gtt_size(obj_priv);
-       int regnum = obj_priv->fence_reg;
+       u32 size = obj->gtt_space->size;
+       int regnum = obj->fence_reg;
        uint64_t val;
 
-       val = (uint64_t)((obj_priv->gtt_offset + size - 4096) &
+       val = (uint64_t)((obj->gtt_offset + size - 4096) &
                    0xfffff000) << 32;
-       val |= obj_priv->gtt_offset & 0xfffff000;
-       val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
-       if (obj_priv->tiling_mode == I915_TILING_Y)
+       val |= obj->gtt_offset & 0xfffff000;
+       val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+       if (obj->tiling_mode == I915_TILING_Y)
                val |= 1 << I965_FENCE_TILING_Y_SHIFT;
        val |= I965_FENCE_REG_VALID;
 
        I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
 }
 
-static void i915_write_fence_reg(struct drm_gem_object *obj)
+static void i915_write_fence_reg(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       u32 size = i915_gem_get_gtt_size(obj_priv);
+       u32 size = obj->gtt_space->size;
        uint32_t fence_reg, val, pitch_val;
        int tile_width;
 
-       if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
-           (obj_priv->gtt_offset & (size - 1))) {
+       if ((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+           (obj->gtt_offset & (size - 1))) {
                WARN(1, "%s: object 0x%08x [fenceable? %d] not 1M or size (0x%08x) aligned [gtt_space offset=%lx, size=%lx]\n",
-                    __func__, obj_priv->gtt_offset, obj_priv->map_and_fenceable, size,
-                    obj_priv->gtt_space->start, obj_priv->gtt_space->size);
+                    __func__, obj->gtt_offset, obj->map_and_fenceable, size,
+                    obj->gtt_space->start, obj->gtt_space->size);
                return;
        }
 
-       if (obj_priv->tiling_mode == I915_TILING_Y &&
+       if (obj->tiling_mode == I915_TILING_Y &&
            HAS_128_BYTE_Y_TILING(dev))
                tile_width = 128;
        else
                tile_width = 512;
 
        /* Note: pitch better be a power of two tile widths */
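+       /* ffs() recovers log2 of the pitch in tile widths for the register. */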
-       pitch_val = obj_priv->stride / tile_width;
+       pitch_val = obj->stride / tile_width;
        pitch_val = ffs(pitch_val) - 1;
 
-       if (obj_priv->tiling_mode == I915_TILING_Y &&
+       if (obj->tiling_mode == I915_TILING_Y &&
            HAS_128_BYTE_Y_TILING(dev))
                WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
        else
                WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
 
-       val = obj_priv->gtt_offset;
-       if (obj_priv->tiling_mode == I915_TILING_Y)
+       val = obj->gtt_offset;
+       if (obj->tiling_mode == I915_TILING_Y)
                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
        val |= I915_FENCE_SIZE_BITS(size);
        val |= pitch_val << I830_FENCE_PITCH_SHIFT;
        val |= I830_FENCE_REG_VALID;
 
-       fence_reg = obj_priv->fence_reg;
+       fence_reg = obj->fence_reg;
        if (fence_reg < 8)
                fence_reg = FENCE_REG_830_0 + fence_reg * 4;
        else
@@ -2440,30 +2407,29 @@ static void i915_write_fence_reg(struct drm_gem_object *obj)
        I915_WRITE(fence_reg, val);
 }
 
-static void i830_write_fence_reg(struct drm_gem_object *obj)
+static void i830_write_fence_reg(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       u32 size = i915_gem_get_gtt_size(obj_priv);
-       int regnum = obj_priv->fence_reg;
+       u32 size = obj->gtt_space->size;
+       int regnum = obj->fence_reg;
        uint32_t val;
        uint32_t pitch_val;
        uint32_t fence_size_bits;
 
-       if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
-           (obj_priv->gtt_offset & (obj->size - 1))) {
+       if ((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+           (obj->gtt_offset & (obj->base.size - 1))) {
                WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
-                    __func__, obj_priv->gtt_offset);
+                    __func__, obj->gtt_offset);
                return;
        }
 
-       pitch_val = obj_priv->stride / 128;
+       pitch_val = obj->stride / 128;
        pitch_val = ffs(pitch_val) - 1;
        WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
 
-       val = obj_priv->gtt_offset;
-       if (obj_priv->tiling_mode == I915_TILING_Y)
+       val = obj->gtt_offset;
+       if (obj->tiling_mode == I915_TILING_Y)
                val |= 1 << I830_FENCE_TILING_Y_SHIFT;
        fence_size_bits = I830_FENCE_SIZE_BITS(size);
        WARN_ON(fence_size_bits & ~0x00000f00);
@@ -2479,7 +2445,7 @@ static int i915_find_fence_reg(struct drm_device *dev,
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_fence_reg *reg;
-       struct drm_i915_gem_object *obj_priv = NULL;
+       struct drm_i915_gem_object *obj = NULL;
        int i, avail, ret;
 
        /* First try to find a free reg */
@@ -2489,9 +2455,8 @@ static int i915_find_fence_reg(struct drm_device *dev,
                if (!reg->obj)
                        return i;
 
-               obj_priv = to_intel_bo(reg->obj);
-               if (!obj_priv->pin_count)
-                   avail++;
+               if (!reg->obj->pin_count)
+                       avail++;
        }
 
        if (avail == 0)
@@ -2501,12 +2466,12 @@ static int i915_find_fence_reg(struct drm_device *dev,
        avail = I915_FENCE_REG_NONE;
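+       /* None free: steal the least-recently-used fence that is not pinned. */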
        list_for_each_entry(reg, &dev_priv->mm.fence_list,
                            lru_list) {
-               obj_priv = to_intel_bo(reg->obj);
-               if (obj_priv->pin_count)
+               obj = reg->obj;
+               if (obj->pin_count)
                        continue;
 
                /* found one! */
-               avail = obj_priv->fence_reg;
+               avail = obj->fence_reg;
                break;
        }
 
@@ -2516,9 +2481,9 @@ static int i915_find_fence_reg(struct drm_device *dev,
         * might drop that one, causing a use-after-free in it. So hold a
         * private reference to obj like the other callers of put_fence_reg
         * (set_tiling ioctl) do. */
-       drm_gem_object_reference(&obj_priv->base);
-       ret = i915_gem_object_put_fence_reg(&obj_priv->base, interruptible);
-       drm_gem_object_unreference(&obj_priv->base);
+       drm_gem_object_reference(&obj->base);
+       ret = i915_gem_object_put_fence_reg(obj, interruptible);
+       drm_gem_object_unreference(&obj->base);
        if (ret != 0)
                return ret;
 
@@ -2539,39 +2504,38 @@ static int i915_find_fence_reg(struct drm_device *dev,
  * and tiling format.
  */
 int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+i915_gem_object_get_fence_reg(struct drm_i915_gem_object *obj,
                              bool interruptible)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_i915_fence_reg *reg = NULL;
        int ret;
 
        /* Just update our place in the LRU if our fence is getting used. */
-       if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
-               reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+       if (obj->fence_reg != I915_FENCE_REG_NONE) {
+               reg = &dev_priv->fence_regs[obj->fence_reg];
                list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
                return 0;
        }
 
-       switch (obj_priv->tiling_mode) {
+       switch (obj->tiling_mode) {
        case I915_TILING_NONE:
                WARN(1, "allocating a fence for non-tiled object?\n");
                break;
        case I915_TILING_X:
-               if (!obj_priv->stride)
+               if (!obj->stride)
                        return -EINVAL;
-               WARN((obj_priv->stride & (512 - 1)),
+               WARN((obj->stride & (512 - 1)),
                     "object 0x%08x is X tiled but has non-512B pitch\n",
-                    obj_priv->gtt_offset);
+                    obj->gtt_offset);
                break;
        case I915_TILING_Y:
-               if (!obj_priv->stride)
+               if (!obj->stride)
                        return -EINVAL;
-               WARN((obj_priv->stride & (128 - 1)),
+               WARN((obj->stride & (128 - 1)),
                     "object 0x%08x is Y tiled but has non-128B pitch\n",
-                    obj_priv->gtt_offset);
+                    obj->gtt_offset);
                break;
        }
 
@@ -2579,8 +2543,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
        if (ret < 0)
                return ret;
 
-       obj_priv->fence_reg = ret;
-       reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+       obj->fence_reg = ret;
+       reg = &dev_priv->fence_regs[obj->fence_reg];
        list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
 
        reg->obj = obj;
@@ -2602,8 +2566,8 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
        }
 
        trace_i915_gem_object_get_fence(obj,
-                                       obj_priv->fence_reg,
-                                       obj_priv->tiling_mode);
+                                       obj->fence_reg,
+                                       obj->tiling_mode);
 
        return 0;
 }
@@ -2613,40 +2577,38 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
  * @obj: object to clear
  *
  * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
+ * data structures in dev_priv and obj.
  */
 static void
-i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+i915_gem_clear_fence_reg(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       struct drm_i915_fence_reg *reg =
-               &dev_priv->fence_regs[obj_priv->fence_reg];
+       struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[obj->fence_reg];
        uint32_t fence_reg;
 
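+       /* Each generation keeps its fence registers at a different offset. */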
        switch (INTEL_INFO(dev)->gen) {
        case 6:
                I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
-                            (obj_priv->fence_reg * 8), 0);
+                            (obj->fence_reg * 8), 0);
                break;
        case 5:
        case 4:
-               I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+               I915_WRITE64(FENCE_REG_965_0 + (obj->fence_reg * 8), 0);
                break;
        case 3:
-               if (obj_priv->fence_reg >= 8)
-                       fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
+               if (obj->fence_reg >= 8)
+                       fence_reg = FENCE_REG_945_8 + (obj->fence_reg - 8) * 4;
                else
        case 2:
-                       fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+                       fence_reg = FENCE_REG_830_0 + obj->fence_reg * 4;
 
                I915_WRITE(fence_reg, 0);
                break;
        }
 
        reg->obj = NULL;
-       obj_priv->fence_reg = I915_FENCE_REG_NONE;
+       obj->fence_reg = I915_FENCE_REG_NONE;
        list_del_init(&reg->lru_list);
 }
 
@@ -2657,18 +2619,17 @@ i915_gem_clear_fence_reg(struct drm_gem_object *obj)
  * @bool: whether the wait upon the fence is interruptible
  *
  * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
+ * data structures in dev_priv and obj.
  */
 int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+i915_gem_object_put_fence_reg(struct drm_i915_gem_object *obj,
                              bool interruptible)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_i915_fence_reg *reg;
 
-       if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
+       if (obj->fence_reg == I915_FENCE_REG_NONE)
                return 0;
 
        /* If we've changed tiling, GTT-mappings of the object
@@ -2681,7 +2642,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
         * therefore we must wait for any outstanding access to complete
         * before clearing the fence.
         */
-       reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+       reg = &dev_priv->fence_regs[obj->fence_reg];
        if (reg->gpu) {
                int ret;
 
@@ -2706,27 +2667,26 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
  * Finds free space in the GTT aperture and binds the object there.
  */
 static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
                            unsigned alignment,
                            bool map_and_fenceable)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        struct drm_mm_node *free_space;
        gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
        u32 size, fence_size, fence_alignment, unfenced_alignment;
        bool mappable, fenceable;
        int ret;
 
-       if (obj_priv->madv != I915_MADV_WILLNEED) {
+       if (obj->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to bind a purgeable object\n");
                return -EINVAL;
        }
 
-       fence_size = i915_gem_get_gtt_size(obj_priv);
-       fence_alignment = i915_gem_get_gtt_alignment(obj_priv);
-       unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj_priv);
+       fence_size = i915_gem_get_gtt_size(obj);
+       fence_alignment = i915_gem_get_gtt_alignment(obj);
+       unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
 
        if (alignment == 0)
                alignment = map_and_fenceable ? fence_alignment :
@@ -2736,12 +2696,12 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                return -EINVAL;
        }
 
-       size = map_and_fenceable ? fence_size : obj->size;
+       size = map_and_fenceable ? fence_size : obj->base.size;
 
        /* If the object is bigger than the entire aperture, reject it early
         * before evicting everything in a vain attempt to find space.
         */
-       if (obj->size >
+       if (obj->base.size >
            (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
                DRM_ERROR("Attempting to bind an object larger than the aperture\n");
                return -E2BIG;
@@ -2760,16 +2720,16 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 
        if (free_space != NULL) {
                if (map_and_fenceable)
-                       obj_priv->gtt_space =
+                       obj->gtt_space =
                                drm_mm_get_block_range_generic(free_space,
                                                               size, alignment, 0,
                                                               dev_priv->mm.gtt_mappable_end,
                                                               0);
                else
-                       obj_priv->gtt_space =
+                       obj->gtt_space =
                                drm_mm_get_block(free_space, size, alignment);
        }
-       if (obj_priv->gtt_space == NULL) {
+       if (obj->gtt_space == NULL) {
                /* If the gtt is empty and we're still having trouble
                 * fitting our object in, we're out of memory.
                 */
@@ -2783,8 +2743,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 
        ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
        if (ret) {
-               drm_mm_put_block(obj_priv->gtt_space);
-               obj_priv->gtt_space = NULL;
+               drm_mm_put_block(obj->gtt_space);
+               obj->gtt_space = NULL;
 
                if (ret == -ENOMEM) {
                        /* first try to clear up some space from the GTT */
@@ -2810,8 +2770,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
        ret = i915_gem_gtt_bind_object(obj);
        if (ret) {
                i915_gem_object_put_pages_gtt(obj);
-               drm_mm_put_block(obj_priv->gtt_space);
-               obj_priv->gtt_space = NULL;
+               drm_mm_put_block(obj->gtt_space);
+               obj->gtt_space = NULL;
 
                ret = i915_gem_evict_something(dev, size,
                                               alignment, map_and_fenceable);
@@ -2821,65 +2781,61 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                goto search_free;
        }
 
-       obj_priv->gtt_offset = obj_priv->gtt_space->start;
+       obj->gtt_offset = obj->gtt_space->start;
 
        /* keep track of bounds object by adding it to the inactive list */
-       list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-       i915_gem_info_add_gtt(dev_priv, obj_priv);
+       list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+       i915_gem_info_add_gtt(dev_priv, obj);
 
        /* Assert that the object is not currently in any GPU domain. As it
         * wasn't in the GTT, there shouldn't be any way it could have been in
         * a GPU cache
         */
-       BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
-       BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+       BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+       BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-       trace_i915_gem_object_bind(obj, obj_priv->gtt_offset, map_and_fenceable);
+       trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
 
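+       /* Fenceable requires an exactly fence-sized, fence-aligned block;
+        * mappable requires the object to end inside the mappable aperture.
+        */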
        fenceable =
-               obj_priv->gtt_space->size == fence_size &&
-               (obj_priv->gtt_space->start & (fence_alignment -1)) == 0;
+               obj->gtt_space->size == fence_size &&
+               (obj->gtt_space->start & (fence_alignment - 1)) == 0;
 
        mappable =
-               obj_priv->gtt_offset + obj->size <= dev_priv->mm.gtt_mappable_end;
+               obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
 
-       obj_priv->map_and_fenceable = mappable && fenceable;
+       obj->map_and_fenceable = mappable && fenceable;
 
        return 0;
 }
 
 void
-i915_gem_clflush_object(struct drm_gem_object *obj)
+i915_gem_clflush_object(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
-
        /* If we don't have a page list set up, then we're not pinned
         * to GPU, and we can ignore the cache flush because it'll happen
         * again at bind time.
         */
-       if (obj_priv->pages == NULL)
+       if (obj->pages == NULL)
                return;
 
        trace_i915_gem_object_clflush(obj);
 
-       drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
+       drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
 }
 
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
+i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj,
                                       bool pipelined)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
 
-       if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
+       if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
                return 0;
 
        /* Queue the GPU write cache flushing we need. */
-       i915_gem_flush_ring(dev, NULL,
-                           to_intel_bo(obj)->ring,
-                           0, obj->write_domain);
-       BUG_ON(obj->write_domain);
+       i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+       BUG_ON(obj->base.write_domain);
 
        if (pipelined)
                return 0;
@@ -2889,11 +2845,11 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
 
 /** Flushes the GTT write domain for the object if it's dirty. */
 static void
-i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
 {
        uint32_t old_write_domain;
 
-       if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+       if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
                return;
 
        /* No actual flushing is required for the GTT write domain.   Writes
@@ -2902,30 +2858,30 @@ i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
         */
        i915_gem_release_mmap(obj);
 
-       old_write_domain = obj->write_domain;
-       obj->write_domain = 0;
+       old_write_domain = obj->base.write_domain;
+       obj->base.write_domain = 0;
 
        trace_i915_gem_object_change_domain(obj,
-                                           obj->read_domains,
+                                           obj->base.read_domains,
                                            old_write_domain);
 }
 
 /** Flushes the CPU write domain for the object if it's dirty. */
 static void
-i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 {
        uint32_t old_write_domain;
 
-       if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+       if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
                return;
 
        i915_gem_clflush_object(obj);
        intel_gtt_chipset_flush();
-       old_write_domain = obj->write_domain;
-       obj->write_domain = 0;
+       old_write_domain = obj->base.write_domain;
+       obj->base.write_domain = 0;
 
        trace_i915_gem_object_change_domain(obj,
-                                           obj->read_domains,
+                                           obj->base.read_domains,
                                            old_write_domain);
 }
 
@@ -2936,14 +2892,13 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
  * flushes to occur.
  */
 int
-i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, int write)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
        /* Not valid to be called on unbound objects. */
-       if (obj_priv->gtt_space == NULL)
+       if (obj->gtt_space == NULL)
                return -EINVAL;
 
        ret = i915_gem_object_flush_gpu_write_domain(obj, false);
@@ -2958,18 +2913,18 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
                        return ret;
        }
 
-       old_write_domain = obj->write_domain;
-       old_read_domains = obj->read_domains;
+       old_write_domain = obj->base.write_domain;
+       old_read_domains = obj->base.read_domains;
 
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
-       BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
-       obj->read_domains |= I915_GEM_DOMAIN_GTT;
+       BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+       obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
        if (write) {
-               obj->read_domains = I915_GEM_DOMAIN_GTT;
-               obj->write_domain = I915_GEM_DOMAIN_GTT;
-               obj_priv->dirty = 1;
+               obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+               obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+               obj->dirty = 1;
        }
 
        trace_i915_gem_object_change_domain(obj,
@@ -2984,15 +2939,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
  * wait, as in modesetting process we're not supposed to be interrupted.
  */
 int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
                                     bool pipelined)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_read_domains;
        int ret;
 
        /* Not valid to be called on unbound objects. */
-       if (obj_priv->gtt_space == NULL)
+       if (obj->gtt_space == NULL)
                return -EINVAL;
 
        ret = i915_gem_object_flush_gpu_write_domain(obj, true);
@@ -3008,12 +2962,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
 
        i915_gem_object_flush_cpu_write_domain(obj);
 
-       old_read_domains = obj->read_domains;
-       obj->read_domains |= I915_GEM_DOMAIN_GTT;
+       old_read_domains = obj->base.read_domains;
+       obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
 
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
-                                           obj->write_domain);
+                                           obj->base.write_domain);
 
        return 0;
 }
@@ -3026,10 +2980,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
                return 0;
 
        if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
-               i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+               i915_gem_flush_ring(obj->base.dev, obj->ring,
                                    0, obj->base.write_domain);
 
-       return i915_gem_object_wait_rendering(&obj->base, interruptible);
+       return i915_gem_object_wait_rendering(obj, interruptible);
 }
 
 /**
@@ -3039,7 +2993,7 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
  * flushes to occur.
  */
 static int
-i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, int write)
 {
        uint32_t old_write_domain, old_read_domains;
        int ret;
@@ -3061,27 +3015,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
                        return ret;
        }
 
-       old_write_domain = obj->write_domain;
-       old_read_domains = obj->read_domains;
+       old_write_domain = obj->base.write_domain;
+       old_read_domains = obj->base.read_domains;
 
        /* Flush the CPU cache if it's still invalid. */
-       if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+       if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
                i915_gem_clflush_object(obj);
 
-               obj->read_domains |= I915_GEM_DOMAIN_CPU;
+               obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
        }
 
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
-       BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+       BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
        /* If we're writing through the CPU, then the GPU read domains will
         * need to be invalidated at next use.
         */
        if (write) {
-               obj->read_domains = I915_GEM_DOMAIN_CPU;
-               obj->write_domain = I915_GEM_DOMAIN_CPU;
+               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
 
        trace_i915_gem_object_change_domain(obj,
@@ -3203,20 +3157,18 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
  *             drm_agp_chipset_flush
  */
 static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
+i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
                                  struct intel_ring_buffer *ring,
                                  struct change_domains *cd)
 {
-       struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
-       uint32_t                        invalidate_domains = 0;
-       uint32_t                        flush_domains = 0;
+       uint32_t invalidate_domains = 0, flush_domains = 0;
 
        /*
         * If the object isn't moving to a new write domain,
         * let the object stay in multiple read domains
         */
-       if (obj->pending_write_domain == 0)
-               obj->pending_read_domains |= obj->read_domains;
+       if (obj->base.pending_write_domain == 0)
+               obj->base.pending_read_domains |= obj->base.read_domains;
 
        /*
         * Flush the current write domain if
@@ -3224,18 +3176,18 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
         * any read domains which differ from the old
         * write domain
         */
-       if (obj->write_domain &&
-           (obj->write_domain != obj->pending_read_domains ||
-            obj_priv->ring != ring)) {
-               flush_domains |= obj->write_domain;
+       if (obj->base.write_domain &&
+           (obj->base.write_domain != obj->base.pending_read_domains ||
+            obj->ring != ring)) {
+               flush_domains |= obj->base.write_domain;
                invalidate_domains |=
-                       obj->pending_read_domains & ~obj->write_domain;
+                       obj->base.pending_read_domains & ~obj->base.write_domain;
        }
        /*
         * Invalidate any read caches which may have
         * stale data. That is, any new read domains.
         */
-       invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
+       invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
        if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
                i915_gem_clflush_object(obj);
 
@@ -3249,13 +3201,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
         * write_domains).  So if we have a current write domain that we
         * aren't changing, set pending_write_domain to that.
         */
-       if (flush_domains == 0 && obj->pending_write_domain == 0)
-               obj->pending_write_domain = obj->write_domain;
+       if (flush_domains == 0 && obj->base.pending_write_domain == 0)
+               obj->base.pending_write_domain = obj->base.write_domain;
 
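+       /* Report the accumulated flushes, invalidations and rings to the caller. */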
        cd->invalidate_domains |= invalidate_domains;
        cd->flush_domains |= flush_domains;
        if (flush_domains & I915_GEM_GPU_DOMAINS)
-               cd->flush_rings |= obj_priv->ring->id;
+               cd->flush_rings |= obj->ring->id;
        if (invalidate_domains & I915_GEM_GPU_DOMAINS)
                cd->flush_rings |= ring->id;
 }
@@ -3267,30 +3219,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
  * and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
  */
 static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
-       if (!obj_priv->page_cpu_valid)
+       if (!obj->page_cpu_valid)
                return;
 
        /* If we're partially in the CPU read domain, finish moving it in.
         */
-       if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+       if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
                int i;
 
-               for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
-                       if (obj_priv->page_cpu_valid[i])
+               for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
+                       if (obj->page_cpu_valid[i])
                                continue;
-                       drm_clflush_pages(obj_priv->pages + i, 1);
+                       drm_clflush_pages(obj->pages + i, 1);
                }
        }
 
        /* Free the page_cpu_valid mappings which are now stale, whether
         * or not we've got I915_GEM_DOMAIN_CPU.
         */
-       kfree(obj_priv->page_cpu_valid);
-       obj_priv->page_cpu_valid = NULL;
+       kfree(obj->page_cpu_valid);
+       obj->page_cpu_valid = NULL;
 }
 
 /**
@@ -3306,14 +3256,13 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
  * flushes to occur.
  */
 static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
                                          uint64_t offset, uint64_t size)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        uint32_t old_read_domains;
        int i, ret;
 
-       if (offset == 0 && size == obj->size)
+       if (offset == 0 && size == obj->base.size)
                return i915_gem_object_set_to_cpu_domain(obj, 0);
 
        ret = i915_gem_object_flush_gpu_write_domain(obj, false);
@@ -3322,45 +3271,45 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
        i915_gem_object_flush_gtt_write_domain(obj);
 
        /* If we're already fully in the CPU read domain, we're done. */
-       if (obj_priv->page_cpu_valid == NULL &&
-           (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+       if (obj->page_cpu_valid == NULL &&
+           (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
                return 0;
 
        /* Otherwise, create/clear the per-page CPU read domain flag if we're
         * newly adding I915_GEM_DOMAIN_CPU
         */
-       if (obj_priv->page_cpu_valid == NULL) {
-               obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
-                                                  GFP_KERNEL);
-               if (obj_priv->page_cpu_valid == NULL)
+       if (obj->page_cpu_valid == NULL) {
+               obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
+                                             GFP_KERNEL);
+               if (obj->page_cpu_valid == NULL)
                        return -ENOMEM;
-       } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
-               memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+       } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+               memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
 
        /* Flush the cache on any pages that are still invalid from the CPU's
         * perspective.
         */
        for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
             i++) {
-               if (obj_priv->page_cpu_valid[i])
+               if (obj->page_cpu_valid[i])
                        continue;
 
-               drm_clflush_pages(obj_priv->pages + i, 1);
+               drm_clflush_pages(obj->pages + i, 1);
 
-               obj_priv->page_cpu_valid[i] = 1;
+               obj->page_cpu_valid[i] = 1;
        }
 
        /* It should now be out of any other write domains, and we can update
         * the domain values for our changes.
         */
-       BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+       BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
 
-       old_read_domains = obj->read_domains;
-       obj->read_domains |= I915_GEM_DOMAIN_CPU;
+       old_read_domains = obj->base.read_domains;
+       obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
 
        trace_i915_gem_object_change_domain(obj,
                                            old_read_domains,
-                                           obj->write_domain);
+                                           obj->base.write_domain);
 
        return 0;
 }
@@ -3490,7 +3439,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
                uint32_t __iomem *reloc_entry;
                void __iomem *reloc_page;
 
-               ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+               ret = i915_gem_object_set_to_gtt_domain(obj, 1);
                if (ret)
                        goto err;
 
@@ -3564,14 +3513,14 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
 static int
 i915_gem_execbuffer_relocate(struct drm_device *dev,
                             struct drm_file *file,
-                            struct drm_gem_object **object_list,
+                            struct drm_i915_gem_object **object_list,
                             struct drm_i915_gem_exec_object2 *exec_list,
                             int count)
 {
        int i, ret;
 
        for (i = 0; i < count; i++) {
-               struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+               struct drm_i915_gem_object *obj = object_list[i];
                obj->base.pending_read_domains = 0;
                obj->base.pending_write_domain = 0;
                ret = i915_gem_execbuffer_relocate_object(obj, file,
@@ -3586,7 +3535,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
 static int
 i915_gem_execbuffer_reserve(struct drm_device *dev,
                            struct drm_file *file,
-                           struct drm_gem_object **object_list,
+                           struct drm_i915_gem_object **object_list,
                            struct drm_i915_gem_exec_object2 *exec_list,
                            int count)
 {
@@ -3599,7 +3548,7 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
                ret = 0;
                for (i = 0; i < count; i++) {
                        struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
-                       struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+                       struct drm_i915_gem_object *obj = object_list[i];
                        bool need_fence =
                                entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
                                obj->tiling_mode != I915_TILING_NONE;
@@ -3610,12 +3559,12 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
 
                        /* Check fence reg constraints and rebind if necessary */
                        if (need_mappable && !obj->map_and_fenceable) {
-                               ret = i915_gem_object_unbind(&obj->base);
+                               ret = i915_gem_object_unbind(obj);
                                if (ret)
                                        break;
                        }
 
-                       ret = i915_gem_object_pin(&obj->base,
+                       ret = i915_gem_object_pin(obj,
                                                  entry->alignment,
                                                  need_mappable);
                        if (ret)
@@ -3626,9 +3575,9 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
                         * to properly handle blits to/from tiled surfaces.
                         */
                        if (need_fence) {
-                               ret = i915_gem_object_get_fence_reg(&obj->base, true);
+                               ret = i915_gem_object_get_fence_reg(obj, true);
                                if (ret) {
-                                       i915_gem_object_unpin(&obj->base);
+                                       i915_gem_object_unpin(obj);
                                        break;
                                }
 
@@ -3658,17 +3607,15 @@ i915_gem_execbuffer_reserve(struct drm_device *dev,
 static int
 i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
                                  struct drm_file *file,
-                                 struct drm_gem_object **object_list,
+                                 struct drm_i915_gem_object **object_list,
                                  struct drm_i915_gem_exec_object2 *exec_list,
                                  int count)
 {
        struct drm_i915_gem_relocation_entry *reloc;
        int i, total, ret;
 
-       for (i = 0; i < count; i++) {
-               struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-               obj->in_execbuffer = false;
-       }
+       for (i = 0; i < count; i++)
+               object_list[i]->in_execbuffer = false;
 
        mutex_unlock(&dev->struct_mutex);
 
@@ -3713,7 +3660,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
 
        total = 0;
        for (i = 0; i < count; i++) {
-               struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+               struct drm_i915_gem_object *obj = object_list[i];
                obj->base.pending_read_domains = 0;
                obj->base.pending_write_domain = 0;
                ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
@@ -3740,7 +3687,7 @@ static int
 i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
                                struct drm_file *file,
                                struct intel_ring_buffer *ring,
-                               struct drm_gem_object **objects,
+                               struct drm_i915_gem_object **objects,
                                int count)
 {
        struct change_domains cd;
@@ -3759,17 +3706,17 @@ i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
                         cd.invalidate_domains,
                         cd.flush_domains);
 #endif
-               i915_gem_flush(dev, file,
+               i915_gem_flush(dev,
                               cd.invalidate_domains,
                               cd.flush_domains,
                               cd.flush_rings);
        }
 
        for (i = 0; i < count; i++) {
-               struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+               struct drm_i915_gem_object *obj = objects[i];
                /* XXX replace with semaphores */
                if (obj->ring && ring != obj->ring) {
-                       ret = i915_gem_object_wait_rendering(&obj->base, true);
+                       ret = i915_gem_object_wait_rendering(obj, true);
                        if (ret)
                                return ret;
                }
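
Worth making the rule in the loop above explicit: until hardware semaphores land (the XXX), coherency between rings is enforced by a CPU-side stall whenever an object's last writer was a different ring. A minimal sketch of that check, assuming struct_mutex is held; the helper name is invented for illustration:

static int
sync_obj_to_ring(struct drm_i915_gem_object *obj,
		 struct intel_ring_buffer *to)
{
	/* No previous user, or same ring: ordering is already implied. */
	if (obj->ring == NULL || obj->ring == to)
		return 0;

	/* Cross-ring hand-over: wait for the last rendering to retire. */
	return i915_gem_object_wait_rendering(obj, true);
}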
@@ -3891,8 +3838,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                       struct drm_i915_gem_exec_object2 *exec_list)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object **object_list = NULL;
-       struct drm_gem_object *batch_obj;
+       struct drm_i915_gem_object **object_list = NULL;
+       struct drm_i915_gem_object *batch_obj;
        struct drm_clip_rect *cliprects = NULL;
        struct drm_i915_gem_request *request = NULL;
        int ret, i, flips;
@@ -3987,29 +3934,29 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        /* Look up object handles */
        for (i = 0; i < args->buffer_count; i++) {
-               struct drm_i915_gem_object *obj_priv;
+               struct drm_i915_gem_object *obj;
 
-               object_list[i] = drm_gem_object_lookup(dev, file,
-                                                      exec_list[i].handle);
-               if (object_list[i] == NULL) {
+               obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+                                                       exec_list[i].handle));
+               if (obj == NULL) {
                        DRM_ERROR("Invalid object handle %d at index %d\n",
                                   exec_list[i].handle, i);
                        /* prevent error path from reading uninitialized data */
-                       args->buffer_count = i + 1;
+                       args->buffer_count = i;
                        ret = -ENOENT;
                        goto err;
                }
+               object_list[i] = obj;
 
-               obj_priv = to_intel_bo(object_list[i]);
-               if (obj_priv->in_execbuffer) {
+               if (obj->in_execbuffer) {
                        DRM_ERROR("Object %p appears more than once in object list\n",
-                                  object_list[i]);
+                                  obj);
                        /* prevent error path from reading uninitialized data */
                        args->buffer_count = i + 1;
                        ret = -EINVAL;
                        goto err;
                }
-               obj_priv->in_execbuffer = true;
+               obj->in_execbuffer = true;
        }
 
        /* Move the objects en-masse into the GTT, evicting if necessary. */
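
The lookup pattern above is the crux of the whole conversion: drm_gem_object_lookup() still traffics in the core type, and to_intel_bo() upcasts the result. A sketch of what that relies on (the real definitions live in i915_drv.h; the container_of spelling is the usual one):

struct drm_i915_gem_object {
	struct drm_gem_object base;	/* embedded, and kept as the first member */
	/* ... driver-private state: gtt_space, gtt_offset, tiling_mode, ... */
};

#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)

Because base sits at offset zero, to_intel_bo(NULL) folds back to NULL, which is what allows a failed lookup to be tested directly with if (obj == NULL) as done here.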
@@ -4037,15 +3984,15 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
        /* Set the pending read domains for the batch buffer to COMMAND */
        batch_obj = object_list[args->buffer_count-1];
-       if (batch_obj->pending_write_domain) {
+       if (batch_obj->base.pending_write_domain) {
                DRM_ERROR("Attempting to use self-modifying batch buffer\n");
                ret = -EINVAL;
                goto err;
        }
-       batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+       batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
 
        /* Sanity check the batch buffer */
-       exec_offset = to_intel_bo(batch_obj)->gtt_offset;
+       exec_offset = batch_obj->gtt_offset;
        ret = i915_gem_check_execbuffer(args, exec_offset);
        if (ret != 0) {
                DRM_ERROR("execbuf with invalid offset/length\n");
@@ -4077,8 +4024,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         */
        flips = 0;
        for (i = 0; i < args->buffer_count; i++) {
-               if (object_list[i]->write_domain)
-                       flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
+               if (object_list[i]->base.write_domain)
+                       flips |= atomic_read(&object_list[i]->pending_flip);
        }
        if (flips) {
                int plane, flip_mask;
@@ -4110,23 +4057,22 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
        }
 
        for (i = 0; i < args->buffer_count; i++) {
-               struct drm_gem_object *obj = object_list[i];
+               struct drm_i915_gem_object *obj = object_list[i];
 
-               obj->read_domains = obj->pending_read_domains;
-               obj->write_domain = obj->pending_write_domain;
+               obj->base.read_domains = obj->base.pending_read_domains;
+               obj->base.write_domain = obj->base.pending_write_domain;
 
                i915_gem_object_move_to_active(obj, ring);
-               if (obj->write_domain) {
-                       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-                       obj_priv->dirty = 1;
-                       list_move_tail(&obj_priv->gpu_write_list,
+               if (obj->base.write_domain) {
+                       obj->dirty = 1;
+                       list_move_tail(&obj->gpu_write_list,
                                       &ring->gpu_write_list);
                        intel_mark_busy(dev, obj);
                }
 
                trace_i915_gem_object_change_domain(obj,
-                                                   obj->read_domains,
-                                                   obj->write_domain);
+                                                   obj->base.read_domains,
+                                                   obj->base.write_domain);
        }
 
        /*
@@ -4142,11 +4088,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 err:
        for (i = 0; i < args->buffer_count; i++) {
-               if (object_list[i] == NULL)
-                   break;
-
-               to_intel_bo(object_list[i])->in_execbuffer = false;
-               drm_gem_object_unreference(object_list[i]);
+               object_list[i]->in_execbuffer = false;
+               drm_gem_object_unreference(&object_list[i]->base);
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -4165,7 +4108,7 @@ pre_mutex_err:
  */
 int
 i915_gem_execbuffer(struct drm_device *dev, void *data,
-                   struct drm_file *file_priv)
+                   struct drm_file *file)
 {
        struct drm_i915_gem_execbuffer *args = data;
        struct drm_i915_gem_execbuffer2 exec2;
@@ -4227,7 +4170,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
        exec2.cliprects_ptr = args->cliprects_ptr;
        exec2.flags = I915_EXEC_RENDER;
 
-       ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
+       ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                for (i = 0; i < args->buffer_count; i++)
@@ -4252,7 +4195,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 
 int
 i915_gem_execbuffer2(struct drm_device *dev, void *data,
-                    struct drm_file *file_priv)
+                    struct drm_file *file)
 {
        struct drm_i915_gem_execbuffer2 *args = data;
        struct drm_i915_gem_exec_object2 *exec2_list = NULL;
@@ -4285,7 +4228,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
                return -EFAULT;
        }
 
-       ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
+       ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
        if (!ret) {
                /* Copy the new buffer offsets back to the user's exec list. */
                ret = copy_to_user((struct drm_i915_relocation_entry __user *)
@@ -4305,109 +4248,106 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
 }
 
 int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment,
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+                   uint32_t alignment,
                    bool map_and_fenceable)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
-       BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+       BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
-       BUG_ON(map_and_fenceable && !map_and_fenceable);
        WARN_ON(i915_verify_lists(dev));
 
-       if (obj_priv->gtt_space != NULL) {
-               if ((alignment && obj_priv->gtt_offset & (alignment - 1)) ||
-                   (map_and_fenceable && !obj_priv->map_and_fenceable)) {
-                       WARN(obj_priv->pin_count,
+       if (obj->gtt_space != NULL) {
+               if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+                   (map_and_fenceable && !obj->map_and_fenceable)) {
+                       WARN(obj->pin_count,
                             "bo is already pinned with incorrect alignment:"
                             " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
                             " obj->map_and_fenceable=%d\n",
-                            obj_priv->gtt_offset, alignment,
+                            obj->gtt_offset, alignment,
                             map_and_fenceable,
-                            obj_priv->map_and_fenceable);
+                            obj->map_and_fenceable);
                        ret = i915_gem_object_unbind(obj);
                        if (ret)
                                return ret;
                }
        }
 
-       if (obj_priv->gtt_space == NULL) {
+       if (obj->gtt_space == NULL) {
                ret = i915_gem_object_bind_to_gtt(obj, alignment,
                                                  map_and_fenceable);
                if (ret)
                        return ret;
        }
 
-       if (obj_priv->pin_count++ == 0) {
-               i915_gem_info_add_pin(dev_priv, obj_priv, map_and_fenceable);
-               if (!obj_priv->active)
-                       list_move_tail(&obj_priv->mm_list,
+       if (obj->pin_count++ == 0) {
+               i915_gem_info_add_pin(dev_priv, obj, map_and_fenceable);
+               if (!obj->active)
+                       list_move_tail(&obj->mm_list,
                                       &dev_priv->mm.pinned_list);
        }
-       BUG_ON(!obj_priv->pin_mappable && map_and_fenceable);
+       BUG_ON(!obj->pin_mappable && map_and_fenceable);
 
        WARN_ON(i915_verify_lists(dev));
        return 0;
 }
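
The new signature also tidies the calling convention for pinning: consumers hold the driver type end to end and only spell out &obj->base at the GEM core boundary. A hypothetical consumer (the function name is invented), assuming struct_mutex is held:

static int pin_for_display(struct drm_i915_gem_object *obj)
{
	int ret;

	/* Scanout must sit in the mappable aperture and be fenceable. */
	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		return ret;

	/* ... program the display base register with obj->gtt_offset ... */

	i915_gem_object_unpin(obj);	/* balance the pin when finished */
	return 0;
}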
 
 void
-i915_gem_object_unpin(struct drm_gem_object *obj)
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        WARN_ON(i915_verify_lists(dev));
-       BUG_ON(obj_priv->pin_count == 0);
-       BUG_ON(obj_priv->gtt_space == NULL);
+       BUG_ON(obj->pin_count == 0);
+       BUG_ON(obj->gtt_space == NULL);
 
-       if (--obj_priv->pin_count == 0) {
-               if (!obj_priv->active)
-                       list_move_tail(&obj_priv->mm_list,
+       if (--obj->pin_count == 0) {
+               if (!obj->active)
+                       list_move_tail(&obj->mm_list,
                                       &dev_priv->mm.inactive_list);
-               i915_gem_info_remove_pin(dev_priv, obj_priv);
+               i915_gem_info_remove_pin(dev_priv, obj);
        }
        WARN_ON(i915_verify_lists(dev));
 }
 
 int
 i915_gem_pin_ioctl(struct drm_device *dev, void *data,
-                  struct drm_file *file_priv)
+                  struct drm_file *file)
 {
        struct drm_i915_gem_pin *args = data;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
-       obj_priv = to_intel_bo(obj);
 
-       if (obj_priv->madv != I915_MADV_WILLNEED) {
+       if (obj->madv != I915_MADV_WILLNEED) {
                DRM_ERROR("Attempting to pin a purgeable buffer\n");
                ret = -EINVAL;
                goto out;
        }
 
-       if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+       if (obj->pin_filp != NULL && obj->pin_filp != file) {
                DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
        }
 
-       obj_priv->user_pin_count++;
-       obj_priv->pin_filp = file_priv;
-       if (obj_priv->user_pin_count == 1) {
+       obj->user_pin_count++;
+       obj->pin_filp = file;
+       if (obj->user_pin_count == 1) {
                ret = i915_gem_object_pin(obj, args->alignment, true);
                if (ret)
                        goto out;
@@ -4417,9 +4357,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
         * as the X server doesn't manage domains yet
         */
        i915_gem_object_flush_cpu_write_domain(obj);
-       args->offset = obj_priv->gtt_offset;
+       args->offset = obj->gtt_offset;
 out:
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -4427,38 +4367,36 @@ unlock:
 
 int
 i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
-                    struct drm_file *file_priv)
+                    struct drm_file *file)
 {
        struct drm_i915_gem_pin *args = data;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
-       obj_priv = to_intel_bo(obj);
 
-       if (obj_priv->pin_filp != file_priv) {
+       if (obj->pin_filp != file) {
                DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
                          args->handle);
                ret = -EINVAL;
                goto out;
        }
-       obj_priv->user_pin_count--;
-       if (obj_priv->user_pin_count == 0) {
-               obj_priv->pin_filp = NULL;
+       obj->user_pin_count--;
+       if (obj->user_pin_count == 0) {
+               obj->pin_filp = NULL;
                i915_gem_object_unpin(obj);
        }
 
 out:
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -4466,52 +4404,49 @@ unlock:
 
 int
 i915_gem_busy_ioctl(struct drm_device *dev, void *data,
-                   struct drm_file *file_priv)
+                   struct drm_file *file)
 {
        struct drm_i915_gem_busy *args = data;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
-       obj_priv = to_intel_bo(obj);
 
        /* Count all active objects as busy, even if they are currently not used
         * by the gpu. Users of this interface expect objects to eventually
         * become non-busy without any further actions, therefore emit any
         * necessary flushes here.
         */
-       args->busy = obj_priv->active;
+       args->busy = obj->active;
        if (args->busy) {
                /* Unconditionally flush objects, even when the gpu still uses this
                 * object. Userspace calling this function indicates that it wants to
                 * use this buffer sooner rather than later, so issuing the required
                 * flush earlier is beneficial.
                 */
-               if (obj->write_domain & I915_GEM_GPU_DOMAINS)
-                       i915_gem_flush_ring(dev, file_priv,
-                                           obj_priv->ring,
-                                           0, obj->write_domain);
+               if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+                       i915_gem_flush_ring(dev, obj->ring,
+                                           0, obj->base.write_domain);
 
                /* Update the active list for the hardware's current position.
                 * Otherwise this only updates on a delayed timer or when irqs
                 * are actually unmasked, and our working set ends up being
                 * larger than required.
                 */
-               i915_gem_retire_requests_ring(dev, obj_priv->ring);
+               i915_gem_retire_requests_ring(dev, obj->ring);
 
-               args->busy = obj_priv->active;
+               args->busy = obj->active;
        }
 
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
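
The contract spelled out in the comments above (busy objects eventually go idle with no further calls, because the flush is issued here) is what makes the classic userspace polling loop work. A userspace-side sketch, using the structures from i915_drm.h; include paths vary with the libdrm install, and the EINTR retry that libdrm's drmIoctl() performs is elided:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int bo_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy = { .handle = handle };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy))
		return -1;		/* e.g. bad handle */

	return busy.busy != 0;
}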
@@ -4529,8 +4464,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
        struct drm_i915_gem_madvise *args = data;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        switch (args->madv) {
@@ -4545,37 +4479,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
        if (obj == NULL) {
                ret = -ENOENT;
                goto unlock;
        }
-       obj_priv = to_intel_bo(obj);
 
-       if (obj_priv->pin_count) {
+       if (obj->pin_count) {
                ret = -EINVAL;
                goto out;
        }
 
-       if (obj_priv->madv != __I915_MADV_PURGED)
-               obj_priv->madv = args->madv;
+       if (obj->madv != __I915_MADV_PURGED)
+               obj->madv = args->madv;
 
        /* if the object is no longer bound, discard its backing storage */
-       if (i915_gem_object_is_purgeable(obj_priv) &&
-           obj_priv->gtt_space == NULL)
+       if (i915_gem_object_is_purgeable(obj) &&
+           obj->gtt_space == NULL)
                i915_gem_object_truncate(obj);
 
-       args->retained = obj_priv->madv != __I915_MADV_PURGED;
+       args->retained = obj->madv != __I915_MADV_PURGED;
 
 out:
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
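
Madvise is the other half of userspace buffer caching: a bufmgr marks idle buffers DONTNEED and, when recycling one, must check retained, since the kernel may have purged the backing storage in the meantime. A sketch along the same lines as the busy example above:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Returns 1 if the pages survived, 0 if they were purged, -1 on error. */
static int bo_madvise(int fd, uint32_t handle, uint32_t state)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = state,	/* I915_MADV_WILLNEED or I915_MADV_DONTNEED */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;

	return madv.retained;
}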
 
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
-                                             size_t size)
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+                                                 size_t size)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj;
@@ -4605,7 +4538,7 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
        /* Avoid an unnecessary call to unbind on the first bind. */
        obj->map_and_fenceable = true;
 
-       return &obj->base;
+       return obj;
 }
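
With i915_gem_alloc_object() returning the driver type, the in-driver allocation idiom becomes: keep obj as-is and reach for &obj->base only when talking to the GEM core. A condensed, hypothetical example of the shape (mirroring the pipe-control setup further down; struct_mutex held):

static struct drm_i915_gem_object *
alloc_pinned_scratch(struct drm_device *dev)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL)
		return NULL;

	if (i915_gem_object_pin(obj, 4096, true)) {
		/* refcounting still goes through the embedded core object */
		drm_gem_object_unreference(&obj->base);
		return NULL;
	}

	return obj;
}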
 
 int i915_gem_init_object(struct drm_gem_object *obj)
@@ -4615,42 +4548,41 @@ int i915_gem_init_object(struct drm_gem_object *obj)
        return 0;
 }
 
-static void i915_gem_free_object_tail(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
        ret = i915_gem_object_unbind(obj);
        if (ret == -ERESTARTSYS) {
-               list_move(&obj_priv->mm_list,
+               list_move(&obj->mm_list,
                          &dev_priv->mm.deferred_free_list);
                return;
        }
 
-       if (obj->map_list.map)
+       if (obj->base.map_list.map)
                i915_gem_free_mmap_offset(obj);
 
-       drm_gem_object_release(obj);
-       i915_gem_info_remove_obj(dev_priv, obj->size);
+       drm_gem_object_release(&obj->base);
+       i915_gem_info_remove_obj(dev_priv, obj->base.size);
 
-       kfree(obj_priv->page_cpu_valid);
-       kfree(obj_priv->bit_17);
-       kfree(obj_priv);
+       kfree(obj->page_cpu_valid);
+       kfree(obj->bit_17);
+       kfree(obj);
 }
 
-void i915_gem_free_object(struct drm_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
-       struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+       struct drm_device *dev = obj->base.dev;
 
        trace_i915_gem_object_destroy(obj);
 
-       while (obj_priv->pin_count > 0)
+       while (obj->pin_count > 0)
                i915_gem_object_unpin(obj);
 
-       if (obj_priv->phys_obj)
+       if (obj->phys_obj)
                i915_gem_detach_phys_object(dev, obj);
 
        i915_gem_free_object_tail(obj);
@@ -4710,8 +4642,7 @@ static int
 i915_gem_init_pipe_control(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        obj = i915_gem_alloc_object(dev, 4096);
@@ -4720,15 +4651,14 @@ i915_gem_init_pipe_control(struct drm_device *dev)
                ret = -ENOMEM;
                goto err;
        }
-       obj_priv = to_intel_bo(obj);
-       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+       obj->agp_type = AGP_USER_CACHED_MEMORY;
 
        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret)
                goto err_unref;
 
-       dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
-       dev_priv->seqno_page =  kmap(obj_priv->pages[0]);
+       dev_priv->seqno_gfx_addr = obj->gtt_offset;
+       dev_priv->seqno_page = kmap(obj->pages[0]);
        if (dev_priv->seqno_page == NULL)
                goto err_unpin;
 
@@ -4740,7 +4670,7 @@ i915_gem_init_pipe_control(struct drm_device *dev)
 err_unpin:
        i915_gem_object_unpin(obj);
 err_unref:
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 err:
        return ret;
 }
@@ -4750,14 +4680,12 @@ static void
 i915_gem_cleanup_pipe_control(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
 
        obj = dev_priv->seqno_obj;
-       obj_priv = to_intel_bo(obj);
-       kunmap(obj_priv->pages[0]);
+       kunmap(obj->pages[0]);
        i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
        dev_priv->seqno_obj = NULL;
 
        dev_priv->seqno_page = NULL;
@@ -5035,20 +4963,18 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
 }
 
 void i915_gem_detach_phys_object(struct drm_device *dev,
-                                struct drm_gem_object *obj)
+                                struct drm_i915_gem_object *obj)
 {
-       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        char *vaddr;
        int i;
        int page_count;
 
-       if (!obj_priv->phys_obj)
+       if (!obj->phys_obj)
                return;
-       vaddr = obj_priv->phys_obj->handle->vaddr;
-
-       page_count = obj->size / PAGE_SIZE;
+       vaddr = obj->phys_obj->handle->vaddr;
 
+       page_count = obj->base.size / PAGE_SIZE;
        for (i = 0; i < page_count; i++) {
                struct page *page = read_cache_page_gfp(mapping, i,
                                                        GFP_HIGHUSER | __GFP_RECLAIMABLE);
@@ -5066,19 +4992,18 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
        }
        intel_gtt_chipset_flush();
 
-       obj_priv->phys_obj->cur_obj = NULL;
-       obj_priv->phys_obj = NULL;
+       obj->phys_obj->cur_obj = NULL;
+       obj->phys_obj = NULL;
 }
 
 int
 i915_gem_attach_phys_object(struct drm_device *dev,
-                           struct drm_gem_object *obj,
+                           struct drm_i915_gem_object *obj,
                            int id,
                            int align)
 {
-       struct address_space *mapping = obj->filp->f_path.dentry->d_inode->i_mapping;
+       struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv;
        int ret = 0;
        int page_count;
        int i;
@@ -5086,10 +5011,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
        if (id > I915_MAX_PHYS_OBJECT)
                return -EINVAL;
 
-       obj_priv = to_intel_bo(obj);
-
-       if (obj_priv->phys_obj) {
-               if (obj_priv->phys_obj->id == id)
+       if (obj->phys_obj) {
+               if (obj->phys_obj->id == id)
                        return 0;
                i915_gem_detach_phys_object(dev, obj);
        }
@@ -5097,18 +5020,19 @@ i915_gem_attach_phys_object(struct drm_device *dev,
        /* create a new object */
        if (!dev_priv->mm.phys_objs[id - 1]) {
                ret = i915_gem_init_phys_object(dev, id,
-                                               obj->size, align);
+                                               obj->base.size, align);
                if (ret) {
-                       DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
+                       DRM_ERROR("failed to init phys object %d size: %zu\n",
+                                 id, obj->base.size);
                        return ret;
                }
        }
 
        /* bind to the object */
-       obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
-       obj_priv->phys_obj->cur_obj = obj;
+       obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
+       obj->phys_obj->cur_obj = obj;
 
-       page_count = obj->size / PAGE_SIZE;
+       page_count = obj->base.size / PAGE_SIZE;
 
        for (i = 0; i < page_count; i++) {
                struct page *page;
@@ -5120,7 +5044,7 @@ i915_gem_attach_phys_object(struct drm_device *dev,
                        return PTR_ERR(page);
 
                src = kmap_atomic(page);
-               dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+               dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
                memcpy(dst, src, PAGE_SIZE);
                kunmap_atomic(src);
 
@@ -5132,16 +5056,14 @@ i915_gem_attach_phys_object(struct drm_device *dev,
 }
 
 static int
-i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_phys_pwrite(struct drm_device *dev,
+                    struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
                     struct drm_file *file_priv)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+       void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
        char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-       DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
-
        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
                unsigned long unwritten;
 
@@ -5228,7 +5150,7 @@ rescan:
                                 &dev_priv->mm.inactive_list,
                                 mm_list) {
                if (i915_gem_object_is_purgeable(obj)) {
-                       i915_gem_object_unbind(&obj->base);
+                       i915_gem_object_unbind(obj);
                        if (--nr_to_scan == 0)
                                break;
                }
@@ -5240,7 +5162,7 @@ rescan:
                                 &dev_priv->mm.inactive_list,
                                 mm_list) {
                if (nr_to_scan) {
-                       i915_gem_object_unbind(&obj->base);
+                       i915_gem_object_unbind(obj);
                        nr_to_scan--;
                } else
                        cnt++;
index 48644b840a8dc08dfac36c6d5570265427324f6a..29d014c48ca264abd3849b6dce535b84201c32df 100644 (file)
@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
 }
 
 void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
+i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
                     const char *where, uint32_t mark)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int page;
 
-       DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+       DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
        for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
                int page_len, chunk, chunk_len;
 
@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
                        chunk_len = page_len - chunk;
                        if (chunk_len > 128)
                                chunk_len = 128;
-                       i915_gem_dump_page(obj_priv->pages[page],
+                       i915_gem_dump_page(obj->pages[page],
                                           chunk, chunk + chunk_len,
-                                          obj_priv->gtt_offset +
+                                          obj->gtt_offset +
                                           page * PAGE_SIZE,
                                           mark);
                }
@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
 
 #if WATCH_COHERENCY
 void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
 {
-       struct drm_device *dev = obj->dev;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+       struct drm_device *dev = obj->base.dev;
        int page;
        uint32_t *gtt_mapping;
        uint32_t *backing_map = NULL;
        int bad_count = 0;
 
        DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
-                __func__, obj, obj_priv->gtt_offset, handle,
-                obj->size / 1024);
+                __func__, obj, obj->gtt_offset, handle,
+                obj->base.size / 1024);
 
-       gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
-                             obj->size);
+       gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
        if (gtt_mapping == NULL) {
                DRM_ERROR("failed to map GTT space\n");
                return;
@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
-       for (page = 0; page < obj->size / PAGE_SIZE; page++) {
+       for (page = 0; page < obj->base.size / PAGE_SIZE; page++) {
                int i;
 
-               backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
+               backing_map = kmap_atomic(obj->pages[page], KM_USER0);
 
                if (backing_map == NULL) {
                        DRM_ERROR("failed to map backing page\n");
@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
                        if (cpuval != gttval) {
                                DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
                                         "0x%08x vs 0x%08x\n",
-                                        (int)(obj_priv->gtt_offset +
+                                        (int)(obj->gtt_offset +
                                               page * PAGE_SIZE + i * 4),
                                         cpuval, gttval);
                                if (bad_count++ >= 8) {
index 3f6f336bbb4da30e68f758ce58172ccf42ced2fb..03e15d37b5509c56fb56a6d32bee74108b2f4aaf 100644 (file)
 #include "i915_drm.h"
 
 static bool
-mark_free(struct drm_i915_gem_object *obj_priv,
-          struct list_head *unwind)
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
 {
-       list_add(&obj_priv->evict_list, unwind);
-       drm_gem_object_reference(&obj_priv->base);
-       return drm_mm_scan_add_block(obj_priv->gtt_space);
+       list_add(&obj->evict_list, unwind);
+       drm_gem_object_reference(&obj->base);
+       return drm_mm_scan_add_block(obj->gtt_space);
 }
 
 int
@@ -46,7 +45,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct list_head eviction_list, unwind_list;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret = 0;
 
        i915_gem_retire_requests(dev);
@@ -96,42 +95,42 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
                drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
 
        /* First see if there is a large enough contiguous idle region... */
-       list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
-               if (mark_free(obj_priv, &unwind_list))
+       list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+               if (mark_free(obj, &unwind_list))
                        goto found;
        }
 
        /* Now merge in the soon-to-be-expired objects... */
-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
                /* Does the object require an outstanding flush? */
-               if (obj_priv->base.write_domain || obj_priv->pin_count)
+               if (obj->base.write_domain || obj->pin_count)
                        continue;
 
-               if (mark_free(obj_priv, &unwind_list))
+               if (mark_free(obj, &unwind_list))
                        goto found;
        }
 
        /* Finally add anything with a pending flush (in order of retirement) */
-       list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
-               if (obj_priv->pin_count)
+       list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
+               if (obj->pin_count)
                        continue;
 
-               if (mark_free(obj_priv, &unwind_list))
+               if (mark_free(obj, &unwind_list))
                        goto found;
        }
-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-               if (! obj_priv->base.write_domain || obj_priv->pin_count)
+       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+               if (!obj->base.write_domain || obj->pin_count)
                        continue;
 
-               if (mark_free(obj_priv, &unwind_list))
+               if (mark_free(obj, &unwind_list))
                        goto found;
        }
 
        /* Nothing found, clean up and bail out! */
-       list_for_each_entry(obj_priv, &unwind_list, evict_list) {
-               ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+       list_for_each_entry(obj, &unwind_list, evict_list) {
+               ret = drm_mm_scan_remove_block(obj->gtt_space);
                BUG_ON(ret);
-               drm_gem_object_unreference(&obj_priv->base);
+               drm_gem_object_unreference(&obj->base);
        }
 
        /* We expect the caller to unpin, evict all and try again, or give up.
@@ -145,26 +144,26 @@ found:
         * temporary list. */
        INIT_LIST_HEAD(&eviction_list);
        while (!list_empty(&unwind_list)) {
-               obj_priv = list_first_entry(&unwind_list,
-                                           struct drm_i915_gem_object,
-                                           evict_list);
-               if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
-                       list_move(&obj_priv->evict_list, &eviction_list);
+               obj = list_first_entry(&unwind_list,
+                                      struct drm_i915_gem_object,
+                                      evict_list);
+               if (drm_mm_scan_remove_block(obj->gtt_space)) {
+                       list_move(&obj->evict_list, &eviction_list);
                        continue;
                }
-               list_del(&obj_priv->evict_list);
-               drm_gem_object_unreference(&obj_priv->base);
+               list_del(&obj->evict_list);
+               drm_gem_object_unreference(&obj->base);
        }
 
        /* Unbinding will emit any required flushes */
        while (!list_empty(&eviction_list)) {
-               obj_priv = list_first_entry(&eviction_list,
-                                           struct drm_i915_gem_object,
-                                           evict_list);
+               obj = list_first_entry(&eviction_list,
+                                      struct drm_i915_gem_object,
+                                      evict_list);
                if (ret == 0)
-                       ret = i915_gem_object_unbind(&obj_priv->base);
-               list_del(&obj_priv->evict_list);
-               drm_gem_object_unreference(&obj_priv->base);
+                       ret = i915_gem_object_unbind(obj);
+               list_del(&obj->evict_list);
+               drm_gem_object_unreference(&obj->base);
        }
 
        return ret;
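
The unwind/eviction dance above follows the drm_mm scan protocol: every node fed to drm_mm_scan_add_block() must be handed back to drm_mm_scan_remove_block() in reverse order of addition, and the remove step is what reports whether a node lies inside the hole that was found. A stripped-down sketch of one pass (the function name is invented; the reference juggling of the real code is omitted):

static bool
find_hole(struct drm_mm *mm, struct list_head *objects,
	  unsigned long min_size, unsigned alignment)
{
	struct drm_i915_gem_object *obj;
	LIST_HEAD(unwind);
	bool found = false;

	drm_mm_init_scan(mm, min_size, alignment);

	list_for_each_entry(obj, objects, mm_list) {
		/* list_add() builds the unwind list in LIFO order... */
		list_add(&obj->evict_list, &unwind);
		if (drm_mm_scan_add_block(obj->gtt_space)) {
			found = true;
			break;
		}
	}

	/* ...so walking it front-to-back removes blocks in reverse order
	 * of addition, as the API requires.  Blocks reporting true lie
	 * inside the hole: those are the ones to unbind. */
	list_for_each_entry(obj, &unwind, evict_list) {
		if (drm_mm_scan_remove_block(obj->gtt_space)) {
			/* i915_gem_object_unbind(obj) in the real code */
		}
	}

	return found;
}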
@@ -203,7 +202,7 @@ i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
        list_for_each_entry_safe(obj, next,
                                 &dev_priv->mm.inactive_list, mm_list) {
                if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
-                       int ret = i915_gem_object_unbind(&obj->base);
+                       int ret = i915_gem_object_unbind(obj);
                        if (ret)
                                return ret;
                }
index 0b34a1aee9b647ae16bd1ccbf3657ac6741dd51c..71c2b0f3747b843f3868079a28ce5dbaffde70b0 100644 (file)
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
 
-       list_for_each_entry(obj_priv,
-                           &dev_priv->mm.gtt_list,
-                           gtt_list) {
+       list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
                if (dev_priv->mm.gtt->needs_dmar) {
-                       BUG_ON(!obj_priv->sg_list);
+                       BUG_ON(!obj->sg_list);
 
-                       intel_gtt_insert_sg_entries(obj_priv->sg_list,
-                                                   obj_priv->num_sg,
-                                                   obj_priv->gtt_space->start
+                       intel_gtt_insert_sg_entries(obj->sg_list,
+                                                   obj->num_sg,
+                                                   obj->gtt_space->start
                                                        >> PAGE_SHIFT,
-                                                   obj_priv->agp_type);
+                                                   obj->agp_type);
                } else
-                       intel_gtt_insert_pages(obj_priv->gtt_space->start
+                       intel_gtt_insert_pages(obj->gtt_space->start
                                                   >> PAGE_SHIFT,
-                                              obj_priv->base.size >> PAGE_SHIFT,
-                                              obj_priv->pages,
-                                              obj_priv->agp_type);
+                                              obj->base.size >> PAGE_SHIFT,
+                                              obj->pages,
+                                              obj->agp_type);
        }
 
        /* Be paranoid and flush the chipset cache. */
        intel_gtt_chipset_flush();
 }
 
-int i915_gem_gtt_bind_object(struct drm_gem_object *obj)
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
        if (dev_priv->mm.gtt->needs_dmar) {
-               ret = intel_gtt_map_memory(obj_priv->pages,
-                                          obj->size >> PAGE_SHIFT,
-                                          &obj_priv->sg_list,
-                                          &obj_priv->num_sg);
+               ret = intel_gtt_map_memory(obj->pages,
+                                          obj->base.size >> PAGE_SHIFT,
+                                          &obj->sg_list,
+                                          &obj->num_sg);
                if (ret != 0)
                        return ret;
 
-               intel_gtt_insert_sg_entries(obj_priv->sg_list, obj_priv->num_sg,
-                                           obj_priv->gtt_space->start
-                                               >> PAGE_SHIFT,
-                                           obj_priv->agp_type);
+               intel_gtt_insert_sg_entries(obj->sg_list,
+                                           obj->num_sg,
+                                           obj->gtt_space->start >> PAGE_SHIFT,
+                                           obj->agp_type);
        } else
-               intel_gtt_insert_pages(obj_priv->gtt_space->start >> PAGE_SHIFT,
-                                      obj->size >> PAGE_SHIFT,
-                                      obj_priv->pages,
-                                      obj_priv->agp_type);
+               intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+                                      obj->base.size >> PAGE_SHIFT,
+                                      obj->pages,
+                                      obj->agp_type);
 
        return 0;
 }
 
-void i915_gem_gtt_unbind_object(struct drm_gem_object *obj)
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
        if (dev_priv->mm.gtt->needs_dmar) {
-               intel_gtt_unmap_memory(obj_priv->sg_list, obj_priv->num_sg);
-               obj_priv->sg_list = NULL;
-               obj_priv->num_sg = 0;
+               intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
+               obj->sg_list = NULL;
+               obj->num_sg = 0;
        }
 
-       intel_gtt_clear_range(obj_priv->gtt_space->start >> PAGE_SHIFT,
-                             obj->size >> PAGE_SHIFT);
+       intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+                             obj->base.size >> PAGE_SHIFT);
 }
index a517b48d441d625604d00c160ce863695520f6aa..1c5fdb30f2722527029d27aa719ad7c243b87510 100644 (file)
@@ -234,25 +234,24 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 
 /* Is the current GTT allocation valid for the change in tiling? */
 static bool
-i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode)
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        u32 size;
 
        if (tiling_mode == I915_TILING_NONE)
                return true;
 
-       if (INTEL_INFO(obj->dev)->gen >= 4)
+       if (INTEL_INFO(obj->base.dev)->gen >= 4)
                return true;
 
-       if (!obj_priv->gtt_space)
+       if (!obj->gtt_space)
                return true;
 
-       if (INTEL_INFO(obj->dev)->gen == 3) {
-               if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+       if (INTEL_INFO(obj->base.dev)->gen == 3) {
+               if (obj->gtt_offset & ~I915_FENCE_START_MASK)
                        return false;
        } else {
-               if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+               if (obj->gtt_offset & ~I830_FENCE_START_MASK)
                        return false;
        }
 
@@ -260,18 +259,18 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode)
         * Previous chips need to be aligned to the size of the smallest
         * fence register that can contain the object.
         */
-       if (INTEL_INFO(obj->dev)->gen == 3)
+       if (INTEL_INFO(obj->base.dev)->gen == 3)
                size = 1024*1024;
        else
                size = 512*1024;
 
-       while (size < obj_priv->base.size)
+       while (size < obj->base.size)
                size <<= 1;
 
-       if (obj_priv->gtt_space->size != size)
+       if (obj->gtt_space->size != size)
                return false;
 
-       if (obj_priv->gtt_offset & (size - 1))
+       if (obj->gtt_offset & (size - 1))
                return false;
 
        return true;
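
A worked example of the constraint being tested: pre-965 fence registers cover a naturally aligned power-of-two region, no smaller than 1MB on gen3 and 512KB before that, so a 700KB X-tiled object on gen3 must occupy a 1MB GTT node aligned to 1MB. The rounding step, pulled out as a helper purely for illustration:

static u32 fence_region_size(u32 obj_size, int gen)
{
	u32 size = (gen == 3) ? 1024*1024 : 512*1024;

	/* grow to the smallest power of two containing the object */
	while (size < obj_size)
		size <<= 1;

	return size;	/* gen3: 700KB -> 1MB, 1.5MB -> 2MB, ... */
}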
@@ -283,30 +282,29 @@ i915_gem_object_fence_ok(struct drm_gem_object *obj, int tiling_mode)
  */
 int
 i915_gem_set_tiling(struct drm_device *dev, void *data,
-                  struct drm_file *file_priv)
+                  struct drm_file *file)
 {
        struct drm_i915_gem_set_tiling *args = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        ret = i915_gem_check_is_wedged(dev);
        if (ret)
                return ret;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (obj == NULL)
                return -ENOENT;
-       obj_priv = to_intel_bo(obj);
 
-       if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
-               drm_gem_object_unreference_unlocked(obj);
+       if (!i915_tiling_ok(dev,
+                           args->stride, obj->base.size, args->tiling_mode)) {
+               drm_gem_object_unreference_unlocked(&obj->base);
                return -EINVAL;
        }
 
-       if (obj_priv->pin_count) {
-               drm_gem_object_unreference_unlocked(obj);
+       if (obj->pin_count) {
+               drm_gem_object_unreference_unlocked(&obj->base);
                return -EBUSY;
        }
 
@@ -340,8 +338,8 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
        }
 
        mutex_lock(&dev->struct_mutex);
-       if (args->tiling_mode != obj_priv->tiling_mode ||
-           args->stride != obj_priv->stride) {
+       if (args->tiling_mode != obj->tiling_mode ||
+           args->stride != obj->stride) {
                /* We need to rebind the object if its current allocation
                 * no longer meets the alignment restrictions for its new
                 * tiling mode. Otherwise we can just leave it alone, but
@@ -349,22 +347,22 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
                 */
                if (!i915_gem_object_fence_ok(obj, args->tiling_mode))
                        ret = i915_gem_object_unbind(obj);
-               else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
+               else if (obj->fence_reg != I915_FENCE_REG_NONE)
                        ret = i915_gem_object_put_fence_reg(obj, true);
                else
                        i915_gem_release_mmap(obj);
 
                if (ret != 0) {
-                       args->tiling_mode = obj_priv->tiling_mode;
-                       args->stride = obj_priv->stride;
+                       args->tiling_mode = obj->tiling_mode;
+                       args->stride = obj->stride;
                        goto err;
                }
 
-               obj_priv->tiling_mode = args->tiling_mode;
-               obj_priv->stride = args->stride;
+               obj->tiling_mode = args->tiling_mode;
+               obj->stride = args->stride;
        }
 err:
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
 
        return ret;
@@ -375,22 +373,20 @@ err:
  */
 int
 i915_gem_get_tiling(struct drm_device *dev, void *data,
-                  struct drm_file *file_priv)
+                  struct drm_file *file)
 {
        struct drm_i915_gem_get_tiling *args = data;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
 
-       obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
        if (obj == NULL)
                return -ENOENT;
-       obj_priv = to_intel_bo(obj);
 
        mutex_lock(&dev->struct_mutex);
 
-       args->tiling_mode = obj_priv->tiling_mode;
-       switch (obj_priv->tiling_mode) {
+       args->tiling_mode = obj->tiling_mode;
+       switch (obj->tiling_mode) {
        case I915_TILING_X:
                args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
                break;
@@ -410,7 +406,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
        if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
                args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
 
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
 
        return 0;
@@ -440,46 +436,44 @@ i915_gem_swizzle_page(struct page *page)
 }
 
 void
-i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       int page_count = obj->size >> PAGE_SHIFT;
+       int page_count = obj->base.size >> PAGE_SHIFT;
        int i;
 
        if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
                return;
 
-       if (obj_priv->bit_17 == NULL)
+       if (obj->bit_17 == NULL)
                return;
 
        for (i = 0; i < page_count; i++) {
-               char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+               char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
                if ((new_bit_17 & 0x1) !=
-                   (test_bit(i, obj_priv->bit_17) != 0)) {
-                       i915_gem_swizzle_page(obj_priv->pages[i]);
-                       set_page_dirty(obj_priv->pages[i]);
+                   (test_bit(i, obj->bit_17) != 0)) {
+                       i915_gem_swizzle_page(obj->pages[i]);
+                       set_page_dirty(obj->pages[i]);
                }
        }
 }
 
 void
-i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-       struct drm_device *dev = obj->dev;
+       struct drm_device *dev = obj->base.dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-       int page_count = obj->size >> PAGE_SHIFT;
+       int page_count = obj->base.size >> PAGE_SHIFT;
        int i;
 
        if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
                return;
 
-       if (obj_priv->bit_17 == NULL) {
-               obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
-                                          sizeof(long), GFP_KERNEL);
+       if (obj->bit_17 == NULL) {
+               obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+                                     sizeof(long), GFP_KERNEL);
-               if (obj_priv->bit_17 == NULL) {
+               if (obj->bit_17 == NULL) {
                        DRM_ERROR("Failed to allocate memory for bit 17 "
                                  "record\n");
                        return;
@@ -487,9 +481,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
        }
 
        for (i = 0; i < page_count; i++) {
-               if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
-                       __set_bit(i, obj_priv->bit_17);
+               if (page_to_phys(obj->pages[i]) & (1 << 17))
+                       __set_bit(i, obj->bit_17);
                else
-                       __clear_bit(i, obj_priv->bit_17);
+                       __clear_bit(i, obj->bit_17);
        }
 }
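
The arithmetic behind this bookkeeping, in one line: with 9/10/17 swizzling the hardware XORs physical address bit 17 into the tile layout, and bit 17 can change whenever a page is swapped out and lands at a new physical address on the way back. Hence the save/compare/fix-up pair above; the comparison, isolated into a hypothetical helper:

/* Does page i need its bytes re-swizzled after returning from swap? */
static bool
page_bit_17_flipped(struct page *page, const unsigned long *bit_17, int i)
{
	char new_bit_17 = page_to_phys(page) >> 17;

	return (new_bit_17 & 0x1) != (test_bit(i, bit_17) != 0);
}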
index a8f55f061f6d0ab51dac5009003a566acbc4ce61..09ac3bbd8165d74ac812c6ecdc34fcfd11921755 100644 (file)
@@ -423,28 +423,23 @@ static void i915_error_work_func(struct work_struct *work)
 #ifdef CONFIG_DEBUG_FS
 static struct drm_i915_error_object *
 i915_error_object_create(struct drm_device *dev,
-                        struct drm_gem_object *src)
+                        struct drm_i915_gem_object *src)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_error_object *dst;
-       struct drm_i915_gem_object *src_priv;
        int page, page_count;
        u32 reloc_offset;
 
-       if (src == NULL)
+       if (src == NULL || src->pages == NULL)
                return NULL;
 
-       src_priv = to_intel_bo(src);
-       if (src_priv->pages == NULL)
-               return NULL;
-
-       page_count = src->size / PAGE_SIZE;
+       page_count = src->base.size / PAGE_SIZE;
 
        dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
        if (dst == NULL)
                return NULL;
 
-       reloc_offset = src_priv->gtt_offset;
+       reloc_offset = src->gtt_offset;
        for (page = 0; page < page_count; page++) {
                unsigned long flags;
                void __iomem *s;
@@ -466,7 +461,7 @@ i915_error_object_create(struct drm_device *dev,
                reloc_offset += PAGE_SIZE;
        }
        dst->page_count = page_count;
-       dst->gtt_offset = src_priv->gtt_offset;
+       dst->gtt_offset = src->gtt_offset;
 
        return dst;
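
For completeness, the elided body of the per-page loop: each page is snapshotted through the GTT aperture rather than via the shmem backing store, so the dump records exactly what the GPU saw, tiling and all. A sketch assuming the slot-less io_mapping API of this kernel, with the irq-save/atomic details dropped and the helper name invented:

static u32 *snapshot_page(drm_i915_private_t *dev_priv, u32 reloc_offset)
{
	void __iomem *s;
	u32 *d;

	d = kmalloc(PAGE_SIZE, GFP_ATOMIC);	/* error context: may well fail */
	if (d == NULL)
		return NULL;

	s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping, reloc_offset);
	memcpy_fromio(d, s, PAGE_SIZE);
	io_mapping_unmap_atomic(s);

	return d;
}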
 
@@ -598,9 +593,9 @@ static u32 capture_bo_list(struct drm_i915_error_buffer *err,
 static void i915_capture_error_state(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        struct drm_i915_error_state *error;
-       struct drm_gem_object *batchbuffer[2];
+       struct drm_i915_gem_object *batchbuffer[2];
        unsigned long flags;
        u32 bbaddr;
        int count;
@@ -668,34 +663,30 @@ static void i915_capture_error_state(struct drm_device *dev)
        batchbuffer[0] = NULL;
        batchbuffer[1] = NULL;
        count = 0;
-       list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
-               struct drm_gem_object *obj = &obj_priv->base;
-
+       list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
                if (batchbuffer[0] == NULL &&
-                   bbaddr >= obj_priv->gtt_offset &&
-                   bbaddr < obj_priv->gtt_offset + obj->size)
+                   bbaddr >= obj->gtt_offset &&
+                   bbaddr < obj->gtt_offset + obj->base.size)
                        batchbuffer[0] = obj;
 
                if (batchbuffer[1] == NULL &&
-                   error->acthd >= obj_priv->gtt_offset &&
-                   error->acthd < obj_priv->gtt_offset + obj->size)
+                   error->acthd >= obj->gtt_offset &&
+                   error->acthd < obj->gtt_offset + obj->base.size)
                        batchbuffer[1] = obj;
 
                count++;
        }
        /* Scan the other lists for completeness for those bizarre errors. */
        if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-               list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
-                       struct drm_gem_object *obj = &obj_priv->base;
-
+               list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
                        if (batchbuffer[0] == NULL &&
-                           bbaddr >= obj_priv->gtt_offset &&
-                           bbaddr < obj_priv->gtt_offset + obj->size)
+                           bbaddr >= obj->gtt_offset &&
+                           bbaddr < obj->gtt_offset + obj->base.size)
                                batchbuffer[0] = obj;
 
                        if (batchbuffer[1] == NULL &&
-                           error->acthd >= obj_priv->gtt_offset &&
-                           error->acthd < obj_priv->gtt_offset + obj->size)
+                           error->acthd >= obj->gtt_offset &&
+                           error->acthd < obj->gtt_offset + obj->base.size)
                                batchbuffer[1] = obj;
 
                        if (batchbuffer[0] && batchbuffer[1])
@@ -703,17 +694,15 @@ static void i915_capture_error_state(struct drm_device *dev)
                }
        }
        if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-               list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
-                       struct drm_gem_object *obj = &obj_priv->base;
-
+               list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
                        if (batchbuffer[0] == NULL &&
-                           bbaddr >= obj_priv->gtt_offset &&
-                           bbaddr < obj_priv->gtt_offset + obj->size)
+                           bbaddr >= obj->gtt_offset &&
+                           bbaddr < obj->gtt_offset + obj->base.size)
                                batchbuffer[0] = obj;
 
                        if (batchbuffer[1] == NULL &&
-                           error->acthd >= obj_priv->gtt_offset &&
-                           error->acthd < obj_priv->gtt_offset + obj->size)
+                           error->acthd >= obj->gtt_offset &&
+                           error->acthd < obj->gtt_offset + obj->base.size)
                                batchbuffer[1] = obj;
 
                        if (batchbuffer[0] && batchbuffer[1])
@@ -732,14 +721,14 @@ static void i915_capture_error_state(struct drm_device *dev)
 
        /* Record the ringbuffer */
        error->ringbuffer = i915_error_object_create(dev,
-                       dev_priv->render_ring.gem_object);
+                                                    dev_priv->render_ring.obj);
 
        /* Record buffers on the active and pinned lists. */
        error->active_bo = NULL;
        error->pinned_bo = NULL;
 
        error->active_bo_count = count;
-       list_for_each_entry(obj_priv, &dev_priv->mm.pinned_list, mm_list)
+       list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
                count++;
        error->pinned_bo_count = count - error->active_bo_count;
 
@@ -948,7 +937,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        struct intel_unpin_work *work;
        unsigned long flags;
        bool stall_detected;
@@ -967,13 +956,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
        }
 
        /* Potential stall - if we see that the flip has happened, assume a missed interrupt */
-       obj_priv = to_intel_bo(work->pending_flip_obj);
+       obj = work->pending_flip_obj;
        if (INTEL_INFO(dev)->gen >= 4) {
                int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
-               stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+               stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
        } else {
                int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
-               stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+               stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
                                                        crtc->y * crtc->fb->pitch +
                                                        crtc->x * crtc->fb->bits_per_pixel/8);
        }
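
The churn in this file is mechanical: once callers hold struct drm_i915_gem_object directly, the to_intel_bo() upcast disappears and obj->size becomes obj->base.size. The conversion was always free, since the driver object embeds the core object; a sketch of the relationship this whole patch relies on:

	/* drm_i915_gem_object embeds its drm_gem_object as "base", so
	 * converting between the two is pointer arithmetic only. */
	struct drm_i915_gem_object {
		struct drm_gem_object base;
		/* ... driver-private state: gtt_offset, tiling_mode, pages ... */
	};

	#define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
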
index 34ef49fd0377f57b22ae564c342526d497d00b08..1df7262ae0775dceeb12d0466712fea7330b4632 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/tracepoint.h>
 
 #include <drm/drmP.h>
+#include "i915_drv.h"
 
 #undef TRACE_SYSTEM
 #define TRACE_SYSTEM i915
@@ -16,18 +17,18 @@
 
 TRACE_EVENT(i915_gem_object_create,
 
-           TP_PROTO(struct drm_gem_object *obj),
+           TP_PROTO(struct drm_i915_gem_object *obj),
 
            TP_ARGS(obj),
 
            TP_STRUCT__entry(
-                            __field(struct drm_gem_object *, obj)
+                            __field(struct drm_i915_gem_object *, obj)
                             __field(u32, size)
                             ),
 
            TP_fast_assign(
                           __entry->obj = obj;
-                          __entry->size = obj->size;
+                          __entry->size = obj->base.size;
                           ),
 
            TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
@@ -35,12 +36,12 @@ TRACE_EVENT(i915_gem_object_create,
 
 TRACE_EVENT(i915_gem_object_bind,
 
-           TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset, bool mappable),
+           TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
 
            TP_ARGS(obj, gtt_offset, mappable),
 
            TP_STRUCT__entry(
-                            __field(struct drm_gem_object *, obj)
+                            __field(struct drm_i915_gem_object *, obj)
                             __field(u32, gtt_offset)
                             __field(bool, mappable)
                             ),
@@ -58,20 +59,20 @@ TRACE_EVENT(i915_gem_object_bind,
 
 TRACE_EVENT(i915_gem_object_change_domain,
 
-           TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+           TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
 
            TP_ARGS(obj, old_read_domains, old_write_domain),
 
            TP_STRUCT__entry(
-                            __field(struct drm_gem_object *, obj)
+                            __field(struct drm_i915_gem_object *, obj)
                             __field(u32, read_domains)
                             __field(u32, write_domain)
                             ),
 
            TP_fast_assign(
                           __entry->obj = obj;
-                          __entry->read_domains = obj->read_domains | (old_read_domains << 16);
-                          __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+                          __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
+                          __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
                           ),
 
            TP_printk("obj=%p, read=%04x, write=%04x",
@@ -81,12 +82,12 @@ TRACE_EVENT(i915_gem_object_change_domain,
 
 TRACE_EVENT(i915_gem_object_get_fence,
 
-           TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
+           TP_PROTO(struct drm_i915_gem_object *obj, int fence, int tiling_mode),
 
            TP_ARGS(obj, fence, tiling_mode),
 
            TP_STRUCT__entry(
-                            __field(struct drm_gem_object *, obj)
+                            __field(struct drm_i915_gem_object *, obj)
                             __field(int, fence)
                             __field(int, tiling_mode)
                             ),
@@ -103,12 +104,12 @@ TRACE_EVENT(i915_gem_object_get_fence,
 
 DECLARE_EVENT_CLASS(i915_gem_object,
 
-           TP_PROTO(struct drm_gem_object *obj),
+           TP_PROTO(struct drm_i915_gem_object *obj),
 
            TP_ARGS(obj),
 
            TP_STRUCT__entry(
-                            __field(struct drm_gem_object *, obj)
+                            __field(struct drm_i915_gem_object *, obj)
                             ),
 
            TP_fast_assign(
@@ -120,21 +121,21 @@ DECLARE_EVENT_CLASS(i915_gem_object,
 
 DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
 
-           TP_PROTO(struct drm_gem_object *obj),
+           TP_PROTO(struct drm_i915_gem_object *obj),
 
            TP_ARGS(obj)
 );
 
 DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
 
-           TP_PROTO(struct drm_gem_object *obj),
+           TP_PROTO(struct drm_i915_gem_object *obj),
 
            TP_ARGS(obj)
 );
 
 DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
 
-           TP_PROTO(struct drm_gem_object *obj),
+           TP_PROTO(struct drm_i915_gem_object *obj),
 
            TP_ARGS(obj)
 );
@@ -266,13 +267,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
 );
 
 TRACE_EVENT(i915_flip_request,
-           TP_PROTO(int plane, struct drm_gem_object *obj),
+           TP_PROTO(int plane, struct drm_i915_gem_object *obj),
 
            TP_ARGS(plane, obj),
 
            TP_STRUCT__entry(
                    __field(int, plane)
-                   __field(struct drm_gem_object *, obj)
+                   __field(struct drm_i915_gem_object *, obj)
                    ),
 
            TP_fast_assign(
@@ -284,13 +285,13 @@ TRACE_EVENT(i915_flip_request,
 );
 
 TRACE_EVENT(i915_flip_complete,
-           TP_PROTO(int plane, struct drm_gem_object *obj),
+           TP_PROTO(int plane, struct drm_i915_gem_object *obj),
 
            TP_ARGS(plane, obj),
 
            TP_STRUCT__entry(
                    __field(int, plane)
-                   __field(struct drm_gem_object *, obj)
+                   __field(struct drm_i915_gem_object *, obj)
                    ),
 
            TP_fast_assign(
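
With the typed pointer, TP_fast_assign() must reach through obj->base, which is why this header now pulls in "i915_drv.h" for the struct definition. Call sites keep the usual generated-stub shape; a sketch of emitting the create event (once enabled, events appear under the standard events/i915/ tracefs directory):

	obj = i915_gem_alloc_object(dev, size);
	if (obj)
		trace_i915_gem_object_create(obj); /* stub generated by TRACE_EVENT */
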
index d4bc443f43fc28a24e4672f16d39a5e76db03e1e..ae7d4f55ce0719391a3b6596bd8cb0eaf91d914d 100644 (file)
@@ -1066,13 +1066,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane, i;
        u32 fbc_ctl, fbc_ctl2;
 
        if (fb->pitch == dev_priv->cfb_pitch &&
-           obj_priv->fence_reg == dev_priv->cfb_fence &&
+           obj->fence_reg == dev_priv->cfb_fence &&
            intel_crtc->plane == dev_priv->cfb_plane &&
            I915_READ(FBC_CONTROL) & FBC_CTL_EN)
                return;
@@ -1086,7 +1086,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
        /* FBC_CTL wants 64B units */
        dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-       dev_priv->cfb_fence = obj_priv->fence_reg;
+       dev_priv->cfb_fence = obj->fence_reg;
        dev_priv->cfb_plane = intel_crtc->plane;
        plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
 
@@ -1096,7 +1096,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
 
        /* Set it up... */
        fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
-       if (obj_priv->tiling_mode != I915_TILING_NONE)
+       if (obj->tiling_mode != I915_TILING_NONE)
                fbc_ctl2 |= FBC_CTL_CPU_FENCE;
        I915_WRITE(FBC_CONTROL2, fbc_ctl2);
        I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1107,7 +1107,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
                fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
        fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
        fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
-       if (obj_priv->tiling_mode != I915_TILING_NONE)
+       if (obj->tiling_mode != I915_TILING_NONE)
                fbc_ctl |= dev_priv->cfb_fence;
        I915_WRITE(FBC_CONTROL, fbc_ctl);
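
i8xx_enable_fbc above and the g4x/ironlake variants below share one shape: cache the compressed-framebuffer parameters (pitch in 64B units, fence, plane, and on newer parts offset and y) and return early when asked to re-enable with identical state, since reprogramming FBC forces a recompression cycle. A condensed sketch of that early-out, with a hypothetical struct standing in for the loose dev_priv->cfb_* fields:

	struct fbc_state {                      /* hypothetical grouping */
		int pitch, fence, plane, y;
	};

	static bool fbc_unchanged(const struct fbc_state *cached,
				  const struct fbc_state *next)
	{
		return cached->pitch == next->pitch &&
		       cached->fence == next->fence &&
		       cached->plane == next->plane &&
		       cached->y == next->y;
	}
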
 
@@ -1150,7 +1150,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
@@ -1159,7 +1159,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        dpfc_ctl = I915_READ(DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
-                   dev_priv->cfb_fence == obj_priv->fence_reg &&
+                   dev_priv->cfb_fence == obj->fence_reg &&
                    dev_priv->cfb_plane == intel_crtc->plane &&
                    dev_priv->cfb_y == crtc->y)
                        return;
@@ -1170,12 +1170,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        }
 
        dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-       dev_priv->cfb_fence = obj_priv->fence_reg;
+       dev_priv->cfb_fence = obj->fence_reg;
        dev_priv->cfb_plane = intel_crtc->plane;
        dev_priv->cfb_y = crtc->y;
 
        dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
-       if (obj_priv->tiling_mode != I915_TILING_NONE) {
+       if (obj->tiling_mode != I915_TILING_NONE) {
                dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
                I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
        } else {
@@ -1221,7 +1221,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_framebuffer *fb = crtc->fb;
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+       struct drm_i915_gem_object *obj = intel_fb->obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
        unsigned long stall_watermark = 200;
@@ -1230,9 +1230,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
        if (dpfc_ctl & DPFC_CTL_EN) {
                if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
-                   dev_priv->cfb_fence == obj_priv->fence_reg &&
+                   dev_priv->cfb_fence == obj->fence_reg &&
                    dev_priv->cfb_plane == intel_crtc->plane &&
-                   dev_priv->cfb_offset == obj_priv->gtt_offset &&
+                   dev_priv->cfb_offset == obj->gtt_offset &&
                    dev_priv->cfb_y == crtc->y)
                        return;
 
@@ -1242,14 +1242,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
        }
 
        dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
-       dev_priv->cfb_fence = obj_priv->fence_reg;
+       dev_priv->cfb_fence = obj->fence_reg;
        dev_priv->cfb_plane = intel_crtc->plane;
-       dev_priv->cfb_offset = obj_priv->gtt_offset;
+       dev_priv->cfb_offset = obj->gtt_offset;
        dev_priv->cfb_y = crtc->y;
 
        dpfc_ctl &= DPFC_RESERVED;
        dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
-       if (obj_priv->tiling_mode != I915_TILING_NONE) {
+       if (obj->tiling_mode != I915_TILING_NONE) {
                dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
                I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
        } else {
@@ -1260,7 +1260,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
                   (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
                   (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
        I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
-       I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+       I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
        /* enable it... */
        I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
 
@@ -1345,7 +1345,7 @@ static void intel_update_fbc(struct drm_device *dev)
        struct intel_crtc *intel_crtc;
        struct drm_framebuffer *fb;
        struct intel_framebuffer *intel_fb;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
 
        DRM_DEBUG_KMS("\n");
 
@@ -1384,9 +1384,9 @@ static void intel_update_fbc(struct drm_device *dev)
        intel_crtc = to_intel_crtc(crtc);
        fb = crtc->fb;
        intel_fb = to_intel_framebuffer(fb);
-       obj_priv = to_intel_bo(intel_fb->obj);
+       obj = intel_fb->obj;
 
-       if (intel_fb->obj->size > dev_priv->cfb_size) {
+       if (intel_fb->obj->base.size > dev_priv->cfb_size) {
                DRM_DEBUG_KMS("framebuffer too large, disabling "
                              "compression\n");
                dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
@@ -1410,7 +1410,7 @@ static void intel_update_fbc(struct drm_device *dev)
                dev_priv->no_fbc_reason = FBC_BAD_PLANE;
                goto out_disable;
        }
-       if (obj_priv->tiling_mode != I915_TILING_X) {
+       if (obj->tiling_mode != I915_TILING_X) {
                DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
                dev_priv->no_fbc_reason = FBC_NOT_TILED;
                goto out_disable;
@@ -1433,14 +1433,13 @@ out_disable:
 
 int
 intel_pin_and_fence_fb_obj(struct drm_device *dev,
-                          struct drm_gem_object *obj,
+                          struct drm_i915_gem_object *obj,
                           bool pipelined)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        u32 alignment;
        int ret;
 
-       switch (obj_priv->tiling_mode) {
+       switch (obj->tiling_mode) {
        case I915_TILING_NONE:
                if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
                        alignment = 128 * 1024;
@@ -1474,7 +1473,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
         * framebuffer compression.  For simplicity, we always install
         * a fence as the cost is not that onerous.
         */
-       if (obj_priv->tiling_mode != I915_TILING_NONE) {
+       if (obj->tiling_mode != I915_TILING_NONE) {
                ret = i915_gem_object_get_fence_reg(obj, false);
                if (ret)
                        goto err_unpin;
@@ -1496,8 +1495,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_framebuffer *intel_fb;
-       struct drm_i915_gem_object *obj_priv;
-       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj;
        int plane = intel_crtc->plane;
        unsigned long Start, Offset;
        u32 dspcntr;
@@ -1514,7 +1512,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        intel_fb = to_intel_framebuffer(fb);
        obj = intel_fb->obj;
-       obj_priv = to_intel_bo(obj);
 
        reg = DSPCNTR(plane);
        dspcntr = I915_READ(reg);
@@ -1539,7 +1536,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                return -EINVAL;
        }
        if (INTEL_INFO(dev)->gen >= 4) {
-               if (obj_priv->tiling_mode != I915_TILING_NONE)
+               if (obj->tiling_mode != I915_TILING_NONE)
                        dspcntr |= DISPPLANE_TILED;
                else
                        dspcntr &= ~DISPPLANE_TILED;
@@ -1551,7 +1548,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 
        I915_WRITE(reg, dspcntr);
 
-       Start = obj_priv->gtt_offset;
+       Start = obj->gtt_offset;
        Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
 
        DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -1605,18 +1602,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
 
        if (old_fb) {
                struct drm_i915_private *dev_priv = dev->dev_private;
-               struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
-               struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+               struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
 
                wait_event(dev_priv->pending_flip_queue,
-                          atomic_read(&obj_priv->pending_flip) == 0);
+                          atomic_read(&obj->pending_flip) == 0);
 
                /* Big Hammer, we also need to ensure that any pending
                 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
                 * current scanout is retired before unpinning the old
                 * framebuffer.
                 */
-               ret = i915_gem_object_flush_gpu(obj_priv, false);
+               ret = i915_gem_object_flush_gpu(obj, false);
                if (ret) {
                        i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
                        mutex_unlock(&dev->struct_mutex);
@@ -2010,16 +2006,16 @@ static void intel_clear_scanline_wait(struct drm_device *dev)
 
 static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
 {
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        struct drm_i915_private *dev_priv;
 
        if (crtc->fb == NULL)
                return;
 
-       obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+       obj = to_intel_framebuffer(crtc->fb)->obj;
        dev_priv = crtc->dev->dev_private;
        wait_event(dev_priv->pending_flip_queue,
-                  atomic_read(&obj_priv->pending_flip) == 0);
+                  atomic_read(&obj->pending_flip) == 0);
 }
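
pending_flip is a per-object atomic bitmask with one bit per plane. A sketch of its full lifecycle, condensed from the waiters above and the page-flip hunks further down:

	/* 1. queueing a flip marks the outgoing framebuffer busy */
	atomic_add(1 << plane, &old_fb_obj->pending_flip);

	/* 2. the flip-done path clears the plane's bit and wakes waiters */
	atomic_clear_mask(1 << plane, &old_fb_obj->pending_flip.counter);
	if (atomic_read(&old_fb_obj->pending_flip) == 0)
		wake_up(&dev_priv->pending_flip_queue);

	/* 3. anyone needing the object idle sleeps until the mask drains */
	wait_event(dev_priv->pending_flip_queue,
		   atomic_read(&old_fb_obj->pending_flip) == 0);
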
 
 static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -4333,15 +4329,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
 }
 
 static int intel_crtc_cursor_set(struct drm_crtc *crtc,
-                                struct drm_file *file_priv,
+                                struct drm_file *file,
                                 uint32_t handle,
                                 uint32_t width, uint32_t height)
 {
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
-       struct drm_gem_object *bo;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        uint32_t addr;
        int ret;
 
@@ -4351,7 +4346,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
        if (!handle) {
                DRM_DEBUG_KMS("cursor off\n");
                addr = 0;
-               bo = NULL;
+               obj = NULL;
                mutex_lock(&dev->struct_mutex);
                goto finish;
        }
@@ -4362,13 +4357,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
                return -EINVAL;
        }
 
-       bo = drm_gem_object_lookup(dev, file_priv, handle);
-       if (!bo)
+       obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+       if (!obj)
                return -ENOENT;
 
-       obj_priv = to_intel_bo(bo);
-
-       if (bo->size < width * height * 4) {
+       if (obj->base.size < width * height * 4) {
                DRM_ERROR("buffer is too small\n");
                ret = -ENOMEM;
                goto fail;
@@ -4377,29 +4370,29 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
        /* we only need to pin inside GTT if cursor is non-phy */
        mutex_lock(&dev->struct_mutex);
        if (!dev_priv->info->cursor_needs_physical) {
-               ret = i915_gem_object_pin(bo, PAGE_SIZE, true);
+               ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
                if (ret) {
                        DRM_ERROR("failed to pin cursor bo\n");
                        goto fail_locked;
                }
 
-               ret = i915_gem_object_set_to_gtt_domain(bo, 0);
+               ret = i915_gem_object_set_to_gtt_domain(obj, 0);
                if (ret) {
                        DRM_ERROR("failed to move cursor bo into the GTT\n");
                        goto fail_unpin;
                }
 
-               addr = obj_priv->gtt_offset;
+               addr = obj->gtt_offset;
        } else {
                int align = IS_I830(dev) ? 16 * 1024 : 256;
-               ret = i915_gem_attach_phys_object(dev, bo,
+               ret = i915_gem_attach_phys_object(dev, obj,
                                                  (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
                                                  align);
                if (ret) {
                        DRM_ERROR("failed to attach phys object\n");
                        goto fail_locked;
                }
-               addr = obj_priv->phys_obj->handle->busaddr;
+               addr = obj->phys_obj->handle->busaddr;
        }
 
        if (IS_GEN2(dev))
@@ -4408,17 +4401,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
  finish:
        if (intel_crtc->cursor_bo) {
                if (dev_priv->info->cursor_needs_physical) {
-                       if (intel_crtc->cursor_bo != bo)
+                       if (intel_crtc->cursor_bo != obj)
                                i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
                } else
                        i915_gem_object_unpin(intel_crtc->cursor_bo);
-               drm_gem_object_unreference(intel_crtc->cursor_bo);
+               drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
        }
 
        mutex_unlock(&dev->struct_mutex);
 
        intel_crtc->cursor_addr = addr;
-       intel_crtc->cursor_bo = bo;
+       intel_crtc->cursor_bo = obj;
        intel_crtc->cursor_width = width;
        intel_crtc->cursor_height = height;
 
@@ -4426,11 +4419,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
        return 0;
 fail_unpin:
-       i915_gem_object_unpin(bo);
+       i915_gem_object_unpin(obj);
 fail_locked:
        mutex_unlock(&dev->struct_mutex);
 fail:
-       drm_gem_object_unreference_unlocked(bo);
+       drm_gem_object_unreference_unlocked(&obj->base);
        return ret;
 }
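
One subtlety in the lookup above: to_intel_bo(drm_gem_object_lookup(...)) followed by a NULL check is only safe because base is the first member of struct drm_i915_gem_object, so the container_of() applies a zero offset and a failed lookup still yields NULL. A defensive sketch of pinning that invariant down (placement illustrative):

	BUILD_BUG_ON(offsetof(struct drm_i915_gem_object, base) != 0);

	obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
	if (!obj)
		return -ENOENT;
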
 
@@ -4890,7 +4883,7 @@ static void intel_idle_update(struct work_struct *work)
  * buffer), we'll also mark the display as busy, so we know to increase its
  * clock frequency.
  */
-void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_crtc *crtc = NULL;
@@ -4971,8 +4964,8 @@ static void intel_unpin_work_fn(struct work_struct *__work)
 
        mutex_lock(&work->dev->struct_mutex);
        i915_gem_object_unpin(work->old_fb_obj);
-       drm_gem_object_unreference(work->pending_flip_obj);
-       drm_gem_object_unreference(work->old_fb_obj);
+       drm_gem_object_unreference(&work->pending_flip_obj->base);
+       drm_gem_object_unreference(&work->old_fb_obj->base);
        mutex_unlock(&work->dev->struct_mutex);
        kfree(work);
 }
@@ -4983,7 +4976,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        struct drm_pending_vblank_event *e;
        struct timeval now;
        unsigned long flags;
@@ -5015,10 +5008,10 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
 
        spin_unlock_irqrestore(&dev->event_lock, flags);
 
-       obj_priv = to_intel_bo(work->old_fb_obj);
+       obj = work->old_fb_obj;
        atomic_clear_mask(1 << intel_crtc->plane,
-                         &obj_priv->pending_flip.counter);
-       if (atomic_read(&obj_priv->pending_flip) == 0)
+                         &obj->pending_flip.counter);
+       if (atomic_read(&obj->pending_flip) == 0)
                wake_up(&dev_priv->pending_flip_queue);
        schedule_work(&work->work);
 
@@ -5065,8 +5058,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_framebuffer *intel_fb;
-       struct drm_i915_gem_object *obj_priv;
-       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
        struct intel_unpin_work *work;
        unsigned long flags, offset;
@@ -5105,8 +5097,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                goto cleanup_work;
 
        /* Reference the objects for the scheduled work. */
-       drm_gem_object_reference(work->old_fb_obj);
-       drm_gem_object_reference(obj);
+       drm_gem_object_reference(&work->old_fb_obj->base);
+       drm_gem_object_reference(&obj->base);
 
        crtc->fb = fb;
 
@@ -5134,7 +5126,6 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        }
 
        work->pending_flip_obj = obj;
-       obj_priv = to_intel_bo(obj);
 
        work->enable_stall_check = true;
 
@@ -5148,15 +5139,14 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        /* Block clients from rendering to the new back buffer until
         * the flip occurs and the object is no longer visible.
         */
-       atomic_add(1 << intel_crtc->plane,
-                  &to_intel_bo(work->old_fb_obj)->pending_flip);
+       atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
 
        switch (INTEL_INFO(dev)->gen) {
        case 2:
                OUT_RING(MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(obj_priv->gtt_offset + offset);
+               OUT_RING(obj->gtt_offset + offset);
                OUT_RING(MI_NOOP);
                break;
 
@@ -5164,7 +5154,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                OUT_RING(MI_DISPLAY_FLIP_I915 |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(obj_priv->gtt_offset + offset);
+               OUT_RING(obj->gtt_offset + offset);
                OUT_RING(MI_NOOP);
                break;
 
@@ -5177,7 +5167,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
                OUT_RING(MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
                OUT_RING(fb->pitch);
-               OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+               OUT_RING(obj->gtt_offset | obj->tiling_mode);
 
                /* XXX Enabling the panel-fitter across page-flip is so far
                 * untested on non-native modes, so ignore it for now.
@@ -5191,8 +5181,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        case 6:
                OUT_RING(MI_DISPLAY_FLIP |
                         MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
-               OUT_RING(fb->pitch | obj_priv->tiling_mode);
-               OUT_RING(obj_priv->gtt_offset);
+               OUT_RING(fb->pitch | obj->tiling_mode);
+               OUT_RING(obj->gtt_offset);
 
                pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
                pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
@@ -5208,8 +5198,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        return 0;
 
 cleanup_objs:
-       drm_gem_object_unreference(work->old_fb_obj);
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&work->old_fb_obj->base);
+       drm_gem_object_unreference(&obj->base);
 cleanup_work:
        mutex_unlock(&dev->struct_mutex);
 
@@ -5295,7 +5285,7 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
 }
 
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
-                               struct drm_file *file_priv)
+                               struct drm_file *file)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
@@ -5440,19 +5430,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
 
        drm_framebuffer_cleanup(fb);
-       drm_gem_object_unreference_unlocked(intel_fb->obj);
+       drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
 
        kfree(intel_fb);
 }
 
 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
-                                               struct drm_file *file_priv,
+                                               struct drm_file *file,
                                                unsigned int *handle)
 {
        struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
-       struct drm_gem_object *object = intel_fb->obj;
+       struct drm_i915_gem_object *obj = intel_fb->obj;
 
-       return drm_gem_handle_create(file_priv, object, handle);
+       return drm_gem_handle_create(file, &obj->base, handle);
 }
 
 static const struct drm_framebuffer_funcs intel_fb_funcs = {
@@ -5463,12 +5453,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
 int intel_framebuffer_init(struct drm_device *dev,
                           struct intel_framebuffer *intel_fb,
                           struct drm_mode_fb_cmd *mode_cmd,
-                          struct drm_gem_object *obj)
+                          struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
        int ret;
 
-       if (obj_priv->tiling_mode == I915_TILING_Y)
+       if (obj->tiling_mode == I915_TILING_Y)
                return -EINVAL;
 
        if (mode_cmd->pitch & 63)
@@ -5500,11 +5489,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
                              struct drm_file *filp,
                              struct drm_mode_fb_cmd *mode_cmd)
 {
-       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj;
        struct intel_framebuffer *intel_fb;
        int ret;
 
-       obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
+       obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
        if (!obj)
                return ERR_PTR(-ENOENT);
 
@@ -5512,10 +5501,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
        if (!intel_fb)
                return ERR_PTR(-ENOMEM);
 
-       ret = intel_framebuffer_init(dev, intel_fb,
-                                    mode_cmd, obj);
+       ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
        if (ret) {
-               drm_gem_object_unreference_unlocked(obj);
+               drm_gem_object_unreference_unlocked(&obj->base);
                kfree(intel_fb);
                return ERR_PTR(ret);
        }
@@ -5528,10 +5516,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
        .output_poll_changed = intel_fb_output_poll_changed,
 };
 
-static struct drm_gem_object *
+static struct drm_i915_gem_object *
 intel_alloc_context_page(struct drm_device *dev)
 {
-       struct drm_gem_object *ctx;
+       struct drm_i915_gem_object *ctx;
        int ret;
 
        ctx = i915_gem_alloc_object(dev, 4096);
@@ -5559,7 +5547,7 @@ intel_alloc_context_page(struct drm_device *dev)
 err_unpin:
        i915_gem_object_unpin(ctx);
 err_unref:
-       drm_gem_object_unreference(ctx);
+       drm_gem_object_unreference(&ctx->base);
        mutex_unlock(&dev->struct_mutex);
        return NULL;
 }
@@ -5886,20 +5874,17 @@ void intel_init_clock_gating(struct drm_device *dev)
                if (dev_priv->renderctx == NULL)
                        dev_priv->renderctx = intel_alloc_context_page(dev);
                if (dev_priv->renderctx) {
-                       struct drm_i915_gem_object *obj_priv;
-                       obj_priv = to_intel_bo(dev_priv->renderctx);
-                       if (obj_priv) {
-                               if (BEGIN_LP_RING(4) == 0) {
-                                       OUT_RING(MI_SET_CONTEXT);
-                                       OUT_RING(obj_priv->gtt_offset |
-                                                MI_MM_SPACE_GTT |
-                                                MI_SAVE_EXT_STATE_EN |
-                                                MI_RESTORE_EXT_STATE_EN |
-                                                MI_RESTORE_INHIBIT);
-                                       OUT_RING(MI_NOOP);
-                                       OUT_RING(MI_FLUSH);
-                                       ADVANCE_LP_RING();
-                               }
+                       struct drm_i915_gem_object *obj = dev_priv->renderctx;
+                       if (BEGIN_LP_RING(4) == 0) {
+                               OUT_RING(MI_SET_CONTEXT);
+                               OUT_RING(obj->gtt_offset |
+                                        MI_MM_SPACE_GTT |
+                                        MI_SAVE_EXT_STATE_EN |
+                                        MI_RESTORE_EXT_STATE_EN |
+                                        MI_RESTORE_INHIBIT);
+                               OUT_RING(MI_NOOP);
+                               OUT_RING(MI_FLUSH);
+                               ADVANCE_LP_RING();
                        }
                } else
                        DRM_DEBUG_KMS("Failed to allocate render context."
@@ -5907,22 +5892,11 @@ void intel_init_clock_gating(struct drm_device *dev)
        }
 
        if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
-               struct drm_i915_gem_object *obj_priv = NULL;
-
+               if (dev_priv->pwrctx == NULL)
+                       dev_priv->pwrctx = intel_alloc_context_page(dev);
                if (dev_priv->pwrctx) {
-                       obj_priv = to_intel_bo(dev_priv->pwrctx);
-               } else {
-                       struct drm_gem_object *pwrctx;
-
-                       pwrctx = intel_alloc_context_page(dev);
-                       if (pwrctx) {
-                               dev_priv->pwrctx = pwrctx;
-                               obj_priv = to_intel_bo(pwrctx);
-                       }
-               }
-
-               if (obj_priv) {
-                       I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+                       struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+                       I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
                        I915_WRITE(MCHBAR_RENDER_STANDBY,
                                   I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
                }
@@ -6197,23 +6171,25 @@ void intel_modeset_cleanup(struct drm_device *dev)
                dev_priv->display.disable_fbc(dev);
 
        if (dev_priv->renderctx) {
-               struct drm_i915_gem_object *obj_priv;
+               struct drm_i915_gem_object *obj = dev_priv->renderctx;
+
+               I915_WRITE(CCID, obj->gtt_offset & ~CCID_EN);
+               POSTING_READ(CCID);
 
-               obj_priv = to_intel_bo(dev_priv->renderctx);
-               I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
-               I915_READ(CCID);
-               i915_gem_object_unpin(dev_priv->renderctx);
-               drm_gem_object_unreference(dev_priv->renderctx);
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(&obj->base);
+               dev_priv->renderctx = NULL;
        }
 
        if (dev_priv->pwrctx) {
-               struct drm_i915_gem_object *obj_priv;
+               struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+
+               I915_WRITE(PWRCTXA, obj->gtt_offset & ~PWRCTX_EN);
+               POSTING_READ(PWRCTXA);
 
-               obj_priv = to_intel_bo(dev_priv->pwrctx);
-               I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
-               I915_READ(PWRCTXA);
-               i915_gem_object_unpin(dev_priv->pwrctx);
-               drm_gem_object_unreference(dev_priv->pwrctx);
+               i915_gem_object_unpin(obj);
+               drm_gem_object_unreference(&obj->base);
+               dev_priv->pwrctx = NULL;
        }
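
The renderctx and pwrctx teardown blocks above are now symmetric: disable the hardware pointer register with a posting read, unpin, drop the reference, and clear the cached pointer so a later rebind cannot see a stale object. A sketch of a common helper they could share (hypothetical; the driver keeps them inline):

	static void intel_release_ctx_page(struct drm_device *dev,
					   struct drm_i915_gem_object **slot,
					   u32 reg, u32 enable_bit)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		struct drm_i915_gem_object *obj = *slot;

		I915_WRITE(reg, obj->gtt_offset & ~enable_bit);
		POSTING_READ(reg);

		i915_gem_object_unpin(obj);
		drm_gem_object_unreference(&obj->base);
		*slot = NULL;
	}
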
 
        if (IS_IRONLAKE_M(dev))
index 21551fe745416abb4597341b18d647f2529e85ce..5a4f14e36d6c4d4fb5b8c7ac540a46f94ba2367e 100644 (file)
@@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
 
 struct intel_framebuffer {
        struct drm_framebuffer base;
-       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj;
 };
 
 struct intel_fbdev {
@@ -166,7 +166,7 @@ struct intel_crtc {
        struct intel_unpin_work *unpin_work;
        int fdi_lanes;
 
-       struct drm_gem_object *cursor_bo;
+       struct drm_i915_gem_object *cursor_bo;
        uint32_t cursor_addr;
        int16_t cursor_x, cursor_y;
        int16_t cursor_width, cursor_height;
@@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
 struct intel_unpin_work {
        struct work_struct work;
        struct drm_device *dev;
-       struct drm_gem_object *old_fb_obj;
-       struct drm_gem_object *pending_flip_obj;
+       struct drm_i915_gem_object *old_fb_obj;
+       struct drm_i915_gem_object *pending_flip_obj;
        struct drm_pending_vblank_event *event;
        int pending;
        bool enable_stall_check;
@@ -236,7 +236,8 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
+extern void intel_mark_busy(struct drm_device *dev,
+                           struct drm_i915_gem_object *obj);
 extern void intel_lvds_init(struct drm_device *dev);
 extern void intel_dp_init(struct drm_device *dev, int dp_reg);
 void
@@ -299,13 +300,13 @@ extern void ironlake_disable_drps(struct drm_device *dev);
 extern void intel_init_emon(struct drm_device *dev);
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-                                     struct drm_gem_object *obj,
+                                     struct drm_i915_gem_object *obj,
                                      bool pipelined);
 
 extern int intel_framebuffer_init(struct drm_device *dev,
                                  struct intel_framebuffer *ifb,
                                  struct drm_mode_fb_cmd *mode_cmd,
-                                 struct drm_gem_object *obj);
+                                 struct drm_i915_gem_object *obj);
 extern int intel_fbdev_init(struct drm_device *dev);
 extern void intel_fbdev_fini(struct drm_device *dev);
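
intel_framebuffer wraps the core drm_framebuffer and now carries the typed buffer object directly. Recovering the wrapper from a core pointer uses the same embedded-base trick as to_intel_bo(); a sketch of the accessor the intel_display.c hunks rely on:

	#define to_intel_framebuffer(x) \
		container_of(x, struct intel_framebuffer, base)
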
 
index af2a1dddc28e2e908529db44a24cea44ff10d387..c2cffeb4fe89799c6c5f83f474bbf5be7992a049 100644 (file)
@@ -65,8 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
        struct fb_info *info;
        struct drm_framebuffer *fb;
        struct drm_mode_fb_cmd mode_cmd;
-       struct drm_gem_object *fbo = NULL;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        struct device *device = &dev->pdev->dev;
        int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
 
@@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
        size = mode_cmd.pitch * mode_cmd.height;
        size = ALIGN(size, PAGE_SIZE);
-       fbo = i915_gem_alloc_object(dev, size);
-       if (!fbo) {
+       obj = i915_gem_alloc_object(dev, size);
+       if (!obj) {
                DRM_ERROR("failed to allocate framebuffer\n");
                ret = -ENOMEM;
                goto out;
        }
-       obj_priv = to_intel_bo(fbo);
 
        mutex_lock(&dev->struct_mutex);
 
        /* Flush everything out, we'll be doing GTT only from now on */
-       ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
+       ret = intel_pin_and_fence_fb_obj(dev, obj, false);
        if (ret) {
                DRM_ERROR("failed to pin fb: %d\n", ret);
                goto out_unref;
@@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
        info->par = ifbdev;
 
-       ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+       ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
        if (ret)
                goto out_unpin;
 
@@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
        else
                info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
 
-       info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+       info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
        info->fix.smem_len = size;
 
-       info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
-                                      size);
+       info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
        if (!info->screen_base) {
                ret = -ENOSPC;
                goto out_unpin;
@@ -168,7 +165,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
        DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
                      fb->width, fb->height,
-                     obj_priv->gtt_offset, fbo);
+                     obj->gtt_offset, obj);
 
 
        mutex_unlock(&dev->struct_mutex);
@@ -176,9 +173,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
        return 0;
 
 out_unpin:
-       i915_gem_object_unpin(fbo);
+       i915_gem_object_unpin(obj);
 out_unref:
-       drm_gem_object_unreference(fbo);
+       drm_gem_object_unreference(&obj->base);
        mutex_unlock(&dev->struct_mutex);
 out:
        return ret;
@@ -225,7 +222,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
 
        drm_framebuffer_cleanup(&ifb->base);
        if (ifb->obj) {
-               drm_gem_object_unreference_unlocked(ifb->obj);
+               drm_gem_object_unreference_unlocked(&ifb->obj->base);
                ifb->obj = NULL;
        }
 }
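
The fbdev path above publishes the pinned object at two addresses for the same memory: smem_start is the bus address of the framebuffer in the aperture, handed to userspace, while screen_base is the kernel's own write-combining mapping. A sketch of the pairing, with size being the page-aligned allocation from earlier in the function:

	info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
	info->fix.smem_len   = size;

	/* same range, CPU-visible and write-combined */
	info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
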
index ec8ffaccbbdb2fa1c8c064a8c82b4aa65a8c9293..af715cc03ee078cddaa3e648909b21a910667473 100644 (file)
@@ -376,24 +376,23 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 
 static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
 {
-       struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+       struct drm_i915_gem_object *obj = overlay->old_vid_bo;
 
        i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 
        overlay->old_vid_bo = NULL;
 }
 
 static void intel_overlay_off_tail(struct intel_overlay *overlay)
 {
-       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj = overlay->vid_bo;
 
        /* never have the overlay hw on without showing a frame */
        BUG_ON(!overlay->vid_bo);
-       obj = &overlay->vid_bo->base;
 
        i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
        overlay->vid_bo = NULL;
 
        overlay->crtc->overlay = NULL;
@@ -764,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
 }
 
 static int intel_overlay_do_put_image(struct intel_overlay *overlay,
-                                     struct drm_gem_object *new_bo,
+                                     struct drm_i915_gem_object *new_bo,
                                      struct put_image_params *params)
 {
        int ret, tmp_width;
        struct overlay_registers *regs;
        bool scale_changed = false;
-       struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
        struct drm_device *dev = overlay->dev;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -825,7 +823,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
        regs->SWIDTHSW = calc_swidthsw(overlay->dev,
                                       params->offset_Y, tmp_width);
        regs->SHEIGHT = params->src_h;
-       regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
+               regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
        regs->OSTRIDE = params->stride_Y;
 
        if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -839,8 +837,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                                      params->src_w/uv_hscale);
                regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
                regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
-               regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
-               regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+               regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+               regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
                regs->OSTRIDE |= params->stride_UV << 16;
        }
 
@@ -857,7 +855,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
                goto out_unpin;
 
        overlay->old_vid_bo = overlay->vid_bo;
-       overlay->vid_bo = to_intel_bo(new_bo);
+       overlay->vid_bo = new_bo;
 
        return 0;
 
@@ -970,7 +968,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
 
 static int check_overlay_src(struct drm_device *dev,
                             struct drm_intel_overlay_put_image *rec,
-                            struct drm_gem_object *new_bo)
+                            struct drm_i915_gem_object *new_bo)
 {
        int uv_hscale = uv_hsubsampling(rec->flags);
        int uv_vscale = uv_vsubsampling(rec->flags);
@@ -1055,7 +1053,7 @@ static int check_overlay_src(struct drm_device *dev,
                        return -EINVAL;
 
                tmp = rec->stride_Y*rec->src_height;
-               if (rec->offset_Y + tmp > new_bo->size)
+               if (rec->offset_Y + tmp > new_bo->base.size)
                        return -EINVAL;
                break;
 
@@ -1066,12 +1064,12 @@ static int check_overlay_src(struct drm_device *dev,
                        return -EINVAL;
 
                tmp = rec->stride_Y * rec->src_height;
-               if (rec->offset_Y + tmp > new_bo->size)
+               if (rec->offset_Y + tmp > new_bo->base.size)
                        return -EINVAL;
 
                tmp = rec->stride_UV * (rec->src_height / uv_vscale);
-               if (rec->offset_U + tmp > new_bo->size ||
-                   rec->offset_V + tmp > new_bo->size)
+               if (rec->offset_U + tmp > new_bo->base.size ||
+                   rec->offset_V + tmp > new_bo->base.size)
                        return -EINVAL;
                break;
        }
@@ -1114,7 +1112,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
        struct intel_overlay *overlay;
        struct drm_mode_object *drmmode_obj;
        struct intel_crtc *crtc;
-       struct drm_gem_object *new_bo;
+       struct drm_i915_gem_object *new_bo;
        struct put_image_params *params;
        int ret;
 
@@ -1153,8 +1151,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
        }
        crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
 
-       new_bo = drm_gem_object_lookup(dev, file_priv,
-                                      put_image_rec->bo_handle);
+       new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+                                                  put_image_rec->bo_handle));
        if (!new_bo) {
                ret = -ENOENT;
                goto out_free;
@@ -1245,7 +1243,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
 out_unlock:
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&dev->mode_config.mutex);
-       drm_gem_object_unreference_unlocked(new_bo);
+       drm_gem_object_unreference_unlocked(&new_bo->base);
 out_free:
        kfree(params);
 
@@ -1398,7 +1396,7 @@ void intel_setup_overlay(struct drm_device *dev)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
        struct intel_overlay *overlay;
-       struct drm_gem_object *reg_bo;
+       struct drm_i915_gem_object *reg_bo;
        struct overlay_registers *regs;
        int ret;
 
@@ -1413,7 +1411,7 @@ void intel_setup_overlay(struct drm_device *dev)
        reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
        if (!reg_bo)
                goto out_free;
-       overlay->reg_bo = to_intel_bo(reg_bo);
+       overlay->reg_bo = reg_bo;
 
        if (OVERLAY_NEEDS_PHYSICAL(dev)) {
                ret = i915_gem_attach_phys_object(dev, reg_bo,
@@ -1423,14 +1421,14 @@ void intel_setup_overlay(struct drm_device *dev)
                         DRM_ERROR("failed to attach phys overlay regs\n");
                         goto out_free_bo;
                 }
-               overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+               overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
        } else {
                ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
                if (ret) {
                         DRM_ERROR("failed to pin overlay register bo\n");
                         goto out_free_bo;
                 }
-               overlay->flip_addr = overlay->reg_bo->gtt_offset;
+               overlay->flip_addr = reg_bo->gtt_offset;
 
                ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
                if (ret) {
@@ -1462,7 +1460,7 @@ void intel_setup_overlay(struct drm_device *dev)
 out_unpin_bo:
        i915_gem_object_unpin(reg_bo);
 out_free_bo:
-       drm_gem_object_unreference(reg_bo);
+       drm_gem_object_unreference(&reg_bo->base);
 out_free:
        kfree(overlay);
        return;
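
flip_addr has to be an address the overlay engine can fetch directly, which is why setup branches on OVERLAY_NEEDS_PHYSICAL(): either the bus address of the attached phys object or the pinned object's GTT offset. Condensed from the hunks above:

	if (OVERLAY_NEEDS_PHYSICAL(dev))
		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
	else
		overlay->flip_addr = reg_bo->gtt_offset;
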
index 1db860d7989a5e46fa86087ba6b943f895fcc173..181aad31125d89c32aaa6d3f85ea78a66f8fcf29 100644 (file)
@@ -139,7 +139,7 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
 static int init_ring_common(struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
-       struct drm_i915_gem_object *obj_priv = to_intel_bo(ring->gem_object);
+       struct drm_i915_gem_object *obj = ring->obj;
        u32 head;
 
        /* Stop the ring if it's running. */
@@ -148,7 +148,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
        ring->write_tail(ring, 0);
 
        /* Initialize the ring. */
-       I915_WRITE_START(ring, obj_priv->gtt_offset);
+       I915_WRITE_START(ring, obj->gtt_offset);
        head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
        /* G45 ring initialization fails to reset head to zero */
@@ -178,7 +178,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 
        /* If the head is still not zero, the ring is dead */
        if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
-           I915_READ_START(ring) != obj_priv->gtt_offset ||
+           I915_READ_START(ring) != obj->gtt_offset ||
            (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
                DRM_ERROR("%s initialization failed "
                                "ctl %08x head %08x tail %08x start %08x\n",
@@ -514,17 +514,15 @@ render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 static void cleanup_status_page(struct intel_ring_buffer *ring)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
 
        obj = ring->status_page.obj;
        if (obj == NULL)
                return;
-       obj_priv = to_intel_bo(obj);
 
-       kunmap(obj_priv->pages[0]);
+       kunmap(obj->pages[0]);
        i915_gem_object_unpin(obj);
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;
 
        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
@@ -534,8 +532,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
-       struct drm_gem_object *obj;
-       struct drm_i915_gem_object *obj_priv;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        obj = i915_gem_alloc_object(dev, 4096);
@@ -544,16 +541,15 @@ static int init_status_page(struct intel_ring_buffer *ring)
                ret = -ENOMEM;
                goto err;
        }
-       obj_priv = to_intel_bo(obj);
-       obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+       obj->agp_type = AGP_USER_CACHED_MEMORY;
 
        ret = i915_gem_object_pin(obj, 4096, true);
        if (ret != 0) {
                goto err_unref;
        }
 
-       ring->status_page.gfx_addr = obj_priv->gtt_offset;
-       ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+       ring->status_page.gfx_addr = obj->gtt_offset;
+       ring->status_page.page_addr = kmap(obj->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                goto err_unpin;
@@ -570,7 +566,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 err_unpin:
        i915_gem_object_unpin(obj);
 err_unref:
-       drm_gem_object_unreference(obj);
+       drm_gem_object_unreference(&obj->base);
 err:
        return ret;
 }
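
The two status-page hunks above are mirror images: setup allocates one page,
marks it CPU-snooped, pins it so it has a stable GTT offset, and kmaps page
zero for the CPU; teardown undoes each step in reverse. A condensed sketch
under those assumptions (illustrative function names, with the hws_map
bookkeeping and kmap failure handling elided):

        static int status_page_setup_sketch(struct drm_device *dev,
                                            struct intel_ring_buffer *ring)
        {
                struct drm_i915_gem_object *obj;
                int ret;

                obj = i915_gem_alloc_object(dev, 4096);  /* one page */
                if (obj == NULL)
                        return -ENOMEM;

                obj->agp_type = AGP_USER_CACHED_MEMORY;  /* snooped by the CPU */

                ret = i915_gem_object_pin(obj, 4096, true);
                if (ret) {
                        drm_gem_object_unreference(&obj->base);
                        return ret;
                }

                ring->status_page.gfx_addr = obj->gtt_offset;      /* GPU view */
                ring->status_page.page_addr = kmap(obj->pages[0]); /* CPU view */
                ring->status_page.obj = obj;
                return 0;
        }

        static void status_page_teardown_sketch(struct intel_ring_buffer *ring)
        {
                struct drm_i915_gem_object *obj = ring->status_page.obj;

                kunmap(obj->pages[0]);                  /* CPU mapping   */
                i915_gem_object_unpin(obj);             /* GTT binding   */
                drm_gem_object_unreference(&obj->base); /* last: may free */
                ring->status_page.obj = NULL;
        }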
@@ -578,8 +574,7 @@ err:
 int intel_init_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
 {
-       struct drm_i915_gem_object *obj_priv;
-       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj;
        int ret;
 
        ring->dev = dev;
@@ -600,15 +595,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
                goto err_hws;
        }
 
-       ring->gem_object = obj;
+       ring->obj = obj;
 
        ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
        if (ret)
                goto err_unref;
 
-       obj_priv = to_intel_bo(obj);
        ring->map.size = ring->size;
-       ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+       ring->map.offset = dev->agp->base + obj->gtt_offset;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;
@@ -632,8 +626,8 @@ err_unmap:
 err_unpin:
        i915_gem_object_unpin(obj);
 err_unref:
-       drm_gem_object_unreference(obj);
-       ring->gem_object = NULL;
+       drm_gem_object_unreference(&obj->base);
+       ring->obj = NULL;
 err_hws:
        cleanup_status_page(ring);
        return ret;
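
The err_unmap/err_unpin/err_unref ladder above is the usual kernel unwind
idiom: each label undoes exactly the steps that succeeded before the failure
point, in reverse order, so the unpin always precedes the final unreference
(which may free the object). Schematically, with an illustrative name and
drm_core_ioremap_wc() assumed as the mapping helper implied by the err_unmap
label:

        static int ring_acquire_sketch(struct drm_device *dev,
                                       struct intel_ring_buffer *ring)
        {
                struct drm_i915_gem_object *obj;
                int ret;

                obj = i915_gem_alloc_object(dev, ring->size);   /* takes a ref */
                if (obj == NULL)
                        return -ENOMEM;
                ring->obj = obj;

                ret = i915_gem_object_pin(obj, PAGE_SIZE, true); /* GTT binding */
                if (ret)
                        goto err_unref;

                ring->map.size = ring->size;
                ring->map.offset = dev->agp->base + obj->gtt_offset;
                drm_core_ioremap_wc(&ring->map, dev);   /* CPU mapping (assumed) */
                if (ring->map.handle == NULL) {
                        ret = -EINVAL;
                        goto err_unpin;
                }
                return 0;

        err_unpin:
                i915_gem_object_unpin(obj);             /* undo the pin first */
        err_unref:
                drm_gem_object_unreference(&obj->base); /* drop the ref last,
                                                         * since it may free obj */
                ring->obj = NULL;
                return ret;
        }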
@@ -644,7 +638,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
        struct drm_i915_private *dev_priv;
        int ret;
 
-       if (ring->gem_object == NULL)
+       if (ring->obj == NULL)
                return;
 
        /* Disable the ring buffer. The ring must be idle at this point */
@@ -654,9 +648,9 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
        drm_core_ioremapfree(&ring->map, ring->dev);
 
-       i915_gem_object_unpin(ring->gem_object);
-       drm_gem_object_unreference(ring->gem_object);
-       ring->gem_object = NULL;
+       i915_gem_object_unpin(ring->obj);
+       drm_gem_object_unreference(&ring->obj->base);
+       ring->obj = NULL;
 
        if (ring->cleanup)
                ring->cleanup(ring);
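
Note the early return on ring->obj == NULL above: clearing the field at the
end of teardown (and on the init error path) makes cleanup safe to call on a
ring that never finished initializing. In miniature:

        if (ring->obj == NULL)          /* nothing acquired, nothing to undo */
                return;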
@@ -902,11 +896,11 @@ static int blt_ring_init(struct intel_ring_buffer *ring)
                u32 *ptr;
                int ret;
 
-               obj = to_intel_bo(i915_gem_alloc_object(ring->dev, 4096));
+               obj = i915_gem_alloc_object(ring->dev, 4096);
                if (obj == NULL)
                        return -ENOMEM;
 
-               ret = i915_gem_object_pin(&obj->base, 4096, true);
+               ret = i915_gem_object_pin(obj, 4096, true);
                if (ret) {
                        drm_gem_object_unreference(&obj->base);
                        return ret;
@@ -917,9 +911,9 @@ static int blt_ring_init(struct intel_ring_buffer *ring)
                *ptr++ = MI_NOOP;
                kunmap(obj->pages[0]);
 
-               ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+               ret = i915_gem_object_set_to_gtt_domain(obj, false);
                if (ret) {
-                       i915_gem_object_unpin(&obj->base);
+                       i915_gem_object_unpin(obj);
                        drm_gem_object_unreference(&obj->base);
                        return ret;
                }
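
blt_ring_init() shows the payoff at call sites that previously juggled both
types: once helpers such as i915_gem_object_pin(), i915_gem_object_unpin() and
i915_gem_object_set_to_gtt_domain() take the driver type directly, the
&obj->base and to_intel_bo() wrappers disappear. Roughly (parameter names are
an assumption; the exact prototypes live in i915_drv.h):

        /* before: core type in, immediate to_intel_bo() inside */
        int i915_gem_object_pin(struct drm_gem_object *obj,
                                uint32_t alignment, bool mappable);

        /* after: the i915 type is used end to end */
        int i915_gem_object_pin(struct drm_i915_gem_object *obj,
                                uint32_t alignment, bool mappable);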
index 2565d65a625bfbfa43e6b5bd2d302c66df31d0d9..1747e329ee9445206cc94259950146d8a8c0585e 100644 (file)
@@ -4,7 +4,7 @@
 struct  intel_hw_status_page {
        u32     __iomem *page_addr;
        unsigned int    gfx_addr;
-       struct          drm_gem_object *obj;
+       struct          drm_i915_gem_object *obj;
 };
 
 #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
@@ -32,7 +32,7 @@ struct  intel_ring_buffer {
        u32             mmio_base;
        void            *virtual_start;
        struct          drm_device *dev;
-       struct          drm_gem_object *gem_object;
+       struct          drm_i915_gem_object *obj;
 
        unsigned int    head;
        unsigned int    tail;
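
With the two fields in intel_ringbuffer.h retyped (and gem_object renamed to
the now-conventional obj), ring code reaches driver-private state without any
conversion. For instance, an illustrative accessor:

        /* no to_intel_bo() round trip needed */
        static inline u32 ring_gtt_start(struct intel_ring_buffer *ring)
        {
                return ring->obj->gtt_offset;
        }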