drm/vmwgfx: Use TTM handles instead of SIDs as user-space surface handles.
author    Thomas Hellstrom <thellstrom@vmware.com>
          Tue, 22 Dec 2009 15:53:41 +0000 (16:53 +0100)
committer Dave Airlie <airlied@redhat.com>
          Wed, 23 Dec 2009 00:06:24 +0000 (10:06 +1000)
Improve the command verifier to catch all occurrences of surface handles
and translate them to SIDs.

This way DMA buffers and 3D surfaces share a common handle space,
which makes it possible for the kms code to differentiate between the two
when resolving a user-space handle.
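
A single handle can therefore name either object type; a consumer first
tries the surface lookup and, if that fails, retries the same handle as a
DMA buffer, which is what vmw_kms_fb_create does in the diff below.  A
minimal sketch of that dispatch (vmw_fb_from_handle() and the two helpers
it calls are hypothetical; only the lookup functions are from this patch):

  /*
   * Illustration only, not part of the patch: resolve a user-space TTM
   * handle that may name either a 3D surface or a DMA buffer.  The
   * vmw_fb_from_surface()/vmw_fb_from_dmabuf() helpers are hypothetical
   * stand-ins for the real framebuffer setup and are assumed to take
   * over the reference returned by the lookup.
   */
  static int vmw_fb_from_handle(struct vmw_private *dev_priv,
                                struct ttm_object_file *tfile,
                                uint32_t handle)
  {
          struct vmw_surface *surface;
          struct vmw_dma_buffer *bo;
          int ret;

          ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
                                               handle, &surface);
          if (ret == 0)
                  return vmw_fb_from_surface(dev_priv, surface);

          /* Not a surface handle; try the same handle as a DMA buffer. */
          ret = vmw_user_dmabuf_lookup(tfile, handle, &bo);
          if (ret != 0)
                  return ret;

          return vmw_fb_from_dmabuf(dev_priv, bo);
  }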

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 43546d09d1b047eb13eab55c1421365b5d0cff2e..e61bd85b6975abe693d2e64cf2de7438c7a6bf50 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -123,6 +123,7 @@ struct vmw_sw_context{
        uint32_t last_cid;
        bool cid_valid;
        uint32_t last_sid;
+       uint32_t sid_translation;
        bool sid_valid;
        struct ttm_object_file *tfile;
        struct list_head validate_nodes;
@@ -317,9 +318,10 @@ extern void vmw_surface_res_free(struct vmw_resource *res);
 extern int vmw_surface_init(struct vmw_private *dev_priv,
                            struct vmw_surface *srf,
                            void (*res_free) (struct vmw_resource *res));
-extern int vmw_user_surface_lookup(struct vmw_private *dev_priv,
-                                  struct ttm_object_file *tfile,
-                                  int sid, struct vmw_surface **out);
+extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
+                                         struct ttm_object_file *tfile,
+                                         uint32_t handle,
+                                         struct vmw_surface **out);
 extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv);
 extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -328,7 +330,7 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                       struct drm_file *file_priv);
 extern int vmw_surface_check(struct vmw_private *dev_priv,
                             struct ttm_object_file *tfile,
-                            int id);
+                            uint32_t handle, int *id);
 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
 extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
                           struct vmw_dma_buffer *vmw_bo,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7e73cf51e2983e0fa0b8d8645091481984a77c87..2e92da5674033cd1af7d1e658d8ac5fa9c5d0a0a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -73,21 +73,32 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 
 static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
-                            uint32_t sid)
+                            uint32_t *sid)
 {
-       if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) &&
-                    sid != SVGA3D_INVALID_ID)) {
-               int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid);
+       if (*sid == SVGA3D_INVALID_ID)
+               return 0;
+
+       if (unlikely((!sw_context->sid_valid  ||
+                     *sid != sw_context->last_sid))) {
+               int real_id;
+               int ret = vmw_surface_check(dev_priv, sw_context->tfile,
+                                           *sid, &real_id);
 
                if (unlikely(ret != 0)) {
-                       DRM_ERROR("Could ot find or use surface %u\n",
-                                 (unsigned) sid);
+                       DRM_ERROR("Could not find or use surface 0x%08x "
+                                 "address 0x%08lx\n",
+                                 (unsigned int) *sid,
+                                 (unsigned long) sid);
                        return ret;
                }
 
-               sw_context->last_sid = sid;
+               sw_context->last_sid = *sid;
                sw_context->sid_valid = true;
-       }
+               *sid = real_id;
+               sw_context->sid_translation = real_id;
+       } else
+               *sid = sw_context->sid_translation;
+
        return 0;
 }
 
@@ -107,7 +118,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                return ret;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
-       return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid);
+       ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
+       return ret;
 }
 
 static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@@ -121,10 +133,10 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
        int ret;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
-       ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
+       ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
-       return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
+       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
 }
 
 static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@@ -138,10 +150,10 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
        int ret;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
-       ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
+       ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
-       return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
+       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
 }
 
 static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@@ -154,7 +166,7 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
        } *cmd;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
-       return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid);
+       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
 }
 
 static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@@ -167,7 +179,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
        } *cmd;
 
        cmd = container_of(header, struct vmw_sid_cmd, header);
-       return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid);
+       return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 }
 
 static int vmw_cmd_dma(struct vmw_private *dev_priv,
@@ -187,12 +199,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
        uint32_t cur_validate_node;
        struct ttm_validate_buffer *val_buf;
 
-
        cmd = container_of(header, struct vmw_dma_cmd, header);
-       ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid);
-       if (unlikely(ret != 0))
-               return ret;
-
        handle = cmd->dma.guest.ptr.gmrId;
        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
@@ -228,14 +235,23 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
                ++sw_context->cur_val_buf;
        }
 
-       ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile,
-                                     cmd->dma.host.sid, &srf);
+       ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+                                            cmd->dma.host.sid, &srf);
        if (ret) {
                DRM_ERROR("could not find surface\n");
                goto out_no_reloc;
        }
 
+       /**
+        * Patch command stream with device SID.
+        */
+
+       cmd->dma.host.sid = srf->res.id;
        vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
+       /**
+        * FIXME: May deadlock here when called from the
+        * command parsing code.
+        */
        vmw_surface_unreference(&srf);
 
 out_no_reloc:
@@ -243,6 +259,90 @@ out_no_reloc:
        return ret;
 }
 
+static int vmw_cmd_draw(struct vmw_private *dev_priv,
+                       struct vmw_sw_context *sw_context,
+                       SVGA3dCmdHeader *header)
+{
+       struct vmw_draw_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdDrawPrimitives body;
+       } *cmd;
+       SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
+               (unsigned long)header + sizeof(*cmd));
+       SVGA3dPrimitiveRange *range;
+       uint32_t i;
+       uint32_t maxnum;
+       int ret;
+
+       ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+       if (unlikely(ret != 0))
+               return ret;
+
+       cmd = container_of(header, struct vmw_draw_cmd, header);
+       maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);
+
+       if (unlikely(cmd->body.numVertexDecls > maxnum)) {
+               DRM_ERROR("Illegal number of vertex declarations.\n");
+               return -EINVAL;
+       }
+
+       for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
+               ret = vmw_cmd_sid_check(dev_priv, sw_context,
+                                       &decl->array.surfaceId);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+
+       maxnum = (header->size - sizeof(cmd->body) -
+                 cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
+       if (unlikely(cmd->body.numRanges > maxnum)) {
+               DRM_ERROR("Illegal number of index ranges.\n");
+               return -EINVAL;
+       }
+
+       range = (SVGA3dPrimitiveRange *) decl;
+       for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
+               ret = vmw_cmd_sid_check(dev_priv, sw_context,
+                                       &range->indexArray.surfaceId);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+       return 0;
+}
+
+
+static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
+                            struct vmw_sw_context *sw_context,
+                            SVGA3dCmdHeader *header)
+{
+       struct vmw_tex_state_cmd {
+               SVGA3dCmdHeader header;
+               SVGA3dCmdSetTextureState state;
+       };
+
+       SVGA3dTextureState *last_state = (SVGA3dTextureState *)
+         ((unsigned long) header + header->size + sizeof(header));
+       SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
+               ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
+       int ret;
+
+       ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
+       if (unlikely(ret != 0))
+               return ret;
+
+       for (; cur_state < last_state; ++cur_state) {
+               if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
+                       continue;
+
+               ret = vmw_cmd_sid_check(dev_priv, sw_context,
+                                       &cur_state->value);
+               if (unlikely(ret != 0))
+                       return ret;
+       }
+
+       return 0;
+}
+
 
 typedef int (*vmw_cmd_func) (struct vmw_private *,
                             struct vmw_sw_context *,
@@ -264,7 +364,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
                    &vmw_cmd_set_render_target_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check),
+       VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
@@ -276,7 +376,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
-       VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check),
+       VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
@@ -291,6 +391,7 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
                         void *buf, uint32_t *size)
 {
        uint32_t cmd_id;
+       uint32_t size_remaining = *size;
        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
        int ret;
 
@@ -304,6 +405,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
        *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
 
        cmd_id -= SVGA_3D_CMD_BASE;
+       if (unlikely(*size > size_remaining))
+               goto out_err;
+
        if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
                goto out_err;
 
@@ -326,6 +430,7 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
        int ret;
 
        while (cur_size > 0) {
+               size = cur_size;
                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
                if (unlikely(ret != 0))
                        return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index e9403be446fedb1c97a4e06b644300991ad69ed8..b1af76e371c38ac4179afcae85d0a61810cef348 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -106,8 +106,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
        int ret;
 
        if (handle) {
-               ret = vmw_user_surface_lookup(dev_priv, tfile,
-                                             handle, &surface);
+               ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+                                                    handle, &surface);
                if (!ret) {
                        if (!surface->snooper.image) {
                                DRM_ERROR("surface not suitable for cursor\n");
@@ -704,8 +704,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
        struct vmw_dma_buffer *bo = NULL;
        int ret;
 
-       ret = vmw_user_surface_lookup(dev_priv, tfile,
-                                     mode_cmd->handle, &surface);
+       ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
+                                            mode_cmd->handle, &surface);
        if (ret)
                goto try_dmabuf;
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index a1ceed0c8e070588a4d59e1000ef52cb07ae23bc..c012d5927f650263bea8b63fb004551a61214ea9 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -488,28 +488,44 @@ static void vmw_user_surface_free(struct vmw_resource *res)
        kfree(user_srf);
 }
 
-int vmw_user_surface_lookup(struct vmw_private *dev_priv,
-                           struct ttm_object_file *tfile,
-                           int sid, struct vmw_surface **out)
+int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
+                                  struct ttm_object_file *tfile,
+                                  uint32_t handle, struct vmw_surface **out)
 {
        struct vmw_resource *res;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
+       struct ttm_base_object *base;
+       int ret = -EINVAL;
 
-       res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, sid);
-       if (unlikely(res == NULL))
+       base = ttm_base_object_lookup(tfile, handle);
+       if (unlikely(base == NULL))
                return -EINVAL;
 
-       if (res->res_free != &vmw_user_surface_free)
-               return -EINVAL;
+       if (unlikely(base->object_type != VMW_RES_SURFACE))
+               goto out_bad_resource;
 
-       srf = container_of(res, struct vmw_surface, res);
-       user_srf = container_of(srf, struct vmw_user_surface, srf);
-       if (user_srf->base.tfile != tfile && !user_srf->base.shareable)
-               return -EPERM;
+       user_srf = container_of(base, struct vmw_user_surface, base);
+       srf = &user_srf->srf;
+       res = &srf->res;
+
+       read_lock(&dev_priv->resource_lock);
+
+       if (!res->avail || res->res_free != &vmw_user_surface_free) {
+               read_unlock(&dev_priv->resource_lock);
+               goto out_bad_resource;
+       }
+
+       kref_get(&res->kref);
+       read_unlock(&dev_priv->resource_lock);
 
        *out = srf;
-       return 0;
+       ret = 0;
+
+out_bad_resource:
+       ttm_base_object_unref(&base);
+
+       return ret;
 }
 
 static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
@@ -526,35 +542,10 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
 {
-       struct vmw_private *dev_priv = vmw_priv(dev);
-       struct vmw_resource *res;
-       struct vmw_surface *srf;
-       struct vmw_user_surface *user_srf;
        struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       int ret = 0;
-
-       res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, arg->sid);
-       if (unlikely(res == NULL))
-               return -EINVAL;
-
-       if (res->res_free != &vmw_user_surface_free) {
-               ret = -EINVAL;
-               goto out;
-       }
 
-       srf = container_of(res, struct vmw_surface, res);
-       user_srf = container_of(srf, struct vmw_user_surface, srf);
-       if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
-               ret = -EPERM;
-               goto out;
-       }
-
-       ttm_ref_object_base_unref(tfile, user_srf->base.hash.key,
-                                 TTM_REF_USAGE);
-out:
-       vmw_resource_unreference(&res);
-       return ret;
+       return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
 }
 
 int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -649,7 +640,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
        }
        srf->snooper.crtc = NULL;
 
-       rep->sid = res->id;
+       rep->sid = user_srf->base.hash.key;
+       if (rep->sid == SVGA3D_INVALID_ID)
+               DRM_ERROR("Created bad Surface ID.\n");
+
        vmw_resource_unreference(&res);
        return 0;
 out_err1:
@@ -662,39 +656,33 @@ out_err0:
 int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
 {
-       struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_surface_create_req *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-       struct vmw_resource *res;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct drm_vmw_size __user *user_sizes;
-       int ret;
+       struct ttm_base_object *base;
+       int ret = -EINVAL;
 
-       res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, req->sid);
-       if (unlikely(res == NULL))
+       base = ttm_base_object_lookup(tfile, req->sid);
+       if (unlikely(base == NULL)) {
+               DRM_ERROR("Could not find surface to reference.\n");
                return -EINVAL;
-
-       if (res->res_free != &vmw_user_surface_free) {
-               ret = -EINVAL;
-               goto out;
        }
 
-       srf = container_of(res, struct vmw_surface, res);
-       user_srf = container_of(srf, struct vmw_user_surface, srf);
-       if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
-               DRM_ERROR("Tried to reference none shareable surface\n");
-               ret = -EPERM;
-               goto out;
-       }
+       if (unlikely(base->object_type != VMW_RES_SURFACE))
+               goto out_bad_resource;
+
+       user_srf = container_of(base, struct vmw_user_surface, base);
+       srf = &user_srf->srf;
 
        ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a surface.\n");
-               goto out;
+               goto out_no_reference;
        }
 
        rep->flags = srf->flags;
@@ -706,40 +694,43 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
        if (user_sizes)
                ret = copy_to_user(user_sizes, srf->sizes,
                                   srf->num_sizes * sizeof(*srf->sizes));
-       if (unlikely(ret != 0)) {
+       if (unlikely(ret != 0))
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
-               /**
-                * FIXME: Unreference surface here?
-                */
-               goto out;
-       }
-out:
-       vmw_resource_unreference(&res);
+out_bad_resource:
+out_no_reference:
+       ttm_base_object_unref(&base);
+
        return ret;
 }
 
 int vmw_surface_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
-                     int id)
+                     uint32_t handle, int *id)
 {
-       struct vmw_resource *res;
-       int ret = 0;
+       struct ttm_base_object *base;
+       struct vmw_user_surface *user_srf;
 
-       read_lock(&dev_priv->resource_lock);
-       res = idr_find(&dev_priv->surface_idr, id);
-       if (res && res->avail) {
-               struct vmw_surface *srf =
-                       container_of(res, struct vmw_surface, res);
-               struct vmw_user_surface *usrf =
-                       container_of(srf, struct vmw_user_surface, srf);
+       int ret = -EPERM;
 
-               if (usrf->base.tfile != tfile && !usrf->base.shareable)
-                       ret = -EPERM;
-       } else
-               ret = -EINVAL;
-       read_unlock(&dev_priv->resource_lock);
+       base = ttm_base_object_lookup(tfile, handle);
+       if (unlikely(base == NULL))
+               return -EINVAL;
+
+       if (unlikely(base->object_type != VMW_RES_SURFACE))
+               goto out_bad_surface;
 
+       user_srf = container_of(base, struct vmw_user_surface, base);
+       *id = user_srf->srf.res.id;
+       ret = 0;
+
+out_bad_surface:
+       /**
+        * FIXME: May deadlock here when called from the
+        * command parsing code.
+        */
+
+       ttm_base_object_unref(&base);
        return ret;
 }