}
if (client->vm)
- atomic_inc(&client->vm->engref[nv_engidx(engobj)]);
+ atomic_inc(&client->vm->engref[nv_engidx(engine)]);
list_add(&nv_engctx(engctx)->head, &engine->contexts);
nv_engctx(engctx)->addr = ~0ULL;
spin_unlock_irqrestore(&engine->lock, save);
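Note on the hunk above: with engine already typed, the per-engine VM refcount is indexed via nv_engidx(engine) directly, with no nv_object() detour. A standalone C11 sketch of that accounting follows; engref[] and the indexing mirror the patch, while NVDEV_SUBDEV_NR, the struct layout and the two helper names are assumptions for illustration.

#include <stdatomic.h>

#define NVDEV_SUBDEV_NR 64      /* assumed bound for the sketch */

struct nouveau_vm {
        atomic_int engref[NVDEV_SUBDEV_NR];     /* one usage count per engine index */
};

/* engctx create: take a reference on the engine's slot */
static void vm_engine_get(struct nouveau_vm *vm, int engidx)
{
        atomic_fetch_add(&vm->engref[engidx], 1);
}

/* engctx destroy: drop it again (see nouveau_engctx_destroy below) */
static void vm_engine_put(struct nouveau_vm *vm, int engidx)
{
        atomic_fetch_sub(&vm->engref[engidx], 1);
}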
void
nouveau_engctx_destroy(struct nouveau_engctx *engctx)
{
- struct nouveau_object *engobj = nv_object(engctx)->engine;
- struct nouveau_engine *engine = nv_engine(engobj);
+ struct nouveau_engine *engine = engctx->gpuobj.object.engine;
struct nouveau_client *client = nouveau_client(engctx);
unsigned long save;
spin_unlock_irqrestore(&engine->lock, save);
if (client->vm)
- atomic_dec(&client->vm->engref[nv_engidx(engobj)]);
+ atomic_dec(&client->vm->engref[nv_engidx(engine)]);
if (engctx->gpuobj.size)
nouveau_gpuobj_destroy(&engctx->gpuobj);
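The rewritten declaration reads the engine straight off the object embedded in the engctx's gpuobj instead of recovering it through the old nv_object()/nv_engine() cast pair. A mock of the containment chain that makes this a plain member access; the field names follow the patch, engctx_engine() is a hypothetical accessor, and all other fields are trimmed away.

struct nouveau_engine;

struct nouveau_object { struct nouveau_engine *engine; /* ... */ };
struct nouveau_gpuobj { struct nouveau_object object;  /* ... */ };
struct nouveau_engctx { struct nouveau_gpuobj gpuobj;  /* ... */ };

/* with object->engine typed, no cast is involved anymore: */
static inline struct nouveau_engine *
engctx_engine(struct nouveau_engctx *engctx)
{
        return engctx->gpuobj.object.engine;
}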
if (parent) {
struct nouveau_device *device = nv_device(parent);
- int engidx = nv_engidx(nv_object(engine));
+ int engidx = nv_engidx(engine);
if (device->disable_mask & (1ULL << engidx)) {
if (!nouveau_boolopt(device->cfgopt, iname, false)) {
};
if (!nv_iclass(engine, NV_SUBDEV_CLASS))
- engine = engine->engine;
+ engine = &engine->engine->subdev.object;
BUG_ON(engine == NULL);
return nouveau_object_ctor(parent, engine, &_nouveau_gpuobj_oclass,
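This hunk shows the idiom the whole series leans on: once an engine pointer is typed, getting back to a generic struct nouveau_object * means taking the address of the object embedded at the front of the engine (&eng->subdev.object) rather than relying on the pointers being interchangeable. The same expression recurs in the ramht, device, bar and instmem hunks below. A minimal sketch with mocked layouts; demo_upcast() is illustrative only.

/* Mock layouts (the real headers carry many more fields): the object
 * sits at the front of the subdev, the subdev at the front of the
 * engine, so the "upcast" is plain member access, not a pointer cast. */
struct nouveau_object { int handle; };
struct nouveau_subdev { struct nouveau_object object; };
struct nouveau_engine { struct nouveau_subdev subdev; };

static struct nouveau_object *
demo_upcast(struct nouveau_engine *eng)
{
        return &eng->subdev.object;     /* same address as eng itself */
}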
struct nouveau_gpuobj *gpuobj;
int ret;
- ret = nouveau_object_create(parent, parent->engine,
+ ret = nouveau_object_create(parent, &parent->engine->subdev.object,
&nouveau_gpudup_oclass, 0, &gpuobj);
*pgpuobj = gpuobj;
if (ret)
return ret;
nouveau_object_ref(parent, &object->parent);
- nouveau_object_ref(engine, &object->engine);
+ nouveau_object_ref(engine, (struct nouveau_object **)&object->engine);
object->oclass = oclass;
object->oclass->handle |= pclass;
atomic_set(&object->refcount, 1);
list_del(&object->list);
spin_unlock(&_objlist_lock);
#endif
- nouveau_object_ref(NULL, &object->engine);
+ nouveau_object_ref(NULL, (struct nouveau_object **)&object->engine);
nouveau_object_ref(NULL, &object->parent);
kfree(object);
}
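nouveau_object_ref() stores through a struct nouveau_object **, so with the field now typed the ctor and dtor cast the slot itself. That trades away some type checking and is only sound because the generic object sits at offset zero of the engine (via the embedded subdev; see the header hunks below). A sketch that pins the invariant down, reusing the mocked layouts from the sketch above:

#include <stddef.h>
#include <assert.h>

struct nouveau_object { int handle; };
struct nouveau_subdev { struct nouveau_object object; };
struct nouveau_engine { struct nouveau_subdev subdev; };

/* the (struct nouveau_object **) slot cast is only sound while this holds: */
static_assert(offsetof(struct nouveau_engine, subdev.object) == 0,
              "object must remain the first member of the engine");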
if (object->engine) {
mutex_lock(&nv_subdev(object->engine)->mutex);
- ret = nouveau_object_inc(object->engine);
+ ret = nouveau_object_inc(&object->engine->subdev.object);
mutex_unlock(&nv_subdev(object->engine)->mutex);
if (ret) {
nv_error(object, "engine failed, %d\n", ret);
fail_self:
if (object->engine) {
mutex_lock(&nv_subdev(object->engine)->mutex);
- nouveau_object_dec(object->engine, false);
+ nouveau_object_dec(&object->engine->subdev.object, false);
mutex_unlock(&nv_subdev(object->engine)->mutex);
}
fail_engine:
if (object->engine) {
mutex_lock(&nv_subdev(object->engine)->mutex);
- nouveau_object_dec(object->engine, false);
+ nouveau_object_dec(&object->engine->subdev.object, false);
mutex_unlock(&nv_subdev(object->engine)->mutex);
}
if (object->engine) {
mutex_lock(&nv_subdev(object->engine)->mutex);
- ret = nouveau_object_dec(object->engine, true);
+ ret = nouveau_object_dec(&object->engine->subdev.object, true);
mutex_unlock(&nv_subdev(object->engine)->mutex);
if (ret) {
nv_warn(object, "engine failed suspend, %d\n", ret);
fail_parent:
if (object->engine) {
mutex_lock(&nv_subdev(object->engine)->mutex);
- rret = nouveau_object_inc(object->engine);
+ rret = nouveau_object_inc(&object->engine->subdev.object);
mutex_unlock(&nv_subdev(object->engine)->mutex);
if (rret)
nv_fatal(object, "engine failed to reinit, %d\n", rret);
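All four unwind sites above spell &object->engine->subdev.object out by hand. A named helper would keep the call sites greppable; a hypothetical one-liner, not part of this patch, written against the real headers:

/* Hypothetical helper, not in this patch: name the upcast that the
 * inc/dec unwind paths above repeat. */
static inline struct nouveau_object *
nv_engine_object(struct nouveau_engine *engine)
{
        return &engine->subdev.object;
}

/* usage: ret = nouveau_object_inc(nv_engine_object(object->engine)); */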
sclass = nv_parent(parent)->sclass;
while (sclass) {
if ((sclass->oclass->handle & 0xffff) == handle) {
- *pengine = parent->engine;
+ *pengine = &parent->engine->subdev.object;
*poclass = sclass->oclass;
return 0;
}
while (subdev && !nv_iclass(subdev, NV_SUBDEV_CLASS))
subdev = subdev->parent;
} else {
- subdev = object->engine;
+ subdev = &object->engine->subdev.object;
}
device = subdev;
int ret;
ret = nouveau_gpuobj_create(parent, parent->engine ?
- parent->engine : parent, /* <nv50 ramht */
+ &parent->engine->subdev.object : parent, /* <nv50 ramht */
&nouveau_ramht_oclass, 0, pargpu, size,
align, NVOBJ_FLAG_ZERO_ALLOC, &ramht);
*pramht = ramht;
struct nouveau_object *object = nv_object(obj);
while (object && !nv_iclass(object, NV_SUBDEV_CLASS))
object = object->parent;
- if (object == NULL || nv_subidx(object) != idx)
+ if (object == NULL || nv_subidx(nv_subdev(object)) != idx)
object = nv_device(obj)->subdev[idx];
return object ? nv_subdev(object) : NULL;
}
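Since nv_subidx() now wants a struct nouveau_subdev *, this lookup first walks to a subdev-class ancestor (the iclass test) and only then downcasts with nv_subdev() before asking for the index. The same walk, reshaped as a sketch to make that check-then-downcast ordering explicit; subdev_of() is a hypothetical name, the helpers are the real ones used above.

static struct nouveau_subdev *
subdev_of(struct nouveau_object *object, int idx)
{
        while (object && !nv_iclass(object, NV_SUBDEV_CLASS))
                object = object->parent;
        if (object && nv_subidx(nv_subdev(object)) == idx)
                return nv_subdev(object);
        return NULL;    /* caller falls back to device->subdev[idx] */
}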
if (ret)
return ret;
- nv_wo32(priv, 0x084, nv_engidx(object) - NVDEV_ENGINE_COPY0);
+ nv_wo32(priv, 0x084, nv_engidx(&priv->base.base) - NVDEV_ENGINE_COPY0);
return 0;
}
static void
nve0_copy_intr(struct nouveau_subdev *subdev)
{
- const int ce = nv_subidx(nv_object(subdev)) - NVDEV_ENGINE_COPY0;
+ const int ce = nv_subidx(subdev) - NVDEV_ENGINE_COPY0;
struct nve0_copy_priv *priv = (void *)subdev;
u32 stat = nv_rd32(priv, 0x104908 + (ce * 0x1000));
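nv_subidx() on the subdev yields the copy-engine instance relative to COPY0, which then selects a per-engine register block; per the hunk the blocks start at 0x104908 and are spaced 0x1000 apart. The address math as a standalone sketch; ce_stat_reg() is an illustrative name.

/* Register-stride math from the hunk above: each copy engine gets a
 * 0x1000-byte block, ce being its index relative to COPY0. */
static unsigned int ce_stat_reg(int ce)
{
        return 0x104908 + (unsigned int)ce * 0x1000;
}
/* ce_stat_reg(0) == 0x104908, ce_stat_reg(1) == 0x105908, ... */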
while (device && device->parent)
device = device->parent;
} else {
- device = nv_object(obj)->engine;
+ device = &nv_object(obj)->engine->subdev.object;
if (device && device->parent)
device = device->parent;
}
nvc0_fifo_recover(struct nvc0_fifo_priv *priv, struct nouveau_engine *engine,
struct nvc0_fifo_chan *chan)
{
- struct nouveau_object *engobj = nv_object(engine);
u32 chid = chan->base.chid;
unsigned long flags;
chan->state = KILLED;
spin_lock_irqsave(&priv->base.lock, flags);
- priv->mask |= 1ULL << nv_engidx(engobj);
+ priv->mask |= 1ULL << nv_engidx(engine);
spin_unlock_irqrestore(&priv->base.lock, flags);
schedule_work(&priv->fault);
}
nve0_fifo_recover(struct nve0_fifo_priv *priv, struct nouveau_engine *engine,
struct nve0_fifo_chan *chan)
{
- struct nouveau_object *engobj = nv_object(engine);
u32 chid = chan->base.chid;
unsigned long flags;
chan->state = KILLED;
spin_lock_irqsave(&priv->base.lock, flags);
- priv->mask |= 1ULL << nv_engidx(engobj);
+ priv->mask |= 1ULL << nv_engidx(engine);
spin_unlock_irqrestore(&priv->base.lock, flags);
schedule_work(&priv->fault);
}
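Both recover paths, nvc0 and nve0, record the failing engine as a bit in a 64-bit mask using nv_engidx() on the typed pointer, then defer the actual recovery to a workqueue outside the IRQ path. A standalone sketch of that bit bookkeeping; the helper names are assumptions, the one-bit-per-engine-index scheme is the patch's.

#include <stdint.h>

/* One bit per engine index, set under the fifo lock in the IRQ path,
 * drained by the fault worker. */
static void mark_faulted(uint64_t *mask, int engidx)
{
        *mask |= UINT64_C(1) << engidx;
}

static int next_faulted(uint64_t *mask)
{
        for (int i = 0; i < 64; i++) {
                if (*mask & (UINT64_C(1) << i)) {
                        *mask &= ~(UINT64_C(1) << i);
                        return i;       /* engine index to recover */
                }
        }
        return -1;      /* no pending faults */
}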
}
static inline int
-nv_engidx(struct nouveau_object *object)
+nv_engidx(struct nouveau_engine *engine)
{
- return nv_subidx(object);
+ return nv_subidx(&engine->subdev);
}
struct nouveau_engine *nouveau_engine(void *obj, int idx);
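After this change the two index helpers form a typed pair: nv_subidx() for subdevs, nv_engidx() for engines, the latter delegating through the embedded subdev. A usage sketch against the real helpers; demo_indices() is illustrative, and a valid engine pointer is assumed, as at the recover call sites above.

static void demo_indices(struct nouveau_engine *engine)
{
        int eidx = nv_engidx(engine);           /* engine flavour     */
        int sidx = nv_subidx(&engine->subdev);  /* what it expands to */
        /* eidx == sidx: one unit index, two typed entry points */
        (void)eidx; (void)sidx;
}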
struct nouveau_object {
struct nouveau_oclass *oclass;
struct nouveau_object *parent;
- struct nouveau_object *engine;
+ struct nouveau_engine *engine;
atomic_t refcount;
atomic_t usecount;
#if CONFIG_NOUVEAU_DEBUG >= NV_DBG_PARANOIA
}
static inline int
-nv_subidx(struct nouveau_object *object)
+nv_subidx(struct nouveau_subdev *subdev)
{
- return nv_hclass(nv_subdev(object)) & 0xff;
+ return nv_hclass(subdev) & 0xff;
}
struct nouveau_subdev *nouveau_subdev(void *obj, int idx);
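nv_subidx() masks the unit index out of the low byte of the class handle returned by nv_hclass(). A standalone illustration; the layout of the upper handle bits is an assumption here, only the low-byte index is taken from the hunk.

#include <stdio.h>

/* MOCK_SUBDEV_HANDLE is illustrative, not the real handle encoding. */
#define MOCK_SUBDEV_HANDLE(class_bits, idx) (((class_bits) << 8) | ((idx) & 0xff))

int main(void)
{
        unsigned handle = MOCK_SUBDEV_HANDLE(0x1d, 0x03);
        printf("subidx = %u\n", handle & 0xff); /* prints: subidx = 3 */
        return 0;
}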
struct nouveau_mem *mem, struct nouveau_object **pobject)
{
struct nouveau_object *gpuobj;
- int ret = nouveau_object_ctor(parent, parent->engine,
+ int ret = nouveau_object_ctor(parent, &parent->engine->subdev.object,
&nouveau_barobj_oclass,
mem, 0, &gpuobj);
if (ret == 0)
{
struct nouveau_instmem_impl *impl = (void *)imem->base.object.oclass;
struct nouveau_instobj_args args = { .size = size, .align = align };
- return nouveau_object_ctor(parent, parent->engine, impl->instobj, &args,
- sizeof(args), pobject);
+ return nouveau_object_ctor(parent, &parent->engine->subdev.object,
+ impl->instobj, &args, sizeof(args), pobject);
}
int