kref_get(&uobject->ref);
}
-static void uverbs_uobject_put_ref(struct kref *ref)
+static void uverbs_uobject_free(struct kref *ref)
{
struct ib_uobject *uobj =
container_of(ref, struct ib_uobject, ref);
void uverbs_uobject_put(struct ib_uobject *uobject)
{
- kref_put(&uobject->ref, uverbs_uobject_put_ref);
+ kref_put(&uobject->ref, uverbs_uobject_free);
}
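As background for the helpers above: uverbs_uobject_get/put wrap the kernel's kref refcounting idiom, where the release callback passed to kref_put() runs only when the last reference drops. A minimal, hypothetical sketch of that idiom (demo_uobject and demo_uobject_free are illustration-only names, not part of this patch):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct demo_uobject {
		struct kref ref;
		/* ... payload ... */
	};

	/* Runs only when the final reference is dropped. */
	static void demo_uobject_free(struct kref *ref)
	{
		struct demo_uobject *obj =
			container_of(ref, struct demo_uobject, ref);

		kfree(obj);
	}

	static void demo(void)
	{
		struct demo_uobject *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return;
		kref_init(&obj->ref);				/* refcount = 1 */
		kref_get(&obj->ref);				/* refcount = 2 */
		kref_put(&obj->ref, demo_uobject_free);		/* refcount = 1 */
		kref_put(&obj->ref, demo_uobject_free);		/* refcount = 0, freed */
	}

The rename from uverbs_uobject_put_ref to uverbs_uobject_free makes it explicit that the callback is the free path, not the put path itself.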
-static int uverbs_try_lock_object(struct ib_uobject *uobj, bool write)
+static int uverbs_try_lock_object(struct ib_uobject *uobj, bool exclusive)
{
/*
- * When a read is required, we use a positive counter. Each read
- * request checks that the value != -1 and increment it. Write
- * requires an exclusive access, thus we check that the counter is
- * zero (nobody claimed this object) and we set it to -1.
- * Releasing a read lock is done by simply decreasing the counter.
- * As for writes, since only a single write is permitted, setting
- * it to zero is enough for releasing it.
+ * When a shared access is required, we use a positive counter. Each
+ * shared access request checks that the value != -1 and increments it.
+ * Exclusive access is required for operations like write or destroy.
+ * In exclusive access mode, we check that the counter is zero (nobody
+ * claimed this object) and we set it to -1. Releasing a shared access
+ * lock is done simply by decreasing the counter. As for exclusive
+ * access locks, since only a single one of them is allowed
+ * concurrently, setting the counter to zero is enough for releasing
+ * this lock.
*/
- if (!write)
+ if (!exclusive)
return __atomic_add_unless(&uobj->usecnt, 1, -1) == -1 ?
-EBUSY : 0;
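The comment above describes the whole scheme, but this hunk only shows the shared branch. A hedged sketch of both halves, consistent with the comment (demo_try_lock/demo_unlock are hypothetical names, and the exclusive branch here is an assumption based on the comment, not code quoted from this patch):

	#include <linux/atomic.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* usecnt > 0: that many shared holders; -1: one exclusive holder; 0: free. */
	static int demo_try_lock(atomic_t *usecnt, bool exclusive)
	{
		if (!exclusive)
			/* Shared: increment unless an exclusive holder owns it (-1). */
			return __atomic_add_unless(usecnt, 1, -1) == -1 ?
				-EBUSY : 0;
		/* Exclusive: claim the object only if nobody holds it (0 -> -1). */
		return atomic_cmpxchg(usecnt, 0, -1) == 0 ? 0 : -EBUSY;
	}

	static void demo_unlock(atomic_t *usecnt, bool exclusive)
	{
		if (!exclusive)
			atomic_dec(usecnt);	/* drop one shared holder */
		else
			atomic_set(usecnt, 0);	/* sole exclusive holder releases */
	}

The release paths mirror rdma_lookup_put_uobject further down: a shared holder decrements the counter, while the single exclusive holder can simply store zero.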
/* Returns the ib_uobject or an error. The caller should check for IS_ERR. */
static struct ib_uobject *lookup_get_idr_uobject(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext,
- int id, bool write)
+ int id, bool exclusive)
{
struct ib_uobject *uobj;
static struct ib_uobject *lookup_get_fd_uobject(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext,
- int id, bool write)
+ int id, bool exclusive)
{
struct file *f;
struct ib_uobject *uobject;
const struct uverbs_obj_fd_type *fd_type =
container_of(type, struct uverbs_obj_fd_type, type);
- if (write)
+ if (exclusive)
return ERR_PTR(-EOPNOTSUPP);
f = fget(id);
struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext,
- int id, bool write)
+ int id, bool exclusive)
{
struct ib_uobject *uobj;
int ret;
- uobj = type->type_class->lookup_get(type, ucontext, id, write);
+ uobj = type->type_class->lookup_get(type, ucontext, id, exclusive);
if (IS_ERR(uobj))
return uobj;
goto free;
}
- ret = uverbs_try_lock_object(uobj, write);
+ ret = uverbs_try_lock_object(uobj, exclusive);
if (ret) {
WARN(ucontext->cleanup_reason,
"ib_uverbs: Trying to lookup_get while cleanup context\n");
return uobj;
free:
- uobj->type->type_class->lookup_put(uobj, write);
+ uobj->type->type_class->lookup_put(uobj, exclusive);
uverbs_uobject_put(uobj);
return ERR_PTR(ret);
}
return ret;
}
-static void lockdep_check(struct ib_uobject *uobj, bool write)
+static void lockdep_check(struct ib_uobject *uobj, bool exclusive)
{
#ifdef CONFIG_LOCKDEP
- if (write)
+ if (exclusive)
WARN_ON(atomic_read(&uobj->usecnt) > 0);
else
WARN_ON(atomic_read(&uobj->usecnt) == -1);
uobj->type->type_class->alloc_abort(uobj);
}
-static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool write)
+static void lookup_put_idr_uobject(struct ib_uobject *uobj, bool exclusive)
{
}
-static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool write)
+static void lookup_put_fd_uobject(struct ib_uobject *uobj, bool exclusive)
{
struct file *filp = uobj->object;
- WARN_ON(write);
+ WARN_ON(exclusive);
/* This indirectly calls uverbs_close_fd and frees the object */
fput(filp);
}
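For context on the comment above: the fput() pairs with the fget() taken in lookup_get_fd_uobject, and when the last reference to the struct file drops, the VFS calls the file's ->release() hook, which for these objects ends up in uverbs_close_fd. A minimal, hypothetical sketch of that pin/unpin pairing (demo_use_fd_object is an illustration-only name):

	#include <linux/file.h>
	#include <linux/errno.h>

	static int demo_use_fd_object(int fd)
	{
		struct file *filp = fget(fd);	/* pin: file refcount +1 */

		if (!filp)
			return -EBADF;
		/* ... use filp->private_data as the uobject ... */
		fput(filp);	/* unpin: the last fput invokes ->release() */
		return 0;
	}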
-void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool write)
+void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive)
{
- lockdep_check(uobj, write);
- uobj->type->type_class->lookup_put(uobj, write);
+ lockdep_check(uobj, exclusive);
+ uobj->type->type_class->lookup_put(uobj, exclusive);
/*
* In order to unlock an object, either decrease its usecnt for
- * read access or zero it in case of write access. See
+ * read access or zero it in case of exclusive access. See
* uverbs_try_lock_object for locking schema information.
*/
- if (!write)
+ if (!exclusive)
atomic_dec(&uobj->usecnt);
else
atomic_set(&uobj->usecnt, 0);
* When the other thread continues - without the RCU, it would
* access freed memory. However, the rcu_read_lock delays the free
* until the rcu_read_lock of the READ operation quits. Since the
- * write lock of the object is still taken by the DESTROY flow, the
+ * exclusive lock of the object is still taken by the DESTROY flow, the
* READ operation will get -EBUSY and it'll just bail out.
*/
.needs_kfree_rcu = true,
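A hedged sketch of the race the comment describes, assuming the object is freed with kfree_rcu() as needs_kfree_rcu implies (demo_* names are illustration only): the reader's rcu_read_lock() keeps the memory valid long enough to observe the exclusive lock and fail with -EBUSY, even if DESTROY has already dropped the last kref.

	#include <linux/rcupdate.h>
	#include <linux/slab.h>
	#include <linux/atomic.h>
	#include <linux/errno.h>

	struct demo_obj {
		struct rcu_head rcu;
		atomic_t usecnt;
	};

	/* DESTROY flow: holds the exclusive lock (usecnt == -1), defers the free. */
	static void demo_destroy(struct demo_obj *obj)
	{
		kfree_rcu(obj, rcu);	/* freed only after all RCU readers finish */
	}

	/* READ flow: may race with demo_destroy(). */
	static int demo_read_lock(struct demo_obj *obj)
	{
		int ret;

		rcu_read_lock();	/* obj's memory cannot be reclaimed here */
		ret = __atomic_add_unless(&obj->usecnt, 1, -1) == -1 ?
			-EBUSY : 0;
		rcu_read_unlock();
		return ret;		/* -EBUSY while DESTROY holds the lock */
	}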
* destroyed.
* [lookup]: Starts with lookup_get which fetches and locks the
* object. After the handler finished using the object, it
- * needs to call lookup_put to unlock it. The write flag
- * indicates if the object is locked for exclusive access.
- * [remove]: Starts with lookup_get with write flag set. This locks
- * the object for exclusive access. If the handler code
- * completed successfully, remove_commit is called and
- * the ib_uobject is removed from the context's uobjects
- * repository and put. The object itself is destroyed as
- * well. Once remove succeeds new krefs to the object
- * cannot be acquired by other threads or userspace and
- * the hardware driver is removed from the object.
- * Other krefs on the object may still exist.
+ * needs to call lookup_put to unlock it. The exclusive
+ * flag indicates if the object is locked for exclusive
+ * access.
+ * [remove]: Starts with lookup_get with exclusive flag set. This
+ * locks the object for exclusive access. If the handler
+ * code completed successfully, remove_commit is called
+ * and the ib_uobject is removed from the context's
+ * uobjects repository and put. The object itself is
+ * destroyed as well. Once remove succeeds new krefs to
+ * the object cannot be acquired by other threads or
+ * userspace and the hardware driver is removed from the
+ * object. Other krefs on the object may still exist.
* If the handler code failed, lookup_put should be
* called. This callback is used when the context
* is destroyed as well (process termination,
struct ib_uobject *(*lookup_get)(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext, int id,
- bool write);
- void (*lookup_put)(struct ib_uobject *uobj, bool write);
+ bool exclusive);
+ void (*lookup_put)(struct ib_uobject *uobj, bool exclusive);
/*
- * Must be called with the write lock held. If successful uobj is
+ * Must be called with the exclusive lock held. If successful uobj is
* invalid on return. On failure uobject is left completely
* unchanged
*/
struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext,
- int id, bool write);
-void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool write);
+ int id, bool exclusive);
+void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive);
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
struct ib_ucontext *ucontext);
void rdma_alloc_abort_uobject(struct ib_uobject *uobj);
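To tie the renamed flag to the [lookup] flow documented above, here is a hedged usage sketch of the API as declared (demo_query_handler is a hypothetical handler, not code from this patch): shared access (exclusive = false) suits read-style commands, while destroy-style flows would pass exclusive = true.

	#include <linux/err.h>

	static int demo_query_handler(const struct uverbs_obj_type *type,
				      struct ib_ucontext *ucontext, int id)
	{
		struct ib_uobject *uobj;

		/* Shared lookup: other readers may hold the object concurrently. */
		uobj = rdma_lookup_get_uobject(type, ucontext, id, false);
		if (IS_ERR(uobj))
			return PTR_ERR(uobj);

		/* ... read-only use of uobj->object ... */

		rdma_lookup_put_uobject(uobj, false);
		return 0;
	}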