}
if (!lock->l_readers && !lock->l_writers &&
- ldlm_is_cbpending(lock)) {
+ (ldlm_is_cbpending(lock) || lock->l_req_mode == LCK_GROUP)) {
/* If we received a blocked AST and this was the last reference,
* run the callback.
+ * Group locks are special:
+ * they must not go on the LRU, but they are not called back like
+ * non-group locks; instead they are released manually.
+ * They keep an l_writers reference until that manual release, so we
+ * remove them here once they have no more reader or writer
+ * references. - LU-6368
*/
-
LDLM_DEBUG(lock, "final decref done on cbpending lock");
LDLM_LOCK_GET(lock); /* dropped by bl thread */
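
With the widened condition above, the final ldlm_lock_decref() on a
group lock now takes the cancellation path (the extra reference taken
here is dropped by the blocking thread) instead of parking the lock in
the LRU. A minimal caller-side sketch of the resulting behavior; the
wrapper function is illustrative and not part of the patch:

static void example_group_lock_release(struct lustre_handle *lockh)
{
	/* Final decref: with this patch the LDLM core sees
	 * l_req_mode == LCK_GROUP, hands the lock to the blocking
	 * thread and cancels it rather than caching it in the LRU.
	 */
	ldlm_lock_decref(lockh, LCK_GROUP);
}
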
* Decrease reader/writer refcount for LDLM lock with handle
* \a lockh and mark it for subsequent cancellation once r/w refcount
* drops to zero instead of putting into LRU.
- *
- * Typical usage is for GROUP locks which we cannot allow to be cached.
*/
void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode)
{
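
For context, a hedged sketch of what ldlm_lock_decref_and_cancel()
does per the comment above: mark the lock CBPENDING under the resource
lock, then drop the reference so the final decref cancels the lock
instead of caching it. Treat the body as an assumption built from
standard LDLM helpers, not the verbatim source:

void ldlm_lock_decref_and_cancel(const struct lustre_handle *lockh, __u32 mode)
{
	struct ldlm_lock *lock = __ldlm_handle2lock(lockh, 0);

	LASSERT(lock);

	lock_res_and_lock(lock);
	ldlm_set_cbpending(lock);	/* force cancel on final decref */
	unlock_res_and_lock(lock);
	ldlm_lock_decref_internal(lock, mode);
	LDLM_LOCK_PUT(lock);
}
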
osc_enqueue_upcall_f upcall,
void *cookie, struct ldlm_enqueue_info *einfo,
struct ptlrpc_request_set *rqset, int async, int agl);
-int osc_cancel_base(struct lustre_handle *lockh, __u32 mode);
int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
__u32 type, ldlm_policy_data_t *policy, __u32 mode,
if (olck->ols_hold) {
olck->ols_hold = 0;
- osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode);
+ ldlm_lock_decref(&olck->ols_handle, olck->ols_einfo.ei_mode);
olck->ols_handle.cookie = 0ULL;
}
return rc;
}
-int osc_cancel_base(struct lustre_handle *lockh, __u32 mode)
-{
- if (unlikely(mode == LCK_GROUP))
- ldlm_lock_decref_and_cancel(lockh, mode);
- else
- ldlm_lock_decref(lockh, mode);
-
- return 0;
-}
-
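
Design note: with the LDLM core now canceling group locks on their
final decref (first hunk), the mode dispatch that osc_cancel_base()
existed for collapses to a single call, which is why the helper can be
deleted outright. In outline:

/*
 * before:  osc_cancel_base(lockh, mode)
 *            -> ldlm_lock_decref_and_cancel()  if mode == LCK_GROUP
 *            -> ldlm_lock_decref()             otherwise
 *
 * after:   ldlm_lock_decref(lockh, mode)
 *            the LDLM core cancels LCK_GROUP locks itself on the
 *            final decref, so no OSC-level special case is needed
 */
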
static int osc_statfs_interpret(const struct lu_env *env,
struct ptlrpc_request *req,
struct osc_async_args *aa, int rc)