extern void debug_mutex_unlock(struct mutex *lock);
extern void debug_mutex_init(struct mutex *lock, const char *name,
struct lock_class_key *key);
-
-#define spin_lock_mutex(lock, flags) \
- do { \
- struct mutex *l = container_of(lock, struct mutex, wait_lock); \
- \
- DEBUG_LOCKS_WARN_ON(in_interrupt()); \
- local_irq_save(flags); \
- arch_spin_lock(&(lock)->rlock.raw_lock);\
- DEBUG_LOCKS_WARN_ON(l->magic != l); \
- } while (0)
-
-#define spin_unlock_mutex(lock, flags) \
- do { \
- arch_spin_unlock(&(lock)->rlock.raw_lock); \
- local_irq_restore(flags); \
- preempt_check_resched(); \
- } while (0)
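
The removed debug wrappers bypassed the spinlock_t layer: they disabled IRQs by hand and took the underlying arch_spin_lock() directly, so lockdep never saw wait_lock being held and lockdep_assert_held(&lock->wait_lock) could not work on CONFIG_DEBUG_MUTEXES builds. Since a mutex may never be acquired from interrupt context (the DEBUG_LOCKS_WARN_ON(in_interrupt()) check above enforced exactly that), the IRQ fiddling is unnecessary and a plain spin_lock() suffices. A rough before/after sketch, for illustration only, not part of the patch:

	/* before: IRQs off plus a raw arch lock, invisible to lockdep */
	local_irq_save(flags);
	arch_spin_lock(&lock->wait_lock.rlock.raw_lock);
	/* ... */
	arch_spin_unlock(&lock->wait_lock.rlock.raw_lock);
	local_irq_restore(flags);

	/* after: an ordinary spinlock_t, tracked by lockdep */
	spin_lock(&lock->wait_lock);
	/* ... */
	spin_unlock(&lock->wait_lock);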
static __always_inline void
ww_mutex_set_context_fastpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
{
- unsigned long flags;
-
ww_mutex_lock_acquired(lock, ctx);
lock->ctx = ctx;
/*
 * Uh oh, we raced in fastpath, wake up everyone in this case,
* so they can see the new lock->ctx.
*/
- spin_lock_mutex(&lock->base.wait_lock, flags);
+ spin_lock(&lock->base.wait_lock);
__ww_mutex_wakeup_for_backoff(&lock->base, ctx);
- spin_unlock_mutex(&lock->base.wait_lock, flags);
+ spin_unlock(&lock->base.wait_lock);
}
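
ww_mutex_set_context_fastpath() runs after a cmpxchg fastpath acquisition; by the time lock->ctx is stored, other tasks may already have queued on the mutex without seeing the new context, hence the wakeup under wait_lock so every waiter re-checks against lock->ctx. For orientation, a minimal usage sketch of the wound/wait API that reaches this path; my_ww_class and obj are hypothetical names:

	static DEFINE_WW_CLASS(my_ww_class);	/* hypothetical lock class */

	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &my_ww_class);
	ww_mutex_lock(&obj->lock, &ctx);	/* fastpath lands in the function above */
	/* ... touch the object ... */
	ww_mutex_unlock(&obj->lock);
	ww_acquire_fini(&ctx);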
static __always_inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip,
		    struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
{
struct mutex_waiter waiter;
- unsigned long flags;
bool first = false;
struct ww_mutex *ww;
int ret;
return 0;
}
- spin_lock_mutex(&lock->wait_lock, flags);
+ spin_lock(&lock->wait_lock);
/*
* After waiting to acquire the wait_lock, try again.
*/
goto err;
}
- spin_unlock_mutex(&lock->wait_lock, flags);
+ spin_unlock(&lock->wait_lock);
schedule_preempt_disabled();
if (__mutex_trylock(lock) ||
    (first && mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, &waiter)))
break;
- spin_lock_mutex(&lock->wait_lock, flags);
+ spin_lock(&lock->wait_lock);
}
- spin_lock_mutex(&lock->wait_lock, flags);
+ spin_lock(&lock->wait_lock);
acquired:
__set_current_state(TASK_RUNNING);
if (use_ww_ctx && ww_ctx)
ww_mutex_set_context_slowpath(ww, ww_ctx);
- spin_unlock_mutex(&lock->wait_lock, flags);
+ spin_unlock(&lock->wait_lock);
preempt_enable();
return 0;
err:
__set_current_state(TASK_RUNNING);
mutex_remove_waiter(lock, &waiter, current);
err_early_backoff:
- spin_unlock_mutex(&lock->wait_lock, flags);
+ spin_unlock(&lock->wait_lock);
debug_mutex_free_waiter(&waiter);
mutex_release(&lock->dep_map, 1, ip);
preempt_enable();
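
With the wrappers gone, the slowpath is the standard wait-loop choreography around a plain spinlock: the waiter sleeps with wait_lock dropped and retakes it on wakeup. A condensed sketch of the shape of __mutex_lock_common() after this patch (abridged, not a verbatim quote):

	spin_lock(&lock->wait_lock);
	/* enqueue our mutex_waiter ... */
	for (;;) {
		if (__mutex_trylock(lock))
			break;
		set_current_state(state);
		spin_unlock(&lock->wait_lock);
		schedule_preempt_disabled();	/* sleep without wait_lock */
		spin_lock(&lock->wait_lock);
	}
	__set_current_state(TASK_RUNNING);
	/* dequeue, set up the ww ctx if needed ... */
	spin_unlock(&lock->wait_lock);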
static noinline void __sched __mutex_unlock_slowpath(struct mutex *lock, unsigned long ip)
{
struct task_struct *next = NULL;
- unsigned long owner, flags;
DEFINE_WAKE_Q(wake_q);
+ unsigned long owner;
mutex_release(&lock->dep_map, 1, ip);
owner = old;
}
- spin_lock_mutex(&lock->wait_lock, flags);
+ spin_lock(&lock->wait_lock);
debug_mutex_unlock(lock);
if (!list_empty(&lock->wait_list)) {
/* get the first entry from the wait-list: */
if (owner & MUTEX_FLAG_HANDOFF)
__mutex_handoff(lock, next);
- spin_unlock_mutex(&lock->wait_lock, flags);
+ spin_unlock(&lock->wait_lock);
wake_up_q(&wake_q);
}
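
Note that wake_up_q() runs only after wait_lock has been dropped: a task woken here typically contends for wait_lock itself, so waking it while still holding the lock would only make it block again immediately. The general wake_q pattern, as a short sketch (waiter is a hypothetical wait-list entry):

	DEFINE_WAKE_Q(wake_q);

	spin_lock(&lock->wait_lock);
	/* pick waiters while the wait list is stable */
	wake_q_add(&wake_q, waiter->task);
	spin_unlock(&lock->wait_lock);
	wake_up_q(&wake_q);	/* issue the wakeups without the lock */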