--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ ... @@ journal_start()
 		goto out;
 	}
 
-	map_acquire(&handle->h_lockdep_map);
+	lock_map_acquire(&handle->h_lockdep_map);
 
 out:
 	return handle;
@@ ... @@ journal_stop()
 		spin_unlock(&journal->j_state_lock);
 	}
 
-	map_release(&handle->h_lockdep_map);
+	lock_map_release(&handle->h_lockdep_map);
 
 	jbd_free_handle(handle);
 	return err;
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ ... @@ jbd2_journal_start()
 		goto out;
 	}
 
-	map_acquire(&handle->h_lockdep_map);
+	lock_map_acquire(&handle->h_lockdep_map);
 
 out:
 	return handle;
 }
@@ ... @@ jbd2_journal_stop()
 		spin_unlock(&journal->j_state_lock);
 	}
 
-	map_release(&handle->h_lockdep_map);
+	lock_map_release(&handle->h_lockdep_map);
 
 	jbd2_free_handle(handle);
 	return err;
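Both files model the running transaction handle as a lock that is held from journal_start() to journal_stop() (and likewise for the jbd2 variants), so ordinary lock-ordering rules apply to whole transactions. A minimal sketch of the kind of inversion this annotation lets lockdep report; foo_mutex and demo() are hypothetical and not part of the patch:

#include <linux/err.h>
#include <linux/jbd.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(foo_mutex);	/* hypothetical */

static void demo(journal_t *journal)
{
	handle_t *h = journal_start(journal, 1);

	if (IS_ERR(h))
		return;

	/*
	 * Records the handle -> foo_mutex ordering. If some other path
	 * does mutex_lock(&foo_mutex) and then journal_start(), lockdep
	 * flags the inversion even on runs that never actually deadlock.
	 */
	mutex_lock(&foo_mutex);
	mutex_unlock(&foo_mutex);

	journal_stop(h);
}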
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ ... @@
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # ifdef CONFIG_PROVE_LOCKING
-#  define map_acquire(l)	lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
+#  define lock_map_acquire(l)	lock_acquire(l, 0, 0, 0, 2, NULL, _THIS_IP_)
 # else
-#  define map_acquire(l)	lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
+#  define lock_map_acquire(l)	lock_acquire(l, 0, 0, 0, 1, NULL, _THIS_IP_)
 # endif
-# define map_release(l)		lock_release(l, 1, _THIS_IP_)
+# define lock_map_release(l)	lock_release(l, 1, _THIS_IP_)
 #else
-# define map_acquire(l)		do { } while (0)
-# define map_release(l)		do { } while (0)
+# define lock_map_acquire(l)	do { } while (0)
+# define lock_map_release(l)	do { } while (0)
 #endif
 
 #endif /* __LINUX_LOCKDEP_H */
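The renamed helpers still take a bare struct lockdep_map, so any subsystem can annotate a "pseudo-lock" that has no real lock word behind it. A usage sketch, assuming a CONFIG_DEBUG_LOCK_ALLOC build; the demo_* names are made up and not part of the patch:

#include <linux/lockdep.h>

static struct lock_class_key demo_key;
static struct lockdep_map demo_map =
	STATIC_LOCKDEP_MAP_INIT("demo_map", &demo_key);

static void demo_run(void (*fn)(void))
{
	/* Record that the pseudo-lock is "held" while fn() runs. */
	lock_map_acquire(&demo_map);
	fn();
	lock_map_release(&demo_map);
}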
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ ... @@ run_workqueue()
 		BUG_ON(get_wq_data(work) != cwq);
 		work_clear_pending(work);
-		map_acquire(&cwq->wq->lockdep_map);
-		map_acquire(&lockdep_map);
+		lock_map_acquire(&cwq->wq->lockdep_map);
+		lock_map_acquire(&lockdep_map);
 		f(work);
-		map_release(&lockdep_map);
-		map_release(&cwq->wq->lockdep_map);
+		lock_map_release(&lockdep_map);
+		lock_map_release(&cwq->wq->lockdep_map);
 
 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
@@ ... @@ flush_workqueue()
 	int cpu;
 
 	might_sleep();
-	map_acquire(&wq->lockdep_map);
-	map_release(&wq->lockdep_map);
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
 	for_each_cpu_mask_nr(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
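Taken together, the two hunks above are what make flush deadlocks provable: run_workqueue() holds wq->lockdep_map around every work function, while flush_workqueue() does an empty acquire/release of the same map. A sketch of the bug this catches; my_wq and my_work_fn are hypothetical, with my_wq assumed to be created elsewhere via create_workqueue():

#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* hypothetical */

static void my_work_fn(struct work_struct *work)
{
	/*
	 * run_workqueue() already holds my_wq->lockdep_map here, and
	 * flush_workqueue() acquires the same map, so lockdep reports
	 * this self-flush as a deadlock even on runs that happen not
	 * to hang.
	 */
	flush_workqueue(my_wq);
}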
@@ ... @@ flush_work()
 	if (!cwq)
 		return 0;
 
-	map_acquire(&cwq->wq->lockdep_map);
-	map_release(&cwq->wq->lockdep_map);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	prev = NULL;
 	spin_lock_irq(&cwq->lock);
@@ ... @@ wait_on_work()
 	might_sleep();
 
-	map_acquire(&work->lockdep_map);
-	map_release(&work->lockdep_map);
+	lock_map_acquire(&work->lockdep_map);
+	lock_map_release(&work->lockdep_map);
 
 	cwq = get_wq_data(work);
 	if (!cwq)
@@ ... @@ cleanup_workqueue_thread()
 	if (cwq->thread == NULL)
 		return;
 
-	map_acquire(&cwq->wq->lockdep_map);
-	map_release(&cwq->wq->lockdep_map);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
 
 	flush_cpu_workqueue(cwq);
 	/*