extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
extern void __percpu_up_read(struct percpu_rw_semaphore *);
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
{
might_sleep();
__this_cpu_inc(*sem->read_count);
if (unlikely(!rcu_sync_is_idle(&sem->rss)))
__percpu_down_read(sem, false); /* Unconditional memory barrier */
- preempt_enable();
+ barrier();
/*
- * The barrier() from preempt_enable() prevents the compiler from
+ * The barrier() prevents the compiler from
* bleeding the critical section out.
*/
}
/*
 * Acquire the reader side of @sem with no net effect on preemption:
 * the _preempt_disable() helper returns with preemption disabled and
 * we immediately re-enable it here.
 */
static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
	percpu_down_read_preempt_disable(sem);
	preempt_enable();
}
/*
 * Try to take the reader side of @sem without sleeping.
 *
 * Returns non-zero on success.  NOTE(review): as written this excerpt
 * always reports success and never touches @sem — the fast-path
 * increment/check looks elided; confirm against the full header.
 */
static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
	return 1;
}
-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
{
/*
- * The barrier() in preempt_disable() prevents the compiler from
+ * The barrier() prevents the compiler from
* bleeding the critical section out.
*/
- preempt_disable();
+ barrier();
/*
* Same as in percpu_down_read().
*/
rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}
/*
 * Release the reader side of @sem with no net effect on preemption:
 * disable preemption to satisfy the _preempt_enable() helper's
 * precondition; the helper re-enables it.
 */
static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
	preempt_disable();
	percpu_up_read_preempt_enable(sem);
}
+
extern void percpu_down_write(struct percpu_rw_semaphore *);
extern void percpu_up_write(struct percpu_rw_semaphore *);