1 #ifndef _ASM_PARISC_FUTEX_H
2 #define _ASM_PARISC_FUTEX_H
#include <linux/futex.h>
#include <linux/uaccess.h>
#include <asm/atomic.h>
#include <asm/errno.h>
11 /* The following has to match the LWS code in syscall.S. We have
12 sixteen four-word locks. */
15 _futex_spin_lock_irqsave(u32 __user
*uaddr
, unsigned long int *flags
)
17 extern u32 lws_lock_start
[];
18 long index
= ((long)uaddr
& 0xf0) >> 2;
19 arch_spinlock_t
*s
= (arch_spinlock_t
*)&lws_lock_start
[index
];
20 local_irq_save(*flags
);
25 _futex_spin_unlock_irqrestore(u32 __user
*uaddr
, unsigned long int *flags
)
27 extern u32 lws_lock_start
[];
28 long index
= ((long)uaddr
& 0xf0) >> 2;
29 arch_spinlock_t
*s
= (arch_spinlock_t
*)&lws_lock_start
[index
];
31 local_irq_restore(*flags
);
35 futex_atomic_op_inuser (int encoded_op
, u32 __user
*uaddr
)
37 unsigned long int flags
;
38 int op
= (encoded_op
>> 28) & 7;
39 int cmp
= (encoded_op
>> 24) & 15;
40 int oparg
= (encoded_op
<< 8) >> 20;
41 int cmparg
= (encoded_op
<< 20) >> 20;
45 if (encoded_op
& (FUTEX_OP_OPARG_SHIFT
<< 28))
48 if (!access_ok(VERIFY_WRITE
, uaddr
, sizeof(*uaddr
)))
51 _futex_spin_lock_irqsave(uaddr
, &flags
);
55 if (unlikely(get_user(oldval
, uaddr
) != 0))
56 goto out_pagefault_enable
;
81 if (ret
== 0 && unlikely(put_user(tmp
, uaddr
) != 0))
86 _futex_spin_unlock_irqrestore(uaddr
, &flags
);
90 case FUTEX_OP_CMP_EQ
: ret
= (oldval
== cmparg
); break;
91 case FUTEX_OP_CMP_NE
: ret
= (oldval
!= cmparg
); break;
92 case FUTEX_OP_CMP_LT
: ret
= (oldval
< cmparg
); break;
93 case FUTEX_OP_CMP_GE
: ret
= (oldval
>= cmparg
); break;
94 case FUTEX_OP_CMP_LE
: ret
= (oldval
<= cmparg
); break;
95 case FUTEX_OP_CMP_GT
: ret
= (oldval
> cmparg
); break;
96 default: ret
= -ENOSYS
;
103 futex_atomic_cmpxchg_inatomic(u32
*uval
, u32 __user
*uaddr
,
104 u32 oldval
, u32 newval
)
109 /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is
110 * our gateway page, and causes no end of trouble...
112 if (uaccess_kernel() && !uaddr
)
115 if (!access_ok(VERIFY_WRITE
, uaddr
, sizeof(u32
)))
118 /* HPPA has no cmpxchg in hardware and therefore the
119 * best we can do here is use an array of locks. The
120 * lock selected is based on a hash of the userspace
121 * address. This should scale to a couple of CPUs.
124 _futex_spin_lock_irqsave(uaddr
, &flags
);
125 if (unlikely(get_user(val
, uaddr
) != 0)) {
126 _futex_spin_unlock_irqrestore(uaddr
, &flags
);
130 if (val
== oldval
&& unlikely(put_user(newval
, uaddr
) != 0)) {
131 _futex_spin_unlock_irqrestore(uaddr
, &flags
);
136 _futex_spin_unlock_irqrestore(uaddr
, &flags
);
141 #endif /*__KERNEL__*/
142 #endif /*_ASM_PARISC_FUTEX_H*/