do { \
int __d0, __d1, __d2; \
might_sleep(); \
+ if (current->mm) \
+ might_lock_read(&current->mm->mmap_sem); \
__asm__ __volatile__( \
" testl %1,%1\n" \
" jz 2f\n" \
do { \
int __d0; \
might_sleep(); \
+ if (current->mm) \
+ might_lock_read(&current->mm->mmap_sem); \
__asm__ __volatile__( \
"0: rep; stosl\n" \
" movl %2,%0\n" \
unsigned long
clear_user(void __user *to, unsigned long n)
{
- might_sleep();
if (access_ok(VERIFY_WRITE, to, n))
__do_clear_user(to, n);
return n;
unsigned long res, tmp;
might_sleep();
+ if (current->mm)
+ might_lock_read(&current->mm->mmap_sem);
__asm__ __volatile__(
" testl %0, %0\n"
do { \
long __d0, __d1, __d2; \
might_sleep(); \
+ if (current->mm) \
+ might_lock_read(&current->mm->mmap_sem); \
__asm__ __volatile__( \
" testq %1,%1\n" \
" jz 2f\n" \
{
long __d0;
might_sleep();
+ if (current->mm)
+ might_lock_read(&current->mm->mmap_sem);
/* no memory constraint because it doesn't change any memory gcc knows
about */
asm volatile(
#include <linux/thread_info.h>
#include <linux/prefetch.h>
#include <linux/string.h>
+#include <linux/lockdep.h>
+#include <linux/sched.h>
#include <asm/asm.h>
#include <asm/page.h>
int __ret_gu; \
unsigned long __val_gu; \
__chk_user_ptr(ptr); \
+ might_sleep(); \
+ if (current->mm) \
+ might_lock_read(&current->mm->mmap_sem); \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_x(1, __ret_gu, __val_gu, ptr); \
int __ret_pu; \
__typeof__(*(ptr)) __pu_val; \
__chk_user_ptr(ptr); \
+ might_sleep(); \
+ if (current->mm) \
+ might_lock_read(&current->mm->mmap_sem); \
__pu_val = x; \
switch (sizeof(*(ptr))) { \
case 1: \
#define __put_user_size(x, ptr, size, retval, errret) \
do { \
retval = 0; \
+ might_sleep(); \
+ if (current->mm) \
+ might_lock_read(&current->mm->mmap_sem); \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
#define __get_user_size(x, ptr, size, retval, errret) \
do { \
retval = 0; \
+ might_sleep(); \
+ if (current->mm) \
+ might_lock_read(&current->mm->mmap_sem); \
__chk_user_ptr(ptr); \
switch (size) { \
case 1: \
static __always_inline unsigned long __must_check
__copy_to_user(void __user *to, const void *from, unsigned long n)
{
- might_sleep();
- return __copy_to_user_inatomic(to, from, n);
+ might_sleep();
+ if (current->mm)
+ might_lock_read(&current->mm->mmap_sem);
+ return __copy_to_user_inatomic(to, from, n);
}
static __always_inline unsigned long
__copy_from_user(void *to, const void __user *from, unsigned long n)
{
might_sleep();
+ if (current->mm)
+ might_lock_read(&current->mm->mmap_sem);
if (__builtin_constant_p(n)) {
unsigned long ret;
const void __user *from, unsigned long n)
{
might_sleep();
+ if (current->mm)
+ might_lock_read(&current->mm->mmap_sem);
if (__builtin_constant_p(n)) {
unsigned long ret;
int __copy_from_user(void *dst, const void __user *src, unsigned size)
{
int ret = 0;
+
+ might_sleep();
+ if (current->mm)
+ might_lock_read(&current->mm->mmap_sem);
if (!__builtin_constant_p(size))
return copy_user_generic(dst, (__force void *)src, size);
switch (size) {
int __copy_to_user(void __user *dst, const void *src, unsigned size)
{
int ret = 0;
+
+ might_sleep();
+ if (current->mm)
+ might_lock_read(&current->mm->mmap_sem);
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst, src, size);
switch (size) {
int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
{
int ret = 0;
+
+ might_sleep();
+ if (current->mm)
+ might_lock_read(&current->mm->mmap_sem);
if (!__builtin_constant_p(size))
return copy_user_generic((__force void *)dst,
(__force void *)src, size);