random: move randomize_page() into mm where it belongs
author Jason A. Donenfeld <Jason@zx2c4.com>
Sat, 14 May 2022 11:59:30 +0000 (13:59 +0200)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 25 Jun 2022 09:46:40 +0000 (11:46 +0200)
commit 5ad7dd882e45d7fe432c32e896e2aaa0b21746ea upstream.

randomize_page is an mm function. It is documented like one. It contains
the history of one. It has the naming convention of one. It looks
just like another very similar function in mm, randomize_stack_top().
And it has always been maintained and updated by mm people. There is no
need for it to be in random.c. In the "which shape does not look like
the other ones" test, pointing to randomize_page() is correct.

So move randomize_page() into mm/util.c, right next to the similar
randomize_stack_top() function.

This commit contains no actual code changes.

Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
drivers/char/random.c
include/linux/mm.h
include/linux/random.h
mm/util.c

index 343c37414f3266f1bbbaa8e3a72d7685bd7f6a39..088e072adcb1b8f218339d89c50bf0a12a988124 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/uuid.h>
 #include <linux/uaccess.h>
 #include <linux/siphash.h>
+#include <linux/uio.h>
 #include <crypto/chacha20.h>
 #include <crypto/blake2s.h>
 #include <asm/processor.h>
@@ -446,13 +447,13 @@ void get_random_bytes(void *buf, size_t len)
 }
 EXPORT_SYMBOL(get_random_bytes);
 
-static ssize_t get_random_bytes_user(void __user *ubuf, size_t len)
+static ssize_t get_random_bytes_user(struct iov_iter *iter)
 {
-       size_t block_len, left, ret = 0;
        u32 chacha_state[CHACHA20_BLOCK_SIZE / sizeof(u32)];
-       u8 output[CHACHA20_BLOCK_SIZE];
+       u8 block[CHACHA20_BLOCK_SIZE];
+       size_t ret = 0, copied;
 
-       if (!len)
+       if (unlikely(!iov_iter_count(iter)))
                return 0;
 
        /*
@@ -466,30 +467,22 @@ static ssize_t get_random_bytes_user(void __user *ubuf, size_t len)
         * use chacha_state after, so we can simply return those bytes to
         * the user directly.
         */
-       if (len <= CHACHA20_KEY_SIZE) {
-               ret = len - copy_to_user(ubuf, &chacha_state[4], len);
+       if (iov_iter_count(iter) <= CHACHA20_KEY_SIZE) {
+               ret = copy_to_iter(&chacha_state[4], CHACHA20_KEY_SIZE, iter);
                goto out_zero_chacha;
        }
 
        for (;;) {
-               chacha20_block(chacha_state, output);
+               chacha20_block(chacha_state, block);
                if (unlikely(chacha_state[12] == 0))
                        ++chacha_state[13];
 
-               block_len = min_t(size_t, len, CHACHA20_BLOCK_SIZE);
-               left = copy_to_user(ubuf, output, block_len);
-               if (left) {
-                       ret += block_len - left;
+               copied = copy_to_iter(block, sizeof(block), iter);
+               ret += copied;
+               if (!iov_iter_count(iter) || copied != sizeof(block))
                        break;
-               }
 
-               ubuf += block_len;
-               ret += block_len;
-               len -= block_len;
-               if (!len)
-                       break;
-
-               BUILD_BUG_ON(PAGE_SIZE % CHACHA20_BLOCK_SIZE != 0);
+               BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
                if (ret % PAGE_SIZE == 0) {
                        if (signal_pending(current))
                                break;
@@ -497,7 +490,7 @@ static ssize_t get_random_bytes_user(void __user *ubuf, size_t len)
                }
        }
 
-       memzero_explicit(output, sizeof(output));
+       memzero_explicit(block, sizeof(block));
 out_zero_chacha:
        memzero_explicit(chacha_state, sizeof(chacha_state));
        return ret ? ret : -EFAULT;
@@ -509,96 +502,60 @@ out_zero_chacha:
  * provided by this function is okay, the function wait_for_random_bytes()
  * should be called and return 0 at least once at any point prior.
  */
-struct batched_entropy {
-       union {
-               /*
-                * We make this 1.5x a ChaCha block, so that we get the
-                * remaining 32 bytes from fast key erasure, plus one full
-                * block from the detached ChaCha state. We can increase
-                * the size of this later if needed so long as we keep the
-                * formula of (integer_blocks + 0.5) * CHACHA20_BLOCK_SIZE.
-                */
-               u64 entropy_u64[CHACHA20_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
-               u32 entropy_u32[CHACHA20_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
-       };
-       unsigned long generation;
-       unsigned int position;
-};
-
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
-       .position = UINT_MAX
-};
-
-u64 get_random_u64(void)
-{
-       u64 ret;
-       unsigned long flags;
-       struct batched_entropy *batch;
-       unsigned long next_gen;
-
-       warn_unseeded_randomness();
-
-       if  (!crng_ready()) {
-               _get_random_bytes(&ret, sizeof(ret));
-               return ret;
-       }
-
-       local_irq_save(flags);
-       batch = raw_cpu_ptr(&batched_entropy_u64);
-
-       next_gen = READ_ONCE(base_crng.generation);
-       if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
-           next_gen != batch->generation) {
-               _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
-               batch->position = 0;
-               batch->generation = next_gen;
-       }
-
-       ret = batch->entropy_u64[batch->position];
-       batch->entropy_u64[batch->position] = 0;
-       ++batch->position;
-       local_irq_restore(flags);
-       return ret;
-}
-EXPORT_SYMBOL(get_random_u64);
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
-       .position = UINT_MAX
-};
-
-u32 get_random_u32(void)
-{
-       u32 ret;
-       unsigned long flags;
-       struct batched_entropy *batch;
-       unsigned long next_gen;
 
-       warn_unseeded_randomness();
-
-       if  (!crng_ready()) {
-               _get_random_bytes(&ret, sizeof(ret));
-               return ret;
-       }
-
-       local_irq_save(flags);
-       batch = raw_cpu_ptr(&batched_entropy_u32);
-
-       next_gen = READ_ONCE(base_crng.generation);
-       if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
-           next_gen != batch->generation) {
-               _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
-               batch->position = 0;
-               batch->generation = next_gen;
-       }
-
-       ret = batch->entropy_u32[batch->position];
-       batch->entropy_u32[batch->position] = 0;
-       ++batch->position;
-       local_irq_restore(flags);
-       return ret;
-}
-EXPORT_SYMBOL(get_random_u32);
+#define DEFINE_BATCHED_ENTROPY(type)                                           \
+struct batch_ ##type {                                                         \
+       /*                                                                      \
+        * We make this 1.5x a ChaCha block, so that we get the                 \
+        * remaining 32 bytes from fast key erasure, plus one full              \
+        * block from the detached ChaCha state. We can increase                \
+        * the size of this later if needed so long as we keep the              \
+        * formula of (integer_blocks + 0.5) * CHACHA20_BLOCK_SIZE.             \
+        */                                                                     \
+       type entropy[CHACHA20_BLOCK_SIZE * 3 / (2 * sizeof(type))];             \
+       unsigned long generation;                                               \
+       unsigned int position;                                                  \
+};                                                                             \
+                                                                               \
+static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = {       \
+       .position = UINT_MAX                                                    \
+};                                                                             \
+                                                                               \
+type get_random_ ##type(void)                                                  \
+{                                                                              \
+       type ret;                                                               \
+       unsigned long flags;                                                    \
+       struct batch_ ##type *batch;                                            \
+       unsigned long next_gen;                                                 \
+                                                                               \
+       warn_unseeded_randomness();                                             \
+                                                                               \
+       if  (!crng_ready()) {                                                   \
+               _get_random_bytes(&ret, sizeof(ret));                           \
+               return ret;                                                     \
+       }                                                                       \
+                                                                               \
+       local_irq_save(flags);          \
+       batch = raw_cpu_ptr(&batched_entropy_##type);                           \
+                                                                               \
+       next_gen = READ_ONCE(base_crng.generation);                             \
+       if (batch->position >= ARRAY_SIZE(batch->entropy) ||                    \
+           next_gen != batch->generation) {                                    \
+               _get_random_bytes(batch->entropy, sizeof(batch->entropy));      \
+               batch->position = 0;                                            \
+               batch->generation = next_gen;                                   \
+       }                                                                       \
+                                                                               \
+       ret = batch->entropy[batch->position];                                  \
+       batch->entropy[batch->position] = 0;                                    \
+       ++batch->position;                                                      \
+       local_irq_restore(flags);               \
+       return ret;                                                             \
+}                                                                              \
+EXPORT_SYMBOL(get_random_ ##type);
+
+DEFINE_BATCHED_ENTROPY(u64)
+DEFINE_BATCHED_ENTROPY(u32)
 
 #ifdef CONFIG_SMP
 /*
@@ -619,38 +576,6 @@ int __cold random_prepare_cpu(unsigned int cpu)
 }
 #endif
 
-/**
- * randomize_page - Generate a random, page aligned address
- * @start:     The smallest acceptable address the caller will take.
- * @range:     The size of the area, starting at @start, within which the
- *             random address must fall.
- *
- * If @start + @range would overflow, @range is capped.
- *
- * NOTE: Historical use of randomize_range, which this replaces, presumed that
- * @start was already page aligned.  We now align it regardless.
- *
- * Return: A page aligned address within [start, start + range).  On error,
- * @start is returned.
- */
-unsigned long randomize_page(unsigned long start, unsigned long range)
-{
-       if (!PAGE_ALIGNED(start)) {
-               range -= PAGE_ALIGN(start) - start;
-               start = PAGE_ALIGN(start);
-       }
-
-       if (start > ULONG_MAX - range)
-               range = ULONG_MAX - start;
-
-       range >>= PAGE_SHIFT;
-
-       if (range == 0)
-               return start;
-
-       return start + (get_random_long() % range << PAGE_SHIFT);
-}
-
 /*
  * This function will use the architecture-specific hardware random
  * number generator if it is available. It is not recommended for
@@ -1292,6 +1217,10 @@ static void __cold try_to_generate_entropy(void)
 
 SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
 {
+       struct iov_iter iter;
+       struct iovec iov;
+       int ret;
+
        if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
                return -EINVAL;
 
@@ -1302,19 +1231,18 @@ SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags
        if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
                return -EINVAL;
 
-       if (len > INT_MAX)
-               len = INT_MAX;
-
        if (!crng_ready() && !(flags & GRND_INSECURE)) {
-               int ret;
-
                if (flags & GRND_NONBLOCK)
                        return -EAGAIN;
                ret = wait_for_random_bytes();
                if (unlikely(ret))
                        return ret;
        }
-       return get_random_bytes_user(ubuf, len);
+
+       ret = import_single_range(READ, ubuf, len, &iov, &iter);
+       if (unlikely(ret))
+               return ret;
+       return get_random_bytes_user(&iter);
 }
 
 static unsigned int random_poll(struct file *file, poll_table *wait)
@@ -1358,8 +1286,7 @@ static ssize_t random_write(struct file *file, const char __user *ubuf,
        return (ssize_t)len;
 }
 
-static ssize_t urandom_read(struct file *file, char __user *ubuf,
-                           size_t len, loff_t *ppos)
+static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
 {
        static int maxwarn = 10;
 
@@ -1368,23 +1295,22 @@ static ssize_t urandom_read(struct file *file, char __user *ubuf,
                        ++urandom_warning.missed;
                else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
                        --maxwarn;
-                       pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
-                                 current->comm, len);
+                       pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
+                                 current->comm, iov_iter_count(iter));
                }
        }
 
-       return get_random_bytes_user(ubuf, len);
+       return get_random_bytes_user(iter);
 }
 
-static ssize_t random_read(struct file *file, char __user *ubuf,
-                          size_t len, loff_t *ppos)
+static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
 {
        int ret;
 
        ret = wait_for_random_bytes();
        if (ret != 0)
                return ret;
-       return get_random_bytes_user(ubuf, len);
+       return get_random_bytes_user(iter);
 }
 
 static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
@@ -1446,7 +1372,7 @@ static int random_fasync(int fd, struct file *filp, int on)
 }
 
 const struct file_operations random_fops = {
-       .read = random_read,
+       .read_iter = random_read_iter,
        .write = random_write,
        .poll = random_poll,
        .unlocked_ioctl = random_ioctl,
@@ -1455,7 +1381,7 @@ const struct file_operations random_fops = {
 };
 
 const struct file_operations urandom_fops = {
-       .read = urandom_read,
+       .read_iter = urandom_read_iter,
        .write = random_write,
        .unlocked_ioctl = random_ioctl,
        .fasync = random_fasync,
index 94b138e4c84092922eed9bf7bfcecaa0847bda75..794f52e2540e29b5593e93c125f6fa576ff0161b 100644 (file)
@@ -2164,6 +2164,8 @@ extern int install_special_mapping(struct mm_struct *mm,
                                   unsigned long addr, unsigned long len,
                                   unsigned long flags, struct page **pages);
 
+unsigned long randomize_page(unsigned long start, unsigned long range);
+
 extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
 
 extern unsigned long mmap_region(struct file *file, unsigned long addr,
index d6ef6cf4346aafc849c1d984372c6ac97083a4e7..917470c4490ac9015421d5bccc717ca6f0366e3d 100644 (file)
@@ -64,8 +64,6 @@ static inline unsigned long get_random_canary(void)
        return get_random_long() & CANARY_MASK;
 }
 
-unsigned long randomize_page(unsigned long start, unsigned long range);
-
 int __init random_init(const char *command_line);
 bool rng_is_initialized(void);
 int wait_for_random_bytes(void);
index f0d773c719a1b748526268cf51e7f0c8bb51246e..84e775f5216fe04dbcb30a9754fbedf9f591ab02 100644 (file)
--- a/mm/util.c
+++ b/mm/util.c
@@ -14,6 +14,7 @@
 #include <linux/hugetlb.h>
 #include <linux/vmalloc.h>
 #include <linux/userfaultfd_k.h>
+#include <linux/random.h>
 
 #include <asm/sections.h>
 #include <linux/uaccess.h>
@@ -264,6 +265,38 @@ int vma_is_stack_for_current(struct vm_area_struct *vma)
        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
 }
 
+/**
+ * randomize_page - Generate a random, page aligned address
+ * @start:     The smallest acceptable address the caller will take.
+ * @range:     The size of the area, starting at @start, within which the
+ *             random address must fall.
+ *
+ * If @start + @range would overflow, @range is capped.
+ *
+ * NOTE: Historical use of randomize_range, which this replaces, presumed that
+ * @start was already page aligned.  We now align it regardless.
+ *
+ * Return: A page aligned address within [start, start + range).  On error,
+ * @start is returned.
+ */
+unsigned long randomize_page(unsigned long start, unsigned long range)
+{
+       if (!PAGE_ALIGNED(start)) {
+               range -= PAGE_ALIGN(start) - start;
+               start = PAGE_ALIGN(start);
+       }
+
+       if (start > ULONG_MAX - range)
+               range = ULONG_MAX - start;
+
+       range >>= PAGE_SHIFT;
+
+       if (range == 0)
+               return start;
+
+       return start + (get_random_long() % range << PAGE_SHIFT);
+}
+
 #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {