#include <linux/uuid.h>
#include <linux/uaccess.h>
#include <linux/siphash.h>
+#include <linux/uio.h>
#include <crypto/chacha20.h>
#include <crypto/blake2s.h>
#include <asm/processor.h>
}
EXPORT_SYMBOL(get_random_bytes);
-static ssize_t get_random_bytes_user(void __user *ubuf, size_t len)
+static ssize_t get_random_bytes_user(struct iov_iter *iter)
{
- size_t block_len, left, ret = 0;
u32 chacha_state[CHACHA20_BLOCK_SIZE / sizeof(u32)];
- u8 output[CHACHA20_BLOCK_SIZE];
+ u8 block[CHACHA20_BLOCK_SIZE];
+ size_t ret = 0, copied;
- if (!len)
+ if (unlikely(!iov_iter_count(iter)))
return 0;
/*
* For requests no larger than the ChaCha key size we never run the
* block function, and nothing else will use chacha_state after, so we
* can simply return those fresh key bytes to the user directly.
*/
- if (len <= CHACHA20_KEY_SIZE) {
- ret = len - copy_to_user(ubuf, &chacha_state[4], len);
+ if (iov_iter_count(iter) <= CHACHA20_KEY_SIZE) {
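+ /*
+ * copy_to_iter() copies at most iov_iter_count() bytes, so passing
+ * the full key size here still respects the smaller request.
+ */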
+ ret = copy_to_iter(&chacha_state[4], CHACHA20_KEY_SIZE, iter);
goto out_zero_chacha;
}
for (;;) {
- chacha20_block(chacha_state, output);
+ chacha20_block(chacha_state, block);
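+ /*
+ * Words 12 and 13 of the state form a 64-bit block counter;
+ * propagate the carry into the high word when the low word wraps.
+ */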
if (unlikely(chacha_state[12] == 0))
++chacha_state[13];
- block_len = min_t(size_t, len, CHACHA20_BLOCK_SIZE);
- left = copy_to_user(ubuf, output, block_len);
- if (left) {
- ret += block_len - left;
+ copied = copy_to_iter(block, sizeof(block), iter);
+ ret += copied;
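+ /*
+ * Stop once the request is satisfied, or on a short copy, which
+ * indicates a fault in copy_to_iter().
+ */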
+ if (!iov_iter_count(iter) || copied != sizeof(block))
break;
- }
- ubuf += block_len;
- ret += block_len;
- len -= block_len;
- if (!len)
- break;
-
- BUILD_BUG_ON(PAGE_SIZE % CHACHA20_BLOCK_SIZE != 0);
+ BUILD_BUG_ON(PAGE_SIZE % sizeof(block) != 0);
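+ /*
+ * Only check for pending signals once per page of output, so that
+ * large reads stay interruptible without paying the cost on every
+ * block.
+ */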
if (ret % PAGE_SIZE == 0) {
if (signal_pending(current))
break;
}
}
- memzero_explicit(output, sizeof(output));
+ memzero_explicit(block, sizeof(block));
out_zero_chacha:
memzero_explicit(chacha_state, sizeof(chacha_state));
return ret ? ret : -EFAULT;
* provided by this function is okay, the function wait_for_random_bytes()
* should be called and return 0 at least once at any point prior.
*/
-struct batched_entropy {
- union {
- /*
- * We make this 1.5x a ChaCha block, so that we get the
- * remaining 32 bytes from fast key erasure, plus one full
- * block from the detached ChaCha state. We can increase
- * the size of this later if needed so long as we keep the
- * formula of (integer_blocks + 0.5) * CHACHA20_BLOCK_SIZE.
- */
- u64 entropy_u64[CHACHA20_BLOCK_SIZE * 3 / (2 * sizeof(u64))];
- u32 entropy_u32[CHACHA20_BLOCK_SIZE * 3 / (2 * sizeof(u32))];
- };
- unsigned long generation;
- unsigned int position;
-};
-
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
- .position = UINT_MAX
-};
-
-u64 get_random_u64(void)
-{
- u64 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- unsigned long next_gen;
-
- warn_unseeded_randomness();
-
- if (!crng_ready()) {
- _get_random_bytes(&ret, sizeof(ret));
- return ret;
- }
-
- local_irq_save(flags);
- batch = raw_cpu_ptr(&batched_entropy_u64);
-
- next_gen = READ_ONCE(base_crng.generation);
- if (batch->position >= ARRAY_SIZE(batch->entropy_u64) ||
- next_gen != batch->generation) {
- _get_random_bytes(batch->entropy_u64, sizeof(batch->entropy_u64));
- batch->position = 0;
- batch->generation = next_gen;
- }
-
- ret = batch->entropy_u64[batch->position];
- batch->entropy_u64[batch->position] = 0;
- ++batch->position;
- local_irq_restore(flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u64);
-
-static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
- .position = UINT_MAX
-};
-
-u32 get_random_u32(void)
-{
- u32 ret;
- unsigned long flags;
- struct batched_entropy *batch;
- unsigned long next_gen;
- warn_unseeded_randomness();
-
- if (!crng_ready()) {
- _get_random_bytes(&ret, sizeof(ret));
- return ret;
- }
-
- local_irq_save(flags);
- batch = raw_cpu_ptr(&batched_entropy_u32);
-
- next_gen = READ_ONCE(base_crng.generation);
- if (batch->position >= ARRAY_SIZE(batch->entropy_u32) ||
- next_gen != batch->generation) {
- _get_random_bytes(batch->entropy_u32, sizeof(batch->entropy_u32));
- batch->position = 0;
- batch->generation = next_gen;
- }
-
- ret = batch->entropy_u32[batch->position];
- batch->entropy_u32[batch->position] = 0;
- ++batch->position;
- local_irq_restore(flags);
- return ret;
-}
-EXPORT_SYMBOL(get_random_u32);
+#define DEFINE_BATCHED_ENTROPY(type) \
+struct batch_ ##type { \
+ /* \
+ * We make this 1.5x a ChaCha block, so that we get the \
+ * remaining 32 bytes from fast key erasure, plus one full \
+ * block from the detached ChaCha state. We can increase \
+ * the size of this later if needed so long as we keep the \
+ * formula of (integer_blocks + 0.5) * CHACHA20_BLOCK_SIZE. \
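+ * With a 64-byte ChaCha block that works out to 96 bytes per \
+ * refill, i.e. twelve u64s or twenty-four u32s. \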
+ */ \
+ type entropy[CHACHA20_BLOCK_SIZE * 3 / (2 * sizeof(type))]; \
+ unsigned long generation; \
+ unsigned int position; \
+}; \
+ \
+static DEFINE_PER_CPU(struct batch_ ##type, batched_entropy_ ##type) = { \
+ .position = UINT_MAX \
+}; \
+ \
+type get_random_ ##type(void) \
+{ \
+ type ret; \
+ unsigned long flags; \
+ struct batch_ ##type *batch; \
+ unsigned long next_gen; \
+ \
+ warn_unseeded_randomness(); \
+ \
+ if (!crng_ready()) { \
+ _get_random_bytes(&ret, sizeof(ret)); \
+ return ret; \
+ } \
+ \
+ local_irq_save(flags); \
+ batch = raw_cpu_ptr(&batched_entropy_##type); \
+ \
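+ /* \
+ * Refill the batch when it is exhausted, or when the base crng \
+ * has been reseeded since the batch was filled, as indicated by \
+ * the generation counter. \
+ */ \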
+ next_gen = READ_ONCE(base_crng.generation); \
+ if (batch->position >= ARRAY_SIZE(batch->entropy) || \
+ next_gen != batch->generation) { \
+ _get_random_bytes(batch->entropy, sizeof(batch->entropy)); \
+ batch->position = 0; \
+ batch->generation = next_gen; \
+ } \
+ \
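+ /* Erase each value after handing it out, for forward secrecy. */ \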
+ ret = batch->entropy[batch->position]; \
+ batch->entropy[batch->position] = 0; \
+ ++batch->position; \
+ local_irq_restore(flags); \
+ return ret; \
+} \
+EXPORT_SYMBOL(get_random_ ##type);
+
+DEFINE_BATCHED_ENTROPY(u64)
+DEFINE_BATCHED_ENTROPY(u32)
#ifdef CONFIG_SMP
/*
}
#endif
-/**
- * randomize_page - Generate a random, page aligned address
- * @start: The smallest acceptable address the caller will take.
- * @range: The size of the area, starting at @start, within which the
- * random address must fall.
- *
- * If @start + @range would overflow, @range is capped.
- *
- * NOTE: Historical use of randomize_range, which this replaces, presumed that
- * @start was already page aligned. We now align it regardless.
- *
- * Return: A page aligned address within [start, start + range). On error,
- * @start is returned.
- */
-unsigned long randomize_page(unsigned long start, unsigned long range)
-{
- if (!PAGE_ALIGNED(start)) {
- range -= PAGE_ALIGN(start) - start;
- start = PAGE_ALIGN(start);
- }
-
- if (start > ULONG_MAX - range)
- range = ULONG_MAX - start;
-
- range >>= PAGE_SHIFT;
-
- if (range == 0)
- return start;
-
- return start + (get_random_long() % range << PAGE_SHIFT);
-}
-
/*
* This function will use the architecture-specific hardware random
* number generator if it is available. It is not recommended for
SYSCALL_DEFINE3(getrandom, char __user *, ubuf, size_t, len, unsigned int, flags)
{
+ struct iov_iter iter;
+ struct iovec iov;
+ int ret;
+
if (flags & ~(GRND_NONBLOCK | GRND_RANDOM | GRND_INSECURE))
return -EINVAL;
if ((flags & (GRND_INSECURE | GRND_RANDOM)) == (GRND_INSECURE | GRND_RANDOM))
return -EINVAL;
- if (len > INT_MAX)
- len = INT_MAX;
-
if (!crng_ready() && !(flags & GRND_INSECURE)) {
- int ret;
-
if (flags & GRND_NONBLOCK)
return -EAGAIN;
ret = wait_for_random_bytes();
if (unlikely(ret))
return ret;
}
- return get_random_bytes_user(ubuf, len);
+
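+ /*
+ * Wrap the user buffer in a single-segment iov_iter so that the
+ * common get_random_bytes_user() path serves the syscall as well.
+ * import_single_range() also caps the length at MAX_RW_COUNT, which
+ * is why the explicit INT_MAX clamp is gone.
+ */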
+ ret = import_single_range(READ, ubuf, len, &iov, &iter);
+ if (unlikely(ret))
+ return ret;
+ return get_random_bytes_user(&iter);
}
static unsigned int random_poll(struct file *file, poll_table *wait)
return (ssize_t)len;
}
-static ssize_t urandom_read(struct file *file, char __user *ubuf,
- size_t len, loff_t *ppos)
+static ssize_t urandom_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
static int maxwarn = 10;
++urandom_warning.missed;
else if (ratelimit_disable || __ratelimit(&urandom_warning)) {
--maxwarn;
- pr_notice("%s: uninitialized urandom read (%zd bytes read)\n",
- current->comm, len);
+ pr_notice("%s: uninitialized urandom read (%zu bytes read)\n",
+ current->comm, iov_iter_count(iter));
}
}
- return get_random_bytes_user(ubuf, len);
+ return get_random_bytes_user(iter);
}
-static ssize_t random_read(struct file *file, char __user *ubuf,
- size_t len, loff_t *ppos)
+static ssize_t random_read_iter(struct kiocb *kiocb, struct iov_iter *iter)
{
int ret;
ret = wait_for_random_bytes();
if (ret != 0)
return ret;
- return get_random_bytes_user(ubuf, len);
+ return get_random_bytes_user(iter);
}
static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
}
const struct file_operations random_fops = {
- .read = random_read,
+ .read_iter = random_read_iter,
.write = random_write,
.poll = random_poll,
.unlocked_ioctl = random_ioctl,
};
const struct file_operations urandom_fops = {
- .read = urandom_read,
+ .read_iter = urandom_read_iter,
.write = random_write,
.unlocked_ioctl = random_ioctl,
.fasync = random_fasync,