enum poolinfo {
POOL_BITS = BLAKE2S_HASH_SIZE * 8,
POOL_BITSHIFT = ilog2(POOL_BITS),
+ POOL_MIN_BITS = POOL_BITS / 2,
/* To allow fractional bits to be tracked, the entropy_count field is
* denominated in units of 1/8th bits. */
POOL_ENTROPY_SHIFT = 3,
#define POOL_ENTROPY_BITS() (input_pool.entropy_count >> POOL_ENTROPY_SHIFT)
- POOL_FRACBITS = POOL_BITS << POOL_ENTROPY_SHIFT
+ POOL_FRACBITS = POOL_BITS << POOL_ENTROPY_SHIFT,
+ POOL_MIN_FRACBITS = POOL_MIN_BITS << POOL_ENTROPY_SHIFT
};
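For orientation, the concrete values behind these constants, assuming BLAKE2S_HASH_SIZE is 32 bytes (a 256-bit BLAKE2s hash); the *_EX names below are illustrative stand-ins, not part of the patch:

enum {
	POOL_BITS_EX          = 32 * 8,                /* BLAKE2S_HASH_SIZE * 8 = 256 bits */
	POOL_MIN_BITS_EX      = POOL_BITS_EX / 2,      /* 128 bits */
	POOL_ENTROPY_SHIFT_EX = 3,                     /* entropy_count counts 1/8th bits */
	POOL_FRACBITS_EX      = POOL_BITS_EX << POOL_ENTROPY_SHIFT_EX,     /* 2048 */
	POOL_MIN_FRACBITS_EX  = POOL_MIN_BITS_EX << POOL_ENTROPY_SHIFT_EX, /* 1024 */
};

/* Crediting nbits adds (nbits << POOL_ENTROPY_SHIFT_EX) eighth-bits, so an
 * accumulated 128 bits lands exactly on the early-reseed threshold used below. */
_Static_assert((128 << 3) == POOL_MIN_FRACBITS_EX, "128 bits == POOL_MIN_FRACBITS");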
.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
};
-static bool extract_entropy(void *buf, size_t nbytes, int min);
-static void _extract_entropy(void *buf, size_t nbytes);
+static void extract_entropy(void *buf, size_t nbytes);
static void crng_reseed(struct crng_state *crng, bool use_input_pool);
static void credit_entropy_bits(int nbits)
{
- int entropy_count, entropy_bits, orig;
+ int entropy_count, orig;
int nfrac = nbits << POOL_ENTROPY_SHIFT;
/* Ensure that the multiplication can avoid being 64 bits wide. */
trace_credit_entropy_bits(nbits, entropy_count >> POOL_ENTROPY_SHIFT, _RET_IP_);
- entropy_bits = entropy_count >> POOL_ENTROPY_SHIFT;
- if (crng_init < 2 && entropy_bits >= 128)
+ if (crng_init < 2 && entropy_count >= POOL_MIN_FRACBITS)
crng_reseed(&primary_crng, true);
}
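The hunk above changes only the units of the comparison, not the threshold: for a non-negative count, (entropy_count >> POOL_ENTROPY_SHIFT) >= 128 and entropy_count >= POOL_MIN_FRACBITS select exactly the same values. A minimal standalone check of that equivalence, using the assumed values from the sketch above (shift of 3, 1024 fractional bits):

#include <assert.h>

int main(void)
{
	/* Exhaustively compare the old whole-bit check with the new
	 * fractional-bit check over the pool's full range (0..2048). */
	for (int count = 0; count <= 2048; count++)
		assert(((count >> 3) >= 128) == (count >= 1024));
	return 0;
}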
static void __init crng_initialize_primary(void)
{
- _extract_entropy(&primary_crng.state[4], sizeof(u32) * 12);
+ extract_entropy(&primary_crng.state[4], sizeof(u32) * 12);
if (crng_init_try_arch_early() && trust_cpu && crng_init < 2) {
invalidate_batched_entropy();
numa_crng_init();
} buf;
if (use_input_pool) {
- if (!extract_entropy(&buf, 32, 16))
- return;
+ int entropy_count;
+ do {
+ entropy_count = READ_ONCE(input_pool.entropy_count);
+ if (entropy_count < POOL_MIN_FRACBITS)
+ return;
+ } while (cmpxchg(&input_pool.entropy_count, entropy_count, 0) != entropy_count);
+ extract_entropy(buf.key, sizeof(buf.key));
+ if (random_write_wakeup_bits) {
+ wake_up_interruptible(&random_write_wait);
+ kill_fasync(&fasync, SIGIO, POLL_OUT);
+ }
} else {
_extract_crng(&primary_crng, buf.block);
_crng_backtrack_protect(&primary_crng, buf.block,
*
*********************************************************************/
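The do/while loop added to crng_reseed() above takes over the role of account(), removed below: instead of debiting whatever portion of the pool a reader consumed, the reseed path atomically claims the entire entropy credit, and only when at least POOL_MIN_FRACBITS are available; otherwise it bails out without touching the pool. A rough user-space analogue of that claim-or-bail pattern, using C11 atomics (all names and values here are illustrative, not kernel API):

#include <stdatomic.h>
#include <stdbool.h>

#define MIN_FRACBITS_EX 1024			/* stand-in for POOL_MIN_FRACBITS */

static _Atomic int pool_entropy_ex;

/* Returns true if the caller claimed the whole entropy credit, false if there
 * was not enough, mirroring the early return in the new crng_reseed() branch. */
static bool claim_all_entropy_ex(void)
{
	int observed;

	do {
		observed = atomic_load(&pool_entropy_ex);
		if (observed < MIN_FRACBITS_EX)
			return false;
		/* Retry if another thread changed the count since we read it. */
	} while (!atomic_compare_exchange_weak(&pool_entropy_ex, &observed, 0));

	return true;
}

Because the count is zeroed on success, the wakeup of writers in the new branch follows naturally: from the accounting point of view the pool is now empty and has room for fresh entropy.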
-/*
- * This function decides how many bytes to actually take from the
- * given pool, and also debits the entropy count accordingly.
- */
-static size_t account(size_t nbytes, int min)
-{
- int entropy_count, orig;
- size_t ibytes, nfrac;
-
- BUG_ON(input_pool.entropy_count > POOL_FRACBITS);
-
- /* Can we pull enough? */
-retry:
- entropy_count = orig = READ_ONCE(input_pool.entropy_count);
- if (WARN_ON(entropy_count < 0)) {
- pr_warn("negative entropy count: count %d\n", entropy_count);
- entropy_count = 0;
- }
-
- /* never pull more than available */
- ibytes = min_t(size_t, nbytes, entropy_count >> (POOL_ENTROPY_SHIFT + 3));
- if (ibytes < min)
- ibytes = 0;
- nfrac = ibytes << (POOL_ENTROPY_SHIFT + 3);
- if ((size_t)entropy_count > nfrac)
- entropy_count -= nfrac;
- else
- entropy_count = 0;
-
- if (cmpxchg(&input_pool.entropy_count, orig, entropy_count) != orig)
- goto retry;
-
- trace_debit_entropy(8 * ibytes);
- if (ibytes && POOL_ENTROPY_BITS() < random_write_wakeup_bits) {
- wake_up_interruptible(&random_write_wait);
- kill_fasync(&fasync, SIGIO, POLL_OUT);
- }
-
- return ibytes;
-}
-
/*
* This is an HKDF-like construction for using the hashed collected entropy
* as a PRF key, that's then expanded block-by-block.
*/
-static void _extract_entropy(void *buf, size_t nbytes)
+static void extract_entropy(void *buf, size_t nbytes)
{
unsigned long flags;
u8 seed[BLAKE2S_HASH_SIZE], next_key[BLAKE2S_HASH_SIZE];
} block;
size_t i;
+ trace_extract_entropy(nbytes, POOL_ENTROPY_BITS());
+
for (i = 0; i < ARRAY_SIZE(block.rdrand); ++i) {
if (!arch_get_random_long(&block.rdrand[i]))
block.rdrand[i] = random_get_entropy();
memzero_explicit(&block, sizeof(block));
}
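Schematically, the HKDF-like construction described in the comment above works roughly as follows, with HASHPRF(key, input) standing in for a keyed BLAKE2s invocation and arch_words for the rdrand/timestamp block filled in just above (a sketch of the data flow under those assumptions, not the verbatim kernel code):

/*
 * seed      = HASHPRF(pool_key, collected_entropy)    finalize the pool hash
 * next_key  = HASHPRF(seed, arch_words || counter=0)  re-key the pool state
 * output[i] = HASHPRF(seed, arch_words || counter=i)  one block per i = 1, 2, ...
 *
 * Re-keying the pool before any output is produced keeps the pool's new state
 * independent of the emitted blocks, and varying the counter gives the
 * block-by-block expansion the comment refers to.
 */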
-/*
- * This function extracts randomness from the "entropy pool", and
- * returns it in a buffer.
- *
- * The min parameter specifies the minimum amount we can pull before
- * failing to avoid races that defeat catastrophic reseeding. If we
- * have less than min entropy available, we return false and buf is
- * not filled.
- */
-static bool extract_entropy(void *buf, size_t nbytes, int min)
-{
- trace_extract_entropy(nbytes, POOL_ENTROPY_BITS(), _RET_IP_);
- if (account(nbytes, min)) {
- _extract_entropy(buf, nbytes);
- return true;
- }
- return false;
-}
-
#define warn_unseeded_randomness(previous) \
_warn_unseeded_randomness(__func__, (void *)_RET_IP_, (previous))
__entry->bits, __entry->entropy_count, (void *)__entry->IP)
);
-TRACE_EVENT(debit_entropy,
- TP_PROTO(int debit_bits),
-
- TP_ARGS( debit_bits),
-
- TP_STRUCT__entry(
- __field( int, debit_bits )
- ),
-
- TP_fast_assign(
- __entry->debit_bits = debit_bits;
- ),
-
- TP_printk("input pool: debit_bits %d", __entry->debit_bits)
-);
-
TRACE_EVENT(add_input_randomness,
TP_PROTO(int input_bits),
);
DECLARE_EVENT_CLASS(random__extract_entropy,
- TP_PROTO(int nbytes, int entropy_count, unsigned long IP),
+ TP_PROTO(int nbytes, int entropy_count),
- TP_ARGS(nbytes, entropy_count, IP),
+ TP_ARGS(nbytes, entropy_count),
TP_STRUCT__entry(
__field( int, nbytes )
__field( int, entropy_count )
- __field(unsigned long, IP )
),
TP_fast_assign(
__entry->nbytes = nbytes;
__entry->entropy_count = entropy_count;
- __entry->IP = IP;
),
- TP_printk("input pool: nbytes %d entropy_count %d caller %pS",
- __entry->nbytes, __entry->entropy_count, (void *)__entry->IP)
+ TP_printk("input pool: nbytes %d entropy_count %d",
+ __entry->nbytes, __entry->entropy_count)
);
DEFINE_EVENT(random__extract_entropy, extract_entropy,
- TP_PROTO(int nbytes, int entropy_count, unsigned long IP),
+ TP_PROTO(int nbytes, int entropy_count),
- TP_ARGS(nbytes, entropy_count, IP)
+ TP_ARGS(nbytes, entropy_count)
);
TRACE_EVENT(urandom_read,