}
EXPORT_SYMBOL(rng_is_initialized);
-static void crng_set_ready(struct work_struct *work)
+static void __cold crng_set_ready(struct work_struct *work)
{
static_branch_enable(&crng_is_ready);
}
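
A note on the annotation, since it is the pattern this whole patch applies:
__cold is the kernel's wrapper (from include/linux/compiler_attributes.h) for
the compiler attribute __attribute__((__cold__)). It tells GCC/Clang the
function is rarely executed, so calls to it are predicted not-taken and the
body is emitted alongside other cold code (GCC groups it into .text.unlikely),
keeping init-only and error paths off the hot cache lines. A minimal userspace
sketch of the same technique, with hypothetical names:

	#include <stdio.h>
	#include <stdlib.h>

	/* Same spelling the kernel uses. */
	#define __cold __attribute__((__cold__))

	/* Rarely-taken failure path: grouped with other cold code and
	 * predicted not-taken, so the hot caller stays compact. */
	static void __cold die(const char *what)
	{
		fprintf(stderr, "fatal: %s\n", what);
		exit(1);
	}

	int main(void)
	{
		void *p = malloc(64);
		if (!p)
			die("out of memory");
		free(p);
		return 0;
	}
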
* returns: 0 if callback is successfully added
* -EALREADY if pool is already initialised (callback not called)
*/
-int register_random_ready_notifier(struct notifier_block *nb)
+int __cold register_random_ready_notifier(struct notifier_block *nb)
{
unsigned long flags;
int ret = -EALREADY;
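
register_random_ready_notifier() hangs the caller's notifier_block on a
notifier chain that is walked once when the pool becomes ready; the -EALREADY
return tells callers the pool initialized before they could register. As a
hedged reference for the general pattern (random.c itself uses a raw chain
under its own spinlock; the chain and callback below are hypothetical), the
standard blocking-chain helpers look like this:

	#include <linux/notifier.h>

	static BLOCKING_NOTIFIER_HEAD(demo_ready_chain);

	static int demo_ready_cb(struct notifier_block *nb,
				 unsigned long action, void *data)
	{
		/* Runs when someone fires the chain with
		 * blocking_notifier_call_chain(&demo_ready_chain, 0, NULL). */
		return NOTIFY_OK;
	}

	static struct notifier_block demo_nb = { .notifier_call = demo_ready_cb };

	/* Register: blocking_notifier_chain_register(&demo_ready_chain, &demo_nb);
	 * Remove:   blocking_notifier_chain_unregister(&demo_ready_chain, &demo_nb); */
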
/*
* Delete a previously registered readiness callback function.
*/
-int unregister_random_ready_notifier(struct notifier_block *nb)
+int __cold unregister_random_ready_notifier(struct notifier_block *nb)
{
unsigned long flags;
int ret;
}
EXPORT_SYMBOL(unregister_random_ready_notifier);
-static void process_random_ready_list(void)
+static void __cold process_random_ready_list(void)
{
unsigned long flags;
}
#define warn_unseeded_randomness() \
- _warn_unseeded_randomness(__func__, (void *)_RET_IP_)
-
-static void _warn_unseeded_randomness(const char *func_name, void *caller)
-{
- if (!IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) || crng_ready())
- return;
- printk_deferred(KERN_NOTICE "random: %s called from %pS with crng_init=%d\n",
- func_name, caller, crng_init);
-}
+ if (IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM) && !crng_ready()) \
+ pr_notice("%s called from %pS with crng_init=%d\n", \
+ __func__, (void *)_RET_IP_, crng_init)
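
Folding _warn_unseeded_randomness() into the macro body works because __func__
and _RET_IP_ now expand at each call site, so the caller's name and return
address are captured without being passed as arguments, and the whole
statement is eliminated at compile time when CONFIG_WARN_ALL_UNSEEDED_RANDOM
is off. (_RET_IP_ is the kernel's spelling of
(unsigned long)__builtin_return_address(0).) A userspace sketch of the same
expansion trick, names hypothetical:

	#include <stdio.h>

	/* As a macro, __func__ and __builtin_return_address(0) are
	 * evaluated inside whichever function uses this. */
	#define trace_call() \
		printf("%s called from %p\n", __func__, \
		       __builtin_return_address(0))

	static void __attribute__((noinline)) worker(void)
	{
		trace_call();	/* prints worker's name and worker's caller */
	}

	int main(void)
	{
		worker();
		return 0;
	}

One caveat of the new form: the macro body is a bare if, which would mis-bind
a following else, so call sites need to be simple full statements; the patch
accepts that in exchange for the statement vanishing when the option is
disabled.
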
/*********************************************************************
* This function is called when the CPU is coming up, with entry
* CPUHP_RANDOM_PREPARE, which comes before CPUHP_WORKQUEUE_PREP.
*/
-int random_prepare_cpu(unsigned int cpu)
+int __cold random_prepare_cpu(unsigned int cpu)
{
/*
* When the cpu comes back online, immediately invalidate both
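
random_prepare_cpu() (and random_online_cpu() further down) are attached to
fixed CPU-hotplug states; the CPUHP_RANDOM_PREPARE < CPUHP_WORKQUEUE_PREP
ordering guarantees the per-CPU random state is invalidated before the
incoming CPU can run workqueue items that consume it. In-tree these callbacks
are wired into the static state table in kernel/cpu.c; a dynamic registration
of an equivalent callback would look roughly like this sketch (the state name
string and the init function are assumptions):

	#include <linux/init.h>
	#include <linux/cpuhotplug.h>

	static int __init demo_hotplug_init(void)
	{
		/* Run random_prepare_cpu() for every CPU coming up at the
		 * CPUHP_RANDOM_PREPARE step; no teardown callback needed. */
		return cpuhp_setup_state(CPUHP_RANDOM_PREPARE, "random:prepare",
					 random_prepare_cpu, NULL);
	}
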
memzero_explicit(&block, sizeof(block));
}
-static void credit_init_bits(size_t bits)
+#define credit_init_bits(bits) if (!crng_ready()) _credit_init_bits(bits)
+
+static void __cold _credit_init_bits(size_t bits)
{
static struct execute_work set_ready;
unsigned int new, orig, add;
unsigned long flags;
- if (crng_ready() || !bits)
+ if (!bits)
return;
add = min_t(size_t, bits, POOL_BITS);
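
The credit_init_bits() rewrite is a fast-path/slow-path split: the macro
inlines the crng_ready() test at every call site, so once the pool is
initialized callers pay one predicted-not-taken branch and never call into
cold text, while the __cold _credit_init_bits() keeps only the !bits check
(the readiness half of the old guard now lives in the macro). The shape of
the pattern, as a standalone sketch with hypothetical names:

	#include <stdbool.h>

	static bool ready;	/* stands in for crng_ready() */

	/* Cold slow path: only runs until the subsystem flips to ready. */
	static void __attribute__((__cold__)) _account(unsigned int bits)
	{
		if (!bits)
			return;
		/* ... accumulate credit, eventually set ready = true ... */
	}

	/* Fast path, inlined at the call site: one test, usually false. */
	#define account(bits) if (!ready) _account(bits)
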
* Handle random seed passed by bootloader, and credit it if
* CONFIG_RANDOM_TRUST_BOOTLOADER is set.
*/
-void add_bootloader_randomness(const void *buf, size_t len)
+void __cold add_bootloader_randomness(const void *buf, size_t len)
{
mix_pool_bytes(buf, len);
if (trust_bootloader)
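
add_bootloader_randomness() shows the pool's two-step contract: input is
always mixed in (extra input can never hurt), but init-bit credit, which is
what advances crng_init, is granted only when the source is trusted. Reduced
to a kernel-context sketch (the wrapper name is hypothetical):

	static void add_seed(const void *buf, size_t len, bool trusted)
	{
		mix_pool_bytes(buf, len);		/* unconditional */
		if (trusted)
			credit_init_bits(len * 8);	/* credit only if trusted */
	}
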
* This function is called when the CPU has just come online, with
* entry CPUHP_AP_RANDOM_ONLINE, just after CPUHP_AP_WORKQUEUE_ONLINE.
*/
-int random_online_cpu(unsigned int cpu)
+int __cold random_online_cpu(unsigned int cpu)
{
/*
* During CPU shutdown and before CPU onlining, add_interrupt_
if (in_irq())
this_cpu_ptr(&irq_randomness)->count += max(1u, bits * 64) - 1;
else
- credit_init_bits(bits);
+ _credit_init_bits(bits);
}
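
This hunk is the one call site that bypasses the new macro: crediting from
hard interrupt context would touch the pool lock, which is avoided there, so
the would-be credit is parked in the per-CPU irq_randomness counter and folded
in later, while the process-context branch calls _credit_init_bits() directly
because this path has already returned early when crng_ready() is true,
making the macro's test redundant. The park-then-drain shape is the usual one
for work that must stay out of interrupt-like contexts; a simplified
userspace analogy with signal handlers (hypothetical names):

	#include <signal.h>
	#include <stdio.h>

	static volatile sig_atomic_t pending;	/* per-CPU counter analogue */
	static unsigned long total;		/* the "pool" */

	static void on_signal(int sig)
	{
		(void)sig;
		pending++;	/* interrupt context: no locks, just park it */
	}

	static void drain(void)
	{
		/* Process context: safe to touch the real state. */
		while (pending) {
			pending--;
			total++;
		}
	}

	int main(void)
	{
		signal(SIGINT, on_signal);
		/* ... a real main loop would call drain() periodically ... */
		drain();
		printf("total=%lu\n", total);
		return 0;
	}
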
void add_input_randomness(unsigned int type, unsigned int code, unsigned int value)
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
-void rand_initialize_disk(struct gendisk *disk)
+void __cold rand_initialize_disk(struct gendisk *disk)
{
struct timer_rand_state *state;
*
* So the re-arming always happens in the entropy loop itself.
*/
-static void entropy_timer(unsigned long data)
+static void __cold entropy_timer(unsigned long data)
{
credit_init_bits(1);
}
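
entropy_timer() is the callback half of try_to_generate_entropy(): the loop
below mixes the cycle counter into the pool and arms a timer, and each time
the timer actually fires, the jitter accumulated between samples is judged
worth one bit, hence credit_init_bits(1). The raw measurement idea, reduced to
a userspace sketch (simplified and hypothetical; the kernel mixes samples into
its pool rather than folding them into one word):

	#include <stdio.h>
	#include <time.h>

	/* High-resolution counter standing in for random_get_entropy(). */
	static unsigned long cycles(void)
	{
		struct timespec ts;
		clock_gettime(CLOCK_MONOTONIC, &ts);
		return ts.tv_sec * 1000000000UL + ts.tv_nsec;
	}

	int main(void)
	{
		unsigned long mix = 0, prev = cycles();

		for (int i = 0; i < 1024; i++) {
			unsigned long now = cycles();
			/* The deltas' low bits wobble with interrupt and
			 * scheduling noise; fold them in. */
			mix = mix * 31 ^ (now - prev);
			prev = now;
		}
		printf("jitter accumulator: %lx\n", mix);
		return 0;
	}
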
* If we have an actual cycle counter, see if we can
* generate enough entropy with timing noise
*/
-static void try_to_generate_entropy(void)
+static void __cold try_to_generate_entropy(void)
{
struct {
unsigned long entropy;