Improve performance by using a lockless update mechanism suggested by
Linus, and make sure we refresh the per-CPU entropy returned by
get_random_* as soon as the CRNG is initialized.

-----BEGIN PGP SIGNATURE-----
iQEzBAABCAAdFiEEK2m5VNv+CHkogTfJ8vlZVpUNgaMFAlk9i5UACgkQ8vlZVpUN
gaO2HQf9H2QKe6B52l5YzXedKbspCaAwELmnpyOGY4pI5mvcf9y9MJMAHPD8yHcv
Oc/TWV/KWxdhXh24e9djFKAPdJxqrhdN+m42lFJrFI3djrS9QbiMUR0Fg3O63mPa
w7/GLrwYWVjKfzEIzh37xcpKg1QYi328ePbRqtt33qrUgj9TOEwywjaHpB57HaGP
Jbskx+3GN/yvDFZBYeaYxSyCCAOt4qtjzOlRyyX7wyp3nPXfg88gZKUfTpmmElm/
znbP1+VSWVOiPtOKXG+2T9OwRjO3zQg3rV+G5FZALAADeUdBlLiTC0xOefTDugZJ
mNxNNxVCr8eNVONvCdKy4RXbvsr75A==
=+3lu
-----END PGP SIGNATURE-----

Merge tag 'random_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random

Pull randomness fixes from Ted Ts'o:
 "Improve performance by using a lockless update mechanism suggested by
  Linus, and make sure we refresh the per-CPU entropy returned by
  get_random_* as soon as the CRNG is initialized"

* tag 'random_for_linus_stable' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
  random: invalidate batched entropy after crng init
  random: use lockless method of accessing and updating f->reg_idx
commit 5ad9345d23

1 changed file with 43 additions and 6 deletions: drivers/char/random.c
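Both fixes below revolve around the per-CPU batched entropy that backs
get_random_u32()/get_random_u64(): each CPU keeps a small buffer of CRNG
output that is consumed word by word and refilled only when it wraps. Here
is a minimal, single-threaded userspace sketch of that consume/refill
pattern (fill_buf(), BATCH_WORDS and the dummy data are stand-ins invented
for illustration, not kernel API):

    /* Minimal userspace sketch of the per-CPU entropy batching pattern behind
     * get_random_u32()/get_random_u64(). fill_buf() stands in for the kernel's
     * extract_crng(); BATCH_WORDS and the dummy data are invented for
     * illustration and are not kernel API. */
    #include <stdint.h>
    #include <stdio.h>

    #define BATCH_WORDS 16

    struct batched_entropy {
            uint64_t entropy[BATCH_WORDS];
            unsigned int position;
    };

    /* Stand-in for extract_crng(): refill the buffer with fresh output. */
    static void fill_buf(struct batched_entropy *b)
    {
            for (unsigned int i = 0; i < BATCH_WORDS; i++)
                    b->entropy[i] = 0x9e3779b97f4a7c15ULL * (i + 1);
    }

    static uint64_t get_batched_u64(struct batched_entropy *b)
    {
            /* Refill only when the whole batch has been consumed; position == 0
             * also covers the "never filled" and "invalidated" cases. */
            if (b->position % BATCH_WORDS == 0) {
                    fill_buf(b);
                    b->position = 0;
            }
            return b->entropy[b->position++];
    }

    int main(void)
    {
            struct batched_entropy batch = { .position = 0 };

            for (int i = 0; i < 3; i++)
                    printf("%016llx\n", (unsigned long long)get_batched_u64(&batch));
            return 0;
    }

With that picture in mind, the first commit below makes sure stale
pre-initialization batches are thrown away, and the second removes locking
from the interrupt-time register sampling.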
@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@ static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
         int i;
@@ -799,6 +804,7 @@ static int crng_fast_load(const char *cp, size_t len)
                 cp++; crng_init_cnt++; len--;
         }
         if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+                invalidate_batched_entropy();
                 crng_init = 1;
                 wake_up_interruptible(&crng_init_wait);
                 pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
         memzero_explicit(&buf, sizeof(buf));
         crng->init_time = jiffies;
         if (crng == &primary_crng && crng_init < 2) {
+                invalidate_batched_entropy();
                 crng_init = 2;
                 process_random_ready_list();
                 wake_up_interruptible(&crng_init_wait);
@@ -1097,15 +1104,15 @@ static void add_interrupt_bench(cycles_t start)
 static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
 {
         __u32 *ptr = (__u32 *) regs;
-        unsigned long flags;
+        unsigned int idx;
 
         if (regs == NULL)
                 return 0;
-        local_irq_save(flags);
-        if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32))
-                f->reg_idx = 0;
-        ptr += f->reg_idx++;
-        local_irq_restore(flags);
+        idx = READ_ONCE(f->reg_idx);
+        if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
+                idx = 0;
+        ptr += idx++;
+        WRITE_ONCE(f->reg_idx, idx);
         return *ptr;
 }
 
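This hunk drops the local_irq_save()/local_irq_restore() pair in favor of
plain READ_ONCE()/WRITE_ONCE() accesses, the lockless method Linus
suggested. The race this admits is benign: reg_idx only selects which saved
register gets sampled next, so the worst a concurrent update can do is
sample the same register twice or skip one. A userspace analogue of the
idea, assuming C11 relaxed atomics as a rough stand-in for the kernel's
READ_ONCE()/WRITE_ONCE() (struct and function names are made up for
illustration):

    /* Userspace analogue of the lockless get_reg() index update. C11 relaxed
     * atomics stand in for the kernel's READ_ONCE()/WRITE_ONCE(); the names
     * here are made up. The point: a stale or overwritten index is harmless,
     * so there is no need to mask interrupts around the update. */
    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NREGS 16

    struct fast_pool_like {
            _Atomic unsigned int reg_idx;
    };

    static uint32_t sample_next(struct fast_pool_like *f, const uint32_t *regs)
    {
            unsigned int idx = atomic_load_explicit(&f->reg_idx,
                                                    memory_order_relaxed);

            if (idx >= NREGS)       /* wrap, as in the kernel version */
                    idx = 0;
            uint32_t val = regs[idx++];
            /* A concurrent caller may clobber this store; at worst two callers
             * sample the same register, which is fine for entropy mixing. */
            atomic_store_explicit(&f->reg_idx, idx, memory_order_relaxed);
            return val;
    }

    int main(void)
    {
            struct fast_pool_like f = { .reg_idx = 0 };
            uint32_t regs[NREGS] = { 11, 22, 33, 44 };

            for (int i = 0; i < 5; i++)
                    printf("%u\n", sample_next(&f, regs));
            return 0;
    }

Masking interrupts on every sampled register was pure overhead for a value
whose occasional staleness costs nothing.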
@@ -2023,6 +2030,7 @@ struct batched_entropy {
         };
         unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64);
 u64 get_random_u64(void)
 {
         u64 ret;
+        bool use_lock = crng_init < 2;
+        unsigned long flags;
         struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@ u64 get_random_u64(void)
 #endif
 
         batch = &get_cpu_var(batched_entropy_u64);
+        if (use_lock)
+                read_lock_irqsave(&batched_entropy_reset_lock, flags);
         if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
                 extract_crng((u8 *)batch->entropy_u64);
                 batch->position = 0;
         }
         ret = batch->entropy_u64[batch->position++];
+        if (use_lock)
+                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
         put_cpu_var(batched_entropy_u64);
         return ret;
 }
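The use_lock pattern above is effectively lock elision:
invalidate_batched_entropy() can only run while crng_init < 2, and
crng_reseed() resets all batches before advancing crng_init to 2, so a
reader that observes crng_init >= 2 and skips the lock can no longer race
with an invalidation. A rough pthreads sketch of the same shape (all names
here are hypothetical; the kernel uses its own rwlock_t, not pthreads):

    /* Pthreads sketch of the conditional-locking shape used above. The
     * "initialized" flag plays the role of crng_init >= 2; all names are
     * hypothetical and the kernel uses its own rwlock_t, not pthreads. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;
    static atomic_bool initialized;
    static unsigned int position;

    /* Reader: takes the lock only until initialization is complete. */
    static unsigned int consume_slot(void)
    {
            bool use_lock = !atomic_load(&initialized);
            unsigned int ret;

            if (use_lock)
                    pthread_rwlock_rdlock(&reset_lock);
            ret = position++;       /* stands in for the batch consume */
            if (use_lock)
                    pthread_rwlock_unlock(&reset_lock);
            return ret;
    }

    /* Writer: resets state, then publishes "initialized" so it never races
     * with a lock-skipping reader. */
    static void invalidate_and_init(void)
    {
            pthread_rwlock_wrlock(&reset_lock);
            position = 0;
            pthread_rwlock_unlock(&reset_lock);
            atomic_store(&initialized, true);
    }

    int main(void)
    {
            consume_slot();
            invalidate_and_init();
            printf("%u\n", consume_slot());  /* prints 0: reset took effect */
            return 0;
    }

If the flag flips between the reader's test and its critical section, the
reader merely takes the lock once more than necessary, which is harmless.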
@@ -2059,22 +2073,45 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32);
 u32 get_random_u32(void)
 {
         u32 ret;
+        bool use_lock = crng_init < 2;
+        unsigned long flags;
         struct batched_entropy *batch;
 
         if (arch_get_random_int(&ret))
                 return ret;
 
         batch = &get_cpu_var(batched_entropy_u32);
+        if (use_lock)
+                read_lock_irqsave(&batched_entropy_reset_lock, flags);
         if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
                 extract_crng((u8 *)batch->entropy_u32);
                 batch->position = 0;
         }
         ret = batch->entropy_u32[batch->position++];
+        if (use_lock)
+                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
         put_cpu_var(batched_entropy_u32);
         return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
 
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+        int cpu;
+        unsigned long flags;
+
+        write_lock_irqsave(&batched_entropy_reset_lock, flags);
+        for_each_possible_cpu (cpu) {
+                per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+                per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+        }
+        write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
+
 /**
  * randomize_page - Generate a random, page aligned address
  * @start: The smallest acceptable address the caller will take.
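To make the "resetting the counter to zero" comment in that hunk concrete:
both consume paths refill whenever position % ARRAY_SIZE(...) == 0, so
zeroing position is all it takes to force a fresh extract_crng() on the
next call, with no per-batch flag needed. A toy demonstration
(refill_count and the fake data are hypothetical, added only to make the
re-extraction visible):

    /* Toy demonstration of the lazy invalidation described in the comment
     * above: zeroing position forces the next consumer through the refill
     * branch. refill_count and the fake data are hypothetical, added only
     * to make the re-extraction visible. */
    #include <stdio.h>

    #define BATCH_WORDS 4

    static unsigned int words[BATCH_WORDS];
    static unsigned int position;
    static unsigned int refill_count;

    static unsigned int next_word(void)
    {
            if (position % BATCH_WORDS == 0) {      /* refill on wrap or reset */
                    refill_count++;
                    for (unsigned int i = 0; i < BATCH_WORDS; i++)
                            words[i] = refill_count * 100 + i;
                    position = 0;
            }
            return words[position++];
    }

    int main(void)
    {
            next_word();
            next_word();            /* two of four words consumed */
            position = 0;           /* "invalidate": just reset the counter */
            printf("%u\n", next_word());    /* prints 200: a fresh batch */
            return 0;
    }

The same reset works for batches that were only partially consumed and for
batches that were never filled at all, since a fresh batch also starts at
position 0.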