random: invalidate batched entropy after crng init

It's possible for get_random_{u32,u64} to be used before the crng has
initialized, in which case its output might not be cryptographically
secure. To address this problem directly, this patch set introduces the
*_wait variants of those functions, but even with those, a subtle issue
remains: what happens to the batched entropy that was generated before
initialization? Prior to this commit, it would stick around, supplying
bad numbers. After this commit, we force the batched entropy to be
re-extracted after each initialization phase of the crng completes.

In order to avoid a race condition on the position counter, we
introduce a simple rwlock for this invalidation; a userspace model of
the scheme follows the tags below. The lock is only needed during this
awkward transition period: once everything is set up, we stop using it,
so it has no impact on steady-state performance.

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@vger.kernel.org # v4.11+
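
To make the locking scheme concrete, here is a minimal userspace model
of it (a sketch, not kernel code: a pthread rwlock stands in for the
kernel rwlock, a plain array for the per-CPU batches, rand() for
extract_crng(), and every name is invented for illustration):

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BATCH_WORDS 16
#define NR_CPUS 4

struct batch {
        uint32_t entropy[BATCH_WORDS];
        unsigned int position;
};

static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;
static atomic_int crng_ready;           /* models crng_init reaching 2 */
static struct batch batches[NR_CPUS];   /* models the per-CPU batches */

/* Stand-in for extract_crng(): refill one batch with fresh output. */
static void refill(struct batch *b)
{
        for (unsigned int i = 0; i < BATCH_WORDS; i++)
                b->entropy[i] = (uint32_t)rand();
}

/* Mirrors get_random_u32(): the read lock is taken only while the
 * generator is uninitialized, so the steady state stays lock-free. */
static uint32_t get_u32(struct batch *b)
{
        int use_lock = !atomic_load(&crng_ready);
        uint32_t ret;

        if (use_lock)
                pthread_rwlock_rdlock(&reset_lock);
        if (b->position % BATCH_WORDS == 0) {   /* empty or invalidated */
                refill(b);
                b->position = 0;
        }
        ret = b->entropy[b->position++];
        if (use_lock)
                pthread_rwlock_unlock(&reset_lock);
        return ret;
}

/* Mirrors invalidate_batched_entropy(): the write lock excludes every
 * in-flight consumer while all position counters are reset, so no
 * consumer can read a stale word or a half-updated position. */
static void invalidate_all(void)
{
        pthread_rwlock_wrlock(&reset_lock);
        for (int i = 0; i < NR_CPUS; i++)
                batches[i].position = 0;        /* lazy: refill on next draw */
        pthread_rwlock_unlock(&reset_lock);
        atomic_store(&crng_ready, 1);           /* readers stop locking */
}

int main(void)
{
        uint32_t pre = get_u32(&batches[0]);    /* drawn before "init" */
        invalidate_all();                       /* an init phase completes */
        uint32_t post = get_u32(&batches[0]);   /* reset forces a refill */
        printf("pre-init %08x, post-init %08x\n", pre, post);
        return 0;
}

The model keeps the same benign race as the kernel version: a consumer
may sample use_lock just before initialization completes and take the
read lock one extra time, which costs a little but is never incorrect,
and once crng_ready is observed the fast path takes no lock at all.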

---
 drivers/char/random.c | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)
diff --git a/drivers/char/random.c b/drivers/char/random.c
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
         int i;
@@ -799,6 +804,7 @@
                 cp++; crng_init_cnt++; len--;
         }
         if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+                invalidate_batched_entropy();
                 crng_init = 1;
                 wake_up_interruptible(&crng_init_wait);
                 pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@
         memzero_explicit(&buf, sizeof(buf));
         crng->init_time = jiffies;
         if (crng == &primary_crng && crng_init < 2) {
+                invalidate_batched_entropy();
                 crng_init = 2;
                 process_random_ready_list();
                 wake_up_interruptible(&crng_init_wait);
@@ -2023,6 +2030,7 @@
         };
         unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@
 u64 get_random_u64(void)
 {
         u64 ret;
+        bool use_lock = crng_init < 2;
+        unsigned long flags;
         struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@
 #endif
 
         batch = &get_cpu_var(batched_entropy_u64);
+        if (use_lock)
+                read_lock_irqsave(&batched_entropy_reset_lock, flags);
         if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
                 extract_crng((u8 *)batch->entropy_u64);
                 batch->position = 0;
         }
         ret = batch->entropy_u64[batch->position++];
+        if (use_lock)
+                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
         put_cpu_var(batched_entropy_u64);
         return ret;
 }
@@ -2059,21 +2073,44 @@
 u32 get_random_u32(void)
 {
         u32 ret;
+        bool use_lock = crng_init < 2;
+        unsigned long flags;
         struct batched_entropy *batch;
 
         if (arch_get_random_int(&ret))
                 return ret;
 
         batch = &get_cpu_var(batched_entropy_u32);
+        if (use_lock)
+                read_lock_irqsave(&batched_entropy_reset_lock, flags);
         if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
                 extract_crng((u8 *)batch->entropy_u32);
                 batch->position = 0;
         }
         ret = batch->entropy_u32[batch->position++];
+        if (use_lock)
+                read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
         put_cpu_var(batched_entropy_u32);
         return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
+
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+        int cpu;
+        unsigned long flags;
+
+        write_lock_irqsave(&batched_entropy_reset_lock, flags);
+        for_each_possible_cpu (cpu) {
+                per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+                per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+        }
+        write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
 
 /**
  * randomize_page - Generate a random, page aligned address
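
For completeness, the *_wait variants referred to above land in other
patches of this series; the following is a hedged sketch of the intended
calling pattern, assuming a wait_for_random_bytes() helper that blocks
until the crng is fully initialized and returns a negative errno if
interrupted (modeled on what eventually appeared in <linux/random.h>,
not guaranteed to match it exactly; generate_session_key() is a
hypothetical caller):

/* Sketch only: callers that must never consume pre-init output wait
 * for the crng first, then draw from the batched API as usual. */
static int generate_session_key(u32 *key, size_t words)
{
        int ret = wait_for_random_bytes();      /* may return -ERESTARTSYS */

        if (ret)
                return ret;
        while (words--)
                *key++ = get_random_u32();      /* batches valid post-init */
        return 0;
}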