random: invalidate batched entropy after crng init

It's possible that get_random_{u32,u64} is used before the crng has
initialized, in which case its output might not be cryptographically
secure. This patch set addresses that problem directly by introducing
the *_wait variety of functions, but even with those there's a subtle
issue: what happens to the batched entropy that was generated before
initialization? Prior to this commit, it would stick around, supplying
bad numbers. After this commit, we force the entropy to be re-extracted
after each phase of the crng has initialized.

In order to avoid a race condition with the position counter, we
introduce a simple rwlock for this invalidation. Since the lock is only
needed during the awkward transition period, we stop using it once
everything is set up, so it has no impact on performance afterward.
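
For illustration only (not part of the patch): below is a minimal
userspace sketch of the same scheme, under the assumption that a pthreads
rwlock and a single global batch are acceptable stand-ins for the
kernel's rwlock_t and per-CPU batches. Every name in it (refill,
source_ready, the *_sketch functions) is hypothetical, not the kernel's
API. It shows the two ideas above: lazy invalidation by resetting the
position counter, and a read lock that is taken only while the source is
still untrusted.

/* Userspace sketch only -- NOT kernel code. Models the patch's scheme:
 * lazy invalidation via a position counter plus a transitional rwlock. */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BATCH_WORDS 8

struct batch {
	uint64_t words[BATCH_WORDS];
	unsigned int position;	/* next word to hand out; 0 => refill first */
};

static pthread_rwlock_t reset_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct batch batch;	/* the kernel keeps one of these per CPU */
static bool source_ready;	/* stands in for "crng_init >= 2" */

/* Hypothetical stand-in for extract_crng(): refill the whole batch. */
static void refill(struct batch *b)
{
	static uint64_t fake_stream;

	for (int i = 0; i < BATCH_WORDS; i++)
		b->words[i] = ++fake_stream;
}

static uint64_t get_random_u64_sketch(void)
{
	uint64_t ret;
	bool use_lock = !source_ready;	/* mirrors use_lock = crng_init < 2 */

	/* The read lock keeps a concurrent reset from landing between the
	 * position check and the increment, which could otherwise hand out
	 * a stale pre-init word; once the source is trusted, resets stop,
	 * so the lock can be skipped entirely. */
	if (use_lock)
		pthread_rwlock_rdlock(&reset_lock);
	if (batch.position % BATCH_WORDS == 0) {
		refill(&batch);
		batch.position = 0;
	}
	ret = batch.words[batch.position++];
	if (use_lock)
		pthread_rwlock_unlock(&reset_lock);
	return ret;
}

/* Called when the source becomes trustworthy: lazily discard any words
 * batched earlier by zeroing the counter, so the next caller refills. */
static void invalidate_batches_sketch(void)
{
	pthread_rwlock_wrlock(&reset_lock);
	batch.position = 0;
	pthread_rwlock_unlock(&reset_lock);
}

int main(void)
{
	printf("before init: %llu\n",
	       (unsigned long long)get_random_u64_sketch());
	invalidate_batches_sketch();	/* e.g. at the crng_init = 2 step */
	source_ready = true;
	printf("after init:  %llu\n",
	       (unsigned long long)get_random_u64_sketch());
	return 0;
}

An rwlock fits this shape of problem because the hot path has many
concurrent readers while the reset happens at most once per init phase,
so readers never contend with each other and the write lock is only ever
taken during the transition.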

Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: stable@vger.kernel.org # v4.11+

---
 drivers/char/random.c | 37 +++++++++++++++++++++++++++++++++++++
 1 file changed, 37 insertions(+)

diff --git a/drivers/char/random.c b/drivers/char/random.c
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1,6 +1,9 @@
 /*
  * random.c -- A strong random number generator
  *
+ * Copyright (C) 2017 Jason A. Donenfeld <Jason@zx2c4.com>. All
+ * Rights Reserved.
+ *
  * Copyright Matt Mackall <mpm@selenic.com>, 2003, 2004, 2005
  *
  * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999. All
@@ -762,6 +765,8 @@
 static struct crng_state **crng_node_pool __read_mostly;
 #endif
 
+static void invalidate_batched_entropy(void);
+
 static void crng_initialize(struct crng_state *crng)
 {
 	int i;
@@ -799,6 +804,7 @@
 		cp++; crng_init_cnt++; len--;
 	}
 	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
+		invalidate_batched_entropy();
 		crng_init = 1;
 		wake_up_interruptible(&crng_init_wait);
 		pr_notice("random: fast init done\n");
@@ -836,6 +842,7 @@
 	memzero_explicit(&buf, sizeof(buf));
 	crng->init_time = jiffies;
 	if (crng == &primary_crng && crng_init < 2) {
+		invalidate_batched_entropy();
 		crng_init = 2;
 		process_random_ready_list();
 		wake_up_interruptible(&crng_init_wait);
@@ -2023,6 +2030,7 @@
 	};
 	unsigned int position;
 };
+static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_reset_lock);
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
@@ -2033,6 +2041,8 @@
 u64 get_random_u64(void)
 {
 	u64 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 #if BITS_PER_LONG == 64
@@ -2045,11 +2055,15 @@
 #endif
 
 	batch = &get_cpu_var(batched_entropy_u64);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
 		extract_crng((u8 *)batch->entropy_u64);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u64[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u64);
 	return ret;
 }
@@ -2059,21 +2073,44 @@
 u32 get_random_u32(void)
 {
 	u32 ret;
+	bool use_lock = crng_init < 2;
+	unsigned long flags;
 	struct batched_entropy *batch;
 
 	if (arch_get_random_int(&ret))
 		return ret;
 
 	batch = &get_cpu_var(batched_entropy_u32);
+	if (use_lock)
+		read_lock_irqsave(&batched_entropy_reset_lock, flags);
 	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
 		extract_crng((u8 *)batch->entropy_u32);
 		batch->position = 0;
 	}
 	ret = batch->entropy_u32[batch->position++];
+	if (use_lock)
+		read_unlock_irqrestore(&batched_entropy_reset_lock, flags);
 	put_cpu_var(batched_entropy_u32);
 	return ret;
 }
 EXPORT_SYMBOL(get_random_u32);
+
+/* It's important to invalidate all potential batched entropy that might
+ * be stored before the crng is initialized, which we can do lazily by
+ * simply resetting the counter to zero so that it's re-extracted on the
+ * next usage. */
+static void invalidate_batched_entropy(void)
+{
+	int cpu;
+	unsigned long flags;
+
+	write_lock_irqsave(&batched_entropy_reset_lock, flags);
+	for_each_possible_cpu (cpu) {
+		per_cpu_ptr(&batched_entropy_u32, cpu)->position = 0;
+		per_cpu_ptr(&batched_entropy_u64, cpu)->position = 0;
+	}
+	write_unlock_irqrestore(&batched_entropy_reset_lock, flags);
+}
 
 /**
  * randomize_page - Generate a random, page aligned address