Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random

Pull /dev/random updates from Ted Ts'o:

- Improve getrandom and /dev/random's support for those arm64
architecture variants that have RNG instructions.

- Use batched output from CRNG instead of CPU's RNG instructions for
better performance.

- Miscellaneous bug fixes.

* tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
random: avoid warnings for !CONFIG_NUMA builds
random: fix data races at timer_rand_state
random: always use batched entropy for get_random_u{32,64}
random: Make RANDOM_TRUST_CPU depend on ARCH_RANDOM
arm64: add credited/trusted RNG support
random: add arch_get_random_*long_early()
random: split primary/secondary crng init paths

+87 -35
+14
arch/arm64/include/asm/archrandom.h
@@ ... @@
 
 #ifdef CONFIG_ARCH_RANDOM
 
+#include <linux/bug.h>
+#include <linux/kernel.h>
 #include <linux/random.h>
 #include <asm/cpufeature.h>
 
@@ ... @@
 	unsigned long ftr = read_sysreg_s(SYS_ID_AA64ISAR0_EL1);
 	return (ftr >> ID_AA64ISAR0_RNDR_SHIFT) & 0xf;
 }
+
+static inline bool __init __must_check
+arch_get_random_seed_long_early(unsigned long *v)
+{
+	WARN_ON(system_state != SYSTEM_BOOTING);
+
+	if (!__early_cpu_has_rndr())
+		return false;
+
+	return __arm64_rndr(v);
+}
+#define arch_get_random_seed_long_early arch_get_random_seed_long_early
 
 #else
 
+1 -1
drivers/char/Kconfig
@@ ... @@
 
 config RANDOM_TRUST_CPU
 	bool "Trust the CPU manufacturer to initialize Linux's CRNG"
-	depends on X86 || S390 || PPC
+	depends on ARCH_RANDOM
 	default n
 	help
 	  Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or
+50 -34
drivers/char/random.c
@@ ... @@
 }
 early_param("random.trust_cpu", parse_trust_cpu);
 
-static void crng_initialize(struct crng_state *crng)
+static bool crng_init_try_arch(struct crng_state *crng)
 {
 	int i;
-	int arch_init = 1;
+	bool arch_init = true;
 	unsigned long rv;
 
-	memcpy(&crng->state[0], "expand 32-byte k", 16);
-	if (crng == &primary_crng)
-		_extract_entropy(&input_pool, &crng->state[4],
-				 sizeof(__u32) * 12, 0);
-	else
-		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
 	for (i = 4; i < 16; i++) {
 		if (!arch_get_random_seed_long(&rv) &&
 		    !arch_get_random_long(&rv)) {
 			rv = random_get_entropy();
-			arch_init = 0;
+			arch_init = false;
 		}
 		crng->state[i] ^= rv;
 	}
-	if (trust_cpu && arch_init && crng == &primary_crng) {
+
+	return arch_init;
+}
+
+static bool __init crng_init_try_arch_early(struct crng_state *crng)
+{
+	int i;
+	bool arch_init = true;
+	unsigned long rv;
+
+	for (i = 4; i < 16; i++) {
+		if (!arch_get_random_seed_long_early(&rv) &&
+		    !arch_get_random_long_early(&rv)) {
+			rv = random_get_entropy();
+			arch_init = false;
+		}
+		crng->state[i] ^= rv;
+	}
+
+	return arch_init;
+}
+
+static void __maybe_unused crng_initialize_secondary(struct crng_state *crng)
+{
+	memcpy(&crng->state[0], "expand 32-byte k", 16);
+	_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
+	crng_init_try_arch(crng);
+	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
+}
+
+static void __init crng_initialize_primary(struct crng_state *crng)
+{
+	memcpy(&crng->state[0], "expand 32-byte k", 16);
+	_extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0);
+	if (crng_init_try_arch_early(crng) && trust_cpu) {
 		invalidate_batched_entropy();
 		numa_crng_init();
 		crng_init = 2;
@@ ... @@
 		crng = kmalloc_node(sizeof(struct crng_state),
 				    GFP_KERNEL | __GFP_NOFAIL, i);
 		spin_lock_init(&crng->lock);
-		crng_initialize(crng);
+		crng_initialize_secondary(crng);
 		pool[i] = crng;
 	}
 	mb();
@@ ... @@
 	 * We take into account the first, second and third-order deltas
 	 * in order to make our estimate.
 	 */
-	delta = sample.jiffies - state->last_time;
-	state->last_time = sample.jiffies;
+	delta = sample.jiffies - READ_ONCE(state->last_time);
+	WRITE_ONCE(state->last_time, sample.jiffies);
 
-	delta2 = delta - state->last_delta;
-	state->last_delta = delta;
+	delta2 = delta - READ_ONCE(state->last_delta);
+	WRITE_ONCE(state->last_delta, delta);
 
-	delta3 = delta2 - state->last_delta2;
-	state->last_delta2 = delta2;
+	delta3 = delta2 - READ_ONCE(state->last_delta2);
+	WRITE_ONCE(state->last_delta2, delta2);
 
 	if (delta < 0)
 		delta = -delta;
@@ ... @@
 int __init rand_initialize(void)
 {
 	init_std_data(&input_pool);
-	crng_initialize(&primary_crng);
+	crng_initialize_primary(&primary_crng);
 	crng_global_init_time = jiffies;
 	if (ratelimit_disable) {
 		urandom_warning.interval = 0;
@@ ... @@
 
 /*
  * Get a random word for internal kernel use only. The quality of the random
- * number is either as good as RDRAND or as good as /dev/urandom, with the
- * goal of being quite fast and not depleting entropy. In order to ensure
+ * number is good as /dev/urandom, but there is no backtrack protection, with
+ * the goal of being quite fast and not depleting entropy. In order to ensure
  * that the randomness provided by this function is okay, the function
- * wait_for_random_bytes() should be called and return 0 at least once
- * at any point prior.
+ * wait_for_random_bytes() should be called and return 0 at least once at any
+ * point prior.
  */
 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
 	.batch_lock = __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
@@ ... @@
 	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
-
-#if BITS_PER_LONG == 64
-	if (arch_get_random_long((unsigned long *)&ret))
-		return ret;
-#else
-	if (arch_get_random_long((unsigned long *)&ret) &&
-	    arch_get_random_long((unsigned long *)&ret + 1))
-		return ret;
-#endif
 
 	warn_unseeded_randomness(&previous);
 
@@ ... @@
 	unsigned long flags;
 	struct batched_entropy *batch;
 	static void *previous;
-
-	if (arch_get_random_int(&ret))
-		return ret;
 
 	warn_unseeded_randomness(&previous);
 
+22
include/linux/random.h
@@ ... @@
 #ifndef _LINUX_RANDOM_H
 #define _LINUX_RANDOM_H
 
+#include <linux/bug.h>
+#include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/once.h>
 
@@ ... @@
 static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
 {
 	return false;
+}
+#endif
+
+/*
+ * Called from the boot CPU during startup; not valid to call once
+ * secondary CPUs are up and preemption is possible.
+ */
+#ifndef arch_get_random_seed_long_early
+static inline bool __init arch_get_random_seed_long_early(unsigned long *v)
+{
+	WARN_ON(system_state != SYSTEM_BOOTING);
+	return arch_get_random_seed_long(v);
+}
+#endif
+
+#ifndef arch_get_random_long_early
+static inline bool __init arch_get_random_long_early(unsigned long *v)
+{
+	WARN_ON(system_state != SYSTEM_BOOTING);
+	return arch_get_random_long(v);
 }
 #endif
 