Merge tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random

Pull random updates from Ted Ts'o:
"Add wait_for_random_bytes() and get_random_*_wait() functions so that
callers can more safely get random bytes if they can block until the
CRNG is initialized.

Also print a warning if get_random_*() is called before the CRNG is
initialized. By default, only one single-line warning will be printed
per boot. If CONFIG_WARN_ALL_UNSEEDED_RANDOM is defined, then a
warning will be printed for each function which tries to get random
bytes before the CRNG is initialized. This can get spammy for certain
architecture types, so it is not enabled by default"

* tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
random: reorder READ_ONCE() in get_random_uXX
random: suppress spammy warnings about unseeded randomness
random: warn when kernel uses unseeded randomness
net/route: use get_random_int for random counter
net/neighbor: use get_random_u32 for 32-bit hash random
rhashtable: use get_random_u32 for hash_rnd
ceph: ensure RNG is seeded before using
iscsi: ensure RNG is seeded before use
cifs: use get_random_u32 for 32-bit lock random
random: add get_random_{bytes,u32,u64,int,long,once}_wait family
random: add wait_for_random_bytes() API

Changed files
+168 -38
drivers
fs
cifs
include
lib
net
+76 -20
drivers/char/random.c
··· 288 288 #define SEC_XFER_SIZE 512 289 289 #define EXTRACT_SIZE 10 290 290 291 - #define DEBUG_RANDOM_BOOT 0 292 291 293 292 #define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long)) 294 293 ··· 436 437 static void _crng_backtrack_protect(struct crng_state *crng, 437 438 __u8 tmp[CHACHA20_BLOCK_SIZE], int used); 438 439 static void process_random_ready_list(void); 440 + static void _get_random_bytes(void *buf, int nbytes); 439 441 440 442 /********************************************************************** 441 443 * ··· 777 777 _extract_entropy(&input_pool, &crng->state[4], 778 778 sizeof(__u32) * 12, 0); 779 779 else 780 - get_random_bytes(&crng->state[4], sizeof(__u32) * 12); 780 + _get_random_bytes(&crng->state[4], sizeof(__u32) * 12); 781 781 for (i = 4; i < 16; i++) { 782 782 if (!arch_get_random_seed_long(&rv) && 783 783 !arch_get_random_long(&rv)) ··· 849 849 wake_up_interruptible(&crng_init_wait); 850 850 pr_notice("random: crng init done\n"); 851 851 } 852 - } 853 - 854 - static inline void crng_wait_ready(void) 855 - { 856 - wait_event_interruptible(crng_init_wait, crng_ready()); 857 852 } 858 853 859 854 static void _extract_crng(struct crng_state *crng, ··· 1472 1477 return ret; 1473 1478 } 1474 1479 1480 + #define warn_unseeded_randomness(previous) \ 1481 + _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous)) 1482 + 1483 + static void _warn_unseeded_randomness(const char *func_name, void *caller, 1484 + void **previous) 1485 + { 1486 + #ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM 1487 + const bool print_once = false; 1488 + #else 1489 + static bool print_once __read_mostly; 1490 + #endif 1491 + 1492 + if (print_once || 1493 + crng_ready() || 1494 + (previous && (caller == READ_ONCE(*previous)))) 1495 + return; 1496 + WRITE_ONCE(*previous, caller); 1497 + #ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM 1498 + print_once = true; 1499 + #endif 1500 + pr_notice("random: %s called from %pF with crng_init=%d\n", 1501 + func_name, 
caller, crng_init); 1502 + } 1503 + 1475 1504 /* 1476 1505 * This function is the exported kernel interface. It returns some 1477 1506 * number of good random numbers, suitable for key generation, seeding 1478 1507 * TCP sequence numbers, etc. It does not rely on the hardware random 1479 1508 * number generator. For random bytes direct from the hardware RNG 1480 - * (when available), use get_random_bytes_arch(). 1509 + * (when available), use get_random_bytes_arch(). In order to ensure 1510 + * that the randomness provided by this function is okay, the function 1511 + * wait_for_random_bytes() should be called and return 0 at least once 1512 + * at any point prior. 1481 1513 */ 1482 - void get_random_bytes(void *buf, int nbytes) 1514 + static void _get_random_bytes(void *buf, int nbytes) 1483 1515 { 1484 1516 __u8 tmp[CHACHA20_BLOCK_SIZE]; 1485 1517 1486 - #if DEBUG_RANDOM_BOOT > 0 1487 - if (!crng_ready()) 1488 - printk(KERN_NOTICE "random: %pF get_random_bytes called " 1489 - "with crng_init = %d\n", (void *) _RET_IP_, crng_init); 1490 - #endif 1491 1518 trace_get_random_bytes(nbytes, _RET_IP_); 1492 1519 1493 1520 while (nbytes >= CHACHA20_BLOCK_SIZE) { ··· 1526 1509 crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE); 1527 1510 memzero_explicit(tmp, sizeof(tmp)); 1528 1511 } 1512 + 1513 + void get_random_bytes(void *buf, int nbytes) 1514 + { 1515 + static void *previous; 1516 + 1517 + warn_unseeded_randomness(&previous); 1518 + _get_random_bytes(buf, nbytes); 1519 + } 1529 1520 EXPORT_SYMBOL(get_random_bytes); 1521 + 1522 + /* 1523 + * Wait for the urandom pool to be seeded and thus guaranteed to supply 1524 + * cryptographically secure random numbers. This applies to: the /dev/urandom 1525 + * device, the get_random_bytes function, and the get_random_{u32,u64,int,long} 1526 + * family of functions. Using any of these functions without first calling 1527 + * this function forfeits the guarantee of security. 
1528 + * 1529 + * Returns: 0 if the urandom pool has been seeded. 1530 + * -ERESTARTSYS if the function was interrupted by a signal. 1531 + */ 1532 + int wait_for_random_bytes(void) 1533 + { 1534 + if (likely(crng_ready())) 1535 + return 0; 1536 + return wait_event_interruptible(crng_init_wait, crng_ready()); 1537 + } 1538 + EXPORT_SYMBOL(wait_for_random_bytes); 1530 1539 1531 1540 /* 1532 1541 * Add a callback function that will be invoked when the nonblocking ··· 1908 1865 SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, 1909 1866 unsigned int, flags) 1910 1867 { 1868 + int ret; 1869 + 1911 1870 if (flags & ~(GRND_NONBLOCK|GRND_RANDOM)) 1912 1871 return -EINVAL; 1913 1872 ··· 1922 1877 if (!crng_ready()) { 1923 1878 if (flags & GRND_NONBLOCK) 1924 1879 return -EAGAIN; 1925 - crng_wait_ready(); 1926 - if (signal_pending(current)) 1927 - return -ERESTARTSYS; 1880 + ret = wait_for_random_bytes(); 1881 + if (unlikely(ret)) 1882 + return ret; 1928 1883 } 1929 1884 return urandom_read(NULL, buf, count, NULL); 1930 1885 } ··· 2085 2040 /* 2086 2041 * Get a random word for internal kernel use only. The quality of the random 2087 2042 * number is either as good as RDRAND or as good as /dev/urandom, with the 2088 - * goal of being quite fast and not depleting entropy. 2043 + * goal of being quite fast and not depleting entropy. In order to ensure 2044 + * that the randomness provided by this function is okay, the function 2045 + * wait_for_random_bytes() should be called and return 0 at least once 2046 + * at any point prior. 
2089 2047 */ 2090 2048 static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); 2091 2049 u64 get_random_u64(void) 2092 2050 { 2093 2051 u64 ret; 2094 - bool use_lock = READ_ONCE(crng_init) < 2; 2052 + bool use_lock; 2095 2053 unsigned long flags = 0; 2096 2054 struct batched_entropy *batch; 2055 + static void *previous; 2097 2056 2098 2057 #if BITS_PER_LONG == 64 2099 2058 if (arch_get_random_long((unsigned long *)&ret)) ··· 2108 2059 return ret; 2109 2060 #endif 2110 2061 2062 + warn_unseeded_randomness(&previous); 2063 + 2064 + use_lock = READ_ONCE(crng_init) < 2; 2111 2065 batch = &get_cpu_var(batched_entropy_u64); 2112 2066 if (use_lock) 2113 2067 read_lock_irqsave(&batched_entropy_reset_lock, flags); ··· 2130 2078 u32 get_random_u32(void) 2131 2079 { 2132 2080 u32 ret; 2133 - bool use_lock = READ_ONCE(crng_init) < 2; 2081 + bool use_lock; 2134 2082 unsigned long flags = 0; 2135 2083 struct batched_entropy *batch; 2084 + static void *previous; 2136 2085 2137 2086 if (arch_get_random_int(&ret)) 2138 2087 return ret; 2139 2088 2089 + warn_unseeded_randomness(&previous); 2090 + 2091 + use_lock = READ_ONCE(crng_init) < 2; 2140 2092 batch = &get_cpu_var(batched_entropy_u32); 2141 2093 if (use_lock) 2142 2094 read_lock_irqsave(&batched_entropy_reset_lock, flags);
+11 -3
drivers/target/iscsi/iscsi_target_auth.c
··· 47 47 } 48 48 } 49 49 50 - static void chap_gen_challenge( 50 + static int chap_gen_challenge( 51 51 struct iscsi_conn *conn, 52 52 int caller, 53 53 char *c_str, 54 54 unsigned int *c_len) 55 55 { 56 + int ret; 56 57 unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1]; 57 58 struct iscsi_chap *chap = conn->auth_protocol; 58 59 59 60 memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1); 60 61 61 - get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH); 62 + ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH); 63 + if (unlikely(ret)) 64 + return ret; 62 65 chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge, 63 66 CHAP_CHALLENGE_LENGTH); 64 67 /* ··· 72 69 73 70 pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client", 74 71 challenge_asciihex); 72 + return 0; 75 73 } 76 74 77 75 static int chap_check_algorithm(const char *a_str) ··· 147 143 case CHAP_DIGEST_UNKNOWN: 148 144 default: 149 145 pr_err("Unsupported CHAP_A value\n"); 146 + kfree(conn->auth_protocol); 150 147 return NULL; 151 148 } 152 149 ··· 161 156 /* 162 157 * Generate Challenge. 163 158 */ 164 - chap_gen_challenge(conn, 1, aic_str, aic_len); 159 + if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) { 160 + kfree(conn->auth_protocol); 161 + return NULL; 162 + } 165 163 166 164 return chap; 167 165 }
+14 -8
drivers/target/iscsi/iscsi_target_login.c
··· 245 245 return 0; 246 246 } 247 247 248 - static void iscsi_login_set_conn_values( 248 + static int iscsi_login_set_conn_values( 249 249 struct iscsi_session *sess, 250 250 struct iscsi_conn *conn, 251 251 __be16 cid) 252 252 { 253 + int ret; 253 254 conn->sess = sess; 254 255 conn->cid = be16_to_cpu(cid); 255 256 /* 256 257 * Generate a random Status sequence number (statsn) for the new 257 258 * iSCSI connection. 258 259 */ 259 - get_random_bytes(&conn->stat_sn, sizeof(u32)); 260 + ret = get_random_bytes_wait(&conn->stat_sn, sizeof(u32)); 261 + if (unlikely(ret)) 262 + return ret; 260 263 261 264 mutex_lock(&auth_id_lock); 262 265 conn->auth_id = iscsit_global->auth_id++; 263 266 mutex_unlock(&auth_id_lock); 267 + return 0; 264 268 } 265 269 266 270 __printf(2, 3) int iscsi_change_param_sprintf( ··· 310 306 return -ENOMEM; 311 307 } 312 308 313 - iscsi_login_set_conn_values(sess, conn, pdu->cid); 309 + ret = iscsi_login_set_conn_values(sess, conn, pdu->cid); 310 + if (unlikely(ret)) { 311 + kfree(sess); 312 + return ret; 313 + } 314 314 sess->init_task_tag = pdu->itt; 315 315 memcpy(&sess->isid, pdu->isid, 6); 316 316 sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn); ··· 505 497 { 506 498 struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; 507 499 508 - iscsi_login_set_conn_values(NULL, conn, pdu->cid); 509 - return 0; 500 + return iscsi_login_set_conn_values(NULL, conn, pdu->cid); 510 501 } 511 502 512 503 /* ··· 561 554 atomic_set(&sess->session_continuation, 1); 562 555 spin_unlock_bh(&sess->conn_lock); 563 556 564 - iscsi_login_set_conn_values(sess, conn, pdu->cid); 565 - 566 - if (iscsi_copy_param_list(&conn->param_list, 557 + if (iscsi_login_set_conn_values(sess, conn, pdu->cid) < 0 || 558 + iscsi_copy_param_list(&conn->param_list, 567 559 conn->tpg->param_list, 0) < 0) { 568 560 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, 569 561 ISCSI_LOGIN_STATUS_NO_RESOURCES);
+1 -1
fs/cifs/cifsfs.c
··· 1354 1354 spin_lock_init(&cifs_tcp_ses_lock); 1355 1355 spin_lock_init(&GlobalMid_Lock); 1356 1356 1357 - get_random_bytes(&cifs_lock_secret, sizeof(cifs_lock_secret)); 1357 + cifs_lock_secret = get_random_u32(); 1358 1358 1359 1359 if (cifs_max_pending < 2) { 1360 1360 cifs_max_pending = 2;
+2
include/linux/net.h
··· 274 274 275 275 #define net_get_random_once(buf, nbytes) \ 276 276 get_random_once((buf), (nbytes)) 277 + #define net_get_random_once_wait(buf, nbytes) \ 278 + get_random_once_wait((buf), (nbytes)) 277 279 278 280 int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, 279 281 size_t num, size_t len);
+2
include/linux/once.h
··· 53 53 54 54 #define get_random_once(buf, nbytes) \ 55 55 DO_ONCE(get_random_bytes, (buf), (nbytes)) 56 + #define get_random_once_wait(buf, nbytes) \ 57 + DO_ONCE(get_random_bytes_wait, (buf), (nbytes)) \ 56 58 57 59 #endif /* _LINUX_ONCE_H */
+26
include/linux/random.h
··· 34 34 extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy; 35 35 36 36 extern void get_random_bytes(void *buf, int nbytes); 37 + extern int wait_for_random_bytes(void); 37 38 extern int add_random_ready_callback(struct random_ready_callback *rdy); 38 39 extern void del_random_ready_callback(struct random_ready_callback *rdy); 39 40 extern void get_random_bytes_arch(void *buf, int nbytes); ··· 78 77 79 78 return val & CANARY_MASK; 80 79 } 80 + 81 + /* Calls wait_for_random_bytes() and then calls get_random_bytes(buf, nbytes). 82 + * Returns the result of the call to wait_for_random_bytes. */ 83 + static inline int get_random_bytes_wait(void *buf, int nbytes) 84 + { 85 + int ret = wait_for_random_bytes(); 86 + if (unlikely(ret)) 87 + return ret; 88 + get_random_bytes(buf, nbytes); 89 + return 0; 90 + } 91 + 92 + #define declare_get_random_var_wait(var) \ 93 + static inline int get_random_ ## var ## _wait(var *out) { \ 94 + int ret = wait_for_random_bytes(); \ 95 + if (unlikely(ret)) \ 96 + return ret; \ 97 + *out = get_random_ ## var(); \ 98 + return 0; \ 99 + } 100 + declare_get_random_var_wait(u32) 101 + declare_get_random_var_wait(u64) 102 + declare_get_random_var_wait(int) 103 + declare_get_random_var_wait(long) 104 + #undef declare_get_random_var 81 105 82 106 unsigned long randomize_page(unsigned long start, unsigned long range); 83 107
+28
lib/Kconfig.debug
··· 1223 1223 It is also used by various kernel debugging features that require 1224 1224 stack trace generation. 1225 1225 1226 + config WARN_ALL_UNSEEDED_RANDOM 1227 + bool "Warn for all uses of unseeded randomness" 1228 + default n 1229 + help 1230 + Some parts of the kernel contain bugs relating to their use of 1231 + cryptographically secure random numbers before it's actually possible 1232 + to generate those numbers securely. This setting ensures that these 1233 + flaws don't go unnoticed, by enabling a message, should this ever 1234 + occur. This will allow people with obscure setups to know when things 1235 + are going wrong, so that they might contact developers about fixing 1236 + it. 1237 + 1238 + Unfortunately, on some models of some architectures getting 1239 + a fully seeded CRNG is extremely difficult, and so this can 1240 + result in dmesg getting spammed for a surprisingly long 1241 + time. This is really bad from a security perspective, and 1242 + so architecture maintainers really need to do what they can 1243 + to get the CRNG seeded sooner after the system is booted. 1244 + However, since users can not do anything actionable to 1245 + address this, by default the kernel will issue only a single 1246 + warning for the first use of unseeded randomness. 1247 + 1248 + Say Y here if you want to receive warnings for all uses of 1249 + unseeded randomness. This will be of use primarily for 1250 + those developers interested in improving the security of 1251 + Linux kernels running on their architecture (or 1252 + subarchitecture). 1253 + 1226 1254 config DEBUG_KOBJECT 1227 1255 bool "kobject debugging" 1228 1256 depends on DEBUG_KERNEL
+1 -1
lib/rhashtable.c
··· 234 234 235 235 INIT_LIST_HEAD(&tbl->walkers); 236 236 237 - get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd)); 237 + tbl->hash_rnd = get_random_u32(); 238 238 239 239 for (i = 0; i < nbuckets; i++) 240 240 INIT_RHT_NULLS_HEAD(tbl->buckets[i], ht, i);
+5 -1
net/ceph/ceph_common.c
··· 599 599 { 600 600 struct ceph_client *client; 601 601 struct ceph_entity_addr *myaddr = NULL; 602 - int err = -ENOMEM; 602 + int err; 603 + 604 + err = wait_for_random_bytes(); 605 + if (err < 0) 606 + return ERR_PTR(err); 603 607 604 608 client = kzalloc(sizeof(*client), GFP_KERNEL); 605 609 if (client == NULL)
+1 -2
net/core/neighbour.c
··· 347 347 348 348 static void neigh_get_hash_rnd(u32 *x) 349 349 { 350 - get_random_bytes(x, sizeof(*x)); 351 - *x |= 1; 350 + *x = get_random_u32() | 1; 352 351 } 353 352 354 353 static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
+1 -2
net/ipv4/route.c
··· 2979 2979 { 2980 2980 atomic_set(&net->ipv4.rt_genid, 0); 2981 2981 atomic_set(&net->fnhe_genid, 0); 2982 - get_random_bytes(&net->ipv4.dev_addr_genid, 2983 - sizeof(net->ipv4.dev_addr_genid)); 2982 + atomic_set(&net->ipv4.dev_addr_genid, get_random_int()); 2984 2983 return 0; 2985 2984 } 2986 2985