Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random

Pull /dev/random changes from Ted Ts'o:
"A number of cleanups plus support for the RDSEED instruction, which
will be showing up in Intel Broadwell CPUs"

* tag 'random_for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/random:
random: Add arch_has_random[_seed]()
random: If we have arch_get_random_seed*(), try it before blocking
random: Use arch_get_random_seed*() at init time and once a second
x86, random: Enable the RDSEED instruction
random: use the architectural HWRNG for the SHA's IV in extract_buf()
random: clarify bits/bytes in wakeup thresholds
random: entropy_bytes is actually bits
random: simplify accounting code
random: tighten bound on random_read_wakeup_thresh
random: forget lock in lockless accounting
random: simplify accounting logic
random: fix comment on "account"
random: simplify loop in random_read
random: fix description of get_random_bytes
random: fix comment on proc_do_uuid
random: fix typos / spelling errors in comments

+211 -109
+18
arch/powerpc/include/asm/archrandom.h
··· 25 25 return rc; 26 26 } 27 27 28 + static inline int arch_has_random(void) 29 + { 30 + return !!ppc_md.get_random_long; 31 + } 32 + 28 33 int powernv_get_random_long(unsigned long *v); 34 + 35 + static inline int arch_get_random_seed_long(unsigned long *v) 36 + { 37 + return 0; 38 + } 39 + static inline int arch_get_random_seed_int(unsigned int *v) 40 + { 41 + return 0; 42 + } 43 + static inline int arch_has_random_seed(void) 44 + { 45 + return 0; 46 + } 29 47 30 48 #endif /* CONFIG_ARCH_RANDOM */ 31 49
+41 -1
arch/x86/include/asm/archrandom.h
··· 1 1 /* 2 2 * This file is part of the Linux kernel. 3 3 * 4 - * Copyright (c) 2011, Intel Corporation 4 + * Copyright (c) 2011-2014, Intel Corporation 5 5 * Authors: Fenghua Yu <fenghua.yu@intel.com>, 6 6 * H. Peter Anvin <hpa@linux.intel.com> 7 7 * ··· 31 31 #define RDRAND_RETRY_LOOPS 10 32 32 33 33 #define RDRAND_INT ".byte 0x0f,0xc7,0xf0" 34 + #define RDSEED_INT ".byte 0x0f,0xc7,0xf8" 34 35 #ifdef CONFIG_X86_64 35 36 # define RDRAND_LONG ".byte 0x48,0x0f,0xc7,0xf0" 37 + # define RDSEED_LONG ".byte 0x48,0x0f,0xc7,0xf8" 36 38 #else 37 39 # define RDRAND_LONG RDRAND_INT 40 + # define RDSEED_LONG RDSEED_INT 38 41 #endif 39 42 40 43 #ifdef CONFIG_ARCH_RANDOM ··· 53 50 "2:" 54 51 : "=r" (ok), "=a" (*v) 55 52 : "0" (RDRAND_RETRY_LOOPS)); 53 + return ok; 54 + } 55 + 56 + /* A single attempt at RDSEED */ 57 + static inline bool rdseed_long(unsigned long *v) 58 + { 59 + unsigned char ok; 60 + asm volatile(RDSEED_LONG "\n\t" 61 + "setc %0" 62 + : "=qm" (ok), "=a" (*v)); 56 63 return ok; 57 64 } 58 65 ··· 83 70 return ok; \ 84 71 } 85 72 73 + #define GET_SEED(name, type, rdseed, nop) \ 74 + static inline int name(type *v) \ 75 + { \ 76 + unsigned char ok; \ 77 + alternative_io("movb $0, %0\n\t" \ 78 + nop, \ 79 + rdseed "\n\t" \ 80 + "setc %0", \ 81 + X86_FEATURE_RDSEED, \ 82 + ASM_OUTPUT2("=q" (ok), "=a" (*v))); \ 83 + return ok; \ 84 + } 85 + 86 86 #ifdef CONFIG_X86_64 87 87 88 88 GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5); 89 89 GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4); 90 + 91 + GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP5); 92 + GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4); 90 93 91 94 #else 92 95 93 96 GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3); 94 97 GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3); 95 98 99 + GET_SEED(arch_get_random_seed_long, unsigned long, RDSEED_LONG, ASM_NOP4); 100 + 
GET_SEED(arch_get_random_seed_int, unsigned int, RDSEED_INT, ASM_NOP4); 101 + 96 102 #endif /* CONFIG_X86_64 */ 103 + 104 + #define arch_has_random() static_cpu_has(X86_FEATURE_RDRAND) 105 + #define arch_has_random_seed() static_cpu_has(X86_FEATURE_RDSEED) 97 106 98 107 #else 99 108 100 109 static inline int rdrand_long(unsigned long *v) 110 + { 111 + return 0; 112 + } 113 + 114 + static inline bool rdseed_long(unsigned long *v) 101 115 { 102 116 return 0; 103 117 }
+136 -108
drivers/char/random.c
··· 295 295 * The minimum number of bits of entropy before we wake up a read on 296 296 * /dev/random. Should be enough to do a significant reseed. 297 297 */ 298 - static int random_read_wakeup_thresh = 64; 298 + static int random_read_wakeup_bits = 64; 299 299 300 300 /* 301 301 * If the entropy count falls under this number of bits, then we 302 302 * should wake up processes which are selecting or polling on write 303 303 * access to /dev/random. 304 304 */ 305 - static int random_write_wakeup_thresh = 28 * OUTPUT_POOL_WORDS; 305 + static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS; 306 306 307 307 /* 308 - * The minimum number of seconds between urandom pool resending. We 308 + * The minimum number of seconds between urandom pool reseeding. We 309 309 * do this to limit the amount of entropy that can be drained from the 310 310 * input pool even if there are heavy demands on /dev/urandom. 311 311 */ ··· 322 322 * Register. (See M. Matsumoto & Y. Kurita, 1992. Twisted GFSR 323 323 * generators. ACM Transactions on Modeling and Computer Simulation 324 324 * 2(3):179-194. Also see M. Matsumoto & Y. Kurita, 1994. Twisted 325 - * GFSR generators II. ACM Transactions on Mdeling and Computer 325 + * GFSR generators II. ACM Transactions on Modeling and Computer 326 326 * Simulation 4:254-266) 327 327 * 328 328 * Thanks to Colin Plumb for suggesting this. ··· 666 666 r->entropy_total, _RET_IP_); 667 667 668 668 if (r == &input_pool) { 669 - int entropy_bytes = entropy_count >> ENTROPY_SHIFT; 669 + int entropy_bits = entropy_count >> ENTROPY_SHIFT; 670 670 671 671 /* should we wake readers? */ 672 - if (entropy_bytes >= random_read_wakeup_thresh) { 672 + if (entropy_bits >= random_read_wakeup_bits) { 673 673 wake_up_interruptible(&random_read_wait); 674 674 kill_fasync(&fasync, SIGIO, POLL_IN); 675 675 } ··· 678 678 * forth between them, until the output pools are 75% 679 679 * full. 
680 680 */ 681 - if (entropy_bytes > random_write_wakeup_thresh && 681 + if (entropy_bits > random_write_wakeup_bits && 682 682 r->initialized && 683 - r->entropy_total >= 2*random_read_wakeup_thresh) { 683 + r->entropy_total >= 2*random_read_wakeup_bits) { 684 684 static struct entropy_store *last = &blocking_pool; 685 685 struct entropy_store *other = &blocking_pool; 686 686 ··· 844 844 cycles_t cycles = random_get_entropy(); 845 845 __u32 input[4], c_high, j_high; 846 846 __u64 ip; 847 + unsigned long seed; 848 + int credit; 847 849 848 850 c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; 849 851 j_high = (sizeof(now) > 4) ? now >> 32 : 0; ··· 864 862 865 863 r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool; 866 864 __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL); 865 + 867 866 /* 868 867 * If we don't have a valid cycle counter, and we see 869 868 * back-to-back timer interrupts, then skip giving credit for 870 - * any entropy. 869 + * any entropy, otherwise credit 1 bit. 871 870 */ 871 + credit = 1; 872 872 if (cycles == 0) { 873 873 if (irq_flags & __IRQF_TIMER) { 874 874 if (fast_pool->last_timer_intr) 875 - return; 875 + credit = 0; 876 876 fast_pool->last_timer_intr = 1; 877 877 } else 878 878 fast_pool->last_timer_intr = 0; 879 879 } 880 - credit_entropy_bits(r, 1); 880 + 881 + /* 882 + * If we have architectural seed generator, produce a seed and 883 + * add it to the pool. For the sake of paranoia count it as 884 + * 50% entropic. 885 + */ 886 + if (arch_get_random_seed_long(&seed)) { 887 + __mix_pool_bytes(r, &seed, sizeof(seed), NULL); 888 + credit += sizeof(seed) * 4; 889 + } 890 + 891 + credit_entropy_bits(r, credit); 881 892 } 882 893 883 894 #ifdef CONFIG_BLOCK ··· 939 924 { 940 925 __u32 tmp[OUTPUT_POOL_WORDS]; 941 926 942 - /* For /dev/random's pool, always leave two wakeup worth's BITS */ 943 - int rsvd = r->limit ? 
0 : random_read_wakeup_thresh/4; 927 + /* For /dev/random's pool, always leave two wakeups' worth */ 928 + int rsvd_bytes = r->limit ? 0 : random_read_wakeup_bits / 4; 944 929 int bytes = nbytes; 945 930 946 - /* pull at least as many as BYTES as wakeup BITS */ 947 - bytes = max_t(int, bytes, random_read_wakeup_thresh / 8); 931 + /* pull at least as much as a wakeup */ 932 + bytes = max_t(int, bytes, random_read_wakeup_bits / 8); 948 933 /* but never more than the buffer size */ 949 934 bytes = min_t(int, bytes, sizeof(tmp)); 950 935 951 936 trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8, 952 937 ENTROPY_BITS(r), ENTROPY_BITS(r->pull)); 953 938 bytes = extract_entropy(r->pull, tmp, bytes, 954 - random_read_wakeup_thresh / 8, rsvd); 939 + random_read_wakeup_bits / 8, rsvd_bytes); 955 940 mix_pool_bytes(r, tmp, bytes, NULL); 956 941 credit_entropy_bits(r, bytes*8); 957 942 } ··· 967 952 struct entropy_store *r = container_of(work, struct entropy_store, 968 953 push_work); 969 954 BUG_ON(!r); 970 - _xfer_secondary_pool(r, random_read_wakeup_thresh/8); 955 + _xfer_secondary_pool(r, random_read_wakeup_bits/8); 971 956 trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT, 972 957 r->pull->entropy_count >> ENTROPY_SHIFT); 973 958 } 974 959 975 960 /* 976 - * These functions extracts randomness from the "entropy pool", and 977 - * returns it in a buffer. 978 - * 979 - * The min parameter specifies the minimum amount we can pull before 980 - * failing to avoid races that defeat catastrophic reseeding while the 981 - * reserved parameter indicates how much entropy we must leave in the 982 - * pool after each pull to avoid starving other readers. 983 - * 984 - * Note: extract_entropy() assumes that .poolwords is a multiple of 16 words. 961 + * This function decides how many bytes to actually take from the 962 + * given pool, and also debits the entropy count accordingly. 
985 963 */ 986 - 987 964 static size_t account(struct entropy_store *r, size_t nbytes, int min, 988 965 int reserved) 989 966 { 990 - unsigned long flags; 991 - int wakeup_write = 0; 992 967 int have_bytes; 993 968 int entropy_count, orig; 994 969 size_t ibytes; 995 - 996 - /* Hold lock while accounting */ 997 - spin_lock_irqsave(&r->lock, flags); 998 970 999 971 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); 1000 972 ··· 990 988 entropy_count = orig = ACCESS_ONCE(r->entropy_count); 991 989 have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); 992 990 ibytes = nbytes; 993 - if (have_bytes < min + reserved) { 991 + /* If limited, never pull more than available */ 992 + if (r->limit) 993 + ibytes = min_t(size_t, ibytes, have_bytes - reserved); 994 + if (ibytes < min) 994 995 ibytes = 0; 995 - } else { 996 - /* If limited, never pull more than available */ 997 - if (r->limit && ibytes + reserved >= have_bytes) 998 - ibytes = have_bytes - reserved; 999 - 1000 - if (have_bytes >= ibytes + reserved) 1001 - entropy_count -= ibytes << (ENTROPY_SHIFT + 3); 1002 - else 1003 - entropy_count = reserved << (ENTROPY_SHIFT + 3); 1004 - 1005 - if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) 1006 - goto retry; 1007 - 1008 - if ((r->entropy_count >> ENTROPY_SHIFT) 1009 - < random_write_wakeup_thresh) 1010 - wakeup_write = 1; 1011 - } 1012 - spin_unlock_irqrestore(&r->lock, flags); 996 + entropy_count = max_t(int, 0, 997 + entropy_count - (ibytes << (ENTROPY_SHIFT + 3))); 998 + if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) 999 + goto retry; 1013 1000 1014 1001 trace_debit_entropy(r->name, 8 * ibytes); 1015 - if (wakeup_write) { 1002 + if (ibytes && 1003 + (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) { 1016 1004 wake_up_interruptible(&random_write_wait); 1017 1005 kill_fasync(&fasync, SIGIO, POLL_OUT); 1018 1006 } ··· 1010 1018 return ibytes; 1011 1019 } 1012 1020 1021 + /* 1022 + * This function does the actual extraction for 
extract_entropy and 1023 + * extract_entropy_user. 1024 + * 1025 + * Note: we assume that .poolwords is a multiple of 16 words. 1026 + */ 1013 1027 static void extract_buf(struct entropy_store *r, __u8 *out) 1014 1028 { 1015 1029 int i; ··· 1027 1029 __u8 extract[64]; 1028 1030 unsigned long flags; 1029 1031 1030 - /* Generate a hash across the pool, 16 words (512 bits) at a time */ 1031 - sha_init(hash.w); 1032 - spin_lock_irqsave(&r->lock, flags); 1033 - for (i = 0; i < r->poolinfo->poolwords; i += 16) 1034 - sha_transform(hash.w, (__u8 *)(r->pool + i), workspace); 1035 - 1036 1032 /* 1037 - * If we have a architectural hardware random number 1038 - * generator, mix that in, too. 1033 + * If we have an architectural hardware random number 1034 + * generator, use it for SHA's initial vector 1039 1035 */ 1036 + sha_init(hash.w); 1040 1037 for (i = 0; i < LONGS(20); i++) { 1041 1038 unsigned long v; 1042 1039 if (!arch_get_random_long(&v)) 1043 1040 break; 1044 - hash.l[i] ^= v; 1041 + hash.l[i] = v; 1045 1042 } 1043 + 1044 + /* Generate a hash across the pool, 16 words (512 bits) at a time */ 1045 + spin_lock_irqsave(&r->lock, flags); 1046 + for (i = 0; i < r->poolinfo->poolwords; i += 16) 1047 + sha_transform(hash.w, (__u8 *)(r->pool + i), workspace); 1046 1048 1047 1049 /* 1048 1050 * We mix the hash back into the pool to prevent backtracking ··· 1077 1079 memset(&hash, 0, sizeof(hash)); 1078 1080 } 1079 1081 1082 + /* 1083 + * This function extracts randomness from the "entropy pool", and 1084 + * returns it in a buffer. 1085 + * 1086 + * The min parameter specifies the minimum amount we can pull before 1087 + * failing to avoid races that defeat catastrophic reseeding while the 1088 + * reserved parameter indicates how much entropy we must leave in the 1089 + * pool after each pull to avoid starving other readers. 
1090 + */ 1080 1091 static ssize_t extract_entropy(struct entropy_store *r, void *buf, 1081 1092 size_t nbytes, int min, int reserved) 1082 1093 { ··· 1136 1129 return ret; 1137 1130 } 1138 1131 1132 + /* 1133 + * This function extracts randomness from the "entropy pool", and 1134 + * returns it in a userspace buffer. 1135 + */ 1139 1136 static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, 1140 1137 size_t nbytes) 1141 1138 { ··· 1181 1170 /* 1182 1171 * This function is the exported kernel interface. It returns some 1183 1172 * number of good random numbers, suitable for key generation, seeding 1184 - * TCP sequence numbers, etc. It does not use the hw random number 1185 - * generator, if available; use get_random_bytes_arch() for that. 1173 + * TCP sequence numbers, etc. It does not rely on the hardware random 1174 + * number generator. For random bytes direct from the hardware RNG 1175 + * (when available), use get_random_bytes_arch(). 1186 1176 */ 1187 1177 void get_random_bytes(void *buf, int nbytes) 1188 1178 { ··· 1250 1238 r->last_pulled = jiffies; 1251 1239 mix_pool_bytes(r, &now, sizeof(now), NULL); 1252 1240 for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) { 1253 - if (!arch_get_random_long(&rv)) 1241 + if (!arch_get_random_seed_long(&rv) && 1242 + !arch_get_random_long(&rv)) 1254 1243 rv = random_get_entropy(); 1255 1244 mix_pool_bytes(r, &rv, sizeof(rv), NULL); 1256 1245 } ··· 1294 1281 } 1295 1282 #endif 1296 1283 1284 + /* 1285 + * Attempt an emergency refill using arch_get_random_seed_long(). 1286 + * 1287 + * As with add_interrupt_randomness() be paranoid and only 1288 + * credit the output as 50% entropic. 
1289 + */ 1290 + static int arch_random_refill(void) 1291 + { 1292 + const unsigned int nlongs = 64; /* Arbitrary number */ 1293 + unsigned int n = 0; 1294 + unsigned int i; 1295 + unsigned long buf[nlongs]; 1296 + 1297 + if (!arch_has_random_seed()) 1298 + return 0; 1299 + 1300 + for (i = 0; i < nlongs; i++) { 1301 + if (arch_get_random_seed_long(&buf[n])) 1302 + n++; 1303 + } 1304 + 1305 + if (n) { 1306 + unsigned int rand_bytes = n * sizeof(unsigned long); 1307 + 1308 + mix_pool_bytes(&input_pool, buf, rand_bytes, NULL); 1309 + credit_entropy_bits(&input_pool, rand_bytes*4); 1310 + } 1311 + 1312 + return n; 1313 + } 1314 + 1297 1315 static ssize_t 1298 1316 random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) 1299 1317 { 1300 - ssize_t n, retval = 0, count = 0; 1318 + ssize_t n; 1301 1319 1302 1320 if (nbytes == 0) 1303 1321 return 0; 1304 1322 1305 - while (nbytes > 0) { 1306 - n = nbytes; 1307 - if (n > SEC_XFER_SIZE) 1308 - n = SEC_XFER_SIZE; 1309 - 1310 - n = extract_entropy_user(&blocking_pool, buf, n); 1311 - 1312 - if (n < 0) { 1313 - retval = n; 1314 - break; 1315 - } 1316 - 1323 + nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE); 1324 + while (1) { 1325 + n = extract_entropy_user(&blocking_pool, buf, nbytes); 1326 + if (n < 0) 1327 + return n; 1317 1328 trace_random_read(n*8, (nbytes-n)*8, 1318 1329 ENTROPY_BITS(&blocking_pool), 1319 1330 ENTROPY_BITS(&input_pool)); 1331 + if (n > 0) 1332 + return n; 1320 1333 1321 - if (n == 0) { 1322 - if (file->f_flags & O_NONBLOCK) { 1323 - retval = -EAGAIN; 1324 - break; 1325 - } 1334 + /* Pool is (near) empty. Maybe wait and retry. 
*/ 1326 1335 1327 - wait_event_interruptible(random_read_wait, 1328 - ENTROPY_BITS(&input_pool) >= 1329 - random_read_wakeup_thresh); 1330 - 1331 - if (signal_pending(current)) { 1332 - retval = -ERESTARTSYS; 1333 - break; 1334 - } 1335 - 1336 + /* First try an emergency refill */ 1337 + if (arch_random_refill()) 1336 1338 continue; 1337 - } 1338 1339 1339 - count += n; 1340 - buf += n; 1341 - nbytes -= n; 1342 - break; /* This break makes the device work */ 1343 - /* like a named pipe */ 1340 + if (file->f_flags & O_NONBLOCK) 1341 + return -EAGAIN; 1342 + 1343 + wait_event_interruptible(random_read_wait, 1344 + ENTROPY_BITS(&input_pool) >= 1345 + random_read_wakeup_bits); 1346 + if (signal_pending(current)) 1347 + return -ERESTARTSYS; 1344 1348 } 1345 - 1346 - return (count ? count : retval); 1347 1349 } 1348 1350 1349 1351 static ssize_t ··· 1386 1358 poll_wait(file, &random_read_wait, wait); 1387 1359 poll_wait(file, &random_write_wait, wait); 1388 1360 mask = 0; 1389 - if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_thresh) 1361 + if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits) 1390 1362 mask |= POLLIN | POLLRDNORM; 1391 - if (ENTROPY_BITS(&input_pool) < random_write_wakeup_thresh) 1363 + if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits) 1392 1364 mask |= POLLOUT | POLLWRNORM; 1393 1365 return mask; 1394 1366 } ··· 1535 1507 #include <linux/sysctl.h> 1536 1508 1537 1509 static int min_read_thresh = 8, min_write_thresh; 1538 - static int max_read_thresh = INPUT_POOL_WORDS * 32; 1510 + static int max_read_thresh = OUTPUT_POOL_WORDS * 32; 1539 1511 static int max_write_thresh = INPUT_POOL_WORDS * 32; 1540 1512 static char sysctl_bootid[16]; 1541 1513 1542 1514 /* 1543 - * These functions is used to return both the bootid UUID, and random 1515 + * This function is used to return both the bootid UUID, and random 1544 1516 * UUID. 
The difference is in whether table->data is NULL; if it is, 1545 1517 * then a new UUID is generated and returned to the user. 1546 1518 * 1547 - * If the user accesses this via the proc interface, it will be returned 1548 - * as an ASCII string in the standard UUID format. If accesses via the 1549 - * sysctl system call, it is returned as 16 bytes of binary data. 1519 + * If the user accesses this via the proc interface, the UUID will be 1520 + * returned as an ASCII string in the standard UUID format; if via the 1521 + * sysctl system call, as 16 bytes of binary data. 1550 1522 */ 1551 1523 static int proc_do_uuid(struct ctl_table *table, int write, 1552 1524 void __user *buffer, size_t *lenp, loff_t *ppos) ··· 1611 1583 }, 1612 1584 { 1613 1585 .procname = "read_wakeup_threshold", 1614 - .data = &random_read_wakeup_thresh, 1586 + .data = &random_read_wakeup_bits, 1615 1587 .maxlen = sizeof(int), 1616 1588 .mode = 0644, 1617 1589 .proc_handler = proc_dointvec_minmax, ··· 1620 1592 }, 1621 1593 { 1622 1594 .procname = "write_wakeup_threshold", 1623 - .data = &random_write_wakeup_thresh, 1595 + .data = &random_write_wakeup_bits, 1624 1596 .maxlen = sizeof(int), 1625 1597 .mode = 0644, 1626 1598 .proc_handler = proc_dointvec_minmax,
+16
include/linux/random.h
··· 88 88 { 89 89 return 0; 90 90 } 91 + static inline int arch_has_random(void) 92 + { 93 + return 0; 94 + } 95 + static inline int arch_get_random_seed_long(unsigned long *v) 96 + { 97 + return 0; 98 + } 99 + static inline int arch_get_random_seed_int(unsigned int *v) 100 + { 101 + return 0; 102 + } 103 + static inline int arch_has_random_seed(void) 104 + { 105 + return 0; 106 + } 91 107 #endif 92 108 93 109 /* Pseudo random number generator from numerical recipes. */