Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

random: use registers from interrupted code for CPUs without a cycle counter

For CPUs that don't have a cycle counter, or something equivalent
that can be used for random_get_entropy(), random_get_entropy() will
always return 0. In that case, substitute the saved interrupt
registers to add a bit more unpredictability.

Some folks have suggested hashing all of the registers
unconditionally, but this would increase the overhead of
add_interrupt_randomness() by at least an order of magnitude, and this
would very likely be unacceptable.

The changes in this commit have been benchmarked as having almost no
effect on the overhead of add_interrupt_randomness() if the cycle
counter is present, and as doubling the overhead if it is not present.

Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Cc: Jörn Engel <joern@logfs.org>

+22 -25
drivers/char/random.c
··· 551 551 struct fast_pool { 552 552 __u32 pool[4]; 553 553 unsigned long last; 554 + unsigned short reg_idx; 554 555 unsigned char count; 555 - unsigned char notimer_count; 556 - unsigned char rotate; 557 556 }; 558 557 559 558 /* ··· 856 857 #define add_interrupt_bench(x) 857 858 #endif 858 859 860 + static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs) 861 + { 862 + __u32 *ptr = (__u32 *) regs; 863 + 864 + if (regs == NULL) 865 + return 0; 866 + if (f->reg_idx >= sizeof(struct pt_regs) / sizeof(__u32)) 867 + f->reg_idx = 0; 868 + return *(ptr + f->reg_idx++); 869 + } 870 + 859 871 void add_interrupt_randomness(int irq, int irq_flags) 860 872 { 861 873 struct entropy_store *r; ··· 879 869 unsigned long seed; 880 870 int credit = 0; 881 871 872 + if (cycles == 0) 873 + cycles = get_reg(fast_pool, regs); 882 874 c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; 883 875 j_high = (sizeof(now) > 4) ? now >> 32 : 0; 884 876 fast_pool->pool[0] ^= cycles ^ j_high ^ irq; 885 877 fast_pool->pool[1] ^= now ^ c_high; 886 878 ip = regs ? instruction_pointer(regs) : _RET_IP_; 887 879 fast_pool->pool[2] ^= ip; 888 - fast_pool->pool[3] ^= ip >> 32; 880 + fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : 881 + get_reg(fast_pool, regs); 889 882 890 883 fast_mix(fast_pool); 891 - if ((irq_flags & __IRQF_TIMER) == 0) 892 - fast_pool->notimer_count++; 893 884 add_interrupt_bench(cycles); 894 885 895 - if (cycles) { 896 - if ((fast_pool->count < 64) && 897 - !time_after(now, fast_pool->last + HZ)) 898 - return; 899 - } else { 900 - /* CPU does not have a cycle counting register :-( */ 901 - if (fast_pool->count < 64) 902 - return; 903 - } 886 + if ((fast_pool->count < 64) && 887 + !time_after(now, fast_pool->last + HZ)) 888 + return; 904 889 905 890 r = nonblocking_pool.initialized ? 
&input_pool : &nonblocking_pool; 906 891 if (!spin_trylock(&r->lock)) ··· 915 910 } 916 911 spin_unlock(&r->lock); 917 912 918 - /* 919 - * If we have a valid cycle counter or if the majority of 920 - * interrupts collected were non-timer interrupts, then give 921 - * an entropy credit of 1 bit. Yes, this is being very 922 - * conservative. 923 - */ 924 - if (cycles || (fast_pool->notimer_count >= 32)) 925 - credit++; 913 + fast_pool->count = 0; 926 914 927 - fast_pool->count = fast_pool->notimer_count = 0; 928 - 929 - credit_entropy_bits(r, credit); 915 + /* award one bit for the contents of the fast pool */ 916 + credit_entropy_bits(r, credit + 1); 930 917 } 931 918 932 919 #ifdef CONFIG_BLOCK