latent_entropy: Mark functions with __latent_entropy

The __latent_entropy gcc attribute can be used only on functions and
variables. If it is on a function then the plugin will instrument it for
gathering control-flow entropy. If the attribute is on a variable then
the plugin will initialize it with random contents. The variable must
be an integer, an integer array type or a structure with integer fields.

These specific functions have been selected because they are init
functions (to help gather boot-time entropy), are called at unpredictable
times, or they have variable loops, each of which provides some level of
latent entropy.

Signed-off-by: Emese Revfy <re.emese@gmail.com>
[kees: expanded commit message]
Signed-off-by: Kees Cook <keescook@chromium.org>

Authored by Emese Revfy and committed by Kees Cook (commits 0766f788, 38addce8).

Changed files (+37 -22): block, drivers/char, fs, include, kernel, lib, mm, net/core
+1 -1
block/blk-softirq.c
@@ -18,7 +18,7 @@
  * Softirq action handler - move entries to local list and loop over them
  * while passing them to the queue registered handler.
  */
-static void blk_done_softirq(struct softirq_action *h)
+static __latent_entropy void blk_done_softirq(struct softirq_action *h)
 {
 	struct list_head *cpu_list, local_list;

+2 -2
drivers/char/random.c
@@ -479,8 +479,8 @@

 static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
 static void push_to_pool(struct work_struct *work);
-static __u32 input_pool_data[INPUT_POOL_WORDS];
-static __u32 blocking_pool_data[OUTPUT_POOL_WORDS];
+static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
+static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;

 static struct entropy_store input_pool = {
 	.poolinfo = &poolinfo_table[0],
+1
fs/namespace.c
@@ -2759,6 +2760,7 @@
 	return new_ns;
 }

+__latent_entropy
 struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns,
 		struct user_namespace *user_ns, struct fs_struct *new_fs)
 {
+7
include/linux/compiler-gcc.h
@@ -188,6 +188,13 @@
 #endif /* GCC_VERSION >= 40300 */

 #if GCC_VERSION >= 40500
+
+#ifndef __CHECKER__
+#ifdef LATENT_ENTROPY_PLUGIN
+#define __latent_entropy __attribute__((latent_entropy))
+#endif
+#endif
+
 /*
  * Mark a position in code as unreachable. This can be used to
  * suppress control flow warnings after asm blocks that transfer
+4
include/linux/compiler.h
@@ -406,6 +406,10 @@
 # define __attribute_const__	/* unimplemented */
 #endif

+#ifndef __latent_entropy
+# define __latent_entropy
+#endif
+
 /*
  * Tell gcc if a function is cold. The compiler will assume any path
  * directly leading to the call is unlikely.
+1 -1
include/linux/fdtable.h
@@ -105,7 +105,7 @@
 void put_files_struct(struct files_struct *fs);
 void reset_files_struct(struct files_struct *);
 int unshare_files(struct files_struct **);
-struct files_struct *dup_fd(struct files_struct *, int *);
+struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
 void do_close_on_exec(struct files_struct *);
 int iterate_fd(struct files_struct *, unsigned,
 		int (*)(const void *, struct file *, unsigned),
+1 -1
include/linux/genhd.h
@@ -437,7 +437,7 @@
 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);

 /* drivers/char/random.c */
-extern void add_disk_randomness(struct gendisk *disk);
+extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
 extern void rand_initialize_disk(struct gendisk *disk);

 static inline sector_t get_start_sect(struct block_device *bdev)
+3 -2
include/linux/init.h
@@ -39,7 +39,7 @@

 /* These are for everybody (although not all archs will actually
    discard it in modules) */
-#define __init		__section(.init.text) __cold notrace
+#define __init		__section(.init.text) __cold notrace __latent_entropy
 #define __initdata	__section(.init.data)
 #define __initconst	__constsection(.init.rodata)
 #define __exitdata	__section(.exit.data)
@@ -86,7 +86,8 @@
 #define __exit		__section(.exit.text) __exitused __cold notrace

 /* Used for MEMORY_HOTPLUG */
-#define __meminit	__section(.meminit.text) __cold notrace
+#define __meminit	__section(.meminit.text) __cold notrace \
+				  __latent_entropy
 #define __meminitdata	__section(.meminit.data)
 #define __meminitconst	__constsection(.meminit.rodata)
 #define __memexit	__section(.memexit.text) __exitused __cold notrace
+2 -2
include/linux/random.h
@@ -30,8 +30,8 @@
 #endif

 extern void add_input_randomness(unsigned int type, unsigned int code,
-				unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+				unsigned int value) __latent_entropy;
+extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;

 extern void get_random_bytes(void *buf, int nbytes);
 extern int add_random_ready_callback(struct random_ready_callback *rdy);
+4 -2
kernel/fork.c
@@ -404,7 +404,8 @@
 }

 #ifdef CONFIG_MMU
-static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
+static __latent_entropy int dup_mmap(struct mm_struct *mm,
+					struct mm_struct *oldmm)
 {
 	struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
 	struct rb_node **rb_link, *rb_parent;
@@ -1297,7 +1298,8 @@
  * parts of the process environment (as per the clone
  * flags). The actual kick-off is left to the caller.
  */
-static struct task_struct *copy_process(unsigned long clone_flags,
+static __latent_entropy struct task_struct *copy_process(
+					unsigned long clone_flags,
 					unsigned long stack_start,
 					unsigned long stack_size,
 					int __user *child_tidptr,
+1 -1
kernel/rcu/tiny.c
@@ -170,7 +170,7 @@
 			   false));
 }

-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
 	__rcu_process_callbacks(&rcu_sched_ctrlblk);
 	__rcu_process_callbacks(&rcu_bh_ctrlblk);
+1 -1
kernel/rcu/tree.c
@@ -3013,7 +3013,7 @@
 /*
  * Do RCU core processing for the current CPU.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
 {
 	struct rcu_state *rsp;

+1 -1
kernel/sched/fair.c
@@ -8283,7 +8283,7 @@
  * run_rebalance_domains is triggered when needed from the scheduler tick.
  * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
  */
-static void run_rebalance_domains(struct softirq_action *h)
+static __latent_entropy void run_rebalance_domains(struct softirq_action *h)
 {
 	struct rq *this_rq = this_rq();
 	enum cpu_idle_type idle = this_rq->idle_balance ?
+2 -2
kernel/softirq.c
@@ -482,7 +482,7 @@
 }
 EXPORT_SYMBOL(__tasklet_hi_schedule_first);

-static void tasklet_action(struct softirq_action *a)
+static __latent_entropy void tasklet_action(struct softirq_action *a)
 {
 	struct tasklet_struct *list;

@@ -518,7 +518,7 @@
 	}
 }

-static void tasklet_hi_action(struct softirq_action *a)
+static __latent_entropy void tasklet_hi_action(struct softirq_action *a)
 {
 	struct tasklet_struct *list;

+1 -1
kernel/time/timer.c
@@ -1633,7 +1633,7 @@
 /*
  * This function runs timers and the timer-tq in bottom half context.
  */
-static void run_timer_softirq(struct softirq_action *h)
+static __latent_entropy void run_timer_softirq(struct softirq_action *h)
 {
 	struct timer_base *base = this_cpu_ptr(&timer_bases[BASE_STD]);

+1 -1
lib/irq_poll.c
@@ -74,7 +74,7 @@
 }
 EXPORT_SYMBOL(irq_poll_complete);

-static void irq_poll_softirq(struct softirq_action *h)
+static void __latent_entropy irq_poll_softirq(struct softirq_action *h)
 {
 	struct list_head *list = this_cpu_ptr(&blk_cpu_iopoll);
 	int rearm = 0, budget = irq_poll_budget;
+1 -1
lib/random32.c
@@ -47,7 +47,7 @@
 }
 #endif

-static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
+static DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy;

 /**
  * prandom_u32_state - seeded pseudo-random number generator.
+1 -1
mm/page_alloc.c
@@ -92,7 +92,7 @@
 #endif

 #ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
-volatile u64 latent_entropy;
+volatile u64 latent_entropy __latent_entropy;
 EXPORT_SYMBOL(latent_entropy);
 #endif

+2 -2
net/core/dev.c
··· 3855 3855 } 3856 3856 EXPORT_SYMBOL(netif_rx_ni); 3857 3857 3858 - static void net_tx_action(struct softirq_action *h) 3858 + static __latent_entropy void net_tx_action(struct softirq_action *h) 3859 3859 { 3860 3860 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 3861 3861 ··· 5187 5187 return work; 5188 5188 } 5189 5189 5190 - static void net_rx_action(struct softirq_action *h) 5190 + static __latent_entropy void net_rx_action(struct softirq_action *h) 5191 5191 { 5192 5192 struct softnet_data *sd = this_cpu_ptr(&softnet_data); 5193 5193 unsigned long time_limit = jiffies + 2;