/* include/linux/rcutree.h — at v6.12, 131 lines, 4.5 kB */
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Read-Copy Update mechanism for mutual exclusion (tree-based version)
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Dipankar Sarma <dipankar@in.ibm.com>
 *	   Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *	Documentation/RCU
 */

#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H

void rcu_softirq_qs(void);
void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(void);
void rcu_cpu_stall_reset(void);
void rcu_request_urgent_qs_task(struct task_struct *t);

/*
 * Note a virtualization-based context switch.  This is simply a
 * wrapper around rcu_note_context_switch(), which allows TINY_RCU
 * to save a few bytes.  The caller must have disabled interrupts.
 */
static inline void rcu_virt_note_context_switch(void)
{
	rcu_note_context_switch(false);
}

void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
void kvfree_rcu_barrier(void);

void rcu_barrier(void);
void rcu_momentary_eqs(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);

/*
 * An opaque cookie pair capturing both the normal and the expedited
 * grace-period state, for use with the *_full() polling APIs below.
 */
struct rcu_gp_oldstate {
	unsigned long rgos_norm;
	unsigned long rgos_exp;
};

// Maximum number of rcu_gp_oldstate values corresponding to
// not-yet-completed RCU grace periods.
#define NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE 4

/**
 * same_state_synchronize_rcu_full - Are two old-state values identical?
 * @rgosp1: First old-state value.
 * @rgosp2: Second old-state value.
 *
 * The two old-state values must have been obtained from either
 * get_state_synchronize_rcu_full(), start_poll_synchronize_rcu_full(),
 * or get_completed_synchronize_rcu_full().  Returns @true if the two
 * values are identical and @false otherwise.  This allows structures
 * whose lifetimes are tracked by old-state values to push these values
 * to a list header, allowing those structures to be slightly smaller.
 *
 * Note that equality is judged on a bitwise basis, so that an
 * @rcu_gp_oldstate structure with an already-completed state in one field
 * will compare not-equal to a structure with an already-completed state
 * in the other field.  After all, the @rcu_gp_oldstate structure is opaque
 * so how did such a situation come to pass in the first place?
 */
static inline bool same_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp1,
						   struct rcu_gp_oldstate *rgosp2)
{
	return rgosp1->rgos_norm == rgosp2->rgos_norm && rgosp1->rgos_exp == rgosp2->rgos_exp;
}

unsigned long start_poll_synchronize_rcu_expedited(void);
void start_poll_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu_expedited(unsigned long oldstate);
void cond_synchronize_rcu_expedited_full(struct rcu_gp_oldstate *rgosp);
unsigned long get_state_synchronize_rcu(void);
void get_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
unsigned long start_poll_synchronize_rcu(void);
void start_poll_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
bool poll_state_synchronize_rcu(unsigned long oldstate);
bool poll_state_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);
void cond_synchronize_rcu(unsigned long oldstate);
void cond_synchronize_rcu_full(struct rcu_gp_oldstate *rgosp);

#ifdef CONFIG_PROVE_RCU
void rcu_irq_exit_check_preempt(void);
#else
static inline void rcu_irq_exit_check_preempt(void) { }
#endif

struct task_struct;
void rcu_preempt_deferred_qs(struct task_struct *t);

void exit_rcu(void);

void rcu_scheduler_starting(void);
extern int rcu_scheduler_active;
void rcu_end_inkernel_boot(void);
bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
#endif

/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
void rcutree_report_cpu_starting(unsigned int cpu);

#ifdef CONFIG_HOTPLUG_CPU
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
int rcutree_offline_cpu(unsigned int cpu);
#else
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
#define rcutree_offline_cpu NULL
#endif

void rcutree_migrate_callbacks(int cpu);

/* Called from hotplug and also arm64 early secondary boot failure */
void rcutree_report_cpu_dead(void);

#endif /* __LINUX_RCUTREE_H */