Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

srcu: Provide polling interfaces for Tiny SRCU grace periods

There is a need for a polling interface for SRCU grace
periods, so this commit supplies get_state_synchronize_srcu(),
start_poll_synchronize_srcu(), and poll_state_synchronize_srcu() for this
purpose. The first can be used if future grace periods are inevitable
(perhaps due to a later call_srcu() invocation), the second if future
grace periods might not otherwise happen, and the third to check if a
grace period has elapsed since the corresponding call to either of the
first two.

As with get_state_synchronize_rcu() and cond_synchronize_rcu(),
the return value from either get_state_synchronize_srcu() or
start_poll_synchronize_srcu() must be passed in to a later call to
poll_state_synchronize_srcu().

Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/
Reported-by: Kent Overstreet <kent.overstreet@gmail.com>
[ paulmck: Add EXPORT_SYMBOL_GPL() per kernel test robot feedback. ]
[ paulmck: Apply feedback from Neeraj Upadhyay. ]
Link: https://lore.kernel.org/lkml/20201117004017.GA7444@paulmck-ThinkPad-P72/
Reviewed-by: Neeraj Upadhyay <neeraju@codeaurora.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>

+59 -2
+2
include/linux/rcupdate.h
··· 33 33 #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) 34 34 #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) 35 35 #define ulong2long(a) (*(long *)(&(a))) 36 + #define USHORT_CMP_GE(a, b) (USHRT_MAX / 2 >= (unsigned short)((a) - (b))) 37 + #define USHORT_CMP_LT(a, b) (USHRT_MAX / 2 < (unsigned short)((a) - (b))) 36 38 37 39 /* Exported common interfaces */ 38 40 void call_rcu(struct rcu_head *head, rcu_callback_t func);
+3
include/linux/srcu.h
··· 60 60 int __srcu_read_lock(struct srcu_struct *ssp) __acquires(ssp); 61 61 void __srcu_read_unlock(struct srcu_struct *ssp, int idx) __releases(ssp); 62 62 void synchronize_srcu(struct srcu_struct *ssp); 63 + unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp); 64 + unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp); 65 + bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie); 63 66 64 67 #ifdef CONFIG_DEBUG_LOCK_ALLOC 65 68
+1
include/linux/srcutiny.h
··· 16 16 struct srcu_struct { 17 17 short srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */ 18 18 unsigned short srcu_idx; /* Current reader array element in bit 0x2. */ 19 + unsigned short srcu_idx_max; /* Furthest future srcu_idx request. */ 19 20 u8 srcu_gp_running; /* GP workqueue running? */ 20 21 u8 srcu_gp_waiting; /* GP waiting for readers? */ 21 22 struct swait_queue_head srcu_wq;
+53 -2
kernel/rcu/srcutiny.c
··· 34 34 ssp->srcu_gp_running = false; 35 35 ssp->srcu_gp_waiting = false; 36 36 ssp->srcu_idx = 0; 37 + ssp->srcu_idx_max = 0; 37 38 INIT_WORK(&ssp->srcu_work, srcu_drive_gp); 38 39 INIT_LIST_HEAD(&ssp->srcu_work.entry); 39 40 return 0; ··· 85 84 WARN_ON(ssp->srcu_gp_waiting); 86 85 WARN_ON(ssp->srcu_cb_head); 87 86 WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail); 87 + WARN_ON(ssp->srcu_idx != ssp->srcu_idx_max); 88 + WARN_ON(ssp->srcu_idx & 0x1); 88 89 } 89 90 EXPORT_SYMBOL_GPL(cleanup_srcu_struct); 90 91 ··· 117 114 struct srcu_struct *ssp; 118 115 119 116 ssp = container_of(wp, struct srcu_struct, srcu_work); 120 - if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head)) 117 + if (ssp->srcu_gp_running || USHORT_CMP_GE(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) 121 118 return; /* Already running or nothing to do. */ 122 119 123 120 /* Remove recently arrived callbacks and wait for readers. */ ··· 150 147 * straighten that out. 151 148 */ 152 149 WRITE_ONCE(ssp->srcu_gp_running, false); 153 - if (READ_ONCE(ssp->srcu_cb_head)) 150 + if (USHORT_CMP_LT(ssp->srcu_idx, READ_ONCE(ssp->srcu_idx_max))) 154 151 schedule_work(&ssp->srcu_work); 155 152 } 156 153 EXPORT_SYMBOL_GPL(srcu_drive_gp); 157 154 158 155 static void srcu_gp_start_if_needed(struct srcu_struct *ssp) 159 156 { 157 + unsigned short cookie; 158 + 159 + cookie = get_state_synchronize_srcu(ssp); 160 + if (USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx_max), cookie)) 161 + return; 162 + WRITE_ONCE(ssp->srcu_idx_max, cookie); 160 163 if (!READ_ONCE(ssp->srcu_gp_running)) { 161 164 if (likely(srcu_init_done)) 162 165 schedule_work(&ssp->srcu_work); ··· 204 195 destroy_rcu_head_on_stack(&rs.head); 205 196 } 206 197 EXPORT_SYMBOL_GPL(synchronize_srcu); 198 + 199 + /* 200 + * get_state_synchronize_srcu - Provide an end-of-grace-period cookie 201 + */ 202 + unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp) 203 + { 204 + unsigned long ret; 205 + 206 + barrier(); 207 + ret = (READ_ONCE(ssp->srcu_idx) + 3) & ~0x1; 208 + barrier(); 209 + return ret & USHRT_MAX; 210 + } 211 + EXPORT_SYMBOL_GPL(get_state_synchronize_srcu); 212 + 213 + /* 214 + * start_poll_synchronize_srcu - Provide cookie and start grace period 215 + * 216 + * The difference between this and get_state_synchronize_srcu() is that 217 + * this function ensures that the poll_state_synchronize_srcu() will 218 + * eventually return the value true. 219 + */ 220 + unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp) 221 + { 222 + unsigned long ret = get_state_synchronize_srcu(ssp); 223 + 224 + srcu_gp_start_if_needed(ssp); 225 + return ret; 226 + } 227 + EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu); 228 + 229 + /* 230 + * poll_state_synchronize_srcu - Has cookie's grace period ended? 231 + */ 232 + bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie) 233 + { 234 + bool ret = USHORT_CMP_GE(READ_ONCE(ssp->srcu_idx), cookie); 235 + 236 + barrier(); 237 + return ret; 238 + } 239 + EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu); 207 240 208 241 /* Lockdep diagnostics. */ 209 242 void __init rcu_scheduler_starting(void)