Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

srcu: Provide internal interface to start a Tree SRCU grace period

There is a need for a polling interface for SRCU grace periods.
This polling needs to initiate an SRCU grace period without having
to queue (and manage) a callback. This commit therefore splits the
Tree SRCU __call_srcu() function into callback-initialization and
queuing/start-grace-period portions, with the latter in a new function
named srcu_gp_start_if_needed(). This function may be passed a NULL
callback pointer, in which case it will refrain from queuing anything.

Why have the new function mess with queuing? Locking considerations,
of course!

Link: https://lore.kernel.org/rcu/20201112201547.GF3365678@moria.home.lan/
Reported-by: Kent Overstreet <kent.overstreet@gmail.com>
Reviewed-by: Neeraj Upadhyay <neeraju@codeaurora.org>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>

+37 -29
kernel/rcu/srcutree.c
··· 808 808 } 809 809 810 810 /* 811 + * Start an SRCU grace period, and also queue the callback if non-NULL. 812 + */ 813 + static void srcu_gp_start_if_needed(struct srcu_struct *ssp, struct rcu_head *rhp, bool do_norm) 814 + { 815 + unsigned long flags; 816 + int idx; 817 + bool needexp = false; 818 + bool needgp = false; 819 + unsigned long s; 820 + struct srcu_data *sdp; 821 + 822 + idx = srcu_read_lock(ssp); 823 + sdp = raw_cpu_ptr(ssp->sda); 824 + spin_lock_irqsave_rcu_node(sdp, flags); 825 + rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); 826 + rcu_segcblist_advance(&sdp->srcu_cblist, 827 + rcu_seq_current(&ssp->srcu_gp_seq)); 828 + s = rcu_seq_snap(&ssp->srcu_gp_seq); 829 + (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); 830 + if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { 831 + sdp->srcu_gp_seq_needed = s; 832 + needgp = true; 833 + } 834 + if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { 835 + sdp->srcu_gp_seq_needed_exp = s; 836 + needexp = true; 837 + } 838 + spin_unlock_irqrestore_rcu_node(sdp, flags); 839 + if (needgp) 840 + srcu_funnel_gp_start(ssp, sdp, s, do_norm); 841 + else if (needexp) 842 + srcu_funnel_exp_start(ssp, sdp->mynode, s); 843 + srcu_read_unlock(ssp, idx); 844 + } 845 + 846 + /* 811 847 * Enqueue an SRCU callback on the srcu_data structure associated with 812 848 * the current CPU and the specified srcu_struct structure, initiating 813 849 * grace-period processing if it is not already running. ··· 874 838 static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp, 875 839 rcu_callback_t func, bool do_norm) 876 840 { 877 - unsigned long flags; 878 - int idx; 879 - bool needexp = false; 880 - bool needgp = false; 881 - unsigned long s; 882 - struct srcu_data *sdp; 883 - 884 841 check_init_srcu_struct(ssp); 885 842 if (debug_rcu_head_queue(rhp)) { 886 843 /* Probable double call_srcu(), so leak the callback. 
*/ ··· 882 853 return; 883 854 } 884 855 rhp->func = func; 885 - idx = srcu_read_lock(ssp); 886 - sdp = raw_cpu_ptr(ssp->sda); 887 - spin_lock_irqsave_rcu_node(sdp, flags); 888 - rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp); 889 - rcu_segcblist_advance(&sdp->srcu_cblist, 890 - rcu_seq_current(&ssp->srcu_gp_seq)); 891 - s = rcu_seq_snap(&ssp->srcu_gp_seq); 892 - (void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s); 893 - if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) { 894 - sdp->srcu_gp_seq_needed = s; 895 - needgp = true; 896 - } 897 - if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) { 898 - sdp->srcu_gp_seq_needed_exp = s; 899 - needexp = true; 900 - } 901 - spin_unlock_irqrestore_rcu_node(sdp, flags); 902 - if (needgp) 903 - srcu_funnel_gp_start(ssp, sdp, s, do_norm); 904 - else if (needexp) 905 - srcu_funnel_exp_start(ssp, sdp->mynode, s); 906 - srcu_read_unlock(ssp, idx); 856 + srcu_gp_start_if_needed(ssp, rhp, do_norm); 907 857 } 908 858 909 859 /**