Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

docs/RCU/rcubarrier: Right-adjust line numbers in code snippets

Line numbers in code snippets in rcubarrier.rst have been left adjusted
since commit 4af498306ffd ("doc: Convert to rcubarrier.txt to ReST").
This might have been because right adjusting them had confused Sphinx.

The rules around a literal block in reST are:

- Need a blank line above it.
- A line with the same indent level as the line above it is regarded
as the end of it.

Those line numbers can be right adjusted by keeping indents at two-
digit numbers. While at it, add some spaces between the column of line
numbers and the code area for better readability.

Signed-off-by: Akira Yokosawa <akiyks@gmail.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>

authored by

Akira Yokosawa and committed by
Paul E. McKenney
eff86459 a75f7b48

+84 -84
+84 -84
Documentation/RCU/rcubarrier.rst
··· 72 72 call_srcu() on srcu_struct_2, then the following three lines of code 73 73 will be required when unloading:: 74 74 75 - 1 rcu_barrier(); 76 - 2 srcu_barrier(&srcu_struct_1); 77 - 3 srcu_barrier(&srcu_struct_2); 75 + 1 rcu_barrier(); 76 + 2 srcu_barrier(&srcu_struct_1); 77 + 3 srcu_barrier(&srcu_struct_2); 78 78 79 79 If latency is of the essence, workqueues could be used to run these 80 80 three functions concurrently. ··· 82 82 An ancient version of the rcutorture module makes use of rcu_barrier() 83 83 in its exit function as follows:: 84 84 85 - 1 static void 86 - 2 rcu_torture_cleanup(void) 87 - 3 { 88 - 4 int i; 89 - 5 90 - 6 fullstop = 1; 91 - 7 if (shuffler_task != NULL) { 92 - 8 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); 93 - 9 kthread_stop(shuffler_task); 94 - 10 } 95 - 11 shuffler_task = NULL; 85 + 1 static void 86 + 2 rcu_torture_cleanup(void) 87 + 3 { 88 + 4 int i; 89 + 5 90 + 6 fullstop = 1; 91 + 7 if (shuffler_task != NULL) { 92 + 8 VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task"); 93 + 9 kthread_stop(shuffler_task); 94 + 10 } 95 + 11 shuffler_task = NULL; 96 96 12 97 - 13 if (writer_task != NULL) { 98 - 14 VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task"); 99 - 15 kthread_stop(writer_task); 100 - 16 } 101 - 17 writer_task = NULL; 97 + 13 if (writer_task != NULL) { 98 + 14 VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task"); 99 + 15 kthread_stop(writer_task); 100 + 16 } 101 + 17 writer_task = NULL; 102 102 18 103 - 19 if (reader_tasks != NULL) { 104 - 20 for (i = 0; i < nrealreaders; i++) { 105 - 21 if (reader_tasks[i] != NULL) { 106 - 22 VERBOSE_PRINTK_STRING( 107 - 23 "Stopping rcu_torture_reader task"); 108 - 24 kthread_stop(reader_tasks[i]); 109 - 25 } 110 - 26 reader_tasks[i] = NULL; 111 - 27 } 112 - 28 kfree(reader_tasks); 113 - 29 reader_tasks = NULL; 114 - 30 } 115 - 31 rcu_torture_current = NULL; 103 + 19 if (reader_tasks != NULL) { 104 + 20 for (i = 0; i < nrealreaders; i++) { 105 + 21 if 
(reader_tasks[i] != NULL) { 106 + 22 VERBOSE_PRINTK_STRING( 107 + 23 "Stopping rcu_torture_reader task"); 108 + 24 kthread_stop(reader_tasks[i]); 109 + 25 } 110 + 26 reader_tasks[i] = NULL; 111 + 27 } 112 + 28 kfree(reader_tasks); 113 + 29 reader_tasks = NULL; 114 + 30 } 115 + 31 rcu_torture_current = NULL; 116 116 32 117 - 33 if (fakewriter_tasks != NULL) { 118 - 34 for (i = 0; i < nfakewriters; i++) { 119 - 35 if (fakewriter_tasks[i] != NULL) { 120 - 36 VERBOSE_PRINTK_STRING( 121 - 37 "Stopping rcu_torture_fakewriter task"); 122 - 38 kthread_stop(fakewriter_tasks[i]); 123 - 39 } 124 - 40 fakewriter_tasks[i] = NULL; 125 - 41 } 126 - 42 kfree(fakewriter_tasks); 127 - 43 fakewriter_tasks = NULL; 128 - 44 } 117 + 33 if (fakewriter_tasks != NULL) { 118 + 34 for (i = 0; i < nfakewriters; i++) { 119 + 35 if (fakewriter_tasks[i] != NULL) { 120 + 36 VERBOSE_PRINTK_STRING( 121 + 37 "Stopping rcu_torture_fakewriter task"); 122 + 38 kthread_stop(fakewriter_tasks[i]); 123 + 39 } 124 + 40 fakewriter_tasks[i] = NULL; 125 + 41 } 126 + 42 kfree(fakewriter_tasks); 127 + 43 fakewriter_tasks = NULL; 128 + 44 } 129 129 45 130 - 46 if (stats_task != NULL) { 131 - 47 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task"); 132 - 48 kthread_stop(stats_task); 133 - 49 } 134 - 50 stats_task = NULL; 130 + 46 if (stats_task != NULL) { 131 + 47 VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task"); 132 + 48 kthread_stop(stats_task); 133 + 49 } 134 + 50 stats_task = NULL; 135 135 51 136 - 52 /* Wait for all RCU callbacks to fire. */ 137 - 53 rcu_barrier(); 136 + 52 /* Wait for all RCU callbacks to fire. */ 137 + 53 rcu_barrier(); 138 138 54 139 - 55 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */ 139 + 55 rcu_torture_stats_print(); /* -After- the stats thread is stopped! 
*/ 140 140 56 141 - 57 if (cur_ops->cleanup != NULL) 142 - 58 cur_ops->cleanup(); 143 - 59 if (atomic_read(&n_rcu_torture_error)) 144 - 60 rcu_torture_print_module_parms("End of test: FAILURE"); 145 - 61 else 146 - 62 rcu_torture_print_module_parms("End of test: SUCCESS"); 147 - 63 } 141 + 57 if (cur_ops->cleanup != NULL) 142 + 58 cur_ops->cleanup(); 143 + 59 if (atomic_read(&n_rcu_torture_error)) 144 + 60 rcu_torture_print_module_parms("End of test: FAILURE"); 145 + 61 else 146 + 62 rcu_torture_print_module_parms("End of test: SUCCESS"); 147 + 63 } 148 148 149 149 Line 6 sets a global variable that prevents any RCU callbacks from 150 150 re-posting themselves. This will not be necessary in most cases, since ··· 193 193 194 194 The original code for rcu_barrier() was roughly as follows:: 195 195 196 - 1 void rcu_barrier(void) 197 - 2 { 198 - 3 BUG_ON(in_interrupt()); 199 - 4 /* Take cpucontrol mutex to protect against CPU hotplug */ 200 - 5 mutex_lock(&rcu_barrier_mutex); 201 - 6 init_completion(&rcu_barrier_completion); 202 - 7 atomic_set(&rcu_barrier_cpu_count, 1); 203 - 8 on_each_cpu(rcu_barrier_func, NULL, 0, 1); 204 - 9 if (atomic_dec_and_test(&rcu_barrier_cpu_count)) 205 - 10 complete(&rcu_barrier_completion); 196 + 1 void rcu_barrier(void) 197 + 2 { 198 + 3 BUG_ON(in_interrupt()); 199 + 4 /* Take cpucontrol mutex to protect against CPU hotplug */ 200 + 5 mutex_lock(&rcu_barrier_mutex); 201 + 6 init_completion(&rcu_barrier_completion); 202 + 7 atomic_set(&rcu_barrier_cpu_count, 1); 203 + 8 on_each_cpu(rcu_barrier_func, NULL, 0, 1); 204 + 9 if (atomic_dec_and_test(&rcu_barrier_cpu_count)) 205 + 10 complete(&rcu_barrier_completion); 206 206 11 wait_for_completion(&rcu_barrier_completion); 207 207 12 mutex_unlock(&rcu_barrier_mutex); 208 208 13 } ··· 232 232 The rcu_barrier_func() runs on each CPU, where it invokes call_rcu() 233 233 to post an RCU callback, as follows:: 234 234 235 - 1 static void rcu_barrier_func(void *notused) 236 - 2 { 237 - 3 int cpu = 
smp_processor_id(); 238 - 4 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 239 - 5 struct rcu_head *head; 240 - 6 241 - 7 head = &rdp->barrier; 242 - 8 atomic_inc(&rcu_barrier_cpu_count); 243 - 9 call_rcu(head, rcu_barrier_callback); 244 - 10 } 235 + 1 static void rcu_barrier_func(void *notused) 236 + 2 { 237 + 3 int cpu = smp_processor_id(); 238 + 4 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 239 + 5 struct rcu_head *head; 240 + 6 241 + 7 head = &rdp->barrier; 242 + 8 atomic_inc(&rcu_barrier_cpu_count); 243 + 9 call_rcu(head, rcu_barrier_callback); 244 + 10 } 245 245 246 246 Lines 3 and 4 locate RCU's internal per-CPU rcu_data structure, 247 247 which contains the struct rcu_head that needed for the later call to ··· 254 254 rcu_barrier_cpu_count variable and finalizes the completion when it 255 255 reaches zero, as follows:: 256 256 257 - 1 static void rcu_barrier_callback(struct rcu_head *notused) 258 - 2 { 259 - 3 if (atomic_dec_and_test(&rcu_barrier_cpu_count)) 260 - 4 complete(&rcu_barrier_completion); 261 - 5 } 257 + 1 static void rcu_barrier_callback(struct rcu_head *notused) 258 + 2 { 259 + 3 if (atomic_dec_and_test(&rcu_barrier_cpu_count)) 260 + 4 complete(&rcu_barrier_completion); 261 + 5 } 262 262 263 263 .. _rcubarrier_quiz_3: 264 264