// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion
 *
 * Copyright IBM Corporation, 2001
 *
 * Authors: Dipankar Sarma <dipankar@in.ibm.com>
 *          Manfred Spraul <manfred@colorfullife.com>
 *
 * Based on the original work by Paul McKenney <paulmck@linux.ibm.com>
 * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
 * Papers:
 * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
 * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 * http://lse.sourceforge.net/locking/rcupdate.html
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/hardirq.h>
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/tick.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched/isolation.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/irq_work.h>
#include <linux/rcupdate_trace.h>

#define CREATE_TRACE_POINTS

#include "rcu.h"

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "rcupdate."

#ifndef CONFIG_TINY_RCU
module_param(rcu_expedited, int, 0444);
module_param(rcu_normal, int, 0444);
static int rcu_normal_after_boot = IS_ENABLED(CONFIG_PREEMPT_RT);
#if !defined(CONFIG_PREEMPT_RT) || defined(CONFIG_NO_HZ_FULL)
module_param(rcu_normal_after_boot, int, 0444);
#endif
#endif /* #ifndef CONFIG_TINY_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/**
 * rcu_read_lock_held_common() - might we be in RCU-sched read-side critical section?
 * @ret: Best guess answer if lockdep cannot be relied on
 *
 * Returns true if lockdep must be ignored, in which case ``*ret`` contains
 * the best guess described below.  Otherwise returns false, in which
 * case ``*ret`` tells the caller nothing and the caller should instead
 * consult lockdep.
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, set ``*ret`` to nonzero iff in an
 * RCU-sched read-side critical section.  In absence of
 * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side
 * critical section unless it can prove otherwise.  Note that disabling
 * of preemption (including disabling irqs) counts as an RCU-sched
 * read-side critical section.  This is useful for debug checks in functions
 * that require that they be called within an RCU-sched read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that if the CPU is in the idle loop from an RCU point of view (that
 * is, in the section between rcu_idle_enter() and rcu_idle_exit()), then
 * rcu_read_lock_held() sets ``*ret`` to false even if the CPU did an
 * rcu_read_lock().  The reason for this is that RCU ignores CPUs that are
 * in such a section, considering these as in extended quiescent state,
 * so such a CPU is effectively never in an RCU read-side critical section
 * regardless of what RCU primitives it invokes.  This state of affairs is
 * required: we need to keep an RCU-free window in idle where the CPU may
 * possibly enter into low power mode.  This way, CPUs that have started a
 * grace period can notice the extended quiescent state.  Otherwise any
 * grace period would be delayed for as long as we run in the idle task.
 *
 * Similarly, we avoid claiming an RCU read lock held if the current
 * CPU is offline.
 */
static bool rcu_read_lock_held_common(bool *ret)
{
        if (!debug_lockdep_rcu_enabled()) {
                *ret = true;
                return true;
        }
        if (!rcu_is_watching()) {
                *ret = false;
                return true;
        }
        if (!rcu_lockdep_current_cpu_online()) {
                *ret = false;
                return true;
        }
        return false;
}

int rcu_read_lock_sched_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return lock_is_held(&rcu_sched_lock_map) || !preemptible();
}
EXPORT_SYMBOL(rcu_read_lock_sched_held);
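
/*
 * Illustrative sketch (the function name below is an assumption, not
 * from this file): a debug check in code that must run within an
 * RCU-sched read-side critical section would typically use the helper
 * above via RCU_LOCKDEP_WARN():
 *
 *      RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held(),
 *                       "my_func() needs rcu_read_lock_sched() protection");
 */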
#endif

#ifndef CONFIG_TINY_RCU

/*
 * Should expedited grace-period primitives always fall back to their
 * non-expedited counterparts?  Intended for use within RCU.  Note
 * that if the user specifies both rcu_expedited and rcu_normal, then
 * rcu_normal wins.  (Except during the boot-time window from when the
 * first task is spawned until the rcu_set_runtime_mode() core_initcall()
 * is invoked, during which everything is expedited.)
 */
bool rcu_gp_is_normal(void)
{
        return READ_ONCE(rcu_normal) &&
               rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);

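/*
 * This nesting count starts at 1 so that grace periods remain expedited
 * throughout early boot; rcu_end_inkernel_boot() below drops that
 * initial reference via rcu_unexpedite_gp().
 */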
static atomic_t rcu_expedited_nesting = ATOMIC_INIT(1);

/*
 * Should normal grace-period primitives be expedited?  Intended for
 * use within RCU.  Note that this function takes the rcu_expedited
 * sysfs/boot variable and rcu_scheduler_active into account as well
 * as the rcu_expedite_gp() nesting.  So looping on rcu_unexpedite_gp()
 * until rcu_gp_is_expedited() returns false is a -really- bad idea.
 */
bool rcu_gp_is_expedited(void)
{
        return rcu_expedited || atomic_read(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);

/**
 * rcu_expedite_gp - Expedite future RCU grace periods
 *
 * After a call to this function, future calls to synchronize_rcu() and
 * friends act as if the corresponding synchronize_rcu_expedited()
 * function had instead been called.
 */
void rcu_expedite_gp(void)
{
        atomic_inc(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_expedite_gp);

/**
 * rcu_unexpedite_gp - Cancel prior rcu_expedite_gp() invocation
 *
 * Undo a prior call to rcu_expedite_gp().  If all prior calls to
 * rcu_expedite_gp() are undone by a subsequent call to rcu_unexpedite_gp(),
 * and if the rcu_expedited sysfs/boot parameter is not set, then all
 * subsequent calls to synchronize_rcu() and friends will return to
 * their normal non-expedited behavior.
 */
void rcu_unexpedite_gp(void)
{
        atomic_dec(&rcu_expedited_nesting);
}
EXPORT_SYMBOL_GPL(rcu_unexpedite_gp);
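
/*
 * A minimal usage sketch (illustrative, not taken from this file): code
 * that needs fast grace periods across a bounded region can bracket it
 * with the pair above:
 *
 *      rcu_expedite_gp();
 *      synchronize_rcu();      // behaves like synchronize_rcu_expedited()
 *      rcu_unexpedite_gp();
 *
 * The calls nest, so expediting stays in force until every
 * rcu_expedite_gp() has been matched by an rcu_unexpedite_gp().
 */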

static bool rcu_boot_ended __read_mostly;

/*
 * Inform RCU of the end of the in-kernel boot sequence.
 */
void rcu_end_inkernel_boot(void)
{
        rcu_unexpedite_gp();
        if (rcu_normal_after_boot)
                WRITE_ONCE(rcu_normal, 1);
        rcu_boot_ended = true;
}

/*
 * Let rcutorture know when it is OK to turn it up to eleven.
 */
bool rcu_inkernel_boot_has_ended(void)
{
        return rcu_boot_ended;
}
EXPORT_SYMBOL_GPL(rcu_inkernel_boot_has_ended);

#endif /* #ifndef CONFIG_TINY_RCU */

/*
 * Test each non-SRCU synchronous grace-period wait API.  This is
 * useful just after a change in mode for these primitives, and
 * during early boot.
 */
void rcu_test_sync_prims(void)
{
        if (!IS_ENABLED(CONFIG_PROVE_RCU))
                return;
        synchronize_rcu();
        synchronize_rcu_expedited();
}

#if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU)

/*
 * Switch to run-time mode once RCU has fully initialized.
 */
static int __init rcu_set_runtime_mode(void)
{
        rcu_test_sync_prims();
        rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
        kfree_rcu_scheduler_running();
        rcu_test_sync_prims();
        return 0;
}
core_initcall(rcu_set_runtime_mode);

#endif /* #if !defined(CONFIG_TINY_RCU) || defined(CONFIG_SRCU) */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
struct lockdep_map rcu_lock_map = {
        .name = "rcu_read_lock",
        .key = &rcu_lock_key,
        .wait_type_outer = LD_WAIT_FREE,
        .wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT implies PREEMPT_RCU */
};
EXPORT_SYMBOL_GPL(rcu_lock_map);

static struct lock_class_key rcu_bh_lock_key;
struct lockdep_map rcu_bh_lock_map = {
        .name = "rcu_read_lock_bh",
        .key = &rcu_bh_lock_key,
        .wait_type_outer = LD_WAIT_FREE,
        .wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
};
EXPORT_SYMBOL_GPL(rcu_bh_lock_map);

static struct lock_class_key rcu_sched_lock_key;
struct lockdep_map rcu_sched_lock_map = {
        .name = "rcu_read_lock_sched",
        .key = &rcu_sched_lock_key,
        .wait_type_outer = LD_WAIT_FREE,
        .wait_type_inner = LD_WAIT_SPIN,
};
EXPORT_SYMBOL_GPL(rcu_sched_lock_map);

// Tell lockdep when RCU callbacks are being invoked.
static struct lock_class_key rcu_callback_key;
struct lockdep_map rcu_callback_map =
        STATIC_LOCKDEP_MAP_INIT("rcu_callback", &rcu_callback_key);
EXPORT_SYMBOL_GPL(rcu_callback_map);
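
/*
 * Hedged note (not stated in this file): rcu_callback_map is acquired
 * and released around RCU callback invocation (for example in
 * rcu_do_batch() in tree.c), so that lockdep can attribute problems
 * such as sleeping or illegal locking to RCU callback context.
 */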

noinstr int notrace debug_lockdep_rcu_enabled(void)
{
        return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && READ_ONCE(debug_locks) &&
               current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);

/**
 * rcu_read_lock_held() - might we be in RCU read-side critical section?
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an RCU read-side critical section unless it can
 * prove otherwise.  This is useful for debug checks in functions that
 * require that they be called within an RCU read-side critical section.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that rcu_read_lock() and the matching rcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * rcu_read_unlock() in process context if the matching rcu_read_lock()
 * was invoked from within an irq handler.
 *
 * Note that rcu_read_lock() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return lock_is_held(&rcu_lock_map);
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
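
/*
 * Illustrative sketch (the pointer and lock names are assumptions, not
 * from this file): rcu_read_lock_held() is typically consumed by
 * rcu_dereference_check(), which accepts either RCU protection or a
 * caller-supplied update-side lock:
 *
 *      p = rcu_dereference_check(gp,
 *                                rcu_read_lock_held() ||
 *                                lockdep_is_held(&my_lock));
 */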

/**
 * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
 *
 * Check for bottom half being disabled, which covers both the
 * CONFIG_PROVE_RCU and the !CONFIG_PROVE_RCU cases.  Note that if someone
 * uses rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
 * will catch and report the situation.  This is useful for debug checks in
 * functions that require that they be called within an RCU read-side
 * critical section.
 *
 * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
 *
 * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
 * offline from an RCU perspective, so check for those as well.
 */
int rcu_read_lock_bh_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        return in_softirq() || irqs_disabled();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);

int rcu_read_lock_any_held(void)
{
        bool ret;

        if (rcu_read_lock_held_common(&ret))
                return ret;
        if (lock_is_held(&rcu_lock_map) ||
            lock_is_held(&rcu_bh_lock_map) ||
            lock_is_held(&rcu_sched_lock_map))
                return 1;
        return !preemptible();
}
EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * wakeme_after_rcu() - Callback function to awaken a task after grace period
 * @head: Pointer to rcu_head member within rcu_synchronize structure
 *
 * Awaken the corresponding task now that a grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
{
        struct rcu_synchronize *rcu;

        rcu = container_of(head, struct rcu_synchronize, head);
        complete(&rcu->completion);
}
EXPORT_SYMBOL_GPL(wakeme_after_rcu);

void __wait_rcu_gp(bool checktiny, int n, call_rcu_func_t *crcu_array,
                   struct rcu_synchronize *rs_array)
{
        int i;
        int j;

        /* Initialize and register callbacks for each crcu_array element. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu)) {
                        might_sleep();
                        continue;
                }
                for (j = 0; j < i; j++)
                        if (crcu_array[j] == crcu_array[i])
                                break;
                if (j == i) {
                        init_rcu_head_on_stack(&rs_array[i].head);
                        init_completion(&rs_array[i].completion);
                        (crcu_array[i])(&rs_array[i].head, wakeme_after_rcu);
                }
        }

        /* Wait for all callbacks to be invoked. */
        for (i = 0; i < n; i++) {
                if (checktiny &&
                    (crcu_array[i] == call_rcu))
                        continue;
                for (j = 0; j < i; j++)
                        if (crcu_array[j] == crcu_array[i])
                                break;
                if (j == i) {
                        wait_for_completion(&rs_array[i].completion);
                        destroy_rcu_head_on_stack(&rs_array[i].head);
                }
        }
}
EXPORT_SYMBOL_GPL(__wait_rcu_gp);
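
/*
 * Note: callers normally reach __wait_rcu_gp() through the wait_rcu_gp()
 * and synchronize_rcu_mult() wrappers in <linux/rcupdate_wait.h>, which
 * build the on-stack rcu_synchronize array.  The inner loops above skip
 * duplicate crcu_array[] entries, so each distinct grace-period flavor
 * is registered and waited for only once.  A minimal direct-use sketch
 * (illustrative only), equivalent to wait_rcu_gp(call_rcu):
 *
 *      struct rcu_synchronize rs;
 *      call_rcu_func_t crf = call_rcu;
 *
 *      __wait_rcu_gp(false, 1, &crf, &rs);
 */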

void finish_rcuwait(struct rcuwait *w)
{
        rcu_assign_pointer(w->task, NULL);
        __set_current_state(TASK_RUNNING);
}
EXPORT_SYMBOL_GPL(finish_rcuwait);
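
/*
 * A hedged usage sketch for the rcuwait API (the pairing below is
 * illustrative, not taken from this file; see <linux/rcuwait.h>):
 *
 *      struct rcuwait w;
 *
 *      rcuwait_init(&w);
 *      rcuwait_wait_event(&w, condition, TASK_UNINTERRUPTIBLE);
 *
 * with the waker side calling rcuwait_wake_up(&w).  finish_rcuwait()
 * above clears w->task and restores the running task state after such
 * a wait completes or is aborted.
 */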

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
void init_rcu_head(struct rcu_head *head)
{
        debug_object_init(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head);

void destroy_rcu_head(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head);

static bool rcuhead_is_static_object(void *addr)
{
        return true;
}

/**
 * init_rcu_head_on_stack() - initialize on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure to be initialized
 *
 * This function informs debugobjects of a new rcu_head structure that
 * has been allocated as an auto variable on the stack.  This function
 * is not required for rcu_head structures that are statically defined or
 * that are dynamically allocated on the heap.  This function has no
 * effect for !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void init_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_init_on_stack(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(init_rcu_head_on_stack);

/**
 * destroy_rcu_head_on_stack() - destroy on-stack rcu_head for debugobjects
 * @head: pointer to rcu_head structure that is about to go out of scope
 *
 * This function informs debugobjects that an on-stack rcu_head structure
 * is about to go out of scope.  As with init_rcu_head_on_stack(), this
 * function is not required for rcu_head structures that are statically
 * defined or that are dynamically allocated on the heap.  Also as with
 * init_rcu_head_on_stack(), this function has no effect for
 * !CONFIG_DEBUG_OBJECTS_RCU_HEAD kernel builds.
 */
void destroy_rcu_head_on_stack(struct rcu_head *head)
{
        debug_object_free(head, &rcuhead_debug_descr);
}
EXPORT_SYMBOL_GPL(destroy_rcu_head_on_stack);
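
/*
 * A minimal sketch of the on-stack pattern that these two helpers
 * bracket; this is essentially what __wait_rcu_gp() above does for
 * each array element:
 *
 *      struct rcu_synchronize rcu;
 *
 *      init_rcu_head_on_stack(&rcu.head);
 *      init_completion(&rcu.completion);
 *      call_rcu(&rcu.head, wakeme_after_rcu);
 *      wait_for_completion(&rcu.completion);
 *      destroy_rcu_head_on_stack(&rcu.head);
 */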

const struct debug_obj_descr rcuhead_debug_descr = {
        .name = "rcu_head",
        .is_static_object = rcuhead_is_static_object,
};
EXPORT_SYMBOL_GPL(rcuhead_debug_descr);
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_RCU_TRACE)
void do_trace_rcu_torture_read(const char *rcutorturename, struct rcu_head *rhp,
                               unsigned long secs,
                               unsigned long c_old, unsigned long c)
{
        trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c);
}
EXPORT_SYMBOL_GPL(do_trace_rcu_torture_read);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
        do { } while (0)
#endif

#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST)
/* Get rcutorture access to sched_setaffinity(). */
long rcutorture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
{
        int ret;

        ret = sched_setaffinity(pid, in_mask);
        WARN_ONCE(ret, "%s: sched_setaffinity() returned %d\n", __func__, ret);
        return ret;
}
EXPORT_SYMBOL_GPL(rcutorture_sched_setaffinity);
#endif

#ifdef CONFIG_RCU_STALL_COMMON
int rcu_cpu_stall_ftrace_dump __read_mostly;
module_param(rcu_cpu_stall_ftrace_dump, int, 0644);
int rcu_cpu_stall_suppress __read_mostly; // !0 = suppress stall warnings.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress);
module_param(rcu_cpu_stall_suppress, int, 0644);
int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
module_param(rcu_cpu_stall_timeout, int, 0644);
#endif /* #ifdef CONFIG_RCU_STALL_COMMON */

// Suppress boot-time RCU CPU stall warnings and rcutorture writer stall
// warnings.  Also used by rcutorture even if stall warnings are excluded.
int rcu_cpu_stall_suppress_at_boot __read_mostly; // !0 = suppress boot stalls.
EXPORT_SYMBOL_GPL(rcu_cpu_stall_suppress_at_boot);
module_param(rcu_cpu_stall_suppress_at_boot, int, 0444);

#ifdef CONFIG_PROVE_RCU

/*
 * Early boot self test parameters.
 */
static bool rcu_self_test;
module_param(rcu_self_test, bool, 0444);

static int rcu_self_test_counter;

static void test_callback(struct rcu_head *r)
{
        rcu_self_test_counter++;
        pr_info("RCU test callback executed %d\n", rcu_self_test_counter);
}

DEFINE_STATIC_SRCU(early_srcu);
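/*
 * Grace-period cookie obtained from start_poll_synchronize_srcu() in
 * early_boot_test_call_rcu() and later checked against
 * poll_state_synchronize_srcu() in rcu_verify_early_boot_tests().
 */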
static unsigned long early_srcu_cookie;

struct early_boot_kfree_rcu {
        struct rcu_head rh;
};

static void early_boot_test_call_rcu(void)
{
        static struct rcu_head head;
        static struct rcu_head shead;
        struct early_boot_kfree_rcu *rhp;

        call_rcu(&head, test_callback);
        if (IS_ENABLED(CONFIG_SRCU)) {
                early_srcu_cookie = start_poll_synchronize_srcu(&early_srcu);
                call_srcu(&early_srcu, &shead, test_callback);
        }
        rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
        if (!WARN_ON_ONCE(!rhp))
                kfree_rcu(rhp, rh);
}

void rcu_early_boot_tests(void)
{
        pr_info("Running RCU self tests\n");

        if (rcu_self_test)
                early_boot_test_call_rcu();
        rcu_test_sync_prims();
}

static int rcu_verify_early_boot_tests(void)
{
        int ret = 0;
        int early_boot_test_counter = 0;

        if (rcu_self_test) {
                early_boot_test_counter++;
                rcu_barrier();
                if (IS_ENABLED(CONFIG_SRCU)) {
                        early_boot_test_counter++;
                        srcu_barrier(&early_srcu);
                        WARN_ON_ONCE(!poll_state_synchronize_srcu(&early_srcu, early_srcu_cookie));
                }
        }
        if (rcu_self_test_counter != early_boot_test_counter) {
                WARN_ON(1);
                ret = -1;
        }

        return ret;
}
late_initcall(rcu_verify_early_boot_tests);
#else
void rcu_early_boot_tests(void) {}
#endif /* CONFIG_PROVE_RCU */

#include "tasks.h"

#ifndef CONFIG_TINY_RCU

/*
 * Print any significant non-default boot-time settings.
 */
void __init rcupdate_announce_bootup_oddness(void)
{
        if (rcu_normal)
                pr_info("\tNo expedited grace period (rcu_normal).\n");
        else if (rcu_normal_after_boot)
                pr_info("\tNo expedited grace period (rcu_normal_after_boot).\n");
        else if (rcu_expedited)
                pr_info("\tAll grace periods are expedited (rcu_expedited).\n");
        if (rcu_cpu_stall_suppress)
                pr_info("\tRCU CPU stall warnings suppressed (rcu_cpu_stall_suppress).\n");
        if (rcu_cpu_stall_timeout != CONFIG_RCU_CPU_STALL_TIMEOUT)
                pr_info("\tRCU CPU stall warnings timeout set to %d (rcu_cpu_stall_timeout).\n", rcu_cpu_stall_timeout);
        rcu_tasks_bootup_oddness();
}

#endif /* #ifndef CONFIG_TINY_RCU */