// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based torture test facility
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 *	  Josh Triplett <josh@joshtriplett.org>
 *
 * See also:  Documentation/RCU/torture.rst
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate_wait.h>
#include <linux/rcu_notifier.h>
#include <linux/interrupt.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/trace_clock.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/sched/debug.h>
#include <linux/sched/sysctl.h>
#include <linux/oom.h>
#include <linux/tick.h>
#include <linux/rcupdate_trace.h>
#include <linux/nmi.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@joshtriplett.org>");

/* Bits for ->extendables field, extendables param, and related definitions. */
#define RCUTORTURE_RDR_SHIFT_1	 8	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_1	 (1 << RCUTORTURE_RDR_SHIFT_1)
#define RCUTORTURE_RDR_SHIFT_2	 9	/* Put SRCU index in upper bits. */
#define RCUTORTURE_RDR_MASK_2	 (1 << RCUTORTURE_RDR_SHIFT_2)
#define RCUTORTURE_RDR_BH	 0x01	/* Extend readers by disabling bh. */
#define RCUTORTURE_RDR_IRQ	 0x02	/*  ... disabling interrupts. */
#define RCUTORTURE_RDR_PREEMPT	 0x04	/*  ... disabling preemption. */
#define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
#define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
#define RCUTORTURE_RDR_RCU_1	 0x20	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_RCU_2	 0x40	/*  ... entering another RCU reader. */
#define RCUTORTURE_RDR_NBITS	 7	/* Number of bits defined above. */
#define RCUTORTURE_MAX_EXTEND	 \
	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
#define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
					/* Must be power of two minus one. */
#define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
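/*
 * Illustrative example (not used by the test itself): a reader segment
 * that nests an rcu_read_lock() inside a bh-disabled region with irqs
 * also disabled would be described by the mask
 *
 *	RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_RCU_1
 *
 * which evaluates to 0x01 | 0x02 | 0x20 == 0x23.  The SRCU reader
 * indexes ride along in bits 8 and 9 via RCUTORTURE_RDR_SHIFT_1 and
 * RCUTORTURE_RDR_SHIFT_2.
 */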

torture_param(int, extendables, RCUTORTURE_MAX_EXTEND,
	      "Extend readers by disabling bh (1), irqs (2), or preempt (4)");
torture_param(int, fqs_duration, 0, "Duration of fqs bursts (us), 0 to disable");
torture_param(int, fqs_holdoff, 0, "Holdoff time within fqs bursts (us)");
torture_param(int, fqs_stutter, 3, "Wait time between fqs bursts (s)");
torture_param(int, fwd_progress, 1, "Number of grace-period forward progress tasks (0 to disable)");
torture_param(int, fwd_progress_div, 4, "Fraction of CPU stall to wait");
torture_param(int, fwd_progress_holdoff, 60, "Time between forward-progress tests (s)");
torture_param(bool, fwd_progress_need_resched, 1, "Hide cond_resched() behind need_resched()");
torture_param(bool, gp_cond, false, "Use conditional/async GP wait primitives");
torture_param(bool, gp_cond_exp, false, "Use conditional/async expedited GP wait primitives");
torture_param(bool, gp_cond_full, false, "Use conditional/async full-state GP wait primitives");
torture_param(bool, gp_cond_exp_full, false,
	      "Use conditional/async full-state expedited GP wait primitives");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(bool, gp_normal, false, "Use normal (non-expedited) GP wait primitives");
torture_param(bool, gp_poll, false, "Use polling GP wait primitives");
torture_param(bool, gp_poll_exp, false, "Use polling expedited GP wait primitives");
torture_param(bool, gp_poll_full, false, "Use polling full-state GP wait primitives");
torture_param(bool, gp_poll_exp_full, false, "Use polling full-state expedited GP wait primitives");
torture_param(bool, gp_sync, false, "Use synchronous GP wait primitives");
torture_param(int, irqreader, 1, "Allow RCU readers from irq handlers");
torture_param(int, leakpointer, 0, "Leak pointer dereferences from readers");
torture_param(int, n_barrier_cbs, 0, "# of callbacks/kthreads for barrier testing");
torture_param(int, nfakewriters, 4, "Number of RCU fake writer threads");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, object_debug, 0, "Enable debug-object double call_rcu() testing");
torture_param(int, onoff_holdoff, 0, "Time after boot before CPU hotplugs (s)");
torture_param(int, onoff_interval, 0, "Time between CPU hotplugs (jiffies), 0=disable");
torture_param(int, nocbs_nthreads, 0, "Number of NOCB toggle threads, 0 to disable");
torture_param(int, nocbs_toggle, 1000, "Time between toggling nocb state (ms)");
torture_param(int, read_exit_delay, 13, "Delay between read-then-exit episodes (s)");
torture_param(int, read_exit_burst, 16, "# of read-then-exit bursts per episode, zero to disable");
torture_param(int, shuffle_interval, 3, "Number of seconds between shuffles");
torture_param(int, shutdown_secs, 0, "Shutdown time (s), <= zero to disable.");
torture_param(int, stall_cpu, 0, "Stall duration (s), zero to disable.");
torture_param(int, stall_cpu_holdoff, 10, "Time to wait before starting stall (s).");
torture_param(bool, stall_no_softlockup, false, "Avoid softlockup warning during cpu stall.");
torture_param(int, stall_cpu_irqsoff, 0, "Disable interrupts while stalling.");
torture_param(int, stall_cpu_block, 0, "Sleep while stalling.");
torture_param(int, stall_gp_kthread, 0, "Grace-period kthread stall duration (s).");
torture_param(int, stat_interval, 60, "Number of seconds between stats printk()s");
torture_param(int, stutter, 5, "Number of seconds to run/halt test");
torture_param(int, test_boost, 1, "Test RCU prio boost: 0=no, 1=maybe, 2=yes.");
torture_param(int, test_boost_duration, 4, "Duration of each boost test, seconds.");
torture_param(int, test_boost_interval, 7, "Interval between boost tests, seconds.");
torture_param(int, test_nmis, 0, "End-test NMI tests, 0 to disable.");
torture_param(bool, test_no_idle_hz, true, "Test support for tickless idle CPUs");
torture_param(int, test_srcu_lockdep, 0, "Test specified SRCU deadlock scenario.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

static char *torture_type = "rcu";
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, srcu, ...)");

static int nrealnocbers;
static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct **nocb_tasks;
static struct task_struct *stats_task;
static struct task_struct *fqs_task;
static struct task_struct *boost_tasks[NR_CPUS];
static struct task_struct *stall_task;
static struct task_struct **fwd_prog_tasks;
static struct task_struct **barrier_cbs_tasks;
static struct task_struct *barrier_task;
static struct task_struct *read_exit_task;

#define RCU_TORTURE_PIPE_LEN 10

// Mailbox-like structure to check RCU global memory ordering.
struct rcu_torture_reader_check {
	unsigned long rtc_myloops;
	int rtc_chkrdr;
	unsigned long rtc_chkloops;
	int rtc_ready;
	struct rcu_torture_reader_check *rtc_assigner;
} ____cacheline_internodealigned_in_smp;
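/*
 * A plausible reading of the fields above (sketch, not a specification):
 * each reader counts its own loops in ->rtc_myloops, records the index
 * and loop count of the reader it is checking in ->rtc_chkrdr and
 * ->rtc_chkloops, and ->rtc_ready is published with smp_store_release()
 * (see rcu_torture_pipe_update_one()) so as to pair with the checking
 * reader's smp_load_acquire().
 */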
157
158 // Update-side data structure used to check RCU readers.
159 struct rcu_torture {
160 struct rcu_head rtort_rcu;
161 int rtort_pipe_count;
162 struct list_head rtort_free;
163 int rtort_mbtest;
164 struct rcu_torture_reader_check *rtort_chkp;
165 };
166
167 static LIST_HEAD(rcu_torture_freelist);
168 static struct rcu_torture __rcu *rcu_torture_current;
169 static unsigned long rcu_torture_current_version;
170 static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
171 static DEFINE_SPINLOCK(rcu_torture_lock);
172 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count);
173 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch);
174 static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
175 static struct rcu_torture_reader_check *rcu_torture_reader_mbchk;
176 static atomic_t n_rcu_torture_alloc;
177 static atomic_t n_rcu_torture_alloc_fail;
178 static atomic_t n_rcu_torture_free;
179 static atomic_t n_rcu_torture_mberror;
180 static atomic_t n_rcu_torture_mbchk_fail;
181 static atomic_t n_rcu_torture_mbchk_tries;
182 static atomic_t n_rcu_torture_error;
183 static long n_rcu_torture_barrier_error;
184 static long n_rcu_torture_boost_ktrerror;
185 static long n_rcu_torture_boost_failure;
186 static long n_rcu_torture_boosts;
187 static atomic_long_t n_rcu_torture_timers;
188 static long n_barrier_attempts;
189 static long n_barrier_successes; /* did rcu_barrier test succeed? */
190 static unsigned long n_read_exits;
191 static struct list_head rcu_torture_removed;
192 static unsigned long shutdown_jiffies;
193 static unsigned long start_gp_seq;
194 static atomic_long_t n_nocb_offload;
195 static atomic_long_t n_nocb_deoffload;
196
197 static int rcu_torture_writer_state;
198 #define RTWS_FIXED_DELAY 0
199 #define RTWS_DELAY 1
200 #define RTWS_REPLACE 2
201 #define RTWS_DEF_FREE 3
202 #define RTWS_EXP_SYNC 4
203 #define RTWS_COND_GET 5
204 #define RTWS_COND_GET_FULL 6
205 #define RTWS_COND_GET_EXP 7
206 #define RTWS_COND_GET_EXP_FULL 8
207 #define RTWS_COND_SYNC 9
208 #define RTWS_COND_SYNC_FULL 10
209 #define RTWS_COND_SYNC_EXP 11
210 #define RTWS_COND_SYNC_EXP_FULL 12
211 #define RTWS_POLL_GET 13
212 #define RTWS_POLL_GET_FULL 14
213 #define RTWS_POLL_GET_EXP 15
214 #define RTWS_POLL_GET_EXP_FULL 16
215 #define RTWS_POLL_WAIT 17
216 #define RTWS_POLL_WAIT_FULL 18
217 #define RTWS_POLL_WAIT_EXP 19
218 #define RTWS_POLL_WAIT_EXP_FULL 20
219 #define RTWS_SYNC 21
220 #define RTWS_STUTTER 22
221 #define RTWS_STOPPING 23
222 static const char * const rcu_torture_writer_state_names[] = {
223 "RTWS_FIXED_DELAY",
224 "RTWS_DELAY",
225 "RTWS_REPLACE",
226 "RTWS_DEF_FREE",
227 "RTWS_EXP_SYNC",
228 "RTWS_COND_GET",
229 "RTWS_COND_GET_FULL",
230 "RTWS_COND_GET_EXP",
231 "RTWS_COND_GET_EXP_FULL",
232 "RTWS_COND_SYNC",
233 "RTWS_COND_SYNC_FULL",
234 "RTWS_COND_SYNC_EXP",
235 "RTWS_COND_SYNC_EXP_FULL",
236 "RTWS_POLL_GET",
237 "RTWS_POLL_GET_FULL",
238 "RTWS_POLL_GET_EXP",
239 "RTWS_POLL_GET_EXP_FULL",
240 "RTWS_POLL_WAIT",
241 "RTWS_POLL_WAIT_FULL",
242 "RTWS_POLL_WAIT_EXP",
243 "RTWS_POLL_WAIT_EXP_FULL",
244 "RTWS_SYNC",
245 "RTWS_STUTTER",
246 "RTWS_STOPPING",
247 };
248
249 /* Record reader segment types and duration for first failing read. */
250 struct rt_read_seg {
251 int rt_readstate;
252 unsigned long rt_delay_jiffies;
253 unsigned long rt_delay_ms;
254 unsigned long rt_delay_us;
255 bool rt_preempted;
256 };
257 static int err_segs_recorded;
258 static struct rt_read_seg err_segs[RCUTORTURE_RDR_MAX_SEGS];
259 static int rt_read_nsegs;
260
rcu_torture_writer_state_getname(void)261 static const char *rcu_torture_writer_state_getname(void)
262 {
263 unsigned int i = READ_ONCE(rcu_torture_writer_state);
264
265 if (i >= ARRAY_SIZE(rcu_torture_writer_state_names))
266 return "???";
267 return rcu_torture_writer_state_names[i];
268 }
269
270 #ifdef CONFIG_RCU_TRACE
rcu_trace_clock_local(void)271 static u64 notrace rcu_trace_clock_local(void)
272 {
273 u64 ts = trace_clock_local();
274
275 (void)do_div(ts, NSEC_PER_USEC);
276 return ts;
277 }
278 #else /* #ifdef CONFIG_RCU_TRACE */
rcu_trace_clock_local(void)279 static u64 notrace rcu_trace_clock_local(void)
280 {
281 return 0ULL;
282 }
283 #endif /* #else #ifdef CONFIG_RCU_TRACE */
284
285 /*
286 * Stop aggressive CPU-hog tests a bit before the end of the test in order
287 * to avoid interfering with test shutdown.
288 */
shutdown_time_arrived(void)289 static bool shutdown_time_arrived(void)
290 {
291 return shutdown_secs && time_after(jiffies, shutdown_jiffies - 30 * HZ);
292 }
293
294 static unsigned long boost_starttime; /* jiffies of next boost test start. */
295 static DEFINE_MUTEX(boost_mutex); /* protect setting boost_starttime */
296 /* and boost task create/destroy. */
297 static atomic_t barrier_cbs_count; /* Barrier callbacks registered. */
298 static bool barrier_phase; /* Test phase. */
299 static atomic_t barrier_cbs_invoked; /* Barrier callbacks invoked. */
300 static wait_queue_head_t *barrier_cbs_wq; /* Coordinate barrier testing. */
301 static DECLARE_WAIT_QUEUE_HEAD(barrier_wq);
302
303 static atomic_t rcu_fwd_cb_nodelay; /* Short rcu_torture_delay() delays. */
304
305 /*
306 * Allocate an element from the rcu_tortures pool.
307 */
308 static struct rcu_torture *
rcu_torture_alloc(void)309 rcu_torture_alloc(void)
310 {
311 struct list_head *p;
312
313 spin_lock_bh(&rcu_torture_lock);
314 if (list_empty(&rcu_torture_freelist)) {
315 atomic_inc(&n_rcu_torture_alloc_fail);
316 spin_unlock_bh(&rcu_torture_lock);
317 return NULL;
318 }
319 atomic_inc(&n_rcu_torture_alloc);
320 p = rcu_torture_freelist.next;
321 list_del_init(p);
322 spin_unlock_bh(&rcu_torture_lock);
323 return container_of(p, struct rcu_torture, rtort_free);
324 }
325
326 /*
327 * Free an element to the rcu_tortures pool.
328 */
329 static void
rcu_torture_free(struct rcu_torture * p)330 rcu_torture_free(struct rcu_torture *p)
331 {
332 atomic_inc(&n_rcu_torture_free);
333 spin_lock_bh(&rcu_torture_lock);
334 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
335 spin_unlock_bh(&rcu_torture_lock);
336 }
337
338 /*
339 * Operations vector for selecting different types of tests.
340 */
341
342 struct rcu_torture_ops {
343 int ttype;
344 void (*init)(void);
345 void (*cleanup)(void);
346 int (*readlock)(void);
347 void (*read_delay)(struct torture_random_state *rrsp,
348 struct rt_read_seg *rtrsp);
349 void (*readunlock)(int idx);
350 int (*readlock_held)(void);
351 unsigned long (*get_gp_seq)(void);
352 unsigned long (*gp_diff)(unsigned long new, unsigned long old);
353 void (*deferred_free)(struct rcu_torture *p);
354 void (*sync)(void);
355 void (*exp_sync)(void);
356 unsigned long (*get_gp_state_exp)(void);
357 unsigned long (*start_gp_poll_exp)(void);
358 void (*start_gp_poll_exp_full)(struct rcu_gp_oldstate *rgosp);
359 bool (*poll_gp_state_exp)(unsigned long oldstate);
360 void (*cond_sync_exp)(unsigned long oldstate);
361 void (*cond_sync_exp_full)(struct rcu_gp_oldstate *rgosp);
362 unsigned long (*get_comp_state)(void);
363 void (*get_comp_state_full)(struct rcu_gp_oldstate *rgosp);
364 bool (*same_gp_state)(unsigned long oldstate1, unsigned long oldstate2);
365 bool (*same_gp_state_full)(struct rcu_gp_oldstate *rgosp1, struct rcu_gp_oldstate *rgosp2);
366 unsigned long (*get_gp_state)(void);
367 void (*get_gp_state_full)(struct rcu_gp_oldstate *rgosp);
368 unsigned long (*get_gp_completed)(void);
369 void (*get_gp_completed_full)(struct rcu_gp_oldstate *rgosp);
370 unsigned long (*start_gp_poll)(void);
371 void (*start_gp_poll_full)(struct rcu_gp_oldstate *rgosp);
372 bool (*poll_gp_state)(unsigned long oldstate);
373 bool (*poll_gp_state_full)(struct rcu_gp_oldstate *rgosp);
374 bool (*poll_need_2gp)(bool poll, bool poll_full);
375 void (*cond_sync)(unsigned long oldstate);
376 void (*cond_sync_full)(struct rcu_gp_oldstate *rgosp);
377 call_rcu_func_t call;
378 void (*cb_barrier)(void);
379 void (*fqs)(void);
380 void (*stats)(void);
381 void (*gp_kthread_dbg)(void);
382 bool (*check_boost_failed)(unsigned long gp_state, int *cpup);
383 int (*stall_dur)(void);
384 void (*get_gp_data)(int *flags, unsigned long *gp_seq);
385 void (*gp_slow_register)(atomic_t *rgssp);
386 void (*gp_slow_unregister)(atomic_t *rgssp);
387 long cbflood_max;
388 int irq_capable;
389 int can_boost;
390 int extendables;
391 int slow_gps;
392 int no_pi_lock;
393 const char *name;
394 };
395
396 static struct rcu_torture_ops *cur_ops;
397
398 /*
399 * Definitions for rcu torture testing.
400 */
401
torture_readlock_not_held(void)402 static int torture_readlock_not_held(void)
403 {
404 return rcu_read_lock_bh_held() || rcu_read_lock_sched_held();
405 }
406
rcu_torture_read_lock(void)407 static int rcu_torture_read_lock(void)
408 {
409 rcu_read_lock();
410 return 0;
411 }
412
413 static void
rcu_read_delay(struct torture_random_state * rrsp,struct rt_read_seg * rtrsp)414 rcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
415 {
416 unsigned long started;
417 unsigned long completed;
418 const unsigned long shortdelay_us = 200;
419 unsigned long longdelay_ms = 300;
420 unsigned long long ts;
421
422 /* We want a short delay sometimes to make a reader delay the grace
423 * period, and we want a long delay occasionally to trigger
424 * force_quiescent_state. */
425
426 if (!atomic_read(&rcu_fwd_cb_nodelay) &&
427 !(torture_random(rrsp) % (nrealreaders * 2000 * longdelay_ms))) {
428 started = cur_ops->get_gp_seq();
429 ts = rcu_trace_clock_local();
430 if (preempt_count() & (SOFTIRQ_MASK | HARDIRQ_MASK))
431 longdelay_ms = 5; /* Avoid triggering BH limits. */
432 mdelay(longdelay_ms);
433 rtrsp->rt_delay_ms = longdelay_ms;
434 completed = cur_ops->get_gp_seq();
435 do_trace_rcu_torture_read(cur_ops->name, NULL, ts,
436 started, completed);
437 }
438 if (!(torture_random(rrsp) % (nrealreaders * 2 * shortdelay_us))) {
439 udelay(shortdelay_us);
440 rtrsp->rt_delay_us = shortdelay_us;
441 }
442 if (!preempt_count() &&
443 !(torture_random(rrsp) % (nrealreaders * 500))) {
444 torture_preempt_schedule(); /* QS only if preemptible. */
445 rtrsp->rt_preempted = true;
446 }
447 }
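/*
 * Back-of-envelope on the probabilities above, assuming for example
 * nrealreaders == 16: the long mdelay() fires about once per
 * 16 * 2000 * 300 ~= 9.6M calls, the short udelay() about once per
 * 16 * 2 * 200 == 6400 calls, and the preemption attempt about once per
 * 16 * 500 == 8000 calls, so the delays stay rare enough not to
 * dominate reader throughput.
 */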

static void rcu_torture_read_unlock(int idx)
{
	rcu_read_unlock();
}

/*
 * Update callback in the pipe.  This should be invoked after a grace period.
 */
static bool
rcu_torture_pipe_update_one(struct rcu_torture *rp)
{
	int i;
	struct rcu_torture_reader_check *rtrcp = READ_ONCE(rp->rtort_chkp);

	if (rtrcp) {
		WRITE_ONCE(rp->rtort_chkp, NULL);
		smp_store_release(&rtrcp->rtc_ready, 1); // Pair with smp_load_acquire().
	}
	i = rp->rtort_pipe_count;
	if (i > RCU_TORTURE_PIPE_LEN)
		i = RCU_TORTURE_PIPE_LEN;
	atomic_inc(&rcu_torture_wcount[i]);
	WRITE_ONCE(rp->rtort_pipe_count, i + 1);
	ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
	if (i + 1 >= RCU_TORTURE_PIPE_LEN) {
		rp->rtort_mbtest = 0;
		return true;
	}
	return false;
}
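/*
 * The pipeline in brief: ->rtort_pipe_count is stepped once per grace
 * period after the element is unlinked from rcu_torture_current, and the
 * element is recycled once the count reaches RCU_TORTURE_PIPE_LEN.
 * Readers histogram the counts they observe; an element seen with a large
 * count has survived several grace periods, which the statistics code
 * treats as evidence of a too-short grace period.
 */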

/*
 * Update all callbacks in the pipe.  Suitable for synchronous grace-period
 * primitives.
 */
static void
rcu_torture_pipe_update(struct rcu_torture *old_rp)
{
	struct rcu_torture *rp;
	struct rcu_torture *rp1;

	if (old_rp)
		list_add(&old_rp->rtort_free, &rcu_torture_removed);
	list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
		if (rcu_torture_pipe_update_one(rp)) {
			list_del(&rp->rtort_free);
			rcu_torture_free(rp);
		}
	}
}

static void
rcu_torture_cb(struct rcu_head *p)
{
	struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

	if (torture_must_stop_irq()) {
		/* Test is ending, just drop callbacks on the floor. */
		/* The next initialization will pick up the pieces. */
		return;
	}
	if (rcu_torture_pipe_update_one(rp))
		rcu_torture_free(rp);
	else
		cur_ops->deferred_free(rp);
}

static unsigned long rcu_no_completed(void)
{
	return 0;
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_hurry(&p->rtort_rcu, rcu_torture_cb);
}

static void rcu_sync_torture_init(void)
{
	INIT_LIST_HEAD(&rcu_torture_removed);
}

static bool rcu_poll_need_2gp(bool poll, bool poll_full)
{
	return poll;
}

static struct rcu_torture_ops rcu_ops = {
	.ttype			= RCU_FLAVOR,
	.init			= rcu_sync_torture_init,
	.readlock		= rcu_torture_read_lock,
	.read_delay		= rcu_read_delay,
	.readunlock		= rcu_torture_read_unlock,
	.readlock_held		= torture_readlock_not_held,
	.get_gp_seq		= rcu_get_gp_seq,
	.gp_diff		= rcu_seq_diff,
	.deferred_free		= rcu_torture_deferred_free,
	.sync			= synchronize_rcu,
	.exp_sync		= synchronize_rcu_expedited,
	.same_gp_state		= same_state_synchronize_rcu,
	.same_gp_state_full	= same_state_synchronize_rcu_full,
	.get_comp_state		= get_completed_synchronize_rcu,
	.get_comp_state_full	= get_completed_synchronize_rcu_full,
	.get_gp_state		= get_state_synchronize_rcu,
	.get_gp_state_full	= get_state_synchronize_rcu_full,
	.get_gp_completed	= get_completed_synchronize_rcu,
	.get_gp_completed_full	= get_completed_synchronize_rcu_full,
	.start_gp_poll		= start_poll_synchronize_rcu,
	.start_gp_poll_full	= start_poll_synchronize_rcu_full,
	.poll_gp_state		= poll_state_synchronize_rcu,
	.poll_gp_state_full	= poll_state_synchronize_rcu_full,
	.poll_need_2gp		= rcu_poll_need_2gp,
	.cond_sync		= cond_synchronize_rcu,
	.cond_sync_full		= cond_synchronize_rcu_full,
	.get_gp_state_exp	= get_state_synchronize_rcu,
	.start_gp_poll_exp	= start_poll_synchronize_rcu_expedited,
	.start_gp_poll_exp_full	= start_poll_synchronize_rcu_expedited_full,
	.poll_gp_state_exp	= poll_state_synchronize_rcu,
	.cond_sync_exp		= cond_synchronize_rcu_expedited,
	.call			= call_rcu_hurry,
	.cb_barrier		= rcu_barrier,
	.fqs			= rcu_force_quiescent_state,
	.gp_kthread_dbg		= show_rcu_gp_kthreads,
	.check_boost_failed	= rcu_check_boost_fail,
	.stall_dur		= rcu_jiffies_till_stall_check,
	.get_gp_data		= rcutorture_get_gp_data,
	.gp_slow_register	= rcu_gp_slow_register,
	.gp_slow_unregister	= rcu_gp_slow_unregister,
	.irq_capable		= 1,
	.can_boost		= IS_ENABLED(CONFIG_RCU_BOOST),
	.extendables		= RCUTORTURE_MAX_EXTEND,
	.name			= "rcu"
};

/*
 * Don't even think about trying any of these in real life!!!
 * The names include "busted", and they really mean it!
 * The only purpose of these functions is to provide a buggy RCU
 * implementation to make sure that rcutorture correctly emits
 * buggy-RCU error messages.
 */
static void rcu_busted_torture_deferred_free(struct rcu_torture *p)
{
	/* This is a deliberate bug for testing purposes only! */
	rcu_torture_cb(&p->rtort_rcu);
}

static void synchronize_rcu_busted(void)
{
	/* This is a deliberate bug for testing purposes only! */
}

static void
call_rcu_busted(struct rcu_head *head, rcu_callback_t func)
{
	/* This is a deliberate bug for testing purposes only! */
	func(head);
}

static struct rcu_torture_ops rcu_busted_ops = {
	.ttype		= INVALID_RCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_busted_torture_deferred_free,
	.sync		= synchronize_rcu_busted,
	.exp_sync	= synchronize_rcu_busted,
	.call		= call_rcu_busted,
	.irq_capable	= 1,
	.name		= "busted"
};

/*
 * Definitions for srcu torture testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl);
static struct srcu_struct srcu_ctld;
static struct srcu_struct *srcu_ctlp = &srcu_ctl;
static struct rcu_torture_ops srcud_ops;

static void srcu_get_gp_data(int *flags, unsigned long *gp_seq)
{
	srcutorture_get_gp_data(srcu_ctlp, flags, gp_seq);
}

static int srcu_torture_read_lock(void)
{
	if (cur_ops == &srcud_ops)
		return srcu_read_lock_nmisafe(srcu_ctlp);
	else
		return srcu_read_lock(srcu_ctlp);
}

static void
srcu_read_delay(struct torture_random_state *rrsp, struct rt_read_seg *rtrsp)
{
	long delay;
	const long uspertick = 1000000 / HZ;
	const long longdelay = 10;

	/* We want there to be long-running readers, but not all the time. */

	delay = torture_random(rrsp) %
		(nrealreaders * 2 * longdelay * uspertick);
	if (!delay && in_task()) {
		schedule_timeout_interruptible(longdelay);
		rtrsp->rt_delay_jiffies = longdelay;
	} else {
		rcu_read_delay(rrsp, rtrsp);
	}
}
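/*
 * Arithmetic note (assuming HZ=1000, so uspertick == 1000): the
 * schedule_timeout_interruptible() path above, a 10-jiffy sleep, is
 * taken roughly once per nrealreaders * 20000 calls; all other calls
 * fall through to rcu_read_delay().
 */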

static void srcu_torture_read_unlock(int idx)
{
	if (cur_ops == &srcud_ops)
		srcu_read_unlock_nmisafe(srcu_ctlp, idx);
	else
		srcu_read_unlock(srcu_ctlp, idx);
}

static int torture_srcu_read_lock_held(void)
{
	return srcu_read_lock_held(srcu_ctlp);
}

static unsigned long srcu_torture_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_torture_deferred_free(struct rcu_torture *rp)
{
	call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
}

static void srcu_torture_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_get_gp_state(void)
{
	return get_state_synchronize_srcu(srcu_ctlp);
}

static unsigned long srcu_torture_start_gp_poll(void)
{
	return start_poll_synchronize_srcu(srcu_ctlp);
}

static bool srcu_torture_poll_gp_state(unsigned long oldstate)
{
	return poll_state_synchronize_srcu(srcu_ctlp, oldstate);
}

static void srcu_torture_call(struct rcu_head *head,
			      rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_torture_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_torture_stats(void)
{
	srcu_torture_stats_print(srcu_ctlp, torture_type, TORTURE_FLAG);
}

static void srcu_torture_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_torture_ops srcu_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.get_gp_data	= srcu_get_gp_data,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcu"
};

static void srcu_torture_init(void)
{
	rcu_sync_torture_init();
	WARN_ON(init_srcu_struct(&srcu_ctld));
	srcu_ctlp = &srcu_ctld;
}

static void srcu_torture_cleanup(void)
{
	cleanup_srcu_struct(&srcu_ctld);
	srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
}

/* As above, but dynamically allocated. */
static struct rcu_torture_ops srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= srcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.get_gp_state	= srcu_torture_get_gp_state,
	.start_gp_poll	= srcu_torture_start_gp_poll,
	.poll_gp_state	= srcu_torture_poll_gp_state,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.get_gp_data	= srcu_get_gp_data,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.name		= "srcud"
};

/* As above, but broken due to inappropriate reader extension. */
static struct rcu_torture_ops busted_srcud_ops = {
	.ttype		= SRCU_FLAVOR,
	.init		= srcu_torture_init,
	.cleanup	= srcu_torture_cleanup,
	.readlock	= srcu_torture_read_lock,
	.read_delay	= rcu_read_delay,
	.readunlock	= srcu_torture_read_unlock,
	.readlock_held	= torture_srcu_read_lock_held,
	.get_gp_seq	= srcu_torture_completed,
	.deferred_free	= srcu_torture_deferred_free,
	.sync		= srcu_torture_synchronize,
	.exp_sync	= srcu_torture_synchronize_expedited,
	.call		= srcu_torture_call,
	.cb_barrier	= srcu_torture_barrier,
	.stats		= srcu_torture_stats,
	.irq_capable	= 1,
	.no_pi_lock	= IS_ENABLED(CONFIG_TINY_SRCU),
	.extendables	= RCUTORTURE_MAX_EXTEND,
	.name		= "busted_srcud"
};

/*
 * Definitions for trivial CONFIG_PREEMPT=n-only torture testing.
 * This implementation does not necessarily work well with CPU hotplug.
 */

static void synchronize_rcu_trivial(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		torture_sched_setaffinity(current->pid, cpumask_of(cpu));
		WARN_ON_ONCE(raw_smp_processor_id() != cpu);
	}
}
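/*
 * Why the loop above is a grace period for this flavor (CONFIG_PREEMPT=n
 * with preempt-disabled readers): forcing the current task to run on each
 * online CPU in turn imposes a context switch on every CPU, and a context
 * switch cannot happen inside a preempt-disabled reader, so all
 * pre-existing readers must have completed.
 */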

static int rcu_torture_read_lock_trivial(void)
{
	preempt_disable();
	return 0;
}

static void rcu_torture_read_unlock_trivial(int idx)
{
	preempt_enable();
}

static struct rcu_torture_ops trivial_ops = {
	.ttype		= RCU_TRIVIAL_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.readlock_held	= torture_readlock_not_held,
	.get_gp_seq	= rcu_no_completed,
	.sync		= synchronize_rcu_trivial,
	.exp_sync	= synchronize_rcu_trivial,
	.irq_capable	= 1,
	.name		= "trivial"
};

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks torture testing.
 */

static int tasks_torture_read_lock(void)
{
	return 0;
}

static void tasks_torture_read_unlock(int idx)
{
}

static void rcu_tasks_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks(&p->rtort_rcu, rcu_torture_cb);
}

static void synchronize_rcu_mult_test(void)
{
	synchronize_rcu_mult(call_rcu_tasks, call_rcu_hurry);
}

static struct rcu_torture_ops tasks_ops = {
	.ttype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_torture_read_lock,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= tasks_torture_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_torture_deferred_free,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_mult_test,
	.call		= call_rcu_tasks,
	.cb_barrier	= rcu_barrier_tasks,
	.gp_kthread_dbg	= show_rcu_tasks_classic_gp_kthread,
	.get_gp_data	= rcu_tasks_get_gp_data,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else #ifdef CONFIG_TASKS_RCU


#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for rude RCU-tasks torture testing.
 */

static void rcu_tasks_rude_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_rude(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_rude_ops = {
	.ttype		= RCU_TASKS_RUDE_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= rcu_torture_read_lock_trivial,
	.read_delay	= rcu_read_delay,	/* just reuse rcu's version. */
	.readunlock	= rcu_torture_read_unlock_trivial,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_rude_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_rude,
	.exp_sync	= synchronize_rcu_tasks_rude,
	.call		= call_rcu_tasks_rude,
	.cb_barrier	= rcu_barrier_tasks_rude,
	.gp_kthread_dbg	= show_rcu_tasks_rude_gp_kthread,
	.get_gp_data	= rcu_tasks_rude_get_gp_data,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.name		= "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else #ifdef CONFIG_TASKS_RUDE_RCU


#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for tracing RCU-tasks torture testing.
 */

static int tasks_tracing_torture_read_lock(void)
{
	rcu_read_lock_trace();
	return 0;
}

static void tasks_tracing_torture_read_unlock(int idx)
{
	rcu_read_unlock_trace();
}

static void rcu_tasks_tracing_torture_deferred_free(struct rcu_torture *p)
{
	call_rcu_tasks_trace(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops tasks_tracing_ops = {
	.ttype		= RCU_TASKS_TRACING_FLAVOR,
	.init		= rcu_sync_torture_init,
	.readlock	= tasks_tracing_torture_read_lock,
	.read_delay	= srcu_read_delay,	/* just reuse srcu's version. */
	.readunlock	= tasks_tracing_torture_read_unlock,
	.readlock_held	= rcu_read_lock_trace_held,
	.get_gp_seq	= rcu_no_completed,
	.deferred_free	= rcu_tasks_tracing_torture_deferred_free,
	.sync		= synchronize_rcu_tasks_trace,
	.exp_sync	= synchronize_rcu_tasks_trace,
	.call		= call_rcu_tasks_trace,
	.cb_barrier	= rcu_barrier_tasks_trace,
	.gp_kthread_dbg	= show_rcu_tasks_trace_gp_kthread,
	.get_gp_data	= rcu_tasks_trace_get_gp_data,
	.cbflood_max	= 50000,
	.irq_capable	= 1,
	.slow_gps	= 1,
	.name		= "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else #ifdef CONFIG_TASKS_TRACE_RCU


static unsigned long rcutorture_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * RCU torture priority-boost testing.  Runs one real-time thread per
 * CPU for moderate bursts, repeatedly starting grace periods and waiting
 * for them to complete.  If a given grace period takes too long, we assume
 * that priority inversion has occurred.
 */

static int old_rt_runtime = -1;

static void rcu_torture_disable_rt_throttle(void)
{
	/*
	 * Disable RT throttling so that rcutorture's boost threads don't get
	 * throttled.  This is only possible when rcutorture is built in;
	 * otherwise the user must do it manually by setting the
	 * sched_rt_period_us and sched_rt_runtime sysctls.
	 */
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime != -1)
		return;

	old_rt_runtime = sysctl_sched_rt_runtime;
	sysctl_sched_rt_runtime = -1;
}
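/*
 * For the modular case mentioned above, the manual equivalent would be
 * something like the following (illustrative shell, standard sysctl
 * paths):
 *
 *	echo -1 > /proc/sys/kernel/sched_rt_runtime_us
 *
 * with the old value (950000 by default) restored once testing ends.
 */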

static void rcu_torture_enable_rt_throttle(void)
{
	if (!IS_BUILTIN(CONFIG_RCU_TORTURE_TEST) || old_rt_runtime == -1)
		return;

	sysctl_sched_rt_runtime = old_rt_runtime;
	old_rt_runtime = -1;
}

static bool rcu_torture_boost_failed(unsigned long gp_state, unsigned long *start)
{
	int cpu;
	static int dbg_done;
	unsigned long end = jiffies;
	bool gp_done;
	unsigned long j;
	static unsigned long last_persist;
	unsigned long lp;
	unsigned long mininterval = test_boost_duration * HZ - HZ / 2;

	if (end - *start > mininterval) {
		// Recheck after checking time to avoid false positives.
		smp_mb(); // Time check before grace-period check.
		if (cur_ops->poll_gp_state(gp_state))
			return false; // passed, though perhaps just barely
		if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, &cpu)) {
			// At most one persisted message per boost test.
			j = jiffies;
			lp = READ_ONCE(last_persist);
			if (time_after(j, lp + mininterval) && cmpxchg(&last_persist, lp, j) == lp)
				pr_info("Boost inversion persisted: No QS from CPU %d\n", cpu);
			return false; // passed on a technicality
		}
		VERBOSE_TOROUT_STRING("rcu_torture_boost boosting failed");
		n_rcu_torture_boost_failure++;
		if (!xchg(&dbg_done, 1) && cur_ops->gp_kthread_dbg) {
			pr_info("Boost inversion thread ->rt_priority %u gp_state %lu jiffies %lu\n",
				current->rt_priority, gp_state, end - *start);
			cur_ops->gp_kthread_dbg();
			// Recheck after print to flag grace period ending during splat.
			gp_done = cur_ops->poll_gp_state(gp_state);
			pr_info("Boost inversion: GP %lu %s.\n", gp_state,
				gp_done ? "ended already" : "still pending");

		}

		return true; // failed
	} else if (cur_ops->check_boost_failed && !cur_ops->check_boost_failed(gp_state, NULL)) {
		*start = jiffies;
	}

	return false; // passed
}

static int rcu_torture_boost(void *arg)
{
	unsigned long endtime;
	unsigned long gp_state;
	unsigned long gp_state_time;
	unsigned long oldstarttime;

	VERBOSE_TOROUT_STRING("rcu_torture_boost started");

	/* Set real-time priority. */
	sched_set_fifo_low(current);

	/* Each pass through the following loop does one boost-test cycle. */
	do {
		bool failed = false; // Test failed already in this test interval
		bool gp_initiated = false;

		if (kthread_should_stop())
			goto checkwait;

		/* Wait for the next test interval. */
		oldstarttime = READ_ONCE(boost_starttime);
		while (time_before(jiffies, oldstarttime)) {
			schedule_timeout_interruptible(oldstarttime - jiffies);
			if (stutter_wait("rcu_torture_boost"))
				sched_set_fifo_low(current);
			if (torture_must_stop())
				goto checkwait;
		}

		// Do one boost-test interval.
		endtime = oldstarttime + test_boost_duration * HZ;
		while (time_before(jiffies, endtime)) {
			// Has current GP gone too long?
			if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
				failed = rcu_torture_boost_failed(gp_state, &gp_state_time);
			// If we don't have a grace period in flight, start one.
			if (!gp_initiated || cur_ops->poll_gp_state(gp_state)) {
				gp_state = cur_ops->start_gp_poll();
				gp_initiated = true;
				gp_state_time = jiffies;
			}
			if (stutter_wait("rcu_torture_boost")) {
				sched_set_fifo_low(current);
				// If the grace period already ended,
				// we don't know when that happened, so
				// start over.
				if (cur_ops->poll_gp_state(gp_state))
					gp_initiated = false;
			}
			if (torture_must_stop())
				goto checkwait;
		}

		// In case the grace period extended beyond the end of the loop.
		if (gp_initiated && !failed && !cur_ops->poll_gp_state(gp_state))
			rcu_torture_boost_failed(gp_state, &gp_state_time);

		/*
		 * Set the start time of the next test interval.
		 * Yes, this is vulnerable to long delays, but such
		 * delays simply cause a false negative for the next
		 * interval.  Besides, we are running at RT priority,
		 * so delays should be relatively rare.
		 */
		while (oldstarttime == READ_ONCE(boost_starttime) && !kthread_should_stop()) {
			if (mutex_trylock(&boost_mutex)) {
				if (oldstarttime == boost_starttime) {
					WRITE_ONCE(boost_starttime,
						   jiffies + test_boost_interval * HZ);
					n_rcu_torture_boosts++;
				}
				mutex_unlock(&boost_mutex);
				break;
			}
			schedule_timeout_uninterruptible(HZ / 20);
		}

		/* Go do the stutter. */
checkwait:	if (stutter_wait("rcu_torture_boost"))
			sched_set_fifo_low(current);
	} while (!torture_must_stop());

	/* Clean up and exit. */
	while (!kthread_should_stop()) {
		torture_shutdown_absorb("rcu_torture_boost");
		schedule_timeout_uninterruptible(HZ / 20);
	}
	torture_kthread_stopping("rcu_torture_boost");
	return 0;
}

/*
 * RCU torture force-quiescent-state kthread.  Repeatedly induces
 * bursts of calls to force_quiescent_state(), increasing the probability
 * of occurrence of some important types of race conditions.
 */
static int
rcu_torture_fqs(void *arg)
{
	unsigned long fqs_resume_time;
	int fqs_burst_remaining;
	int oldnice = task_nice(current);

	VERBOSE_TOROUT_STRING("rcu_torture_fqs task started");
	do {
		fqs_resume_time = jiffies + fqs_stutter * HZ;
		while (time_before(jiffies, fqs_resume_time) &&
		       !kthread_should_stop()) {
			schedule_timeout_interruptible(HZ / 20);
		}
		fqs_burst_remaining = fqs_duration;
		while (fqs_burst_remaining > 0 &&
		       !kthread_should_stop()) {
			cur_ops->fqs();
			udelay(fqs_holdoff);
			fqs_burst_remaining -= fqs_holdoff;
		}
		if (stutter_wait("rcu_torture_fqs"))
			sched_set_normal(current, oldnice);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_fqs");
	return 0;
}

// Used by writers to randomly choose from the available grace-period primitives.
static int synctype[ARRAY_SIZE(rcu_torture_writer_state_names)] = { };
static int nsynctypes;

/*
 * Determine which grace-period primitives are available.
 */
static void rcu_torture_write_types(void)
{
	bool gp_cond1 = gp_cond, gp_cond_exp1 = gp_cond_exp, gp_cond_full1 = gp_cond_full;
	bool gp_cond_exp_full1 = gp_cond_exp_full, gp_exp1 = gp_exp, gp_poll_exp1 = gp_poll_exp;
	bool gp_poll_exp_full1 = gp_poll_exp_full, gp_normal1 = gp_normal, gp_poll1 = gp_poll;
	bool gp_poll_full1 = gp_poll_full, gp_sync1 = gp_sync;

	/* Initialize synctype[] array.  If none set, take default. */
	if (!gp_cond1 &&
	    !gp_cond_exp1 &&
	    !gp_cond_full1 &&
	    !gp_cond_exp_full1 &&
	    !gp_exp1 &&
	    !gp_poll_exp1 &&
	    !gp_poll_exp_full1 &&
	    !gp_normal1 &&
	    !gp_poll1 &&
	    !gp_poll_full1 &&
	    !gp_sync1) {
		gp_cond1 = true;
		gp_cond_exp1 = true;
		gp_cond_full1 = true;
		gp_cond_exp_full1 = true;
		gp_exp1 = true;
		gp_poll_exp1 = true;
		gp_poll_exp_full1 = true;
		gp_normal1 = true;
		gp_poll1 = true;
		gp_poll_full1 = true;
		gp_sync1 = true;
	}
	if (gp_cond1 && cur_ops->get_gp_state && cur_ops->cond_sync) {
		synctype[nsynctypes++] = RTWS_COND_GET;
		pr_info("%s: Testing conditional GPs.\n", __func__);
	} else if (gp_cond && (!cur_ops->get_gp_state || !cur_ops->cond_sync)) {
		pr_alert("%s: gp_cond without primitives.\n", __func__);
	}
	if (gp_cond_exp1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP;
		pr_info("%s: Testing conditional expedited GPs.\n", __func__);
	} else if (gp_cond_exp && (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp)) {
		pr_alert("%s: gp_cond_exp without primitives.\n", __func__);
	}
	if (gp_cond_full1 && cur_ops->get_gp_state && cur_ops->cond_sync_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_FULL;
		pr_info("%s: Testing conditional full-state GPs.\n", __func__);
	} else if (gp_cond_full && (!cur_ops->get_gp_state || !cur_ops->cond_sync_full)) {
		pr_alert("%s: gp_cond_full without primitives.\n", __func__);
	}
	if (gp_cond_exp_full1 && cur_ops->get_gp_state_exp && cur_ops->cond_sync_exp_full) {
		synctype[nsynctypes++] = RTWS_COND_GET_EXP_FULL;
		pr_info("%s: Testing conditional full-state expedited GPs.\n", __func__);
	} else if (gp_cond_exp_full &&
		   (!cur_ops->get_gp_state_exp || !cur_ops->cond_sync_exp_full)) {
		pr_alert("%s: gp_cond_exp_full without primitives.\n", __func__);
	}
	if (gp_exp1 && cur_ops->exp_sync) {
		synctype[nsynctypes++] = RTWS_EXP_SYNC;
		pr_info("%s: Testing expedited GPs.\n", __func__);
	} else if (gp_exp && !cur_ops->exp_sync) {
		pr_alert("%s: gp_exp without primitives.\n", __func__);
	}
	if (gp_normal1 && cur_ops->deferred_free) {
		synctype[nsynctypes++] = RTWS_DEF_FREE;
		pr_info("%s: Testing asynchronous GPs.\n", __func__);
	} else if (gp_normal && !cur_ops->deferred_free) {
		pr_alert("%s: gp_normal without primitives.\n", __func__);
	}
	if (gp_poll1 && cur_ops->get_comp_state && cur_ops->same_gp_state &&
	    cur_ops->start_gp_poll && cur_ops->poll_gp_state) {
		synctype[nsynctypes++] = RTWS_POLL_GET;
		pr_info("%s: Testing polling GPs.\n", __func__);
	} else if (gp_poll && (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)) {
		pr_alert("%s: gp_poll without primitives.\n", __func__);
	}
	if (gp_poll_full1 && cur_ops->get_comp_state_full && cur_ops->same_gp_state_full
	    && cur_ops->start_gp_poll_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_FULL;
		pr_info("%s: Testing polling full-state GPs.\n", __func__);
	} else if (gp_poll_full && (!cur_ops->start_gp_poll_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_full without primitives.\n", __func__);
	}
	if (gp_poll_exp1 && cur_ops->start_gp_poll_exp && cur_ops->poll_gp_state_exp) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP;
		pr_info("%s: Testing polling expedited GPs.\n", __func__);
	} else if (gp_poll_exp && (!cur_ops->start_gp_poll_exp || !cur_ops->poll_gp_state_exp)) {
		pr_alert("%s: gp_poll_exp without primitives.\n", __func__);
	}
	if (gp_poll_exp_full1 && cur_ops->start_gp_poll_exp_full && cur_ops->poll_gp_state_full) {
		synctype[nsynctypes++] = RTWS_POLL_GET_EXP_FULL;
		pr_info("%s: Testing polling full-state expedited GPs.\n", __func__);
	} else if (gp_poll_exp_full &&
		   (!cur_ops->start_gp_poll_exp_full || !cur_ops->poll_gp_state_full)) {
		pr_alert("%s: gp_poll_exp_full without primitives.\n", __func__);
	}
	if (gp_sync1 && cur_ops->sync) {
		synctype[nsynctypes++] = RTWS_SYNC;
		pr_info("%s: Testing normal GPs.\n", __func__);
	} else if (gp_sync && !cur_ops->sync) {
		pr_alert("%s: gp_sync without primitives.\n", __func__);
	}
}
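/*
 * Example outcome (for the "rcu" flavor with no gp_* module parameters
 * set): all of the gp_* booleans default on, so every wait style whose
 * primitives rcu_ops supplies lands in synctype[], and the writer then
 * picks among those entries uniformly at random on each iteration.
 */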

/*
 * Do the specified rcu_torture_writer() synchronous grace period,
 * while also testing out the polled APIs.  Note well that the single-CPU
 * grace-period optimizations must be accounted for.
 */
static void do_rtws_sync(struct torture_random_state *trsp, void (*sync)(void))
{
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	bool dopoll;
	bool dopoll_full;
	unsigned long r = torture_random(trsp);

	dopoll = cur_ops->get_gp_state && cur_ops->poll_gp_state && !(r & 0x300);
	dopoll_full = cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full && !(r & 0xc00);
	if (dopoll || dopoll_full)
		cpus_read_lock();
	if (dopoll)
		cookie = cur_ops->get_gp_state();
	if (dopoll_full)
		cur_ops->get_gp_state_full(&cookie_full);
	if (cur_ops->poll_need_2gp && cur_ops->poll_need_2gp(dopoll, dopoll_full))
		sync();
	sync();
	WARN_ONCE(dopoll && !cur_ops->poll_gp_state(cookie),
		  "%s: Cookie check 3 failed %pS() online %*pbl.",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	WARN_ONCE(dopoll_full && !cur_ops->poll_gp_state_full(&cookie_full),
		  "%s: Cookie check 4 failed %pS() online %*pbl",
		  __func__, sync, cpumask_pr_args(cpu_online_mask));
	if (dopoll || dopoll_full)
		cpus_read_unlock();
}
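/*
 * Note on the single-CPU caveat above: the cookie checks run under
 * cpus_read_lock(), keeping the set of online CPUs (and hence any
 * single-CPU grace-period shortcut) stable between sampling a cookie and
 * polling it, and ->poll_need_2gp lets a flavor request a second sync()
 * invocation when one grace period would not suffice to expire the
 * sampled cookie.
 */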

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
	bool boot_ended;
	bool can_expedite = !rcu_gp_is_expedited() && !rcu_gp_is_normal();
	unsigned long cookie;
	struct rcu_gp_oldstate cookie_full;
	int expediting = 0;
	unsigned long gp_snap;
	unsigned long gp_snap1;
	struct rcu_gp_oldstate gp_snap_full;
	struct rcu_gp_oldstate gp_snap1_full;
	int i;
	int idx;
	int oldnice = task_nice(current);
	struct rcu_gp_oldstate rgo[NUM_ACTIVE_RCU_POLL_FULL_OLDSTATE];
	struct rcu_torture *rp;
	struct rcu_torture *old_rp;
	static DEFINE_TORTURE_RANDOM(rand);
	unsigned long stallsdone = jiffies;
	bool stutter_waited;
	unsigned long ulo[NUM_ACTIVE_RCU_POLL_OLDSTATE];

	// If a new stall test is added, this must be adjusted.
	if (stall_cpu_holdoff + stall_gp_kthread + stall_cpu)
		stallsdone += (stall_cpu_holdoff + stall_gp_kthread + stall_cpu + 60) * HZ;
	VERBOSE_TOROUT_STRING("rcu_torture_writer task started");
	if (!can_expedite)
		pr_alert("%s" TORTURE_FLAG
			 " GP expediting controlled from boot/sysfs for %s.\n",
			 torture_type, cur_ops->name);
	if (WARN_ONCE(nsynctypes == 0,
		      "%s: No update-side primitives.\n", __func__)) {
		/*
		 * No update-side primitives, so don't try updating.
1392 * The resulting test won't be testing much, hence the
1393 * above WARN_ONCE().
1394 */
1395 rcu_torture_writer_state = RTWS_STOPPING;
1396 torture_kthread_stopping("rcu_torture_writer");
1397 return 0;
1398 }
1399
1400 do {
1401 rcu_torture_writer_state = RTWS_FIXED_DELAY;
1402 torture_hrtimeout_us(500, 1000, &rand);
1403 rp = rcu_torture_alloc();
1404 if (rp == NULL)
1405 continue;
1406 rp->rtort_pipe_count = 0;
1407 ASSERT_EXCLUSIVE_WRITER(rp->rtort_pipe_count);
1408 rcu_torture_writer_state = RTWS_DELAY;
1409 udelay(torture_random(&rand) & 0x3ff);
1410 rcu_torture_writer_state = RTWS_REPLACE;
1411 old_rp = rcu_dereference_check(rcu_torture_current,
1412 current == writer_task);
1413 rp->rtort_mbtest = 1;
1414 rcu_assign_pointer(rcu_torture_current, rp);
1415 smp_wmb(); /* Mods to old_rp must follow rcu_assign_pointer() */
1416 if (old_rp) {
1417 i = old_rp->rtort_pipe_count;
1418 if (i > RCU_TORTURE_PIPE_LEN)
1419 i = RCU_TORTURE_PIPE_LEN;
1420 atomic_inc(&rcu_torture_wcount[i]);
1421 WRITE_ONCE(old_rp->rtort_pipe_count,
1422 old_rp->rtort_pipe_count + 1);
1423 ASSERT_EXCLUSIVE_WRITER(old_rp->rtort_pipe_count);
1424
1425 // Make sure readers block polled grace periods.
1426 if (cur_ops->get_gp_state && cur_ops->poll_gp_state) {
1427 idx = cur_ops->readlock();
1428 cookie = cur_ops->get_gp_state();
1429 WARN_ONCE(cur_ops->poll_gp_state(cookie),
1430 "%s: Cookie check 1 failed %s(%d) %lu->%lu\n",
1431 __func__,
1432 rcu_torture_writer_state_getname(),
1433 rcu_torture_writer_state,
1434 cookie, cur_ops->get_gp_state());
1435 if (cur_ops->get_gp_completed) {
1436 cookie = cur_ops->get_gp_completed();
1437 WARN_ON_ONCE(!cur_ops->poll_gp_state(cookie));
1438 }
1439 cur_ops->readunlock(idx);
1440 }
1441 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full) {
1442 idx = cur_ops->readlock();
1443 cur_ops->get_gp_state_full(&cookie_full);
1444 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
1445 "%s: Cookie check 5 failed %s(%d) online %*pbl\n",
1446 __func__,
1447 rcu_torture_writer_state_getname(),
1448 rcu_torture_writer_state,
1449 cpumask_pr_args(cpu_online_mask));
1450 if (cur_ops->get_gp_completed_full) {
1451 cur_ops->get_gp_completed_full(&cookie_full);
1452 WARN_ON_ONCE(!cur_ops->poll_gp_state_full(&cookie_full));
1453 }
1454 cur_ops->readunlock(idx);
1455 }
1456 switch (synctype[torture_random(&rand) % nsynctypes]) {
1457 case RTWS_DEF_FREE:
1458 rcu_torture_writer_state = RTWS_DEF_FREE;
1459 cur_ops->deferred_free(old_rp);
1460 break;
1461 case RTWS_EXP_SYNC:
1462 rcu_torture_writer_state = RTWS_EXP_SYNC;
1463 do_rtws_sync(&rand, cur_ops->exp_sync);
1464 rcu_torture_pipe_update(old_rp);
1465 break;
1466 case RTWS_COND_GET:
1467 rcu_torture_writer_state = RTWS_COND_GET;
1468 gp_snap = cur_ops->get_gp_state();
1469 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1470 rcu_torture_writer_state = RTWS_COND_SYNC;
1471 cur_ops->cond_sync(gp_snap);
1472 rcu_torture_pipe_update(old_rp);
1473 break;
1474 case RTWS_COND_GET_EXP:
1475 rcu_torture_writer_state = RTWS_COND_GET_EXP;
1476 gp_snap = cur_ops->get_gp_state_exp();
1477 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1478 rcu_torture_writer_state = RTWS_COND_SYNC_EXP;
1479 cur_ops->cond_sync_exp(gp_snap);
1480 rcu_torture_pipe_update(old_rp);
1481 break;
1482 case RTWS_COND_GET_FULL:
1483 rcu_torture_writer_state = RTWS_COND_GET_FULL;
1484 cur_ops->get_gp_state_full(&gp_snap_full);
1485 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1486 rcu_torture_writer_state = RTWS_COND_SYNC_FULL;
1487 cur_ops->cond_sync_full(&gp_snap_full);
1488 rcu_torture_pipe_update(old_rp);
1489 break;
1490 case RTWS_COND_GET_EXP_FULL:
1491 rcu_torture_writer_state = RTWS_COND_GET_EXP_FULL;
1492 cur_ops->get_gp_state_full(&gp_snap_full);
1493 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1494 rcu_torture_writer_state = RTWS_COND_SYNC_EXP_FULL;
1495 cur_ops->cond_sync_exp_full(&gp_snap_full);
1496 rcu_torture_pipe_update(old_rp);
1497 break;
1498 case RTWS_POLL_GET:
1499 rcu_torture_writer_state = RTWS_POLL_GET;
1500 for (i = 0; i < ARRAY_SIZE(ulo); i++)
1501 ulo[i] = cur_ops->get_comp_state();
1502 gp_snap = cur_ops->start_gp_poll();
1503 rcu_torture_writer_state = RTWS_POLL_WAIT;
1504 while (!cur_ops->poll_gp_state(gp_snap)) {
1505 gp_snap1 = cur_ops->get_gp_state();
1506 for (i = 0; i < ARRAY_SIZE(ulo); i++)
1507 if (cur_ops->poll_gp_state(ulo[i]) ||
1508 cur_ops->same_gp_state(ulo[i], gp_snap1)) {
1509 ulo[i] = gp_snap1;
1510 break;
1511 }
1512 WARN_ON_ONCE(i >= ARRAY_SIZE(ulo));
1513 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1514 &rand);
1515 }
1516 rcu_torture_pipe_update(old_rp);
1517 break;
1518 case RTWS_POLL_GET_FULL:
1519 rcu_torture_writer_state = RTWS_POLL_GET_FULL;
1520 for (i = 0; i < ARRAY_SIZE(rgo); i++)
1521 cur_ops->get_comp_state_full(&rgo[i]);
1522 cur_ops->start_gp_poll_full(&gp_snap_full);
1523 rcu_torture_writer_state = RTWS_POLL_WAIT_FULL;
1524 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1525 cur_ops->get_gp_state_full(&gp_snap1_full);
1526 for (i = 0; i < ARRAY_SIZE(rgo); i++)
1527 if (cur_ops->poll_gp_state_full(&rgo[i]) ||
1528 cur_ops->same_gp_state_full(&rgo[i],
1529 &gp_snap1_full)) {
1530 rgo[i] = gp_snap1_full;
1531 break;
1532 }
1533 WARN_ON_ONCE(i >= ARRAY_SIZE(rgo));
1534 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1535 &rand);
1536 }
1537 rcu_torture_pipe_update(old_rp);
1538 break;
1539 case RTWS_POLL_GET_EXP:
1540 rcu_torture_writer_state = RTWS_POLL_GET_EXP;
1541 gp_snap = cur_ops->start_gp_poll_exp();
1542 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP;
1543 while (!cur_ops->poll_gp_state_exp(gp_snap))
1544 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1545 &rand);
1546 rcu_torture_pipe_update(old_rp);
1547 break;
1548 case RTWS_POLL_GET_EXP_FULL:
1549 rcu_torture_writer_state = RTWS_POLL_GET_EXP_FULL;
1550 cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1551 rcu_torture_writer_state = RTWS_POLL_WAIT_EXP_FULL;
1552 while (!cur_ops->poll_gp_state_full(&gp_snap_full))
1553 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1554 &rand);
1555 rcu_torture_pipe_update(old_rp);
1556 break;
1557 case RTWS_SYNC:
1558 rcu_torture_writer_state = RTWS_SYNC;
1559 do_rtws_sync(&rand, cur_ops->sync);
1560 rcu_torture_pipe_update(old_rp);
1561 break;
1562 default:
1563 WARN_ON_ONCE(1);
1564 break;
1565 }
1566 }
1567 WRITE_ONCE(rcu_torture_current_version,
1568 rcu_torture_current_version + 1);
1569 /* Cycle through nesting levels of rcu_expedite_gp() calls. */
1570 if (can_expedite &&
1571 !(torture_random(&rand) & 0xff & (!!expediting - 1))) {
1572 WARN_ON_ONCE(expediting == 0 && rcu_gp_is_expedited());
1573 if (expediting >= 0)
1574 rcu_expedite_gp();
1575 else
1576 rcu_unexpedite_gp();
1577 if (++expediting > 3)
1578 expediting = -expediting;
1579 } else if (!can_expedite) { /* Disabled during boot, recheck. */
1580 can_expedite = !rcu_gp_is_expedited() &&
1581 !rcu_gp_is_normal();
1582 }
1583 rcu_torture_writer_state = RTWS_STUTTER;
1584 boot_ended = rcu_inkernel_boot_has_ended();
1585 stutter_waited = stutter_wait("rcu_torture_writer");
1586 if (stutter_waited &&
1587 !atomic_read(&rcu_fwd_cb_nodelay) &&
1588 !cur_ops->slow_gps &&
1589 !torture_must_stop() &&
1590 boot_ended &&
1591 time_after(jiffies, stallsdone))
1592 for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++)
1593 if (list_empty(&rcu_tortures[i].rtort_free) &&
1594 rcu_access_pointer(rcu_torture_current) != &rcu_tortures[i]) {
1595 tracing_off();
1596 if (cur_ops->gp_kthread_dbg)
1597 cur_ops->gp_kthread_dbg();
1598 WARN(1, "%s: rtort_pipe_count: %d\n", __func__, rcu_tortures[i].rtort_pipe_count);
1599 rcu_ftrace_dump(DUMP_ALL);
1600 }
1601 if (stutter_waited)
1602 sched_set_normal(current, oldnice);
1603 } while (!torture_must_stop());
1604 rcu_torture_current = NULL; // Let stats task know that we are done.
1605 /* Reset expediting back to unexpedited. */
1606 if (expediting > 0)
1607 expediting = -expediting;
1608 while (can_expedite && expediting++ < 0)
1609 rcu_unexpedite_gp();
1610 WARN_ON_ONCE(can_expedite && rcu_gp_is_expedited());
1611 if (!can_expedite)
1612 pr_alert("%s" TORTURE_FLAG
1613 " Dynamic grace-period expediting was disabled.\n",
1614 torture_type);
1615 rcu_torture_writer_state = RTWS_STOPPING;
1616 torture_kthread_stopping("rcu_torture_writer");
1617 return 0;
1618 }
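
/*
 * Illustrative sketch, not rcutorture code: the polled grace-period
 * sequence that the RTWS_POLL_GET case above exercises, written against
 * the vanilla RCU API. A real caller would do useful work between polls;
 * the sleep here is a stand-in and the function name is hypothetical.
 */
static void __maybe_unused rcu_poll_gp_example(void)
{
	unsigned long gp_snap;

	gp_snap = start_poll_synchronize_rcu(); /* Snapshot and start a GP. */
	while (!poll_state_synchronize_rcu(gp_snap))
		schedule_timeout_uninterruptible(1); /* Do other work here. */
	/* A full grace period has now elapsed since the snapshot. */
}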
1619
1620 /*
1621 * RCU torture fake writer kthread. Repeatedly calls sync, with a random
1622 * delay between calls.
1623 */
1624 static int
1625 rcu_torture_fakewriter(void *arg)
1626 {
1627 unsigned long gp_snap;
1628 struct rcu_gp_oldstate gp_snap_full;
1629 DEFINE_TORTURE_RANDOM(rand);
1630
1631 VERBOSE_TOROUT_STRING("rcu_torture_fakewriter task started");
1632 set_user_nice(current, MAX_NICE);
1633
1634 if (WARN_ONCE(nsynctypes == 0,
1635 "%s: No update-side primitives.\n", __func__)) {
1636 /*
1637 * No update-side primitives, so don't try updating.
1638 * The resulting test won't be testing much, hence the
1639 * above WARN_ONCE().
1640 */
1641 torture_kthread_stopping("rcu_torture_fakewriter");
1642 return 0;
1643 }
1644
1645 do {
1646 torture_hrtimeout_jiffies(torture_random(&rand) % 10, &rand);
1647 if (cur_ops->cb_barrier != NULL &&
1648 torture_random(&rand) % (nfakewriters * 8) == 0) {
1649 cur_ops->cb_barrier();
1650 } else {
1651 switch (synctype[torture_random(&rand) % nsynctypes]) {
1652 case RTWS_DEF_FREE:
1653 break;
1654 case RTWS_EXP_SYNC:
1655 cur_ops->exp_sync();
1656 break;
1657 case RTWS_COND_GET:
1658 gp_snap = cur_ops->get_gp_state();
1659 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1660 cur_ops->cond_sync(gp_snap);
1661 break;
1662 case RTWS_COND_GET_EXP:
1663 gp_snap = cur_ops->get_gp_state_exp();
1664 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1665 cur_ops->cond_sync_exp(gp_snap);
1666 break;
1667 case RTWS_COND_GET_FULL:
1668 cur_ops->get_gp_state_full(&gp_snap_full);
1669 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1670 cur_ops->cond_sync_full(&gp_snap_full);
1671 break;
1672 case RTWS_COND_GET_EXP_FULL:
1673 cur_ops->get_gp_state_full(&gp_snap_full);
1674 torture_hrtimeout_jiffies(torture_random(&rand) % 16, &rand);
1675 cur_ops->cond_sync_exp_full(&gp_snap_full);
1676 break;
1677 case RTWS_POLL_GET:
1678 gp_snap = cur_ops->start_gp_poll();
1679 while (!cur_ops->poll_gp_state(gp_snap)) {
1680 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1681 &rand);
1682 }
1683 break;
1684 case RTWS_POLL_GET_FULL:
1685 cur_ops->start_gp_poll_full(&gp_snap_full);
1686 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1687 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1688 &rand);
1689 }
1690 break;
1691 case RTWS_POLL_GET_EXP:
1692 gp_snap = cur_ops->start_gp_poll_exp();
1693 while (!cur_ops->poll_gp_state_exp(gp_snap)) {
1694 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1695 &rand);
1696 }
1697 break;
1698 case RTWS_POLL_GET_EXP_FULL:
1699 cur_ops->start_gp_poll_exp_full(&gp_snap_full);
1700 while (!cur_ops->poll_gp_state_full(&gp_snap_full)) {
1701 torture_hrtimeout_jiffies(torture_random(&rand) % 16,
1702 &rand);
1703 }
1704 break;
1705 case RTWS_SYNC:
1706 cur_ops->sync();
1707 break;
1708 default:
1709 WARN_ON_ONCE(1);
1710 break;
1711 }
1712 }
1713 stutter_wait("rcu_torture_fakewriter");
1714 } while (!torture_must_stop());
1715
1716 torture_kthread_stopping("rcu_torture_fakewriter");
1717 return 0;
1718 }
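
/*
 * Illustrative sketch, not rcutorture code: the conditional grace-period
 * sequence that the RTWS_COND_GET_FULL case above exercises, using the
 * full-state vanilla RCU API. The function name is hypothetical.
 */
static void __maybe_unused rcu_cond_gp_full_example(void)
{
	struct rcu_gp_oldstate gp_snap;

	get_state_synchronize_rcu_full(&gp_snap); /* Snapshot GP state. */
	/* ... do work that need not wait for a grace period ... */
	cond_synchronize_rcu_full(&gp_snap); /* Block only if GP not yet done. */
}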
1719
1720 static void rcu_torture_timer_cb(struct rcu_head *rhp)
1721 {
1722 kfree(rhp);
1723 }
1724
1725 // Set up and carry out testing of RCU's global memory ordering
1726 static void rcu_torture_reader_do_mbchk(long myid, struct rcu_torture *rtp,
1727 struct torture_random_state *trsp)
1728 {
1729 unsigned long loops;
1730 int noc = torture_num_online_cpus();
1731 int rdrchked;
1732 int rdrchker;
1733 struct rcu_torture_reader_check *rtrcp; // Me.
1734 struct rcu_torture_reader_check *rtrcp_assigner; // Assigned us to do checking.
1735 struct rcu_torture_reader_check *rtrcp_chked; // Reader being checked.
1736 struct rcu_torture_reader_check *rtrcp_chker; // Reader doing checking when not me.
1737
1738 if (myid < 0)
1739 return; // Don't try this from timer handlers.
1740
1741 // Increment my counter.
1742 rtrcp = &rcu_torture_reader_mbchk[myid];
1743 WRITE_ONCE(rtrcp->rtc_myloops, rtrcp->rtc_myloops + 1);
1744
1745 // Attempt to assign someone else some checking work.
1746 rdrchked = torture_random(trsp) % nrealreaders;
1747 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1748 rdrchker = torture_random(trsp) % nrealreaders;
1749 rtrcp_chker = &rcu_torture_reader_mbchk[rdrchker];
1750 if (rdrchked != myid && rdrchked != rdrchker && noc >= rdrchked && noc >= rdrchker &&
1751 smp_load_acquire(&rtrcp->rtc_chkrdr) < 0 && // Pairs with smp_store_release below.
1752 !READ_ONCE(rtp->rtort_chkp) &&
1753 !smp_load_acquire(&rtrcp_chker->rtc_assigner)) { // Pairs with smp_store_release below.
1754 rtrcp->rtc_chkloops = READ_ONCE(rtrcp_chked->rtc_myloops);
1755 WARN_ON_ONCE(rtrcp->rtc_chkrdr >= 0);
1756 rtrcp->rtc_chkrdr = rdrchked;
1757 WARN_ON_ONCE(rtrcp->rtc_ready); // This gets set after the grace period ends.
1758 if (cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, NULL, rtrcp) ||
1759 cmpxchg_relaxed(&rtp->rtort_chkp, NULL, rtrcp))
1760 (void)cmpxchg_relaxed(&rtrcp_chker->rtc_assigner, rtrcp, NULL); // Back out.
1761 }
1762
1763 // If assigned some completed work, do it!
1764 rtrcp_assigner = READ_ONCE(rtrcp->rtc_assigner);
1765 if (!rtrcp_assigner || !smp_load_acquire(&rtrcp_assigner->rtc_ready))
1766 return; // No work or work not yet ready.
1767 rdrchked = rtrcp_assigner->rtc_chkrdr;
1768 if (WARN_ON_ONCE(rdrchked < 0))
1769 return;
1770 rtrcp_chked = &rcu_torture_reader_mbchk[rdrchked];
1771 loops = READ_ONCE(rtrcp_chked->rtc_myloops);
1772 atomic_inc(&n_rcu_torture_mbchk_tries);
1773 if (ULONG_CMP_LT(loops, rtrcp_assigner->rtc_chkloops))
1774 atomic_inc(&n_rcu_torture_mbchk_fail);
1775 rtrcp_assigner->rtc_chkloops = loops + ULONG_MAX / 2;
1776 rtrcp_assigner->rtc_ready = 0;
1777 smp_store_release(&rtrcp->rtc_assigner, NULL); // Someone else can assign us work.
1778 smp_store_release(&rtrcp_assigner->rtc_chkrdr, -1); // Assigner can again assign.
1779 }
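
/*
 * Illustrative sketch, not rcutorture code, of the release/acquire
 * handshake used above: the publisher's smp_store_release() pairs with
 * the consumer's smp_load_acquire(), so a consumer that observes the
 * flag is guaranteed to also observe the payload. All names here are
 * hypothetical.
 */
struct mb_handshake_example {
	int payload;
	int ready;
};

static void __maybe_unused mb_publish_example(struct mb_handshake_example *p, int v)
{
	p->payload = v;			 /* Ordered before the release below. */
	smp_store_release(&p->ready, 1); /* Pairs with smp_load_acquire(). */
}

static int __maybe_unused mb_consume_example(struct mb_handshake_example *p)
{
	if (!smp_load_acquire(&p->ready)) /* Pairs with smp_store_release(). */
		return -1;		  /* Nothing published yet. */
	return p->payload;		  /* Guaranteed to be the new value. */
}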
1780
1781 /*
1782 * Do one extension of an RCU read-side critical section using the
1783 * current reader state in readstate (set to zero for initial entry
1784 * to extended critical section), set the new state as specified by
1785 * newstate (set to zero for final exit from extended critical section),
1786 * and random-number-generator state in trsp. If this is neither the
1787 * beginning nor the end of the critical section and if there was actually a
1788 * change, do a ->read_delay().
1789 */
1790 static void rcutorture_one_extend(int *readstate, int newstate,
1791 struct torture_random_state *trsp,
1792 struct rt_read_seg *rtrsp)
1793 {
1794 unsigned long flags;
1795 int idxnew1 = -1;
1796 int idxnew2 = -1;
1797 int idxold1 = *readstate;
1798 int idxold2 = idxold1;
1799 int statesnew = ~*readstate & newstate;
1800 int statesold = *readstate & ~newstate;
1801
1802 WARN_ON_ONCE(idxold2 < 0);
1803 WARN_ON_ONCE((idxold2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1804 rtrsp->rt_readstate = newstate;
1805
1806 /* First, put new protection in place to avoid critical-section gap. */
1807 if (statesnew & RCUTORTURE_RDR_BH)
1808 local_bh_disable();
1809 if (statesnew & RCUTORTURE_RDR_RBH)
1810 rcu_read_lock_bh();
1811 if (statesnew & RCUTORTURE_RDR_IRQ)
1812 local_irq_disable();
1813 if (statesnew & RCUTORTURE_RDR_PREEMPT)
1814 preempt_disable();
1815 if (statesnew & RCUTORTURE_RDR_SCHED)
1816 rcu_read_lock_sched();
1817 if (statesnew & RCUTORTURE_RDR_RCU_1)
1818 idxnew1 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_1;
1819 if (statesnew & RCUTORTURE_RDR_RCU_2)
1820 idxnew2 = (cur_ops->readlock() & 0x1) << RCUTORTURE_RDR_SHIFT_2;
1821
1822 /*
1823 * Next, remove old protection, in decreasing order of strength
1824 * to avoid unlock paths that aren't safe in the stronger
1825 * context. Namely: BH can not be enabled with disabled interrupts.
1826 * Additionally PREEMPT_RT requires that BH is enabled in preemptible
1827 * context.
1828 */
1829 if (statesold & RCUTORTURE_RDR_IRQ)
1830 local_irq_enable();
1831 if (statesold & RCUTORTURE_RDR_PREEMPT)
1832 preempt_enable();
1833 if (statesold & RCUTORTURE_RDR_SCHED)
1834 rcu_read_unlock_sched();
1835 if (statesold & RCUTORTURE_RDR_BH)
1836 local_bh_enable();
1837 if (statesold & RCUTORTURE_RDR_RBH)
1838 rcu_read_unlock_bh();
1839 if (statesold & RCUTORTURE_RDR_RCU_2) {
1840 cur_ops->readunlock((idxold2 >> RCUTORTURE_RDR_SHIFT_2) & 0x1);
1841 WARN_ON_ONCE(idxnew2 != -1);
1842 idxold2 = 0;
1843 }
1844 if (statesold & RCUTORTURE_RDR_RCU_1) {
1845 bool lockit;
1846
1847 lockit = !cur_ops->no_pi_lock && !statesnew && !(torture_random(trsp) & 0xffff);
1848 if (lockit)
1849 raw_spin_lock_irqsave(&current->pi_lock, flags);
1850 cur_ops->readunlock((idxold1 >> RCUTORTURE_RDR_SHIFT_1) & 0x1);
1851 WARN_ON_ONCE(idxnew1 != -1);
1852 idxold1 = 0;
1853 if (lockit)
1854 raw_spin_unlock_irqrestore(&current->pi_lock, flags);
1855 }
1856
1857 /* Delay if neither beginning nor end and there was a change. */
1858 if ((statesnew || statesold) && *readstate && newstate)
1859 cur_ops->read_delay(trsp, rtrsp);
1860
1861 /* Update the reader state. */
1862 if (idxnew1 == -1)
1863 idxnew1 = idxold1 & RCUTORTURE_RDR_MASK_1;
1864 WARN_ON_ONCE(idxnew1 < 0);
1865 if (WARN_ON_ONCE((idxnew1 >> RCUTORTURE_RDR_SHIFT_1) > 1))
1866 pr_info("Unexpected idxnew1 value of %#x\n", idxnew1);
1867 if (idxnew2 == -1)
1868 idxnew2 = idxold2 & RCUTORTURE_RDR_MASK_2;
1869 WARN_ON_ONCE(idxnew2 < 0);
1870 WARN_ON_ONCE((idxnew2 >> RCUTORTURE_RDR_SHIFT_2) > 1);
1871 *readstate = idxnew1 | idxnew2 | newstate;
1872 WARN_ON_ONCE(*readstate < 0);
1873 if (WARN_ON_ONCE((*readstate >> RCUTORTURE_RDR_SHIFT_2) > 1))
1874 pr_info("Unexpected idxnew2 value of %#x\n", idxnew2);
1875 }
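
/*
 * Illustrative sketch, not rcutorture code: one legal combination of the
 * protections toggled above, entered and then exited in decreasing order
 * of strength, mirroring the unlock-ordering rules that
 * rcutorture_one_extend() enforces. The function name is hypothetical.
 */
static void __maybe_unused reader_nesting_example(void)
{
	rcu_read_lock();	/* RCUTORTURE_RDR_RCU_1: outermost reader. */
	local_bh_disable();	/* RCUTORTURE_RDR_BH. */
	preempt_disable();	/* RCUTORTURE_RDR_PREEMPT. */
	/* ... read-side accesses ... */
	preempt_enable();	/* Strongest protection first... */
	local_bh_enable();	/* ...then BH, never with IRQs off... */
	rcu_read_unlock();	/* ...and the outermost reader last. */
}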
1876
1877 /* Return the biggest extendables mask given current RCU and boot parameters. */
1878 static int rcutorture_extend_mask_max(void)
1879 {
1880 int mask;
1881
1882 WARN_ON_ONCE(extendables & ~RCUTORTURE_MAX_EXTEND);
1883 mask = extendables & RCUTORTURE_MAX_EXTEND & cur_ops->extendables;
1884 mask = mask | RCUTORTURE_RDR_RCU_1 | RCUTORTURE_RDR_RCU_2;
1885 return mask;
1886 }
1887
1888 /* Return a random protection state mask, but with at least one bit set. */
1889 static int
1890 rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
1891 {
1892 int mask = rcutorture_extend_mask_max();
1893 unsigned long randmask1 = torture_random(trsp);
1894 unsigned long randmask2 = randmask1 >> 3;
1895 unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
1896 unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
1897 unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
1898
1899 WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT_1);
1900 /* Mostly only one bit (need preemption!), sometimes lots of bits. */
1901 if (!(randmask1 & 0x7))
1902 mask = mask & randmask2;
1903 else
1904 mask = mask & (1 << (randmask2 % RCUTORTURE_RDR_NBITS));
1905
1906 // Can't have nested RCU reader without outer RCU reader.
1907 if (!(mask & RCUTORTURE_RDR_RCU_1) && (mask & RCUTORTURE_RDR_RCU_2)) {
1908 if (oldmask & RCUTORTURE_RDR_RCU_1)
1909 mask &= ~RCUTORTURE_RDR_RCU_2;
1910 else
1911 mask |= RCUTORTURE_RDR_RCU_1;
1912 }
1913
1914 /*
1915 * Can't enable bh w/irq disabled.
1916 */
1917 if (mask & RCUTORTURE_RDR_IRQ)
1918 mask |= oldmask & bhs;
1919
1920 /*
1921 * Ideally these sequences would be detected in debug builds
1922 * (regardless of RT), but until then don't stop testing
1923 * them on non-RT.
1924 */
1925 if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
1926 /* Can't modify BH in atomic context */
1927 if (oldmask & preempts_irq)
1928 mask &= ~bhs;
1929 if ((oldmask | mask) & preempts_irq)
1930 mask |= oldmask & bhs;
1931 }
1932
1933 return mask ?: RCUTORTURE_RDR_RCU_1;
1934 }
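
/*
 * Illustrative check, not rcutorture code, of the invariants that
 * rcutorture_extend_mask() enforces above. The function name is
 * hypothetical and the check is a sketch, not exhaustive.
 */
static bool __maybe_unused extend_mask_is_valid_example(int oldmask, int mask)
{
	int bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;

	/* A nested RCU reader requires an outer RCU reader. */
	if ((mask & RCUTORTURE_RDR_RCU_2) &&
	    !((mask | oldmask) & RCUTORTURE_RDR_RCU_1))
		return false;
	/* BH may not be re-enabled while interrupts are disabled. */
	if ((mask & RCUTORTURE_RDR_IRQ) && (oldmask & bhs) && !(mask & bhs))
		return false;
	return true;
}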
1935
1936 /*
1937 * Do a randomly selected number of extensions of an existing RCU read-side
1938 * critical section.
1939 */
1940 static struct rt_read_seg *
1941 rcutorture_loop_extend(int *readstate, struct torture_random_state *trsp,
1942 struct rt_read_seg *rtrsp)
1943 {
1944 int i;
1945 int j;
1946 int mask = rcutorture_extend_mask_max();
1947
1948 WARN_ON_ONCE(!*readstate); /* -Existing- RCU read-side critsect! */
1949 if (!((mask - 1) & mask))
1950 return rtrsp; /* Current RCU reader not extendable. */
1951 /* Bias towards larger numbers of loops. */
1952 i = torture_random(trsp);
1953 i = ((i | (i >> 3)) & RCUTORTURE_RDR_MAX_LOOPS) + 1;
1954 for (j = 0; j < i; j++) {
1955 mask = rcutorture_extend_mask(*readstate, trsp);
1956 rcutorture_one_extend(readstate, mask, trsp, &rtrsp[j]);
1957 }
1958 return &rtrsp[j];
1959 }
1960
1961 /*
1962 * Do one read-side critical section, returning false if there was
1963 * no data to read. Can be invoked both from process context and
1964 * from a timer handler.
1965 */
1966 static bool rcu_torture_one_read(struct torture_random_state *trsp, long myid)
1967 {
1968 bool checkpolling = !(torture_random(trsp) & 0xfff);
1969 unsigned long cookie;
1970 struct rcu_gp_oldstate cookie_full;
1971 int i;
1972 unsigned long started;
1973 unsigned long completed;
1974 int newstate;
1975 struct rcu_torture *p;
1976 int pipe_count;
1977 int readstate = 0;
1978 struct rt_read_seg rtseg[RCUTORTURE_RDR_MAX_SEGS] = { { 0 } };
1979 struct rt_read_seg *rtrsp = &rtseg[0];
1980 struct rt_read_seg *rtrsp1;
1981 unsigned long long ts;
1982
1983 WARN_ON_ONCE(!rcu_is_watching());
1984 newstate = rcutorture_extend_mask(readstate, trsp);
1985 rcutorture_one_extend(&readstate, newstate, trsp, rtrsp++);
1986 if (checkpolling) {
1987 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
1988 cookie = cur_ops->get_gp_state();
1989 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
1990 cur_ops->get_gp_state_full(&cookie_full);
1991 }
1992 started = cur_ops->get_gp_seq();
1993 ts = rcu_trace_clock_local();
1994 p = rcu_dereference_check(rcu_torture_current,
1995 !cur_ops->readlock_held || cur_ops->readlock_held());
1996 if (p == NULL) {
1997 /* Wait for rcu_torture_writer to get underway */
1998 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
1999 return false;
2000 }
2001 if (p->rtort_mbtest == 0)
2002 atomic_inc(&n_rcu_torture_mberror);
2003 rcu_torture_reader_do_mbchk(myid, p, trsp);
2004 rtrsp = rcutorture_loop_extend(&readstate, trsp, rtrsp);
2005 preempt_disable();
2006 pipe_count = READ_ONCE(p->rtort_pipe_count);
2007 if (pipe_count > RCU_TORTURE_PIPE_LEN) {
2008 // Should not happen in a correct RCU implementation,
2009 // but happens quite often for torture_type=busted.
2010 pipe_count = RCU_TORTURE_PIPE_LEN;
2011 }
2012 completed = cur_ops->get_gp_seq();
2013 if (pipe_count > 1) {
2014 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu,
2015 ts, started, completed);
2016 rcu_ftrace_dump(DUMP_ALL);
2017 }
2018 __this_cpu_inc(rcu_torture_count[pipe_count]);
2019 completed = rcutorture_seq_diff(completed, started);
2020 if (completed > RCU_TORTURE_PIPE_LEN) {
2021 /* Should not happen, but... */
2022 completed = RCU_TORTURE_PIPE_LEN;
2023 }
2024 __this_cpu_inc(rcu_torture_batch[completed]);
2025 preempt_enable();
2026 if (checkpolling) {
2027 if (cur_ops->get_gp_state && cur_ops->poll_gp_state)
2028 WARN_ONCE(cur_ops->poll_gp_state(cookie),
2029 "%s: Cookie check 2 failed %s(%d) %lu->%lu\n",
2030 __func__,
2031 rcu_torture_writer_state_getname(),
2032 rcu_torture_writer_state,
2033 cookie, cur_ops->get_gp_state());
2034 if (cur_ops->get_gp_state_full && cur_ops->poll_gp_state_full)
2035 WARN_ONCE(cur_ops->poll_gp_state_full(&cookie_full),
2036 "%s: Cookie check 6 failed %s(%d) online %*pbl\n",
2037 __func__,
2038 rcu_torture_writer_state_getname(),
2039 rcu_torture_writer_state,
2040 cpumask_pr_args(cpu_online_mask));
2041 }
2042 rcutorture_one_extend(&readstate, 0, trsp, rtrsp);
2043 WARN_ON_ONCE(readstate);
2044 // This next splat is expected behavior if leakpointer, especially
2045 // for CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels.
2046 WARN_ON_ONCE(leakpointer && READ_ONCE(p->rtort_pipe_count) > 1);
2047
2048 /* If error or close call, record the sequence of reader protections. */
2049 if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
2050 i = 0;
2051 for (rtrsp1 = &rtseg[0]; rtrsp1 < rtrsp; rtrsp1++)
2052 err_segs[i++] = *rtrsp1;
2053 rt_read_nsegs = i;
2054 }
2055
2056 return true;
2057 }
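
/*
 * Illustrative sketch, not rcutorture code: the minimal read-side
 * pattern that rcu_torture_one_read() elaborates on, written against
 * vanilla RCU. 'example_ptr' and the function name are hypothetical.
 */
struct example_data {
	int val;
};
static struct example_data __rcu *example_ptr;

static int __maybe_unused example_read_one(void)
{
	int ret = -1;
	struct example_data *p;

	rcu_read_lock();
	p = rcu_dereference(example_ptr); /* Valid only inside the reader. */
	if (p)
		ret = p->val;
	rcu_read_unlock();		  /* 'p' is dead after this point. */
	return ret;
}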
2058
2059 static DEFINE_TORTURE_RANDOM_PERCPU(rcu_torture_timer_rand);
2060
2061 /*
2062 * RCU torture reader from timer handler. Dereferences rcu_torture_current,
2063 * incrementing the corresponding element of the pipeline array. The
2064 * counter in the element should never be greater than 1; otherwise, the
2065 * RCU implementation is broken.
2066 */
2067 static void rcu_torture_timer(struct timer_list *unused)
2068 {
2069 atomic_long_inc(&n_rcu_torture_timers);
2070 (void)rcu_torture_one_read(this_cpu_ptr(&rcu_torture_timer_rand), -1);
2071
2072 /* Test call_rcu() invocation from interrupt handler. */
2073 if (cur_ops->call) {
2074 struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_NOWAIT);
2075
2076 if (rhp)
2077 cur_ops->call(rhp, rcu_torture_timer_cb);
2078 }
2079 }
2080
2081 /*
2082 * RCU torture reader kthread. Repeatedly dereferences rcu_torture_current,
2083 * incrementing the corresponding element of the pipeline array. The
2084 * counter in the element should never be greater than 1; otherwise, the
2085 * RCU implementation is broken.
2086 */
2087 static int
2088 rcu_torture_reader(void *arg)
2089 {
2090 unsigned long lastsleep = jiffies;
2091 long myid = (long)arg;
2092 int mynumonline = myid;
2093 DEFINE_TORTURE_RANDOM(rand);
2094 struct timer_list t;
2095
2096 VERBOSE_TOROUT_STRING("rcu_torture_reader task started");
2097 set_user_nice(current, MAX_NICE);
2098 if (irqreader && cur_ops->irq_capable)
2099 timer_setup_on_stack(&t, rcu_torture_timer, 0);
2100 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2101 do {
2102 if (irqreader && cur_ops->irq_capable) {
2103 if (!timer_pending(&t))
2104 mod_timer(&t, jiffies + 1);
2105 }
2106 if (!rcu_torture_one_read(&rand, myid) && !torture_must_stop())
2107 schedule_timeout_interruptible(HZ);
2108 if (time_after(jiffies, lastsleep) && !torture_must_stop()) {
2109 torture_hrtimeout_us(500, 1000, &rand);
2110 lastsleep = jiffies + 10;
2111 }
2112 while (torture_num_online_cpus() < mynumonline && !torture_must_stop())
2113 schedule_timeout_interruptible(HZ / 5);
2114 stutter_wait("rcu_torture_reader");
2115 } while (!torture_must_stop());
2116 if (irqreader && cur_ops->irq_capable) {
2117 del_timer_sync(&t);
2118 destroy_timer_on_stack(&t);
2119 }
2120 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2121 torture_kthread_stopping("rcu_torture_reader");
2122 return 0;
2123 }
2124
2125 /*
2126 * Randomly toggle CPUs' callback-offload state. This uses hrtimers to
2127 * increase race probabilities and fuzzes the interval between toggling.
2128 */
2129 static int rcu_nocb_toggle(void *arg)
2130 {
2131 int cpu;
2132 int maxcpu = -1;
2133 int oldnice = task_nice(current);
2134 long r;
2135 DEFINE_TORTURE_RANDOM(rand);
2136 ktime_t toggle_delay;
2137 unsigned long toggle_fuzz;
2138 ktime_t toggle_interval = ms_to_ktime(nocbs_toggle);
2139
2140 VERBOSE_TOROUT_STRING("rcu_nocb_toggle task started");
2141 while (!rcu_inkernel_boot_has_ended())
2142 schedule_timeout_interruptible(HZ / 10);
2143 for_each_possible_cpu(cpu)
2144 maxcpu = cpu;
2145 WARN_ON(maxcpu < 0);
2146 if (toggle_interval > ULONG_MAX)
2147 toggle_fuzz = ULONG_MAX >> 3;
2148 else
2149 toggle_fuzz = toggle_interval >> 3;
2150 if (toggle_fuzz <= 0)
2151 toggle_fuzz = NSEC_PER_USEC;
2152 do {
2153 r = torture_random(&rand);
2154 cpu = (r >> 1) % (maxcpu + 1);
2155 if (r & 0x1) {
2156 rcu_nocb_cpu_offload(cpu);
2157 atomic_long_inc(&n_nocb_offload);
2158 } else {
2159 rcu_nocb_cpu_deoffload(cpu);
2160 atomic_long_inc(&n_nocb_deoffload);
2161 }
2162 toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
2163 set_current_state(TASK_INTERRUPTIBLE);
2164 schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
2165 if (stutter_wait("rcu_nocb_toggle"))
2166 sched_set_normal(current, oldnice);
2167 } while (!torture_must_stop());
2168 torture_kthread_stopping("rcu_nocb_toggle");
2169 return 0;
2170 }
2171
2172 /*
2173 * Print torture statistics. Caller must ensure that there is only
2174 * one call to this function at a given time!!! This is normally
2175 * accomplished by relying on the module system to only have one copy
2176 * of the module loaded, and then by giving the rcu_torture_stats
2177 * kthread full control (or the init/cleanup functions when rcu_torture_stats
2178 * thread is not running).
2179 */
2180 static void
2181 rcu_torture_stats_print(void)
2182 {
2183 int cpu;
2184 int i;
2185 long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2186 long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
2187 struct rcu_torture *rtcp;
2188 static unsigned long rtcv_snap = ULONG_MAX;
2189 static bool splatted;
2190 struct task_struct *wtp;
2191
2192 for_each_possible_cpu(cpu) {
2193 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2194 pipesummary[i] += READ_ONCE(per_cpu(rcu_torture_count, cpu)[i]);
2195 batchsummary[i] += READ_ONCE(per_cpu(rcu_torture_batch, cpu)[i]);
2196 }
2197 }
2198 for (i = RCU_TORTURE_PIPE_LEN; i >= 0; i--) {
2199 if (pipesummary[i] != 0)
2200 break;
2201 }
2202
2203 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2204 rtcp = rcu_access_pointer(rcu_torture_current);
2205 pr_cont("rtc: %p %s: %lu tfle: %d rta: %d rtaf: %d rtf: %d ",
2206 rtcp,
2207 rtcp && !rcu_stall_is_suppressed_at_boot() ? "ver" : "VER",
2208 rcu_torture_current_version,
2209 list_empty(&rcu_torture_freelist),
2210 atomic_read(&n_rcu_torture_alloc),
2211 atomic_read(&n_rcu_torture_alloc_fail),
2212 atomic_read(&n_rcu_torture_free));
2213 pr_cont("rtmbe: %d rtmbkf: %d/%d rtbe: %ld rtbke: %ld ",
2214 atomic_read(&n_rcu_torture_mberror),
2215 atomic_read(&n_rcu_torture_mbchk_fail), atomic_read(&n_rcu_torture_mbchk_tries),
2216 n_rcu_torture_barrier_error,
2217 n_rcu_torture_boost_ktrerror);
2218 pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
2219 n_rcu_torture_boost_failure,
2220 n_rcu_torture_boosts,
2221 atomic_long_read(&n_rcu_torture_timers));
2222 torture_onoff_stats();
2223 pr_cont("barrier: %ld/%ld:%ld ",
2224 data_race(n_barrier_successes),
2225 data_race(n_barrier_attempts),
2226 data_race(n_rcu_torture_barrier_error));
2227 pr_cont("read-exits: %ld ", data_race(n_read_exits)); // Statistic.
2228 pr_cont("nocb-toggles: %ld:%ld\n",
2229 atomic_long_read(&n_nocb_offload), atomic_long_read(&n_nocb_deoffload));
2230
2231 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2232 if (atomic_read(&n_rcu_torture_mberror) ||
2233 atomic_read(&n_rcu_torture_mbchk_fail) ||
2234 n_rcu_torture_barrier_error || n_rcu_torture_boost_ktrerror ||
2235 n_rcu_torture_boost_failure || i > 1) {
2236 pr_cont("%s", "!!! ");
2237 atomic_inc(&n_rcu_torture_error);
2238 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mberror));
2239 WARN_ON_ONCE(atomic_read(&n_rcu_torture_mbchk_fail));
2240 WARN_ON_ONCE(n_rcu_torture_barrier_error); // rcu_barrier()
2241 WARN_ON_ONCE(n_rcu_torture_boost_ktrerror); // no boost kthread
2242 WARN_ON_ONCE(n_rcu_torture_boost_failure); // boost failed (TIMER_SOFTIRQ RT prio?)
2243 WARN_ON_ONCE(i > 1); // Too-short grace period
2244 }
2245 pr_cont("Reader Pipe: ");
2246 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2247 pr_cont(" %ld", pipesummary[i]);
2248 pr_cont("\n");
2249
2250 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2251 pr_cont("Reader Batch: ");
2252 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
2253 pr_cont(" %ld", batchsummary[i]);
2254 pr_cont("\n");
2255
2256 pr_alert("%s%s ", torture_type, TORTURE_FLAG);
2257 pr_cont("Free-Block Circulation: ");
2258 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
2259 pr_cont(" %d", atomic_read(&rcu_torture_wcount[i]));
2260 }
2261 pr_cont("\n");
2262
2263 if (cur_ops->stats)
2264 cur_ops->stats();
2265 if (rtcv_snap == rcu_torture_current_version &&
2266 rcu_access_pointer(rcu_torture_current) &&
2267 !rcu_stall_is_suppressed()) {
2268 int __maybe_unused flags = 0;
2269 unsigned long __maybe_unused gp_seq = 0;
2270
2271 if (cur_ops->get_gp_data)
2272 cur_ops->get_gp_data(&flags, &gp_seq);
2273 wtp = READ_ONCE(writer_task);
2274 pr_alert("??? Writer stall state %s(%d) g%lu f%#x ->state %#x cpu %d\n",
2275 rcu_torture_writer_state_getname(),
2276 rcu_torture_writer_state, gp_seq, flags,
2277 wtp == NULL ? ~0U : wtp->__state,
2278 wtp == NULL ? -1 : (int)task_cpu(wtp));
2279 if (!splatted && wtp) {
2280 sched_show_task(wtp);
2281 splatted = true;
2282 }
2283 if (cur_ops->gp_kthread_dbg)
2284 cur_ops->gp_kthread_dbg();
2285 rcu_ftrace_dump(DUMP_ALL);
2286 }
2287 rtcv_snap = rcu_torture_current_version;
2288 }
2289
2290 /*
2291 * Periodically prints torture statistics, if periodic statistics printing
2292 * was specified via the stat_interval module parameter.
2293 */
2294 static int
2295 rcu_torture_stats(void *arg)
2296 {
2297 VERBOSE_TOROUT_STRING("rcu_torture_stats task started");
2298 do {
2299 schedule_timeout_interruptible(stat_interval * HZ);
2300 rcu_torture_stats_print();
2301 torture_shutdown_absorb("rcu_torture_stats");
2302 } while (!torture_must_stop());
2303 torture_kthread_stopping("rcu_torture_stats");
2304 return 0;
2305 }
2306
2307 /* Test mem_dump_obj() and friends. */
2308 static void rcu_torture_mem_dump_obj(void)
2309 {
2310 struct rcu_head *rhp;
2311 struct kmem_cache *kcp;
2312 static int z;
2313
2314 kcp = kmem_cache_create("rcuscale", 136, 8, SLAB_STORE_USER, NULL);
2315 if (WARN_ON_ONCE(!kcp))
2316 return;
2317 rhp = kmem_cache_alloc(kcp, GFP_KERNEL);
2318 if (WARN_ON_ONCE(!rhp)) {
2319 kmem_cache_destroy(kcp);
2320 return;
2321 }
2322 pr_alert("mem_dump_obj() slab test: rcu_torture_stats = %px, &rhp = %px, rhp = %px, &z = %px\n", stats_task, &rhp, rhp, &z);
2323 pr_alert("mem_dump_obj(ZERO_SIZE_PTR):");
2324 mem_dump_obj(ZERO_SIZE_PTR);
2325 pr_alert("mem_dump_obj(NULL):");
2326 mem_dump_obj(NULL);
2327 pr_alert("mem_dump_obj(%px):", &rhp);
2328 mem_dump_obj(&rhp);
2329 pr_alert("mem_dump_obj(%px):", rhp);
2330 mem_dump_obj(rhp);
2331 pr_alert("mem_dump_obj(%px):", &rhp->func);
2332 mem_dump_obj(&rhp->func);
2333 pr_alert("mem_dump_obj(%px):", &z);
2334 mem_dump_obj(&z);
2335 kmem_cache_free(kcp, rhp);
2336 kmem_cache_destroy(kcp);
2337 rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
2338 if (WARN_ON_ONCE(!rhp))
2339 return;
2340 pr_alert("mem_dump_obj() kmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2341 pr_alert("mem_dump_obj(kmalloc %px):", rhp);
2342 mem_dump_obj(rhp);
2343 pr_alert("mem_dump_obj(kmalloc %px):", &rhp->func);
2344 mem_dump_obj(&rhp->func);
2345 kfree(rhp);
2346 rhp = vmalloc(4096);
2347 if (WARN_ON_ONCE(!rhp))
2348 return;
2349 pr_alert("mem_dump_obj() vmalloc test: rcu_torture_stats = %px, &rhp = %px, rhp = %px\n", stats_task, &rhp, rhp);
2350 pr_alert("mem_dump_obj(vmalloc %px):", rhp);
2351 mem_dump_obj(rhp);
2352 pr_alert("mem_dump_obj(vmalloc %px):", &rhp->func);
2353 mem_dump_obj(&rhp->func);
2354 vfree(rhp);
2355 }
2356
2357 static void
2358 rcu_torture_print_module_parms(struct rcu_torture_ops *cur_ops, const char *tag)
2359 {
2360 pr_alert("%s" TORTURE_FLAG
2361 "--- %s: nreaders=%d nfakewriters=%d "
2362 "stat_interval=%d verbose=%d test_no_idle_hz=%d "
2363 "shuffle_interval=%d stutter=%d irqreader=%d "
2364 "fqs_duration=%d fqs_holdoff=%d fqs_stutter=%d "
2365 "test_boost=%d/%d test_boost_interval=%d "
2366 "test_boost_duration=%d shutdown_secs=%d "
2367 "stall_cpu=%d stall_cpu_holdoff=%d stall_cpu_irqsoff=%d "
2368 "stall_cpu_block=%d "
2369 "n_barrier_cbs=%d "
2370 "onoff_interval=%d onoff_holdoff=%d "
2371 "read_exit_delay=%d read_exit_burst=%d "
2372 "nocbs_nthreads=%d nocbs_toggle=%d "
2373 "test_nmis=%d\n",
2374 torture_type, tag, nrealreaders, nfakewriters,
2375 stat_interval, verbose, test_no_idle_hz, shuffle_interval,
2376 stutter, irqreader, fqs_duration, fqs_holdoff, fqs_stutter,
2377 test_boost, cur_ops->can_boost,
2378 test_boost_interval, test_boost_duration, shutdown_secs,
2379 stall_cpu, stall_cpu_holdoff, stall_cpu_irqsoff,
2380 stall_cpu_block,
2381 n_barrier_cbs,
2382 onoff_interval, onoff_holdoff,
2383 read_exit_delay, read_exit_burst,
2384 nocbs_nthreads, nocbs_toggle,
2385 test_nmis);
2386 }
2387
2388 static int rcutorture_booster_cleanup(unsigned int cpu)
2389 {
2390 struct task_struct *t;
2391
2392 if (boost_tasks[cpu] == NULL)
2393 return 0;
2394 mutex_lock(&boost_mutex);
2395 t = boost_tasks[cpu];
2396 boost_tasks[cpu] = NULL;
2397 rcu_torture_enable_rt_throttle();
2398 mutex_unlock(&boost_mutex);
2399
2400 /* This must be outside of the mutex, otherwise deadlock! */
2401 torture_stop_kthread(rcu_torture_boost, t);
2402 return 0;
2403 }
2404
2405 static int rcutorture_booster_init(unsigned int cpu)
2406 {
2407 int retval;
2408
2409 if (boost_tasks[cpu] != NULL)
2410 return 0; /* Already created, nothing more to do. */
2411
2412 // Testing RCU priority boosting requires that rcutorture do
2413 // some serious abuse. Counter this by running ksoftirqd
2414 // at higher priority.
2415 if (IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)) {
2416 struct sched_param sp;
2417 struct task_struct *t;
2418
2419 t = per_cpu(ksoftirqd, cpu);
2420 WARN_ON_ONCE(!t);
2421 sp.sched_priority = 2;
2422 sched_setscheduler_nocheck(t, SCHED_FIFO, &sp);
2423 }
2424
2425 /* Don't allow time recalculation while creating a new task. */
2426 mutex_lock(&boost_mutex);
2427 rcu_torture_disable_rt_throttle();
2428 VERBOSE_TOROUT_STRING("Creating rcu_torture_boost task");
2429 boost_tasks[cpu] = kthread_run_on_cpu(rcu_torture_boost, NULL,
2430 cpu, "rcu_torture_boost_%u");
2431 if (IS_ERR(boost_tasks[cpu])) {
2432 retval = PTR_ERR(boost_tasks[cpu]);
2433 VERBOSE_TOROUT_STRING("rcu_torture_boost task create failed");
2434 n_rcu_torture_boost_ktrerror++;
2435 boost_tasks[cpu] = NULL;
2436 mutex_unlock(&boost_mutex);
2437 return retval;
2438 }
2439 mutex_unlock(&boost_mutex);
2440 return 0;
2441 }
2442
2443 static int rcu_torture_stall_nf(struct notifier_block *nb, unsigned long v, void *ptr)
2444 {
2445 pr_info("%s: v=%lu, duration=%lu.\n", __func__, v, (unsigned long)ptr);
2446 return NOTIFY_OK;
2447 }
2448
2449 static struct notifier_block rcu_torture_stall_block = {
2450 .notifier_call = rcu_torture_stall_nf,
2451 };
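
/*
 * Illustrative sketch, not rcutorture code: how a client hooks the RCU
 * CPU-stall notifier chain, as rcu_torture_stall() does below. Error
 * handling is trimmed and the function name is hypothetical.
 */
static void __maybe_unused stall_notifier_example(void)
{
	if (!rcu_stall_chain_notifier_register(&rcu_torture_stall_block)) {
		/* Stall warnings now also invoke rcu_torture_stall_nf(). */
		rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
	}
}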
2452
2453 /*
2454 * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
2455 * induces a CPU stall for the time specified by stall_cpu. If a new
2456 * stall test is added, stallsdone in rcu_torture_writer() must be adjusted.
2457 */
2458 static int rcu_torture_stall(void *args)
2459 {
2460 int idx;
2461 int ret;
2462 unsigned long stop_at;
2463
2464 VERBOSE_TOROUT_STRING("rcu_torture_stall task started");
2465 if (rcu_cpu_stall_notifiers) {
2466 ret = rcu_stall_chain_notifier_register(&rcu_torture_stall_block);
2467 if (ret)
2468 pr_info("%s: rcu_stall_chain_notifier_register() returned %d, %sexpected.\n",
2469 __func__, ret, !IS_ENABLED(CONFIG_RCU_STALL_COMMON) ? "un" : "");
2470 }
2471 if (stall_cpu_holdoff > 0) {
2472 VERBOSE_TOROUT_STRING("rcu_torture_stall begin holdoff");
2473 schedule_timeout_interruptible(stall_cpu_holdoff * HZ);
2474 VERBOSE_TOROUT_STRING("rcu_torture_stall end holdoff");
2475 }
2476 if (!kthread_should_stop() && stall_gp_kthread > 0) {
2477 VERBOSE_TOROUT_STRING("rcu_torture_stall begin GP stall");
2478 rcu_gp_set_torture_wait(stall_gp_kthread * HZ);
2479 for (idx = 0; idx < stall_gp_kthread + 2; idx++) {
2480 if (kthread_should_stop())
2481 break;
2482 schedule_timeout_uninterruptible(HZ);
2483 }
2484 }
2485 if (!kthread_should_stop() && stall_cpu > 0) {
2486 VERBOSE_TOROUT_STRING("rcu_torture_stall begin CPU stall");
2487 stop_at = ktime_get_seconds() + stall_cpu;
2488 /* RCU CPU stall is expected behavior in following code. */
2489 idx = cur_ops->readlock();
2490 if (stall_cpu_irqsoff)
2491 local_irq_disable();
2492 else if (!stall_cpu_block)
2493 preempt_disable();
2494 pr_alert("%s start on CPU %d.\n",
2495 __func__, raw_smp_processor_id());
2496 while (ULONG_CMP_LT((unsigned long)ktime_get_seconds(), stop_at) &&
2497 !kthread_should_stop())
2498 if (stall_cpu_block) {
2499 #ifdef CONFIG_PREEMPTION
2500 preempt_schedule();
2501 #else
2502 schedule_timeout_uninterruptible(HZ);
2503 #endif
2504 } else if (stall_no_softlockup) {
2505 touch_softlockup_watchdog();
2506 }
2507 if (stall_cpu_irqsoff)
2508 local_irq_enable();
2509 else if (!stall_cpu_block)
2510 preempt_enable();
2511 cur_ops->readunlock(idx);
2512 }
2513 pr_alert("%s end.\n", __func__);
2514 if (rcu_cpu_stall_notifiers && !ret) {
2515 ret = rcu_stall_chain_notifier_unregister(&rcu_torture_stall_block);
2516 if (ret)
2517 pr_info("%s: rcu_stall_chain_notifier_unregister() returned %d.\n", __func__, ret);
2518 }
2519 torture_shutdown_absorb("rcu_torture_stall");
2520 while (!kthread_should_stop())
2521 schedule_timeout_interruptible(10 * HZ);
2522 return 0;
2523 }
2524
2525 /* Spawn CPU-stall kthread, if stall_cpu specified. */
2526 static int __init rcu_torture_stall_init(void)
2527 {
2528 if (stall_cpu <= 0 && stall_gp_kthread <= 0)
2529 return 0;
2530 return torture_create_kthread(rcu_torture_stall, NULL, stall_task);
2531 }
2532
2533 /* State structure for forward-progress self-propagating RCU callback. */
2534 struct fwd_cb_state {
2535 struct rcu_head rh;
2536 int stop;
2537 };
2538
2539 /*
2540 * Forward-progress self-propagating RCU callback function. Because
2541 * callbacks run from softirq, this function is an implicit RCU read-side
2542 * critical section.
2543 */
2544 static void rcu_torture_fwd_prog_cb(struct rcu_head *rhp)
2545 {
2546 struct fwd_cb_state *fcsp = container_of(rhp, struct fwd_cb_state, rh);
2547
2548 if (READ_ONCE(fcsp->stop)) {
2549 WRITE_ONCE(fcsp->stop, 2);
2550 return;
2551 }
2552 cur_ops->call(&fcsp->rh, rcu_torture_fwd_prog_cb);
2553 }
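
/*
 * Illustrative sketch, not rcutorture code: the life cycle of the
 * self-propagating callback above, as driven by rcu_torture_fwd_prog_nr()
 * later in this file. Assumes ->call, ->sync, and ->cb_barrier are all
 * provided; the function name is hypothetical.
 */
static void __maybe_unused fwd_prog_cb_example(void)
{
	struct fwd_cb_state fcs;

	init_rcu_head_on_stack(&fcs.rh);
	WRITE_ONCE(fcs.stop, 0);
	cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb); /* Start reposting. */
	/* ... let the callback launder itself for a while ... */
	WRITE_ONCE(fcs.stop, 1);	   /* Ask the callback to stop. */
	cur_ops->sync();		   /* Wait for in-flight invocation. */
	cur_ops->cb_barrier();		   /* Wait for the final repost. */
	WARN_ON(READ_ONCE(fcs.stop) != 2); /* Callback acknowledged stop. */
	destroy_rcu_head_on_stack(&fcs.rh);
}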
2554
2555 /* State for continuous-flood RCU callbacks. */
2556 struct rcu_fwd_cb {
2557 struct rcu_head rh;
2558 struct rcu_fwd_cb *rfc_next;
2559 struct rcu_fwd *rfc_rfp;
2560 int rfc_gps;
2561 };
2562
2563 #define MAX_FWD_CB_JIFFIES (8 * HZ) /* Maximum CB test duration. */
2564 #define MIN_FWD_CB_LAUNDERS 3 /* This many CB invocations to count. */
2565 #define MIN_FWD_CBS_LAUNDERED 100 /* Number of counted CBs. */
2566 #define FWD_CBS_HIST_DIV 10 /* Histogram buckets/second. */
2567 #define N_LAUNDERS_HIST (2 * MAX_FWD_CB_JIFFIES / (HZ / FWD_CBS_HIST_DIV))
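
/*
 * Worked example of the arithmetic above: each histogram bucket spans
 * HZ / FWD_CBS_HIST_DIV jiffies (100 milliseconds), and the array covers
 * twice MAX_FWD_CB_JIFFIES, so N_LAUNDERS_HIST works out to
 * 2 * (8 * HZ) / (HZ / 10) = 160 buckets regardless of the HZ setting.
 */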
2568
2569 struct rcu_launder_hist {
2570 long n_launders;
2571 unsigned long launder_gp_seq;
2572 };
2573
2574 struct rcu_fwd {
2575 spinlock_t rcu_fwd_lock;
2576 struct rcu_fwd_cb *rcu_fwd_cb_head;
2577 struct rcu_fwd_cb **rcu_fwd_cb_tail;
2578 long n_launders_cb;
2579 unsigned long rcu_fwd_startat;
2580 struct rcu_launder_hist n_launders_hist[N_LAUNDERS_HIST];
2581 unsigned long rcu_launder_gp_seq_start;
2582 int rcu_fwd_id;
2583 };
2584
2585 static DEFINE_MUTEX(rcu_fwd_mutex);
2586 static struct rcu_fwd *rcu_fwds;
2587 static unsigned long rcu_fwd_seq;
2588 static atomic_long_t rcu_fwd_max_cbs;
2589 static bool rcu_fwd_emergency_stop;
2590
2591 static void rcu_torture_fwd_cb_hist(struct rcu_fwd *rfp)
2592 {
2593 unsigned long gps;
2594 unsigned long gps_old;
2595 int i;
2596 int j;
2597
2598 for (i = ARRAY_SIZE(rfp->n_launders_hist) - 1; i > 0; i--)
2599 if (rfp->n_launders_hist[i].n_launders > 0)
2600 break;
2601 pr_alert("%s: Callback-invocation histogram %d (duration %lu jiffies):",
2602 __func__, rfp->rcu_fwd_id, jiffies - rfp->rcu_fwd_startat);
2603 gps_old = rfp->rcu_launder_gp_seq_start;
2604 for (j = 0; j <= i; j++) {
2605 gps = rfp->n_launders_hist[j].launder_gp_seq;
2606 pr_cont(" %ds/%d: %ld:%ld",
2607 j + 1, FWD_CBS_HIST_DIV,
2608 rfp->n_launders_hist[j].n_launders,
2609 rcutorture_seq_diff(gps, gps_old));
2610 gps_old = gps;
2611 }
2612 pr_cont("\n");
2613 }
2614
2615 /* Callback function for continuous-flood RCU callbacks. */
2616 static void rcu_torture_fwd_cb_cr(struct rcu_head *rhp)
2617 {
2618 unsigned long flags;
2619 int i;
2620 struct rcu_fwd_cb *rfcp = container_of(rhp, struct rcu_fwd_cb, rh);
2621 struct rcu_fwd_cb **rfcpp;
2622 struct rcu_fwd *rfp = rfcp->rfc_rfp;
2623
2624 rfcp->rfc_next = NULL;
2625 rfcp->rfc_gps++;
2626 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2627 rfcpp = rfp->rcu_fwd_cb_tail;
2628 rfp->rcu_fwd_cb_tail = &rfcp->rfc_next;
2629 WRITE_ONCE(*rfcpp, rfcp);
2630 WRITE_ONCE(rfp->n_launders_cb, rfp->n_launders_cb + 1);
2631 i = ((jiffies - rfp->rcu_fwd_startat) / (HZ / FWD_CBS_HIST_DIV));
2632 if (i >= ARRAY_SIZE(rfp->n_launders_hist))
2633 i = ARRAY_SIZE(rfp->n_launders_hist) - 1;
2634 rfp->n_launders_hist[i].n_launders++;
2635 rfp->n_launders_hist[i].launder_gp_seq = cur_ops->get_gp_seq();
2636 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2637 }
2638
2639 // Give the scheduler a chance, even on nohz_full CPUs.
2640 static void rcu_torture_fwd_prog_cond_resched(unsigned long iter)
2641 {
2642 if (IS_ENABLED(CONFIG_PREEMPTION) && IS_ENABLED(CONFIG_NO_HZ_FULL)) {
2643 // Real call_rcu() floods hit userspace, so emulate that.
2644 if (need_resched() || (iter & 0xfff))
2645 schedule();
2646 return;
2647 }
2648 // No userspace emulation: CB invocation throttles call_rcu()
2649 cond_resched();
2650 }
2651
2652 /*
2653 * Free all callbacks on the rcu_fwd_cb_head list, either because the
2654 * test is over or because we hit an OOM event.
2655 */
2656 static unsigned long rcu_torture_fwd_prog_cbfree(struct rcu_fwd *rfp)
2657 {
2658 unsigned long flags;
2659 unsigned long freed = 0;
2660 struct rcu_fwd_cb *rfcp;
2661
2662 for (;;) {
2663 spin_lock_irqsave(&rfp->rcu_fwd_lock, flags);
2664 rfcp = rfp->rcu_fwd_cb_head;
2665 if (!rfcp) {
2666 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2667 break;
2668 }
2669 rfp->rcu_fwd_cb_head = rfcp->rfc_next;
2670 if (!rfp->rcu_fwd_cb_head)
2671 rfp->rcu_fwd_cb_tail = &rfp->rcu_fwd_cb_head;
2672 spin_unlock_irqrestore(&rfp->rcu_fwd_lock, flags);
2673 kfree(rfcp);
2674 freed++;
2675 rcu_torture_fwd_prog_cond_resched(freed);
2676 if (tick_nohz_full_enabled()) {
2677 local_irq_save(flags);
2678 rcu_momentary_dyntick_idle();
2679 local_irq_restore(flags);
2680 }
2681 }
2682 return freed;
2683 }
2684
2685 /* Carry out need_resched()/cond_resched() forward-progress testing. */
2686 static void rcu_torture_fwd_prog_nr(struct rcu_fwd *rfp,
2687 int *tested, int *tested_tries)
2688 {
2689 unsigned long cver;
2690 unsigned long dur;
2691 struct fwd_cb_state fcs;
2692 unsigned long gps;
2693 int idx;
2694 int sd;
2695 int sd4;
2696 bool selfpropcb = false;
2697 unsigned long stopat;
2698 static DEFINE_TORTURE_RANDOM(trs);
2699
2700 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2701 if (!cur_ops->sync)
2702 return; // Cannot do need_resched() forward progress testing without ->sync.
2703 if (cur_ops->call && cur_ops->cb_barrier) {
2704 init_rcu_head_on_stack(&fcs.rh);
2705 selfpropcb = true;
2706 }
2707
2708 /* Tight loop containing cond_resched(). */
2709 atomic_inc(&rcu_fwd_cb_nodelay);
2710 cur_ops->sync(); /* Later readers see above write. */
2711 if (selfpropcb) {
2712 WRITE_ONCE(fcs.stop, 0);
2713 cur_ops->call(&fcs.rh, rcu_torture_fwd_prog_cb);
2714 }
2715 cver = READ_ONCE(rcu_torture_current_version);
2716 gps = cur_ops->get_gp_seq();
2717 sd = cur_ops->stall_dur() + 1;
2718 sd4 = (sd + fwd_progress_div - 1) / fwd_progress_div;
2719 dur = sd4 + torture_random(&trs) % (sd - sd4);
2720 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2721 stopat = rfp->rcu_fwd_startat + dur;
2722 while (time_before(jiffies, stopat) &&
2723 !shutdown_time_arrived() &&
2724 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2725 idx = cur_ops->readlock();
2726 udelay(10);
2727 cur_ops->readunlock(idx);
2728 if (!fwd_progress_need_resched || need_resched())
2729 cond_resched();
2730 }
2731 (*tested_tries)++;
2732 if (!time_before(jiffies, stopat) &&
2733 !shutdown_time_arrived() &&
2734 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2735 (*tested)++;
2736 cver = READ_ONCE(rcu_torture_current_version) - cver;
2737 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2738 WARN_ON(!cver && gps < 2);
2739 pr_alert("%s: %d Duration %ld cver %ld gps %ld\n", __func__,
2740 rfp->rcu_fwd_id, dur, cver, gps);
2741 }
2742 if (selfpropcb) {
2743 WRITE_ONCE(fcs.stop, 1);
2744 cur_ops->sync(); /* Wait for running CB to complete. */
2745 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2746 cur_ops->cb_barrier(); /* Wait for queued callbacks. */
2747 }
2748
2749 if (selfpropcb) {
2750 WARN_ON(READ_ONCE(fcs.stop) != 2);
2751 destroy_rcu_head_on_stack(&fcs.rh);
2752 }
2753 schedule_timeout_uninterruptible(HZ / 10); /* Let kthreads recover. */
2754 atomic_dec(&rcu_fwd_cb_nodelay);
2755 }
2756
2757 /* Carry out call_rcu() forward-progress testing. */
2758 static void rcu_torture_fwd_prog_cr(struct rcu_fwd *rfp)
2759 {
2760 unsigned long cver;
2761 unsigned long flags;
2762 unsigned long gps;
2763 int i;
2764 long n_launders;
2765 long n_launders_cb_snap;
2766 long n_launders_sa;
2767 long n_max_cbs;
2768 long n_max_gps;
2769 struct rcu_fwd_cb *rfcp;
2770 struct rcu_fwd_cb *rfcpn;
2771 unsigned long stopat;
2772 unsigned long stoppedat;
2773
2774 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2775 if (READ_ONCE(rcu_fwd_emergency_stop))
2776 return; /* Get out of the way quickly, no GP wait! */
2777 if (!cur_ops->call)
2778 return; /* Can't do call_rcu() fwd prog without ->call. */
2779
2780 /* Loop continuously posting RCU callbacks. */
2781 atomic_inc(&rcu_fwd_cb_nodelay);
2782 cur_ops->sync(); /* Later readers see above write. */
2783 WRITE_ONCE(rfp->rcu_fwd_startat, jiffies);
2784 stopat = rfp->rcu_fwd_startat + MAX_FWD_CB_JIFFIES;
2785 n_launders = 0;
2786 rfp->n_launders_cb = 0; // Hoist initialization for multi-kthread
2787 n_launders_sa = 0;
2788 n_max_cbs = 0;
2789 n_max_gps = 0;
2790 for (i = 0; i < ARRAY_SIZE(rfp->n_launders_hist); i++)
2791 rfp->n_launders_hist[i].n_launders = 0;
2792 cver = READ_ONCE(rcu_torture_current_version);
2793 gps = cur_ops->get_gp_seq();
2794 rfp->rcu_launder_gp_seq_start = gps;
2795 tick_dep_set_task(current, TICK_DEP_BIT_RCU);
2796 while (time_before(jiffies, stopat) &&
2797 !shutdown_time_arrived() &&
2798 !READ_ONCE(rcu_fwd_emergency_stop) && !torture_must_stop()) {
2799 rfcp = READ_ONCE(rfp->rcu_fwd_cb_head);
2800 rfcpn = NULL;
2801 if (rfcp)
2802 rfcpn = READ_ONCE(rfcp->rfc_next);
2803 if (rfcpn) {
2804 if (rfcp->rfc_gps >= MIN_FWD_CB_LAUNDERS &&
2805 ++n_max_gps >= MIN_FWD_CBS_LAUNDERED)
2806 break;
2807 rfp->rcu_fwd_cb_head = rfcpn;
2808 n_launders++;
2809 n_launders_sa++;
2810 } else if (!cur_ops->cbflood_max || cur_ops->cbflood_max > n_max_cbs) {
2811 rfcp = kmalloc(sizeof(*rfcp), GFP_KERNEL);
2812 if (WARN_ON_ONCE(!rfcp)) {
2813 schedule_timeout_interruptible(1);
2814 continue;
2815 }
2816 n_max_cbs++;
2817 n_launders_sa = 0;
2818 rfcp->rfc_gps = 0;
2819 rfcp->rfc_rfp = rfp;
2820 } else {
2821 rfcp = NULL;
2822 }
2823 if (rfcp)
2824 cur_ops->call(&rfcp->rh, rcu_torture_fwd_cb_cr);
2825 rcu_torture_fwd_prog_cond_resched(n_launders + n_max_cbs);
2826 if (tick_nohz_full_enabled()) {
2827 local_irq_save(flags);
2828 rcu_momentary_dyntick_idle();
2829 local_irq_restore(flags);
2830 }
2831 }
2832 stoppedat = jiffies;
2833 n_launders_cb_snap = READ_ONCE(rfp->n_launders_cb);
2834 cver = READ_ONCE(rcu_torture_current_version) - cver;
2835 gps = rcutorture_seq_diff(cur_ops->get_gp_seq(), gps);
2836 pr_alert("%s: Waiting for CBs: %pS() %d\n", __func__, cur_ops->cb_barrier, rfp->rcu_fwd_id);
2837 cur_ops->cb_barrier(); /* Wait for callbacks to be invoked. */
2838 (void)rcu_torture_fwd_prog_cbfree(rfp);
2839
2840 if (!torture_must_stop() && !READ_ONCE(rcu_fwd_emergency_stop) &&
2841 !shutdown_time_arrived()) {
2842 if (WARN_ON(n_max_gps < MIN_FWD_CBS_LAUNDERED) && cur_ops->gp_kthread_dbg)
2843 cur_ops->gp_kthread_dbg();
2844 pr_alert("%s Duration %lu barrier: %lu pending %ld n_launders: %ld n_launders_sa: %ld n_max_gps: %ld n_max_cbs: %ld cver %ld gps %ld #online %u\n",
2845 __func__,
2846 stoppedat - rfp->rcu_fwd_startat, jiffies - stoppedat,
2847 n_launders + n_max_cbs - n_launders_cb_snap,
2848 n_launders, n_launders_sa,
2849 n_max_gps, n_max_cbs, cver, gps, num_online_cpus());
2850 atomic_long_add(n_max_cbs, &rcu_fwd_max_cbs);
2851 mutex_lock(&rcu_fwd_mutex); // Serialize histograms.
2852 rcu_torture_fwd_cb_hist(rfp);
2853 mutex_unlock(&rcu_fwd_mutex);
2854 }
2855 schedule_timeout_uninterruptible(HZ); /* Let CBs drain. */
2856 tick_dep_clear_task(current, TICK_DEP_BIT_RCU);
2857 atomic_dec(&rcu_fwd_cb_nodelay);
2858 }
2859
2860
2861 /*
2862 * OOM notifier, but this only prints diagnostic information for the
2863 * current forward-progress test.
2864 */
2865 static int rcutorture_oom_notify(struct notifier_block *self,
2866 unsigned long notused, void *nfreed)
2867 {
2868 int i;
2869 long ncbs;
2870 struct rcu_fwd *rfp;
2871
2872 mutex_lock(&rcu_fwd_mutex);
2873 rfp = rcu_fwds;
2874 if (!rfp) {
2875 mutex_unlock(&rcu_fwd_mutex);
2876 return NOTIFY_OK;
2877 }
2878 WARN(1, "%s invoked upon OOM during forward-progress testing.\n",
2879 __func__);
2880 for (i = 0; i < fwd_progress; i++) {
2881 rcu_torture_fwd_cb_hist(&rfp[i]);
2882 rcu_fwd_progress_check(1 + (jiffies - READ_ONCE(rfp[i].rcu_fwd_startat)) / 2);
2883 }
2884 WRITE_ONCE(rcu_fwd_emergency_stop, true);
2885 smp_mb(); /* Emergency stop before free and wait to avoid hangs. */
2886 ncbs = 0;
2887 for (i = 0; i < fwd_progress; i++)
2888 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2889 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2890 cur_ops->cb_barrier();
2891 ncbs = 0;
2892 for (i = 0; i < fwd_progress; i++)
2893 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2894 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2895 cur_ops->cb_barrier();
2896 ncbs = 0;
2897 for (i = 0; i < fwd_progress; i++)
2898 ncbs += rcu_torture_fwd_prog_cbfree(&rfp[i]);
2899 pr_info("%s: Freed %lu RCU callbacks.\n", __func__, ncbs);
2900 smp_mb(); /* Frees before return to avoid redoing OOM. */
2901 (*(unsigned long *)nfreed)++; /* Forward progress CBs freed! */
2902 pr_info("%s returning after OOM processing.\n", __func__);
2903 mutex_unlock(&rcu_fwd_mutex);
2904 return NOTIFY_OK;
2905 }
2906
2907 static struct notifier_block rcutorture_oom_nb = {
2908 .notifier_call = rcutorture_oom_notify
2909 };
2910
2911 /* Carry out grace-period forward-progress testing. */
2912 static int rcu_torture_fwd_prog(void *args)
2913 {
2914 bool firsttime = true;
2915 long max_cbs;
2916 int oldnice = task_nice(current);
2917 unsigned long oldseq = READ_ONCE(rcu_fwd_seq);
2918 struct rcu_fwd *rfp = args;
2919 int tested = 0;
2920 int tested_tries = 0;
2921
2922 VERBOSE_TOROUT_STRING("rcu_torture_fwd_progress task started");
2923 rcu_bind_current_to_nocb();
2924 if (!IS_ENABLED(CONFIG_SMP) || !IS_ENABLED(CONFIG_RCU_BOOST))
2925 set_user_nice(current, MAX_NICE);
2926 do {
2927 if (!rfp->rcu_fwd_id) {
2928 schedule_timeout_interruptible(fwd_progress_holdoff * HZ);
2929 WRITE_ONCE(rcu_fwd_emergency_stop, false);
2930 if (!firsttime) {
2931 max_cbs = atomic_long_xchg(&rcu_fwd_max_cbs, 0);
2932 pr_alert("%s n_max_cbs: %ld\n", __func__, max_cbs);
2933 }
2934 firsttime = false;
2935 WRITE_ONCE(rcu_fwd_seq, rcu_fwd_seq + 1);
2936 } else {
2937 while (READ_ONCE(rcu_fwd_seq) == oldseq && !torture_must_stop())
2938 schedule_timeout_interruptible(HZ / 20);
2939 oldseq = READ_ONCE(rcu_fwd_seq);
2940 }
2941 pr_alert("%s: Starting forward-progress test %d\n", __func__, rfp->rcu_fwd_id);
2942 if (rcu_inkernel_boot_has_ended() && torture_num_online_cpus() > rfp->rcu_fwd_id)
2943 rcu_torture_fwd_prog_cr(rfp);
2944 if ((cur_ops->stall_dur && cur_ops->stall_dur() > 0) &&
2945 (!IS_ENABLED(CONFIG_TINY_RCU) ||
2946 (rcu_inkernel_boot_has_ended() &&
2947 torture_num_online_cpus() > rfp->rcu_fwd_id)))
2948 rcu_torture_fwd_prog_nr(rfp, &tested, &tested_tries);
2949
2950 /* Avoid slow periods, better to test when busy. */
2951 if (stutter_wait("rcu_torture_fwd_prog"))
2952 sched_set_normal(current, oldnice);
2953 } while (!torture_must_stop());
2954 /* Short runs might not contain a valid forward-progress attempt. */
2955 if (!rfp->rcu_fwd_id) {
2956 WARN_ON(!tested && tested_tries >= 5);
2957 pr_alert("%s: tested %d tested_tries %d\n", __func__, tested, tested_tries);
2958 }
2959 torture_kthread_stopping("rcu_torture_fwd_prog");
2960 return 0;
2961 }
2962
2963 /* If forward-progress checking is requested and feasible, spawn the thread. */
2964 static int __init rcu_torture_fwd_prog_init(void)
2965 {
2966 int i;
2967 int ret = 0;
2968 struct rcu_fwd *rfp;
2969
2970 if (!fwd_progress)
2971 return 0; /* Not requested, so don't do it. */
2972 if (fwd_progress >= nr_cpu_ids) {
2973 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Limiting fwd_progress to # CPUs.\n");
2974 fwd_progress = nr_cpu_ids;
2975 } else if (fwd_progress < 0) {
2976 fwd_progress = nr_cpu_ids;
2977 }
2978 if ((!cur_ops->sync && !cur_ops->call) ||
2979 (!cur_ops->cbflood_max && (!cur_ops->stall_dur || cur_ops->stall_dur() <= 0)) ||
2980 cur_ops == &rcu_busted_ops) {
2981 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, unsupported by RCU flavor under test");
2982 fwd_progress = 0;
2983 return 0;
2984 }
2985 if (stall_cpu > 0) {
2986 VERBOSE_TOROUT_STRING("rcu_torture_fwd_prog_init: Disabled, conflicts with CPU-stall testing");
2987 fwd_progress = 0;
2988 if (IS_MODULE(CONFIG_RCU_TORTURE_TEST))
2989 return -EINVAL; /* In module, can fail back to user. */
2990 WARN_ON(1); /* Make sure rcutorture notices conflict. */
2991 return 0;
2992 }
2993 if (fwd_progress_holdoff <= 0)
2994 fwd_progress_holdoff = 1;
2995 if (fwd_progress_div <= 0)
2996 fwd_progress_div = 4;
2997 rfp = kcalloc(fwd_progress, sizeof(*rfp), GFP_KERNEL);
2998 fwd_prog_tasks = kcalloc(fwd_progress, sizeof(*fwd_prog_tasks), GFP_KERNEL);
2999 if (!rfp || !fwd_prog_tasks) {
3000 kfree(rfp);
3001 kfree(fwd_prog_tasks);
3002 fwd_prog_tasks = NULL;
3003 fwd_progress = 0;
3004 return -ENOMEM;
3005 }
3006 for (i = 0; i < fwd_progress; i++) {
3007 spin_lock_init(&rfp[i].rcu_fwd_lock);
3008 rfp[i].rcu_fwd_cb_tail = &rfp[i].rcu_fwd_cb_head;
3009 rfp[i].rcu_fwd_id = i;
3010 }
3011 mutex_lock(&rcu_fwd_mutex);
3012 rcu_fwds = rfp;
3013 mutex_unlock(&rcu_fwd_mutex);
3014 register_oom_notifier(&rcutorture_oom_nb);
3015 for (i = 0; i < fwd_progress; i++) {
3016 ret = torture_create_kthread(rcu_torture_fwd_prog, &rcu_fwds[i], fwd_prog_tasks[i]);
3017 if (ret) {
3018 fwd_progress = i;
3019 return ret;
3020 }
3021 }
3022 return 0;
3023 }
3024
3025 static void rcu_torture_fwd_prog_cleanup(void)
3026 {
3027 int i;
3028 struct rcu_fwd *rfp;
3029
3030 if (!rcu_fwds || !fwd_prog_tasks)
3031 return;
3032 for (i = 0; i < fwd_progress; i++)
3033 torture_stop_kthread(rcu_torture_fwd_prog, fwd_prog_tasks[i]);
3034 unregister_oom_notifier(&rcutorture_oom_nb);
3035 mutex_lock(&rcu_fwd_mutex);
3036 rfp = rcu_fwds;
3037 rcu_fwds = NULL;
3038 mutex_unlock(&rcu_fwd_mutex);
3039 kfree(rfp);
3040 kfree(fwd_prog_tasks);
3041 fwd_prog_tasks = NULL;
3042 }
3043
3044 /* Callback function for RCU barrier testing. */
3045 static void rcu_torture_barrier_cbf(struct rcu_head *rcu)
3046 {
3047 atomic_inc(&barrier_cbs_invoked);
3048 }
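
/*
 * Illustrative sketch, not rcutorture code: the classic reason for a
 * ->cb_barrier() such as rcu_barrier(). Before freeing state that
 * outstanding callbacks might reference (for example at module unload),
 * wait for all previously posted callbacks to be invoked. The function
 * name is hypothetical.
 */
static void __maybe_unused barrier_before_cleanup_example(void)
{
	if (cur_ops->cb_barrier)
		cur_ops->cb_barrier(); /* All prior ->call() CBs have run. */
	/* Now safe to free callback-reachable state. */
}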
3049
3050 /* IPI handler to get callback posted on desired CPU, if online. */
3051 static int rcu_torture_barrier1cb(void *rcu_void)
3052 {
3053 struct rcu_head *rhp = rcu_void;
3054
3055 cur_ops->call(rhp, rcu_torture_barrier_cbf);
3056 return 0;
3057 }
3058
3059 /* kthread function to register callbacks used to test RCU barriers. */
3060 static int rcu_torture_barrier_cbs(void *arg)
3061 {
3062 long myid = (long)arg;
3063 bool lastphase = false;
3064 bool newphase;
3065 struct rcu_head rcu;
3066
3067 init_rcu_head_on_stack(&rcu);
3068 VERBOSE_TOROUT_STRING("rcu_torture_barrier_cbs task started");
3069 set_user_nice(current, MAX_NICE);
3070 do {
3071 wait_event(barrier_cbs_wq[myid],
3072 (newphase =
3073 smp_load_acquire(&barrier_phase)) != lastphase ||
3074 torture_must_stop());
3075 lastphase = newphase;
3076 if (torture_must_stop())
3077 break;
3078 /*
3079 * The above smp_load_acquire() ensures barrier_phase load
3080 * is ordered before the following ->call().
3081 */
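		// If the target CPU is offline, post the callback from
		// this CPU instead.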
		if (smp_call_on_cpu(myid, rcu_torture_barrier1cb, &rcu, 1))
			cur_ops->call(&rcu, rcu_torture_barrier_cbf);

		if (atomic_dec_and_test(&barrier_cbs_count))
			wake_up(&barrier_wq);
	} while (!torture_must_stop());
	if (cur_ops->cb_barrier != NULL)
		cur_ops->cb_barrier();
	destroy_rcu_head_on_stack(&rcu);
	torture_kthread_stopping("rcu_torture_barrier_cbs");
	return 0;
}

/* kthread function to drive and coordinate RCU barrier testing. */
static int rcu_torture_barrier(void *arg)
{
	int i;

	VERBOSE_TOROUT_STRING("rcu_torture_barrier task starting");
	do {
		atomic_set(&barrier_cbs_invoked, 0);
		atomic_set(&barrier_cbs_count, n_barrier_cbs);
		/* Ensure barrier_phase ordered after prior assignments. */
		smp_store_release(&barrier_phase, !barrier_phase);
		for (i = 0; i < n_barrier_cbs; i++)
			wake_up(&barrier_cbs_wq[i]);
		wait_event(barrier_wq,
			   atomic_read(&barrier_cbs_count) == 0 ||
			   torture_must_stop());
		if (torture_must_stop())
			break;
		n_barrier_attempts++;
		cur_ops->cb_barrier(); /* Implies smp_mb() for wait_event(). */
		if (atomic_read(&barrier_cbs_invoked) != n_barrier_cbs) {
			n_rcu_torture_barrier_error++;
			pr_err("barrier_cbs_invoked = %d, n_barrier_cbs = %d\n",
			       atomic_read(&barrier_cbs_invoked),
			       n_barrier_cbs);
			WARN_ON(1);
			// Wait manually for the remaining callbacks
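			// to arrive, warning at most once more if this takes
			// longer than about a second.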
			i = 0;
			do {
				if (WARN_ON(i++ > HZ))
					i = INT_MIN;
				schedule_timeout_interruptible(1);
				cur_ops->cb_barrier();
			} while (atomic_read(&barrier_cbs_invoked) !=
				 n_barrier_cbs &&
				 !torture_must_stop());
			smp_mb(); // Can't trust ordering if broken.
			if (!torture_must_stop())
				pr_err("Recovered: barrier_cbs_invoked = %d\n",
				       atomic_read(&barrier_cbs_invoked));
		} else {
			n_barrier_successes++;
		}
		schedule_timeout_interruptible(HZ / 10);
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_torture_barrier");
	return 0;
}

/* Initialize RCU barrier testing. */
static int rcu_torture_barrier_init(void)
{
	int i;
	int ret;

	if (n_barrier_cbs <= 0)
		return 0;
	if (cur_ops->call == NULL || cur_ops->cb_barrier == NULL) {
		pr_alert("%s" TORTURE_FLAG
			 " Call or barrier ops missing for %s,\n",
			 torture_type, cur_ops->name);
		pr_alert("%s" TORTURE_FLAG
			 " RCU barrier testing omitted from run.\n",
			 torture_type);
		return 0;
	}
	atomic_set(&barrier_cbs_count, 0);
	atomic_set(&barrier_cbs_invoked, 0);
	barrier_cbs_tasks =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_tasks[0]),
			GFP_KERNEL);
	barrier_cbs_wq =
		kcalloc(n_barrier_cbs, sizeof(barrier_cbs_wq[0]), GFP_KERNEL);
	if (barrier_cbs_tasks == NULL || !barrier_cbs_wq)
		return -ENOMEM;
	for (i = 0; i < n_barrier_cbs; i++) {
		init_waitqueue_head(&barrier_cbs_wq[i]);
		ret = torture_create_kthread(rcu_torture_barrier_cbs,
					     (void *)(long)i,
					     barrier_cbs_tasks[i]);
		if (ret)
			return ret;
	}
	return torture_create_kthread(rcu_torture_barrier, NULL, barrier_task);
}

/* Clean up after RCU barrier testing. */
static void rcu_torture_barrier_cleanup(void)
{
	int i;

	torture_stop_kthread(rcu_torture_barrier, barrier_task);
	if (barrier_cbs_tasks != NULL) {
		for (i = 0; i < n_barrier_cbs; i++)
			torture_stop_kthread(rcu_torture_barrier_cbs,
					     barrier_cbs_tasks[i]);
		kfree(barrier_cbs_tasks);
		barrier_cbs_tasks = NULL;
	}
	if (barrier_cbs_wq != NULL) {
		kfree(barrier_cbs_wq);
		barrier_cbs_wq = NULL;
	}
}

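/*
 * Determine whether RCU priority boosting can be tested by this run:
 * boosting must be requested and supported by the flavor under test,
 * polled grace-period interfaces must be available, and the RCU
 * grace-period kthreads must run at realtime priority of at least 2.
 */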
static bool rcu_torture_can_boost(void)
{
	static int boost_warn_once;
	int prio;

	if (!(test_boost == 1 && cur_ops->can_boost) && test_boost != 2)
		return false;
	if (!cur_ops->start_gp_poll || !cur_ops->poll_gp_state)
		return false;

	prio = rcu_get_gp_kthreads_prio();
	if (!prio)
		return false;

	if (prio < 2) {
		if (boost_warn_once == 1)
			return false;

		pr_alert("%s: WARN: RCU kthread priority too low to test boosting. Skipping RCU boost test. Try passing rcutree.kthread_prio > 1 on the kernel command line.\n", KBUILD_MODNAME);
		boost_warn_once = 1;
		return false;
	}

	return true;
}

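// State used to coordinate shutdown of the read-exit parent kthread.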
static bool read_exit_child_stop;
static bool read_exit_child_stopped;
static wait_queue_head_t read_exit_wq;

// Child kthread which just does an rcutorture reader and exits.
static int rcu_torture_read_exit_child(void *trsp_in)
{
	struct torture_random_state *trsp = trsp_in;

	set_user_nice(current, MAX_NICE);
	// Minimize time between reading and exiting.
	while (!kthread_should_stop())
		schedule_timeout_uninterruptible(HZ / 20);
	(void)rcu_torture_one_read(trsp, -1);
	return 0;
}

// Parent kthread which creates and destroys read-exit child kthreads.
static int rcu_torture_read_exit(void *unused)
{
	bool errexit = false;
	int i;
	struct task_struct *tsp;
	DEFINE_TORTURE_RANDOM(trs);

	// Allocate and initialize.
	set_user_nice(current, MAX_NICE);
	VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of test");

	// Each pass through this loop does one read-exit episode.
	do {
		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: Start of episode");
		for (i = 0; i < read_exit_burst; i++) {
			if (READ_ONCE(read_exit_child_stop))
				break;
			stutter_wait("rcu_torture_read_exit");
			// Spawn child.
			tsp = kthread_run(rcu_torture_read_exit_child,
					  &trs, "%s", "rcu_torture_read_exit_child");
			if (IS_ERR(tsp)) {
				TOROUT_ERRSTRING("out of memory");
				errexit = true;
				break;
			}
			cond_resched();
			kthread_stop(tsp);
			n_read_exits++;
		}
		VERBOSE_TOROUT_STRING("rcu_torture_read_exit: End of episode");
		rcu_barrier(); // Wait for task_struct free, avoid OOM.
		i = 0;
		for (; !errexit && !READ_ONCE(read_exit_child_stop) && i < read_exit_delay; i++)
			schedule_timeout_uninterruptible(HZ);
	} while (!errexit && !READ_ONCE(read_exit_child_stop));

	// Clean up and exit.
	smp_store_release(&read_exit_child_stopped, true); // After reaping.
	smp_mb(); // Store before wakeup.
	wake_up(&read_exit_wq);
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(HZ / 20);
	torture_kthread_stopping("rcu_torture_read_exit");
	return 0;
}

static int rcu_torture_read_exit_init(void)
{
	if (read_exit_burst <= 0)
		return 0;
	init_waitqueue_head(&read_exit_wq);
	read_exit_child_stop = false;
	read_exit_child_stopped = false;
	return torture_create_kthread(rcu_torture_read_exit, NULL,
				      read_exit_task);
}

static void rcu_torture_read_exit_cleanup(void)
{
	if (!read_exit_task)
		return;
	WRITE_ONCE(read_exit_child_stop, true);
	smp_mb(); // Above write before wait.
	wait_event(read_exit_wq, smp_load_acquire(&read_exit_child_stopped));
	torture_stop_kthread(rcu_torture_read_exit, read_exit_task);
}

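/*
 * Repeatedly invoke dump_cpu_task() on another CPU, exercising the
 * NMI-backed remote stack-dump path (hence the test_nmis name).
 * Permitted only when rcutorture is built into the kernel.
 */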
static void rcutorture_test_nmis(int n)
{
#if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
	int cpu;
	int dumpcpu;
	int i;

	for (i = 0; i < n; i++) {
		preempt_disable();
		cpu = smp_processor_id();
		dumpcpu = cpu + 1;
		if (dumpcpu >= nr_cpu_ids)
			dumpcpu = 0;
		pr_alert("%s: CPU %d invoking dump_cpu_task(%d)\n", __func__, cpu, dumpcpu);
		dump_cpu_task(dumpcpu);
		preempt_enable();
		schedule_timeout_uninterruptible(15 * HZ);
	}
#else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
	WARN_ONCE(n, "Non-zero rcutorture.test_nmis=%d permitted only when rcutorture is built in.\n", test_nmis);
#endif // #else // #if IS_BUILTIN(CONFIG_RCU_TORTURE_TEST)
}

static enum cpuhp_state rcutor_hp;

static void
rcu_torture_cleanup(void)
{
	int firsttime;
	int flags = 0;
	unsigned long gp_seq = 0;
	int i;

	if (torture_cleanup_begin()) {
		if (cur_ops->cb_barrier != NULL) {
			pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
			cur_ops->cb_barrier();
		}
		if (cur_ops->gp_slow_unregister)
			cur_ops->gp_slow_unregister(NULL);
		return;
	}
	if (!cur_ops) {
		torture_cleanup_end();
		return;
	}

	rcutorture_test_nmis(test_nmis);

	if (cur_ops->gp_kthread_dbg)
		cur_ops->gp_kthread_dbg();
	rcu_torture_read_exit_cleanup();
	rcu_torture_barrier_cleanup();
	rcu_torture_fwd_prog_cleanup();
	torture_stop_kthread(rcu_torture_stall, stall_task);
	torture_stop_kthread(rcu_torture_writer, writer_task);

	if (nocb_tasks) {
		for (i = 0; i < nrealnocbers; i++)
			torture_stop_kthread(rcu_nocb_toggle, nocb_tasks[i]);
		kfree(nocb_tasks);
		nocb_tasks = NULL;
	}

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_torture_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
		reader_tasks = NULL;
	}
	kfree(rcu_torture_reader_mbchk);
	rcu_torture_reader_mbchk = NULL;

	if (fakewriter_tasks) {
		for (i = 0; i < nfakewriters; i++)
			torture_stop_kthread(rcu_torture_fakewriter,
					     fakewriter_tasks[i]);
		kfree(fakewriter_tasks);
		fakewriter_tasks = NULL;
	}

	if (cur_ops->get_gp_data)
		cur_ops->get_gp_data(&flags, &gp_seq);
	pr_alert("%s: End-test grace-period state: g%ld f%#x total-gps=%ld\n",
		 cur_ops->name, (long)gp_seq, flags,
		 rcutorture_seq_diff(gp_seq, start_gp_seq));
	torture_stop_kthread(rcu_torture_stats, stats_task);
	torture_stop_kthread(rcu_torture_fqs, fqs_task);
	if (rcu_torture_can_boost() && rcutor_hp >= 0)
		cpuhp_remove_state(rcutor_hp);

	/*
	 * Wait for all RCU callbacks to fire, then do torture-type-specific
	 * cleanup operations.
	 */
	if (cur_ops->cb_barrier != NULL) {
		pr_info("%s: Invoking %pS().\n", __func__, cur_ops->cb_barrier);
		cur_ops->cb_barrier();
	}
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	rcu_torture_mem_dump_obj();

	rcu_torture_stats_print(); /* -After- the stats thread is stopped! */

	if (err_segs_recorded) {
		pr_alert("Failure/close-call rcutorture reader segments:\n");
		if (rt_read_nsegs == 0)
			pr_alert("\t: No segments recorded!!!\n");
		firsttime = 1;
		for (i = 0; i < rt_read_nsegs; i++) {
			pr_alert("\t%d: %#x ", i, err_segs[i].rt_readstate);
			if (err_segs[i].rt_delay_jiffies != 0) {
				pr_cont("%s%ldjiffies", firsttime ? "" : "+",
					err_segs[i].rt_delay_jiffies);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_ms != 0) {
				pr_cont("%s%ldms", firsttime ? "" : "+",
					err_segs[i].rt_delay_ms);
				firsttime = 0;
			}
			if (err_segs[i].rt_delay_us != 0) {
				pr_cont("%s%ldus", firsttime ? "" : "+",
					err_segs[i].rt_delay_us);
				firsttime = 0;
			}
			pr_cont("%s\n",
				err_segs[i].rt_preempted ? "preempted" : "");

		}
	}
	if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
		rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
	else if (torture_onoff_failures())
		rcu_torture_print_module_parms(cur_ops,
					       "End of test: RCU_HOTPLUG");
	else
		rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
	torture_cleanup_end();
	if (cur_ops->gp_slow_unregister)
		cur_ops->gp_slow_unregister(NULL);
}

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
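/*
 * Callback that does nothing, used as the benign half of each duplicate
 * call_rcu() pair in rcu_test_debug_objects() below.
 */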
static void rcu_torture_leak_cb(struct rcu_head *rhp)
{
}

static void rcu_torture_err_cb(struct rcu_head *rhp)
{
	/*
	 * This -might- happen due to race conditions, but is unlikely.
	 * The scenario that leads to this happening is that the
	 * first of the pair of duplicate callbacks is queued,
	 * someone else starts a grace period that includes that
	 * callback, then the second of the pair must wait for the
	 * next grace period. Unlikely, but can happen. If it
	 * does happen, the debug-objects subsystem won't have splatted.
	 */
	pr_alert("%s: duplicated callback was invoked.\n", KBUILD_MODNAME);
}
#endif /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/*
 * Verify that double-free causes debug-objects to complain, but only
 * if CONFIG_DEBUG_OBJECTS_RCU_HEAD=y. Otherwise, say that the test
 * cannot be carried out.
 */
static void rcu_test_debug_objects(void)
{
#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
	struct rcu_head rh1;
	struct rcu_head rh2;
	struct rcu_head *rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);

	init_rcu_head_on_stack(&rh1);
	init_rcu_head_on_stack(&rh2);
	pr_alert("%s: WARN: Duplicate call_rcu() test starting.\n", KBUILD_MODNAME);

	/* Try to queue the rh2 pair of callbacks for the same grace period. */
	preempt_disable(); /* Prevent preemption from interrupting test. */
	rcu_read_lock(); /* Make it impossible to finish a grace period. */
	call_rcu_hurry(&rh1, rcu_torture_leak_cb); /* Start grace period. */
	local_irq_disable(); /* Make it harder to start a new grace period. */
	call_rcu_hurry(&rh2, rcu_torture_leak_cb);
	call_rcu_hurry(&rh2, rcu_torture_err_cb); /* Duplicate callback. */
	if (rhp) {
		call_rcu_hurry(rhp, rcu_torture_leak_cb);
		call_rcu_hurry(rhp, rcu_torture_err_cb); /* Another duplicate callback. */
	}
	local_irq_enable();
	rcu_read_unlock();
	preempt_enable();

	/* Wait for them all to get done so we can safely return. */
	rcu_barrier();
	pr_alert("%s: WARN: Duplicate call_rcu() test complete.\n", KBUILD_MODNAME);
	destroy_rcu_head_on_stack(&rh1);
	destroy_rcu_head_on_stack(&rh2);
	kfree(rhp);
#else /* #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
	pr_alert("%s: !CONFIG_DEBUG_OBJECTS_RCU_HEAD, not testing duplicate call_rcu()\n", KBUILD_MODNAME);
#endif /* #else #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD */
}

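/*
 * Synchronization callback handed to torture_onoff_init() below.  To
 * avoid slowing CPU-hotplug operations too much, it does a synchronous
 * grace period only on every 4096th invocation.
 */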
static void rcutorture_sync(void)
{
	static unsigned long n;

	if (cur_ops->sync && !(++n & 0xfff))
		cur_ops->sync();
}

static DEFINE_MUTEX(mut0);
static DEFINE_MUTEX(mut1);
static DEFINE_MUTEX(mut2);
static DEFINE_MUTEX(mut3);
static DEFINE_MUTEX(mut4);
static DEFINE_MUTEX(mut5);
static DEFINE_MUTEX(mut6);
static DEFINE_MUTEX(mut7);
static DEFINE_MUTEX(mut8);
static DEFINE_MUTEX(mut9);

static DECLARE_RWSEM(rwsem0);
static DECLARE_RWSEM(rwsem1);
static DECLARE_RWSEM(rwsem2);
static DECLARE_RWSEM(rwsem3);
static DECLARE_RWSEM(rwsem4);
static DECLARE_RWSEM(rwsem5);
static DECLARE_RWSEM(rwsem6);
static DECLARE_RWSEM(rwsem7);
static DECLARE_RWSEM(rwsem8);
static DECLARE_RWSEM(rwsem9);

DEFINE_STATIC_SRCU(srcu0);
DEFINE_STATIC_SRCU(srcu1);
DEFINE_STATIC_SRCU(srcu2);
DEFINE_STATIC_SRCU(srcu3);
DEFINE_STATIC_SRCU(srcu4);
DEFINE_STATIC_SRCU(srcu5);
DEFINE_STATIC_SRCU(srcu6);
DEFINE_STATIC_SRCU(srcu7);
DEFINE_STATIC_SRCU(srcu8);
DEFINE_STATIC_SRCU(srcu9);

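/*
 * Print one step of the SRCU lockdep test and return the index of the
 * next lock in the cycle, or -1 at the end of a non-deadlocking chain.
 */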
static int srcu_lockdep_next(const char *f, const char *fl, const char *fs, const char *fu, int i,
			     int cyclelen, int deadlock)
{
	int j = i + 1;

	if (j >= cyclelen)
		j = deadlock ? 0 : -1;
	if (j >= 0)
		pr_info("%s: %s(%d), %s(%d), %s(%d)\n", f, fl, i, fs, j, fu, i);
	else
		pr_info("%s: %s(%d), %s(%d)\n", f, fl, i, fu, i);
	return j;
}

// Test lockdep on SRCU-based deadlock scenarios.
static void rcu_torture_init_srcu_lockdep(void)
{
	int cyclelen;
	int deadlock;
	bool err = false;
	int i;
	int j;
	int idx;
	struct mutex *muts[] = { &mut0, &mut1, &mut2, &mut3, &mut4,
				 &mut5, &mut6, &mut7, &mut8, &mut9 };
	struct rw_semaphore *rwsems[] = { &rwsem0, &rwsem1, &rwsem2, &rwsem3, &rwsem4,
					  &rwsem5, &rwsem6, &rwsem7, &rwsem8, &rwsem9 };
	struct srcu_struct *srcus[] = { &srcu0, &srcu1, &srcu2, &srcu3, &srcu4,
					&srcu5, &srcu6, &srcu7, &srcu8, &srcu9 };
	int testtype;

	if (!test_srcu_lockdep)
		return;

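	// Decode test_srcu_lockdep's decimal digits DNNL: deadlock,
	// two-digit test number, and cycle length.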
	deadlock = test_srcu_lockdep / 1000;
	testtype = (test_srcu_lockdep / 10) % 100;
	cyclelen = test_srcu_lockdep % 10;
	WARN_ON_ONCE(ARRAY_SIZE(muts) != ARRAY_SIZE(srcus));
	if (WARN_ONCE(deadlock != !!deadlock,
		      "%s: test_srcu_lockdep=%d and deadlock digit %d must be zero or one.\n",
		      __func__, test_srcu_lockdep, deadlock))
		err = true;
	if (WARN_ONCE(cyclelen <= 0,
		      "%s: test_srcu_lockdep=%d and cycle-length digit %d must be greater than zero.\n",
		      __func__, test_srcu_lockdep, cyclelen))
		err = true;
	if (err)
		goto err_out;

	if (testtype == 0) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		if (deadlock && cyclelen == 1)
			pr_info("%s: Expect hang.\n", __func__);
		for (i = 0; i < cyclelen; i++) {
			j = srcu_lockdep_next(__func__, "srcu_read_lock", "synchronize_srcu",
					      "srcu_read_unlock", i, cyclelen, deadlock);
			idx = srcu_read_lock(srcus[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			srcu_read_unlock(srcus[i], idx);
		}
		return;
	}

	if (testtype == 1) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU/mutex %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		for (i = 0; i < cyclelen; i++) {
			pr_info("%s: srcu_read_lock(%d), mutex_lock(%d), mutex_unlock(%d), srcu_read_unlock(%d)\n",
				__func__, i, i, i, i);
			idx = srcu_read_lock(srcus[i]);
			mutex_lock(muts[i]);
			mutex_unlock(muts[i]);
			srcu_read_unlock(srcus[i], idx);

			j = srcu_lockdep_next(__func__, "mutex_lock", "synchronize_srcu",
					      "mutex_unlock", i, cyclelen, deadlock);
			mutex_lock(muts[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			mutex_unlock(muts[i]);
		}
		return;
	}

	if (testtype == 2) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU/rwsem %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		for (i = 0; i < cyclelen; i++) {
			pr_info("%s: srcu_read_lock(%d), down_read(%d), up_read(%d), srcu_read_unlock(%d)\n",
				__func__, i, i, i, i);
			idx = srcu_read_lock(srcus[i]);
			down_read(rwsems[i]);
			up_read(rwsems[i]);
			srcu_read_unlock(srcus[i], idx);

			j = srcu_lockdep_next(__func__, "down_write", "synchronize_srcu",
					      "up_write", i, cyclelen, deadlock);
			down_write(rwsems[i]);
			if (j >= 0)
				synchronize_srcu(srcus[j]);
			up_write(rwsems[i]);
		}
		return;
	}

#ifdef CONFIG_TASKS_TRACE_RCU
	if (testtype == 3) {
		pr_info("%s: test_srcu_lockdep = %05d: SRCU and Tasks Trace RCU %d-way %sdeadlock.\n",
			__func__, test_srcu_lockdep, cyclelen, deadlock ? "" : "non-");
		if (deadlock && cyclelen == 1)
			pr_info("%s: Expect hang.\n", __func__);
		for (i = 0; i < cyclelen; i++) {
			char *fl = i == 0 ? "rcu_read_lock_trace" : "srcu_read_lock";
			char *fs = i == cyclelen - 1 ? "synchronize_rcu_tasks_trace"
						     : "synchronize_srcu";
			char *fu = i == 0 ? "rcu_read_unlock_trace" : "srcu_read_unlock";

			j = srcu_lockdep_next(__func__, fl, fs, fu, i, cyclelen, deadlock);
			if (i == 0)
				rcu_read_lock_trace();
			else
				idx = srcu_read_lock(srcus[i]);
			if (j >= 0) {
				if (i == cyclelen - 1)
					synchronize_rcu_tasks_trace();
				else
					synchronize_srcu(srcus[j]);
			}
			if (i == 0)
				rcu_read_unlock_trace();
			else
				srcu_read_unlock(srcus[i], idx);
		}
		return;
	}
#endif // #ifdef CONFIG_TASKS_TRACE_RCU

err_out:
	pr_info("%s: test_srcu_lockdep = %05d does nothing.\n", __func__, test_srcu_lockdep);
	pr_info("%s: test_srcu_lockdep = DNNL.\n", __func__);
	pr_info("%s: D: Deadlock if nonzero.\n", __func__);
	pr_info("%s: NN: Test number, 0=SRCU, 1=SRCU/mutex, 2=SRCU/rwsem, 3=SRCU/Tasks Trace RCU.\n", __func__);
	pr_info("%s: L: Cycle length.\n", __func__);
	if (!IS_ENABLED(CONFIG_TASKS_TRACE_RCU))
		pr_info("%s: NN=3 disallowed because kernel is built with CONFIG_TASKS_TRACE_RCU=n\n", __func__);
}

static int __init
rcu_torture_init(void)
{
	long i;
	int cpu;
	int firsterr = 0;
	int flags = 0;
	unsigned long gp_seq = 0;
	static struct rcu_torture_ops *torture_ops[] = {
		&rcu_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops, &busted_srcud_ops,
		TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
		&trivial_ops,
	};

	if (!torture_init_begin(torture_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the torturer is on the job. */
	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
		cur_ops = torture_ops[i];
		if (strcmp(torture_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(torture_ops)) {
		pr_alert("rcu-torture: invalid torture type: \"%s\"\n",
			 torture_type);
		pr_alert("rcu-torture types:");
		for (i = 0; i < ARRAY_SIZE(torture_ops); i++)
			pr_cont(" %s", torture_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		cur_ops = NULL;
		goto unwind;
	}
	if (cur_ops->fqs == NULL && fqs_duration != 0) {
		pr_alert("rcu-torture: ->fqs NULL and non-zero fqs_duration, fqs disabled.\n");
		fqs_duration = 0;
	}
	if (nocbs_nthreads != 0 && (cur_ops != &rcu_ops ||
				    !IS_ENABLED(CONFIG_RCU_NOCB_CPU))) {
		pr_alert("rcu-torture types: %s and CONFIG_RCU_NOCB_CPU=%d, nocb toggle disabled.\n",
			 cur_ops->name, IS_ENABLED(CONFIG_RCU_NOCB_CPU));
		nocbs_nthreads = 0;
	}
	if (cur_ops->init)
		cur_ops->init();

	rcu_torture_init_srcu_lockdep();

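	// A negative nreaders scales the reader count to the number of
	// online CPUs.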
	if (nreaders >= 0) {
		nrealreaders = nreaders;
	} else {
		nrealreaders = num_online_cpus() - 2 - nreaders;
		if (nrealreaders <= 0)
			nrealreaders = 1;
	}
	rcu_torture_print_module_parms(cur_ops, "Start of test");
	if (cur_ops->get_gp_data)
		cur_ops->get_gp_data(&flags, &gp_seq);
	start_gp_seq = gp_seq;
	pr_alert("%s: Start-test grace-period state: g%ld f%#x\n",
		 cur_ops->name, (long)gp_seq, flags);

	/* Set up the freelist. */

	INIT_LIST_HEAD(&rcu_torture_freelist);
	for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
		rcu_tortures[i].rtort_mbtest = 0;
		list_add_tail(&rcu_tortures[i].rtort_free,
			      &rcu_torture_freelist);
	}

	/* Initialize the statistics so that each run gets its own numbers. */

	rcu_torture_current = NULL;
	rcu_torture_current_version = 0;
	atomic_set(&n_rcu_torture_alloc, 0);
	atomic_set(&n_rcu_torture_alloc_fail, 0);
	atomic_set(&n_rcu_torture_free, 0);
	atomic_set(&n_rcu_torture_mberror, 0);
	atomic_set(&n_rcu_torture_mbchk_fail, 0);
	atomic_set(&n_rcu_torture_mbchk_tries, 0);
	atomic_set(&n_rcu_torture_error, 0);
	n_rcu_torture_barrier_error = 0;
	n_rcu_torture_boost_ktrerror = 0;
	n_rcu_torture_boost_failure = 0;
	n_rcu_torture_boosts = 0;
	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
		atomic_set(&rcu_torture_wcount[i], 0);
	for_each_possible_cpu(cpu) {
		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
			per_cpu(rcu_torture_count, cpu)[i] = 0;
			per_cpu(rcu_torture_batch, cpu)[i] = 0;
		}
	}
	err_segs_recorded = 0;
	rt_read_nsegs = 0;

	/* Start up the kthreads. */

	rcu_torture_write_types();
	firsterr = torture_create_kthread(rcu_torture_writer, NULL,
					  writer_task);
	if (torture_init_error(firsterr))
		goto unwind;
	if (nfakewriters > 0) {
		fakewriter_tasks = kcalloc(nfakewriters,
					   sizeof(fakewriter_tasks[0]),
					   GFP_KERNEL);
		if (fakewriter_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	}
	for (i = 0; i < nfakewriters; i++) {
		firsterr = torture_create_kthread(rcu_torture_fakewriter,
						  NULL, fakewriter_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	rcu_torture_reader_mbchk = kcalloc(nrealreaders, sizeof(*rcu_torture_reader_mbchk),
					   GFP_KERNEL);
	if (!reader_tasks || !rcu_torture_reader_mbchk) {
		TOROUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		rcu_torture_reader_mbchk[i].rtc_chkrdr = -1;
		firsterr = torture_create_kthread(rcu_torture_reader, (void *)i,
						  reader_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	nrealnocbers = nocbs_nthreads;
	if (WARN_ON(nrealnocbers < 0))
		nrealnocbers = 1;
	if (WARN_ON(nocbs_toggle < 0))
		nocbs_toggle = HZ;
	if (nrealnocbers > 0) {
		nocb_tasks = kcalloc(nrealnocbers, sizeof(nocb_tasks[0]), GFP_KERNEL);
		if (nocb_tasks == NULL) {
			TOROUT_ERRSTRING("out of memory");
			firsterr = -ENOMEM;
			goto unwind;
		}
	} else {
		nocb_tasks = NULL;
	}
	for (i = 0; i < nrealnocbers; i++) {
		firsterr = torture_create_kthread(rcu_nocb_toggle, NULL, nocb_tasks[i]);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stat_interval > 0) {
		firsterr = torture_create_kthread(rcu_torture_stats, NULL,
						  stats_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_no_idle_hz && shuffle_interval > 0) {
		firsterr = torture_shuffle_init(shuffle_interval * HZ);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (stutter < 0)
		stutter = 0;
	if (stutter) {
		int t;

		t = cur_ops->stall_dur ? cur_ops->stall_dur() : stutter * HZ;
		firsterr = torture_stutter_init(stutter * HZ, t);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (fqs_duration < 0)
		fqs_duration = 0;
	if (fqs_holdoff < 0)
		fqs_holdoff = 0;
	if (fqs_duration && fqs_holdoff) {
		/* Create the fqs thread */
		firsterr = torture_create_kthread(rcu_torture_fqs, NULL,
						  fqs_task);
		if (torture_init_error(firsterr))
			goto unwind;
	}
	if (test_boost_interval < 1)
		test_boost_interval = 1;
	if (test_boost_duration < 2)
		test_boost_duration = 2;
	if (rcu_torture_can_boost()) {

		boost_starttime = jiffies + test_boost_interval * HZ;

		firsterr = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "RCU_TORTURE",
					     rcutorture_booster_init,
					     rcutorture_booster_cleanup);
		rcutor_hp = firsterr;
		if (torture_init_error(firsterr))
			goto unwind;
	}
	shutdown_jiffies = jiffies + shutdown_secs * HZ;
	firsterr = torture_shutdown_init(shutdown_secs, rcu_torture_cleanup);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = torture_onoff_init(onoff_holdoff * HZ, onoff_interval,
				      rcutorture_sync);
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_stall_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_fwd_prog_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_barrier_init();
	if (torture_init_error(firsterr))
		goto unwind;
	firsterr = rcu_torture_read_exit_init();
	if (torture_init_error(firsterr))
		goto unwind;
	if (object_debug)
		rcu_test_debug_objects();
	torture_init_end();
	if (cur_ops->gp_slow_register && !WARN_ON_ONCE(!cur_ops->gp_slow_unregister))
		cur_ops->gp_slow_register(&rcu_fwd_cb_nodelay);
	return 0;

unwind:
	torture_init_end();
	rcu_torture_cleanup();
	if (shutdown_secs) {
		WARN_ON(!IS_MODULE(CONFIG_RCU_TORTURE_TEST));
		kernel_power_off();
	}
	return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);