kernel/rcu/rcuscale.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based scalability-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/debug.h>

#include "rcu.h"

MODULE_DESCRIPTION("Read-Copy Update module-based scalability-test facility");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define SCALE_FLAG "-scale:"
#define SCALEOUT_STRING(s) \
        pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s)
#define VERBOSE_SCALEOUT_STRING(s) \
        do { if (verbose) pr_alert("%s" SCALE_FLAG " %s\n", scale_type, s); } while (0)
#define SCALEOUT_ERRSTRING(s) \
        pr_alert("%s" SCALE_FLAG "!!! %s\n", scale_type, s)

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.   Specify only the nr_cpus kernel boot parameter.  This will
 *      set both nreaders and nwriters to the value specified by
 *      nr_cpus for a mixed reader/writer test.
 *
 * 2.   Specify the nr_cpus kernel boot parameter, but set
 *      rcuscale.nreaders to zero.  This will set nwriters to the
 *      value specified by nr_cpus for an update-only test.
 *
 * 3.   Specify the nr_cpus kernel boot parameter, but set
 *      rcuscale.nwriters to zero.  This will set nreaders to the
 *      value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 *
 * Note that this test's readers are intended only as a test load for
 * the writers.  The reader scalability statistics will be overly
 * pessimistic due to the per-critical-section interrupt disabling,
 * test-end checks, and the pair of calls through pointers.
 */
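
/*
 * For example (illustrative invocations, not part of the original
 * source): booting with "nr_cpus=8 rcuscale.nreaders=0" would produce
 * an update-only test with eight writers, while "modprobe rcuscale
 * scale_type=srcu nwriters=4" would instead exercise SRCU grace
 * periods with four updaters.
 */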

#ifdef MODULE
# define RCUSCALE_SHUTDOWN 0
#else
# define RCUSCALE_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, minruntime, 0, "Minimum run time (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUSCALE_SHUTDOWN,
              "Shutdown at end of scalability tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
torture_param(int, writer_holdoff_jiffies, 0, "Holdoff (jiffies) between GPs, zero to disable");
torture_param(int, kfree_rcu_test, 0, "Do we run a kfree_rcu() scale test?");
torture_param(int, kfree_mult, 1, "Multiple of kfree_obj size to allocate.");
torture_param(int, kfree_by_call_rcu, 0, "Use call_rcu() to emulate kfree_rcu()?");

static char *scale_type = "rcu";
module_param(scale_type, charp, 0444);
MODULE_PARM_DESC(scale_type, "Type of RCU to scalability-test (rcu, srcu, ...)");

// Structure definitions for custom fixed-per-task allocator.
struct writer_mblock {
        struct rcu_head wmb_rh;
        struct llist_node wmb_node;
        struct writer_freelist *wmb_wfl;
};

struct writer_freelist {
        struct llist_head ws_lhg;
        atomic_t ws_inflight;
        struct llist_head ____cacheline_internodealigned_in_smp ws_lhp;
        struct writer_mblock *ws_mblocks;
};
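
/*
 * Allocator flow (a sketch based on the functions further below): each
 * writer pops blocks only from its own ->ws_lhp, which is private to
 * that task; rcu_scale_free(), which may run from callback context,
 * pushes blocks onto the shared ->ws_lhg; and when ->ws_lhp runs dry,
 * the writer migrates the entire ->ws_lhg over with a single
 * llist_del_all().  This keeps both fast paths lock-free.
 */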

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static bool *writer_done;
static struct writer_freelist *writer_freelists;
static int *writer_n_durations;
static atomic_t n_rcu_scale_reader_started;
static atomic_t n_rcu_scale_writer_started;
static atomic_t n_rcu_scale_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_scale_writer_started;
static u64 t_rcu_scale_writer_finished;
static unsigned long b_rcu_gp_test_started;
static unsigned long b_rcu_gp_test_finished;

#define MAX_MEAS 10000
#define MIN_MEAS 100
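
/*
 * A writer stops advancing its measurement index once it has MAX_MEAS
 * samples, and does not declare itself done until it has at least
 * MIN_MEAS samples and minruntime has elapsed (see rcu_scale_writer()
 * below).
 */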

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_scale_ops {
        int ptype;
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*readunlock)(int idx);
        unsigned long (*get_gp_seq)(void);
        unsigned long (*gp_diff)(unsigned long new, unsigned long old);
        unsigned long (*exp_completed)(void);
        void (*async)(struct rcu_head *head, rcu_callback_t func);
        void (*gp_barrier)(void);
        void (*sync)(void);
        void (*exp_sync)(void);
        struct task_struct *(*rso_gp_kthread)(void);
        void (*stats)(void);
        const char *name;
};
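
/*
 * Several fields are optional: a NULL ->gp_diff falls back to plain
 * subtraction in rcuscale_seq_diff(), a NULL ->stats is simply skipped,
 * and ->async/->gp_barrier matter only when gp_async is specified.
 */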

static struct rcu_scale_ops *cur_ops;

/*
 * Definitions for rcu scalability testing.
 */

static int rcu_scale_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void rcu_scale_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
        return 0;
}

static void rcu_sync_scale_init(void)
{
}

static struct rcu_scale_ops rcu_ops = {
        .ptype          = RCU_FLAVOR,
        .init           = rcu_sync_scale_init,
        .readlock       = rcu_scale_read_lock,
        .readunlock     = rcu_scale_read_unlock,
        .get_gp_seq     = rcu_get_gp_seq,
        .gp_diff        = rcu_seq_diff,
        .exp_completed  = rcu_exp_batches_completed,
        .async          = call_rcu_hurry,
        .gp_barrier     = rcu_barrier,
        .sync           = synchronize_rcu,
        .exp_sync       = synchronize_rcu_expedited,
        .name           = "rcu"
};

/*
 * Definitions for srcu scalability testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_scale);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_scale;

static int srcu_scale_read_lock(void) __acquires(srcu_ctlp)
{
        return srcu_read_lock(srcu_ctlp);
}

static void srcu_scale_read_unlock(int idx) __releases(srcu_ctlp)
{
        srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_scale_completed(void)
{
        return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
        call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
        srcu_barrier(srcu_ctlp);
}

static void srcu_scale_synchronize(void)
{
        synchronize_srcu(srcu_ctlp);
}

static void srcu_scale_stats(void)
{
        srcu_torture_stats_print(srcu_ctlp, scale_type, SCALE_FLAG);
}

static void srcu_scale_synchronize_expedited(void)
{
        synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_scale_ops srcu_ops = {
        .ptype          = SRCU_FLAVOR,
        .init           = rcu_sync_scale_init,
        .readlock       = srcu_scale_read_lock,
        .readunlock     = srcu_scale_read_unlock,
        .get_gp_seq     = srcu_scale_completed,
        .gp_diff        = rcu_seq_diff,
        .exp_completed  = srcu_scale_completed,
        .async          = srcu_call_rcu,
        .gp_barrier     = srcu_rcu_barrier,
        .sync           = srcu_scale_synchronize,
        .exp_sync       = srcu_scale_synchronize_expedited,
        .stats          = srcu_scale_stats,
        .name           = "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_scale_init(void)
{
        srcu_ctlp = &srcud;
        init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_scale_cleanup(void)
{
        cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_scale_ops srcud_ops = {
        .ptype          = SRCU_FLAVOR,
        .init           = srcu_sync_scale_init,
        .cleanup        = srcu_sync_scale_cleanup,
        .readlock       = srcu_scale_read_lock,
        .readunlock     = srcu_scale_read_unlock,
        .get_gp_seq     = srcu_scale_completed,
        .gp_diff        = rcu_seq_diff,
        .exp_completed  = srcu_scale_completed,
        .async          = srcu_call_rcu,
        .gp_barrier     = srcu_rcu_barrier,
        .sync           = srcu_scale_synchronize,
        .exp_sync       = srcu_scale_synchronize_expedited,
        .stats          = srcu_scale_stats,
        .name           = "srcud"
};
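
/*
 * Note that "srcu" uses the statically allocated srcu_ctl_scale defined
 * by DEFINE_STATIC_SRCU() above, while "srcud" points srcu_ctlp at the
 * dynamically initialized srcud, thus also covering the
 * init_srcu_struct()/cleanup_srcu_struct() code paths.
 */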

#ifdef CONFIG_TASKS_RCU

/*
 * Definitions for RCU-tasks scalability testing.
 */

static int tasks_scale_read_lock(void)
{
        return 0;
}

static void tasks_scale_read_unlock(int idx)
{
}

static void rcu_tasks_scale_stats(void)
{
        rcu_tasks_torture_stats_print(scale_type, SCALE_FLAG);
}

static struct rcu_scale_ops tasks_ops = {
        .ptype          = RCU_TASKS_FLAVOR,
        .init           = rcu_sync_scale_init,
        .readlock       = tasks_scale_read_lock,
        .readunlock     = tasks_scale_read_unlock,
        .get_gp_seq     = rcu_no_completed,
        .gp_diff        = rcu_seq_diff,
        .async          = call_rcu_tasks,
        .gp_barrier     = rcu_barrier_tasks,
        .sync           = synchronize_rcu_tasks,
        .exp_sync       = synchronize_rcu_tasks,
        .rso_gp_kthread = get_rcu_tasks_gp_kthread,
        .stats          = IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_scale_stats,
        .name           = "tasks"
};

#define TASKS_OPS &tasks_ops,

#else // #ifdef CONFIG_TASKS_RCU

#define TASKS_OPS

#endif // #else // #ifdef CONFIG_TASKS_RCU

#ifdef CONFIG_TASKS_RUDE_RCU

/*
 * Definitions for RCU-tasks-rude scalability testing.
 */

static int tasks_rude_scale_read_lock(void)
{
        return 0;
}

static void tasks_rude_scale_read_unlock(int idx)
{
}

static void rcu_tasks_rude_scale_stats(void)
{
        rcu_tasks_rude_torture_stats_print(scale_type, SCALE_FLAG);
}

static struct rcu_scale_ops tasks_rude_ops = {
        .ptype          = RCU_TASKS_RUDE_FLAVOR,
        .init           = rcu_sync_scale_init,
        .readlock       = tasks_rude_scale_read_lock,
        .readunlock     = tasks_rude_scale_read_unlock,
        .get_gp_seq     = rcu_no_completed,
        .gp_diff        = rcu_seq_diff,
        .sync           = synchronize_rcu_tasks_rude,
        .exp_sync       = synchronize_rcu_tasks_rude,
        .rso_gp_kthread = get_rcu_tasks_rude_gp_kthread,
        .stats          = IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_rude_scale_stats,
        .name           = "tasks-rude"
};

#define TASKS_RUDE_OPS &tasks_rude_ops,

#else // #ifdef CONFIG_TASKS_RUDE_RCU

#define TASKS_RUDE_OPS

#endif // #else // #ifdef CONFIG_TASKS_RUDE_RCU

#ifdef CONFIG_TASKS_TRACE_RCU

/*
 * Definitions for RCU-tasks-trace scalability testing.
 */

static int tasks_trace_scale_read_lock(void)
{
        rcu_read_lock_trace();
        return 0;
}

static void tasks_trace_scale_read_unlock(int idx)
{
        rcu_read_unlock_trace();
}

static void rcu_tasks_trace_scale_stats(void)
{
        rcu_tasks_trace_torture_stats_print(scale_type, SCALE_FLAG);
}

static struct rcu_scale_ops tasks_tracing_ops = {
        .ptype          = RCU_TASKS_FLAVOR,
        .init           = rcu_sync_scale_init,
        .readlock       = tasks_trace_scale_read_lock,
        .readunlock     = tasks_trace_scale_read_unlock,
        .get_gp_seq     = rcu_no_completed,
        .gp_diff        = rcu_seq_diff,
        .async          = call_rcu_tasks_trace,
        .gp_barrier     = rcu_barrier_tasks_trace,
        .sync           = synchronize_rcu_tasks_trace,
        .exp_sync       = synchronize_rcu_tasks_trace,
        .rso_gp_kthread = get_rcu_tasks_trace_gp_kthread,
        .stats          = IS_ENABLED(CONFIG_TINY_RCU) ? NULL : rcu_tasks_trace_scale_stats,
        .name           = "tasks-tracing"
};

#define TASKS_TRACING_OPS &tasks_tracing_ops,

#else // #ifdef CONFIG_TASKS_TRACE_RCU

#define TASKS_TRACING_OPS

#endif // #else // #ifdef CONFIG_TASKS_TRACE_RCU

static unsigned long rcuscale_seq_diff(unsigned long new, unsigned long old)
{
        if (!cur_ops->gp_diff)
                return new - old;
        return cur_ops->gp_diff(new, old);
}

/*
 * If scalability tests complete, wait for shutdown to commence.
 */
static void rcu_scale_wait_shutdown(void)
{
        cond_resched_tasks_rcu_qs();
        if (atomic_read(&n_rcu_scale_writer_finished) < nrealwriters)
                return;
        while (!torture_must_stop())
                schedule_timeout_uninterruptible(1);
}
/*
 * RCU scalability reader kthread.  Repeatedly does an empty RCU
 * read-side critical section, minimizing update-side interference.
 * However, the point of this test is not to evaluate reader
 * scalability, but instead to serve as a test load for update-side
 * scalability testing.
 */
static int
rcu_scale_reader(void *arg)
{
        unsigned long flags;
        int idx;
        long me = (long)arg;

        VERBOSE_SCALEOUT_STRING("rcu_scale_reader task started");
        set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        set_user_nice(current, MAX_NICE);
        atomic_inc(&n_rcu_scale_reader_started);

        do {
                local_irq_save(flags);
                idx = cur_ops->readlock();
                cur_ops->readunlock(idx);
                local_irq_restore(flags);
                rcu_scale_wait_shutdown();
        } while (!torture_must_stop());
        torture_kthread_stopping("rcu_scale_reader");
        return 0;
}

/*
 * Allocate a writer_mblock structure for the specified rcu_scale_writer
 * task.
 */
static struct writer_mblock *rcu_scale_alloc(long me)
{
        struct llist_node *llnp;
        struct writer_freelist *wflp;
        struct writer_mblock *wmbp;

        if (WARN_ON_ONCE(!writer_freelists))
                return NULL;
        wflp = &writer_freelists[me];
        if (llist_empty(&wflp->ws_lhp)) {
                // ->ws_lhp is private to its rcu_scale_writer task.
                wmbp = container_of(llist_del_all(&wflp->ws_lhg), struct writer_mblock, wmb_node);
                wflp->ws_lhp.first = &wmbp->wmb_node;
        }
        llnp = llist_del_first(&wflp->ws_lhp);
        if (!llnp)
                return NULL;
        return container_of(llnp, struct writer_mblock, wmb_node);
}

/*
 * Free a writer_mblock structure to its rcu_scale_writer task.
 */
static void rcu_scale_free(struct writer_mblock *wmbp)
{
        struct writer_freelist *wflp;

        if (!wmbp)
                return;
        wflp = wmbp->wmb_wfl;
        llist_add(&wmbp->wmb_node, &wflp->ws_lhg);
}

/*
 * Callback function for asynchronous grace periods from rcu_scale_writer().
 */
static void rcu_scale_async_cb(struct rcu_head *rhp)
{
        struct writer_mblock *wmbp = container_of(rhp, struct writer_mblock, wmb_rh);
        struct writer_freelist *wflp = wmbp->wmb_wfl;

        atomic_dec(&wflp->ws_inflight);
        rcu_scale_free(wmbp);
}
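
/*
 * Thus ->ws_inflight counts callbacks posted via ->async() but not yet
 * invoked.  rcu_scale_writer() stops posting and instead waits via
 * ->gp_barrier() whenever this count reaches gp_async_max.
 */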

/*
 * RCU scale writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_scale_writer(void *arg)
{
        int i = 0;
        int i_max;
        unsigned long jdone;
        long me = (long)arg;
        bool selfreport = false;
        bool started = false, done = false, alldone = false;
        u64 t;
        DEFINE_TORTURE_RANDOM(tr);
        u64 *wdp;
        u64 *wdpp = writer_durations[me];
        struct writer_freelist *wflp = &writer_freelists[me];
        struct writer_mblock *wmbp = NULL;

        VERBOSE_SCALEOUT_STRING("rcu_scale_writer task started");
        WARN_ON(!wdpp);
        set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        current->flags |= PF_NO_SETAFFINITY;
        sched_set_fifo_low(current);

        if (holdoff)
                schedule_timeout_idle(holdoff * HZ);

        /*
         * Wait until rcu_end_inkernel_boot() is called for normal GP tests
         * so that RCU is not always expedited for normal GP tests.
         * The system_state test is approximate, but works well in practice.
         */
        while (!gp_exp && system_state != SYSTEM_RUNNING)
                schedule_timeout_uninterruptible(1);

        t = ktime_get_mono_fast_ns();
        if (atomic_inc_return(&n_rcu_scale_writer_started) >= nrealwriters) {
                t_rcu_scale_writer_started = t;
                if (gp_exp) {
                        b_rcu_gp_test_started =
                                cur_ops->exp_completed() / 2;
                } else {
                        b_rcu_gp_test_started = cur_ops->get_gp_seq();
                }
        }

        jdone = jiffies + minruntime * HZ;
        do {
                bool gp_succeeded = false;

                if (writer_holdoff)
                        udelay(writer_holdoff);
                if (writer_holdoff_jiffies)
                        schedule_timeout_idle(torture_random(&tr) % writer_holdoff_jiffies + 1);
                wdp = &wdpp[i];
                *wdp = ktime_get_mono_fast_ns();
                if (gp_async && !WARN_ON_ONCE(!cur_ops->async)) {
                        if (!wmbp)
                                wmbp = rcu_scale_alloc(me);
                        if (wmbp && atomic_read(&wflp->ws_inflight) < gp_async_max) {
                                atomic_inc(&wflp->ws_inflight);
                                cur_ops->async(&wmbp->wmb_rh, rcu_scale_async_cb);
                                wmbp = NULL;
                                gp_succeeded = true;
                        } else if (!kthread_should_stop()) {
                                cur_ops->gp_barrier();
                        } else {
                                rcu_scale_free(wmbp); /* Because we are stopping. */
                                wmbp = NULL;
                        }
                } else if (gp_exp) {
                        cur_ops->exp_sync();
                        gp_succeeded = true;
                } else {
                        cur_ops->sync();
                        gp_succeeded = true;
                }
                t = ktime_get_mono_fast_ns();
                *wdp = t - *wdp;
                i_max = i;
                if (!started &&
                    atomic_read(&n_rcu_scale_writer_started) >= nrealwriters)
                        started = true;
                if (!done && i >= MIN_MEAS && time_after(jiffies, jdone)) {
                        done = true;
                        WRITE_ONCE(writer_done[me], true);
                        sched_set_normal(current, 0);
                        pr_alert("%s%s rcu_scale_writer %ld has %d measurements\n",
                                 scale_type, SCALE_FLAG, me, MIN_MEAS);
                        if (atomic_inc_return(&n_rcu_scale_writer_finished) >=
                            nrealwriters) {
                                schedule_timeout_interruptible(10);
                                rcu_ftrace_dump(DUMP_ALL);
                                SCALEOUT_STRING("Test complete");
                                t_rcu_scale_writer_finished = t;
                                if (gp_exp) {
                                        b_rcu_gp_test_finished =
                                                cur_ops->exp_completed() / 2;
                                } else {
                                        b_rcu_gp_test_finished =
                                                cur_ops->get_gp_seq();
                                }
                                if (shutdown) {
                                        smp_mb(); /* Assign before wake. */
                                        wake_up(&shutdown_wq);
                                }
                        }
                }
                if (done && !alldone &&
                    atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters)
                        alldone = true;
                if (done && !alldone && time_after(jiffies, jdone + HZ * 60)) {
                        static atomic_t dumped;
                        int i;

                        if (!atomic_xchg(&dumped, 1)) {
                                for (i = 0; i < nrealwriters; i++) {
                                        if (writer_done[i])
                                                continue;
                                        pr_info("%s: Task %ld flags writer %d:\n", __func__, me, i);
                                        sched_show_task(writer_tasks[i]);
                                }
                                if (cur_ops->stats)
                                        cur_ops->stats();
                        }
                }
                if (!selfreport && time_after(jiffies, jdone + HZ * (70 + me))) {
                        pr_info("%s: Writer %ld self-report: started %d done %d/%d->%d i %d jdone %lu.\n",
                                __func__, me, started, done, writer_done[me], atomic_read(&n_rcu_scale_writer_finished), i, jiffies - jdone);
                        selfreport = true;
                }
                if (gp_succeeded && started && !alldone && i < MAX_MEAS - 1)
                        i++;
                rcu_scale_wait_shutdown();
        } while (!torture_must_stop());
        if (gp_async && cur_ops->async) {
                rcu_scale_free(wmbp);
                cur_ops->gp_barrier();
        }
        writer_n_durations[me] = i_max + 1;
        torture_kthread_stopping("rcu_scale_writer");
        return 0;
}

static void
rcu_scale_print_module_parms(struct rcu_scale_ops *cur_ops, const char *tag)
{
        pr_alert("%s" SCALE_FLAG
                 "--- %s: gp_async=%d gp_async_max=%d gp_exp=%d holdoff=%d minruntime=%d nreaders=%d nwriters=%d writer_holdoff=%d writer_holdoff_jiffies=%d verbose=%d shutdown=%d\n",
                 scale_type, tag, gp_async, gp_async_max, gp_exp, holdoff, minruntime, nrealreaders, nrealwriters, writer_holdoff, writer_holdoff_jiffies, verbose, shutdown);
}

/*
 * Return the number if non-negative.  If -1, return the number of
 * online CPUs.  If less than -1, return the number of online CPUs
 * plus one plus the (negative) number, so that -2 yields one fewer
 * than the number of CPUs, but always at least one.
 */
static int compute_real(int n)
{
        int nr;

        if (n >= 0) {
                nr = n;
        } else {
                nr = num_online_cpus() + 1 + n;
                if (nr <= 0)
                        nr = 1;
        }
        return nr;
}
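
/*
 * For example, with eight online CPUs: compute_real(4) returns 4,
 * compute_real(-1) returns 8, and compute_real(-3) returns 6.
 */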

/*
 * kfree_rcu() scalability tests: Start a kfree_rcu() loop on all CPUs
 * for a given number of iterations, then measure the total time and
 * the number of grace periods needed for all iterations to complete.
 */

torture_param(int, kfree_nthreads, -1, "Number of threads running loops of kfree_rcu().");
torture_param(int, kfree_alloc_num, 8000, "Number of allocations and frees done in an iteration.");
torture_param(int, kfree_loops, 10, "Number of loops doing kfree_alloc_num allocations and frees.");
torture_param(bool, kfree_rcu_test_double, false, "Do we run a kfree_rcu() double-argument scale test?");
torture_param(bool, kfree_rcu_test_single, false, "Do we run a kfree_rcu() single-argument scale test?");

static struct task_struct **kfree_reader_tasks;
static int kfree_nrealthreads;
static atomic_t n_kfree_scale_thread_started;
static atomic_t n_kfree_scale_thread_ended;
static struct task_struct *kthread_tp;
static u64 kthread_stime;

struct kfree_obj {
        char kfree_obj[8];
        struct rcu_head rh;
};
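
/*
 * Each allocation in kfree_scale_thread() is kfree_mult * sizeof(struct
 * kfree_obj) bytes, so on a typical 64-bit build (16-byte rcu_head) the
 * default kfree_mult=1 yields 24-byte objects.
 */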

/* Used if doing RCU-kfree'ing via call_rcu(). */
static void kfree_call_rcu(struct rcu_head *rh)
{
        struct kfree_obj *obj = container_of(rh, struct kfree_obj, rh);

        kfree(obj);
}

static int
kfree_scale_thread(void *arg)
{
        int i, loop = 0;
        long me = (long)arg;
        struct kfree_obj *alloc_ptr;
        u64 start_time, end_time;
        long long mem_begin, mem_during = 0;
        bool kfree_rcu_test_both;
        DEFINE_TORTURE_RANDOM(tr);

        VERBOSE_SCALEOUT_STRING("kfree_scale_thread task started");
        set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
        set_user_nice(current, MAX_NICE);
        kfree_rcu_test_both = (kfree_rcu_test_single == kfree_rcu_test_double);

        start_time = ktime_get_mono_fast_ns();

        if (atomic_inc_return(&n_kfree_scale_thread_started) >= kfree_nrealthreads) {
                if (gp_exp)
                        b_rcu_gp_test_started = cur_ops->exp_completed() / 2;
                else
                        b_rcu_gp_test_started = cur_ops->get_gp_seq();
        }

        do {
                if (!mem_during) {
                        mem_during = mem_begin = si_mem_available();
                } else if (loop % (kfree_loops / 4) == 0) {
                        mem_during = (mem_during + si_mem_available()) / 2;
                }

                for (i = 0; i < kfree_alloc_num; i++) {
                        alloc_ptr = kmalloc(kfree_mult * sizeof(struct kfree_obj), GFP_KERNEL);
                        if (!alloc_ptr)
                                return -ENOMEM;

                        if (kfree_by_call_rcu) {
                                call_rcu(&(alloc_ptr->rh), kfree_call_rcu);
                                continue;
                        }

                        // By default kfree_rcu_test_single and kfree_rcu_test_double are
                        // initialized to false. If both have the same value (false or true)
                        // both are randomly tested, otherwise only the one with value true
                        // is tested.
                        if ((kfree_rcu_test_single && !kfree_rcu_test_double) ||
                                        (kfree_rcu_test_both && torture_random(&tr) & 0x800))
                                kfree_rcu_mightsleep(alloc_ptr);
                        else
                                kfree_rcu(alloc_ptr, rh);
                }

                cond_resched();
        } while (!torture_must_stop() && ++loop < kfree_loops);

        if (atomic_inc_return(&n_kfree_scale_thread_ended) >= kfree_nrealthreads) {
                end_time = ktime_get_mono_fast_ns();

                if (gp_exp)
                        b_rcu_gp_test_finished = cur_ops->exp_completed() / 2;
                else
                        b_rcu_gp_test_finished = cur_ops->get_gp_seq();

                pr_alert("Total time taken by all kfree'ers: %llu ns, loops: %d, batches: %ld, memory footprint: %lldMB\n",
                       (unsigned long long)(end_time - start_time), kfree_loops,
                       rcuscale_seq_diff(b_rcu_gp_test_finished, b_rcu_gp_test_started),
                       (mem_begin - mem_during) >> (20 - PAGE_SHIFT));

                if (shutdown) {
                        smp_mb(); /* Assign before wake. */
                        wake_up(&shutdown_wq);
                }
        }

        torture_kthread_stopping("kfree_scale_thread");
        return 0;
}

static void
kfree_scale_cleanup(void)
{
        int i;

        if (torture_cleanup_begin())
                return;

        if (kfree_reader_tasks) {
                for (i = 0; i < kfree_nrealthreads; i++)
                        torture_stop_kthread(kfree_scale_thread,
                                             kfree_reader_tasks[i]);
                kfree(kfree_reader_tasks);
                kfree_reader_tasks = NULL;
        }

        torture_cleanup_end();
}

/*
 * Shutdown kthread.  Just waits to be awakened, then shuts down the
 * system.
 */
static int
kfree_scale_shutdown(void *arg)
{
        wait_event_idle(shutdown_wq,
                        atomic_read(&n_kfree_scale_thread_ended) >= kfree_nrealthreads);

        smp_mb(); /* Wake before output. */

        kfree_scale_cleanup();
        kernel_power_off();
        return -EINVAL;
}

// Used if doing RCU-kfree'ing via call_rcu().
static unsigned long jiffies_at_lazy_cb;
static struct rcu_head lazy_test1_rh;
static int rcu_lazy_test1_cb_called;
static void call_rcu_lazy_test1(struct rcu_head *rh)
{
        jiffies_at_lazy_cb = jiffies;
        WRITE_ONCE(rcu_lazy_test1_cb_called, 1);
}

static int __init
kfree_scale_init(void)
{
        int firsterr = 0;
        long i;
        unsigned long jif_start;
        unsigned long orig_jif;

        pr_alert("%s" SCALE_FLAG
                 "--- kfree_rcu_test: kfree_mult=%d kfree_by_call_rcu=%d kfree_nthreads=%d kfree_alloc_num=%d kfree_loops=%d kfree_rcu_test_double=%d kfree_rcu_test_single=%d\n",
                 scale_type, kfree_mult, kfree_by_call_rcu, kfree_nthreads, kfree_alloc_num, kfree_loops, kfree_rcu_test_double, kfree_rcu_test_single);

        // Also, do a quick self-test to ensure laziness is as much as
        // expected.
        if (kfree_by_call_rcu && !IS_ENABLED(CONFIG_RCU_LAZY)) {
                pr_alert("CONFIG_RCU_LAZY is disabled, falling back to kfree_rcu() for delayed RCU kfree'ing\n");
                kfree_by_call_rcu = 0;
        }

        if (kfree_by_call_rcu) {
                /* do a test to check the timeout. */
                orig_jif = rcu_get_jiffies_lazy_flush();

                rcu_set_jiffies_lazy_flush(2 * HZ);
                rcu_barrier();

                jif_start = jiffies;
                jiffies_at_lazy_cb = 0;
                call_rcu(&lazy_test1_rh, call_rcu_lazy_test1);

                smp_cond_load_relaxed(&rcu_lazy_test1_cb_called, VAL == 1);

                rcu_set_jiffies_lazy_flush(orig_jif);

                if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start < 2 * HZ)) {
                        pr_alert("ERROR: call_rcu() CBs are not being lazy as expected!\n");
                        WARN_ON_ONCE(1);
                        return -1;
                }

                if (WARN_ON_ONCE(jiffies_at_lazy_cb - jif_start > 3 * HZ)) {
                        pr_alert("ERROR: call_rcu() CBs are being too lazy!\n");
                        WARN_ON_ONCE(1);
                        return -1;
                }
        }

        kfree_nrealthreads = compute_real(kfree_nthreads);
        /* Start up the kthreads. */
        if (shutdown) {
                init_waitqueue_head(&shutdown_wq);
                firsterr = torture_create_kthread(kfree_scale_shutdown, NULL,
                                                  shutdown_task);
                if (torture_init_error(firsterr))
                        goto unwind;
                schedule_timeout_uninterruptible(1);
        }

        pr_alert("kfree object size=%zu, kfree_by_call_rcu=%d\n",
                        kfree_mult * sizeof(struct kfree_obj),
                        kfree_by_call_rcu);

        kfree_reader_tasks = kcalloc(kfree_nrealthreads, sizeof(kfree_reader_tasks[0]),
                               GFP_KERNEL);
        if (kfree_reader_tasks == NULL) {
                firsterr = -ENOMEM;
                goto unwind;
        }

        for (i = 0; i < kfree_nrealthreads; i++) {
                firsterr = torture_create_kthread(kfree_scale_thread, (void *)i,
                                                  kfree_reader_tasks[i]);
                if (torture_init_error(firsterr))
                        goto unwind;
        }

        while (atomic_read(&n_kfree_scale_thread_started) < kfree_nrealthreads)
                schedule_timeout_uninterruptible(1);

        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        kfree_scale_cleanup();
        return firsterr;
}

static void
rcu_scale_cleanup(void)
{
        int i;
        int j;
        int ngps = 0;
        u64 *wdp;
        u64 *wdpp;

        /*
         * Would like warning at start, but everything is expedited
         * during the mid-boot phase, so have to wait till the end.
         */
        if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
                SCALEOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
        if (rcu_gp_is_normal() && gp_exp)
                SCALEOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
        if (gp_exp && gp_async)
                SCALEOUT_ERRSTRING("No expedited async GPs, so went with async!");

        // If built-in, just report all of the GP kthread's CPU time.
        if (IS_BUILTIN(CONFIG_RCU_SCALE_TEST) && !kthread_tp && cur_ops->rso_gp_kthread)
                kthread_tp = cur_ops->rso_gp_kthread();
        if (kthread_tp) {
                u32 ns;
                u64 us;

                kthread_stime = kthread_tp->stime - kthread_stime;
                us = div_u64_rem(kthread_stime, 1000, &ns);
                pr_info("rcu_scale: Grace-period kthread CPU time: %llu.%03u us\n", us, ns);
                show_rcu_gp_kthreads();
        }
        if (kfree_rcu_test) {
                kfree_scale_cleanup();
                return;
        }

        if (torture_cleanup_begin())
                return;
        if (!cur_ops) {
                torture_cleanup_end();
                return;
        }

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        torture_stop_kthread(rcu_scale_reader,
                                             reader_tasks[i]);
                kfree(reader_tasks);
                reader_tasks = NULL;
        }

        if (writer_tasks) {
                for (i = 0; i < nrealwriters; i++) {
                        torture_stop_kthread(rcu_scale_writer,
                                             writer_tasks[i]);
                        if (!writer_n_durations)
                                continue;
                        j = writer_n_durations[i];
                        pr_alert("%s%s writer %d gps: %d\n",
                                 scale_type, SCALE_FLAG, i, j);
                        ngps += j;
                }
                pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
                         scale_type, SCALE_FLAG,
                         t_rcu_scale_writer_started, t_rcu_scale_writer_finished,
                         t_rcu_scale_writer_finished -
                         t_rcu_scale_writer_started,
                         ngps,
                         rcuscale_seq_diff(b_rcu_gp_test_finished,
                                           b_rcu_gp_test_started));
                for (i = 0; i < nrealwriters; i++) {
                        if (!writer_durations)
                                break;
                        if (!writer_n_durations)
                                continue;
                        wdpp = writer_durations[i];
                        if (!wdpp)
                                continue;
                        for (j = 0; j < writer_n_durations[i]; j++) {
                                wdp = &wdpp[j];
                                pr_alert("%s%s %4d writer-duration: %5d %llu\n",
                                        scale_type, SCALE_FLAG,
                                        i, j, *wdp);
                                if (j % 100 == 0)
                                        schedule_timeout_uninterruptible(1);
                        }
                        kfree(writer_durations[i]);
                        if (writer_freelists) {
                                int ctr = 0;
                                struct llist_node *llnp;
                                struct writer_freelist *wflp = &writer_freelists[i];

                                if (wflp->ws_mblocks) {
                                        llist_for_each(llnp, wflp->ws_lhg.first)
                                                ctr++;
                                        llist_for_each(llnp, wflp->ws_lhp.first)
                                                ctr++;
                                        WARN_ONCE(ctr != gp_async_max,
                                                  "%s: ctr = %d gp_async_max = %d\n",
                                                  __func__, ctr, gp_async_max);
                                        kfree(wflp->ws_mblocks);
                                }
                        }
                }
                kfree(writer_tasks);
                writer_tasks = NULL;
                kfree(writer_durations);
                writer_durations = NULL;
                kfree(writer_n_durations);
                writer_n_durations = NULL;
                kfree(writer_done);
                writer_done = NULL;
                kfree(writer_freelists);
                writer_freelists = NULL;
        }

        /* Do torture-type-specific cleanup operations.  */
        if (cur_ops->cleanup != NULL)
                cur_ops->cleanup();

        torture_cleanup_end();
}

/*
 * RCU scalability shutdown kthread.  Just waits to be awakened, then
 * shuts down the system.
 */
static int
rcu_scale_shutdown(void *arg)
{
        wait_event_idle(shutdown_wq, atomic_read(&n_rcu_scale_writer_finished) >= nrealwriters);
        smp_mb(); /* Wake before output. */
        rcu_scale_cleanup();
        kernel_power_off();
        return -EINVAL;
}

static int __init
rcu_scale_init(void)
{
        int firsterr = 0;
        long i;
        long j;
        static struct rcu_scale_ops *scale_ops[] = {
                &rcu_ops, &srcu_ops, &srcud_ops, TASKS_OPS TASKS_RUDE_OPS TASKS_TRACING_OPS
        };

        if (!torture_init_begin(scale_type, verbose))
                return -EBUSY;

        /* Process args and announce that the scalability'er is on the job. */
        for (i = 0; i < ARRAY_SIZE(scale_ops); i++) {
                cur_ops = scale_ops[i];
                if (strcmp(scale_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(scale_ops)) {
                pr_alert("rcu-scale: invalid scale type: \"%s\"\n", scale_type);
                pr_alert("rcu-scale types:");
                for (i = 0; i < ARRAY_SIZE(scale_ops); i++)
                        pr_cont(" %s", scale_ops[i]->name);
                pr_cont("\n");
                firsterr = -EINVAL;
                cur_ops = NULL;
                goto unwind;
        }
        if (cur_ops->init)
                cur_ops->init();

        if (cur_ops->rso_gp_kthread) {
                kthread_tp = cur_ops->rso_gp_kthread();
                if (kthread_tp)
                        kthread_stime = kthread_tp->stime;
        }
        if (kfree_rcu_test)
                return kfree_scale_init();

        nrealwriters = compute_real(nwriters);
        nrealreaders = compute_real(nreaders);
        atomic_set(&n_rcu_scale_reader_started, 0);
        atomic_set(&n_rcu_scale_writer_started, 0);
        atomic_set(&n_rcu_scale_writer_finished, 0);
        rcu_scale_print_module_parms(cur_ops, "Start of test");

        /* Start up the kthreads. */

        if (shutdown) {
                init_waitqueue_head(&shutdown_wq);
                firsterr = torture_create_kthread(rcu_scale_shutdown, NULL,
                                                  shutdown_task);
                if (torture_init_error(firsterr))
                        goto unwind;
                schedule_timeout_uninterruptible(1);
        }
        reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                SCALEOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                firsterr = torture_create_kthread(rcu_scale_reader, (void *)i,
                                                  reader_tasks[i]);
                if (torture_init_error(firsterr))
                        goto unwind;
        }
        while (atomic_read(&n_rcu_scale_reader_started) < nrealreaders)
                schedule_timeout_uninterruptible(1);
        writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]), GFP_KERNEL);
        writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations), GFP_KERNEL);
        writer_n_durations = kcalloc(nrealwriters, sizeof(*writer_n_durations), GFP_KERNEL);
        writer_done = kcalloc(nrealwriters, sizeof(writer_done[0]), GFP_KERNEL);
        if (gp_async) {
                if (gp_async_max <= 0) {
                        pr_warn("%s: gp_async_max = %d must be greater than zero.\n",
                                __func__, gp_async_max);
                        WARN_ON_ONCE(IS_BUILTIN(CONFIG_RCU_TORTURE_TEST));
                        firsterr = -EINVAL;
                        goto unwind;
                }
                writer_freelists = kcalloc(nrealwriters, sizeof(writer_freelists[0]), GFP_KERNEL);
        }
        if (!writer_tasks || !writer_durations || !writer_n_durations || !writer_done ||
            (gp_async && !writer_freelists)) {
                SCALEOUT_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealwriters; i++) {
                writer_durations[i] =
                        kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
                                GFP_KERNEL);
                if (!writer_durations[i]) {
                        firsterr = -ENOMEM;
                        goto unwind;
                }
                if (writer_freelists) {
                        struct writer_freelist *wflp = &writer_freelists[i];

                        init_llist_head(&wflp->ws_lhg);
                        init_llist_head(&wflp->ws_lhp);
                        wflp->ws_mblocks = kcalloc(gp_async_max, sizeof(wflp->ws_mblocks[0]),
                                                   GFP_KERNEL);
                        if (!wflp->ws_mblocks) {
                                firsterr = -ENOMEM;
                                goto unwind;
                        }
                        for (j = 0; j < gp_async_max; j++) {
                                struct writer_mblock *wmbp = &wflp->ws_mblocks[j];

                                wmbp->wmb_wfl = wflp;
                                llist_add(&wmbp->wmb_node, &wflp->ws_lhp);
                        }
                }
                firsterr = torture_create_kthread(rcu_scale_writer, (void *)i,
                                                  writer_tasks[i]);
                if (torture_init_error(firsterr))
                        goto unwind;
        }
        torture_init_end();
        return 0;

unwind:
        torture_init_end();
        rcu_scale_cleanup();
        if (shutdown) {
                WARN_ON(!IS_MODULE(CONFIG_RCU_SCALE_TEST));
                kernel_power_off();
        }
        return firsterr;
}

module_init(rcu_scale_init);
module_exit(rcu_scale_cleanup);
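
/*
 * One convenient way to run this test (illustrative; assumes the
 * rcutorture scripting shipped in the kernel tree) is something like:
 *
 *      tools/testing/selftests/rcutorture/bin/kvm.sh --torture rcuscale
 *
 * which builds a kernel with this module configured in and collects
 * the per-writer grace-period duration output printed above.
 */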