kernel/rcutorture.c

/*
 * Read-Copy Update module-based torture test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (C) IBM Corporation, 2005, 2006
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 *          Josh Triplett <josh@freedesktop.org>
 *
 * See also:  Documentation/RCU/torture.txt
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <asm/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/byteorder/swabb.h>
#include <linux/stat.h>
#include <linux/srcu.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
              "Josh Triplett <josh@freedesktop.org>");

static int nreaders = -1;       /* # reader threads, defaults to 2*ncpus */
static int nfakewriters = 4;    /* # fake writer threads */
static int stat_interval;       /* Interval between stats, in seconds. */
                                /*  Defaults to "only at end of test". */
static int verbose;             /* Print more debug info. */
static int test_no_idle_hz;     /* Test RCU's support for tickless idle CPUs. */
static int shuffle_interval = 5; /* Interval between shuffles (in sec) */
static char *torture_type = "rcu"; /* What RCU implementation to torture. */

module_param(nreaders, int, 0444);
MODULE_PARM_DESC(nreaders, "Number of RCU reader threads");
module_param(nfakewriters, int, 0444);
MODULE_PARM_DESC(nfakewriters, "Number of RCU fake writer threads");
module_param(stat_interval, int, 0444);
MODULE_PARM_DESC(stat_interval, "Number of seconds between stats printk()s");
module_param(verbose, bool, 0444);
MODULE_PARM_DESC(verbose, "Enable verbose debugging printk()s");
module_param(test_no_idle_hz, bool, 0444);
MODULE_PARM_DESC(test_no_idle_hz, "Test support for tickless idle CPUs");
module_param(shuffle_interval, int, 0444);
MODULE_PARM_DESC(shuffle_interval, "Number of seconds between shuffles");
module_param(torture_type, charp, 0444);
MODULE_PARM_DESC(torture_type, "Type of RCU to torture (rcu, rcu_bh, srcu)");
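/*
 * All of the above are read-only (0444) module parameters, so they must be
 * supplied when the module is loaded.  One illustrative invocation (the
 * values here are chosen purely as an example) would be:
 *
 *      modprobe rcutorture nreaders=4 nfakewriters=2 stat_interval=30 \
 *              torture_type=srcu test_no_idle_hz=1 verbose=1
 *
 * See Documentation/RCU/torture.txt for the full description of each
 * parameter.
 */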

#define TORTURE_FLAG "-torture:"
#define PRINTK_STRING(s) \
        do { printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_STRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG s "\n", torture_type); } while (0)
#define VERBOSE_PRINTK_ERRSTRING(s) \
        do { if (verbose) printk(KERN_ALERT "%s" TORTURE_FLAG "!!! " s "\n", torture_type); } while (0)

static char printk_buf[4096];

static int nrealreaders;
static struct task_struct *writer_task;
static struct task_struct **fakewriter_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *stats_task;
static struct task_struct *shuffler_task;

#define RCU_TORTURE_PIPE_LEN 10

struct rcu_torture {
        struct rcu_head rtort_rcu;
        int rtort_pipe_count;
        struct list_head rtort_free;
        int rtort_mbtest;
};

static int fullstop = 0;        /* stop generating callbacks at test end. */
static LIST_HEAD(rcu_torture_freelist);
static struct rcu_torture *rcu_torture_current = NULL;
static long rcu_torture_current_version = 0;
static struct rcu_torture rcu_tortures[10 * RCU_TORTURE_PIPE_LEN];
static DEFINE_SPINLOCK(rcu_torture_lock);
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
        { 0 };
static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
        { 0 };
static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
static atomic_t n_rcu_torture_alloc;
static atomic_t n_rcu_torture_alloc_fail;
static atomic_t n_rcu_torture_free;
static atomic_t n_rcu_torture_mberror;
static atomic_t n_rcu_torture_error;
static struct list_head rcu_torture_removed;

/*
 * Allocate an element from the rcu_tortures pool.
 */
static struct rcu_torture *
rcu_torture_alloc(void)
{
        struct list_head *p;

        spin_lock_bh(&rcu_torture_lock);
        if (list_empty(&rcu_torture_freelist)) {
                atomic_inc(&n_rcu_torture_alloc_fail);
                spin_unlock_bh(&rcu_torture_lock);
                return NULL;
        }
        atomic_inc(&n_rcu_torture_alloc);
        p = rcu_torture_freelist.next;
        list_del_init(p);
        spin_unlock_bh(&rcu_torture_lock);
        return container_of(p, struct rcu_torture, rtort_free);
}

/*
 * Free an element to the rcu_tortures pool.
 */
static void
rcu_torture_free(struct rcu_torture *p)
{
        atomic_inc(&n_rcu_torture_free);
        spin_lock_bh(&rcu_torture_lock);
        list_add_tail(&p->rtort_free, &rcu_torture_freelist);
        spin_unlock_bh(&rcu_torture_lock);
}

struct rcu_random_state {
        unsigned long rrs_state;
        long rrs_count;
};

#define RCU_RANDOM_MULT 39916801  /* prime */
#define RCU_RANDOM_ADD  479001701 /* prime */
#define RCU_RANDOM_REFRESH 10000

#define DEFINE_RCU_RANDOM(name) struct rcu_random_state name = { 0, 0 }
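/*
 * Note that DEFINE_RCU_RANDOM() starts rrs_count at zero, so the first call
 * to rcu_random() below immediately reseeds rrs_state from
 * get_random_bytes(), giving each task its own stream.
 */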

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
rcu_random(struct rcu_random_state *rrsp)
{
        long refresh;

        if (--rrsp->rrs_count < 0) {
                get_random_bytes(&refresh, sizeof(refresh));
                rrsp->rrs_state += refresh;
                rrsp->rrs_count = RCU_RANDOM_REFRESH;
        }
        rrsp->rrs_state = rrsp->rrs_state * RCU_RANDOM_MULT + RCU_RANDOM_ADD;
        return swahw32(rrsp->rrs_state);
}

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_torture_ops {
        void (*init)(void);
        void (*cleanup)(void);
        int (*readlock)(void);
        void (*readdelay)(struct rcu_random_state *rrsp);
        void (*readunlock)(int idx);
        int (*completed)(void);
        void (*deferredfree)(struct rcu_torture *p);
        void (*sync)(void);
        int (*stats)(char *page);
        char *name;
};
static struct rcu_torture_ops *cur_ops = NULL;
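/*
 * Each RCU flavor under test is described by one rcu_torture_ops structure.
 * rcu_torture_init() picks the structure whose ->name matches the
 * torture_type module parameter and records it in cur_ops, so adding a new
 * flavor is just a matter of filling in an ops structure and listing it in
 * the torture_ops[] array in rcu_torture_init().
 */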

/*
 * Definitions for rcu torture testing.
 */

static int rcu_torture_read_lock(void) __acquires(RCU)
{
        rcu_read_lock();
        return 0;
}

static void rcu_read_delay(struct rcu_random_state *rrsp)
{
        long delay;
        const long longdelay = 200;

        /* We want there to be long-running readers, but not all the time. */

        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay);
        if (!delay)
                udelay(longdelay);
}

static void rcu_torture_read_unlock(int idx) __releases(RCU)
{
        rcu_read_unlock();
}

static int rcu_torture_completed(void)
{
        return rcu_batches_completed();
}

static void
rcu_torture_cb(struct rcu_head *p)
{
        int i;
        struct rcu_torture *rp = container_of(p, struct rcu_torture, rtort_rcu);

        if (fullstop) {
                /* Test is ending, just drop callbacks on the floor. */
                /* The next initialization will pick up the pieces. */
                return;
        }
        i = rp->rtort_pipe_count;
        if (i > RCU_TORTURE_PIPE_LEN)
                i = RCU_TORTURE_PIPE_LEN;
        atomic_inc(&rcu_torture_wcount[i]);
        if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                rp->rtort_mbtest = 0;
                rcu_torture_free(rp);
        } else
                cur_ops->deferredfree(rp);
}

static void rcu_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu(&p->rtort_rcu, rcu_torture_cb);
}

static struct rcu_torture_ops rcu_ops = {
        .init = NULL,
        .cleanup = NULL,
        .readlock = rcu_torture_read_lock,
        .readdelay = rcu_read_delay,
        .readunlock = rcu_torture_read_unlock,
        .completed = rcu_torture_completed,
        .deferredfree = rcu_torture_deferred_free,
        .sync = synchronize_rcu,
        .stats = NULL,
        .name = "rcu"
};

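/*
 * "Deferred free" for the _sync flavors: rather than posting a callback,
 * wait for a grace period directly via cur_ops->sync(), then scan the
 * rcu_torture_removed list and recycle any elements whose pipe counts have
 * run out.  This exercises the synchronize_*() primitives instead of the
 * call_rcu()-style callback path.
 */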
static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
{
        int i;
        struct rcu_torture *rp;
        struct rcu_torture *rp1;

        cur_ops->sync();
        list_add(&p->rtort_free, &rcu_torture_removed);
        list_for_each_entry_safe(rp, rp1, &rcu_torture_removed, rtort_free) {
                i = rp->rtort_pipe_count;
                if (i > RCU_TORTURE_PIPE_LEN)
                        i = RCU_TORTURE_PIPE_LEN;
                atomic_inc(&rcu_torture_wcount[i]);
                if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
                        rp->rtort_mbtest = 0;
                        list_del(&rp->rtort_free);
                        rcu_torture_free(rp);
                }
        }
}

static void rcu_sync_torture_init(void)
{
        INIT_LIST_HEAD(&rcu_torture_removed);
}

static struct rcu_torture_ops rcu_sync_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = rcu_torture_read_lock,
        .readdelay = rcu_read_delay,
        .readunlock = rcu_torture_read_unlock,
        .completed = rcu_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = synchronize_rcu,
        .stats = NULL,
        .name = "rcu_sync"
};

/*
 * Definitions for rcu_bh torture testing.
 */

static int rcu_bh_torture_read_lock(void) __acquires(RCU_BH)
{
        rcu_read_lock_bh();
        return 0;
}

static void rcu_bh_torture_read_unlock(int idx) __releases(RCU_BH)
{
        rcu_read_unlock_bh();
}

static int rcu_bh_torture_completed(void)
{
        return rcu_batches_completed_bh();
}

static void rcu_bh_torture_deferred_free(struct rcu_torture *p)
{
        call_rcu_bh(&p->rtort_rcu, rcu_torture_cb);
}

struct rcu_bh_torture_synchronize {
        struct rcu_head head;
        struct completion completion;
};

static void rcu_bh_torture_wakeme_after_cb(struct rcu_head *head)
{
        struct rcu_bh_torture_synchronize *rcu;

        rcu = container_of(head, struct rcu_bh_torture_synchronize, head);
        complete(&rcu->completion);
}

static void rcu_bh_torture_synchronize(void)
{
        struct rcu_bh_torture_synchronize rcu;

        init_completion(&rcu.completion);
        call_rcu_bh(&rcu.head, rcu_bh_torture_wakeme_after_cb);
        wait_for_completion(&rcu.completion);
}

static struct rcu_torture_ops rcu_bh_ops = {
        .init = NULL,
        .cleanup = NULL,
        .readlock = rcu_bh_torture_read_lock,
        .readdelay = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock = rcu_bh_torture_read_unlock,
        .completed = rcu_bh_torture_completed,
        .deferredfree = rcu_bh_torture_deferred_free,
        .sync = rcu_bh_torture_synchronize,
        .stats = NULL,
        .name = "rcu_bh"
};

static struct rcu_torture_ops rcu_bh_sync_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = rcu_bh_torture_read_lock,
        .readdelay = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock = rcu_bh_torture_read_unlock,
        .completed = rcu_bh_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = rcu_bh_torture_synchronize,
        .stats = NULL,
        .name = "rcu_bh_sync"
};

/*
 * Definitions for srcu torture testing.
 */

static struct srcu_struct srcu_ctl;

static void srcu_torture_init(void)
{
        init_srcu_struct(&srcu_ctl);
        rcu_sync_torture_init();
}

static void srcu_torture_cleanup(void)
{
        synchronize_srcu(&srcu_ctl);
        cleanup_srcu_struct(&srcu_ctl);
}

static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
{
        return srcu_read_lock(&srcu_ctl);
}

static void srcu_read_delay(struct rcu_random_state *rrsp)
{
        long delay;
        const long uspertick = 1000000 / HZ;
        const long longdelay = 10;

        /* We want there to be long-running readers, but not all the time. */

        delay = rcu_random(rrsp) % (nrealreaders * 2 * longdelay * uspertick);
        if (!delay)
                schedule_timeout_interruptible(longdelay);
}

static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
{
        srcu_read_unlock(&srcu_ctl, idx);
}

static int srcu_torture_completed(void)
{
        return srcu_batches_completed(&srcu_ctl);
}

static void srcu_torture_synchronize(void)
{
        synchronize_srcu(&srcu_ctl);
}

static int srcu_torture_stats(char *page)
{
        int cnt = 0;
        int cpu;
        int idx = srcu_ctl.completed & 0x1;

        cnt += sprintf(&page[cnt], "%s%s per-CPU(idx=%d):",
                       torture_type, TORTURE_FLAG, idx);
        for_each_possible_cpu(cpu) {
                cnt += sprintf(&page[cnt], " %d(%d,%d)", cpu,
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx],
                               per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx]);
        }
        cnt += sprintf(&page[cnt], "\n");
        return cnt;
}

static struct rcu_torture_ops srcu_ops = {
        .init = srcu_torture_init,
        .cleanup = srcu_torture_cleanup,
        .readlock = srcu_torture_read_lock,
        .readdelay = srcu_read_delay,
        .readunlock = srcu_torture_read_unlock,
        .completed = srcu_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = srcu_torture_synchronize,
        .stats = srcu_torture_stats,
        .name = "srcu"
};

/*
 * Definitions for sched torture testing.
 */

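/*
 * For the "sched" flavor, a preemption-disabled region serves as the
 * read-side critical section for synchronize_sched().  There is no
 * grace-period counter to sample for this flavor, so ->completed simply
 * returns zero.
 */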
static int sched_torture_read_lock(void)
{
        preempt_disable();
        return 0;
}

static void sched_torture_read_unlock(int idx)
{
        preempt_enable();
}

static int sched_torture_completed(void)
{
        return 0;
}

static void sched_torture_synchronize(void)
{
        synchronize_sched();
}

static struct rcu_torture_ops sched_ops = {
        .init = rcu_sync_torture_init,
        .cleanup = NULL,
        .readlock = sched_torture_read_lock,
        .readdelay = rcu_read_delay,  /* just reuse rcu's version. */
        .readunlock = sched_torture_read_unlock,
        .completed = sched_torture_completed,
        .deferredfree = rcu_sync_torture_deferred_free,
        .sync = sched_torture_synchronize,
        .stats = NULL,
        .name = "sched"
};

/*
 * RCU torture writer kthread.  Repeatedly substitutes a new structure
 * for that pointed to by rcu_torture_current, freeing the old structure
 * after a series of grace periods (the "pipeline").
 */
static int
rcu_torture_writer(void *arg)
{
        int i;
        long oldbatch = rcu_batches_completed();
        struct rcu_torture *rp;
        struct rcu_torture *old_rp;
        static DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_writer task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1);
                if ((rp = rcu_torture_alloc()) == NULL)
                        continue;
                rp->rtort_pipe_count = 0;
                udelay(rcu_random(&rand) & 0x3ff);
                old_rp = rcu_torture_current;
                rp->rtort_mbtest = 1;
                rcu_assign_pointer(rcu_torture_current, rp);
                smp_wmb();
                if (old_rp) {
                        i = old_rp->rtort_pipe_count;
                        if (i > RCU_TORTURE_PIPE_LEN)
                                i = RCU_TORTURE_PIPE_LEN;
                        atomic_inc(&rcu_torture_wcount[i]);
                        old_rp->rtort_pipe_count++;
                        cur_ops->deferredfree(old_rp);
                }
                rcu_torture_current_version++;
                oldbatch = cur_ops->completed();
        } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_writer task stopping");
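        /*
         * A kthread created with kthread_run() should not return before
         * kthread_stop() has been called for it, so spin here (and in the
         * analogous loops in the other torture kthreads) until the cleanup
         * code asks this task to stop.
         */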
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * RCU torture fake writer kthread.  Repeatedly calls sync, with a random
 * delay between calls.
 */
static int
rcu_torture_fakewriter(void *arg)
{
        DEFINE_RCU_RANDOM(rand);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task started");
        set_user_nice(current, 19);

        do {
                schedule_timeout_uninterruptible(1 + rcu_random(&rand)%10);
                udelay(rcu_random(&rand) & 0x3ff);
                cur_ops->sync();
        } while (!kthread_should_stop() && !fullstop);

        VERBOSE_PRINTK_STRING("rcu_torture_fakewriter task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * RCU torture reader kthread.  Repeatedly dereferences rcu_torture_current,
 * incrementing the corresponding element of the pipeline array.  The
 * counter in the element should never be greater than 1, otherwise, the
 * RCU implementation is broken.
 */
static int
rcu_torture_reader(void *arg)
{
        int completed;
        int idx;
        DEFINE_RCU_RANDOM(rand);
        struct rcu_torture *p;
        int pipe_count;

        VERBOSE_PRINTK_STRING("rcu_torture_reader task started");
        set_user_nice(current, 19);

        do {
                idx = cur_ops->readlock();
                completed = cur_ops->completed();
                p = rcu_dereference(rcu_torture_current);
                if (p == NULL) {
                        /* Wait for rcu_torture_writer to get underway */
                        cur_ops->readunlock(idx);
                        schedule_timeout_interruptible(HZ);
                        continue;
                }
                if (p->rtort_mbtest == 0)
                        atomic_inc(&n_rcu_torture_mberror);
                cur_ops->readdelay(&rand);
                preempt_disable();
                pipe_count = p->rtort_pipe_count;
                if (pipe_count > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        pipe_count = RCU_TORTURE_PIPE_LEN;
                }
                ++__get_cpu_var(rcu_torture_count)[pipe_count];
                completed = cur_ops->completed() - completed;
                if (completed > RCU_TORTURE_PIPE_LEN) {
                        /* Should not happen, but... */
                        completed = RCU_TORTURE_PIPE_LEN;
                }
                ++__get_cpu_var(rcu_torture_batch)[completed];
                preempt_enable();
                cur_ops->readunlock(idx);
                schedule();
        } while (!kthread_should_stop() && !fullstop);
        VERBOSE_PRINTK_STRING("rcu_torture_reader task stopping");
        while (!kthread_should_stop())
                schedule_timeout_uninterruptible(1);
        return 0;
}

/*
 * Create an RCU-torture statistics message in the specified buffer.
 */
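/*
 * Field key for the first line of output: rtc = rcu_torture_current pointer,
 * ver = writer version number, tfle = torture freelist empty,
 * rta/rtaf/rtf = allocations, allocation failures, and frees, and
 * rtmbe = memory-barrier errors seen by readers.
 */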
static int
rcu_torture_printk(char *page)
{
        int cnt = 0;
        int cpu;
        int i;
        long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
        long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };

        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
                        batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
                }
        }
        for (i = RCU_TORTURE_PIPE_LEN - 1; i >= 0; i--) {
                if (pipesummary[i] != 0)
                        break;
        }
        cnt += sprintf(&page[cnt], "%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt],
                       "rtc: %p ver: %ld tfle: %d rta: %d rtaf: %d rtf: %d "
                       "rtmbe: %d",
                       rcu_torture_current,
                       rcu_torture_current_version,
                       list_empty(&rcu_torture_freelist),
                       atomic_read(&n_rcu_torture_alloc),
                       atomic_read(&n_rcu_torture_alloc_fail),
                       atomic_read(&n_rcu_torture_free),
                       atomic_read(&n_rcu_torture_mberror));
        if (atomic_read(&n_rcu_torture_mberror) != 0)
                cnt += sprintf(&page[cnt], " !!!");
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        if (i > 1) {
                cnt += sprintf(&page[cnt], "!!! ");
                atomic_inc(&n_rcu_torture_error);
        }
        cnt += sprintf(&page[cnt], "Reader Pipe: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", pipesummary[i]);
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt], "Reader Batch: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                cnt += sprintf(&page[cnt], " %ld", batchsummary[i]);
        cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
        cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                cnt += sprintf(&page[cnt], " %d",
                               atomic_read(&rcu_torture_wcount[i]));
        }
        cnt += sprintf(&page[cnt], "\n");
        if (cur_ops->stats)
                cnt += cur_ops->stats(&page[cnt]);
        return cnt;
}

/*
 * Print torture statistics.  Caller must ensure that there is only
 * one call to this function at a given time!!!  This is normally
 * accomplished by relying on the module system to only have one copy
 * of the module loaded, and then by giving the rcu_torture_stats
 * kthread full control (or the init/cleanup functions when rcu_torture_stats
 * thread is not running).
 */
static void
rcu_torture_stats_print(void)
{
        int cnt;

        cnt = rcu_torture_printk(printk_buf);
        printk(KERN_ALERT "%s", printk_buf);
}

/*
 * Periodically prints torture statistics, if periodic statistics printing
 * was specified via the stat_interval module parameter.
 *
 * No need to worry about fullstop here, since this one doesn't reference
 * volatile state or register callbacks.
 */
static int
rcu_torture_stats(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_stats task started");
        do {
                schedule_timeout_interruptible(stat_interval * HZ);
                rcu_torture_stats_print();
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_stats task stopping");
        return 0;
}

static int rcu_idle_cpu;        /* Force all torture tasks off this CPU */

/* Shuffle tasks such that we allow @rcu_idle_cpu to become idle. A special case
 * is when @rcu_idle_cpu = -1, when we allow the tasks to run on all CPUs.
 */
static void rcu_torture_shuffle_tasks(void)
{
        cpumask_t tmp_mask = CPU_MASK_ALL;
        int i;

        lock_cpu_hotplug();

        /* No point in shuffling if there is only one online CPU (ex: UP) */
        if (num_online_cpus() == 1) {
                unlock_cpu_hotplug();
                return;
        }

        if (rcu_idle_cpu != -1)
                cpu_clear(rcu_idle_cpu, tmp_mask);

        set_cpus_allowed(current, tmp_mask);

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++)
                        if (reader_tasks[i])
                                set_cpus_allowed(reader_tasks[i], tmp_mask);
        }

        if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++)
                        if (fakewriter_tasks[i])
                                set_cpus_allowed(fakewriter_tasks[i], tmp_mask);
        }

        if (writer_task)
                set_cpus_allowed(writer_task, tmp_mask);

        if (stats_task)
                set_cpus_allowed(stats_task, tmp_mask);

        if (rcu_idle_cpu == -1)
                rcu_idle_cpu = num_online_cpus() - 1;
        else
                rcu_idle_cpu--;

        unlock_cpu_hotplug();
}

/* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
 * system to become idle at a time and cut off its timer ticks. This is meant
 * to test the support for such tickless idle CPU in RCU.
 */
static int
rcu_torture_shuffle(void *arg)
{
        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task started");
        do {
                schedule_timeout_interruptible(shuffle_interval * HZ);
                rcu_torture_shuffle_tasks();
        } while (!kthread_should_stop());
        VERBOSE_PRINTK_STRING("rcu_torture_shuffle task stopping");
        return 0;
}

static inline void
rcu_torture_print_module_parms(char *tag)
{
        printk(KERN_ALERT "%s" TORTURE_FLAG
                "--- %s: nreaders=%d nfakewriters=%d "
                "stat_interval=%d verbose=%d test_no_idle_hz=%d "
                "shuffle_interval = %d\n",
                torture_type, tag, nrealreaders, nfakewriters,
                stat_interval, verbose, test_no_idle_hz, shuffle_interval);
}

static void
rcu_torture_cleanup(void)
{
        int i;

        fullstop = 1;
        if (shuffler_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_shuffle task");
                kthread_stop(shuffler_task);
        }
        shuffler_task = NULL;

        if (writer_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_writer task");
                kthread_stop(writer_task);
        }
        writer_task = NULL;

        if (reader_tasks) {
                for (i = 0; i < nrealreaders; i++) {
                        if (reader_tasks[i]) {
                                VERBOSE_PRINTK_STRING(
                                        "Stopping rcu_torture_reader task");
                                kthread_stop(reader_tasks[i]);
                        }
                        reader_tasks[i] = NULL;
                }
                kfree(reader_tasks);
                reader_tasks = NULL;
        }
        rcu_torture_current = NULL;

        if (fakewriter_tasks) {
                for (i = 0; i < nfakewriters; i++) {
                        if (fakewriter_tasks[i]) {
                                VERBOSE_PRINTK_STRING(
                                        "Stopping rcu_torture_fakewriter task");
                                kthread_stop(fakewriter_tasks[i]);
                        }
                        fakewriter_tasks[i] = NULL;
                }
                kfree(fakewriter_tasks);
                fakewriter_tasks = NULL;
        }

        if (stats_task) {
                VERBOSE_PRINTK_STRING("Stopping rcu_torture_stats task");
                kthread_stop(stats_task);
        }
        stats_task = NULL;

        /* Wait for all RCU callbacks to fire.  */
        rcu_barrier();

        rcu_torture_stats_print();  /* -After- the stats thread is stopped! */

        if (cur_ops->cleanup)
                cur_ops->cleanup();
        if (atomic_read(&n_rcu_torture_error))
                rcu_torture_print_module_parms("End of test: FAILURE");
        else
                rcu_torture_print_module_parms("End of test: SUCCESS");
}

static int __init
rcu_torture_init(void)
{
        int i;
        int cpu;
        int firsterr = 0;
        static struct rcu_torture_ops *torture_ops[] =
                { &rcu_ops, &rcu_sync_ops, &rcu_bh_ops, &rcu_bh_sync_ops,
                  &srcu_ops, &sched_ops, };

        /* Process args and tell the world that the torturer is on the job. */
        for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
                cur_ops = torture_ops[i];
                if (strcmp(torture_type, cur_ops->name) == 0)
                        break;
        }
        if (i == ARRAY_SIZE(torture_ops)) {
                printk(KERN_ALERT "rcutorture: invalid torture type: \"%s\"\n",
                       torture_type);
                return (-EINVAL);
        }
        if (cur_ops->init)
                cur_ops->init(); /* no "goto unwind" prior to this point!!! */

        if (nreaders >= 0)
                nrealreaders = nreaders;
        else
                nrealreaders = 2 * num_online_cpus();
        rcu_torture_print_module_parms("Start of test");
        fullstop = 0;

        /* Set up the freelist. */

        INIT_LIST_HEAD(&rcu_torture_freelist);
        for (i = 0; i < ARRAY_SIZE(rcu_tortures); i++) {
                rcu_tortures[i].rtort_mbtest = 0;
                list_add_tail(&rcu_tortures[i].rtort_free,
                              &rcu_torture_freelist);
        }

        /* Initialize the statistics so that each run gets its own numbers. */

        rcu_torture_current = NULL;
        rcu_torture_current_version = 0;
        atomic_set(&n_rcu_torture_alloc, 0);
        atomic_set(&n_rcu_torture_alloc_fail, 0);
        atomic_set(&n_rcu_torture_free, 0);
        atomic_set(&n_rcu_torture_mberror, 0);
        atomic_set(&n_rcu_torture_error, 0);
        for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
                atomic_set(&rcu_torture_wcount[i], 0);
        for_each_possible_cpu(cpu) {
                for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
                        per_cpu(rcu_torture_count, cpu)[i] = 0;
                        per_cpu(rcu_torture_batch, cpu)[i] = 0;
                }
        }

        /* Start up the kthreads. */

        VERBOSE_PRINTK_STRING("Creating rcu_torture_writer task");
        writer_task = kthread_run(rcu_torture_writer, NULL,
                                  "rcu_torture_writer");
        if (IS_ERR(writer_task)) {
                firsterr = PTR_ERR(writer_task);
                VERBOSE_PRINTK_ERRSTRING("Failed to create writer");
                writer_task = NULL;
                goto unwind;
        }
        fakewriter_tasks = kzalloc(nfakewriters * sizeof(fakewriter_tasks[0]),
                                   GFP_KERNEL);
        if (fakewriter_tasks == NULL) {
                VERBOSE_PRINTK_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nfakewriters; i++) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_fakewriter task");
                fakewriter_tasks[i] = kthread_run(rcu_torture_fakewriter, NULL,
                                                  "rcu_torture_fakewriter");
                if (IS_ERR(fakewriter_tasks[i])) {
                        firsterr = PTR_ERR(fakewriter_tasks[i]);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create fakewriter");
                        fakewriter_tasks[i] = NULL;
                        goto unwind;
                }
        }
        reader_tasks = kzalloc(nrealreaders * sizeof(reader_tasks[0]),
                               GFP_KERNEL);
        if (reader_tasks == NULL) {
                VERBOSE_PRINTK_ERRSTRING("out of memory");
                firsterr = -ENOMEM;
                goto unwind;
        }
        for (i = 0; i < nrealreaders; i++) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_reader task");
                reader_tasks[i] = kthread_run(rcu_torture_reader, NULL,
                                              "rcu_torture_reader");
                if (IS_ERR(reader_tasks[i])) {
                        firsterr = PTR_ERR(reader_tasks[i]);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create reader");
                        reader_tasks[i] = NULL;
                        goto unwind;
                }
        }
        if (stat_interval > 0) {
                VERBOSE_PRINTK_STRING("Creating rcu_torture_stats task");
                stats_task = kthread_run(rcu_torture_stats, NULL,
                                        "rcu_torture_stats");
                if (IS_ERR(stats_task)) {
                        firsterr = PTR_ERR(stats_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create stats");
                        stats_task = NULL;
                        goto unwind;
                }
        }
        if (test_no_idle_hz) {
                rcu_idle_cpu = num_online_cpus() - 1;
                /* Create the shuffler thread */
                shuffler_task = kthread_run(rcu_torture_shuffle, NULL,
                                          "rcu_torture_shuffle");
                if (IS_ERR(shuffler_task)) {
                        firsterr = PTR_ERR(shuffler_task);
                        VERBOSE_PRINTK_ERRSTRING("Failed to create shuffler");
                        shuffler_task = NULL;
                        goto unwind;
                }
        }
        return 0;

unwind:
        rcu_torture_cleanup();
        return firsterr;
}

module_init(rcu_torture_init);
module_exit(rcu_torture_cleanup);