// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update module-based performance-test facility
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@linux.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
	pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
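
/*
 * Illustrative output format (not from a real run): with perf_type="rcu",
 * PERFOUT_STRING("Test complete") emits the line
 *
 *	rcu-perf: Test complete
 */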

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 */
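
/*
 * For example (illustrative values), booting with "nr_cpus=8
 * rcuperf.nwriters=0" runs a read-only test with eight reader
 * kthreads, per use case 3 above.
 */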

#ifdef MODULE
# define RCUPERF_SHUTDOWN 0
#else
# define RCUPERF_SHUTDOWN 1
#endif

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, RCUPERF_SHUTDOWN,
	      "Shutdown at end of performance tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, rcu_bh, ...)");
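
/*
 * Example invocation (illustrative) when built as a module, selecting
 * SRCU with four writers and no readers:
 *
 *	modprobe rcuperf perf_type=srcu nreaders=0 nwriters=4
 */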

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_perf_writer_started;
static unsigned long b_rcu_perf_writer_finished;
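/* Outstanding asynchronous grace-period waits posted from each CPU. */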
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

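/* Current writer-kthread state, one of the RTWS_* values below. */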
static int rcu_perf_writer_state;
#define RTWS_INIT	0
#define RTWS_ASYNC	1
#define RTWS_BARRIER	2
#define RTWS_EXP_SYNC	3
#define RTWS_SYNC	4
#define RTWS_IDLE	5
#define RTWS_STOPPING	6

#define MAX_MEAS 10000
#define MIN_MEAS 100
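
/*
 * Each writer collects at least MIN_MEAS grace-period-latency
 * measurements before declaring itself done, and buffers at most
 * MAX_MEAS of them.
 */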

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_perf_ops {
	int ptype;
	void (*init)(void);
	void (*cleanup)(void);
	int (*readlock)(void);
	void (*readunlock)(int idx);
	unsigned long (*get_gp_seq)(void);
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);
	void (*sync)(void);
	void (*exp_sync)(void);
	const char *name;
};

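/* Selected at init time by matching perf_type against each ops' ->name. */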
static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= rcu_perf_read_lock,
	.readunlock	= rcu_perf_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcu"
};

static struct srcu_struct srcud;

static void srcu_sync_perf_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_perf_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_perf_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_perf_init,
	.cleanup	= srcu_sync_perf_cleanup,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcud"
};

/*
 * Definitions for RCU-tasks perf testing.
 */

static int tasks_perf_read_lock(void)
{
	return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= tasks_perf_read_lock,
	.readunlock	= tasks_perf_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};

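/*
 * Grace-period-sequence difference, falling back to plain subtraction
 * for flavors that do not supply a ->gp_diff() method.
 */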
static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does an empty RCU read-side
 * critical section, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_perf_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_perf_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_perf_writer().
 */
static void rcu_perf_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU perf writer kthread.  Repeatedly does a grace period.
 */
static int
rcu_perf_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	struct sched_param sp;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sp.sched_priority = 1;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
		t_rcu_perf_writer_started = t;
		if (gp_exp) {
			b_rcu_perf_writer_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_perf_writer_started = cur_ops->get_gp_seq();
		}
	}

	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
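		/*
		 * Async mode: post a callback and, when too many are
		 * already in flight on this CPU, wait for the backlog
		 * to drain before retrying.
		 */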
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				rcu_perf_writer_state = RTWS_ASYNC;
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_perf_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				rcu_perf_writer_state = RTWS_BARRIER;
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			rcu_perf_writer_state = RTWS_EXP_SYNC;
			cur_ops->exp_sync();
		} else {
			rcu_perf_writer_state = RTWS_SYNC;
			cur_ops->sync();
		}
		rcu_perf_writer_state = RTWS_IDLE;
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sp.sched_priority = 0;
			sched_setscheduler_nocheck(current,
						   SCHED_NORMAL, &sp);
			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
				 perf_type, PERF_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				PERFOUT_STRING("Test complete");
				t_rcu_perf_writer_finished = t;
				if (gp_exp) {
					b_rcu_perf_writer_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_perf_writer_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		rcu_perf_writer_state = RTWS_BARRIER;
		cur_ops->gp_barrier();
	}
	rcu_perf_writer_state = RTWS_STOPPING;
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_perf_writer");
	return 0;
}

static void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_perf_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_perf_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 perf_type, PERF_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 perf_type, PERF_FLAG,
			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
			 t_rcu_perf_writer_finished -
			 t_rcu_perf_writer_started,
			 ngps,
			 rcuperf_seq_diff(b_rcu_perf_writer_finished,
					  b_rcu_perf_writer_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					 perf_type, PERF_FLAG,
					 i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do torture-type-specific cleanup operations. */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, the number of CPUs plus one plus that value
 * (so -2 means one fewer than the number of CPUs), but at least one.
 */
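/*
 * For example, with eight CPUs online (illustrative), compute_real(4)
 * returns 4, compute_real(-1) returns 8, and compute_real(-3) returns 6.
 */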
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down the system.
 */
static int
rcu_perf_shutdown(void *arg)
{
	do {
		wait_event(shutdown_wq,
			   atomic_read(&n_rcu_perf_writer_finished) >=
			   nrealwriters);
	} while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_perf_ops *perf_ops[] = {
		&rcu_ops, &srcu_ops, &srcud_ops, &tasks_ops,
	};

	if (!torture_init_begin(perf_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the perf'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
		pr_alert("rcu-perf types:");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_cont(" %s", perf_ops[i]->name);
		pr_cont("\n");
		WARN_ON(!IS_MODULE(CONFIG_RCU_PERF_TEST));
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_perf_reader_started, 0);
	atomic_set(&n_rcu_perf_writer_started, 0);
	atomic_set(&n_rcu_perf_writer_finished, 0);
	rcu_perf_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_perf_cleanup();
	return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);