/*
 * Read-Copy Update module-based performance-test facility
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2015
 *
 * Authors: Paul E. McKenney <paulmck@us.ibm.com>
 */

#define pr_fmt(fmt) fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <linux/rcupdate.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/completion.h>
#include <linux/moduleparam.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/freezer.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/stat.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <asm/byteorder.h>
#include <linux/torture.h>
#include <linux/vmalloc.h>

#include "rcu.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.vnet.ibm.com>");

#define PERF_FLAG "-perf:"
#define PERFOUT_STRING(s) \
	pr_alert("%s" PERF_FLAG " %s\n", perf_type, s)
#define VERBOSE_PERFOUT_STRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG " %s\n", perf_type, s); } while (0)
#define VERBOSE_PERFOUT_ERRSTRING(s) \
	do { if (verbose) pr_alert("%s" PERF_FLAG "!!! %s\n", perf_type, s); } while (0)
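
/*
 * For example, with the default perf_type of "rcu", PERFOUT_STRING("Test
 * complete") emits "rcu-perf: Test complete" to the console, and the
 * error variant instead prefixes the message with "rcu-perf:!!! ".
 */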

/*
 * The intended use cases for the nreaders and nwriters module parameters
 * are as follows:
 *
 * 1.	Specify only the nr_cpus kernel boot parameter.  This will
 *	set both nreaders and nwriters to the value specified by
 *	nr_cpus for a mixed reader/writer test.
 *
 * 2.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nreaders to zero.  This will set nwriters to the
 *	value specified by nr_cpus for an update-only test.
 *
 * 3.	Specify the nr_cpus kernel boot parameter, but set
 *	rcuperf.nwriters to zero.  This will set nreaders to the
 *	value specified by nr_cpus for a read-only test.
 *
 * Various other use cases may of course be specified.
 */
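
/*
 * For example, on a hypothetical 8-CPU system, the kernel boot lines for
 * the three use cases above might look roughly like:
 *
 *	nr_cpus=8				(mixed reader/writer test)
 *	nr_cpus=8 rcuperf.nreaders=0		(update-only test)
 *	nr_cpus=8 rcuperf.nwriters=0		(read-only test)
 */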

torture_param(bool, gp_async, false, "Use asynchronous GP wait primitives");
torture_param(int, gp_async_max, 1000, "Max # outstanding waits per writer");
torture_param(bool, gp_exp, false, "Use expedited GP wait primitives");
torture_param(int, holdoff, 10, "Holdoff time before test start (s)");
torture_param(int, nreaders, -1, "Number of RCU reader threads");
torture_param(int, nwriters, -1, "Number of RCU updater threads");
torture_param(bool, shutdown, !IS_ENABLED(MODULE),
	      "Shutdown at end of performance tests.");
torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");

static char *perf_type = "rcu";
module_param(perf_type, charp, 0444);
MODULE_PARM_DESC(perf_type, "Type of RCU to performance-test (rcu, rcu_bh, ...)");

static int nrealreaders;
static int nrealwriters;
static struct task_struct **writer_tasks;
static struct task_struct **reader_tasks;
static struct task_struct *shutdown_task;

static u64 **writer_durations;
static int *writer_n_durations;
static atomic_t n_rcu_perf_reader_started;
static atomic_t n_rcu_perf_writer_started;
static atomic_t n_rcu_perf_writer_finished;
static wait_queue_head_t shutdown_wq;
static u64 t_rcu_perf_writer_started;
static u64 t_rcu_perf_writer_finished;
static unsigned long b_rcu_perf_writer_started;
static unsigned long b_rcu_perf_writer_finished;
static DEFINE_PER_CPU(atomic_t, n_async_inflight);

static int rcu_perf_writer_state;
#define RTWS_INIT		0	/* Writer not yet started. */
#define RTWS_ASYNC		1	/* Posting an asynchronous GP wait. */
#define RTWS_BARRIER		2	/* Waiting for outstanding callbacks. */
#define RTWS_EXP_SYNC		3	/* In an expedited synchronous GP. */
#define RTWS_SYNC		4	/* In a normal synchronous GP. */
#define RTWS_IDLE		5	/* Between grace periods. */
#define RTWS_STOPPING		6	/* Writer shutting down. */

#define MAX_MEAS 10000
#define MIN_MEAS 100
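
/*
 * Each writer records one latency measurement per grace-period wait,
 * stopping after at least MIN_MEAS and at most MAX_MEAS measurements
 * have been taken (see rcu_perf_writer() below).
 */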

/*
 * Operations vector for selecting different types of tests.
 */

struct rcu_perf_ops {
	int ptype;			/* RCU flavor (RCU_FLAVOR and friends). */
	void (*init)(void);		/* Flavor-specific initialization. */
	void (*cleanup)(void);		/* Flavor-specific cleanup, if any. */
	int (*readlock)(void);		/* Enter read-side critical section. */
	void (*readunlock)(int idx);	/* Exit read-side critical section. */
	unsigned long (*get_gp_seq)(void);	/* Current grace-period sequence. */
	unsigned long (*gp_diff)(unsigned long new, unsigned long old);
	unsigned long (*exp_completed)(void);	/* # expedited GPs completed. */
	void (*async)(struct rcu_head *head, rcu_callback_t func);
	void (*gp_barrier)(void);	/* Wait for outstanding callbacks. */
	void (*sync)(void);		/* Synchronous grace-period wait. */
	void (*exp_sync)(void);		/* Expedited grace-period wait. */
	const char *name;
};

static struct rcu_perf_ops *cur_ops;

/*
 * Definitions for rcu perf testing.
 */

static int rcu_perf_read_lock(void) __acquires(RCU)
{
	rcu_read_lock();
	return 0;
}

static void rcu_perf_read_unlock(int idx) __releases(RCU)
{
	rcu_read_unlock();
}

static unsigned long __maybe_unused rcu_no_completed(void)
{
	return 0;
}

static void rcu_sync_perf_init(void)
{
}

static struct rcu_perf_ops rcu_ops = {
	.ptype		= RCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= rcu_perf_read_lock,
	.readunlock	= rcu_perf_read_unlock,
	.get_gp_seq	= rcu_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed,
	.async		= call_rcu,
	.gp_barrier	= rcu_barrier,
	.sync		= synchronize_rcu,
	.exp_sync	= synchronize_rcu_expedited,
	.name		= "rcu"
};

/*
 * Definitions for rcu_bh perf testing.
 */

static int rcu_bh_perf_read_lock(void) __acquires(RCU_BH)
{
	rcu_read_lock_bh();
	return 0;
}

static void rcu_bh_perf_read_unlock(int idx) __releases(RCU_BH)
{
	rcu_read_unlock_bh();
}

static struct rcu_perf_ops rcu_bh_ops = {
	.ptype		= RCU_BH_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= rcu_bh_perf_read_lock,
	.readunlock	= rcu_bh_perf_read_unlock,
	.get_gp_seq	= rcu_bh_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed_sched,
	.async		= call_rcu_bh,
	.gp_barrier	= rcu_barrier_bh,
	.sync		= synchronize_rcu_bh,
	.exp_sync	= synchronize_rcu_bh_expedited,
	.name		= "rcu_bh"
};

/*
 * Definitions for srcu perf testing.
 */

DEFINE_STATIC_SRCU(srcu_ctl_perf);
static struct srcu_struct *srcu_ctlp = &srcu_ctl_perf;

static int srcu_perf_read_lock(void) __acquires(srcu_ctlp)
{
	return srcu_read_lock(srcu_ctlp);
}

static void srcu_perf_read_unlock(int idx) __releases(srcu_ctlp)
{
	srcu_read_unlock(srcu_ctlp, idx);
}

static unsigned long srcu_perf_completed(void)
{
	return srcu_batches_completed(srcu_ctlp);
}

static void srcu_call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	call_srcu(srcu_ctlp, head, func);
}

static void srcu_rcu_barrier(void)
{
	srcu_barrier(srcu_ctlp);
}

static void srcu_perf_synchronize(void)
{
	synchronize_srcu(srcu_ctlp);
}

static void srcu_perf_synchronize_expedited(void)
{
	synchronize_srcu_expedited(srcu_ctlp);
}

static struct rcu_perf_ops srcu_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcu"
};

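/*
 * The "srcud" variant is identical to "srcu" except that it exercises a
 * dynamically initialized srcu_struct (via init_srcu_struct()) rather
 * than one defined with DEFINE_STATIC_SRCU(), hence the extra init and
 * cleanup handlers.
 */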
static struct srcu_struct srcud;

static void srcu_sync_perf_init(void)
{
	srcu_ctlp = &srcud;
	init_srcu_struct(srcu_ctlp);
}

static void srcu_sync_perf_cleanup(void)
{
	cleanup_srcu_struct(srcu_ctlp);
}

static struct rcu_perf_ops srcud_ops = {
	.ptype		= SRCU_FLAVOR,
	.init		= srcu_sync_perf_init,
	.cleanup	= srcu_sync_perf_cleanup,
	.readlock	= srcu_perf_read_lock,
	.readunlock	= srcu_perf_read_unlock,
	.get_gp_seq	= srcu_perf_completed,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= srcu_perf_completed,
	.async		= srcu_call_rcu,
	.gp_barrier	= srcu_rcu_barrier,
	.sync		= srcu_perf_synchronize,
	.exp_sync	= srcu_perf_synchronize_expedited,
	.name		= "srcud"
};

/*
 * Definitions for sched perf testing.
 */

static int sched_perf_read_lock(void)
{
	preempt_disable();
	return 0;
}

static void sched_perf_read_unlock(int idx)
{
	preempt_enable();
}

static struct rcu_perf_ops sched_ops = {
	.ptype		= RCU_SCHED_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= sched_perf_read_lock,
	.readunlock	= sched_perf_read_unlock,
	.get_gp_seq	= rcu_sched_get_gp_seq,
	.gp_diff	= rcu_seq_diff,
	.exp_completed	= rcu_exp_batches_completed_sched,
	.async		= call_rcu_sched,
	.gp_barrier	= rcu_barrier_sched,
	.sync		= synchronize_sched,
	.exp_sync	= synchronize_sched_expedited,
	.name		= "sched"
};

/*
 * Definitions for RCU-tasks perf testing.
 */

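/*
 * RCU-tasks has no explicit read-side markers: roughly speaking, any
 * region of kernel code that does not do a voluntary context switch is
 * a reader, so the readlock/readunlock callbacks below are
 * intentionally empty.
 */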
static int tasks_perf_read_lock(void)
{
	return 0;
}

static void tasks_perf_read_unlock(int idx)
{
}

static struct rcu_perf_ops tasks_ops = {
	.ptype		= RCU_TASKS_FLAVOR,
	.init		= rcu_sync_perf_init,
	.readlock	= tasks_perf_read_lock,
	.readunlock	= tasks_perf_read_unlock,
	.get_gp_seq	= rcu_no_completed,
	.gp_diff	= rcu_seq_diff,
	.async		= call_rcu_tasks,
	.gp_barrier	= rcu_barrier_tasks,
	.sync		= synchronize_rcu_tasks,
	.exp_sync	= synchronize_rcu_tasks,
	.name		= "tasks"
};

static unsigned long rcuperf_seq_diff(unsigned long new, unsigned long old)
{
	if (!cur_ops->gp_diff)
		return new - old;
	return cur_ops->gp_diff(new, old);
}
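
/*
 * Plain subtraction suffices only for flavors whose grace-period
 * counters are simple monotonic counts; the ->gp_diff handler is needed
 * when the sequence number may encode grace-period state in its
 * low-order bits (see rcu_seq_diff() in rcu.h).
 */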

/*
 * If performance tests complete, wait for shutdown to commence.
 */
static void rcu_perf_wait_shutdown(void)
{
	cond_resched_tasks_rcu_qs();
	if (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters)
		return;
	while (!torture_must_stop())
		schedule_timeout_uninterruptible(1);
}

/*
 * RCU perf reader kthread.  Repeatedly does empty RCU read-side
 * critical sections, minimizing update-side interference.
 */
static int
rcu_perf_reader(void *arg)
{
	unsigned long flags;
	int idx;
	long me = (long)arg;

	VERBOSE_PERFOUT_STRING("rcu_perf_reader task started");
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	set_user_nice(current, MAX_NICE);
	atomic_inc(&n_rcu_perf_reader_started);

	do {
		local_irq_save(flags);
		idx = cur_ops->readlock();
		cur_ops->readunlock(idx);
		local_irq_restore(flags);
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	torture_kthread_stopping("rcu_perf_reader");
	return 0;
}

/*
 * Callback function for asynchronous grace periods from rcu_perf_writer().
 */
static void rcu_perf_async_cb(struct rcu_head *rhp)
{
	atomic_dec(this_cpu_ptr(&n_async_inflight));
	kfree(rhp);
}

/*
 * RCU perf writer kthread.  Repeatedly does a grace period, recording
 * each one's duration.
 */
static int
rcu_perf_writer(void *arg)
{
	int i = 0;
	int i_max;
	long me = (long)arg;
	struct rcu_head *rhp = NULL;
	struct sched_param sp;
	bool started = false, done = false, alldone = false;
	u64 t;
	u64 *wdp;
	u64 *wdpp = writer_durations[me];

	VERBOSE_PERFOUT_STRING("rcu_perf_writer task started");
	WARN_ON(!wdpp);
	set_cpus_allowed_ptr(current, cpumask_of(me % nr_cpu_ids));
	sp.sched_priority = 1;
	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);

	if (holdoff)
		schedule_timeout_uninterruptible(holdoff * HZ);

	t = ktime_get_mono_fast_ns();
	if (atomic_inc_return(&n_rcu_perf_writer_started) >= nrealwriters) {
		t_rcu_perf_writer_started = t;
		if (gp_exp) {
			b_rcu_perf_writer_started =
				cur_ops->exp_completed() / 2;
		} else {
			b_rcu_perf_writer_started = cur_ops->get_gp_seq();
		}
	}

	do {
		if (writer_holdoff)
			udelay(writer_holdoff);
		wdp = &wdpp[i];
		*wdp = ktime_get_mono_fast_ns();
		if (gp_async) {
retry:
			if (!rhp)
				rhp = kmalloc(sizeof(*rhp), GFP_KERNEL);
			if (rhp && atomic_read(this_cpu_ptr(&n_async_inflight)) < gp_async_max) {
				rcu_perf_writer_state = RTWS_ASYNC;
				atomic_inc(this_cpu_ptr(&n_async_inflight));
				cur_ops->async(rhp, rcu_perf_async_cb);
				rhp = NULL;
			} else if (!kthread_should_stop()) {
				/* Too many callbacks outstanding: drain them, then retry. */
				rcu_perf_writer_state = RTWS_BARRIER;
				cur_ops->gp_barrier();
				goto retry;
			} else {
				kfree(rhp); /* Because we are stopping. */
			}
		} else if (gp_exp) {
			rcu_perf_writer_state = RTWS_EXP_SYNC;
			cur_ops->exp_sync();
		} else {
			rcu_perf_writer_state = RTWS_SYNC;
			cur_ops->sync();
		}
		rcu_perf_writer_state = RTWS_IDLE;
		t = ktime_get_mono_fast_ns();
		*wdp = t - *wdp;
		i_max = i;
		if (!started &&
		    atomic_read(&n_rcu_perf_writer_started) >= nrealwriters)
			started = true;
		if (!done && i >= MIN_MEAS) {
			done = true;
			sp.sched_priority = 0;
			sched_setscheduler_nocheck(current,
						   SCHED_NORMAL, &sp);
			pr_alert("%s%s rcu_perf_writer %ld has %d measurements\n",
				 perf_type, PERF_FLAG, me, MIN_MEAS);
			if (atomic_inc_return(&n_rcu_perf_writer_finished) >=
			    nrealwriters) {
				schedule_timeout_interruptible(10);
				rcu_ftrace_dump(DUMP_ALL);
				PERFOUT_STRING("Test complete");
				t_rcu_perf_writer_finished = t;
				if (gp_exp) {
					b_rcu_perf_writer_finished =
						cur_ops->exp_completed() / 2;
				} else {
					b_rcu_perf_writer_finished =
						cur_ops->get_gp_seq();
				}
				if (shutdown) {
					smp_mb(); /* Assign before wake. */
					wake_up(&shutdown_wq);
				}
			}
		}
		if (done && !alldone &&
		    atomic_read(&n_rcu_perf_writer_finished) >= nrealwriters)
			alldone = true;
		if (started && !alldone && i < MAX_MEAS - 1)
			i++;
		rcu_perf_wait_shutdown();
	} while (!torture_must_stop());
	if (gp_async) {
		rcu_perf_writer_state = RTWS_BARRIER;
		cur_ops->gp_barrier();
	}
	rcu_perf_writer_state = RTWS_STOPPING;
	writer_n_durations[me] = i_max;
	torture_kthread_stopping("rcu_perf_writer");
	return 0;
}

static void
rcu_perf_print_module_parms(struct rcu_perf_ops *cur_ops, const char *tag)
{
	pr_alert("%s" PERF_FLAG
		 "--- %s: nreaders=%d nwriters=%d verbose=%d shutdown=%d\n",
		 perf_type, tag, nrealreaders, nrealwriters, verbose, shutdown);
}

static void
rcu_perf_cleanup(void)
{
	int i;
	int j;
	int ngps = 0;
	u64 *wdp;
	u64 *wdpp;

	/*
	 * Would like warning at start, but everything is expedited
	 * during the mid-boot phase, so have to wait till the end.
	 */
	if (rcu_gp_is_expedited() && !rcu_gp_is_normal() && !gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods expedited, no normal ones to measure!");
	if (rcu_gp_is_normal() && gp_exp)
		VERBOSE_PERFOUT_ERRSTRING("All grace periods normal, no expedited ones to measure!");
	if (gp_exp && gp_async)
		VERBOSE_PERFOUT_ERRSTRING("No expedited async GPs, so went with async!");

	if (torture_cleanup_begin())
		return;

	if (reader_tasks) {
		for (i = 0; i < nrealreaders; i++)
			torture_stop_kthread(rcu_perf_reader,
					     reader_tasks[i]);
		kfree(reader_tasks);
	}

	if (writer_tasks) {
		for (i = 0; i < nrealwriters; i++) {
			torture_stop_kthread(rcu_perf_writer,
					     writer_tasks[i]);
			if (!writer_n_durations)
				continue;
			j = writer_n_durations[i];
			pr_alert("%s%s writer %d gps: %d\n",
				 perf_type, PERF_FLAG, i, j);
			ngps += j;
		}
		pr_alert("%s%s start: %llu end: %llu duration: %llu gps: %d batches: %ld\n",
			 perf_type, PERF_FLAG,
			 t_rcu_perf_writer_started, t_rcu_perf_writer_finished,
			 t_rcu_perf_writer_finished -
			 t_rcu_perf_writer_started,
			 ngps,
			 rcuperf_seq_diff(b_rcu_perf_writer_finished,
					  b_rcu_perf_writer_started));
		for (i = 0; i < nrealwriters; i++) {
			if (!writer_durations)
				break;
			if (!writer_n_durations)
				continue;
			wdpp = writer_durations[i];
			if (!wdpp)
				continue;
			for (j = 0; j <= writer_n_durations[i]; j++) {
				wdp = &wdpp[j];
				pr_alert("%s%s %4d writer-duration: %5d %llu\n",
					perf_type, PERF_FLAG,
					i, j, *wdp);
				if (j % 100 == 0)
					schedule_timeout_uninterruptible(1);
			}
			kfree(writer_durations[i]);
		}
		kfree(writer_tasks);
		kfree(writer_durations);
		kfree(writer_n_durations);
	}

	/* Do flavor-specific cleanup operations.  */
	if (cur_ops->cleanup != NULL)
		cur_ops->cleanup();

	torture_cleanup_end();
}

/*
 * Return the number if non-negative.  If -1, the number of CPUs.
 * If less than -1, that much less than the number of CPUs, but
 * at least one.
 */
static int compute_real(int n)
{
	int nr;

	if (n >= 0) {
		nr = n;
	} else {
		nr = num_online_cpus() + 1 + n;
		if (nr <= 0)
			nr = 1;
	}
	return nr;
}
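
/*
 * For example, with 8 CPUs online: compute_real(4) == 4,
 * compute_real(-1) == 8, compute_real(-3) == 6, and compute_real(-100)
 * is clamped to 1.
 */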

/*
 * RCU perf shutdown kthread.  Just waits to be awakened, then shuts
 * down the system.
 */
static int
rcu_perf_shutdown(void *arg)
{
	do {
		wait_event(shutdown_wq,
			   atomic_read(&n_rcu_perf_writer_finished) >=
			   nrealwriters);
	} while (atomic_read(&n_rcu_perf_writer_finished) < nrealwriters);
	smp_mb(); /* Wake before output. */
	rcu_perf_cleanup();
	kernel_power_off();
	return -EINVAL;
}

static int __init
rcu_perf_init(void)
{
	long i;
	int firsterr = 0;
	static struct rcu_perf_ops *perf_ops[] = {
		&rcu_ops, &rcu_bh_ops, &srcu_ops, &srcud_ops, &sched_ops,
		&tasks_ops,
	};

	if (!torture_init_begin(perf_type, verbose))
		return -EBUSY;

	/* Process args and tell the world that the perf'er is on the job. */
	for (i = 0; i < ARRAY_SIZE(perf_ops); i++) {
		cur_ops = perf_ops[i];
		if (strcmp(perf_type, cur_ops->name) == 0)
			break;
	}
	if (i == ARRAY_SIZE(perf_ops)) {
		pr_alert("rcu-perf: invalid perf type: \"%s\"\n", perf_type);
		pr_alert("rcu-perf types:");
		for (i = 0; i < ARRAY_SIZE(perf_ops); i++)
			pr_cont(" %s", perf_ops[i]->name);
		pr_cont("\n");
		firsterr = -EINVAL;
		goto unwind;
	}
	if (cur_ops->init)
		cur_ops->init();

	nrealwriters = compute_real(nwriters);
	nrealreaders = compute_real(nreaders);
	atomic_set(&n_rcu_perf_reader_started, 0);
	atomic_set(&n_rcu_perf_writer_started, 0);
	atomic_set(&n_rcu_perf_writer_finished, 0);
	rcu_perf_print_module_parms(cur_ops, "Start of test");

	/* Start up the kthreads. */

	if (shutdown) {
		init_waitqueue_head(&shutdown_wq);
		firsterr = torture_create_kthread(rcu_perf_shutdown, NULL,
						  shutdown_task);
		if (firsterr)
			goto unwind;
		schedule_timeout_uninterruptible(1);
	}
	reader_tasks = kcalloc(nrealreaders, sizeof(reader_tasks[0]),
			       GFP_KERNEL);
	if (reader_tasks == NULL) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealreaders; i++) {
		firsterr = torture_create_kthread(rcu_perf_reader, (void *)i,
						  reader_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	while (atomic_read(&n_rcu_perf_reader_started) < nrealreaders)
		schedule_timeout_uninterruptible(1);
	writer_tasks = kcalloc(nrealwriters, sizeof(writer_tasks[0]),
			       GFP_KERNEL);
	writer_durations = kcalloc(nrealwriters, sizeof(*writer_durations),
				   GFP_KERNEL);
	writer_n_durations =
		kcalloc(nrealwriters, sizeof(*writer_n_durations),
			GFP_KERNEL);
	if (!writer_tasks || !writer_durations || !writer_n_durations) {
		VERBOSE_PERFOUT_ERRSTRING("out of memory");
		firsterr = -ENOMEM;
		goto unwind;
	}
	for (i = 0; i < nrealwriters; i++) {
		writer_durations[i] =
			kcalloc(MAX_MEAS, sizeof(*writer_durations[i]),
				GFP_KERNEL);
		if (!writer_durations[i]) {
			firsterr = -ENOMEM;
			goto unwind;
		}
		firsterr = torture_create_kthread(rcu_perf_writer, (void *)i,
						  writer_tasks[i]);
		if (firsterr)
			goto unwind;
	}
	torture_init_end();
	return 0;

unwind:
	torture_init_end();
	rcu_perf_cleanup();
	return firsterr;
}

module_init(rcu_perf_init);
module_exit(rcu_perf_cleanup);