1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_SCHED_H
3 #define _LINUX_SCHED_H
4 
5 /*
6  * Define 'struct task_struct' and provide the main scheduler
7  * APIs (schedule(), wakeup variants, etc.)
8  */
9 
10 #include <uapi/linux/sched.h>
11 
12 #include <asm/current.h>
13 
14 #include <linux/pid.h>
15 #include <linux/sem.h>
16 #include <linux/shm.h>
17 #include <linux/kcov.h>
18 #include <linux/mutex.h>
19 #include <linux/plist.h>
20 #include <linux/hrtimer.h>
21 #include <linux/irqflags.h>
22 #include <linux/seccomp.h>
23 #include <linux/nodemask.h>
24 #include <linux/rcupdate.h>
25 #include <linux/refcount.h>
26 #include <linux/resource.h>
27 #include <linux/latencytop.h>
28 #include <linux/sched/prio.h>
29 #include <linux/sched/types.h>
30 #include <linux/signal_types.h>
31 #include <linux/mm_types_task.h>
32 #include <linux/task_io_accounting.h>
33 #include <linux/posix-timers.h>
34 #include <linux/rseq.h>
35 #include <linux/seqlock.h>
36 #include <linux/kcsan.h>
37 
38 /* task_struct member predeclarations (sorted alphabetically): */
39 struct audit_context;
40 struct backing_dev_info;
41 struct bio_list;
42 struct blk_plug;
43 struct capture_control;
44 struct cfs_rq;
45 struct fs_struct;
46 struct futex_pi_state;
47 struct io_context;
48 struct mempolicy;
49 struct nameidata;
50 struct nsproxy;
51 struct perf_event_context;
52 struct pid_namespace;
53 struct pipe_inode_info;
54 struct rcu_node;
55 struct reclaim_state;
56 struct robust_list_head;
57 struct root_domain;
58 struct rq;
59 struct sched_attr;
60 struct sched_param;
61 struct seq_file;
62 struct sighand_struct;
63 struct signal_struct;
64 struct task_delay_info;
65 struct task_group;
66 struct io_uring_task;
67 
68 /*
69  * Task state bitmask. NOTE! These bits are also
70  * encoded in fs/proc/array.c: get_task_state().
71  *
72  * We have two separate sets of flags: task->state
73  * is about runnability, while task->exit_state is
74  * about the task exiting. Confusing, but this way
75  * modifying one set can't modify the other one by
76  * mistake.
77  */
78 
79 /* Used in tsk->state: */
80 #define TASK_RUNNING			0x0000
81 #define TASK_INTERRUPTIBLE		0x0001
82 #define TASK_UNINTERRUPTIBLE		0x0002
83 #define __TASK_STOPPED			0x0004
84 #define __TASK_TRACED			0x0008
85 /* Used in tsk->exit_state: */
86 #define EXIT_DEAD			0x0010
87 #define EXIT_ZOMBIE			0x0020
88 #define EXIT_TRACE			(EXIT_ZOMBIE | EXIT_DEAD)
89 /* Used in tsk->state again: */
90 #define TASK_PARKED			0x0040
91 #define TASK_DEAD			0x0080
92 #define TASK_WAKEKILL			0x0100
93 #define TASK_WAKING			0x0200
94 #define TASK_NOLOAD			0x0400
95 #define TASK_NEW			0x0800
96 #define TASK_STATE_MAX			0x1000
97 
98 /* Convenience macros for the sake of set_current_state: */
99 #define TASK_KILLABLE			(TASK_WAKEKILL | TASK_UNINTERRUPTIBLE)
100 #define TASK_STOPPED			(TASK_WAKEKILL | __TASK_STOPPED)
101 #define TASK_TRACED			(TASK_WAKEKILL | __TASK_TRACED)
102 
103 #define TASK_IDLE			(TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
104 
105 /* Convenience macros for the sake of wake_up(): */
106 #define TASK_NORMAL			(TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
107 
108 /* get_task_state(): */
109 #define TASK_REPORT			(TASK_RUNNING | TASK_INTERRUPTIBLE | \
110 					 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
111 					 __TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
112 					 TASK_PARKED)
113 
114 #define task_is_traced(task)		((task->state & __TASK_TRACED) != 0)
115 
116 #define task_is_stopped(task)		((task->state & __TASK_STOPPED) != 0)
117 
118 #define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
119 
120 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
121 
122 /*
123  * Special states are those that do not use the normal wait-loop pattern. See
124  * the comment with set_special_state().
125  */
126 #define is_special_task_state(state)				\
127 	((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
128 
129 #define __set_current_state(state_value)			\
130 	do {							\
131 		WARN_ON_ONCE(is_special_task_state(state_value));\
132 		current->task_state_change = _THIS_IP_;		\
133 		current->state = (state_value);			\
134 	} while (0)
135 
136 #define set_current_state(state_value)				\
137 	do {							\
138 		WARN_ON_ONCE(is_special_task_state(state_value));\
139 		current->task_state_change = _THIS_IP_;		\
140 		smp_store_mb(current->state, (state_value));	\
141 	} while (0)
142 
143 #define set_special_state(state_value)					\
144 	do {								\
145 		unsigned long flags; /* may shadow */			\
146 		WARN_ON_ONCE(!is_special_task_state(state_value));	\
147 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
148 		current->task_state_change = _THIS_IP_;			\
149 		current->state = (state_value);				\
150 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
151 	} while (0)
152 #else
153 /*
154  * set_current_state() includes a barrier so that the write of current->state
155  * is correctly serialised wrt the caller's subsequent test of whether to
156  * actually sleep:
157  *
158  *   for (;;) {
159  *	set_current_state(TASK_UNINTERRUPTIBLE);
160  *	if (CONDITION)
161  *	   break;
162  *
163  *	schedule();
164  *   }
165  *   __set_current_state(TASK_RUNNING);
166  *
167  * If the caller does not need such serialisation (because, for instance, the
168  * CONDITION test and condition change and wakeup are under the same lock) then
169  * use __set_current_state().
170  *
171  * The above is typically ordered against the wakeup, which does:
172  *
173  *   CONDITION = 1;
174  *   wake_up_state(p, TASK_UNINTERRUPTIBLE);
175  *
176  * where wake_up_state()/try_to_wake_up() executes a full memory barrier before
177  * accessing p->state.
178  *
179  * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
180  * once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
181  * TASK_RUNNING store which can collide with __set_current_state(TASK_RUNNING).
182  *
183  * However, with slightly different timing the wakeup TASK_RUNNING store can
184  * also collide with the TASK_UNINTERRUPTIBLE store. Losing that store is not
185  * a problem either because that will result in one extra go around the loop
186  * and our @cond test will save the day.
187  *
188  * Also see the comments of try_to_wake_up().
189  */
190 #define __set_current_state(state_value)				\
191 	current->state = (state_value)
192 
193 #define set_current_state(state_value)					\
194 	smp_store_mb(current->state, (state_value))
195 
196 /*
197  * set_special_state() should be used for those states when the blocking task
198  * can not use the regular condition based wait-loop. In that case we must
199  * serialize against wakeups such that any possible in-flight TASK_RUNNING stores
200  * will not collide with our state change.
201  */
202 #define set_special_state(state_value)					\
203 	do {								\
204 		unsigned long flags; /* may shadow */			\
205 		raw_spin_lock_irqsave(&current->pi_lock, flags);	\
206 		current->state = (state_value);				\
207 		raw_spin_unlock_irqrestore(&current->pi_lock, flags);	\
208 	} while (0)
209 
210 #endif
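
/*
 * Illustrative sketch (not part of this header; @lock, @condition and @p
 * are hypothetical): when the condition, its update and the wakeup are all
 * guarded by the same lock, the waker cannot slip in between the sleeper's
 * condition test and its state update, so the cheaper __set_current_state()
 * is sufficient on the sleeping side:
 *
 *   spin_lock(&lock);
 *   while (!condition) {
 *	__set_current_state(TASK_UNINTERRUPTIBLE);
 *	spin_unlock(&lock);
 *	schedule();
 *	spin_lock(&lock);
 *   }
 *   __set_current_state(TASK_RUNNING);
 *   spin_unlock(&lock);
 *
 * with the waker doing, under the same lock:
 *
 *   spin_lock(&lock);
 *   condition = true;
 *   wake_up_process(p);
 *   spin_unlock(&lock);
 */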
211 
212 /* Task command name length: */
213 #define TASK_COMM_LEN			16
214 
215 extern void scheduler_tick(void);
216 
217 #define	MAX_SCHEDULE_TIMEOUT		LONG_MAX
218 
219 extern long schedule_timeout(long timeout);
220 extern long schedule_timeout_interruptible(long timeout);
221 extern long schedule_timeout_killable(long timeout);
222 extern long schedule_timeout_uninterruptible(long timeout);
223 extern long schedule_timeout_idle(long timeout);
224 asmlinkage void schedule(void);
225 extern void schedule_preempt_disabled(void);
226 asmlinkage void preempt_schedule_irq(void);
227 
228 extern int __must_check io_schedule_prepare(void);
229 extern void io_schedule_finish(int token);
230 extern long io_schedule_timeout(long timeout);
231 extern void io_schedule(void);
232 
233 /**
234  * struct prev_cputime - snapshot of system and user cputime
235  * @utime: time spent in user mode
236  * @stime: time spent in system mode
237  * @lock: protects the above two fields
238  *
239  * Stores previous user/system time values such that we can guarantee
240  * monotonicity.
241  */
242 struct prev_cputime {
243 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
244 	u64				utime;
245 	u64				stime;
246 	raw_spinlock_t			lock;
247 #endif
248 };
249 
250 enum vtime_state {
251 	/* Task is sleeping or running in a CPU with VTIME inactive: */
252 	VTIME_INACTIVE = 0,
253 	/* Task is idle */
254 	VTIME_IDLE,
255 	/* Task runs in kernelspace in a CPU with VTIME active: */
256 	VTIME_SYS,
257 	/* Task runs in userspace in a CPU with VTIME active: */
258 	VTIME_USER,
259 	/* Task runs as guests in a CPU with VTIME active: */
260 	VTIME_GUEST,
261 };
262 
263 struct vtime {
264 	seqcount_t		seqcount;
265 	unsigned long long	starttime;
266 	enum vtime_state	state;
267 	unsigned int		cpu;
268 	u64			utime;
269 	u64			stime;
270 	u64			gtime;
271 };
272 
273 /*
274  * Utilization clamp constraints.
275  * @UCLAMP_MIN:	Minimum utilization
276  * @UCLAMP_MAX:	Maximum utilization
277  * @UCLAMP_CNT:	Utilization clamp constraints count
278  */
279 enum uclamp_id {
280 	UCLAMP_MIN = 0,
281 	UCLAMP_MAX,
282 	UCLAMP_CNT
283 };
284 
285 #ifdef CONFIG_SMP
286 extern struct root_domain def_root_domain;
287 extern struct mutex sched_domains_mutex;
288 #endif
289 
290 struct sched_info {
291 #ifdef CONFIG_SCHED_INFO
292 	/* Cumulative counters: */
293 
294 	/* # of times we have run on this CPU: */
295 	unsigned long			pcount;
296 
297 	/* Time spent waiting on a runqueue: */
298 	unsigned long long		run_delay;
299 
300 	/* Timestamps: */
301 
302 	/* When did we last run on a CPU? */
303 	unsigned long long		last_arrival;
304 
305 	/* When were we last queued to run? */
306 	unsigned long long		last_queued;
307 
308 #endif /* CONFIG_SCHED_INFO */
309 };
310 
311 /*
312  * Integer metrics need fixed point arithmetic, e.g., sched/fair
313  * has a few: load, load_avg, util_avg, freq, and capacity.
314  *
315  * We define a basic fixed point arithmetic range, and then formalize
316  * all these metrics based on that basic range.
317  */
318 # define SCHED_FIXEDPOINT_SHIFT		10
319 # define SCHED_FIXEDPOINT_SCALE		(1L << SCHED_FIXEDPOINT_SHIFT)
320 
321 /* Increase resolution of cpu_capacity calculations */
322 # define SCHED_CAPACITY_SHIFT		SCHED_FIXEDPOINT_SHIFT
323 # define SCHED_CAPACITY_SCALE		(1L << SCHED_CAPACITY_SHIFT)
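
/*
 * Worked example (illustrative): with SCHED_FIXEDPOINT_SHIFT == 10 both
 * scales are 1024, so a ratio of 50% is encoded as 512, and a CPU running
 * at three quarters of its maximum capacity reports a capacity of
 * 3 * SCHED_CAPACITY_SCALE / 4 == 768.
 */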
324 
325 struct load_weight {
326 	unsigned long			weight;
327 	u32				inv_weight;
328 };
329 
330 /**
331  * struct util_est - Estimated utilization of FAIR tasks
332  * @enqueued: instantaneous estimated utilization of a task/cpu
333  * @ewma:     the Exponential Weighted Moving Average (EWMA)
334  *            utilization of a task
335  *
336  * Support data structure to track an Exponential Weighted Moving Average
337  * (EWMA) of a FAIR task's utilization. New samples are added to the moving
338  * average each time a task completes an activation. The sample's weight is chosen
339  * so that the EWMA will be relatively insensitive to transient changes to the
340  * task's workload.
341  *
342  * The enqueued attribute has a slightly different meaning for tasks and cpus:
343  * - task:   the task's util_avg at last task dequeue time
344  * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
345  * Thus, the util_est.enqueued of a task represents the contribution to the
346  * estimated utilization of the CPU where that task is currently enqueued.
347  *
348  * Only for tasks do we track a moving average of the past instantaneous
349  * estimated utilization. This lets the estimate absorb sporadic drops in the
350  * utilization of an otherwise almost periodic task.
351  */
352 struct util_est {
353 	unsigned int			enqueued;
354 	unsigned int			ewma;
355 #define UTIL_EST_WEIGHT_SHIFT		2
356 } __attribute__((__aligned__(sizeof(u64))));
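
/*
 * Illustrative note (the actual update code lives in kernel/sched/fair.c):
 * with UTIL_EST_WEIGHT_SHIFT == 2 the weight of a new sample is w = 1/4,
 * so on each activation completion the average roughly follows
 *
 *   ewma' = w * enqueued + (1 - w) * ewma
 *         = ewma + (enqueued - ewma) / 4
 *
 * i.e. a single short activation only moves the estimate by a quarter of
 * the difference, which is what keeps it insensitive to transient dips.
 */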
357 
358 /*
359  * The load/runnable/util_avg accumulates an infinite geometric series
360  * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
361  *
362  * [load_avg definition]
363  *
364  *   load_avg = runnable% * scale_load_down(load)
365  *
366  * [runnable_avg definition]
367  *
368  *   runnable_avg = runnable% * SCHED_CAPACITY_SCALE
369  *
370  * [util_avg definition]
371  *
372  *   util_avg = running% * SCHED_CAPACITY_SCALE
373  *
374  * where runnable% is the time ratio that a sched_entity is runnable and
375  * running% the time ratio that a sched_entity is running.
376  *
377  * For cfs_rq, they are the aggregated values of all runnable and blocked
378  * sched_entities.
379  *
380  * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
381  * capacity scaling. The scaling is done through the rq_clock_pelt that is used
382  * for computing those signals (see update_rq_clock_pelt())
383  *
384  * N.B., the above ratios (runnable% and running%) themselves are in the
385  * range of [0, 1]. To do fixed point arithmetic, we therefore scale them
386  * to as large a range as necessary. This is for example reflected by
387  * util_avg's SCHED_CAPACITY_SCALE.
388  *
389  * [Overflow issue]
390  *
391  * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
392  * with the highest load (=88761), always runnable on a single cfs_rq,
393  * and should not overflow as the number already hits PID_MAX_LIMIT.
394  *
395  * For all other cases (including 32-bit kernels), struct load_weight's
396  * weight will overflow first before we do, because:
397  *
398  *    Max(load_avg) <= Max(load.weight)
399  *
400  * Then it is the load_weight's responsibility to consider overflow
401  * issues.
402  */
403 struct sched_avg {
404 	u64				last_update_time;
405 	u64				load_sum;
406 	u64				runnable_sum;
407 	u32				util_sum;
408 	u32				period_contrib;
409 	unsigned long			load_avg;
410 	unsigned long			runnable_avg;
411 	unsigned long			util_avg;
412 	struct util_est			util_est;
413 } ____cacheline_aligned;
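
/*
 * Illustrative note on the overflow bound quoted above: 47742 is the
 * maximum value of the PELT geometric series (LOAD_AVG_MAX) and 88761 is
 * the load weight of a nice -20 task, so 2^64 / 47742 / 88761 ~= 4.35e9
 * is how many such maximally weighted, always-runnable entities a single
 * 64-bit load_sum can accommodate, far more than PID_MAX_LIMIT allows.
 */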
414 
415 struct sched_statistics {
416 #ifdef CONFIG_SCHEDSTATS
417 	u64				wait_start;
418 	u64				wait_max;
419 	u64				wait_count;
420 	u64				wait_sum;
421 	u64				iowait_count;
422 	u64				iowait_sum;
423 
424 	u64				sleep_start;
425 	u64				sleep_max;
426 	s64				sum_sleep_runtime;
427 
428 	u64				block_start;
429 	u64				block_max;
430 	u64				exec_max;
431 	u64				slice_max;
432 
433 	u64				nr_migrations_cold;
434 	u64				nr_failed_migrations_affine;
435 	u64				nr_failed_migrations_running;
436 	u64				nr_failed_migrations_hot;
437 	u64				nr_forced_migrations;
438 
439 	u64				nr_wakeups;
440 	u64				nr_wakeups_sync;
441 	u64				nr_wakeups_migrate;
442 	u64				nr_wakeups_local;
443 	u64				nr_wakeups_remote;
444 	u64				nr_wakeups_affine;
445 	u64				nr_wakeups_affine_attempts;
446 	u64				nr_wakeups_passive;
447 	u64				nr_wakeups_idle;
448 #endif
449 };
450 
451 struct sched_entity {
452 	/* For load-balancing: */
453 	struct load_weight		load;
454 	struct rb_node			run_node;
455 	struct list_head		group_node;
456 	unsigned int			on_rq;
457 
458 	u64				exec_start;
459 	u64				sum_exec_runtime;
460 	u64				vruntime;
461 	u64				prev_sum_exec_runtime;
462 
463 	u64				nr_migrations;
464 
465 	struct sched_statistics		statistics;
466 
467 #ifdef CONFIG_FAIR_GROUP_SCHED
468 	int				depth;
469 	struct sched_entity		*parent;
470 	/* rq on which this entity is (to be) queued: */
471 	struct cfs_rq			*cfs_rq;
472 	/* rq "owned" by this entity/group: */
473 	struct cfs_rq			*my_q;
474 	/* cached value of my_q->h_nr_running */
475 	unsigned long			runnable_weight;
476 #endif
477 
478 #ifdef CONFIG_SMP
479 	/*
480 	 * Per entity load average tracking.
481 	 *
482 	 * Put into separate cache line so it does not
483 	 * collide with read-mostly values above.
484 	 */
485 	struct sched_avg		avg;
486 #endif
487 };
488 
489 struct sched_rt_entity {
490 	struct list_head		run_list;
491 	unsigned long			timeout;
492 	unsigned long			watchdog_stamp;
493 	unsigned int			time_slice;
494 	unsigned short			on_rq;
495 	unsigned short			on_list;
496 
497 	struct sched_rt_entity		*back;
498 #ifdef CONFIG_RT_GROUP_SCHED
499 	struct sched_rt_entity		*parent;
500 	/* rq on which this entity is (to be) queued: */
501 	struct rt_rq			*rt_rq;
502 	/* rq "owned" by this entity/group: */
503 	struct rt_rq			*my_q;
504 #endif
505 } __randomize_layout;
506 
507 struct sched_dl_entity {
508 	struct rb_node			rb_node;
509 
510 	/*
511 	 * Original scheduling parameters. Copied here from sched_attr
512 	 * during sched_setattr(), they will remain the same until
513 	 * the next sched_setattr().
514 	 */
515 	u64				dl_runtime;	/* Maximum runtime for each instance	*/
516 	u64				dl_deadline;	/* Relative deadline of each instance	*/
517 	u64				dl_period;	/* Separation of two instances (period) */
518 	u64				dl_bw;		/* dl_runtime / dl_period		*/
519 	u64				dl_density;	/* dl_runtime / dl_deadline		*/
520 
521 	/*
522 	 * Actual scheduling parameters. Initialized with the values above,
523 	 * they are continuously updated during task execution. Note that
524 	 * the remaining runtime could be < 0 in case we are in overrun.
525 	 */
526 	s64				runtime;	/* Remaining runtime for this instance	*/
527 	u64				deadline;	/* Absolute deadline for this instance	*/
528 	unsigned int			flags;		/* Specifying the scheduler behaviour	*/
529 
530 	/*
531 	 * Some bool flags:
532 	 *
533 	 * @dl_throttled tells if we exhausted the runtime. If so, the
534 	 * task has to wait for a replenishment to be performed at the
535 	 * next firing of dl_timer.
536 	 *
537 	 * @dl_boosted tells if we are boosted due to DI. If so we are
538 	 * outside bandwidth enforcement mechanism (but only until we
539 	 * exit the critical section);
540 	 *
541 	 * @dl_yielded tells if task gave up the CPU before consuming
542 	 * all its available runtime during the last job.
543 	 *
544 	 * @dl_non_contending tells if the task is inactive while still
545 	 * contributing to the active utilization. In other words, it
546 	 * indicates if the inactive timer has been armed and its handler
547 	 * has not been executed yet. This flag is useful to avoid race
548 	 * conditions between the inactive timer handler and the wakeup
549 	 * code.
550 	 *
551 	 * @dl_overrun tells if the task asked to be informed about runtime
552 	 * overruns.
553 	 */
554 	unsigned int			dl_throttled      : 1;
555 	unsigned int			dl_yielded        : 1;
556 	unsigned int			dl_non_contending : 1;
557 	unsigned int			dl_overrun	  : 1;
558 
559 	/*
560 	 * Bandwidth enforcement timer. Each -deadline task has its
561 	 * own bandwidth to be enforced, thus we need one timer per task.
562 	 */
563 	struct hrtimer			dl_timer;
564 
565 	/*
566 	 * Inactive timer, responsible for decreasing the active utilization
567 	 * at the "0-lag time". When a -deadline task blocks, it contributes
568 	 * to GRUB's active utilization until the "0-lag time", hence a
569 	 * timer is needed to decrease the active utilization at the correct
570 	 * time.
571 	 */
572 	struct hrtimer inactive_timer;
573 
574 #ifdef CONFIG_RT_MUTEXES
575 	/*
576 	 * Priority Inheritance. When a DEADLINE scheduling entity is boosted
577 	 * pi_se points to the donor, otherwise points to the dl_se it belongs
578 	 * to (the original one/itself).
579 	 */
580 	struct sched_dl_entity *pi_se;
581 #endif
582 };
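
/*
 * Worked example (illustrative, all values hypothetical): a task admitted
 * with sched_runtime = 10ms, sched_deadline = 30ms and sched_period = 100ms
 * (sched_attr times are in nanoseconds) ends up with dl_bw ~= 0.10 and
 * dl_density ~= 0.33 of a CPU (both kept in fixed point), and each new
 * instance starts with runtime = 10ms and deadline = now + 30ms.
 */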
583 
584 #ifdef CONFIG_UCLAMP_TASK
585 /* Number of utilization clamp buckets (shorter alias) */
586 #define UCLAMP_BUCKETS CONFIG_UCLAMP_BUCKETS_COUNT
587 
588 /*
589  * Utilization clamp for a scheduling entity
590  * @value:		clamp value "assigned" to a se
591  * @bucket_id:		bucket index corresponding to the "assigned" value
592  * @active:		the se is currently refcounted in a rq's bucket
593  * @user_defined:	the requested clamp value comes from user-space
594  *
595  * The bucket_id is the index of the clamp bucket matching the clamp value
596  * which is pre-computed and stored to avoid expensive integer divisions from
597  * the fast path.
598  *
599  * The active bit is set whenever a task has got an "effective" value assigned,
600  * which can be different from the clamp value "requested" from user-space.
601  * This allows to know a task is refcounted in the rq's bucket corresponding
602  * to the "effective" bucket_id.
603  *
604  * The user_defined bit is set whenever a task has got a task-specific clamp
605  * value requested from userspace, i.e. the system defaults apply to this task
606  * just as a restriction. This allows relaxing the default clamps when a less
607  * restrictive task-specific value has been requested, thus making it possible to
608  * implement a "nice" semantic. For example, a task running with a 20%
609  * default boost can still drop its own boosting to 0%.
610  */
611 struct uclamp_se {
612 	unsigned int value		: bits_per(SCHED_CAPACITY_SCALE);
613 	unsigned int bucket_id		: bits_per(UCLAMP_BUCKETS);
614 	unsigned int active		: 1;
615 	unsigned int user_defined	: 1;
616 };
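
/*
 * Illustrative example (the bucket count is CONFIG_UCLAMP_BUCKETS_COUNT,
 * 5 by default): buckets then cover roughly 205 utilization units each,
 * so a requested clamp value of 300 maps to bucket_id 1; the mapping is
 * done once here precisely so the fast path never has to divide.
 */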
617 #endif /* CONFIG_UCLAMP_TASK */
618 
619 union rcu_special {
620 	struct {
621 		u8			blocked;
622 		u8			need_qs;
623 		u8			exp_hint; /* Hint for performance. */
624 		u8			need_mb; /* Readers need smp_mb(). */
625 	} b; /* Bits. */
626 	u32 s; /* Set of bits. */
627 };
628 
629 enum perf_event_task_context {
630 	perf_invalid_context = -1,
631 	perf_hw_context = 0,
632 	perf_sw_context,
633 	perf_nr_task_contexts,
634 };
635 
636 struct wake_q_node {
637 	struct wake_q_node *next;
638 };
639 
640 struct task_struct {
641 #ifdef CONFIG_THREAD_INFO_IN_TASK
642 	/*
643 	 * For reasons of header soup (see current_thread_info()), this
644 	 * must be the first element of task_struct.
645 	 */
646 	struct thread_info		thread_info;
647 #endif
648 	/* -1 unrunnable, 0 runnable, >0 stopped: */
649 	volatile long			state;
650 
651 	/*
652 	 * This begins the randomizable portion of task_struct. Only
653 	 * scheduling-critical items should be added above here.
654 	 */
655 	randomized_struct_fields_start
656 
657 	void				*stack;
658 	refcount_t			usage;
659 	/* Per task flags (PF_*), defined further below: */
660 	unsigned int			flags;
661 	unsigned int			ptrace;
662 
663 #ifdef CONFIG_SMP
664 	int				on_cpu;
665 	struct __call_single_node	wake_entry;
666 #ifdef CONFIG_THREAD_INFO_IN_TASK
667 	/* Current CPU: */
668 	unsigned int			cpu;
669 #endif
670 	unsigned int			wakee_flips;
671 	unsigned long			wakee_flip_decay_ts;
672 	struct task_struct		*last_wakee;
673 
674 	/*
675 	 * recent_used_cpu is initially set as the last CPU used by a task
676 	 * that wakes affine another task. Waker/wakee relationships can
677 	 * push tasks around a CPU where each wakeup moves to the next one.
678 	 * Tracking a recently used CPU allows a quick search for a recently
679 	 * used CPU that may be idle.
680 	 */
681 	int				recent_used_cpu;
682 	int				wake_cpu;
683 #endif
684 	int				on_rq;
685 
686 	int				prio;
687 	int				static_prio;
688 	int				normal_prio;
689 	unsigned int			rt_priority;
690 
691 	const struct sched_class	*sched_class;
692 	struct sched_entity		se;
693 	struct sched_rt_entity		rt;
694 #ifdef CONFIG_CGROUP_SCHED
695 	struct task_group		*sched_task_group;
696 #endif
697 	struct sched_dl_entity		dl;
698 
699 #ifdef CONFIG_UCLAMP_TASK
700 	/*
701 	 * Clamp values requested for a scheduling entity.
702 	 * Must be updated with task_rq_lock() held.
703 	 */
704 	struct uclamp_se		uclamp_req[UCLAMP_CNT];
705 	/*
706 	 * Effective clamp values used for a scheduling entity.
707 	 * Must be updated with task_rq_lock() held.
708 	 */
709 	struct uclamp_se		uclamp[UCLAMP_CNT];
710 #endif
711 
712 #ifdef CONFIG_PREEMPT_NOTIFIERS
713 	/* List of struct preempt_notifier: */
714 	struct hlist_head		preempt_notifiers;
715 #endif
716 
717 #ifdef CONFIG_BLK_DEV_IO_TRACE
718 	unsigned int			btrace_seq;
719 #endif
720 
721 	unsigned int			policy;
722 	int				nr_cpus_allowed;
723 	const cpumask_t			*cpus_ptr;
724 	cpumask_t			cpus_mask;
725 
726 #ifdef CONFIG_PREEMPT_RCU
727 	int				rcu_read_lock_nesting;
728 	union rcu_special		rcu_read_unlock_special;
729 	struct list_head		rcu_node_entry;
730 	struct rcu_node			*rcu_blocked_node;
731 #endif /* #ifdef CONFIG_PREEMPT_RCU */
732 
733 #ifdef CONFIG_TASKS_RCU
734 	unsigned long			rcu_tasks_nvcsw;
735 	u8				rcu_tasks_holdout;
736 	u8				rcu_tasks_idx;
737 	int				rcu_tasks_idle_cpu;
738 	struct list_head		rcu_tasks_holdout_list;
739 #endif /* #ifdef CONFIG_TASKS_RCU */
740 
741 #ifdef CONFIG_TASKS_TRACE_RCU
742 	int				trc_reader_nesting;
743 	int				trc_ipi_to_cpu;
744 	union rcu_special		trc_reader_special;
745 	bool				trc_reader_checked;
746 	struct list_head		trc_holdout_list;
747 #endif /* #ifdef CONFIG_TASKS_TRACE_RCU */
748 
749 	struct sched_info		sched_info;
750 
751 	struct list_head		tasks;
752 #ifdef CONFIG_SMP
753 	struct plist_node		pushable_tasks;
754 	struct rb_node			pushable_dl_tasks;
755 #endif
756 
757 	struct mm_struct		*mm;
758 	struct mm_struct		*active_mm;
759 
760 	/* Per-thread vma caching: */
761 	struct vmacache			vmacache;
762 
763 #ifdef SPLIT_RSS_COUNTING
764 	struct task_rss_stat		rss_stat;
765 #endif
766 	int				exit_state;
767 	int				exit_code;
768 	int				exit_signal;
769 	/* The signal sent when the parent dies: */
770 	int				pdeath_signal;
771 	/* JOBCTL_*, siglock protected: */
772 	unsigned long			jobctl;
773 
774 	/* Used for emulating ABI behavior of previous Linux versions: */
775 	unsigned int			personality;
776 
777 	/* Scheduler bits, serialized by scheduler locks: */
778 	unsigned			sched_reset_on_fork:1;
779 	unsigned			sched_contributes_to_load:1;
780 	unsigned			sched_migrated:1;
781 #ifdef CONFIG_PSI
782 	unsigned			sched_psi_wake_requeue:1;
783 #endif
784 
785 	/* Force alignment to the next boundary: */
786 	unsigned			:0;
787 
788 	/* Unserialized, strictly 'current' */
789 
790 	/*
791 	 * This field must not be in the scheduler word above due to wakelist
792 	 * queueing no longer being serialized by p->on_cpu. However:
793 	 *
794 	 * p->XXX = X;			ttwu()
795 	 * schedule()			  if (p->on_rq && ..) // false
796 	 *   smp_mb__after_spinlock();	  if (smp_load_acquire(&p->on_cpu) && //true
797 	 *   deactivate_task()		      ttwu_queue_wakelist())
798 	 *     p->on_rq = 0;			p->sched_remote_wakeup = Y;
799 	 *
800 	 * guarantees all stores of 'current' are visible before
801 	 * ->sched_remote_wakeup gets used, so it can be in this word.
802 	 */
803 	unsigned			sched_remote_wakeup:1;
804 
805 	/* Bit to tell LSMs we're in execve(): */
806 	unsigned			in_execve:1;
807 	unsigned			in_iowait:1;
808 #ifndef TIF_RESTORE_SIGMASK
809 	unsigned			restore_sigmask:1;
810 #endif
811 #ifdef CONFIG_MEMCG
812 	unsigned			in_user_fault:1;
813 #endif
814 #ifdef CONFIG_COMPAT_BRK
815 	unsigned			brk_randomized:1;
816 #endif
817 #ifdef CONFIG_CGROUPS
818 	/* disallow userland-initiated cgroup migration */
819 	unsigned			no_cgroup_migration:1;
820 	/* task is frozen/stopped (used by the cgroup freezer) */
821 	unsigned			frozen:1;
822 #endif
823 #ifdef CONFIG_BLK_CGROUP
824 	unsigned			use_memdelay:1;
825 #endif
826 #ifdef CONFIG_PSI
827 	/* Stalled due to lack of memory */
828 	unsigned			in_memstall:1;
829 #endif
830 
831 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
832 
833 	struct restart_block		restart_block;
834 
835 	pid_t				pid;
836 	pid_t				tgid;
837 
838 #ifdef CONFIG_STACKPROTECTOR
839 	/* Canary value for the -fstack-protector GCC feature: */
840 	unsigned long			stack_canary;
841 #endif
842 	/*
843 	 * Pointers to the (original) parent process, youngest child, younger sibling,
844 	 * older sibling, respectively.  (p->father can be replaced with
845 	 * p->real_parent->pid)
846 	 */
847 
848 	/* Real parent process: */
849 	struct task_struct __rcu	*real_parent;
850 
851 	/* Recipient of SIGCHLD, wait4() reports: */
852 	struct task_struct __rcu	*parent;
853 
854 	/*
855 	 * Children/sibling form the list of natural children:
856 	 */
857 	struct list_head		children;
858 	struct list_head		sibling;
859 	struct task_struct		*group_leader;
860 
861 	/*
862 	 * 'ptraced' is the list of tasks this task is using ptrace() on.
863 	 *
864 	 * This includes both natural children and PTRACE_ATTACH targets.
865 	 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
866 	 */
867 	struct list_head		ptraced;
868 	struct list_head		ptrace_entry;
869 
870 	/* PID/PID hash table linkage. */
871 	struct pid			*thread_pid;
872 	struct hlist_node		pid_links[PIDTYPE_MAX];
873 	struct list_head		thread_group;
874 	struct list_head		thread_node;
875 
876 	struct completion		*vfork_done;
877 
878 	/* CLONE_CHILD_SETTID: */
879 	int __user			*set_child_tid;
880 
881 	/* CLONE_CHILD_CLEARTID: */
882 	int __user			*clear_child_tid;
883 
884 	u64				utime;
885 	u64				stime;
886 #ifdef CONFIG_ARCH_HAS_SCALED_CPUTIME
887 	u64				utimescaled;
888 	u64				stimescaled;
889 #endif
890 	u64				gtime;
891 	struct prev_cputime		prev_cputime;
892 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
893 	struct vtime			vtime;
894 #endif
895 
896 #ifdef CONFIG_NO_HZ_FULL
897 	atomic_t			tick_dep_mask;
898 #endif
899 	/* Context switch counts: */
900 	unsigned long			nvcsw;
901 	unsigned long			nivcsw;
902 
903 	/* Monotonic time in nsecs: */
904 	u64				start_time;
905 
906 	/* Boot based time in nsecs: */
907 	u64				start_boottime;
908 
909 	/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
910 	unsigned long			min_flt;
911 	unsigned long			maj_flt;
912 
913 	/* Empty if CONFIG_POSIX_CPUTIMERS=n */
914 	struct posix_cputimers		posix_cputimers;
915 
916 #ifdef CONFIG_POSIX_CPU_TIMERS_TASK_WORK
917 	struct posix_cputimers_work	posix_cputimers_work;
918 #endif
919 
920 	/* Process credentials: */
921 
922 	/* Tracer's credentials at attach: */
923 	const struct cred __rcu		*ptracer_cred;
924 
925 	/* Objective and real subjective task credentials (COW): */
926 	const struct cred __rcu		*real_cred;
927 
928 	/* Effective (overridable) subjective task credentials (COW): */
929 	const struct cred __rcu		*cred;
930 
931 #ifdef CONFIG_KEYS
932 	/* Cached requested key. */
933 	struct key			*cached_requested_key;
934 #endif
935 
936 	/*
937 	 * executable name, excluding path.
938 	 *
939 	 * - normally initialized setup_new_exec()
940 	 * - access it with [gs]et_task_comm()
941 	 * - lock it with task_lock()
942 	 */
943 	char				comm[TASK_COMM_LEN];
944 
945 	struct nameidata		*nameidata;
946 
947 #ifdef CONFIG_SYSVIPC
948 	struct sysv_sem			sysvsem;
949 	struct sysv_shm			sysvshm;
950 #endif
951 #ifdef CONFIG_DETECT_HUNG_TASK
952 	unsigned long			last_switch_count;
953 	unsigned long			last_switch_time;
954 #endif
955 	/* Filesystem information: */
956 	struct fs_struct		*fs;
957 
958 	/* Open file information: */
959 	struct files_struct		*files;
960 
961 #ifdef CONFIG_IO_URING
962 	struct io_uring_task		*io_uring;
963 #endif
964 
965 	/* Namespaces: */
966 	struct nsproxy			*nsproxy;
967 
968 	/* Signal handlers: */
969 	struct signal_struct		*signal;
970 	struct sighand_struct __rcu		*sighand;
971 	sigset_t			blocked;
972 	sigset_t			real_blocked;
973 	/* Restored if set_restore_sigmask() was used: */
974 	sigset_t			saved_sigmask;
975 	struct sigpending		pending;
976 	unsigned long			sas_ss_sp;
977 	size_t				sas_ss_size;
978 	unsigned int			sas_ss_flags;
979 
980 	struct callback_head		*task_works;
981 
982 #ifdef CONFIG_AUDIT
983 #ifdef CONFIG_AUDITSYSCALL
984 	struct audit_context		*audit_context;
985 #endif
986 	kuid_t				loginuid;
987 	unsigned int			sessionid;
988 #endif
989 	struct seccomp			seccomp;
990 
991 	/* Thread group tracking: */
992 	u64				parent_exec_id;
993 	u64				self_exec_id;
994 
995 	/* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
996 	spinlock_t			alloc_lock;
997 
998 	/* Protection of the PI data structures: */
999 	raw_spinlock_t			pi_lock;
1000 
1001 	struct wake_q_node		wake_q;
1002 
1003 #ifdef CONFIG_RT_MUTEXES
1004 	/* PI waiters blocked on a rt_mutex held by this task: */
1005 	struct rb_root_cached		pi_waiters;
1006 	/* Updated under owner's pi_lock and rq lock */
1007 	struct task_struct		*pi_top_task;
1008 	/* Deadlock detection and priority inheritance handling: */
1009 	struct rt_mutex_waiter		*pi_blocked_on;
1010 #endif
1011 
1012 #ifdef CONFIG_DEBUG_MUTEXES
1013 	/* Mutex deadlock detection: */
1014 	struct mutex_waiter		*blocked_on;
1015 #endif
1016 
1017 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1018 	int				non_block_count;
1019 #endif
1020 
1021 #ifdef CONFIG_TRACE_IRQFLAGS
1022 	struct irqtrace_events		irqtrace;
1023 	unsigned int			hardirq_threaded;
1024 	u64				hardirq_chain_key;
1025 	int				softirqs_enabled;
1026 	int				softirq_context;
1027 	int				irq_config;
1028 #endif
1029 
1030 #ifdef CONFIG_LOCKDEP
1031 # define MAX_LOCK_DEPTH			48UL
1032 	u64				curr_chain_key;
1033 	int				lockdep_depth;
1034 	unsigned int			lockdep_recursion;
1035 	struct held_lock		held_locks[MAX_LOCK_DEPTH];
1036 #endif
1037 
1038 #if defined(CONFIG_UBSAN) && !defined(CONFIG_UBSAN_TRAP)
1039 	unsigned int			in_ubsan;
1040 #endif
1041 
1042 	/* Journalling filesystem info: */
1043 	void				*journal_info;
1044 
1045 	/* Stacked block device info: */
1046 	struct bio_list			*bio_list;
1047 
1048 #ifdef CONFIG_BLOCK
1049 	/* Stack plugging: */
1050 	struct blk_plug			*plug;
1051 #endif
1052 
1053 	/* VM state: */
1054 	struct reclaim_state		*reclaim_state;
1055 
1056 	struct backing_dev_info		*backing_dev_info;
1057 
1058 	struct io_context		*io_context;
1059 
1060 #ifdef CONFIG_COMPACTION
1061 	struct capture_control		*capture_control;
1062 #endif
1063 	/* Ptrace state: */
1064 	unsigned long			ptrace_message;
1065 	kernel_siginfo_t		*last_siginfo;
1066 
1067 	struct task_io_accounting	ioac;
1068 #ifdef CONFIG_PSI
1069 	/* Pressure stall state */
1070 	unsigned int			psi_flags;
1071 #endif
1072 #ifdef CONFIG_TASK_XACCT
1073 	/* Accumulated RSS usage: */
1074 	u64				acct_rss_mem1;
1075 	/* Accumulated virtual memory usage: */
1076 	u64				acct_vm_mem1;
1077 	/* stime + utime since last update: */
1078 	u64				acct_timexpd;
1079 #endif
1080 #ifdef CONFIG_CPUSETS
1081 	/* Protected by ->alloc_lock: */
1082 	nodemask_t			mems_allowed;
1083 	/* Sequence number to catch updates: */
1084 	seqcount_spinlock_t		mems_allowed_seq;
1085 	int				cpuset_mem_spread_rotor;
1086 	int				cpuset_slab_spread_rotor;
1087 #endif
1088 #ifdef CONFIG_CGROUPS
1089 	/* Control Group info protected by css_set_lock: */
1090 	struct css_set __rcu		*cgroups;
1091 	/* cg_list protected by css_set_lock and tsk->alloc_lock: */
1092 	struct list_head		cg_list;
1093 #endif
1094 #ifdef CONFIG_X86_CPU_RESCTRL
1095 	u32				closid;
1096 	u32				rmid;
1097 #endif
1098 #ifdef CONFIG_FUTEX
1099 	struct robust_list_head __user	*robust_list;
1100 #ifdef CONFIG_COMPAT
1101 	struct compat_robust_list_head __user *compat_robust_list;
1102 #endif
1103 	struct list_head		pi_state_list;
1104 	struct futex_pi_state		*pi_state_cache;
1105 	struct mutex			futex_exit_mutex;
1106 	unsigned int			futex_state;
1107 #endif
1108 #ifdef CONFIG_PERF_EVENTS
1109 	struct perf_event_context	*perf_event_ctxp[perf_nr_task_contexts];
1110 	struct mutex			perf_event_mutex;
1111 	struct list_head		perf_event_list;
1112 #endif
1113 #ifdef CONFIG_DEBUG_PREEMPT
1114 	unsigned long			preempt_disable_ip;
1115 #endif
1116 #ifdef CONFIG_NUMA
1117 	/* Protected by alloc_lock: */
1118 	struct mempolicy		*mempolicy;
1119 	short				il_prev;
1120 	short				pref_node_fork;
1121 #endif
1122 #ifdef CONFIG_NUMA_BALANCING
1123 	int				numa_scan_seq;
1124 	unsigned int			numa_scan_period;
1125 	unsigned int			numa_scan_period_max;
1126 	int				numa_preferred_nid;
1127 	unsigned long			numa_migrate_retry;
1128 	/* Migration stamp: */
1129 	u64				node_stamp;
1130 	u64				last_task_numa_placement;
1131 	u64				last_sum_exec_runtime;
1132 	struct callback_head		numa_work;
1133 
1134 	/*
1135 	 * This pointer is only modified for current in syscall and
1136 	 * pagefault context (and for tasks being destroyed), so it can be read
1137 	 * from any of the following contexts:
1138 	 *  - RCU read-side critical section
1139 	 *  - current->numa_group from everywhere
1140 	 *  - task's runqueue locked, task not running
1141 	 */
1142 	struct numa_group __rcu		*numa_group;
1143 
1144 	/*
1145 	 * numa_faults is an array split into four regions:
1146 	 * faults_memory, faults_cpu, faults_memory_buffer, faults_cpu_buffer
1147 	 * in this precise order.
1148 	 *
1149 	 * faults_memory: Exponential decaying average of faults on a per-node
1150 	 * basis. Scheduling placement decisions are made based on these
1151 	 * counts. The values remain static for the duration of a PTE scan.
1152 	 * faults_cpu: Track the nodes the process was running on when a NUMA
1153 	 * hinting fault was incurred.
1154 	 * faults_memory_buffer and faults_cpu_buffer: Record faults per node
1155 	 * during the current scan window. When the scan completes, the counts
1156 	 * in faults_memory and faults_cpu decay and these values are copied.
1157 	 */
1158 	unsigned long			*numa_faults;
1159 	unsigned long			total_numa_faults;
1160 
1161 	/*
1162 	 * numa_faults_locality tracks if faults recorded during the last
1163 	 * scan window were remote/local or failed to migrate. The task scan
1164 	 * period is adapted based on the locality of the faults with different
1165 	 * weights depending on whether they were shared or private faults
1166 	 */
1167 	unsigned long			numa_faults_locality[3];
1168 
1169 	unsigned long			numa_pages_migrated;
1170 #endif /* CONFIG_NUMA_BALANCING */
1171 
1172 #ifdef CONFIG_RSEQ
1173 	struct rseq __user *rseq;
1174 	u32 rseq_sig;
1175 	/*
1176 	 * RmW on rseq_event_mask must be performed atomically
1177 	 * with respect to preemption.
1178 	 */
1179 	unsigned long rseq_event_mask;
1180 #endif
1181 
1182 	struct tlbflush_unmap_batch	tlb_ubc;
1183 
1184 	union {
1185 		refcount_t		rcu_users;
1186 		struct rcu_head		rcu;
1187 	};
1188 
1189 	/* Cache last used pipe for splice(): */
1190 	struct pipe_inode_info		*splice_pipe;
1191 
1192 	struct page_frag		task_frag;
1193 
1194 #ifdef CONFIG_TASK_DELAY_ACCT
1195 	struct task_delay_info		*delays;
1196 #endif
1197 
1198 #ifdef CONFIG_FAULT_INJECTION
1199 	int				make_it_fail;
1200 	unsigned int			fail_nth;
1201 #endif
1202 	/*
1203 	 * When (nr_dirtied >= nr_dirtied_pause), it's time to call
1204 	 * balance_dirty_pages() for a dirty throttling pause:
1205 	 */
1206 	int				nr_dirtied;
1207 	int				nr_dirtied_pause;
1208 	/* Start of a write-and-pause period: */
1209 	unsigned long			dirty_paused_when;
1210 
1211 #ifdef CONFIG_LATENCYTOP
1212 	int				latency_record_count;
1213 	struct latency_record		latency_record[LT_SAVECOUNT];
1214 #endif
1215 	/*
1216 	 * Time slack values; these are used to round up poll() and
1217 	 * select() etc timeout values. These are in nanoseconds.
1218 	 */
1219 	u64				timer_slack_ns;
1220 	u64				default_timer_slack_ns;
1221 
1222 #ifdef CONFIG_KASAN
1223 	unsigned int			kasan_depth;
1224 #endif
1225 
1226 #ifdef CONFIG_KCSAN
1227 	struct kcsan_ctx		kcsan_ctx;
1228 #ifdef CONFIG_TRACE_IRQFLAGS
1229 	struct irqtrace_events		kcsan_save_irqtrace;
1230 #endif
1231 #endif
1232 
1233 #if IS_ENABLED(CONFIG_KUNIT)
1234 	struct kunit			*kunit_test;
1235 #endif
1236 
1237 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
1238 	/* Index of current stored address in ret_stack: */
1239 	int				curr_ret_stack;
1240 	int				curr_ret_depth;
1241 
1242 	/* Stack of return addresses for return function tracing: */
1243 	struct ftrace_ret_stack		*ret_stack;
1244 
1245 	/* Timestamp for last schedule: */
1246 	unsigned long long		ftrace_timestamp;
1247 
1248 	/*
1249 	 * Number of functions that haven't been traced
1250 	 * because of depth overrun:
1251 	 */
1252 	atomic_t			trace_overrun;
1253 
1254 	/* Pause tracing: */
1255 	atomic_t			tracing_graph_pause;
1256 #endif
1257 
1258 #ifdef CONFIG_TRACING
1259 	/* State flags for use by tracers: */
1260 	unsigned long			trace;
1261 
1262 	/* Bitmask and counter of trace recursion: */
1263 	unsigned long			trace_recursion;
1264 #endif /* CONFIG_TRACING */
1265 
1266 #ifdef CONFIG_KCOV
1267 	/* See kernel/kcov.c for more details. */
1268 
1269 	/* Coverage collection mode enabled for this task (0 if disabled): */
1270 	unsigned int			kcov_mode;
1271 
1272 	/* Size of the kcov_area: */
1273 	unsigned int			kcov_size;
1274 
1275 	/* Buffer for coverage collection: */
1276 	void				*kcov_area;
1277 
1278 	/* KCOV descriptor wired with this task or NULL: */
1279 	struct kcov			*kcov;
1280 
1281 	/* KCOV common handle for remote coverage collection: */
1282 	u64				kcov_handle;
1283 
1284 	/* KCOV sequence number: */
1285 	int				kcov_sequence;
1286 
1287 	/* Collect coverage from softirq context: */
1288 	unsigned int			kcov_softirq;
1289 #endif
1290 
1291 #ifdef CONFIG_MEMCG
1292 	struct mem_cgroup		*memcg_in_oom;
1293 	gfp_t				memcg_oom_gfp_mask;
1294 	int				memcg_oom_order;
1295 
1296 	/* Number of pages to reclaim on returning to userland: */
1297 	unsigned int			memcg_nr_pages_over_high;
1298 
1299 	/* Used by memcontrol for targeted memcg charge: */
1300 	struct mem_cgroup		*active_memcg;
1301 #endif
1302 
1303 #ifdef CONFIG_BLK_CGROUP
1304 	struct request_queue		*throttle_queue;
1305 #endif
1306 
1307 #ifdef CONFIG_UPROBES
1308 	struct uprobe_task		*utask;
1309 #endif
1310 #if defined(CONFIG_BCACHE) || defined(CONFIG_BCACHE_MODULE)
1311 	unsigned int			sequential_io;
1312 	unsigned int			sequential_io_avg;
1313 #endif
1314 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
1315 	unsigned long			task_state_change;
1316 #endif
1317 	int				pagefault_disabled;
1318 #ifdef CONFIG_MMU
1319 	struct task_struct		*oom_reaper_list;
1320 #endif
1321 #ifdef CONFIG_VMAP_STACK
1322 	struct vm_struct		*stack_vm_area;
1323 #endif
1324 #ifdef CONFIG_THREAD_INFO_IN_TASK
1325 	/* A live task holds one reference: */
1326 	refcount_t			stack_refcount;
1327 #endif
1328 #ifdef CONFIG_LIVEPATCH
1329 	int patch_state;
1330 #endif
1331 #ifdef CONFIG_SECURITY
1332 	/* Used by LSM modules for access restriction: */
1333 	void				*security;
1334 #endif
1335 
1336 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
1337 	unsigned long			lowest_stack;
1338 	unsigned long			prev_lowest_stack;
1339 #endif
1340 
1341 #ifdef CONFIG_X86_MCE
1342 	void __user			*mce_vaddr;
1343 	__u64				mce_kflags;
1344 	u64				mce_addr;
1345 	__u64				mce_ripv : 1,
1346 					mce_whole_page : 1,
1347 					__mce_reserved : 62;
1348 	struct callback_head		mce_kill_me;
1349 #endif
1350 
1351 	/*
1352 	 * New fields for task_struct should be added above here, so that
1353 	 * they are included in the randomized portion of task_struct.
1354 	 */
1355 	randomized_struct_fields_end
1356 
1357 	/* CPU-specific state of this task: */
1358 	struct thread_struct		thread;
1359 
1360 	/*
1361 	 * WARNING: on x86, 'thread_struct' contains a variable-sized
1362 	 * structure.  It *MUST* be at the end of 'task_struct'.
1363 	 *
1364 	 * Do not put anything below here!
1365 	 */
1366 };
1367 
1368 static inline struct pid *task_pid(struct task_struct *task)
1369 {
1370 	return task->thread_pid;
1371 }
1372 
1373 /*
1374  * the helpers to get the task's different pids as they are seen
1375  * from various namespaces
1376  *
1377  * task_xid_nr()     : global id, i.e. the id seen from the init namespace;
1378  * task_xid_vnr()    : virtual id, i.e. the id seen from the pid namespace of
1379  *                     current.
1380  * task_xid_nr_ns()  : id seen from the ns specified;
1381  *
1382  * see also pid_nr() etc in include/linux/pid.h
1383  */
1384 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1385 
1386 static inline pid_t task_pid_nr(struct task_struct *tsk)
1387 {
1388 	return tsk->pid;
1389 }
1390 
1391 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1392 {
1393 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1394 }
1395 
1396 static inline pid_t task_pid_vnr(struct task_struct *tsk)
1397 {
1398 	return __task_pid_nr_ns(tsk, PIDTYPE_PID, NULL);
1399 }
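
/*
 * Illustrative sketch (not part of this header; @tsk is hypothetical):
 * reporting a task's pid both globally and as seen by the reader:
 *
 *   pid_t gpid = task_pid_nr(tsk);	// id in the init namespace
 *   pid_t vpid = task_pid_vnr(tsk);	// id in current's pid namespace
 *   pid_t npid = task_pid_nr_ns(tsk, task_active_pid_ns(current));
 *
 * The last two are equivalent, since a NULL namespace argument means the
 * pid namespace of current.
 */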
1400 
1401 
1402 static inline pid_t task_tgid_nr(struct task_struct *tsk)
1403 {
1404 	return tsk->tgid;
1405 }
1406 
1407 /**
1408  * pid_alive - check that a task structure is not stale
1409  * @p: Task structure to be checked.
1410  *
1411  * Test if a process is not yet dead (at most zombie state).
1412  * If pid_alive fails, then pointers within the task structure
1413  * can be stale and must not be dereferenced.
1414  *
1415  * Return: 1 if the process is alive. 0 otherwise.
1416  */
1417 static inline int pid_alive(const struct task_struct *p)
1418 {
1419 	return p->thread_pid != NULL;
1420 }
1421 
1422 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1423 {
1424 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1425 }
1426 
1427 static inline pid_t task_pgrp_vnr(struct task_struct *tsk)
1428 {
1429 	return __task_pid_nr_ns(tsk, PIDTYPE_PGID, NULL);
1430 }
1431 
1432 
1433 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1434 {
1435 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1436 }
1437 
1438 static inline pid_t task_session_vnr(struct task_struct *tsk)
1439 {
1440 	return __task_pid_nr_ns(tsk, PIDTYPE_SID, NULL);
1441 }
1442 
1443 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1444 {
1445 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1446 }
1447 
1448 static inline pid_t task_tgid_vnr(struct task_struct *tsk)
1449 {
1450 	return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
1451 }
1452 
1453 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1454 {
1455 	pid_t pid = 0;
1456 
1457 	rcu_read_lock();
1458 	if (pid_alive(tsk))
1459 		pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
1460 	rcu_read_unlock();
1461 
1462 	return pid;
1463 }
1464 
1465 static inline pid_t task_ppid_nr(const struct task_struct *tsk)
1466 {
1467 	return task_ppid_nr_ns(tsk, &init_pid_ns);
1468 }
1469 
1470 /* Obsolete, do not use: */
1471 static inline pid_t task_pgrp_nr(struct task_struct *tsk)
1472 {
1473 	return task_pgrp_nr_ns(tsk, &init_pid_ns);
1474 }
1475 
1476 #define TASK_REPORT_IDLE	(TASK_REPORT + 1)
1477 #define TASK_REPORT_MAX		(TASK_REPORT_IDLE << 1)
1478 
1479 static inline unsigned int task_state_index(struct task_struct *tsk)
1480 {
1481 	unsigned int tsk_state = READ_ONCE(tsk->state);
1482 	unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1483 
1484 	BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
1485 
1486 	if (tsk_state == TASK_IDLE)
1487 		state = TASK_REPORT_IDLE;
1488 
1489 	return fls(state);
1490 }
1491 
1492 static inline char task_index_to_char(unsigned int state)
1493 {
1494 	static const char state_char[] = "RSDTtXZPI";
1495 
1496 	BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
1497 
1498 	return state_char[state];
1499 }
1500 
1501 static inline char task_state_to_char(struct task_struct *tsk)
1502 {
1503 	return task_index_to_char(task_state_index(tsk));
1504 }
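
/*
 * Worked example (illustrative): a task sleeping in TASK_INTERRUPTIBLE has
 * task_state_index() == fls(0x0001) == 1 and is reported as 'S', while one
 * in TASK_IDLE is special-cased to TASK_REPORT_IDLE and reported as 'I'.
 */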
1505 
1506 /**
1507  * is_global_init - check if a task structure is init. Since init
1508  * is free to have sub-threads we need to check tgid.
1509  * @tsk: Task structure to be checked.
1510  *
1511  * Check if a task structure is the first user space task the kernel created.
1512  *
1513  * Return: 1 if the task structure is init. 0 otherwise.
1514  */
1515 static inline int is_global_init(struct task_struct *tsk)
1516 {
1517 	return task_tgid_nr(tsk) == 1;
1518 }
1519 
1520 extern struct pid *cad_pid;
1521 
1522 /*
1523  * Per process flags
1524  */
1525 #define PF_VCPU			0x00000001	/* I'm a virtual CPU */
1526 #define PF_IDLE			0x00000002	/* I am an IDLE thread */
1527 #define PF_EXITING		0x00000004	/* Getting shut down */
1528 #define PF_IO_WORKER		0x00000010	/* Task is an IO worker */
1529 #define PF_WQ_WORKER		0x00000020	/* I'm a workqueue worker */
1530 #define PF_FORKNOEXEC		0x00000040	/* Forked but didn't exec */
1531 #define PF_MCE_PROCESS		0x00000080      /* Process policy on mce errors */
1532 #define PF_SUPERPRIV		0x00000100	/* Used super-user privileges */
1533 #define PF_DUMPCORE		0x00000200	/* Dumped core */
1534 #define PF_SIGNALED		0x00000400	/* Killed by a signal */
1535 #define PF_MEMALLOC		0x00000800	/* Allocating memory */
1536 #define PF_NPROC_EXCEEDED	0x00001000	/* set_user() noticed that RLIMIT_NPROC was exceeded */
1537 #define PF_USED_MATH		0x00002000	/* If unset the fpu must be initialized before use */
1538 #define PF_USED_ASYNC		0x00004000	/* Used async_schedule*(), used by module init */
1539 #define PF_NOFREEZE		0x00008000	/* This thread should not be frozen */
1540 #define PF_FROZEN		0x00010000	/* Frozen for system suspend */
1541 #define PF_KSWAPD		0x00020000	/* I am kswapd */
1542 #define PF_MEMALLOC_NOFS	0x00040000	/* All allocation requests will inherit GFP_NOFS */
1543 #define PF_MEMALLOC_NOIO	0x00080000	/* All allocation requests will inherit GFP_NOIO */
1544 #define PF_LOCAL_THROTTLE	0x00100000	/* Throttle writes only against the bdi I write to,
1545 						 * I am cleaning dirty pages from some other bdi. */
1546 #define PF_KTHREAD		0x00200000	/* I am a kernel thread */
1547 #define PF_RANDOMIZE		0x00400000	/* Randomize virtual address space */
1548 #define PF_SWAPWRITE		0x00800000	/* Allowed to write to swap */
1549 #define PF_NO_SETAFFINITY	0x04000000	/* Userland is not allowed to meddle with cpus_mask */
1550 #define PF_MCE_EARLY		0x08000000      /* Early kill for mce process policy */
1551 #define PF_MEMALLOC_NOCMA	0x10000000	/* All allocation requests will have __GFP_MOVABLE cleared */
1552 #define PF_FREEZER_SKIP		0x40000000	/* Freezer should not count it as freezable */
1553 #define PF_SUSPEND_TASK		0x80000000      /* This thread called freeze_processes() and should not be frozen */
1554 
1555 /*
1556  * Only the _current_ task can read/write to tsk->flags, but other
1557  * tasks can access tsk->flags in readonly mode for example
1558  * with tsk_used_math (like during threaded core dumping).
1559  * There is however an exception to this rule during ptrace
1560  * or during fork: the ptracer task is allowed to write to the
1561  * child->flags of its traced child (same goes for fork, the parent
1562  * can write to the child->flags), because we're guaranteed the
1563  * child is not running and in turn not changing child->flags
1564  * at the same time the parent does it.
1565  */
1566 #define clear_stopped_child_used_math(child)	do { (child)->flags &= ~PF_USED_MATH; } while (0)
1567 #define set_stopped_child_used_math(child)	do { (child)->flags |= PF_USED_MATH; } while (0)
1568 #define clear_used_math()			clear_stopped_child_used_math(current)
1569 #define set_used_math()				set_stopped_child_used_math(current)
1570 
1571 #define conditional_stopped_child_used_math(condition, child) \
1572 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1573 
1574 #define conditional_used_math(condition)	conditional_stopped_child_used_math(condition, current)
1575 
1576 #define copy_to_stopped_child_used_math(child) \
1577 	do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1578 
1579 /* NOTE: this will return 0 or PF_USED_MATH, it will never return 1 */
1580 #define tsk_used_math(p)			((p)->flags & PF_USED_MATH)
1581 #define used_math()				tsk_used_math(current)
1582 
1583 static inline bool is_percpu_thread(void)
1584 {
1585 #ifdef CONFIG_SMP
1586 	return (current->flags & PF_NO_SETAFFINITY) &&
1587 		(current->nr_cpus_allowed  == 1);
1588 #else
1589 	return true;
1590 #endif
1591 }
1592 
1593 /* Per-process atomic flags. */
1594 #define PFA_NO_NEW_PRIVS		0	/* May not gain new privileges. */
1595 #define PFA_SPREAD_PAGE			1	/* Spread page cache over cpuset */
1596 #define PFA_SPREAD_SLAB			2	/* Spread some slab caches over cpuset */
1597 #define PFA_SPEC_SSB_DISABLE		3	/* Speculative Store Bypass disabled */
1598 #define PFA_SPEC_SSB_FORCE_DISABLE	4	/* Speculative Store Bypass force disabled */
1599 #define PFA_SPEC_IB_DISABLE		5	/* Indirect branch speculation restricted */
1600 #define PFA_SPEC_IB_FORCE_DISABLE	6	/* Indirect branch speculation permanently restricted */
1601 #define PFA_SPEC_SSB_NOEXEC		7	/* Speculative Store Bypass clear on execve() */
1602 
1603 #define TASK_PFA_TEST(name, func)					\
1604 	static inline bool task_##func(struct task_struct *p)		\
1605 	{ return test_bit(PFA_##name, &p->atomic_flags); }
1606 
1607 #define TASK_PFA_SET(name, func)					\
1608 	static inline void task_set_##func(struct task_struct *p)	\
1609 	{ set_bit(PFA_##name, &p->atomic_flags); }
1610 
1611 #define TASK_PFA_CLEAR(name, func)					\
1612 	static inline void task_clear_##func(struct task_struct *p)	\
1613 	{ clear_bit(PFA_##name, &p->atomic_flags); }
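
/*
 * For illustration, TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs) below expands
 * to:
 *
 *   static inline bool task_no_new_privs(struct task_struct *p)
 *   { return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); }
 *
 * Note that NO_NEW_PRIVS deliberately gets no TASK_PFA_CLEAR() helper: once
 * set, that flag is never cleared.
 */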
1614 
1615 TASK_PFA_TEST(NO_NEW_PRIVS, no_new_privs)
1616 TASK_PFA_SET(NO_NEW_PRIVS, no_new_privs)
1617 
1618 TASK_PFA_TEST(SPREAD_PAGE, spread_page)
1619 TASK_PFA_SET(SPREAD_PAGE, spread_page)
1620 TASK_PFA_CLEAR(SPREAD_PAGE, spread_page)
1621 
1622 TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
1623 TASK_PFA_SET(SPREAD_SLAB, spread_slab)
1624 TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
1625 
1626 TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable)
1627 TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable)
1628 TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable)
1629 
1630 TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1631 TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1632 TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec)
1633 
1634 TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1635 TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable)
1636 
1637 TASK_PFA_TEST(SPEC_IB_DISABLE, spec_ib_disable)
1638 TASK_PFA_SET(SPEC_IB_DISABLE, spec_ib_disable)
1639 TASK_PFA_CLEAR(SPEC_IB_DISABLE, spec_ib_disable)
1640 
1641 TASK_PFA_TEST(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1642 TASK_PFA_SET(SPEC_IB_FORCE_DISABLE, spec_ib_force_disable)
1643 
1644 static inline void
1645 current_restore_flags(unsigned long orig_flags, unsigned long flags)
1646 {
1647 	current->flags &= ~flags;
1648 	current->flags |= orig_flags & flags;
1649 }
1650 
1651 extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
1652 extern int task_can_attach(struct task_struct *p, const struct cpumask *cs_cpus_allowed);
1653 #ifdef CONFIG_SMP
1654 extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
1655 extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
1656 #else
1657 static inline void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
1658 {
1659 }
1660 static inline int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
1661 {
1662 	if (!cpumask_test_cpu(0, new_mask))
1663 		return -EINVAL;
1664 	return 0;
1665 }
1666 #endif
1667 
1668 extern int yield_to(struct task_struct *p, bool preempt);
1669 extern void set_user_nice(struct task_struct *p, long nice);
1670 extern int task_prio(const struct task_struct *p);
1671 
1672 /**
1673  * task_nice - return the nice value of a given task.
1674  * @p: the task in question.
1675  *
1676  * Return: The nice value [ -20 ... 0 ... 19 ].
1677  */
1678 static inline int task_nice(const struct task_struct *p)
1679 {
1680 	return PRIO_TO_NICE((p)->static_prio);
1681 }
1682 
1683 extern int can_nice(const struct task_struct *p, const int nice);
1684 extern int task_curr(const struct task_struct *p);
1685 extern int idle_cpu(int cpu);
1686 extern int available_idle_cpu(int cpu);
1687 extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *);
1688 extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *);
1689 extern void sched_set_fifo(struct task_struct *p);
1690 extern void sched_set_fifo_low(struct task_struct *p);
1691 extern void sched_set_normal(struct task_struct *p, int nice);
1692 extern int sched_setattr(struct task_struct *, const struct sched_attr *);
1693 extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
1694 extern struct task_struct *idle_task(int cpu);
1695 
1696 /**
1697  * is_idle_task - is the specified task an idle task?
1698  * @p: the task in question.
1699  *
1700  * Return: 1 if @p is an idle task. 0 otherwise.
1701  */
1702 static __always_inline bool is_idle_task(const struct task_struct *p)
1703 {
1704 	return !!(p->flags & PF_IDLE);
1705 }
1706 
1707 extern struct task_struct *curr_task(int cpu);
1708 extern void ia64_set_curr_task(int cpu, struct task_struct *p);
1709 
1710 void yield(void);
1711 
1712 union thread_union {
1713 #ifndef CONFIG_ARCH_TASK_STRUCT_ON_STACK
1714 	struct task_struct task;
1715 #endif
1716 #ifndef CONFIG_THREAD_INFO_IN_TASK
1717 	struct thread_info thread_info;
1718 #endif
1719 	unsigned long stack[THREAD_SIZE/sizeof(long)];
1720 };
1721 
1722 #ifndef CONFIG_THREAD_INFO_IN_TASK
1723 extern struct thread_info init_thread_info;
1724 #endif
1725 
1726 extern unsigned long init_stack[THREAD_SIZE / sizeof(unsigned long)];
1727 
1728 #ifdef CONFIG_THREAD_INFO_IN_TASK
1729 static inline struct thread_info *task_thread_info(struct task_struct *task)
1730 {
1731 	return &task->thread_info;
1732 }
1733 #elif !defined(__HAVE_THREAD_FUNCTIONS)
1734 # define task_thread_info(task)	((struct thread_info *)(task)->stack)
1735 #endif
1736 
1737 /*
1738  * find a task by one of its numerical ids
1739  *
1740  * find_task_by_pid_ns():
1741  *      finds a task by its pid in the specified namespace
1742  * find_task_by_vpid():
1743  *      finds a task by its virtual pid
1744  *
1745  * see also find_vpid() etc in include/linux/pid.h
1746  */
1747 
1748 extern struct task_struct *find_task_by_vpid(pid_t nr);
1749 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
1750 
1751 /*
1752  * find a task by its virtual pid and get the task struct
1753  */
1754 extern struct task_struct *find_get_task_by_vpid(pid_t nr);
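
/*
 * Illustrative sketch (not part of the upstream header): the non-"get"
 * lookups return an unreferenced pointer and are only safe under RCU, e.g.:
 *
 *	rcu_read_lock();
 *	p = find_task_by_vpid(nr);
 *	if (p)
 *		get_task_struct(p);
 *	rcu_read_unlock();
 *
 * find_get_task_by_vpid() combines the lookup with taking the reference.
 */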
1755 
1756 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
1757 extern int wake_up_process(struct task_struct *tsk);
1758 extern void wake_up_new_task(struct task_struct *tsk);
1759 
1760 #ifdef CONFIG_SMP
1761 extern void kick_process(struct task_struct *tsk);
1762 #else
1763 static inline void kick_process(struct task_struct *tsk) { }
1764 #endif
1765 
1766 extern void __set_task_comm(struct task_struct *tsk, const char *from, bool exec);
1767 
1768 static inline void set_task_comm(struct task_struct *tsk, const char *from)
1769 {
1770 	__set_task_comm(tsk, from, false);
1771 }
1772 
1773 extern char *__get_task_comm(char *to, size_t len, struct task_struct *tsk);
1774 #define get_task_comm(buf, tsk) ({			\
1775 	BUILD_BUG_ON(sizeof(buf) != TASK_COMM_LEN);	\
1776 	__get_task_comm(buf, sizeof(buf), tsk);		\
1777 })
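
/*
 * Illustrative sketch (not part of the upstream header): get_task_comm()
 * insists on a correctly sized on-stack array, so typical usage is:
 *
 *	char comm[TASK_COMM_LEN];
 *
 *	get_task_comm(comm, tsk);
 *
 * Passing a plain pointer trips the BUILD_BUG_ON(), because sizeof(buf)
 * would then no longer equal TASK_COMM_LEN.
 */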
1778 
1779 #ifdef CONFIG_SMP
1780 static __always_inline void scheduler_ipi(void)
1781 {
1782 	/*
1783 	 * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting
1784 	 * TIF_NEED_RESCHED remotely (for the first time) will also send
1785 	 * this IPI.
1786 	 */
1787 	preempt_fold_need_resched();
1788 }
1789 extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
1790 #else
1791 static inline void scheduler_ipi(void) { }
1792 static inline unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1793 {
1794 	return 1;
1795 }
1796 #endif
1797 
1798 /*
1799  * Set thread flags in another task's structure.
1800  * See asm/thread_info.h for TIF_xxxx flags available:
1801  */
1802 static inline void set_tsk_thread_flag(struct task_struct *tsk, int flag)
1803 {
1804 	set_ti_thread_flag(task_thread_info(tsk), flag);
1805 }
1806 
1807 static inline void clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1808 {
1809 	clear_ti_thread_flag(task_thread_info(tsk), flag);
1810 }
1811 
1812 static inline void update_tsk_thread_flag(struct task_struct *tsk, int flag,
1813 					  bool value)
1814 {
1815 	update_ti_thread_flag(task_thread_info(tsk), flag, value);
1816 }
1817 
1818 static inline int test_and_set_tsk_thread_flag(struct task_struct *tsk, int flag)
1819 {
1820 	return test_and_set_ti_thread_flag(task_thread_info(tsk), flag);
1821 }
1822 
1823 static inline int test_and_clear_tsk_thread_flag(struct task_struct *tsk, int flag)
1824 {
1825 	return test_and_clear_ti_thread_flag(task_thread_info(tsk), flag);
1826 }
1827 
1828 static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
1829 {
1830 	return test_ti_thread_flag(task_thread_info(tsk), flag);
1831 }
1832 
1833 static inline void set_tsk_need_resched(struct task_struct *tsk)
1834 {
1835 	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1836 }
1837 
1838 static inline void clear_tsk_need_resched(struct task_struct *tsk)
1839 {
1840 	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
1841 }
1842 
1843 static inline int test_tsk_need_resched(struct task_struct *tsk)
1844 {
1845 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
1846 }
1847 
1848 /*
1849  * cond_resched() and cond_resched_lock(): latency reduction via
1850  * explicit rescheduling in places that are safe. The return
1851  * value indicates whether a reschedule was actually done.
1852  * cond_resched_lock() will drop the spinlock before scheduling.
1853  */
1854 #ifndef CONFIG_PREEMPTION
1855 extern int _cond_resched(void);
1856 #else
1857 static inline int _cond_resched(void) { return 0; }
1858 #endif
1859 
1860 #define cond_resched() ({			\
1861 	___might_sleep(__FILE__, __LINE__, 0);	\
1862 	_cond_resched();			\
1863 })
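
/*
 * Illustrative sketch (not part of the upstream header): long-running loops
 * in process context typically call cond_resched() to bound latency, e.g.:
 *
 *	for (i = 0; i < nr_items; i++) {
 *		process_item(i);
 *		cond_resched();
 *	}
 *
 * process_item() is a placeholder; cond_resched() may sleep, so this is only
 * valid where sleeping is allowed.
 */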
1864 
1865 extern int __cond_resched_lock(spinlock_t *lock);
1866 
1867 #define cond_resched_lock(lock) ({				\
1868 	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
1869 	__cond_resched_lock(lock);				\
1870 })
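
/*
 * Illustrative sketch (not part of the upstream header): when a long loop
 * runs under a spinlock, cond_resched_lock() can break both lock and CPU
 * hogging, e.g.:
 *
 *	spin_lock(&my_lock);
 *	while (more_work()) {
 *		do_locked_work();
 *		cond_resched_lock(&my_lock);
 *	}
 *	spin_unlock(&my_lock);
 *
 * my_lock, more_work() and do_locked_work() are placeholders; the lock may be
 * dropped and re-acquired inside cond_resched_lock(), so the loop must not
 * assume it was held continuously.
 */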
1871 
1872 static inline void cond_resched_rcu(void)
1873 {
1874 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
1875 	rcu_read_unlock();
1876 	cond_resched();
1877 	rcu_read_lock();
1878 #endif
1879 }
1880 
1881 /*
1882  * Does a critical section need to be broken due to another
1883  * task waiting? (Technically this does not depend on CONFIG_PREEMPTION,
1884  * but reflects a general need for low latency.)
1885  */
1886 static inline int spin_needbreak(spinlock_t *lock)
1887 {
1888 #ifdef CONFIG_PREEMPTION
1889 	return spin_is_contended(lock);
1890 #else
1891 	return 0;
1892 #endif
1893 }
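
/*
 * Illustrative sketch (not part of the upstream header): code that wants to
 * drop the lock itself can test spin_needbreak() directly:
 *
 *	if (spin_needbreak(&my_lock)) {
 *		spin_unlock(&my_lock);
 *		cpu_relax();
 *		spin_lock(&my_lock);
 *	}
 *
 * my_lock is a placeholder; on !CONFIG_PREEMPTION kernels the check compiles
 * to 0 and the break is never taken.
 */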
1894 
1895 static __always_inline bool need_resched(void)
1896 {
1897 	return unlikely(tif_need_resched());
1898 }
1899 
1900 /*
1901  * Wrappers for p->thread_info->cpu access. No-op on UP.
1902  */
1903 #ifdef CONFIG_SMP
1904 
1905 static inline unsigned int task_cpu(const struct task_struct *p)
1906 {
1907 #ifdef CONFIG_THREAD_INFO_IN_TASK
1908 	return READ_ONCE(p->cpu);
1909 #else
1910 	return READ_ONCE(task_thread_info(p)->cpu);
1911 #endif
1912 }
1913 
1914 extern void set_task_cpu(struct task_struct *p, unsigned int cpu);
1915 
1916 #else
1917 
1918 static inline unsigned int task_cpu(const struct task_struct *p)
1919 {
1920 	return 0;
1921 }
1922 
1923 static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
1924 {
1925 }
1926 
1927 #endif /* CONFIG_SMP */
1928 
1929 /*
1930  * In order to reduce various lock holder preemption latencies, provide an
1931  * interface to see whether a vCPU is currently running or not.
1932  *
1933  * This allows us to terminate optimistic spin loops and block, analogous to
1934  * the native optimistic spin heuristic of testing if the lock owner task is
1935  * running or not.
1936  */
1937 #ifndef vcpu_is_preempted
1938 static inline bool vcpu_is_preempted(int cpu)
1939 {
1940 	return false;
1941 }
1942 #endif
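
/*
 * Illustrative sketch (not part of the upstream header): optimistic spinners
 * typically combine this with task_cpu() on the lock owner, e.g.:
 *
 *	while (owner_running(lock, owner)) {
 *		if (need_resched() || vcpu_is_preempted(task_cpu(owner)))
 *			break;
 *		cpu_relax();
 *	}
 *
 * owner_running() is a placeholder; the real spin loops live in the mutex
 * and rwsem code.
 */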
1943 
1944 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
1945 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
1946 
1947 #ifndef TASK_SIZE_OF
1948 #define TASK_SIZE_OF(tsk)	TASK_SIZE
1949 #endif
1950 
1951 #ifdef CONFIG_RSEQ
1952 
1953 /*
1954  * Map the event mask onto the user-space ABI enum rseq_cs_flags
1955  * for direct mask checks.
1956  */
1957 enum rseq_event_mask_bits {
1958 	RSEQ_EVENT_PREEMPT_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT_BIT,
1959 	RSEQ_EVENT_SIGNAL_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL_BIT,
1960 	RSEQ_EVENT_MIGRATE_BIT	= RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE_BIT,
1961 };
1962 
1963 enum rseq_event_mask {
1964 	RSEQ_EVENT_PREEMPT	= (1U << RSEQ_EVENT_PREEMPT_BIT),
1965 	RSEQ_EVENT_SIGNAL	= (1U << RSEQ_EVENT_SIGNAL_BIT),
1966 	RSEQ_EVENT_MIGRATE	= (1U << RSEQ_EVENT_MIGRATE_BIT),
1967 };
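
/*
 * Illustrative note (not part of the upstream header): because the event bits
 * share positions with the RSEQ_CS_FLAG_NO_RESTART_ON_* bits, the resume path
 * can mask recorded events directly against the flags of the active critical
 * section, along the lines of:
 *
 *	if (t->rseq_event_mask & ~cs_flags)
 *		... restart the critical section ...
 *
 * cs_flags is a placeholder; the real check lives in kernel/rseq.c.
 */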
1968 
1969 static inline void rseq_set_notify_resume(struct task_struct *t)
1970 {
1971 	if (t->rseq)
1972 		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1973 }
1974 
1975 void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
1976 
1977 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
1978 					     struct pt_regs *regs)
1979 {
1980 	if (current->rseq)
1981 		__rseq_handle_notify_resume(ksig, regs);
1982 }
1983 
1984 static inline void rseq_signal_deliver(struct ksignal *ksig,
1985 				       struct pt_regs *regs)
1986 {
1987 	preempt_disable();
1988 	__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
1989 	preempt_enable();
1990 	rseq_handle_notify_resume(ksig, regs);
1991 }
1992 
1993 /* rseq_preempt() requires preemption to be disabled. */
1994 static inline void rseq_preempt(struct task_struct *t)
1995 {
1996 	__set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
1997 	rseq_set_notify_resume(t);
1998 }
1999 
2000 /* rseq_migrate() requires preemption to be disabled. */
2001 static inline void rseq_migrate(struct task_struct *t)
2002 {
2003 	__set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2004 	rseq_set_notify_resume(t);
2005 }
2006 
2007 /*
2008  * If the parent process has a registered restartable sequences area, the
2009  * child inherits it. Unregister rseq for a clone with CLONE_VM set.
2010  */
2011 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2012 {
2013 	if (clone_flags & CLONE_VM) {
2014 		t->rseq = NULL;
2015 		t->rseq_sig = 0;
2016 		t->rseq_event_mask = 0;
2017 	} else {
2018 		t->rseq = current->rseq;
2019 		t->rseq_sig = current->rseq_sig;
2020 		t->rseq_event_mask = current->rseq_event_mask;
2021 	}
2022 }
2023 
2024 static inline void rseq_execve(struct task_struct *t)
2025 {
2026 	t->rseq = NULL;
2027 	t->rseq_sig = 0;
2028 	t->rseq_event_mask = 0;
2029 }
2030 
2031 #else
2032 
2033 static inline void rseq_set_notify_resume(struct task_struct *t)
2034 {
2035 }
2036 static inline void rseq_handle_notify_resume(struct ksignal *ksig,
2037 					     struct pt_regs *regs)
2038 {
2039 }
2040 static inline void rseq_signal_deliver(struct ksignal *ksig,
2041 				       struct pt_regs *regs)
2042 {
2043 }
2044 static inline void rseq_preempt(struct task_struct *t)
2045 {
2046 }
2047 static inline void rseq_migrate(struct task_struct *t)
2048 {
2049 }
2050 static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
2051 {
2052 }
2053 static inline void rseq_execve(struct task_struct *t)
2054 {
2055 }
2056 
2057 #endif
2058 
2059 #ifdef CONFIG_DEBUG_RSEQ
2060 
2061 void rseq_syscall(struct pt_regs *regs);
2062 
2063 #else
2064 
2065 static inline void rseq_syscall(struct pt_regs *regs)
2066 {
2067 }
2068 
2069 #endif
2070 
2071 const struct sched_avg *sched_trace_cfs_rq_avg(struct cfs_rq *cfs_rq);
2072 char *sched_trace_cfs_rq_path(struct cfs_rq *cfs_rq, char *str, int len);
2073 int sched_trace_cfs_rq_cpu(struct cfs_rq *cfs_rq);
2074 
2075 const struct sched_avg *sched_trace_rq_avg_rt(struct rq *rq);
2076 const struct sched_avg *sched_trace_rq_avg_dl(struct rq *rq);
2077 const struct sched_avg *sched_trace_rq_avg_irq(struct rq *rq);
2078 
2079 int sched_trace_rq_cpu(struct rq *rq);
2080 int sched_trace_rq_cpu_capacity(struct rq *rq);
2081 int sched_trace_rq_nr_running(struct rq *rq);
2082 
2083 const struct cpumask *sched_trace_rd_span(struct root_domain *rd);
2084 
2085 #endif
2086