/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SCHED_SIGNAL_H
#define _LINUX_SCHED_SIGNAL_H

#include <linux/rculist.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/jobctl.h>
#include <linux/sched/task.h>
#include <linux/cred.h>
#include <linux/refcount.h>
#include <linux/posix-timers.h>
#include <linux/mm_types.h>
#include <asm/ptrace.h>

/*
 * Types defining task->signal and task->sighand and APIs using them:
 */

struct sighand_struct {
	spinlock_t siglock;
	refcount_t count;
	wait_queue_head_t signalfd_wqh;
	struct k_sigaction action[_NSIG];
};

/*
 * Per-process accounting stats:
 */
struct pacct_struct {
	int ac_flag;
	long ac_exitcode;
	unsigned long ac_mem;
	u64 ac_utime, ac_stime;
	unsigned long ac_minflt, ac_majflt;
};

struct cpu_itimer {
	u64 expires;
	u64 incr;
};

/*
 * This is the atomic variant of task_cputime, which can be used for
 * storing and updating task_cputime statistics without locking.
 */
struct task_cputime_atomic {
	atomic64_t utime;
	atomic64_t stime;
	atomic64_t sum_exec_runtime;
};

#define INIT_CPUTIME_ATOMIC \
	(struct task_cputime_atomic) { \
		.utime = ATOMIC64_INIT(0), \
		.stime = ATOMIC64_INIT(0), \
		.sum_exec_runtime = ATOMIC64_INIT(0), \
	}
/**
 * struct thread_group_cputimer - thread group interval timer counts
 * @cputime_atomic: atomic thread group interval timers.
 *
 * This structure contains the version of task_cputime, above, that is
 * used for thread group CPU timer calculations.
 */
struct thread_group_cputimer {
	struct task_cputime_atomic cputime_atomic;
};

struct multiprocess_signals {
	sigset_t signal;
	struct hlist_node node;
};

/*
 * NOTE! "signal_struct" does not have its own
 * locking, because a shared signal_struct always
 * implies a shared sighand_struct, so locking
 * sighand_struct is always a proper superset of
 * the locking of signal_struct.
 */
struct signal_struct {
	refcount_t sigcnt;
	atomic_t live;
	int nr_threads;
	struct list_head thread_head;

	wait_queue_head_t wait_chldexit;	/* for wait4() */

	/* current thread group signal load-balancing target: */
	struct task_struct *curr_target;

	/* shared signal handling: */
	struct sigpending shared_pending;

	/* For collecting multiprocess signals during fork */
	struct hlist_head multiprocess;

	/* thread group exit support */
	int group_exit_code;
	/* overloaded:
	 * - notify group_exit_task when ->count is equal to notify_count
	 * - everyone except group_exit_task is stopped during signal delivery
	 *   of fatal signals, group_exit_task processes the signal.
	 */
	int notify_count;
	struct task_struct *group_exit_task;

	/* thread group stop support, overloads group_exit_code too */
	int group_stop_count;
	unsigned int flags;	/* see SIGNAL_* flags below */

	/*
	 * PR_SET_CHILD_SUBREAPER marks a process, like a service
	 * manager, to re-parent orphan (double-forking) child processes
	 * to this process instead of 'init'. The service manager is
	 * able to receive SIGCHLD signals and is able to investigate
	 * the process until it calls wait(). All children of this
	 * process will inherit a flag if they should look for a
	 * child_subreaper process at exit.
	 */
	unsigned int is_child_subreaper:1;
	unsigned int has_child_subreaper:1;

#ifdef CONFIG_POSIX_TIMERS

	/* POSIX.1b Interval Timers */
	int posix_timer_id;
	struct list_head posix_timers;

	/* ITIMER_REAL timer for the process */
	struct hrtimer real_timer;
	ktime_t it_real_incr;

	/*
	 * ITIMER_PROF and ITIMER_VIRTUAL timers for the process; we use
	 * CPUCLOCK_PROF and CPUCLOCK_VIRT for indexing the array, as these
	 * values are defined to 0 and 1 respectively.
	 */
	struct cpu_itimer it[2];

	/*
	 * Thread group totals for process CPU timers.
	 * See thread_group_cputimer(), et al, for details.
	 */
	struct thread_group_cputimer cputimer;

#endif
	/* Empty if CONFIG_POSIX_TIMERS=n */
	struct posix_cputimers posix_cputimers;

	/* PID/PID hash table linkage. */
	struct pid *pids[PIDTYPE_MAX];

#ifdef CONFIG_NO_HZ_FULL
	atomic_t tick_dep_mask;
#endif

	struct pid *tty_old_pgrp;

	/* boolean value for session group leader */
	int leader;

	struct tty_struct *tty;	/* NULL if no tty */

#ifdef CONFIG_SCHED_AUTOGROUP
	struct autogroup *autogroup;
#endif
	/*
	 * Cumulative resource counters for dead threads in the group,
	 * and for reaped dead child processes forked by this group.
	 * Live threads maintain their own counters and add to these
	 * in __exit_signal, except for the group leader.
	 */
	seqlock_t stats_lock;
	u64 utime, stime, cutime, cstime;
	u64 gtime;
	u64 cgtime;
	struct prev_cputime prev_cputime;
	unsigned long nvcsw, nivcsw, cnvcsw, cnivcsw;
	unsigned long min_flt, maj_flt, cmin_flt, cmaj_flt;
	unsigned long inblock, oublock, cinblock, coublock;
	unsigned long maxrss, cmaxrss;
	struct task_io_accounting ioac;

	/*
	 * Cumulative ns of scheduled CPU time for dead threads in the
	 * group, not including a zombie group leader. (This only differs
	 * from jiffies_to_ns(utime + stime) if sched_clock uses something
	 * other than jiffies.)
	 */
	unsigned long long sum_sched_runtime;

	/*
	 * We don't bother to synchronize most readers of this at all,
	 * because there is no reader checking a limit that actually needs
	 * to get both rlim_cur and rlim_max atomically, and either one
	 * alone is a single word that can safely be read normally.
	 * getrlimit/setrlimit use task_lock(current->group_leader) to
	 * protect this instead of the siglock, because they really
	 * have no need to disable irqs.
	 */
	struct rlimit rlim[RLIM_NLIMITS];

#ifdef CONFIG_BSD_PROCESS_ACCT
	struct pacct_struct pacct;	/* per-process accounting information */
#endif
#ifdef CONFIG_TASKSTATS
	struct taskstats *stats;
#endif
#ifdef CONFIG_AUDIT
	unsigned audit_tty;
	struct tty_audit_buf *tty_audit_buf;
#endif

	/*
	 * Thread is the potential origin of an oom condition; kill first on
	 * oom
	 */
	bool oom_flag_origin;
	short oom_score_adj;		/* OOM kill score adjustment */
	short oom_score_adj_min;	/* OOM kill score adjustment min value.
					 * Only settable by CAP_SYS_RESOURCE. */
	struct mm_struct *oom_mm;	/* recorded mm when the thread group got
					 * killed by the oom killer */

	struct mutex cred_guard_mutex;	/* guard against foreign influences on
					 * credential calculations
					 * (notably ptrace).
					 * Deprecated: do not use in new code.
					 * Use exec_update_mutex instead.
					 */
	struct mutex exec_update_mutex;	/* Held while task_struct is being
					 * updated during exec, and may have
					 * inconsistent permissions.
					 */
} __randomize_layout;

/*
 * Bits in flags field of signal_struct.
 */
#define SIGNAL_STOP_STOPPED	0x00000001 /* job control stop in effect */
#define SIGNAL_STOP_CONTINUED	0x00000002 /* SIGCONT since WCONTINUED reap */
#define SIGNAL_GROUP_EXIT	0x00000004 /* group exit in progress */
#define SIGNAL_GROUP_COREDUMP	0x00000008 /* coredump in progress */
/*
 * Pending notifications to parent.
 */
#define SIGNAL_CLD_STOPPED	0x00000010
#define SIGNAL_CLD_CONTINUED	0x00000020
#define SIGNAL_CLD_MASK		(SIGNAL_CLD_STOPPED|SIGNAL_CLD_CONTINUED)

#define SIGNAL_UNKILLABLE	0x00000040 /* for init: ignore fatal signals */

#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
			  SIGNAL_STOP_CONTINUED)

static inline void signal_set_stop_flags(struct signal_struct *sig,
					 unsigned int flags)
{
	WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
	sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
}

/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
	return (sig->flags & SIGNAL_GROUP_EXIT) ||
		(sig->group_exit_task != NULL);
}

extern void flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *task,
			  sigset_t *mask, kernel_siginfo_t *info);

static inline int kernel_dequeue_signal(void)
{
	struct task_struct *task = current;
	kernel_siginfo_t __info;
	int ret;

	spin_lock_irq(&task->sighand->siglock);
	ret = dequeue_signal(task, &task->blocked, &__info);
	spin_unlock_irq(&task->sighand->siglock);

	return ret;
}

static inline void kernel_signal_stop(void)
{
	spin_lock_irq(&current->sighand->siglock);
	if (current->jobctl & JOBCTL_STOP_DEQUEUED)
		set_special_state(TASK_STOPPED);
	spin_unlock_irq(&current->sighand->siglock);

	schedule();
}
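
/*
 * Illustrative sketch only (not lifted from any in-tree user): a kernel
 * thread that has deliberately unblocked a few signals could drain them,
 * and park itself once a stop signal has been dequeued, roughly as:
 *
 *	if (signal_pending(current)) {
 *		kernel_dequeue_signal();
 *		if (current->jobctl & JOBCTL_STOP_DEQUEUED)
 *			kernel_signal_stop();
 *	}
 *
 * The JOBCTL_STOP_DEQUEUED test outside the siglock is an assumption made
 * for brevity here; kernel_signal_stop() rechecks it under the lock.
 */
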
#ifdef __ARCH_SI_TRAPNO
# define ___ARCH_SI_TRAPNO(_a1) , _a1
#else
# define ___ARCH_SI_TRAPNO(_a1)
#endif
#ifdef __ia64__
# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
#else
# define ___ARCH_SI_IA64(_a1, _a2, _a3)
#endif

int force_sig_fault_to_task(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);
int force_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
int send_sig_fault(int sig, int code, void __user *addr
	___ARCH_SI_TRAPNO(int trapno)
	___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
	, struct task_struct *t);
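
/*
 * Example (a minimal sketch; the ___ARCH_SI_* arguments expand to nothing
 * on most architectures, and "fault_address" is a placeholder): arch fault
 * handlers typically report a bad user access against current like this:
 *
 *	force_sig_fault(SIGSEGV, SEGV_MAPERR,
 *			(void __user *)fault_address);
 */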

int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);

int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper);
int force_sig_pkuerr(void __user *addr, u32 pkey);

int force_sig_ptrace_errno_trap(int errno, void __user *addr);

extern int send_sig_info(int, struct kernel_siginfo *, struct task_struct *);
extern void force_sigsegv(int sig);
extern int force_sig_info(struct kernel_siginfo *);
extern int __kill_pgrp_info(int sig, struct kernel_siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct kernel_siginfo *info, struct pid *pid);
extern int kill_pid_usb_asyncio(int sig, int errno, sigval_t addr, struct pid *,
				const struct cred *);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int);
extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}

static inline int signal_pending(struct task_struct *p)
{
	return unlikely(test_tsk_thread_flag(p, TIF_SIGPENDING));
}

static inline int __fatal_signal_pending(struct task_struct *p)
{
	return unlikely(sigismember(&p->pending.signal, SIGKILL));
}

static inline int fatal_signal_pending(struct task_struct *p)
{
	return signal_pending(p) && __fatal_signal_pending(p);
}

static inline int signal_pending_state(long state, struct task_struct *p)
{
	if (!(state & (TASK_INTERRUPTIBLE | TASK_WAKEKILL)))
		return 0;
	if (!signal_pending(p))
		return 0;

	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
}
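
/*
 * Typical use (illustrative sketch; "condition" is a placeholder): an
 * interruptible wait loop bails out when any signal is pending, while a
 * killable path only reacts to fatal signals.
 *
 *	while (!condition) {
 *		if (signal_pending(current))
 *			return -ERESTARTSYS;
 *		schedule_timeout_interruptible(HZ);
 *	}
 *
 *	if (fatal_signal_pending(current))
 *		return -EINTR;
 *
 * signal_pending_state() is what the scheduler uses to decide whether a
 * task about to sleep interruptibly (or killably) should stay runnable
 * instead.
 */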

/*
 * This should only be used in fault handlers to decide whether we
 * should stop the current fault routine to handle the signals
 * instead, especially when we have been interrupted by a
 * VM_FAULT_RETRY.
 */
static inline bool fault_signal_pending(vm_fault_t fault_flags,
					struct pt_regs *regs)
{
	return unlikely((fault_flags & VM_FAULT_RETRY) &&
			(fatal_signal_pending(current) ||
			 (user_mode(regs) && signal_pending(current))));
}
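
/*
 * Sketch of the intended call site (simplified; the handle_mm_fault()
 * arguments and the surrounding retry logic are elided): an architecture's
 * page fault handler checks this right after handle_mm_fault() so that a
 * pending fatal (or, for user mode, any) signal aborts the retry early.
 *
 *	fault = handle_mm_fault(vma, address, flags);
 *	if (fault_signal_pending(fault, regs))
 *		return;
 */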

/*
 * Reevaluate whether the task has signals pending delivery.
 * Wake the task if so.
 * This is required every time the blocked sigset_t changes.
 * Callers must hold sighand->siglock.
 */
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
extern void calculate_sigpending(void);
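
/*
 * Minimal sketch of the rule above ("newset" is assumed to be a sigset_t
 * the caller has already prepared): any update of current->blocked must be
 * followed by recalc_sigpending() while sighand->siglock is held.
 *
 *	spin_lock_irq(&current->sighand->siglock);
 *	current->blocked = newset;
 *	recalc_sigpending();
 *	spin_unlock_irq(&current->sighand->siglock);
 *
 * (set_current_blocked() in kernel/signal.c is the real helper for this;
 * the open-coded form only illustrates the locking requirement.)
 */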

extern void signal_wake_up_state(struct task_struct *t, unsigned int state);

static inline void signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}
static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

void task_join_group_stop(struct task_struct *task);

#ifdef TIF_RESTORE_SIGMASK
/*
 * Legacy restore_sigmask accessors. These are inefficient on
 * SMP architectures because they require atomic operations.
 */

/**
 * set_restore_sigmask() - make sure saved_sigmask processing gets done
 *
 * This sets TIF_RESTORE_SIGMASK and ensures that the arch signal code
 * will run before returning to user mode, to process the flag. For
 * all callers, TIF_SIGPENDING is already set or it's no harm to set
 * it. TIF_RESTORE_SIGMASK need not be in the set of bits that the
 * arch code will notice on return to user mode, in case those bits
 * are scarce. We set TIF_SIGPENDING here to ensure that the arch
 * signal code always gets run when TIF_RESTORE_SIGMASK is set.
 */
static inline void set_restore_sigmask(void)
{
	set_thread_flag(TIF_RESTORE_SIGMASK);
}

static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	clear_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}

static inline void clear_restore_sigmask(void)
{
	clear_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return test_tsk_thread_flag(task, TIF_RESTORE_SIGMASK);
}
static inline bool test_restore_sigmask(void)
{
	return test_thread_flag(TIF_RESTORE_SIGMASK);
}
static inline bool test_and_clear_restore_sigmask(void)
{
	return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
}

#else /* TIF_RESTORE_SIGMASK */

/* Higher-quality implementation, used if TIF_RESTORE_SIGMASK doesn't exist. */
static inline void set_restore_sigmask(void)
{
	current->restore_sigmask = true;
}
static inline void clear_tsk_restore_sigmask(struct task_struct *task)
{
	task->restore_sigmask = false;
}
static inline void clear_restore_sigmask(void)
{
	current->restore_sigmask = false;
}
static inline bool test_restore_sigmask(void)
{
	return current->restore_sigmask;
}
static inline bool test_tsk_restore_sigmask(struct task_struct *task)
{
	return task->restore_sigmask;
}
static inline bool test_and_clear_restore_sigmask(void)
{
	if (!current->restore_sigmask)
		return false;
	current->restore_sigmask = false;
	return true;
}
#endif

static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		__set_current_blocked(&current->saved_sigmask);
}

extern int set_user_sigmask(const sigset_t __user *umask, size_t sigsetsize);

static inline void restore_saved_sigmask_unless(bool interrupted)
{
	if (interrupted)
		WARN_ON(!test_thread_flag(TIF_SIGPENDING));
	else
		restore_saved_sigmask();
}
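
/*
 * Sketch of the save/restore protocol used by the sigmask-taking syscalls
 * (pselect/ppoll/epoll_pwait); do_wait_for_events() is a placeholder:
 *
 *	ret = set_user_sigmask(umask, sigsetsize);	// saves ->blocked and
 *	if (ret)					// installs the new mask
 *		return ret;
 *
 *	ret = do_wait_for_events(...);
 *
 *	restore_saved_sigmask_unless(ret == -EINTR);
 *	return ret;
 *
 * When the wait was interrupted, the saved mask is deliberately left in
 * place so that signal delivery can pick it up via sigmask_to_save() and
 * stash it in the signal frame, to be restored when the handler returns.
 */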

static inline sigset_t *sigmask_to_save(void)
{
	sigset_t *res = &current->blocked;
	if (unlikely(test_restore_sigmask()))
		res = &current->saved_sigmask;
	return res;
}

static inline int kill_cad_pid(int sig, int priv)
{
	return kill_pid(cad_pid, sig, priv);
}

/* These can be the second arg to send_sig_info/send_group_sig_info. */
#define SEND_SIG_NOINFO	((struct kernel_siginfo *) 0)
#define SEND_SIG_PRIV	((struct kernel_siginfo *) 1)
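
/*
 * Example (illustrative): queueing a kernel-originated signal with no
 * siginfo payload for some task "p":
 *
 *	send_sig_info(SIGTTOU, SEND_SIG_PRIV, p);
 *
 * which is equivalent to send_sig(SIGTTOU, p, 1).
 */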

/*
 * True if we are on the alternate signal stack.
 */
static inline int on_sig_stack(unsigned long sp)
{
	/*
	 * If the signal stack is SS_AUTODISARM then, by construction, we
	 * can't be on the signal stack unless user code deliberately set
	 * SS_AUTODISARM when we were already on it.
	 *
	 * This improves reliability: if user state gets corrupted such that
	 * the stack pointer points very close to the end of the signal stack,
	 * then this check will enable the signal to be handled anyway.
	 */
	if (current->sas_ss_flags & SS_AUTODISARM)
		return 0;

#ifdef CONFIG_STACK_GROWSUP
	return sp >= current->sas_ss_sp &&
		sp - current->sas_ss_sp < current->sas_ss_size;
#else
	return sp > current->sas_ss_sp &&
		sp - current->sas_ss_sp <= current->sas_ss_size;
#endif
}

static inline int sas_ss_flags(unsigned long sp)
{
	if (!current->sas_ss_size)
		return SS_DISABLE;

	return on_sig_stack(sp) ? SS_ONSTACK : 0;
}

static inline void sas_ss_reset(struct task_struct *p)
{
	p->sas_ss_sp = 0;
	p->sas_ss_size = 0;
	p->sas_ss_flags = SS_DISABLE;
}

static inline unsigned long sigsp(unsigned long sp, struct ksignal *ksig)
{
	if (unlikely((ksig->ka.sa.sa_flags & SA_ONSTACK)) && !sas_ss_flags(sp))
#ifdef CONFIG_STACK_GROWSUP
		return current->sas_ss_sp;
#else
		return current->sas_ss_sp + current->sas_ss_size;
#endif
	return sp;
}
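
/*
 * Sketch of how architecture signal-frame setup code typically uses
 * sigsp() (the rt_sigframe type, the regs->sp field and the 16-byte
 * alignment are illustrative assumptions that vary per architecture):
 *
 *	unsigned long sp = sigsp(regs->sp, ksig);
 *	struct rt_sigframe __user *frame;
 *
 *	frame = (struct rt_sigframe __user *)
 *			round_down(sp - sizeof(*frame), 16);
 */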

extern void __cleanup_sighand(struct sighand_struct *);
extern void flush_itimer_signals(void);

#define tasklist_empty() \
	list_empty(&init_task.tasks)

#define next_task(p) \
	list_entry_rcu((p)->tasks.next, struct task_struct, tasks)

#define for_each_process(p) \
	for (p = &init_task ; (p = next_task(p)) != &init_task ; )

extern bool current_is_single_threaded(void);

/*
 * Careful: do_each_thread/while_each_thread is a double loop so
 * 'break' will not work as expected - use goto instead.
 */
#define do_each_thread(g, t) \
	for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do

#define while_each_thread(g, t) \
	while ((t = next_thread(t)) != g)

#define __for_each_thread(signal, t) \
	list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)

#define for_each_thread(p, t) \
	__for_each_thread((p)->signal, t)

/* Careful: this is a double loop, 'break' won't work as expected. */
#define for_each_process_thread(p, t) \
	for_each_process(p) for_each_thread(p, t)
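
/*
 * Example (a minimal sketch; account_one_task() is a placeholder): walking
 * every thread in the system. The traversal is only safe under RCU or
 * while holding the tasklist lock.
 *
 *	struct task_struct *p, *t;
 *
 *	rcu_read_lock();
 *	for_each_process_thread(p, t)
 *		account_one_task(t);
 *	rcu_read_unlock();
 */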

typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);

static inline
struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
{
	struct pid *pid;
	if (type == PIDTYPE_PID)
		pid = task_pid(task);
	else
		pid = task->signal->pids[type];
	return pid;
}

static inline struct pid *task_tgid(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_TGID];
}

/*
 * Without tasklist or RCU lock it is not safe to dereference
 * the result of task_pgrp/task_session even if task == current;
 * we can race with another thread doing sys_setsid/sys_setpgid.
 */
static inline struct pid *task_pgrp(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_PGID];
}

static inline struct pid *task_session(struct task_struct *task)
{
	return task->signal->pids[PIDTYPE_SID];
}
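
/*
 * Example of the rule above (illustrative): take a reference under RCU so
 * the struct pid stays valid after the lock is dropped.
 *
 *	struct pid *pgrp;
 *
 *	rcu_read_lock();
 *	pgrp = get_pid(task_pgrp(task));
 *	rcu_read_unlock();
 *	...
 *	put_pid(pgrp);
 */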

static inline int get_nr_threads(struct task_struct *task)
{
	return task->signal->nr_threads;
}

static inline bool thread_group_leader(struct task_struct *p)
{
	return p->exit_signal >= 0;
}

static inline
bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
{
	return p1->signal == p2->signal;
}

static inline struct task_struct *next_thread(const struct task_struct *p)
{
	return list_entry_rcu(p->thread_group.next,
			      struct task_struct, thread_group);
}

static inline int thread_group_empty(struct task_struct *p)
{
	return list_empty(&p->thread_group);
}

#define delay_group_leader(p) \
	(thread_group_leader(p) && !thread_group_empty(p))

extern bool thread_group_exited(struct pid *pid);

extern struct sighand_struct *__lock_task_sighand(struct task_struct *task,
						  unsigned long *flags);

static inline struct sighand_struct *lock_task_sighand(struct task_struct *task,
						       unsigned long *flags)
{
	struct sighand_struct *ret;

	ret = __lock_task_sighand(task, flags);
	(void)__cond_lock(&task->sighand->siglock, ret);
	return ret;
}

static inline void unlock_task_sighand(struct task_struct *task,
				       unsigned long *flags)
{
	spin_unlock_irqrestore(&task->sighand->siglock, *flags);
}
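
/*
 * Typical pattern (illustrative): __lock_task_sighand() returns NULL when
 * the task has already released its sighand_struct, so callers must check
 * the result before relying on the lock being held.
 *
 *	unsigned long flags;
 *
 *	if (lock_task_sighand(task, &flags)) {
 *		...		// task->signal and task->sighand are stable
 *		unlock_task_sighand(task, &flags);
 *	}
 */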

static inline unsigned long task_rlimit(const struct task_struct *task,
					unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_cur);
}

static inline unsigned long task_rlimit_max(const struct task_struct *task,
					    unsigned int limit)
{
	return READ_ONCE(task->signal->rlim[limit].rlim_max);
}

static inline unsigned long rlimit(unsigned int limit)
{
	return task_rlimit(current, limit);
}

static inline unsigned long rlimit_max(unsigned int limit)
{
	return task_rlimit_max(current, limit);
}
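
/*
 * Example (illustrative; "locked_pages" and "new_pages" are placeholders):
 * enforcing a per-process limit with the lockless accessors above.
 *
 *	unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
 *
 *	if (locked_pages + new_pages > lock_limit && !capable(CAP_IPC_LOCK))
 *		return -EPERM;
 */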

#endif /* _LINUX_SCHED_SIGNAL_H */