Lines matching +full:signal +full:- +full:group (search hits from the Linux kernel's kernel/exit.c; each entry shows the source line number, the matching code, and its enclosing function)

1 // SPDX-License-Identifier: GPL-2.0-only
43 #include <linux/signal.h>
44 #include <linux/posix-timers.h>
77 nr_threads--; in __unhash_process()
84 list_del_rcu(&p->tasks); in __unhash_process()
85 list_del_init(&p->sibling); in __unhash_process()
88 list_del_rcu(&p->thread_group); in __unhash_process()
89 list_del_rcu(&p->thread_node); in __unhash_process()
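
A minimal userspace sketch of the unlinking that the __unhash_process() hits above perform: the exiting task is removed from the global task list, its parent's children list, and the thread-group lists. Toy types and names only; the kernel's list_del_rcu() additionally defers node reuse across an RCU grace period, which this model omits.

#include <stdio.h>
#include <stddef.h>

/* Minimal intrusive doubly linked list, shaped like the kernel's list_head. */
struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

/* Analogue of list_del_init(): unlink the node, leave it self-linked. */
static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);
}

struct task {				/* toy stand-in for task_struct */
	int pid;
	struct list_head tasks;		/* link in the global task list */
};

int main(void)
{
	struct list_head tasklist;
	struct task a = { .pid = 1 }, b = { .pid = 2 };

	list_init(&tasklist);
	list_add_tail(&a.tasks, &tasklist);
	list_add_tail(&b.tasks, &tasklist);

	list_del_init(&a.tasks);	/* the "__unhash_process(a)" step */

	for (struct list_head *p = tasklist.next; p != &tasklist; p = p->next) {
		struct task *t = (struct task *)((char *)p - offsetof(struct task, tasks));
		printf("still on tasklist: pid %d\n", t->pid);
	}
	return 0;
}
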
93 * This function expects the tasklist_lock write-locked.
97 struct signal_struct *sig = tsk->signal; in __exit_signal()
103 sighand = rcu_dereference_check(tsk->sighand, in __exit_signal()
105 spin_lock(&sighand->siglock); in __exit_signal()
114 tty = sig->tty; in __exit_signal()
115 sig->tty = NULL; in __exit_signal()
118 * If there is any task waiting for the group exit in __exit_signal()
121 if (sig->notify_count > 0 && !--sig->notify_count) in __exit_signal()
122 wake_up_process(sig->group_exec_task); in __exit_signal()
124 if (tsk == sig->curr_target) in __exit_signal()
125 sig->curr_target = next_thread(tsk); in __exit_signal()
128 add_device_randomness((const void*) &tsk->se.sum_exec_runtime, in __exit_signal()
133 * skip the group leader because it is the last user of signal_struct, in __exit_signal()
135 * see the empty ->thread_head list. in __exit_signal()
138 write_seqlock(&sig->stats_lock); in __exit_signal()
139 sig->utime += utime; in __exit_signal()
140 sig->stime += stime; in __exit_signal()
141 sig->gtime += task_gtime(tsk); in __exit_signal()
142 sig->min_flt += tsk->min_flt; in __exit_signal()
143 sig->maj_flt += tsk->maj_flt; in __exit_signal()
144 sig->nvcsw += tsk->nvcsw; in __exit_signal()
145 sig->nivcsw += tsk->nivcsw; in __exit_signal()
146 sig->inblock += task_io_get_inblock(tsk); in __exit_signal()
147 sig->oublock += task_io_get_oublock(tsk); in __exit_signal()
148 task_io_accounting_add(&sig->ioac, &tsk->ioac); in __exit_signal()
149 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; in __exit_signal()
150 sig->nr_threads--; in __exit_signal()
152 write_sequnlock(&sig->stats_lock); in __exit_signal()
155 * Do this under ->siglock, we can race with another thread in __exit_signal()
158 flush_sigqueue(&tsk->pending); in __exit_signal()
159 tsk->sighand = NULL; in __exit_signal()
160 spin_unlock(&sighand->siglock); in __exit_signal()
165 flush_sigqueue(&sig->shared_pending); in __exit_signal()
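
The accounting block above runs under write_seqlock(&sig->stats_lock) so that lockless readers can retry instead of blocking while per-thread times are folded into signal_struct. A toy userspace seqcount with the same retry shape; a real seqlock needs the kernel's primitives and stricter fencing, so treat this purely as an illustration.

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;			/* even = idle, odd = write in progress */
static unsigned long utime, stime;	/* the protected statistics */

static void stats_add(unsigned long u, unsigned long s)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);
	utime += u;			/* cf. sig->utime += utime above */
	stime += s;
	atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);
}

static void stats_read(unsigned long *u, unsigned long *s)
{
	unsigned int start;

	do {
		while ((start = atomic_load_explicit(&seq, memory_order_acquire)) & 1)
			;		/* writer active: spin, then retry */
		*u = utime;
		*s = stime;
	} while (atomic_load_explicit(&seq, memory_order_acquire) != start);
}

int main(void)
{
	unsigned long u, s;

	stats_add(10, 4);
	stats_read(&u, &s);
	printf("utime=%lu stime=%lu\n", u, s);
	return 0;
}
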
183 if (refcount_dec_and_test(&task->rcu_users)) in put_task_struct_rcu_user()
184 call_rcu(&task->rcu, delayed_put_task_struct); in put_task_struct_rcu_user()
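
put_task_struct_rcu_user() is the classic "drop a reference, last one out frees" pattern, with the free deferred via call_rcu(). A userspace sketch with C11 atomics, replacing the RCU-deferred free with an immediate one (illustrative names throughout):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int rcu_users;	/* cf. task->rcu_users */
	int payload;
};

static void put_obj(struct obj *o)
{
	/* fetch_sub returns the old count: old == 1 means we were last */
	if (atomic_fetch_sub_explicit(&o->rcu_users, 1,
				      memory_order_acq_rel) == 1) {
		printf("last user: freeing payload %d\n", o->payload);
		free(o);	/* kernel: call_rcu(..., delayed_put_task_struct) */
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->rcu_users, 2);
	o->payload = 42;
	put_obj(o);		/* one user remains */
	put_obj(o);		/* last put frees */
	return 0;
}
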
197 /* don't need to get the RCU readlock here - the process is dead and in release_task()
198 * can't be modifying its own credentials. But shut RCU-lockdep up */ in release_task()
207 thread_pid = get_pid(p->thread_pid); in release_task()
211 * If we are the last non-leader member of the thread in release_task()
212 * group, and the leader is zombie, then notify the in release_task()
213 * group leader's parent process. (if it wants notification.) in release_task()
216 leader = p->group_leader; in release_task()
218 && leader->exit_state == EXIT_ZOMBIE) { in release_task()
224 zap_leader = do_notify_parent(leader, leader->exit_signal); in release_task()
226 leader->exit_state = EXIT_DEAD; in release_task()
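
do_notify_parent() returns true when the parent ignores SIGCHLD, and release_task() then reaps the zombie leader on the spot (zap_leader). That policy is directly visible from userspace: with SIGCHLD set to SIG_IGN, children never linger as zombies and wait() reports ECHILD. Runnable demo:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	signal(SIGCHLD, SIG_IGN);	/* ask the kernel to autoreap children */

	pid_t pid = fork();
	if (pid == 0)
		_exit(0);		/* child exits immediately */

	sleep(1);			/* give the kernel time to reap it */
	if (waitpid(pid, NULL, 0) < 0 && errno == ECHILD)
		puts("child was autoreaped; nothing left to wait for");
	return 0;
}
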
261 task = rcu_dereference(w->task); in rcuwait_wake_up()
271 * Determine if a process group is "orphaned", according to the POSIX
273 * by terminal-generated stop signals. Newly orphaned process groups are
285 (p->exit_state && thread_group_empty(p)) || in will_become_orphaned_pgrp()
286 is_global_init(p->real_parent)) in will_become_orphaned_pgrp()
289 if (task_pgrp(p->real_parent) != pgrp && in will_become_orphaned_pgrp()
290 task_session(p->real_parent) == task_session(p)) in will_become_orphaned_pgrp()
313 if (p->signal->flags & SIGNAL_STOP_STOPPED) in has_stopped_jobs()
335 parent = tsk->real_parent; in kill_orphaned_pgrp()
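
A pure-function model of the POSIX orphaned-process-group test behind will_become_orphaned_pgrp(): a group stays "anchored" as long as some member has a parent outside the group but inside the same session; otherwise terminal-generated stop signals could strand it, which is what kill_orphaned_pgrp() deals with. Toy data layout, illustrative field names.

#include <stdbool.h>
#include <stdio.h>

struct proc { int pgrp, session, parent; };	/* parent: index, -1 = init */

static bool pgrp_orphaned(const struct proc *p, int n, int pgrp)
{
	for (int i = 0; i < n; i++) {
		if (p[i].pgrp != pgrp || p[i].parent < 0)
			continue;	/* other group, or child of init */
		const struct proc *pp = &p[p[i].parent];
		if (pp->pgrp != pgrp && pp->session == p[i].session)
			return false;	/* anchored to the session */
	}
	return true;
}

int main(void)
{
	struct proc procs[] = {
		{ .pgrp = 10, .session = 1, .parent = -1 },	/* shell */
		{ .pgrp = 20, .session = 1, .parent = 0 },	/* job leader */
		{ .pgrp = 20, .session = 1, .parent = 1 },	/* job member */
	};

	printf("orphaned: %d\n", pgrp_orphaned(procs, 3, 20)); /* 0: shell anchors it */
	procs[1].parent = -1;		/* leader reparented away */
	printf("orphaned: %d\n", pgrp_orphaned(procs, 3, 20)); /* 1 */
	return 0;
}
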
358 * and setting PF_POSTCOREDUMP. The core-inducing thread in coredump_task_exit()
359 * will increment ->nr_threads for each thread in the in coredump_task_exit()
360 * group without PF_POSTCOREDUMP set. in coredump_task_exit()
362 spin_lock_irq(&tsk->sighand->siglock); in coredump_task_exit()
363 tsk->flags |= PF_POSTCOREDUMP; in coredump_task_exit()
364 core_state = tsk->signal->core_state; in coredump_task_exit()
365 spin_unlock_irq(&tsk->sighand->siglock); in coredump_task_exit()
370 if (self.task->flags & PF_SIGNALED) in coredump_task_exit()
371 self.next = xchg(&core_state->dumper.next, &self); in coredump_task_exit()
376 * to core_state->dumper. in coredump_task_exit()
378 if (atomic_dec_and_test(&core_state->nr_threads)) in coredump_task_exit()
379 complete(&core_state->startup); in coredump_task_exit()
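
The coredump handshake above is a countdown to a completion: every non-dumping thread checks in on its way out, and whichever thread drops core_state->nr_threads to zero completes the dumper's wait. The same countdown-then-wake shape also appears in __exit_signal()'s notify_count handling and synchronize_group_exit()'s quick_threads below. A pthreads model (illustrative names; compile with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int nr_threads;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t startup = PTHREAD_COND_INITIALIZER;
static int startup_done;

static void *exiting_thread(void *arg)
{
	/* ...each thread's exit path runs here... */
	if (atomic_fetch_sub(&nr_threads, 1) == 1) {	/* we were the last */
		pthread_mutex_lock(&lock);
		startup_done = 1;	/* cf. complete(&core_state->startup) */
		pthread_cond_signal(&startup);
		pthread_mutex_unlock(&lock);
	}
	return NULL;
}

int main(void)
{
	enum { N = 3 };
	pthread_t t[N];

	atomic_init(&nr_threads, N);
	for (int i = 0; i < N; i++)
		pthread_create(&t[i], NULL, exiting_thread, NULL);

	pthread_mutex_lock(&lock);
	while (!startup_done)		/* the core-inducing thread waits */
		pthread_cond_wait(&startup, &lock);
	pthread_mutex_unlock(&lock);
	puts("all threads accounted for; safe to dump");

	for (int i = 0; i < N; i++)
		pthread_join(t[i], NULL);
	return 0;
}
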
404 if (mm->owner != p) in mm_update_next_owner()
411 if (atomic_read(&mm->mm_users) <= 1) { in mm_update_next_owner()
412 WRITE_ONCE(mm->owner, NULL); in mm_update_next_owner()
420 list_for_each_entry(c, &p->children, sibling) { in mm_update_next_owner()
421 if (c->mm == mm) in mm_update_next_owner()
428 list_for_each_entry(c, &p->real_parent->children, sibling) { in mm_update_next_owner()
429 if (c->mm == mm) in mm_update_next_owner()
437 if (g->flags & PF_KTHREAD) in mm_update_next_owner()
440 if (c->mm == mm) in mm_update_next_owner()
442 if (c->mm) in mm_update_next_owner()
452 WRITE_ONCE(mm->owner, NULL); in mm_update_next_owner()
459 * The task_lock protects c->mm from changing. in mm_update_next_owner()
460 * We always want mm->owner->mm == mm in mm_update_next_owner()
468 if (c->mm != mm) { in mm_update_next_owner()
473 WRITE_ONCE(mm->owner, c); in mm_update_next_owner()
486 struct mm_struct *mm = current->mm; in exit_mm()
494 BUG_ON(mm != current->active_mm); in exit_mm()
500 * tsk->mm, and the loop in membarrier_global_expedited() may in exit_mm()
502 * rq->membarrier_state, so those would not issue an IPI. in exit_mm()
504 * user-space memory, before clearing tsk->mm or the in exit_mm()
505 * rq->membarrier_state. in exit_mm()
509 current->mm = NULL; in exit_mm()
526 if (!(t->flags & PF_EXITING)) in find_alive_thread()
538 struct task_struct *reaper = pid_ns->child_reaper; in find_child_reaper()
546 pid_ns->child_reaper = reaper; in find_child_reaper()
553 list_del_init(&p->ptrace_entry); in find_child_reaper()
564 * When we die, we re-parent all our children, and try to:
565 * 1. give them to another thread in our thread group, if such a member exists
579 if (father->signal->has_child_subreaper) { in find_new_reaper()
580 unsigned int ns_level = task_pid(father)->level; in find_new_reaper()
582 * Find the first ->is_child_subreaper ancestor in our pid_ns. in find_new_reaper()
586 * We check pid->level, this is slightly more efficient than in find_new_reaper()
589 for (reaper = father->real_parent; in find_new_reaper()
590 task_pid(reaper)->level == ns_level; in find_new_reaper()
591 reaper = reaper->real_parent) { in find_new_reaper()
594 if (!reaper->signal->is_child_subreaper) in find_new_reaper()
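
A sketch of the subreaper walk in find_new_reaper(): climb real_parent links while still at the same pid-namespace depth (pid->level in the kernel) and stop at the first ancestor that declared itself a child subreaper, e.g. via prctl(PR_SET_CHILD_SUBREAPER); otherwise fall back to the namespace's child_reaper. Toy structures and names; the real code also skips exiting ancestors.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task {
	const char *comm;
	int level;			/* pid-namespace nesting depth */
	bool is_child_subreaper;
	struct task *real_parent;
};

static struct task *find_reaper(struct task *father, struct task *child_reaper)
{
	int ns_level = father->level;

	for (struct task *r = father->real_parent;
	     r && r->level == ns_level; r = r->real_parent)
		if (r->is_child_subreaper)
			return r;
	return child_reaper;
}

int main(void)
{
	struct task init = { "init", 0, false, NULL };
	struct task mgr  = { "session-mgr", 0, true, &init };	/* subreaper */
	struct task sh   = { "sh", 0, false, &mgr };
	struct task job  = { "job", 0, false, &sh };

	printf("orphans of %s go to %s\n", job.comm,
	       find_reaper(&job, &init)->comm);	/* -> session-mgr */
	return 0;
}
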
611 if (unlikely(p->exit_state == EXIT_DEAD)) in reparent_leader()
615 p->exit_signal = SIGCHLD; in reparent_leader()
618 if (!p->ptrace && in reparent_leader()
619 p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) { in reparent_leader()
620 if (do_notify_parent(p, p->exit_signal)) { in reparent_leader()
621 p->exit_state = EXIT_DEAD; in reparent_leader()
622 list_add(&p->ptrace_entry, dead); in reparent_leader()
642 if (unlikely(!list_empty(&father->ptraced))) in forget_original_parent()
647 if (list_empty(&father->children)) in forget_original_parent()
651 list_for_each_entry(p, &father->children, sibling) { in forget_original_parent()
653 RCU_INIT_POINTER(t->real_parent, reaper); in forget_original_parent()
654 BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father)); in forget_original_parent()
655 if (likely(!t->ptrace)) in forget_original_parent()
656 t->parent = t->real_parent; in forget_original_parent()
657 if (t->pdeath_signal) in forget_original_parent()
658 group_send_sig_info(t->pdeath_signal, in forget_original_parent()
669 list_splice_tail_init(&father->children, &reaper->children); in forget_original_parent()
686 kill_orphaned_pgrp(tsk->group_leader, NULL); in exit_notify()
688 tsk->exit_state = EXIT_ZOMBIE; in exit_notify()
689 if (unlikely(tsk->ptrace)) { in exit_notify()
693 tsk->exit_signal : SIGCHLD; in exit_notify()
697 do_notify_parent(tsk, tsk->exit_signal); in exit_notify()
703 tsk->exit_state = EXIT_DEAD; in exit_notify()
704 list_add(&tsk->ptrace_entry, &dead); in exit_notify()
707 /* mt-exec, de_thread() is waiting for group leader */ in exit_notify()
708 if (unlikely(tsk->signal->notify_count < 0)) in exit_notify()
709 wake_up_process(tsk->signal->group_exec_task); in exit_notify()
713 list_del_init(&p->ptrace_entry); in exit_notify()
733 current->comm, task_pid_nr(current), free); in check_stack_usage()
744 struct sighand_struct *sighand = tsk->sighand; in synchronize_group_exit()
745 struct signal_struct *signal = tsk->signal; in synchronize_group_exit()
747 spin_lock_irq(&sighand->siglock); in synchronize_group_exit()
748 signal->quick_threads--; in synchronize_group_exit()
749 if ((signal->quick_threads == 0) && in synchronize_group_exit()
750 !(signal->flags & SIGNAL_GROUP_EXIT)) { in synchronize_group_exit()
751 signal->flags = SIGNAL_GROUP_EXIT; in synchronize_group_exit()
752 signal->group_exit_code = code; in synchronize_group_exit()
753 signal->group_stop_count = 0; in synchronize_group_exit()
755 spin_unlock_irq(&sighand->siglock); in synchronize_group_exit()
765 WARN_ON(tsk->plug); in do_exit()
779 if (tsk->mm) in do_exit()
780 sync_mm_rss(tsk->mm); in do_exit()
782 group_dead = atomic_dec_and_test(&tsk->signal->live); in do_exit()
790 tsk->signal->group_exit_code ?: (int)code); in do_exit()
793 hrtimer_cancel(&tsk->signal->real_timer); in do_exit()
796 if (tsk->mm) in do_exit()
797 setmax_mm_hiwater_rss(&tsk->signal->maxrss, tsk->mm); in do_exit()
804 tsk->exit_code = code; in do_exit()
824 * Flush inherited counters to the parent - before the parent in do_exit()
825 * gets woken up by child-exit notifications. in do_exit()
844 if (unlikely(current->pi_state_cache)) in do_exit()
845 kfree(current->pi_state_cache); in do_exit()
852 if (tsk->io_context) in do_exit()
855 if (tsk->splice_pipe) in do_exit()
856 free_pipe_info(tsk->splice_pipe); in do_exit()
858 if (tsk->task_frag.page) in do_exit()
859 put_page(tsk->task_frag.page); in do_exit()
866 if (tsk->nr_dirtied) in do_exit()
867 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied); in do_exit()
890 if (unlikely(!tsk->pid)) in make_task_dead()
895 current->comm, task_pid_nr(current), in make_task_dead()
904 if (unlikely(tsk->flags & PF_EXITING)) { in make_task_dead()
907 tsk->exit_state = EXIT_DEAD; in make_task_dead()
908 refcount_inc(&tsk->rcu_users); in make_task_dead()
921 * Take down every thread in the group. This is called by fatal signals
927 struct signal_struct *sig = current->signal; in do_group_exit()
929 if (sig->flags & SIGNAL_GROUP_EXIT) in do_group_exit()
930 exit_code = sig->group_exit_code; in do_group_exit()
931 else if (sig->group_exec_task) in do_group_exit()
934 struct sighand_struct *const sighand = current->sighand; in do_group_exit()
936 spin_lock_irq(&sighand->siglock); in do_group_exit()
937 if (sig->flags & SIGNAL_GROUP_EXIT) in do_group_exit()
939 exit_code = sig->group_exit_code; in do_group_exit()
940 else if (sig->group_exec_task) in do_group_exit()
943 sig->group_exit_code = exit_code; in do_group_exit()
944 sig->flags = SIGNAL_GROUP_EXIT; in do_group_exit()
947 spin_unlock_irq(&sighand->siglock); in do_group_exit()
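
do_group_exit() uses a double-checked pattern: peek at sig->flags locklessly, then re-check under siglock so exactly one thread publishes group_exit_code. A pthreads sketch of that shape; the unlocked fast-path read is only illustrative here, while the kernel relies on its own memory model plus the siglock for the authoritative check.

#include <pthread.h>
#include <stdio.h>

#define GROUP_EXIT 0x1

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;
static int flags, group_exit_code;

static int group_exit(int exit_code)
{
	if (flags & GROUP_EXIT)			/* lockless fast path */
		return group_exit_code;

	pthread_mutex_lock(&siglock);
	if (flags & GROUP_EXIT) {		/* re-check: someone beat us */
		exit_code = group_exit_code;
	} else {
		group_exit_code = exit_code;	/* we publish the code */
		flags = GROUP_EXIT;
	}
	pthread_mutex_unlock(&siglock);
	return exit_code;
}

int main(void)
{
	printf("first caller sets the code: %d\n", group_exit(7));
	printf("later callers observe it:   %d\n", group_exit(99));
	return 0;
}
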
955 * this kills every thread in the thread group. Note that any externally
956 * wait4()-ing process will get the correct exit code - even if this
957 * thread is not the thread group leader.
988 return wo->wo_type == PIDTYPE_MAX || in eligible_pid()
989 task_pid_type(p, wo->wo_type) == wo->wo_pid; in eligible_pid()
1002 if (ptrace || (wo->wo_flags & __WALL)) in eligible_child()
1007 * otherwise, wait for non-clone children *only*. in eligible_child()
1010 * using a signal other than SIGCHLD, or a non-leader thread which in eligible_child()
1013 if ((p->exit_signal != SIGCHLD) ^ !!(wo->wo_flags & __WCLONE)) in eligible_child()
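
The XOR test above encodes "SIGCHLD children by default, clone children only with __WCLONE": a child is skipped exactly when its exit_signal-is-not-SIGCHLD bit and the __WCLONE bit disagree. A truth-table demo:

#include <stdio.h>

int main(void)
{
	/* is_clone mirrors (p->exit_signal != SIGCHLD); wclone mirrors
	 * !!(wo->wo_flags & __WCLONE). Skip when they differ. */
	for (int is_clone = 0; is_clone <= 1; is_clone++)
		for (int wclone = 0; wclone <= 1; wclone++)
			printf("clone-child=%d __WCLONE=%d -> %s\n",
			       is_clone, wclone,
			       (is_clone ^ wclone) ? "skip" : "eligible");
	return 0;
}
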
1032 if (!likely(wo->wo_flags & WEXITED)) in wait_task_zombie()
1035 if (unlikely(wo->wo_flags & WNOWAIT)) { in wait_task_zombie()
1036 status = (p->signal->flags & SIGNAL_GROUP_EXIT) in wait_task_zombie()
1037 ? p->signal->group_exit_code : p->exit_code; in wait_task_zombie()
1041 if (wo->wo_rusage) in wait_task_zombie()
1042 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_zombie()
1051 if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE) in wait_task_zombie()
1060 * Check thread_group_leader() to exclude the traced sub-threads. in wait_task_zombie()
1063 struct signal_struct *sig = p->signal; in wait_task_zombie()
1064 struct signal_struct *psig = current->signal; in wait_task_zombie()
1069 * The resource counters for the group leader are in its in wait_task_zombie()
1070 * own task_struct. Those for dead threads in the group in wait_task_zombie()
1076 * p->signal fields because the whole thread group is dead in wait_task_zombie()
1079 * psig->stats_lock also protects us from our sub-threads in wait_task_zombie()
1081 * we change k_getrusage()-like users to rely on this lock in wait_task_zombie()
1082 * we have to take ->siglock as well. in wait_task_zombie()
1085 * the thread group, which consolidates times for all threads in wait_task_zombie()
1086 * in the group including the group leader. in wait_task_zombie()
1089 spin_lock_irq(&current->sighand->siglock); in wait_task_zombie()
1090 write_seqlock(&psig->stats_lock); in wait_task_zombie()
1091 psig->cutime += tgutime + sig->cutime; in wait_task_zombie()
1092 psig->cstime += tgstime + sig->cstime; in wait_task_zombie()
1093 psig->cgtime += task_gtime(p) + sig->gtime + sig->cgtime; in wait_task_zombie()
1094 psig->cmin_flt += in wait_task_zombie()
1095 p->min_flt + sig->min_flt + sig->cmin_flt; in wait_task_zombie()
1096 psig->cmaj_flt += in wait_task_zombie()
1097 p->maj_flt + sig->maj_flt + sig->cmaj_flt; in wait_task_zombie()
1098 psig->cnvcsw += in wait_task_zombie()
1099 p->nvcsw + sig->nvcsw + sig->cnvcsw; in wait_task_zombie()
1100 psig->cnivcsw += in wait_task_zombie()
1101 p->nivcsw + sig->nivcsw + sig->cnivcsw; in wait_task_zombie()
1102 psig->cinblock += in wait_task_zombie()
1104 sig->inblock + sig->cinblock; in wait_task_zombie()
1105 psig->coublock += in wait_task_zombie()
1107 sig->oublock + sig->coublock; in wait_task_zombie()
1108 maxrss = max(sig->maxrss, sig->cmaxrss); in wait_task_zombie()
1109 if (psig->cmaxrss < maxrss) in wait_task_zombie()
1110 psig->cmaxrss = maxrss; in wait_task_zombie()
1111 task_io_accounting_add(&psig->ioac, &p->ioac); in wait_task_zombie()
1112 task_io_accounting_add(&psig->ioac, &sig->ioac); in wait_task_zombie()
1113 write_sequnlock(&psig->stats_lock); in wait_task_zombie()
1114 spin_unlock_irq(&current->sighand->siglock); in wait_task_zombie()
1117 if (wo->wo_rusage) in wait_task_zombie()
1118 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_zombie()
1119 status = (p->signal->flags & SIGNAL_GROUP_EXIT) in wait_task_zombie()
1120 ? p->signal->group_exit_code : p->exit_code; in wait_task_zombie()
1121 wo->wo_stat = status; in wait_task_zombie()
1130 if (do_notify_parent(p, p->exit_signal)) in wait_task_zombie()
1132 p->exit_state = state; in wait_task_zombie()
1139 infop = wo->wo_info; in wait_task_zombie()
1142 infop->cause = CLD_EXITED; in wait_task_zombie()
1143 infop->status = status >> 8; in wait_task_zombie()
1145 infop->cause = (status & 0x80) ? CLD_DUMPED : CLD_KILLED; in wait_task_zombie()
1146 infop->status = status & 0x7f; in wait_task_zombie()
1148 infop->pid = pid; in wait_task_zombie()
1149 infop->uid = uid; in wait_task_zombie()
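
The raw-status split above (status >> 8 for a normal exit, status & 0x7f for a fatal signal, bit 0x80 flagging a core dump) is exactly what the userspace wait macros decode. Runnable demo:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0)
		_exit(42);		/* try raise(SIGKILL) here instead */

	waitpid(pid, &status, 0);
	if (WIFEXITED(status))		/* the status >> 8 case */
		printf("exited, code %d\n", WEXITSTATUS(status));
	else if (WIFSIGNALED(status))	/* the status & 0x7f case */
		printf("killed by signal %d%s\n", WTERMSIG(status),
		       WCOREDUMP(status) ? " (core dumped)" : "");
	return 0;
}
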
1158 if (task_is_traced(p) && !(p->jobctl & JOBCTL_LISTENING)) in task_stopped_code()
1159 return &p->exit_code; in task_stopped_code()
1161 if (p->signal->flags & SIGNAL_STOP_STOPPED) in task_stopped_code()
1162 return &p->signal->group_exit_code; in task_stopped_code()
1168 * wait_task_stopped - Wait for %TASK_STOPPED or %TASK_TRACED
1177 * non-zero. Also, grabs and releases @p->sighand->siglock.
1181 * should continue. Non-zero return, -errno on failure and @p's pid on
1196 if (!ptrace && !(wo->wo_flags & WUNTRACED)) in wait_task_stopped()
1203 spin_lock_irq(&p->sighand->siglock); in wait_task_stopped()
1213 if (!unlikely(wo->wo_flags & WNOWAIT)) in wait_task_stopped()
1218 spin_unlock_irq(&p->sighand->siglock); in wait_task_stopped()
1234 if (wo->wo_rusage) in wait_task_stopped()
1235 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_stopped()
1238 if (likely(!(wo->wo_flags & WNOWAIT))) in wait_task_stopped()
1239 wo->wo_stat = (exit_code << 8) | 0x7f; in wait_task_stopped()
1241 infop = wo->wo_info; in wait_task_stopped()
1243 infop->cause = why; in wait_task_stopped()
1244 infop->status = exit_code; in wait_task_stopped()
1245 infop->pid = pid; in wait_task_stopped()
1246 infop->uid = uid; in wait_task_stopped()
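
wait_task_stopped() is what services WUNTRACED, and the (exit_code << 8) | 0x7f packing above is what WIFSTOPPED()/WSTOPSIG() unpack. Demo that stops a child with SIGSTOP and observes the stop:

#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int status;
	pid_t pid = fork();

	if (pid == 0) {
		raise(SIGSTOP);			/* child stops itself */
		_exit(0);
	}

	waitpid(pid, &status, WUNTRACED);	/* returns on the stop */
	if (WIFSTOPPED(status))
		printf("child stopped by signal %d\n", WSTOPSIG(status));

	kill(pid, SIGCONT);			/* let it run to completion */
	waitpid(pid, &status, 0);
	return 0;
}
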
1252 * Handle do_wait work for one task in a live, non-stopped state.
1263 if (!unlikely(wo->wo_flags & WCONTINUED)) in wait_task_continued()
1266 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) in wait_task_continued()
1269 spin_lock_irq(&p->sighand->siglock); in wait_task_continued()
1270 /* Re-check with the lock held. */ in wait_task_continued()
1271 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) { in wait_task_continued()
1272 spin_unlock_irq(&p->sighand->siglock); in wait_task_continued()
1275 if (!unlikely(wo->wo_flags & WNOWAIT)) in wait_task_continued()
1276 p->signal->flags &= ~SIGNAL_STOP_CONTINUED; in wait_task_continued()
1278 spin_unlock_irq(&p->sighand->siglock); in wait_task_continued()
1284 if (wo->wo_rusage) in wait_task_continued()
1285 getrusage(p, RUSAGE_BOTH, wo->wo_rusage); in wait_task_continued()
1288 infop = wo->wo_info; in wait_task_continued()
1290 wo->wo_stat = 0xffff; in wait_task_continued()
1292 infop->cause = CLD_CONTINUED; in wait_task_continued()
1293 infop->pid = pid; in wait_task_continued()
1294 infop->uid = uid; in wait_task_continued()
1295 infop->status = SIGCONT; in wait_task_continued()
1303 * -ECHILD should be in ->notask_error before the first call.
1306 * then ->notask_error is 0 if @p is an eligible child,
1307 * or still -ECHILD.
1314 * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition in wait_consider_task()
1317 int exit_state = READ_ONCE(p->exit_state); in wait_consider_task()
1333 wo->notask_error = 0; in wait_consider_task()
1337 if (likely(!ptrace) && unlikely(p->ptrace)) { in wait_consider_task()
1339 * If it is traced by its real parent's group, just pretend in wait_consider_task()
1343 * This also hides group stop state from real parent; otherwise in wait_consider_task()
1344 * a single stop can be reported twice as group and ptrace stop. in wait_consider_task()
1355 /* we don't reap group leaders with subthreads */ in wait_consider_task()
1362 if (unlikely(ptrace) || likely(!p->ptrace)) in wait_consider_task()
1376 * to clear - this function will be called again in finite in wait_consider_task()
1382 * Stopped state is per-task and thus can't change once the in wait_consider_task()
1386 if (likely(!ptrace) || (wo->wo_flags & (WCONTINUED | WEXITED))) in wait_consider_task()
1387 wo->notask_error = 0; in wait_consider_task()
1393 wo->notask_error = 0; in wait_consider_task()
1413 * Do the work of do_wait() for one thread in the group, @tsk.
1415 * -ECHILD should be in ->notask_error before the first call.
1418 * ->notask_error is 0 if there were any eligible children,
1419 * or still -ECHILD.
1425 list_for_each_entry(p, &tsk->children, sibling) { in do_wait_thread()
1439 list_for_each_entry(p, &tsk->ptraced, ptrace_entry) { in ptrace_do_wait()
1459 if ((wo->wo_flags & __WNOTHREAD) && wait->private != p->parent) in child_wait_callback()
1467 __wake_up_sync_key(&parent->signal->wait_chldexit, in __wake_up_parent()
1475 !ptrace ? target->real_parent : target->parent; in is_effectively_child()
1477 return current == parent || (!(wo->wo_flags & __WNOTHREAD) && in is_effectively_child()
1492 target = pid_task(wo->wo_pid, PIDTYPE_TGID); in do_wait_pid()
1500 target = pid_task(wo->wo_pid, PIDTYPE_PID); in do_wait_pid()
1501 if (target && target->ptrace && in do_wait_pid()
1515 trace_sched_process_wait(wo->wo_pid); in do_wait()
1517 init_waitqueue_func_entry(&wo->child_wait, child_wait_callback); in do_wait()
1518 wo->child_wait.private = current; in do_wait()
1519 add_wait_queue(&current->signal->wait_chldexit, &wo->child_wait); in do_wait()
1523 * We will clear ->notask_error to zero if we see any child that in do_wait()
1527 wo->notask_error = -ECHILD; in do_wait()
1528 if ((wo->wo_type < PIDTYPE_MAX) && in do_wait()
1529 (!wo->wo_pid || !pid_has_task(wo->wo_pid, wo->wo_type))) in do_wait()
1535 if (wo->wo_type == PIDTYPE_PID) { in do_wait()
1551 if (wo->wo_flags & __WNOTHREAD) in do_wait()
1558 retval = wo->notask_error; in do_wait()
1559 if (!retval && !(wo->wo_flags & WNOHANG)) { in do_wait()
1560 retval = -ERESTARTSYS; in do_wait()
1568 remove_wait_queue(&current->signal->wait_chldexit, &wo->child_wait); in do_wait()
1583 return -EINVAL; in kernel_waitid()
1585 return -EINVAL; in kernel_waitid()
1594 return -EINVAL; in kernel_waitid()
1601 return -EINVAL; in kernel_waitid()
1611 return -EINVAL; in kernel_waitid()
1619 return -EINVAL; in kernel_waitid()
1632 ret = -EAGAIN; in kernel_waitid()
1650 return -EFAULT; in SYSCALL_DEFINE5()
1656 return -EFAULT; in SYSCALL_DEFINE5()
1658 unsafe_put_user(signo, &infop->si_signo, Efault); in SYSCALL_DEFINE5()
1659 unsafe_put_user(0, &infop->si_errno, Efault); in SYSCALL_DEFINE5()
1660 unsafe_put_user(info.cause, &infop->si_code, Efault); in SYSCALL_DEFINE5()
1661 unsafe_put_user(info.pid, &infop->si_pid, Efault); in SYSCALL_DEFINE5()
1662 unsafe_put_user(info.uid, &infop->si_uid, Efault); in SYSCALL_DEFINE5()
1663 unsafe_put_user(info.status, &infop->si_status, Efault); in SYSCALL_DEFINE5()
1668 return -EFAULT; in SYSCALL_DEFINE5()
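
The unsafe_put_user() sequence above fills in the siginfo that waitid() returns; note that si_status carries the full exit code (the shifted value stored by wait_task_zombie()), unlike the packed status wait4() reports. Reading it from userspace:

#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	siginfo_t info = { 0 };
	pid_t pid = fork();

	if (pid == 0)
		_exit(3);

	if (waitid(P_PID, pid, &info, WEXITED) == 0)
		printf("si_pid=%d si_code=%s si_status=%d\n",
		       (int)info.si_pid,
		       info.si_code == CLD_EXITED ? "CLD_EXITED" : "other",
		       info.si_status);	/* 3, unshifted */
	return 0;
}
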
1681 return -EINVAL; in kernel_wait4()
1683 /* -INT_MIN is not defined */ in kernel_wait4()
1685 return -ESRCH; in kernel_wait4()
1687 if (upid == -1) in kernel_wait4()
1691 pid = find_get_pid(-upid); in kernel_wait4()
1709 ret = -EFAULT; in kernel_wait4()
1738 return -EFAULT; in SYSCALL_DEFINE4()
1767 return -EFAULT; in COMPAT_SYSCALL_DEFINE4()
1791 return -EFAULT; in COMPAT_SYSCALL_DEFINE5()
1799 return -EFAULT; in COMPAT_SYSCALL_DEFINE5()
1801 unsafe_put_user(signo, &infop->si_signo, Efault); in COMPAT_SYSCALL_DEFINE5()
1802 unsafe_put_user(0, &infop->si_errno, Efault); in COMPAT_SYSCALL_DEFINE5()
1803 unsafe_put_user(info.cause, &infop->si_code, Efault); in COMPAT_SYSCALL_DEFINE5()
1804 unsafe_put_user(info.pid, &infop->si_pid, Efault); in COMPAT_SYSCALL_DEFINE5()
1805 unsafe_put_user(info.uid, &infop->si_uid, Efault); in COMPAT_SYSCALL_DEFINE5()
1806 unsafe_put_user(info.status, &infop->si_status, Efault); in COMPAT_SYSCALL_DEFINE5()
1811 return -EFAULT; in COMPAT_SYSCALL_DEFINE5()
1816 * thread_group_exited - check that a thread group has exited
1817 * @pid: tgid of thread group to be checked.
1819 * Test if the thread group represented by tgid has exited (all
1822 * Return: true if the thread group has exited. false otherwise.
1832 (READ_ONCE(task->exit_state) && thread_group_empty(task)); in thread_group_exited()
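
thread_group_exited() is what pidfd polling checks: a pidfd becomes readable only once every thread in the group is gone. A Linux demo using the raw pidfd_open syscall (kernel 5.3+ and matching headers assumed; a glibc wrapper may not exist):

#define _GNU_SOURCE
#include <poll.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	pid_t pid = fork();
	if (pid == 0) {
		sleep(1);
		_exit(0);
	}

	int pidfd = (int)syscall(SYS_pidfd_open, pid, 0);
	struct pollfd pfd = { .fd = pidfd, .events = POLLIN };

	poll(&pfd, 1, -1);	/* blocks until the thread group has exited */
	puts("thread group exited");

	close(pidfd);
	waitpid(pid, NULL, 0);	/* the zombie still needs reaping */
	return 0;
}
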