Lines Matching +full:lock +full:- +full:latency +full:- +full:ns
1 /* SPDX-License-Identifier: GPL-2.0 */
33 #include <linux/posix-timers.h>
75 * We have two separate sets of flags: task->state
76 * is about runnability, while task->exit_state is
82 /* Used in tsk->state: */
88 /* Used in tsk->exit_state: */
92 /* Used in tsk->state again: */
99 /* RT specific auxiliary flag to mark RT lock waiters */
119 #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
121 #define task_is_traced(task) ((READ_ONCE(task->__state) & __TASK_TRACED) != 0)
123 #define task_is_stopped(task) ((READ_ONCE(task->__state) & __TASK_STOPPED) != 0)
125 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->__state) & (__TASK_STOPPED | __TASK_TRACED)) != 0)
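
The task_is_*() predicates above take a READ_ONCE() snapshot of p->__state and test it against the flag masks, so any answer is only advisory. As a hedged illustration (count_stopped_children() is a made-up helper, not kernel API), the sketch below walks a parent's children under tasklist_lock and counts the ones sitting in a stopped or ptrace-stopped state:

#include <linux/sched.h>
#include <linux/sched/task.h>	/* tasklist_lock */

/*
 * Illustrative sketch only: how many of @parent's children are currently
 * stopped or ptrace-stopped. The predicates are snapshots, so the count
 * can already be stale by the time the caller looks at it.
 */
static int count_stopped_children(struct task_struct *parent)
{
	struct task_struct *child;
	int n = 0;

	read_lock(&tasklist_lock);
	list_for_each_entry(child, &parent->children, sibling)
		if (task_is_stopped_or_traced(child))
			n++;
	read_unlock(&tasklist_lock);

	return n;
}
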
128 * Special states are those that do not use the normal wait-loop pattern. See
138 current->task_state_change = _THIS_IP_; \
144 current->task_state_change = _THIS_IP_; \
149 current->saved_state_change = current->task_state_change;\
150 current->task_state_change = _THIS_IP_; \
155 current->task_state_change = current->saved_state_change;\
166 * set_current_state() includes a barrier so that the write of current->state
180 * CONDITION test and condition change and wakeup are under the same lock) then
189 * accessing p->state.
191 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
205 WRITE_ONCE(current->__state, (state_value)); \
211 smp_store_mb(current->__state, (state_value)); \
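
The comment excerpted above (lines 166-191) describes the canonical wait-loop that set_current_state() is designed for: the waiter publishes its state before testing the condition, and the waker changes the condition before calling wake_up(), so one side always observes the other. Reassembled roughly, with my_event_pending and waiter_task as stand-in names:

/* Waiter: publish the state, then test the condition. */
for (;;) {
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (my_event_pending)			/* stand-in condition */
		break;
	schedule();
}
__set_current_state(TASK_RUNNING);

/* Waker: make the condition true, then wake the task. */
my_event_pending = true;
wake_up_process(waiter_task);
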
216 * cannot use the regular condition-based wait-loop. In that case we must
217 * serialize against wakeups such that any possible in-flight TASK_RUNNING
224 raw_spin_lock_irqsave(&current->pi_lock, flags); \
226 WRITE_ONCE(current->__state, (state_value)); \
227 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
234 * task when blocking on the lock is saved in task_struct::saved_state and
235 * restored after the lock has been acquired. These operations are
237 * lock related wakeups while the task is blocked on the lock are
242 * The lock operation looks like this:
248 * raw_spin_unlock_irq(&lock->wait_lock);
250 * raw_spin_lock_irq(&lock->wait_lock);
258 raw_spin_lock(&current->pi_lock); \
259 current->saved_state = current->__state; \
261 WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
262 raw_spin_unlock(&current->pi_lock); \
268 raw_spin_lock(&current->pi_lock); \
270 WRITE_ONCE(current->__state, current->saved_state); \
271 current->saved_state = TASK_RUNNING; \
272 raw_spin_unlock(&current->pi_lock); \
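
The excerpt at lines 234-272 covers the PREEMPT_RT sleeping-lock slowpath: the task's original state is parked in task_struct::saved_state, TASK_RTLOCK_WAIT is set while it blocks, and the saved state is put back once the lock is taken, with pi_lock serializing against wakeups. Reassembled from that comment (try_lock() here is a stand-in for the real rtmutex trylock), the slowpath looks roughly like:

	current_save_and_set_rtlock_wait_state();
	for (;;) {
		if (try_lock())			/* stand-in */
			break;
		raw_spin_unlock_irq(&lock->wait_lock);
		schedule_rtlock();
		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(TASK_RTLOCK_WAIT);
	}
	current_restore_rtlock_saved_state();
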
275 #define get_current_state() READ_ONCE(current->__state)
302 * struct prev_cputime - snapshot of system and user cputime
305 * @lock: protects the above two fields
314 raw_spinlock_t lock;
399 * struct util_est - Estimation utilization of FAIR tasks
411 * - task: the task's util_avg at last task dequeue time
412 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
467 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
471 * For all other cases (including 32-bit kernels), struct load_weight's
528 /* For load-balancing: */
550 /* cached value of my_q->h_nr_running */
559 * collide with read-mostly values above.
636 * Bandwidth enforcement timer. Each -deadline task has its
643 * at the "0-lag time". When a -deadline task blocks, it contributes
644 * to GRUB's active utilization until the "0-lag time", hence a
669 * @user_defined: the requested clamp value comes from user-space
672 * which is pre-computed and stored to avoid expensive integer divisions from
676 * which can be different from the clamp value "requested" from user-space.
680 * The user_defined bit is set whenever a task has got a task-specific clamp
683 * restrictive task-specific value has been requested, thus allowing to
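
The pre-computed bucket_id mentioned above maps a clamp value in [0..SCHED_CAPACITY_SCALE] onto one of UCLAMP_BUCKETS buckets once, at clamp-update time, so the enqueue/dequeue fast path never has to divide. A sketch of that mapping, modelled on uclamp_bucket_id() in kernel/sched/core.c (the exact form is an assumption and varies by kernel version):

#include <linux/sched.h>
#include <linux/math.h>		/* DIV_ROUND_CLOSEST */

#define EXAMPLE_UCLAMP_BUCKET_DELTA \
	DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)

/* Sketch: pre-compute the bucket index for a requested clamp value. */
static inline unsigned int example_uclamp_bucket_id(unsigned int clamp_value)
{
	return min_t(unsigned int,
		     clamp_value / EXAMPLE_UCLAMP_BUCKET_DELTA,
		     UCLAMP_BUCKETS - 1);
}
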
706 perf_invalid_context = -1,
740 * scheduling-critical items should be added above here.
860 /* Per-thread vma caching: */
892 * queueing no longer being serialized by p->on_cpu. However:
894 * p->XXX = X; ttwu()
895 * schedule() if (p->on_rq && ..) // false
896 * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
898 * p->on_rq = 0; p->sched_remote_wakeup = Y;
901 * ->sched_remote_wakeup gets used, so it can be in this word.
918 /* disallow userland-initiated cgroup migration */
947 /* Canary value for the -fstack-protector GCC feature: */
952 * older sibling, respectively. (p->father can be replaced with
953 * p->real_parent->pid)
973 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
1020 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1050 * - normally initialized by setup_new_exec()
1051 * - access it with [gs]et_task_comm()
1052 * - lock it with task_lock()
1107 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1118 /* Updated under owner's pi_lock and rq lock */
1196 /* Protected by ->alloc_lock: */
1206 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
1253 * - RCU read-side critical section
1254 * - current->numa_group from everywhere
1255 * - task's runqueue locked, task not running
1264 * faults_memory: Exponentially decaying average of faults on a per-node
1323 /* Start of a write-and-pause period: */
1497 /* CPU-specific state of this task: */
1501 * WARNING: on x86, 'thread_struct' contains a variable-sized
1510 return task->thread_pid;
1520 * task_xid_nr_ns() : id seen from the ns specified;
1524 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1528 return tsk->pid;
1531 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1533 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns);
1544 return tsk->tgid;
1548 * pid_alive - check that a task structure is not stale
1559 return p->thread_pid != NULL;
1562 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1564 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns);
1573 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1575 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns);
1583 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1585 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
1593 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1599 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns);
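
As the task_xid_nr_ns() comment above says, these helpers report a task's ids as seen from a specific pid namespace. A sketch of querying a task's pid and tgid relative to the caller's own namespace (report_ids() is a made-up function; the caller is assumed to already hold a reference on @tsk):

#include <linux/sched.h>
#include <linux/pid_namespace.h>

static void report_ids(struct task_struct *tsk)
{
	struct pid_namespace *ns = task_active_pid_ns(current);

	pr_info("pid=%d tgid=%d as seen from the caller's namespace\n",
		task_pid_nr_ns(tsk, ns), task_tgid_nr_ns(tsk, ns));
}
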
1621 unsigned int tsk_state = READ_ONCE(tsk->__state);
1622 unsigned int state = (tsk_state | tsk->exit_state) & TASK_REPORT;
1636 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1);
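
task_state_index() folds __state and exit_state into an index within TASK_REPORT, and task_index_to_char() maps that index to the single letter familiar from ps and /proc ('R', 'S', 'D', 'T', 't', ...). Chained together (get_task_state_char() is a made-up wrapper; fs/proc does much the same thing):

/* Illustrative wrapper around the two helpers above. */
static inline char get_task_state_char(struct task_struct *tsk)
{
	return task_index_to_char(task_state_index(tsk));
}
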
1647 * is_global_init - check if a task structure is init. Since init
1648 * is free to have sub-threads we need to check tgid.
1672 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
1696 * Only the _current_ task can read/write to tsk->flags, but other
1697 * tasks can access tsk->flags in readonly mode for example
1701 * child->flags of its traced child (same goes for fork, the parent
1702 * can write to the child->flags), because we're guaranteed the
1703 * child is not running and in turn not changing child->flags
1706 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1707 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1712 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1717 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1720 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1726 return (current->flags & PF_NO_SETAFFINITY) &&
1727 (current->nr_cpus_allowed == 1);
1733 /* Per-process atomic flags. */
1745 { return test_bit(PFA_##name, &p->atomic_flags); }
1749 { set_bit(PFA_##name, &p->atomic_flags); }
1753 { clear_bit(PFA_##name, &p->atomic_flags); }
1787 current->flags &= ~flags;
1788 current->flags |= orig_flags & flags;
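
These two lines are the body of current_restore_flags(orig_flags, flags): only the bits selected by @flags are restored from @orig_flags, everything else in current->flags is left alone. The usual save/modify/restore idiom looks like this (a sketch using PF_MEMALLOC_NOIO; the memalloc_noio_save()/memalloc_noio_restore() helpers wrap essentially this pattern):

/* Sketch: temporarily set a PF_* bit, then put back the caller's value. */
static void do_work_without_io(void)
{
	unsigned int orig = current->flags & PF_MEMALLOC_NOIO;

	current->flags |= PF_MEMALLOC_NOIO;	/* allocations here avoid I/O */
	/* ... work that must not recurse into the I/O path ... */
	current_restore_flags(orig, PF_MEMALLOC_NOIO);
}
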
1808 return -EINVAL;
1813 if (src->user_cpus_ptr)
1814 return -EINVAL;
1819 WARN_ON(p->user_cpus_ptr);
1833 * task_nice - return the nice value of a given task.
1836 * Return: The nice value [ -20 ... 0 ... 19 ].
1840 return PRIO_TO_NICE((p)->static_prio);
1857 * is_idle_task - is the specified task an idle task?
1864 return !!(p->flags & PF_IDLE);
1891 return &task->thread_info;
1894 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
1909 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
2009 * cond_resched() and cond_resched_lock(): latency reduction via
2046 extern int __cond_resched_lock(spinlock_t *lock);
2047 extern int __cond_resched_rwlock_read(rwlock_t *lock);
2048 extern int __cond_resched_rwlock_write(rwlock_t *lock);
2050 #define cond_resched_lock(lock) ({ \
2052 __cond_resched_lock(lock); \
2055 #define cond_resched_rwlock_read(lock) ({ \
2057 __cond_resched_rwlock_read(lock); \
2060 #define cond_resched_rwlock_write(lock) ({ \
2062 __cond_resched_rwlock_write(lock); \
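
cond_resched_lock() and its rwlock variants exist for long loops that run under a spinlock: each call may drop the lock, reschedule (or just let a contending CPU in), and retake it before returning. A sketch of the usage pattern, with made-up names (my_cache, evict_slot):

/* Sketch: long scan under a spinlock without hogging the CPU or the lock. */
static void shrink_cache(struct my_cache *c)
{
	int i;

	spin_lock(&c->lock);
	for (i = 0; i < c->nr_slots; i++) {
		evict_slot(c, i);		/* hypothetical helper */
		/*
		 * May drop c->lock, reschedule, and retake it: anything
		 * cached across this call must be revalidated.
		 */
		if ((i & 63) == 63)
			cond_resched_lock(&c->lock);
	}
	spin_unlock(&c->lock);
}
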
2077 * but a general need for low latency)
2079 static inline int spin_needbreak(spinlock_t *lock)
2082 return spin_is_contended(lock);
2090 * Returns non-zero if there is another task waiting on the rwlock.
2091 * Returns zero if the lock is not contended or the system / underlying
2094 * for low latency.
2096 static inline int rwlock_needbreak(rwlock_t *lock)
2099 return rwlock_is_contended(lock);
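
spin_needbreak() and rwlock_needbreak() only report that someone else wants the lock (and only on configurations where giving it up early makes sense); the caller still has to do the unlock/resched/relock dance itself. Open-coded, the decision made by __cond_resched_lock() looks roughly like this simplified sketch (not the real implementation):

/* Simplified sketch of the break-out logic. */
static int maybe_break_lock(spinlock_t *lock)
{
	if (!spin_needbreak(lock) && !need_resched())
		return 0;

	spin_unlock(lock);
	cond_resched();
	spin_lock(lock);
	return 1;
}
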
2111 * Wrappers for p->thread_info->cpu access. No-op on UP.
2118 return READ_ONCE(p->cpu);
2120 return READ_ONCE(task_thread_info(p)->cpu);
2142 * In order to reduce various lock holder preemption latencies provide an
2146 * the native optimistic spin heuristic of testing if the lock owner task is
2171 * Map the event mask on the user-space ABI enum rseq_cs_flags
2188 if (t->rseq)
2197 if (current->rseq)
2205 __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
2213 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask);
2220 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask);
2231 t->rseq = NULL;
2232 t->rseq_sig = 0;
2233 t->rseq_event_mask = 0;
2235 t->rseq = current->rseq;
2236 t->rseq_sig = current->rseq_sig;
2237 t->rseq_event_mask = current->rseq_event_mask;
2243 t->rseq = NULL;
2244 t->rseq_sig = 0;
2245 t->rseq_event_mask = 0;