Lines Matching +full:lock +full:- +full:latency +full:- +full:ns

1 /* SPDX-License-Identifier: GPL-2.0 */
34 #include <linux/posix-timers.h>
77 * We have two separate sets of flags: task->state
78 * is about runnability, while task->exit_state are
84 /* Used in tsk->state: */
90 /* Used in tsk->exit_state: */
94 /* Used in tsk->state again: */
107 #define TASK_ANY (TASK_STATE_MAX-1)
130 #define task_is_running(task) (READ_ONCE((task)->__state) == TASK_RUNNING)
132 #define task_is_traced(task) ((READ_ONCE(task->jobctl) & JOBCTL_TRACED) != 0)
133 #define task_is_stopped(task) ((READ_ONCE(task->jobctl) & JOBCTL_STOPPED) != 0)
134 #define task_is_stopped_or_traced(task) ((READ_ONCE(task->jobctl) & (JOBCTL_STOPPED | JOBCTL_TRACED)) != 0)
137 * Special states are those that do not use the normal wait-loop pattern. See
147 current->task_state_change = _THIS_IP_; \
153 current->task_state_change = _THIS_IP_; \
158 current->saved_state_change = current->task_state_change;\
159 current->task_state_change = _THIS_IP_; \
164 current->task_state_change = current->saved_state_change;\
175 * set_current_state() includes a barrier so that the write of current->state
189 * CONDITION test and condition change and wakeup are under the same lock) then
198 * accessing p->state.
200 * Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
214 WRITE_ONCE(current->__state, (state_value)); \
220 smp_store_mb(current->__state, (state_value)); \
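The comments above describe the canonical wait-loop that set_current_state() and the wakeup path are built around. A minimal sketch of that pattern, assuming a hypothetical `done` flag and a waiter/waker pair (not part of sched.h):

/* Waiter side: the wait-loop described above. The barrier in
 * set_current_state() orders the state store against the condition load.
 */
for (;;) {
	set_current_state(TASK_UNINTERRUPTIBLE);
	if (READ_ONCE(done))
		break;
	schedule();
}
__set_current_state(TASK_RUNNING);

/* Waker side: change the condition first, then wake.
 * ttwu() then does: if (state & p->__state) p->__state = TASK_RUNNING.
 */
WRITE_ONCE(done, 1);
wake_up_process(p);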
225 * can not use the regular condition based wait-loop. In that case we must
226 * serialize against wakeups such that any possible in-flight TASK_RUNNING
233 raw_spin_lock_irqsave(&current->pi_lock, flags); \
235 WRITE_ONCE(current->__state, (state_value)); \
236 raw_spin_unlock_irqrestore(&current->pi_lock, flags); \
243 * task when blocking on the lock is saved in task_struct::saved_state and
244 * restored after the lock has been acquired. These operations are
246 * lock related wakeups while the task is blocked on the lock are
251 * The lock operation looks like this:
257 * raw_spin_unlock_irq(&lock->wait_lock);
259 * raw_spin_lock_irq(&lock->wait_lock);
267 raw_spin_lock(&current->pi_lock); \
268 current->saved_state = current->__state; \
270 WRITE_ONCE(current->__state, TASK_RTLOCK_WAIT); \
271 raw_spin_unlock(&current->pi_lock); \
277 raw_spin_lock(&current->pi_lock); \
279 WRITE_ONCE(current->__state, current->saved_state); \
280 current->saved_state = TASK_RUNNING; \
281 raw_spin_unlock(&current->pi_lock); \
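The two helpers above bracket the PREEMPT_RT "sleeping spinlock" slowpath sketched in the comment block before them. Expanded into a hedged sketch (try_lock() is a placeholder, exactly as in that comment; the real code lives in kernel/locking/rtmutex.c):

raw_spin_lock_irq(&lock->wait_lock);
current_save_and_set_rtlock_wait_state();	/* __state -> TASK_RTLOCK_WAIT, old value saved */
for (;;) {
	if (try_lock())				/* placeholder for the real trylock */
		break;
	raw_spin_unlock_irq(&lock->wait_lock);
	schedule_rtlock();			/* sleep until the lock release wakes us */
	raw_spin_lock_irq(&lock->wait_lock);
	set_current_state(TASK_RTLOCK_WAIT);	/* re-set the state the wakeup cleared */
}
current_restore_rtlock_saved_state();		/* put the original sleep state back */
raw_spin_unlock_irq(&lock->wait_lock);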
284 #define get_current_state() READ_ONCE(current->__state)
316 * struct prev_cputime - snapshot of system and user cputime
319 * @lock: protects the above two fields
328 raw_spinlock_t lock;
413 * struct util_est - Estimation utilization of FAIR tasks
425 * - task: the task's util_avg at last task dequeue time
426 * - cfs_rq: the sum of util_est.enqueued for each RUNNABLE task on that CPU
481 * The 64-bit load_sum can have 4353082796 (=2^64/47742/88761) entities
485 * For all other cases (including 32-bit kernels), struct load_weight's
548 /* For load-balancing: */
568 /* cached value of my_q->h_nr_running */
577 * collide with read-mostly values above.
650 * Bandwidth enforcement timer. Each -deadline task has its
657 * at the "0-lag time". When a -deadline task blocks, it contributes
658 * to GRUB's active utilization until the "0-lag time", hence a
683 * @user_defined: the requested clamp value comes from user-space
686 * which is pre-computed and stored to avoid expensive integer divisions from
690 * which can be different from the clamp value "requested" from user-space.
694 * The user_defined bit is set whenever a task has got a task-specific clamp
697 * restrictive task-specific value has been requested, thus allowing to
720 perf_invalid_context = -1,
754 * scheduling-critical items should be added above here.
902 * queueing no longer being serialized by p->on_cpu. However:
904 * p->XXX = X; ttwu()
905 * schedule() if (p->on_rq && ..) // false
906 * smp_mb__after_spinlock(); if (smp_load_acquire(&p->on_cpu) && //true
908 * p->on_rq = 0; p->sched_remote_wakeup = Y;
911 * ->sched_remote_wakeup gets used, so it can be in this word.
932 /* disallow userland-initiated cgroup migration */
971 /* Canary value for the -fstack-protector GCC feature: */
976 * older sibling, respectively. (p->father can be replaced with
977 * p->real_parent->pid)
997 * 'ptrace_entry' is this task's link on the p->parent->ptraced list.
1044 /* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
1074 * - normally initialized setup_new_exec()
1075 * - access it with [gs]et_task_comm()
1076 * - lock it with task_lock()
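Per the rules above, the comm field should be read through the accessor rather than directly from another task. A hedged usage sketch (`task` is an illustrative task_struct pointer; the buffer must be a local array so sizeof() works inside get_task_comm()):

char comm[TASK_COMM_LEN];

get_task_comm(comm, task);	/* copies under task_lock(), NUL-terminated */
pr_info("task %d is running %s\n", task_pid_nr(task), comm);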
1131 /* Protection against (de-)allocation: mm, files, fs, tty, keyrings, mems_allowed, mempolicy: */
1142 /* Updated under owner's pi_lock and rq lock */
1218 /* Protected by ->alloc_lock: */
1228 /* cg_list protected by css_set_lock and tsk->alloc_lock: */
1275 * - RCU read-side critical section
1276 * - current->numa_group from everywhere
1277 * - task's runqueue locked, task not running
1286 * faults_memory: Exponential decaying average of faults on a per-node
1345 /* Start of a write-and-pause period: */
1523 * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
1537 /* CPU-specific state of this task: */
1541 * WARNING: on x86, 'thread_struct' contains a variable-sized
1550 return task->thread_pid; /* in task_pid() */
1560 * task_xid_nr_ns() : id seen from the ns specified;
1564 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type, struct pid_namespace *ns);
1568 return tsk->pid; /* in task_pid_nr() */
1571 static inline pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1573 return __task_pid_nr_ns(tsk, PIDTYPE_PID, ns); /* in task_pid_nr_ns() */
1584 return tsk->tgid; /* in task_tgid_nr() */
1588 * pid_alive - check that a task structure is not stale
1599 return p->thread_pid != NULL; /* in pid_alive() */
1602 static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1604 return __task_pid_nr_ns(tsk, PIDTYPE_PGID, ns); /* in task_pgrp_nr_ns() */
1613 static inline pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1615 return __task_pid_nr_ns(tsk, PIDTYPE_SID, ns); /* in task_session_nr_ns() */
1623 static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
1625 return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns); /* in task_tgid_nr_ns() */
1633 static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
1639 pid = task_tgid_nr_ns(rcu_dereference(tsk->real_parent), ns); /* in task_ppid_nr_ns() */
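The *_nr_ns() helpers above all funnel into __task_pid_nr_ns() and return 0 when the task has no mapping in the given namespace. A hedged usage sketch (the wrapper function and its names are illustrative, not kernel API; task_active_pid_ns() comes from <linux/pid_namespace.h>):

/* Report @task's TGID as seen from @observer's pid namespace. */
static pid_t tgid_as_seen_by(struct task_struct *task, struct task_struct *observer)
{
	struct pid_namespace *ns;
	pid_t nr;

	rcu_read_lock();
	ns = task_active_pid_ns(observer);
	nr = task_tgid_nr_ns(task, ns);	/* 0 if @task is not visible in @ns */
	rcu_read_unlock();

	return nr;
}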
1682 return __task_state_index(READ_ONCE(tsk->__state), tsk->exit_state); /* in task_state_index() */
1689 BUILD_BUG_ON(1 + ilog2(TASK_REPORT_MAX) != sizeof(state_char) - 1); /* in task_index_to_char() */
1700 * is_global_init - check if a task structure is init. Since init
1701 * is free to have sub-threads we need to check tgid.
1726 #define PF_SUPERPRIV 0x00000100 /* Used super-user privileges */
1753 * Only the _current_ task can read/write to tsk->flags, but other
1754 * tasks can access tsk->flags in readonly mode for example
1758 * child->flags of its traced child (same goes for fork, the parent
1759 * can write to the child->flags), because we're guaranteed the
1760 * child is not running and in turn not changing child->flags
1763 #define clear_stopped_child_used_math(child) do { (child)->flags &= ~PF_USED_MATH; } while (0)
1764 #define set_stopped_child_used_math(child) do { (child)->flags |= PF_USED_MATH; } while (0)
1769 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= (condition) ? PF_USED_MATH : 0; } while (0)
1774 do { (child)->flags &= ~PF_USED_MATH, (child)->flags |= current->flags & PF_USED_MATH; } while (0)
1777 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
1783 return (current->flags & PF_NO_SETAFFINITY) &&
1784 (current->nr_cpus_allowed == 1); /* in is_percpu_thread() */
1790 /* Per-process atomic flags. */
1802 { return test_bit(PFA_##name, &p->atomic_flags); }
1806 { set_bit(PFA_##name, &p->atomic_flags); }
1810 { clear_bit(PFA_##name, &p->atomic_flags); }
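The three generator macros above expand into per-flag helpers. As a sketch of the expansion, an invocation such as TASK_PFA_TEST(SPREAD_PAGE, spread_page) together with its SET/CLEAR counterparts produces:

static inline bool task_spread_page(struct task_struct *p)
{ return test_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }

static inline void task_set_spread_page(struct task_struct *p)
{ set_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }

static inline void task_clear_spread_page(struct task_struct *p)
{ clear_bit(PFA_SPREAD_PAGE, &p->atomic_flags); }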
1844 current->flags &= ~flags;
1845 current->flags |= orig_flags & flags;
1865 return -EINVAL; /* in set_cpus_allowed_ptr() */
1870 if (src->user_cpus_ptr) /* in dup_user_cpus_ptr() */
1871 return -EINVAL; /* in dup_user_cpus_ptr() */
1876 WARN_ON(p->user_cpus_ptr); /* in release_user_cpus_ptr() */
1890 * task_nice - return the nice value of a given task.
1893 * Return: The nice value [ -20 ... 0 ... 19 ].
1897 return PRIO_TO_NICE((p)->static_prio); /* in task_nice() */
1914 * is_idle_task - is the specified task an idle task?
1921 return !!(p->flags & PF_IDLE); /* in is_idle_task() */
1946 # define task_thread_info(task) (&(task)->thread_info)
1948 # define task_thread_info(task) ((struct thread_info *)(task)->stack)
1963 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns);
2063 * cond_resched() and cond_resched_lock(): latency reduction via
2108 extern int __cond_resched_lock(spinlock_t *lock);
2109 extern int __cond_resched_rwlock_read(rwlock_t *lock);
2110 extern int __cond_resched_rwlock_write(rwlock_t *lock);
2113 #define MIGHT_RESCHED_PREEMPT_MASK ((1U << MIGHT_RESCHED_RCU_SHIFT) - 1)
2117 * Non RT kernels have an elevated preempt count due to the held lock,
2124 * cond_resched*lock() has to take that into account because it checks for
2131 #define cond_resched_lock(lock) ({ \
2133 __cond_resched_lock(lock); \
2136 #define cond_resched_rwlock_read(lock) ({ \
2138 __cond_resched_rwlock_read(lock); \
2141 #define cond_resched_rwlock_write(lock) ({ \
2143 __cond_resched_rwlock_write(lock); \
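cond_resched_lock() and the rwlock variants are for long-running loops executed under a spinlock: if a reschedule is due or the lock is contended, they drop the lock, reschedule, and retake it, returning nonzero in that case. A hedged usage sketch (the lock, list and helpers are illustrative names):

struct my_item *entry, *tmp;

spin_lock(&my_lock);
list_for_each_entry_safe(entry, tmp, &my_list, node) {
	process_entry(entry);
	if (cond_resched_lock(&my_lock)) {
		/* The lock was dropped and re-acquired: the list may have
		 * changed under us, so bail out and let the caller restart.
		 */
		break;
	}
}
spin_unlock(&my_lock);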
2184 * Does the preemption model allow non-cooperative preemption?
2199 * but a general need for low latency)
2201 static inline int spin_needbreak(spinlock_t *lock)
2204 return spin_is_contended(lock); /* in spin_needbreak() */
2212 * Returns non-zero if there is another task waiting on the rwlock.
2213 * Returns zero if the lock is not contended or the system / underlying
2216 * for low latency.
2218 static inline int rwlock_needbreak(rwlock_t *lock)
2221 return rwlock_is_contended(lock); /* in rwlock_needbreak() */
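spin_needbreak()/rwlock_needbreak() only report contention (and only on preemptible kernels, as the comments above note); the caller is responsible for actually dropping the lock. A hedged sketch of the open-coded lock-break pattern that cond_resched_lock() bundles up (helpers are illustrative):

spin_lock(&my_lock);
while (more_work()) {
	do_one_chunk();
	if (need_resched() || spin_needbreak(&my_lock)) {
		spin_unlock(&my_lock);
		cond_resched();		/* let the waiter or a higher-priority task run */
		spin_lock(&my_lock);
	}
}
spin_unlock(&my_lock);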
2233 * Wrappers for p->thread_info->cpu access. No-op on UP.
2239 return READ_ONCE(task_thread_info(p)->cpu); /* in task_cpu() */
2262 * In order to reduce various lock holder preemption latencies provide an
2266 * the native optimistic spin heuristic of testing if the lock owner task is
2287 * As lock holder preemption issue, we both skip spinning if
2290 return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner)); /* in owner_on_cpu() */
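owner_on_cpu() is the building block for optimistic spin-on-owner: keep spinning only while the lock owner is running on a CPU whose vCPU has not been preempted. A hedged sketch of how a spin-wait loop can use it (struct my_lock and lock_owner() are illustrative; the real users are the mutex/rwsem/rtmutex spin-on-owner paths):

static bool spin_on_owner(struct my_lock *lock)
{
	struct task_struct *owner;
	bool keep_spinning = true;

	rcu_read_lock();			/* keep the owner task_struct stable */
	owner = lock_owner(lock);		/* illustrative accessor */
	while (owner && owner_on_cpu(owner)) {
		if (need_resched()) {		/* stop spinning if we should yield */
			keep_spinning = false;
			break;
		}
		cpu_relax();
		owner = lock_owner(lock);	/* re-read: the lock may have been released */
	}
	rcu_read_unlock();

	return keep_spinning;
}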
2300 * Map the event mask on the user-space ABI enum rseq_cs_flags
2317 if (t->rseq) /* in rseq_set_notify_resume() */
2326 if (current->rseq) /* in rseq_handle_notify_resume() */
2334 __set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask); /* in rseq_signal_deliver() */
2342 __set_bit(RSEQ_EVENT_PREEMPT_BIT, &t->rseq_event_mask); /* in rseq_preempt() */
2349 __set_bit(RSEQ_EVENT_MIGRATE_BIT, &t->rseq_event_mask); /* in rseq_migrate() */
2360 t->rseq = NULL; /* in rseq_fork() */
2361 t->rseq_sig = 0; /* in rseq_fork() */
2362 t->rseq_event_mask = 0; /* in rseq_fork() */
2364 t->rseq = current->rseq; /* in rseq_fork() */
2365 t->rseq_sig = current->rseq_sig; /* in rseq_fork() */
2366 t->rseq_event_mask = current->rseq_event_mask; /* in rseq_fork() */
2372 t->rseq = NULL; /* in rseq_execve() */
2373 t->rseq_sig = 0; /* in rseq_execve() */
2374 t->rseq_event_mask = 0; /* in rseq_execve() */