Lines Matching +full:a +full:- +full:side

1 /* SPDX-License-Identifier: GPL-2.0 */
6 * seqcount_t / seqlock_t - a reader-writer consistency mechanism with
7 * lockless readers (read-only retry loops), and no writer starvation.
12 * - Based on x86_64 vsyscall gettimeofday: Keith Owens, Andrea Arcangeli
13 * - Sequence counters with associated locks, (C) 2020 Linutronix GmbH
17 #include <linux/kcsan-checks.h>
26 * The seqlock seqcount_t interface does not prescribe a precise sequence of
27 * read begin/retry/end. For readers, typically there is a call to
31 * As a consequence, we take the following best-effort approach for raw usage
32 * via seqcount_t under KCSAN: upon beginning a seq-reader critical section,
34 * atomics; if there is a matching read_seqcount_retry() call, no following
45 * Write side critical sections must be serialized and non-preemptible.
52 * as the writer can invalidate a pointer that a reader is following.
55 * locking primitives, use a sequence counter with associated lock
59 * serialization and non-preemptibility requirements, use a sequential
75 * Make sure we are not reinitializing a held lock: in __seqcount_init()
77 lockdep_init_map(&s->dep_map, name, key, 0); in __seqcount_init()
78 s->sequence = 0; in __seqcount_init()
87 * seqcount_init() - runtime initializer for seqcount_t
102 seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_); in seqcount_lockdep_reader_access()
103 seqcount_release(&l->dep_map, _RET_IP_); in seqcount_lockdep_reader_access()
114 * SEQCNT_ZERO() - static initializer for seqcount_t
122 * A sequence counter which associates the lock used for writer
124 * that the write side critical section is properly serialized.
127 * preemption protection is enforced in the write side function.
135 * For PREEMPT_RT, seqcount_LOCKNAME_t write side critical sections cannot
136 * disable preemption. It can lead to higher latencies, and the write side
140 * To remain preemptible while avoiding a possible livelock caused by the
141 * reader preempting the writer, use a different technique: let the reader
142 * detect if a seqcount_LOCKNAME_t writer is in progress. If that is the
144 * lock. This will allow any possibly-preempted writer to make progress
147 * This lock-unlock technique must be implemented for all of PREEMPT_RT
157 * typedef seqcount_LOCKNAME_t - sequence counter with LOCKNAME associated
161 * A plain sequence counter with external writer synchronization by
164 * that the write side critical section is properly serialized.
170 * seqcount_LOCKNAME_init() - runtime initializer for seqcount_LOCKNAME_t
178 seqcount_init(&____s->seqcount); \
179 __SEQ_LOCK(____s->lock = (_lock)); \
188 * SEQCOUNT_LOCKNAME() - Instantiate seqcount_LOCKNAME_t and helpers
189 * seqprop_LOCKNAME_*() - Property accessors for seqcount_LOCKNAME_t
207 return &s->seqcount; \
213 unsigned seq = READ_ONCE(s->seqcount.sequence); \
220 __SEQ_LOCK(lockbase##_unlock(s->lock)); \
223 * Re-read the sequence counter since the (possibly \
226 seq = READ_ONCE(s->seqcount.sequence); \
259 return READ_ONCE(s->sequence); in __seqprop_sequence()
274 …OUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->…
275 SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->l…
276 SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->l…
277 SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->…
280 * SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
312 * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
337 * raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
351 * read_seqcount_begin() - begin a seqcount_t read critical section
363 * raw_read_seqcount() - read the raw seqcount_t counter value
366 * raw_read_seqcount opens a read critical section of the given
383 * raw_seqcount_begin() - begin a seqcount_t read critical section w/o
387 * raw_seqcount_begin opens a read critical section of the given
389 * for the count to stabilize. If a writer is active when it begins, it
394 * small and has a high probability of success through other external
395 * means. It will save a single branching instruction.
409 * __read_seqcount_retry() - end a seqcount_t read section w/o barrier
421 * Return: true if a read section retry is required, else false
429 return unlikely(READ_ONCE(s->sequence) != start); in do___read_seqcount_retry()
433 * read_seqcount_retry() - end a seqcount_t read critical section
441 * Return: true if a read section retry is required, else false
453 * raw_write_seqcount_begin() - start a seqcount_t write section w/o lockdep
469 s->sequence++; in do_raw_write_seqcount_begin()
474 * raw_write_seqcount_end() - end a seqcount_t write section w/o lockdep
490 s->sequence++; in do_raw_write_seqcount_end()
495 * write_seqcount_begin_nested() - start a seqcount_t write section with
500 * See Documentation/locking/lockdep-design.rst
516 seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); in do_write_seqcount_begin_nested()
520 * write_seqcount_begin() - start a seqcount_t write side critical section
523 * Context: sequence counter write side sections must be serialized and
524 * non-preemptible. Preemption will be automatically disabled if and
545 * write_seqcount_end() - end a seqcount_t write side critical section
548 * Context: Preemption will be automatically re-enabled if and only if
561 seqcount_release(&s->dep_map, _RET_IP_); in do_write_seqcount_end()
566 * raw_write_seqcount_barrier() - do a seqcount_t write barrier
571 * the two back-to-back wmb()s.
574 * via WRITE_ONCE): a) to ensure the writes become visible to other threads
577 * neither writes before and after the barrier are enclosed in a seq-writer
612 s->sequence++; in do_raw_write_seqcount_barrier()
614 s->sequence++; in do_raw_write_seqcount_barrier()
619 * write_seqcount_invalidate() - invalidate in-progress seqcount_t read
620 * side operations
623 * After write_seqcount_invalidate, no seqcount_t read side operations
633 s->sequence+=2; in do_write_seqcount_invalidate()
640 * A sequence counter variant where the counter even/odd value is used to
642 * typically NMIs, to safely interrupt the write side critical section.
652 * SEQCNT_LATCH_ZERO() - static initializer for seqcount_latch_t
660 * seqcount_latch_init() - runtime initializer for seqcount_latch_t
663 #define seqcount_latch_init(s) seqcount_init(&(s)->seqcount)
666 * raw_read_seqcount_latch() - pick even/odd latch data copy
669 * See raw_write_seqcount_latch() for details and a full reader/writer
680 * Due to the dependent load, a full smp_rmb() is not needed. in raw_read_seqcount_latch()
682 return READ_ONCE(s->seqcount.sequence); in raw_read_seqcount_latch()
686 * read_seqcount_latch_retry() - end a seqcount_latch_t read section
690 * Return: true if a read section retry is required, else false
695 return read_seqcount_retry(&s->seqcount, start); in read_seqcount_latch_retry()
699 * raw_write_seqcount_latch() - redirect latch readers to even/odd copy
702 * The latch technique is a multiversion concurrency control method that allows
703 * queries during non-atomic modifications. If you can guarantee queries never
704 * interrupt the modification -- e.g. the concurrency is strictly between CPUs
705 * -- you most likely do not need this.
709 * latch allows the same for non-atomic updates. The trade-off is doubling the
714 * there is always one copy in a stable state, ready to give us an answer.
716 * The basic form is a data structure like::
723 * Where a modification, which is assumed to be externally serialized, does the
729 * latch->seq.sequence++;
732 * modify(latch->data[0], ...);
735 * latch->seq.sequence++;
738 * modify(latch->data[1], ...);
741 * The query will have a form like::
749 * seq = raw_read_seqcount_latch(&latch->seq);
752 * entry = data_query(latch->data[idx], ...);
755 * } while (read_seqcount_latch_retry(&latch->seq, seq));
766 * The non-requirement for atomic modifications does _NOT_ include
767 * the publishing of new entries in the case where data is a dynamic
776 * When data is a dynamic data structure; one should use regular RCU
782 s->seqcount.sequence++; in raw_write_seqcount_latch()
790 * and non-preemptibility.
793 * - Comments on top of seqcount_t
794 * - Documentation/locking/seqlock.rst
812 * seqlock_init() - dynamic initializer for seqlock_t
817 spin_lock_init(&(sl)->lock); \
818 seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
822 * DEFINE_SEQLOCK(sl) - Define a statically allocated seqlock_t
829 * read_seqbegin() - start a seqlock_t read side critical section
836 unsigned ret = read_seqcount_begin(&sl->seqcount); in read_seqbegin()
838 kcsan_atomic_next(0); /* non-raw usage, assume closing read_seqretry() */ in read_seqbegin()
844 * read_seqretry() - end a seqlock_t read side section
848 * read_seqretry closes the read side critical section of given seqlock_t.
852 * Return: true if a read section retry is required, else false
862 return read_seqcount_retry(&sl->seqcount, start); in read_seqretry()
866 * For all seqlock_t write side functions, use the internal
872 * write_seqlock() - start a seqlock_t write side critical section
875 * write_seqlock opens a write side critical section for the given
877 * that sequential lock. All seqlock_t write side sections are thus
878 * automatically serialized and non-preemptible.
880 * Context: if the seqlock_t read section, or other write side critical
886 spin_lock(&sl->lock); in write_seqlock()
887 do_write_seqcount_begin(&sl->seqcount.seqcount); in write_seqlock()
891 * write_sequnlock() - end a seqlock_t write side critical section
894 * write_sequnlock closes the (serialized and non-preemptible) write side
899 do_write_seqcount_end(&sl->seqcount.seqcount); in write_sequnlock()
900 spin_unlock(&sl->lock); in write_sequnlock()
904 * write_seqlock_bh() - start a softirqs-disabled seqlock_t write section
907 * _bh variant of write_seqlock(). Use only if the read side section, or
908 * other write side sections, can be invoked from softirq contexts.
912 spin_lock_bh(&sl->lock); in write_seqlock_bh()
913 do_write_seqcount_begin(&sl->seqcount.seqcount); in write_seqlock_bh()
917 * write_sequnlock_bh() - end a softirqs-disabled seqlock_t write section
920 * write_sequnlock_bh closes the serialized, non-preemptible, and
921 * softirqs-disabled, seqlock_t write side critical section opened with
926 do_write_seqcount_end(&sl->seqcount.seqcount); in write_sequnlock_bh()
927 spin_unlock_bh(&sl->lock); in write_sequnlock_bh()
931 * write_seqlock_irq() - start a non-interruptible seqlock_t write section
934 * _irq variant of write_seqlock(). Use only if the read side section, or
939 spin_lock_irq(&sl->lock); in write_seqlock_irq()
940 do_write_seqcount_begin(&sl->seqcount.seqcount); in write_seqlock_irq()
944 * write_sequnlock_irq() - end a non-interruptible seqlock_t write section
947 * write_sequnlock_irq closes the serialized and non-interruptible
948 * seqlock_t write side section opened with write_seqlock_irq().
952 do_write_seqcount_end(&sl->seqcount.seqcount); in write_sequnlock_irq()
953 spin_unlock_irq(&sl->lock); in write_sequnlock_irq()
960 spin_lock_irqsave(&sl->lock, flags); in __write_seqlock_irqsave()
961 do_write_seqcount_begin(&sl->seqcount.seqcount); in __write_seqlock_irqsave()
966 * write_seqlock_irqsave() - start a non-interruptible seqlock_t write
969 * @flags: Stack-allocated storage for saving caller's local interrupt
972 * _irqsave variant of write_seqlock(). Use it only if the read side
979 * write_sequnlock_irqrestore() - end non-interruptible seqlock_t write
984 * write_sequnlock_irqrestore closes the serialized and non-interruptible
990 do_write_seqcount_end(&sl->seqcount.seqcount); in write_sequnlock_irqrestore()
991 spin_unlock_irqrestore(&sl->lock, flags); in write_sequnlock_irqrestore()
995 * read_seqlock_excl() - begin a seqlock_t locking reader section
998 * read_seqlock_excl opens a seqlock_t locking reader critical section. A
1002 * Locking readers act like a normal spin_lock()/spin_unlock().
1012 spin_lock(&sl->lock); in read_seqlock_excl()
1016 * read_sequnlock_excl() - end a seqlock_t locking reader critical section
1021 spin_unlock(&sl->lock); in read_sequnlock_excl()
1025 * read_seqlock_excl_bh() - start a seqlock_t locking reader section with
1030 * seqlock_t write side section, *or other read sections*, can be invoked
1035 spin_lock_bh(&sl->lock); in read_seqlock_excl_bh()
1039 * read_sequnlock_excl_bh() - stop a seqlock_t softirq-disabled locking
1045 spin_unlock_bh(&sl->lock); in read_sequnlock_excl_bh()
1049 * read_seqlock_excl_irq() - start a non-interruptible seqlock_t locking
1054 * write side section, *or other read sections*, can be invoked from a
1059 spin_lock_irq(&sl->lock); in read_seqlock_excl_irq()
1063 * read_sequnlock_excl_irq() - end an interrupts-disabled seqlock_t
1069 spin_unlock_irq(&sl->lock); in read_sequnlock_excl_irq()
1076 spin_lock_irqsave(&sl->lock, flags); in __read_seqlock_excl_irqsave()
1081 * read_seqlock_excl_irqsave() - start a non-interruptible seqlock_t
1084 * @flags: Stack-allocated storage for saving caller's local interrupt
1088 * write side section, *or other read sections*, can be invoked from a
1095 * read_sequnlock_excl_irqrestore() - end non-interruptible seqlock_t
1103 spin_unlock_irqrestore(&sl->lock, flags); in read_sequnlock_excl_irqrestore()
1107 * read_seqbegin_or_lock() - begin a seqlock_t lockless or locking reader
1110 * reader will become a *lockless* seqlock_t reader as in read_seqbegin().
1111 * If the passed value is odd, the reader will become a *locking* reader
1113 * caller *must* initialize and pass an even value to @seq; this way, a
1116 * read_seqbegin_or_lock is an API designed to optimistically try a normal
1119 * itself into a full seqlock_t locking reader.
1122 * (too much retry loops) in the case of a sharp spike in write side
1132 * parameter, which is overloaded as a return parameter. This returned
1146 * need_seqretry() - validate seqlock_t "locking or lockless" read section
1150 * Return: true if a read section retry is required, false otherwise
1158 * done_seqretry() - end seqlock_t "locking or lockless" reader section
1162 * done_seqretry finishes the seqlock_t read side critical section started
1172 * read_seqbegin_or_lock_irqsave() - begin a seqlock_t lockless reader, or
1173 * a non-interruptible locking reader
1185 * 1. The saved local interrupts state in case of a locking reader, to
1189 * overloaded as a return parameter. Check read_seqbegin_or_lock().
1205 * done_seqretry_irqrestore() - end a seqlock_t lockless reader, or a
1206 * non-interruptible locking reader section
1209 * @flags: Caller's saved local interrupt state in case of a locking