/* SPDX-License-Identifier: GPL-2.0 */
/*
 * see Documentation/locking/lockdep-design.rst for more details.
 */
	to->class_cache[i] = NULL;	/* in lockdep_copy_map() */
/*
 * Every lock has a list of other locks that were taken after it.
 * We only grow the list, never remove from it:
 */
	/* used by BFS to record whether "prev -> this" only has -(*R)-> */
	/*
	 * The parent field is used to implement breadth-first search, and
	 * bit 0 is reused to indicate whether the lock has been visited
	 * during BFS.
	 */
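
/*
 * Illustrative sketch (not part of this header), with hypothetical
 * helper names: since bit 0 of the parent pointer doubles as the
 * "visited" flag, BFS needs no separate bitmap. The shape is:
 */
static inline void bfs_mark_visited(struct lock_list *entry,
				    struct lock_list *parent)
{
	entry->parent = (struct lock_list *)((unsigned long)parent | 1UL);
}

static inline bool bfs_is_visited(struct lock_list *entry)
{
	return (unsigned long)entry->parent & 1UL;
}

static inline struct lock_list *bfs_parent(struct lock_list *entry)
{
	return (struct lock_list *)((unsigned long)entry->parent & ~1UL);
}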
/**
 * struct lock_chain - lock dependency chain record
 *
 * @entry: list entry chaining together lock_chains that collide in the
 *         chain-hash table
 */

#define INITIAL_CHAIN_KEY -1
	/*
	 * One-way hash of the dependency chain up to this point. We
	 * hash the hashes step by step as the dependency chain grows.
	 *
	 * We use it for dependency-caching and we skip detection
	 * passes and dependency-updates if there is a cache-hit, so
	 * a unique hash value for every unique dependency path should
	 * be as likely as possible - hence the 64-bit width.
	 */
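
/*
 * Illustrative sketch (not part of this header): the chain key is grown
 * incrementally by mixing each newly acquired lock class index into the
 * running 64-bit hash. The real mixing helper lives in
 * kernel/locking/lockdep.c; the stand-in below only shows the shape.
 */
static inline u64 chain_key_mix(u64 chain_key, u32 class_idx)
{
	chain_key ^= class_idx;
	chain_key *= 0x9E3779B97F4A7C15ULL;	/* 64-bit golden ratio */
	return chain_key ^ (chain_key >> 32);
}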
	/*
	 * class_idx is zero-indexed; it points to the element in
	 * lock_classes this held lock instance belongs to. class_idx is in
	 * the range from 0 to (MAX_LOCKDEP_KEYS-1) inclusive.
	 */
	/*
	 * The lock-stack is unified in that the lock chains of interrupt
	 * contexts nest on top of process context chains, but we 'separate'
	 * the hashes by starting with 0 if we cross into an interrupt
	 * context, and we also do not add cross-context lock
	 * dependencies - the lock usage graph walking covers that area
	 * anyway.
	 */
/*
 * Initialization, self-test and debugging-output methods:
 */
extern void lockdep_reset_lock(struct lockdep_map *lock);
#define LOCKDEP_RECURSION_MASK	(LOCKDEP_OFF - 1)
	current->lockdep_recursion += LOCKDEP_OFF;	/* body of lockdep_off() */
	current->lockdep_recursion -= LOCKDEP_OFF;	/* body of lockdep_on() */
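
/*
 * Illustrative sketch (not part of this header): lockdep_off()/
 * lockdep_on() shift the recursion counter by LOCKDEP_OFF so that code
 * whose locking must not be tracked can disable the validator for the
 * current task. Hypothetical use:
 */
static void dump_debug_state(void)
{
	lockdep_off();
	/* ... take ad-hoc locks the validator should ignore ... */
	lockdep_on();
}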
extern void lockdep_init_map_type(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer, u8 lock_type);

static inline void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner, u8 outer)
{
	lockdep_init_map_type(lock, name, key, subclass, inner, outer, LD_LOCK_NORMAL);
}

static inline void lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass, u8 inner)
{
	lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
}

static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
	struct lock_class_key *key, int subclass)
{
	lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
}
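
/*
 * Illustrative sketch (not part of this header): a home-grown locking
 * primitive embeds a lockdep_map and initializes it with a static key,
 * so all locks initialized at the same site share one class. The type
 * and init macro names are hypothetical.
 */
struct my_locktype {
	atomic_t		state;
	struct lockdep_map	dep_map;
};

#define my_lock_init(l)						\
do {								\
	static struct lock_class_key __key;			\
	atomic_set(&(l)->state, 0);				\
	lockdep_init_map(&(l)->dep_map, #l, &__key, 0);		\
} while (0)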
/*
 * Reinitialize a lock key - for cases where there is special locking or
 * special initialization of locks so that the validator gets the scope
 * of dependencies wrong: they are either too broad (they need a class-split)
 * or they are too narrow (they suffer from a false class-split):
 */
#define lockdep_set_class(lock, key)				\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)
#define lockdep_set_class_and_name(lock, key, name)		\
	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)
#define lockdep_set_class_and_subclass(lock, key, sub)		\
	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
			      (lock)->dep_map.wait_type_inner,	\
			      (lock)->dep_map.wait_type_outer,	\
			      (lock)->dep_map.lock_type)
#define lockdep_set_subclass(lock, sub)					\
	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub, \
			      (lock)->dep_map.wait_type_inner,		\
			      (lock)->dep_map.wait_type_outer,		\
			      (lock)->dep_map.lock_type)
#define lockdep_set_novalidate_class(lock) \
	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
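
/*
 * Illustrative sketch (not part of this header), with a hypothetical
 * driver struct: giving one family of lock instances its own static
 * key splits it out of the default class created by spin_lock_init(),
 * curing false positives from an overly broad class.
 */
static struct lock_class_key mydev_lock_key;

static void mydev_init_locks(struct mydev *dev)
{
	spin_lock_init(&dev->lock);
	lockdep_set_class(&dev->lock, &mydev_lock_key);
}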
#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)

static inline int lockdep_match_key(struct lockdep_map *lock,
				    struct lock_class_key *key)
{
	return lock->key == key;
}
/*
 * Acquire a lock.
 *
 * Values for "read":
 *
 *   0: exclusive (write) acquire
 *   1: read-acquire (no recursion allowed)
 *   2: read-acquire with same-instance recursion allowed
 *
 * Values for "check":
 *
 *   0: simple checks (freeing, held-at-exit-time, etc.)
 *   1: full validation
 */
extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
			 int trylock, int read, int check,
			 struct lockdep_map *nest_lock, unsigned long ip);

extern void lock_release(struct lockdep_map *lock, unsigned long ip);
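
/*
 * Illustrative sketch (not part of this header): how a locking
 * primitive typically brackets its low-level acquire/release with the
 * annotations above. arch_do_lock()/arch_do_unlock() and the lock type
 * are hypothetical; the argument values mean subclass 0, no trylock,
 * exclusive (write) acquire, full validation.
 */
static inline void my_lock(struct my_locktype *l)
{
	lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
	arch_do_lock(l);
}

static inline void my_unlock(struct my_locktype *l)
{
	lock_release(&l->dep_map, _RET_IP_);
	arch_do_unlock(l);
}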
#define LOCK_STATE_UNKNOWN	-1

/*
 * Same "read" as for lock_acquire(), except -1 means any.
 */
extern int lock_is_held_type(const struct lockdep_map *lock, int read);

static inline int lock_is_held(const struct lockdep_map *lock)
{
	return lock_is_held_type(lock, -1);
}

#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))
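
/*
 * Illustrative sketch (not part of this header): lockdep_is_held() is
 * the building block for held-lock assertions, e.g. as the condition
 * of rcu_dereference_protected(). The struct and fields are
 * hypothetical.
 */
static struct item *get_item(struct mydev *dev)
{
	return rcu_dereference_protected(dev->item,
					 lockdep_is_held(&dev->lock));
}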
extern void lock_set_class(struct lockdep_map *lock, const char *name,
			   struct lock_class_key *key, unsigned int subclass,
			   unsigned long ip);

static inline void lock_set_subclass(struct lockdep_map *lock,
				     unsigned int subclass, unsigned long ip)
{
	lock_set_class(lock, lock->name, lock->key, subclass, ip);
}

extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
extern struct pin_cookie lock_pin_lock(struct lockdep_map *lock);
extern void lock_repin_lock(struct lockdep_map *lock, struct pin_cookie);
extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_none_held_once()	\
	lockdep_assert_once(!current->lockdep_depth)
#define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
#define lockdep_pin_lock(l)		lock_pin_lock(&(l)->dep_map)
#define lockdep_repin_lock(l, c)	lock_repin_lock(&(l)->dep_map, (c))
#define lockdep_unpin_lock(l, c)	lock_unpin_lock(&(l)->dep_map, (c))
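
/*
 * Illustrative sketch (not part of this header): pinning asserts that
 * a held lock is not dropped behind the holder's back across a region
 * (the scheduler uses this for the runqueue lock). The queue type and
 * do_work() are hypothetical.
 */
static void process_queue(struct myqueue *q)
{
	struct pin_cookie cookie;

	spin_lock(&q->lock);
	cookie = lockdep_pin_lock(&q->lock);
	/* callees here must not release q->lock ... */
	do_work(q);
	lockdep_unpin_lock(&q->lock, cookie);
	spin_unlock(&q->lock);
}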
/* Stubs for !CONFIG_LOCKDEP builds: */
# define lockdep_init_map_type(lock, name, key, sub, inner, outer, type) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map_wait(lock, name, key, sub, inner) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
		do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
# define lockdep_set_class_and_name(lock, key, name) \
		do { (void)(key); (void)(name); } while (0)
#define lockdep_set_class_and_subclass(lock, key, sub) \
		do { (void)(key); } while (0)
#define lockdep_set_subclass(lock, sub)		do { } while (0)

#define lockdep_set_novalidate_class(lock) do { } while (0)
/*
 * Dummy forward declarations, allowing users to write less ifdef-y code
 * and depend on dead code elimination.
 */
extern void lock_contended(struct lockdep_map *lock, unsigned long ip);
extern void lock_acquired(struct lockdep_map *lock, unsigned long ip);
#define LOCK_CONTENDED(_lock, try, lock)			\
do {								\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		lock(_lock);					\
	}							\
	lock_acquired(&(_lock)->dep_map, _RET_IP_);		\
} while (0)

#define LOCK_CONTENDED_RETURN(_lock, try, lock)			\
({								\
	int ____err = 0;					\
	if (!try(_lock)) {					\
		lock_contended(&(_lock)->dep_map, _RET_IP_);	\
		____err = lock(_lock);				\
	}							\
	if (!____err)						\
		lock_acquired(&(_lock)->dep_map, _RET_IP_);	\
	____err;						\
})
/* CONFIG_LOCK_STAT=n: contention events are not recorded. */
#define LOCK_CONTENDED(_lock, try, lock) \
	lock(_lock)

#define LOCK_CONTENDED_RETURN(_lock, try, lock) \
	lock(_lock)
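
/*
 * Illustrative sketch (not part of this header): a primitive wires its
 * trylock fast path and blocking slow path through LOCK_CONTENDED so
 * that a contention event is recorded only when the fast path fails.
 * my_do_trylock()/my_do_lock() are hypothetical.
 */
static inline void my_lock_annotated(struct my_locktype *l)
{
	lock_acquire(&l->dep_map, 0, 0, 0, 1, NULL, _RET_IP_);
	LOCK_CONTENDED(l, my_do_trylock, my_do_lock);
}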
/*
 * For trivial one-depth nesting of a lock-class, the following
 * global define can be used. (Subsystems with multiple levels
 * of nesting should define their own lock-nesting subclasses.)
 */
#define SINGLE_DEPTH_NESTING			1
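
/*
 * Illustrative sketch (not part of this header): taking two locks of
 * the same class in a fixed order, e.g. when moving an item between
 * two lists; the nested annotation keeps lockdep from flagging the
 * second acquire as a self-deadlock. The list type is hypothetical.
 */
static void move_item(struct mylist *src, struct mylist *dst)
{
	spin_lock(&src->lock);
	spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
	/* ... move the item from src to dst ... */
	spin_unlock(&dst->lock);
	spin_unlock(&src->lock);
}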
/*
 * Map the dependency ops to NOP or to real lockdep ops, depending
 * on the per lock-class debug mode:
 */
# define might_lock(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_read(lock)						\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
# define might_lock_nested(lock, subclass)				\
do {									\
	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
		     _THIS_IP_);					\
	lock_release(&(lock)->dep_map, _THIS_IP_);			\
} while (0)
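
/*
 * Illustrative sketch (not part of this header): a function that takes
 * the lock only on its slow path can still report the potential
 * dependency to lockdep up front, so the rare path is validated on
 * every call. The device struct and fields are hypothetical.
 */
static void maybe_flush(struct mydev *dev)
{
	might_lock(&dev->lock);

	if (!atomic_read(&dev->dirty))	/* fast path: lock avoided */
		return;

	spin_lock(&dev->lock);
	/* ... flush the device state ... */
	spin_unlock(&dev->lock);
}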
/*
 * Acceptable for protecting per-CPU resources accessed from BH.
 * Much like in_softirq() - semantics are ambiguous, use carefully.
 */
# define might_lock(lock)			do { } while (0)
# define might_lock_read(lock)			do { } while (0)
# define might_lock_nested(lock, subclass)	do { } while (0)
# define lockdep_assert_RT_in_threaded_ctx() do {			\
		WARN_ONCE(debug_locks && !current->lockdep_recursion &&	\
			  lockdep_hardirq_context() &&			\
			  !(current->hardirq_threaded || current->irq_config),	\
			  "Not in threaded context on PREEMPT_RT as expected\n");	\
} while (0)