/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Runtime locking correctness validator
 *
 *  Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
 *
 * see Documentation/locking/lockdep-design.rst for more details.
 */
#ifndef __LINUX_LOCKDEP_TYPES_H
#define __LINUX_LOCKDEP_TYPES_H

#include <linux/types.h>

#define MAX_LOCKDEP_SUBCLASSES		8UL

enum lockdep_wait_type {
	LD_WAIT_INV = 0,	/* not checked, catch all */

	LD_WAIT_FREE,		/* wait free, rcu etc.. */
	LD_WAIT_SPIN,		/* spin loops, raw_spinlock_t etc.. */

#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
	LD_WAIT_CONFIG,		/* preemptible in PREEMPT_RT, spinlock_t etc.. */
#else
	LD_WAIT_CONFIG = LD_WAIT_SPIN,
#endif
	LD_WAIT_SLEEP,		/* sleeping locks, mutex_t etc.. */

	LD_WAIT_MAX,		/* must be last */
};
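
/*
 * Illustrative sketch, not part of this header: the wait types order lock
 * primitives by how "heavy" blocking on them may be, and lockdep checks
 * that a lock only nests inside locks of an equal or wider wait type.
 * With CONFIG_PROVE_RAW_LOCK_NESTING=y, for example, the following is
 * flagged, because spinlock_t is LD_WAIT_CONFIG (it sleeps on PREEMPT_RT)
 * while the enclosing raw_spinlock_t critical section is LD_WAIT_SPIN:
 *
 *	raw_spin_lock(&one_raw_lock);
 *	spin_lock(&one_spinlock);	// invalid nesting on PREEMPT_RT
 *	spin_unlock(&one_spinlock);
 *	raw_spin_unlock(&one_raw_lock);
 *
 * "one_raw_lock" and "one_spinlock" are made-up names.
 */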

#ifdef CONFIG_LOCKDEP

/*
 * We'd rather not expose kernel/lockdep_states.h this wide, but we do need
 * the total number of states... :-(
 *
 * XXX_LOCK_USAGE_STATES is the number of lines in lockdep_states.h; for each
 * of those we generate 4 states. Additionally we report on USED and USED_READ.
 */
#define XXX_LOCK_USAGE_STATES		2
#define LOCK_TRACE_STATES		(XXX_LOCK_USAGE_STATES*4 + 2)
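
/*
 * Worked example: kernel/locking/lockdep_states.h currently lists the
 * HARDIRQ and SOFTIRQ states, so XXX_LOCK_USAGE_STATES == 2.  Each state
 * contributes USED_IN, USED_IN_READ, ENABLED and ENABLED_READ bits, and
 * the "+ 2" covers USED and USED_READ: 2 * 4 + 2 == 10 usage traces are
 * kept per lock class.
 */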

/*
 * NR_LOCKDEP_CACHING_CLASSES ... Number of classes
 * cached in the instance of lockdep_map
 *
 * Currently the main class (subclass == 0) and the single-depth subclass
 * are cached in lockdep_map. This optimization mainly targets rq->lock:
 * double_rq_lock() acquires this highly contended lock with single depth.
 */
#define NR_LOCKDEP_CACHING_CLASSES	2
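
/*
 * Illustrative sketch, not part of this header, of how the acquire fast
 * path consults this cache; the real lookup lives in
 * kernel/locking/lockdep.c:
 *
 *	if (subclass < NR_LOCKDEP_CACHING_CLASSES)
 *		class = lock->class_cache[subclass];
 *	if (!class)
 *		class = register_lock_class(lock, subclass, 0);
 */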

/*
 * A lockdep key is associated with each lock object. For static locks we use
 * the lock address itself as the key. Dynamically allocated lock objects can
 * have a statically or dynamically allocated key. Dynamically allocated lock
 * keys must be registered before being used and must be unregistered before
 * the key memory is freed.
 */
struct lockdep_subclass_key {
	char __one_byte;
} __attribute__ ((__packed__));

/* hash_entry is used to keep track of dynamically allocated keys. */
struct lock_class_key {
	union {
		struct hlist_node		hash_entry;
		struct lockdep_subclass_key	subkeys[MAX_LOCKDEP_SUBCLASSES];
	};
};

extern struct lock_class_key __lockdep_no_validate__;
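
/*
 * Illustrative sketch, not part of this header: code that creates lock
 * objects dynamically can point them at a static key, or register a key
 * that lives in dynamically allocated memory.  lockdep_set_class(),
 * lockdep_register_key() and lockdep_unregister_key() are declared in
 * <linux/lockdep.h>; "dev" and "my_driver_key" are made-up names.
 *
 *	static struct lock_class_key my_driver_key;
 *
 *	spin_lock_init(&dev->lock);
 *	lockdep_set_class(&dev->lock, &my_driver_key);
 *
 * or, with a dynamically allocated key:
 *
 *	lockdep_register_key(&dev->key);
 *	lockdep_set_class(&dev->lock, &dev->key);
 *	...
 *	lockdep_unregister_key(&dev->key);	// before freeing dev
 */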

struct lock_trace;

#define LOCKSTAT_POINTS		4

/*
 * The lock-class itself. The order of the structure members matters.
 * reinit_class() zeroes the key member and all subsequent members.
 */
struct lock_class {
	/*
	 * class-hash:
	 */
	struct hlist_node		hash_entry;

	/*
	 * Entry in all_lock_classes when in use. Entry in free_lock_classes
	 * when not in use. Instances that are being freed are on one of the
	 * zapped_classes lists.
	 */
	struct list_head		lock_entry;

	/*
	 * These fields represent a directed graph of lock dependencies,
	 * to every node we attach a list of "forward" and a list of
	 * "backward" graph nodes.
	 */
	struct list_head		locks_after, locks_before;

	const struct lockdep_subclass_key *key;
	unsigned int			subclass;

	/*
	 * Generation counter, when doing certain classes of graph walking,
	 * to ensure that we check one node only once:
	 */
	unsigned int			dep_gen_id;

	/*
	 * IRQ/softirq usage tracking bits:
	 */
	unsigned long			usage_mask;
	const struct lock_trace		*usage_traces[LOCK_TRACE_STATES];

	/*
	 * Version counter to disambiguate lock classes that share a name;
	 * duplicates are reported as "name#2", "name#3", ...:
	 */
	int				name_version;
	const char			*name;

	short				wait_type_inner;
	short				wait_type_outer;

#ifdef CONFIG_LOCK_STAT
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
#endif
} __no_randomize_layout;

#ifdef CONFIG_LOCK_STAT
struct lock_time {
	s64				min;
	s64				max;
	s64				total;
	unsigned long			nr;
};
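
/*
 * Illustrative sketch, not part of this header: each measured delay folds
 * into a lock_time roughly the way the lock_time_inc() helper in
 * kernel/locking/lockdep.c does it:
 *
 *	static void lock_time_inc(struct lock_time *lt, u64 time)
 *	{
 *		if (time > lt->max)
 *			lt->max = time;
 *		if (time < lt->min || !lt->nr)
 *			lt->min = time;
 *		lt->total += time;
 *		lt->nr++;
 *	}
 */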

enum bounce_type {
	bounce_acquired_write,
	bounce_acquired_read,
	bounce_contended_write,
	bounce_contended_read,
	nr_bounce_types,

	bounce_acquired = bounce_acquired_write,
	bounce_contended = bounce_contended_write,
};

struct lock_class_stats {
	unsigned long			contention_point[LOCKSTAT_POINTS];
	unsigned long			contending_point[LOCKSTAT_POINTS];
	struct lock_time		read_waittime;
	struct lock_time		write_waittime;
	struct lock_time		read_holdtime;
	struct lock_time		write_holdtime;
	unsigned long			bounces[nr_bounce_types];
};

struct lock_class_stats lock_stats(struct lock_class *class);
void clear_lock_stats(struct lock_class *class);
#endif
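
/*
 * Illustrative sketch, not part of this header: a consumer such as
 * /proc/lock_stat snapshots the per-class counters with lock_stats() and
 * resets them with clear_lock_stats().  "avg_wait" is a made-up local:
 *
 *	struct lock_class_stats stats = lock_stats(class);
 *	s64 avg_wait = stats.write_waittime.nr ?
 *		div_s64(stats.write_waittime.total, stats.write_waittime.nr) : 0;
 *	clear_lock_stats(class);
 */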

/*
 * Map the lock object (the lock instance) to the lock-class object.
 * This is embedded into specific lock instances:
 */
struct lockdep_map {
	struct lock_class_key		*key;
	struct lock_class		*class_cache[NR_LOCKDEP_CACHING_CLASSES];
	const char			*name;
	short				wait_type_outer; /* can be taken in this context */
	short				wait_type_inner; /* represents this context */
#ifdef CONFIG_LOCK_STAT
	int				cpu;
	unsigned long			ip;
#endif
};
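
/*
 * Illustrative sketch, not part of this header: lock implementations embed
 * the map next to the real lock word, roughly the way raw_spinlock_t does,
 * and then forward every acquire/release to lock_acquire()/lock_release()
 * with the embedded dep_map:
 *
 *	typedef struct raw_spinlock {
 *		arch_spinlock_t		raw_lock;
 *	#ifdef CONFIG_DEBUG_LOCK_ALLOC
 *		struct lockdep_map	dep_map;
 *	#endif
 *	} raw_spinlock_t;
 */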

struct pin_cookie { unsigned int val; };
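
/*
 * Illustrative sketch, not part of this header: the cookie returned when
 * pinning a held lock must be presented again to unpin it, which catches
 * code that drops and retakes the lock behind the holder's back.  "lock"
 * stands for any object embedding a dep_map:
 *
 *	struct pin_cookie cookie = lockdep_pin_lock(lock);
 *	...				// lock must remain held throughout
 *	lockdep_unpin_lock(lock, cookie);
 */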

#else /* !CONFIG_LOCKDEP */

/*
 * The class key takes no space if lockdep is disabled:
 */
struct lock_class_key { };

/*
 * The lockdep_map takes no space if lockdep is disabled:
 */
struct lockdep_map { };

struct pin_cookie { };

#endif /* !CONFIG_LOCKDEP */

#endif /* __LINUX_LOCKDEP_TYPES_H */