/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2016 Intel Corporation. All rights reserved.
 *
 * Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
 *         Keyon Jie <yang.jie@linux.intel.com>
 */

/*
 * Simple spinlock implementation for SOF.
 */

#ifndef __SOF_SPINLOCK_H__
#define __SOF_SPINLOCK_H__

#include <arch/spinlock.h>
#include <sof/lib/memory.h>
#include <ipc/trace.h>

#include <stdint.h>

/*
 * Lock debugging provides a simple interface to debug deadlocks. The rmbox
 * trace output will show entries like :-
 *
 * 0xd70 [41.306406] delta [0.359638] lock eal
 * 0xd80 [41.306409] delta [0.000002] value 0x00000000000001b7
 * 0xd90 [41.306411] delta [0.000002] value 0x0000000000000001
 * 0xda0 [41.306413] delta [0.000002] value 0x0000000001000348
 *
 * "eal" indicates we are holding a lock with interrupts OFF. The next value
 * is the line number where the lock was acquired. The second number is the
 * number of other locks held whilst this lock is held, and the subsequent
 * numbers list each of those locks and the line number of its holder. e.g. to
 * find the locks :-
 *
 * grep -rn lock --include *.c | grep 840   (search for the lock at line 0x348 = 840)
 * src/drivers/dw-dma.c:840: spinlock_init(&dma->lock);
 *
 * grep -rn lock --include *.c | grep 439
 * src/lib/alloc.c:439: spin_lock_irq(&memmap.lock, flags);
 *
 * Every lock entry and exit shows LcE and LcX in the trace alongside the lock
 * line numbers in hex. e.g.
 *
 * 0xfd60 [11032.730567] delta [0.000004] lock LcE
 * 0xfd70 [11032.730569] delta [0.000002] value 0x00000000000000ae
 *
 * Deadlock can be confirmed in rmbox :-
 *
 * Debug log:
 * debug: 0x0 (00) = 0xdead0007 (-559087609) |....|
 * ....
 * Error log:
 * using 19.20MHz timestamp clock
 * 0xc30 [26.247240] delta [26.245851] lock DED
 * 0xc40 [26.247242] delta [0.000002] value 0x00000000000002b4
 * 0xc50 [26.247244] delta [0.000002] value 0x0000000000000109
 *
 * DED means a deadlock has been detected and the DSP is now halted. The first
 * value after DED is the line number where the deadlock occurred and the
 * second number is the line number where the lock was allocated. These can be
 * grepped as above.
 */

#if CONFIG_DEBUG_LOCKS

#include <sof/debug/panic.h>
#include <sof/trace/trace.h>
#include <ipc/trace.h>
#include <user/trace.h>

/* maximum number of lock holders reported in the debug trace */
#define DBG_LOCK_USERS 8
/* number of failed try_lock attempts before a deadlock is declared */
#define DBG_LOCK_TRIES 10000

/* number of locks currently held in atomic context (interrupts off) */
extern uint32_t lock_dbg_atomic;
/* line numbers identifying each currently held lock */
extern uint32_t lock_dbg_user[DBG_LOCK_USERS];

extern struct tr_ctx sl_tr;

/* panic on deadlock */
#define spin_try_lock_dbg(lock, line) \
        do { \
                int __tries; \
                for (__tries = DBG_LOCK_TRIES; __tries > 0; __tries--) { \
                        if (arch_try_lock(lock)) \
                                break; /* lock acquired */ \
                } \
                if (__tries == 0) { \
                        tr_err_atomic(&sl_tr, "DED"); \
                        tr_err_atomic(&sl_tr, "line: %d", line); \
                        tr_err_atomic(&sl_tr, "user: %d", (lock)->user); \
                        panic(SOF_IPC_PANIC_DEADLOCK); /* lock not acquired */ \
                } \
        } while (0)

#if CONFIG_DEBUG_LOCKS_VERBOSE
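/*
 * spin_lock_log() dumps all locks currently held in atomic context: each
 * "value" entry packs the current atomic lock count into the top byte and
 * the holder's line number into the low bits, matching the
 * "value 0x0000000001000348" style lines shown in the trace example above.
 */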
#define spin_lock_log(lock, line) \
        do { \
                if (lock_dbg_atomic) { \
                        int __i = 0; \
                        int __count = lock_dbg_atomic >= DBG_LOCK_USERS \
                                ? DBG_LOCK_USERS : lock_dbg_atomic; \
                        tr_err_atomic(&sl_tr, "eal"); \
                        tr_err_atomic(&sl_tr, "line: %d", line); \
                        tr_err_atomic(&sl_tr, "dbg_atomic: %d", lock_dbg_atomic); \
                        for (__i = 0; __i < __count; __i++) { \
                                tr_err_atomic(&sl_tr, "value: %d", \
                                              (lock_dbg_atomic << 24) | \
                                              lock_dbg_user[__i]); \
                        } \
                } \
        } while (0)

#define spin_lock_dbg(line) \
        do { \
                tr_info(&sl_tr, "LcE"); \
                tr_info(&sl_tr, "line: %d", line); \
        } while (0)

#define spin_unlock_dbg(line) \
        do { \
                tr_info(&sl_tr, "LcX"); \
                tr_info(&sl_tr, "line: %d", line); \
        } while (0)

#else /* CONFIG_DEBUG_LOCKS_VERBOSE */
#define spin_lock_log(lock, line) do {} while (0)
#define spin_lock_dbg(line) do {} while (0)
#define spin_unlock_dbg(line) do {} while (0)
#endif /* CONFIG_DEBUG_LOCKS_VERBOSE */

#else /* CONFIG_DEBUG_LOCKS */

#define trace_lock(__e) do {} while (0)
#define tracev_lock(__e) do {} while (0)

#define spin_lock_dbg(line) do {} while (0)
#define spin_unlock_dbg(line) do {} while (0)

#endif /* CONFIG_DEBUG_LOCKS */

static inline int _spin_try_lock(spinlock_t *lock, int line)
{
        spin_lock_dbg(line);
        return arch_try_lock(lock);
}

#define spin_try_lock(lock) _spin_try_lock(lock, __LINE__)
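
/*
 * Usage sketch (illustrative only; "dma" and "pending" are hypothetical
 * names): spin_try_lock() returns non-zero when the lock was acquired, so a
 * caller that must not spin can back off instead of blocking:
 *
 *	if (spin_try_lock(&dma->lock)) {
 *		dma->pending++;
 *		spin_unlock(&dma->lock);
 *	}
 */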

/* all SMP spinlocks need init, nothing to do on UP */
static inline void _spinlock_init(spinlock_t *lock, int line)
{
        arch_spinlock_init(lock);
#if CONFIG_DEBUG_LOCKS
        lock->user = line;
#endif
}

#define spinlock_init(lock) _spinlock_init(lock, __LINE__)
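
/*
 * Usage sketch (illustrative only; the struct and function names are
 * hypothetical): a lock is typically embedded in shared driver data and
 * initialised once before any use, as in the dw-dma example above:
 *
 *	struct my_dma {
 *		spinlock_t lock;
 *	};
 *
 *	static int my_dma_probe(struct my_dma *dma)
 *	{
 *		spinlock_init(&dma->lock);
 *		return 0;
 *	}
 */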

/* does nothing on UP systems */
static inline void _spin_lock(spinlock_t *lock, int line)
{
        spin_lock_dbg(line);
#if CONFIG_DEBUG_LOCKS
        spin_lock_log(lock, line);
        spin_try_lock_dbg(lock, line);
#else
        arch_spin_lock(lock);
#endif

        /* spinlock has to be in shared memory */
}

#define spin_lock(lock) _spin_lock(lock, __LINE__)

/* disables all IRQ sources and takes lock - enter atomic context */
uint32_t _spin_lock_irq(spinlock_t *lock);

#define spin_lock_irq(lock, flags) (flags = _spin_lock_irq(lock))

static inline void _spin_unlock(spinlock_t *lock, int line)
{
        arch_spin_unlock(lock);
#if CONFIG_DEBUG_LOCKS
        spin_unlock_dbg(line);
#endif

        /* spinlock has to be in shared memory */
}

#define spin_unlock(lock) _spin_unlock(lock, __LINE__)
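
/*
 * Usage sketch (illustrative only; names are hypothetical): spin_lock() and
 * spin_unlock() bracket a short critical section when the caller already
 * controls the interrupt state:
 *
 *	spin_lock(&dma->lock);
 *	dma->active = 1;
 *	spin_unlock(&dma->lock);
 */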

/* re-enables current IRQ sources and releases lock - leave atomic context */
void _spin_unlock_irq(spinlock_t *lock, uint32_t flags, int line);

#define spin_unlock_irq(lock, flags) _spin_unlock_irq(lock, flags, __LINE__)
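
/*
 * Usage sketch (illustrative only; "memmap" follows the alloc.c example in
 * the comment at the top of this file, the field is hypothetical): the _irq
 * variants disable all IRQ sources for the critical section and restore them
 * on release, so "flags" must be a local uint32_t:
 *
 *	uint32_t flags;
 *
 *	spin_lock_irq(&memmap.lock, flags);
 *	memmap.used += size;
 *	spin_unlock_irq(&memmap.lock, flags);
 */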

#endif /* __SOF_SPINLOCK_H__ */