/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows the strings to be flushed from another
 * CPU. This is needed when we want to be sure that all buffers have been
 * handled, or when IRQs are blocked on the CPU that owns a buffer.
 */
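
/*
 * Sketch of the flow, as implemented below (illustrative only): a
 * printk() issued in NMI context takes roughly this path:
 *
 *	printk() -> vprintk_func()	  sees PRINTK_NMI_CONTEXT_MASK set
 *	  -> vprintk_nmi()		  picks this CPU's nmi_print_seq
 *	    -> printk_safe_log_store()	  appends the formatted string and
 *					  queues the flushing IRQ work
 *
 *	...later, outside NMI context...
 *	__printk_safe_flush()		  feeds the buffered lines into the
 *					  main log via printk_deferred()
 */
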
static int printk_safe_irq_ready __read_mostly;

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))

struct printk_safe_seq_buf {
	atomic_t		len;	/* length of written data */
	atomic_t		message_lost;
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[SAFE_LOG_BUF_LEN];
};
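
/*
 * Note on sizing: SAFE_LOG_BUF_LEN is chosen so that, padding aside, one
 * struct printk_safe_seq_buf occupies 1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT
 * bytes in total. Assuming the usual Kconfig default of 13, that is 8 KiB
 * per buffer, per CPU (and a second 8 KiB buffer with CONFIG_PRINTK_NMI).
 */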

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

/* Get flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_safe_irq_ready)
		irq_work_queue(&s->work);
}

/*
 * Add a message to the per-CPU context-dependent buffer. NMI and
 * printk-safe have dedicated buffers, because otherwise printk-safe
 * preempted by an NMI-printk would have overwritten the NMI messages.
 *
 * The messages are flushed from IRQ work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should
 * this happen, printk_safe_log_store() will notice the buffer->len
 * mismatch and repeat the write.
 */
static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
						const char *fmt, va_list args)
{
	int add;
	size_t len;
	va_list ap;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted into len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we merely append new data.
	 */
	if (!len)
		smp_rmb();

	va_copy(ap, args);
	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
	va_end(ap);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}
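
/*
 * A concrete (illustrative) run of the retry above: the writer reads
 * len == 100 and formats its message at offset 100. Meanwhile the
 * flusher on another CPU empties the buffer and resets len to 0. The
 * atomic_cmpxchg(&s->len, 100, 100 + add) then fails, and the writer
 * re-formats the message, this time at offset 0.
 */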

static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk_safe context (when in panic). The messages
	 * must go only into the ring buffer at this stage. Consoles will
	 * get explicitly called later, when a crashdump is not generated.
	 */
	printk_deferred("%.*s", len, text);
}

/* printk part of the temporary buffer line by line */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuation lines or a missing newline. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore a pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}
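
/*
 * Worked example (assumed input): flushing the blob
 * "\0014Hi\nplain\0016Bye" emits "\0014Hi\n" (a complete line), then
 * "plain" (cut short because the following KERN_SOH header starts a new
 * message), then "\0016Bye", and finally a KERN_CONT "\n" that closes
 * the trailing partial line.
 */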

static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to @len. */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has got added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&read_lock, flags);
}

/**
 * printk_safe_flush - flush all per-cpu printk_safe buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}
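
/*
 * One such caller, to give an example (at the time of writing), is
 * nmi_trigger_cpumask_backtrace() in lib/nmi_backtrace.c: after waiting
 * for all CPUs to print their backtraces from NMI context, it calls
 * printk_safe_flush() to push out buffers whose IRQ work could not yet
 * run on the remote CPUs.
 */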

/**
 * printk_safe_flush_on_panic - flush all per-cpu nmi buffers when the system
 *	goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context when
 * the system goes down. It makes a best effort to get NMI messages into
 * the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we can access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	printk_safe_flush();
}

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

void notrace printk_nmi_enter(void)
{
	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}

void notrace printk_nmi_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
}

/*
 * Marks a code section that might produce many messages in NMI context,
 * where the risk of losing them is more critical than the eventual
 * reordering.
 *
 * It has effect only when called in NMI context. Then printk()
 * will try to store the messages into the main logbuf directly
 * and use the per-CPU buffers only as a fallback when the lock
 * is not available.
 */
void printk_nmi_direct_enter(void)
{
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
}

void printk_nmi_direct_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
}
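
/*
 * A typical (assumed) user is the NMI backtrace dumper, which prints many
 * lines per CPU and prefers complete output over perfect ordering:
 *
 *	printk_nmi_direct_enter();
 *	pr_warn("NMI backtrace for cpu %d\n", cpu);
 *	dump_stack();
 *	printk_nmi_direct_exit();
 */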

#else

static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * NMI.
 */
static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}

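/*
 * __printk_safe_enter()/__printk_safe_exit() are, presumably, used via the
 * printk_safe_enter_irqsave()/printk_safe_exit_irqrestore() helpers around
 * the logbuf_lock critical sections in printk.c.
 *
 * vprintk_func() below dispatches by priority, roughly:
 *
 *	NMI + direct:	try logbuf_lock directly, else fall through
 *	NMI:		per-CPU NMI buffer
 *	printk-safe:	per-CPU safe buffer
 *	otherwise:	plain vprintk_default()
 */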
__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
	/*
	 * Try to use the main logbuf even in NMI. But avoid calling console
	 * drivers that might have their own locks.
	 */
	if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
	    raw_spin_trylock(&logbuf_lock)) {
		int len;

		len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
		raw_spin_unlock(&logbuf_lock);
		defer_console_output();
		return len;
	}

	/* Use the extra buffer in NMI when logbuf_lock is taken or in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	/* Use the extra buffer to prevent a recursion deadlock in safe mode. */
	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	/* No obstacles. */
	return vprintk_default(fmt, args);
}

void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/*
	 * In the highly unlikely event that an NMI triggers at this very
	 * moment, make sure the IRQ work is set up before the ready flag
	 * is observed as set.
	 */
	barrier();
	printk_safe_irq_ready = 1;

	/* Flush pending messages that did not have scheduled IRQ works. */
	printk_safe_flush();
}