/*
 * printk_safe.c - Safe printk for printk-deadlock-prone contexts
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/preempt.h>
#include <linux/spinlock.h>
#include <linux/debug_locks.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>
#include <linux/printk.h>

#include "internal.h"

/*
 * printk() cannot take logbuf_lock in NMI context. Instead,
 * it uses an alternative implementation that temporarily stores
 * the strings into a per-CPU buffer. The content of the buffer
 * is later flushed into the main ring buffer via IRQ work.
 *
 * The alternative implementation is chosen transparently
 * by examining the current printk() context mask stored in the
 * @printk_context per-CPU variable.
 *
 * The implementation also allows flushing the strings from another CPU.
 * There are situations when we want to make sure that all buffers
 * were handled or when IRQs are blocked.
 */
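/*
 * Message flow, in short: a printk() issued in a restricted context ends
 * up in vprintk_func(), which redirects it to vprintk_nmi() or
 * vprintk_safe(); both store the text via printk_safe_log_store() into
 * the per-CPU buffer, and __printk_safe_flush() later copies it into the
 * main log via printk_deferred() from IRQ work (or from an explicit or
 * panic-time flush).
 */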
static int printk_safe_irq_ready;

#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -	\
				sizeof(atomic_t) -			\
				sizeof(atomic_t) -			\
				sizeof(struct irq_work))

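/*
 * SAFE_LOG_BUF_LEN is chosen so that struct printk_safe_seq_buf below
 * fits (roughly, ignoring padding) into 2^CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT
 * bytes: the two atomic_t counters and the irq_work are subtracted and
 * the rest is left for the text buffer.
 */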
struct printk_safe_seq_buf {
	atomic_t		len;	/* length of written data */
	atomic_t		message_lost;
	struct irq_work		work;	/* IRQ work that flushes the buffer */
	unsigned char		buffer[SAFE_LOG_BUF_LEN];
};

static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
static DEFINE_PER_CPU(int, printk_context);

#ifdef CONFIG_PRINTK_NMI
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif

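/*
 * @printk_context works as a per-CPU counter plus a flag:
 * __printk_safe_enter()/__printk_safe_exit() increment/decrement the
 * counter part, printk_nmi_enter()/printk_nmi_exit() set/clear the NMI
 * bit, and vprintk_func() tests PRINTK_SAFE_CONTEXT_MASK and
 * PRINTK_NMI_CONTEXT_MASK to pick the right backend. The mask values
 * are expected to come from the local "internal.h".
 */
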
/* Get flushed in a safer context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
{
	if (printk_safe_irq_ready) {
		/* Make sure that the IRQ work is really initialized. */
		smp_rmb();
		irq_work_queue(&s->work);
	}
}

/*
 * Add a message to the per-CPU, context-dependent buffer. NMI and
 * printk-safe have dedicated buffers, because otherwise printk-safe,
 * preempted by an NMI printk, would overwrite the NMI messages.
 *
 * The messages are flushed from IRQ work (or from panic()), possibly
 * from another CPU, concurrently with printk_safe_log_store(). Should
 * this happen, printk_safe_log_store() will notice the buffer->len
 * mismatch and repeat the write.
 */
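/*
 * For example: a writer reads len == 100 and formats its message at
 * offset 100; meanwhile the flusher resets len to 0. The writer's
 * atomic_cmpxchg(&s->len, 100, 100 + add) then fails, and the message
 * is formatted again at the current offset, so nothing is misplaced.
 */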
static int printk_safe_log_store(struct printk_safe_seq_buf *s,
				 const char *fmt, va_list args)
{
	int add;
	size_t len;

again:
	len = atomic_read(&s->len);

	/* The trailing '\0' is not counted into len. */
	if (len >= sizeof(s->buffer) - 1) {
		atomic_inc(&s->message_lost);
		queue_flush_work(s);
		return 0;
	}

	/*
	 * Make sure that all old data have been read before the buffer
	 * was reset. This is not needed when we just append data.
	 */
	if (!len)
		smp_rmb();

	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
	if (!add)
		return 0;

	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	 * Note that atomic_cmpxchg() is an implicit memory barrier that
	 * makes sure that the data were written before updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;

	queue_flush_work(s);
	return add;
}

static inline void printk_safe_flush_line(const char *text, int len)
{
	/*
	 * Avoid any console driver calls from here, because we may be
	 * in NMI or printk_safe context (when in panic). The messages
	 * must go only into the ring buffer at this stage. Consoles will
	 * get explicitly called later when a crashdump is not generated.
	 */
	printk_deferred("%.*s", len, text);
}

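/*
 * For example: a buffer holding "<6>one\n<6>two" is flushed by the
 * function below as the line "<6>one\n" followed by "<6>two" plus an
 * appended KERN_CONT "\n"; a loglevel header found in the middle of a
 * line (missing newline) starts a new line instead of being glued to
 * the previous one.
 */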
/* printk one line at a time from the temporary buffer */
static int printk_safe_flush_buffer(const char *start, size_t len)
{
	const char *c, *end;
	bool header;

	c = start;
	end = start + len;
	header = true;

	/* Print line by line. */
	while (c < end) {
		if (*c == '\n') {
			printk_safe_flush_line(start, c - start + 1);
			start = ++c;
			header = true;
			continue;
		}

		/* Handle continuation lines or a missing newline. */
		if ((c + 1 < end) && printk_get_level(c)) {
			if (header) {
				c = printk_skip_level(c);
				continue;
			}

			printk_safe_flush_line(start, c - start);
			start = c++;
			header = true;
			continue;
		}

		header = false;
		c++;
	}

	/* Check if there was a partial line. Ignore a pure header. */
	if (start < end && !header) {
		static const char newline[] = KERN_CONT "\n";

		printk_safe_flush_line(start, end - start);
		printk_safe_flush_line(newline, strlen(newline));
	}

	return len;
}

static void report_message_lost(struct printk_safe_seq_buf *s)
{
	int lost = atomic_xchg(&s->message_lost, 0);

	if (lost)
		printk_deferred("Lost %d message(s)!\n", lost);
}

/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
 */
static void __printk_safe_flush(struct irq_work *work)
{
	static raw_spinlock_t read_lock =
		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
	struct printk_safe_seq_buf *s =
		container_of(work, struct printk_safe_seq_buf, work);
	unsigned long flags;
	size_t len;
	int i;

	/*
	 * The lock has two functions. First, one reader has to flush all
	 * available messages to make the lockless synchronization with
	 * writers easier. Second, we do not want to mix messages from
	 * different CPUs. This is especially important when printing
	 * a backtrace.
	 */
	raw_spin_lock_irqsave(&read_lock, flags);

	i = 0;
more:
	len = atomic_read(&s->len);

	/*
	 * This is just a paranoid check that nobody has manipulated
	 * the buffer in an unexpected way. If we printed something then
	 * @len must only increase. Also it should never overflow the
	 * buffer size.
	 */
	if ((i && i >= len) || len > sizeof(s->buffer)) {
		const char *msg = "printk_safe_flush: internal error\n";

		printk_safe_flush_line(msg, strlen(msg));
		len = 0;
	}

	if (!len)
		goto out; /* Someone else has already flushed the buffer. */

	/* Make sure that data has been written up to @len. */
	smp_rmb();
	i += printk_safe_flush_buffer(s->buffer + i, len - i);

	/*
	 * Check that nothing has got added in the meantime and truncate
	 * the buffer. Note that atomic_cmpxchg() is an implicit memory
	 * barrier that makes sure that the data were copied before
	 * updating s->len.
	 */
	if (atomic_cmpxchg(&s->len, len, 0) != len)
		goto more;

out:
	report_message_lost(s);
	raw_spin_unlock_irqrestore(&read_lock, flags);
}

/**
 * printk_safe_flush - flush all per-cpu nmi and printk-safe buffers.
 *
 * The buffers are flushed automatically via IRQ work. This function
 * is useful only when someone wants to be sure that all buffers have
 * been flushed at some point.
 */
void printk_safe_flush(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_PRINTK_NMI
		__printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
#endif
		__printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
	}
}

/**
 * printk_safe_flush_on_panic - flush all per-cpu buffers when the system
 *	goes down.
 *
 * Similar to printk_safe_flush() but it can be called even in NMI context
 * when the system goes down. It makes a best effort to get NMI messages
 * into the main ring buffer.
 *
 * Note that it could try harder when there is only one CPU online.
 */
void printk_safe_flush_on_panic(void)
{
	/*
	 * Make sure that we can access the main ring buffer.
	 * Do not risk a double release when more CPUs are up.
	 */
	if (in_nmi() && raw_spin_is_locked(&logbuf_lock)) {
		if (num_online_cpus() > 1)
			return;

		debug_locks_off();
		raw_spin_lock_init(&logbuf_lock);
	}

	printk_safe_flush();
}

#ifdef CONFIG_PRINTK_NMI
/*
 * Safe printk() for NMI context. It uses a per-CPU buffer to
 * store the message. NMIs are not nested, so there is always only
 * one writer running. But the buffer might get flushed from another
 * CPU, so we need to be careful.
 */
static int vprintk_nmi(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

void printk_nmi_enter(void)
{
	this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
}

void printk_nmi_exit(void)
{
	this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
}

#else

static int vprintk_nmi(const char *fmt, va_list args)
{
	return 0;
}

#endif /* CONFIG_PRINTK_NMI */

/*
 * Lock-less printk(), to avoid deadlocks should printk() recurse
 * into itself. It uses a per-CPU buffer to store the message, just like
 * NMI.
 */
static int vprintk_safe(const char *fmt, va_list args)
{
	struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);

	return printk_safe_log_store(s, fmt, args);
}

/* Can be preempted by NMI. */
void __printk_safe_enter(void)
{
	this_cpu_inc(printk_context);
}

/* Can be preempted by NMI. */
void __printk_safe_exit(void)
{
	this_cpu_dec(printk_context);
}

__printf(1, 0) int vprintk_func(const char *fmt, va_list args)
{
	if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
		return vprintk_nmi(fmt, args);

	if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
		return vprintk_safe(fmt, args);

	return vprintk_default(fmt, args);
}

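/*
 * Typical use of the enter/exit pair above (a sketch; the real call
 * sites are in printk.c and the NMI entry code, and the irqsave wrappers
 * are assumed to be the ones provided by "internal.h"):
 *
 *	unsigned long flags;
 *
 *	printk_safe_enter_irqsave(flags);
 *	... code that might recurse into printk(), e.g. while
 *	    logbuf_lock is held ...
 *	printk_safe_exit_irqrestore(flags);
 *
 * Any printk() issued inside such a section is redirected by
 * vprintk_func() into the per-CPU safe_print_seq buffer and flushed
 * later via IRQ work.
 */
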
void __init printk_safe_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct printk_safe_seq_buf *s;

		s = &per_cpu(safe_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);

#ifdef CONFIG_PRINTK_NMI
		s = &per_cpu(nmi_print_seq, cpu);
		init_irq_work(&s->work, __printk_safe_flush);
#endif
	}

	/* Make sure that the IRQ works are initialized before enabling. */
	smp_wmb();
	printk_safe_irq_ready = 1;

	/* Flush pending messages that did not have scheduled IRQ works. */
	printk_safe_flush();
}