*/
#include <linux/percpu.h>
-#ifdef CONFIG_PRINTK_NMI
-
-extern atomic_t nmi_message_lost;
-static inline int get_nmi_message_lost(void)
-{
-	return atomic_xchg(&nmi_message_lost, 0);
-}
-
-#else /* CONFIG_PRINTK_NMI */
-
-static inline int get_nmi_message_lost(void)
-{
-	return 0;
-}
-
-#endif /* CONFIG_PRINTK_NMI */
-
#ifdef CONFIG_PRINTK
#define PRINTK_SAFE_CONTEXT_MASK 0x7fffffff
	unsigned long flags;
	int this_cpu;
	int printed_len = 0;
-	int nmi_message_lost;
	bool in_sched = false;
	/* cpu currently holding logbuf_lock in this function */
	static unsigned int logbuf_cpu = UINT_MAX;
					 strlen(recursion_msg));
	}
-	nmi_message_lost = get_nmi_message_lost();
-	if (unlikely(nmi_message_lost)) {
-		text_len = scnprintf(textbuf, sizeof(textbuf),
-				"BAD LUCK: lost %d message(s) from NMI context!",
-				nmi_message_lost);
-		printed_len += log_store(0, 2, LOG_PREFIX|LOG_NEWLINE, 0,
-					 NULL, 0, textbuf, text_len);
-	}
-
/*
 * The printf needs to come first; we need the syslog
 * prefix which might be passed-in as a parameter.
 * were handled or when IRQs are blocked.
 */
static int printk_safe_irq_ready;
-atomic_t nmi_message_lost;
#define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) - \
-			sizeof(atomic_t) - sizeof(struct irq_work))
+			sizeof(atomic_t) -			\
+			sizeof(atomic_t) -			\
+			sizeof(struct irq_work))
struct printk_safe_seq_buf {
	atomic_t len;		/* length of written data */
+	atomic_t message_lost;
	struct irq_work work;	/* IRQ work that flushes the buffer */
	unsigned char buffer[SAFE_LOG_BUF_LEN];
};
static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
#endif
+/* Queue the IRQ work so that the buffer gets flushed from a safer context. */
+static void queue_flush_work(struct printk_safe_seq_buf *s)
+{
+	if (printk_safe_irq_ready) {
+		/* Make sure that IRQ work is really initialized. */
+		smp_rmb();
+		irq_work_queue(&s->work);
+	}
+}
+
/*
 * Add a message to per-CPU context-dependent buffer. NMI and printk-safe
 * have dedicated buffers, because otherwise printk-safe preempted by
	/* The trailing '\0' is not counted into len. */
	if (len >= sizeof(s->buffer) - 1) {
-		atomic_inc(&nmi_message_lost);
+		atomic_inc(&s->message_lost);
+		queue_flush_work(s);
		return 0;
	}
	smp_rmb();
	add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, args);
+	if (!add)
+		return 0;
	/*
	 * Do it once again if the buffer has been flushed in the meantime.
	if (atomic_cmpxchg(&s->len, len, len + add) != len)
		goto again;
-	/* Get flushed in a more safe context. */
-	if (add && printk_safe_irq_ready) {
-		/* Make sure that IRQ work is really initialized. */
-		smp_rmb();
-		irq_work_queue(&s->work);
-	}
-
+	queue_flush_work(s);
	return add;
}
	return len;
}
+static void report_message_lost(struct printk_safe_seq_buf *s)
+{
+	int lost = atomic_xchg(&s->message_lost, 0);
+
+	if (lost)
+		printk_deferred("Lost %d message(s)!\n", lost);
+}
+
/*
 * Flush data from the associated per-CPU buffer. The function
 * can be called either via IRQ work or independently.
		goto more;
out:
+	report_message_lost(s);
	raw_spin_unlock_irqrestore(&read_lock, flags);
}
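
The net effect of the patch is a small lock-free accounting scheme: a writer that cannot fit its message into the per-CPU buffer bumps an atomic counter, and the flush path later drains that counter with an atomic exchange so each loss is reported exactly once. Below is a minimal userspace sketch of the same pattern, using C11 atomics as a stand-in for the kernel's atomic_t API; the names overflow_count, record_overflow and report_overflows are invented for illustration and do not appear in the patch.

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the per-CPU buffer's message_lost counter. */
static atomic_int overflow_count;

/* Writer side: called when a message does not fit into the buffer,
 * mirroring atomic_inc(&s->message_lost) in the patch. */
static void record_overflow(void)
{
	atomic_fetch_add(&overflow_count, 1);
}

/* Flush side: read and reset in one atomic step, so a concurrent
 * increment is never silently discarded and nothing is reported twice. */
static void report_overflows(void)
{
	int lost = atomic_exchange(&overflow_count, 0);

	if (lost)
		printf("Lost %d message(s)!\n", lost);
}

int main(void)
{
	record_overflow();
	record_overflow();
	report_overflows();	/* prints "Lost 2 message(s)!" */
	report_overflows();	/* counter already drained, prints nothing */
	return 0;
}

The exchange-to-zero in report_overflows() plays the same role as atomic_xchg(&s->message_lost, 0) in report_message_lost(): because reading and clearing happen in a single atomic operation, the reporter and concurrent writers never race on a separate read-then-reset window.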