path: root/kernel/printk/printk_safe.c
diff options
author Sergey Senozhatsky <>	2020-03-03 20:30:02 +0900
committer Linus Torvalds <>	2020-04-10 13:18:57 -0700
commit ab6f762f0f53162d41497708b33c9a3236d3609e (patch)
tree e0d405ac3f1a4c99cc46609cb24bc64ee5cfea19 /kernel/printk/printk_safe.c
parent 87ad46e601340394cd75c1c79b19ca906f82c543 (diff)
printk: queue wake_up_klogd irq_work only if per-CPU areas are ready
printk_deferred(), similarly to printk_safe/printk_nmi, does not immediately attempt to print a new message on the consoles, avoiding calls into non-reentrant kernel paths, e.g. scheduler or timekeeping, which potentially can deadlock the system.

Those printk() flavors, instead, rely on per-CPU flush irq_work to print messages from safer contexts. For the same reasons (recursive scheduler or timekeeping calls) printk() uses per-CPU irq_work in order to wake up user space syslog/kmsg readers.

However, only printk_safe/printk_nmi do make sure that per-CPU areas have been initialised and that it's safe to modify per-CPU irq_work. This means that, for instance, should printk_deferred() be invoked "too early", that is before per-CPU areas are initialised, printk_deferred() will perform an illegal per-CPU access.

Lech Perczak [0] reports that after commit 1b710b1b10ef ("char/random: silence a lockdep splat with printk()") user-space syslog/kmsg readers are not able to read new kernel messages. The reason is printk_deferred() being called too early (as was pointed out by Petr and John).

Fix printk_deferred() and do not queue per-CPU irq_work before per-CPU areas are initialized.

Link:
Reported-by: Lech Perczak <>
Signed-off-by: Sergey Senozhatsky <>
Tested-by: Jann Horn <>
Reviewed-by: Petr Mladek <>
Cc: Greg Kroah-Hartman <>
Cc: Theodore Ts'o <>
Cc: John Ogness <>
Signed-off-by: Linus Torvalds <>
Diffstat (limited to 'kernel/printk/printk_safe.c')
1 file changed, 1 insertion(+), 10 deletions(-)
diff --git a/kernel/printk/printk_safe.c b/kernel/printk/printk_safe.c
index b4045e782743..d9a659a686f3 100644
--- a/kernel/printk/printk_safe.c
+++ b/kernel/printk/printk_safe.c
@@ -27,7 +27,6 @@
* There are situations when we want to make sure that all buffers
* were handled or when IRQs are blocked.
-static int printk_safe_irq_ready __read_mostly;
sizeof(atomic_t) - \
@@ -51,7 +50,7 @@ static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
/* Get flushed in a more safe context. */
static void queue_flush_work(struct printk_safe_seq_buf *s)
- if (printk_safe_irq_ready)
+ if (printk_percpu_data_ready())
@@ -402,14 +401,6 @@ void __init printk_safe_init(void)
- /*
- * In the highly unlikely event that a NMI were to trigger at
- * this moment. Make sure IRQ work is set up before this
- * variable is set.
- */
- barrier();
- printk_safe_irq_ready = 1;
/* Flush pending messages that did not have scheduled IRQ works. */