The following backtrace can be observed on an -rt kernel with the
CONFIG_DEBUG_MODULE_RONX option (CONFIG_DEBUG_RODATA on 4.9 kernels) enabled:
BUG: sleeping function called from invalid context at kernel/locking/rtmutex.c:993
in_atomic(): 1, irqs_disabled(): 128, pid: 14, name: migration/0
1 lock held by migration/0/14:
 #0: (tasklist_lock){+.+...}, at: [<c01183e8>] update_sections_early+0x24/0xdc
irq event stamp: 38
hardirqs last enabled at (37): [<c08f6f7c>] _raw_spin_unlock_irq+0x24/0x68
hardirqs last disabled at (38): [<c01fdfe8>] multi_cpu_stop+0xd8/0x138
softirqs last enabled at (0): [<c01303ec>] copy_process.part.5+0x238/0x1b64
softirqs last disabled at (0): [< (null)>] (null)
Preemption disabled at: [<c01fe244>] cpu_stopper_thread+0x80/0x10c
CPU: 0 PID: 14 Comm: migration/0 Not tainted 4.9.21-rt16-02220-g49e319c #15
Hardware name: Generic DRA74X (Flattened Device Tree)
[<c0112014>] (unwind_backtrace) from [<c010d370>] (show_stack+0x10/0x14)
[<c010d370>] (show_stack) from [<c049beb8>] (dump_stack+0xa8/0xd4)
[<c049beb8>] (dump_stack) from [<c01631a0>] (___might_sleep+0x1bc/0x2ac)
[<c01631a0>] (___might_sleep) from [<c08f7244>] (__rt_spin_lock+0x1c/0x30)
[<c08f7244>] (__rt_spin_lock) from [<c08f77a4>] (rt_read_lock+0x54/0x68)
[<c08f77a4>] (rt_read_lock) from [<c01183e8>] (update_sections_early+0x24/0xdc)
[<c01183e8>] (update_sections_early) from [<c01184b0>] (__fix_kernmem_perms+0x10/0x1c)
[<c01184b0>] (__fix_kernmem_perms) from [<c01fe010>] (multi_cpu_stop+0x100/0x138)
[<c01fe010>] (multi_cpu_stop) from [<c01fe24c>] (cpu_stopper_thread+0x88/0x10c)
[<c01fe24c>] (cpu_stopper_thread) from [<c015edc4>] (smpboot_thread_fn+0x174/0x31c)
[<c015edc4>] (smpboot_thread_fn) from [<c015a988>] (kthread+0xf0/0x108)
[<c015a988>] (kthread) from [<c0108818>] (ret_from_fork+0x14/0x3c)
Freeing unused kernel memory: 1024K (c0d00000 - c0e00000)
stop_machine() is called with cpus = NULL from fix_kernmem_perms() and
mark_rodata_ro(), which means that only one CPU will execute
update_sections_early() while all other CPUs spin and wait. Hence, it is
safe to remove the tasklist locking from update_sections_early(). As part
of this change, also mark the functions which are local to this module as
static.
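For illustration only (not part of this patch), a minimal sketch of the
calling pattern described above, using the hypothetical names stopper_cb()
and run_on_one_cpu(): with cpus == NULL, stop_machine() runs the callback
on a single CPU with preemption and interrupts disabled while every other
online CPU spins in multi_cpu_stop(). Taking a sleeping lock there (which
read_lock() becomes on PREEMPT_RT) is therefore invalid, and it is also
unnecessary, since no other CPU can modify the task list concurrently.

#include <linux/sched.h>
#include <linux/stop_machine.h>

/* Runs on exactly one CPU; all other CPUs spin in multi_cpu_stop(). */
static int stopper_cb(void *unused)
{
	/*
	 * On PREEMPT_RT this read_lock() maps to an rt_mutex-based lock
	 * and may sleep, triggering the splat above in this atomic
	 * context. It is also redundant here: no other CPU is running.
	 */
	read_lock(&tasklist_lock);
	/* ... walk the task list ... */
	read_unlock(&tasklist_lock);
	return 0;
}

static void run_on_one_cpu(void)
{
	/* cpus == NULL: execute stopper_cb() on one CPU, park the rest. */
	stop_machine(stopper_cb, NULL, NULL);
}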
Signed-off-by: Grygorii Strashko <grygorii.strashko@ti.com>
Acked-by: Laura Abbott <labbott@redhat.com>
Acked-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
}
+/*
+ * update_sections_early() is intended to be called only through the
+ * stop_machine() framework and is executed by only one CPU while all
+ * other CPUs spin and wait, so no locking is required in this function.
+ */
static void update_sections_early(struct section_perm perms[], int n)
{
struct task_struct *t, *s;
- read_lock(&tasklist_lock);
for_each_process(t) {
if (t->flags & PF_KTHREAD)
continue;
for_each_thread(t, s)
set_section_perms(perms, n, true, s->mm);
}
- read_unlock(&tasklist_lock);
set_section_perms(perms, n, true, current->active_mm);
set_section_perms(perms, n, true, &init_mm);
}
-int __fix_kernmem_perms(void *unused)
+static int __fix_kernmem_perms(void *unused)
{
update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
return 0;
}
-void fix_kernmem_perms(void)
+static void fix_kernmem_perms(void)
{
stop_machine(__fix_kernmem_perms, NULL, NULL);
}
-int __mark_rodata_ro(void *unused)
+static int __mark_rodata_ro(void *unused)
{
update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
return 0;