path: root/arch/s390
author	Alexander Gordeev <agordeev@linux.ibm.com>	2022-09-01 10:33:51 +0200
committer	Vasily Gorbik <gor@linux.ibm.com>	2022-09-14 16:46:00 +0200
commit	6cbd7cc2ebbe074522246f50628cbae34915bb95 (patch)
tree	4c239c2f1e68c783d7780a275b8a41777261bcfc /arch/s390
parent	d61bb30e434db7df2a5f1ad5d773ed3a876dee03 (diff)
s390/smp: call smp_reinit_ipl_cpu() before scheduler is available
Currently smp_reinit_ipl_cpu() is a pre-SMP early initcall. That ensures no CPU is running in parallel, but it is still not enough to assume the code runs exclusively, since scheduling is already available. Move the function call to the arch_call_rest_init() callback to ensure the code cannot be preempted and to allow lockless allocation of the kernel page tables. That is needed for a follow-up rework of the absolute lowcore access mechanism.

Suggested-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
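For context, a rough pseudo-C sketch of the boot ordering this change relies on, assuming the generic init/main.c flow; the structure below is illustrative only, not the upstream source:

/* Illustrative pseudo-C of the boot ordering, not the actual kernel code. */
void start_kernel(void)
{
	/* ... early setup; only the IPL (boot) CPU is running ... */
	arch_call_rest_init();		/* s390 overrides this weak hook */
}

void arch_call_rest_init(void)
{
	smp_reinit_ipl_cpu();	/* new call site: no other task exists yet,
				 * so kernel page tables can be allocated
				 * without locking */
	rest_init();		/* spawns kernel_init() and kthreadd() */
}

/*
 * kernel_init() later runs do_pre_smp_initcalls(), which is where the
 * removed early_initcall(smp_reinit_ipl_cpu) used to execute, with the
 * scheduler already available and preemption possible.
 */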
Diffstat (limited to 'arch/s390')
-rw-r--r--	arch/s390/include/asm/smp.h	1
-rw-r--r--	arch/s390/kernel/setup.c	1
-rw-r--r--	arch/s390/kernel/smp.c	3
3 files changed, 3 insertions, 2 deletions
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index 7f5d4763357b..59cd27255f38 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -58,6 +58,7 @@ static inline void smp_cpus_done(unsigned int max_cpus)
 {
 }
 
+extern int smp_reinit_ipl_cpu(void);
 extern int smp_rescan_cpus(void);
 extern void __noreturn cpu_die(void);
 extern void __cpu_die(unsigned int cpu);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index bbd4bde4f65d..063f0512a64a 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -395,6 +395,7 @@ void __init arch_call_rest_init(void)
 {
 	unsigned long stack;
 
+	smp_reinit_ipl_cpu();
 	stack = stack_alloc();
 	if (!stack)
 		panic("Couldn't allocate kernel stack");
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 30c91d565933..0e8e5546933f 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1256,7 +1256,7 @@ static __always_inline void set_new_lowcore(struct lowcore *lc)
: "memory", "cc");
}
-static int __init smp_reinit_ipl_cpu(void)
+int __init smp_reinit_ipl_cpu(void)
{
unsigned long async_stack, nodat_stack, mcck_stack;
struct lowcore *lc, *lc_ipl;
@@ -1291,4 +1291,3 @@ static int __init smp_reinit_ipl_cpu(void)
 
 	return 0;
 }
-early_initcall(smp_reinit_ipl_cpu);