author     Linus Torvalds <torvalds@linux-foundation.org>  2012-01-09 08:11:13 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-09 08:11:13 -0800
commit     72f318897e50c29b91efd1ed24515a93c138a2ba (patch)
tree       7e7ef8138d5afacd1be4655e4458dc4cee432d1e /arch/s390
parent     a0e86bd4252519321b0d102dc4ed90557aa7bee9 (diff)
parent     2fa1d4fce599809e6bd7d95756709a5faef30710 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (31 commits)
[S390] disassembler: mark exception causing instructions
[S390] Enable exception traces by default
[S390] return address of compat signals
[S390] sysctl: get rid of dead declaration
[S390] dasd: fix fixpoint divide exception in define_extent
[S390] dasd: add sanity check to detect path connection error
[S390] qdio: fix kernel panic for zfcp 31-bit
[S390] Add s390x description to Documentation/kdump/kdump.txt
[S390] Add VMCOREINFO_SYMBOL(high_memory) to vmcoreinfo
[S390] dasd: fix expiration handling for recovery requests
[S390] outstanding interrupts vs. smp_send_stop
[S390] ipc: call generic sys_ipc demultiplexer
[S390] zcrypt: Fix error return codes.
[S390] zcrypt: Rework length parameter checking.
[S390] cleanup trap handling
[S390] Remove Kerntypes leftovers
[S390] topology: increase poll frequency if change is anticipated
[S390] entry[64].S improvements
[S390] make arch/s390 subdirectories depend on config option
[S390] kvm: move cmf host id constant out of lowcore
...
Fix up conflicts in arch/s390/kernel/{smp.c,topology.c} due to the
sysdev removal clashing with "topology: get rid of ifdefs" which moved
some of that code around.
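
[Editor's note: the ptrace/syscall hunks in the diff below rename pt_regs.svc_code to int_code and add int_parm_long, so a single field pair carries the interruption code and parameter for system calls and program checks alike. The following is a minimal sketch of the extraction the updated syscall_get_nr() in asm/syscall.h performs; the struct and the encoded value are illustrative stand-ins, not the kernel headers:]

    /* Illustration only: mirrors the pt_regs fields this merge introduces. */
    #include <stdio.h>

    struct demo_pt_regs {              /* stand-in for the kernel's pt_regs */
            unsigned int int_code;     /* was svc_code; low 16 bits = svc number */
            unsigned long int_parm_long; /* e.g. translation-exception address */
    };

    /* Same mask the updated syscall_get_nr() applies to int_code. */
    static int demo_syscall_nr(const struct demo_pt_regs *regs)
    {
            return regs->int_code & 0xffff;
    }

    int main(void)
    {
            /* upper halfword illustrative; lower halfword: svc number 4 */
            struct demo_pt_regs regs = { .int_code = 0x00020004, .int_parm_long = 0 };
            printf("syscall nr = %d\n", demo_syscall_nr(&regs)); /* prints 4 */
            return 0;
    }

[The compat_signal.c hunk leans on the same pair: for synchronous signals it now passes regs->int_code & 127 and regs->int_parm_long in gprs 4 and 5, replacing the removed thread.trap_no and thread.prot_addr fields.]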
Diffstat (limited to 'arch/s390')
40 files changed, 1626 insertions, 1840 deletions
diff --git a/arch/s390/Kbuild b/arch/s390/Kbuild
index ae4b01060edd..9858476fa0fe 100644
--- a/arch/s390/Kbuild
+++ b/arch/s390/Kbuild
@@ -1,6 +1,7 @@
-obj-y += kernel/
-obj-y += mm/
-obj-y += crypto/
-obj-y += appldata/
-obj-y += hypfs/
-obj-y += kvm/
+obj-y += kernel/
+obj-y += mm/
+obj-$(CONFIG_KVM) += kvm/
+obj-$(CONFIG_CRYPTO_HW) += crypto/
+obj-$(CONFIG_S390_HYPFS_FS) += hypfs/
+obj-$(CONFIG_APPLDATA_BASE) += appldata/
+obj-$(CONFIG_MATHEMU) += math-emu/
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 28d183c42751..d1727584230a 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -193,18 +193,13 @@ config HOTPLUG_CPU
 	  Say N if you want to disable CPU hotplug.
 
 config SCHED_MC
-	def_bool y
-	prompt "Multi-core scheduler support"
-	depends on SMP
-	help
-	  Multi-core scheduler support improves the CPU scheduler's decision
-	  making when dealing with multi-core CPU chips at a cost of slightly
-	  increased overhead in some places.
+	def_bool n
 
 config SCHED_BOOK
 	def_bool y
 	prompt "Book scheduler support"
-	depends on SMP && SCHED_MC
+	depends on SMP
+	select SCHED_MC
 	help
 	  Book scheduler support improves the CPU scheduler's decision making
 	  when dealing with machines that have several books.
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 27a0b5df5ead..e9f353341693 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -99,7 +99,6 @@ core-y += arch/s390/
 
 libs-y += arch/s390/lib/
 drivers-y += drivers/s390/
-drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/
 
 # must be linked after kernel
 drivers-$(CONFIG_OPROFILE) += arch/s390/oprofile/
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index 635d677d3281..f2737a005afc 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -23,4 +23,4 @@ $(obj)/compressed/vmlinux: FORCE
 
 install: $(CONFIGURE) $(obj)/image
 	sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/image \
-	System.map Kerntypes "$(INSTALL_PATH)"
+	System.map "$(INSTALL_PATH)"
diff --git a/arch/s390/include/asm/kdebug.h b/arch/s390/include/asm/kdebug.h
index 40db27cd6e60..5c1abd47612a 100644
--- a/arch/s390/include/asm/kdebug.h
+++ b/arch/s390/include/asm/kdebug.h
@@ -22,6 +22,6 @@ enum die_val {
 	DIE_NMI_IPI,
 };
 
-extern void die(const char *, struct pt_regs *, long);
+extern void die(struct pt_regs *, const char *);
 
 #endif
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 9e13c7d56cc1..707f2306725b 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -97,47 +97,52 @@ struct _lowcore {
 	__u32	gpregs_save_area[16];		/* 0x0180 */
 	__u32	cregs_save_area[16];		/* 0x01c0 */
 
+	/* Save areas. */
+	__u32	save_area_sync[8];		/* 0x0200 */
+	__u32	save_area_async[8];		/* 0x0220 */
+	__u32	save_area_restart[1];		/* 0x0240 */
+	__u8	pad_0x0244[0x0248-0x0244];	/* 0x0244 */
+
+	/* Return psws. */
-	__u32	save_area[16];			/* 0x0200 */
-	psw_t	return_psw;			/* 0x0240 */
-	psw_t	return_mcck_psw;		/* 0x0248 */
+	psw_t	return_psw;			/* 0x0248 */
+	psw_t	return_mcck_psw;		/* 0x0250 */
 
 	/* CPU time accounting values */
-	__u64	sync_enter_timer;		/* 0x0250 */
-	__u64	async_enter_timer;		/* 0x0258 */
-	__u64	mcck_enter_timer;		/* 0x0260 */
-	__u64	exit_timer;			/* 0x0268 */
-	__u64	user_timer;			/* 0x0270 */
-	__u64	system_timer;			/* 0x0278 */
-	__u64	steal_timer;			/* 0x0280 */
-	__u64	last_update_timer;		/* 0x0288 */
-	__u64	last_update_clock;		/* 0x0290 */
+	__u64	sync_enter_timer;		/* 0x0258 */
+	__u64	async_enter_timer;		/* 0x0260 */
+	__u64	mcck_enter_timer;		/* 0x0268 */
+	__u64	exit_timer;			/* 0x0270 */
+	__u64	user_timer;			/* 0x0278 */
+	__u64	system_timer;			/* 0x0280 */
+	__u64	steal_timer;			/* 0x0288 */
+	__u64	last_update_timer;		/* 0x0290 */
+	__u64	last_update_clock;		/* 0x0298 */
 
 	/* Current process. */
-	__u32	current_task;			/* 0x0298 */
-	__u32	thread_info;			/* 0x029c */
-	__u32	kernel_stack;			/* 0x02a0 */
+	__u32	current_task;			/* 0x02a0 */
+	__u32	thread_info;			/* 0x02a4 */
+	__u32	kernel_stack;			/* 0x02a8 */
 
 	/* Interrupt and panic stack. */
-	__u32	async_stack;			/* 0x02a4 */
-	__u32	panic_stack;			/* 0x02a8 */
+	__u32	async_stack;			/* 0x02ac */
+	__u32	panic_stack;			/* 0x02b0 */
 
 	/* Address space pointer. */
-	__u32	kernel_asce;			/* 0x02ac */
-	__u32	user_asce;			/* 0x02b0 */
-	__u32	current_pid;			/* 0x02b4 */
+	__u32	kernel_asce;			/* 0x02b4 */
+	__u32	user_asce;			/* 0x02b8 */
+	__u32	current_pid;			/* 0x02bc */
 
 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x02b8 */
-	__u32	softirq_pending;		/* 0x02bc */
-	__u32	percpu_offset;			/* 0x02c0 */
-	__u32	ext_call_fast;			/* 0x02c4 */
-	__u64	int_clock;			/* 0x02c8 */
-	__u64	mcck_clock;			/* 0x02d0 */
-	__u64	clock_comparator;		/* 0x02d8 */
-	__u32	machine_flags;			/* 0x02e0 */
-	__u32	ftrace_func;			/* 0x02e4 */
-	__u8	pad_0x02e8[0x0300-0x02e8];	/* 0x02e8 */
+	__u32	cpu_nr;				/* 0x02c0 */
+	__u32	softirq_pending;		/* 0x02c4 */
+	__u32	percpu_offset;			/* 0x02c8 */
+	__u32	ext_call_fast;			/* 0x02cc */
+	__u64	int_clock;			/* 0x02d0 */
+	__u64	mcck_clock;			/* 0x02d8 */
+	__u64	clock_comparator;		/* 0x02e0 */
+	__u32	machine_flags;			/* 0x02e8 */
+	__u32	ftrace_func;			/* 0x02ec */
+	__u8	pad_0x02f0[0x0300-0x02f0];	/* 0x02f0 */
 
 	/* Interrupt response block */
 	__u8	irb[64];			/* 0x0300 */
@@ -229,57 +234,62 @@ struct _lowcore {
 	psw_t	mcck_new_psw;			/* 0x01e0 */
 	psw_t	io_new_psw;			/* 0x01f0 */
 
-	/* Entry/exit save area & return psws. */
-	__u64	save_area[16];			/* 0x0200 */
-	psw_t	return_psw;			/* 0x0280 */
-	psw_t	return_mcck_psw;		/* 0x0290 */
+	/* Save areas. */
+	__u64	save_area_sync[8];		/* 0x0200 */
+	__u64	save_area_async[8];		/* 0x0240 */
+	__u64	save_area_restart[1];		/* 0x0280 */
+	__u8	pad_0x0288[0x0290-0x0288];	/* 0x0288 */
+
+	/* Return psws. */
+	psw_t	return_psw;			/* 0x0290 */
+	psw_t	return_mcck_psw;		/* 0x02a0 */
 
 	/* CPU accounting and timing values. */
-	__u64	sync_enter_timer;		/* 0x02a0 */
-	__u64	async_enter_timer;		/* 0x02a8 */
-	__u64	mcck_enter_timer;		/* 0x02b0 */
-	__u64	exit_timer;			/* 0x02b8 */
-	__u64	user_timer;			/* 0x02c0 */
-	__u64	system_timer;			/* 0x02c8 */
-	__u64	steal_timer;			/* 0x02d0 */
-	__u64	last_update_timer;		/* 0x02d8 */
-	__u64	last_update_clock;		/* 0x02e0 */
+	__u64	sync_enter_timer;		/* 0x02b0 */
+	__u64	async_enter_timer;		/* 0x02b8 */
+	__u64	mcck_enter_timer;		/* 0x02c0 */
+	__u64	exit_timer;			/* 0x02c8 */
+	__u64	user_timer;			/* 0x02d0 */
+	__u64	system_timer;			/* 0x02d8 */
+	__u64	steal_timer;			/* 0x02e0 */
+	__u64	last_update_timer;		/* 0x02e8 */
+	__u64	last_update_clock;		/* 0x02f0 */
 
 	/* Current process. */
-	__u64	current_task;			/* 0x02e8 */
-	__u64	thread_info;			/* 0x02f0 */
-	__u64	kernel_stack;			/* 0x02f8 */
+	__u64	current_task;			/* 0x02f8 */
+	__u64	thread_info;			/* 0x0300 */
+	__u64	kernel_stack;			/* 0x0308 */
 
 	/* Interrupt and panic stack. */
-	__u64	async_stack;			/* 0x0300 */
-	__u64	panic_stack;			/* 0x0308 */
+	__u64	async_stack;			/* 0x0310 */
+	__u64	panic_stack;			/* 0x0318 */
 
 	/* Address space pointer. */
-	__u64	kernel_asce;			/* 0x0310 */
-	__u64	user_asce;			/* 0x0318 */
-	__u64	current_pid;			/* 0x0320 */
+	__u64	kernel_asce;			/* 0x0320 */
+	__u64	user_asce;			/* 0x0328 */
+	__u64	current_pid;			/* 0x0330 */
 
 	/* SMP info area */
-	__u32	cpu_nr;				/* 0x0328 */
-	__u32	softirq_pending;		/* 0x032c */
-	__u64	percpu_offset;			/* 0x0330 */
-	__u64	ext_call_fast;			/* 0x0338 */
-	__u64	int_clock;			/* 0x0340 */
-	__u64	mcck_clock;			/* 0x0348 */
-	__u64	clock_comparator;		/* 0x0350 */
-	__u64	vdso_per_cpu_data;		/* 0x0358 */
-	__u64	machine_flags;			/* 0x0360 */
-	__u64	ftrace_func;			/* 0x0368 */
-	__u64	gmap;				/* 0x0370 */
-	__u64	cmf_hpp;			/* 0x0378 */
+	__u32	cpu_nr;				/* 0x0338 */
+	__u32	softirq_pending;		/* 0x033c */
+	__u64	percpu_offset;			/* 0x0340 */
+	__u64	ext_call_fast;			/* 0x0348 */
+	__u64	int_clock;			/* 0x0350 */
+	__u64	mcck_clock;			/* 0x0358 */
+	__u64	clock_comparator;		/* 0x0360 */
+	__u64	vdso_per_cpu_data;		/* 0x0368 */
+	__u64	machine_flags;			/* 0x0370 */
+	__u64	ftrace_func;			/* 0x0378 */
+	__u64	gmap;				/* 0x0380 */
+	__u8	pad_0x0388[0x0400-0x0388];	/* 0x0388 */
 
 	/* Interrupt response block. */
-	__u8	irb[64];			/* 0x0380 */
+	__u8	irb[64];			/* 0x0400 */
 
 	/* Per cpu primary space access list */
-	__u32	paste[16];			/* 0x03c0 */
+	__u32	paste[16];			/* 0x0440 */
 
-	__u8	pad_0x0400[0x0e00-0x0400];	/* 0x0400 */
+	__u8	pad_0x0480[0x0e00-0x0480];	/* 0x0480 */
 
 	/*
 	 * 0xe00 contains the address of the IPL Parameter Information
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 4f289ff0b7fe..011358c1b18e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -128,28 +128,11 @@ static inline int is_zero_pfn(unsigned long pfn)
  * effect, this also makes sure that 64 bit module code cannot be used
  * as system call address.
  */
-
 extern unsigned long VMALLOC_START;
+extern unsigned long VMALLOC_END;
+extern struct page *vmemmap;
 
-#ifndef __s390x__
-#define VMALLOC_SIZE	(96UL << 20)
-#define VMALLOC_END	0x7e000000UL
-#define VMEM_MAP_END	0x80000000UL
-#else /* __s390x__ */
-#define VMALLOC_SIZE	(128UL << 30)
-#define VMALLOC_END	0x3e000000000UL
-#define VMEM_MAP_END	0x40000000000UL
-#endif /* __s390x__ */
-
-/*
- * VMEM_MAX_PHYS is the highest physical address that can be added to the 1:1
- * mapping. This needs to be calculated at compile time since the size of the
- * VMEM_MAP is static but the size of struct page can change.
- */
-#define VMEM_MAX_PAGES	((VMEM_MAP_END - VMALLOC_END) / sizeof(struct page))
-#define VMEM_MAX_PFN	min(VMALLOC_START >> PAGE_SHIFT, VMEM_MAX_PAGES)
-#define VMEM_MAX_PHYS	((VMEM_MAX_PFN << PAGE_SHIFT) & ~((16 << 20) - 1))
-#define vmemmap		((struct page *) VMALLOC_END)
+#define VMEM_MAX_PHYS	((unsigned long) vmemmap)
 
 /*
  * A 31 bit pagetable entry of S390 has following format:
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 5f33d37d032c..27272f6a14c2 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -80,8 +80,6 @@ struct thread_struct {
 	unsigned int acrs[NUM_ACRS];
 	unsigned long ksp;		/* kernel stack pointer */
 	mm_segment_t mm_segment;
-	unsigned long prot_addr;	/* address of protection-excep. */
-	unsigned int trap_no;
 	unsigned long gmap_addr;	/* address of last gmap fault. */
 	struct per_regs per_user;	/* User specified PER registers */
 	struct per_event per_event;	/* Cause of the last PER trap */
diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h
index a65846340d51..56da355678f4 100644
--- a/arch/s390/include/asm/ptrace.h
+++ b/arch/s390/include/asm/ptrace.h
@@ -324,7 +324,8 @@ struct pt_regs
 	psw_t psw;
 	unsigned long gprs[NUM_GPRS];
 	unsigned long orig_gpr2;
-	unsigned int svc_code;
+	unsigned int int_code;
+	unsigned long int_parm_long;
 };
 
 /*
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index e63d13dd3bf5..d75c8e78f7e3 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -352,7 +352,7 @@ typedef void qdio_handler_t(struct ccw_device *, unsigned int, int,
 * @no_output_qs: number of output queues
 * @input_handler: handler to be called for input queues
 * @output_handler: handler to be called for output queues
- * @queue_start_poll: polling handlers (one per input queue or NULL)
+ * @queue_start_poll_array: polling handlers (one per input queue or NULL)
 * @int_parm: interruption parameter
 * @input_sbal_addr_array: address of no_input_qs * 128 pointers
 * @output_sbal_addr_array: address of no_output_qs * 128 pointers
@@ -372,7 +372,8 @@ struct qdio_initialize {
 	unsigned int no_output_qs;
 	qdio_handler_t *input_handler;
 	qdio_handler_t *output_handler;
-	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
+	void (**queue_start_poll_array) (struct ccw_device *, int,
+					 unsigned long);
 	int scan_threshold;
 	unsigned long int_parm;
 	void **input_sbal_addr_array;
diff --git a/arch/s390/include/asm/sigp.h b/arch/s390/include/asm/sigp.h
index e3bffd4e2d66..7040b8567cd0 100644
--- a/arch/s390/include/asm/sigp.h
+++ b/arch/s390/include/asm/sigp.h
@@ -56,6 +56,7 @@ enum {
 	ec_schedule = 0,
 	ec_call_function,
 	ec_call_function_single,
+	ec_stop_cpu,
 };
 
 /*
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ab47a69fdf07..c32e9123b40c 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -23,7 +23,6 @@ extern void __cpu_die (unsigned int cpu);
 extern int __cpu_up (unsigned int cpu);
 
 extern struct mutex smp_cpu_state_mutex;
-extern int smp_cpu_polarization[];
 
 extern void arch_send_call_function_single_ipi(int cpu);
 extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
diff --git a/arch/s390/include/asm/sparsemem.h b/arch/s390/include/asm/sparsemem.h
index 545d219e6a2d..0fb34027d3f6 100644
--- a/arch/s390/include/asm/sparsemem.h
+++ b/arch/s390/include/asm/sparsemem.h
@@ -4,8 +4,8 @@
 #ifdef CONFIG_64BIT
 
 #define SECTION_SIZE_BITS	28
-#define MAX_PHYSADDR_BITS	42
-#define MAX_PHYSMEM_BITS	42
+#define MAX_PHYSADDR_BITS	46
+#define MAX_PHYSMEM_BITS	46
 
 #else
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index b239ff53b189..fb214dd9b7e0 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -27,7 +27,7 @@ static inline long syscall_get_nr(struct task_struct *task,
 				  struct pt_regs *regs)
 {
 	return test_tsk_thread_flag(task, TIF_SYSCALL) ?
-		(regs->svc_code & 0xffff) : -1;
+		(regs->int_code & 0xffff) : -1;
 }
 
 static inline void syscall_rollback(struct task_struct *task,
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index ef573c1d71a7..d73cc6b60000 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -20,8 +20,6 @@
 
 struct task_struct;
 
-extern int sysctl_userprocess_debug;
-
 extern struct task_struct *__switch_to(void *, void *);
 extern void update_per_regs(struct task_struct *task);
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index 005d77d8ae2a..0837de80c351 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -4,6 +4,10 @@
 #include <linux/cpumask.h>
 #include <asm/sysinfo.h>
 
+struct cpu;
+
+#ifdef CONFIG_SCHED_BOOK
+
 extern unsigned char cpu_core_id[NR_CPUS];
 extern cpumask_t cpu_core_map[NR_CPUS];
 
@@ -16,8 +20,6 @@ static inline const struct cpumask *cpu_coregroup_mask(int cpu)
 #define topology_core_cpumask(cpu)	(&cpu_core_map[cpu])
 #define mc_capable()			(1)
 
-#ifdef CONFIG_SCHED_BOOK
-
 extern unsigned char cpu_book_id[NR_CPUS];
 extern cpumask_t cpu_book_map[NR_CPUS];
 
@@ -29,19 +31,45 @@ static inline const struct cpumask *cpu_book_mask(int cpu)
 #define topology_book_id(cpu)		(cpu_book_id[cpu])
 #define topology_book_cpumask(cpu)	(&cpu_book_map[cpu])
 
-#endif /* CONFIG_SCHED_BOOK */
-
+int topology_cpu_init(struct cpu *);
 int topology_set_cpu_management(int fc);
 void topology_schedule_update(void);
 void store_topology(struct sysinfo_15_1_x *info);
+void topology_expect_change(void);
+
+#else /* CONFIG_SCHED_BOOK */
+
+static inline void topology_schedule_update(void) { }
+static inline int topology_cpu_init(struct cpu *cpu) { return 0; }
+static inline void topology_expect_change(void) { }
 
-#define POLARIZATION_UNKNWN	(-1)
+#endif /* CONFIG_SCHED_BOOK */
+
+#define POLARIZATION_UNKNOWN	(-1)
 #define POLARIZATION_HRZ	(0)
 #define POLARIZATION_VL		(1)
 #define POLARIZATION_VM		(2)
 #define POLARIZATION_VH		(3)
 
-#ifdef CONFIG_SMP
+extern int cpu_polarization[];
+
+static inline void cpu_set_polarization(int cpu, int val)
+{
+#ifdef CONFIG_SCHED_BOOK
+	cpu_polarization[cpu] = val;
+#endif
+}
+
+static inline int cpu_read_polarization(int cpu)
+{
+#ifdef CONFIG_SCHED_BOOK
+	return cpu_polarization[cpu];
+#else
+	return POLARIZATION_HRZ;
+#endif
+}
+
+#ifdef CONFIG_SCHED_BOOK
 void s390_init_cpu_topology(void);
 #else
 static inline void s390_init_cpu_topology(void)
diff --git a/arch/s390/include/asm/unistd.h b/arch/s390/include/asm/unistd.h
index 58de4c91c333..8a8008fe7b8f 100644
--- a/arch/s390/include/asm/unistd.h
+++ b/arch/s390/include/asm/unistd.h
@@ -398,6 +398,7 @@
 #define __ARCH_WANT_SYS_SIGNAL
 #define __ARCH_WANT_SYS_UTIME
 #define __ARCH_WANT_SYS_SOCKETCALL
+#define __ARCH_WANT_SYS_IPC
 #define __ARCH_WANT_SYS_FADVISE64
 #define __ARCH_WANT_SYS_GETPGRP
 #define __ARCH_WANT_SYS_LLSEEK
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index dd4f07640919..7d9ec924e7e7 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -32,7 +32,8 @@ extra-y += head.o init_task.o vmlinux.lds
 extra-y	+= $(if $(CONFIG_64BIT),head64.o,head31.o)
 
 obj-$(CONFIG_MODULES)		+= s390_ksyms.o module.o
-obj-$(CONFIG_SMP)		+= smp.o topology.o
+obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_SCHED_BOOK)	+= topology.o
 obj-$(CONFIG_SMP)		+= $(if $(CONFIG_64BIT),switch_cpu64.o, \
 						switch_cpu.o)
 obj-$(CONFIG_HIBERNATION)	+= suspend.o swsusp_asm64.o
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 751318765e2e..6e6a72e66d60 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -45,7 +45,8 @@ int main(void)
 	DEFINE(__PT_PSW, offsetof(struct pt_regs, psw));
 	DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
 	DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
-	DEFINE(__PT_SVC_CODE, offsetof(struct pt_regs, svc_code));
+	DEFINE(__PT_INT_CODE, offsetof(struct pt_regs, int_code));
+	DEFINE(__PT_INT_PARM_LONG, offsetof(struct pt_regs, int_parm_long));
 	DEFINE(__PT_SIZE, sizeof(struct pt_regs));
 	BLANK();
 	DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
@@ -108,7 +109,9 @@ int main(void)
 	DEFINE(__LC_PGM_NEW_PSW, offsetof(struct _lowcore, program_new_psw));
 	DEFINE(__LC_MCK_NEW_PSW, offsetof(struct _lowcore, mcck_new_psw));
 	DEFINE(__LC_IO_NEW_PSW, offsetof(struct _lowcore, io_new_psw));
-	DEFINE(__LC_SAVE_AREA, offsetof(struct _lowcore, save_area));
+	DEFINE(__LC_SAVE_AREA_SYNC, offsetof(struct _lowcore, save_area_sync));
+	DEFINE(__LC_SAVE_AREA_ASYNC, offsetof(struct _lowcore, save_area_async));
+	DEFINE(__LC_SAVE_AREA_RESTART, offsetof(struct _lowcore, save_area_restart));
 	DEFINE(__LC_RETURN_PSW, offsetof(struct _lowcore, return_psw));
 	DEFINE(__LC_RETURN_MCCK_PSW, offsetof(struct _lowcore, return_mcck_psw));
 	DEFINE(__LC_SYNC_ENTER_TIMER, offsetof(struct _lowcore, sync_enter_timer));
@@ -150,7 +153,6 @@ int main(void)
 	DEFINE(__LC_LAST_BREAK, offsetof(struct _lowcore, breaking_event_addr));
 	DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
 	DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
-	DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp));
 	DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
 #endif /* CONFIG_32BIT */
 	return 0;
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index f8828d38fa6e..3aa4d00aaf50 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -33,7 +33,7 @@ s390_base_mcck_handler_fn:
 	.previous
 
 ENTRY(s390_base_ext_handler)
-	stmg	%r0,%r15,__LC_SAVE_AREA
+	stmg	%r0,%r15,__LC_SAVE_AREA_ASYNC
 	basr	%r13,0
0:	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_ext_handler_fn
@@ -41,7 +41,7 @@ ENTRY(s390_base_ext_handler)
 	ltgr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-1:	lmg	%r0,%r15,__LC_SAVE_AREA
+1:	lmg	%r0,%r15,__LC_SAVE_AREA_ASYNC
 	ni	__LC_EXT_OLD_PSW+1,0xfd	# clear wait state bit
 	lpswe	__LC_EXT_OLD_PSW
@@ -53,7 +53,7 @@ s390_base_ext_handler_fn:
 	.previous
 
 ENTRY(s390_base_pgm_handler)
-	stmg	%r0,%r15,__LC_SAVE_AREA
+	stmg	%r0,%r15,__LC_SAVE_AREA_SYNC
 	basr	%r13,0
0:	aghi	%r15,-STACK_FRAME_OVERHEAD
 	larl	%r1,s390_base_pgm_handler_fn
@@ -61,7 +61,7 @@ ENTRY(s390_base_pgm_handler)
 	ltgr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-	lmg	%r0,%r15,__LC_SAVE_AREA
+	lmg	%r0,%r15,__LC_SAVE_AREA_SYNC
 	lpswe	__LC_PGM_OLD_PSW
1:	lpswe	disabled_wait_psw-0b(%r13)
@@ -142,7 +142,7 @@ s390_base_mcck_handler_fn:
 	.previous
 
 ENTRY(s390_base_ext_handler)
-	stm	%r0,%r15,__LC_SAVE_AREA
+	stm	%r0,%r15,__LC_SAVE_AREA_ASYNC
 	basr	%r13,0
0:	ahi	%r15,-STACK_FRAME_OVERHEAD
 	l	%r1,2f-0b(%r13)
@@ -150,7 +150,7 @@ ENTRY(s390_base_ext_handler)
 	ltr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-1:	lm	%r0,%r15,__LC_SAVE_AREA
+1:	lm	%r0,%r15,__LC_SAVE_AREA_ASYNC
 	ni	__LC_EXT_OLD_PSW+1,0xfd	# clear wait state bit
 	lpsw	__LC_EXT_OLD_PSW
@@ -164,7 +164,7 @@ s390_base_ext_handler_fn:
 	.previous
 
 ENTRY(s390_base_pgm_handler)
-	stm	%r0,%r15,__LC_SAVE_AREA
+	stm	%r0,%r15,__LC_SAVE_AREA_SYNC
 	basr	%r13,0
0:	ahi	%r15,-STACK_FRAME_OVERHEAD
 	l	%r1,2f-0b(%r13)
@@ -172,7 +172,7 @@ ENTRY(s390_base_pgm_handler)
 	ltr	%r1,%r1
 	jz	1f
 	basr	%r14,%r1
-	lm	%r0,%r15,__LC_SAVE_AREA
+	lm	%r0,%r15,__LC_SAVE_AREA_SYNC
 	lpsw	__LC_PGM_OLD_PSW
1:	lpsw	disabled_wait_psw-0b(%r13)
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 84a982898448..ab64bdbab2ae 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -278,9 +278,6 @@ asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
 {
 	if (call >> 16)		/* hack for backward compatibility */
 		return -EINVAL;
-
-	call &= 0xffff;
-
 	switch (call) {
 	case SEMTIMEDOP:
 		return compat_sys_semtimedop(first, compat_ptr(ptr),
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 4f68c81d3ffa..6fe78c2f95d9 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -501,8 +501,12 @@ static int setup_frame32(int sig, struct k_sigaction *ka,
 	/* We forgot to include these in the sigcontext.
 	   To avoid breaking binary compatibility, they are passed as args. */
-	regs->gprs[4] = current->thread.trap_no;
-	regs->gprs[5] = current->thread.prot_addr;
+	if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+	    sig == SIGTRAP || sig == SIGFPE) {
+		/* set extra registers only for synchronous signals */
+		regs->gprs[4] = regs->int_code & 127;
+		regs->gprs[5] = regs->int_parm_long;
+	}
 
 	/* Place signal number on stack to allow backtrace from handler.  */
 	if (__put_user(regs->gprs[2], (int __force __user *) &frame->signo))
@@ -544,9 +548,9 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
 	/* Set up to return from userspace.  If provided, use a stub
 	   already in userspace.  */
 	if (ka->sa.sa_flags & SA_RESTORER) {
-		regs->gprs[14] = (__u64) ka->sa.sa_restorer;
+		regs->gprs[14] = (__u64) ka->sa.sa_restorer | PSW32_ADDR_AMODE;
 	} else {
-		regs->gprs[14] = (__u64) frame->retcode;
+		regs->gprs[14] = (__u64) frame->retcode | PSW32_ADDR_AMODE;
 		err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
 				  (u16 __force __user *)(frame->retcode));
 	}
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c
index 45df6d456aa1..e2f847599c8e 100644
--- a/arch/s390/kernel/dis.c
+++ b/arch/s390/kernel/dis.c
@@ -1578,10 +1578,15 @@ void show_code(struct pt_regs *regs)
 	ptr += sprintf(ptr, "%s Code:", mode);
 	hops = 0;
 	while (start < end && hops < 8) {
-		*ptr++ = (start == 32) ? '>' : ' ';
+		opsize = insn_length(code[start]);
+		if (start + opsize == 32)
+			*ptr++ = '#';
+		else if (start == 32)
+			*ptr++ = '>';
+		else
+			*ptr++ = ' ';
 		addr = regs->psw.addr + start - 32;
 		ptr += sprintf(ptr, ONELONG, addr);
-		opsize = insn_length(code[start]);
 		if (start + opsize >= end)
 			break;
 		for (i = 0; i < opsize; i++)
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index c9ffe0025197..52098d6dfaa7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -434,18 +434,22 @@ static void __init append_to_cmdline(size_t (*ipl_data)(char *, size_t))
 	}
 }
 
-static void __init setup_boot_command_line(void)
+static inline int has_ebcdic_char(const char *str)
 {
 	int i;
 
-	/* convert arch command line to ascii */
-	for (i = 0; i < ARCH_COMMAND_LINE_SIZE; i++)
-		if (COMMAND_LINE[i] & 0x80)
-			break;
-	if (i < ARCH_COMMAND_LINE_SIZE)
-		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
-	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE-1] = 0;
+	for (i = 0; str[i]; i++)
+		if (str[i] & 0x80)
+			return 1;
+	return 0;
+}
 
+static void __init setup_boot_command_line(void)
+{
+	COMMAND_LINE[ARCH_COMMAND_LINE_SIZE - 1] = 0;
+	/* convert arch command line to ascii if necessary */
+	if (has_ebcdic_char(COMMAND_LINE))
+		EBCASC(COMMAND_LINE, ARCH_COMMAND_LINE_SIZE);
 	/* copy arch command line */
 	strlcpy(boot_command_line, strstrip(COMMAND_LINE),
 		ARCH_COMMAND_LINE_SIZE);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index b13157057e02..3705700ed374 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -19,32 +19,22 @@
 #include <asm/unistd.h>
 #include <asm/page.h>
 
-/*
- * Stack layout for the system_call stack entry.
- * The first few entries are identical to the user_regs_struct.
- */
-SP_PTREGS = STACK_FRAME_OVERHEAD
-SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS
-SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW
-SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS
-SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 4
-SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8
-SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 12
-SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16
-SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 20
-SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24
-SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 28
-SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32
-SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 36
-SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40
-SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 44
-SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48
-SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 52
-SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56
-SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
-SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
-SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE
-SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
+__PT_R0 = __PT_GPRS
+__PT_R1 = __PT_GPRS + 4
+__PT_R2 = __PT_GPRS + 8
+__PT_R3 = __PT_GPRS + 12
+__PT_R4 = __PT_GPRS + 16
+__PT_R5 = __PT_GPRS + 20
+__PT_R6 = __PT_GPRS + 24
+__PT_R7 = __PT_GPRS + 28
+__PT_R8 = __PT_GPRS + 32
+__PT_R9 = __PT_GPRS + 36
+__PT_R10 = __PT_GPRS + 40
+__PT_R11 = __PT_GPRS + 44
+__PT_R12 = __PT_GPRS + 48
+__PT_R13 = __PT_GPRS + 52
+__PT_R14 = __PT_GPRS + 56
+__PT_R15 = __PT_GPRS + 60
 
 _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		 _TIF_MCCK_PENDING | _TIF_PER_TRAP )
@@ -58,133 +48,91 @@ STACK_SIZE = 1 << STACK_SHIFT
 
 #define BASED(name) name-system_call(%r13)
 
-#ifdef CONFIG_TRACE_IRQFLAGS
 	.macro	TRACE_IRQS_ON
+#ifdef CONFIG_TRACE_IRQFLAGS
 	basr	%r2,%r0
-	l	%r1,BASED(.Ltrace_irq_on_caller)
-	basr	%r14,%r1
+	l	%r1,BASED(.Lhardirqs_on)
+	basr	%r14,%r1		# call trace_hardirqs_on_caller
+#endif
 	.endm
 
 	.macro	TRACE_IRQS_OFF
+#ifdef CONFIG_TRACE_IRQFLAGS
 	basr	%r2,%r0
-	l	%r1,BASED(.Ltrace_irq_off_caller)
-	basr	%r14,%r1
-	.endm
-#else
-#define TRACE_IRQS_ON
-#define TRACE_IRQS_OFF
+	l	%r1,BASED(.Lhardirqs_off)
+	basr	%r14,%r1		# call trace_hardirqs_off_caller
 #endif
+	.endm
 
-#ifdef CONFIG_LOCKDEP
 	.macro	LOCKDEP_SYS_EXIT
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-	jz	0f
+#ifdef CONFIG_LOCKDEP
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jz	.+10
 	l	%r1,BASED(.Llockdep_sys_exit)
-	basr	%r14,%r1
-0:
-	.endm
-#else
-#define LOCKDEP_SYS_EXIT
+	basr	%r14,%r1		# call lockdep_sys_exit
 #endif
-
-/*
- * Register usage in interrupt handlers:
- *  R9  - pointer to current task structure
- *  R13 - pointer to literal pool
- *  R14 - return register for function calls
- *  R15 - kernel stack pointer
- */
-
-	.macro	UPDATE_VTIME lc_from,lc_to,lc_sum
-	lm	%r10,%r11,\lc_from
-	sl	%r10,\lc_to
-	sl	%r11,\lc_to+4
-	bc	3,BASED(0f)
-	sl	%r10,BASED(.Lc_1)
-0:	al	%r10,\lc_sum
-	al	%r11,\lc_sum+4
-	bc	12,BASED(1f)
-	al	%r10,BASED(.Lc_1)
-1:	stm	%r10,%r11,\lc_sum
-	.endm
-
-	.macro	SAVE_ALL_SVC psworg,savearea
-	stm	%r12,%r15,\savearea
-	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
-	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
-	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
-	.endm
-
-	.macro	SAVE_ALL_BASE savearea
-	stm	%r12,%r15,\savearea
-	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
 	.endm
 
-	.macro	SAVE_ALL_PGM psworg,savearea
-	tm	\psworg+1,0x01		# test problem state bit
+	.macro	CHECK_STACK stacksize,savearea
 #ifdef CONFIG_CHECK_STACK
-	bnz	BASED(1f)
-	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
-	bnz	BASED(2f)
-	la	%r12,\psworg
-	b	BASED(stack_overflow)
-#else
-	bz	BASED(2f)
+	tml	%r15,\stacksize - CONFIG_STACK_GUARD
+	la	%r14,\savearea
+	jz	stack_overflow
 #endif
-1:	l	%r15,__LC_KERNEL_STACK	# problem state -> load ksp
-2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
 	.endm
 
-	.macro	SAVE_ALL_ASYNC psworg,savearea
-	stm	%r12,%r15,\savearea
-	l	%r13,__LC_SVC_NEW_PSW+4	# load &system_call to %r13
-	la	%r12,\psworg
-	tm	\psworg+1,0x01		# test problem state bit
-	bnz	BASED(1f)		# from user -> load async stack
-	clc	\psworg+4(4),BASED(.Lcritical_end)
-	bhe	BASED(0f)
-	clc	\psworg+4(4),BASED(.Lcritical_start)
-	bl	BASED(0f)
-	l	%r14,BASED(.Lcleanup_critical)
-	basr	%r14,%r14
-	tm	1(%r12),0x01		# retest problem state after cleanup
-	bnz	BASED(1f)
-0:	l	%r14,__LC_ASYNC_STACK	# are we already on the async stack ?
+	.macro	SWITCH_ASYNC savearea,stack,shift
+	tmh	%r8,0x0001		# interrupting from user ?
+	jnz	1f
+	lr	%r14,%r9
+	sl	%r14,BASED(.Lcritical_start)
+	cl	%r14,BASED(.Lcritical_length)
+	jhe	0f
+	la	%r11,\savearea		# inside critical section, do cleanup
+	bras	%r14,cleanup_critical
+	tmh	%r8,0x0001		# retest problem state after cleanup
+	jnz	1f
+0:	l	%r14,\stack		# are we already on the target stack?
 	slr	%r14,%r15
-	sra	%r14,STACK_SHIFT
-#ifdef CONFIG_CHECK_STACK
-	bnz	BASED(1f)
-	tml	%r15,STACK_SIZE - CONFIG_STACK_GUARD
-	bnz	BASED(2f)
-	b	BASED(stack_overflow)
-#else
-	bz	BASED(2f)
-#endif
-1:	l	%r15,__LC_ASYNC_STACK
-2:	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
+	sra	%r14,\shift
+	jnz	1f
+	CHECK_STACK 1<<\shift,\savearea
+	j	2f
+1:	l	%r15,\stack		# load target stack
+2:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	.endm
 
-	.macro	CREATE_STACK_FRAME savearea
-	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	st	%r2,SP_ORIG_R2(%r15)	# store original content of gpr 2
-	mvc	SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
-	stm	%r0,%r11,SP_R0(%r15)	# store gprs %r0-%r11 to kernel stack
+	.macro	ADD64 high,low,timer
+	al	\high,\timer
+	al	\low,\timer+4
+	brc	12,.+8
+	ahi	\high,1
 	.endm
 
-	.macro	RESTORE_ALL psworg,sync
-	mvc	\psworg(8),SP_PSW(%r15)	# move user PSW to lowcore
-	.if !\sync
-	ni	\psworg+1,0xfd		# clear wait state bit
-	.endif
-	lm	%r0,%r15,SP_R0(%r15)	# load gprs 0-15 of user
-	stpt	__LC_EXIT_TIMER
-	lpsw	\psworg			# back to caller
+	.macro	SUB64 high,low,timer
+	sl	\high,\timer
+	sl	\low,\timer+4
+	brc	3,.+8
+	ahi	\high,-1
+	.endm
+
+	.macro	UPDATE_VTIME high,low,enter_timer
+	lm	\high,\low,__LC_EXIT_TIMER
+	SUB64	\high,\low,\enter_timer
+	ADD64	\high,\low,__LC_USER_TIMER
+	stm	\high,\low,__LC_USER_TIMER
+	lm	\high,\low,__LC_LAST_UPDATE_TIMER
+	SUB64	\high,\low,__LC_EXIT_TIMER
+	ADD64	\high,\low,__LC_SYSTEM_TIMER
+	stm	\high,\low,__LC_SYSTEM_TIMER
+	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
 	.endm
 
 	.macro	REENABLE_IRQS
-	mvc	__SF_EMPTY(1,%r15),SP_PSW(%r15)
-	ni	__SF_EMPTY(%r15),0xbf
-	ssm	__SF_EMPTY(%r15)
+	st	%r8,__LC_RETURN_PSW
+	ni	__LC_RETURN_PSW,0xbf
+	ssm	__LC_RETURN_PSW
 	.endm
 
 	.section .kprobes.text, "ax"
@@ -197,14 +145,13 @@
 * gpr2 = prev
 */
ENTRY(__switch_to)
-	basr	%r1,0
-0:	l	%r4,__THREAD_info(%r2)	# get thread_info of prev
+	l	%r4,__THREAD_info(%r2)	# get thread_info of prev
 	l	%r5,__THREAD_info(%r3)	# get thread_info of next
 	tm	__TI_flags+3(%r4),_TIF_MCCK_PENDING # machine check pending?
-	bz	1f-0b(%r1)
+	jz	0f
 	ni	__TI_flags+3(%r4),255-_TIF_MCCK_PENDING	# clear flag in prev
 	oi	__TI_flags+3(%r5),_TIF_MCCK_PENDING	# set it in next
-1:	stm	%r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
+0:	stm	%r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
 	st	%r15,__THREAD_ksp(%r2)	# store kernel stack of prev
 	l	%r15,__THREAD_ksp(%r3)	# load kernel stack of next
 	lctl	%c4,%c4,__TASK_pid(%r3)	# load pid to control reg. 4
@@ -224,48 +171,55 @@ __critical_start:
 
ENTRY(system_call)
 	stpt	__LC_SYNC_ENTER_TIMER
-sysc_saveall:
-	SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
-	mvc	SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-	oi	__TI_flags+3(%r12),_TIF_SYSCALL
+sysc_stm:
+	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
+	l	%r12,__LC_THREAD_INFO
+	l	%r13,__LC_SVC_NEW_PSW+4
+sysc_per:
+	l	%r15,__LC_KERNEL_STACK
+	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
 sysc_vtime:
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-sysc_stime:
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-sysc_update:
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
+	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
+	stm	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
+	mvc	__PT_PSW(8,%r11),__LC_SVC_OLD_PSW
+	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 sysc_do_svc:
-	xr	%r7,%r7
-	icm	%r7,3,SP_SVC_CODE+2(%r15)# load svc number and test for svc 0
-	bnz	BASED(sysc_nr_ok)	# svc number > 0
+	oi	__TI_flags+3(%r12),_TIF_SYSCALL
+	lh	%r8,__PT_INT_CODE+2(%r11)
+	sla	%r8,2			# shift and test for svc0
+	jnz	sysc_nr_ok
 	# svc 0: system call number in %r1
 	cl	%r1,BASED(.Lnr_syscalls)
-	bnl	BASED(sysc_nr_ok)
-	sth	%r1,SP_SVC_CODE+2(%r15)
-	lr	%r7,%r1		# copy svc number to %r7
+	jnl	sysc_nr_ok
+	sth	%r1,__PT_INT_CODE+2(%r11)
+	lr	%r8,%r1
+	sla	%r8,2
 sysc_nr_ok:
-	sll	%r7,2		# svc number *4
-	l	%r10,BASED(.Lsysc_table)
+	l	%r10,BASED(.Lsys_call_table)	# 31 bit system call table
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+	st	%r2,__PT_ORIG_GPR2(%r11)
+	st	%r7,STACK_FRAME_OVERHEAD(%r15)
+	l	%r9,0(%r8,%r10)		# get system call addr.
 	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
-	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
-	l	%r8,0(%r7,%r10)		# get system call addr.
-	bnz	BASED(sysc_tracesys)
-	basr	%r14,%r8		# call sys_xxxx
-	st	%r2,SP_R2(%r15)		# store return value (change R2 on stack)
+	jnz	sysc_tracesys
+	basr	%r14,%r9		# call sys_xxxx
+	st	%r2,__PT_R2(%r11)	# store return value
 
sysc_return:
 	LOCKDEP_SYS_EXIT
sysc_tif:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-	bno	BASED(sysc_restore)
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jno	sysc_restore
 	tm	__TI_flags+3(%r12),_TIF_WORK_SVC
-	bnz	BASED(sysc_work)	# there is work to do (signals etc.)
+	jnz	sysc_work		# check for work
 	ni	__TI_flags+3(%r12),255-_TIF_SYSCALL
sysc_restore:
-	RESTORE_ALL __LC_RETURN_PSW,1
+	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
+	stpt	__LC_EXIT_TIMER
+	lm	%r0,%r15,__PT_R0(%r11)
+	lpsw	__LC_RETURN_PSW
sysc_done:
 
 #
@@ -273,16 +227,16 @@ sysc_done:
 #
sysc_work:
 	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
-	bo	BASED(sysc_mcck_pending)
+	jo	sysc_mcck_pending
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	bo	BASED(sysc_reschedule)
+	jo	sysc_reschedule
 	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
-	bo	BASED(sysc_sigpending)
+	jo	sysc_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-	bo	BASED(sysc_notify_resume)
+	jo	sysc_notify_resume
 	tm	__TI_flags+3(%r12),_TIF_PER_TRAP
-	bo	BASED(sysc_singlestep)
-	b	BASED(sysc_return)	# beware of critical section cleanup
+	jo	sysc_singlestep
+	j	sysc_return		# beware of critical section cleanup
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
@@ -290,13 +244,13 @@ sysc_work:
sysc_reschedule:
 	l	%r1,BASED(.Lschedule)
 	la	%r14,BASED(sysc_return)
-	br	%r1			# call scheduler
+	br	%r1			# call schedule
 
 #
 # _TIF_MCCK_PENDING is set, call handler
 #
sysc_mcck_pending:
-	l	%r1,BASED(.Ls390_handle_mcck)
+	l	%r1,BASED(.Lhandle_mcck)
 	la	%r14,BASED(sysc_return)
 	br	%r1			# TIF bit will be cleared by handler
 
@@ -305,23 +259,24 @@ sysc_mcck_pending:
 #
sysc_sigpending:
 	ni	__TI_flags+3(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	lr	%r2,%r11		# pass pointer to pt_regs
 	l	%r1,BASED(.Ldo_signal)
 	basr	%r14,%r1		# call do_signal
 	tm	__TI_flags+3(%r12),_TIF_SYSCALL
-	bno	BASED(sysc_return)
-	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
-	xr	%r7,%r7			# svc 0 returns -ENOSYS
-	clc	SP_SVC_CODE+2(2,%r15),BASED(.Lnr_syscalls+2)
-	bnl	BASED(sysc_nr_ok)	# invalid svc number -> do svc 0
-	icm	%r7,3,SP_SVC_CODE+2(%r15)# load new svc number
-	b	BASED(sysc_nr_ok)	# restart svc
+	jno	sysc_return
+	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
+	xr	%r8,%r8			# svc 0 returns -ENOSYS
+	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
+	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
+	lh	%r8,__PT_INT_CODE+2(%r11)	# load new svc number
+	sla	%r8,2
+	j	sysc_nr_ok		# restart svc
 
 #
 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
 #
sysc_notify_resume:
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	lr	%r2,%r11		# pass pointer to pt_regs
 	l	%r1,BASED(.Ldo_notify_resume)
 	la	%r14,BASED(sysc_return)
 	br	%r1			# call do_notify_resume
@@ -331,56 +286,57 @@ sysc_notify_resume:
 #
sysc_singlestep:
 	ni	__TI_flags+3(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP)
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
-	la	%r14,BASED(sysc_return)	# load adr. of system return
-	br	%r1			# branch to do_per_trap
+	lr	%r2,%r11		# pass pointer to pt_regs
+	l	%r1,BASED(.Ldo_per_trap)
+	la	%r14,BASED(sysc_return)
+	br	%r1			# call do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 # and after the system call
 #
sysc_tracesys:
-	l	%r1,BASED(.Ltrace_entry)
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	l	%r1,BASED(.Ltrace_enter)
+	lr	%r2,%r11		# pass pointer to pt_regs
 	la	%r3,0
 	xr	%r0,%r0
-	icm	%r0,3,SP_SVC_CODE(%r15)
-	st	%r0,SP_R2(%r15)
-	basr	%r14,%r1
+	icm	%r0,3,__PT_INT_CODE+2(%r11)
+	st	%r0,__PT_R2(%r11)
+	basr	%r14,%r1		# call do_syscall_trace_enter
 	cl	%r2,BASED(.Lnr_syscalls)
-	bnl	BASED(sysc_tracenogo)
-	lr	%r7,%r2
-	sll	%r7,2			# svc number *4
-	l	%r8,0(%r7,%r10)
+	jnl	sysc_tracenogo
+	lr	%r8,%r2
+	sll	%r8,2
+	l	%r9,0(%r8,%r10)
sysc_tracego:
-	lm	%r3,%r6,SP_R3(%r15)
-	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
-	l	%r2,SP_ORIG_R2(%r15)
-	basr	%r14,%r8		# call sys_xxx
-	st	%r2,SP_R2(%r15)		# store return value
+	lm	%r3,%r7,__PT_R3(%r11)
+	st	%r7,STACK_FRAME_OVERHEAD(%r15)
+	l	%r2,__PT_ORIG_GPR2(%r11)
+	basr	%r14,%r9		# call sys_xxx
+	st	%r2,__PT_R2(%r11)	# store return value
sysc_tracenogo:
 	tm	__TI_flags+2(%r12),_TIF_TRACE >> 8
-	bz	BASED(sysc_return)
+	jz	sysc_return
 	l	%r1,BASED(.Ltrace_exit)
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
+	lr	%r2,%r11		# pass pointer to pt_regs
 	la	%r14,BASED(sysc_return)
-	br	%r1
+	br	%r1			# call do_syscall_trace_exit
 
 #
 # a new process exits the kernel with ret_from_fork
 #
ENTRY(ret_from_fork)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+	l	%r12,__LC_THREAD_INFO
 	l	%r13,__LC_SVC_NEW_PSW+4
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	SP_PSW+1(%r15),0x01	# forking a kernel thread ?
-	bo	BASED(0f)
-	st	%r15,SP_R15(%r15)	# store stack pointer for new kthread
-0:	l	%r1,BASED(.Lschedtail)
-	basr	%r14,%r1
+	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
+	jo	0f
+	st	%r15,__PT_R15(%r11)	# store stack pointer for new kthread
+0:	l	%r1,BASED(.Lschedule_tail)
+	basr	%r14,%r1		# call schedule_tail
 	TRACE_IRQS_ON
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	b	BASED(sysc_tracenogo)
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
+	j	sysc_tracenogo
 
 #
 # kernel_execve function needs to deal with pt_regs that is not
@@ -390,153 +346,98 @@ ENTRY(kernel_execve)
 	stm	%r12,%r15,48(%r15)
 	lr	%r14,%r15
 	l	%r13,__LC_SVC_NEW_PSW+4
-	s	%r15,BASED(.Lc_spsize)
+	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	st	%r14,__SF_BACKCHAIN(%r15)
-	la	%r12,SP_PTREGS(%r15)
+	la	%r12,STACK_FRAME_OVERHEAD(%r15)
 	xc	0(__PT_SIZE,%r12),0(%r12)
 	l	%r1,BASED(.Ldo_execve)
 	lr	%r5,%r12
-	basr	%r14,%r1
+	basr	%r14,%r1		# call do_execve
 	ltr	%r2,%r2
-	be	BASED(0f)
-	a	%r15,BASED(.Lc_spsize)
+	je	0f
+	ahi	%r15,(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	lm	%r12,%r15,48(%r15)
 	br	%r14			# execve succeeded.
-0:	stnsm	__SF_EMPTY(%r15),0xfc	# disable interrupts
+0:	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	l	%r15,__LC_KERNEL_STACK	# load ksp
-	s	%r15,BASED(.Lc_spsize)	# make room for registers & psw
-	mvc	SP_PTREGS(__PT_SIZE,%r15),0(%r12)	# copy pt_regs
+	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+	mvc	0(__PT_SIZE,%r11),0(%r12)	# copy pt_regs
 	l	%r12,__LC_THREAD_INFO
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	l	%r1,BASED(.Lexecve_tail)
-	basr	%r14,%r1
-	b	BASED(sysc_return)
+	basr	%r14,%r1		# call execve_tail
+	j	sysc_return
 
/*
 * Program check handler routine
 */
 
ENTRY(pgm_check_handler)
-/*
- * First we need to check for a special case:
- * Single stepping an instruction that disables the PER event mask will
- * cause a PER event AFTER the mask has been set. Example: SVC or LPSW.
- * For a single stepped SVC the program check handler gets control after
- * the SVC new PSW has been loaded. But we want to execute the SVC first and
- * then handle the PER event. Therefore we update the SVC old PSW to point
- * to the pgm_check_handler and branch to the SVC handler after we checked
- * if we have to load the kernel stack register.
- * For every other possible cause for PER event without the PER mask set
- * we just ignore the PER event (FIXME: is there anything we have to do
- * for LPSW?).
- */
 	stpt	__LC_SYNC_ENTER_TIMER
-	SAVE_ALL_BASE __LC_SAVE_AREA
-	tm	__LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception
-	bnz	BASED(pgm_per)		# got per exception -> special case
-	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	mvc	SP_PSW(8,%r15),__LC_PGM_OLD_PSW
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	bz	BASED(pgm_no_vtime)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-pgm_no_vtime:
-	l	%r3,__LC_PGM_ILC	# load program interruption code
-	l	%r4,__LC_TRANS_EXC_CODE
-	REENABLE_IRQS
-	la	%r8,0x7f
-	nr	%r8,%r3
-	sll	%r8,2
-	l	%r1,BASED(.Ljump_table)
-	l	%r1,0(%r8,%r1)		# load address of handler routine
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	basr	%r14,%r1		# branch to interrupt-handler
-pgm_exit:
-	b	BASED(sysc_return)
-
-#
-# handle per exception
-#
-pgm_per:
-	tm	__LC_PGM_OLD_PSW,0x40	# test if per event recording is on
-	bnz	BASED(pgm_per_std)	# ok, normal per event from user space
-# ok its one of the special cases, now we need to find out which one
-	clc	__LC_PGM_OLD_PSW(8),__LC_SVC_NEW_PSW
-	be	BASED(pgm_svcper)
-# no interesting special case, ignore PER event
-	lm	%r12,%r15,__LC_SAVE_AREA
-	lpsw	0x28
-
-#
-# Normal per exception
-#
-pgm_per_std:
-	SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	mvc	SP_PSW(8,%r15),__LC_PGM_OLD_PSW
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	bz	BASED(pgm_no_vtime2)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-pgm_no_vtime2:
+	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
+	l	%r12,__LC_THREAD_INFO
+	l	%r13,__LC_SVC_NEW_PSW+4
+	lm	%r8,%r9,__LC_PGM_OLD_PSW
+	tmh	%r8,0x0001		# test problem state bit
+	jnz	1f			# -> fault in user space
+	tmh	%r8,0x4000		# PER bit set in old PSW ?
+	jnz	0f			# -> enabled, can't be a double fault
+	tm	__LC_PGM_ILC+3,0x80	# check for per exception
+	jnz	pgm_svcper		# -> single stepped svc
+0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+	j	2f
+1:	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
+	l	%r15,__LC_KERNEL_STACK
+2:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+	stm	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
+	stm	%r8,%r9,__PT_PSW(%r11)
+	mvc	__PT_INT_CODE(4,%r11),__LC_PGM_ILC
+	mvc	__PT_INT_PARM_LONG(4,%r11),__LC_TRANS_EXC_CODE
+	tm	__LC_PGM_ILC+3,0x80	# check for per exception
+	jz	0f
 	l	%r1,__TI_task(%r12)
-	tm	SP_PSW+1(%r15),0x01	# kernel per event ?
-	bz	BASED(kernel_per)
-	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
+	tmh	%r8,0x0001		# kernel per event ?
+	jz	pgm_kprobe
+	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
 	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
+	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CAUSE
 	mvc	__THREAD_per_paid(1,%r1),__LC_PER_PAID
-	oi	__TI_flags+3(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP
-	l	%r3,__LC_PGM_ILC	# load program interruption code
-	l	%r4,__LC_TRANS_EXC_CODE
-	REENABLE_IRQS
-	la	%r8,0x7f
-	nr	%r8,%r3			# clear per-event-bit and ilc
-	be	BASED(pgm_exit2)	# only per or per+check ?
-	sll	%r8,2
+0:	REENABLE_IRQS
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	l	%r1,BASED(.Ljump_table)
-	l	%r1,0(%r8,%r1)		# load address of handler routine
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	la	%r10,0x7f
+	n	%r10,__PT_INT_CODE(%r11)
+	je	sysc_return
+	sll	%r10,2
+	l	%r1,0(%r10,%r1)		# load address of handler routine
+	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# branch to interrupt-handler
-pgm_exit2:
-	b	BASED(sysc_return)
+	j	sysc_return
 
 #
-# it was a single stepped SVC that is causing all the trouble
+# PER event in supervisor state, must be kprobes
 #
-pgm_svcper:
-	SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA
-	CREATE_STACK_FRAME __LC_SAVE_AREA
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	mvc	SP_PSW(8,%r15),__LC_SVC_OLD_PSW
-	mvc	SP_SVC_CODE(4,%r15),__LC_SVC_ILC
-	oi	__TI_flags+3(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
-	l	%r8,__TI_task(%r12)
-	mvc	__THREAD_per_cause(2,%r8),__LC_PER_CAUSE
-	mvc	__THREAD_per_address(4,%r8),__LC_PER_ADDRESS
-	mvc	__THREAD_per_paid(1,%r8),__LC_PER_PAID
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	lm	%r2,%r6,SP_R2(%r15)	# load svc arguments
-	b	BASED(sysc_do_svc)
+pgm_kprobe:
+	REENABLE_IRQS
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+	l	%r1,BASED(.Ldo_per_trap)
+	lr	%r2,%r11		# pass pointer to pt_regs
+	basr	%r14,%r1		# call do_per_trap
+	j	sysc_return
 
 #
-# per was called from kernel, must be kprobes
+# single stepped system call
 #
-kernel_per:
-	REENABLE_IRQS
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	l	%r1,BASED(.Lhandle_per)	# load adr. of per handler
-	basr	%r14,%r1		# branch to do_single_step
-	b	BASED(pgm_exit)
+pgm_svcper:
+	oi	__TI_flags+3(%r12),_TIF_PER_TRAP
+	mvc	__LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
+	mvc	__LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
+	lpsw	__LC_RETURN_PSW		# branch to sysc_per and enable irqs
 
/*
 * IO interrupt handler routine
@@ -545,28 +446,35 @@ kernel_per:
ENTRY(io_int_handler)
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
-	SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+16
-	CREATE_STACK_FRAME __LC_SAVE_AREA+16
-	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	bz	BASED(io_no_vtime)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-io_no_vtime:
+	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
+	l	%r12,__LC_THREAD_INFO
+	l	%r13,__LC_SVC_NEW_PSW+4
+	lm	%r8,%r9,__LC_IO_OLD_PSW
+	tmh	%r8,0x0001		# interrupting from user ?
+	jz	io_skip
+	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+io_skip:
+	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+	stm	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
+	stm	%r8,%r9,__PT_PSW(%r11)
 	TRACE_IRQS_OFF
-	l	%r1,BASED(.Ldo_IRQ)	# load address of do_IRQ
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
-	basr	%r14,%r1		# branch to standard irq handler
+	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
+	l	%r1,BASED(.Ldo_IRQ)
+	lr	%r2,%r11		# pass pointer to pt_regs
+	basr	%r14,%r1		# call do_IRQ
io_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
io_tif:
 	tm	__TI_flags+3(%r12),_TIF_WORK_INT
-	bnz	BASED(io_work)		# there is work to do (signals etc.)
+	jnz	io_work			# there is work to do (signals etc.)
io_restore:
-	RESTORE_ALL __LC_RETURN_PSW,0
+	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
+	ni	__LC_RETURN_PSW+1,0xfd	# clean wait state bit
+	stpt	__LC_EXIT_TIMER
+	lm	%r0,%r15,__PT_R0(%r11)
+	lpsw	__LC_RETURN_PSW
io_done:
 
 #
@@ -577,28 +485,29 @@ io_done:
 # Before any work can be done, a switch to the kernel stack is required.
 #
io_work:
-	tm	SP_PSW+1(%r15),0x01	# returning to user ?
-	bo	BASED(io_work_user)	# yes -> do resched & signal
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jo	io_work_user		# yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r12)
-	bnz	BASED(io_restore)	# preemption disabled
+	jnz	io_restore		# preemption disabled
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	bno	BASED(io_restore)
+	jno	io_restore
 	# switch to kernel stack
-	l	%r1,SP_R15(%r15)
-	s	%r1,BASED(.Lc_spsize)
-	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	l	%r1,__PT_R15(%r11)
+	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 	lr	%r15,%r1
 	# TRACE_IRQS_ON already done at io_return, call
 	# TRACE_IRQS_OFF to keep things symmetrical
 	TRACE_IRQS_OFF
-	l	%r1,BASED(.Lpreempt_schedule_irq)
+	l	%r1,BASED(.Lpreempt_irq)
 	basr	%r14,%r1		# call preempt_schedule_irq
-	b	BASED(io_return)
+	j	io_return
 #else
-	b	BASED(io_restore)
+	j	io_restore
 #endif
 
 #
@@ -606,9 +515,10 @@ io_work:
 #
io_work_user:
 	l	%r1,__LC_KERNEL_STACK
-	s	%r1,BASED(.Lc_spsize)
-	mvc	SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15)
-	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain
+	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
+	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
+	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 	lr	%r15,%r1
 
 #
@@ -618,24 +528,24 @@ io_work_user:
 #
io_work_tif:
 	tm	__TI_flags+3(%r12),_TIF_MCCK_PENDING
-	bo	BASED(io_mcck_pending)
+	jo	io_mcck_pending
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	bo	BASED(io_reschedule)
+	jo	io_reschedule
 	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
-	bo	BASED(io_sigpending)
+	jo	io_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-	bo	BASED(io_notify_resume)
-	b	BASED(io_return)	# beware of critical section cleanup
+	jo	io_notify_resume
+	j	io_return		# beware of critical section cleanup
 
 #
 # _TIF_MCCK_PENDING is set, call handler
 #
io_mcck_pending:
 	# TRACE_IRQS_ON already done at io_return
-	l	%r1,BASED(.Ls390_handle_mcck)
+	l	%r1,BASED(.Lhandle_mcck)
 	basr	%r14,%r1		# TIF bit will be cleared by handler
 	TRACE_IRQS_OFF
-	b	BASED(io_return)
+	j	io_return
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
@@ -643,37 +553,37 @@ io_mcck_pending:
io_reschedule:
 	# TRACE_IRQS_ON already done at io_return
 	l	%r1,BASED(.Lschedule)
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	basr	%r14,%r1		# call scheduler
-	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	b	BASED(io_return)
+	j	io_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
io_sigpending:
 	# TRACE_IRQS_ON already done at io_return
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	l	%r1,BASED(.Ldo_signal)
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
+	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call do_signal
-	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	b	BASED(io_return)
+	j	io_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
io_notify_resume:
 	# TRACE_IRQS_ON already done at io_return
-	stosm	__SF_EMPTY(%r15),0x03	# reenable interrupts
-	la	%r2,SP_PTREGS(%r15)	# load pt_regs
 	l	%r1,BASED(.Ldo_notify_resume)
-	basr	%r14,%r1		# call do_signal
-	stnsm	__SF_EMPTY(%r15),0xfc	# disable I/O and ext. interrupts
+	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
+	lr	%r2,%r11		# pass pointer to pt_regs
+	basr	%r14,%r1		# call do_notify_resume
+	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	b	BASED(io_return)
+	j	io_return
 
/*
 * External interrupt handler routine
@@ -682,23 +592,25 @@ io_notify_resume:
ENTRY(ext_int_handler)
 	stck	__LC_INT_CLOCK
 	stpt	__LC_ASYNC_ENTER_TIMER
-	SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+16
-	CREATE_STACK_FRAME __LC_SAVE_AREA+16
-	mvc	SP_PSW(8,%r15),0(%r12)	# move user PSW to stack
-	l	%r12,__LC_THREAD_INFO	# load pointer to thread_info struct
-	tm	SP_PSW+1(%r15),0x01	# interrupting from user ?
-	bz	BASED(ext_no_vtime)
-	UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER
-	UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER
-ext_no_vtime:
+	stm	%r8,%r15,__LC_SAVE_AREA_ASYNC
+	l	%r12,__LC_THREAD_INFO
+	l	%r13,__LC_SVC_NEW_PSW+4
+	lm	%r8,%r9,__LC_EXT_OLD_PSW
+	tmh	%r8,0x0001		# interrupting from user ?
+	jz	ext_skip
+	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
+ext_skip:
+	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
+	stm	%r0,%r7,__PT_R0(%r11)
+	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
+	stm	%r8,%r9,__PT_PSW(%r11)
 	TRACE_IRQS_OFF
-	la	%r2,SP_PTREGS(%r15)	# address of register-save area
+	lr	%r2,%r11		# pass pointer to pt_regs
 	l	%r3,__LC_CPU_ADDRESS	# get cpu address + interruption code
 	l	%r4,__LC_EXT_PARAMS	# get external parameters
 	l	%r1,BASED(.Ldo_extint)
-	basr	%r14,%r1
-	b	BASED(io_return)
+	basr	%r14,%r1		# call do_extint
+	j	io_return
 
__critical_end:
 
@@ -710,82 +622,74 @@ ENTRY(mcck_int_handler)
 	stck	__LC_MCCK_CLOCK
 	spt	__LC_CPU_TIMER_SAVE_AREA # revalidate cpu timer
 	lm	%r0,%r15,__LC_GPREGS_SAVE_AREA # revalidate gprs
-	SAVE_ALL_BASE __LC_SAVE_AREA+32
-	la	%r12,__LC_MCK_OLD_PSW
+	l	%r12,__LC_THREAD_INFO
+	l	%r13,__LC_SVC_NEW_PSW+4
+	lm	%r8,%r9,__LC_MCK_OLD_PSW
 	tm	__LC_MCCK_CODE,0x80	# system damage?
-	bo	BASED(mcck_int_main)	# yes -> rest of mcck code invalid
-	mvc	__LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA
+	jo	mcck_panic		# yes -> rest of mcck code invalid
+	la	%r14,__LC_CPU_TIMER_SAVE_AREA
+	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
-	bo	BASED(1f)
+	jo	3f
 	la	%r14,__LC_SYNC_ENTER_TIMER
 	clc	0(8,%r14),__LC_ASYNC_ENTER_TIMER
-	bl	BASED(0f)
+	jl	0f
 	la	%r14,__LC_ASYNC_ENTER_TIMER
0:	clc	0(8,%r14),__LC_EXIT_TIMER
-	bl	BASED(0f)
+	jl	1f
 	la	%r14,__LC_EXIT_TIMER
-0:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
-	bl	BASED(0f)
+1:	clc	0(8,%r14),__LC_LAST_UPDATE_TIMER
+	jl	2f
 	la	%r14,__LC_LAST_UPDATE_TIMER
-0:	spt	0(%r14)
+2:	spt	0(%r14)
 	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
-1:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
-	bno	BASED(mcck_int_main)	# no -> skip cleanup critical
-	tm	__LC_MCK_OLD_PSW+1,0x01	# test problem state bit
-	bnz	BASED(mcck_int_main)	# from user -> load async stack
-	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_end)
-	bhe	BASED(mcck_int_main)
-	clc	__LC_MCK_OLD_PSW+4(4),BASED(.Lcritical_start)
-	bl	BASED(mcck_int_main)
-	l	%r14,BASED(.Lcleanup_critical)
-	basr	%r14,%r14
-mcck_int_main:
-	l	%r14,__LC_PANIC_STACK	# are we already on the panic stack?
- slr %r14,%r15 - sra %r14,PAGE_SHIFT - be BASED(0f) - l %r15,__LC_PANIC_STACK # load panic stack -0: s %r15,BASED(.Lc_spsize) # make room for registers & psw - CREATE_STACK_FRAME __LC_SAVE_AREA+32 - mvc SP_PSW(8,%r15),0(%r12) - l %r12,__LC_THREAD_INFO # load pointer to thread_info struct - tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? - bno BASED(mcck_no_vtime) # no -> skip cleanup critical - tm SP_PSW+1(%r15),0x01 # interrupting from user ? - bz BASED(mcck_no_vtime) - UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER -mcck_no_vtime: - la %r2,SP_PTREGS(%r15) # load pt_regs - l %r1,BASED(.Ls390_mcck) - basr %r14,%r1 # call machine check handler - tm SP_PSW+1(%r15),0x01 # returning to user ? - bno BASED(mcck_return) +3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? + jno mcck_panic # no -> skip cleanup critical + tm %r8,0x0001 # interrupting from user ? + jz mcck_skip + UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER +mcck_skip: + SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT + mvc __PT_R0(64,%r11),__LC_GPREGS_SAVE_AREA + stm %r8,%r9,__PT_PSW(%r11) + xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) + l %r1,BASED(.Ldo_machine_check) + lr %r2,%r11 # pass pointer to pt_regs + basr %r14,%r1 # call s390_do_machine_check + tm __PT_PSW+1(%r11),0x01 # returning to user ? + jno mcck_return l %r1,__LC_KERNEL_STACK # switch to kernel stack - s %r1,BASED(.Lc_spsize) - mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) - xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) # clear back chain + ahi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) + xc __SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1) + la %r11,STACK_FRAME_OVERHEAD(%r15) lr %r15,%r1 - stosm __SF_EMPTY(%r15),0x04 # turn dat on + ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off tm __TI_flags+3(%r12),_TIF_MCCK_PENDING - bno BASED(mcck_return) + jno mcck_return TRACE_IRQS_OFF - l %r1,BASED(.Ls390_handle_mcck) - basr %r14,%r1 # call machine check handler + l %r1,BASED(.Lhandle_mcck) + basr %r14,%r1 # call s390_handle_mcck TRACE_IRQS_ON mcck_return: - mvc __LC_RETURN_MCCK_PSW(8),SP_PSW(%r15) # move return PSW + mvc __LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 
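The kernel-stack switch a few lines up is the pattern used throughout the rewritten entry code: reserve a standard call frame plus a pt_regs area below the top of the kernel stack, copy the already-saved registers over, and zero the backchain slot. Roughly, in C (STACK_FRAME_OVERHEAD is the kernel constant, 96 bytes on 31-bit; the rest is schematic):

#include <string.h>

#define STACK_FRAME_OVERHEAD 96         /* 31-bit value, 160 on 64-bit */

static unsigned long switch_stack(unsigned long kernel_stack,
                                  const void *old_regs, size_t pt_size)
{
        unsigned long sp = kernel_stack - STACK_FRAME_OVERHEAD - pt_size;

        /* mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) */
        memcpy((void *)(sp + STACK_FRAME_OVERHEAD), old_regs, pt_size);
        memset((void *)sp, 0, 4);       /* xc __SF_BACKCHAIN(4,%r1),... */
        return sp;                      /* new %r15; pt_regs becomes %r11 */
}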
- bno BASED(0f) - lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 + jno 0f + lm %r0,%r15,__PT_R0(%r11) stpt __LC_EXIT_TIMER - lpsw __LC_RETURN_MCCK_PSW # back to caller -0: lm %r0,%r15,SP_R0(%r15) # load gprs 0-15 - lpsw __LC_RETURN_MCCK_PSW # back to caller + lpsw __LC_RETURN_MCCK_PSW +0: lm %r0,%r15,__PT_R0(%r11) + lpsw __LC_RETURN_MCCK_PSW - RESTORE_ALL __LC_RETURN_MCCK_PSW,0 +mcck_panic: + l %r14,__LC_PANIC_STACK + slr %r14,%r15 + sra %r14,PAGE_SHIFT + jz 0f + l %r15,__LC_PANIC_STACK +0: ahi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + j mcck_skip /* * Restart interruption handler, kick starter for additional CPUs @@ -799,18 +703,18 @@ restart_base: stck __LC_LAST_UPDATE_CLOCK mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) - l %r15,__LC_SAVE_AREA+60 # load ksp + l %r15,__LC_GPREGS_SAVE_AREA+60 # load ksp lctl %c0,%c15,__LC_CREGS_SAVE_AREA # get new ctl regs lam %a0,%a15,__LC_AREGS_SAVE_AREA - lm %r6,%r15,__SF_GPRS(%r15) # load registers from clone + lm %r6,%r15,__SF_GPRS(%r15)# load registers from clone l %r1,__LC_THREAD_INFO mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER - stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on + ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off basr %r14,0 l %r14,restart_addr-.(%r14) - basr %r14,%r14 # branch to start_secondary + basr %r14,%r14 # call start_secondary restart_addr: .long start_secondary .align 8 @@ -835,19 +739,19 @@ restart_go: # PSW restart interrupt handler # ENTRY(psw_restart_int_handler) - st %r15,__LC_SAVE_AREA+48(%r0) # save r15 + st %r15,__LC_SAVE_AREA_RESTART basr %r15,0 0: l %r15,.Lrestart_stack-0b(%r15) # load restart stack l %r15,0(%r15) - ahi %r15,-SP_SIZE # make room for pt_regs - stm %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack - mvc SP_R15(4,%r15),__LC_SAVE_AREA+48(%r0)# store saved %r15 to stack - mvc SP_PSW(8,%r15),__LC_RST_OLD_PSW(%r0) # store restart old psw - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 + ahi %r15,-__PT_SIZE # create pt_regs on stack + stm %r0,%r14,__PT_R0(%r15) + mvc __PT_R15(4,%r15),__LC_SAVE_AREA_RESTART + mvc __PT_PSW(8,%r15),__LC_RST_OLD_PSW # store restart old psw + ahi %r15,-STACK_FRAME_OVERHEAD + xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) basr %r14,0 1: l %r14,.Ldo_restart-1b(%r14) basr %r14,%r14 - basr %r14,0 # load disabled wait PSW if 2: lpsw restart_psw_crash-2b(%r14) # do_restart returns .align 4 @@ -869,215 +773,174 @@ restart_psw_crash: */ stack_overflow: l %r15,__LC_PANIC_STACK # change to panic stack - sl %r15,BASED(.Lc_spsize) - mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack - stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack - la %r1,__LC_SAVE_AREA - ch %r12,BASED(.L0x020) # old psw addr == __LC_SVC_OLD_PSW ? - be BASED(0f) - ch %r12,BASED(.L0x028) # old psw addr == __LC_PGM_OLD_PSW ? 
- be BASED(0f) - la %r1,__LC_SAVE_AREA+16 -0: mvc SP_R12(16,%r15),0(%r1) # move %r12-%r15 to stack - xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear back chain - l %r1,BASED(1f) # branch to kernel_stack_overflow - la %r2,SP_PTREGS(%r15) # load pt_regs - br %r1 + ahi %r15,-__PT_SIZE # create pt_regs + stm %r0,%r7,__PT_R0(%r15) + stm %r8,%r9,__PT_PSW(%r15) + mvc __PT_R8(32,%r11),0(%r14) + lr %r15,%r11 + ahi %r15,-STACK_FRAME_OVERHEAD + l %r1,BASED(1f) + xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) + lr %r2,%r11 # pass pointer to pt_regs + br %r1 # branch to kernel_stack_overflow 1: .long kernel_stack_overflow #endif -cleanup_table_system_call: - .long system_call + 0x80000000, sysc_do_svc + 0x80000000 -cleanup_table_sysc_tif: - .long sysc_tif + 0x80000000, sysc_restore + 0x80000000 -cleanup_table_sysc_restore: - .long sysc_restore + 0x80000000, sysc_done + 0x80000000 -cleanup_table_io_tif: - .long io_tif + 0x80000000, io_restore + 0x80000000 -cleanup_table_io_restore: - .long io_restore + 0x80000000, io_done + 0x80000000 +cleanup_table: + .long system_call + 0x80000000 + .long sysc_do_svc + 0x80000000 + .long sysc_tif + 0x80000000 + .long sysc_restore + 0x80000000 + .long sysc_done + 0x80000000 + .long io_tif + 0x80000000 + .long io_restore + 0x80000000 + .long io_done + 0x80000000 cleanup_critical: - clc 4(4,%r12),BASED(cleanup_table_system_call) - bl BASED(0f) - clc 4(4,%r12),BASED(cleanup_table_system_call+4) - bl BASED(cleanup_system_call) -0: - clc 4(4,%r12),BASED(cleanup_table_sysc_tif) - bl BASED(0f) - clc 4(4,%r12),BASED(cleanup_table_sysc_tif+4) - bl BASED(cleanup_sysc_tif) -0: - clc 4(4,%r12),BASED(cleanup_table_sysc_restore) - bl BASED(0f) - clc 4(4,%r12),BASED(cleanup_table_sysc_restore+4) - bl BASED(cleanup_sysc_restore) -0: - clc 4(4,%r12),BASED(cleanup_table_io_tif) - bl BASED(0f) - clc 4(4,%r12),BASED(cleanup_table_io_tif+4) - bl BASED(cleanup_io_tif) -0: - clc 4(4,%r12),BASED(cleanup_table_io_restore) - bl BASED(0f) - clc 4(4,%r12),BASED(cleanup_table_io_restore+4) - bl BASED(cleanup_io_restore) -0: - br %r14 + cl %r9,BASED(cleanup_table) # system_call + jl 0f + cl %r9,BASED(cleanup_table+4) # sysc_do_svc + jl cleanup_system_call + cl %r9,BASED(cleanup_table+8) # sysc_tif + jl 0f + cl %r9,BASED(cleanup_table+12) # sysc_restore + jl cleanup_sysc_tif + cl %r9,BASED(cleanup_table+16) # sysc_done + jl cleanup_sysc_restore + cl %r9,BASED(cleanup_table+20) # io_tif + jl 0f + cl %r9,BASED(cleanup_table+24) # io_restore + jl cleanup_io_tif + cl %r9,BASED(cleanup_table+28) # io_done + jl cleanup_io_restore +0: br %r14 cleanup_system_call: - mvc __LC_RETURN_PSW(8),0(%r12) - clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+4) - bh BASED(0f) - mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER - c %r12,BASED(.Lmck_old_psw) - be BASED(0f) + # check if stpt has been executed + cl %r9,BASED(cleanup_system_call_insn) + jh 0f mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER -0: c %r12,BASED(.Lmck_old_psw) - la %r12,__LC_SAVE_AREA+32 - be BASED(0f) - la %r12,__LC_SAVE_AREA+16 -0: clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+8) - bhe BASED(cleanup_vtime) - clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn) - bh BASED(0f) - mvc __LC_SAVE_AREA(16),0(%r12) -0: st %r13,4(%r12) - l %r15,__LC_KERNEL_STACK # problem state -> load ksp - s %r15,BASED(.Lc_spsize) # make room for registers & psw - st %r15,12(%r12) - CREATE_STACK_FRAME __LC_SAVE_AREA - mvc 0(4,%r12),__LC_THREAD_INFO - l %r12,__LC_THREAD_INFO - mvc SP_PSW(8,%r15),__LC_SVC_OLD_PSW - mvc 
SP_SVC_CODE(4,%r15),__LC_SVC_ILC - oi __TI_flags+3(%r12),_TIF_SYSCALL -cleanup_vtime: - clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+12) - bhe BASED(cleanup_stime) - UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER -cleanup_stime: - clc __LC_RETURN_PSW+4(4),BASED(cleanup_system_call_insn+16) - bh BASED(cleanup_update) - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER -cleanup_update: + chi %r11,__LC_SAVE_AREA_ASYNC + je 0f + mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER +0: # check if stm has been executed + cl %r9,BASED(cleanup_system_call_insn+4) + jh 0f + mvc __LC_SAVE_AREA_SYNC(32),0(%r11) +0: # set up saved registers r12, and r13 + st %r12,16(%r11) # r12 thread-info pointer + st %r13,20(%r11) # r13 literal-pool pointer + # check if the user time calculation has been done + cl %r9,BASED(cleanup_system_call_insn+8) + jh 0f + l %r10,__LC_EXIT_TIMER + l %r15,__LC_EXIT_TIMER+4 + SUB64 %r10,%r15,__LC_SYNC_ENTER_TIMER + ADD64 %r10,%r15,__LC_USER_TIMER + st %r10,__LC_USER_TIMER + st %r15,__LC_USER_TIMER+4 +0: # check if the system time calculation has been done + cl %r9,BASED(cleanup_system_call_insn+12) + jh 0f + l %r10,__LC_LAST_UPDATE_TIMER + l %r15,__LC_LAST_UPDATE_TIMER+4 + SUB64 %r10,%r15,__LC_EXIT_TIMER + ADD64 %r10,%r15,__LC_SYSTEM_TIMER + st %r10,__LC_SYSTEM_TIMER + st %r15,__LC_SYSTEM_TIMER+4 +0: # update accounting time stamp mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER - mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_system_call+4) - la %r12,__LC_RETURN_PSW + # set up saved register 11 + l %r15,__LC_KERNEL_STACK + ahi %r15,-__PT_SIZE + st %r15,12(%r11) # r11 pt_regs pointer + # fill pt_regs + mvc __PT_R8(32,%r15),__LC_SAVE_AREA_SYNC + stm %r0,%r7,__PT_R0(%r15) + mvc __PT_PSW(8,%r15),__LC_SVC_OLD_PSW + mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC + # setup saved register 15 + ahi %r15,-STACK_FRAME_OVERHEAD + st %r15,28(%r11) # r15 stack pointer + # set new psw address and exit + l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 br %r14 cleanup_system_call_insn: - .long sysc_saveall + 0x80000000 .long system_call + 0x80000000 - .long sysc_vtime + 0x80000000 - .long sysc_stime + 0x80000000 - .long sysc_update + 0x80000000 + .long sysc_stm + 0x80000000 + .long sysc_vtime + 0x80000000 + 36 + .long sysc_vtime + 0x80000000 + 76 cleanup_sysc_tif: - mvc __LC_RETURN_PSW(4),0(%r12) - mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_sysc_tif) - la %r12,__LC_RETURN_PSW + l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000 br %r14 cleanup_sysc_restore: - clc 4(4,%r12),BASED(cleanup_sysc_restore_insn) - be BASED(2f) - mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER - c %r12,BASED(.Lmck_old_psw) - be BASED(0f) - mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER -0: clc 4(4,%r12),BASED(cleanup_sysc_restore_insn+4) - be BASED(2f) - mvc __LC_RETURN_PSW(8),SP_PSW(%r15) - c %r12,BASED(.Lmck_old_psw) - la %r12,__LC_SAVE_AREA+32 - be BASED(1f) - la %r12,__LC_SAVE_AREA+16 -1: mvc 0(16,%r12),SP_R12(%r15) - lm %r0,%r11,SP_R0(%r15) - l %r15,SP_R15(%r15) -2: la %r12,__LC_RETURN_PSW + cl %r9,BASED(cleanup_sysc_restore_insn) + jhe 0f + l %r9,12(%r11) # get saved pointer to pt_regs + mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) + mvc 0(32,%r11),__PT_R8(%r9) + lm %r0,%r7,__PT_R0(%r9) +0: lm %r8,%r9,__LC_RETURN_PSW br %r14 cleanup_sysc_restore_insn: .long sysc_done - 4 + 0x80000000 - .long sysc_done - 8 + 0x80000000 cleanup_io_tif: - mvc __LC_RETURN_PSW(4),0(%r12) - mvc __LC_RETURN_PSW+4(4),BASED(cleanup_table_io_tif) - la %r12,__LC_RETURN_PSW + l 
%r9,BASED(cleanup_table+20) # io_tif + 0x80000000 br %r14 cleanup_io_restore: - clc 4(4,%r12),BASED(cleanup_io_restore_insn) - be BASED(1f) - mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER - clc 4(4,%r12),BASED(cleanup_io_restore_insn+4) - be BASED(1f) - mvc __LC_RETURN_PSW(8),SP_PSW(%r15) - mvc __LC_SAVE_AREA+32(16),SP_R12(%r15) - lm %r0,%r11,SP_R0(%r15) - l %r15,SP_R15(%r15) -1: la %r12,__LC_RETURN_PSW + cl %r9,BASED(cleanup_io_restore_insn) + jhe 0f + l %r9,12(%r11) # get saved r11 pointer to pt_regs + mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) + ni __LC_RETURN_PSW+1,0xfd # clear wait state bit + mvc 0(32,%r11),__PT_R8(%r9) + lm %r0,%r7,__PT_R0(%r9) +0: lm %r8,%r9,__LC_RETURN_PSW br %r14 cleanup_io_restore_insn: .long io_done - 4 + 0x80000000 - .long io_done - 8 + 0x80000000 /* * Integer constants */ - .align 4 -.Lc_spsize: .long SP_SIZE -.Lc_overhead: .long STACK_FRAME_OVERHEAD -.Lnr_syscalls: .long NR_syscalls -.L0x018: .short 0x018 -.L0x020: .short 0x020 -.L0x028: .short 0x028 -.L0x030: .short 0x030 -.L0x038: .short 0x038 -.Lc_1: .long 1 + .align 4 +.Lnr_syscalls: .long NR_syscalls /* * Symbol constants */ -.Ls390_mcck: .long s390_do_machine_check -.Ls390_handle_mcck: - .long s390_handle_mcck -.Lmck_old_psw: .long __LC_MCK_OLD_PSW -.Ldo_IRQ: .long do_IRQ -.Ldo_extint: .long do_extint -.Ldo_signal: .long do_signal -.Ldo_notify_resume: - .long do_notify_resume -.Lhandle_per: .long do_per_trap -.Ldo_execve: .long do_execve -.Lexecve_tail: .long execve_tail -.Ljump_table: .long pgm_check_table -.Lschedule: .long schedule +.Ldo_machine_check: .long s390_do_machine_check +.Lhandle_mcck: .long s390_handle_mcck +.Ldo_IRQ: .long do_IRQ +.Ldo_extint: .long do_extint +.Ldo_signal: .long do_signal +.Ldo_notify_resume: .long do_notify_resume +.Ldo_per_trap: .long do_per_trap +.Ldo_execve: .long do_execve +.Lexecve_tail: .long execve_tail +.Ljump_table: .long pgm_check_table +.Lschedule: .long schedule #ifdef CONFIG_PREEMPT -.Lpreempt_schedule_irq: - .long preempt_schedule_irq +.Lpreempt_irq: .long preempt_schedule_irq #endif -.Ltrace_entry: .long do_syscall_trace_enter -.Ltrace_exit: .long do_syscall_trace_exit -.Lschedtail: .long schedule_tail -.Lsysc_table: .long sys_call_table +.Ltrace_enter: .long do_syscall_trace_enter +.Ltrace_exit: .long do_syscall_trace_exit +.Lschedule_tail: .long schedule_tail +.Lsys_call_table: .long sys_call_table +.Lsysc_per: .long sysc_per + 0x80000000 #ifdef CONFIG_TRACE_IRQFLAGS -.Ltrace_irq_on_caller: - .long trace_hardirqs_on_caller -.Ltrace_irq_off_caller: - .long trace_hardirqs_off_caller +.Lhardirqs_on: .long trace_hardirqs_on_caller +.Lhardirqs_off: .long trace_hardirqs_off_caller #endif #ifdef CONFIG_LOCKDEP -.Llockdep_sys_exit: - .long lockdep_sys_exit +.Llockdep_sys_exit: .long lockdep_sys_exit #endif -.Lcritical_start: - .long __critical_start + 0x80000000 -.Lcritical_end: - .long __critical_end + 0x80000000 -.Lcleanup_critical: - .long cleanup_critical +.Lcritical_start: .long __critical_start + 0x80000000 +.Lcritical_length: .long __critical_end - __critical_start .section .rodata, "a" #define SYSCALL(esa,esame,emu) .long esa diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h index ef8fb1d6e8d7..bf538aaf407d 100644 --- a/arch/s390/kernel/entry.h +++ b/arch/s390/kernel/entry.h @@ -6,15 +6,15 @@ #include <asm/ptrace.h> -extern void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long); +extern void (*pgm_check_table[128])(struct pt_regs *); extern void *restart_stack; asmlinkage long do_syscall_trace_enter(struct pt_regs *regs); asmlinkage 
void do_syscall_trace_exit(struct pt_regs *regs); -void do_protection_exception(struct pt_regs *, long, unsigned long); -void do_dat_exception(struct pt_regs *, long, unsigned long); -void do_asce_exception(struct pt_regs *, long, unsigned long); +void do_protection_exception(struct pt_regs *regs); +void do_dat_exception(struct pt_regs *regs); +void do_asce_exception(struct pt_regs *regs); void do_per_trap(struct pt_regs *regs); void syscall_trace(struct pt_regs *regs, int entryexit); @@ -28,7 +28,7 @@ void do_extint(struct pt_regs *regs, unsigned int, unsigned int, unsigned long); void do_restart(void); int __cpuinit start_secondary(void *cpuvoid); void __init startup_init(void); -void die(const char * str, struct pt_regs * regs, long err); +void die(struct pt_regs *regs, const char *str); void __init time_init(void); diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 83a93747e2fd..412a7b8783d7 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S @@ -19,32 +19,22 @@ #include <asm/unistd.h> #include <asm/page.h> -/* - * Stack layout for the system_call stack entry. - * The first few entries are identical to the user_regs_struct. - */ -SP_PTREGS = STACK_FRAME_OVERHEAD -SP_ARGS = STACK_FRAME_OVERHEAD + __PT_ARGS -SP_PSW = STACK_FRAME_OVERHEAD + __PT_PSW -SP_R0 = STACK_FRAME_OVERHEAD + __PT_GPRS -SP_R1 = STACK_FRAME_OVERHEAD + __PT_GPRS + 8 -SP_R2 = STACK_FRAME_OVERHEAD + __PT_GPRS + 16 -SP_R3 = STACK_FRAME_OVERHEAD + __PT_GPRS + 24 -SP_R4 = STACK_FRAME_OVERHEAD + __PT_GPRS + 32 -SP_R5 = STACK_FRAME_OVERHEAD + __PT_GPRS + 40 -SP_R6 = STACK_FRAME_OVERHEAD + __PT_GPRS + 48 -SP_R7 = STACK_FRAME_OVERHEAD + __PT_GPRS + 56 -SP_R8 = STACK_FRAME_OVERHEAD + __PT_GPRS + 64 -SP_R9 = STACK_FRAME_OVERHEAD + __PT_GPRS + 72 -SP_R10 = STACK_FRAME_OVERHEAD + __PT_GPRS + 80 -SP_R11 = STACK_FRAME_OVERHEAD + __PT_GPRS + 88 -SP_R12 = STACK_FRAME_OVERHEAD + __PT_GPRS + 96 -SP_R13 = STACK_FRAME_OVERHEAD + __PT_GPRS + 104 -SP_R14 = STACK_FRAME_OVERHEAD + __PT_GPRS + 112 -SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120 -SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2 -SP_SVC_CODE = STACK_FRAME_OVERHEAD + __PT_SVC_CODE -SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE +__PT_R0 = __PT_GPRS +__PT_R1 = __PT_GPRS + 8 +__PT_R2 = __PT_GPRS + 16 +__PT_R3 = __PT_GPRS + 24 +__PT_R4 = __PT_GPRS + 32 +__PT_R5 = __PT_GPRS + 40 +__PT_R6 = __PT_GPRS + 48 +__PT_R7 = __PT_GPRS + 56 +__PT_R8 = __PT_GPRS + 64 +__PT_R9 = __PT_GPRS + 72 +__PT_R10 = __PT_GPRS + 80 +__PT_R11 = __PT_GPRS + 88 +__PT_R12 = __PT_GPRS + 96 +__PT_R13 = __PT_GPRS + 104 +__PT_R14 = __PT_GPRS + 112 +__PT_R15 = __PT_GPRS + 120 STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER STACK_SIZE = 1 << STACK_SHIFT @@ -59,154 +49,103 @@ _TIF_EXIT_SIE = (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_MCCK_PENDING) #define BASED(name) name-system_call(%r13) - .macro SPP newpp -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) - tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP - jz .+8 - .insn s,0xb2800000,\newpp -#endif - .endm - - .macro HANDLE_SIE_INTERCEPT -#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) - tm __TI_flags+6(%r12),_TIF_SIE>>8 - jz 0f - SPP __LC_CMF_HPP # set host id - clc SP_PSW+8(8,%r15),BASED(.Lsie_loop) - jl 0f - clc SP_PSW+8(8,%r15),BASED(.Lsie_done) - jhe 0f - mvc SP_PSW+8(8,%r15),BASED(.Lsie_loop) -0: -#endif - .endm - -#ifdef CONFIG_TRACE_IRQFLAGS .macro TRACE_IRQS_ON +#ifdef CONFIG_TRACE_IRQFLAGS basr %r2,%r0 brasl %r14,trace_hardirqs_on_caller +#endif .endm .macro TRACE_IRQS_OFF +#ifdef CONFIG_TRACE_IRQFLAGS basr 
%r2,%r0 brasl %r14,trace_hardirqs_off_caller - .endm -#else -#define TRACE_IRQS_ON -#define TRACE_IRQS_OFF #endif + .endm -#ifdef CONFIG_LOCKDEP .macro LOCKDEP_SYS_EXIT - tm SP_PSW+1(%r15),0x01 # returning to user ? - jz 0f +#ifdef CONFIG_LOCKDEP + tm __PT_PSW+1(%r11),0x01 # returning to user ? + jz .+10 brasl %r14,lockdep_sys_exit -0: - .endm -#else -#define LOCKDEP_SYS_EXIT #endif - - .macro UPDATE_VTIME lc_from,lc_to,lc_sum - lg %r10,\lc_from - slg %r10,\lc_to - alg %r10,\lc_sum - stg %r10,\lc_sum .endm -/* - * Register usage in interrupt handlers: - * R9 - pointer to current task structure - * R13 - pointer to literal pool - * R14 - return register for function calls - * R15 - kernel stack pointer - */ + .macro SPP newpp +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP + jz .+8 + .insn s,0xb2800000,\newpp +#endif + .endm - .macro SAVE_ALL_SVC psworg,savearea - stmg %r11,%r15,\savearea - lg %r15,__LC_KERNEL_STACK # problem state -> load ksp - aghi %r15,-SP_SIZE # make room for registers & psw - lg %r11,__LC_LAST_BREAK + .macro HANDLE_SIE_INTERCEPT scratch +#if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) + tm __TI_flags+6(%r12),_TIF_SIE>>8 + jz .+42 + tm __LC_MACHINE_FLAGS+6,0x20 # MACHINE_FLAG_SPP + jz .+8 + .insn s,0xb2800000,BASED(.Lhost_id) # set host id + lgr \scratch,%r9 + slg \scratch,BASED(.Lsie_loop) + clg \scratch,BASED(.Lsie_length) + jhe .+10 + lg %r9,BASED(.Lsie_loop) +#endif .endm - .macro SAVE_ALL_PGM psworg,savearea - stmg %r11,%r15,\savearea - tm \psworg+1,0x01 # test problem state bit + .macro CHECK_STACK stacksize,savearea #ifdef CONFIG_CHECK_STACK - jnz 1f - tml %r15,STACK_SIZE - CONFIG_STACK_GUARD - jnz 2f - la %r12,\psworg - j stack_overflow -#else - jz 2f + tml %r15,\stacksize - CONFIG_STACK_GUARD + lghi %r14,\savearea + jz stack_overflow #endif -1: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp -2: aghi %r15,-SP_SIZE # make room for registers & psw - larl %r13,system_call - lg %r11,__LC_LAST_BREAK .endm - .macro SAVE_ALL_ASYNC psworg,savearea - stmg %r11,%r15,\savearea - larl %r13,system_call - lg %r11,__LC_LAST_BREAK - la %r12,\psworg - tm \psworg+1,0x01 # test problem state bit - jnz 1f # from user -> load kernel stack - clc \psworg+8(8),BASED(.Lcritical_end) + .macro SWITCH_ASYNC savearea,stack,shift + tmhh %r8,0x0001 # interrupting from user ? + jnz 1f + lgr %r14,%r9 + slg %r14,BASED(.Lcritical_start) + clg %r14,BASED(.Lcritical_length) jhe 0f - clc \psworg+8(8),BASED(.Lcritical_start) - jl 0f + lghi %r11,\savearea # inside critical section, do cleanup brasl %r14,cleanup_critical - tm 1(%r12),0x01 # retest problem state after cleanup + tmhh %r8,0x0001 # retest problem state after cleanup jnz 1f -0: lg %r14,__LC_ASYNC_STACK # are we already on the async. stack ? +0: lg %r14,\stack # are we already on the target stack? 
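The "already on the target stack?" test that follows subtracts the current stack pointer from the stack base and arithmetically shifts the difference right by the stack-size shift; a zero result means %r15 already lies within that stack. The same test in C, assuming stacks are 1 << shift bytes:

#include <stdbool.h>

static bool on_target_stack(unsigned long sp, unsigned long stack,
                            unsigned int shift)
{
        /* the lg/slgr/srag sequence from SWITCH_ASYNC */
        return ((long)(stack - sp) >> shift) == 0;
}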
slgr %r14,%r15 - srag %r14,%r14,STACK_SHIFT -#ifdef CONFIG_CHECK_STACK + srag %r14,%r14,\shift jnz 1f - tml %r15,STACK_SIZE - CONFIG_STACK_GUARD - jnz 2f - j stack_overflow -#else - jz 2f -#endif -1: lg %r15,__LC_ASYNC_STACK # load async stack -2: aghi %r15,-SP_SIZE # make room for registers & psw - .endm - - .macro CREATE_STACK_FRAME savearea - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2 - mvc SP_R11(40,%r15),\savearea # move %r11-%r15 to stack - stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack + CHECK_STACK 1<<\shift,\savearea + j 2f +1: lg %r15,\stack # load target stack +2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + la %r11,STACK_FRAME_OVERHEAD(%r15) .endm - .macro RESTORE_ALL psworg,sync - mvc \psworg(16),SP_PSW(%r15) # move user PSW to lowcore - .if !\sync - ni \psworg+1,0xfd # clear wait state bit - .endif - lg %r14,__LC_VDSO_PER_CPU - lmg %r0,%r13,SP_R0(%r15) # load gprs 0-13 of user - stpt __LC_EXIT_TIMER - mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER - lmg %r14,%r15,SP_R14(%r15) # load grps 14-15 of user - lpswe \psworg # back to caller + .macro UPDATE_VTIME scratch,enter_timer + lg \scratch,__LC_EXIT_TIMER + slg \scratch,\enter_timer + alg \scratch,__LC_USER_TIMER + stg \scratch,__LC_USER_TIMER + lg \scratch,__LC_LAST_UPDATE_TIMER + slg \scratch,__LC_EXIT_TIMER + alg \scratch,__LC_SYSTEM_TIMER + stg \scratch,__LC_SYSTEM_TIMER + mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer .endm - .macro LAST_BREAK - srag %r10,%r11,23 - jz 0f - stg %r11,__TI_last_break(%r12) -0: + .macro LAST_BREAK scratch + srag \scratch,%r10,23 + jz .+10 + stg %r10,__TI_last_break(%r12) .endm .macro REENABLE_IRQS - mvc __SF_EMPTY(1,%r15),SP_PSW(%r15) - ni __SF_EMPTY(%r15),0xbf - ssm __SF_EMPTY(%r15) + stg %r8,__LC_RETURN_PSW + ni __LC_RETURN_PSW,0xbf + ssm __LC_RETURN_PSW .endm .section .kprobes.text, "ax" @@ -245,55 +184,66 @@ __critical_start: ENTRY(system_call) stpt __LC_SYNC_ENTER_TIMER -sysc_saveall: - SAVE_ALL_SVC __LC_SVC_OLD_PSW,__LC_SAVE_AREA - CREATE_STACK_FRAME __LC_SAVE_AREA - lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct - mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW - mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC - oi __TI_flags+7(%r12),_TIF_SYSCALL +sysc_stmg: + stmg %r8,%r15,__LC_SAVE_AREA_SYNC + lg %r10,__LC_LAST_BREAK + lg %r12,__LC_THREAD_INFO + larl %r13,system_call +sysc_per: + lg %r15,__LC_KERNEL_STACK + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs sysc_vtime: - UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER -sysc_stime: - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER -sysc_update: - mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER - LAST_BREAK + UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER + LAST_BREAK %r13 + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC + mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW + mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC sysc_do_svc: - llgh %r7,SP_SVC_CODE+2(%r15) - slag %r7,%r7,2 # shift and test for svc 0 + oi __TI_flags+7(%r12),_TIF_SYSCALL + llgh %r8,__PT_INT_CODE+2(%r11) + slag %r8,%r8,2 # shift and test for svc 0 jnz sysc_nr_ok # svc 0: system call number in %r1 - llgfr %r1,%r1 # clear high word in r1 + llgfr %r1,%r1 # clear high word in r1 cghi %r1,NR_syscalls jnl sysc_nr_ok - sth %r1,SP_SVC_CODE+2(%r15) - slag %r7,%r1,2 # shift and test for svc 0 + sth %r1,__PT_INT_CODE+2(%r11) + slag %r8,%r1,2 sysc_nr_ok: - larl %r10,sys_call_table + larl %r10,sys_call_table # 64 
bit system call table #ifdef CONFIG_COMPAT - tm __TI_flags+5(%r12),(_TIF_31BIT>>16) # running in 31 bit mode ? + tm __TI_flags+5(%r12),(_TIF_31BIT>>16) jno sysc_noemu - larl %r10,sys_call_table_emu # use 31 bit emulation system calls + larl %r10,sys_call_table_emu # 31 bit system call table sysc_noemu: #endif + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + stg %r2,__PT_ORIG_GPR2(%r11) + stg %r7,STACK_FRAME_OVERHEAD(%r15) + lgf %r9,0(%r8,%r10) # get system call add. tm __TI_flags+6(%r12),_TIF_TRACE >> 8 - mvc SP_ARGS(8,%r15),SP_R7(%r15) - lgf %r8,0(%r7,%r10) # load address of system call routine jnz sysc_tracesys - basr %r14,%r8 # call sys_xxxx - stg %r2,SP_R2(%r15) # store return value (change R2 on stack) + basr %r14,%r9 # call sys_xxxx + stg %r2,__PT_R2(%r11) # store return value sysc_return: LOCKDEP_SYS_EXIT sysc_tif: - tm SP_PSW+1(%r15),0x01 # returning to user ? + tm __PT_PSW+1(%r11),0x01 # returning to user ? jno sysc_restore tm __TI_flags+7(%r12),_TIF_WORK_SVC - jnz sysc_work # there is work to do (signals etc.) + jnz sysc_work # check for work ni __TI_flags+7(%r12),255-_TIF_SYSCALL sysc_restore: - RESTORE_ALL __LC_RETURN_PSW,1 + lg %r14,__LC_VDSO_PER_CPU + lmg %r0,%r10,__PT_R0(%r11) + mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) + stpt __LC_EXIT_TIMER + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER + lmg %r11,%r15,__PT_R11(%r11) + lpswe __LC_RETURN_PSW sysc_done: # @@ -317,7 +267,7 @@ sysc_work: # sysc_reschedule: larl %r14,sysc_return - jg schedule # return point is sysc_return + jg schedule # # _TIF_MCCK_PENDING is set, call handler @@ -331,33 +281,33 @@ sysc_mcck_pending: # sysc_sigpending: ni __TI_flags+7(%r12),255-_TIF_PER_TRAP # clear TIF_PER_TRAP - la %r2,SP_PTREGS(%r15) # load pt_regs - brasl %r14,do_signal # call do_signal + lgr %r2,%r11 # pass pointer to pt_regs + brasl %r14,do_signal tm __TI_flags+7(%r12),_TIF_SYSCALL jno sysc_return - lmg %r2,%r6,SP_R2(%r15) # load svc arguments - lghi %r7,0 # svc 0 returns -ENOSYS - lh %r1,SP_SVC_CODE+2(%r15) # load new svc number + lmg %r2,%r7,__PT_R2(%r11) # load svc arguments + lghi %r8,0 # svc 0 returns -ENOSYS + lh %r1,__PT_INT_CODE+2(%r11) # load new svc number cghi %r1,NR_syscalls jnl sysc_nr_ok # invalid svc number -> do svc 0 - slag %r7,%r1,2 + slag %r8,%r1,2 j sysc_nr_ok # restart svc # # _TIF_NOTIFY_RESUME is set, call do_notify_resume # sysc_notify_resume: - la %r2,SP_PTREGS(%r15) # load pt_regs + lgr %r2,%r11 # pass pointer to pt_regs larl %r14,sysc_return - jg do_notify_resume # call do_notify_resume + jg do_notify_resume # # _TIF_PER_TRAP is set, call do_per_trap # sysc_singlestep: ni __TI_flags+7(%r12),255-(_TIF_SYSCALL | _TIF_PER_TRAP) - la %r2,SP_PTREGS(%r15) # address of register-save area - larl %r14,sysc_return # load adr. 
of system return + lgr %r2,%r11 # pass pointer to pt_regs + larl %r14,sysc_return jg do_per_trap # @@ -365,41 +315,41 @@ sysc_singlestep: # and after the system call # sysc_tracesys: - la %r2,SP_PTREGS(%r15) # load pt_regs + lgr %r2,%r11 # pass pointer to pt_regs la %r3,0 - llgh %r0,SP_SVC_CODE+2(%r15) - stg %r0,SP_R2(%r15) + llgh %r0,__PT_INT_CODE+2(%r11) + stg %r0,__PT_R2(%r11) brasl %r14,do_syscall_trace_enter lghi %r0,NR_syscalls clgr %r0,%r2 jnh sysc_tracenogo - sllg %r7,%r2,2 # svc number *4 - lgf %r8,0(%r7,%r10) + sllg %r8,%r2,2 + lgf %r9,0(%r8,%r10) sysc_tracego: - lmg %r3,%r6,SP_R3(%r15) - mvc SP_ARGS(8,%r15),SP_R7(%r15) - lg %r2,SP_ORIG_R2(%r15) - basr %r14,%r8 # call sys_xxx - stg %r2,SP_R2(%r15) # store return value + lmg %r3,%r7,__PT_R3(%r11) + stg %r7,STACK_FRAME_OVERHEAD(%r15) + lg %r2,__PT_ORIG_GPR2(%r11) + basr %r14,%r9 # call sys_xxx + stg %r2,__PT_R2(%r11) # store return value sysc_tracenogo: tm __TI_flags+6(%r12),_TIF_TRACE >> 8 jz sysc_return - la %r2,SP_PTREGS(%r15) # load pt_regs - larl %r14,sysc_return # return point is sysc_return + lgr %r2,%r11 # pass pointer to pt_regs + larl %r14,sysc_return jg do_syscall_trace_exit # # a new process exits the kernel with ret_from_fork # ENTRY(ret_from_fork) - lg %r13,__LC_SVC_NEW_PSW+8 - lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct - tm SP_PSW+1(%r15),0x01 # forking a kernel thread ? + la %r11,STACK_FRAME_OVERHEAD(%r15) + lg %r12,__LC_THREAD_INFO + tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? jo 0f - stg %r15,SP_R15(%r15) # store stack pointer for new kthread + stg %r15,__PT_R15(%r11) # store stack pointer for new kthread 0: brasl %r14,schedule_tail TRACE_IRQS_ON - stosm 24(%r15),0x03 # reenable interrupts + ssm __LC_SVC_NEW_PSW # reenable interrupts j sysc_tracenogo # @@ -409,26 +359,26 @@ ENTRY(ret_from_fork) ENTRY(kernel_execve) stmg %r12,%r15,96(%r15) lgr %r14,%r15 - aghi %r15,-SP_SIZE + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) stg %r14,__SF_BACKCHAIN(%r15) - la %r12,SP_PTREGS(%r15) + la %r12,STACK_FRAME_OVERHEAD(%r15) xc 0(__PT_SIZE,%r12),0(%r12) lgr %r5,%r12 brasl %r14,do_execve ltgfr %r2,%r2 je 0f - aghi %r15,SP_SIZE + aghi %r15,(STACK_FRAME_OVERHEAD + __PT_SIZE) lmg %r12,%r15,96(%r15) br %r14 # execve succeeded. -0: stnsm __SF_EMPTY(%r15),0xfc # disable interrupts +0: ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts lg %r15,__LC_KERNEL_STACK # load ksp - aghi %r15,-SP_SIZE # make room for registers & psw - lg %r13,__LC_SVC_NEW_PSW+8 - mvc SP_PTREGS(__PT_SIZE,%r15),0(%r12) # copy pt_regs + aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + la %r11,STACK_FRAME_OVERHEAD(%r15) + mvc 0(__PT_SIZE,%r11),0(%r12) # copy pt_regs lg %r12,__LC_THREAD_INFO xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) - stosm __SF_EMPTY(%r15),0x03 # reenable interrupts + ssm __LC_SVC_NEW_PSW # reenable interrupts brasl %r14,execve_tail j sysc_return @@ -437,127 +387,72 @@ ENTRY(kernel_execve) */ ENTRY(pgm_check_handler) -/* - * First we need to check for a special case: - * Single stepping an instruction that disables the PER event mask will - * cause a PER event AFTER the mask has been set. Example: SVC or LPSW. - * For a single stepped SVC the program check handler gets control after - * the SVC new PSW has been loaded. But we want to execute the SVC first and - * then handle the PER event. Therefore we update the SVC old PSW to point - * to the pgm_check_handler and branch to the SVC handler after we checked - * if we have to load the kernel stack register. 
- * For every other possible cause for PER event without the PER mask set - * we just ignore the PER event (FIXME: is there anything we have to do - * for LPSW?). - */ stpt __LC_SYNC_ENTER_TIMER - tm __LC_PGM_INT_CODE+1,0x80 # check whether we got a per exception - jnz pgm_per # got per exception -> special case - SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA - CREATE_STACK_FRAME __LC_SAVE_AREA - mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW - lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct - HANDLE_SIE_INTERCEPT - tm SP_PSW+1(%r15),0x01 # interrupting from user ? - jz pgm_no_vtime - UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER - LAST_BREAK -pgm_no_vtime: - stg %r11,SP_ARGS(%r15) - lgf %r3,__LC_PGM_ILC # load program interruption code - lg %r4,__LC_TRANS_EXC_CODE - REENABLE_IRQS - lghi %r8,0x7f - ngr %r8,%r3 - sll %r8,3 - larl %r1,pgm_check_table - lg %r1,0(%r8,%r1) # load address of handler routine - la %r2,SP_PTREGS(%r15) # address of register-save area - basr %r14,%r1 # branch to interrupt-handler -pgm_exit: - j sysc_return - -# -# handle per exception -# -pgm_per: - tm __LC_PGM_OLD_PSW,0x40 # test if per event recording is on - jnz pgm_per_std # ok, normal per event from user space -# ok its one of the special cases, now we need to find out which one - clc __LC_PGM_OLD_PSW(16),__LC_SVC_NEW_PSW - je pgm_svcper -# no interesting special case, ignore PER event - lpswe __LC_PGM_OLD_PSW - -# -# Normal per exception -# -pgm_per_std: - SAVE_ALL_PGM __LC_PGM_OLD_PSW,__LC_SAVE_AREA - CREATE_STACK_FRAME __LC_SAVE_AREA - mvc SP_PSW(16,%r15),__LC_PGM_OLD_PSW - lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct - HANDLE_SIE_INTERCEPT - tm SP_PSW+1(%r15),0x01 # interrupting from user ? - jz pgm_no_vtime2 - UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER - LAST_BREAK -pgm_no_vtime2: + stmg %r8,%r15,__LC_SAVE_AREA_SYNC + lg %r10,__LC_LAST_BREAK + lg %r12,__LC_THREAD_INFO + larl %r13,system_call + lmg %r8,%r9,__LC_PGM_OLD_PSW + HANDLE_SIE_INTERCEPT %r14 + tmhh %r8,0x0001 # test problem state bit + jnz 1f # -> fault in user space + tmhh %r8,0x4000 # PER bit set in old PSW ? + jnz 0f # -> enabled, can't be a double fault + tm __LC_PGM_ILC+3,0x80 # check for per exception + jnz pgm_svcper # -> single stepped svc +0: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC + j 2f +1: UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER + LAST_BREAK %r14 + lg %r15,__LC_KERNEL_STACK +2: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + la %r11,STACK_FRAME_OVERHEAD(%r15) + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC + stmg %r8,%r9,__PT_PSW(%r11) + mvc __PT_INT_CODE(4,%r11),__LC_PGM_ILC + mvc __PT_INT_PARM_LONG(8,%r11),__LC_TRANS_EXC_CODE + stg %r10,__PT_ARGS(%r11) + tm __LC_PGM_ILC+3,0x80 # check for per exception + jz 0f lg %r1,__TI_task(%r12) - tm SP_PSW+1(%r15),0x01 # kernel per event ? - jz kernel_per - mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE + tmhh %r8,0x0001 # kernel per event ? 
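The rewritten pgm_check_handler entry classifies the program check before building any stack frame: a fault from user space takes the kernel-stack path; in kernel mode, a PER exception while the PER mask in the old PSW is off can only be a single-stepped SVC and is redirected to pgm_svcper; everything else stays on the current stack after the guard check. The same decision tree as a hedged C sketch (all predicates are illustrative stand-ins):

enum pgm_path { PGM_USER_STACK, PGM_SVCPER, PGM_KERNEL_STACK };

static enum pgm_path classify_pgm_check(int user_mode, int per_enabled,
                                        int per_exception)
{
        if (user_mode)                  /* tmhh %r8,0x0001 */
                return PGM_USER_STACK;  /* UPDATE_VTIME, load kernel stack */
        if (!per_enabled && per_exception)  /* tmhh %r8,0x4000 + ILC bit */
                return PGM_SVCPER;      /* single-stepped svc */
        return PGM_KERNEL_STACK;        /* CHECK_STACK, keep current stack */
}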
+ jz pgm_kprobe + oi __TI_flags+7(%r12),_TIF_PER_TRAP mvc __THREAD_per_address(8,%r1),__LC_PER_ADDRESS + mvc __THREAD_per_cause(2,%r1),__LC_PER_CAUSE mvc __THREAD_per_paid(1,%r1),__LC_PER_PAID - oi __TI_flags+7(%r12),_TIF_PER_TRAP # set TIF_PER_TRAP - lgf %r3,__LC_PGM_ILC # load program interruption code - lg %r4,__LC_TRANS_EXC_CODE - REENABLE_IRQS - lghi %r8,0x7f - ngr %r8,%r3 # clear per-event-bit and ilc - je pgm_exit2 - sll %r8,3 +0: REENABLE_IRQS + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) larl %r1,pgm_check_table - lg %r1,0(%r8,%r1) # load address of handler routine - la %r2,SP_PTREGS(%r15) # address of register-save area + llgh %r10,__PT_INT_CODE+2(%r11) + nill %r10,0x007f + sll %r10,3 + je sysc_return + lg %r1,0(%r10,%r1) # load address of handler routine + lgr %r2,%r11 # pass pointer to pt_regs basr %r14,%r1 # branch to interrupt-handler -pgm_exit2: j sysc_return # -# it was a single stepped SVC that is causing all the trouble +# PER event in supervisor state, must be kprobes # -pgm_svcper: - SAVE_ALL_PGM __LC_SVC_OLD_PSW,__LC_SAVE_AREA - CREATE_STACK_FRAME __LC_SAVE_AREA - lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct - mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW - mvc SP_SVC_CODE(4,%r15),__LC_SVC_ILC - oi __TI_flags+7(%r12),(_TIF_SYSCALL | _TIF_PER_TRAP) - UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER - LAST_BREAK - lg %r8,__TI_task(%r12) - mvc __THREAD_per_cause(2,%r8),__LC_PER_CAUSE - mvc __THREAD_per_address(8,%r8),__LC_PER_ADDRESS - mvc __THREAD_per_paid(1,%r8),__LC_PER_PAID - stosm __SF_EMPTY(%r15),0x03 # reenable interrupts - lmg %r2,%r6,SP_R2(%r15) # load svc arguments - j sysc_do_svc +pgm_kprobe: + REENABLE_IRQS + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + lgr %r2,%r11 # pass pointer to pt_regs + brasl %r14,do_per_trap + j sysc_return # -# per was called from kernel, must be kprobes +# single stepped system call # -kernel_per: - REENABLE_IRQS - la %r2,SP_PTREGS(%r15) # address of register-save area - brasl %r14,do_per_trap - j pgm_exit +pgm_svcper: + oi __TI_flags+7(%r12),_TIF_PER_TRAP + mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW + larl %r14,sysc_per + stg %r14,__LC_RETURN_PSW+8 + lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs /* * IO interrupt handler routine @@ -565,21 +460,25 @@ kernel_per: ENTRY(io_int_handler) stck __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER - SAVE_ALL_ASYNC __LC_IO_OLD_PSW,__LC_SAVE_AREA+40 - CREATE_STACK_FRAME __LC_SAVE_AREA+40 - mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack - lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct - HANDLE_SIE_INTERCEPT - tm SP_PSW+1(%r15),0x01 # interrupting from user ? - jz io_no_vtime - UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER - LAST_BREAK -io_no_vtime: + stmg %r8,%r15,__LC_SAVE_AREA_ASYNC + lg %r10,__LC_LAST_BREAK + lg %r12,__LC_THREAD_INFO + larl %r13,system_call + lmg %r8,%r9,__LC_IO_OLD_PSW + HANDLE_SIE_INTERCEPT %r14 + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT + tmhh %r8,0x0001 # interrupting from user? 
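pgm_svcper above restarts the interrupted system call by composing a return PSW from the SVC new-PSW mask (so interrupts come back on) and the address of sysc_per, then loading it with lpswe. In C-flavoured form (psw_t is a schematic stand-in; sysc_per is the entry label the PSW resumes at):

typedef struct { unsigned long mask, addr; } psw_t;

extern psw_t svc_new_psw;               /* stand-in for __LC_SVC_NEW_PSW */
extern void sysc_per(void);             /* resumed via lpswe */

static psw_t svcper_return_psw(void)
{
        psw_t psw;

        psw.mask = svc_new_psw.mask;    /* mvc __LC_RETURN_PSW(8),... */
        psw.addr = (unsigned long)sysc_per; /* stg %r14,__LC_RETURN_PSW+8 */
        return psw;                     /* lpswe __LC_RETURN_PSW */
}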
+ jz io_skip + UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER + LAST_BREAK %r14 +io_skip: + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC + stmg %r8,%r9,__PT_PSW(%r11) TRACE_IRQS_OFF - la %r2,SP_PTREGS(%r15) # address of register-save area - brasl %r14,do_IRQ # call standard irq handler + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + lgr %r2,%r11 # pass pointer to pt_regs + brasl %r14,do_IRQ io_return: LOCKDEP_SYS_EXIT TRACE_IRQS_ON @@ -587,7 +486,14 @@ io_tif: tm __TI_flags+7(%r12),_TIF_WORK_INT jnz io_work # there is work to do (signals etc.) io_restore: - RESTORE_ALL __LC_RETURN_PSW,0 + lg %r14,__LC_VDSO_PER_CPU + lmg %r0,%r10,__PT_R0(%r11) + mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) + ni __LC_RETURN_PSW+1,0xfd # clear wait state bit + stpt __LC_EXIT_TIMER + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER + lmg %r11,%r15,__PT_R11(%r11) + lpswe __LC_RETURN_PSW io_done: # @@ -600,7 +506,7 @@ io_done: # Before any work can be done, a switch to the kernel stack is required. # io_work: - tm SP_PSW+1(%r15),0x01 # returning to user ? + tm __PT_PSW+1(%r11),0x01 # returning to user ? jo io_work_user # yes -> do resched & signal #ifdef CONFIG_PREEMPT # check for preemptive scheduling @@ -609,10 +515,11 @@ io_work: tm __TI_flags+7(%r12),_TIF_NEED_RESCHED jno io_restore # switch to kernel stack - lg %r1,SP_R15(%r15) - aghi %r1,-SP_SIZE - mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) - xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain + lg %r1,__PT_R15(%r11) + aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) + xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) + la %r11,STACK_FRAME_OVERHEAD(%r1) lgr %r15,%r1 # TRACE_IRQS_ON already done at io_return, call # TRACE_IRQS_OFF to keep things symmetrical @@ -628,9 +535,10 @@ io_work: # io_work_user: lg %r1,__LC_KERNEL_STACK - aghi %r1,-SP_SIZE - mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) - xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain + aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) + xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) + la %r11,STACK_FRAME_OVERHEAD(%r1) lgr %r15,%r1 # @@ -663,9 +571,9 @@ io_mcck_pending: # io_reschedule: # TRACE_IRQS_ON already done at io_return - stosm __SF_EMPTY(%r15),0x03 # reenable interrupts + ssm __LC_SVC_NEW_PSW # reenable interrupts brasl %r14,schedule # call scheduler - stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts + ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF j io_return @@ -674,10 +582,10 @@ io_reschedule: # io_sigpending: # TRACE_IRQS_ON already done at io_return - stosm __SF_EMPTY(%r15),0x03 # reenable interrupts - la %r2,SP_PTREGS(%r15) # load pt_regs - brasl %r14,do_signal # call do_signal - stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts + ssm __LC_SVC_NEW_PSW # reenable interrupts + lgr %r2,%r11 # pass pointer to pt_regs + brasl %r14,do_signal + ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts TRACE_IRQS_OFF j io_return @@ -686,10 +594,10 @@ io_sigpending: # io_notify_resume: # TRACE_IRQS_ON already done at io_return - stosm __SF_EMPTY(%r15),0x03 # reenable interrupts - la %r2,SP_PTREGS(%r15) # load pt_regs - brasl %r14,do_notify_resume # call do_notify_resume - stnsm __SF_EMPTY(%r15),0xfc # disable I/O and ext. interrupts + ssm __LC_SVC_NEW_PSW # reenable interrupts + lgr %r2,%r11 # pass pointer to pt_regs + brasl %r14,do_notify_resume + ssm __LC_PGM_NEW_PSW # disable I/O and ext. 
interrupts TRACE_IRQS_OFF j io_return @@ -699,21 +607,24 @@ io_notify_resume: ENTRY(ext_int_handler) stck __LC_INT_CLOCK stpt __LC_ASYNC_ENTER_TIMER - SAVE_ALL_ASYNC __LC_EXT_OLD_PSW,__LC_SAVE_AREA+40 - CREATE_STACK_FRAME __LC_SAVE_AREA+40 - mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack - lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct - HANDLE_SIE_INTERCEPT - tm SP_PSW+1(%r15),0x01 # interrupting from user ? - jz ext_no_vtime - UPDATE_VTIME __LC_EXIT_TIMER,__LC_ASYNC_ENTER_TIMER,__LC_USER_TIMER - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),__LC_ASYNC_ENTER_TIMER - LAST_BREAK -ext_no_vtime: + stmg %r8,%r15,__LC_SAVE_AREA_ASYNC + lg %r10,__LC_LAST_BREAK + lg %r12,__LC_THREAD_INFO + larl %r13,system_call + lmg %r8,%r9,__LC_EXT_OLD_PSW + HANDLE_SIE_INTERCEPT %r14 + SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT + tmhh %r8,0x0001 # interrupting from user ? + jz ext_skip + UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER + LAST_BREAK %r14 +ext_skip: + stmg %r0,%r7,__PT_R0(%r11) + mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC + stmg %r8,%r9,__PT_PSW(%r11) TRACE_IRQS_OFF lghi %r1,4096 - la %r2,SP_PTREGS(%r15) # address of register-save area + lgr %r2,%r11 # pass pointer to pt_regs llgf %r3,__LC_CPU_ADDRESS # get cpu address + interruption code llgf %r4,__LC_EXT_PARAMS # get external parameter lg %r5,__LC_EXT_PARAMS2-4096(%r1) # get 64 bit external parameter @@ -730,81 +641,77 @@ ENTRY(mcck_int_handler) la %r1,4095 # revalidate r1 spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs - stmg %r11,%r15,__LC_SAVE_AREA+80 + lg %r10,__LC_LAST_BREAK + lg %r12,__LC_THREAD_INFO larl %r13,system_call - lg %r11,__LC_LAST_BREAK - la %r12,__LC_MCK_OLD_PSW + lmg %r8,%r9,__LC_MCK_OLD_PSW + HANDLE_SIE_INTERCEPT %r14 tm __LC_MCCK_CODE,0x80 # system damage? - jo mcck_int_main # yes -> rest of mcck code invalid - la %r14,4095 - mvc __LC_MCCK_ENTER_TIMER(8),__LC_CPU_TIMER_SAVE_AREA-4095(%r14) + jo mcck_panic # yes -> rest of mcck code invalid + lghi %r14,__LC_CPU_TIMER_SAVE_AREA + mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? - jo 1f + jo 3f la %r14,__LC_SYNC_ENTER_TIMER clc 0(8,%r14),__LC_ASYNC_ENTER_TIMER jl 0f la %r14,__LC_ASYNC_ENTER_TIMER 0: clc 0(8,%r14),__LC_EXIT_TIMER - jl 0f + jl 1f la %r14,__LC_EXIT_TIMER -0: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER - jl 0f +1: clc 0(8,%r14),__LC_LAST_UPDATE_TIMER + jl 2f la %r14,__LC_LAST_UPDATE_TIMER -0: spt 0(%r14) +2: spt 0(%r14) mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) -1: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? - jno mcck_int_main # no -> skip cleanup critical - tm __LC_MCK_OLD_PSW+1,0x01 # test problem state bit - jnz mcck_int_main # from user -> load kernel stack - clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_end) - jhe mcck_int_main - clc __LC_MCK_OLD_PSW+8(8),BASED(.Lcritical_start) - jl mcck_int_main - brasl %r14,cleanup_critical -mcck_int_main: - lg %r14,__LC_PANIC_STACK # are we already on the panic stack? - slgr %r14,%r15 - srag %r14,%r14,PAGE_SHIFT - jz 0f - lg %r15,__LC_PANIC_STACK # load panic stack -0: aghi %r15,-SP_SIZE # make room for registers & psw - CREATE_STACK_FRAME __LC_SAVE_AREA+80 - mvc SP_PSW(16,%r15),0(%r12) - lg %r12,__LC_THREAD_INFO # load pointer to thread_info struct - tm __LC_MCCK_CODE+2,0x08 # mwp of old psw valid? - jno mcck_no_vtime # no -> no timer update - HANDLE_SIE_INTERCEPT - tm SP_PSW+1(%r15),0x01 # interrupting from user ? 
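The new UPDATE_VTIME macro, used by all the rewritten entries, folds the two old three-operand invocations and the timestamp update into one sequence. Written out in C over schematic lowcore fields:

struct vtime_lc {                       /* schematic lowcore slots */
        unsigned long long user_timer, system_timer;
        unsigned long long exit_timer, last_update_timer;
};

static void update_vtime(struct vtime_lc *lc, unsigned long long enter_timer)
{
        lc->user_timer += lc->exit_timer - enter_timer;
        lc->system_timer += lc->last_update_timer - lc->exit_timer;
        lc->last_update_timer = enter_timer;    /* mvc __LC_LAST_UPDATE_TIMER */
}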
- jz mcck_no_vtime - UPDATE_VTIME __LC_EXIT_TIMER,__LC_MCCK_ENTER_TIMER,__LC_USER_TIMER - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER - mvc __LC_LAST_UPDATE_TIMER(8),__LC_MCCK_ENTER_TIMER - LAST_BREAK -mcck_no_vtime: - la %r2,SP_PTREGS(%r15) # load pt_regs +3: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? + jno mcck_panic # no -> skip cleanup critical + SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT + tm %r8,0x0001 # interrupting from user ? + jz mcck_skip + UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER + LAST_BREAK %r14 +mcck_skip: + lghi %r14,__LC_GPREGS_SAVE_AREA + mvc __PT_R0(128,%r11),0(%r14) + stmg %r8,%r9,__PT_PSW(%r11) + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + lgr %r2,%r11 # pass pointer to pt_regs brasl %r14,s390_do_machine_check - tm SP_PSW+1(%r15),0x01 # returning to user ? + tm __PT_PSW+1(%r11),0x01 # returning to user ? jno mcck_return lg %r1,__LC_KERNEL_STACK # switch to kernel stack - aghi %r1,-SP_SIZE - mvc SP_PTREGS(__PT_SIZE,%r1),SP_PTREGS(%r15) - xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) # clear back chain + aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) + xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) + la %r11,STACK_FRAME_OVERHEAD(%r1) lgr %r15,%r1 - stosm __SF_EMPTY(%r15),0x04 # turn dat on + ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off tm __TI_flags+7(%r12),_TIF_MCCK_PENDING jno mcck_return TRACE_IRQS_OFF brasl %r14,s390_handle_mcck TRACE_IRQS_ON mcck_return: - mvc __LC_RETURN_MCCK_PSW(16),SP_PSW(%r15) # move return PSW + lg %r14,__LC_VDSO_PER_CPU + lmg %r0,%r10,__PT_R0(%r11) + mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW ni __LC_RETURN_MCCK_PSW+1,0xfd # clear wait state bit - lmg %r0,%r15,SP_R0(%r15) # load gprs 0-15 tm __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ? 
jno 0f stpt __LC_EXIT_TIMER -0: lpswe __LC_RETURN_MCCK_PSW # back to caller -mcck_done: + mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER +0: lmg %r11,%r15,__PT_R11(%r11) + lpswe __LC_RETURN_MCCK_PSW + +mcck_panic: + lg %r14,__LC_PANIC_STACK + slgr %r14,%r15 + srag %r14,%r14,PAGE_SHIFT + jz 0f + lg %r15,__LC_PANIC_STACK +0: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) + j mcck_skip /* * Restart interruption handler, kick starter for additional CPUs @@ -818,17 +725,18 @@ restart_base: stck __LC_LAST_UPDATE_CLOCK mvc __LC_LAST_UPDATE_TIMER(8),restart_vtime-restart_base(%r1) mvc __LC_EXIT_TIMER(8),restart_vtime-restart_base(%r1) - lg %r15,__LC_SAVE_AREA+120 # load ksp + lghi %r10,__LC_GPREGS_SAVE_AREA + lg %r15,120(%r10) # load ksp lghi %r10,__LC_CREGS_SAVE_AREA - lctlg %c0,%c15,0(%r10) # get new ctl regs + lctlg %c0,%c15,0(%r10) # get new ctl regs lghi %r10,__LC_AREGS_SAVE_AREA lam %a0,%a15,0(%r10) - lmg %r6,%r15,__SF_GPRS(%r15) # load registers from clone + lmg %r6,%r15,__SF_GPRS(%r15)# load registers from clone lg %r1,__LC_THREAD_INFO mvc __LC_USER_TIMER(8),__TI_user_timer(%r1) mvc __LC_SYSTEM_TIMER(8),__TI_system_timer(%r1) xc __LC_STEAL_TIMER(8),__LC_STEAL_TIMER - stosm __SF_EMPTY(%r15),0x04 # now we can turn dat on + ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off brasl %r14,start_secondary .align 8 restart_vtime: @@ -852,16 +760,16 @@ restart_go: # PSW restart interrupt handler # ENTRY(psw_restart_int_handler) - stg %r15,__LC_SAVE_AREA+120(%r0) # save r15 + stg %r15,__LC_SAVE_AREA_RESTART larl %r15,restart_stack # load restart stack lg %r15,0(%r15) - aghi %r15,-SP_SIZE # make room for pt_regs - stmg %r0,%r14,SP_R0(%r15) # store gprs %r0-%r14 to stack - mvc SP_R15(8,%r15),__LC_SAVE_AREA+120(%r0)# store saved %r15 to stack - mvc SP_PSW(16,%r15),__LC_RST_OLD_PSW(%r0)# store restart old psw - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # set backchain to 0 + aghi %r15,-__PT_SIZE # create pt_regs on stack + stmg %r0,%r14,__PT_R0(%r15) + mvc __PT_R15(8,%r15),__LC_SAVE_AREA_RESTART + mvc __PT_PSW(16,%r15),__LC_RST_OLD_PSW # store restart old psw + aghi %r15,-STACK_FRAME_OVERHEAD + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) brasl %r14,do_restart - larl %r14,restart_psw_crash # load disabled wait PSW if lpswe 0(%r14) # do_restart returns .align 8 @@ -877,172 +785,153 @@ restart_psw_crash: * Setup a pt_regs so that show_trace can provide a good call trace. 
*/ stack_overflow: - lg %r15,__LC_PANIC_STACK # change to panic stack - aghi %r15,-SP_SIZE - mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack - stmg %r0,%r10,SP_R0(%r15) # store gprs %r0-%r10 to kernel stack - la %r1,__LC_SAVE_AREA - chi %r12,__LC_SVC_OLD_PSW - je 0f - chi %r12,__LC_PGM_OLD_PSW - je 0f - la %r1,__LC_SAVE_AREA+40 -0: mvc SP_R11(40,%r15),0(%r1) # move %r11-%r15 to stack - mvc SP_ARGS(8,%r15),__LC_LAST_BREAK - xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain - la %r2,SP_PTREGS(%r15) # load pt_regs + lg %r11,__LC_PANIC_STACK # change to panic stack + aghi %r11,-__PT_SIZE # create pt_regs + stmg %r0,%r7,__PT_R0(%r11) + stmg %r8,%r9,__PT_PSW(%r11) + mvc __PT_R8(64,%r11),0(%r14) + stg %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2 + lgr %r15,%r11 + aghi %r15,-STACK_FRAME_OVERHEAD + xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) + lgr %r2,%r11 # pass pointer to pt_regs jg kernel_stack_overflow #endif -cleanup_table_system_call: - .quad system_call, sysc_do_svc -cleanup_table_sysc_tif: - .quad sysc_tif, sysc_restore -cleanup_table_sysc_restore: - .quad sysc_restore, sysc_done -cleanup_table_io_tif: - .quad io_tif, io_restore -cleanup_table_io_restore: - .quad io_restore, io_done + .align 8 +cleanup_table: + .quad system_call + .quad sysc_do_svc + .quad sysc_tif + .quad sysc_restore + .quad sysc_done + .quad io_tif + .quad io_restore + .quad io_done cleanup_critical: - clc 8(8,%r12),BASED(cleanup_table_system_call) + clg %r9,BASED(cleanup_table) # system_call jl 0f - clc 8(8,%r12),BASED(cleanup_table_system_call+8) + clg %r9,BASED(cleanup_table+8) # sysc_do_svc jl cleanup_system_call -0: - clc 8(8,%r12),BASED(cleanup_table_sysc_tif) + clg %r9,BASED(cleanup_table+16) # sysc_tif jl 0f - clc 8(8,%r12),BASED(cleanup_table_sysc_tif+8) + clg %r9,BASED(cleanup_table+24) # sysc_restore jl cleanup_sysc_tif -0: - clc 8(8,%r12),BASED(cleanup_table_sysc_restore) - jl 0f - clc 8(8,%r12),BASED(cleanup_table_sysc_restore+8) + clg %r9,BASED(cleanup_table+32) # sysc_done jl cleanup_sysc_restore -0: - clc 8(8,%r12),BASED(cleanup_table_io_tif) + clg %r9,BASED(cleanup_table+40) # io_tif jl 0f - clc 8(8,%r12),BASED(cleanup_table_io_tif+8) + clg %r9,BASED(cleanup_table+48) # io_restore jl cleanup_io_tif -0: - clc 8(8,%r12),BASED(cleanup_table_io_restore) - jl 0f - clc 8(8,%r12),BASED(cleanup_table_io_restore+8) + clg %r9,BASED(cleanup_table+56) # io_done jl cleanup_io_restore -0: - br %r14 +0: br %r14 + cleanup_system_call: - mvc __LC_RETURN_PSW(16),0(%r12) - clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+8) + # check if stpt has been executed + clg %r9,BASED(cleanup_system_call_insn) jh 0f - mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER - cghi %r12,__LC_MCK_OLD_PSW - je 0f mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER -0: cghi %r12,__LC_MCK_OLD_PSW - la %r12,__LC_SAVE_AREA+80 + cghi %r11,__LC_SAVE_AREA_ASYNC je 0f - la %r12,__LC_SAVE_AREA+40 -0: clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+16) - jhe cleanup_vtime - clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn) + mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER +0: # check if stmg has been executed + clg %r9,BASED(cleanup_system_call_insn+8) jh 0f - mvc __LC_SAVE_AREA(40),0(%r12) -0: lg %r15,__LC_KERNEL_STACK # problem state -> load ksp - aghi %r15,-SP_SIZE # make room for registers & psw - stg %r15,32(%r12) - stg %r11,0(%r12) - CREATE_STACK_FRAME __LC_SAVE_AREA - mvc 8(8,%r12),__LC_THREAD_INFO - lg %r12,__LC_THREAD_INFO - mvc SP_PSW(16,%r15),__LC_SVC_OLD_PSW - mvc 
SP_SVC_CODE(4,%r15),__LC_SVC_ILC - oi __TI_flags+7(%r12),_TIF_SYSCALL -cleanup_vtime: - clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+24) - jhe cleanup_stime - UPDATE_VTIME __LC_EXIT_TIMER,__LC_SYNC_ENTER_TIMER,__LC_USER_TIMER -cleanup_stime: - clc __LC_RETURN_PSW+8(8),BASED(cleanup_system_call_insn+32) - jh cleanup_update - UPDATE_VTIME __LC_LAST_UPDATE_TIMER,__LC_EXIT_TIMER,__LC_SYSTEM_TIMER -cleanup_update: + mvc __LC_SAVE_AREA_SYNC(64),0(%r11) +0: # check if base register setup + TIF bit load has been done + clg %r9,BASED(cleanup_system_call_insn+16) + jhe 0f + # set up saved registers r10 and r12 + stg %r10,16(%r11) # r10 last break + stg %r12,32(%r11) # r12 thread-info pointer +0: # check if the user time update has been done + clg %r9,BASED(cleanup_system_call_insn+24) + jh 0f + lg %r15,__LC_EXIT_TIMER + slg %r15,__LC_SYNC_ENTER_TIMER + alg %r15,__LC_USER_TIMER + stg %r15,__LC_USER_TIMER +0: # check if the system time update has been done + clg %r9,BASED(cleanup_system_call_insn+32) + jh 0f + lg %r15,__LC_LAST_UPDATE_TIMER + slg %r15,__LC_EXIT_TIMER + alg %r15,__LC_SYSTEM_TIMER + stg %r15,__LC_SYSTEM_TIMER +0: # update accounting time stamp mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER - srag %r12,%r11,23 - lg %r12,__LC_THREAD_INFO + # do LAST_BREAK + lg %r9,16(%r11) + srag %r9,%r9,23 jz 0f - stg %r11,__TI_last_break(%r12) -0: mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_system_call+8) - la %r12,__LC_RETURN_PSW + mvc __TI_last_break(8,%r12),16(%r11) +0: # set up saved register r11 + lg %r15,__LC_KERNEL_STACK + aghi %r15,-__PT_SIZE + stg %r15,24(%r11) # r11 pt_regs pointer + # fill pt_regs + mvc __PT_R8(64,%r15),__LC_SAVE_AREA_SYNC + stmg %r0,%r7,__PT_R0(%r15) + mvc __PT_PSW(16,%r15),__LC_SVC_OLD_PSW + mvc __PT_INT_CODE(4,%r15),__LC_SVC_ILC + # setup saved register r15 + aghi %r15,-STACK_FRAME_OVERHEAD + stg %r15,56(%r11) # r15 stack pointer + # set new psw address and exit + larl %r9,sysc_do_svc br %r14 cleanup_system_call_insn: - .quad sysc_saveall .quad system_call - .quad sysc_vtime - .quad sysc_stime - .quad sysc_update + .quad sysc_stmg + .quad sysc_per + .quad sysc_vtime+18 + .quad sysc_vtime+42 cleanup_sysc_tif: - mvc __LC_RETURN_PSW(8),0(%r12) - mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_sysc_tif) - la %r12,__LC_RETURN_PSW + larl %r9,sysc_tif br %r14 cleanup_sysc_restore: - clc 8(8,%r12),BASED(cleanup_sysc_restore_insn) - je 2f - clc 8(8,%r12),BASED(cleanup_sysc_restore_insn+8) - jhe 0f - mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER - cghi %r12,__LC_MCK_OLD_PSW + clg %r9,BASED(cleanup_sysc_restore_insn) je 0f - mvc __LC_EXIT_TIMER(8),__LC_ASYNC_ENTER_TIMER -0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) - cghi %r12,__LC_MCK_OLD_PSW - la %r12,__LC_SAVE_AREA+80 - je 1f - la %r12,__LC_SAVE_AREA+40 -1: mvc 0(40,%r12),SP_R11(%r15) - lmg %r0,%r10,SP_R0(%r15) - lg %r15,SP_R15(%r15) -2: la %r12,__LC_RETURN_PSW + lg %r9,24(%r11) # get saved pointer to pt_regs + mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) + mvc 0(64,%r11),__PT_R8(%r9) + lmg %r0,%r7,__PT_R0(%r9) +0: lmg %r8,%r9,__LC_RETURN_PSW br %r14 cleanup_sysc_restore_insn: .quad sysc_done - 4 - .quad sysc_done - 16 cleanup_io_tif: - mvc __LC_RETURN_PSW(8),0(%r12) - mvc __LC_RETURN_PSW+8(8),BASED(cleanup_table_io_tif) - la %r12,__LC_RETURN_PSW + larl %r9,io_tif br %r14 cleanup_io_restore: - clc 8(8,%r12),BASED(cleanup_io_restore_insn) - je 1f - clc 8(8,%r12),BASED(cleanup_io_restore_insn+8) - jhe 0f - mvc __LC_EXIT_TIMER(8),__LC_MCCK_ENTER_TIMER -0: mvc __LC_RETURN_PSW(16),SP_PSW(%r15) - mvc __LC_SAVE_AREA+80(40),SP_R11(%r15) 
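cleanup_critical now takes the interrupted instruction address in %r9 and walks one ordered table of entry-path checkpoints; the first checkpoint the address falls short of selects which entry steps still need replaying. The control flow as a hedged C sketch (indices mirror cleanup_table above; the routines are stand-ins for the asm labels):

extern unsigned long cleanup_table[8];  /* system_call ... io_done */
extern void cleanup_system_call(void);
extern void cleanup_sysc_tif(void);
extern void cleanup_sysc_restore(void);
extern void cleanup_io_tif(void);
extern void cleanup_io_restore(void);

static void do_cleanup_critical(unsigned long addr)
{
        if (addr < cleanup_table[0]) return;    /* before system_call */
        if (addr < cleanup_table[1]) { cleanup_system_call(); return; }
        if (addr < cleanup_table[2]) return;
        if (addr < cleanup_table[3]) { cleanup_sysc_tif(); return; }
        if (addr < cleanup_table[4]) { cleanup_sysc_restore(); return; }
        if (addr < cleanup_table[5]) return;
        if (addr < cleanup_table[6]) { cleanup_io_tif(); return; }
        if (addr < cleanup_table[7]) cleanup_io_restore();
}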
- lmg %r0,%r10,SP_R0(%r15) - lg %r15,SP_R15(%r15) -1: la %r12,__LC_RETURN_PSW + clg %r9,BASED(cleanup_io_restore_insn) + je 0f + lg %r9,24(%r11) # get saved r11 pointer to pt_regs + mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) + ni __LC_RETURN_PSW+1,0xfd # clear wait state bit + mvc 0(64,%r11),__PT_R8(%r9) + lmg %r0,%r7,__PT_R0(%r9) +0: lmg %r8,%r9,__LC_RETURN_PSW br %r14 cleanup_io_restore_insn: .quad io_done - 4 - .quad io_done - 16 /* * Integer constants */ - .align 4 + .align 8 .Lcritical_start: - .quad __critical_start -.Lcritical_end: - .quad __critical_end + .quad __critical_start +.Lcritical_length: + .quad __critical_end - __critical_start + #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE) /* @@ -1054,6 +943,7 @@ ENTRY(sie64a) stmg %r6,%r14,__SF_GPRS(%r15) # save kernel registers stg %r2,__SF_EMPTY(%r15) # save control block pointer stg %r3,__SF_EMPTY+8(%r15) # save guest register save area + xc __SF_EMPTY+16(8,%r15),__SF_EMPTY+16(%r15) # host id == 0 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 lg %r14,__LC_THREAD_INFO # pointer thread_info struct oi __TI_flags+6(%r14),_TIF_SIE>>8 @@ -1070,7 +960,7 @@ sie_gmap: SPP __SF_EMPTY(%r15) # set guest id sie 0(%r14) sie_done: - SPP __LC_CMF_HPP # set host id + SPP __SF_EMPTY+16(%r15) # set host id lg %r14,__LC_THREAD_INFO # pointer thread_info struct sie_exit: lctlg %c1,%c1,__LC_USER_ASCE # load primary asce @@ -1093,8 +983,10 @@ sie_fault: .align 8 .Lsie_loop: .quad sie_loop -.Lsie_done: - .quad sie_done +.Lsie_length: + .quad sie_done - sie_loop +.Lhost_id: + .quad 0 .section __ex_table,"a" .quad sie_loop,sie_fault diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S index 900068d2bf92..c27a0727f930 100644 --- a/arch/s390/kernel/head.S +++ b/arch/s390/kernel/head.S @@ -329,8 +329,8 @@ iplstart: # # reset files in VM reader # - stidp __LC_SAVE_AREA # store cpuid - tm __LC_SAVE_AREA,0xff # running VM ? + stidp __LC_SAVE_AREA_SYNC # store cpuid + tm __LC_SAVE_AREA_SYNC,0xff# running VM ? 
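The constants above now record a (start, length) pair (.Lcritical_length, .Lsie_length) instead of a (start, end) pair. One advantage of that representation, sketched below, is that the "is this address inside the range" test collapses to a single unsigned comparison:

    #include <stdbool.h>
    #include <stdio.h>

    /* One unsigned compare replaces the pair of bounds checks a
     * (start, end) pair would need; underflow wraps for addr < start. */
    static bool in_range(unsigned long addr, unsigned long start,
                         unsigned long len)
    {
            return addr - start < len;
    }

    int main(void)
    {
            printf("%d %d\n", in_range(0x1010, 0x1000, 0x40),
                   in_range(0x0ff0, 0x1000, 0x40));    /* 1 0 */
            return 0;
    }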
bno .Lnoreset la %r2,.Lreset lhi %r3,26 diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c index 3cd0f25ab015..47b168fb29c4 100644 --- a/arch/s390/kernel/machine_kexec.c +++ b/arch/s390/kernel/machine_kexec.c @@ -208,6 +208,7 @@ void machine_kexec_cleanup(struct kimage *image) void arch_crash_save_vmcoreinfo(void) { VMCOREINFO_SYMBOL(lowcore_ptr); + VMCOREINFO_SYMBOL(high_memory); VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); } diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c index 19b4568f4cee..22d502e885ed 100644 --- a/arch/s390/kernel/mem_detect.c +++ b/arch/s390/kernel/mem_detect.c @@ -64,70 +64,82 @@ void detect_memory_layout(struct mem_chunk chunk[]) EXPORT_SYMBOL(detect_memory_layout); /* + * Move memory chunks array from index "from" to index "to" + */ +static void mem_chunk_move(struct mem_chunk chunk[], int to, int from) +{ + int cnt = MEMORY_CHUNKS - to; + + memmove(&chunk[to], &chunk[from], cnt * sizeof(struct mem_chunk)); +} + +/* + * Initialize memory chunk + */ +static void mem_chunk_init(struct mem_chunk *chunk, unsigned long addr, + unsigned long size, int type) +{ + chunk->type = type; + chunk->addr = addr; + chunk->size = size; +} + +/* * Create memory hole with given address, size, and type */ -void create_mem_hole(struct mem_chunk chunks[], unsigned long addr, +void create_mem_hole(struct mem_chunk chunk[], unsigned long addr, unsigned long size, int type) { - unsigned long start, end, new_size; - int i; + unsigned long lh_start, lh_end, lh_size, ch_start, ch_end, ch_size; + int i, ch_type; for (i = 0; i < MEMORY_CHUNKS; i++) { - if (chunks[i].size == 0) - continue; - if (addr + size < chunks[i].addr) - continue; - if (addr >= chunks[i].addr + chunks[i].size) + if (chunk[i].size == 0) continue; - start = max(addr, chunks[i].addr); - end = min(addr + size, chunks[i].addr + chunks[i].size); - new_size = end - start; - if (new_size == 0) - continue; - if (start == chunks[i].addr && - end == chunks[i].addr + chunks[i].size) { - /* Remove chunk */ - chunks[i].type = type; - } else if (start == chunks[i].addr) { - /* Make chunk smaller at start */ - if (i >= MEMORY_CHUNKS - 1) - panic("Unable to create memory hole"); - memmove(&chunks[i + 1], &chunks[i], - sizeof(struct mem_chunk) * - (MEMORY_CHUNKS - (i + 1))); - chunks[i + 1].addr = chunks[i].addr + new_size; - chunks[i + 1].size = chunks[i].size - new_size; - chunks[i].size = new_size; - chunks[i].type = type; - i += 1; - } else if (end == chunks[i].addr + chunks[i].size) { - /* Make chunk smaller at end */ - if (i >= MEMORY_CHUNKS - 1) - panic("Unable to create memory hole"); - memmove(&chunks[i + 1], &chunks[i], - sizeof(struct mem_chunk) * - (MEMORY_CHUNKS - (i + 1))); - chunks[i + 1].addr = start; - chunks[i + 1].size = new_size; - chunks[i + 1].type = type; - chunks[i].size -= new_size; + + /* Define chunk properties */ + ch_start = chunk[i].addr; + ch_size = chunk[i].size; + ch_end = ch_start + ch_size - 1; + ch_type = chunk[i].type; + + /* Is memory chunk hit by memory hole? 
*/ + if (addr + size <= ch_start) + continue; /* No: memory hole in front of chunk */ + if (addr > ch_end) + continue; /* No: memory hole after chunk */ + + /* Yes: Define local hole properties */ + lh_start = max(addr, chunk[i].addr); + lh_end = min(addr + size - 1, ch_end); + lh_size = lh_end - lh_start + 1; + + if (lh_start == ch_start && lh_end == ch_end) { + /* Hole covers complete memory chunk */ + mem_chunk_init(&chunk[i], lh_start, lh_size, type); + } else if (lh_end == ch_end) { + /* Hole starts in memory chunk and convers chunk end */ + mem_chunk_move(chunk, i + 1, i); + mem_chunk_init(&chunk[i], ch_start, ch_size - lh_size, + ch_type); + mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type); i += 1; + } else if (lh_start == ch_start) { + /* Hole ends in memory chunk */ + mem_chunk_move(chunk, i + 1, i); + mem_chunk_init(&chunk[i], lh_start, lh_size, type); + mem_chunk_init(&chunk[i + 1], lh_end + 1, + ch_size - lh_size, ch_type); + break; } else { - /* Create memory hole */ - if (i >= MEMORY_CHUNKS - 2) - panic("Unable to create memory hole"); - memmove(&chunks[i + 2], &chunks[i], - sizeof(struct mem_chunk) * - (MEMORY_CHUNKS - (i + 2))); - chunks[i + 1].addr = addr; - chunks[i + 1].size = size; - chunks[i + 1].type = type; - chunks[i + 2].addr = addr + size; - chunks[i + 2].size = - chunks[i].addr + chunks[i].size - (addr + size); - chunks[i + 2].type = chunks[i].type; - chunks[i].size = addr - chunks[i].addr; - i += 2; + /* Hole splits memory chunk */ + mem_chunk_move(chunk, i + 2, i); + mem_chunk_init(&chunk[i], ch_start, + lh_start - ch_start, ch_type); + mem_chunk_init(&chunk[i + 1], lh_start, lh_size, type); + mem_chunk_init(&chunk[i + 2], lh_end + 1, + ch_end - lh_end, ch_type); + break; } } } diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S index 732a793ec53a..36b32658fb24 100644 --- a/arch/s390/kernel/reipl64.S +++ b/arch/s390/kernel/reipl64.S @@ -17,11 +17,11 @@ # ENTRY(store_status) /* Save register one and load save area base */ - stg %r1,__LC_SAVE_AREA+120(%r0) + stg %r1,__LC_SAVE_AREA_RESTART lghi %r1,SAVE_AREA_BASE /* General purpose registers */ stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) - lg %r2,__LC_SAVE_AREA+120(%r0) + lg %r2,__LC_SAVE_AREA_RESTART stg %r2,__LC_GPREGS_SAVE_AREA-SAVE_AREA_BASE+8(%r1) /* Control registers */ stctg %c0,%c15,__LC_CREGS_SAVE_AREA-SAVE_AREA_BASE(%r1) diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index f11d1b037c50..354de0763eff 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -95,6 +95,15 @@ struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS]; int __initdata memory_end_set; unsigned long __initdata memory_end; +unsigned long VMALLOC_START; +EXPORT_SYMBOL(VMALLOC_START); + +unsigned long VMALLOC_END; +EXPORT_SYMBOL(VMALLOC_END); + +struct page *vmemmap; +EXPORT_SYMBOL(vmemmap); + /* An array with a pointer to the lowcore of every CPU. 
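With inclusive [start, end] bounds, the rewritten create_mem_hole() above reduces to a four-way classification of how the hole overlaps a chunk. A standalone sketch of that classification (the enum values are illustrative):

    #include <stdio.h>

    enum overlap { NONE, COVERS, TAIL, HEAD, SPLITS };

    /* ch_*: chunk bounds, hole given as addr/size; bounds are inclusive,
     * mirroring the lh_start/lh_end arithmetic above. */
    static enum overlap classify(unsigned long ch_start, unsigned long ch_end,
                                 unsigned long addr, unsigned long size)
    {
            unsigned long lh_start, lh_end;

            if (addr + size <= ch_start || addr > ch_end)
                    return NONE;
            lh_start = addr > ch_start ? addr : ch_start;
            lh_end = addr + size - 1 < ch_end ? addr + size - 1 : ch_end;
            if (lh_start == ch_start && lh_end == ch_end)
                    return COVERS;  /* retype chunk in place */
            if (lh_end == ch_end)
                    return TAIL;    /* shrink chunk, append hole: one move */
            if (lh_start == ch_start)
                    return HEAD;    /* insert hole before remainder: one move */
            return SPLITS;          /* chunk -> chunk, hole, chunk: two slots */
    }

    int main(void)
    {
            /* hole strictly inside the chunk: SPLITS (prints 4) */
            printf("%d\n", classify(0x0, 0xfff, 0x100, 0x100));
            return 0;
    }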
*/ struct _lowcore *lowcore_ptr[NR_CPUS]; EXPORT_SYMBOL(lowcore_ptr); @@ -278,6 +287,15 @@ static int __init early_parse_mem(char *p) } early_param("mem", early_parse_mem); +static int __init parse_vmalloc(char *arg) +{ + if (!arg) + return -EINVAL; + VMALLOC_END = (memparse(arg, &arg) + PAGE_SIZE - 1) & PAGE_MASK; + return 0; +} +early_param("vmalloc", parse_vmalloc); + unsigned int user_mode = HOME_SPACE_MODE; EXPORT_SYMBOL_GPL(user_mode); @@ -383,7 +401,6 @@ setup_lowcore(void) __ctl_set_bit(14, 29); } #else - lc->cmf_hpp = -1ULL; lc->vdso_per_cpu_data = (unsigned long) &lc->paste[0]; #endif lc->sync_enter_timer = S390_lowcore.sync_enter_timer; @@ -479,8 +496,7 @@ EXPORT_SYMBOL_GPL(real_memory_size); static void __init setup_memory_end(void) { - unsigned long memory_size; - unsigned long max_mem; + unsigned long vmax, vmalloc_size, tmp; int i; @@ -490,12 +506,9 @@ static void __init setup_memory_end(void) memory_end_set = 1; } #endif - memory_size = 0; + real_memory_size = 0; memory_end &= PAGE_MASK; - max_mem = memory_end ? min(VMEM_MAX_PHYS, memory_end) : VMEM_MAX_PHYS; - memory_end = min(max_mem, memory_end); - /* * Make sure all chunks are MAX_ORDER aligned so we don't need the * extra checks that HOLES_IN_ZONE would require. @@ -515,23 +528,48 @@ static void __init setup_memory_end(void) chunk->addr = start; chunk->size = end - start; } + real_memory_size = max(real_memory_size, + chunk->addr + chunk->size); } + /* Choose kernel address space layout: 2, 3, or 4 levels. */ +#ifdef CONFIG_64BIT + vmalloc_size = VMALLOC_END ?: 128UL << 30; + tmp = (memory_end ?: real_memory_size) / PAGE_SIZE; + tmp = tmp * (sizeof(struct page) + PAGE_SIZE) + vmalloc_size; + if (tmp <= (1UL << 42)) + vmax = 1UL << 42; /* 3-level kernel page table */ + else + vmax = 1UL << 53; /* 4-level kernel page table */ +#else + vmalloc_size = VMALLOC_END ?: 96UL << 20; + vmax = 1UL << 31; /* 2-level kernel page table */ +#endif + /* vmalloc area is at the end of the kernel address space. 
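The 64-bit branch above sizes the kernel address space before the page tables exist: every byte of RAM needs a byte of 1:1 mapping plus one struct page of vmemmap, and the vmalloc area sits on top; if the total fits below 2^42, a three-level table is enough. A sketch of that calculation, assuming a 64-byte struct page:

    #include <stdio.h>

    #define PAGE_SIZE   4096UL
    #define STRUCT_PAGE 64UL    /* assumed sizeof(struct page) */

    /* Returns the top of the kernel address space: 2^42 if three
     * page-table levels suffice, 2^53 otherwise. */
    static unsigned long pick_vmax(unsigned long ram,
                                   unsigned long vmalloc_size)
    {
            unsigned long need;

            need = ram / PAGE_SIZE * (STRUCT_PAGE + PAGE_SIZE) + vmalloc_size;
            return need <= (1UL << 42) ? (1UL << 42) : (1UL << 53);
    }

    int main(void)
    {
            /* 1 TiB of RAM plus the default 128 GiB vmalloc area still
             * fits in a three-level (4 TiB) layout */
            printf("vmax = 2^%d\n",
                   pick_vmax(1UL << 40, 128UL << 30) == (1UL << 42) ? 42 : 53);
            return 0;
    }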
*/ + VMALLOC_END = vmax; + VMALLOC_START = vmax - vmalloc_size; + + /* Split remaining virtual space between 1:1 mapping & vmemmap array */ + tmp = VMALLOC_START / (PAGE_SIZE + sizeof(struct page)); + tmp = VMALLOC_START - tmp * sizeof(struct page); + tmp &= ~((vmax >> 11) - 1); /* align to page table level */ + tmp = min(tmp, 1UL << MAX_PHYSMEM_BITS); + vmemmap = (struct page *) tmp; + + /* Take care that memory_end is set and <= vmemmap */ + memory_end = min(memory_end ?: real_memory_size, tmp); + + /* Fixup memory chunk array to fit into 0..memory_end */ for (i = 0; i < MEMORY_CHUNKS; i++) { struct mem_chunk *chunk = &memory_chunk[i]; - real_memory_size = max(real_memory_size, - chunk->addr + chunk->size); - if (chunk->addr >= max_mem) { + if (chunk->addr >= memory_end) { memset(chunk, 0, sizeof(*chunk)); continue; } - if (chunk->addr + chunk->size > max_mem) - chunk->size = max_mem - chunk->addr; - memory_size = max(memory_size, chunk->addr + chunk->size); + if (chunk->addr + chunk->size > memory_end) + chunk->size = memory_end - chunk->addr; } - if (!memory_end) - memory_end = memory_size; } void *restart_stack __attribute__((__section__(".data"))); @@ -655,7 +693,6 @@ static int __init verify_crash_base(unsigned long crash_base, static void __init reserve_kdump_bootmem(unsigned long addr, unsigned long size, int type) { - create_mem_hole(memory_chunk, addr, size, type); } diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c index 7f6f9f354545..a8ba840294ff 100644 --- a/arch/s390/kernel/signal.c +++ b/arch/s390/kernel/signal.c @@ -302,9 +302,13 @@ static int setup_frame(int sig, struct k_sigaction *ka, /* We forgot to include these in the sigcontext. To avoid breaking binary compatibility, they are passed as args. */ - regs->gprs[4] = current->thread.trap_no; - regs->gprs[5] = current->thread.prot_addr; - regs->gprs[6] = task_thread_info(current)->last_break; + if (sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || + sig == SIGTRAP || sig == SIGFPE) { + /* set extra registers only for synchronous signals */ + regs->gprs[4] = regs->int_code & 127; + regs->gprs[5] = regs->int_parm_long; + regs->gprs[6] = task_thread_info(current)->last_break; + } /* Place signal number on stack to allow backtrace from handler. */ if (__put_user(regs->gprs[2], (int __user *) &frame->signo)) @@ -434,13 +438,13 @@ void do_signal(struct pt_regs *regs) * call information. */ current_thread_info()->system_call = - test_thread_flag(TIF_SYSCALL) ? regs->svc_code : 0; + test_thread_flag(TIF_SYSCALL) ? regs->int_code : 0; signr = get_signal_to_deliver(&info, &ka, regs, NULL); if (signr > 0) { /* Whee! Actually deliver the signal. */ if (current_thread_info()->system_call) { - regs->svc_code = current_thread_info()->system_call; + regs->int_code = current_thread_info()->system_call; /* Check for system call restarting. 
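The split between the 1:1 mapping and the vmemmap array above follows from a simple budget: each mappable page costs PAGE_SIZE bytes of identity map plus sizeof(struct page) of vmemmap, so at most VMALLOC_START / (PAGE_SIZE + sizeof(struct page)) pages fit below the vmalloc area. A worked sketch, again assuming a 64-byte struct page and omitting the page-table alignment and MAX_PHYSMEM_BITS clamp done above:

    #include <stdio.h>

    #define PAGE_SIZE   4096UL
    #define STRUCT_PAGE 64UL    /* assumed sizeof(struct page) */

    int main(void)
    {
            /* three-level layout with the default 128 GiB vmalloc area */
            unsigned long vmalloc_start = (1UL << 42) - (128UL << 30);
            unsigned long pages = vmalloc_start / (PAGE_SIZE + STRUCT_PAGE);
            unsigned long vmemmap = vmalloc_start - pages * STRUCT_PAGE;

            printf("identity map up to %lu GiB, vmemmap starts at %#lx\n",
                   pages * PAGE_SIZE >> 30, vmemmap);
            return 0;
    }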
*/ switch (regs->gprs[2]) { case -ERESTART_RESTARTBLOCK: @@ -457,7 +461,7 @@ void do_signal(struct pt_regs *regs) regs->gprs[2] = regs->orig_gpr2; regs->psw.addr = __rewind_psw(regs->psw, - regs->svc_code >> 16); + regs->int_code >> 16); break; } } @@ -488,11 +492,11 @@ void do_signal(struct pt_regs *regs) /* No handlers present - check for system call restart */ clear_thread_flag(TIF_SYSCALL); if (current_thread_info()->system_call) { - regs->svc_code = current_thread_info()->system_call; + regs->int_code = current_thread_info()->system_call; switch (regs->gprs[2]) { case -ERESTART_RESTARTBLOCK: /* Restart with sys_restart_syscall */ - regs->svc_code = __NR_restart_syscall; + regs->int_code = __NR_restart_syscall; /* fallthrough */ case -ERESTARTNOHAND: case -ERESTARTSYS: diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 66cca03c0282..2398ce6b15ae 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c @@ -69,9 +69,7 @@ enum s390_cpu_state { }; DEFINE_MUTEX(smp_cpu_state_mutex); -int smp_cpu_polarization[NR_CPUS]; static int smp_cpu_state[NR_CPUS]; -static int cpu_management; static DEFINE_PER_CPU(struct cpu, cpu_devices); @@ -149,29 +147,59 @@ void smp_switch_to_ipl_cpu(void (*func)(void *), void *data) sp -= sizeof(struct pt_regs); regs = (struct pt_regs *) sp; memcpy(®s->gprs, ¤t_lc->gpregs_save_area, sizeof(regs->gprs)); - regs->psw = lc->psw_save_area; + regs->psw = current_lc->psw_save_area; sp -= STACK_FRAME_OVERHEAD; sf = (struct stack_frame *) sp; - sf->back_chain = regs->gprs[15]; + sf->back_chain = 0; smp_switch_to_cpu(func, data, sp, stap(), __cpu_logical_map[0]); } +static void smp_stop_cpu(void) +{ + while (sigp(smp_processor_id(), sigp_stop) == sigp_busy) + cpu_relax(); +} + void smp_send_stop(void) { - int cpu, rc; + cpumask_t cpumask; + int cpu; + u64 end; /* Disable all interrupts/machine checks */ __load_psw_mask(psw_kernel_bits | PSW_MASK_DAT); trace_hardirqs_off(); - /* stop all processors */ - for_each_online_cpu(cpu) { - if (cpu == smp_processor_id()) - continue; - do { - rc = sigp(cpu, sigp_stop); - } while (rc == sigp_busy); + cpumask_copy(&cpumask, cpu_online_mask); + cpumask_clear_cpu(smp_processor_id(), &cpumask); + + if (oops_in_progress) { + /* + * Give the other cpus the opportunity to complete + * outstanding interrupts before stopping them. 
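The restart handling above relies on entry.S storing the interrupting instruction's length in the upper 16 bits of int_code, so the PSW can be rewound over the svc to re-execute it. A simplified sketch that ignores the addressing-mode masking the real __rewind_psw() performs:

    #include <stdio.h>

    struct regs {
            unsigned long psw_addr;
            unsigned long gpr2, orig_gpr2;
            unsigned int int_code;      /* ilc << 16 | svc number */
    };

    static void restart_syscall(struct regs *regs)
    {
            regs->gpr2 = regs->orig_gpr2;           /* restore first argument */
            regs->psw_addr -= regs->int_code >> 16; /* rewind over the svc */
    }

    int main(void)
    {
            struct regs r = { .psw_addr = 0x1002, .orig_gpr2 = 42,
                              .int_code = (2 << 16) | 0x0a };
            restart_syscall(&r);
            printf("resume at %#lx, gpr2 = %lu\n", r.psw_addr, r.gpr2);
            return 0;
    }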
+ */ + end = get_clock() + (1000000UL << 12); + for_each_cpu(cpu, &cpumask) { + set_bit(ec_stop_cpu, (unsigned long *) + &lowcore_ptr[cpu]->ext_call_fast); + while (sigp(cpu, sigp_emergency_signal) == sigp_busy && + get_clock() < end) + cpu_relax(); + } + while (get_clock() < end) { + for_each_cpu(cpu, &cpumask) + if (cpu_stopped(cpu)) + cpumask_clear_cpu(cpu, &cpumask); + if (cpumask_empty(&cpumask)) + break; + cpu_relax(); + } + } + /* stop all processors */ + for_each_cpu(cpu, &cpumask) { + while (sigp(cpu, sigp_stop) == sigp_busy) + cpu_relax(); while (!cpu_stopped(cpu)) cpu_relax(); } @@ -187,7 +215,7 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, { unsigned long bits; - if (ext_int_code == 0x1202) + if ((ext_int_code & 0xffff) == 0x1202) kstat_cpu(smp_processor_id()).irqs[EXTINT_EXC]++; else kstat_cpu(smp_processor_id()).irqs[EXTINT_EMS]++; @@ -196,6 +224,9 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, */ bits = xchg(&S390_lowcore.ext_call_fast, 0); + if (test_bit(ec_stop_cpu, &bits)) + smp_stop_cpu(); + if (test_bit(ec_schedule, &bits)) scheduler_ipi(); @@ -204,6 +235,7 @@ static void do_ext_call_interrupt(unsigned int ext_int_code, if (test_bit(ec_call_function_single, &bits)) generic_smp_call_function_single_interrupt(); + } /* @@ -369,7 +401,7 @@ static int smp_rescan_cpus_sigp(cpumask_t avail) if (cpu_known(cpu_id)) continue; __cpu_logical_map[logical_cpu] = cpu_id; - smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; + cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN); if (!cpu_stopped(logical_cpu)) continue; set_cpu_present(logical_cpu, true); @@ -403,7 +435,7 @@ static int smp_rescan_cpus_sclp(cpumask_t avail) if (cpu_known(cpu_id)) continue; __cpu_logical_map[logical_cpu] = cpu_id; - smp_cpu_polarization[logical_cpu] = POLARIZATION_UNKNWN; + cpu_set_polarization(logical_cpu, POLARIZATION_UNKNOWN); set_cpu_present(logical_cpu, true); if (cpu >= info->configured) smp_cpu_state[logical_cpu] = CPU_STATE_STANDBY; @@ -656,7 +688,7 @@ int __cpuinit __cpu_up(unsigned int cpu) - sizeof(struct stack_frame)); memset(sf, 0, sizeof(struct stack_frame)); sf->gprs[9] = (unsigned long) sf; - cpu_lowcore->save_area[15] = (unsigned long) sf; + cpu_lowcore->gpregs_save_area[15] = (unsigned long) sf; __ctl_store(cpu_lowcore->cregs_save_area, 0, 15); atomic_inc(&init_mm.context.attach_count); asm volatile( @@ -806,7 +838,7 @@ void __init smp_prepare_boot_cpu(void) S390_lowcore.percpu_offset = __per_cpu_offset[0]; current_set[0] = current; smp_cpu_state[0] = CPU_STATE_CONFIGURED; - smp_cpu_polarization[0] = POLARIZATION_UNKNWN; + cpu_set_polarization(0, POLARIZATION_UNKNOWN); } void __init smp_cpus_done(unsigned int max_cpus) @@ -868,7 +900,8 @@ static ssize_t cpu_configure_store(struct device *dev, rc = sclp_cpu_deconfigure(__cpu_logical_map[cpu]); if (!rc) { smp_cpu_state[cpu] = CPU_STATE_STANDBY; - smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; + cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + topology_expect_change(); } } break; @@ -877,7 +910,8 @@ static ssize_t cpu_configure_store(struct device *dev, rc = sclp_cpu_configure(__cpu_logical_map[cpu]); if (!rc) { smp_cpu_state[cpu] = CPU_STATE_CONFIGURED; - smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; + cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); + topology_expect_change(); } } break; @@ -892,35 +926,6 @@ out: static DEVICE_ATTR(configure, 0644, cpu_configure_show, cpu_configure_store); #endif /* CONFIG_HOTPLUG_CPU */ -static ssize_t cpu_polarization_show(struct device *dev, - struct 
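The smp_send_stop() rework above gives the other CPUs a bounded grace period during an oops: signal them to stop themselves, poll until they report stopped or a deadline expires (1000000UL << 12 TOD clock units, i.e. one second), then force-stop whatever remains. A userspace sketch of the same protocol, with stubs standing in for sigp and the TOD clock:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static long long now_ns(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return ts.tv_sec * 1000000000LL + ts.tv_nsec;
    }

    /* Stubs standing in for sigp emergency-signal / sigp stop. */
    static void request_stop(int cpu) { (void)cpu; }
    static void force_stop(int cpu) { (void)cpu; }
    static bool cpu_stopped(int cpu) { (void)cpu; return true; }

    static void send_stop(int ncpus)
    {
            long long end = now_ns() + 1000000000LL;    /* 1s grace */
            int cpu, pending = ncpus;

            for (cpu = 0; cpu < ncpus; cpu++)
                    request_stop(cpu);  /* ask CPUs to stop themselves */
            while (pending && now_ns() < end) {
                    pending = 0;
                    for (cpu = 0; cpu < ncpus; cpu++)
                            pending += !cpu_stopped(cpu);
            }
            for (cpu = 0; cpu < ncpus; cpu++)
                    if (!cpu_stopped(cpu))
                            force_stop(cpu);    /* hard stop stragglers */
    }

    int main(void)
    {
            send_stop(4);
            return 0;
    }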
device_attribute *attr, char *buf) -{ - int cpu = dev->id; - ssize_t count; - - mutex_lock(&smp_cpu_state_mutex); - switch (smp_cpu_polarization[cpu]) { - case POLARIZATION_HRZ: - count = sprintf(buf, "horizontal\n"); - break; - case POLARIZATION_VL: - count = sprintf(buf, "vertical:low\n"); - break; - case POLARIZATION_VM: - count = sprintf(buf, "vertical:medium\n"); - break; - case POLARIZATION_VH: - count = sprintf(buf, "vertical:high\n"); - break; - default: - count = sprintf(buf, "unknown\n"); - break; - } - mutex_unlock(&smp_cpu_state_mutex); - return count; -} -static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL); - static ssize_t show_cpu_address(struct device *dev, struct device_attribute *attr, char *buf) { @@ -928,13 +933,11 @@ static ssize_t show_cpu_address(struct device *dev, } static DEVICE_ATTR(address, 0444, show_cpu_address, NULL); - static struct attribute *cpu_common_attrs[] = { #ifdef CONFIG_HOTPLUG_CPU &dev_attr_configure.attr, #endif &dev_attr_address.attr, - &dev_attr_polarization.attr, NULL, }; @@ -1055,11 +1058,20 @@ static int __devinit smp_add_present_cpu(int cpu) rc = sysfs_create_group(&s->kobj, &cpu_common_attr_group); if (rc) goto out_cpu; - if (!cpu_online(cpu)) - goto out; - rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group); - if (!rc) - return 0; + if (cpu_online(cpu)) { + rc = sysfs_create_group(&s->kobj, &cpu_online_attr_group); + if (rc) + goto out_online; + } + rc = topology_cpu_init(c); + if (rc) + goto out_topology; + return 0; + +out_topology: + if (cpu_online(cpu)) + sysfs_remove_group(&s->kobj, &cpu_online_attr_group); +out_online: sysfs_remove_group(&s->kobj, &cpu_common_attr_group); out_cpu: #ifdef CONFIG_HOTPLUG_CPU @@ -1111,61 +1123,16 @@ static ssize_t __ref rescan_store(struct device *dev, static DEVICE_ATTR(rescan, 0200, NULL, rescan_store); #endif /* CONFIG_HOTPLUG_CPU */ -static ssize_t dispatching_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - ssize_t count; - - mutex_lock(&smp_cpu_state_mutex); - count = sprintf(buf, "%d\n", cpu_management); - mutex_unlock(&smp_cpu_state_mutex); - return count; -} - -static ssize_t dispatching_store(struct device *dev, - struct device_attribute *attr, - const char *buf, - size_t count) -{ - int val, rc; - char delim; - - if (sscanf(buf, "%d %c", &val, &delim) != 1) - return -EINVAL; - if (val != 0 && val != 1) - return -EINVAL; - rc = 0; - get_online_cpus(); - mutex_lock(&smp_cpu_state_mutex); - if (cpu_management == val) - goto out; - rc = topology_set_cpu_management(val); - if (!rc) - cpu_management = val; -out: - mutex_unlock(&smp_cpu_state_mutex); - put_online_cpus(); - return rc ? 
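The do_ext_call_interrupt() path above treats ext_call_fast as a bit mailbox: senders set a reason bit and raise the interrupt, and the handler atomically swaps the word to zero, so several requests coalesce into one interrupt. A sketch with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    enum { EC_SCHEDULE, EC_CALL_FUNC_SINGLE, EC_STOP_CPU };

    static _Atomic unsigned long mailbox;   /* per-CPU in the kernel */

    static void send(int reason)
    {
            atomic_fetch_or(&mailbox, 1UL << reason);
            /* ...then raise the external-call interrupt on the target... */
    }

    static void handle_interrupt(void)
    {
            /* grab and clear all pending reasons in one shot */
            unsigned long bits = atomic_exchange(&mailbox, 0);

            if (bits & (1UL << EC_STOP_CPU))
                    puts("stop this cpu");
            if (bits & (1UL << EC_SCHEDULE))
                    puts("scheduler ipi");
            if (bits & (1UL << EC_CALL_FUNC_SINGLE))
                    puts("call function single");
    }

    int main(void)
    {
            send(EC_SCHEDULE);
            send(EC_STOP_CPU);
            handle_interrupt();     /* one interrupt services both */
            return 0;
    }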
rc : count; -} -static DEVICE_ATTR(dispatching, 0644, dispatching_show, - dispatching_store); - -static int __init topology_init(void) +static int __init s390_smp_init(void) { - int cpu; - int rc; + int cpu, rc; register_cpu_notifier(&smp_cpu_nb); - #ifdef CONFIG_HOTPLUG_CPU rc = device_create_file(cpu_subsys.dev_root, &dev_attr_rescan); if (rc) return rc; #endif - rc = device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); - if (rc) - return rc; for_each_present_cpu(cpu) { rc = smp_add_present_cpu(cpu); if (rc) @@ -1173,4 +1140,4 @@ static int __init topology_init(void) } return 0; } -subsys_initcall(topology_init); +subsys_initcall(s390_smp_init); diff --git a/arch/s390/kernel/sys_s390.c b/arch/s390/kernel/sys_s390.c index 476081440df9..78ea1948ff51 100644 --- a/arch/s390/kernel/sys_s390.c +++ b/arch/s390/kernel/sys_s390.c @@ -60,74 +60,22 @@ out: } /* - * sys_ipc() is the de-multiplexer for the SysV IPC calls.. - * - * This is really horribly ugly. + * sys_ipc() is the de-multiplexer for the SysV IPC calls. */ SYSCALL_DEFINE5(s390_ipc, uint, call, int, first, unsigned long, second, unsigned long, third, void __user *, ptr) { - struct ipc_kludge tmp; - int ret; - - switch (call) { - case SEMOP: - return sys_semtimedop(first, (struct sembuf __user *)ptr, - (unsigned)second, NULL); - case SEMTIMEDOP: - return sys_semtimedop(first, (struct sembuf __user *)ptr, - (unsigned)second, - (const struct timespec __user *) third); - case SEMGET: - return sys_semget(first, (int)second, third); - case SEMCTL: { - union semun fourth; - if (!ptr) - return -EINVAL; - if (get_user(fourth.__pad, (void __user * __user *) ptr)) - return -EFAULT; - return sys_semctl(first, (int)second, third, fourth); - } - case MSGSND: - return sys_msgsnd (first, (struct msgbuf __user *) ptr, - (size_t)second, third); - break; - case MSGRCV: - if (!ptr) - return -EINVAL; - if (copy_from_user (&tmp, (struct ipc_kludge __user *) ptr, - sizeof (struct ipc_kludge))) - return -EFAULT; - return sys_msgrcv (first, tmp.msgp, - (size_t)second, tmp.msgtyp, third); - case MSGGET: - return sys_msgget((key_t)first, (int)second); - case MSGCTL: - return sys_msgctl(first, (int)second, - (struct msqid_ds __user *)ptr); - - case SHMAT: { - ulong raddr; - ret = do_shmat(first, (char __user *)ptr, - (int)second, &raddr); - if (ret) - return ret; - return put_user (raddr, (ulong __user *) third); - break; - } - case SHMDT: - return sys_shmdt ((char __user *)ptr); - case SHMGET: - return sys_shmget(first, (size_t)second, third); - case SHMCTL: - return sys_shmctl(first, (int)second, - (struct shmid_ds __user *) ptr); - default: - return -ENOSYS; - - } - - return -EINVAL; + if (call >> 16) + return -EINVAL; + /* The s390 sys_ipc variant has only five parameters instead of six + * like the generic variant. The only difference is the handling of + * the SEMTIMEDOP subcall where on s390 the third parameter is used + * as a pointer to a struct timespec where the generic variant uses + * the fifth parameter. + * Therefore we can call the generic variant by simply passing the + * third parameter also as fifth parameter. + */ + return sys_ipc(call, first, second, third, ptr, third); } #ifdef CONFIG_64BIT diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 6e0e29b29a7b..7370a41948ca 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c @@ -1,22 +1,22 @@ /* - * Copyright IBM Corp. 2007 + * Copyright IBM Corp. 
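The new sys_ipc wrapper above works because the only layout difference from the generic six-argument demultiplexer is SEMTIMEDOP's timeout pointer, which s390 carries in the third parameter; passing third in the fifth slot as well makes the generic code do the right thing for every subcall. A runnable sketch with a stub for the generic function:

    #include <stdio.h>

    #define EINVAL     22
    #define SEMTIMEDOP 4

    /* Stub standing in for the generic six-argument demultiplexer. */
    static long sys_ipc(unsigned int call, int first, unsigned long second,
                        unsigned long third, void *ptr, unsigned long fifth)
    {
            if ((call & 0xffff) == SEMTIMEDOP)
                    printf("timeout pointer taken from fifth arg: %#lx\n",
                           fifth);
            return 0;
    }

    static long s390_ipc(unsigned int call, int first, unsigned long second,
                         unsigned long third, void *ptr)
    {
            if (call >> 16)
                    return -EINVAL;
            /* third doubles as the fifth (timeout) argument */
            return sys_ipc(call, first, second, third, ptr, third);
    }

    int main(void)
    {
            return (int)s390_ipc(SEMTIMEDOP, 1, 1, 0xdead, NULL);
    }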
2007,2011 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> */ #define KMSG_COMPONENT "cpu" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt -#include <linux/kernel.h> -#include <linux/mm.h> -#include <linux/init.h> -#include <linux/device.h> +#include <linux/workqueue.h> #include <linux/bootmem.h> +#include <linux/cpuset.h> +#include <linux/device.h> +#include <linux/kernel.h> #include <linux/sched.h> -#include <linux/workqueue.h> +#include <linux/init.h> +#include <linux/delay.h> #include <linux/cpu.h> #include <linux/smp.h> -#include <linux/cpuset.h> -#include <asm/delay.h> +#include <linux/mm.h> #define PTF_HORIZONTAL (0UL) #define PTF_VERTICAL (1UL) @@ -31,7 +31,6 @@ struct mask_info { static int topology_enabled = 1; static void topology_work_fn(struct work_struct *work); static struct sysinfo_15_1_x *tl_info; -static struct timer_list topology_timer; static void set_topology_timer(void); static DECLARE_WORK(topology_work, topology_work_fn); /* topology_lock protects the core linked list */ @@ -41,11 +40,12 @@ static struct mask_info core_info; cpumask_t cpu_core_map[NR_CPUS]; unsigned char cpu_core_id[NR_CPUS]; -#ifdef CONFIG_SCHED_BOOK static struct mask_info book_info; cpumask_t cpu_book_map[NR_CPUS]; unsigned char cpu_book_id[NR_CPUS]; -#endif + +/* smp_cpu_state_mutex must be held when accessing this array */ +int cpu_polarization[NR_CPUS]; static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) { @@ -71,7 +71,7 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, struct mask_info *book, struct mask_info *core, - int z10) + int one_core_per_cpu) { unsigned int cpu; @@ -85,18 +85,16 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu, for_each_present_cpu(lcpu) { if (cpu_logical_map(lcpu) != rcpu) continue; -#ifdef CONFIG_SCHED_BOOK cpumask_set_cpu(lcpu, &book->mask); cpu_book_id[lcpu] = book->id; -#endif cpumask_set_cpu(lcpu, &core->mask); - if (z10) { + if (one_core_per_cpu) { cpu_core_id[lcpu] = rcpu; core = core->next; } else { cpu_core_id[lcpu] = core->id; } - smp_cpu_polarization[lcpu] = tl_cpu->pp; + cpu_set_polarization(lcpu, tl_cpu->pp); } } return core; @@ -111,13 +109,11 @@ static void clear_masks(void) cpumask_clear(&info->mask); info = info->next; } -#ifdef CONFIG_SCHED_BOOK info = &book_info; while (info) { cpumask_clear(&info->mask); info = info->next; } -#endif } static union topology_entry *next_tle(union topology_entry *tle) @@ -127,66 +123,75 @@ static union topology_entry *next_tle(union topology_entry *tle) return (union topology_entry *)((struct topology_container *)tle + 1); } -static void tl_to_cores(struct sysinfo_15_1_x *info) +static void __tl_to_cores_generic(struct sysinfo_15_1_x *info) { -#ifdef CONFIG_SCHED_BOOK - struct mask_info *book = &book_info; - struct cpuid cpu_id; -#else - struct mask_info *book = NULL; -#endif struct mask_info *core = &core_info; + struct mask_info *book = &book_info; union topology_entry *tle, *end; - int z10 = 0; -#ifdef CONFIG_SCHED_BOOK - get_cpu_id(&cpu_id); - z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098; -#endif - spin_lock_irq(&topology_lock); - clear_masks(); tle = info->tle; end = (union topology_entry *)((unsigned long)info + info->length); while (tle < end) { -#ifdef CONFIG_SCHED_BOOK - if (z10) { - switch (tle->nl) { - case 1: - book = book->next; - book->id = tle->container.id; - break; - case 0: - core = add_cpus_to_mask(&tle->cpu, book, core, z10); - break; - 
default: - clear_masks(); - goto out; - } - tle = next_tle(tle); - continue; - } -#endif switch (tle->nl) { -#ifdef CONFIG_SCHED_BOOK case 2: book = book->next; book->id = tle->container.id; break; -#endif case 1: core = core->next; core->id = tle->container.id; break; case 0: - add_cpus_to_mask(&tle->cpu, book, core, z10); + add_cpus_to_mask(&tle->cpu, book, core, 0); break; default: clear_masks(); - goto out; + return; } tle = next_tle(tle); } -out: +} + +static void __tl_to_cores_z10(struct sysinfo_15_1_x *info) +{ + struct mask_info *core = &core_info; + struct mask_info *book = &book_info; + union topology_entry *tle, *end; + + tle = info->tle; + end = (union topology_entry *)((unsigned long)info + info->length); + while (tle < end) { + switch (tle->nl) { + case 1: + book = book->next; + book->id = tle->container.id; + break; + case 0: + core = add_cpus_to_mask(&tle->cpu, book, core, 1); + break; + default: + clear_masks(); + return; + } + tle = next_tle(tle); + } +} + +static void tl_to_cores(struct sysinfo_15_1_x *info) +{ + struct cpuid cpu_id; + + get_cpu_id(&cpu_id); + spin_lock_irq(&topology_lock); + clear_masks(); + switch (cpu_id.machine) { + case 0x2097: + case 0x2098: + __tl_to_cores_z10(info); + break; + default: + __tl_to_cores_generic(info); + } spin_unlock_irq(&topology_lock); } @@ -196,7 +201,7 @@ static void topology_update_polarization_simple(void) mutex_lock(&smp_cpu_state_mutex); for_each_possible_cpu(cpu) - smp_cpu_polarization[cpu] = POLARIZATION_HRZ; + cpu_set_polarization(cpu, POLARIZATION_HRZ); mutex_unlock(&smp_cpu_state_mutex); } @@ -215,8 +220,7 @@ static int ptf(unsigned long fc) int topology_set_cpu_management(int fc) { - int cpu; - int rc; + int cpu, rc; if (!MACHINE_HAS_TOPOLOGY) return -EOPNOTSUPP; @@ -227,7 +231,7 @@ int topology_set_cpu_management(int fc) if (rc) return -EBUSY; for_each_possible_cpu(cpu) - smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN; + cpu_set_polarization(cpu, POLARIZATION_UNKNOWN); return rc; } @@ -239,22 +243,18 @@ static void update_cpu_core_map(void) spin_lock_irqsave(&topology_lock, flags); for_each_possible_cpu(cpu) { cpu_core_map[cpu] = cpu_group_map(&core_info, cpu); -#ifdef CONFIG_SCHED_BOOK cpu_book_map[cpu] = cpu_group_map(&book_info, cpu); -#endif } spin_unlock_irqrestore(&topology_lock, flags); } void store_topology(struct sysinfo_15_1_x *info) { -#ifdef CONFIG_SCHED_BOOK int rc; rc = stsi(info, 15, 1, 3); if (rc != -ENOSYS) return; -#endif stsi(info, 15, 1, 2); } @@ -296,12 +296,30 @@ static void topology_timer_fn(unsigned long ignored) set_topology_timer(); } +static struct timer_list topology_timer = + TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0); + +static atomic_t topology_poll = ATOMIC_INIT(0); + static void set_topology_timer(void) { - topology_timer.function = topology_timer_fn; - topology_timer.data = 0; - topology_timer.expires = jiffies + 60 * HZ; - add_timer(&topology_timer); + if (atomic_add_unless(&topology_poll, -1, 0)) + mod_timer(&topology_timer, jiffies + HZ / 10); + else + mod_timer(&topology_timer, jiffies + HZ * 60); +} + +void topology_expect_change(void) +{ + if (!MACHINE_HAS_TOPOLOGY) + return; + /* This is racy, but it doesn't matter since it is just a heuristic. + * Worst case is that we poll in a higher frequency for a bit longer. 
+ */ + if (atomic_read(&topology_poll) > 60) + return; + atomic_add(60, &topology_poll); + set_topology_timer(); } static int __init early_parse_topology(char *p) @@ -313,23 +331,6 @@ static int __init early_parse_topology(char *p) } early_param("topology", early_parse_topology); -static int __init init_topology_update(void) -{ - int rc; - - rc = 0; - if (!MACHINE_HAS_TOPOLOGY) { - topology_update_polarization_simple(); - goto out; - } - init_timer_deferrable(&topology_timer); - set_topology_timer(); -out: - update_cpu_core_map(); - return rc; -} -__initcall(init_topology_update); - static void __init alloc_masks(struct sysinfo_15_1_x *info, struct mask_info *mask, int offset) { @@ -357,10 +358,108 @@ void __init s390_init_cpu_topology(void) store_topology(info); pr_info("The CPU configuration topology of the machine is:"); for (i = 0; i < TOPOLOGY_NR_MAG; i++) - printk(" %d", info->mag[i]); - printk(" / %d\n", info->mnest); + printk(KERN_CONT " %d", info->mag[i]); + printk(KERN_CONT " / %d\n", info->mnest); alloc_masks(info, &core_info, 1); -#ifdef CONFIG_SCHED_BOOK alloc_masks(info, &book_info, 2); -#endif } + +static int cpu_management; + +static ssize_t dispatching_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + ssize_t count; + + mutex_lock(&smp_cpu_state_mutex); + count = sprintf(buf, "%d\n", cpu_management); + mutex_unlock(&smp_cpu_state_mutex); + return count; +} + +static ssize_t dispatching_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + int val, rc; + char delim; + + if (sscanf(buf, "%d %c", &val, &delim) != 1) + return -EINVAL; + if (val != 0 && val != 1) + return -EINVAL; + rc = 0; + get_online_cpus(); + mutex_lock(&smp_cpu_state_mutex); + if (cpu_management == val) + goto out; + rc = topology_set_cpu_management(val); + if (rc) + goto out; + cpu_management = val; + topology_expect_change(); +out: + mutex_unlock(&smp_cpu_state_mutex); + put_online_cpus(); + return rc ? 
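The deferred timer above polls adaptively: topology_expect_change() grants a budget of roughly 60 fast polls, and set_topology_timer() consumes one credit per rearm, firing every 100ms while credits remain and once a minute otherwise. A sketch of the credit scheme with C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic int poll_credits;

    /* Consume a credit if one is left (the kernel uses
     * atomic_add_unless(&topology_poll, -1, 0)) and pick the interval. */
    static int next_interval_ms(void)
    {
            int c = atomic_load(&poll_credits);

            while (c > 0 &&
                   !atomic_compare_exchange_weak(&poll_credits, &c, c - 1))
                    ;
            return c > 0 ? 100 : 60 * 1000;
    }

    /* Racy by design: the worst case is a bit of extra fast polling. */
    static void expect_change(void)
    {
            if (atomic_load(&poll_credits) <= 60)
                    atomic_fetch_add(&poll_credits, 60);
    }

    int main(void)
    {
            printf("%d ms\n", next_interval_ms());  /* 60000 */
            expect_change();
            printf("%d ms\n", next_interval_ms());  /* 100 */
            return 0;
    }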
rc : count; +} +static DEVICE_ATTR(dispatching, 0644, dispatching_show, + dispatching_store); + +static ssize_t cpu_polarization_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + int cpu = dev->id; + ssize_t count; + + mutex_lock(&smp_cpu_state_mutex); + switch (cpu_read_polarization(cpu)) { + case POLARIZATION_HRZ: + count = sprintf(buf, "horizontal\n"); + break; + case POLARIZATION_VL: + count = sprintf(buf, "vertical:low\n"); + break; + case POLARIZATION_VM: + count = sprintf(buf, "vertical:medium\n"); + break; + case POLARIZATION_VH: + count = sprintf(buf, "vertical:high\n"); + break; + default: + count = sprintf(buf, "unknown\n"); + break; + } + mutex_unlock(&smp_cpu_state_mutex); + return count; +} +static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL); + +static struct attribute *topology_cpu_attrs[] = { + &dev_attr_polarization.attr, + NULL, +}; + +static struct attribute_group topology_cpu_attr_group = { + .attrs = topology_cpu_attrs, +}; + +int topology_cpu_init(struct cpu *cpu) +{ + return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group); +} + +static int __init topology_init(void) +{ + if (!MACHINE_HAS_TOPOLOGY) { + topology_update_polarization_simple(); + goto out; + } + set_topology_timer(); +out: + update_cpu_core_map(); + return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching); +} +device_initcall(topology_init); diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index a9807dd86276..5ce3750b181f 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c @@ -43,9 +43,9 @@ #include <asm/debug.h> #include "entry.h" -void (*pgm_check_table[128])(struct pt_regs *, long, unsigned long); +void (*pgm_check_table[128])(struct pt_regs *regs); -int show_unhandled_signals; +int show_unhandled_signals = 1; #define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; }) @@ -234,7 +234,7 @@ void show_regs(struct pt_regs *regs) static DEFINE_SPINLOCK(die_lock); -void die(const char * str, struct pt_regs * regs, long err) +void die(struct pt_regs *regs, const char *str) { static int die_counter; @@ -243,7 +243,7 @@ void die(const char * str, struct pt_regs * regs, long err) console_verbose(); spin_lock_irq(&die_lock); bust_spinlocks(1); - printk("%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter); + printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter); #ifdef CONFIG_PREEMPT printk("PREEMPT "); #endif @@ -254,7 +254,7 @@ void die(const char * str, struct pt_regs * regs, long err) printk("DEBUG_PAGEALLOC"); #endif printk("\n"); - notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV); + notify_die(DIE_OOPS, str, regs, 0, regs->int_code & 0xffff, SIGSEGV); show_regs(regs); bust_spinlocks(0); add_taint(TAINT_DIE); @@ -267,8 +267,7 @@ void die(const char * str, struct pt_regs * regs, long err) do_exit(SIGSEGV); } -static void inline report_user_fault(struct pt_regs *regs, long int_code, - int signr) +static inline void report_user_fault(struct pt_regs *regs, int signr) { if ((task_pid_nr(current) > 1) && !show_unhandled_signals) return; @@ -276,7 +275,7 @@ static void inline report_user_fault(struct pt_regs *regs, long int_code, return; if (!printk_ratelimit()) return; - printk("User process fault: interruption code 0x%lX ", int_code); + printk("User process fault: interruption code 0x%X ", regs->int_code); print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); printk("\n"); show_regs(regs); @@ -287,19 +286,28 @@ int is_valid_bugaddr(unsigned long addr) return 
1; } -static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str, - struct pt_regs *regs, siginfo_t *info) +static inline void __user *get_psw_address(struct pt_regs *regs) { - if (notify_die(DIE_TRAP, str, regs, pgm_int_code, - pgm_int_code, signr) == NOTIFY_STOP) + return (void __user *) + ((regs->psw.addr - (regs->int_code >> 16)) & PSW_ADDR_INSN); +} + +static void __kprobes do_trap(struct pt_regs *regs, + int si_signo, int si_code, char *str) +{ + siginfo_t info; + + if (notify_die(DIE_TRAP, str, regs, 0, + regs->int_code, si_signo) == NOTIFY_STOP) return; if (regs->psw.mask & PSW_MASK_PSTATE) { - struct task_struct *tsk = current; - - tsk->thread.trap_no = pgm_int_code & 0xffff; - force_sig_info(signr, info, tsk); - report_user_fault(regs, pgm_int_code, signr); + info.si_signo = si_signo; + info.si_errno = 0; + info.si_code = si_code; + info.si_addr = get_psw_address(regs); + force_sig_info(si_signo, &info, current); + report_user_fault(regs, si_signo); } else { const struct exception_table_entry *fixup; fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); @@ -311,18 +319,11 @@ static inline void __kprobes do_trap(long pgm_int_code, int signr, char *str, btt = report_bug(regs->psw.addr & PSW_ADDR_INSN, regs); if (btt == BUG_TRAP_TYPE_WARN) return; - die(str, regs, pgm_int_code); + die(regs, str); } } } -static inline void __user *get_psw_address(struct pt_regs *regs, - long pgm_int_code) -{ - return (void __user *) - ((regs->psw.addr - (pgm_int_code >> 16)) & PSW_ADDR_INSN); -} - void __kprobes do_per_trap(struct pt_regs *regs) { siginfo_t info; @@ -339,26 +340,19 @@ void __kprobes do_per_trap(struct pt_regs *regs) force_sig_info(SIGTRAP, &info, current); } -static void default_trap_handler(struct pt_regs *regs, long pgm_int_code, - unsigned long trans_exc_code) +static void default_trap_handler(struct pt_regs *regs) { if (regs->psw.mask & PSW_MASK_PSTATE) { - report_user_fault(regs, pgm_int_code, SIGSEGV); + report_user_fault(regs, SIGSEGV); do_exit(SIGSEGV); } else - die("Unknown program exception", regs, pgm_int_code); + die(regs, "Unknown program exception"); } #define DO_ERROR_INFO(name, signr, sicode, str) \ -static void name(struct pt_regs *regs, long pgm_int_code, \ - unsigned long trans_exc_code) \ +static void name(struct pt_regs *regs) \ { \ - siginfo_t info; \ - info.si_signo = signr; \ - info.si_errno = 0; \ - info.si_code = sicode; \ - info.si_addr = get_psw_address(regs, pgm_int_code); \ - do_trap(pgm_int_code, signr, str, regs, &info); \ + do_trap(regs, signr, sicode, str); \ } DO_ERROR_INFO(addressing_exception, SIGILL, ILL_ILLADR, @@ -388,42 +382,34 @@ DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN, "translation exception") -static inline void do_fp_trap(struct pt_regs *regs, void __user *location, - int fpc, long pgm_int_code) +static inline void do_fp_trap(struct pt_regs *regs, int fpc) { - siginfo_t si; - - si.si_signo = SIGFPE; - si.si_errno = 0; - si.si_addr = location; - si.si_code = 0; + int si_code = 0; /* FPC[2] is Data Exception Code */ if ((fpc & 0x00000300) == 0) { /* bits 6 and 7 of DXC are 0 iff IEEE exception */ if (fpc & 0x8000) /* invalid fp operation */ - si.si_code = FPE_FLTINV; + si_code = FPE_FLTINV; else if (fpc & 0x4000) /* div by 0 */ - si.si_code = FPE_FLTDIV; + si_code = FPE_FLTDIV; else if (fpc & 0x2000) /* overflow */ - si.si_code = FPE_FLTOVF; + si_code = FPE_FLTOVF; else if (fpc & 0x1000) /* underflow */ - si.si_code = FPE_FLTUND; + si_code = 
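Once do_trap() above builds the siginfo itself, every simple program-check handler shrinks to a single call, which is what the DO_ERROR_INFO() macro exploits. A standalone sketch of the pattern (the signal number, si_code, and printing are illustrative):

    #include <stdio.h>

    struct pt_regs { unsigned int int_code; };

    /* do_trap() now owns siginfo construction; handlers just name the
     * signal, si_code, and message. */
    static void do_trap(struct pt_regs *regs, int signo, int code,
                        const char *str)
    {
            printf("%s: %04x, signal %d, si_code %d\n",
                   str, regs->int_code & 0xffff, signo, code);
    }

    #define DO_ERROR_INFO(name, signr, sicode, str) \
    static void name(struct pt_regs *regs)          \
    {                                               \
            do_trap(regs, signr, sicode, str);      \
    }

    DO_ERROR_INFO(addressing_exception, 4, 1, "addressing exception")

    int main(void)
    {
            struct pt_regs regs = { .int_code = 0x0005 };
            addressing_exception(&regs);
            return 0;
    }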
FPE_FLTUND; else if (fpc & 0x0800) /* inexact */ - si.si_code = FPE_FLTRES; + si_code = FPE_FLTRES; } - do_trap(pgm_int_code, SIGFPE, - "floating point exception", regs, &si); + do_trap(regs, SIGFPE, si_code, "floating point exception"); } -static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code, - unsigned long trans_exc_code) +static void __kprobes illegal_op(struct pt_regs *regs) { siginfo_t info; __u8 opcode[6]; __u16 __user *location; int signal = 0; - location = get_psw_address(regs, pgm_int_code); + location = get_psw_address(regs); if (regs->psw.mask & PSW_MASK_PSTATE) { if (get_user(*((__u16 *) opcode), (__u16 __user *) location)) @@ -467,44 +453,31 @@ static void __kprobes illegal_op(struct pt_regs *regs, long pgm_int_code, * If we get an illegal op in kernel mode, send it through the * kprobes notifier. If kprobes doesn't pick it up, SIGILL */ - if (notify_die(DIE_BPT, "bpt", regs, pgm_int_code, + if (notify_die(DIE_BPT, "bpt", regs, 0, 3, SIGTRAP) != NOTIFY_STOP) signal = SIGILL; } #ifdef CONFIG_MATHEMU if (signal == SIGFPE) - do_fp_trap(regs, location, - current->thread.fp_regs.fpc, pgm_int_code); - else if (signal == SIGSEGV) { - info.si_signo = signal; - info.si_errno = 0; - info.si_code = SEGV_MAPERR; - info.si_addr = (void __user *) location; - do_trap(pgm_int_code, signal, - "user address fault", regs, &info); - } else + do_fp_trap(regs, current->thread.fp_regs.fpc); + else if (signal == SIGSEGV) + do_trap(regs, signal, SEGV_MAPERR, "user address fault"); + else #endif - if (signal) { - info.si_signo = signal; - info.si_errno = 0; - info.si_code = ILL_ILLOPC; - info.si_addr = (void __user *) location; - do_trap(pgm_int_code, signal, - "illegal operation", regs, &info); - } + if (signal) + do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); } #ifdef CONFIG_MATHEMU -void specification_exception(struct pt_regs *regs, long pgm_int_code, - unsigned long trans_exc_code) +void specification_exception(struct pt_regs *regs) { __u8 opcode[6]; __u16 __user *location = NULL; int signal = 0; - location = (__u16 __user *) get_psw_address(regs, pgm_int_code); + location = (__u16 __user *) get_psw_address(regs); if (regs->psw.mask & PSW_MASK_PSTATE) { get_user(*((__u16 *) opcode), location); @@ -539,30 +512,21 @@ void specification_exception(struct pt_regs *regs, long pgm_int_code, signal = SIGILL; if (signal == SIGFPE) - do_fp_trap(regs, location, - current->thread.fp_regs.fpc, pgm_int_code); - else if (signal) { - siginfo_t info; - info.si_signo = signal; - info.si_errno = 0; - info.si_code = ILL_ILLOPN; - info.si_addr = location; - do_trap(pgm_int_code, signal, - "specification exception", regs, &info); - } + do_fp_trap(regs, current->thread.fp_regs.fpc); + else if (signal) + do_trap(regs, signal, ILL_ILLOPN, "specification exception"); } #else DO_ERROR_INFO(specification_exception, SIGILL, ILL_ILLOPN, "specification exception"); #endif -static void data_exception(struct pt_regs *regs, long pgm_int_code, - unsigned long trans_exc_code) +static void data_exception(struct pt_regs *regs) { __u16 __user *location; int signal = 0; - location = get_psw_address(regs, pgm_int_code); + location = get_psw_address(regs); if (MACHINE_HAS_IEEE) asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); @@ -627,32 +591,18 @@ static void data_exception(struct pt_regs *regs, long pgm_int_code, else signal = SIGILL; if (signal == SIGFPE) - do_fp_trap(regs, location, - current->thread.fp_regs.fpc, pgm_int_code); - else if (signal) { - siginfo_t info; - info.si_signo = signal; - 
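The DXC decoding above is self-contained enough to lift out: bits 6-7 of the data exception code are zero exactly for IEEE exceptions, and the individual flag bits map one-to-one onto FPE_* si_codes. A standalone version with illustrative si_code values:

    #include <stdio.h>

    /* Illustrative values; the kernel uses the real FPE_* constants. */
    enum { FPE_FLTINV = 1, FPE_FLTDIV, FPE_FLTOVF, FPE_FLTUND, FPE_FLTRES };

    static int dxc_to_si_code(unsigned int fpc)
    {
            if (fpc & 0x00000300)   /* bits 6-7 set: not an IEEE exception */
                    return 0;
            if (fpc & 0x8000) return FPE_FLTINV;    /* invalid operation */
            if (fpc & 0x4000) return FPE_FLTDIV;    /* divide by zero */
            if (fpc & 0x2000) return FPE_FLTOVF;    /* overflow */
            if (fpc & 0x1000) return FPE_FLTUND;    /* underflow */
            if (fpc & 0x0800) return FPE_FLTRES;    /* inexact */
            return 0;
    }

    int main(void)
    {
            printf("%d\n", dxc_to_si_code(0x4000)); /* FPE_FLTDIV */
            return 0;
    }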
info.si_errno = 0; - info.si_code = ILL_ILLOPN; - info.si_addr = location; - do_trap(pgm_int_code, signal, "data exception", regs, &info); - } + do_fp_trap(regs, current->thread.fp_regs.fpc); + else if (signal) + do_trap(regs, signal, ILL_ILLOPN, "data exception"); } -static void space_switch_exception(struct pt_regs *regs, long pgm_int_code, - unsigned long trans_exc_code) +static void space_switch_exception(struct pt_regs *regs) { - siginfo_t info; - /* Set user psw back to home space mode. */ if (regs->psw.mask & PSW_MASK_PSTATE) regs->psw.mask |= PSW_ASC_HOME; /* Send SIGILL. */ - info.si_signo = SIGILL; - info.si_errno = 0; - info.si_code = ILL_PRVOPC; - info.si_addr = get_psw_address(regs, pgm_int_code); - do_trap(pgm_int_code, SIGILL, "space switch event", regs, &info); + do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event"); } void __kprobes kernel_stack_overflow(struct pt_regs * regs) diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index a9a301866b3c..354dd39073ef 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -125,8 +125,7 @@ static inline int user_space_fault(unsigned long trans_exc_code) return trans_exc_code != 3; } -static inline void report_user_fault(struct pt_regs *regs, long int_code, - int signr, unsigned long address) +static inline void report_user_fault(struct pt_regs *regs, long signr) { if ((task_pid_nr(current) > 1) && !show_unhandled_signals) return; @@ -134,10 +133,12 @@ static inline void report_user_fault(struct pt_regs *regs, long int_code, return; if (!printk_ratelimit()) return; - printk("User process fault: interruption code 0x%lX ", int_code); + printk(KERN_ALERT "User process fault: interruption code 0x%X ", + regs->int_code); print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); - printk("\n"); - printk("failing address: %lX\n", address); + printk(KERN_CONT "\n"); + printk(KERN_ALERT "failing address: %lX\n", + regs->int_parm_long & __FAIL_ADDR_MASK); show_regs(regs); } @@ -145,24 +146,18 @@ static inline void report_user_fault(struct pt_regs *regs, long int_code, * Send SIGSEGV to task. This is an external routine * to keep the stack usage of do_page_fault small. */ -static noinline void do_sigsegv(struct pt_regs *regs, long int_code, - int si_code, unsigned long trans_exc_code) +static noinline void do_sigsegv(struct pt_regs *regs, int si_code) { struct siginfo si; - unsigned long address; - address = trans_exc_code & __FAIL_ADDR_MASK; - current->thread.prot_addr = address; - current->thread.trap_no = int_code; - report_user_fault(regs, int_code, SIGSEGV, address); + report_user_fault(regs, SIGSEGV); si.si_signo = SIGSEGV; si.si_code = si_code; - si.si_addr = (void __user *) address; + si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); force_sig_info(SIGSEGV, &si, current); } -static noinline void do_no_context(struct pt_regs *regs, long int_code, - unsigned long trans_exc_code) +static noinline void do_no_context(struct pt_regs *regs) { const struct exception_table_entry *fixup; unsigned long address; @@ -178,55 +173,48 @@ static noinline void do_no_context(struct pt_regs *regs, long int_code, * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. 
*/ - address = trans_exc_code & __FAIL_ADDR_MASK; - if (!user_space_fault(trans_exc_code)) + address = regs->int_parm_long & __FAIL_ADDR_MASK; + if (!user_space_fault(regs->int_parm_long)) printk(KERN_ALERT "Unable to handle kernel pointer dereference" " at virtual kernel address %p\n", (void *)address); else printk(KERN_ALERT "Unable to handle kernel paging request" " at virtual user address %p\n", (void *)address); - die("Oops", regs, int_code); + die(regs, "Oops"); do_exit(SIGKILL); } -static noinline void do_low_address(struct pt_regs *regs, long int_code, - unsigned long trans_exc_code) +static noinline void do_low_address(struct pt_regs *regs) { /* Low-address protection hit in kernel mode means NULL pointer write access in kernel mode. */ if (regs->psw.mask & PSW_MASK_PSTATE) { /* Low-address protection hit in user mode 'cannot happen'. */ - die ("Low-address protection", regs, int_code); + die (regs, "Low-address protection"); do_exit(SIGKILL); } - do_no_context(regs, int_code, trans_exc_code); + do_no_context(regs); } -static noinline void do_sigbus(struct pt_regs *regs, long int_code, - unsigned long trans_exc_code) +static noinline void do_sigbus(struct pt_regs *regs) { struct task_struct *tsk = current; - unsigned long address; struct siginfo si; /* * Send a sigbus, regardless of whether we were in kernel * or user mode. */ - address = trans_exc_code & __FAIL_ADDR_MASK; - tsk->thread.prot_addr = address; - tsk->thread.trap_no = int_code; si.si_signo = SIGBUS; si.si_errno = 0; si.si_code = BUS_ADRERR; - si.si_addr = (void __user *) address; + si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); force_sig_info(SIGBUS, &si, tsk); } -static noinline void do_fault_error(struct pt_regs *regs, long int_code, - unsigned long trans_exc_code, int fault) +static noinline void do_fault_error(struct pt_regs *regs, int fault) { int si_code; @@ -238,24 +226,24 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code, /* User mode accesses just cause a SIGSEGV */ si_code = (fault == VM_FAULT_BADMAP) ? SEGV_MAPERR : SEGV_ACCERR; - do_sigsegv(regs, int_code, si_code, trans_exc_code); + do_sigsegv(regs, si_code); return; } case VM_FAULT_BADCONTEXT: - do_no_context(regs, int_code, trans_exc_code); + do_no_context(regs); break; default: /* fault & VM_FAULT_ERROR */ if (fault & VM_FAULT_OOM) { if (!(regs->psw.mask & PSW_MASK_PSTATE)) - do_no_context(regs, int_code, trans_exc_code); + do_no_context(regs); else pagefault_out_of_memory(); } else if (fault & VM_FAULT_SIGBUS) { /* Kernel mode? Handle exceptions or die */ if (!(regs->psw.mask & PSW_MASK_PSTATE)) - do_no_context(regs, int_code, trans_exc_code); + do_no_context(regs); else - do_sigbus(regs, int_code, trans_exc_code); + do_sigbus(regs); } else BUG(); break; @@ -273,12 +261,12 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code, * 11 Page translation -> Not present (nullification) * 3b Region third trans. 
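do_fault_error() above funnels every fault class through one dispatcher: bad accesses become SIGSEGV in user mode and fall through to the fixup-or-die path in kernel mode, while the OOM and SIGBUS flag bits get their own handling. A sketch with illustrative fault codes:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative codes; the kernel uses the VM_FAULT_* values. */
    enum { F_BADCONTEXT = 0x010000, F_BADMAP = 0x020000,
           F_BADACCESS = 0x040000, F_OOM = 0x0001, F_SIGBUS = 0x0002 };

    static void fault_error(int fault, bool user_mode)
    {
            switch (fault) {
            case F_BADACCESS:
            case F_BADMAP:
                    if (user_mode) {
                            puts(fault == F_BADMAP ? "SIGSEGV, SEGV_MAPERR"
                                                   : "SIGSEGV, SEGV_ACCERR");
                            return;
                    }
                    /* fall through: kernel-mode bad access */
            case F_BADCONTEXT:
                    puts("kernel mode: exception fixup or die()");
                    break;
            default:        /* fault & VM_FAULT_ERROR */
                    if (fault & F_OOM)
                            puts(user_mode ? "out-of-memory path"
                                           : "kernel mode: fixup or die()");
                    else if (fault & F_SIGBUS)
                            puts(user_mode ? "SIGBUS, BUS_ADRERR"
                                           : "kernel mode: fixup or die()");
                    break;
            }
    }

    int main(void)
    {
            fault_error(F_BADMAP, true);
            fault_error(F_SIGBUS, false);
            return 0;
    }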
-> Not present (nullification) */ -static inline int do_exception(struct pt_regs *regs, int access, - unsigned long trans_exc_code) +static inline int do_exception(struct pt_regs *regs, int access) { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; + unsigned long trans_exc_code; unsigned long address; unsigned int flags; int fault; @@ -288,6 +276,7 @@ static inline int do_exception(struct pt_regs *regs, int access, tsk = current; mm = tsk->mm; + trans_exc_code = regs->int_parm_long; /* * Verify that the fault happened in user space, that @@ -387,45 +376,46 @@ out: return fault; } -void __kprobes do_protection_exception(struct pt_regs *regs, long pgm_int_code, - unsigned long trans_exc_code) +void __kprobes do_protection_exception(struct pt_regs *regs) { + unsigned long trans_exc_code; int fault; + trans_exc_code = regs->int_parm_long; /* Protection exception is suppressing, decrement psw address. */ - regs->psw.addr = __rewind_psw(regs->psw, pgm_int_code >> 16); + regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); /* * Check for low-address protection. This needs to be treated * as a special case because the translation exception code * field is not guaranteed to contain valid data in this case. */ if (unlikely(!(trans_exc_code & 4))) { - do_low_address(regs, pgm_int_code, trans_exc_code); + do_low_address(regs); return; } - fault = do_exception(regs, VM_WRITE, trans_exc_code); + fault = do_exception(regs, VM_WRITE); if (unlikely(fault)) - do_fault_error(regs, 4, trans_exc_code, fault); + do_fault_error(regs, fault); } -void __kprobes do_dat_exception(struct pt_regs *regs, long pgm_int_code, - unsigned long trans_exc_code) +void __kprobes do_dat_exception(struct pt_regs *regs) { int access, fault; access = VM_READ | VM_EXEC | VM_WRITE; - fault = do_exception(regs, access, trans_exc_code); + fault = do_exception(regs, access); if (unlikely(fault)) - do_fault_error(regs, pgm_int_code & 255, trans_exc_code, fault); + do_fault_error(regs, fault); } #ifdef CONFIG_64BIT -void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code, - unsigned long trans_exc_code) +void __kprobes do_asce_exception(struct pt_regs *regs) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; + unsigned long trans_exc_code; + trans_exc_code = regs->int_parm_long; if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto no_context; @@ -440,12 +430,12 @@ void __kprobes do_asce_exception(struct pt_regs *regs, long pgm_int_code, /* User mode accesses just cause a SIGSEGV */ if (regs->psw.mask & PSW_MASK_PSTATE) { - do_sigsegv(regs, pgm_int_code, SEGV_MAPERR, trans_exc_code); + do_sigsegv(regs, SEGV_MAPERR); return; } no_context: - do_no_context(regs, pgm_int_code, trans_exc_code); + do_no_context(regs); } #endif @@ -459,14 +449,15 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; regs.psw.addr = (unsigned long) __builtin_return_address(0); regs.psw.addr |= PSW_ADDR_AMODE; - uaddr &= PAGE_MASK; + regs.int_code = pgm_int_code; + regs.int_parm_long = (uaddr & PAGE_MASK) | 2; access = write ? VM_WRITE : VM_READ; - fault = do_exception(®s, access, uaddr | 2); + fault = do_exception(®s, access); if (unlikely(fault)) { if (fault & VM_FAULT_OOM) return -EFAULT; else if (fault & VM_FAULT_SIGBUS) - do_sigbus(®s, pgm_int_code, uaddr); + do_sigbus(®s); } return fault ? 
-EFAULT : 0; } @@ -509,7 +500,7 @@ int pfault_init(void) .reserved = __PF_RES_FIELD }; int rc; - if (!MACHINE_IS_VM || pfault_disable) + if (pfault_disable) return -1; asm volatile( " diag %1,%0,0x258\n" @@ -530,7 +521,7 @@ void pfault_fini(void) .refversn = 2, }; - if (!MACHINE_IS_VM || pfault_disable) + if (pfault_disable) return; asm volatile( " diag %0,0,0x258\n" @@ -643,8 +634,6 @@ static int __init pfault_irq_init(void) { int rc; - if (!MACHINE_IS_VM) - return 0; rc = register_external_interrupt(0x2603, pfault_interrupt); if (rc) goto out_extint; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index d4b9fb4d0042..5d633019d8f3 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -93,18 +93,22 @@ static unsigned long setup_zero_pages(void) void __init paging_init(void) { unsigned long max_zone_pfns[MAX_NR_ZONES]; - unsigned long pgd_type; + unsigned long pgd_type, asce_bits; init_mm.pgd = swapper_pg_dir; - S390_lowcore.kernel_asce = __pa(init_mm.pgd) & PAGE_MASK; #ifdef CONFIG_64BIT - /* A three level page table (4TB) is enough for the kernel space. */ - S390_lowcore.kernel_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; - pgd_type = _REGION3_ENTRY_EMPTY; + if (VMALLOC_END > (1UL << 42)) { + asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH; + pgd_type = _REGION2_ENTRY_EMPTY; + } else { + asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH; + pgd_type = _REGION3_ENTRY_EMPTY; + } #else - S390_lowcore.kernel_asce |= _ASCE_TABLE_LENGTH; + asce_bits = _ASCE_TABLE_LENGTH; pgd_type = _SEGMENT_ENTRY_EMPTY; #endif + S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits; clear_table((unsigned long *) init_mm.pgd, pgd_type, sizeof(unsigned long)*2048); vmem_map_init(); diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 301c84d3b542..9a4d02f64f16 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -33,17 +33,6 @@ #define FRAG_MASK 0x03 #endif -unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE; -EXPORT_SYMBOL(VMALLOC_START); - -static int __init parse_vmalloc(char *arg) -{ - if (!arg) - return -EINVAL; - VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK; - return 0; -} -early_param("vmalloc", parse_vmalloc); unsigned long *crst_table_alloc(struct mm_struct *mm) { @@ -267,7 +256,10 @@ static int gmap_alloc_table(struct gmap *gmap, struct page *page; unsigned long *new; + /* since we dont free the gmap table until gmap_free we can unlock */ + spin_unlock(&gmap->mm->page_table_lock); page = alloc_pages(GFP_KERNEL, ALLOC_ORDER); + spin_lock(&gmap->mm->page_table_lock); if (!page) return -ENOMEM; new = (unsigned long *) page_to_phys(page);
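The gmap_alloc_table() hunk above shows a classic pattern: alloc_pages(GFP_KERNEL, ...) may sleep, so the spinlock is dropped around it. That is safe here only because, as the comment notes, the table is not freed until gmap_free(); in general the guarded state must be revalidated after the lock is retaken. A pthread sketch of the shape:

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Called with table_lock held; returns with it held again. */
    static void *alloc_table_locked(void)
    {
            void *page;

            pthread_mutex_unlock(&table_lock);
            page = malloc(4096);        /* may sleep, like alloc_pages */
            pthread_mutex_lock(&table_lock);
            /* in general: revalidate the guarded state here */
            return page;
    }

    int main(void)
    {
            void *p;

            pthread_mutex_lock(&table_lock);
            p = alloc_table_locked();
            pthread_mutex_unlock(&table_lock);
            free(p);
            return p ? 0 : 1;
    }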