Creation of Cybook 2416 (actually Gen4) repository
This commit is contained in:
37
arch/s390/kernel/Makefile
Normal file
37
arch/s390/kernel/Makefile
Normal file
@@ -0,0 +1,37 @@
|
||||
#
|
||||
# Makefile for the linux kernel.
|
||||
#
|
||||
|
||||
EXTRA_AFLAGS := -traditional
|
||||
|
||||
obj-y := bitmap.o traps.o time.o process.o base.o early.o \
|
||||
setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
|
||||
semaphore.o s390_ext.o debug.o irq.o ipl.o
|
||||
|
||||
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
|
||||
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
|
||||
|
||||
extra-y += head.o init_task.o vmlinux.lds
|
||||
|
||||
obj-$(CONFIG_MODULES) += s390_ksyms.o module.o
|
||||
obj-$(CONFIG_SMP) += smp.o
|
||||
|
||||
obj-$(CONFIG_AUDIT) += audit.o
|
||||
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
|
||||
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
|
||||
compat_wrapper.o compat_exec_domain.o \
|
||||
binfmt_elf32.o $(compat-obj-y)
|
||||
|
||||
obj-$(CONFIG_VIRT_TIMER) += vtime.o
|
||||
obj-$(CONFIG_STACKTRACE) += stacktrace.o
|
||||
obj-$(CONFIG_KPROBES) += kprobes.o
|
||||
|
||||
# Kexec part
|
||||
S390_KEXEC_OBJS := machine_kexec.o crash.o
|
||||
S390_KEXEC_OBJS += $(if $(CONFIG_64BIT),relocate_kernel64.o,relocate_kernel.o)
|
||||
obj-$(CONFIG_KEXEC) += $(S390_KEXEC_OBJS)
|
||||
|
||||
#
|
||||
# This is just to get the dependencies...
|
||||
#
|
||||
binfmt_elf32.o: $(TOPDIR)/fs/binfmt_elf.c
|
||||
48
arch/s390/kernel/asm-offsets.c
Normal file
48
arch/s390/kernel/asm-offsets.c
Normal file
@@ -0,0 +1,48 @@
|
||||
/*
|
||||
* Generate definitions needed by assembly language modules.
|
||||
* This code generates raw asm output which is post-processed to extract
|
||||
* and format the required data.
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
|
||||
/* Use marker if you need to separate the values later */
|
||||
|
||||
#define DEFINE(sym, val, marker) \
|
||||
asm volatile("\n->" #sym " %0 " #val " " #marker : : "i" (val))
|
||||
|
||||
#define BLANK() asm volatile("\n->" : : )
|
||||
|
||||
int main(void)
|
||||
{
|
||||
DEFINE(__THREAD_info, offsetof(struct task_struct, thread_info),);
|
||||
DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),);
|
||||
DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),);
|
||||
DEFINE(__THREAD_mm_segment,
|
||||
offsetof(struct task_struct, thread.mm_segment),);
|
||||
BLANK();
|
||||
DEFINE(__TASK_pid, offsetof(struct task_struct, pid),);
|
||||
BLANK();
|
||||
DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid),);
|
||||
DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address),);
|
||||
DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id),);
|
||||
BLANK();
|
||||
DEFINE(__TI_task, offsetof(struct thread_info, task),);
|
||||
DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain),);
|
||||
DEFINE(__TI_flags, offsetof(struct thread_info, flags),);
|
||||
DEFINE(__TI_cpu, offsetof(struct thread_info, cpu),);
|
||||
DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count),);
|
||||
BLANK();
|
||||
DEFINE(__PT_ARGS, offsetof(struct pt_regs, args),);
|
||||
DEFINE(__PT_PSW, offsetof(struct pt_regs, psw),);
|
||||
DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs),);
|
||||
DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2),);
|
||||
DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc),);
|
||||
DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap),);
|
||||
DEFINE(__PT_SIZE, sizeof(struct pt_regs),);
|
||||
BLANK();
|
||||
DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain),);
|
||||
DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs),);
|
||||
DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1),);
|
||||
return 0;
|
||||
}
|
||||
66
arch/s390/kernel/audit.c
Normal file
66
arch/s390/kernel/audit.c
Normal file
@@ -0,0 +1,66 @@
|
||||
#include <linux/init.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/audit.h>
|
||||
#include <asm/unistd.h>
|
||||
|
||||
static unsigned dir_class[] = {
|
||||
#include <asm-generic/audit_dir_write.h>
|
||||
~0U
|
||||
};
|
||||
|
||||
static unsigned read_class[] = {
|
||||
#include <asm-generic/audit_read.h>
|
||||
~0U
|
||||
};
|
||||
|
||||
static unsigned write_class[] = {
|
||||
#include <asm-generic/audit_write.h>
|
||||
~0U
|
||||
};
|
||||
|
||||
static unsigned chattr_class[] = {
|
||||
#include <asm-generic/audit_change_attr.h>
|
||||
~0U
|
||||
};
|
||||
|
||||
int audit_classify_syscall(int abi, unsigned syscall)
|
||||
{
|
||||
#ifdef CONFIG_COMPAT
|
||||
extern int s390_classify_syscall(unsigned);
|
||||
if (abi == AUDIT_ARCH_S390)
|
||||
return s390_classify_syscall(syscall);
|
||||
#endif
|
||||
switch(syscall) {
|
||||
case __NR_open:
|
||||
return 2;
|
||||
case __NR_openat:
|
||||
return 3;
|
||||
case __NR_socketcall:
|
||||
return 4;
|
||||
case __NR_execve:
|
||||
return 5;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
static int __init audit_classes_init(void)
|
||||
{
|
||||
#ifdef CONFIG_COMPAT
|
||||
extern __u32 s390_dir_class[];
|
||||
extern __u32 s390_write_class[];
|
||||
extern __u32 s390_read_class[];
|
||||
extern __u32 s390_chattr_class[];
|
||||
audit_register_class(AUDIT_CLASS_WRITE_32, s390_write_class);
|
||||
audit_register_class(AUDIT_CLASS_READ_32, s390_read_class);
|
||||
audit_register_class(AUDIT_CLASS_DIR_WRITE_32, s390_dir_class);
|
||||
audit_register_class(AUDIT_CLASS_CHATTR_32, s390_chattr_class);
|
||||
#endif
|
||||
audit_register_class(AUDIT_CLASS_WRITE, write_class);
|
||||
audit_register_class(AUDIT_CLASS_READ, read_class);
|
||||
audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
|
||||
audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
|
||||
return 0;
|
||||
}
|
||||
|
||||
__initcall(audit_classes_init);
|
||||
150
arch/s390/kernel/base.S
Normal file
150
arch/s390/kernel/base.S
Normal file
@@ -0,0 +1,150 @@
|
||||
/*
|
||||
* arch/s390/kernel/base.S
|
||||
*
|
||||
* Copyright IBM Corp. 2006,2007
|
||||
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
* Michael Holzheu <holzheu@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/lowcore.h>
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
|
||||
.globl s390_base_mcck_handler
|
||||
s390_base_mcck_handler:
|
||||
basr %r13,0
|
||||
0: lg %r15,__LC_PANIC_STACK # load panic stack
|
||||
aghi %r15,-STACK_FRAME_OVERHEAD
|
||||
larl %r1,s390_base_mcck_handler_fn
|
||||
lg %r1,0(%r1)
|
||||
ltgr %r1,%r1
|
||||
jz 1f
|
||||
basr %r14,%r1
|
||||
1: la %r1,4095
|
||||
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
|
||||
lpswe __LC_MCK_OLD_PSW
|
||||
|
||||
.section .bss
|
||||
.globl s390_base_mcck_handler_fn
|
||||
s390_base_mcck_handler_fn:
|
||||
.quad 0
|
||||
.previous
|
||||
|
||||
.globl s390_base_ext_handler
|
||||
s390_base_ext_handler:
|
||||
stmg %r0,%r15,__LC_SAVE_AREA
|
||||
basr %r13,0
|
||||
0: aghi %r15,-STACK_FRAME_OVERHEAD
|
||||
larl %r1,s390_base_ext_handler_fn
|
||||
lg %r1,0(%r1)
|
||||
ltgr %r1,%r1
|
||||
jz 1f
|
||||
basr %r14,%r1
|
||||
1: lmg %r0,%r15,__LC_SAVE_AREA
|
||||
ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
|
||||
lpswe __LC_EXT_OLD_PSW
|
||||
|
||||
.section .bss
|
||||
.globl s390_base_ext_handler_fn
|
||||
s390_base_ext_handler_fn:
|
||||
.quad 0
|
||||
.previous
|
||||
|
||||
.globl s390_base_pgm_handler
|
||||
s390_base_pgm_handler:
|
||||
stmg %r0,%r15,__LC_SAVE_AREA
|
||||
basr %r13,0
|
||||
0: aghi %r15,-STACK_FRAME_OVERHEAD
|
||||
larl %r1,s390_base_pgm_handler_fn
|
||||
lg %r1,0(%r1)
|
||||
ltgr %r1,%r1
|
||||
jz 1f
|
||||
basr %r14,%r1
|
||||
lmg %r0,%r15,__LC_SAVE_AREA
|
||||
lpswe __LC_PGM_OLD_PSW
|
||||
1: lpswe disabled_wait_psw-0b(%r13)
|
||||
|
||||
.align 8
|
||||
disabled_wait_psw:
|
||||
.quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
|
||||
|
||||
.section .bss
|
||||
.globl s390_base_pgm_handler_fn
|
||||
s390_base_pgm_handler_fn:
|
||||
.quad 0
|
||||
.previous
|
||||
|
||||
#else /* CONFIG_64BIT */
|
||||
|
||||
.globl s390_base_mcck_handler
|
||||
s390_base_mcck_handler:
|
||||
basr %r13,0
|
||||
0: l %r15,__LC_PANIC_STACK # load panic stack
|
||||
ahi %r15,-STACK_FRAME_OVERHEAD
|
||||
l %r1,2f-0b(%r13)
|
||||
l %r1,0(%r1)
|
||||
ltr %r1,%r1
|
||||
jz 1f
|
||||
basr %r14,%r1
|
||||
1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
|
||||
lpsw __LC_MCK_OLD_PSW
|
||||
|
||||
2: .long s390_base_mcck_handler_fn
|
||||
|
||||
.section .bss
|
||||
.globl s390_base_mcck_handler_fn
|
||||
s390_base_mcck_handler_fn:
|
||||
.long 0
|
||||
.previous
|
||||
|
||||
.globl s390_base_ext_handler
|
||||
s390_base_ext_handler:
|
||||
stm %r0,%r15,__LC_SAVE_AREA
|
||||
basr %r13,0
|
||||
0: ahi %r15,-STACK_FRAME_OVERHEAD
|
||||
l %r1,2f-0b(%r13)
|
||||
l %r1,0(%r1)
|
||||
ltr %r1,%r1
|
||||
jz 1f
|
||||
basr %r14,%r1
|
||||
1: lm %r0,%r15,__LC_SAVE_AREA
|
||||
ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
|
||||
lpsw __LC_EXT_OLD_PSW
|
||||
|
||||
2: .long s390_base_ext_handler_fn
|
||||
|
||||
.section .bss
|
||||
.globl s390_base_ext_handler_fn
|
||||
s390_base_ext_handler_fn:
|
||||
.long 0
|
||||
.previous
|
||||
|
||||
.globl s390_base_pgm_handler
|
||||
s390_base_pgm_handler:
|
||||
stm %r0,%r15,__LC_SAVE_AREA
|
||||
basr %r13,0
|
||||
0: ahi %r15,-STACK_FRAME_OVERHEAD
|
||||
l %r1,2f-0b(%r13)
|
||||
l %r1,0(%r1)
|
||||
ltr %r1,%r1
|
||||
jz 1f
|
||||
basr %r14,%r1
|
||||
lm %r0,%r15,__LC_SAVE_AREA
|
||||
lpsw __LC_PGM_OLD_PSW
|
||||
|
||||
1: lpsw disabled_wait_psw-0b(%r13)
|
||||
|
||||
2: .long s390_base_pgm_handler_fn
|
||||
|
||||
disabled_wait_psw:
|
||||
.align 8
|
||||
.long 0x000a0000,0x00000000 + s390_base_pgm_handler
|
||||
|
||||
.section .bss
|
||||
.globl s390_base_pgm_handler_fn
|
||||
s390_base_pgm_handler_fn:
|
||||
.long 0
|
||||
.previous
|
||||
|
||||
#endif /* CONFIG_64BIT */
|
||||
203
arch/s390/kernel/binfmt_elf32.c
Normal file
203
arch/s390/kernel/binfmt_elf32.c
Normal file
@@ -0,0 +1,203 @@
|
||||
/*
|
||||
* Support for 32-bit Linux for S390 ELF binaries.
|
||||
*
|
||||
* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Gerhard Tonn (ton@de.ibm.com)
|
||||
*
|
||||
* Heavily inspired by the 32-bit Sparc compat code which is
|
||||
* Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com)
|
||||
* Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz)
|
||||
*/
|
||||
|
||||
#define __ASMS390_ELF_H
|
||||
|
||||
#include <linux/time.h>
|
||||
|
||||
/*
|
||||
* These are used to set parameters in the core dumps.
|
||||
*/
|
||||
#define ELF_CLASS ELFCLASS32
|
||||
#define ELF_DATA ELFDATA2MSB
|
||||
#define ELF_ARCH EM_S390
|
||||
|
||||
/*
|
||||
* This is used to ensure we don't load something for the wrong architecture.
|
||||
*/
|
||||
#define elf_check_arch(x) \
|
||||
(((x)->e_machine == EM_S390 || (x)->e_machine == EM_S390_OLD) \
|
||||
&& (x)->e_ident[EI_CLASS] == ELF_CLASS)
|
||||
|
||||
/* ELF register definitions */
|
||||
#define NUM_GPRS 16
|
||||
#define NUM_FPRS 16
|
||||
#define NUM_ACRS 16
|
||||
|
||||
/* For SVR4/S390 the function pointer to be registered with `atexit` is
|
||||
passed in R14. */
|
||||
#define ELF_PLAT_INIT(_r, load_addr) \
|
||||
do { \
|
||||
_r->gprs[14] = 0; \
|
||||
} while(0)
|
||||
|
||||
#define USE_ELF_CORE_DUMP
|
||||
#define ELF_EXEC_PAGESIZE 4096
|
||||
|
||||
/* This is the location that an ET_DYN program is loaded if exec'ed. Typical
|
||||
use of this is to invoke "./ld.so someprog" to test out a new version of
|
||||
the loader. We need to make sure that it is out of the way of the program
|
||||
that it will "exec", and that there is sufficient room for the brk. */
|
||||
|
||||
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
|
||||
|
||||
/* Wow, the "main" arch needs arch dependent functions too.. :) */
|
||||
|
||||
/* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
|
||||
now struct_user_regs, they are different) */
|
||||
|
||||
#define ELF_CORE_COPY_REGS(pr_reg, regs) dump_regs32(regs, &pr_reg);
|
||||
|
||||
#define ELF_CORE_COPY_TASK_REGS(tsk, regs) dump_task_regs32(tsk, regs)
|
||||
|
||||
#define ELF_CORE_COPY_FPREGS(tsk, fpregs) dump_task_fpu(tsk, fpregs)
|
||||
|
||||
/* This yields a mask that user programs can use to figure out what
|
||||
instruction set this CPU supports. */
|
||||
|
||||
#define ELF_HWCAP (0)
|
||||
|
||||
/* This yields a string that ld.so will use to load implementation
|
||||
specific libraries for optimization. This is more specific in
|
||||
intent than poking at uname or /proc/cpuinfo.
|
||||
|
||||
For the moment, we have only optimizations for the Intel generations,
|
||||
but that could change... */
|
||||
|
||||
#define ELF_PLATFORM (NULL)
|
||||
|
||||
#define SET_PERSONALITY(ex, ibcs2) \
|
||||
do { \
|
||||
if (ibcs2) \
|
||||
set_personality(PER_SVR4); \
|
||||
else if (current->personality != PER_LINUX32) \
|
||||
set_personality(PER_LINUX); \
|
||||
set_thread_flag(TIF_31BIT); \
|
||||
} while (0)
|
||||
|
||||
#include "compat_linux.h"
|
||||
|
||||
typedef _s390_fp_regs32 elf_fpregset_t;
|
||||
|
||||
typedef struct
|
||||
{
|
||||
|
||||
_psw_t32 psw;
|
||||
__u32 gprs[__NUM_GPRS];
|
||||
__u32 acrs[__NUM_ACRS];
|
||||
__u32 orig_gpr2;
|
||||
} s390_regs32;
|
||||
typedef s390_regs32 elf_gregset_t;
|
||||
|
||||
static inline int dump_regs32(struct pt_regs *ptregs, elf_gregset_t *regs)
|
||||
{
|
||||
int i;
|
||||
|
||||
memcpy(®s->psw.mask, &ptregs->psw.mask, 4);
|
||||
memcpy(®s->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
|
||||
for (i = 0; i < NUM_GPRS; i++)
|
||||
regs->gprs[i] = ptregs->gprs[i];
|
||||
save_access_regs(regs->acrs);
|
||||
regs->orig_gpr2 = ptregs->orig_gpr2;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline int dump_task_regs32(struct task_struct *tsk, elf_gregset_t *regs)
|
||||
{
|
||||
struct pt_regs *ptregs = task_pt_regs(tsk);
|
||||
int i;
|
||||
|
||||
memcpy(®s->psw.mask, &ptregs->psw.mask, 4);
|
||||
memcpy(®s->psw.addr, (char *)&ptregs->psw.addr + 4, 4);
|
||||
for (i = 0; i < NUM_GPRS; i++)
|
||||
regs->gprs[i] = ptregs->gprs[i];
|
||||
memcpy(regs->acrs, tsk->thread.acrs, sizeof(regs->acrs));
|
||||
regs->orig_gpr2 = ptregs->orig_gpr2;
|
||||
return 1;
|
||||
}
|
||||
|
||||
static inline int dump_task_fpu(struct task_struct *tsk, elf_fpregset_t *fpregs)
|
||||
{
|
||||
if (tsk == current)
|
||||
save_fp_regs((s390_fp_regs *) fpregs);
|
||||
else
|
||||
memcpy(fpregs, &tsk->thread.fp_regs, sizeof(elf_fpregset_t));
|
||||
return 1;
|
||||
}
|
||||
|
||||
#include <asm/processor.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/elfcore.h>
|
||||
#include <linux/binfmts.h>
|
||||
#include <linux/compat.h>
|
||||
|
||||
#define elf_prstatus elf_prstatus32
|
||||
struct elf_prstatus32
|
||||
{
|
||||
struct elf_siginfo pr_info; /* Info associated with signal */
|
||||
short pr_cursig; /* Current signal */
|
||||
u32 pr_sigpend; /* Set of pending signals */
|
||||
u32 pr_sighold; /* Set of held signals */
|
||||
pid_t pr_pid;
|
||||
pid_t pr_ppid;
|
||||
pid_t pr_pgrp;
|
||||
pid_t pr_sid;
|
||||
struct compat_timeval pr_utime; /* User time */
|
||||
struct compat_timeval pr_stime; /* System time */
|
||||
struct compat_timeval pr_cutime; /* Cumulative user time */
|
||||
struct compat_timeval pr_cstime; /* Cumulative system time */
|
||||
elf_gregset_t pr_reg; /* GP registers */
|
||||
int pr_fpvalid; /* True if math co-processor being used. */
|
||||
};
|
||||
|
||||
#define elf_prpsinfo elf_prpsinfo32
|
||||
struct elf_prpsinfo32
|
||||
{
|
||||
char pr_state; /* numeric process state */
|
||||
char pr_sname; /* char for pr_state */
|
||||
char pr_zomb; /* zombie */
|
||||
char pr_nice; /* nice val */
|
||||
u32 pr_flag; /* flags */
|
||||
u16 pr_uid;
|
||||
u16 pr_gid;
|
||||
pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
|
||||
/* Lots missing */
|
||||
char pr_fname[16]; /* filename of executable */
|
||||
char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
|
||||
};
|
||||
|
||||
#include <linux/highuid.h>
|
||||
|
||||
/*
|
||||
#define init_elf_binfmt init_elf32_binfmt
|
||||
*/
|
||||
|
||||
#undef start_thread
|
||||
#define start_thread start_thread31
|
||||
|
||||
MODULE_DESCRIPTION("Binary format loader for compatibility with 32bit Linux for S390 binaries,"
|
||||
" Copyright 2000 IBM Corporation");
|
||||
MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
|
||||
|
||||
#undef MODULE_DESCRIPTION
|
||||
#undef MODULE_AUTHOR
|
||||
|
||||
#undef cputime_to_timeval
|
||||
#define cputime_to_timeval cputime_to_compat_timeval
|
||||
static inline void
|
||||
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
|
||||
{
|
||||
value->tv_usec = cputime % 1000000;
|
||||
value->tv_sec = cputime / 1000000;
|
||||
}
|
||||
|
||||
#include "../../../fs/binfmt_elf.c"
|
||||
|
||||
56
arch/s390/kernel/bitmap.S
Normal file
56
arch/s390/kernel/bitmap.S
Normal file
@@ -0,0 +1,56 @@
|
||||
/*
|
||||
* arch/s390/kernel/bitmap.S
|
||||
* Bitmaps for set_bit, clear_bit, test_and_set_bit, ...
|
||||
* See include/asm-s390/{bitops.h|posix_types.h} for details
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
|
||||
*/
|
||||
|
||||
.globl _oi_bitmap
|
||||
_oi_bitmap:
|
||||
.byte 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80
|
||||
|
||||
.globl _ni_bitmap
|
||||
_ni_bitmap:
|
||||
.byte 0xFE,0xFD,0xFB,0xF7,0xEF,0xDF,0xBF,0x7F
|
||||
|
||||
.globl _zb_findmap
|
||||
_zb_findmap:
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,7
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,6
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,5
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,4
|
||||
.byte 0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,8
|
||||
|
||||
.globl _sb_findmap
|
||||
_sb_findmap:
|
||||
.byte 8,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
.byte 4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
|
||||
|
||||
38
arch/s390/kernel/compat_audit.c
Normal file
38
arch/s390/kernel/compat_audit.c
Normal file
@@ -0,0 +1,38 @@
|
||||
#undef __s390x__
|
||||
#include <asm/unistd.h>
|
||||
|
||||
unsigned s390_dir_class[] = {
|
||||
#include <asm-generic/audit_dir_write.h>
|
||||
~0U
|
||||
};
|
||||
|
||||
unsigned s390_chattr_class[] = {
|
||||
#include <asm-generic/audit_change_attr.h>
|
||||
~0U
|
||||
};
|
||||
|
||||
unsigned s390_write_class[] = {
|
||||
#include <asm-generic/audit_write.h>
|
||||
~0U
|
||||
};
|
||||
|
||||
unsigned s390_read_class[] = {
|
||||
#include <asm-generic/audit_read.h>
|
||||
~0U
|
||||
};
|
||||
|
||||
int s390_classify_syscall(unsigned syscall)
|
||||
{
|
||||
switch(syscall) {
|
||||
case __NR_open:
|
||||
return 2;
|
||||
case __NR_openat:
|
||||
return 3;
|
||||
case __NR_socketcall:
|
||||
return 4;
|
||||
case __NR_execve:
|
||||
return 5;
|
||||
default:
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
29
arch/s390/kernel/compat_exec_domain.c
Normal file
29
arch/s390/kernel/compat_exec_domain.c
Normal file
@@ -0,0 +1,29 @@
|
||||
/*
|
||||
* Support for 32-bit Linux for S390 personality.
|
||||
*
|
||||
* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Gerhard Tonn (ton@de.ibm.com)
|
||||
*
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/sched.h>
|
||||
|
||||
static struct exec_domain s390_exec_domain;
|
||||
|
||||
static int __init s390_init (void)
|
||||
{
|
||||
s390_exec_domain.name = "Linux/s390";
|
||||
s390_exec_domain.handler = NULL;
|
||||
s390_exec_domain.pers_low = PER_LINUX32;
|
||||
s390_exec_domain.pers_high = PER_LINUX32;
|
||||
s390_exec_domain.signal_map = default_exec_domain.signal_map;
|
||||
s390_exec_domain.signal_invmap = default_exec_domain.signal_invmap;
|
||||
register_exec_domain(&s390_exec_domain);
|
||||
return 0;
|
||||
}
|
||||
|
||||
__initcall(s390_init);
|
||||
972
arch/s390/kernel/compat_linux.c
Normal file
972
arch/s390/kernel/compat_linux.c
Normal file
@@ -0,0 +1,972 @@
|
||||
/*
|
||||
* arch/s390x/kernel/linux32.c
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
|
||||
* Gerhard Tonn (ton@de.ibm.com)
|
||||
* Thomas Spatzier (tspat@de.ibm.com)
|
||||
*
|
||||
* Conversion between 31bit and 64bit native syscalls.
|
||||
*
|
||||
* Heavily inspired by the 32-bit Sparc compat code which is
|
||||
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
|
||||
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
|
||||
*
|
||||
*/
|
||||
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/resource.h>
|
||||
#include <linux/times.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/sem.h>
|
||||
#include <linux/msg.h>
|
||||
#include <linux/shm.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/uio.h>
|
||||
#include <linux/nfs_fs.h>
|
||||
#include <linux/quota.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sunrpc/svc.h>
|
||||
#include <linux/nfsd/nfsd.h>
|
||||
#include <linux/nfsd/cache.h>
|
||||
#include <linux/nfsd/xdr.h>
|
||||
#include <linux/nfsd/syscall.h>
|
||||
#include <linux/poll.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/filter.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/highuid.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/ipv6.h>
|
||||
#include <linux/in.h>
|
||||
#include <linux/icmpv6.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/sysctl.h>
|
||||
#include <linux/binfmts.h>
|
||||
#include <linux/capability.h>
|
||||
#include <linux/compat.h>
|
||||
#include <linux/vfs.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/fadvise.h>
|
||||
|
||||
#include <asm/types.h>
|
||||
#include <asm/ipc.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/semaphore.h>
|
||||
|
||||
#include <net/scm.h>
|
||||
#include <net/sock.h>
|
||||
|
||||
#include "compat_linux.h"
|
||||
|
||||
long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
|
||||
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
|
||||
PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
|
||||
long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
|
||||
PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
|
||||
PSW32_MASK_PSTATE);
|
||||
|
||||
/* For this source file, we want overflow handling. */
|
||||
|
||||
#undef high2lowuid
|
||||
#undef high2lowgid
|
||||
#undef low2highuid
|
||||
#undef low2highgid
|
||||
#undef SET_UID16
|
||||
#undef SET_GID16
|
||||
#undef NEW_TO_OLD_UID
|
||||
#undef NEW_TO_OLD_GID
|
||||
#undef SET_OLDSTAT_UID
|
||||
#undef SET_OLDSTAT_GID
|
||||
#undef SET_STAT_UID
|
||||
#undef SET_STAT_GID
|
||||
|
||||
#define high2lowuid(uid) ((uid) > 65535) ? (u16)overflowuid : (u16)(uid)
|
||||
#define high2lowgid(gid) ((gid) > 65535) ? (u16)overflowgid : (u16)(gid)
|
||||
#define low2highuid(uid) ((uid) == (u16)-1) ? (uid_t)-1 : (uid_t)(uid)
|
||||
#define low2highgid(gid) ((gid) == (u16)-1) ? (gid_t)-1 : (gid_t)(gid)
|
||||
#define SET_UID16(var, uid) var = high2lowuid(uid)
|
||||
#define SET_GID16(var, gid) var = high2lowgid(gid)
|
||||
#define NEW_TO_OLD_UID(uid) high2lowuid(uid)
|
||||
#define NEW_TO_OLD_GID(gid) high2lowgid(gid)
|
||||
#define SET_OLDSTAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid)
|
||||
#define SET_OLDSTAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid)
|
||||
#define SET_STAT_UID(stat, uid) (stat).st_uid = high2lowuid(uid)
|
||||
#define SET_STAT_GID(stat, gid) (stat).st_gid = high2lowgid(gid)
|
||||
|
||||
asmlinkage long sys32_chown16(const char __user * filename, u16 user, u16 group)
|
||||
{
|
||||
return sys_chown(filename, low2highuid(user), low2highgid(group));
|
||||
}
|
||||
|
||||
asmlinkage long sys32_lchown16(const char __user * filename, u16 user, u16 group)
|
||||
{
|
||||
return sys_lchown(filename, low2highuid(user), low2highgid(group));
|
||||
}
|
||||
|
||||
asmlinkage long sys32_fchown16(unsigned int fd, u16 user, u16 group)
|
||||
{
|
||||
return sys_fchown(fd, low2highuid(user), low2highgid(group));
|
||||
}
|
||||
|
||||
asmlinkage long sys32_setregid16(u16 rgid, u16 egid)
|
||||
{
|
||||
return sys_setregid(low2highgid(rgid), low2highgid(egid));
|
||||
}
|
||||
|
||||
asmlinkage long sys32_setgid16(u16 gid)
|
||||
{
|
||||
return sys_setgid((gid_t)gid);
|
||||
}
|
||||
|
||||
asmlinkage long sys32_setreuid16(u16 ruid, u16 euid)
|
||||
{
|
||||
return sys_setreuid(low2highuid(ruid), low2highuid(euid));
|
||||
}
|
||||
|
||||
asmlinkage long sys32_setuid16(u16 uid)
|
||||
{
|
||||
return sys_setuid((uid_t)uid);
|
||||
}
|
||||
|
||||
asmlinkage long sys32_setresuid16(u16 ruid, u16 euid, u16 suid)
|
||||
{
|
||||
return sys_setresuid(low2highuid(ruid), low2highuid(euid),
|
||||
low2highuid(suid));
|
||||
}
|
||||
|
||||
asmlinkage long sys32_getresuid16(u16 __user *ruid, u16 __user *euid, u16 __user *suid)
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!(retval = put_user(high2lowuid(current->uid), ruid)) &&
|
||||
!(retval = put_user(high2lowuid(current->euid), euid)))
|
||||
retval = put_user(high2lowuid(current->suid), suid);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
asmlinkage long sys32_setresgid16(u16 rgid, u16 egid, u16 sgid)
|
||||
{
|
||||
return sys_setresgid(low2highgid(rgid), low2highgid(egid),
|
||||
low2highgid(sgid));
|
||||
}
|
||||
|
||||
asmlinkage long sys32_getresgid16(u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
|
||||
{
|
||||
int retval;
|
||||
|
||||
if (!(retval = put_user(high2lowgid(current->gid), rgid)) &&
|
||||
!(retval = put_user(high2lowgid(current->egid), egid)))
|
||||
retval = put_user(high2lowgid(current->sgid), sgid);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
asmlinkage long sys32_setfsuid16(u16 uid)
|
||||
{
|
||||
return sys_setfsuid((uid_t)uid);
|
||||
}
|
||||
|
||||
asmlinkage long sys32_setfsgid16(u16 gid)
|
||||
{
|
||||
return sys_setfsgid((gid_t)gid);
|
||||
}
|
||||
|
||||
static int groups16_to_user(u16 __user *grouplist, struct group_info *group_info)
|
||||
{
|
||||
int i;
|
||||
u16 group;
|
||||
|
||||
for (i = 0; i < group_info->ngroups; i++) {
|
||||
group = (u16)GROUP_AT(group_info, i);
|
||||
if (put_user(group, grouplist+i))
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int groups16_from_user(struct group_info *group_info, u16 __user *grouplist)
|
||||
{
|
||||
int i;
|
||||
u16 group;
|
||||
|
||||
for (i = 0; i < group_info->ngroups; i++) {
|
||||
if (get_user(group, grouplist+i))
|
||||
return -EFAULT;
|
||||
GROUP_AT(group_info, i) = (gid_t)group;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
asmlinkage long sys32_getgroups16(int gidsetsize, u16 __user *grouplist)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (gidsetsize < 0)
|
||||
return -EINVAL;
|
||||
|
||||
get_group_info(current->group_info);
|
||||
i = current->group_info->ngroups;
|
||||
if (gidsetsize) {
|
||||
if (i > gidsetsize) {
|
||||
i = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
if (groups16_to_user(grouplist, current->group_info)) {
|
||||
i = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
out:
|
||||
put_group_info(current->group_info);
|
||||
return i;
|
||||
}
|
||||
|
||||
asmlinkage long sys32_setgroups16(int gidsetsize, u16 __user *grouplist)
|
||||
{
|
||||
struct group_info *group_info;
|
||||
int retval;
|
||||
|
||||
if (!capable(CAP_SETGID))
|
||||
return -EPERM;
|
||||
if ((unsigned)gidsetsize > NGROUPS_MAX)
|
||||
return -EINVAL;
|
||||
|
||||
group_info = groups_alloc(gidsetsize);
|
||||
if (!group_info)
|
||||
return -ENOMEM;
|
||||
retval = groups16_from_user(group_info, grouplist);
|
||||
if (retval) {
|
||||
put_group_info(group_info);
|
||||
return retval;
|
||||
}
|
||||
|
||||
retval = set_current_groups(group_info);
|
||||
put_group_info(group_info);
|
||||
|
||||
return retval;
|
||||
}
|
||||
|
||||
asmlinkage long sys32_getuid16(void)
|
||||
{
|
||||
return high2lowuid(current->uid);
|
||||
}
|
||||
|
||||
asmlinkage long sys32_geteuid16(void)
|
||||
{
|
||||
return high2lowuid(current->euid);
|
||||
}
|
||||
|
||||
asmlinkage long sys32_getgid16(void)
|
||||
{
|
||||
return high2lowgid(current->gid);
|
||||
}
|
||||
|
||||
asmlinkage long sys32_getegid16(void)
|
||||
{
|
||||
return high2lowgid(current->egid);
|
||||
}
|
||||
|
||||
/* 32-bit timeval and related flotsam. */
|
||||
|
||||
static inline long get_tv32(struct timeval *o, struct compat_timeval __user *i)
|
||||
{
|
||||
return (!access_ok(VERIFY_READ, o, sizeof(*o)) ||
|
||||
(__get_user(o->tv_sec, &i->tv_sec) ||
|
||||
__get_user(o->tv_usec, &i->tv_usec)));
|
||||
}
|
||||
|
||||
static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
|
||||
{
|
||||
return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
|
||||
(__put_user(i->tv_sec, &o->tv_sec) ||
|
||||
__put_user(i->tv_usec, &o->tv_usec)));
|
||||
}
|
||||
|
||||
/*
 * sys32_ipc() is the de-multiplexer for the SysV IPC calls in 32bit emulation.
 *
 * This is really horribly ugly.
 */
#ifdef CONFIG_SYSVIPC
/*
 * Decode the multiplexed IPC "call" number and forward to the native or
 * compat implementation of the requested operation.  "ptr" is a 31 bit
 * user-space pointer whose meaning depends on the sub-call; operations
 * whose argument layout is identical on 31 and 64 bit go straight to
 * the native sys_* entry, the rest go through the compat_sys_* layer.
 */
asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
{
	if (call >> 16)		/* hack for backward compatibility */
		return -EINVAL;

	call &= 0xffff;

	switch (call) {
	case SEMTIMEDOP:
		return compat_sys_semtimedop(first, compat_ptr(ptr),
					     second, compat_ptr(third));
	case SEMOP:
		/* struct sembuf is the same on 32 and 64bit :)) */
		return sys_semtimedop(first, compat_ptr(ptr),
				      second, NULL);
	case SEMGET:
		return sys_semget(first, second, third);
	case SEMCTL:
		return compat_sys_semctl(first, second, third,
					 compat_ptr(ptr));
	case MSGSND:
		return compat_sys_msgsnd(first, second, third,
					 compat_ptr(ptr));
	case MSGRCV:
		return compat_sys_msgrcv(first, second, 0, third,
					 0, compat_ptr(ptr));
	case MSGGET:
		return sys_msgget((key_t) first, second);
	case MSGCTL:
		return compat_sys_msgctl(first, second, compat_ptr(ptr));
	case SHMAT:
		return compat_sys_shmat(first, second, third,
					0, compat_ptr(ptr));
	case SHMDT:
		return sys_shmdt(compat_ptr(ptr));
	case SHMGET:
		return sys_shmget(first, (unsigned)second, third);
	case SHMCTL:
		return compat_sys_shmctl(first, second, compat_ptr(ptr));
	}

	return -ENOSYS;
}
#endif
|
||||
|
||||
/*
 * 31 bit truncate64(): the 64 bit length arrives split into two 32 bit
 * registers.  A negative high word cannot form a valid length.
 */
asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
{
	if ((int)high < 0)
		return -EINVAL;
	return sys_truncate(path, (high << 32) | low);
}
|
||||
|
||||
asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long high, unsigned long low)
|
||||
{
|
||||
if ((int)high < 0)
|
||||
return -EINVAL;
|
||||
else
|
||||
return sys_ftruncate(fd, (high << 32) | low);
|
||||
}
|
||||
|
||||
/*
 * Fill a 31 bit compat_stat structure from a kernel kstat and copy it
 * to user space field by field.  Fails with -EOVERFLOW when the device
 * numbers or the inode number do not fit into the old 32 bit fields,
 * -EFAULT (accumulated in err) when a store to user space faults.
 */
int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
{
	compat_ino_t ino;
	int err;

	if (!old_valid_dev(stat->dev) || !old_valid_dev(stat->rdev))
		return -EOVERFLOW;

	/* Make sure the inode number survives the narrowing conversion. */
	ino = stat->ino;
	if (sizeof(ino) < sizeof(stat->ino) && ino != stat->ino)
		return -EOVERFLOW;

	err = put_user(old_encode_dev(stat->dev), &statbuf->st_dev);
	err |= put_user(stat->ino, &statbuf->st_ino);
	err |= put_user(stat->mode, &statbuf->st_mode);
	err |= put_user(stat->nlink, &statbuf->st_nlink);
	/* UIDs/GIDs are squeezed into the old 16 bit fields. */
	err |= put_user(high2lowuid(stat->uid), &statbuf->st_uid);
	err |= put_user(high2lowgid(stat->gid), &statbuf->st_gid);
	err |= put_user(old_encode_dev(stat->rdev), &statbuf->st_rdev);
	err |= put_user(stat->size, &statbuf->st_size);
	err |= put_user(stat->atime.tv_sec, &statbuf->st_atime);
	err |= put_user(stat->atime.tv_nsec, &statbuf->st_atime_nsec);
	err |= put_user(stat->mtime.tv_sec, &statbuf->st_mtime);
	err |= put_user(stat->mtime.tv_nsec, &statbuf->st_mtime_nsec);
	err |= put_user(stat->ctime.tv_sec, &statbuf->st_ctime);
	err |= put_user(stat->ctime.tv_nsec, &statbuf->st_ctime_nsec);
	err |= put_user(stat->blksize, &statbuf->st_blksize);
	err |= put_user(stat->blocks, &statbuf->st_blocks);
	/* fixme
	err |= put_user(0, &statbuf->__unused4[0]);
	err |= put_user(0, &statbuf->__unused4[1]);
	*/
	return err;
}
|
||||
|
||||
/*
 * 31 bit wrapper for sched_rr_get_interval(): run the native syscall
 * against a kernel-space timespec under KERNEL_DS, then convert the
 * result to the 32 bit compat_timespec layout for the caller.
 */
asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
				struct compat_timespec __user *interval)
{
	struct timespec t;
	int ret;
	mm_segment_t old_fs = get_fs ();

	/* Temporarily widen the address limit so &t passes uaccess checks. */
	set_fs (KERNEL_DS);
	ret = sys_sched_rr_get_interval(pid,
					(struct timespec __force __user *) &t);
	set_fs (old_fs);
	if (put_compat_timespec(&t, interval))
		return -EFAULT;
	return ret;
}
|
||||
|
||||
/*
 * 31 bit wrapper for rt_sigprocmask().  A compat sigset is an array of
 * 32 bit words; fold each pair into one 64 bit sigset word before the
 * native call and split the returned old set up again afterwards.
 */
asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
			compat_sigset_t __user *oset, size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (set) {
		if (copy_from_user (&s32, set, sizeof(compat_sigset_t)))
			return -EFAULT;
		/* Merge 32 bit word pairs; cases fall through on purpose. */
		switch (_NSIG_WORDS) {
		case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
		case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
		case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
		case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
		}
	}
	/* Run the native syscall on the kernel-space sigset buffer. */
	set_fs (KERNEL_DS);
	ret = sys_rt_sigprocmask(how,
				 set ? (sigset_t __force __user *) &s : NULL,
				 oset ? (sigset_t __force __user *) &s : NULL,
				 sigsetsize);
	set_fs (old_fs);
	if (ret) return ret;
	if (oset) {
		/* Split the returned old set back into 32 bit words;
		   cases fall through on purpose. */
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (oset, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return 0;
}
|
||||
|
||||
/*
 * 31 bit wrapper for rt_sigpending(): fetch the pending set via the
 * native syscall under KERNEL_DS, then split the 64 bit sigset words
 * into the 32 bit compat layout and copy it out.
 */
asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
				size_t sigsetsize)
{
	sigset_t s;
	compat_sigset_t s32;
	int ret;
	mm_segment_t old_fs = get_fs();

	set_fs (KERNEL_DS);
	ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
	set_fs (old_fs);
	if (!ret) {
		/* Split 64 bit words into 32 bit pairs; cases fall through. */
		switch (_NSIG_WORDS) {
		case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3];
		case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2];
		case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1];
		case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0];
		}
		if (copy_to_user (set, &s32, sizeof(compat_sigset_t)))
			return -EFAULT;
	}
	return ret;
}
|
||||
|
||||
/*
 * 31 bit wrapper for rt_sigqueueinfo(): convert the compat siginfo to
 * the 64 bit layout, then invoke the native syscall under KERNEL_DS so
 * the kernel-space siginfo passes the uaccess checks.
 */
asmlinkage long
sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
{
	siginfo_t info;
	int ret;
	mm_segment_t old_fs = get_fs();

	if (copy_siginfo_from_user32(&info, uinfo))
		return -EFAULT;
	set_fs (KERNEL_DS);
	ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info);
	set_fs (old_fs);
	return ret;
}
|
||||
|
||||
/*
|
||||
* sys32_execve() executes a new program after the asm stub has set
|
||||
* things up for us. This should basically do what I want it to.
|
||||
*/
|
||||
asmlinkage long
|
||||
sys32_execve(struct pt_regs regs)
|
||||
{
|
||||
int error;
|
||||
char * filename;
|
||||
|
||||
filename = getname(compat_ptr(regs.orig_gpr2));
|
||||
error = PTR_ERR(filename);
|
||||
if (IS_ERR(filename))
|
||||
goto out;
|
||||
error = compat_do_execve(filename, compat_ptr(regs.gprs[3]),
|
||||
compat_ptr(regs.gprs[4]), ®s);
|
||||
if (error == 0)
|
||||
{
|
||||
task_lock(current);
|
||||
current->ptrace &= ~PT_DTRACE;
|
||||
task_unlock(current);
|
||||
current->thread.fp_regs.fpc=0;
|
||||
asm volatile("sfpc %0,0" : : "d" (0));
|
||||
}
|
||||
putname(filename);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
|
||||
#ifdef CONFIG_MODULES

/*
 * 31 bit entry for init_module(); the argument layout is identical,
 * so forward directly to the native syscall.
 */
asmlinkage long
sys32_init_module(void __user *umod, unsigned long len,
		const char __user *uargs)
{
	return sys_init_module(umod, len, uargs);
}

/*
 * 31 bit entry for delete_module(); forwarded unchanged.
 */
asmlinkage long
sys32_delete_module(const char __user *name_user, unsigned int flags)
{
	return sys_delete_module(name_user, flags);
}

#else /* CONFIG_MODULES */

/* Module support disabled: both entry points report -ENOSYS. */
asmlinkage long
sys32_init_module(void __user *umod, unsigned long len,
		const char __user *uargs)
{
	return -ENOSYS;
}

asmlinkage long
sys32_delete_module(const char __user *name_user, unsigned int flags)
{
	return -ENOSYS;
}

#endif /* CONFIG_MODULES */
|
||||
|
||||
/* Translations due to time_t size differences. Which affects all
|
||||
sorts of things, like timeval and itimerval. */
|
||||
|
||||
extern struct timezone sys_tz;
|
||||
|
||||
/*
 * 31 bit gettimeofday(): fill a compat_timeval and, when requested,
 * copy out the (layout-identical) timezone.
 */
asmlinkage long sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
{
	struct timeval ktv;

	if (tv) {
		do_gettimeofday(&ktv);
		if (put_tv32(tv, &ktv))
			return -EFAULT;
	}
	if (tz && copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
		return -EFAULT;
	return 0;
}
|
||||
|
||||
/*
 * Read a 31 bit user-space compat_timeval and convert it into a kernel
 * timespec (microseconds scaled to nanoseconds).  Returns 0 or -EFAULT.
 */
static inline long get_ts32(struct timespec *o, struct compat_timeval __user *i)
{
	long usec;

	if (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
	    __get_user(o->tv_sec, &i->tv_sec) ||
	    __get_user(usec, &i->tv_usec))
		return -EFAULT;
	o->tv_nsec = usec * 1000;
	return 0;
}
|
||||
|
||||
/*
 * 31 bit settimeofday(): convert the compat_timeval into a kernel
 * timespec and pass both arguments on to the common implementation.
 */
asmlinkage long sys32_settimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
{
	struct timespec kts;
	struct timezone ktz;

	if (tv && get_ts32(&kts, tv))
		return -EFAULT;
	if (tz && copy_from_user(&ktz, tz, sizeof(ktz)))
		return -EFAULT;

	return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
}
|
||||
|
||||
/* These are here just in case some old sparc32 binary calls it. */
asmlinkage long sys32_pause(void)
{
	/* Sleep until a signal wakes us; the signal code handles restart. */
	current->state = TASK_INTERRUPTIBLE;
	schedule();
	return -ERESTARTNOHAND;
}
|
||||
|
||||
/*
 * 31 bit pread64(): the file position arrives split into two 32 bit
 * registers; reassemble it into a loff_t for the native syscall.
 */
asmlinkage long sys32_pread64(unsigned int fd, char __user *ubuf,
				size_t count, u32 poshi, u32 poslo)
{
	loff_t pos;

	if ((compat_ssize_t) count < 0)
		return -EINVAL;
	pos = ((loff_t)AA(poshi) << 32) | AA(poslo);
	return sys_pread64(fd, ubuf, count, pos);
}
|
||||
|
||||
/*
 * 31 bit pwrite64(): reassemble the split 64 bit file position and
 * forward to the native syscall.
 */
asmlinkage long sys32_pwrite64(unsigned int fd, const char __user *ubuf,
				size_t count, u32 poshi, u32 poslo)
{
	loff_t pos;

	if ((compat_ssize_t) count < 0)
		return -EINVAL;
	pos = ((loff_t)AA(poshi) << 32) | AA(poslo);
	return sys_pwrite64(fd, ubuf, count, pos);
}
|
||||
|
||||
/*
 * 31 bit readahead(): reassemble the split 64 bit offset and forward.
 */
asmlinkage compat_ssize_t sys32_readahead(int fd, u32 offhi, u32 offlo, s32 count)
{
	loff_t offset = ((loff_t)AA(offhi) << 32) | AA(offlo);

	return sys_readahead(fd, offset, count);
}
|
||||
|
||||
/*
 * 31 bit wrapper for sendfile(): the user offset is a 32 bit off_t, so
 * bounce it through a kernel off_t, run the native syscall under
 * KERNEL_DS and write the updated offset back to user space.
 */
asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, size_t count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	off_t of;

	if (offset && get_user(of, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile(out_fd, in_fd,
			   offset ? (off_t __force __user *) &of : NULL, count);
	set_fs(old_fs);

	if (offset && put_user(of, offset))
		return -EFAULT;

	return ret;
}
|
||||
|
||||
/*
 * 31 bit wrapper for sendfile64(): like sys32_sendfile() but with a
 * 64 bit compat_loff_t offset that is bounced through a kernel loff_t.
 */
asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
				compat_loff_t __user *offset, s32 count)
{
	mm_segment_t old_fs = get_fs();
	int ret;
	loff_t lof;

	if (offset && get_user(lof, offset))
		return -EFAULT;

	set_fs(KERNEL_DS);
	ret = sys_sendfile64(out_fd, in_fd,
			     offset ? (loff_t __force __user *) &lof : NULL,
			     count);
	set_fs(old_fs);

	if (offset && put_user(lof, offset))
		return -EFAULT;

	return ret;
}
|
||||
|
||||
#ifdef CONFIG_SYSCTL_SYSCALL
|
||||
struct __sysctl_args32 {
|
||||
u32 name;
|
||||
int nlen;
|
||||
u32 oldval;
|
||||
u32 oldlenp;
|
||||
u32 newval;
|
||||
u32 newlen;
|
||||
u32 __unused[4];
|
||||
};
|
||||
|
||||
asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
|
||||
{
|
||||
struct __sysctl_args32 tmp;
|
||||
int error;
|
||||
size_t oldlen;
|
||||
size_t __user *oldlenp = NULL;
|
||||
unsigned long addr = (((unsigned long)&args->__unused[0]) + 7) & ~7;
|
||||
|
||||
if (copy_from_user(&tmp, args, sizeof(tmp)))
|
||||
return -EFAULT;
|
||||
|
||||
if (tmp.oldval && tmp.oldlenp) {
|
||||
/* Duh, this is ugly and might not work if sysctl_args
|
||||
is in read-only memory, but do_sysctl does indirectly
|
||||
a lot of uaccess in both directions and we'd have to
|
||||
basically copy the whole sysctl.c here, and
|
||||
glibc's __sysctl uses rw memory for the structure
|
||||
anyway. */
|
||||
if (get_user(oldlen, (u32 __user *)compat_ptr(tmp.oldlenp)) ||
|
||||
put_user(oldlen, (size_t __user *)addr))
|
||||
return -EFAULT;
|
||||
oldlenp = (size_t __user *)addr;
|
||||
}
|
||||
|
||||
lock_kernel();
|
||||
error = do_sysctl(compat_ptr(tmp.name), tmp.nlen, compat_ptr(tmp.oldval),
|
||||
oldlenp, compat_ptr(tmp.newval), tmp.newlen);
|
||||
unlock_kernel();
|
||||
if (oldlenp) {
|
||||
if (!error) {
|
||||
if (get_user(oldlen, (size_t __user *)addr) ||
|
||||
put_user(oldlen, (u32 __user *)compat_ptr(tmp.oldlenp)))
|
||||
error = -EFAULT;
|
||||
}
|
||||
if (copy_to_user(args->__unused, tmp.__unused,
|
||||
sizeof(tmp.__unused)))
|
||||
error = -EFAULT;
|
||||
}
|
||||
return error;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* 31 bit stat64 structure layout as expected by user space. */
struct stat64_emu31 {
	unsigned long long  st_dev;
	unsigned int    __pad1;
#define STAT64_HAS_BROKEN_ST_INO        1
	u32             __st_ino;	/* truncated 32 bit inode number */
	unsigned int    st_mode;
	unsigned int    st_nlink;
	u32             st_uid;
	u32             st_gid;
	unsigned long long  st_rdev;
	unsigned int    __pad3;
	long            st_size;
	u32             st_blksize;
	unsigned char   __pad4[4];
	u32             __pad5;     /* future possible st_blocks high bits */
	u32             st_blocks;  /* Number 512-byte blocks allocated. */
	u32             st_atime;
	u32             __pad6;
	u32             st_mtime;
	u32             __pad7;
	u32             st_ctime;
	u32             __pad8;     /* will be high 32 bits of ctime someday */
	unsigned long   st_ino;	/* full 64 bit inode number */
};
|
||||
|
||||
/*
 * Fill a stat64_emu31 from a kernel kstat and copy it to user space.
 * Returns 0 on success, -EFAULT if the copy faults.
 */
static int cp_stat64(struct stat64_emu31 __user *ubuf, struct kstat *stat)
{
	struct stat64_emu31 tmp;

	/* Zero the whole struct so no kernel stack data leaks through
	   the padding fields. */
	memset(&tmp, 0, sizeof(tmp));

	tmp.st_dev = huge_encode_dev(stat->dev);
	tmp.st_ino = stat->ino;
	tmp.__st_ino = (u32)stat->ino;
	tmp.st_mode = stat->mode;
	tmp.st_nlink = (unsigned int)stat->nlink;
	tmp.st_uid = stat->uid;
	tmp.st_gid = stat->gid;
	tmp.st_rdev = huge_encode_dev(stat->rdev);
	tmp.st_size = stat->size;
	tmp.st_blksize = (u32)stat->blksize;
	tmp.st_blocks = (u32)stat->blocks;
	/* Only the seconds fit into the old 32 bit time fields. */
	tmp.st_atime = (u32)stat->atime.tv_sec;
	tmp.st_mtime = (u32)stat->mtime.tv_sec;
	tmp.st_ctime = (u32)stat->ctime.tv_sec;

	return copy_to_user(ubuf,&tmp,sizeof(tmp)) ? -EFAULT : 0;
}
|
||||
|
||||
/*
 * 31 bit stat64(): stat the path and convert the result into the
 * compat stat64_emu31 layout.
 */
asmlinkage long sys32_stat64(char __user * filename, struct stat64_emu31 __user * statbuf)
{
	struct kstat stat;
	int ret;

	ret = vfs_stat(filename, &stat);
	if (ret)
		return ret;
	return cp_stat64(statbuf, &stat);
}
|
||||
|
||||
/*
 * 31 bit lstat64(): like sys32_stat64() but does not follow a final
 * symlink.
 */
asmlinkage long sys32_lstat64(char __user * filename, struct stat64_emu31 __user * statbuf)
{
	struct kstat stat;
	int ret;

	ret = vfs_lstat(filename, &stat);
	if (ret)
		return ret;
	return cp_stat64(statbuf, &stat);
}
|
||||
|
||||
/*
 * 31 bit fstat64(): stat an open file descriptor and convert the
 * result into the compat layout.
 */
asmlinkage long sys32_fstat64(unsigned long fd, struct stat64_emu31 __user * statbuf)
{
	struct kstat stat;
	int ret;

	ret = vfs_fstat(fd, &stat);
	if (ret)
		return ret;
	return cp_stat64(statbuf, &stat);
}
|
||||
|
||||
/*
 * 31 bit fstatat64(): directory-relative stat.  Only AT_SYMLINK_NOFOLLOW
 * is a valid flag; it selects the lstat variant.
 */
asmlinkage long sys32_fstatat64(unsigned int dfd, char __user *filename,
				struct stat64_emu31 __user* statbuf, int flag)
{
	struct kstat stat;
	int error;

	if ((flag & ~AT_SYMLINK_NOFOLLOW) != 0)
		return -EINVAL;

	error = (flag & AT_SYMLINK_NOFOLLOW) ?
		vfs_lstat_fd(dfd, filename, &stat) :
		vfs_stat_fd(dfd, filename, &stat);
	if (error)
		return error;
	return cp_stat64(statbuf, &stat);
}
|
||||
|
||||
/*
 * Linux/i386 didn't use to be able to handle more than
 * 4 system call parameters, so these system calls used a memory
 * block for parameter passing..
 */

/* 31 bit argument block for old_mmap()/mmap2(). */
struct mmap_arg_struct_emu31 {
	u32	addr;
	u32	len;
	u32	prot;
	u32	flags;
	u32	fd;
	u32	offset;		/* byte offset (old_mmap) or page offset (mmap2) */
};
|
||||
|
||||
/* common code for old and new mmaps */
|
||||
static inline long do_mmap2(
|
||||
unsigned long addr, unsigned long len,
|
||||
unsigned long prot, unsigned long flags,
|
||||
unsigned long fd, unsigned long pgoff)
|
||||
{
|
||||
struct file * file = NULL;
|
||||
unsigned long error = -EBADF;
|
||||
|
||||
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
|
||||
if (!(flags & MAP_ANONYMOUS)) {
|
||||
file = fget(fd);
|
||||
if (!file)
|
||||
goto out;
|
||||
}
|
||||
|
||||
down_write(¤t->mm->mmap_sem);
|
||||
error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
|
||||
if (!IS_ERR((void *) error) && error + len >= 0x80000000ULL) {
|
||||
/* Result is out of bounds. */
|
||||
do_munmap(current->mm, addr, len);
|
||||
error = -ENOMEM;
|
||||
}
|
||||
up_write(¤t->mm->mmap_sem);
|
||||
|
||||
if (file)
|
||||
fput(file);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
|
||||
asmlinkage unsigned long
|
||||
old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
|
||||
{
|
||||
struct mmap_arg_struct_emu31 a;
|
||||
int error = -EFAULT;
|
||||
|
||||
if (copy_from_user(&a, arg, sizeof(a)))
|
||||
goto out;
|
||||
|
||||
error = -EINVAL;
|
||||
if (a.offset & ~PAGE_MASK)
|
||||
goto out;
|
||||
|
||||
error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset >> PAGE_SHIFT);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
asmlinkage long
|
||||
sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
|
||||
{
|
||||
struct mmap_arg_struct_emu31 a;
|
||||
int error = -EFAULT;
|
||||
|
||||
if (copy_from_user(&a, arg, sizeof(a)))
|
||||
goto out;
|
||||
error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
 * 31 bit read(): reject counts that are negative when interpreted as a
 * 31 bit ssize_t, then forward to the native syscall.
 */
asmlinkage long sys32_read(unsigned int fd, char __user * buf, size_t count)
{
	if ((compat_ssize_t) count < 0)
		return -EINVAL;
	return sys_read(fd, buf, count);
}
|
||||
|
||||
/*
 * 31 bit write(): reject counts that are negative when interpreted as
 * a 31 bit ssize_t, then forward to the native syscall.
 */
asmlinkage long sys32_write(unsigned int fd, char __user * buf, size_t count)
{
	if ((compat_ssize_t) count < 0)
		return -EINVAL;
	return sys_write(fd, buf, count);
}
|
||||
|
||||
asmlinkage long sys32_clone(struct pt_regs regs)
|
||||
{
|
||||
unsigned long clone_flags;
|
||||
unsigned long newsp;
|
||||
int __user *parent_tidptr, *child_tidptr;
|
||||
|
||||
clone_flags = regs.gprs[3] & 0xffffffffUL;
|
||||
newsp = regs.orig_gpr2 & 0x7fffffffUL;
|
||||
parent_tidptr = compat_ptr(regs.gprs[4]);
|
||||
child_tidptr = compat_ptr(regs.gprs[5]);
|
||||
if (!newsp)
|
||||
newsp = regs.gprs[15];
|
||||
return do_fork(clone_flags, newsp, ®s, 0,
|
||||
parent_tidptr, child_tidptr);
|
||||
}
|
||||
|
||||
/*
|
||||
* 31 bit emulation wrapper functions for sys_fadvise64/fadvise64_64.
|
||||
* These need to rewrite the advise values for POSIX_FADV_{DONTNEED,NOREUSE}
|
||||
* because the 31 bit values differ from the 64 bit values.
|
||||
*/
|
||||
|
||||
asmlinkage long
|
||||
sys32_fadvise64(int fd, loff_t offset, size_t len, int advise)
|
||||
{
|
||||
if (advise == 4)
|
||||
advise = POSIX_FADV_DONTNEED;
|
||||
else if (advise == 5)
|
||||
advise = POSIX_FADV_NOREUSE;
|
||||
return sys_fadvise64(fd, offset, len, advise);
|
||||
}
|
||||
|
||||
/* 31 bit argument block for fadvise64_64(); too many words for
   registers, so user space passes a pointer to this struct. */
struct fadvise64_64_args {
	int fd;
	long long offset;
	long long len;
	int advice;
};
|
||||
|
||||
asmlinkage long
|
||||
sys32_fadvise64_64(struct fadvise64_64_args __user *args)
|
||||
{
|
||||
struct fadvise64_64_args a;
|
||||
|
||||
if ( copy_from_user(&a, args, sizeof(a)) )
|
||||
return -EFAULT;
|
||||
if (a.advice == 4)
|
||||
a.advice = POSIX_FADV_DONTNEED;
|
||||
else if (a.advice == 5)
|
||||
a.advice = POSIX_FADV_NOREUSE;
|
||||
return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
|
||||
}
|
||||
165
arch/s390/kernel/compat_linux.h
Normal file
165
arch/s390/kernel/compat_linux.h
Normal file
@@ -0,0 +1,165 @@
|
||||
#ifndef _ASM_S390X_S390_H
#define _ASM_S390X_S390_H

#include <linux/compat.h>
#include <linux/socket.h>
#include <linux/syscalls.h>
#include <linux/nfs_fs.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/export.h>

/* Macro that masks the high order bit of an 32 bit pointer and converts it*/
/* to a 64 bit pointer */
#define A(__x) ((unsigned long)((__x) & 0x7FFFFFFFUL))
/* Plain widening conversion without masking the high bit. */
#define AA(__x)				\
	((unsigned long)(__x))

/* Now 32bit compatibility types */
struct ipc_kludge_32 {
	__u32	msgp;	/* pointer */
	__s32	msgtyp;
};

/* 31 bit layout of the pre-POSIX.1b sigaction structure. */
struct old_sigaction32 {
	__u32	sa_handler;	/* Really a pointer, but need to deal
				   with 32 bits */
	compat_old_sigset_t sa_mask; /* A 32 bit mask */
	__u32	sa_flags;
	__u32	sa_restorer;	/* Another 32 bit pointer */
};

/* 31 bit siginfo layout; mirrors the 64 bit siginfo_t field for field
   but with all pointers and longs shrunk to 32 bits. */
typedef struct compat_siginfo {
	int	si_signo;
	int	si_errno;
	int	si_code;

	union {
		int _pad[((128/sizeof(int)) - 3)];

		/* kill() */
		struct {
			pid_t	_pid;	/* sender's pid */
			uid_t	_uid;	/* sender's uid */
		} _kill;

		/* POSIX.1b timers */
		struct {
			compat_timer_t _tid;		/* timer id */
			int _overrun;			/* overrun count */
			compat_sigval_t _sigval;	/* same as below */
			int _sys_private;	/* not to be passed to user */
		} _timer;

		/* POSIX.1b signals */
		struct {
			pid_t	_pid;	/* sender's pid */
			uid_t	_uid;	/* sender's uid */
			compat_sigval_t _sigval;
		} _rt;

		/* SIGCHLD */
		struct {
			pid_t	_pid;	/* which child */
			uid_t	_uid;	/* sender's uid */
			int	_status;/* exit code */
			compat_clock_t _utime;
			compat_clock_t _stime;
		} _sigchld;

		/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
		struct {
			__u32	_addr;	/* faulting insn/memory ref. - pointer */
		} _sigfault;

		/* SIGPOLL */
		struct {
			int	_band;	/* POLL_IN, POLL_OUT, POLL_MSG */
			int	_fd;
		} _sigpoll;
	} _sifields;
} compat_siginfo_t;

/*
 * How these fields are to be accessed.
 */
#define si_pid		_sifields._kill._pid
#define si_uid		_sifields._kill._uid
#define si_status	_sifields._sigchld._status
#define si_utime	_sifields._sigchld._utime
#define si_stime	_sifields._sigchld._stime
#define si_value	_sifields._rt._sigval
#define si_int		_sifields._rt._sigval.sival_int
#define si_ptr		_sifields._rt._sigval.sival_ptr
#define si_addr		_sifields._sigfault._addr
#define si_band		_sifields._sigpoll._band
#define si_fd		_sifields._sigpoll._fd
#define si_tid		_sifields._timer._tid
#define si_overrun	_sifields._timer._overrun

/* asm/sigcontext.h */
/* A 31 bit floating point register holds either a double or a float. */
typedef union
{
	__u64	d;
	__u32	f;
} freg_t32;

typedef struct
{
	unsigned int	fpc;
	freg_t32	fprs[__NUM_FPRS];
} _s390_fp_regs32;

/* 31 bit program status word. */
typedef struct
{
	__u32	mask;
	__u32	addr;
} _psw_t32 __attribute__ ((aligned(8)));

typedef struct
{
	_psw_t32	psw;
	__u32		gprs[__NUM_GPRS];
	__u32		acrs[__NUM_ACRS];
} _s390_regs_common32;

typedef struct
{
	_s390_regs_common32 regs;
	_s390_fp_regs32     fpregs;
} _sigregs32;

#define _SIGCONTEXT_NSIG32	64
#define _SIGCONTEXT_NSIG_BPW32	32
#define __SIGNAL_FRAMESIZE32	96
#define _SIGMASK_COPY_SIZE32	(sizeof(u32)*2)

struct sigcontext32
{
	__u32	oldmask[_COMPAT_NSIG_WORDS];
	__u32	sregs;	/* pointer */
};

/* asm/signal.h */
struct sigaction32 {
	__u32		sa_handler;	/* pointer */
	__u32		sa_flags;
	__u32		sa_restorer;	/* pointer */
	compat_sigset_t	sa_mask;	/* mask last for extensibility */
};

typedef struct {
	__u32		ss_sp;		/* pointer */
	int		ss_flags;
	compat_size_t	ss_size;
} stack_t32;

/* asm/ucontext.h */
struct ucontext32 {
	__u32		uc_flags;
	__u32		uc_link;	/* pointer */
	stack_t32	uc_stack;
	_sigregs32	uc_mcontext;
	compat_sigset_t	uc_sigmask;	/* mask last for extensibility */
};

#endif /* _ASM_S390X_S390_H */
|
||||
83
arch/s390/kernel/compat_ptrace.h
Normal file
83
arch/s390/kernel/compat_ptrace.h
Normal file
@@ -0,0 +1,83 @@
|
||||
#ifndef _PTRACE32_H
#define _PTRACE32_H

#include "compat_linux.h"  /* needed for _psw_t32 */

/* 31 bit view of the PER (program event recording) control registers. */
typedef struct {
	__u32 cr[3];
} per_cr_words32;

/* 31 bit view of the PER fields in the lowcore. */
typedef struct {
	__u16	perc_atmid;	/* 0x096 */
	__u32	address;	/* 0x098 */
	__u8	access_id;	/* 0x0a1 */
} per_lowcore_words32;

/* 31 bit view of the per-task PER state exported via ptrace. */
typedef struct {
	union {
		per_cr_words32	words;
	} control_regs;
	/*
	 * Use these flags instead of setting em_instruction_fetch
	 * directly they are used so that single stepping can be
	 * switched on & off while not affecting other tracing
	 */
	unsigned	single_step	: 1;
	unsigned	instruction_fetch	: 1;
	unsigned	: 30;
	/*
	 * These addresses are copied into cr10 & cr11 if single
	 * stepping is switched off
	 */
	__u32	starting_addr;
	__u32	ending_addr;
	union {
		per_lowcore_words32 words;
	} lowcore;
} per_struct32;

/* 31 bit register set as seen by ptrace/gdb. */
struct user_regs_struct32
{
	_psw_t32 psw;
	u32 gprs[NUM_GPRS];
	u32 acrs[NUM_ACRS];
	u32 orig_gpr2;
	s390_fp_regs fp_regs;
	/*
	 * These per registers are in here so that gdb can modify them
	 * itself as there is no "official" ptrace interface for hardware
	 * watchpoints. This is the way intel does it.
	 */
	per_struct32 per_info;
	u32  ieee_instruction_pointer;
	/* Used to give failing instruction back to user for ieee exceptions */
};

/* 31 bit core dump "user" area. */
struct user32 {
	/* We start with the registers, to mimic the way that "memory"
	   is returned from the ptrace(3,...) function.  */
	struct user_regs_struct32 regs; /* Where the registers are actually stored */
	/* The rest of this junk is to help gdb figure out what goes where */
	u32 u_tsize;		/* Text segment size (pages). */
	u32 u_dsize;	        /* Data segment size (pages). */
	u32 u_ssize;	        /* Stack segment size (pages). */
	u32 start_code;         /* Starting virtual address of text. */
	u32 start_stack;	/* Starting virtual address of stack area.
				   This is actually the bottom of the stack,
				   the top of the stack is always found in the
				   esp register. */
	s32 signal;     	/* Signal that caused the core dump. */
	u32 u_ar0;		/* Used by gdb to help find the values for */
				/* the registers. */
	u32 magic;		/* To uniquely identify a core file */
	char u_comm[32];	/* User command that was responsible */
};

/* 31 bit descriptor for PTRACE_PEEKUSR_AREA / PTRACE_POKEUSR_AREA. */
typedef struct
{
	__u32 len;
	__u32 kernel_addr;
	__u32 process_addr;
} ptrace_area_emu31;

#endif /* _PTRACE32_H */
|
||||
585
arch/s390/kernel/compat_signal.c
Normal file
585
arch/s390/kernel/compat_signal.c
Normal file
@@ -0,0 +1,585 @@
|
||||
/*
|
||||
* arch/s390/kernel/compat_signal.c
|
||||
*
|
||||
* Copyright (C) IBM Corp. 2000,2006
|
||||
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
|
||||
* Gerhard Tonn (ton@de.ibm.com)
|
||||
*
|
||||
* Copyright (C) 1991, 1992 Linus Torvalds
|
||||
*
|
||||
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
|
||||
*/
|
||||
|
||||
#include <linux/compat.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/unistd.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/binfmts.h>
|
||||
#include <asm/ucontext.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/lowcore.h>
|
||||
#include "compat_linux.h"
|
||||
#include "compat_ptrace.h"
|
||||
|
||||
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

/* 31 bit non-RT signal frame as laid out on the user stack. */
typedef struct
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
	struct sigcontext32 sc;
	_sigregs32 sregs;
	int signo;
	__u8 retcode[S390_SYSCALL_SIZE];
} sigframe32;

/* 31 bit RT signal frame as laid out on the user stack. */
typedef struct
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
	__u8 retcode[S390_SYSCALL_SIZE];
	compat_siginfo_t info;
	struct ucontext32 uc;
} rt_sigframe32;
|
||||
|
||||
/*
 * Convert a 64 bit siginfo_t to the 31 bit compat layout and copy it
 * to user space.  Only the 3 generic ints plus the union member
 * selected by si_code are copied, so no kernel padding leaks out.
 * Returns 0 on success, -EFAULT (accumulated in err) on fault.
 */
int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
{
	int err;

	if (!access_ok (VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
		return -EFAULT;

	/* If you change siginfo_t structure, please be sure
	   this code is fixed accordingly.
	   It should never copy any pad contained in the structure
	   to avoid security leaks, but must copy the generic
	   3 ints plus the relevant union member.
	   This routine must convert siginfo from 64bit to 32bit as well
	   at the same time. */
	err = __put_user(from->si_signo, &to->si_signo);
	err |= __put_user(from->si_errno, &to->si_errno);
	err |= __put_user((short)from->si_code, &to->si_code);
	if (from->si_code < 0)
		err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		/* The high half of si_code identifies the union member. */
		switch (from->si_code >> 16) {
		case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
		case __SI_MESGQ >> 16:
			err |= __put_user(from->si_int, &to->si_int);
			/* fallthrough */
		case __SI_KILL >> 16:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			break;
		case __SI_CHLD >> 16:
			err |= __put_user(from->si_pid, &to->si_pid);
			err |= __put_user(from->si_uid, &to->si_uid);
			err |= __put_user(from->si_utime, &to->si_utime);
			err |= __put_user(from->si_stime, &to->si_stime);
			err |= __put_user(from->si_status, &to->si_status);
			break;
		case __SI_FAULT >> 16:
			/* The fault address is truncated to the 31 bit field. */
			err |= __put_user((unsigned long) from->si_addr,
					  &to->si_addr);
			break;
		case __SI_POLL >> 16:
			err |= __put_user(from->si_band, &to->si_band);
			err |= __put_user(from->si_fd, &to->si_fd);
			break;
		case __SI_TIMER >> 16:
			err |= __put_user(from->si_tid, &to->si_tid);
			err |= __put_user(from->si_overrun, &to->si_overrun);
			err |= __put_user(from->si_int, &to->si_int);
			break;
		default:
			break;
		}
	}
	return err;
}
|
||||
|
||||
/*
 * Convert a 31 bit compat siginfo from user space into a 64 bit
 * siginfo_t, widening the union member selected by si_code.
 * Returns 0 on success, -EFAULT (accumulated in err) on fault.
 */
int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
{
	int err;
	u32 tmp;

	if (!access_ok (VERIFY_READ, from, sizeof(compat_siginfo_t)))
		return -EFAULT;

	err = __get_user(to->si_signo, &from->si_signo);
	err |= __get_user(to->si_errno, &from->si_errno);
	err |= __get_user(to->si_code, &from->si_code);

	if (to->si_code < 0)
		err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
	else {
		/* The high half of si_code identifies the union member. */
		switch (to->si_code >> 16) {
		case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
		case __SI_MESGQ >> 16:
			err |= __get_user(to->si_int, &from->si_int);
			/* fallthrough */
		case __SI_KILL >> 16:
			err |= __get_user(to->si_pid, &from->si_pid);
			err |= __get_user(to->si_uid, &from->si_uid);
			break;
		case __SI_CHLD >> 16:
			err |= __get_user(to->si_pid, &from->si_pid);
			err |= __get_user(to->si_uid, &from->si_uid);
			err |= __get_user(to->si_utime, &from->si_utime);
			err |= __get_user(to->si_stime, &from->si_stime);
			err |= __get_user(to->si_status, &from->si_status);
			break;
		case __SI_FAULT >> 16:
			/* Widen the 31 bit address, stripping PSW flag bits. */
			err |= __get_user(tmp, &from->si_addr);
			to->si_addr = (void __user *)(u64) (tmp & PSW32_ADDR_INSN);
			break;
		case __SI_POLL >> 16:
			err |= __get_user(to->si_band, &from->si_band);
			err |= __get_user(to->si_fd, &from->si_fd);
			break;
		case __SI_TIMER >> 16:
			err |= __get_user(to->si_tid, &from->si_tid);
			err |= __get_user(to->si_overrun, &from->si_overrun);
			err |= __get_user(to->si_int, &from->si_int);
			break;
		default:
			break;
		}
	}
	return err;
}
|
||||
|
||||
/*
 * 31 bit version of the old sigaction() call: convert between
 * struct old_sigaction32 and the kernel's k_sigaction, widening the
 * 31 bit handler/restorer pointers on the way in and narrowing them
 * on the way out.
 */
asmlinkage long
sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
		 struct old_sigaction32 __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	unsigned long sa_handler, sa_restorer;
	int ret;

	if (act) {
		compat_old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(sa_handler, &act->sa_handler) ||
		    __get_user(sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
		new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
		new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		sa_handler = (unsigned long) old_ka.sa.sa_handler;
		sa_restorer = (unsigned long) old_ka.sa.sa_restorer;
		/* Only the first 32 bit word of the mask fits the old ABI. */
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(sa_handler, &oact->sa_handler) ||
		    __put_user(sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
|
||||
|
||||
/*
 * 31-bit compat entry point for rt_sigaction.  The compat sigset stores
 * the mask as 32-bit words; pairs of words are stitched together into the
 * native 64-bit sig[] words on input and split back apart on output.
 */
asmlinkage long
sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
		   struct sigaction32 __user *oact, size_t sigsetsize)
{
	struct k_sigaction new_ka, old_ka;
	unsigned long sa_handler;
	int ret;
	compat_sigset_t set32;

	/* XXX: Don't preclude handling different sized sigset_t's. */
	if (sigsetsize != sizeof(compat_sigset_t))
		return -EINVAL;

	if (act) {
		ret = get_user(sa_handler, &act->sa_handler);
		ret |= __copy_from_user(&set32, &act->sa_mask,
					sizeof(compat_sigset_t));
		/* Merge 32-bit word pairs; cases intentionally fall through. */
		switch (_NSIG_WORDS) {
		case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6]
				| (((long)set32.sig[7]) << 32);
			/* fallthrough */
		case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4]
				| (((long)set32.sig[5]) << 32);
			/* fallthrough */
		case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2]
				| (((long)set32.sig[3]) << 32);
			/* fallthrough */
		case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0]
				| (((long)set32.sig[1]) << 32);
		}
		ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);

		if (ret)
			return -EFAULT;
		new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		/* Split 64-bit mask words back into 32-bit halves. */
		switch (_NSIG_WORDS) {
		case 4:
			set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32);
			set32.sig[6] = old_ka.sa.sa_mask.sig[3];
			/* fallthrough */
		case 3:
			set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32);
			set32.sig[4] = old_ka.sa.sa_mask.sig[2];
			/* fallthrough */
		case 2:
			set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32);
			set32.sig[2] = old_ka.sa.sa_mask.sig[1];
			/* fallthrough */
		case 1:
			set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32);
			set32.sig[0] = old_ka.sa.sa_mask.sig[0];
		}
		ret = put_user((unsigned long)old_ka.sa.sa_handler, &oact->sa_handler);
		ret |= __copy_to_user(&oact->sa_mask, &set32,
				      sizeof(compat_sigset_t));
		ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
	}

	return ret;
}
|
||||
|
||||
/*
 * 31-bit compat entry point for sigaltstack.  Converts the 32-bit stack_t
 * to the native layout, then calls do_sigaltstack() under KERNEL_DS since
 * the converted structures live on the kernel stack, not in user space.
 */
asmlinkage long
sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
		  struct pt_regs *regs)
{
	stack_t kss, koss;
	unsigned long ss_sp;
	int ret, err = 0;
	mm_segment_t old_fs = get_fs();

	if (uss) {
		if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
			return -EFAULT;
		err |= __get_user(ss_sp, &uss->ss_sp);
		err |= __get_user(kss.ss_size, &uss->ss_size);
		err |= __get_user(kss.ss_flags, &uss->ss_flags);
		if (err)
			return -EFAULT;
		kss.ss_sp = (void __user *) ss_sp;
	}

	/* Temporarily lift the user-space limit: kss/koss are kernel addresses. */
	set_fs (KERNEL_DS);
	ret = do_sigaltstack((stack_t __force __user *) (uss ? &kss : NULL),
			     (stack_t __force __user *) (uoss ? &koss : NULL),
			     regs->gprs[15]);
	set_fs (old_fs);

	if (!ret && uoss) {
		if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
			return -EFAULT;
		ss_sp = (unsigned long) koss.ss_sp;
		err |= __put_user(ss_sp, &uoss->ss_sp);
		err |= __put_user(koss.ss_size, &uoss->ss_size);
		err |= __put_user(koss.ss_flags, &uoss->ss_flags);
		if (err)
			return -EFAULT;
	}
	return ret;
}
|
||||
|
||||
/*
 * Store the current register state into a 32-bit user-space signal frame.
 * PSW mask/address and GPRs are narrowed to 32 bits; access and FP
 * registers are copied verbatim.  Returns non-zero on fault.
 */
static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
{
	_s390_regs_common32 regs32;
	int err, i;

	regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits,
					   (__u32)(regs->psw.mask >> 32));
	regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
	for (i = 0; i < NUM_GPRS; i++)
		regs32.gprs[i] = (__u32) regs->gprs[i];
	/* Flush access registers into the thread structure before copying. */
	save_access_regs(current->thread.acrs);
	memcpy(regs32.acrs, current->thread.acrs, sizeof(regs32.acrs));
	err = __copy_to_user(&sregs->regs, &regs32, sizeof(regs32));
	if (err)
		return err;
	save_fp_regs(&current->thread.fp_regs);
	/* s390_fp_regs and _s390_fp_regs32 are the same ! */
	return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
			      sizeof(_s390_fp_regs32));
}
|
||||
|
||||
/*
 * Restore register state from a 32-bit user-space signal frame.
 * Inverse of save_sigregs32(): widens the saved 32-bit PSW and GPR
 * values back to 64 bits.  Returns non-zero on fault.
 */
static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs)
{
	_s390_regs_common32 regs32;
	int err, i;

	/* Always make any pending restarted system call return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err = __copy_from_user(&regs32, &sregs->regs, sizeof(regs32));
	if (err)
		return err;
	regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask,
					(__u64)regs32.psw.mask << 32);
	/* Strip the 31-bit AMODE bit from the saved instruction address. */
	regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN);
	for (i = 0; i < NUM_GPRS; i++)
		regs->gprs[i] = (__u64) regs32.gprs[i];
	memcpy(current->thread.acrs, regs32.acrs, sizeof(current->thread.acrs));
	restore_access_regs(current->thread.acrs);

	err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
			       sizeof(_s390_fp_regs32));
	/* Mask out invalid FPC bits user space may have written. */
	current->thread.fp_regs.fpc &= FPC_VALID_MASK;
	if (err)
		return err;

	restore_fp_regs(&current->thread.fp_regs);
	regs->trap = -1;	/* disable syscall checks */
	return 0;
}
|
||||
|
||||
/*
 * 31-bit compat sigreturn: tear down the signal frame that setup_frame32()
 * placed on the user stack (gpr 15), restore the blocked mask and the
 * saved registers, and resume the interrupted context.
 */
asmlinkage long sys32_sigreturn(struct pt_regs *regs)
{
	sigframe32 __user *frame = (sigframe32 __user *)regs->gprs[15];
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
		goto badframe;

	/* SIGKILL/SIGSTOP can never be blocked. */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigregs32(regs, &frame->sregs))
		goto badframe;

	/* Return the restored gpr 2 so it is not clobbered by the syscall exit. */
	return regs->gprs[2];

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
|
||||
|
||||
/*
 * 31-bit compat rt_sigreturn: like sys32_sigreturn() but for RT frames,
 * which additionally carry a ucontext with the full sigset and an
 * alternate-stack descriptor that must be re-installed.
 */
asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
{
	rt_sigframe32 __user *frame = (rt_sigframe32 __user *)regs->gprs[15];
	sigset_t set;
	stack_t st;
	__u32 ss_sp;
	int err;
	mm_segment_t old_fs = get_fs();

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	/* SIGKILL/SIGSTOP can never be blocked. */
	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
		goto badframe;

	/* Rebuild the native stack_t from the 32-bit ucontext fields. */
	err = __get_user(ss_sp, &frame->uc.uc_stack.ss_sp);
	st.ss_sp = compat_ptr(ss_sp);
	err |= __get_user(st.ss_size, &frame->uc.uc_stack.ss_size);
	err |= __get_user(st.ss_flags, &frame->uc.uc_stack.ss_flags);
	if (err)
		goto badframe;

	/* st is a kernel address, hence the temporary KERNEL_DS. */
	set_fs (KERNEL_DS);
	do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
	set_fs (old_fs);

	return regs->gprs[2];

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
|
||||
|
||||
/*
|
||||
* Set up a signal frame.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* Determine which stack to use..
|
||||
*/
|
||||
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = (unsigned long) A(regs->gprs[15]);

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		/* Only switch if we are not already on the alternate stack. */
		if (! sas_ss_flags(sp))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}

	/* This is the legacy signal stack switching. */
	else if (!user_mode(regs) &&
		 !(ka->sa.sa_flags & SA_RESTORER) &&
		 ka->sa.sa_restorer) {
		sp = (unsigned long) ka->sa.sa_restorer;
	}

	/* Frame grows downward; keep the result 8-byte aligned. */
	return (void __user *)((sp - frame_size) & -8ul);
}
|
||||
|
||||
static inline int map_signal(int sig)
|
||||
{
|
||||
if (current_thread_info()->exec_domain
|
||||
&& current_thread_info()->exec_domain->signal_invmap
|
||||
&& sig < 32)
|
||||
return current_thread_info()->exec_domain->signal_invmap[sig];
|
||||
else
|
||||
return sig;
|
||||
}
|
||||
|
||||
/*
 * Build an old-style 31-bit signal frame on the user stack and redirect
 * the PSW to the signal handler.  On any fault the task is killed with
 * SIGSEGV and -EFAULT is returned.
 */
static int setup_frame32(int sig, struct k_sigaction *ka,
			 sigset_t *set, struct pt_regs * regs)
{
	sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(sigframe32));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe32)))
		goto give_sigsegv;

	if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE32))
		goto give_sigsegv;

	if (save_sigregs32(regs, &frame->sregs))
		goto give_sigsegv;
	if (__put_user((unsigned long) &frame->sregs, &frame->sc.sregs))
		goto give_sigsegv;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->gprs[14] = (__u64) ka->sa.sa_restorer;
	} else {
		/* No restorer: write a svc sigreturn stub into the frame. */
		regs->gprs[14] = (__u64) frame->retcode;
		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
			       (u16 __user *)(frame->retcode)))
			goto give_sigsegv;
	}

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->gprs[15] = (__u64) frame;
	regs->psw.addr = (__u64) ka->sa.sa_handler;

	regs->gprs[2] = map_signal(sig);
	regs->gprs[3] = (__u64) &frame->sc;

	/* We forgot to include these in the sigcontext.
	   To avoid breaking binary compatibility, they are passed as args. */
	regs->gprs[4] = current->thread.trap_no;
	regs->gprs[5] = current->thread.prot_addr;

	/* Place signal number on stack to allow backtrace from handler.  */
	if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
		goto give_sigsegv;
	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
|
||||
|
||||
/*
 * Build an RT (SA_SIGINFO) 31-bit signal frame: siginfo plus a full
 * ucontext on the user stack.  On any fault the task is killed with
 * SIGSEGV and -EFAULT is returned.
 */
static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info,
			    sigset_t *set, struct pt_regs * regs)
{
	int err = 0;
	rt_sigframe32 __user *frame = get_sigframe(ka, regs, sizeof(rt_sigframe32));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe32)))
		goto give_sigsegv;

	if (copy_siginfo_to_user32(&frame->info, info))
		goto give_sigsegv;

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->gprs[15]),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= save_sigregs32(regs, &frame->uc.uc_mcontext);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->gprs[14] = (__u64) ka->sa.sa_restorer;
	} else {
		/* No restorer: write a svc rt_sigreturn stub into the frame. */
		regs->gprs[14] = (__u64) frame->retcode;
		err |= __put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
				  (u16 __user *)(frame->retcode));
	}

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (unsigned int __user *) frame))
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->gprs[15] = (__u64) frame;
	regs->psw.addr = (__u64) ka->sa.sa_handler;

	regs->gprs[2] = map_signal(sig);
	regs->gprs[3] = (__u64) &frame->info;
	regs->gprs[4] = (__u64) &frame->uc;
	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
|
||||
|
||||
/*
|
||||
* OK, we're invoking a handler
|
||||
*/
|
||||
|
||||
/*
 * Deliver one signal to a 31-bit compat task: pick the RT or old-style
 * frame builder, and on success update the blocked mask per sa_mask and
 * SA_NODEFER under the sighand lock.
 */
int
handle_signal32(unsigned long sig, struct k_sigaction *ka,
		siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
{
	int ret;

	/* Set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame32(sig, ka, info, oldset, regs);
	else
		ret = setup_frame32(sig, ka, oldset, regs);

	if (ret == 0) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		if (!(ka->sa.sa_flags & SA_NODEFER))
			sigaddset(&current->blocked,sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}
	return ret;
}
|
||||
|
||||
1684
arch/s390/kernel/compat_wrapper.S
Normal file
1684
arch/s390/kernel/compat_wrapper.S
Normal file
File diff suppressed because it is too large
Load Diff
113
arch/s390/kernel/cpcmd.c
Normal file
113
arch/s390/kernel/cpcmd.c
Normal file
@@ -0,0 +1,113 @@
|
||||
/*
|
||||
* arch/s390/kernel/cpcmd.c
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 1999,2005 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
|
||||
* Christian Borntraeger (cborntra@de.ibm.com),
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/string.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include <asm/cpcmd.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/io.h>
|
||||
|
||||
static DEFINE_SPINLOCK(cpcmd_lock);
|
||||
static char cpcmd_buf[241];
|
||||
|
||||
/*
|
||||
* __cpcmd has some restrictions over cpcmd
|
||||
* - the response buffer must reside below 2GB (if any)
|
||||
* - __cpcmd is unlocked and therefore not SMP-safe
|
||||
*/
|
||||
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	unsigned cmdlen;
	int return_code, return_len;

	cmdlen = strlen(cmd);
	BUG_ON(cmdlen > 240);
	/* Stage the command in the shared static buffer and convert to EBCDIC. */
	memcpy(cpcmd_buf, cmd, cmdlen);
	ASCEBC(cpcmd_buf, cmdlen);

	if (response != NULL && rlen > 0) {
		register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
		register unsigned long reg3 asm ("3") = (addr_t) response;
		/* Bit 0x40000000 requests a response buffer from CP. */
		register unsigned long reg4 asm ("4") = cmdlen | 0x40000000L;
		register unsigned long reg5 asm ("5") = rlen;

		memset(response, 0, rlen);
		/* diag 8 must run in 31-bit mode; sam31/sam64 switch around it. */
		asm volatile(
#ifndef CONFIG_64BIT
			"	diag	%2,%0,0x8\n"
			"	brc	8,1f\n"
			"	ar	%1,%4\n"
#else /* CONFIG_64BIT */
			"	sam31\n"
			"	diag	%2,%0,0x8\n"
			"	sam64\n"
			"	brc	8,1f\n"
			"	agr	%1,%4\n"
#endif /* CONFIG_64BIT */
			"1:\n"
			: "+d" (reg4), "+d" (reg5)
			: "d" (reg2), "d" (reg3), "d" (rlen) : "cc");
		return_code = (int) reg4;
		return_len = (int) reg5;
		EBCASC(response, rlen);
	} else {
		/* Command only, no response buffer. */
		register unsigned long reg2 asm ("2") = (addr_t) cpcmd_buf;
		register unsigned long reg3 asm ("3") = cmdlen;
		return_len = 0;
		asm volatile(
#ifndef CONFIG_64BIT
			"	diag	%1,%0,0x8\n"
#else /* CONFIG_64BIT */
			"	sam31\n"
			"	diag	%1,%0,0x8\n"
			"	sam64\n"
#endif /* CONFIG_64BIT */
			: "+d" (reg3) : "d" (reg2) : "cc");
		return_code = (int) reg3;
	}
	if (response_code != NULL)
		*response_code = return_code;
	return return_len;
}
|
||||
|
||||
EXPORT_SYMBOL(__cpcmd);
|
||||
|
||||
/*
 * Locked, SMP-safe wrapper around __cpcmd().  If the caller's response
 * buffer is not physically contiguous below 2GB (a diag 8 requirement),
 * the response is bounced through a temporary GFP_DMA buffer.
 */
int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	char *lowbuf;
	int len;
	unsigned long flags;

	if ((virt_to_phys(response) != (unsigned long) response) ||
			(((unsigned long)response + rlen) >> 31)) {
		lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
		if (!lowbuf) {
			printk(KERN_WARNING
				"cpcmd: could not allocate response buffer\n");
			return -ENOMEM;
		}
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, lowbuf, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
		memcpy(response, lowbuf, rlen);
		kfree(lowbuf);
	} else {
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, response, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
	}
	return len;
}
|
||||
|
||||
EXPORT_SYMBOL(cpcmd);
|
||||
16
arch/s390/kernel/crash.c
Normal file
16
arch/s390/kernel/crash.c
Normal file
@@ -0,0 +1,16 @@
|
||||
/*
|
||||
* arch/s390/kernel/crash.c
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2005
|
||||
*
|
||||
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/threads.h>
|
||||
#include <linux/kexec.h>
|
||||
#include <linux/reboot.h>
|
||||
|
||||
/*
 * Intentionally empty: s390 has no architecture-specific work to do
 * before jumping into the kdump/kexec crash kernel, but the generic
 * kexec code requires this hook to exist.
 */
void machine_crash_shutdown(struct pt_regs *regs)
{
}
|
||||
1539
arch/s390/kernel/debug.c
Normal file
1539
arch/s390/kernel/debug.c
Normal file
File diff suppressed because it is too large
Load Diff
313
arch/s390/kernel/early.c
Normal file
313
arch/s390/kernel/early.c
Normal file
@@ -0,0 +1,313 @@
|
||||
/*
|
||||
* arch/s390/kernel/early.c
|
||||
*
|
||||
* Copyright IBM Corp. 2007
|
||||
* Author(s): Hongjie Yang <hongjie@us.ibm.com>,
|
||||
* Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/lockdep.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/pfn.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/ipl.h>
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/cpcmd.h>
|
||||
#include <asm/sclp.h>
|
||||
|
||||
/*
|
||||
* Create a Kernel NSS if the SAVESYS= parameter is defined
|
||||
*/
|
||||
#define DEFSYS_CMD_SIZE 96
|
||||
#define SAVESYS_CMD_SIZE 32
|
||||
|
||||
char kernel_nss_name[NSS_NAME_SIZE + 1];
|
||||
|
||||
#ifdef CONFIG_SHARED_KERNEL
|
||||
static noinline __init void create_kernel_nss(void)
|
||||
{
|
||||
unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
unsigned int sinitrd_pfn, einitrd_pfn;
|
||||
#endif
|
||||
int response;
|
||||
char *savesys_ptr;
|
||||
char upper_command_line[COMMAND_LINE_SIZE];
|
||||
char defsys_cmd[DEFSYS_CMD_SIZE];
|
||||
char savesys_cmd[SAVESYS_CMD_SIZE];
|
||||
|
||||
/* Do nothing if we are not running under VM */
|
||||
if (!MACHINE_IS_VM)
|
||||
return;
|
||||
|
||||
/* Convert COMMAND_LINE to upper case */
|
||||
for (i = 0; i < strlen(COMMAND_LINE); i++)
|
||||
upper_command_line[i] = toupper(COMMAND_LINE[i]);
|
||||
|
||||
savesys_ptr = strstr(upper_command_line, "SAVESYS=");
|
||||
|
||||
if (!savesys_ptr)
|
||||
return;
|
||||
|
||||
savesys_ptr += 8; /* Point to the beginning of the NSS name */
|
||||
for (i = 0; i < NSS_NAME_SIZE; i++) {
|
||||
if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
|
||||
break;
|
||||
kernel_nss_name[i] = savesys_ptr[i];
|
||||
}
|
||||
|
||||
stext_pfn = PFN_DOWN(__pa(&_stext));
|
||||
eshared_pfn = PFN_DOWN(__pa(&_eshared));
|
||||
end_pfn = PFN_UP(__pa(&_end));
|
||||
min_size = end_pfn << 2;
|
||||
|
||||
sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
|
||||
kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
|
||||
eshared_pfn, end_pfn);
|
||||
|
||||
#ifdef CONFIG_BLK_DEV_INITRD
|
||||
if (INITRD_START && INITRD_SIZE) {
|
||||
sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
|
||||
einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
|
||||
min_size = einitrd_pfn << 2;
|
||||
sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
|
||||
sinitrd_pfn, einitrd_pfn);
|
||||
}
|
||||
#endif
|
||||
|
||||
sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
|
||||
sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
|
||||
kernel_nss_name, kernel_nss_name);
|
||||
|
||||
__cpcmd(defsys_cmd, NULL, 0, &response);
|
||||
|
||||
if (response != 0)
|
||||
return;
|
||||
|
||||
__cpcmd(savesys_cmd, NULL, 0, &response);
|
||||
|
||||
if (response != strlen(savesys_cmd))
|
||||
return;
|
||||
|
||||
ipl_flags = IPL_NSS_VALID;
|
||||
}
|
||||
|
||||
#else /* CONFIG_SHARED_KERNEL */
|
||||
|
||||
static inline void create_kernel_nss(void) { }
|
||||
|
||||
#endif /* CONFIG_SHARED_KERNEL */
|
||||
|
||||
/*
|
||||
* Clear bss memory
|
||||
*/
|
||||
/* Zero the kernel's .bss; must run before any .bss variable is used. */
static noinline __init void clear_bss_section(void)
{
	memset(__bss_start, 0, __bss_stop - __bss_start);
}
|
||||
|
||||
/*
|
||||
* Initialize storage key for kernel pages
|
||||
*/
|
||||
/*
 * Initialize storage key for kernel pages: set the default storage key
 * on every page from 0 up to the end of the kernel image.
 */
static noinline __init void init_kernel_storage_key(void)
{
	unsigned long end_pfn, init_pfn;

	end_pfn = PFN_UP(__pa(&_end));

	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
		page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
}
|
||||
|
||||
/*
 * Detect the machine we run on from the CPU id and record it in
 * machine_flags (bit 0: z/VM guest; bit 2: P/390 — values match the
 * MACHINE_IS_* test macros; confirm against setup.h).
 */
static noinline __init void detect_machine_type(void)
{
	struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;

	get_cpu_id(&S390_lowcore.cpu_data.cpu_id);

	/* Running under z/VM ? */
	if (cpuinfo->cpu_id.version == 0xff)
		machine_flags |= 1;

	/* Running on a P/390 ? */
	if (cpuinfo->cpu_id.machine == 0x7490)
		machine_flags |= 4;
}
|
||||
|
||||
#ifdef CONFIG_64BIT
/*
 * Try to detect the memory size with a single diag 0x260 call instead of
 * probing with tprot.  Returns 0 on success (memory_chunk[0] filled in),
 * -ENOSYS if the diag is unavailable, failed, or we IPLed from an NSS.
 */
static noinline __init int memory_fast_detect(void)
{
	unsigned long val0 = 0;
	unsigned long val1 = 0xc;
	int ret = -ENOSYS;

	/* NSS layout is non-contiguous; fall back to chunk probing. */
	if (ipl_flags & IPL_NSS_VALID)
		return -ENOSYS;

	/* A specification exception jumps to 1: and leaves ret == -ENOSYS. */
	asm volatile(
		"	diag	%1,%2,0x260\n"
		"0:	lhi	%0,0\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");

	/* val0 == val1 indicates a single contiguous storage extent. */
	if (ret || val0 != val1)
		return -ENOSYS;

	memory_chunk[0].size = val0 + 1;
	return 0;
}
#else
/* diag 0x260 is 64-bit only; force the tprot-based fallback on 31 bit. */
static inline int memory_fast_detect(void)
{
	return -ENOSYS;
}
#endif
|
||||
|
||||
#define ADDR2G (1UL << 31)
|
||||
|
||||
/*
 * Derive the installed memory size from the SCLP read-info SCCB filled in
 * earlier by sclp_readinfo_early().  Returns 0 if the SCLP call did not
 * succeed (response code != 0x10).
 */
static noinline __init unsigned long sclp_memory_detect(void)
{
	struct sclp_readinfo_sccb *sccb;
	unsigned long long memsize;

	sccb = &s390_readinfo_sccb;

	if (sccb->header.response_code != 0x10)
		return 0;

	/* Prefer the short rnsize/rnmax fields; the *2 fields are fallbacks. */
	if (sccb->rnsize)
		memsize = sccb->rnsize << 20;
	else
		memsize = sccb->rnsize2 << 20;
	if (sccb->rnmax)
		memsize *= sccb->rnmax;
	else
		memsize *= sccb->rnmax2;
#ifndef CONFIG_64BIT
	/*
	 * Can't deal with more than 2G in 31 bit addressing mode, so
	 * limit the value in order to avoid strange side effects.
	 */
	if (memsize > ADDR2G)
		memsize = ADDR2G;
#endif
	return (unsigned long) memsize;
}
|
||||
|
||||
/*
 * Probe one address with the tprot instruction and return its condition
 * code (0: read/write, 1: read-only, ...).  A program check on a
 * non-existent address is caught via the exception table and leaves the
 * initial value -1, which the caller treats as "no memory here".
 */
static inline __init unsigned long __tprot(unsigned long addr)
{
	int cc = -1;

	asm volatile(
		"	tprot	0(%1),0\n"
		"0:	ipm	%0\n"
		"	srl	%0,28\n"
		"1:\n"
		EX_TABLE(0b,1b)
		: "+d" (cc) : "a" (addr) : "cc");
	return (unsigned long)cc;
}
|
||||
|
||||
/* Checking memory in 128KB increments. */
|
||||
#define CHUNK_INCR (1UL << 17)
|
||||
|
||||
/*
 * Walk memory in CHUNK_INCR steps probing each address with __tprot()
 * and record runs of identical access type as entries in memory_chunk[].
 * memsize, if non-zero, bounds the scan (value from sclp_memory_detect()).
 */
static noinline __init void find_memory_chunks(unsigned long memsize)
{
	unsigned long addr = 0, old_addr = 0;
	unsigned long old_cc = CHUNK_READ_WRITE;
	unsigned long cc;
	int chunk = 0;

	while (chunk < MEMORY_CHUNKS) {
		cc = __tprot(addr);
		/* Advance while the access type stays the same. */
		while (cc == old_cc) {
			addr += CHUNK_INCR;
			cc = __tprot(addr);
#ifndef CONFIG_64BIT
			if (addr == ADDR2G)
				break;
#endif
		}

		/* Close the previous run if it was usable memory. */
		if (old_addr != addr &&
		    (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
			memory_chunk[chunk].addr = old_addr;
			memory_chunk[chunk].size = addr - old_addr;
			memory_chunk[chunk].type = old_cc;
			chunk++;
		}

		old_addr = addr;
		old_cc = cc;

#ifndef CONFIG_64BIT
		if (addr == ADDR2G)
			break;
#endif
		/*
		 * Finish memory detection at the first hole, unless
		 * - we reached the hsa -> skip it.
		 * - we know there must be more.
		 */
		if (cc == -1UL && !memsize && old_addr != ADDR2G)
			break;
		if (memsize && addr >= memsize)
			break;
	}
}
|
||||
|
||||
/*
 * Minimal program-check handler for early boot: resolve the faulting
 * address through the exception table and redirect the old PSW to the
 * fixup; anything without a fixup entry stops the machine.
 */
static __init void early_pgm_check_handler(void)
{
	unsigned long addr;
	const struct exception_table_entry *fixup;

	addr = S390_lowcore.program_old_psw.addr;
	fixup = search_exception_tables(addr & PSW_ADDR_INSN);
	if (!fixup)
		disabled_wait(0);
	S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
}
|
||||
|
||||
/*
 * Install early external-interrupt and program-check PSWs in the lowcore
 * and hook our early program-check handler into the base handler.
 */
static noinline __init void setup_lowcore_early(void)
{
	psw_t psw;

	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
	S390_lowcore.external_new_psw = psw;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	s390_base_pgm_handler_fn = early_pgm_check_handler;
}
|
||||
|
||||
/*
|
||||
* Save ipl parameters, clear bss memory, initialize storage keys
|
||||
* and create a kernel NSS at startup if the SAVESYS= parm is defined
|
||||
*/
|
||||
/*
 * Save ipl parameters, clear bss memory, initialize storage keys
 * and create a kernel NSS at startup if the SAVESYS= parm is defined
 */
void __init startup_init(void)
{
	unsigned long memsize;

	ipl_save_parameters();
	clear_bss_section();
	init_kernel_storage_key();
	/* lockdep must be initialized but kept off until setup is done. */
	lockdep_init();
	lockdep_off();
	detect_machine_type();
	create_kernel_nss();
	/* Sorted extable is needed by the early program-check handler. */
	sort_main_extable();
	setup_lowcore_early();
	sclp_readinfo_early();
	memsize = sclp_memory_detect();
	/* Prefer the fast diag-based detection; fall back to tprot probing. */
	if (memory_fast_detect() < 0)
		find_memory_chunks(memsize);
	lockdep_on();
}
|
||||
401
arch/s390/kernel/ebcdic.c
Normal file
401
arch/s390/kernel/ebcdic.c
Normal file
@@ -0,0 +1,401 @@
|
||||
/*
|
||||
* arch/s390/kernel/ebcdic.c
|
||||
* ECBDIC -> ASCII, ASCII -> ECBDIC,
|
||||
* upper to lower case (EBCDIC) conversion tables.
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Martin Peschke <peschke@fh-brandenburg.de>
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <asm/types.h>
|
||||
#include <asm/ebcdic.h>
|
||||
|
||||
/*
 * ASCII (IBM PC 437) -> EBCDIC 037 translation table.
 * Code points without an EBCDIC equivalent map to 0x3F (EBCDIC SUB).
 */
__u8 _ascebc[256] =
{
 /*00 NUL SOH STX ETX EOT ENQ ACK BEL */
     0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
 /*08  BS  HT  LF  VT  FF  CR  SO  SI */
 /*              ->NL                 */
     0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
 /*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */
     0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26,
 /*18 CAN  EM SUB ESC  FS  GS  RS  US */
 /*           ->IGS ->IRS ->IUS       */
     0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F,
 /*20  SP   !   "   #   $   %   &   ' */
     0x40, 0x5A, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
 /*28   (   )   *   +   ,   -   .   / */
     0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
 /*30   0   1   2   3   4   5   6   7 */
     0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
 /*38   8   9   :   ;   <   =   >   ? */
     0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
 /*40   @   A   B   C   D   E   F   G */
     0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
 /*48   H   I   J   K   L   M   N   O */
     0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
 /*50   P   Q   R   S   T   U   V   W */
     0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
 /*58   X   Y   Z   [   \   ]   ^   _ */
     0xE7, 0xE8, 0xE9, 0xBA, 0xE0, 0xBB, 0xB0, 0x6D,
 /*60   `   a   b   c   d   e   f   g */
     0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
 /*68   h   i   j   k   l   m   n   o */
     0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
 /*70   p   q   r   s   t   u   v   w */
     0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
 /*78   x   y   z   {   |   }   ~  DL */
     0xA7, 0xA8, 0xA9, 0xC0, 0x4F, 0xD0, 0xA1, 0x07,
 /* 0x80-0xFF: CP437 graphics/accented characters - mostly unmapped */
 /*80*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*88*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*90*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*98*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*A0*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*A8*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*B0*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*B8*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*C0*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*C8*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*D0*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*D8*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*E0 sz (German sharp s) */
     0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*E8*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*F0*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*F8*/
     0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF
};
|
||||
|
||||
/*
 * EBCDIC 037 -> ASCII (IBM PC 437) translation table.
 * Unmappable control/graphic characters translate to 0x07 (BEL);
 * "----" in the comments marks such slots.
 */
__u8 _ebcasc[256] =
{
 /* 0x00   NUL   SOH   STX   ETX  *SEL    HT  *RNL   DEL */
     0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
 /* 0x08   -GE  -SPS  -RPT    VT    FF    CR    SO    SI */
     0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
 /* 0x10   DLE   DC1   DC2   DC3  -RES   -NL    BS  -POC
                                  -ENP  ->LF             */
     0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
 /* 0x18   CAN    EM  -UBS  -CU1  -IFS  -IGS  -IRS  -ITB
                                                    -IUS */
     0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
 /* 0x20   -DS  -SOS    FS  -WUS  -BYP    LF   ETB   ESC
                                  -INP                   */
     0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
 /* 0x28   -SA  -SFE   -SM  -CSP  -MFA   ENQ   ACK   BEL
                                  -SW                    */
     0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
 /* 0x30  ----  ----   SYN   -IR   -PP  -TRN  -NBS   EOT */
     0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
 /* 0x38  -SBS   -IT  -RFF  -CU3   DC4   NAK  ----   SUB */
     0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
 /* 0x40    SP   RSP  (accented vowels)            ----  */
     0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
 /* 0x48                     .     <     (     +     |   */
     0x87, 0xA4, 0x9B, 0x2E, 0x3C, 0x28, 0x2B, 0x7C,
 /* 0x50     &                                     ----  */
     0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
 /* 0x58                 !     $     *     )     ;       */
     0x8D, 0xE1, 0x21, 0x24, 0x2A, 0x29, 0x3B, 0xAA,
 /* 0x60     -     /  ----        ----  ----  ----       */
     0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
 /* 0x68              ----     ,     %     _     >     ? */
     0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
 /* 0x70  ----        ----  ----  ----  ----  ----  ---- */
     0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
 /* 0x78     *     `     :     #     @     '     =     " */
     0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
 /* 0x80     *     a     b     c     d     e     f     g */
     0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
 /* 0x88     h     i              ----  ----  ----       */
     0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
 /* 0x90           j     k     l     m     n     o     p */
     0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
 /* 0x98     q     r                    ----        ---- */
     0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
 /* 0xA0           ~     s     t     u     v     w     x */
     0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
 /* 0xA8     y     z              ----  ----  ----  ---- */
     0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
 /* 0xB0     ^                    ----        ----       */
     0x5E, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
 /* 0xB8        ----     [     ]  ----  ----  ----  ---- */
     0xAB, 0x07, 0x5B, 0x5D, 0x07, 0x07, 0x07, 0x07,
 /* 0xC0     {     A     B     C     D     E     F     G */
     0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
 /* 0xC8     H     I  ----              (accents)  ----  */
     0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
 /* 0xD0     }     J     K     L     M     N     O     P */
     0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
 /* 0xD8     Q     R  ----              (accents)        */
     0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
 /* 0xE0     \           S     T     U     V     W     X */
     0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
 /* 0xE8     Y     Z        ----        ----  ----  ---- */
     0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
 /* 0xF0     0     1     2     3     4     5     6     7 */
     0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
 /* 0xF8     8     9  ----  ----        ----  ----  ---- */
     0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
};
|
||||
|
||||
|
||||
/*
 * ASCII (IBM PC 437) -> EBCDIC 500 translation table.
 * Identical to _ascebc (code page 037) except for the punctuation
 * characters !, [, ], ^ and |, which sit at different positions in
 * code page 500.  Unmappable code points translate to 0x3F (SUB).
 */
__u8 _ascebc_500[256] =
{
 /*00 NUL SOH STX ETX EOT ENQ ACK BEL */
     0x00, 0x01, 0x02, 0x03, 0x37, 0x2D, 0x2E, 0x2F,
 /*08  BS  HT  LF  VT  FF  CR  SO  SI */
 /*              ->NL                 */
     0x16, 0x05, 0x15, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
 /*10 DLE DC1 DC2 DC3 DC4 NAK SYN ETB */
     0x10, 0x11, 0x12, 0x13, 0x3C, 0x3D, 0x32, 0x26,
 /*18 CAN  EM SUB ESC  FS  GS  RS  US */
 /*           ->IGS ->IRS ->IUS       */
     0x18, 0x19, 0x3F, 0x27, 0x22, 0x1D, 0x1E, 0x1F,
 /*20  SP   !   "   #   $   %   &   ' */
     0x40, 0x4F, 0x7F, 0x7B, 0x5B, 0x6C, 0x50, 0x7D,
 /*28   (   )   *   +   ,   -   .   / */
     0x4D, 0x5D, 0x5C, 0x4E, 0x6B, 0x60, 0x4B, 0x61,
 /*30   0   1   2   3   4   5   6   7 */
     0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
 /*38   8   9   :   ;   <   =   >   ? */
     0xF8, 0xF9, 0x7A, 0x5E, 0x4C, 0x7E, 0x6E, 0x6F,
 /*40   @   A   B   C   D   E   F   G */
     0x7C, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
 /*48   H   I   J   K   L   M   N   O */
     0xC8, 0xC9, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6,
 /*50   P   Q   R   S   T   U   V   W */
     0xD7, 0xD8, 0xD9, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6,
 /*58   X   Y   Z   [   \   ]   ^   _ */
     0xE7, 0xE8, 0xE9, 0x4A, 0xE0, 0x5A, 0x5F, 0x6D,
 /*60   `   a   b   c   d   e   f   g */
     0x79, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
 /*68   h   i   j   k   l   m   n   o */
     0x88, 0x89, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96,
 /*70   p   q   r   s   t   u   v   w */
     0x97, 0x98, 0x99, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6,
 /*78   x   y   z   {   |   }   ~  DL */
     0xA7, 0xA8, 0xA9, 0xC0, 0xBB, 0xD0, 0xA1, 0x07,
 /* 0x80-0xFF: CP437 graphics/accented characters - mostly unmapped */
 /*80*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*88*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*90*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*98*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*A0*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*A8*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*B0*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*B8*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*C0*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*C8*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*D0*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*D8*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*E0 sz (German sharp s) */
     0x3F, 0x59, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*E8*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*F0*/
     0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
 /*F8*/
     0x90, 0x3F, 0x3F, 0x3F, 0x3F, 0xEA, 0x3F, 0xFF
};
|
||||
|
||||
/*
 * EBCDIC 500 -> ASCII (IBM PC 437) translation table.
 * Identical to _ebcasc (code page 037) except for rows 0x48, 0x58,
 * 0xB0 and 0xB8, where !, [, ], ^ and | differ between the two code
 * pages.  Unmappable characters translate to 0x07 (BEL); "----" in
 * the comments marks such slots.
 */
__u8 _ebcasc_500[256] =
{
 /* 0x00   NUL   SOH   STX   ETX  *SEL    HT  *RNL   DEL */
     0x00, 0x01, 0x02, 0x03, 0x07, 0x09, 0x07, 0x7F,
 /* 0x08   -GE  -SPS  -RPT    VT    FF    CR    SO    SI */
     0x07, 0x07, 0x07, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
 /* 0x10   DLE   DC1   DC2   DC3  -RES   -NL    BS  -POC
                                  -ENP  ->LF             */
     0x10, 0x11, 0x12, 0x13, 0x07, 0x0A, 0x08, 0x07,
 /* 0x18   CAN    EM  -UBS  -CU1  -IFS  -IGS  -IRS  -ITB
                                                    -IUS */
     0x18, 0x19, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
 /* 0x20   -DS  -SOS    FS  -WUS  -BYP    LF   ETB   ESC
                                  -INP                   */
     0x07, 0x07, 0x1C, 0x07, 0x07, 0x0A, 0x17, 0x1B,
 /* 0x28   -SA  -SFE   -SM  -CSP  -MFA   ENQ   ACK   BEL
                                  -SW                    */
     0x07, 0x07, 0x07, 0x07, 0x07, 0x05, 0x06, 0x07,
 /* 0x30  ----  ----   SYN   -IR   -PP  -TRN  -NBS   EOT */
     0x07, 0x07, 0x16, 0x07, 0x07, 0x07, 0x07, 0x04,
 /* 0x38  -SBS   -IT  -RFF  -CU3   DC4   NAK  ----   SUB */
     0x07, 0x07, 0x07, 0x07, 0x14, 0x15, 0x07, 0x1A,
 /* 0x40    SP   RSP  (accented vowels)            ----  */
     0x20, 0xFF, 0x83, 0x84, 0x85, 0xA0, 0x07, 0x86,
 /* 0x48               [     .     <     (     +     !   */
     0x87, 0xA4, 0x5B, 0x2E, 0x3C, 0x28, 0x2B, 0x21,
 /* 0x50     &                                     ----  */
     0x26, 0x82, 0x88, 0x89, 0x8A, 0xA1, 0x8C, 0x07,
 /* 0x58                 ]     $     *     )     ;     ^ */
     0x8D, 0xE1, 0x5D, 0x24, 0x2A, 0x29, 0x3B, 0x5E,
 /* 0x60     -     /  ----        ----  ----  ----       */
     0x2D, 0x2F, 0x07, 0x8E, 0x07, 0x07, 0x07, 0x8F,
 /* 0x68              ----     ,     %     _     >     ? */
     0x80, 0xA5, 0x07, 0x2C, 0x25, 0x5F, 0x3E, 0x3F,
 /* 0x70  ----        ----  ----  ----  ----  ----  ---- */
     0x07, 0x90, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07,
 /* 0x78     *     `     :     #     @     '     =     " */
     0x70, 0x60, 0x3A, 0x23, 0x40, 0x27, 0x3D, 0x22,
 /* 0x80     *     a     b     c     d     e     f     g */
     0x07, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
 /* 0x88     h     i              ----  ----  ----       */
     0x68, 0x69, 0xAE, 0xAF, 0x07, 0x07, 0x07, 0xF1,
 /* 0x90           j     k     l     m     n     o     p */
     0xF8, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F, 0x70,
 /* 0x98     q     r                    ----        ---- */
     0x71, 0x72, 0xA6, 0xA7, 0x91, 0x07, 0x92, 0x07,
 /* 0xA0           ~     s     t     u     v     w     x */
     0xE6, 0x7E, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
 /* 0xA8     y     z              ----  ----  ----  ---- */
     0x79, 0x7A, 0xAD, 0xAB, 0x07, 0x07, 0x07, 0x07,
 /* 0xB0                          ----        ----       */
     0x9B, 0x9C, 0x9D, 0xFA, 0x07, 0x07, 0x07, 0xAC,
 /* 0xB8        ----           |  ----  ----  ----  ---- */
     0xAB, 0x07, 0xAA, 0x7C, 0x07, 0x07, 0x07, 0x07,
 /* 0xC0     {     A     B     C     D     E     F     G */
     0x7B, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
 /* 0xC8     H     I  ----              (accents)  ----  */
     0x48, 0x49, 0x07, 0x93, 0x94, 0x95, 0xA2, 0x07,
 /* 0xD0     }     J     K     L     M     N     O     P */
     0x7D, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F, 0x50,
 /* 0xD8     Q     R  ----              (accents)        */
     0x51, 0x52, 0x07, 0x96, 0x81, 0x97, 0xA3, 0x98,
 /* 0xE0     \           S     T     U     V     W     X */
     0x5C, 0xF6, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
 /* 0xE8     Y     Z        ----        ----  ----  ---- */
     0x59, 0x5A, 0xFD, 0x07, 0x99, 0x07, 0x07, 0x07,
 /* 0xF0     0     1     2     3     4     5     6     7 */
     0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
 /* 0xF8     8     9  ----  ----        ----  ----  ---- */
     0x38, 0x39, 0x07, 0x07, 0x9A, 0x07, 0x07, 0x07
};
|
||||
|
||||
|
||||
/*
 * EBCDIC 037/500 conversion table:
 * from upper to lower case
 *
 * Identity mapping everywhere except the uppercase letter ranges
 * (0xC1-0xC9, 0xD1-0xD9, 0xE2-0xE9 -> 0x81-0x89, 0x91-0x99,
 * 0xA2-0xA9) and a handful of accented-character slots, which is
 * why some rows below are not pure identity.
 */
__u8 _ebc_tolower[256] =
{
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
	0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
	0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
	0x58, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
	0x60, 0x61, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
	0x48, 0x49, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
	0x70, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
	0x58, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
	0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
	0x88, 0x89, 0x8A, 0x8B, 0x8C, 0x8D, 0x8E, 0x8F,
	0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
	0x98, 0x99, 0x9A, 0x9B, 0x9C, 0x9D, 0x9C, 0x9F,
	0xA0, 0xA1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
	0xA8, 0xA9, 0xAA, 0xAB, 0x8C, 0x8D, 0x8E, 0xAF,
	0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
	0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
	0xC0, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
	0x88, 0x89, 0xCA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
	0xD0, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
	0x98, 0x99, 0xDA, 0xDB, 0xDC, 0xDD, 0xDE, 0xDF,
	0xE0, 0xE1, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7,
	0xA8, 0xA9, 0xEA, 0xCB, 0xCC, 0xCD, 0xCE, 0xCF,
	0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
	0xF8, 0xF9, 0xFA, 0xDB, 0xDC, 0xDD, 0xDE, 0xFF
};
|
||||
|
||||
|
||||
/*
 * EBCDIC 037/500 conversion table:
 * from lower to upper case
 *
 * Identity mapping everywhere except the lowercase letter ranges
 * (0x81-0x89, 0x91-0x99, 0xA2-0xA9 -> 0xC1-0xC9, 0xD1-0xD9,
 * 0xE2-0xE9) and a handful of accented-character slots, which is
 * why some rows below are not pure identity.
 */
__u8 _ebc_toupper[256] =
{
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2A, 0x2B, 0x2C, 0x2D, 0x2E, 0x2F,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3A, 0x3B, 0x3C, 0x3D, 0x3E, 0x3F,
	0x40, 0x41, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
	0x68, 0x69, 0x4A, 0x4B, 0x4C, 0x4D, 0x4E, 0x4F,
	0x50, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
	0x78, 0x59, 0x5A, 0x5B, 0x5C, 0x5D, 0x5E, 0x5F,
	0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
	0x68, 0x69, 0x6A, 0x6B, 0x6C, 0x6D, 0x6E, 0x6F,
	0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
	0x78, 0x79, 0x7A, 0x7B, 0x7C, 0x7D, 0x7E, 0x7F,
	0x80, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
	0xC8, 0xC9, 0x8A, 0x8B, 0xAC, 0xAD, 0xAE, 0x8F,
	0x90, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
	0xD8, 0xD9, 0x9A, 0x9B, 0x9E, 0x9D, 0x9E, 0x9F,
	0xA0, 0xA1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
	0xE8, 0xE9, 0xAA, 0xAB, 0xAC, 0xAD, 0xAE, 0xAF,
	0xB0, 0xB1, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7,
	0xB8, 0xB9, 0xBA, 0xBB, 0xBC, 0xBD, 0xBE, 0xBF,
	0xC0, 0xC1, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7,
	0xC8, 0xC9, 0xCA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
	0xD0, 0xD1, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7,
	0xD8, 0xD9, 0xDA, 0xFB, 0xFC, 0xFD, 0xFE, 0xDF,
	0xE0, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7,
	0xE8, 0xE9, 0xEA, 0xEB, 0xEC, 0xED, 0xEE, 0xEF,
	0xF0, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7,
	0xF8, 0xF9, 0xFA, 0xFB, 0xFC, 0xFD, 0xFE, 0xFF
};
|
||||
|
||||
/* Export all conversion tables for use by modules. */
EXPORT_SYMBOL(_ascebc_500);
EXPORT_SYMBOL(_ebcasc_500);
EXPORT_SYMBOL(_ascebc);
EXPORT_SYMBOL(_ebcasc);
EXPORT_SYMBOL(_ebc_tolower);
EXPORT_SYMBOL(_ebc_toupper);
|
||||
|
||||
1065
arch/s390/kernel/entry.S
Normal file
1065
arch/s390/kernel/entry.S
Normal file
File diff suppressed because it is too large
Load Diff
1038
arch/s390/kernel/entry64.S
Normal file
1038
arch/s390/kernel/entry64.S
Normal file
File diff suppressed because it is too large
Load Diff
467
arch/s390/kernel/head.S
Normal file
467
arch/s390/kernel/head.S
Normal file
@@ -0,0 +1,467 @@
|
||||
/*
|
||||
* arch/s390/kernel/head.S
|
||||
*
|
||||
* Copyright (C) IBM Corp. 1999,2006
|
||||
*
|
||||
* Author(s): Hartmut Penner <hp@de.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Rob van der Heij <rvdhei@iae.nl>
|
||||
* Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*
|
||||
* There are 5 different IPL methods
|
||||
 * 1) load the image directly into ram at address 0 and do a PSW restart
|
||||
* 2) linload will load the image from address 0x10000 to memory 0x10000
|
||||
 *    and start the code through LPSW 0x0008000080010000 (VM only, deprecated)
|
||||
* 3) generate the tape ipl header, store the generated image on a tape
|
||||
* and ipl from it
|
||||
* In case of SL tape you need to IPL 5 times to get past VOL1 etc
|
||||
* 4) generate the vm reader ipl header, move the generated image to the
|
||||
* VM reader (use option NOH!) and do a ipl from reader (VM only)
|
||||
* 5) direct call of start by the SALIPL loader
|
||||
* We use the cpuid to distinguish between VM and native ipl
|
||||
* params for kernel are pushed to 0x10400 (see setup.h)
|
||||
*
|
||||
*/
|
||||
|
||||
#include <asm/setup.h>
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/thread_info.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#define ARCH_OFFSET 4
|
||||
#else
|
||||
#define ARCH_OFFSET 0
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_IPL
|
||||
.org 0
|
||||
.long 0x00080000,0x80000000+startup # Just a restart PSW
|
||||
#else
|
||||
#ifdef CONFIG_IPL_TAPE
|
||||
#define IPL_BS 1024
|
||||
.org 0
|
||||
.long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
|
||||
.long 0x27000000,0x60000001 # by ipl to addresses 0-23.
|
||||
.long 0x02000000,0x20000000+IPL_BS # (a PSW and two CCWs).
|
||||
.long 0x00000000,0x00000000 # external old psw
|
||||
.long 0x00000000,0x00000000 # svc old psw
|
||||
.long 0x00000000,0x00000000 # program check old psw
|
||||
.long 0x00000000,0x00000000 # machine check old psw
|
||||
.long 0x00000000,0x00000000 # io old psw
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x000a0000,0x00000058 # external new psw
|
||||
.long 0x000a0000,0x00000060 # svc new psw
|
||||
.long 0x000a0000,0x00000068 # program check new psw
|
||||
.long 0x000a0000,0x00000070 # machine check new psw
|
||||
.long 0x00080000,0x80000000+.Lioint # io new psw
|
||||
|
||||
.org 0x100
|
||||
#
|
||||
# subroutine for loading from tape
|
||||
#  Parameters:
|
||||
# R1 = device number
|
||||
# R2 = load address
|
||||
.Lloader:
|
||||
st %r14,.Lldret
|
||||
la %r3,.Lorbread # r3 = address of orb
|
||||
la %r5,.Lirb # r5 = address of irb
|
||||
st %r2,.Lccwread+4 # initialize CCW data addresses
|
||||
lctl %c6,%c6,.Lcr6
|
||||
slr %r2,%r2
|
||||
.Lldlp:
|
||||
la %r6,3 # 3 retries
|
||||
.Lssch:
|
||||
ssch 0(%r3) # load chunk of IPL_BS bytes
|
||||
bnz .Llderr
|
||||
.Lw4end:
|
||||
bas %r14,.Lwait4io
|
||||
tm 8(%r5),0x82 # do we have a problem ?
|
||||
bnz .Lrecov
|
||||
slr %r7,%r7
|
||||
icm %r7,3,10(%r5) # get residual count
|
||||
lcr %r7,%r7
|
||||
la %r7,IPL_BS(%r7) # IPL_BS-residual=#bytes read
|
||||
ar %r2,%r7 # add to total size
|
||||
tm 8(%r5),0x01 # found a tape mark ?
|
||||
bnz .Ldone
|
||||
l %r0,.Lccwread+4 # update CCW data addresses
|
||||
ar %r0,%r7
|
||||
st %r0,.Lccwread+4
|
||||
b .Lldlp
|
||||
.Ldone:
|
||||
l %r14,.Lldret
|
||||
br %r14 # r2 contains the total size
|
||||
.Lrecov:
|
||||
bas %r14,.Lsense # do the sensing
|
||||
bct %r6,.Lssch # dec. retry count & branch
|
||||
b .Llderr
|
||||
#
|
||||
# Sense subroutine
|
||||
#
|
||||
.Lsense:
|
||||
st %r14,.Lsnsret
|
||||
la %r7,.Lorbsense
|
||||
ssch 0(%r7) # start sense command
|
||||
bnz .Llderr
|
||||
bas %r14,.Lwait4io
|
||||
l %r14,.Lsnsret
|
||||
tm 8(%r5),0x82 # do we have a problem ?
|
||||
bnz .Llderr
|
||||
br %r14
|
||||
#
|
||||
# Wait for interrupt subroutine
|
||||
#
|
||||
.Lwait4io:
|
||||
lpsw .Lwaitpsw
|
||||
.Lioint:
|
||||
c %r1,0xb8 # compare subchannel number
|
||||
bne .Lwait4io
|
||||
tsch 0(%r5)
|
||||
slr %r0,%r0
|
||||
tm 8(%r5),0x82 # do we have a problem ?
|
||||
bnz .Lwtexit
|
||||
tm 8(%r5),0x04 # got device end ?
|
||||
bz .Lwait4io
|
||||
.Lwtexit:
|
||||
br %r14
|
||||
.Llderr:
|
||||
lpsw .Lcrash
|
||||
|
||||
.align 8
|
||||
.Lorbread:
|
||||
.long 0x00000000,0x0080ff00,.Lccwread
|
||||
.align 8
|
||||
.Lorbsense:
|
||||
.long 0x00000000,0x0080ff00,.Lccwsense
|
||||
.align 8
|
||||
.Lccwread:
|
||||
.long 0x02200000+IPL_BS,0x00000000
|
||||
.Lccwsense:
|
||||
.long 0x04200001,0x00000000
|
||||
.Lwaitpsw:
|
||||
.long 0x020a0000,0x80000000+.Lioint
|
||||
|
||||
.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
|
||||
.Lcr6: .long 0xff000000
|
||||
.align 8
|
||||
.Lcrash:.long 0x000a0000,0x00000000
|
||||
.Lldret:.long 0
|
||||
.Lsnsret: .long 0
|
||||
#endif /* CONFIG_IPL_TAPE */
|
||||
|
||||
#ifdef CONFIG_IPL_VM
|
||||
#define IPL_BS 0x730
|
||||
.org 0
|
||||
.long 0x00080000,0x80000000+iplstart # The first 24 bytes are loaded
|
||||
.long 0x02000018,0x60000050 # by ipl to addresses 0-23.
|
||||
.long 0x02000068,0x60000050 # (a PSW and two CCWs).
|
||||
.fill 80-24,1,0x40 # bytes 24-79 are discarded !!
|
||||
.long 0x020000f0,0x60000050 # The next 160 byte are loaded
|
||||
.long 0x02000140,0x60000050 # to addresses 0x18-0xb7
|
||||
.long 0x02000190,0x60000050 # They form the continuation
|
||||
.long 0x020001e0,0x60000050 # of the CCW program started
|
||||
.long 0x02000230,0x60000050 # by ipl and load the range
|
||||
.long 0x02000280,0x60000050 # 0x0f0-0x730 from the image
|
||||
.long 0x020002d0,0x60000050 # to the range 0x0f0-0x730
|
||||
.long 0x02000320,0x60000050 # in memory. At the end of
|
||||
.long 0x02000370,0x60000050 # the channel program the PSW
|
||||
.long 0x020003c0,0x60000050 # at location 0 is loaded.
|
||||
.long 0x02000410,0x60000050 # Initial processing starts
|
||||
.long 0x02000460,0x60000050 # at 0xf0 = iplstart.
|
||||
.long 0x020004b0,0x60000050
|
||||
.long 0x02000500,0x60000050
|
||||
.long 0x02000550,0x60000050
|
||||
.long 0x020005a0,0x60000050
|
||||
.long 0x020005f0,0x60000050
|
||||
.long 0x02000640,0x60000050
|
||||
.long 0x02000690,0x60000050
|
||||
.long 0x020006e0,0x20000050
|
||||
|
||||
.org 0xf0
|
||||
#
|
||||
# subroutine for loading cards from the reader
|
||||
#
|
||||
.Lloader:
|
||||
la %r3,.Lorb # r2 = address of orb into r2
|
||||
la %r5,.Lirb # r4 = address of irb
|
||||
la %r6,.Lccws
|
||||
la %r7,20
|
||||
.Linit:
|
||||
st %r2,4(%r6) # initialize CCW data addresses
|
||||
la %r2,0x50(%r2)
|
||||
la %r6,8(%r6)
|
||||
bct 7,.Linit
|
||||
|
||||
lctl %c6,%c6,.Lcr6 # set IO subclass mask
|
||||
slr %r2,%r2
|
||||
.Lldlp:
|
||||
ssch 0(%r3) # load chunk of 1600 bytes
|
||||
bnz .Llderr
|
||||
.Lwait4irq:
|
||||
mvc 0x78(8),.Lnewpsw # set up IO interrupt psw
|
||||
lpsw .Lwaitpsw
|
||||
.Lioint:
|
||||
c %r1,0xb8 # compare subchannel number
|
||||
bne .Lwait4irq
|
||||
tsch 0(%r5)
|
||||
|
||||
slr %r0,%r0
|
||||
ic %r0,8(%r5) # get device status
|
||||
chi %r0,8 # channel end ?
|
||||
be .Lcont
|
||||
chi %r0,12 # channel end + device end ?
|
||||
be .Lcont
|
||||
|
||||
l %r0,4(%r5)
|
||||
s %r0,8(%r3) # r0/8 = number of ccws executed
|
||||
mhi %r0,10 # *10 = number of bytes in ccws
|
||||
lh %r3,10(%r5) # get residual count
|
||||
sr %r0,%r3 # #ccws*80-residual=#bytes read
|
||||
ar %r2,%r0
|
||||
|
||||
br %r14 # r2 contains the total size
|
||||
|
||||
.Lcont:
|
||||
ahi %r2,0x640 # add 0x640 to total size
|
||||
la %r6,.Lccws
|
||||
la %r7,20
|
||||
.Lincr:
|
||||
l %r0,4(%r6) # update CCW data addresses
|
||||
ahi %r0,0x640
|
||||
st %r0,4(%r6)
|
||||
ahi %r6,8
|
||||
bct 7,.Lincr
|
||||
|
||||
b .Lldlp
|
||||
.Llderr:
|
||||
lpsw .Lcrash
|
||||
|
||||
.align 8
|
||||
.Lorb: .long 0x00000000,0x0080ff00,.Lccws
|
||||
.Lirb: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
|
||||
.Lcr6: .long 0xff000000
|
||||
.Lloadp:.long 0,0
|
||||
.align 8
|
||||
.Lcrash:.long 0x000a0000,0x00000000
|
||||
.Lnewpsw:
|
||||
.long 0x00080000,0x80000000+.Lioint
|
||||
.Lwaitpsw:
|
||||
.long 0x020a0000,0x80000000+.Lioint
|
||||
|
||||
.align 8
|
||||
.Lccws: .rept 19
|
||||
.long 0x02600050,0x00000000
|
||||
.endr
|
||||
.long 0x02200050,0x00000000
|
||||
#endif /* CONFIG_IPL_VM */
|
||||
|
||||
iplstart:
|
||||
lh %r1,0xb8 # test if subchannel number
|
||||
bct %r1,.Lnoload # is valid
|
||||
l %r1,0xb8 # load ipl subchannel number
|
||||
la %r2,IPL_BS # load start address
|
||||
bas %r14,.Lloader # load rest of ipl image
|
||||
l %r12,.Lparm # pointer to parameter area
|
||||
st %r1,IPL_DEVICE+ARCH_OFFSET-PARMAREA(%r12) # save ipl device number
|
||||
|
||||
#
|
||||
# load parameter file from ipl device
|
||||
#
|
||||
.Lagain1:
|
||||
l %r2,.Linitrd # ramdisk loc. is temp
|
||||
bas %r14,.Lloader # load parameter file
|
||||
ltr %r2,%r2 # got anything ?
|
||||
bz .Lnopf
|
||||
chi %r2,895
|
||||
bnh .Lnotrunc
|
||||
la %r2,895
|
||||
.Lnotrunc:
|
||||
l %r4,.Linitrd
|
||||
clc 0(3,%r4),.L_hdr # if it is HDRx
|
||||
bz .Lagain1 # skip dataset header
|
||||
clc 0(3,%r4),.L_eof # if it is EOFx
|
||||
bz .Lagain1 # skip dateset trailer
|
||||
la %r5,0(%r4,%r2)
|
||||
lr %r3,%r2
|
||||
.Lidebc:
|
||||
tm 0(%r5),0x80 # high order bit set ?
|
||||
bo .Ldocv # yes -> convert from EBCDIC
|
||||
ahi %r5,-1
|
||||
bct %r3,.Lidebc
|
||||
b .Lnocv
|
||||
.Ldocv:
|
||||
l %r3,.Lcvtab
|
||||
tr 0(256,%r4),0(%r3) # convert parameters to ascii
|
||||
tr 256(256,%r4),0(%r3)
|
||||
tr 512(256,%r4),0(%r3)
|
||||
tr 768(122,%r4),0(%r3)
|
||||
.Lnocv: la %r3,COMMAND_LINE-PARMAREA(%r12) # load adr. of command line
|
||||
mvc 0(256,%r3),0(%r4)
|
||||
mvc 256(256,%r3),256(%r4)
|
||||
mvc 512(256,%r3),512(%r4)
|
||||
mvc 768(122,%r3),768(%r4)
|
||||
slr %r0,%r0
|
||||
b .Lcntlp
|
||||
.Ldelspc:
|
||||
ic %r0,0(%r2,%r3)
|
||||
chi %r0,0x20 # is it a space ?
|
||||
be .Lcntlp
|
||||
ahi %r2,1
|
||||
b .Leolp
|
||||
.Lcntlp:
|
||||
brct %r2,.Ldelspc
|
||||
.Leolp:
|
||||
slr %r0,%r0
|
||||
stc %r0,0(%r2,%r3) # terminate buffer
|
||||
.Lnopf:
|
||||
|
||||
#
|
||||
# load ramdisk from ipl device
|
||||
#
|
||||
.Lagain2:
|
||||
l %r2,.Linitrd # addr of ramdisk
|
||||
st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
|
||||
bas %r14,.Lloader # load ramdisk
|
||||
st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of rd
|
||||
ltr %r2,%r2
|
||||
bnz .Lrdcont
|
||||
st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found
|
||||
.Lrdcont:
|
||||
l %r2,.Linitrd
|
||||
|
||||
clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
|
||||
bz .Lagain2
|
||||
clc 0(3,%r2),.L_eof
|
||||
bz .Lagain2
|
||||
|
||||
#ifdef CONFIG_IPL_VM
|
||||
#
|
||||
# reset files in VM reader
|
||||
#
|
||||
stidp __LC_CPUID # store cpuid
|
||||
tm __LC_CPUID,0xff # running VM ?
|
||||
bno .Lnoreset
|
||||
la %r2,.Lreset
|
||||
lhi %r3,26
|
||||
diag %r2,%r3,8
|
||||
la %r5,.Lirb
|
||||
stsch 0(%r5) # check if irq is pending
|
||||
tm 30(%r5),0x0f # by verifying if any of the
|
||||
bnz .Lwaitforirq # activity or status control
|
||||
tm 31(%r5),0xff # bits is set in the schib
|
||||
bz .Lnoreset
|
||||
.Lwaitforirq:
|
||||
mvc 0x78(8),.Lrdrnewpsw # set up IO interrupt psw
|
||||
.Lwaitrdrirq:
|
||||
lpsw .Lrdrwaitpsw
|
||||
.Lrdrint:
|
||||
c %r1,0xb8 # compare subchannel number
|
||||
bne .Lwaitrdrirq
|
||||
la %r5,.Lirb
|
||||
tsch 0(%r5)
|
||||
.Lnoreset:
|
||||
b .Lnoload
|
||||
|
||||
.align 8
|
||||
.Lrdrnewpsw:
|
||||
.long 0x00080000,0x80000000+.Lrdrint
|
||||
.Lrdrwaitpsw:
|
||||
.long 0x020a0000,0x80000000+.Lrdrint
|
||||
#endif
|
||||
|
||||
#
|
||||
# everything loaded, go for it
|
||||
#
|
||||
.Lnoload:
|
||||
l %r1,.Lstartup
|
||||
br %r1
|
||||
|
||||
.Linitrd:.long _end + 0x400000 # default address of initrd
|
||||
.Lparm: .long PARMAREA
|
||||
.Lstartup: .long startup
|
||||
.Lcvtab:.long _ebcasc # ebcdic to ascii table
|
||||
.Lreset:.byte 0xc3,0xc8,0xc1,0xd5,0xc7,0xc5,0x40,0xd9,0xc4,0xd9,0x40
|
||||
.byte 0xc1,0xd3,0xd3,0x40,0xd2,0xc5,0xc5,0xd7,0x40,0xd5,0xd6
|
||||
.byte 0xc8,0xd6,0xd3,0xc4 # "change rdr all keep nohold"
|
||||
.L_eof: .long 0xc5d6c600 /* C'EOF' */
|
||||
.L_hdr: .long 0xc8c4d900 /* C'HDR' */
|
||||
|
||||
#endif /* CONFIG_IPL */
|
||||
|
||||
#
|
||||
# SALIPL loader support. Based on a patch by Rob van der Heij.
|
||||
# This entry point is called directly from the SALIPL loader and
|
||||
# doesn't need a builtin ipl record.
|
||||
#
|
||||
.org 0x800
|
||||
.globl start
|
||||
start:
|
||||
stm %r0,%r15,0x07b0 # store registers
|
||||
basr %r12,%r0
|
||||
.base:
|
||||
l %r11,.parm
|
||||
l %r8,.cmd # pointer to command buffer
|
||||
|
||||
ltr %r9,%r9 # do we have SALIPL parameters?
|
||||
bp .sk8x8
|
||||
|
||||
mvc 0(64,%r8),0x00b0 # copy saved registers
|
||||
xc 64(240-64,%r8),0(%r8) # remainder of buffer
|
||||
tr 0(64,%r8),.lowcase
|
||||
b .gotr
|
||||
.sk8x8:
|
||||
mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
|
||||
.gotr:
|
||||
l %r10,.tbl # EBCDIC to ASCII table
|
||||
tr 0(240,%r8),0(%r10)
|
||||
slr %r0,%r0
|
||||
st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
|
||||
st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
|
||||
j startup # continue with startup
|
||||
.tbl: .long _ebcasc # translate table
|
||||
.cmd: .long COMMAND_LINE # address of command line buffer
|
||||
.parm: .long PARMAREA
|
||||
.lowcase:
|
||||
.byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
|
||||
.byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
|
||||
.byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
|
||||
.byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
|
||||
.byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
|
||||
.byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
|
||||
.byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
|
||||
.byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
|
||||
.byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
|
||||
.byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
|
||||
.byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
|
||||
.byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
|
||||
.byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
|
||||
.byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
|
||||
.byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
|
||||
.byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
|
||||
|
||||
.byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
|
||||
.byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
|
||||
.byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
|
||||
.byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
|
||||
.byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
|
||||
.byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
|
||||
.byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
|
||||
.byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
|
||||
.byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
|
||||
.byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
|
||||
.byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
|
||||
.byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
|
||||
.byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
|
||||
.byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
|
||||
.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
|
||||
.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#include "head64.S"
|
||||
#else
|
||||
#include "head31.S"
|
||||
#endif
|
||||
188
arch/s390/kernel/head31.S
Normal file
188
arch/s390/kernel/head31.S
Normal file
@@ -0,0 +1,188 @@
|
||||
/*
|
||||
* arch/s390/kernel/head31.S
|
||||
*
|
||||
* Copyright (C) IBM Corp. 2005,2006
|
||||
*
|
||||
* Author(s): Hartmut Penner <hp@de.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Rob van der Heij <rvdhei@iae.nl>
|
||||
* Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#
|
||||
# startup-code at 0x10000, running in absolute addressing mode
|
||||
# this is called either by the ipl loader or directly by PSW restart
|
||||
# or linload or SALIPL
|
||||
#
|
||||
.org 0x10000
|
||||
startup:basr %r13,0 # get base
|
||||
.LPG0: l %r13,0f-.LPG0(%r13)
|
||||
b 0(%r13)
|
||||
0: .long startup_continue
|
||||
|
||||
#
|
||||
# params at 10400 (setup.h)
|
||||
#
|
||||
.org PARMAREA
|
||||
.long 0,0 # IPL_DEVICE
|
||||
.long 0,0 # INITRD_START
|
||||
.long 0,0 # INITRD_SIZE
|
||||
|
||||
.org COMMAND_LINE
|
||||
.byte "root=/dev/ram0 ro"
|
||||
.byte 0
|
||||
|
||||
.org 0x11000
|
||||
|
||||
startup_continue:
|
||||
basr %r13,0 # get base
|
||||
.LPG1: mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
|
||||
lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
|
||||
l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
|
||||
# move IPL device to lowcore
|
||||
mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
|
||||
#
|
||||
# Setup stack
|
||||
#
|
||||
l %r15,.Linittu-.LPG1(%r13)
|
||||
mvc __LC_CURRENT(4),__TI_task(%r15)
|
||||
ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
|
||||
st %r15,__LC_KERNEL_STACK # set end of kernel stack
|
||||
ahi %r15,-96
|
||||
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
|
||||
#
|
||||
# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
|
||||
# and create a kernel NSS if the SAVESYS= parm is defined
|
||||
#
|
||||
l %r14,.Lstartup_init-.LPG1(%r13)
|
||||
basr %r14,%r14
|
||||
|
||||
l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
|
||||
#
|
||||
# find out if we have an IEEE fpu
|
||||
#
|
||||
mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
|
||||
efpc %r0,0 # test IEEE extract fpc instruction
|
||||
oi 3(%r12),2 # set IEEE fpu flag
|
||||
.Lchkfpu:
|
||||
|
||||
#
|
||||
# find out if we have the CSP instruction
|
||||
#
|
||||
mvc __LC_PGM_NEW_PSW(8),.Lpccsp-.LPG1(%r13)
|
||||
la %r0,0
|
||||
lr %r1,%r0
|
||||
la %r2,4
|
||||
csp %r0,%r2 # Test CSP instruction
|
||||
oi 3(%r12),8 # set CSP flag
|
||||
.Lchkcsp:
|
||||
|
||||
#
|
||||
# find out if we have the MVPG instruction
|
||||
#
|
||||
mvc __LC_PGM_NEW_PSW(8),.Lpcmvpg-.LPG1(%r13)
|
||||
sr %r0,%r0
|
||||
la %r1,0
|
||||
la %r2,0
|
||||
mvpg %r1,%r2 # Test CSP instruction
|
||||
oi 3(%r12),16 # set MVPG flag
|
||||
.Lchkmvpg:
|
||||
|
||||
#
|
||||
# find out if we have the IDTE instruction
|
||||
#
|
||||
mvc __LC_PGM_NEW_PSW(8),.Lpcidte-.LPG1(%r13)
|
||||
.long 0xb2b10000 # store facility list
|
||||
tm 0xc8,0x08 # check bit for clearing-by-ASCE
|
||||
bno .Lchkidte-.LPG1(%r13)
|
||||
lhi %r1,2094
|
||||
lhi %r2,0
|
||||
.long 0xb98e2001
|
||||
oi 3(%r12),0x80 # set IDTE flag
|
||||
.Lchkidte:
|
||||
|
||||
#
|
||||
# find out if the diag 0x9c is available
|
||||
#
|
||||
mvc __LC_PGM_NEW_PSW(8),.Lpcdiag9c-.LPG1(%r13)
|
||||
stap __LC_CPUID+4 # store cpu address
|
||||
lh %r1,__LC_CPUID+4
|
||||
diag %r1,0,0x9c # test diag 0x9c
|
||||
oi 2(%r12),1 # set diag9c flag
|
||||
.Lchkdiag9c:
|
||||
|
||||
lpsw .Lentry-.LPG1(13) # jump to _stext in primary-space,
|
||||
# virtual and never return ...
|
||||
.align 8
|
||||
.Lentry:.long 0x00080000,0x80000000 + _stext
|
||||
.Lctl: .long 0x04b50002 # cr0: various things
|
||||
.long 0 # cr1: primary space segment table
|
||||
.long .Lduct # cr2: dispatchable unit control table
|
||||
.long 0 # cr3: instruction authorization
|
||||
.long 0 # cr4: instruction authorization
|
||||
.long .Lduct # cr5: primary-aste origin
|
||||
.long 0 # cr6: I/O interrupts
|
||||
.long 0 # cr7: secondary space segment table
|
||||
.long 0 # cr8: access registers translation
|
||||
.long 0 # cr9: tracing off
|
||||
.long 0 # cr10: tracing off
|
||||
.long 0 # cr11: tracing off
|
||||
.long 0 # cr12: tracing off
|
||||
.long 0 # cr13: home space segment table
|
||||
.long 0xc0000000 # cr14: machine check handling off
|
||||
.long 0 # cr15: linkage stack operations
|
||||
.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
|
||||
.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
|
||||
.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
|
||||
.Lpcidte:.long 0x00080000,0x80000000 + .Lchkidte
|
||||
.Lpcdiag9c:.long 0x00080000,0x80000000 + .Lchkdiag9c
|
||||
.Lmchunk:.long memory_chunk
|
||||
.Lmflags:.long machine_flags
|
||||
.Lbss_bgn: .long __bss_start
|
||||
.Lbss_end: .long _end
|
||||
.Lparmaddr: .long PARMAREA
|
||||
.Linittu: .long init_thread_union
|
||||
.Lstartup_init:
|
||||
.long startup_init
|
||||
.align 64
|
||||
.Lduct: .long 0,0,0,0,.Lduald,0,0,0
|
||||
.long 0,0,0,0,0,0,0,0
|
||||
.align 128
|
||||
.Lduald:.rept 8
|
||||
.long 0x80000000,0,0,0 # invalid access-list entries
|
||||
.endr
|
||||
|
||||
.org 0x12000
|
||||
.globl _ehead
|
||||
_ehead:
|
||||
#ifdef CONFIG_SHARED_KERNEL
|
||||
.org 0x100000
|
||||
#endif
|
||||
|
||||
#
|
||||
# startup-code, running in absolute addressing mode
|
||||
#
|
||||
.globl _stext
|
||||
_stext: basr %r13,0 # get base
|
||||
.LPG3:
|
||||
# check control registers
|
||||
stctl %c0,%c15,0(%r15)
|
||||
oi 2(%r15),0x40 # enable sigp emergency signal
|
||||
oi 0(%r15),0x10 # switch on low address protection
|
||||
lctl %c0,%c15,0(%r15)
|
||||
|
||||
#
|
||||
lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
|
||||
l %r14,.Lstart-.LPG3(%r13)
|
||||
basr %r14,%r14 # call start_kernel
|
||||
#
|
||||
# We returned from start_kernel ?!? PANIK
|
||||
#
|
||||
basr %r13,0
|
||||
lpsw .Ldw-.(%r13) # load disabled wait psw
|
||||
#
|
||||
.align 8
|
||||
.Ldw: .long 0x000a0000,0x00000000
|
||||
.Lstart:.long start_kernel
|
||||
.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
|
||||
191
arch/s390/kernel/head64.S
Normal file
191
arch/s390/kernel/head64.S
Normal file
@@ -0,0 +1,191 @@
|
||||
/*
|
||||
* arch/s390/kernel/head64.S
|
||||
*
|
||||
* Copyright (C) IBM Corp. 1999,2006
|
||||
*
|
||||
* Author(s): Hartmut Penner <hp@de.ibm.com>
|
||||
* Martin Schwidefsky <schwidefsky@de.ibm.com>
|
||||
* Rob van der Heij <rvdhei@iae.nl>
|
||||
* Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*
|
||||
*/
|
||||
|
||||
#
|
||||
# startup-code at 0x10000, running in absolute addressing mode
|
||||
# this is called either by the ipl loader or directly by PSW restart
|
||||
# or linload or SALIPL
|
||||
#
|
||||
.org 0x10000
|
||||
startup:basr %r13,0 # get base
|
||||
.LPG0: l %r13,0f-.LPG0(%r13)
|
||||
b 0(%r13)
|
||||
0: .long startup_continue
|
||||
|
||||
#
|
||||
# params at 10400 (setup.h)
|
||||
#
|
||||
.org PARMAREA
|
||||
.quad 0 # IPL_DEVICE
|
||||
.quad 0 # INITRD_START
|
||||
.quad 0 # INITRD_SIZE
|
||||
|
||||
.org COMMAND_LINE
|
||||
.byte "root=/dev/ram0 ro"
|
||||
.byte 0
|
||||
|
||||
.org 0x11000
|
||||
|
||||
startup_continue:
|
||||
basr %r13,0 # get base
|
||||
.LPG1: sll %r13,1 # remove high order bit
|
||||
srl %r13,1
|
||||
lhi %r1,1 # mode 1 = esame
|
||||
mvi __LC_AR_MODE_ID,1 # set esame flag
|
||||
slr %r0,%r0 # set cpuid to zero
|
||||
sigp %r1,%r0,0x12 # switch to esame mode
|
||||
sam64 # switch to 64 bit mode
|
||||
lctlg %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
|
||||
lg %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
|
||||
# move IPL device to lowcore
|
||||
mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
|
||||
#
|
||||
# Setup stack
|
||||
#
|
||||
larl %r15,init_thread_union
|
||||
lg %r14,__TI_task(%r15) # cache current in lowcore
|
||||
stg %r14,__LC_CURRENT
|
||||
aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
|
||||
stg %r15,__LC_KERNEL_STACK # set end of kernel stack
|
||||
aghi %r15,-160
|
||||
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
|
||||
#
|
||||
# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
|
||||
# and create a kernel NSS if the SAVESYS= parm is defined
|
||||
#
|
||||
brasl %r14,startup_init
|
||||
# set program check new psw mask
|
||||
mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
|
||||
larl %r12,machine_flags
|
||||
#
|
||||
# find out if we have the MVPG instruction
|
||||
#
|
||||
la %r1,0f-.LPG1(%r13) # set program check address
|
||||
stg %r1,__LC_PGM_NEW_PSW+8
|
||||
sgr %r0,%r0
|
||||
lghi %r1,0
|
||||
lghi %r2,0
|
||||
mvpg %r1,%r2 # test MVPG instruction
|
||||
oi 7(%r12),16 # set MVPG flag
|
||||
0:
|
||||
|
||||
#
|
||||
# find out if the diag 0x44 works in 64 bit mode
|
||||
#
|
||||
la %r1,0f-.LPG1(%r13) # set program check address
|
||||
stg %r1,__LC_PGM_NEW_PSW+8
|
||||
diag 0,0,0x44 # test diag 0x44
|
||||
oi 7(%r12),32 # set diag44 flag
|
||||
0:
|
||||
|
||||
#
|
||||
# find out if we have the IDTE instruction
|
||||
#
|
||||
la %r1,0f-.LPG1(%r13) # set program check address
|
||||
stg %r1,__LC_PGM_NEW_PSW+8
|
||||
.long 0xb2b10000 # store facility list
|
||||
tm 0xc8,0x08 # check bit for clearing-by-ASCE
|
||||
bno 0f-.LPG1(%r13)
|
||||
lhi %r1,2094
|
||||
lhi %r2,0
|
||||
.long 0xb98e2001
|
||||
oi 7(%r12),0x80 # set IDTE flag
|
||||
0:
|
||||
|
||||
#
|
||||
# find out if the diag 0x9c is available
|
||||
#
|
||||
la %r1,0f-.LPG1(%r13) # set program check address
|
||||
stg %r1,__LC_PGM_NEW_PSW+8
|
||||
stap __LC_CPUID+4 # store cpu address
|
||||
lh %r1,__LC_CPUID+4
|
||||
diag %r1,0,0x9c # test diag 0x9c
|
||||
oi 6(%r12),1 # set diag9c flag
|
||||
0:
|
||||
|
||||
#
|
||||
# find out if we have the MVCOS instruction
|
||||
#
|
||||
la %r1,0f-.LPG1(%r13) # set program check address
|
||||
stg %r1,__LC_PGM_NEW_PSW+8
|
||||
.short 0xc800 # mvcos 0(%r0),0(%r0),%r0
|
||||
.short 0x0000
|
||||
.short 0x0000
|
||||
0: tm 0x8f,0x13 # special-operation exception?
|
||||
bno 1f-.LPG1(%r13) # if yes, MVCOS is present
|
||||
oi 6(%r12),2 # set MVCOS flag
|
||||
1:
|
||||
|
||||
lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
|
||||
# virtual and never return ...
|
||||
.align 16
|
||||
.Lentry:.quad 0x0000000180000000,_stext
|
||||
.Lctl: .quad 0x04b50002 # cr0: various things
|
||||
.quad 0 # cr1: primary space segment table
|
||||
.quad .Lduct # cr2: dispatchable unit control table
|
||||
.quad 0 # cr3: instruction authorization
|
||||
.quad 0 # cr4: instruction authorization
|
||||
.quad .Lduct # cr5: primary-aste origin
|
||||
.quad 0 # cr6: I/O interrupts
|
||||
.quad 0 # cr7: secondary space segment table
|
||||
.quad 0 # cr8: access registers translation
|
||||
.quad 0 # cr9: tracing off
|
||||
.quad 0 # cr10: tracing off
|
||||
.quad 0 # cr11: tracing off
|
||||
.quad 0 # cr12: tracing off
|
||||
.quad 0 # cr13: home space segment table
|
||||
.quad 0xc0000000 # cr14: machine check handling off
|
||||
.quad 0 # cr15: linkage stack operations
|
||||
.Lpcmsk:.quad 0x0000000180000000
|
||||
.L4malign:.quad 0xffffffffffc00000
|
||||
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
|
||||
.Lnop: .long 0x07000700
|
||||
.Lparmaddr:
|
||||
.quad PARMAREA
|
||||
.align 64
|
||||
.Lduct: .long 0,0,0,0,.Lduald,0,0,0
|
||||
.long 0,0,0,0,0,0,0,0
|
||||
.align 128
|
||||
.Lduald:.rept 8
|
||||
.long 0x80000000,0,0,0 # invalid access-list entries
|
||||
.endr
|
||||
|
||||
.org 0x12000
|
||||
.globl _ehead
|
||||
_ehead:
|
||||
#ifdef CONFIG_SHARED_KERNEL
|
||||
.org 0x100000
|
||||
#endif
|
||||
|
||||
#
|
||||
# startup-code, running in absolute addressing mode
|
||||
#
|
||||
.globl _stext
|
||||
_stext: basr %r13,0 # get base
|
||||
.LPG3:
|
||||
# check control registers
|
||||
stctg %c0,%c15,0(%r15)
|
||||
oi 6(%r15),0x40 # enable sigp emergency signal
|
||||
oi 4(%r15),0x10 # switch on low address proctection
|
||||
lctlg %c0,%c15,0(%r15)
|
||||
|
||||
lam 0,15,.Laregs-.LPG3(%r13) # load acrs needed by uaccess
|
||||
brasl %r14,start_kernel # go to C code
|
||||
#
|
||||
# We returned from start_kernel ?!? PANIK
|
||||
#
|
||||
basr %r13,0
|
||||
lpswe .Ldw-.(%r13) # load disabled wait psw
|
||||
|
||||
.align 8
|
||||
.Ldw: .quad 0x0002000180000000,0x0000000000000000
|
||||
.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
|
||||
44
arch/s390/kernel/init_task.c
Normal file
44
arch/s390/kernel/init_task.c
Normal file
@@ -0,0 +1,44 @@
|
||||
/*
|
||||
* arch/s390/kernel/init_task.c
|
||||
*
|
||||
* S390 version
|
||||
*
|
||||
* Derived from "arch/i386/kernel/init_task.c"
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/init_task.h>
|
||||
#include <linux/mqueue.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
static struct fs_struct init_fs = INIT_FS;
|
||||
static struct files_struct init_files = INIT_FILES;
|
||||
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
|
||||
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
|
||||
struct mm_struct init_mm = INIT_MM(init_mm);
|
||||
|
||||
EXPORT_SYMBOL(init_mm);
|
||||
|
||||
/*
|
||||
* Initial thread structure.
|
||||
*
|
||||
* We need to make sure that this is 8192-byte aligned due to the
|
||||
* way process stacks are handled. This is done by having a special
|
||||
* "init_task" linker map entry..
|
||||
*/
|
||||
union thread_union init_thread_union
|
||||
__attribute__((__section__(".data.init_task"))) =
|
||||
{ INIT_THREAD_INFO(init_task) };
|
||||
|
||||
/*
|
||||
* Initial task structure.
|
||||
*
|
||||
* All other task structs will be allocated on slabs in fork.c
|
||||
*/
|
||||
struct task_struct init_task = INIT_TASK(init_task);
|
||||
|
||||
EXPORT_SYMBOL(init_task);
|
||||
1099
arch/s390/kernel/ipl.c
Normal file
1099
arch/s390/kernel/ipl.c
Normal file
File diff suppressed because it is too large
Load Diff
106
arch/s390/kernel/irq.c
Normal file
106
arch/s390/kernel/irq.c
Normal file
@@ -0,0 +1,106 @@
|
||||
/*
|
||||
* arch/s390/kernel/irq.c
|
||||
*
|
||||
* Copyright IBM Corp. 2004,2007
|
||||
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
|
||||
* Thomas Spatzier (tspat@de.ibm.com)
|
||||
*
|
||||
* This file contains interrupt related functions.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/profile.h>
|
||||
|
||||
/*
|
||||
* show_interrupts is needed by /proc/interrupts.
|
||||
*/
|
||||
int show_interrupts(struct seq_file *p, void *v)
|
||||
{
|
||||
static const char *intrclass_names[] = { "EXT", "I/O", };
|
||||
int i = *(loff_t *) v, j;
|
||||
|
||||
if (i == 0) {
|
||||
seq_puts(p, " ");
|
||||
for_each_online_cpu(j)
|
||||
seq_printf(p, "CPU%d ",j);
|
||||
seq_putc(p, '\n');
|
||||
}
|
||||
|
||||
if (i < NR_IRQS) {
|
||||
seq_printf(p, "%s: ", intrclass_names[i]);
|
||||
#ifndef CONFIG_SMP
|
||||
seq_printf(p, "%10u ", kstat_irqs(i));
|
||||
#else
|
||||
for_each_online_cpu(j)
|
||||
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
|
||||
#endif
|
||||
seq_putc(p, '\n');
|
||||
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* For compatibilty only. S/390 specific setup of interrupts et al. is done
|
||||
* much later in init_channel_subsystem().
|
||||
*/
|
||||
void __init
|
||||
init_IRQ(void)
|
||||
{
|
||||
/* nothing... */
|
||||
}
|
||||
|
||||
/*
|
||||
* Switch to the asynchronous interrupt stack for softirq execution.
|
||||
*/
|
||||
extern void __do_softirq(void);
|
||||
|
||||
asmlinkage void do_softirq(void)
|
||||
{
|
||||
unsigned long flags, old, new;
|
||||
|
||||
if (in_interrupt())
|
||||
return;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
if (local_softirq_pending()) {
|
||||
/* Get current stack pointer. */
|
||||
asm volatile("la %0,0(15)" : "=a" (old));
|
||||
/* Check against async. stack address range. */
|
||||
new = S390_lowcore.async_stack;
|
||||
if (((new - old) >> (PAGE_SHIFT + THREAD_ORDER)) != 0) {
|
||||
/* Need to switch to the async. stack. */
|
||||
new -= STACK_FRAME_OVERHEAD;
|
||||
((struct stack_frame *) new)->back_chain = old;
|
||||
|
||||
asm volatile(" la 15,0(%0)\n"
|
||||
" basr 14,%2\n"
|
||||
" la 15,0(%1)\n"
|
||||
: : "a" (new), "a" (old),
|
||||
"a" (__do_softirq)
|
||||
: "0", "1", "2", "3", "4", "5", "14",
|
||||
"cc", "memory" );
|
||||
} else
|
||||
/* We are already on the async stack. */
|
||||
__do_softirq();
|
||||
}
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
EXPORT_SYMBOL(do_softirq);
|
||||
|
||||
void init_irq_proc(void)
|
||||
{
|
||||
struct proc_dir_entry *root_irq_dir;
|
||||
|
||||
root_irq_dir = proc_mkdir("irq", NULL);
|
||||
create_prof_cpu_mask(root_irq_dir);
|
||||
}
|
||||
674
arch/s390/kernel/kprobes.c
Normal file
674
arch/s390/kernel/kprobes.c
Normal file
@@ -0,0 +1,674 @@
|
||||
/*
|
||||
* Kernel Probes (KProbes)
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||
*
|
||||
* Copyright (C) IBM Corporation, 2002, 2006
|
||||
*
|
||||
* s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/preempt.h>
|
||||
#include <linux/stop_machine.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/kdebug.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
|
||||
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
|
||||
|
||||
int __kprobes arch_prepare_kprobe(struct kprobe *p)
|
||||
{
|
||||
/* Make sure the probe isn't going on a difficult instruction */
|
||||
if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
|
||||
return -EINVAL;
|
||||
|
||||
if ((unsigned long)p->addr & 0x01) {
|
||||
printk("Attempt to register kprobe at an unaligned address\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Use the get_insn_slot() facility for correctness */
|
||||
if (!(p->ainsn.insn = get_insn_slot()))
|
||||
return -ENOMEM;
|
||||
|
||||
memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
|
||||
|
||||
get_instruction_type(&p->ainsn);
|
||||
p->opcode = *p->addr;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
|
||||
{
|
||||
switch (*(__u8 *) instruction) {
|
||||
case 0x0c: /* bassm */
|
||||
case 0x0b: /* bsm */
|
||||
case 0x83: /* diag */
|
||||
case 0x44: /* ex */
|
||||
return -EINVAL;
|
||||
}
|
||||
switch (*(__u16 *) instruction) {
|
||||
case 0x0101: /* pr */
|
||||
case 0xb25a: /* bsa */
|
||||
case 0xb240: /* bakr */
|
||||
case 0xb258: /* bsg */
|
||||
case 0xb218: /* pc */
|
||||
case 0xb228: /* pt */
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
|
||||
{
|
||||
/* default fixup method */
|
||||
ainsn->fixup = FIXUP_PSW_NORMAL;
|
||||
|
||||
/* save r1 operand */
|
||||
ainsn->reg = (*ainsn->insn & 0xf0) >> 4;
|
||||
|
||||
/* save the instruction length (pop 5-5) in bytes */
|
||||
switch (*(__u8 *) (ainsn->insn) >> 4) {
|
||||
case 0:
|
||||
ainsn->ilen = 2;
|
||||
break;
|
||||
case 1:
|
||||
case 2:
|
||||
ainsn->ilen = 4;
|
||||
break;
|
||||
case 3:
|
||||
ainsn->ilen = 6;
|
||||
break;
|
||||
}
|
||||
|
||||
switch (*(__u8 *) ainsn->insn) {
|
||||
case 0x05: /* balr */
|
||||
case 0x0d: /* basr */
|
||||
ainsn->fixup = FIXUP_RETURN_REGISTER;
|
||||
/* if r2 = 0, no branch will be taken */
|
||||
if ((*ainsn->insn & 0x0f) == 0)
|
||||
ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
|
||||
break;
|
||||
case 0x06: /* bctr */
|
||||
case 0x07: /* bcr */
|
||||
ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
|
||||
break;
|
||||
case 0x45: /* bal */
|
||||
case 0x4d: /* bas */
|
||||
ainsn->fixup = FIXUP_RETURN_REGISTER;
|
||||
break;
|
||||
case 0x47: /* bc */
|
||||
case 0x46: /* bct */
|
||||
case 0x86: /* bxh */
|
||||
case 0x87: /* bxle */
|
||||
ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
|
||||
break;
|
||||
case 0x82: /* lpsw */
|
||||
ainsn->fixup = FIXUP_NOT_REQUIRED;
|
||||
break;
|
||||
case 0xb2: /* lpswe */
|
||||
if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) {
|
||||
ainsn->fixup = FIXUP_NOT_REQUIRED;
|
||||
}
|
||||
break;
|
||||
case 0xa7: /* bras */
|
||||
if ((*ainsn->insn & 0x0f) == 0x05) {
|
||||
ainsn->fixup |= FIXUP_RETURN_REGISTER;
|
||||
}
|
||||
break;
|
||||
case 0xc0:
|
||||
if ((*ainsn->insn & 0x0f) == 0x00 /* larl */
|
||||
|| (*ainsn->insn & 0x0f) == 0x05) /* brasl */
|
||||
ainsn->fixup |= FIXUP_RETURN_REGISTER;
|
||||
break;
|
||||
case 0xeb:
|
||||
if (*(((__u8 *) ainsn->insn) + 5 ) == 0x44 || /* bxhg */
|
||||
*(((__u8 *) ainsn->insn) + 5) == 0x45) {/* bxleg */
|
||||
ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
|
||||
}
|
||||
break;
|
||||
case 0xe3: /* bctg */
|
||||
if (*(((__u8 *) ainsn->insn) + 5) == 0x46) {
|
||||
ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static int __kprobes swap_instruction(void *aref)
|
||||
{
|
||||
struct ins_replace_args *args = aref;
|
||||
u32 *addr;
|
||||
u32 instr;
|
||||
int err = -EFAULT;
|
||||
|
||||
/*
|
||||
* Text segment is read-only, hence we use stura to bypass dynamic
|
||||
* address translation to exchange the instruction. Since stura
|
||||
* always operates on four bytes, but we only want to exchange two
|
||||
* bytes do some calculations to get things right. In addition we
|
||||
* shall not cross any page boundaries (vmalloc area!) when writing
|
||||
* the new instruction.
|
||||
*/
|
||||
addr = (u32 *)((unsigned long)args->ptr & -4UL);
|
||||
if ((unsigned long)args->ptr & 2)
|
||||
instr = ((*addr) & 0xffff0000) | args->new;
|
||||
else
|
||||
instr = ((*addr) & 0x0000ffff) | args->new << 16;
|
||||
|
||||
asm volatile(
|
||||
" lra %1,0(%1)\n"
|
||||
"0: stura %2,%1\n"
|
||||
"1: la %0,0\n"
|
||||
"2:\n"
|
||||
EX_TABLE(0b,2b)
|
||||
: "+d" (err)
|
||||
: "a" (addr), "d" (instr)
|
||||
: "memory", "cc");
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
void __kprobes arch_arm_kprobe(struct kprobe *p)
|
||||
{
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
unsigned long status = kcb->kprobe_status;
|
||||
struct ins_replace_args args;
|
||||
|
||||
args.ptr = p->addr;
|
||||
args.old = p->opcode;
|
||||
args.new = BREAKPOINT_INSTRUCTION;
|
||||
|
||||
kcb->kprobe_status = KPROBE_SWAP_INST;
|
||||
stop_machine_run(swap_instruction, &args, NR_CPUS);
|
||||
kcb->kprobe_status = status;
|
||||
}
|
||||
|
||||
void __kprobes arch_disarm_kprobe(struct kprobe *p)
|
||||
{
|
||||
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
|
||||
unsigned long status = kcb->kprobe_status;
|
||||
struct ins_replace_args args;
|
||||
|
||||
args.ptr = p->addr;
|
||||
args.old = BREAKPOINT_INSTRUCTION;
|
||||
args.new = p->opcode;
|
||||
|
||||
kcb->kprobe_status = KPROBE_SWAP_INST;
|
||||
stop_machine_run(swap_instruction, &args, NR_CPUS);
|
||||
kcb->kprobe_status = status;
|
||||
}
|
||||
|
||||
void __kprobes arch_remove_kprobe(struct kprobe *p)
|
||||
{
|
||||
mutex_lock(&kprobe_mutex);
|
||||
free_insn_slot(p->ainsn.insn, 0);
|
||||
mutex_unlock(&kprobe_mutex);
|
||||
}
|
||||
|
||||
static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
|
||||
{
|
||||
per_cr_bits kprobe_per_regs[1];
|
||||
|
||||
memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
|
||||
regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;
|
||||
|
||||
/* Set up the per control reg info, will pass to lctl */
|
||||
kprobe_per_regs[0].em_instruction_fetch = 1;
|
||||
kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
|
||||
kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;
|
||||
|
||||
/* Set the PER control regs, turns on single step for this address */
|
||||
__ctl_load(kprobe_per_regs, 9, 11);
|
||||
regs->psw.mask |= PSW_MASK_PER;
|
||||
regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
|
||||
}
|
||||
|
||||
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
kcb->prev_kprobe.kp = kprobe_running();
|
||||
kcb->prev_kprobe.status = kcb->kprobe_status;
|
||||
kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
|
||||
memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
|
||||
sizeof(kcb->kprobe_saved_ctl));
|
||||
}
|
||||
|
||||
static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
|
||||
kcb->kprobe_status = kcb->prev_kprobe.status;
|
||||
kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
|
||||
memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
|
||||
sizeof(kcb->kprobe_saved_ctl));
|
||||
}
|
||||
|
||||
static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
|
||||
struct kprobe_ctlblk *kcb)
|
||||
{
|
||||
__get_cpu_var(current_kprobe) = p;
|
||||
/* Save the interrupt and per flags */
|
||||
kcb->kprobe_saved_imask = regs->psw.mask &
|
||||
(PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
|
||||
/* Save the control regs that govern PER */
|
||||
__ctl_store(kcb->kprobe_saved_ctl, 9, 11);
|
||||
}
|
||||
|
||||
/* Called with kretprobe_lock held */
|
||||
void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct kretprobe_instance *ri;
|
||||
|
||||
if ((ri = get_free_rp_inst(rp)) != NULL) {
|
||||
ri->rp = rp;
|
||||
ri->task = current;
|
||||
ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
|
||||
|
||||
/* Replace the return addr with trampoline addr */
|
||||
regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
|
||||
|
||||
add_rp_inst(ri);
|
||||
} else {
|
||||
rp->nmissed++;
|
||||
}
|
||||
}
|
||||
|
||||
static int __kprobes kprobe_handler(struct pt_regs *regs)
|
||||
{
|
||||
struct kprobe *p;
|
||||
int ret = 0;
|
||||
unsigned long *addr = (unsigned long *)
|
||||
((regs->psw.addr & PSW_ADDR_INSN) - 2);
|
||||
struct kprobe_ctlblk *kcb;
|
||||
|
||||
/*
|
||||
* We don't want to be preempted for the entire
|
||||
* duration of kprobe processing
|
||||
*/
|
||||
preempt_disable();
|
||||
kcb = get_kprobe_ctlblk();
|
||||
|
||||
/* Check we're not actually recursing */
|
||||
if (kprobe_running()) {
|
||||
p = get_kprobe(addr);
|
||||
if (p) {
|
||||
if (kcb->kprobe_status == KPROBE_HIT_SS &&
|
||||
*p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
|
||||
regs->psw.mask &= ~PSW_MASK_PER;
|
||||
regs->psw.mask |= kcb->kprobe_saved_imask;
|
||||
goto no_kprobe;
|
||||
}
|
||||
/* We have reentered the kprobe_handler(), since
|
||||
* another probe was hit while within the handler.
|
||||
* We here save the original kprobes variables and
|
||||
* just single step on the instruction of the new probe
|
||||
* without calling any user handlers.
|
||||
*/
|
||||
save_previous_kprobe(kcb);
|
||||
set_current_kprobe(p, regs, kcb);
|
||||
kprobes_inc_nmissed_count(p);
|
||||
prepare_singlestep(p, regs);
|
||||
kcb->kprobe_status = KPROBE_REENTER;
|
||||
return 1;
|
||||
} else {
|
||||
p = __get_cpu_var(current_kprobe);
|
||||
if (p->break_handler && p->break_handler(p, regs)) {
|
||||
goto ss_probe;
|
||||
}
|
||||
}
|
||||
goto no_kprobe;
|
||||
}
|
||||
|
||||
p = get_kprobe(addr);
|
||||
if (!p)
|
||||
/*
|
||||
* No kprobe at this address. The fault has not been
|
||||
* caused by a kprobe breakpoint. The race of breakpoint
|
||||
* vs. kprobe remove does not exist because on s390 we
|
||||
* use stop_machine_run to arm/disarm the breakpoints.
|
||||
*/
|
||||
goto no_kprobe;
|
||||
|
||||
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
|
||||
set_current_kprobe(p, regs, kcb);
|
||||
if (p->pre_handler && p->pre_handler(p, regs))
|
||||
/* handler has already set things up, so skip ss setup */
|
||||
return 1;
|
||||
|
||||
ss_probe:
|
||||
prepare_singlestep(p, regs);
|
||||
kcb->kprobe_status = KPROBE_HIT_SS;
|
||||
return 1;
|
||||
|
||||
no_kprobe:
|
||||
preempt_enable_no_resched();
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Function return probe trampoline:
|
||||
* - init_kprobes() establishes a probepoint here
|
||||
* - When the probed function returns, this probe
|
||||
* causes the handlers to fire
|
||||
*/
|
||||
void kretprobe_trampoline_holder(void)
|
||||
{
|
||||
asm volatile(".global kretprobe_trampoline\n"
|
||||
"kretprobe_trampoline: bcr 0,0\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* Called when the probe at kretprobe trampoline is hit
|
||||
*/
|
||||
static int __kprobes trampoline_probe_handler(struct kprobe *p,
|
||||
struct pt_regs *regs)
|
||||
{
|
||||
struct kretprobe_instance *ri = NULL;
|
||||
struct hlist_head *head, empty_rp;
|
||||
struct hlist_node *node, *tmp;
|
||||
unsigned long flags, orig_ret_address = 0;
|
||||
unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
|
||||
|
||||
INIT_HLIST_HEAD(&empty_rp);
|
||||
spin_lock_irqsave(&kretprobe_lock, flags);
|
||||
head = kretprobe_inst_table_head(current);
|
||||
|
||||
/*
|
||||
* It is possible to have multiple instances associated with a given
|
||||
* task either because an multiple functions in the call path
|
||||
* have a return probe installed on them, and/or more then one return
|
||||
* return probe was registered for a target function.
|
||||
*
|
||||
* We can handle this because:
|
||||
* - instances are always inserted at the head of the list
|
||||
* - when multiple return probes are registered for the same
|
||||
* function, the first instance's ret_addr will point to the
|
||||
* real return address, and all the rest will point to
|
||||
* kretprobe_trampoline
|
||||
*/
|
||||
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
|
||||
if (ri->task != current)
|
||||
/* another task is sharing our hash bucket */
|
||||
continue;
|
||||
|
||||
if (ri->rp && ri->rp->handler)
|
||||
ri->rp->handler(ri, regs);
|
||||
|
||||
orig_ret_address = (unsigned long)ri->ret_addr;
|
||||
recycle_rp_inst(ri, &empty_rp);
|
||||
|
||||
if (orig_ret_address != trampoline_address) {
|
||||
/*
|
||||
* This is the real return address. Any other
|
||||
* instances associated with this task are for
|
||||
* other calls deeper on the call stack
|
||||
*/
|
||||
break;
|
||||
}
|
||||
}
|
||||
BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
|
||||
regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
|
||||
|
||||
reset_current_kprobe();
|
||||
spin_unlock_irqrestore(&kretprobe_lock, flags);
|
||||
preempt_enable_no_resched();
|
||||
|
||||
hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
|
||||
hlist_del(&ri->hlist);
|
||||
kfree(ri);
|
||||
}
|
||||
/*
|
||||
* By returning a non-zero value, we are telling
|
||||
* kprobe_handler() that we don't want the post_handler
|
||||
* to run (and have re-enabled preemption)
|
||||
*/
|
||||
return 1;
|
||||
}
|
||||
|
||||
/*
 * Called after single-stepping. p->addr is the address of the
 * instruction whose first byte has been replaced by the "breakpoint"
 * instruction. To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction. The address of this
 * copy is p->ainsn.insn.
 */
static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/* Work on the instruction address without the addressing-mode bit. */
	regs->psw.addr &= PSW_ADDR_INSN;

	/* Relocate the PSW from the single-stepped copy back into the
	   original instruction stream. */
	if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
		regs->psw.addr = (unsigned long)p->addr +
			((unsigned long)regs->psw.addr -
			 (unsigned long)p->ainsn.insn);

	/* Branch not taken: the PSW simply advanced by the instruction
	   length, so continue after the original instruction. */
	if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
		if ((unsigned long)regs->psw.addr -
		    (unsigned long)p->ainsn.insn == p->ainsn.ilen)
			regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;

	/* Instructions that store a return address (e.g. branch-and-link
	   forms) captured the copy's address; rebase that register too. */
	if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
		regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
			(regs->gprs[p->ainsn.reg] -
			(unsigned long)p->ainsn.insn))
			| PSW_ADDR_AMODE;

	regs->psw.addr |= PSW_ADDR_AMODE;
	/* turn off PER mode */
	regs->psw.mask &= ~PSW_MASK_PER;
	/* Restore the original per control regs */
	__ctl_load(kcb->kprobe_saved_ctl, 9, 11);
	regs->psw.mask |= kcb->kprobe_saved_imask;
}
|
||||
|
||||
/*
 * Invoked from kprobe_exceptions_notify() on the single-step event
 * (DIE_SSTEP). Runs the user post_handler, fixes the PSW back up via
 * resume_execution() and re-enables preemption. Returns non-zero when
 * the event was consumed by kprobes.
 */
static int __kprobes post_kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/* Not our single-step event. */
	if (!cur)
		return 0;

	/* Re-entrant hits get no post_handler call. */
	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	resume_execution(cur, regs);

	/*Restore back the original saved kprobes variables and continue. */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}
	reset_current_kprobe();
out:
	preempt_enable_no_resched();

	/*
	 * if somebody else is singlestepping across a probe point, psw mask
	 * will have PER set, in which case, continue the remaining processing
	 * of do_single_step, as if this is not a probe hit.
	 */
	if (regs->psw.mask & PSW_MASK_PER) {
		return 0;
	}

	return 1;
}
|
||||
|
||||
/*
 * Handle a fault that happened while a kprobe was active. Returns
 * non-zero if the fault was fully handled here, 0 to let the normal
 * fault handling continue.
 */
static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	const struct exception_table_entry *entry;

	switch(kcb->kprobe_status) {
	case KPROBE_SWAP_INST:
		/* We are here because the instruction replacement failed */
		return 0;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe and the nip points back to the probe address
		 * and allow the page fault handler to continue as a
		 * normal page fault.
		 */
		regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
		regs->psw.mask &= ~PSW_MASK_PER;
		regs->psw.mask |= kcb->kprobe_saved_imask;
		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();
		preempt_enable_no_resched();
		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting,
		 * we can also use npre/npostfault count for accounting
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page_fault, this could happen
		 * if handler tries to access user space by
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (entry) {
			regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
			return 1;
		}

		/*
		 * fixup_exception() could not handle it,
		 * Let do_page_fault() fix it.
		 */
		break;
	default:
		break;
	}
	return 0;
}
|
||||
|
||||
/*
 * Wrapper routine for handling exceptions: dispatches die notifier
 * events (breakpoint, single-step, faults) to the kprobes handlers.
 */
int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = (struct die_args *)data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_BPT:
		if (kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_SSTEP:
		if (post_kprobe_handler(args->regs))
			ret = NOTIFY_STOP;
		break;
	case DIE_TRAP:
	case DIE_PAGE_FAULT:
		/* kprobe_running() needs smp_processor_id() */
		preempt_disable();
		if (kprobe_running() &&
		    kprobe_fault_handler(args->regs, args->trapnr))
			ret = NOTIFY_STOP;
		preempt_enable();
		break;
	default:
		break;
	}
	return ret;
}
|
||||
|
||||
/*
 * Jprobe entry: save the complete register set and the top of the
 * stack, then redirect execution into the user's jprobe handler.
 * The saved state is restored by longjmp_break_handler() when the
 * handler calls jprobe_return().
 */
int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	unsigned long addr;
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));

	/* setup return addr to the jprobe handler routine */
	regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;

	/* r14 is the function return address */
	kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
	/* r15 is the stack pointer */
	kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
	addr = (unsigned long)kcb->jprobe_saved_r15;

	/* Snapshot the stack area the handler may clobber so it can be
	   put back verbatim in longjmp_break_handler(). */
	memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
	       MIN_STACK_SIZE(addr));
	return 1;
}
|
||||
|
||||
/*
 * Called by the user's jprobe handler to hand control back to the
 * kprobes core. NOTE(review): 0x0002 appears to be the kprobes
 * breakpoint opcode on s390 -- executing it traps back into the
 * breakpoint path, which runs longjmp_break_handler(); confirm
 * against the BREAKPOINT_INSTRUCTION definition.
 */
void __kprobes jprobe_return(void)
{
	asm volatile(".word 0x0002");
}
|
||||
|
||||
/*
 * Marker function placed directly after jprobe_return(); "bcr 0,0"
 * is a no-op. NOTE(review): presumably used only to delimit the end
 * of jprobe_return in memory -- verify against callers.
 */
void __kprobes jprobe_return_end(void)
{
	asm volatile("bcr 0,0");
}
|
||||
|
||||
/*
 * Break handler reached when the jprobe handler executes
 * jprobe_return(): restore the registers and the stack area saved by
 * setjmp_pre_handler() so execution resumes in the original function.
 */
int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);

	/* Put the regs back */
	memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
	/* put the stack back */
	memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
	       MIN_STACK_SIZE(stack_addr));
	preempt_enable_no_resched();
	return 1;
}
|
||||
|
||||
/*
 * Kprobe planted on kretprobe_trampoline: every return from a
 * kretprobed function lands on the trampoline and therefore fires
 * trampoline_probe_handler().
 */
static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) & kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};
|
||||
|
||||
/*
 * Architecture init for kprobes: install the trampoline probe so that
 * kretprobe returns are intercepted.
 */
int __init arch_init_kprobes(void)
{
	int rc;

	rc = register_kprobe(&trampoline_p);
	return rc;
}
|
||||
71
arch/s390/kernel/machine_kexec.c
Normal file
71
arch/s390/kernel/machine_kexec.c
Normal file
@@ -0,0 +1,71 @@
|
||||
/*
|
||||
* arch/s390/kernel/machine_kexec.c
|
||||
*
|
||||
* Copyright IBM Corp. 2005,2006
|
||||
*
|
||||
* Author(s): Rolf Adelsberger,
|
||||
* Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/device.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/kexec.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <asm/cio.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/reset.h>
|
||||
#include <asm/ipl.h>
|
||||
|
||||
typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
|
||||
|
||||
extern const unsigned char relocate_kernel[];
|
||||
extern const unsigned long long relocate_kernel_len;
|
||||
|
||||
/*
 * Validate a kexec image and copy the relocation assembler code into
 * the kexec control page. Returns 0 on success or a negative errno.
 */
int machine_kexec_prepare(struct kimage *image)
{
	void *reboot_code_buffer;

	/* Can't replace kernel image since it is read-only. */
	if (ipl_flags & IPL_NSS_VALID)
		return -ENOSYS;

	/* We don't support anything but the default image type for now. */
	if (image->type != KEXEC_TYPE_DEFAULT)
		return -EINVAL;

	/* Get the destination where the assembler code should be copied to.*/
	reboot_code_buffer = (void *) page_to_phys(image->control_code_page);

	/* Then copy it */
	memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
	return 0;
}
|
||||
|
||||
void machine_kexec_cleanup(struct kimage *image)
{
	/* Nothing to undo: machine_kexec_prepare() only copies code into
	   the control page, which is managed by the kexec core. */
}
|
||||
|
||||
void machine_shutdown(void)
{
	/* Only logs the call; the actual quiescing (stopping CPUs,
	   system reset) is done later in machine_kexec(). */
	printk(KERN_INFO "kexec: machine_shutdown called\n");
}
|
||||
|
||||
/*
 * Stop the other CPUs, reset the system and jump into the relocation
 * code copied to the control page by machine_kexec_prepare().
 * Does not return.
 */
void machine_kexec(struct kimage *image)
{
	relocate_kernel_t data_mover;

	/* Quiesce: stop secondary CPUs and pseudo-page-fault handling,
	   then bring the machine into a defined reset state. */
	smp_send_stop();
	pfault_fini();
	s390_reset_system();

	data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);

	/* Call the moving routine */
	(*data_mover)(&image->head, image->start);
	/* Never reached. */
	for (;;);
}
|
||||
406
arch/s390/kernel/module.c
Normal file
406
arch/s390/kernel/module.c
Normal file
@@ -0,0 +1,406 @@
|
||||
/*
|
||||
* arch/s390/kernel/module.c - Kernel module help for s390.
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 2002, 2003 IBM Deutschland Entwicklung GmbH,
|
||||
* IBM Corporation
|
||||
* Author(s): Arnd Bergmann (arndb@de.ibm.com)
|
||||
* Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||
*
|
||||
* based on i386 version
|
||||
* Copyright (C) 2001 Rusty Russell.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
|
||||
*/
|
||||
#include <linux/module.h>
|
||||
#include <linux/elf.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/fs.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/moduleloader.h>
|
||||
|
||||
#if 0
|
||||
#define DEBUGP printk
|
||||
#else
|
||||
#define DEBUGP(fmt , ...)
|
||||
#endif
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
#define PLT_ENTRY_SIZE 12
|
||||
#else /* CONFIG_64BIT */
|
||||
#define PLT_ENTRY_SIZE 20
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
void *module_alloc(unsigned long size)
|
||||
{
|
||||
if (size == 0)
|
||||
return NULL;
|
||||
return vmalloc(size);
|
||||
}
|
||||
|
||||
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
	/* FIXME: If module_region == mod->init_region, trim exception
	   table entries. */
}
|
||||
|
||||
/*
 * First pass over one relocation entry: reserve space in the module's
 * got or plt for relocation types that need an entry there. Each
 * symbol gets at most one got and one plt slot (its got_offset /
 * plt_offset starts out as -1UL, see module_frob_arch_sections()).
 */
static void
check_rela(Elf_Rela *rela, struct module *me)
{
	struct mod_arch_syminfo *info;

	info = me->arch.syminfo + ELF_R_SYM (rela->r_info);
	switch (ELF_R_TYPE (rela->r_info)) {
	case R_390_GOT12:	/* 12 bit GOT offset. */
	case R_390_GOT16:	/* 16 bit GOT offset. */
	case R_390_GOT20:	/* 20 bit GOT offset. */
	case R_390_GOT32:	/* 32 bit GOT offset. */
	case R_390_GOT64:	/* 64 bit GOT offset. */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot. */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot. */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot. */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot. */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot. */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		if (info->got_offset == -1UL) {
			info->got_offset = me->arch.got_size;
			me->arch.got_size += sizeof(void*);
		}
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1. */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1. */
	case R_390_PLT32:	/* 32 bit PC relative PLT address. */
	case R_390_PLT64:	/* 64 bit PC relative PLT address. */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
		if (info->plt_offset == -1UL) {
			info->plt_offset = me->arch.plt_size;
			me->arch.plt_size += PLT_ENTRY_SIZE;
		}
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:
	case R_390_JMP_SLOT:
	case R_390_RELATIVE:
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		break;
	}
}
|
||||
|
||||
/*
 * Account for GOT and PLT relocations. We can't add sections for
 * got and plt but we can increase the core module size.
 *
 * Allocates one mod_arch_syminfo per symbol, scans all RELA sections
 * via check_rela() to size the got/plt, and reserves the space at the
 * end of the module core. Returns 0 or a negative errno.
 */
int
module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
			  char *secstrings, struct module *me)
{
	Elf_Shdr *symtab;
	Elf_Sym *symbols;
	Elf_Rela *rela;
	char *strings;
	int nrela, i, j;

	/* Find symbol table and string table. */
	symtab = NULL;
	for (i = 0; i < hdr->e_shnum; i++)
		switch (sechdrs[i].sh_type) {
		case SHT_SYMTAB:
			symtab = sechdrs + i;
			break;
		}
	if (!symtab) {
		printk(KERN_ERR "module %s: no symbol table\n", me->name);
		return -ENOEXEC;
	}

	/* Allocate one syminfo structure per symbol. */
	me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
	me->arch.syminfo = vmalloc(me->arch.nsyms *
				   sizeof(struct mod_arch_syminfo));
	if (!me->arch.syminfo)
		return -ENOMEM;
	symbols = (void *) hdr + symtab->sh_offset;
	strings = (void *) hdr + sechdrs[symtab->sh_link].sh_offset;
	for (i = 0; i < me->arch.nsyms; i++) {
		if (symbols[i].st_shndx == SHN_UNDEF &&
		    strcmp(strings + symbols[i].st_name,
			   "_GLOBAL_OFFSET_TABLE_") == 0)
			/* "Define" it as absolute. */
			symbols[i].st_shndx = SHN_ABS;
		/* -1UL marks "no slot assigned yet" for check_rela(). */
		me->arch.syminfo[i].got_offset = -1UL;
		me->arch.syminfo[i].plt_offset = -1UL;
		me->arch.syminfo[i].got_initialized = 0;
		me->arch.syminfo[i].plt_initialized = 0;
	}

	/* Search for got/plt relocations. */
	me->arch.got_size = me->arch.plt_size = 0;
	for (i = 0; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_RELA)
			continue;
		nrela = sechdrs[i].sh_size / sizeof(Elf_Rela);
		rela = (void *) hdr + sechdrs[i].sh_offset;
		for (j = 0; j < nrela; j++)
			check_rela(rela + j, me);
	}

	/* Increase core size by size of got & plt and set start
	   offsets for got and plt. */
	me->core_size = ALIGN(me->core_size, 4);
	me->arch.got_offset = me->core_size;
	me->core_size += me->arch.got_size;
	me->arch.plt_offset = me->core_size;
	me->core_size += me->arch.plt_size;
	return 0;
}
|
||||
|
||||
/*
 * Plain REL relocation sections are not supported on s390; modules
 * must use RELA (handled by apply_relocate_add()).
 */
int
apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
	       unsigned int relsec, struct module *me)
{
	printk(KERN_ERR "module %s: RELOCATION unsupported\n",
	       me->name);
	return -ENOEXEC;
}
|
||||
|
||||
/*
 * Apply one RELA relocation. 'loc' is the address being patched and
 * 'val' the resolved symbol value, adjusted per relocation type
 * (addend, PC/GOT/PLT relative). GOT and PLT entries are created
 * lazily on first use, in the slots reserved by check_rela().
 * Returns 0 on success, -ENOEXEC for an unknown relocation type.
 */
static int
apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
	   struct module *me)
{
	struct mod_arch_syminfo *info;
	Elf_Addr loc, val;
	int r_type, r_sym;

	/* This is where to make the change */
	loc = base + rela->r_offset;
	/* This is the symbol it is referring to. Note that all
	   undefined symbols have been resolved. */
	r_sym = ELF_R_SYM(rela->r_info);
	r_type = ELF_R_TYPE(rela->r_info);
	info = me->arch.syminfo + r_sym;
	val = symtab[r_sym].st_value;

	switch (r_type) {
	case R_390_8:		/* Direct 8 bit. */
	case R_390_12:		/* Direct 12 bit. */
	case R_390_16:		/* Direct 16 bit. */
	case R_390_20:		/* Direct 20 bit. */
	case R_390_32:		/* Direct 32 bit. */
	case R_390_64:		/* Direct 64 bit. */
		val += rela->r_addend;
		if (r_type == R_390_8)
			*(unsigned char *) loc = val;
		else if (r_type == R_390_12)
			/* 12-bit field shares the halfword with other bits;
			   merge instead of overwriting. */
			*(unsigned short *) loc = (val & 0xfff) |
				(*(unsigned short *) loc & 0xf000);
		else if (r_type == R_390_16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_20)
			/* 20-bit displacement is split into low 12 and
			   high 8 bits within the instruction word. */
			*(unsigned int *) loc =
				(*(unsigned int *) loc & 0xf00000ff) |
				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
		else if (r_type == R_390_32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_64)
			*(unsigned long *) loc = val;
		break;
	case R_390_PC16:	/* PC relative 16 bit. */
	case R_390_PC16DBL:	/* PC relative 16 bit shifted by 1. */
	case R_390_PC32DBL:	/* PC relative 32 bit shifted by 1. */
	case R_390_PC32:	/* PC relative 32 bit. */
	case R_390_PC64:	/* PC relative 64 bit. */
		val += rela->r_addend - loc;
		if (r_type == R_390_PC16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_PC16DBL)
			*(unsigned short *) loc = val >> 1;
		else if (r_type == R_390_PC32DBL)
			*(unsigned int *) loc = val >> 1;
		else if (r_type == R_390_PC32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_PC64)
			*(unsigned long *) loc = val;
		break;
	case R_390_GOT12:	/* 12 bit GOT offset. */
	case R_390_GOT16:	/* 16 bit GOT offset. */
	case R_390_GOT20:	/* 20 bit GOT offset. */
	case R_390_GOT32:	/* 32 bit GOT offset. */
	case R_390_GOT64:	/* 64 bit GOT offset. */
	case R_390_GOTENT:	/* 32 bit PC rel. to GOT entry shifted by 1. */
	case R_390_GOTPLT12:	/* 12 bit offset to jump slot. */
	case R_390_GOTPLT20:	/* 20 bit offset to jump slot. */
	case R_390_GOTPLT16:	/* 16 bit offset to jump slot. */
	case R_390_GOTPLT32:	/* 32 bit offset to jump slot. */
	case R_390_GOTPLT64:	/* 64 bit offset to jump slot. */
	case R_390_GOTPLTENT:	/* 32 bit rel. offset to jump slot >> 1. */
		/* Fill in the symbol's GOT slot on first use. */
		if (info->got_initialized == 0) {
			Elf_Addr *gotent;

			gotent = me->module_core + me->arch.got_offset +
				info->got_offset;
			*gotent = val;
			info->got_initialized = 1;
		}
		val = info->got_offset + rela->r_addend;
		if (r_type == R_390_GOT12 ||
		    r_type == R_390_GOTPLT12)
			*(unsigned short *) loc = (val & 0xfff) |
				(*(unsigned short *) loc & 0xf000);
		else if (r_type == R_390_GOT16 ||
			 r_type == R_390_GOTPLT16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_GOT20 ||
			 r_type == R_390_GOTPLT20)
			*(unsigned int *) loc =
				(*(unsigned int *) loc & 0xf00000ff) |
				(val & 0xfff) << 16 | (val & 0xff000) >> 4;
		else if (r_type == R_390_GOT32 ||
			 r_type == R_390_GOTPLT32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_GOTENT ||
			 r_type == R_390_GOTPLTENT)
			*(unsigned int *) loc =
				(val + (Elf_Addr) me->module_core - loc) >> 1;
		else if (r_type == R_390_GOT64 ||
			 r_type == R_390_GOTPLT64)
			*(unsigned long *) loc = val;
		break;
	case R_390_PLT16DBL:	/* 16 bit PC rel. PLT shifted by 1. */
	case R_390_PLT32DBL:	/* 32 bit PC rel. PLT shifted by 1. */
	case R_390_PLT32:	/* 32 bit PC relative PLT address. */
	case R_390_PLT64:	/* 64 bit PC relative PLT address. */
	case R_390_PLTOFF16:	/* 16 bit offset from GOT to PLT. */
	case R_390_PLTOFF32:	/* 32 bit offset from GOT to PLT. */
	case R_390_PLTOFF64:	/* 64 bit offset from GOT to PLT. */
		/* Emit the symbol's PLT stub on first use: a small
		   trampoline that loads the target address and branches
		   to it. */
		if (info->plt_initialized == 0) {
			unsigned int *ip;
			ip = me->module_core + me->arch.plt_offset +
				info->plt_offset;
#ifndef CONFIG_64BIT
			ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
			ip[1] = 0x100607f1;
			ip[2] = val;
#else /* CONFIG_64BIT */
			ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
			ip[1] = 0x100a0004;
			ip[2] = 0x07f10000;
			ip[3] = (unsigned int) (val >> 32);
			ip[4] = (unsigned int) val;
#endif /* CONFIG_64BIT */
			info->plt_initialized = 1;
		}
		if (r_type == R_390_PLTOFF16 ||
		    r_type == R_390_PLTOFF32
		    || r_type == R_390_PLTOFF64
		)
			/* PLTOFF*: offset of the stub relative to the GOT. */
			val = me->arch.plt_offset - me->arch.got_offset +
				info->plt_offset + rela->r_addend;
		else
			/* PLT*: PC-relative address of the stub. */
			val = (Elf_Addr) me->module_core +
				me->arch.plt_offset + info->plt_offset +
				rela->r_addend - loc;
		if (r_type == R_390_PLT16DBL)
			*(unsigned short *) loc = val >> 1;
		else if (r_type == R_390_PLTOFF16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_PLT32DBL)
			*(unsigned int *) loc = val >> 1;
		else if (r_type == R_390_PLT32 ||
			 r_type == R_390_PLTOFF32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_PLT64 ||
			 r_type == R_390_PLTOFF64)
			*(unsigned long *) loc = val;
		break;
	case R_390_GOTOFF16:	/* 16 bit offset to GOT. */
	case R_390_GOTOFF32:	/* 32 bit offset to GOT. */
	case R_390_GOTOFF64:	/* 64 bit offset to GOT. */
		val = val + rela->r_addend -
			((Elf_Addr) me->module_core + me->arch.got_offset);
		if (r_type == R_390_GOTOFF16)
			*(unsigned short *) loc = val;
		else if (r_type == R_390_GOTOFF32)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_GOTOFF64)
			*(unsigned long *) loc = val;
		break;
	case R_390_GOTPC:	/* 32 bit PC relative offset to GOT. */
	case R_390_GOTPCDBL:	/* 32 bit PC rel. off. to GOT shifted by 1. */
		val = (Elf_Addr) me->module_core + me->arch.got_offset +
			rela->r_addend - loc;
		if (r_type == R_390_GOTPC)
			*(unsigned int *) loc = val;
		else if (r_type == R_390_GOTPCDBL)
			*(unsigned int *) loc = val >> 1;
		break;
	case R_390_COPY:
	case R_390_GLOB_DAT:	/* Create GOT entry. */
	case R_390_JMP_SLOT:	/* Create PLT entry. */
	case R_390_RELATIVE:	/* Adjust by program base. */
		/* Only needed if we want to support loading of
		   modules linked with -shared. */
		break;
	default:
		printk(KERN_ERR "module %s: Unknown relocation: %u\n",
		       me->name, r_type);
		return -ENOEXEC;
	}
	return 0;
}
|
||||
|
||||
int
|
||||
apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
|
||||
unsigned int symindex, unsigned int relsec,
|
||||
struct module *me)
|
||||
{
|
||||
Elf_Addr base;
|
||||
Elf_Sym *symtab;
|
||||
Elf_Rela *rela;
|
||||
unsigned long i, n;
|
||||
int rc;
|
||||
|
||||
DEBUGP("Applying relocate section %u to %u\n",
|
||||
relsec, sechdrs[relsec].sh_info);
|
||||
base = sechdrs[sechdrs[relsec].sh_info].sh_addr;
|
||||
symtab = (Elf_Sym *) sechdrs[symindex].sh_addr;
|
||||
rela = (Elf_Rela *) sechdrs[relsec].sh_addr;
|
||||
n = sechdrs[relsec].sh_size / sizeof(Elf_Rela);
|
||||
|
||||
for (i = 0; i < n; i++, rela++) {
|
||||
rc = apply_rela(rela, base, symtab, me);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int module_finalize(const Elf_Ehdr *hdr,
		    const Elf_Shdr *sechdrs,
		    struct module *me)
{
	/* The per-symbol got/plt bookkeeping is only needed while
	   relocations are applied; release it now. */
	vfree(me->arch.syminfo);
	return 0;
}
|
||||
|
||||
void module_arch_cleanup(struct module *mod)
{
	/* Nothing to do: arch.syminfo is already freed in
	   module_finalize(). */
}
|
||||
389
arch/s390/kernel/process.c
Normal file
389
arch/s390/kernel/process.c
Normal file
@@ -0,0 +1,389 @@
|
||||
/*
|
||||
* arch/s390/kernel/process.c
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
|
||||
* Hartmut Penner (hp@de.ibm.com),
|
||||
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
|
||||
*
|
||||
* Derived from "arch/i386/kernel/process.c"
|
||||
* Copyright (C) 1995, Linus Torvalds
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file handles the architecture-dependent parts of process handling..
|
||||
*/
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/unistd.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/user.h>
|
||||
#include <linux/a.out.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/notifier.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/processor.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/timer.h>
|
||||
|
||||
asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
|
||||
|
||||
/*
 * Return saved PC of a blocked thread. used in kernel/sched.
 * resume in entry.S does not create a new stack frame, it
 * just stores the registers %r6-%r15 to the frame given by
 * schedule. We want to return the address of the caller of
 * schedule, so we have to walk the backchain one time to
 * find the frame schedule() store its return address.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct stack_frame *sf, *low, *high;

	if (!tsk || !task_stack_page(tsk))
		return 0;
	/* Valid frames must lie within the task's kernel stack. */
	low = task_stack_page(tsk);
	high = (struct stack_frame *) task_pt_regs(tsk);
	sf = (struct stack_frame *) (tsk->thread.ksp & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	/* One step up the backchain: the frame schedule() saved into. */
	sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	/* gprs[8] of the frame holds the saved r14 (return address). */
	return sf->gprs[8];
}
|
||||
|
||||
/*
 * Need to know about CPUs going idle?
 * Notifier chain fired on idle state transitions, see default_idle()
 * and do_monitor_call().
 */
static ATOMIC_NOTIFIER_HEAD(idle_chain);
|
||||
|
||||
/* Subscribe to CPU idle state change notifications. */
int register_idle_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&idle_chain, nb);
}
EXPORT_SYMBOL(register_idle_notifier);
|
||||
|
||||
/* Unsubscribe from CPU idle state change notifications. */
int unregister_idle_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&idle_chain, nb);
}
EXPORT_SYMBOL(unregister_idle_notifier);
|
||||
|
||||
/*
 * Monitor call handler: disables monitor call class 0 again (it was
 * enabled when the CPU went idle) and tells the idle notifier chain
 * that this CPU is no longer idle.
 */
void do_monitor_call(struct pt_regs *regs, long interruption_code)
{
	/* disable monitor call class 0 */
	__ctl_clear_bit(8, 15);

	atomic_notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
				   (void *)(long) smp_processor_id());
}
|
||||
|
||||
extern void s390_handle_mcck(void);
/*
 * The idle loop on a S390...
 * Notifies the idle chain, handles CPU hotplug and pending machine
 * checks, then loads an enabled-wait PSW until an interrupt arrives.
 * The order of the checks below (irq off first, mcck check last)
 * must be preserved to avoid losing wakeups.
 */
static void default_idle(void)
{
	int cpu, rc;

	/* CPU is going idle. */
	cpu = smp_processor_id();

	/* Close the race window with need_resched(): check it with
	   interrupts disabled before committing to the wait PSW. */
	local_irq_disable();
	if (need_resched()) {
		local_irq_enable();
		return;
	}

	rc = atomic_notifier_call_chain(&idle_chain,
					CPU_IDLE, (void *)(long) cpu);
	if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
		BUG();
	/* A notifier may veto entering the idle state. */
	if (rc != NOTIFY_OK) {
		local_irq_enable();
		return;
	}

	/* enable monitor call class 0 */
	__ctl_set_bit(8, 15);

#ifdef CONFIG_HOTPLUG_CPU
	if (cpu_is_offline(cpu)) {
		preempt_enable_no_resched();
		cpu_die();
	}
#endif

	/* Run any pending machine check before going to sleep. */
	local_mcck_disable();
	if (test_thread_flag(TIF_MCCK_PENDING)) {
		local_mcck_enable();
		local_irq_enable();
		s390_handle_mcck();
		return;
	}

	trace_hardirqs_on();
	/* Wait for external, I/O or machine check interrupt. */
	__load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
			PSW_MASK_IO | PSW_MASK_EXT);
}
|
||||
|
||||
/*
 * Per-CPU idle loop entry point: idle until work is pending, then
 * schedule; preemption is re-disabled before looping again.
 */
void cpu_idle(void)
{
	for (;;) {
		while (!need_resched())
			default_idle();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
|
||||
|
||||
/*
 * Dump the given register set plus process identification, and a
 * stack backtrace when the registers are from kernel context.
 */
void show_regs(struct pt_regs *regs)
{
	struct task_struct *tsk = current;

	printk("CPU: %d %s\n", task_thread_info(tsk)->cpu, print_tainted());
	printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
	       current->comm, current->pid, (void *) tsk,
	       (void *) tsk->thread.ksp);

	show_registers(regs);
	/* Show stack backtrace if pt_regs is from kernel mode */
	if (!(regs->psw.mask & PSW_MASK_PSTATE))
		show_trace(NULL, (unsigned long *) regs->gprs[15]);
}
|
||||
|
||||
extern void kernel_thread_starter(void);

/*
 * Entry stub for kernel threads created by kernel_thread() below:
 * loads the argument (gpr10) into gpr2, calls the thread function
 * (gpr9), then zeroes gpr2 (exit code 0) and branches to do_exit
 * (gpr11) -- see the register setup in kernel_thread().
 */
asm(
	".align 4\n"
	"kernel_thread_starter:\n"
	" la 2,0(10)\n"
	" basr 14,9\n"
	" la 2,0\n"
	" br 11\n");
|
||||
|
||||
/*
 * Create a kernel thread: build a register set that starts execution
 * in kernel_thread_starter with fn, arg and do_exit preloaded into
 * gprs 9, 10 and 11, then clone it via do_fork().
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));
	regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
	regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
	regs.gprs[9] = (unsigned long) fn;
	regs.gprs[10] = (unsigned long) arg;
	regs.gprs[11] = (unsigned long) do_exit;
	regs.orig_gpr2 = -1;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED,
		       0, &regs, 0, NULL, NULL);
}
|
||||
|
||||
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* Nothing to do on s390. */
}
|
||||
|
||||
/*
 * Reset per-thread state for exec: forget any FPU usage so the new
 * program starts with a clean floating point context.
 */
void flush_thread(void)
{
	clear_used_math();
	clear_tsk_thread_flag(current, TIF_USEDFPU);
}
|
||||
|
||||
void release_thread(struct task_struct *dead_task)
{
	/* Nothing to release on s390. */
}
|
||||
|
||||
/*
 * Set up the kernel stack and register state of a newly forked task:
 * build a fake stack frame on top of the child's pt_regs so that
 * resume() "returns" into ret_from_fork, and copy FPU and access
 * register state from the parent. Returns 0.
 */
int copy_thread(int nr, unsigned long clone_flags, unsigned long new_stackp,
	unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	/* Layout at the top of the child's kernel stack: a stack frame
	   for resume() directly below the child's pt_regs. */
	struct fake_frame
	{
		struct stack_frame sf;
		struct pt_regs childregs;
	} *frame;

	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
	p->thread.ksp = (unsigned long) frame;
	/* Store access registers to kernel stack of new process. */
	frame->childregs = *regs;
	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
	frame->childregs.gprs[15] = new_stackp;
	frame->sf.back_chain = 0;

	/* new return point is ret_from_fork */
	frame->sf.gprs[8] = (unsigned long) ret_from_fork;

	/* fake return stack for resume(), don't go back to schedule */
	frame->sf.gprs[9] = (unsigned long) frame;

	/* Save access registers to new thread structure. */
	save_access_regs(&p->thread.acrs[0]);

#ifndef CONFIG_64BIT
	/*
	 * save fprs to current->thread.fp_regs to merge them with
	 * the emulated registers and then copy the result to the child.
	 */
	save_fp_regs(&current->thread.fp_regs);
	memcpy(&p->thread.fp_regs, &current->thread.fp_regs,
	       sizeof(s390_fp_regs));
	p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _SEGMENT_TABLE;
	/* Set a new TLS ? */
	if (clone_flags & CLONE_SETTLS)
		p->thread.acrs[0] = regs->gprs[6];
#else /* CONFIG_64BIT */
	/* Save the fpu registers to new thread structure. */
	save_fp_regs(&p->thread.fp_regs);
	p->thread.user_seg = __pa((unsigned long) p->mm->pgd) | _REGION_TABLE;
	/* Set a new TLS ? */
	if (clone_flags & CLONE_SETTLS) {
		if (test_thread_flag(TIF_31BIT)) {
			p->thread.acrs[0] = (unsigned int) regs->gprs[6];
		} else {
			/* 64-bit TLS value is split across acrs[0]/acrs[1]. */
			p->thread.acrs[0] = (unsigned int)(regs->gprs[6] >> 32);
			p->thread.acrs[1] = (unsigned int) regs->gprs[6];
		}
	}
#endif /* CONFIG_64BIT */
	/* start new process with ar4 pointing to the correct address space */
	p->thread.mm_segment = get_fs();
	/* Don't copy debug registers */
	memset(&p->thread.per_info,0,sizeof(p->thread.per_info));

	return 0;
}
|
||||
|
||||
asmlinkage long sys_fork(struct pt_regs regs)
|
||||
{
|
||||
return do_fork(SIGCHLD, regs.gprs[15], ®s, 0, NULL, NULL);
|
||||
}
|
||||
|
||||
asmlinkage long sys_clone(struct pt_regs regs)
|
||||
{
|
||||
unsigned long clone_flags;
|
||||
unsigned long newsp;
|
||||
int __user *parent_tidptr, *child_tidptr;
|
||||
|
||||
clone_flags = regs.gprs[3];
|
||||
newsp = regs.orig_gpr2;
|
||||
parent_tidptr = (int __user *) regs.gprs[4];
|
||||
child_tidptr = (int __user *) regs.gprs[5];
|
||||
if (!newsp)
|
||||
newsp = regs.gprs[15];
|
||||
return do_fork(clone_flags, newsp, ®s, 0,
|
||||
parent_tidptr, child_tidptr);
|
||||
}
|
||||
|
||||
/*
|
||||
* This is trivial, and on the face of it looks like it
|
||||
* could equally well be done in user mode.
|
||||
*
|
||||
* Not so, for quite unobvious reasons - register pressure.
|
||||
* In user mode vfork() cannot have a stack frame, and if
|
||||
* done by calling the "clone()" system call directly, you
|
||||
* do not have enough call-clobbered registers to hold all
|
||||
* the information you need.
|
||||
*/
|
||||
asmlinkage long sys_vfork(struct pt_regs regs)
|
||||
{
|
||||
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
|
||||
regs.gprs[15], ®s, 0, NULL, NULL);
|
||||
}
|
||||
|
||||
/*
|
||||
* sys_execve() executes a new program.
|
||||
*/
|
||||
asmlinkage long sys_execve(struct pt_regs regs)
|
||||
{
|
||||
int error;
|
||||
char * filename;
|
||||
|
||||
filename = getname((char __user *) regs.orig_gpr2);
|
||||
error = PTR_ERR(filename);
|
||||
if (IS_ERR(filename))
|
||||
goto out;
|
||||
error = do_execve(filename, (char __user * __user *) regs.gprs[3],
|
||||
(char __user * __user *) regs.gprs[4], ®s);
|
||||
if (error == 0) {
|
||||
task_lock(current);
|
||||
current->ptrace &= ~PT_DTRACE;
|
||||
task_unlock(current);
|
||||
current->thread.fp_regs.fpc = 0;
|
||||
if (MACHINE_HAS_IEEE)
|
||||
asm volatile("sfpc %0,%0" : : "d" (0));
|
||||
}
|
||||
putname(filename);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
|
||||
/*
 * fill in the FPU structure for a core dump.
 *
 * @regs:   unused here
 * @fpregs: destination buffer for the floating point state
 *
 * Always returns 1 (FP state present in the dump).
 */
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
#ifndef CONFIG_64BIT
	/*
	 * save fprs to current->thread.fp_regs to merge them with
	 * the emulated registers and then copy the result to the dump.
	 */
	save_fp_regs(&current->thread.fp_regs);
	memcpy(fpregs, &current->thread.fp_regs, sizeof(s390_fp_regs));
#else /* CONFIG_64BIT */
	/* 64 bit: no emulation, save the hardware registers directly. */
	save_fp_regs(fpregs);
#endif /* CONFIG_64BIT */
	return 1;
}
|
||||
|
||||
/*
 * Return the address at which a sleeping task is waiting, for
 * /proc/<pid>/wchan: walk up to 16 frames of the task's kernel stack
 * back chain and return the first return address that is not inside
 * the scheduler.  Returns 0 if the task is running, is the caller,
 * or the back chain cannot be followed safely.
 */
unsigned long get_wchan(struct task_struct *p)
{
	struct stack_frame *sf, *low, *high;
	unsigned long return_address;
	int count;

	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
		return 0;
	/* Valid frames lie between the stack base and the pt_regs area. */
	low = task_stack_page(p);
	high = (struct stack_frame *) task_pt_regs(p);
	sf = (struct stack_frame *) (p->thread.ksp & PSW_ADDR_INSN);
	if (sf <= low || sf > high)
		return 0;
	for (count = 0; count < 16; count++) {
		sf = (struct stack_frame *) (sf->back_chain & PSW_ADDR_INSN);
		/* Bail out if the chain leaves the task's kernel stack. */
		if (sf <= low || sf > high)
			return 0;
		/* gprs[8] in the frame save area holds r14 (return addr). */
		return_address = sf->gprs[8] & PSW_ADDR_INSN;
		if (!in_sched_functions(return_address))
			return return_address;
	}
	return 0;
}
|
||||
|
||||
769
arch/s390/kernel/ptrace.c
Normal file
769
arch/s390/kernel/ptrace.c
Normal file
@@ -0,0 +1,769 @@
|
||||
/*
|
||||
* arch/s390/kernel/ptrace.c
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
|
||||
* Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||
*
|
||||
* Based on PowerPC version
|
||||
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
|
||||
*
|
||||
* Derived from "arch/m68k/kernel/ptrace.c"
|
||||
* Copyright (C) 1994 by Hamish Macdonald
|
||||
* Taken from linux/kernel/ptrace.c and modified for M680x0.
|
||||
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
|
||||
*
|
||||
* Modified by Cort Dougan (cort@cs.nmt.edu)
|
||||
*
|
||||
*
|
||||
* This file is subject to the terms and conditions of the GNU General
|
||||
* Public License. See the file README.legal in the main directory of
|
||||
* this archive for more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/user.h>
|
||||
#include <linux/security.h>
|
||||
#include <linux/audit.h>
|
||||
#include <linux/signal.h>
|
||||
|
||||
#include <asm/segment.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/pgtable.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/unistd.h>
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
#include "compat_ptrace.h"
|
||||
#endif
|
||||
|
||||
/*
 * Recompute the PER (program-event recording) control register image
 * and the PER bit in the task's user PSW from the thread's per_info
 * settings.  Called after anything that may change single-step or PER
 * trace state.
 */
static void
FixPerRegisters(struct task_struct *task)
{
	struct pt_regs *regs;
	per_struct *per_info;

	regs = task_pt_regs(task);
	per_info = (per_struct *) &task->thread.per_info;
	/* Instruction-fetch events are needed for single stepping too. */
	per_info->control_regs.bits.em_instruction_fetch =
		per_info->single_step | per_info->instruction_fetch;

	if (per_info->single_step) {
		/* Single step: trap on every instruction in the whole
		 * (31 or 64 bit) user address range. */
		per_info->control_regs.bits.starting_addr = 0;
#ifdef CONFIG_COMPAT
		if (test_thread_flag(TIF_31BIT))
			per_info->control_regs.bits.ending_addr = 0x7fffffffUL;
		else
#endif
			per_info->control_regs.bits.ending_addr = PSW_ADDR_INSN;
	} else {
		/* Otherwise use the user-supplied PER address range. */
		per_info->control_regs.bits.starting_addr =
			per_info->starting_addr;
		per_info->control_regs.bits.ending_addr =
			per_info->ending_addr;
	}
	/*
	 * if any of the control reg tracing bits are on
	 * we switch on per in the psw
	 */
	if (per_info->control_regs.words.cr[0] & PER_EM_MASK)
		regs->psw.mask |= PSW_MASK_PER;
	else
		regs->psw.mask &= ~PSW_MASK_PER;

	if (per_info->control_regs.bits.em_storage_alteration)
		per_info->control_regs.bits.storage_alt_space_ctl = 1;
	else
		per_info->control_regs.bits.storage_alt_space_ctl = 0;
}
|
||||
|
||||
/* Enable PER single stepping for @task and refresh its PER state. */
static void set_single_step(struct task_struct *task)
{
	task->thread.per_info.single_step = 1;
	FixPerRegisters(task);
}
|
||||
|
||||
/* Disable PER single stepping for @task and refresh its PER state. */
static void clear_single_step(struct task_struct *task)
{
	task->thread.per_info.single_step = 0;
	FixPerRegisters(task);
}
|
||||
|
||||
/*
 * Called by kernel/ptrace.c when detaching from a traced child.
 * Ensure no single-step (or other PER) bits are left behind.
 */
void
ptrace_disable(struct task_struct *child)
{
	clear_single_step(child);
}
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
# define __ADDR_MASK 3
|
||||
#else
|
||||
# define __ADDR_MASK 7
|
||||
#endif
|
||||
|
||||
/*
 * Read the word at offset addr from the user area of a process.  The
 * trouble here is that the information is littered over different
 * locations.  The process registers are found on the kernel stack,
 * the floating point stuff and the trace settings are stored in
 * the task structure.  In addition the different structures in
 * struct user contain pad bytes that should be read as zeroes.
 * Lovely...
 *
 * @addr is an offset into struct user; @data is the user address the
 * fetched word is stored to.  The NULL "dummy" pointer is only used
 * to compute struct user field offsets.
 */
static int
peek_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset, tmp, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &dummy->regs.acrs &&
	    addr < (addr_t) &dummy->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		tmp = *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr);
		if (addr == (addr_t) &dummy->regs.psw.mask)
			/* Remove per bit from user psw. */
			tmp &= ~PSW_MASK_PER;

	} else if (addr < (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb reading
		 * from acrs[15]. Result is a 64 bit value. Read the
		 * 32 bit acrs[15] value and shift it by 32. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			tmp = ((unsigned long) child->thread.acrs[15]) << 32;
		else
#endif
		tmp = *(addr_t *)((addr_t) &child->thread.acrs + offset);

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		tmp = (addr_t) task_pt_regs(child)->orig_gpr2;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset);
		/* Mask the FPC word down to its architecturally valid bits. */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc)
			tmp &= (unsigned long) FPC_VALID_MASK
				<< (BITS_PER_LONG - 32);

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.per_info;
		tmp = *(addr_t *)((addr_t) &child->thread.per_info + offset);

	} else
		/* Pad bytes read as zero. */
		tmp = 0;

	return put_user(tmp, (addr_t __user *) data);
}
|
||||
|
||||
/*
 * Write a word to the user area of a process at location addr. This
 * operation does have an additional problem compared to peek_user.
 * Stores to the program status word and on the floating point
 * control register needs to get checked for validity.
 *
 * @addr is an offset into struct user; @data is the word to store.
 * Returns 0 on success, -EIO for bad offsets, -EINVAL for invalid
 * PSW mask / FPC values.
 */
static int
poke_user(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user *dummy = NULL;
	addr_t offset, mask;

	/*
	 * Stupid gdb peeks/pokes the access registers in 64 bit with
	 * an alignment of 4. Programmers from hell indeed...
	 */
	mask = __ADDR_MASK;
#ifdef CONFIG_64BIT
	if (addr >= (addr_t) &dummy->regs.acrs &&
	    addr < (addr_t) &dummy->regs.orig_gpr2)
		mask = 3;
#endif
	if ((addr & mask) || addr > sizeof(struct user) - __ADDR_MASK)
		return -EIO;

	if (addr < (addr_t) &dummy->regs.acrs) {
		/*
		 * psw and gprs are stored on the stack
		 */
		/* Only PSW masks matching the allowed user bit patterns
		 * (64 bit, or 31 bit compat when enabled) are accepted. */
		if (addr == (addr_t) &dummy->regs.psw.mask &&
#ifdef CONFIG_COMPAT
		    data != PSW_MASK_MERGE(psw_user32_bits, data) &&
#endif
		    data != PSW_MASK_MERGE(psw_user_bits, data))
			/* Invalid psw mask. */
			return -EINVAL;
#ifndef CONFIG_64BIT
		if (addr == (addr_t) &dummy->regs.psw.addr)
			/* I'd like to reject addresses without the
			   high order bit but older gdb's rely on it */
			data |= PSW_ADDR_AMODE;
#endif
		*(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;

	} else if (addr < (addr_t) (&dummy->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.acrs;
#ifdef CONFIG_64BIT
		/*
		 * Very special case: old & broken 64 bit gdb writing
		 * to acrs[15] with a 64 bit value. Ignore the lower
		 * half of the value and write the upper 32 bit to
		 * acrs[15]. Sick...
		 */
		if (addr == (addr_t) &dummy->regs.acrs[15])
			child->thread.acrs[15] = (unsigned int) (data >> 32);
		else
#endif
		*(addr_t *)((addr_t) &child->thread.acrs + offset) = data;

	} else if (addr == (addr_t) &dummy->regs.orig_gpr2) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		task_pt_regs(child)->orig_gpr2 = data;

	} else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		/* Reject FPC values with bits outside FPC_VALID_MASK. */
		if (addr == (addr_t) &dummy->regs.fp_regs.fpc &&
		    (data & ~((unsigned long) FPC_VALID_MASK
			      << (BITS_PER_LONG - 32))) != 0)
			return -EINVAL;
		offset = addr - (addr_t) &dummy->regs.fp_regs;
		*(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data;

	} else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure
		 */
		offset = addr - (addr_t) &dummy->regs.per_info;
		*(addr_t *)((addr_t) &child->thread.per_info + offset) = data;

	}

	/* Re-derive the PER control registers from the new settings. */
	FixPerRegisters(child);
	return 0;
}
|
||||
|
||||
/*
 * Handle the native (full-width) ptrace requests: peek/poke of memory
 * and of the struct user area, including the s390-specific
 * PTRACE_PEEKUSR_AREA/PTRACE_POKEUSR_AREA block transfers.  Anything
 * not handled here is passed on to the generic ptrace_request().
 */
static int
do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
{
	unsigned long tmp;
	ptrace_area parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* Remove high order bit from address (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		/* read word at location addr. */
		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		if (copied != sizeof(tmp))
			return -EIO;
		return put_user(tmp, (unsigned long __force __user *) data);

	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user(child, addr, data);

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		/* Remove high order bit from address (only for 31 bit). */
		addr &= PSW_ADDR_INSN;
		/* write the word at location addr. */
		copied = access_process_vm(child, addr, &data, sizeof(data),1);
		if (copied != sizeof(data))
			return -EIO;
		return 0;

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		/* Block transfer: addr points to a ptrace_area descriptor
		 * naming a user-area offset, a tracer buffer and a length. */
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user(child, addr, data);
			else {
				addr_t utmp;
				if (get_user(utmp,
					     (addr_t __force __user *) data))
					return -EFAULT;
				ret = poke_user(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned long);
			data += sizeof(unsigned long);
			copied += sizeof(unsigned long);
		}
		return 0;
	}
	return ptrace_request(child, request, addr, data);
}
|
||||
|
||||
#ifdef CONFIG_COMPAT
|
||||
/*
|
||||
* Now the fun part starts... a 31 bit program running in the
|
||||
* 31 bit emulation tracing another program. PTRACE_PEEKTEXT,
|
||||
* PTRACE_PEEKDATA, PTRACE_POKETEXT and PTRACE_POKEDATA are easy
|
||||
* to handle, the difference to the 64 bit versions of the requests
|
||||
* is that the access is done in multiples of 4 byte instead of
|
||||
* 8 bytes (sizeof(unsigned long) on 31/64 bit).
|
||||
* The ugly part are PTRACE_PEEKUSR, PTRACE_PEEKUSR_AREA,
|
||||
* PTRACE_POKEUSR and PTRACE_POKEUSR_AREA. If the traced program
|
||||
* is a 31 bit program too, the content of struct user can be
|
||||
* emulated. A 31 bit program peeking into the struct user of
|
||||
* a 64 bit program is a no-no.
|
||||
*/
|
||||
|
||||
/*
|
||||
* Same as peek_user but for a 31 bit program.
|
||||
*/
|
||||
static int
|
||||
peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
|
||||
{
|
||||
struct user32 *dummy32 = NULL;
|
||||
per_struct32 *dummy_per32 = NULL;
|
||||
addr_t offset;
|
||||
__u32 tmp;
|
||||
|
||||
if (!test_thread_flag(TIF_31BIT) ||
|
||||
(addr & 3) || addr > sizeof(struct user) - 3)
|
||||
return -EIO;
|
||||
|
||||
if (addr < (addr_t) &dummy32->regs.acrs) {
|
||||
/*
|
||||
* psw and gprs are stored on the stack
|
||||
*/
|
||||
if (addr == (addr_t) &dummy32->regs.psw.mask) {
|
||||
/* Fake a 31 bit psw mask. */
|
||||
tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
|
||||
tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
|
||||
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
|
||||
/* Fake a 31 bit psw address. */
|
||||
tmp = (__u32) task_pt_regs(child)->psw.addr |
|
||||
PSW32_ADDR_AMODE31;
|
||||
} else {
|
||||
/* gpr 0-15 */
|
||||
tmp = *(__u32 *)((addr_t) &task_pt_regs(child)->psw +
|
||||
addr*2 + 4);
|
||||
}
|
||||
} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
|
||||
/*
|
||||
* access registers are stored in the thread structure
|
||||
*/
|
||||
offset = addr - (addr_t) &dummy32->regs.acrs;
|
||||
tmp = *(__u32*)((addr_t) &child->thread.acrs + offset);
|
||||
|
||||
} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
|
||||
/*
|
||||
* orig_gpr2 is stored on the kernel stack
|
||||
*/
|
||||
tmp = *(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4);
|
||||
|
||||
} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
|
||||
/*
|
||||
* floating point regs. are stored in the thread structure
|
||||
*/
|
||||
offset = addr - (addr_t) &dummy32->regs.fp_regs;
|
||||
tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset);
|
||||
|
||||
} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
|
||||
/*
|
||||
* per_info is found in the thread structure
|
||||
*/
|
||||
offset = addr - (addr_t) &dummy32->regs.per_info;
|
||||
/* This is magic. See per_struct and per_struct32. */
|
||||
if ((offset >= (addr_t) &dummy_per32->control_regs &&
|
||||
offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
|
||||
(offset >= (addr_t) &dummy_per32->starting_addr &&
|
||||
offset <= (addr_t) &dummy_per32->ending_addr) ||
|
||||
offset == (addr_t) &dummy_per32->lowcore.words.address)
|
||||
offset = offset*2 + 4;
|
||||
else
|
||||
offset = offset*2;
|
||||
tmp = *(__u32 *)((addr_t) &child->thread.per_info + offset);
|
||||
|
||||
} else
|
||||
tmp = 0;
|
||||
|
||||
return put_user(tmp, (__u32 __user *) data);
|
||||
}
|
||||
|
||||
/*
 * Same as poke_user but for a 31 bit program.
 *
 * @addr is an offset into the 31-bit struct user32; @data carries the
 * 32-bit value to store (in its low half).  Returns 0 on success,
 * -EIO for bad offsets, -EINVAL for invalid PSW mask / FPC values.
 */
static int
poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
{
	struct user32 *dummy32 = NULL;
	per_struct32 *dummy_per32 = NULL;
	addr_t offset;
	__u32 tmp;

	if (!test_thread_flag(TIF_31BIT) ||
	    (addr & 3) || addr > sizeof(struct user32) - 3)
		return -EIO;

	tmp = (__u32) data;

	if (addr < (addr_t) &dummy32->regs.acrs) {
		/*
		 * psw, gprs, acrs and orig_gpr2 are stored on the stack
		 */
		if (addr == (addr_t) &dummy32->regs.psw.mask) {
			/* Build a 64 bit psw mask from 31 bit mask. */
			if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
				/* Invalid psw mask. */
				return -EINVAL;
			task_pt_regs(child)->psw.mask =
				PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
		} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
			/* Build a 64 bit psw address from 31 bit address. */
			task_pt_regs(child)->psw.addr =
				(__u64) tmp & PSW32_ADDR_INSN;
		} else {
			/* gpr 0-15: write the low half of the 64 bit gpr. */
			*(__u32*)((addr_t) &task_pt_regs(child)->psw
				  + addr*2 + 4) = tmp;
		}
	} else if (addr < (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * access registers are stored in the thread structure
		 */
		offset = addr - (addr_t) &dummy32->regs.acrs;
		*(__u32*)((addr_t) &child->thread.acrs + offset) = tmp;

	} else if (addr == (addr_t) (&dummy32->regs.orig_gpr2)) {
		/*
		 * orig_gpr2 is stored on the kernel stack
		 */
		*(__u32*)((addr_t) &task_pt_regs(child)->orig_gpr2 + 4) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
		/*
		 * floating point regs. are stored in the thread structure
		 */
		if (addr == (addr_t) &dummy32->regs.fp_regs.fpc &&
		    (tmp & ~FPC_VALID_MASK) != 0)
			/* Invalid floating point control. */
			return -EINVAL;
		offset = addr - (addr_t) &dummy32->regs.fp_regs;
		*(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp;

	} else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
		/*
		 * per_info is found in the thread structure.
		 */
		offset = addr - (addr_t) &dummy32->regs.per_info;
		/*
		 * This is magic. See per_struct and per_struct32.
		 * By incident the offsets in per_struct are exactly
		 * twice the offsets in per_struct32 for all fields.
		 * The 8 byte fields need special handling though,
		 * because the second half (bytes 4-7) is needed and
		 * not the first half.
		 */
		if ((offset >= (addr_t) &dummy_per32->control_regs &&
		     offset < (addr_t) (&dummy_per32->control_regs + 1)) ||
		    (offset >= (addr_t) &dummy_per32->starting_addr &&
		     offset <= (addr_t) &dummy_per32->ending_addr) ||
		    offset == (addr_t) &dummy_per32->lowcore.words.address)
			offset = offset*2 + 4;
		else
			offset = offset*2;
		*(__u32 *)((addr_t) &child->thread.per_info + offset) = tmp;

	}

	/* Re-derive the PER control registers from the new settings. */
	FixPerRegisters(child);
	return 0;
}
|
||||
|
||||
/*
 * Handle ptrace requests issued by a 31 bit (compat) tracer: same
 * request set as do_ptrace_normal, but all transfers are 4 bytes wide
 * and siginfo/event values must be converted to the 32-bit layouts.
 */
static int
do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
{
	unsigned int tmp;  /* 4 bytes !! */
	ptrace_area_emu31 parea;
	int copied, ret;

	switch (request) {
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		/* read word at location addr. */
		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		if (copied != sizeof(tmp))
			return -EIO;
		return put_user(tmp, (unsigned int __force __user *) data);

	case PTRACE_PEEKUSR:
		/* read the word at location addr in the USER area. */
		return peek_user_emu31(child, addr, data);

	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		/* write the word at location addr. */
		tmp = data;
		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
		if (copied != sizeof(tmp))
			return -EIO;
		return 0;

	case PTRACE_POKEUSR:
		/* write the word at location addr in the USER area */
		return poke_user_emu31(child, addr, data);

	case PTRACE_PEEKUSR_AREA:
	case PTRACE_POKEUSR_AREA:
		/* Block transfer via the 31-bit ptrace_area descriptor. */
		if (copy_from_user(&parea, (void __force __user *) addr,
							sizeof(parea)))
			return -EFAULT;
		addr = parea.kernel_addr;
		data = parea.process_addr;
		copied = 0;
		while (copied < parea.len) {
			if (request == PTRACE_PEEKUSR_AREA)
				ret = peek_user_emu31(child, addr, data);
			else {
				__u32 utmp;
				if (get_user(utmp,
					     (__u32 __force __user *) data))
					return -EFAULT;
				ret = poke_user_emu31(child, addr, utmp);
			}
			if (ret)
				return ret;
			addr += sizeof(unsigned int);
			data += sizeof(unsigned int);
			copied += sizeof(unsigned int);
		}
		return 0;
	case PTRACE_GETEVENTMSG:
		/* 32-bit tracer: narrow the event message to 32 bit. */
		return put_user((__u32) child->ptrace_message,
				(unsigned int __force __user *) data);
	case PTRACE_GETSIGINFO:
		if (child->last_siginfo == NULL)
			return -EINVAL;
		return copy_siginfo_to_user32((compat_siginfo_t
					       __force __user *) data,
					      child->last_siginfo);
	case PTRACE_SETSIGINFO:
		if (child->last_siginfo == NULL)
			return -EINVAL;
		return copy_siginfo_from_user32(child->last_siginfo,
						(compat_siginfo_t
						 __force __user *) data);
	}
	return ptrace_request(child, request, addr, data);
}
|
||||
#endif
|
||||
|
||||
#define PT32_IEEE_IP 0x13c
|
||||
|
||||
/*
 * Central ptrace dispatcher: handles attach, the IEEE-instruction-
 * pointer self-access special cases, and the requests that are common
 * to 31 and 64 bit tracers; everything else is routed to
 * do_ptrace_emu31() or do_ptrace_normal() depending on the tracer's
 * addressing mode.
 */
static int
do_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret;

	if (request == PTRACE_ATTACH)
		return ptrace_attach(child);

	/*
	 * Special cases to get/store the ieee instructions pointer.
	 * Self-access is allowed without being attached/stopped.
	 */
	if (child == current) {
		if (request == PTRACE_PEEKUSR && addr == PT_IEEE_IP)
			return peek_user(child, addr, data);
		if (request == PTRACE_POKEUSR && addr == PT_IEEE_IP)
			return poke_user(child, addr, data);
#ifdef CONFIG_COMPAT
		if (request == PTRACE_PEEKUSR &&
		    addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
			return peek_user_emu31(child, addr, data);
		if (request == PTRACE_POKEUSR &&
		    addr == PT32_IEEE_IP && test_thread_flag(TIF_31BIT))
			return poke_user_emu31(child, addr, data);
#endif
	}

	/* All remaining requests need an attached, stopped child
	 * (except PTRACE_KILL, which may act on a dying task). */
	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		return ret;

	switch (request) {
	case PTRACE_SYSCALL:
		/* continue and stop at next (return from) syscall */
	case PTRACE_CONT:
		/* restart after signal. */
		if (!valid_signal(data))
			return -EIO;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;
		/* make sure the single step bit is not set. */
		clear_single_step(child);
		wake_up_process(child);
		return 0;

	case PTRACE_KILL:
		/*
		 * make the child exit.  Best I can do is send it a sigkill.
		 * perhaps it should be put in the status that it wants to
		 * exit.
		 */
		if (child->exit_state == EXIT_ZOMBIE) /* already dead */
			return 0;
		child->exit_code = SIGKILL;
		/* make sure the single step bit is not set. */
		clear_single_step(child);
		wake_up_process(child);
		return 0;

	case PTRACE_SINGLESTEP:
		/* set the trap flag. */
		if (!valid_signal(data))
			return -EIO;
		clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
		child->exit_code = data;
		/* With a pending signal, deliver it via the fake
		 * single-step path instead of hardware PER stepping. */
		if (data)
			set_tsk_thread_flag(child, TIF_SINGLE_STEP);
		else
			set_single_step(child);
		/* give it a chance to run. */
		wake_up_process(child);
		return 0;

	case PTRACE_DETACH:
		/* detach a process that was attached. */
		return ptrace_detach(child, data);


	/* Do requests that differ for 31/64 bit */
	default:
#ifdef CONFIG_COMPAT
		if (test_thread_flag(TIF_31BIT))
			return do_ptrace_emu31(child, request, addr, data);
#endif
		return do_ptrace_normal(child, request, addr, data);
	}
	/* Not reached. */
	return -EIO;
}
|
||||
|
||||
asmlinkage long
|
||||
sys_ptrace(long request, long pid, long addr, long data)
|
||||
{
|
||||
struct task_struct *child;
|
||||
int ret;
|
||||
|
||||
lock_kernel();
|
||||
if (request == PTRACE_TRACEME) {
|
||||
ret = ptrace_traceme();
|
||||
goto out;
|
||||
}
|
||||
|
||||
child = ptrace_get_task_struct(pid);
|
||||
if (IS_ERR(child)) {
|
||||
ret = PTR_ERR(child);
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = do_ptrace(child, request, addr, data);
|
||||
put_task_struct(child);
|
||||
out:
|
||||
unlock_kernel();
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
 * Called from the syscall entry/exit path when tracing or auditing is
 * active.  @entryexit is non-zero on syscall exit.  Reports the event
 * to the audit subsystem and, for ptraced tasks with
 * TIF_SYSCALL_TRACE set, notifies the tracer with SIGTRAP.
 */
asmlinkage void
syscall_trace(struct pt_regs *regs, int entryexit)
{
	/* On exit, report the return value (gpr 2) to audit first. */
	if (unlikely(current->audit_context) && entryexit)
		audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]), regs->gprs[2]);

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		goto out;
	if (!(current->ptrace & PT_PTRACED))
		goto out;
	/* 0x80 flags a syscall stop when PT_TRACESYSGOOD is requested. */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * If the debugger has set an invalid system call number,
	 * we prepare to skip the system call restart handling.
	 */
	if (!entryexit && regs->gprs[2] >= NR_syscalls)
		regs->trap = -1;

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
out:
	/* On entry, hand the syscall number and arguments to audit. */
	if (unlikely(current->audit_context) && !entryexit)
		audit_syscall_entry(test_thread_flag(TIF_31BIT)?AUDIT_ARCH_S390:AUDIT_ARCH_S390X,
				    regs->gprs[2], regs->orig_gpr2, regs->gprs[3],
				    regs->gprs[4], regs->gprs[5]);
}
|
||||
86
arch/s390/kernel/reipl.S
Normal file
86
arch/s390/kernel/reipl.S
Normal file
@@ -0,0 +1,86 @@
|
||||
/*
|
||||
* arch/s390/kernel/reipl.S
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
|
||||
*/
|
||||
|
||||
#include <asm/lowcore.h>
|
||||
|
||||
#
|
||||
# do_reipl_asm
|
||||
# Parameter: r2 = schid of reipl device
|
||||
#
|
||||
.globl do_reipl_asm
|
||||
do_reipl_asm: basr %r13,0
|
||||
.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
|
||||
.Lpg1: # do store status of all registers
|
||||
|
||||
stm %r0,%r15,__LC_GPREGS_SAVE_AREA
|
||||
stctl %c0,%c15,__LC_CREGS_SAVE_AREA
|
||||
stam %a0,%a15,__LC_AREGS_SAVE_AREA
|
||||
l %r10,.Ldump_pfx-.Lpg0(%r13)
|
||||
mvc __LC_PREFIX_SAVE_AREA(4),0(%r10)
|
||||
stckc .Lclkcmp-.Lpg0(%r13)
|
||||
mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
|
||||
stpt __LC_CPU_TIMER_SAVE_AREA
|
||||
st %r13, __LC_PSW_SAVE_AREA+4
|
||||
lctl %c6,%c6,.Lall-.Lpg0(%r13)
|
||||
lr %r1,%r2
|
||||
mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
|
||||
stsch .Lschib-.Lpg0(%r13)
|
||||
oi .Lschib+5-.Lpg0(%r13),0x84
|
||||
.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
|
||||
msch .Lschib-.Lpg0(%r13)
|
||||
lhi %r0,5
|
||||
.Lssch: ssch .Liplorb-.Lpg0(%r13)
|
||||
jz .L001
|
||||
brct %r0,.Lssch
|
||||
bas %r14,.Ldisab-.Lpg0(%r13)
|
||||
.L001: mvc __LC_IO_NEW_PSW(8),.Lionew-.Lpg0(%r13)
|
||||
.Ltpi: lpsw .Lwaitpsw-.Lpg0(%r13)
|
||||
.Lcont: c %r1,__LC_SUBCHANNEL_ID
|
||||
jnz .Ltpi
|
||||
clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
|
||||
jnz .Ltpi
|
||||
tsch .Liplirb-.Lpg0(%r13)
|
||||
tm .Liplirb+9-.Lpg0(%r13),0xbf
|
||||
jz .L002
|
||||
bas %r14,.Ldisab-.Lpg0(%r13)
|
||||
.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
|
||||
jz .L003
|
||||
bas %r14,.Ldisab-.Lpg0(%r13)
|
||||
.L003: st %r1,__LC_SUBCHANNEL_ID
|
||||
lpsw 0
|
||||
sigp 0,0,0(6)
|
||||
.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
|
||||
lpsw .Ldispsw-.Lpg0(%r13)
|
||||
.align 8
|
||||
.Lclkcmp: .quad 0x0000000000000000
|
||||
.Lall: .long 0xff000000
|
||||
.Ldump_pfx: .long dump_prefix_page
|
||||
.align 8
|
||||
.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
|
||||
.Lpcnew: .long 0x00080000,0x80000000+.Lecs
|
||||
.Lionew: .long 0x00080000,0x80000000+.Lcont
|
||||
.Lwaitpsw: .long 0x020a0000,0x00000000+.Ltpi
|
||||
.Ldispsw: .long 0x000a0000,0x00000000
|
||||
.Liplccws: .long 0x02000000,0x60000018
|
||||
.long 0x08000008,0x20000001
|
||||
.Liplorb: .long 0x0049504c,0x0040ff80
|
||||
.long 0x00000000+.Liplccws
|
||||
.Lschib: .long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.Liplirb: .long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
113
arch/s390/kernel/reipl64.S
Normal file
113
arch/s390/kernel/reipl64.S
Normal file
@@ -0,0 +1,113 @@
|
||||
/*
|
||||
* arch/s390/kernel/reipl.S
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com)
|
||||
Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
|
||||
*/
|
||||
|
||||
#include <asm/lowcore.h>
|
||||
|
||||
#
|
||||
# do_reipl_asm
|
||||
# Parameter: r2 = schid of reipl device
|
||||
#
|
||||
|
||||
.globl do_reipl_asm
|
||||
do_reipl_asm: basr %r13,0
|
||||
.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
|
||||
.Lpg1: # do store status of all registers
|
||||
|
||||
stg %r1,.Lregsave-.Lpg0(%r13)
|
||||
lghi %r1,0x1000
|
||||
stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1)
|
||||
lg %r0,.Lregsave-.Lpg0(%r13)
|
||||
stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
|
||||
stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
|
||||
stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
|
||||
lg %r10,.Ldump_pfx-.Lpg0(%r13)
|
||||
mvc __LC_PREFIX_SAVE_AREA-0x1000(4,%r1),0(%r10)
|
||||
stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1)
|
||||
stckc .Lclkcmp-.Lpg0(%r13)
|
||||
mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13)
|
||||
stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1)
|
||||
stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1)
|
||||
|
||||
lctlg %c6,%c6,.Lall-.Lpg0(%r13)
|
||||
lgr %r1,%r2
|
||||
mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
|
||||
stsch .Lschib-.Lpg0(%r13)
|
||||
oi .Lschib+5-.Lpg0(%r13),0x84
|
||||
.Lecs: xi .Lschib+27-.Lpg0(%r13),0x01
|
||||
msch .Lschib-.Lpg0(%r13)
|
||||
lghi %r0,5
|
||||
.Lssch: ssch .Liplorb-.Lpg0(%r13)
|
||||
jz .L001
|
||||
brct %r0,.Lssch
|
||||
bas %r14,.Ldisab-.Lpg0(%r13)
|
||||
.L001: mvc __LC_IO_NEW_PSW(16),.Lionew-.Lpg0(%r13)
|
||||
.Ltpi: lpswe .Lwaitpsw-.Lpg0(%r13)
|
||||
.Lcont: c %r1,__LC_SUBCHANNEL_ID
|
||||
jnz .Ltpi
|
||||
clc __LC_IO_INT_PARM(4),.Liplorb-.Lpg0(%r13)
|
||||
jnz .Ltpi
|
||||
tsch .Liplirb-.Lpg0(%r13)
|
||||
tm .Liplirb+9-.Lpg0(%r13),0xbf
|
||||
jz .L002
|
||||
bas %r14,.Ldisab-.Lpg0(%r13)
|
||||
.L002: tm .Liplirb+8-.Lpg0(%r13),0xf3
|
||||
jz .L003
|
||||
bas %r14,.Ldisab-.Lpg0(%r13)
|
||||
.L003: st %r1,__LC_SUBCHANNEL_ID
|
||||
lhi %r1,0 # mode 0 = esa
|
||||
slr %r0,%r0 # set cpuid to zero
|
||||
sigp %r1,%r0,0x12 # switch to esa mode
|
||||
lpsw 0
|
||||
.Ldisab: sll %r14,1
|
||||
srl %r14,1 # need to kill hi bit to avoid specification exceptions.
|
||||
st %r14,.Ldispsw+12-.Lpg0(%r13)
|
||||
lpswe .Ldispsw-.Lpg0(%r13)
|
||||
.align 8
|
||||
.Lclkcmp: .quad 0x0000000000000000
|
||||
.Lall: .quad 0x00000000ff000000
|
||||
.Ldump_pfx: .quad dump_prefix_page
|
||||
.Lregsave: .quad 0x0000000000000000
|
||||
.align 16
|
||||
/*
|
||||
* These addresses have to be 31 bit otherwise
|
||||
* the sigp will throw a specifcation exception
|
||||
* when switching to ESA mode as bit 31 be set
|
||||
* in the ESA psw.
|
||||
* Bit 31 of the addresses has to be 0 for the
|
||||
* 31bit lpswe instruction a fact they appear to have
|
||||
* ommited from the pop.
|
||||
*/
|
||||
.Lnewpsw: .quad 0x0000000080000000
|
||||
.quad .Lpg1
|
||||
.Lpcnew: .quad 0x0000000080000000
|
||||
.quad .Lecs
|
||||
.Lionew: .quad 0x0000000080000000
|
||||
.quad .Lcont
|
||||
.Lwaitpsw: .quad 0x0202000080000000
|
||||
.quad .Ltpi
|
||||
.Ldispsw: .quad 0x0002000080000000
|
||||
.quad 0x0000000000000000
|
||||
.Liplccws: .long 0x02000000,0x60000018
|
||||
.long 0x08000008,0x20000001
|
||||
.Liplorb: .long 0x0049504c,0x0040ff80
|
||||
.long 0x00000000+.Liplccws
|
||||
.Lschib: .long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.Liplirb: .long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
.long 0x00000000,0x00000000
|
||||
117
arch/s390/kernel/relocate_kernel.S
Normal file
117
arch/s390/kernel/relocate_kernel.S
Normal file
@@ -0,0 +1,117 @@
|
||||
/*
|
||||
* arch/s390/kernel/relocate_kernel.S
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2005
|
||||
*
|
||||
* Author(s): Rolf Adelsberger,
|
||||
* Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
* moves the new kernel to its destination...
|
||||
* %r2 = pointer to first kimage_entry_t
|
||||
* %r3 = start address - where to jump to after the job is done...
|
||||
*
|
||||
* %r5 will be used as temp. storage
|
||||
* %r6 holds the destination address
|
||||
* %r7 = PAGE_SIZE
|
||||
* %r8 holds the source address
|
||||
* %r9 = PAGE_SIZE
|
||||
* %r10 is a page mask
|
||||
*/
|
||||
|
||||
.text
|
||||
.globl relocate_kernel
|
||||
relocate_kernel:
|
||||
basr %r13,0 # base address
|
||||
.base:
|
||||
stnsm sys_msk-.base(%r13),0xfb # disable DAT
|
||||
stctl %c0,%c15,ctlregs-.base(%r13)
|
||||
stm %r0,%r15,gprregs-.base(%r13)
|
||||
la %r1,load_psw-.base(%r13)
|
||||
mvc 0(8,%r0),0(%r1)
|
||||
la %r0,.back-.base(%r13)
|
||||
st %r0,4(%r0)
|
||||
oi 4(%r0),0x80
|
||||
mvc 0x68(8,%r0),0(%r1)
|
||||
la %r0,.back_pgm-.base(%r13)
|
||||
st %r0,0x6c(%r0)
|
||||
oi 0x6c(%r0),0x80
|
||||
lhi %r0,0
|
||||
diag %r0,%r0,0x308
|
||||
.back:
|
||||
basr %r13,0
|
||||
.back_base:
|
||||
oi have_diag308-.back_base(%r13),0x01
|
||||
lctl %c0,%c15,ctlregs-.back_base(%r13)
|
||||
lm %r0,%r15,gprregs-.back_base(%r13)
|
||||
j .start_reloc
|
||||
.back_pgm:
|
||||
lm %r0,%r15,gprregs-.base(%r13)
|
||||
.start_reloc:
|
||||
lhi %r10,-1 # preparing the mask
|
||||
sll %r10,12 # shift it such that it becomes 0xf000
|
||||
.top:
|
||||
lhi %r7,4096 # load PAGE_SIZE in r7
|
||||
lhi %r9,4096 # load PAGE_SIZE in r9
|
||||
l %r5,0(%r2) # read another word for indirection page
|
||||
ahi %r2,4 # increment pointer
|
||||
tml %r5,0x1 # is it a destination page?
|
||||
je .indir_check # NO, goto "indir_check"
|
||||
lr %r6,%r5 # r6 = r5
|
||||
nr %r6,%r10 # mask it out and...
|
||||
j .top # ...next iteration
|
||||
.indir_check:
|
||||
tml %r5,0x2 # is it a indirection page?
|
||||
je .done_test # NO, goto "done_test"
|
||||
nr %r5,%r10 # YES, mask out,
|
||||
lr %r2,%r5 # move it into the right register,
|
||||
j .top # and read next...
|
||||
.done_test:
|
||||
tml %r5,0x4 # is it the done indicator?
|
||||
je .source_test # NO! Well, then it should be the source indicator...
|
||||
j .done # ok, lets finish it here...
|
||||
.source_test:
|
||||
tml %r5,0x8 # it should be a source indicator...
|
||||
je .top # NO, ignore it...
|
||||
lr %r8,%r5 # r8 = r5
|
||||
nr %r8,%r10 # masking
|
||||
0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
|
||||
jo 0b
|
||||
j .top
|
||||
.done:
|
||||
sr %r0,%r0 # clear register r0
|
||||
la %r4,load_psw-.base(%r13) # load psw-address into the register
|
||||
o %r3,4(%r4) # or load address into psw
|
||||
st %r3,4(%r4)
|
||||
mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
|
||||
tm have_diag308-.base(%r13),0x01
|
||||
jno .no_diag308
|
||||
diag %r0,%r0,0x308
|
||||
.no_diag308:
|
||||
sr %r1,%r1 # clear %r1
|
||||
sr %r2,%r2 # clear %r2
|
||||
sigp %r1,%r2,0x12 # set cpuid to zero
|
||||
lpsw 0 # hopefully start new kernel...
|
||||
|
||||
.align 8
|
||||
load_psw:
|
||||
.long 0x00080000,0x80000000
|
||||
sys_msk:
|
||||
.quad 0
|
||||
ctlregs:
|
||||
.rept 16
|
||||
.long 0
|
||||
.endr
|
||||
gprregs:
|
||||
.rept 16
|
||||
.long 0
|
||||
.endr
|
||||
have_diag308:
|
||||
.byte 0
|
||||
.align 8
|
||||
relocate_kernel_end:
|
||||
.globl relocate_kernel_len
|
||||
relocate_kernel_len:
|
||||
.quad relocate_kernel_end - relocate_kernel
|
||||
120
arch/s390/kernel/relocate_kernel64.S
Normal file
120
arch/s390/kernel/relocate_kernel64.S
Normal file
@@ -0,0 +1,120 @@
|
||||
/*
|
||||
* arch/s390/kernel/relocate_kernel64.S
|
||||
*
|
||||
* (C) Copyright IBM Corp. 2005
|
||||
*
|
||||
* Author(s): Rolf Adelsberger,
|
||||
* Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*
|
||||
*/
|
||||
|
||||
/*
|
||||
* moves the new kernel to its destination...
|
||||
* %r2 = pointer to first kimage_entry_t
|
||||
* %r3 = start address - where to jump to after the job is done...
|
||||
*
|
||||
* %r5 will be used as temp. storage
|
||||
* %r6 holds the destination address
|
||||
* %r7 = PAGE_SIZE
|
||||
* %r8 holds the source address
|
||||
* %r9 = PAGE_SIZE
|
||||
*
|
||||
* 0xf000 is a page_mask
|
||||
*/
|
||||
|
||||
.text
|
||||
.globl relocate_kernel
|
||||
relocate_kernel:
|
||||
basr %r13,0 # base address
|
||||
.base:
|
||||
stnsm sys_msk-.base(%r13),0xfb # disable DAT
|
||||
stctg %c0,%c15,ctlregs-.base(%r13)
|
||||
stmg %r0,%r15,gprregs-.base(%r13)
|
||||
lghi %r0,3
|
||||
sllg %r0,%r0,31
|
||||
stg %r0,0x1d0(%r0)
|
||||
la %r0,.back_pgm-.base(%r13)
|
||||
stg %r0,0x1d8(%r0)
|
||||
la %r1,load_psw-.base(%r13)
|
||||
mvc 0(8,%r0),0(%r1)
|
||||
la %r0,.back-.base(%r13)
|
||||
st %r0,4(%r0)
|
||||
oi 4(%r0),0x80
|
||||
lghi %r0,0
|
||||
diag %r0,%r0,0x308
|
||||
.back:
|
||||
lhi %r1,1 # mode 1 = esame
|
||||
sigp %r1,%r0,0x12 # switch to esame mode
|
||||
sam64 # switch to 64 bit addressing mode
|
||||
basr %r13,0
|
||||
.back_base:
|
||||
oi have_diag308-.back_base(%r13),0x01
|
||||
lctlg %c0,%c15,ctlregs-.back_base(%r13)
|
||||
lmg %r0,%r15,gprregs-.back_base(%r13)
|
||||
j .top
|
||||
.back_pgm:
|
||||
lmg %r0,%r15,gprregs-.base(%r13)
|
||||
.top:
|
||||
lghi %r7,4096 # load PAGE_SIZE in r7
|
||||
lghi %r9,4096 # load PAGE_SIZE in r9
|
||||
lg %r5,0(%r2) # read another word for indirection page
|
||||
aghi %r2,8 # increment pointer
|
||||
tml %r5,0x1 # is it a destination page?
|
||||
je .indir_check # NO, goto "indir_check"
|
||||
lgr %r6,%r5 # r6 = r5
|
||||
nill %r6,0xf000 # mask it out and...
|
||||
j .top # ...next iteration
|
||||
.indir_check:
|
||||
tml %r5,0x2 # is it a indirection page?
|
||||
je .done_test # NO, goto "done_test"
|
||||
nill %r5,0xf000 # YES, mask out,
|
||||
lgr %r2,%r5 # move it into the right register,
|
||||
j .top # and read next...
|
||||
.done_test:
|
||||
tml %r5,0x4 # is it the done indicator?
|
||||
je .source_test # NO! Well, then it should be the source indicator...
|
||||
j .done # ok, lets finish it here...
|
||||
.source_test:
|
||||
tml %r5,0x8 # it should be a source indicator...
|
||||
je .top # NO, ignore it...
|
||||
lgr %r8,%r5 # r8 = r5
|
||||
nill %r8,0xf000 # masking
|
||||
0: mvcle %r6,%r8,0x0 # copy PAGE_SIZE bytes from r8 to r6 - pad with 0
|
||||
jo 0b
|
||||
j .top
|
||||
.done:
|
||||
sgr %r0,%r0 # clear register r0
|
||||
la %r4,load_psw-.base(%r13) # load psw-address into the register
|
||||
o %r3,4(%r4) # or load address into psw
|
||||
st %r3,4(%r4)
|
||||
mvc 0(8,%r0),0(%r4) # copy psw to absolute address 0
|
||||
tm have_diag308-.base(%r13),0x01
|
||||
jno .no_diag308
|
||||
diag %r0,%r0,0x308
|
||||
.no_diag308:
|
||||
sam31 # 31 bit mode
|
||||
sr %r1,%r1 # erase register r1
|
||||
sr %r2,%r2 # erase register r2
|
||||
sigp %r1,%r2,0x12 # set cpuid to zero
|
||||
lpsw 0 # hopefully start new kernel...
|
||||
|
||||
.align 8
|
||||
load_psw:
|
||||
.long 0x00080000,0x80000000
|
||||
sys_msk:
|
||||
.quad 0
|
||||
ctlregs:
|
||||
.rept 16
|
||||
.quad 0
|
||||
.endr
|
||||
gprregs:
|
||||
.rept 16
|
||||
.quad 0
|
||||
.endr
|
||||
have_diag308:
|
||||
.byte 0
|
||||
.align 8
|
||||
relocate_kernel_end:
|
||||
.globl relocate_kernel_len
|
||||
relocate_kernel_len:
|
||||
.quad relocate_kernel_end - relocate_kernel
|
||||
140
arch/s390/kernel/s390_ext.c
Normal file
140
arch/s390/kernel/s390_ext.c
Normal file
@@ -0,0 +1,140 @@
|
||||
/*
|
||||
* arch/s390/kernel/s390_ext.c
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Holger Smolinski (Holger.Smolinski@de.ibm.com),
|
||||
* Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/s390_ext.h>
|
||||
#include <asm/irq_regs.h>
|
||||
#include <asm/irq.h>
|
||||
|
||||
/*
|
||||
* ext_int_hash[index] is the start of the list for all external interrupts
|
||||
* that hash to this index. With the current set of external interrupts
|
||||
* (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000
|
||||
* iucv and 0x2603 pfault) this is always the first element.
|
||||
*/
|
||||
ext_int_info_t *ext_int_hash[256] = { NULL, };
|
||||
|
||||
static inline int ext_hash(__u16 code)
|
||||
{
|
||||
return (code + (code >> 9)) & 0xff;
|
||||
}
|
||||
|
||||
int register_external_interrupt(__u16 code, ext_int_handler_t handler)
|
||||
{
|
||||
ext_int_info_t *p;
|
||||
int index;
|
||||
|
||||
p = kmalloc(sizeof(ext_int_info_t), GFP_ATOMIC);
|
||||
if (p == NULL)
|
||||
return -ENOMEM;
|
||||
p->code = code;
|
||||
p->handler = handler;
|
||||
index = ext_hash(code);
|
||||
p->next = ext_int_hash[index];
|
||||
ext_int_hash[index] = p;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
|
||||
ext_int_info_t *p)
|
||||
{
|
||||
int index;
|
||||
|
||||
if (p == NULL)
|
||||
return -EINVAL;
|
||||
p->code = code;
|
||||
p->handler = handler;
|
||||
index = ext_hash(code);
|
||||
p->next = ext_int_hash[index];
|
||||
ext_int_hash[index] = p;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int unregister_external_interrupt(__u16 code, ext_int_handler_t handler)
|
||||
{
|
||||
ext_int_info_t *p, *q;
|
||||
int index;
|
||||
|
||||
index = ext_hash(code);
|
||||
q = NULL;
|
||||
p = ext_int_hash[index];
|
||||
while (p != NULL) {
|
||||
if (p->code == code && p->handler == handler)
|
||||
break;
|
||||
q = p;
|
||||
p = p->next;
|
||||
}
|
||||
if (p == NULL)
|
||||
return -ENOENT;
|
||||
if (q != NULL)
|
||||
q->next = p->next;
|
||||
else
|
||||
ext_int_hash[index] = p->next;
|
||||
kfree(p);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
|
||||
ext_int_info_t *p)
|
||||
{
|
||||
ext_int_info_t *q;
|
||||
int index;
|
||||
|
||||
if (p == NULL || p->code != code || p->handler != handler)
|
||||
return -EINVAL;
|
||||
index = ext_hash(code);
|
||||
q = ext_int_hash[index];
|
||||
if (p != q) {
|
||||
while (q != NULL) {
|
||||
if (q->next == p)
|
||||
break;
|
||||
q = q->next;
|
||||
}
|
||||
if (q == NULL)
|
||||
return -ENOENT;
|
||||
q->next = p->next;
|
||||
} else
|
||||
ext_int_hash[index] = p->next;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void do_extint(struct pt_regs *regs, unsigned short code)
|
||||
{
|
||||
ext_int_info_t *p;
|
||||
int index;
|
||||
struct pt_regs *old_regs;
|
||||
|
||||
old_regs = set_irq_regs(regs);
|
||||
irq_enter();
|
||||
asm volatile ("mc 0,0");
|
||||
if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
|
||||
/**
|
||||
* Make sure that the i/o interrupt did not "overtake"
|
||||
* the last HZ timer interrupt.
|
||||
*/
|
||||
account_ticks(S390_lowcore.int_clock);
|
||||
kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
|
||||
index = ext_hash(code);
|
||||
for (p = ext_int_hash[index]; p; p = p->next) {
|
||||
if (likely(p->code == code))
|
||||
p->handler(code);
|
||||
}
|
||||
irq_exit();
|
||||
set_irq_regs(old_regs);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(register_external_interrupt);
|
||||
EXPORT_SYMBOL(unregister_external_interrupt);
|
||||
53
arch/s390/kernel/s390_ksyms.c
Normal file
53
arch/s390/kernel/s390_ksyms.c
Normal file
@@ -0,0 +1,53 @@
|
||||
/*
|
||||
* arch/s390/kernel/s390_ksyms.c
|
||||
*
|
||||
* S390 version
|
||||
*/
|
||||
#include <linux/highuid.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <asm/checksum.h>
|
||||
#include <asm/cpcmd.h>
|
||||
#include <asm/delay.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/setup.h>
|
||||
#ifdef CONFIG_IP_MULTICAST
|
||||
#include <net/arp.h>
|
||||
#endif
|
||||
|
||||
/*
|
||||
* memory management
|
||||
*/
|
||||
EXPORT_SYMBOL(_oi_bitmap);
|
||||
EXPORT_SYMBOL(_ni_bitmap);
|
||||
EXPORT_SYMBOL(_zb_findmap);
|
||||
EXPORT_SYMBOL(_sb_findmap);
|
||||
EXPORT_SYMBOL(diag10);
|
||||
|
||||
/*
|
||||
* semaphore ops
|
||||
*/
|
||||
EXPORT_SYMBOL(__up);
|
||||
EXPORT_SYMBOL(__down);
|
||||
EXPORT_SYMBOL(__down_interruptible);
|
||||
|
||||
/*
|
||||
* binfmt_elf loader
|
||||
*/
|
||||
extern int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs);
|
||||
EXPORT_SYMBOL(dump_fpu);
|
||||
EXPORT_SYMBOL(empty_zero_page);
|
||||
|
||||
/*
|
||||
* misc.
|
||||
*/
|
||||
EXPORT_SYMBOL(machine_flags);
|
||||
EXPORT_SYMBOL(__udelay);
|
||||
EXPORT_SYMBOL(kernel_thread);
|
||||
EXPORT_SYMBOL(csum_fold);
|
||||
EXPORT_SYMBOL(console_mode);
|
||||
EXPORT_SYMBOL(console_devno);
|
||||
EXPORT_SYMBOL(console_irq);
|
||||
108
arch/s390/kernel/semaphore.c
Normal file
108
arch/s390/kernel/semaphore.c
Normal file
@@ -0,0 +1,108 @@
|
||||
/*
|
||||
* linux/arch/s390/kernel/semaphore.c
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 1998-2000 IBM Corporation
|
||||
* Author(s): Martin Schwidefsky
|
||||
*
|
||||
* Derived from "linux/arch/i386/kernel/semaphore.c
|
||||
* Copyright (C) 1999, Linus Torvalds
|
||||
*
|
||||
*/
|
||||
#include <linux/sched.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/init.h>
|
||||
|
||||
#include <asm/semaphore.h>
|
||||
|
||||
/*
|
||||
* Atomically update sem->count. Equivalent to:
|
||||
* old_val = sem->count.counter;
|
||||
* new_val = ((old_val >= 0) ? old_val : 0) + incr;
|
||||
* sem->count.counter = new_val;
|
||||
* return old_val;
|
||||
*/
|
||||
static inline int __sem_update_count(struct semaphore *sem, int incr)
|
||||
{
|
||||
int old_val, new_val;
|
||||
|
||||
asm volatile(
|
||||
" l %0,0(%3)\n"
|
||||
"0: ltr %1,%0\n"
|
||||
" jhe 1f\n"
|
||||
" lhi %1,0\n"
|
||||
"1: ar %1,%4\n"
|
||||
" cs %0,%1,0(%3)\n"
|
||||
" jl 0b\n"
|
||||
: "=&d" (old_val), "=&d" (new_val), "=m" (sem->count)
|
||||
: "a" (&sem->count), "d" (incr), "m" (sem->count)
|
||||
: "cc");
|
||||
return old_val;
|
||||
}
|
||||
|
||||
/*
|
||||
* The inline function up() incremented count but the result
|
||||
* was <= 0. This indicates that some process is waiting on
|
||||
* the semaphore. The semaphore is free and we'll wake the
|
||||
* first sleeping process, so we set count to 1 unless some
|
||||
* other cpu has called up in the meantime in which case
|
||||
* we just increment count by 1.
|
||||
*/
|
||||
void __up(struct semaphore *sem)
|
||||
{
|
||||
__sem_update_count(sem, 1);
|
||||
wake_up(&sem->wait);
|
||||
}
|
||||
|
||||
/*
|
||||
* The inline function down() decremented count and the result
|
||||
* was < 0. The wait loop will atomically test and update the
|
||||
* semaphore counter following the rules:
|
||||
* count > 0: decrement count, wake up queue and exit.
|
||||
* count <= 0: set count to -1, go to sleep.
|
||||
*/
|
||||
void __sched __down(struct semaphore * sem)
|
||||
{
|
||||
struct task_struct *tsk = current;
|
||||
DECLARE_WAITQUEUE(wait, tsk);
|
||||
|
||||
__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
|
||||
add_wait_queue_exclusive(&sem->wait, &wait);
|
||||
while (__sem_update_count(sem, -1) <= 0) {
|
||||
schedule();
|
||||
set_task_state(tsk, TASK_UNINTERRUPTIBLE);
|
||||
}
|
||||
remove_wait_queue(&sem->wait, &wait);
|
||||
__set_task_state(tsk, TASK_RUNNING);
|
||||
wake_up(&sem->wait);
|
||||
}
|
||||
|
||||
/*
|
||||
* Same as __down() with an additional test for signals.
|
||||
* If a signal is pending the count is updated as follows:
|
||||
* count > 0: wake up queue and exit.
|
||||
* count <= 0: set count to 0, wake up queue and exit.
|
||||
*/
|
||||
int __sched __down_interruptible(struct semaphore * sem)
|
||||
{
|
||||
int retval = 0;
|
||||
struct task_struct *tsk = current;
|
||||
DECLARE_WAITQUEUE(wait, tsk);
|
||||
|
||||
__set_task_state(tsk, TASK_INTERRUPTIBLE);
|
||||
add_wait_queue_exclusive(&sem->wait, &wait);
|
||||
while (__sem_update_count(sem, -1) <= 0) {
|
||||
if (signal_pending(current)) {
|
||||
__sem_update_count(sem, 0);
|
||||
retval = -EINTR;
|
||||
break;
|
||||
}
|
||||
schedule();
|
||||
set_task_state(tsk, TASK_INTERRUPTIBLE);
|
||||
}
|
||||
remove_wait_queue(&sem->wait, &wait);
|
||||
__set_task_state(tsk, TASK_RUNNING);
|
||||
wake_up(&sem->wait);
|
||||
return retval;
|
||||
}
|
||||
|
||||
861
arch/s390/kernel/setup.c
Normal file
861
arch/s390/kernel/setup.c
Normal file
@@ -0,0 +1,861 @@
|
||||
/*
|
||||
* arch/s390/kernel/setup.c
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Hartmut Penner (hp@de.ibm.com),
|
||||
* Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||
*
|
||||
* Derived from "arch/i386/kernel/setup.c"
|
||||
* Copyright (C) 1995, Linus Torvalds
|
||||
*/
|
||||
|
||||
/*
|
||||
* This file handles the architecture-dependent parts of initialization
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/unistd.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/user.h>
|
||||
#include <linux/a.out.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/initrd.h>
|
||||
#include <linux/bootmem.h>
|
||||
#include <linux/root_dev.h>
|
||||
#include <linux/console.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/pfn.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/reboot.h>
|
||||
|
||||
#include <asm/ipl.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/system.h>
|
||||
#include <asm/smp.h>
|
||||
#include <asm/mmu_context.h>
|
||||
#include <asm/cpcmd.h>
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/page.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/sections.h>
|
||||
#include <asm/ebcdic.h>
|
||||
#include <asm/compat.h>
|
||||
|
||||
long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
|
||||
PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
|
||||
long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
|
||||
PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
|
||||
PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
|
||||
|
||||
/*
|
||||
* User copy operations.
|
||||
*/
|
||||
struct uaccess_ops uaccess;
|
||||
EXPORT_SYMBOL_GPL(uaccess);
|
||||
|
||||
/*
|
||||
* Machine setup..
|
||||
*/
|
||||
unsigned int console_mode = 0;
|
||||
unsigned int console_devno = -1;
|
||||
unsigned int console_irq = -1;
|
||||
unsigned long machine_flags = 0;
|
||||
|
||||
struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
|
||||
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
|
||||
static unsigned long __initdata memory_end;
|
||||
|
||||
/*
|
||||
* This is set up by the setup-routine at boot-time
|
||||
* for S390 need to find out, what we have to setup
|
||||
* using address 0x10400 ...
|
||||
*/
|
||||
|
||||
#include <asm/setup.h>
|
||||
|
||||
static struct resource code_resource = {
|
||||
.name = "Kernel code",
|
||||
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
|
||||
};
|
||||
|
||||
static struct resource data_resource = {
|
||||
.name = "Kernel data",
|
||||
.flags = IORESOURCE_BUSY | IORESOURCE_MEM,
|
||||
};
|
||||
|
||||
/*
|
||||
* cpu_init() initializes state that is per-CPU.
|
||||
*/
|
||||
void __devinit cpu_init (void)
|
||||
{
|
||||
int addr = hard_smp_processor_id();
|
||||
|
||||
/*
|
||||
* Store processor id in lowcore (used e.g. in timer_interrupt)
|
||||
*/
|
||||
get_cpu_id(&S390_lowcore.cpu_data.cpu_id);
|
||||
S390_lowcore.cpu_data.cpu_addr = addr;
|
||||
|
||||
/*
|
||||
* Force FPU initialization:
|
||||
*/
|
||||
clear_thread_flag(TIF_USEDFPU);
|
||||
clear_used_math();
|
||||
|
||||
atomic_inc(&init_mm.mm_count);
|
||||
current->active_mm = &init_mm;
|
||||
if (current->mm)
|
||||
BUG();
|
||||
enter_lazy_tlb(&init_mm, current);
|
||||
}
|
||||
|
||||
/*
|
||||
* VM halt and poweroff setup routines
|
||||
*/
|
||||
char vmhalt_cmd[128] = "";
|
||||
char vmpoff_cmd[128] = "";
|
||||
static char vmpanic_cmd[128] = "";
|
||||
|
||||
static void strncpy_skip_quote(char *dst, char *src, int n)
|
||||
{
|
||||
int sx, dx;
|
||||
|
||||
dx = 0;
|
||||
for (sx = 0; src[sx] != 0; sx++) {
|
||||
if (src[sx] == '"') continue;
|
||||
dst[dx++] = src[sx];
|
||||
if (dx >= n) break;
|
||||
}
|
||||
}
|
||||
|
||||
static int __init vmhalt_setup(char *str)
|
||||
{
|
||||
strncpy_skip_quote(vmhalt_cmd, str, 127);
|
||||
vmhalt_cmd[127] = 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("vmhalt=", vmhalt_setup);
|
||||
|
||||
static int __init vmpoff_setup(char *str)
|
||||
{
|
||||
strncpy_skip_quote(vmpoff_cmd, str, 127);
|
||||
vmpoff_cmd[127] = 0;
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("vmpoff=", vmpoff_setup);
|
||||
|
||||
static int vmpanic_notify(struct notifier_block *self, unsigned long event,
|
||||
void *data)
|
||||
{
|
||||
if (MACHINE_IS_VM && strlen(vmpanic_cmd) > 0)
|
||||
cpcmd(vmpanic_cmd, NULL, 0, NULL);
|
||||
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
#define PANIC_PRI_VMPANIC 0
|
||||
|
||||
static struct notifier_block vmpanic_nb = {
|
||||
.notifier_call = vmpanic_notify,
|
||||
.priority = PANIC_PRI_VMPANIC
|
||||
};
|
||||
|
||||
static int __init vmpanic_setup(char *str)
|
||||
{
|
||||
static int register_done __initdata = 0;
|
||||
|
||||
strncpy_skip_quote(vmpanic_cmd, str, 127);
|
||||
vmpanic_cmd[127] = 0;
|
||||
if (!register_done) {
|
||||
register_done = 1;
|
||||
atomic_notifier_chain_register(&panic_notifier_list,
|
||||
&vmpanic_nb);
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("vmpanic=", vmpanic_setup);
|
||||
|
||||
/*
|
||||
* condev= and conmode= setup parameter.
|
||||
*/
|
||||
|
||||
static int __init condev_setup(char *str)
|
||||
{
|
||||
int vdev;
|
||||
|
||||
vdev = simple_strtoul(str, &str, 0);
|
||||
if (vdev >= 0 && vdev < 65536) {
|
||||
console_devno = vdev;
|
||||
console_irq = -1;
|
||||
}
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("condev=", condev_setup);
|
||||
|
||||
static int __init conmode_setup(char *str)
|
||||
{
|
||||
#if defined(CONFIG_SCLP_CONSOLE)
|
||||
if (strncmp(str, "hwc", 4) == 0 || strncmp(str, "sclp", 5) == 0)
|
||||
SET_CONSOLE_SCLP;
|
||||
#endif
|
||||
#if defined(CONFIG_TN3215_CONSOLE)
|
||||
if (strncmp(str, "3215", 5) == 0)
|
||||
SET_CONSOLE_3215;
|
||||
#endif
|
||||
#if defined(CONFIG_TN3270_CONSOLE)
|
||||
if (strncmp(str, "3270", 5) == 0)
|
||||
SET_CONSOLE_3270;
|
||||
#endif
|
||||
return 1;
|
||||
}
|
||||
|
||||
__setup("conmode=", conmode_setup);
|
||||
|
||||
static void __init conmode_default(void)
|
||||
{
|
||||
char query_buffer[1024];
|
||||
char *ptr;
|
||||
|
||||
if (MACHINE_IS_VM) {
|
||||
cpcmd("QUERY CONSOLE", query_buffer, 1024, NULL);
|
||||
console_devno = simple_strtoul(query_buffer + 5, NULL, 16);
|
||||
ptr = strstr(query_buffer, "SUBCHANNEL =");
|
||||
console_irq = simple_strtoul(ptr + 13, NULL, 16);
|
||||
cpcmd("QUERY TERM", query_buffer, 1024, NULL);
|
||||
ptr = strstr(query_buffer, "CONMODE");
|
||||
/*
|
||||
* Set the conmode to 3215 so that the device recognition
|
||||
* will set the cu_type of the console to 3215. If the
|
||||
* conmode is 3270 and we don't set it back then both
|
||||
* 3215 and the 3270 driver will try to access the console
|
||||
* device (3215 as console and 3270 as normal tty).
|
||||
*/
|
||||
cpcmd("TERM CONMODE 3215", NULL, 0, NULL);
|
||||
if (ptr == NULL) {
|
||||
#if defined(CONFIG_SCLP_CONSOLE)
|
||||
SET_CONSOLE_SCLP;
|
||||
#endif
|
||||
return;
|
||||
}
|
||||
if (strncmp(ptr + 8, "3270", 4) == 0) {
|
||||
#if defined(CONFIG_TN3270_CONSOLE)
|
||||
SET_CONSOLE_3270;
|
||||
#elif defined(CONFIG_TN3215_CONSOLE)
|
||||
SET_CONSOLE_3215;
|
||||
#elif defined(CONFIG_SCLP_CONSOLE)
|
||||
SET_CONSOLE_SCLP;
|
||||
#endif
|
||||
} else if (strncmp(ptr + 8, "3215", 4) == 0) {
|
||||
#if defined(CONFIG_TN3215_CONSOLE)
|
||||
SET_CONSOLE_3215;
|
||||
#elif defined(CONFIG_TN3270_CONSOLE)
|
||||
SET_CONSOLE_3270;
|
||||
#elif defined(CONFIG_SCLP_CONSOLE)
|
||||
SET_CONSOLE_SCLP;
|
||||
#endif
|
||||
}
|
||||
} else if (MACHINE_IS_P390) {
|
||||
#if defined(CONFIG_TN3215_CONSOLE)
|
||||
SET_CONSOLE_3215;
|
||||
#elif defined(CONFIG_TN3270_CONSOLE)
|
||||
SET_CONSOLE_3270;
|
||||
#endif
|
||||
} else {
|
||||
#if defined(CONFIG_SCLP_CONSOLE)
|
||||
SET_CONSOLE_SCLP;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
/* On SMP the restart/halt/power-off hooks come from smp.c. */
void (*_machine_restart)(char *command) = machine_restart_smp;
void (*_machine_halt)(void) = machine_halt_smp;
void (*_machine_power_off)(void) = machine_power_off_smp;
#else
/*
 * Reboot, halt and power_off routines for non SMP.
 */
/* Restart: simply re-IPL the machine. */
static void do_machine_restart_nonsmp(char * __unused)
{
	do_reipl();
}

/* Halt: run the configured VM halt command (if any), then stop the CPU. */
static void do_machine_halt_nonsmp(void)
{
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

/* Power off: run the configured VM poff command (if any), then stop the CPU. */
static void do_machine_power_off_nonsmp(void)
{
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
}

void (*_machine_restart)(char *command) = do_machine_restart_nonsmp;
void (*_machine_halt)(void) = do_machine_halt_nonsmp;
void (*_machine_power_off)(void) = do_machine_power_off_nonsmp;
#endif
|
||||
|
||||
/*
|
||||
* Reboot, halt and power_off stubs. They just call _machine_restart,
|
||||
* _machine_halt or _machine_power_off.
|
||||
*/
|
||||
|
||||
void machine_restart(char *command)
|
||||
{
|
||||
if (!in_interrupt() || oops_in_progress)
|
||||
/*
|
||||
* Only unblank the console if we are called in enabled
|
||||
* context or a bust_spinlocks cleared the way for us.
|
||||
*/
|
||||
console_unblank();
|
||||
_machine_restart(command);
|
||||
}
|
||||
|
||||
void machine_halt(void)
|
||||
{
|
||||
if (!in_interrupt() || oops_in_progress)
|
||||
/*
|
||||
* Only unblank the console if we are called in enabled
|
||||
* context or a bust_spinlocks cleared the way for us.
|
||||
*/
|
||||
console_unblank();
|
||||
_machine_halt();
|
||||
}
|
||||
|
||||
void machine_power_off(void)
|
||||
{
|
||||
if (!in_interrupt() || oops_in_progress)
|
||||
/*
|
||||
* Only unblank the console if we are called in enabled
|
||||
* context or a bust_spinlocks cleared the way for us.
|
||||
*/
|
||||
console_unblank();
|
||||
_machine_power_off();
|
||||
}
|
||||
|
||||
/*
 * Dummy power off function.
 * Default hook consumed by the power management core.
 */
void (*pm_power_off)(void) = machine_power_off;
|
||||
|
||||
/* "mem=SIZE" early parameter: limit the amount of usable memory. */
static int __init early_parse_mem(char *p)
{
	memory_end = memparse(p, &p);
	return 0;
}
early_param("mem", early_parse_mem);
|
||||
|
||||
/*
 * "ipldelay=XXX[sm]" sets ipl delay in seconds or minutes
 */
static int __init early_parse_ipldelay(char *p)
{
	unsigned long delay = 0;

	delay = simple_strtoul(p, &p, 0);

	/* Scale by the unit suffix; a bare number is taken as microseconds. */
	switch (*p) {
	case 's':
	case 'S':
		delay *= 1000000;
		break;
	case 'm':
	case 'M':
		delay *= 60 * 1000000;
	}

	/* now wait for the requested amount of time */
	udelay(delay);

	return 0;
}
early_param("ipldelay", early_parse_ipldelay);
|
||||
|
||||
#ifdef CONFIG_S390_SWITCH_AMODE
unsigned int switch_amode = 0;
EXPORT_SYMBOL_GPL(switch_amode);

/*
 * Rebuild the user and kernel PSW bit templates for the given user
 * addressing modes, then select the uaccess method: mvcos when the
 * machine provides it, page-table walking otherwise.
 */
static void set_amode_and_uaccess(unsigned long user_amode,
				  unsigned long user32_amode)
{
	psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
			PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
#ifdef CONFIG_COMPAT
	psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
			  PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
			  PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
	psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
			  PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
			  PSW32_MASK_PSTATE;
#endif
	/* Kernel runs home-space when address spaces are switched. */
	psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
			  PSW_MASK_MCHECK | PSW_DEFAULT_KEY;

	if (MACHINE_HAS_MVCOS) {
		printk("mvcos available.\n");
		memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
	} else {
		printk("mvcos not available.\n");
		memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
	}
}

/*
 * Switch kernel/user addressing modes?
 */
static int __init early_parse_switch_amode(char *p)
{
	switch_amode = 1;
	return 0;
}
early_param("switch_amode", early_parse_switch_amode);

#else /* CONFIG_S390_SWITCH_AMODE */
/* No-op when address-space switching is not configured. */
static inline void set_amode_and_uaccess(unsigned long user_amode,
					 unsigned long user32_amode)
{
}
#endif /* CONFIG_S390_SWITCH_AMODE */
|
||||
|
||||
#ifdef CONFIG_S390_EXEC_PROTECT
|
||||
unsigned int s390_noexec = 0;
|
||||
EXPORT_SYMBOL_GPL(s390_noexec);
|
||||
|
||||
/*
|
||||
* Enable execute protection?
|
||||
*/
|
||||
static int __init early_parse_noexec(char *p)
|
||||
{
|
||||
if (!strncmp(p, "off", 3))
|
||||
return 0;
|
||||
switch_amode = 1;
|
||||
s390_noexec = 1;
|
||||
return 0;
|
||||
}
|
||||
early_param("noexec", early_parse_noexec);
|
||||
#endif /* CONFIG_S390_EXEC_PROTECT */
|
||||
|
||||
static void setup_addressing_mode(void)
|
||||
{
|
||||
if (s390_noexec) {
|
||||
printk("S390 execute protection active, ");
|
||||
set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
|
||||
return;
|
||||
}
|
||||
if (switch_amode) {
|
||||
printk("S390 address spaces switched, ");
|
||||
set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Allocate and initialize the lowcore (prefix page) for the boot cpu
 * and activate it with set_prefix().
 */
static void __init
setup_lowcore(void)
{
	struct _lowcore *lc;
	int lc_pages;

	/*
	 * Setup lowcore for boot cpu
	 */
	/* 64 bit needs two pages of lowcore, 31 bit only one. */
	lc_pages = sizeof(void *) == 8 ? 2 : 1;
	lc = (struct _lowcore *)
		__alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0);
	memset(lc, 0, lc_pages * PAGE_SIZE);
	/* New PSWs for restart and each interruption class. */
	lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	lc->restart_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
	if (switch_amode)
		lc->restart_psw.mask |= PSW_ASC_HOME;
	lc->external_new_psw.mask = psw_kernel_bits;
	lc->external_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
	lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
	lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
	lc->program_new_psw.mask = psw_kernel_bits;
	lc->program_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
	/* Machine checks are taken with DAT off and mcheck disabled. */
	lc->mcck_new_psw.mask =
		psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
	lc->mcck_new_psw.addr =
		PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
	lc->io_new_psw.mask = psw_kernel_bits;
	lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
	lc->ipl_device = S390_lowcore.ipl_device;
	lc->jiffy_timer = -1LL;
	/* Kernel, async (interrupt) and panic stacks for the boot cpu. */
	lc->kernel_stack = ((unsigned long) &init_thread_union) + THREAD_SIZE;
	lc->async_stack = (unsigned long)
		__alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0) + ASYNC_SIZE;
	lc->panic_stack = (unsigned long)
		__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0) + PAGE_SIZE;
	lc->current_task = (unsigned long) init_thread_union.thread_info.task;
	lc->thread_info = (unsigned long) &init_thread_union;
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE) {
		lc->extended_save_area_addr = (__u32)
			__alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
		/* enable extended save area */
		__ctl_set_bit(14, 29);
	}
#endif
	set_prefix((u32)(unsigned long) lc);
}
|
||||
|
||||
static void __init
|
||||
setup_resources(void)
|
||||
{
|
||||
struct resource *res, *sub_res;
|
||||
int i;
|
||||
|
||||
code_resource.start = (unsigned long) &_text;
|
||||
code_resource.end = (unsigned long) &_etext - 1;
|
||||
data_resource.start = (unsigned long) &_etext;
|
||||
data_resource.end = (unsigned long) &_edata - 1;
|
||||
|
||||
for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
|
||||
res = alloc_bootmem_low(sizeof(struct resource));
|
||||
res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
|
||||
switch (memory_chunk[i].type) {
|
||||
case CHUNK_READ_WRITE:
|
||||
res->name = "System RAM";
|
||||
break;
|
||||
case CHUNK_READ_ONLY:
|
||||
res->name = "System ROM";
|
||||
res->flags |= IORESOURCE_READONLY;
|
||||
break;
|
||||
default:
|
||||
res->name = "reserved";
|
||||
}
|
||||
res->start = memory_chunk[i].addr;
|
||||
res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
|
||||
request_resource(&iomem_resource, res);
|
||||
|
||||
if (code_resource.start >= res->start &&
|
||||
code_resource.start <= res->end &&
|
||||
code_resource.end > res->end) {
|
||||
sub_res = alloc_bootmem_low(sizeof(struct resource));
|
||||
memcpy(sub_res, &code_resource,
|
||||
sizeof(struct resource));
|
||||
sub_res->end = res->end;
|
||||
code_resource.start = res->end + 1;
|
||||
request_resource(res, sub_res);
|
||||
}
|
||||
|
||||
if (code_resource.start >= res->start &&
|
||||
code_resource.start <= res->end &&
|
||||
code_resource.end <= res->end)
|
||||
request_resource(res, &code_resource);
|
||||
|
||||
if (data_resource.start >= res->start &&
|
||||
data_resource.start <= res->end &&
|
||||
data_resource.end > res->end) {
|
||||
sub_res = alloc_bootmem_low(sizeof(struct resource));
|
||||
memcpy(sub_res, &data_resource,
|
||||
sizeof(struct resource));
|
||||
sub_res->end = res->end;
|
||||
data_resource.start = res->end + 1;
|
||||
request_resource(res, sub_res);
|
||||
}
|
||||
|
||||
if (data_resource.start >= res->start &&
|
||||
data_resource.start <= res->end &&
|
||||
data_resource.end <= res->end)
|
||||
request_resource(res, &data_resource);
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * Clamp the memory chunk list to the usable maximum (the smaller of the
 * "mem=" limit and the highest address that still leaves room for the
 * vmalloc area) and derive memory_end when it was not set explicitly.
 */
static void __init setup_memory_end(void)
{
	unsigned long real_size, memory_size;
	unsigned long max_mem, max_phys;
	int i;

	/* NOTE(review): real_size is computed but never consumed here. */
	memory_size = real_size = 0;
	max_phys = VMALLOC_END_INIT - VMALLOC_MIN_SIZE;
	memory_end &= PAGE_MASK;

	max_mem = memory_end ? min(max_phys, memory_end) : max_phys;

	for (i = 0; i < MEMORY_CHUNKS; i++) {
		struct mem_chunk *chunk = &memory_chunk[i];

		real_size = max(real_size, chunk->addr + chunk->size);
		if (chunk->addr >= max_mem) {
			/* Chunk lies entirely above the limit: drop it. */
			memset(chunk, 0, sizeof(*chunk));
			continue;
		}
		/* Trim a chunk that straddles the limit. */
		if (chunk->addr + chunk->size > max_mem)
			chunk->size = max_mem - chunk->addr;
		memory_size = max(memory_size, chunk->addr + chunk->size);
	}
	if (!memory_end)
		memory_end = memory_size;
}
|
||||
|
||||
/*
 * Set up the bootmem allocator: move the initrd out of the way of the
 * bootmem bitmap if needed, register all read/write RAM chunks, and
 * reserve the regions occupied by lowcore, kernel image, the bitmap
 * itself and the initrd.
 */
static void __init
setup_memory(void)
{
	unsigned long bootmap_size;
	unsigned long start_pfn, end_pfn;
	int i;

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(&_end));
	end_pfn = max_pfn = PFN_DOWN(memory_end);

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * Move the initrd in case the bitmap of the bootmem allocater
	 * would overwrite it.
	 */

	if (INITRD_START && INITRD_SIZE) {
		unsigned long bmap_size;
		unsigned long start;

		bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
		bmap_size = PFN_PHYS(bmap_size);

		if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
			/* Relocate past the bitmap, with a one-page gap. */
			start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;

			if (start + INITRD_SIZE > memory_end) {
				printk("initrd extends beyond end of memory "
				       "(0x%08lx > 0x%08lx)\n"
				       "disabling initrd\n",
				       start + INITRD_SIZE, memory_end);
				INITRD_START = INITRD_SIZE = 0;
			} else {
				printk("Moving initrd (0x%08lx -> 0x%08lx, "
				       "size: %ld)\n",
				       INITRD_START, start, INITRD_SIZE);
				memmove((void *) start, (void *) INITRD_START,
					INITRD_SIZE);
				INITRD_START = start;
			}
		}
	}
#endif

	/*
	 * Initialize the boot-time allocator
	 */
	bootmap_size = init_bootmem(start_pfn, end_pfn);

	/*
	 * Register RAM areas with the bootmem allocator.
	 */

	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
		unsigned long start_chunk, end_chunk, pfn;

		if (memory_chunk[i].type != CHUNK_READ_WRITE)
			continue;
		start_chunk = PFN_DOWN(memory_chunk[i].addr);
		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
		end_chunk = min(end_chunk, end_pfn);
		if (start_chunk >= end_chunk)
			continue;
		add_active_range(0, start_chunk, end_chunk);
		/* Initialize the storage key of every usable page. */
		pfn = max(start_chunk, start_pfn);
		for (; pfn <= end_chunk; pfn++)
			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
	}

	psw_set_key(PAGE_DEFAULT_KEY);

	free_bootmem_with_active_regions(0, max_pfn);

	/*
	 * Reserve memory used for lowcore/command line/kernel image.
	 */
	reserve_bootmem(0, (unsigned long)_ehead);
	reserve_bootmem((unsigned long)_stext,
			PFN_PHYS(start_pfn) - (unsigned long)_stext);
	/*
	 * Reserve the bootmem bitmap itself as well. We do this in two
	 * steps (first step was init_bootmem()) because this catches
	 * the (very unlikely) case of us accidentally initializing the
	 * bootmem allocator with an invalid RAM area.
	 */
	reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);

#ifdef CONFIG_BLK_DEV_INITRD
	if (INITRD_START && INITRD_SIZE) {
		if (INITRD_START + INITRD_SIZE <= memory_end) {
			reserve_bootmem(INITRD_START, INITRD_SIZE);
			initrd_start = INITRD_START;
			initrd_end = initrd_start + INITRD_SIZE;
		} else {
			printk("initrd extends beyond end of memory "
			       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
			       initrd_start + INITRD_SIZE, memory_end);
			initrd_start = initrd_end = 0;
		}
	}
#endif
}
|
||||
|
||||
/*
 * Setup function called from init/main.c just after the banner
 * was printed.
 */

void __init
setup_arch(char **cmdline_p)
{
	/*
	 * print what head.S has found out about the machine
	 */
#ifndef CONFIG_64BIT
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (31 bit mode)\n" :
	       "We are running native (31 bit mode)\n");
	printk((MACHINE_HAS_IEEE) ?
	       "This machine has an IEEE fpu\n" :
	       "This machine has no IEEE fpu\n");
#else /* CONFIG_64BIT */
	printk((MACHINE_IS_VM) ?
	       "We are running under VM (64 bit mode)\n" :
	       "We are running native (64 bit mode)\n");
#endif /* CONFIG_64BIT */

	/* Save unparsed command line copy for /proc/cmdline */
	strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);

	*cmdline_p = COMMAND_LINE;
	*(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';

	ROOT_DEV = Root_RAM0;

	init_mm.start_code = PAGE_OFFSET;
	init_mm.end_code = (unsigned long) &_etext;
	init_mm.end_data = (unsigned long) &_edata;
	init_mm.brk = (unsigned long) &_end;

	/* Pick the fastest available uaccess method. */
	if (MACHINE_HAS_MVCOS)
		memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
	else
		memcpy(&uaccess, &uaccess_std, sizeof(uaccess));

	parse_early_param();

	/* Order matters: memory limits before allocator before lowcore. */
	setup_memory_end();
	setup_addressing_mode();
	setup_memory();
	setup_resources();
	setup_lowcore();

	cpu_init();
	__cpu_logical_map[0] = S390_lowcore.cpu_data.cpu_addr;
	smp_setup_cpu_possible_map();

	/*
	 * Create kernel page tables and switch to virtual addressing.
	 */
	paging_init();

	/* Setup default console */
	conmode_default();
}
|
||||
|
||||
/* Print identification data for one cpu. */
void print_cpu_info(struct cpuinfo_S390 *cpuinfo)
{
	printk("cpu %d "
#ifdef CONFIG_SMP
	       "phys_idx=%d "
#endif
	       "vers=%02X ident=%06X machine=%04X unused=%04X\n",
	       cpuinfo->cpu_nr,
#ifdef CONFIG_SMP
	       cpuinfo->cpu_addr,
#endif
	       cpuinfo->cpu_id.version,
	       cpuinfo->cpu_id.ident,
	       cpuinfo->cpu_id.machine,
	       cpuinfo->cpu_id.unused);
}
|
||||
|
||||
/*
 * show_cpuinfo - Get information on one CPU for use by procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
	struct cpuinfo_S390 *cpuinfo;
	/* c_start() hands out cpu index + 1, undo that bias. */
	unsigned long n = (unsigned long) v - 1;

	s390_adjust_jiffies();
	preempt_disable();
	if (!n) {
		/* First iteration: print the global header. */
		seq_printf(m, "vendor_id : IBM/S390\n"
			   "# processors : %i\n"
			   "bogomips per cpu: %lu.%02lu\n",
			   num_online_cpus(), loops_per_jiffy/(500000/HZ),
			   (loops_per_jiffy/(5000/HZ))%100);
	}
	if (cpu_online(n)) {
#ifdef CONFIG_SMP
		if (smp_processor_id() == n)
			cpuinfo = &S390_lowcore.cpu_data;
		else
			cpuinfo = &lowcore_ptr[n]->cpu_data;
#else
		cpuinfo = &S390_lowcore.cpu_data;
#endif
		seq_printf(m, "processor %li: "
			   "version = %02X, "
			   "identification = %06X, "
			   "machine = %04X\n",
			   n, cpuinfo->cpu_id.version,
			   cpuinfo->cpu_id.ident,
			   cpuinfo->cpu_id.machine);
	}
	preempt_enable();
	return 0;
}
|
||||
|
||||
/* seq_file iterator: positions 1..NR_CPUS map to cpu 0..NR_CPUS-1. */
static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < NR_CPUS ? (void *)((unsigned long) *pos + 1) : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return c_start(m, pos);
}
static void c_stop(struct seq_file *m, void *v)
{
}
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
|
||||
|
||||
515
arch/s390/kernel/signal.c
Normal file
515
arch/s390/kernel/signal.c
Normal file
@@ -0,0 +1,515 @@
|
||||
/*
|
||||
* arch/s390/kernel/signal.c
|
||||
*
|
||||
* Copyright (C) IBM Corp. 1999,2006
|
||||
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
|
||||
*
|
||||
* Based on Intel version
|
||||
*
|
||||
* Copyright (C) 1991, 1992 Linus Torvalds
|
||||
*
|
||||
* 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/signal.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/unistd.h>
|
||||
#include <linux/stddef.h>
|
||||
#include <linux/tty.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/binfmts.h>
|
||||
#include <asm/ucontext.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/lowcore.h>
|
||||
|
||||
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))


/* User-stack frame layout for classic (non-rt) signal delivery.
   Field order is ABI - do not reorder. */
typedef struct
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
	struct sigcontext sc;
	_sigregs sregs;
	int signo;
	__u8 retcode[S390_SYSCALL_SIZE];
} sigframe;

/* User-stack frame layout for rt (siginfo) signal delivery.
   Field order is ABI - do not reorder. */
typedef struct
{
	__u8 callee_used_stack[__SIGNAL_FRAMESIZE];
	__u8 retcode[S390_SYSCALL_SIZE];
	struct siginfo info;
	struct ucontext uc;
} rt_sigframe;
|
||||
|
||||
/*
 * Atomically swap in the new signal mask, and wait for a signal.
 */
asmlinkage int
sys_sigsuspend(int history0, int history1, old_sigset_t mask)
{
	mask &= _BLOCKABLE;
	spin_lock_irq(&current->sighand->siglock);
	current->saved_sigmask = current->blocked;
	siginitset(&current->blocked, mask);
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	current->state = TASK_INTERRUPTIBLE;
	schedule();
	/* Have do_signal() restore saved_sigmask after delivery. */
	set_thread_flag(TIF_RESTORE_SIGMASK);

	return -ERESTARTNOHAND;
}
|
||||
|
||||
/* Old-style sigaction syscall: copy the user's old_sigaction in/out
   around do_sigaction(). */
asmlinkage long
sys_sigaction(int sig, const struct old_sigaction __user *act,
	      struct old_sigaction __user *oact)
{
	struct k_sigaction new_ka, old_ka;
	int ret;

	if (act) {
		old_sigset_t mask;
		if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
		    __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
		    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
		    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
		    __get_user(mask, &act->sa_mask))
			return -EFAULT;
		siginitset(&new_ka.sa.sa_mask, mask);
	}

	ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);

	if (!ret && oact) {
		if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
		    __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
		    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
		    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
		    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
			return -EFAULT;
	}

	return ret;
}
|
||||
|
||||
/* sigaltstack syscall: the user stack pointer lives in gpr 15. */
asmlinkage long
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
		struct pt_regs *regs)
{
	return do_sigaltstack(uss, uoss, regs->gprs[15]);
}
|
||||
|
||||
|
||||
|
||||
/* Returns non-zero on fault. */
/* Copy the current register state into the user-space _sigregs area. */
static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
	_sigregs user_sregs;

	save_access_regs(current->thread.acrs);

	/* Copy a 'clean' PSW mask to the user to avoid leaking
	   information about whether PER is currently on.  */
	user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask);
	user_sregs.regs.psw.addr = regs->psw.addr;
	memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
	memcpy(&user_sregs.regs.acrs, current->thread.acrs,
	       sizeof(sregs->regs.acrs));
	/*
	 * We have to store the fp registers to current->thread.fp_regs
	 * to merge them with the emulated registers.
	 */
	save_fp_regs(&current->thread.fp_regs);
	memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
	       sizeof(s390_fp_regs));
	return __copy_to_user(sregs, &user_sregs, sizeof(_sigregs));
}
|
||||
|
||||
/* Returns positive number on error */
/* Restore register state from the user-space _sigregs area. */
static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
	int err;
	_sigregs user_sregs;

	/* Always make any pending restarted system call return -EINTR */
	current_thread_info()->restart_block.fn = do_no_restart_syscall;

	err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs));
	if (err)
		return err;
	/* Merge only the user-modifiable PSW bits back in. */
	regs->psw.mask = PSW_MASK_MERGE(regs->psw.mask,
					user_sregs.regs.psw.mask);
	regs->psw.addr = PSW_ADDR_AMODE | user_sregs.regs.psw.addr;
	memcpy(&regs->gprs, &user_sregs.regs.gprs, sizeof(sregs->regs.gprs));
	memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
	       sizeof(sregs->regs.acrs));
	restore_access_regs(current->thread.acrs);

	memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
	       sizeof(s390_fp_regs));
	/* Mask out invalid fpc bits supplied by user space. */
	current->thread.fp_regs.fpc &= FPC_VALID_MASK;

	restore_fp_regs(&current->thread.fp_regs);
	regs->trap = -1;	/* disable syscall checks */
	return 0;
}
|
||||
|
||||
/* Return from a classic signal handler: restore mask and registers
   from the sigframe on the user stack. */
asmlinkage long sys_sigreturn(struct pt_regs *regs)
{
	sigframe __user *frame = (sigframe __user *)regs->gprs[15];
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigregs(regs, &frame->sregs))
		goto badframe;

	/* Return value of the interrupted context. */
	return regs->gprs[2];

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
|
||||
|
||||
/* Return from an rt signal handler: restore mask, registers and the
   alternate signal stack from the rt_sigframe on the user stack. */
asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
{
	rt_sigframe __user *frame = (rt_sigframe __user *)regs->gprs[15];
	sigset_t set;

	if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
		goto badframe;
	if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
		goto badframe;

	sigdelsetmask(&set, ~_BLOCKABLE);
	spin_lock_irq(&current->sighand->siglock);
	current->blocked = set;
	recalc_sigpending();
	spin_unlock_irq(&current->sighand->siglock);

	if (restore_sigregs(regs, &frame->uc.uc_mcontext))
		goto badframe;

	if (do_sigaltstack(&frame->uc.uc_stack, NULL,
			   regs->gprs[15]) == -EFAULT)
		goto badframe;
	return regs->gprs[2];

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
|
||||
|
||||
/*
|
||||
* Set up a signal frame.
|
||||
*/
|
||||
|
||||
|
||||
/*
 * Determine which stack to use..
 */
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
{
	unsigned long sp;

	/* Default to using normal stack */
	sp = regs->gprs[15];

	/* This is the X/Open sanctioned signal stack switching.  */
	if (ka->sa.sa_flags & SA_ONSTACK) {
		if (! sas_ss_flags(sp))
			sp = current->sas_ss_sp + current->sas_ss_size;
	}

	/* This is the legacy signal stack switching. */
	else if (!user_mode(regs) &&
		 !(ka->sa.sa_flags & SA_RESTORER) &&
		 ka->sa.sa_restorer) {
		sp = (unsigned long) ka->sa.sa_restorer;
	}

	/* Reserve frame_size bytes, keeping 8-byte stack alignment. */
	return (void __user *)((sp - frame_size) & -8ul);
}
|
||||
|
||||
static inline int map_signal(int sig)
|
||||
{
|
||||
if (current_thread_info()->exec_domain
|
||||
&& current_thread_info()->exec_domain->signal_invmap
|
||||
&& sig < 32)
|
||||
return current_thread_info()->exec_domain->signal_invmap[sig];
|
||||
else
|
||||
return sig;
|
||||
}
|
||||
|
||||
/* Build a classic signal frame on the user stack and redirect the
   user PSW to the handler.  Returns 0 on success, -EFAULT (after
   forcing SIGSEGV) on fault. */
static int setup_frame(int sig, struct k_sigaction *ka,
		       sigset_t *set, struct pt_regs * regs)
{
	sigframe __user *frame;

	frame = get_sigframe(ka, regs, sizeof(sigframe));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(sigframe)))
		goto give_sigsegv;

	if (__copy_to_user(&frame->sc.oldmask, &set->sig, _SIGMASK_COPY_SIZE))
		goto give_sigsegv;

	if (save_sigregs(regs, &frame->sregs))
		goto give_sigsegv;
	if (__put_user(&frame->sregs, &frame->sc.sregs))
		goto give_sigsegv;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->gprs[14] = (unsigned long)
			ka->sa.sa_restorer | PSW_ADDR_AMODE;
	} else {
		/* No restorer: place an svc __NR_sigreturn stub in the
		   frame and point the return register at it. */
		regs->gprs[14] = (unsigned long)
			frame->retcode | PSW_ADDR_AMODE;
		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn,
			       (u16 __user *)(frame->retcode)))
			goto give_sigsegv;
	}

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (addr_t __user *) frame))
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->gprs[15] = (unsigned long) frame;
	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;

	regs->gprs[2] = map_signal(sig);
	regs->gprs[3] = (unsigned long) &frame->sc;

	/* We forgot to include these in the sigcontext.
	   To avoid breaking binary compatibility, they are passed as args. */
	regs->gprs[4] = current->thread.trap_no;
	regs->gprs[5] = current->thread.prot_addr;

	/* Place signal number on stack to allow backtrace from handler.  */
	if (__put_user(regs->gprs[2], (int __user *) &frame->signo))
		goto give_sigsegv;
	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
|
||||
|
||||
/* Build an rt signal frame (siginfo + ucontext) on the user stack and
   redirect the user PSW to the handler.  Returns 0 on success,
   -EFAULT (after forcing SIGSEGV) on fault. */
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			   sigset_t *set, struct pt_regs * regs)
{
	int err = 0;
	rt_sigframe __user *frame;

	frame = get_sigframe(ka, regs, sizeof(rt_sigframe));
	if (!access_ok(VERIFY_WRITE, frame, sizeof(rt_sigframe)))
		goto give_sigsegv;

	if (copy_siginfo_to_user(&frame->info, info))
		goto give_sigsegv;

	/* Create the ucontext.  */
	err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(NULL, &frame->uc.uc_link);
	err |= __put_user((void __user *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->gprs[15]),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= save_sigregs(regs, &frame->uc.uc_mcontext);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		goto give_sigsegv;

	/* Set up to return from userspace.  If provided, use a stub
	   already in userspace.  */
	if (ka->sa.sa_flags & SA_RESTORER) {
		regs->gprs[14] = (unsigned long)
			ka->sa.sa_restorer | PSW_ADDR_AMODE;
	} else {
		/* No restorer: place an svc __NR_rt_sigreturn stub in the
		   frame and point the return register at it. */
		regs->gprs[14] = (unsigned long)
			frame->retcode | PSW_ADDR_AMODE;
		if (__put_user(S390_SYSCALL_OPCODE | __NR_rt_sigreturn,
			       (u16 __user *)(frame->retcode)))
			goto give_sigsegv;
	}

	/* Set up backchain. */
	if (__put_user(regs->gprs[15], (addr_t __user *) frame))
		goto give_sigsegv;

	/* Set up registers for signal handler */
	regs->gprs[15] = (unsigned long) frame;
	regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE;

	regs->gprs[2] = map_signal(sig);
	regs->gprs[3] = (unsigned long) &frame->info;
	regs->gprs[4] = (unsigned long) &frame->uc;
	return 0;

give_sigsegv:
	force_sigsegv(sig, current);
	return -EFAULT;
}
|
||||
|
||||
/*
 * OK, we're invoking a handler
 */
/* Deliver one signal: build the frame, then block the handler's mask
   (plus the signal itself unless SA_NODEFER).  Returns the frame
   setup result. */
static int
handle_signal(unsigned long sig, struct k_sigaction *ka,
	      siginfo_t *info, sigset_t *oldset, struct pt_regs * regs)
{
	int ret;

	/* Set up the stack frame */
	if (ka->sa.sa_flags & SA_SIGINFO)
		ret = setup_rt_frame(sig, ka, info, oldset, regs);
	else
		ret = setup_frame(sig, ka, oldset, regs);

	if (ret == 0) {
		spin_lock_irq(&current->sighand->siglock);
		sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
		if (!(ka->sa.sa_flags & SA_NODEFER))
			sigaddset(&current->blocked,sig);
		recalc_sigpending();
		spin_unlock_irq(&current->sighand->siglock);
	}

	return ret;
}
|
||||
|
||||
/*
|
||||
* Note that 'init' is a special process: it doesn't get signals it doesn't
|
||||
* want to handle. Thus you cannot kill init even with a SIGKILL even by
|
||||
* mistake.
|
||||
*
|
||||
* Note that we go through the signals twice: once to check the signals that
|
||||
* the kernel can handle, and then we build all the user-level signal handling
|
||||
* stack-frames in one go after that.
|
||||
*/
|
||||
void do_signal(struct pt_regs *regs)
|
||||
{
|
||||
unsigned long retval = 0, continue_addr = 0, restart_addr = 0;
|
||||
siginfo_t info;
|
||||
int signr;
|
||||
struct k_sigaction ka;
|
||||
sigset_t *oldset;
|
||||
|
||||
/*
|
||||
* We want the common case to go fast, which
|
||||
* is why we may in certain cases get here from
|
||||
* kernel mode. Just return without doing anything
|
||||
* if so.
|
||||
*/
|
||||
if (!user_mode(regs))
|
||||
return;
|
||||
|
||||
if (test_thread_flag(TIF_RESTORE_SIGMASK))
|
||||
oldset = ¤t->saved_sigmask;
|
||||
else
|
||||
oldset = ¤t->blocked;
|
||||
|
||||
/* Are we from a system call? */
|
||||
if (regs->trap == __LC_SVC_OLD_PSW) {
|
||||
continue_addr = regs->psw.addr;
|
||||
restart_addr = continue_addr - regs->ilc;
|
||||
retval = regs->gprs[2];
|
||||
|
||||
/* Prepare for system call restart. We do this here so that a
|
||||
debugger will see the already changed PSW. */
|
||||
switch (retval) {
|
||||
case -ERESTARTNOHAND:
|
||||
case -ERESTARTSYS:
|
||||
case -ERESTARTNOINTR:
|
||||
regs->gprs[2] = regs->orig_gpr2;
|
||||
regs->psw.addr = restart_addr;
|
||||
break;
|
||||
case -ERESTART_RESTARTBLOCK:
|
||||
regs->gprs[2] = -EINTR;
|
||||
}
|
||||
regs->trap = -1; /* Don't deal with this again. */
|
||||
}
|
||||
|
||||
/* Get signal to deliver. When running under ptrace, at this point
|
||||
the debugger may change all our registers ... */
|
||||
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
|
||||
|
||||
/* Depending on the signal settings we may need to revert the
|
||||
decision to restart the system call. */
|
||||
if (signr > 0 && regs->psw.addr == restart_addr) {
|
||||
if (retval == -ERESTARTNOHAND
|
||||
|| (retval == -ERESTARTSYS
|
||||
&& !(current->sighand->action[signr-1].sa.sa_flags
|
||||
& SA_RESTART))) {
|
||||
regs->gprs[2] = -EINTR;
|
||||
regs->psw.addr = continue_addr;
|
||||
}
|
||||
}
|
||||
|
||||
if (signr > 0) {
|
||||
/* Whee! Actually deliver the signal. */
|
||||
#ifdef CONFIG_COMPAT
|
||||
if (test_thread_flag(TIF_31BIT)) {
|
||||
extern int handle_signal32(unsigned long sig,
|
||||
struct k_sigaction *ka,
|
||||
siginfo_t *info,
|
||||
sigset_t *oldset,
|
||||
struct pt_regs *regs);
|
||||
if (handle_signal32(
|
||||
signr, &ka, &info, oldset, regs) == 0) {
|
||||
if (test_thread_flag(TIF_RESTORE_SIGMASK))
|
||||
clear_thread_flag(TIF_RESTORE_SIGMASK);
|
||||
}
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
|
||||
/*
|
||||
* A signal was successfully delivered; the saved
|
||||
* sigmask will have been stored in the signal frame,
|
||||
* and will be restored by sigreturn, so we can simply
|
||||
* clear the TIF_RESTORE_SIGMASK flag.
|
||||
*/
|
||||
if (test_thread_flag(TIF_RESTORE_SIGMASK))
|
||||
clear_thread_flag(TIF_RESTORE_SIGMASK);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* If there's no signal to deliver, we just put the saved sigmask back.
|
||||
*/
|
||||
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
|
||||
clear_thread_flag(TIF_RESTORE_SIGMASK);
|
||||
sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL);
|
||||
}
|
||||
|
||||
/* Restart a different system call. */
|
||||
if (retval == -ERESTART_RESTARTBLOCK
|
||||
&& regs->psw.addr == continue_addr) {
|
||||
regs->gprs[2] = __NR_restart_syscall;
|
||||
set_thread_flag(TIF_RESTART_SVC);
|
||||
}
|
||||
}
|
||||
787
arch/s390/kernel/smp.c
Normal file
787
arch/s390/kernel/smp.c
Normal file
@@ -0,0 +1,787 @@
|
||||
/*
|
||||
* arch/s390/kernel/smp.c
|
||||
*
|
||||
* Copyright (C) IBM Corp. 1999,2006
|
||||
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
|
||||
* Martin Schwidefsky (schwidefsky@de.ibm.com)
|
||||
* Heiko Carstens (heiko.carstens@de.ibm.com)
|
||||
*
|
||||
* based on other smp stuff by
|
||||
* (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
|
||||
* (c) 1998 Ingo Molnar
|
||||
*
|
||||
* We work with logical cpu numbering everywhere we can. The only
|
||||
* functions using the real cpu address (got from STAP) are the sigp
|
||||
* functions. For all other functions we use the identity mapping.
|
||||
* That means that cpu_number_map[i] == i for every cpu. cpu_number_map is
|
||||
* used e.g. to find the idle task belonging to a logical cpu. Every array
|
||||
* in the kernel is sorted by the logical cpu number and not by the physical
|
||||
* one which is causing all the confusion with __cpu_logical_map and
|
||||
* cpu_number_map in other architectures.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/cache.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/timex.h>
|
||||
#include <asm/ipl.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/sigp.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/s390_ext.h>
|
||||
#include <asm/cpcmd.h>
|
||||
#include <asm/tlbflush.h>
|
||||
#include <asm/timer.h>
|
||||
|
||||
extern volatile int __cpu_logical_map[];
|
||||
|
||||
/*
|
||||
* An array with a pointer the lowcore of every CPU.
|
||||
*/
|
||||
|
||||
struct _lowcore *lowcore_ptr[NR_CPUS];
|
||||
|
||||
cpumask_t cpu_online_map = CPU_MASK_NONE;
|
||||
cpumask_t cpu_possible_map = CPU_MASK_NONE;
|
||||
|
||||
static struct task_struct *current_set[NR_CPUS];
|
||||
|
||||
static void smp_ext_bitcall(int, ec_bit_sig);
|
||||
|
||||
/*
|
||||
* Structure and data for __smp_call_function_map(). This is designed to
|
||||
* minimise static memory requirements. It also looks cleaner.
|
||||
*/
|
||||
static DEFINE_SPINLOCK(call_lock);
|
||||
|
||||
struct call_data_struct {
|
||||
void (*func) (void *info);
|
||||
void *info;
|
||||
cpumask_t started;
|
||||
cpumask_t finished;
|
||||
int wait;
|
||||
};
|
||||
|
||||
static struct call_data_struct * call_data;
|
||||
|
||||
/*
|
||||
* 'Call function' interrupt callback
|
||||
*/
|
||||
static void do_call_function(void)
|
||||
{
|
||||
void (*func) (void *info) = call_data->func;
|
||||
void *info = call_data->info;
|
||||
int wait = call_data->wait;
|
||||
|
||||
cpu_set(smp_processor_id(), call_data->started);
|
||||
(*func)(info);
|
||||
if (wait)
|
||||
cpu_set(smp_processor_id(), call_data->finished);;
|
||||
}
|
||||
|
||||
/* Core cross-call: run func on every cpu in map (possibly including the
 * local cpu), using the single global call_data slot under call_lock. */
static void __smp_call_function_map(void (*func) (void *info), void *info,
				    int nonatomic, int wait, cpumask_t map)
{
	struct call_data_struct data;
	int cpu, local = 0;

	/*
	 * Can deadlock when interrupts are disabled or if in wrong context.
	 */
	WARN_ON(irqs_disabled() || in_irq());

	/*
	 * Check for local function call. We have to have the same call order
	 * as in on_each_cpu() because of machine_restart_smp().
	 */
	if (cpu_isset(smp_processor_id(), map)) {
		local = 1;
		cpu_clear(smp_processor_id(), map);
	}

	/* Only signal cpus that are actually online. */
	cpus_and(map, map, cpu_online_map);
	if (cpus_empty(map))
		goto out;

	data.func = func;
	data.info = info;
	data.started = CPU_MASK_NONE;
	data.wait = wait;
	if (wait)
		data.finished = CPU_MASK_NONE;

	/* call_lock serializes use of the one global call_data slot. */
	spin_lock_bh(&call_lock);
	call_data = &data;

	/* Kick each remote cpu with an external-call signal. */
	for_each_cpu_mask(cpu, map)
		smp_ext_bitcall(cpu, ec_call_function);

	/* Wait for response */
	while (!cpus_equal(map, data.started))
		cpu_relax();

	if (wait)
		while (!cpus_equal(map, data.finished))
			cpu_relax();

	spin_unlock_bh(&call_lock);

out:
	/* Run the local invocation last, with interrupts off, mirroring
	 * the behaviour of an interrupt-delivered remote call. */
	local_irq_disable();
	if (local)
		func(info);
	local_irq_enable();
}
|
||||
|
||||
/*
|
||||
* smp_call_function:
|
||||
* @func: the function to run; this must be fast and non-blocking
|
||||
* @info: an arbitrary pointer to pass to the function
|
||||
* @nonatomic: unused
|
||||
* @wait: if true, wait (atomically) until function has completed on other CPUs
|
||||
*
|
||||
* Run a function on all other CPUs.
|
||||
*
|
||||
* You must not call this function with disabled interrupts or from a
|
||||
* hardware interrupt handler. You may call it from a bottom half.
|
||||
*/
|
||||
int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
|
||||
int wait)
|
||||
{
|
||||
cpumask_t map;
|
||||
|
||||
preempt_disable();
|
||||
map = cpu_online_map;
|
||||
cpu_clear(smp_processor_id(), map);
|
||||
__smp_call_function_map(func, info, nonatomic, wait, map);
|
||||
preempt_enable();
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(smp_call_function);
|
||||
|
||||
/*
|
||||
* smp_call_function_on:
|
||||
* @func: the function to run; this must be fast and non-blocking
|
||||
* @info: an arbitrary pointer to pass to the function
|
||||
* @nonatomic: unused
|
||||
* @wait: if true, wait (atomically) until function has completed on other CPUs
|
||||
* @cpu: the CPU where func should run
|
||||
*
|
||||
* Run a function on one processor.
|
||||
*
|
||||
* You must not call this function with disabled interrupts or from a
|
||||
* hardware interrupt handler. You may call it from a bottom half.
|
||||
*/
|
||||
int smp_call_function_on(void (*func) (void *info), void *info, int nonatomic,
|
||||
int wait, int cpu)
|
||||
{
|
||||
cpumask_t map = CPU_MASK_NONE;
|
||||
|
||||
preempt_disable();
|
||||
cpu_set(cpu, map);
|
||||
__smp_call_function_map(func, info, nonatomic, wait, map);
|
||||
preempt_enable();
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(smp_call_function_on);
|
||||
|
||||
static void do_send_stop(void)
|
||||
{
|
||||
int cpu, rc;
|
||||
|
||||
/* stop all processors */
|
||||
for_each_online_cpu(cpu) {
|
||||
if (cpu == smp_processor_id())
|
||||
continue;
|
||||
do {
|
||||
rc = signal_processor(cpu, sigp_stop);
|
||||
} while (rc == sigp_busy);
|
||||
}
|
||||
}
|
||||
|
||||
static void do_store_status(void)
|
||||
{
|
||||
int cpu, rc;
|
||||
|
||||
/* store status of all processors in their lowcores (real 0) */
|
||||
for_each_online_cpu(cpu) {
|
||||
if (cpu == smp_processor_id())
|
||||
continue;
|
||||
do {
|
||||
rc = signal_processor_p(
|
||||
(__u32)(unsigned long) lowcore_ptr[cpu], cpu,
|
||||
sigp_store_status_at_address);
|
||||
} while(rc == sigp_busy);
|
||||
}
|
||||
}
|
||||
|
||||
static void do_wait_for_stop(void)
|
||||
{
|
||||
int cpu;
|
||||
|
||||
/* Wait for all other cpus to enter stopped state */
|
||||
for_each_online_cpu(cpu) {
|
||||
if (cpu == smp_processor_id())
|
||||
continue;
|
||||
while(!smp_cpu_not_running(cpu))
|
||||
cpu_relax();
|
||||
}
|
||||
}
|
||||
|
||||
/*
 * this function sends a 'stop' sigp to all other CPUs in the system.
 * it goes straight through.
 */
void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);

	/* write magic number to zero page (absolute 0) */
	lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;

	/* stop other processors. */
	do_send_stop();

	/* wait until other processors are stopped */
	do_wait_for_stop();

	/* store status of other processors. */
	do_store_status();
}
|
||||
|
||||
/*
 * Reboot, halt and power_off routines for SMP.
 */

/* SMP restart: quiesce all other cpus, then re-IPL the machine. */
void machine_restart_smp(char * __unused)
{
	smp_send_stop();
	do_reipl();
}
|
||||
|
||||
/* SMP halt: stop all other cpus, run the configured VM halt command (if
 * any, under z/VM), then stop this cpu with status stored.  Never returns. */
void machine_halt_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
		__cpcmd(vmhalt_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	/* Spin forever in case the sigp did not take effect. */
	for (;;);
}
|
||||
|
||||
/* SMP power off: same sequence as halt, but with the VM power-off command.
 * Never returns. */
void machine_power_off_smp(void)
{
	smp_send_stop();
	if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
		__cpcmd(vmpoff_cmd, NULL, 0, NULL);
	signal_processor(smp_processor_id(), sigp_stop_and_store_status);
	/* Spin forever in case the sigp did not take effect. */
	for (;;);
}
|
||||
|
||||
/*
 * This is the main routine where commands issued by other
 * cpus are handled.
 */

static void do_ext_call_interrupt(__u16 code)
{
	unsigned long bits;

	/*
	 * handle bit signal external calls
	 *
	 * For the ec_schedule signal we have to do nothing. All the work
	 * is done automatically when we return from the interrupt.
	 */
	/* Atomically fetch and clear the pending-bits word so concurrent
	 * senders are not lost. */
	bits = xchg(&S390_lowcore.ext_call_fast, 0);

	if (test_bit(ec_call_function, &bits))
		do_call_function();
}
|
||||
|
||||
/*
 * Send an external call sigp to another cpu and return without waiting
 * for its completion.
 */
static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
	/*
	 * Set signaling bit in lowcore of target cpu and kick it
	 */
	set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
	/* Retry the sigp while the target is busy; the receiver will pick
	 * the request up from ext_call_fast in do_ext_call_interrupt(). */
	while(signal_processor(cpu, sigp_emergency_signal) == sigp_busy)
		udelay(10);
}
|
||||
|
||||
#ifndef CONFIG_64BIT
|
||||
/*
|
||||
* this function sends a 'purge tlb' signal to another CPU.
|
||||
*/
|
||||
void smp_ptlb_callback(void *info)
|
||||
{
|
||||
local_flush_tlb();
|
||||
}
|
||||
|
||||
void smp_ptlb_all(void)
|
||||
{
|
||||
on_each_cpu(smp_ptlb_callback, NULL, 0, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(smp_ptlb_all);
|
||||
#endif /* ! CONFIG_64BIT */
|
||||
|
||||
/*
|
||||
* this function sends a 'reschedule' IPI to another CPU.
|
||||
* it goes straight through and wastes no time serializing
|
||||
* anything. Worst case is that we lose a reschedule ...
|
||||
*/
|
||||
void smp_send_reschedule(int cpu)
|
||||
{
|
||||
smp_ext_bitcall(cpu, ec_schedule);
|
||||
}
|
||||
|
||||
/*
 * parameter area for the set/clear control bit callbacks
 */
struct ec_creg_mask_parms {
	unsigned long orvals[16];	/* bits OR'ed into each control register */
	unsigned long andvals[16];	/* mask AND'ed with each control register */
};
|
||||
|
||||
/*
|
||||
* callback for setting/clearing control bits
|
||||
*/
|
||||
static void smp_ctl_bit_callback(void *info) {
|
||||
struct ec_creg_mask_parms *pp = info;
|
||||
unsigned long cregs[16];
|
||||
int i;
|
||||
|
||||
__ctl_store(cregs, 0, 15);
|
||||
for (i = 0; i <= 15; i++)
|
||||
cregs[i] = (cregs[i] & pp->andvals[i]) | pp->orvals[i];
|
||||
__ctl_load(cregs, 0, 15);
|
||||
}
|
||||
|
||||
/*
|
||||
* Set a bit in a control register of all cpus
|
||||
*/
|
||||
void smp_ctl_set_bit(int cr, int bit)
|
||||
{
|
||||
struct ec_creg_mask_parms parms;
|
||||
|
||||
memset(&parms.orvals, 0, sizeof(parms.orvals));
|
||||
memset(&parms.andvals, 0xff, sizeof(parms.andvals));
|
||||
parms.orvals[cr] = 1 << bit;
|
||||
on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
|
||||
}
|
||||
|
||||
/*
|
||||
* Clear a bit in a control register of all cpus
|
||||
*/
|
||||
void smp_ctl_clear_bit(int cr, int bit)
|
||||
{
|
||||
struct ec_creg_mask_parms parms;
|
||||
|
||||
memset(&parms.orvals, 0, sizeof(parms.orvals));
|
||||
memset(&parms.andvals, 0xff, sizeof(parms.andvals));
|
||||
parms.andvals[cr] = ~(1L << bit);
|
||||
on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1);
|
||||
}
|
||||
|
||||
/*
 * Lets check how many CPUs we have.
 */

static unsigned int
__init smp_count_cpus(void)
{
	unsigned int cpu, num_cpus;
	__u16 boot_cpu_addr;

	/*
	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
	 */

	boot_cpu_addr = S390_lowcore.cpu_data.cpu_addr;
	current_thread_info()->cpu = 0;
	num_cpus = 1;
	/* Probe the full 16-bit physical cpu address space with sigp sense,
	 * temporarily mapping each candidate address to logical cpu 1. */
	for (cpu = 0; cpu <= 65535; cpu++) {
		if ((__u16) cpu == boot_cpu_addr)
			continue;
		__cpu_logical_map[1] = (__u16) cpu;
		if (signal_processor(1, sigp_sense) ==
		    sigp_not_operational)
			continue;
		num_cpus++;
	}

	printk("Detected %d CPU's\n",(int) num_cpus);
	printk("Boot cpu address %2X\n", boot_cpu_addr);

	return num_cpus;
}
|
||||
|
||||
/*
 * Activate a secondary processor.
 * Entry point for a freshly started cpu; sets up per-cpu state, marks the
 * cpu online and drops into the idle loop.  Does not return in practice.
 */
int __devinit start_secondary(void *cpuvoid)
{
	/* Setup the cpu */
	cpu_init();
	preempt_disable();
	/* Enable TOD clock interrupts on the secondary cpu. */
	init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
	/* Enable cpu timer interrupts on the secondary cpu. */
	init_cpu_vtimer();
#endif
	/* Enable pfault pseudo page faults on this cpu. */
	pfault_init();

	/* Mark this cpu as online */
	cpu_set(smp_processor_id(), cpu_online_map);
	/* Switch on interrupts */
	local_irq_enable();
	/* Print info about this processor */
	print_cpu_info(&S390_lowcore.cpu_data);
	/* cpu_idle will call schedule for us */
	cpu_idle();
	return 0;
}
|
||||
|
||||
/* Fork the idle task for @cpu and remember it in current_set[]. */
static void __init smp_create_idle(unsigned int cpu)
{
	struct task_struct *idle;

	/*
	 * don't care about the psw and regs settings since we'll never
	 * reschedule the forked task.
	 */
	idle = fork_idle(cpu);
	if (IS_ERR(idle))
		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(idle));
	current_set[cpu] = idle;
}
|
||||
|
||||
/* Reserving and releasing of CPUs */
|
||||
|
||||
static DEFINE_SPINLOCK(smp_reserve_lock);
|
||||
static int smp_cpu_reserved[NR_CPUS];
|
||||
|
||||
int
|
||||
smp_get_cpu(cpumask_t cpu_mask)
|
||||
{
|
||||
unsigned long flags;
|
||||
int cpu;
|
||||
|
||||
spin_lock_irqsave(&smp_reserve_lock, flags);
|
||||
/* Try to find an already reserved cpu. */
|
||||
for_each_cpu_mask(cpu, cpu_mask) {
|
||||
if (smp_cpu_reserved[cpu] != 0) {
|
||||
smp_cpu_reserved[cpu]++;
|
||||
/* Found one. */
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
/* Reserve a new cpu from cpu_mask. */
|
||||
for_each_cpu_mask(cpu, cpu_mask) {
|
||||
if (cpu_online(cpu)) {
|
||||
smp_cpu_reserved[cpu]++;
|
||||
goto out;
|
||||
}
|
||||
}
|
||||
cpu = -ENODEV;
|
||||
out:
|
||||
spin_unlock_irqrestore(&smp_reserve_lock, flags);
|
||||
return cpu;
|
||||
}
|
||||
|
||||
void
|
||||
smp_put_cpu(int cpu)
|
||||
{
|
||||
unsigned long flags;
|
||||
|
||||
spin_lock_irqsave(&smp_reserve_lock, flags);
|
||||
smp_cpu_reserved[cpu]--;
|
||||
spin_unlock_irqrestore(&smp_reserve_lock, flags);
|
||||
}
|
||||
|
||||
/* Return 1 when the cpu currently mapped at logical address @cpu is in
 * stopped state, 0 otherwise. */
static int
cpu_stopped(int cpu)
{
	__u32 status;

	/* Check for stopped state */
	if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
		/* 0x40 appears to be the "stopped" bit of the sigp sense
		 * status word -- NOTE(review): confirm against the SIGP
		 * status definition in the Principles of Operation. */
		if (status & 0x40)
			return 1;
	}
	return 0;
}
|
||||
|
||||
/* Upping and downing of CPUs */
|
||||
|
||||
/* Bring logical cpu @cpu online: find a stopped physical cpu for it, set
 * its prefix register, prepare its lowcore and initial stack frame, then
 * restart it and wait until start_secondary() marks it online. */
int
__cpu_up(unsigned int cpu)
{
	struct task_struct *idle;
        struct _lowcore    *cpu_lowcore;
	struct stack_frame *sf;
        sigp_ccode          ccode;
	int                 curr_cpu;

	/* Scan the physical cpu address space for a stopped processor and
	 * bind it to this logical cpu number. */
	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
		__cpu_logical_map[cpu] = (__u16) curr_cpu;
		if (cpu_stopped(cpu))
			break;
	}

	if (!cpu_stopped(cpu))
		return -ENODEV;

	/* Point the target's prefix register at its private lowcore. */
	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
				   cpu, sigp_set_prefix);
	if (ccode){
		printk("sigp_set_prefix failed for cpu %d "
		       "with condition code %d\n",
		       (int) cpu, (int) ccode);
		return -EIO;
	}

	idle = current_set[cpu];
        cpu_lowcore = lowcore_ptr[cpu];
	cpu_lowcore->kernel_stack = (unsigned long)
		task_stack_page(idle) + (THREAD_SIZE);
	/* Build an empty initial stack frame below pt_regs. */
	sf = (struct stack_frame *) (cpu_lowcore->kernel_stack
				     - sizeof(struct pt_regs)
				     - sizeof(struct stack_frame));
	memset(sf, 0, sizeof(struct stack_frame));
	sf->gprs[9] = (unsigned long) sf;
	cpu_lowcore->save_area[15] = (unsigned long) sf;
	__ctl_store(cpu_lowcore->cregs_save_area[0], 0, 15);
	asm volatile(
		"	stam	0,15,0(%0)"
		: : "a" (&cpu_lowcore->access_regs_save_area) : "memory");
	cpu_lowcore->percpu_offset = __per_cpu_offset[cpu];
        cpu_lowcore->current_task = (unsigned long) idle;
        cpu_lowcore->cpu_data.cpu_nr = cpu;
	/* Make sure the lowcore writes are visible before the restart. */
	eieio();

	while (signal_processor(cpu,sigp_restart) == sigp_busy)
		udelay(10);

	/* start_secondary() sets the online bit once the cpu is up. */
	while (!cpu_online(cpu))
		cpu_relax();
	return 0;
}
|
||||
|
||||
static unsigned int __initdata additional_cpus;
|
||||
static unsigned int __initdata possible_cpus;
|
||||
|
||||
/* Fill cpu_possible_map and cpu_present_map from the detected cpu count,
 * honoring the "additional_cpus" / "possible_cpus" boot parameters. */
void __init smp_setup_cpu_possible_map(void)
{
	unsigned int detected, possible, i;

	detected = smp_count_cpus();
	possible = min(detected + additional_cpus, (unsigned int) NR_CPUS);

	/* An explicit possible_cpus= overrides detected + additional. */
	if (possible_cpus)
		possible = min(possible_cpus, (unsigned int) NR_CPUS);

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);

	/* Only physically detected cpus are present right now. */
	detected = min(detected, possible);
	for (i = 0; i < detected; i++)
		cpu_set(i, cpu_present_map);
}
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
|
||||
static int __init setup_additional_cpus(char *s)
|
||||
{
|
||||
additional_cpus = simple_strtoul(s, NULL, 0);
|
||||
return 0;
|
||||
}
|
||||
early_param("additional_cpus", setup_additional_cpus);
|
||||
|
||||
static int __init setup_possible_cpus(char *s)
|
||||
{
|
||||
possible_cpus = simple_strtoul(s, NULL, 0);
|
||||
return 0;
|
||||
}
|
||||
early_param("possible_cpus", setup_possible_cpus);
|
||||
|
||||
/* Take the current cpu offline: fails with -EBUSY while it is reserved,
 * otherwise clears it from the online map and masks its external, I/O and
 * most machine-check interruption subclasses in the control registers. */
int
__cpu_disable(void)
{
	unsigned long flags;
	struct ec_creg_mask_parms cr_parms;
	int cpu = smp_processor_id();

	spin_lock_irqsave(&smp_reserve_lock, flags);
	if (smp_cpu_reserved[cpu] != 0) {
		spin_unlock_irqrestore(&smp_reserve_lock, flags);
		return -EBUSY;
	}
	cpu_clear(cpu, cpu_online_map);

	/* Disable pfault pseudo page faults on this cpu. */
	pfault_fini();

	memset(&cr_parms.orvals, 0, sizeof(cr_parms.orvals));
	memset(&cr_parms.andvals, 0xff, sizeof(cr_parms.andvals));

	/* disable all external interrupts */
	cr_parms.orvals[0] = 0;
	cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
				1<<11 | 1<<10 | 1<< 6 | 1<< 4);
	/* disable all I/O interrupts */
	cr_parms.orvals[6] = 0;
	cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
				1<<27 | 1<<26 | 1<<25 | 1<<24);
	/* disable most machine checks */
	cr_parms.orvals[14] = 0;
	cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);

	/* Apply directly on this cpu (not via on_each_cpu). */
	smp_ctl_bit_callback(&cr_parms);

	spin_unlock_irqrestore(&smp_reserve_lock, flags);
	return 0;
}
|
||||
|
||||
/* Wait (on another cpu) until the dying cpu has actually stopped. */
void
__cpu_die(unsigned int cpu)
{
	/* Wait until target cpu is down */
	while (!smp_cpu_not_running(cpu))
		cpu_relax();
	printk("Processor %d spun down\n", cpu);
}
|
||||
|
||||
/* Final code run on a cpu going offline: exit the idle task and stop
 * ourselves with sigp.  Must never return. */
void
cpu_die(void)
{
	idle_task_exit();
	signal_processor(smp_processor_id(), sigp_stop);
	/* sigp_stop should not return; complain loudly if it does. */
	BUG();
	for(;;);
}
|
||||
|
||||
#endif /* CONFIG_HOTPLUG_CPU */
|
||||
|
||||
/*
 * Cycle through the processors and setup structures.
 * Registers the 0x1201 external interrupt, allocates a private lowcore,
 * async stack and panic stack for every possible cpu, and forks the idle
 * tasks for all secondaries.
 */

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned long stack;
	unsigned int cpu;
        int i;

        /* request the 0x1201 emergency signal external interrupt */
        if (register_external_interrupt(0x1201, do_ext_call_interrupt) != 0)
                panic("Couldn't request external interrupt 0x1201");
        memset(lowcore_ptr,0,sizeof(lowcore_ptr));
        /*
         *  Initialize prefix pages and stacks for all possible cpus
         */
	print_cpu_info(&S390_lowcore.cpu_data);

        for_each_possible_cpu(i) {
		/* Lowcore needs two pages on 64 bit, one on 31 bit. */
		lowcore_ptr[i] = (struct _lowcore *)
			__get_free_pages(GFP_KERNEL|GFP_DMA,
					sizeof(void*) == 8 ? 1 : 0);
		stack = __get_free_pages(GFP_KERNEL,ASYNC_ORDER);
		if (lowcore_ptr[i] == NULL || stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");

		/* Seed the new lowcore from the boot cpu's copy. */
		*(lowcore_ptr[i]) = S390_lowcore;
		lowcore_ptr[i]->async_stack = stack + (ASYNC_SIZE);
		stack = __get_free_pages(GFP_KERNEL,0);
		if (stack == 0ULL)
			panic("smp_boot_cpus failed to allocate memory\n");
		lowcore_ptr[i]->panic_stack = stack + (PAGE_SIZE);
#ifndef CONFIG_64BIT
		if (MACHINE_HAS_IEEE) {
			lowcore_ptr[i]->extended_save_area_addr =
				(__u32) __get_free_pages(GFP_KERNEL,0);
			if (lowcore_ptr[i]->extended_save_area_addr == 0)
				panic("smp_boot_cpus failed to "
				      "allocate memory\n");
		}
#endif
	}
#ifndef CONFIG_64BIT
	if (MACHINE_HAS_IEEE)
		ctl_set_bit(14, 29); /* enable extended save area */
#endif
	/* Switch the boot cpu over to its freshly allocated lowcore. */
	set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);

	for_each_possible_cpu(cpu)
		if (cpu != smp_processor_id())
			smp_create_idle(cpu);
}
|
||||
|
||||
/* Record the boot processor (always logical cpu 0) as online and set up
 * its per-cpu offset and idle task slot. */
void __devinit smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);

	cpu_set(0, cpu_online_map);
	S390_lowcore.percpu_offset = __per_cpu_offset[0];
	current_set[0] = current;
}
|
||||
|
||||
void smp_cpus_done(unsigned int max_cpus)
|
||||
{
|
||||
cpu_present_map = cpu_possible_map;
|
||||
}
|
||||
|
||||
/*
 * the frequency of the profiling timer can be changed
 * by writing a multiplier value into /proc/profile.
 *
 * usually you want to run this on all CPUs ;)
 */
int setup_profiling_timer(unsigned int multiplier)
{
	/* Nothing to do on s390; accept any multiplier. */
	return 0;
}
|
||||
|
||||
static DEFINE_PER_CPU(struct cpu, cpu_devices);
|
||||
|
||||
/* Register a sysfs cpu device for every possible cpu. */
static int __init topology_init(void)
{
	int rc, i;

	for_each_possible_cpu(i) {
		struct cpu *dev = &per_cpu(cpu_devices, i);

		dev->hotpluggable = 1;
		rc = register_cpu(dev, i);
		if (rc)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", i, rc);
	}
	return 0;
}
|
||||
|
||||
subsys_initcall(topology_init);
|
||||
|
||||
EXPORT_SYMBOL(cpu_online_map);
|
||||
EXPORT_SYMBOL(cpu_possible_map);
|
||||
EXPORT_SYMBOL(lowcore_ptr);
|
||||
EXPORT_SYMBOL(smp_ctl_set_bit);
|
||||
EXPORT_SYMBOL(smp_ctl_clear_bit);
|
||||
EXPORT_SYMBOL(smp_get_cpu);
|
||||
EXPORT_SYMBOL(smp_put_cpu);
|
||||
88
arch/s390/kernel/stacktrace.c
Normal file
88
arch/s390/kernel/stacktrace.c
Normal file
@@ -0,0 +1,88 @@
|
||||
/*
|
||||
* arch/s390/kernel/stacktrace.c
|
||||
*
|
||||
* Stack trace management functions
|
||||
*
|
||||
* Copyright (C) IBM Corp. 2006
|
||||
* Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/sched.h>
|
||||
#include <linux/stacktrace.h>
|
||||
#include <linux/kallsyms.h>
|
||||
|
||||
/* Walk one stack context [low, high) starting at frame pointer @sp,
 * recording return addresses into @trace (skipping the first *skip
 * entries).  Follows the s390 backchain; a zero backchain means a pt_regs
 * (interrupt frame) follows, whose saved gpr 15 continues the walk.
 * Returns the stack pointer where the walk left this context. */
static unsigned long save_context_stack(struct stack_trace *trace,
					unsigned int *skip,
					unsigned long sp,
					unsigned long low,
					unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;
	unsigned long addr;

	while(1) {
		sp &= PSW_ADDR_INSN;
		if (sp < low || sp > high)
			return sp;
		sf = (struct stack_frame *)sp;
		while(1) {
			/* gprs[8] holds the saved return address (gpr 14). */
			addr = sf->gprs[8] & PSW_ADDR_INSN;
			if (!(*skip))
				trace->entries[trace->nr_entries++] = addr;
			else
				(*skip)--;
			if (trace->nr_entries >= trace->max_entries)
				return sp;
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			/* Backchain must move strictly upwards and leave
			 * room for a full stack_frame. */
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *)sp;
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long)(sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *)sp;
		addr = regs->psw.addr & PSW_ADDR_INSN;
		if (!(*skip))
			trace->entries[trace->nr_entries++] = addr;
		else
			(*skip)--;
		if (trace->nr_entries >= trace->max_entries)
			return sp;
		low = sp;
		/* Resume the walk at the interrupted context's stack. */
		sp = regs->gprs[15];
	}
}
|
||||
|
||||
/* Capture a stack trace starting from the current stack pointer, walking
 * panic stack, then async stack, then the process stack (of @task if
 * given, otherwise the current thread). */
void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
{
	register unsigned long sp asm ("15");
	unsigned long orig_sp, new_sp;

	orig_sp = sp & PSW_ADDR_INSN;

	new_sp = save_context_stack(trace, &trace->skip, orig_sp,
				    S390_lowcore.panic_stack - PAGE_SIZE,
				    S390_lowcore.panic_stack);
	/* Unless all_contexts is set, stop once we left the first context. */
	if ((new_sp != orig_sp) && !trace->all_contexts)
		return;
	new_sp = save_context_stack(trace, &trace->skip, new_sp,
				    S390_lowcore.async_stack - ASYNC_SIZE,
				    S390_lowcore.async_stack);
	if ((new_sp != orig_sp) && !trace->all_contexts)
		return;
	if (task)
		save_context_stack(trace, &trace->skip, new_sp,
				   (unsigned long) task_stack_page(task),
				   (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		save_context_stack(trace, &trace->skip, new_sp,
				   S390_lowcore.thread_info,
				   S390_lowcore.thread_info + THREAD_SIZE);
	return;
}
|
||||
288
arch/s390/kernel/sys_s390.c
Normal file
288
arch/s390/kernel/sys_s390.c
Normal file
@@ -0,0 +1,288 @@
|
||||
/*
|
||||
* arch/s390/kernel/sys_s390.c
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
|
||||
* Thomas Spatzier (tspat@de.ibm.com)
|
||||
*
|
||||
* Derived from "arch/i386/kernel/sys_i386.c"
|
||||
*
|
||||
* This file contains various random system calls that
|
||||
* have a non-standard calling sequence on the Linux/s390
|
||||
* platform.
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/sem.h>
|
||||
#include <linux/msg.h>
|
||||
#include <linux/shm.h>
|
||||
#include <linux/stat.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/mman.h>
|
||||
#include <linux/file.h>
|
||||
#include <linux/utsname.h>
|
||||
#include <linux/personality.h>
|
||||
#include <linux/unistd.h>
|
||||
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/ipc.h>
|
||||
|
||||
/*
|
||||
* sys_pipe() is the normal C calling standard for creating
|
||||
* a pipe. It's not the way Unix traditionally does this, though.
|
||||
*/
|
||||
asmlinkage long sys_pipe(unsigned long __user *fildes)
|
||||
{
|
||||
int fd[2];
|
||||
int error;
|
||||
|
||||
error = do_pipe(fd);
|
||||
if (!error) {
|
||||
if (copy_to_user(fildes, fd, 2*sizeof(int)))
|
||||
error = -EFAULT;
|
||||
}
|
||||
return error;
|
||||
}
|
||||
|
||||
/* common code for old and new mmaps */
|
||||
static inline long do_mmap2(
|
||||
unsigned long addr, unsigned long len,
|
||||
unsigned long prot, unsigned long flags,
|
||||
unsigned long fd, unsigned long pgoff)
|
||||
{
|
||||
long error = -EBADF;
|
||||
struct file * file = NULL;
|
||||
|
||||
flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
|
||||
if (!(flags & MAP_ANONYMOUS)) {
|
||||
file = fget(fd);
|
||||
if (!file)
|
||||
goto out;
|
||||
}
|
||||
|
||||
down_write(¤t->mm->mmap_sem);
|
||||
error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
|
||||
up_write(¤t->mm->mmap_sem);
|
||||
|
||||
if (file)
|
||||
fput(file);
|
||||
out:
|
||||
return error;
|
||||
}
|
||||
|
||||
/*
 * Perform the select(nd, in, out, ex, tv) and mmap() system
 * calls. Linux for S/390 isn't able to handle more than 5
 * system call parameters, so these system calls used a memory
 * block for parameter passing..
 */

/* User-space parameter block for old_mmap()/sys_mmap2(). */
struct mmap_arg_struct {
	unsigned long addr;	/* requested start address */
	unsigned long len;	/* length of mapping */
	unsigned long prot;	/* PROT_* protection flags */
	unsigned long flags;	/* MAP_* flags */
	unsigned long fd;	/* file descriptor (ignored for MAP_ANONYMOUS) */
	unsigned long offset;	/* bytes for old_mmap(), pages for sys_mmap2() */
};
|
||||
|
||||
/*
 * mmap2: all six mmap arguments arrive in one user memory block;
 * the offset field is presumably already in pages (old_mmap below
 * shifts by PAGE_SHIFT, this entry does not).
 * Returns the mapping address or a negative error code.
 */
asmlinkage long sys_mmap2(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;
	/*
	 * Fixed: this was "int error", which truncated the long result
	 * of do_mmap2() — a 64 bit mapping address above 2GB would have
	 * been corrupted on the way back to user space.
	 */
	long error = -EFAULT;

	if (copy_from_user(&a, arg, sizeof(a)))
		goto out;
	error = do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
out:
	return error;
}
|
||||
|
||||
/*
 * Legacy mmap entry point: arguments in a user memory block,
 * offset given in bytes and required to be page aligned.
 */
asmlinkage long old_mmap(struct mmap_arg_struct __user *arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	/* The byte offset must be page aligned before converting to pages. */
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;

	return do_mmap2(a.addr, a.len, a.prot, a.flags, a.fd,
			a.offset >> PAGE_SHIFT);
}
|
||||
|
||||
#ifndef CONFIG_64BIT
/*
 * User-space layout of the single memory-block argument of
 * old_select() (31 bit only).  This is ABI — do not change.
 */
struct sel_arg_struct {
	unsigned long n;
	fd_set __user *inp, *outp, *exp;
	struct timeval __user *tvp;
};

/*
 * Legacy 31 bit select() entry point: the five select arguments are
 * passed in one user memory block (see the five-parameter limit
 * described above) and forwarded to sys_select().
 */
asmlinkage long old_select(struct sel_arg_struct __user *arg)
{
	struct sel_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	/* sys_select() does the appropriate kernel locking */
	return sys_select(a.n, a.inp, a.outp, a.exp, a.tvp);

}
#endif /* CONFIG_64BIT */
|
||||
|
||||
/*
 * sys_ipc() is the de-multiplexer for the SysV IPC calls..
 *
 * This is really horribly ugly.
 *
 * call selects the operation (SEMOP, MSGSND, SHMAT, ...); the meaning
 * of first/second/third/ptr depends on the operation, as each case
 * below shows by the casts it applies.
 */
asmlinkage long sys_ipc(uint call, int first, unsigned long second,
				unsigned long third, void __user *ptr)
{
	struct ipc_kludge tmp;
	int ret;

	switch (call) {
	case SEMOP:
		/* Plain semop is semtimedop with a NULL timeout. */
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
				       (unsigned)second, NULL);
	case SEMTIMEDOP:
		return sys_semtimedop(first, (struct sembuf __user *)ptr,
				       (unsigned)second,
				       (const struct timespec __user *) third);
	case SEMGET:
		return sys_semget(first, (int)second, third);
	case SEMCTL: {
		union semun fourth;
		if (!ptr)
			return -EINVAL;
		/* ptr points at the user copy of the semun union. */
		if (get_user(fourth.__pad, (void __user * __user *) ptr))
			return -EFAULT;
		return sys_semctl(first, (int)second, third, fourth);
	}
	case MSGSND:
		return sys_msgsnd (first, (struct msgbuf __user *) ptr,
				   (size_t)second, third);
		break;
	case MSGRCV:
		if (!ptr)
			return -EINVAL;
		/* msgp and msgtyp arrive packed in an ipc_kludge block. */
		if (copy_from_user (&tmp, (struct ipc_kludge __user *) ptr,
				    sizeof (struct ipc_kludge)))
			return -EFAULT;
		return sys_msgrcv (first, tmp.msgp,
				   (size_t)second, tmp.msgtyp, third);
	case MSGGET:
		return sys_msgget((key_t)first, (int)second);
	case MSGCTL:
		return sys_msgctl(first, (int)second,
				   (struct msqid_ds __user *)ptr);

	case SHMAT: {
		ulong raddr;
		ret = do_shmat(first, (char __user *)ptr,
			       (int)second, &raddr);
		if (ret)
			return ret;
		/* Return the attach address through the pointer in 'third'. */
		return put_user (raddr, (ulong __user *) third);
		break;
	}
	case SHMDT:
		return sys_shmdt ((char __user *)ptr);
	case SHMGET:
		return sys_shmget(first, (size_t)second, third);
	case SHMCTL:
		return sys_shmctl(first, (int)second,
				   (struct shmid_ds __user *) ptr);
	default:
		return -ENOSYS;

	}

	/* Not reachable: every case above returns. */
	return -EINVAL;
}
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
/*
 * uname for 64 bit kernels: a process running with the 31 bit
 * personality gets "s390" reported as the machine name instead
 * of the native value.
 */
asmlinkage long s390x_newuname(struct new_utsname __user *name)
{
	int rc = sys_newuname(name);

	if (rc)
		return rc;
	if (current->personality == PER_LINUX32 &&
	    copy_to_user(name->machine, "s390\0\0\0\0", 8))
		return -EFAULT;
	return 0;
}
|
||||
|
||||
asmlinkage long s390x_personality(unsigned long personality)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (current->personality == PER_LINUX32 && personality == PER_LINUX)
|
||||
personality = PER_LINUX32;
|
||||
ret = sys_personality(personality);
|
||||
if (ret == PER_LINUX32)
|
||||
ret = PER_LINUX;
|
||||
|
||||
return ret;
|
||||
}
|
||||
#endif /* CONFIG_64BIT */
|
||||
|
||||
/*
|
||||
* Wrapper function for sys_fadvise64/fadvise64_64
|
||||
*/
|
||||
#ifndef CONFIG_64BIT
|
||||
|
||||
asmlinkage long
|
||||
s390_fadvise64(int fd, u32 offset_high, u32 offset_low, size_t len, int advice)
|
||||
{
|
||||
return sys_fadvise64(fd, (u64) offset_high << 32 | offset_low,
|
||||
len, advice);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
/*
 * User-space argument block for the 31 bit fadvise64_64 wrapper:
 * two 64 bit values plus two ints do not fit into the available
 * syscall registers.  This is ABI — do not change.
 */
struct fadvise64_64_args {
	int fd;
	long long offset;
	long long len;
	int advice;
};
|
||||
|
||||
/*
 * 31 bit fadvise64_64 wrapper: copy the argument block from user
 * space and forward it to sys_fadvise64_64().
 */
asmlinkage long
s390_fadvise64_64(struct fadvise64_64_args __user *args)
{
	struct fadvise64_64_args a;

	if ( copy_from_user(&a, args, sizeof(a)) )
		return -EFAULT;
	return sys_fadvise64_64(a.fd, a.offset, a.len, a.advice);
}
|
||||
|
||||
/*
 * Do a system call from kernel instead of calling sys_execve so we
 * end up with proper pt_regs.
 */
int kernel_execve(const char *filename, char *const argv[], char *const envp[])
{
	/* The svc instruction takes its arguments in gprs 2, 3 and 4 ... */
	register const char *__arg1 asm("2") = filename;
	register char *const*__arg2 asm("3") = argv;
	register char *const*__arg3 asm("4") = envp;
	/* ... and returns the result in gpr 2 (aliases __arg1 via "0"). */
	register long __svcres asm("2");
	asm volatile(
		"svc %b1"
		: "=d" (__svcres)
		: "i" (__NR_execve),
		  "0" (__arg1),
		  "d" (__arg2),
		  "d" (__arg3) : "memory");
	return __svcres;
}
|
||||
324
arch/s390/kernel/syscalls.S
Normal file
324
arch/s390/kernel/syscalls.S
Normal file
@@ -0,0 +1,324 @@
|
||||
/*
 * definitions for sys_call_table, each line represents an
 * entry in the table in the form
 * SYSCALL(31 bit syscall, 64 bit syscall, 31 bit emulated syscall)
 *
 * this file is meant to be included from entry.S and entry64.S
 */

/* Shorthand for an unimplemented system call number. */
#define NI_SYSCALL SYSCALL(sys_ni_syscall,sys_ni_syscall,sys_ni_syscall)

NI_SYSCALL /* 0 */
|
||||
SYSCALL(sys_exit,sys_exit,sys32_exit_wrapper)
|
||||
SYSCALL(sys_fork_glue,sys_fork_glue,sys_fork_glue)
|
||||
SYSCALL(sys_read,sys_read,sys32_read_wrapper)
|
||||
SYSCALL(sys_write,sys_write,sys32_write_wrapper)
|
||||
SYSCALL(sys_open,sys_open,sys32_open_wrapper) /* 5 */
|
||||
SYSCALL(sys_close,sys_close,sys32_close_wrapper)
|
||||
SYSCALL(sys_restart_syscall,sys_restart_syscall,sys_restart_syscall)
|
||||
SYSCALL(sys_creat,sys_creat,sys32_creat_wrapper)
|
||||
SYSCALL(sys_link,sys_link,sys32_link_wrapper)
|
||||
SYSCALL(sys_unlink,sys_unlink,sys32_unlink_wrapper) /* 10 */
|
||||
SYSCALL(sys_execve_glue,sys_execve_glue,sys32_execve_glue)
|
||||
SYSCALL(sys_chdir,sys_chdir,sys32_chdir_wrapper)
|
||||
SYSCALL(sys_time,sys_ni_syscall,sys32_time_wrapper) /* old time syscall */
|
||||
SYSCALL(sys_mknod,sys_mknod,sys32_mknod_wrapper)
|
||||
SYSCALL(sys_chmod,sys_chmod,sys32_chmod_wrapper) /* 15 */
|
||||
SYSCALL(sys_lchown16,sys_ni_syscall,sys32_lchown16_wrapper) /* old lchown16 syscall*/
|
||||
NI_SYSCALL /* old break syscall holder */
|
||||
NI_SYSCALL /* old stat syscall holder */
|
||||
SYSCALL(sys_lseek,sys_lseek,sys32_lseek_wrapper)
|
||||
SYSCALL(sys_getpid,sys_getpid,sys_getpid) /* 20 */
|
||||
SYSCALL(sys_mount,sys_mount,sys32_mount_wrapper)
|
||||
SYSCALL(sys_oldumount,sys_oldumount,sys32_oldumount_wrapper)
|
||||
SYSCALL(sys_setuid16,sys_ni_syscall,sys32_setuid16_wrapper) /* old setuid16 syscall*/
|
||||
SYSCALL(sys_getuid16,sys_ni_syscall,sys32_getuid16) /* old getuid16 syscall*/
|
||||
SYSCALL(sys_stime,sys_ni_syscall,sys32_stime_wrapper) /* 25 old stime syscall */
|
||||
SYSCALL(sys_ptrace,sys_ptrace,sys32_ptrace_wrapper)
|
||||
SYSCALL(sys_alarm,sys_alarm,sys32_alarm_wrapper)
|
||||
NI_SYSCALL /* old fstat syscall */
|
||||
SYSCALL(sys_pause,sys_pause,sys32_pause)
|
||||
SYSCALL(sys_utime,sys_utime,compat_sys_utime_wrapper) /* 30 */
|
||||
NI_SYSCALL /* old stty syscall */
|
||||
NI_SYSCALL /* old gtty syscall */
|
||||
SYSCALL(sys_access,sys_access,sys32_access_wrapper)
|
||||
SYSCALL(sys_nice,sys_nice,sys32_nice_wrapper)
|
||||
NI_SYSCALL /* 35 old ftime syscall */
|
||||
SYSCALL(sys_sync,sys_sync,sys_sync)
|
||||
SYSCALL(sys_kill,sys_kill,sys32_kill_wrapper)
|
||||
SYSCALL(sys_rename,sys_rename,sys32_rename_wrapper)
|
||||
SYSCALL(sys_mkdir,sys_mkdir,sys32_mkdir_wrapper)
|
||||
SYSCALL(sys_rmdir,sys_rmdir,sys32_rmdir_wrapper) /* 40 */
|
||||
SYSCALL(sys_dup,sys_dup,sys32_dup_wrapper)
|
||||
SYSCALL(sys_pipe,sys_pipe,sys32_pipe_wrapper)
|
||||
SYSCALL(sys_times,sys_times,compat_sys_times_wrapper)
|
||||
NI_SYSCALL /* old prof syscall */
|
||||
SYSCALL(sys_brk,sys_brk,sys32_brk_wrapper) /* 45 */
|
||||
SYSCALL(sys_setgid16,sys_ni_syscall,sys32_setgid16_wrapper) /* old setgid16 syscall*/
|
||||
SYSCALL(sys_getgid16,sys_ni_syscall,sys32_getgid16) /* old getgid16 syscall*/
|
||||
SYSCALL(sys_signal,sys_signal,sys32_signal_wrapper)
|
||||
SYSCALL(sys_geteuid16,sys_ni_syscall,sys32_geteuid16) /* old geteuid16 syscall */
|
||||
SYSCALL(sys_getegid16,sys_ni_syscall,sys32_getegid16) /* 50 old getegid16 syscall */
|
||||
SYSCALL(sys_acct,sys_acct,sys32_acct_wrapper)
|
||||
SYSCALL(sys_umount,sys_umount,sys32_umount_wrapper)
|
||||
NI_SYSCALL /* old lock syscall */
|
||||
SYSCALL(sys_ioctl,sys_ioctl,compat_sys_ioctl_wrapper)
|
||||
SYSCALL(sys_fcntl,sys_fcntl,compat_sys_fcntl_wrapper) /* 55 */
|
||||
NI_SYSCALL /* intel mpx syscall */
|
||||
SYSCALL(sys_setpgid,sys_setpgid,sys32_setpgid_wrapper)
|
||||
NI_SYSCALL /* old ulimit syscall */
|
||||
NI_SYSCALL /* old uname syscall */
|
||||
SYSCALL(sys_umask,sys_umask,sys32_umask_wrapper) /* 60 */
|
||||
SYSCALL(sys_chroot,sys_chroot,sys32_chroot_wrapper)
|
||||
SYSCALL(sys_ustat,sys_ustat,sys32_ustat_wrapper)
|
||||
SYSCALL(sys_dup2,sys_dup2,sys32_dup2_wrapper)
|
||||
SYSCALL(sys_getppid,sys_getppid,sys_getppid)
|
||||
SYSCALL(sys_getpgrp,sys_getpgrp,sys_getpgrp) /* 65 */
|
||||
SYSCALL(sys_setsid,sys_setsid,sys_setsid)
|
||||
SYSCALL(sys_sigaction,sys_sigaction,sys32_sigaction_wrapper)
|
||||
NI_SYSCALL /* old sgetmask syscall*/
|
||||
NI_SYSCALL /* old ssetmask syscall*/
|
||||
SYSCALL(sys_setreuid16,sys_ni_syscall,sys32_setreuid16_wrapper) /* old setreuid16 syscall */
|
||||
SYSCALL(sys_setregid16,sys_ni_syscall,sys32_setregid16_wrapper) /* old setregid16 syscall */
|
||||
SYSCALL(sys_sigsuspend,sys_sigsuspend,sys_sigsuspend_wrapper)
|
||||
SYSCALL(sys_sigpending,sys_sigpending,compat_sys_sigpending_wrapper)
|
||||
SYSCALL(sys_sethostname,sys_sethostname,sys32_sethostname_wrapper)
|
||||
SYSCALL(sys_setrlimit,sys_setrlimit,compat_sys_setrlimit_wrapper) /* 75 */
|
||||
SYSCALL(sys_old_getrlimit,sys_getrlimit,compat_sys_old_getrlimit_wrapper)
|
||||
SYSCALL(sys_getrusage,sys_getrusage,compat_sys_getrusage_wrapper)
|
||||
SYSCALL(sys_gettimeofday,sys_gettimeofday,sys32_gettimeofday_wrapper)
|
||||
SYSCALL(sys_settimeofday,sys_settimeofday,sys32_settimeofday_wrapper)
|
||||
SYSCALL(sys_getgroups16,sys_ni_syscall,sys32_getgroups16_wrapper) /* 80 old getgroups16 syscall */
|
||||
SYSCALL(sys_setgroups16,sys_ni_syscall,sys32_setgroups16_wrapper) /* old setgroups16 syscall */
|
||||
NI_SYSCALL /* old select syscall */
|
||||
SYSCALL(sys_symlink,sys_symlink,sys32_symlink_wrapper)
|
||||
NI_SYSCALL /* old lstat syscall */
|
||||
SYSCALL(sys_readlink,sys_readlink,sys32_readlink_wrapper) /* 85 */
|
||||
SYSCALL(sys_uselib,sys_uselib,sys32_uselib_wrapper)
|
||||
SYSCALL(sys_swapon,sys_swapon,sys32_swapon_wrapper)
|
||||
SYSCALL(sys_reboot,sys_reboot,sys32_reboot_wrapper)
|
||||
SYSCALL(sys_ni_syscall,sys_ni_syscall,old32_readdir_wrapper) /* old readdir syscall */
|
||||
SYSCALL(old_mmap,old_mmap,old32_mmap_wrapper) /* 90 */
|
||||
SYSCALL(sys_munmap,sys_munmap,sys32_munmap_wrapper)
|
||||
SYSCALL(sys_truncate,sys_truncate,sys32_truncate_wrapper)
|
||||
SYSCALL(sys_ftruncate,sys_ftruncate,sys32_ftruncate_wrapper)
|
||||
SYSCALL(sys_fchmod,sys_fchmod,sys32_fchmod_wrapper)
|
||||
SYSCALL(sys_fchown16,sys_ni_syscall,sys32_fchown16_wrapper) /* 95 old fchown16 syscall*/
|
||||
SYSCALL(sys_getpriority,sys_getpriority,sys32_getpriority_wrapper)
|
||||
SYSCALL(sys_setpriority,sys_setpriority,sys32_setpriority_wrapper)
|
||||
NI_SYSCALL /* old profil syscall */
|
||||
SYSCALL(sys_statfs,sys_statfs,compat_sys_statfs_wrapper)
|
||||
SYSCALL(sys_fstatfs,sys_fstatfs,compat_sys_fstatfs_wrapper) /* 100 */
|
||||
NI_SYSCALL /* ioperm for i386 */
|
||||
SYSCALL(sys_socketcall,sys_socketcall,compat_sys_socketcall_wrapper)
|
||||
SYSCALL(sys_syslog,sys_syslog,sys32_syslog_wrapper)
|
||||
SYSCALL(sys_setitimer,sys_setitimer,compat_sys_setitimer_wrapper)
|
||||
SYSCALL(sys_getitimer,sys_getitimer,compat_sys_getitimer_wrapper) /* 105 */
|
||||
SYSCALL(sys_newstat,sys_newstat,compat_sys_newstat_wrapper)
|
||||
SYSCALL(sys_newlstat,sys_newlstat,compat_sys_newlstat_wrapper)
|
||||
SYSCALL(sys_newfstat,sys_newfstat,compat_sys_newfstat_wrapper)
|
||||
NI_SYSCALL /* old uname syscall */
|
||||
SYSCALL(sys_lookup_dcookie,sys_lookup_dcookie,sys32_lookup_dcookie_wrapper) /* 110 */
|
||||
SYSCALL(sys_vhangup,sys_vhangup,sys_vhangup)
|
||||
NI_SYSCALL /* old "idle" system call */
|
||||
NI_SYSCALL /* vm86old for i386 */
|
||||
SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper)
|
||||
SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
|
||||
SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
|
||||
SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper)
|
||||
SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
|
||||
SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue)
|
||||
SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */
|
||||
SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper)
|
||||
SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper)
|
||||
NI_SYSCALL /* modify_ldt for i386 */
|
||||
SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper)
|
||||
SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */
|
||||
SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper)
|
||||
NI_SYSCALL /* old "create module" */
|
||||
SYSCALL(sys_init_module,sys_init_module,sys32_init_module_wrapper)
|
||||
SYSCALL(sys_delete_module,sys_delete_module,sys32_delete_module_wrapper)
|
||||
NI_SYSCALL /* 130: old get_kernel_syms */
|
||||
SYSCALL(sys_quotactl,sys_quotactl,sys32_quotactl_wrapper)
|
||||
SYSCALL(sys_getpgid,sys_getpgid,sys32_getpgid_wrapper)
|
||||
SYSCALL(sys_fchdir,sys_fchdir,sys32_fchdir_wrapper)
|
||||
SYSCALL(sys_bdflush,sys_bdflush,sys32_bdflush_wrapper)
|
||||
SYSCALL(sys_sysfs,sys_sysfs,sys32_sysfs_wrapper) /* 135 */
|
||||
SYSCALL(sys_personality,s390x_personality,sys32_personality_wrapper)
|
||||
NI_SYSCALL /* for afs_syscall */
|
||||
SYSCALL(sys_setfsuid16,sys_ni_syscall,sys32_setfsuid16_wrapper) /* old setfsuid16 syscall */
|
||||
SYSCALL(sys_setfsgid16,sys_ni_syscall,sys32_setfsgid16_wrapper) /* old setfsgid16 syscall */
|
||||
SYSCALL(sys_llseek,sys_llseek,sys32_llseek_wrapper) /* 140 */
|
||||
SYSCALL(sys_getdents,sys_getdents,sys32_getdents_wrapper)
|
||||
SYSCALL(sys_select,sys_select,compat_sys_select_wrapper)
|
||||
SYSCALL(sys_flock,sys_flock,sys32_flock_wrapper)
|
||||
SYSCALL(sys_msync,sys_msync,sys32_msync_wrapper)
|
||||
SYSCALL(sys_readv,sys_readv,compat_sys_readv_wrapper) /* 145 */
|
||||
SYSCALL(sys_writev,sys_writev,compat_sys_writev_wrapper)
|
||||
SYSCALL(sys_getsid,sys_getsid,sys32_getsid_wrapper)
|
||||
SYSCALL(sys_fdatasync,sys_fdatasync,sys32_fdatasync_wrapper)
|
||||
SYSCALL(sys_sysctl,sys_sysctl,sys32_sysctl_wrapper)
|
||||
SYSCALL(sys_mlock,sys_mlock,sys32_mlock_wrapper) /* 150 */
|
||||
SYSCALL(sys_munlock,sys_munlock,sys32_munlock_wrapper)
|
||||
SYSCALL(sys_mlockall,sys_mlockall,sys32_mlockall_wrapper)
|
||||
SYSCALL(sys_munlockall,sys_munlockall,sys_munlockall)
|
||||
SYSCALL(sys_sched_setparam,sys_sched_setparam,sys32_sched_setparam_wrapper)
|
||||
SYSCALL(sys_sched_getparam,sys_sched_getparam,sys32_sched_getparam_wrapper) /* 155 */
|
||||
SYSCALL(sys_sched_setscheduler,sys_sched_setscheduler,sys32_sched_setscheduler_wrapper)
|
||||
SYSCALL(sys_sched_getscheduler,sys_sched_getscheduler,sys32_sched_getscheduler_wrapper)
|
||||
SYSCALL(sys_sched_yield,sys_sched_yield,sys_sched_yield)
|
||||
SYSCALL(sys_sched_get_priority_max,sys_sched_get_priority_max,sys32_sched_get_priority_max_wrapper)
|
||||
SYSCALL(sys_sched_get_priority_min,sys_sched_get_priority_min,sys32_sched_get_priority_min_wrapper) /* 160 */
|
||||
SYSCALL(sys_sched_rr_get_interval,sys_sched_rr_get_interval,sys32_sched_rr_get_interval_wrapper)
|
||||
SYSCALL(sys_nanosleep,sys_nanosleep,compat_sys_nanosleep_wrapper)
|
||||
SYSCALL(sys_mremap,sys_mremap,sys32_mremap_wrapper)
|
||||
SYSCALL(sys_setresuid16,sys_ni_syscall,sys32_setresuid16_wrapper) /* old setresuid16 syscall */
|
||||
SYSCALL(sys_getresuid16,sys_ni_syscall,sys32_getresuid16_wrapper) /* 165 old getresuid16 syscall */
|
||||
NI_SYSCALL /* for vm86 */
|
||||
NI_SYSCALL /* old sys_query_module */
|
||||
SYSCALL(sys_poll,sys_poll,sys32_poll_wrapper)
|
||||
SYSCALL(sys_nfsservctl,sys_nfsservctl,compat_sys_nfsservctl_wrapper)
|
||||
SYSCALL(sys_setresgid16,sys_ni_syscall,sys32_setresgid16_wrapper) /* 170 old setresgid16 syscall */
|
||||
SYSCALL(sys_getresgid16,sys_ni_syscall,sys32_getresgid16_wrapper) /* old getresgid16 syscall */
|
||||
SYSCALL(sys_prctl,sys_prctl,sys32_prctl_wrapper)
|
||||
SYSCALL(sys_rt_sigreturn_glue,sys_rt_sigreturn_glue,sys32_rt_sigreturn_glue)
|
||||
SYSCALL(sys_rt_sigaction,sys_rt_sigaction,sys32_rt_sigaction_wrapper)
|
||||
SYSCALL(sys_rt_sigprocmask,sys_rt_sigprocmask,sys32_rt_sigprocmask_wrapper) /* 175 */
|
||||
SYSCALL(sys_rt_sigpending,sys_rt_sigpending,sys32_rt_sigpending_wrapper)
|
||||
SYSCALL(sys_rt_sigtimedwait,sys_rt_sigtimedwait,compat_sys_rt_sigtimedwait_wrapper)
|
||||
SYSCALL(sys_rt_sigqueueinfo,sys_rt_sigqueueinfo,sys32_rt_sigqueueinfo_wrapper)
|
||||
SYSCALL(sys_rt_sigsuspend,sys_rt_sigsuspend,compat_sys_rt_sigsuspend_wrapper)
|
||||
SYSCALL(sys_pread64,sys_pread64,sys32_pread64_wrapper) /* 180 */
|
||||
SYSCALL(sys_pwrite64,sys_pwrite64,sys32_pwrite64_wrapper)
|
||||
SYSCALL(sys_chown16,sys_ni_syscall,sys32_chown16_wrapper) /* old chown16 syscall */
|
||||
SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
|
||||
SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
|
||||
SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */
|
||||
SYSCALL(sys_sigaltstack_glue,sys_sigaltstack_glue,sys32_sigaltstack_glue)
|
||||
SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
|
||||
NI_SYSCALL /* streams1 */
|
||||
NI_SYSCALL /* streams2 */
|
||||
SYSCALL(sys_vfork_glue,sys_vfork_glue,sys_vfork_glue) /* 190 */
|
||||
SYSCALL(sys_getrlimit,sys_getrlimit,compat_sys_getrlimit_wrapper)
|
||||
SYSCALL(sys_mmap2,sys_mmap2,sys32_mmap2_wrapper)
|
||||
SYSCALL(sys_truncate64,sys_ni_syscall,sys32_truncate64_wrapper)
|
||||
SYSCALL(sys_ftruncate64,sys_ni_syscall,sys32_ftruncate64_wrapper)
|
||||
SYSCALL(sys_stat64,sys_ni_syscall,sys32_stat64_wrapper) /* 195 */
|
||||
SYSCALL(sys_lstat64,sys_ni_syscall,sys32_lstat64_wrapper)
|
||||
SYSCALL(sys_fstat64,sys_ni_syscall,sys32_fstat64_wrapper)
|
||||
SYSCALL(sys_lchown,sys_lchown,sys32_lchown_wrapper)
|
||||
SYSCALL(sys_getuid,sys_getuid,sys_getuid)
|
||||
SYSCALL(sys_getgid,sys_getgid,sys_getgid) /* 200 */
|
||||
SYSCALL(sys_geteuid,sys_geteuid,sys_geteuid)
|
||||
SYSCALL(sys_getegid,sys_getegid,sys_getegid)
|
||||
SYSCALL(sys_setreuid,sys_setreuid,sys32_setreuid_wrapper)
|
||||
SYSCALL(sys_setregid,sys_setregid,sys32_setregid_wrapper)
|
||||
SYSCALL(sys_getgroups,sys_getgroups,sys32_getgroups_wrapper) /* 205 */
|
||||
SYSCALL(sys_setgroups,sys_setgroups,sys32_setgroups_wrapper)
|
||||
SYSCALL(sys_fchown,sys_fchown,sys32_fchown_wrapper)
|
||||
SYSCALL(sys_setresuid,sys_setresuid,sys32_setresuid_wrapper)
|
||||
SYSCALL(sys_getresuid,sys_getresuid,sys32_getresuid_wrapper)
|
||||
SYSCALL(sys_setresgid,sys_setresgid,sys32_setresgid_wrapper) /* 210 */
|
||||
SYSCALL(sys_getresgid,sys_getresgid,sys32_getresgid_wrapper)
|
||||
SYSCALL(sys_chown,sys_chown,sys32_chown_wrapper)
|
||||
SYSCALL(sys_setuid,sys_setuid,sys32_setuid_wrapper)
|
||||
SYSCALL(sys_setgid,sys_setgid,sys32_setgid_wrapper)
|
||||
SYSCALL(sys_setfsuid,sys_setfsuid,sys32_setfsuid_wrapper) /* 215 */
|
||||
SYSCALL(sys_setfsgid,sys_setfsgid,sys32_setfsgid_wrapper)
|
||||
SYSCALL(sys_pivot_root,sys_pivot_root,sys32_pivot_root_wrapper)
|
||||
SYSCALL(sys_mincore,sys_mincore,sys32_mincore_wrapper)
|
||||
SYSCALL(sys_madvise,sys_madvise,sys32_madvise_wrapper)
|
||||
SYSCALL(sys_getdents64,sys_getdents64,sys32_getdents64_wrapper) /* 220 */
|
||||
SYSCALL(sys_fcntl64,sys_ni_syscall,compat_sys_fcntl64_wrapper)
|
||||
SYSCALL(sys_readahead,sys_readahead,sys32_readahead)
|
||||
SYSCALL(sys_sendfile64,sys_ni_syscall,sys32_sendfile64)
|
||||
SYSCALL(sys_setxattr,sys_setxattr,sys32_setxattr_wrapper)
|
||||
SYSCALL(sys_lsetxattr,sys_lsetxattr,sys32_lsetxattr_wrapper) /* 225 */
|
||||
SYSCALL(sys_fsetxattr,sys_fsetxattr,sys32_fsetxattr_wrapper)
|
||||
SYSCALL(sys_getxattr,sys_getxattr,sys32_getxattr_wrapper)
|
||||
SYSCALL(sys_lgetxattr,sys_lgetxattr,sys32_lgetxattr_wrapper)
|
||||
SYSCALL(sys_fgetxattr,sys_fgetxattr,sys32_fgetxattr_wrapper)
|
||||
SYSCALL(sys_listxattr,sys_listxattr,sys32_listxattr_wrapper) /* 230 */
|
||||
SYSCALL(sys_llistxattr,sys_llistxattr,sys32_llistxattr_wrapper)
|
||||
SYSCALL(sys_flistxattr,sys_flistxattr,sys32_flistxattr_wrapper)
|
||||
SYSCALL(sys_removexattr,sys_removexattr,sys32_removexattr_wrapper)
|
||||
SYSCALL(sys_lremovexattr,sys_lremovexattr,sys32_lremovexattr_wrapper)
|
||||
SYSCALL(sys_fremovexattr,sys_fremovexattr,sys32_fremovexattr_wrapper) /* 235 */
|
||||
SYSCALL(sys_gettid,sys_gettid,sys_gettid)
|
||||
SYSCALL(sys_tkill,sys_tkill,sys_tkill)
|
||||
SYSCALL(sys_futex,sys_futex,compat_sys_futex_wrapper)
|
||||
SYSCALL(sys_sched_setaffinity,sys_sched_setaffinity,sys32_sched_setaffinity_wrapper)
|
||||
SYSCALL(sys_sched_getaffinity,sys_sched_getaffinity,sys32_sched_getaffinity_wrapper) /* 240 */
|
||||
SYSCALL(sys_tgkill,sys_tgkill,sys_tgkill)
|
||||
NI_SYSCALL /* reserved for TUX */
|
||||
SYSCALL(sys_io_setup,sys_io_setup,sys32_io_setup_wrapper)
|
||||
SYSCALL(sys_io_destroy,sys_io_destroy,sys32_io_destroy_wrapper)
|
||||
SYSCALL(sys_io_getevents,sys_io_getevents,sys32_io_getevents_wrapper) /* 245 */
|
||||
SYSCALL(sys_io_submit,sys_io_submit,sys32_io_submit_wrapper)
|
||||
SYSCALL(sys_io_cancel,sys_io_cancel,sys32_io_cancel_wrapper)
|
||||
SYSCALL(sys_exit_group,sys_exit_group,sys32_exit_group_wrapper)
|
||||
SYSCALL(sys_epoll_create,sys_epoll_create,sys_epoll_create_wrapper)
|
||||
SYSCALL(sys_epoll_ctl,sys_epoll_ctl,sys_epoll_ctl_wrapper) /* 250 */
|
||||
SYSCALL(sys_epoll_wait,sys_epoll_wait,sys_epoll_wait_wrapper)
|
||||
SYSCALL(sys_set_tid_address,sys_set_tid_address,sys32_set_tid_address_wrapper)
|
||||
SYSCALL(s390_fadvise64,sys_fadvise64_64,sys32_fadvise64_wrapper)
|
||||
SYSCALL(sys_timer_create,sys_timer_create,sys32_timer_create_wrapper)
|
||||
SYSCALL(sys_timer_settime,sys_timer_settime,sys32_timer_settime_wrapper) /* 255 */
|
||||
SYSCALL(sys_timer_gettime,sys_timer_gettime,sys32_timer_gettime_wrapper)
|
||||
SYSCALL(sys_timer_getoverrun,sys_timer_getoverrun,sys32_timer_getoverrun_wrapper)
|
||||
SYSCALL(sys_timer_delete,sys_timer_delete,sys32_timer_delete_wrapper)
|
||||
SYSCALL(sys_clock_settime,sys_clock_settime,sys32_clock_settime_wrapper)
|
||||
SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper) /* 260 */
|
||||
SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
|
||||
SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
|
||||
NI_SYSCALL /* reserved for vserver */
|
||||
SYSCALL(s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
|
||||
SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
|
||||
SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
|
||||
SYSCALL(sys_remap_file_pages,sys_remap_file_pages,sys32_remap_file_pages_wrapper)
|
||||
NI_SYSCALL /* 268 sys_mbind */
|
||||
NI_SYSCALL /* 269 sys_get_mempolicy */
|
||||
NI_SYSCALL /* 270 sys_set_mempolicy */
|
||||
SYSCALL(sys_mq_open,sys_mq_open,compat_sys_mq_open_wrapper)
|
||||
SYSCALL(sys_mq_unlink,sys_mq_unlink,sys32_mq_unlink_wrapper)
|
||||
SYSCALL(sys_mq_timedsend,sys_mq_timedsend,compat_sys_mq_timedsend_wrapper)
|
||||
SYSCALL(sys_mq_timedreceive,sys_mq_timedreceive,compat_sys_mq_timedreceive_wrapper)
|
||||
SYSCALL(sys_mq_notify,sys_mq_notify,compat_sys_mq_notify_wrapper) /* 275 */
|
||||
SYSCALL(sys_mq_getsetattr,sys_mq_getsetattr,compat_sys_mq_getsetattr_wrapper)
|
||||
SYSCALL(sys_kexec_load,sys_kexec_load,compat_sys_kexec_load_wrapper)
|
||||
SYSCALL(sys_add_key,sys_add_key,compat_sys_add_key_wrapper)
|
||||
SYSCALL(sys_request_key,sys_request_key,compat_sys_request_key_wrapper)
|
||||
SYSCALL(sys_keyctl,sys_keyctl,compat_sys_keyctl) /* 280 */
|
||||
SYSCALL(sys_waitid,sys_waitid,compat_sys_waitid_wrapper)
|
||||
SYSCALL(sys_ioprio_set,sys_ioprio_set,sys_ioprio_set_wrapper)
|
||||
SYSCALL(sys_ioprio_get,sys_ioprio_get,sys_ioprio_get_wrapper)
|
||||
SYSCALL(sys_inotify_init,sys_inotify_init,sys_inotify_init)
|
||||
SYSCALL(sys_inotify_add_watch,sys_inotify_add_watch,sys_inotify_add_watch_wrapper) /* 285 */
|
||||
SYSCALL(sys_inotify_rm_watch,sys_inotify_rm_watch,sys_inotify_rm_watch_wrapper)
|
||||
NI_SYSCALL /* 287 sys_migrate_pages */
|
||||
SYSCALL(sys_openat,sys_openat,compat_sys_openat_wrapper)
|
||||
SYSCALL(sys_mkdirat,sys_mkdirat,sys_mkdirat_wrapper)
|
||||
SYSCALL(sys_mknodat,sys_mknodat,sys_mknodat_wrapper) /* 290 */
|
||||
SYSCALL(sys_fchownat,sys_fchownat,sys_fchownat_wrapper)
|
||||
SYSCALL(sys_futimesat,sys_futimesat,compat_sys_futimesat_wrapper)
|
||||
SYSCALL(sys_fstatat64,sys_newfstatat,sys32_fstatat64_wrapper)
|
||||
SYSCALL(sys_unlinkat,sys_unlinkat,sys_unlinkat_wrapper)
|
||||
SYSCALL(sys_renameat,sys_renameat,sys_renameat_wrapper) /* 295 */
|
||||
SYSCALL(sys_linkat,sys_linkat,sys_linkat_wrapper)
|
||||
SYSCALL(sys_symlinkat,sys_symlinkat,sys_symlinkat_wrapper)
|
||||
SYSCALL(sys_readlinkat,sys_readlinkat,sys_readlinkat_wrapper)
|
||||
SYSCALL(sys_fchmodat,sys_fchmodat,sys_fchmodat_wrapper)
|
||||
SYSCALL(sys_faccessat,sys_faccessat,sys_faccessat_wrapper) /* 300 */
|
||||
SYSCALL(sys_pselect6,sys_pselect6,compat_sys_pselect6_wrapper)
|
||||
SYSCALL(sys_ppoll,sys_ppoll,compat_sys_ppoll_wrapper)
|
||||
SYSCALL(sys_unshare,sys_unshare,sys_unshare_wrapper)
|
||||
SYSCALL(sys_set_robust_list,sys_set_robust_list,compat_sys_set_robust_list_wrapper)
|
||||
SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list_wrapper)
|
||||
SYSCALL(sys_splice,sys_splice,sys_splice_wrapper)
|
||||
SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper)
|
||||
SYSCALL(sys_tee,sys_tee,sys_tee_wrapper)
|
||||
SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper)
|
||||
NI_SYSCALL /* 310 sys_move_pages */
|
||||
SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
|
||||
SYSCALL(sys_epoll_pwait,sys_epoll_pwait,compat_sys_epoll_pwait_wrapper)
|
||||
SYSCALL(sys_utimes,sys_utimes,compat_sys_utimes_wrapper)
|
||||
1410
arch/s390/kernel/time.c
Normal file
1410
arch/s390/kernel/time.c
Normal file
File diff suppressed because it is too large
Load Diff
746
arch/s390/kernel/traps.c
Normal file
746
arch/s390/kernel/traps.c
Normal file
@@ -0,0 +1,746 @@
|
||||
/*
|
||||
* arch/s390/kernel/traps.c
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
|
||||
* Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
|
||||
*
|
||||
* Derived from "arch/i386/kernel/traps.c"
|
||||
* Copyright (C) 1991, 1992 Linus Torvalds
|
||||
*/
|
||||
|
||||
/*
|
||||
* 'Traps.c' handles hardware traps and faults after we have saved some
|
||||
* state in 'asm.s'.
|
||||
*/
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/string.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/ptrace.h>
|
||||
#include <linux/timer.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/smp_lock.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kallsyms.h>
|
||||
#include <linux/reboot.h>
|
||||
#include <linux/kprobes.h>
|
||||
|
||||
#include <asm/system.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <asm/io.h>
|
||||
#include <asm/atomic.h>
|
||||
#include <asm/mathemu.h>
|
||||
#include <asm/cpcmd.h>
|
||||
#include <asm/s390_ext.h>
|
||||
#include <asm/lowcore.h>
|
||||
#include <asm/debug.h>
|
||||
#include <asm/kdebug.h>
|
||||
|
||||
/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);

/* A program check handler receives the pt_regs and the interruption code. */
typedef void pgm_check_handler_t(struct pt_regs *, long);
/* Dispatch table indexed by program interruption code (entry.S uses it). */
pgm_check_handler_t *pgm_check_table[128];

#ifdef CONFIG_SYSCTL
#ifdef CONFIG_PROCESS_DEBUG
int sysctl_userprocess_debug = 1;
#else
int sysctl_userprocess_debug = 0;
#endif
#endif

extern pgm_check_handler_t do_protection_exception;
extern pgm_check_handler_t do_dat_exception;
extern pgm_check_handler_t do_monitor_call;

/* Read the current stack pointer out of gpr 15. */
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })

#ifndef CONFIG_64BIT
#define FOURLONG "%08lx %08lx %08lx %08lx\n"
static int kstack_depth_to_print = 12;
#else /* CONFIG_64BIT */
#define FOURLONG "%016lx %016lx %016lx %016lx\n"
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */

ATOMIC_NOTIFIER_HEAD(s390die_chain);
|
||||
|
||||
/* Add a callback to the s390 die notifier chain (exported for modules). */
int register_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&s390die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier);
|
||||
|
||||
/* Remove a callback from the s390 die notifier chain. */
int unregister_die_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&s390die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier);
|
||||
|
||||
/*
 * For show_trace we have three different stacks to consider:
 * - the panic stack which is used if the kernel stack has overflown
 * - the asynchronous interrupt stack (cpu related)
 * - the synchronous kernel stack (process related)
 * The stack trace can start at any of the three stacks and can potentially
 * touch all of them. The order is: panic stack, async stack, sync stack.
 *
 * Walks the backchain of stack frames between low and high, printing
 * the saved return address (gprs[8]) of each frame.  Returns the
 * stack pointer where the walk left the [low, high) window, so the
 * caller can continue on the next stack.
 */
static unsigned long
__show_trace(unsigned long sp, unsigned long low, unsigned long high)
{
	struct stack_frame *sf;
	struct pt_regs *regs;

	while (1) {
		/* Mask off the 31 bit addressing-mode bit. */
		sp = sp & PSW_ADDR_INSN;
		if (sp < low || sp > high - sizeof(*sf))
			return sp;
		sf = (struct stack_frame *) sp;
		printk("([<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
		print_symbol("%s)\n", sf->gprs[8] & PSW_ADDR_INSN);
		/* Follow the backchain. */
		while (1) {
			low = sp;
			sp = sf->back_chain & PSW_ADDR_INSN;
			if (!sp)
				break;
			if (sp <= low || sp > high - sizeof(*sf))
				return sp;
			sf = (struct stack_frame *) sp;
			printk(" [<%016lx>] ", sf->gprs[8] & PSW_ADDR_INSN);
			print_symbol("%s\n", sf->gprs[8] & PSW_ADDR_INSN);
		}
		/* Zero backchain detected, check for interrupt frame. */
		sp = (unsigned long) (sf + 1);
		if (sp <= low || sp > high - sizeof(*regs))
			return sp;
		regs = (struct pt_regs *) sp;
		printk(" [<%016lx>] ", regs->psw.addr & PSW_ADDR_INSN);
		print_symbol("%s\n", regs->psw.addr & PSW_ADDR_INSN);
		/* Resume the walk on the interrupted stack (saved gpr 15). */
		low = sp;
		sp = regs->gprs[15];
	}
}
|
||||
|
||||
/*
 * Print a call trace for @task starting at @stack.  If @stack is NULL the
 * trace starts at the task's saved kernel stack pointer, or at the current
 * r15 when @task is NULL.  Walks panic stack (if CONFIG_CHECK_STACK),
 * async stack, then the task's synchronous kernel stack.
 */
void show_trace(struct task_struct *task, unsigned long *stack)
{
	register unsigned long __r15 asm ("15");
	unsigned long sp;

	sp = (unsigned long) stack;
	if (!sp)
		sp = task ? task->thread.ksp : __r15;
	printk("Call Trace:\n");
#ifdef CONFIG_CHECK_STACK
	sp = __show_trace(sp, S390_lowcore.panic_stack - 4096,
			  S390_lowcore.panic_stack);
#endif
	sp = __show_trace(sp, S390_lowcore.async_stack - ASYNC_SIZE,
			  S390_lowcore.async_stack);
	if (task)
		__show_trace(sp, (unsigned long) task_stack_page(task),
			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
	else
		__show_trace(sp, S390_lowcore.thread_info,
			     S390_lowcore.thread_info + THREAD_SIZE);
	printk("\n");
	if (!task)
		task = current;
	debug_show_held_locks(task);
}
|
||||
|
||||
/*
 * Dump up to kstack_depth_to_print raw words of @task's stack starting at
 * @sp (or at the task's/current's stack pointer when @sp is NULL), then
 * print the symbolic call trace.  Stops at a THREAD_SIZE boundary.
 */
void show_stack(struct task_struct *task, unsigned long *sp)
{
	register unsigned long * __r15 asm ("15");
	unsigned long *stack;
	int i;

	if (!sp)
		stack = task ? (unsigned long *) task->thread.ksp : __r15;
	else
		stack = sp;

	for (i = 0; i < kstack_depth_to_print; i++) {
		/* Hitting the stack page boundary terminates the dump. */
		if (((addr_t) stack & (THREAD_SIZE-1)) == 0)
			break;
		/* New output line every 32 bytes of stack. */
		if (i && ((i * sizeof (long) % 32) == 0))
			printk("\n       ");
		printk("%p ", (void *)*stack++);
	}
	printk("\n");
	show_trace(task, sp);
}
|
||||
|
||||
/*
 * The architecture-independent dump_stack generator.
 * Dumps the current task's stack and call trace.
 */
void dump_stack(void)
{
	show_stack(NULL, NULL);
}

EXPORT_SYMBOL(dump_stack);
|
||||
|
||||
/*
 * Print PSW, general purpose registers and the first 20 bytes of the
 * instruction stream for @regs.  The "User"/"Krnl" prefix reflects the
 * problem-state bit of the PSW at the time of the fault.
 */
void show_registers(struct pt_regs *regs)
{
	mm_segment_t old_fs;
	char *mode;
	int i;

	mode = (regs->psw.mask & PSW_MASK_PSTATE) ? "User" : "Krnl";
	printk("%s PSW : %p %p",
	       mode, (void *) regs->psw.mask,
	       (void *) regs->psw.addr);
	print_symbol(" (%s)\n", regs->psw.addr & PSW_ADDR_INSN);
	printk("%s GPRS: " FOURLONG, mode,
	       regs->gprs[0], regs->gprs[1], regs->gprs[2], regs->gprs[3]);
	printk("           " FOURLONG,
	       regs->gprs[4], regs->gprs[5], regs->gprs[6], regs->gprs[7]);
	printk("           " FOURLONG,
	       regs->gprs[8], regs->gprs[9], regs->gprs[10], regs->gprs[11]);
	printk("           " FOURLONG,
	       regs->gprs[12], regs->gprs[13], regs->gprs[14], regs->gprs[15]);

#if 0
	/* FIXME: this isn't needed any more but it changes the ksymoops
	 * input. To remove or not to remove ... */
	save_access_regs(regs->acrs);
	printk("%s ACRS: %08x %08x %08x %08x\n", mode,
	       regs->acrs[0], regs->acrs[1], regs->acrs[2], regs->acrs[3]);
	printk("           %08x %08x %08x %08x\n",
	       regs->acrs[4], regs->acrs[5], regs->acrs[6], regs->acrs[7]);
	printk("           %08x %08x %08x %08x\n",
	       regs->acrs[8], regs->acrs[9], regs->acrs[10], regs->acrs[11]);
	printk("           %08x %08x %08x %08x\n",
	       regs->acrs[12], regs->acrs[13], regs->acrs[14], regs->acrs[15]);
#endif

	/*
	 * Print the first 20 byte of the instruction stream at the
	 * time of the fault.
	 */
	old_fs = get_fs();
	/* Fetch the code bytes through the address space the fault came from. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		set_fs(USER_DS);
	else
		set_fs(KERNEL_DS);
	printk("%s Code: ", mode);
	for (i = 0; i < 20; i++) {
		unsigned char c;
		if (__get_user(c, (char __user *)(regs->psw.addr + i))) {
			printk(" Bad PSW.");
			break;
		}
		printk("%02x ", c);
	}
	set_fs(old_fs);

	printk("\n");
}
|
||||
|
||||
/* This is called from fs/proc/array.c */
/*
 * Format @task's saved user-space register state (PSW, GPRS, ACRS) into
 * @buffer and return the advanced buffer pointer.  The caller must supply
 * a buffer large enough for the fixed-size output.
 */
char *task_show_regs(struct task_struct *task, char *buffer)
{
	struct pt_regs *regs;

	regs = task_pt_regs(task);
	buffer += sprintf(buffer, "task: %p, ksp: %p\n",
			  task, (void *)task->thread.ksp);
	buffer += sprintf(buffer, "User PSW : %p %p\n",
			  (void *) regs->psw.mask, (void *)regs->psw.addr);

	buffer += sprintf(buffer, "User GPRS: " FOURLONG,
			  regs->gprs[0], regs->gprs[1],
			  regs->gprs[2], regs->gprs[3]);
	buffer += sprintf(buffer, "           " FOURLONG,
			  regs->gprs[4], regs->gprs[5],
			  regs->gprs[6], regs->gprs[7]);
	buffer += sprintf(buffer, "           " FOURLONG,
			  regs->gprs[8], regs->gprs[9],
			  regs->gprs[10], regs->gprs[11]);
	buffer += sprintf(buffer, "           " FOURLONG,
			  regs->gprs[12], regs->gprs[13],
			  regs->gprs[14], regs->gprs[15]);
	/* Access registers are read from the saved thread state, not pt_regs. */
	buffer += sprintf(buffer, "User ACRS: %08x %08x %08x %08x\n",
			  task->thread.acrs[0], task->thread.acrs[1],
			  task->thread.acrs[2], task->thread.acrs[3]);
	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
			  task->thread.acrs[4], task->thread.acrs[5],
			  task->thread.acrs[6], task->thread.acrs[7]);
	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
			  task->thread.acrs[8], task->thread.acrs[9],
			  task->thread.acrs[10], task->thread.acrs[11]);
	buffer += sprintf(buffer, "           %08x %08x %08x %08x\n",
			  task->thread.acrs[12], task->thread.acrs[13],
			  task->thread.acrs[14], task->thread.acrs[15]);
	return buffer;
}
|
||||
|
||||
/* Serializes concurrent oops output from multiple cpus. */
static DEFINE_SPINLOCK(die_lock);

/*
 * Kernel oops handler: stop debug logging, print the oops banner and
 * register state, then either panic (in interrupt context or with
 * panic_on_oops set) or kill the current task with SIGSEGV.
 * Does not return.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
	static int die_counter;

	debug_stop_all();
	console_verbose();
	spin_lock_irq(&die_lock);
	bust_spinlocks(1);
	printk("%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
	show_regs(regs);
	bust_spinlocks(0);
	spin_unlock_irq(&die_lock);
	if (in_interrupt())
		panic("Fatal exception in interrupt");
	if (panic_on_oops)
		panic("Fatal exception: panic_on_oops");
	do_exit(SIGSEGV);
}
|
||||
|
||||
/*
 * Optionally log a user-space program fault.  With CONFIG_SYSCTL the
 * output is gated on the sysctl_userprocess_debug knob; with
 * CONFIG_PROCESS_DEBUG (and no sysctl) it is printed unconditionally.
 */
static void inline
report_user_fault(long interruption_code, struct pt_regs *regs)
{
#if defined(CONFIG_SYSCTL)
	if (!sysctl_userprocess_debug)
		return;
#endif
#if defined(CONFIG_SYSCTL) || defined(CONFIG_PROCESS_DEBUG)
	printk("User process fault: interruption code 0x%lX\n",
	       interruption_code);
	show_regs(regs);
#endif
}
|
||||
|
||||
/*
 * Common program-check handling: notify the die chain, then either send
 * @signr with @info to the faulting user task, or — for kernel faults —
 * apply an exception-table fixup or die.
 */
static void __kprobes inline do_trap(long interruption_code, int signr,
				     char *str, struct pt_regs *regs,
				     siginfo_t *info)
{
	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (notify_die(DIE_TRAP, str, regs, interruption_code,
		       interruption_code, signr) == NOTIFY_STOP)
		return;

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Fault happened in user mode: deliver the signal. */
		struct task_struct *tsk = current;

		tsk->thread.trap_no = interruption_code & 0xffff;
		force_sig_info(signr, info, tsk);
		report_user_fault(interruption_code, regs);
	} else {
		/* Kernel-mode fault: try a fixup, otherwise oops. */
		const struct exception_table_entry *fixup;
		fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
		if (fixup)
			regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE;
		else
			die(str, regs, interruption_code);
	}
}
|
||||
|
||||
/*
 * Return the address of the instruction that caused the program check:
 * the PSW points past the faulting instruction, so back up by the
 * instruction length code stored in the lowcore.
 */
static inline void __user *get_check_address(struct pt_regs *regs)
{
	unsigned long addr;

	addr = regs->psw.addr - S390_lowcore.pgm_ilc;
	return (void __user *) (addr & PSW_ADDR_INSN);
}
|
||||
|
||||
/*
 * PER single-step event handler: give the die chain (kprobes) first
 * chance, then raise SIGTRAP if the current task is being ptraced.
 */
void __kprobes do_single_step(struct pt_regs *regs)
{
	if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
		       SIGTRAP) == NOTIFY_STOP)
		return;
	if (current->ptrace & PT_PTRACED)
		force_sig(SIGTRAP, current);
}
|
||||
|
||||
static void default_trap_handler(struct pt_regs * regs, long interruption_code)
|
||||
{
|
||||
if (regs->psw.mask & PSW_MASK_PSTATE) {
|
||||
local_irq_enable();
|
||||
do_exit(SIGSEGV);
|
||||
report_user_fault(interruption_code, regs);
|
||||
} else
|
||||
die("Unknown program exception", regs, interruption_code);
|
||||
}
|
||||
|
||||
/*
 * Generate a trivial program-check handler that fills a siginfo with the
 * given signal number, si_code and fault address and forwards it to
 * do_trap().  Used for all exceptions that need no special treatment.
 */
#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
static void name(struct pt_regs * regs, long interruption_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = siaddr; \
        do_trap(interruption_code, signr, str, regs, &info); \
}
|
||||
|
||||
/* Simple handlers for the standard program checks; wired up in trap_init(). */
DO_ERROR_INFO(SIGILL, "addressing exception", addressing_exception,
	      ILL_ILLADR, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "execute exception", execute_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint divide exception", divide_exception,
	      FPE_INTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "fixpoint overflow exception", overflow_exception,
	      FPE_INTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP overflow exception", hfp_overflow_exception,
	      FPE_FLTOVF, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP underflow exception", hfp_underflow_exception,
	      FPE_FLTUND, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP significance exception", hfp_significance_exception,
	      FPE_FLTRES, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP divide exception", hfp_divide_exception,
	      FPE_FLTDIV, get_check_address(regs))
DO_ERROR_INFO(SIGFPE, "HFP square root exception", hfp_sqrt_exception,
	      FPE_FLTINV, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "operand exception", operand_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "privileged operation", privileged_op,
	      ILL_PRVOPC, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "special operation exception", special_op_exception,
	      ILL_ILLOPN, get_check_address(regs))
DO_ERROR_INFO(SIGILL, "translation exception", translation_exception,
	      ILL_ILLOPN, get_check_address(regs))
|
||||
|
||||
/*
 * Deliver SIGFPE for a floating point exception.  Decodes the IEEE
 * exception flags from the Data Exception Code in the FPC register into
 * the matching FPE_* si_code, records the faulting instruction address
 * in the thread struct, and hands off to do_trap().
 */
static inline void
do_fp_trap(struct pt_regs *regs, void __user *location,
	   int fpc, long interruption_code)
{
	siginfo_t si;

	si.si_signo = SIGFPE;
	si.si_errno = 0;
	si.si_addr = location;
	si.si_code = 0;
	/* FPC[2] is Data Exception Code */
	if ((fpc & 0x00000300) == 0) {
		/* bits 6 and 7 of DXC are 0 iff IEEE exception */
		if (fpc & 0x8000) /* invalid fp operation */
			si.si_code = FPE_FLTINV;
		else if (fpc & 0x4000) /* div by 0 */
			si.si_code = FPE_FLTDIV;
		else if (fpc & 0x2000) /* overflow */
			si.si_code = FPE_FLTOVF;
		else if (fpc & 0x1000) /* underflow */
			si.si_code = FPE_FLTUND;
		else if (fpc & 0x0800) /* inexact */
			si.si_code = FPE_FLTRES;
	}
	current->thread.ieee_instruction_pointer = (addr_t) location;
	do_trap(interruption_code, SIGFPE,
		"floating point exception", regs, &si);
}
|
||||
|
||||
/*
 * Operation exception (illegal opcode) handler.  For user mode this
 * recognizes the ptrace breakpoint opcode and, with CONFIG_MATHEMU,
 * dispatches emulatable floating point opcodes to the math emulator;
 * everything else gets SIGILL.  Kernel-mode illegal ops are offered to
 * kprobes via the die chain first.
 */
static void illegal_op(struct pt_regs * regs, long interruption_code)
{
	siginfo_t info;
	__u8 opcode[6];
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		/* Fetch the first halfword of the faulting instruction. */
		if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
			return;
		if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
			/* ptrace software breakpoint */
			if (current->ptrace & PT_PTRACED)
				force_sig(SIGTRAP, current);
			else
				signal = SIGILL;
#ifdef CONFIG_MATHEMU
		} else if (opcode[0] == 0xb3) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_b3(opcode, regs);
		} else if (opcode[0] == 0xed) {
			if (get_user(*((__u32 *) (opcode+2)),
				     (__u32 __user *)(location+1)))
				return;
			signal = math_emu_ed(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb299) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_srnm(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29c) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_stfpc(opcode, regs);
		} else if (*((__u16 *) opcode) == 0xb29d) {
			if (get_user(*((__u16 *) (opcode+2)), location+1))
				return;
			signal = math_emu_lfpc(opcode, regs);
#endif
		} else
			signal = SIGILL;
	} else {
		/*
		 * If we get an illegal op in kernel mode, send it through the
		 * kprobes notifier. If kprobes doesn't pick it up, SIGILL
		 */
		if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
			       3, SIGTRAP) != NOTIFY_STOP)
			signal = SIGILL;
	}

#ifdef CONFIG_MATHEMU
	/* The emulator may report SIGFPE or SIGSEGV; route accordingly. */
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal == SIGSEGV) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = SEGV_MAPERR;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"user address fault", regs, &info);
	} else
#endif
	if (signal) {
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPC;
		info.si_addr = (void __user *) location;
		do_trap(interruption_code, signal,
			"illegal operation", regs, &info);
	}
}
|
||||
|
||||
|
||||
#ifdef CONFIG_MATHEMU
/*
 * Specification exception handler with math emulation: try to emulate the
 * floating point instructions that raise a specification exception on
 * machines without the corresponding hardware; anything else is SIGILL.
 */
asmlinkage void
specification_exception(struct pt_regs * regs, long interruption_code)
{
	__u8 opcode[6];
	__u16 __user *location = NULL;
	int signal = 0;

	location = (__u16 __user *) get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (regs->psw.mask & PSW_MASK_PSTATE) {
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		default:
			signal = SIGILL;
			break;
		}
	} else
		signal = SIGILL;

	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"specification exception", regs, &info);
	}
}
#else
/* Without math emulation a specification exception is always SIGILL. */
DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
	      ILL_ILLOPN, get_check_address(regs));
#endif
|
||||
|
||||
/*
 * Data exception handler.  On IEEE-capable machines the FPC is stored to
 * read the Data Exception Code; otherwise (with CONFIG_MATHEMU) user-mode
 * floating point instructions are fed to the math emulator.  Delivers
 * SIGFPE when the DXC indicates an FP exception, SIGILL otherwise.
 */
static void data_exception(struct pt_regs * regs, long interruption_code)
{
	__u16 __user *location;
	int signal = 0;

	location = get_check_address(regs);

	/*
	 * We got all needed information from the lowcore and can
	 * now safely switch on interrupts.
	 */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		local_irq_enable();

	if (MACHINE_HAS_IEEE)
		asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc));

#ifdef CONFIG_MATHEMU
	else if (regs->psw.mask & PSW_MASK_PSTATE) {
		__u8 opcode[6];
		get_user(*((__u16 *) opcode), location);
		switch (opcode[0]) {
		case 0x28: /* LDR Rx,Ry */
			signal = math_emu_ldr(opcode);
			break;
		case 0x38: /* LER Rx,Ry */
			signal = math_emu_ler(opcode);
			break;
		case 0x60: /* STD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_std(opcode, regs);
			break;
		case 0x68: /* LD R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ld(opcode, regs);
			break;
		case 0x70: /* STE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_ste(opcode, regs);
			break;
		case 0x78: /* LE R,D(X,B) */
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_le(opcode, regs);
			break;
		case 0xb3:
			get_user(*((__u16 *) (opcode+2)), location+1);
			signal = math_emu_b3(opcode, regs);
			break;
		case 0xed:
			get_user(*((__u32 *) (opcode+2)),
				 (__u32 __user *)(location+1));
			signal = math_emu_ed(opcode, regs);
			break;
		case 0xb2:
			if (opcode[1] == 0x99) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_srnm(opcode, regs);
			} else if (opcode[1] == 0x9c) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_stfpc(opcode, regs);
			} else if (opcode[1] == 0x9d) {
				get_user(*((__u16 *) (opcode+2)), location+1);
				signal = math_emu_lfpc(opcode, regs);
			} else
				signal = SIGILL;
			break;
		default:
			signal = SIGILL;
			break;
		}
	}
#endif
	/*
	 * NOTE(review): this unconditionally overwrites any signal value
	 * produced by the math emulation above based on the DXC bits in
	 * the FPC — confirm this is the intended precedence.
	 */
	if (current->thread.fp_regs.fpc & FPC_DXC_MASK)
		signal = SIGFPE;
	else
		signal = SIGILL;
	if (signal == SIGFPE)
		do_fp_trap(regs, location,
			   current->thread.fp_regs.fpc, interruption_code);
	else if (signal) {
		siginfo_t info;
		info.si_signo = signal;
		info.si_errno = 0;
		info.si_code = ILL_ILLOPN;
		info.si_addr = location;
		do_trap(interruption_code, signal,
			"data exception", regs, &info);
	}
}
|
||||
|
||||
/*
 * Space switch event handler: restore the user PSW to home address space
 * mode and deliver SIGILL to the task.
 */
static void space_switch_exception(struct pt_regs * regs, long int_code)
{
	siginfo_t info;

	/* Set user psw back to home space mode. */
	if (regs->psw.mask & PSW_MASK_PSTATE)
		regs->psw.mask |= PSW_ASC_HOME;
	/* Send SIGILL. */
	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code = ILL_PRVOPC;
	info.si_addr = get_check_address(regs);
	do_trap(int_code, SIGILL, "space switch event", regs, &info);
}
|
||||
|
||||
/*
 * Called from entry code when a kernel stack overflow is detected:
 * dump the registers and panic — the stack cannot be trusted anymore.
 */
asmlinkage void kernel_stack_overflow(struct pt_regs * regs)
{
	bust_spinlocks(1);
	printk("Kernel stack overflow.\n");
	show_regs(regs);
	bust_spinlocks(0);
	panic("Corrupt kernel stack, can't continue.");
}
|
||||
|
||||
/* init is done in lowcore.S and head.S */
|
||||
|
||||
void __init trap_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < 128; i++)
|
||||
pgm_check_table[i] = &default_trap_handler;
|
||||
pgm_check_table[1] = &illegal_op;
|
||||
pgm_check_table[2] = &privileged_op;
|
||||
pgm_check_table[3] = &execute_exception;
|
||||
pgm_check_table[4] = &do_protection_exception;
|
||||
pgm_check_table[5] = &addressing_exception;
|
||||
pgm_check_table[6] = &specification_exception;
|
||||
pgm_check_table[7] = &data_exception;
|
||||
pgm_check_table[8] = &overflow_exception;
|
||||
pgm_check_table[9] = ÷_exception;
|
||||
pgm_check_table[0x0A] = &overflow_exception;
|
||||
pgm_check_table[0x0B] = ÷_exception;
|
||||
pgm_check_table[0x0C] = &hfp_overflow_exception;
|
||||
pgm_check_table[0x0D] = &hfp_underflow_exception;
|
||||
pgm_check_table[0x0E] = &hfp_significance_exception;
|
||||
pgm_check_table[0x0F] = &hfp_divide_exception;
|
||||
pgm_check_table[0x10] = &do_dat_exception;
|
||||
pgm_check_table[0x11] = &do_dat_exception;
|
||||
pgm_check_table[0x12] = &translation_exception;
|
||||
pgm_check_table[0x13] = &special_op_exception;
|
||||
#ifdef CONFIG_64BIT
|
||||
pgm_check_table[0x38] = &do_dat_exception;
|
||||
pgm_check_table[0x39] = &do_dat_exception;
|
||||
pgm_check_table[0x3A] = &do_dat_exception;
|
||||
pgm_check_table[0x3B] = &do_dat_exception;
|
||||
#endif /* CONFIG_64BIT */
|
||||
pgm_check_table[0x15] = &operand_exception;
|
||||
pgm_check_table[0x1C] = &space_switch_exception;
|
||||
pgm_check_table[0x1D] = &hfp_sqrt_exception;
|
||||
pgm_check_table[0x40] = &do_monitor_call;
|
||||
pfault_irq_init();
|
||||
}
|
||||
130
arch/s390/kernel/vmlinux.lds.S
Normal file
130
arch/s390/kernel/vmlinux.lds.S
Normal file
@@ -0,0 +1,130 @@
|
||||
/* ld script to make s390 Linux kernel
 * Written by Martin Schwidefsky (schwidefsky@de.ibm.com)
 */

#include <asm-generic/vmlinux.lds.h>

#ifndef CONFIG_64BIT
OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
OUTPUT_ARCH(s390)
ENTRY(_start)
jiffies = jiffies_64 + 4;
#else
OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
OUTPUT_ARCH(s390:64-bit)
ENTRY(_start)
jiffies = jiffies_64;
#endif

SECTIONS
{
  . = 0x00000000;
  _text = .;			/* Text and read-only data */
  .text : {
	*(.text)
	SCHED_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	*(.fixup)
	*(.gnu.warning)
	} = 0x0700		/* fill gaps with 0x0700 (s390 illegal op) */

  _etext = .;			/* End of text section */

  RODATA

#ifdef CONFIG_SHARED_KERNEL
  . = ALIGN(1048576);		/* VM shared segments are 1MB aligned */
#endif

  . = ALIGN(4096);
  _eshared = .;			/* End of shareable data */

  . = ALIGN(16);		/* Exception table */
  __start___ex_table = .;
  __ex_table : { *(__ex_table) }
  __stop___ex_table = .;

  .data : {			/* Data */
	*(.data)
	CONSTRUCTORS
	}

  . = ALIGN(4096);
  __nosave_begin = .;
  .data_nosave : { *(.data.nosave) }
  . = ALIGN(4096);
  __nosave_end = .;

  . = ALIGN(4096);
  .data.page_aligned : { *(.data.idt) }

  . = ALIGN(256);
  .data.cacheline_aligned : { *(.data.cacheline_aligned) }

  . = ALIGN(256);
  .data.read_mostly : { *(.data.read_mostly) }
  _edata = .;			/* End of data section */

  . = ALIGN(8192);		/* init_task */
  .data.init_task : { *(.data.init_task) }

  /* will be freed after init */
  . = ALIGN(4096);		/* Init code and data */
  __init_begin = .;
  .init.text : {
	_sinittext = .;
	*(.init.text)
	_einittext = .;
  }
  .init.data : { *(.init.data) }
  . = ALIGN(256);
  __setup_start = .;
  .init.setup : { *(.init.setup) }
  __setup_end = .;
  __initcall_start = .;
  .initcall.init : {
	INITCALLS
  }
  __initcall_end = .;
  __con_initcall_start = .;
  .con_initcall.init : { *(.con_initcall.init) }
  __con_initcall_end = .;
  SECURITY_INIT

#ifdef CONFIG_BLK_DEV_INITRD
  . = ALIGN(256);
  __initramfs_start = .;
  .init.ramfs : { *(.init.initramfs) }
  . = ALIGN(2);
  __initramfs_end = .;
#endif
  . = ALIGN(256);
  __per_cpu_start = .;
  .data.percpu : { *(.data.percpu) }
  __per_cpu_end = .;
  . = ALIGN(4096);
  __init_end = .;
  /* freed after init ends here */

  __bss_start = .;		/* BSS */
  .bss : { *(.bss) }
  . = ALIGN(2);
  __bss_stop = .;

  _end = . ;

  /* Sections to be discarded */
  /DISCARD/ : {
	*(.exit.text) *(.exit.data) *(.exitcall.exit)
	}

  /* Stabs debugging sections. */
  .stab 0 : { *(.stab) }
  .stabstr 0 : { *(.stabstr) }
  .stab.excl 0 : { *(.stab.excl) }
  .stab.exclstr 0 : { *(.stab.exclstr) }
  .stab.index 0 : { *(.stab.index) }
  .stab.indexstr 0 : { *(.stab.indexstr) }
  .comment 0 : { *(.comment) }
}
|
||||
577
arch/s390/kernel/vtime.c
Normal file
577
arch/s390/kernel/vtime.c
Normal file
@@ -0,0 +1,577 @@
|
||||
/*
|
||||
* arch/s390/kernel/vtime.c
|
||||
* Virtual cpu timer based timer functions.
|
||||
*
|
||||
* S390 version
|
||||
* Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
|
||||
* Author(s): Jan Glauber <jan.glauber@de.ibm.com>
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/smp.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/timex.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/kernel_stat.h>
|
||||
#include <linux/rcupdate.h>
|
||||
#include <linux/posix-timers.h>
|
||||
|
||||
#include <asm/s390_ext.h>
|
||||
#include <asm/timer.h>
|
||||
#include <asm/irq_regs.h>
|
||||
|
||||
/* External-interrupt registration info for the cpu timer interrupt. */
static ext_int_info_t ext_int_info_timer;
/* Per-cpu queue of pending virtual timers. */
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
|
||||
|
||||
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
|
||||
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 *
 * Tick-time accounting: samples the cpu timer and TOD clock, folds the
 * deltas into the lowcore accumulators, charges user/system/steal time
 * (accumulators are in 2^-12 microsecond units, hence the 12-bit shifts)
 * and drives the per-cpu timer machinery.
 */
void account_tick_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer, clock;
	int rcu_user_flag;

	timer = S390_lowcore.last_update_timer;
	clock = S390_lowcore.last_update_clock;
	asm volatile ("  STPT %0\n"    /* Store current cpu timer value */
		      "  STCK %1"      /* Store current tod clock value */
		      : "=m" (S390_lowcore.last_update_timer),
		        "=m" (S390_lowcore.last_update_clock) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
	S390_lowcore.steal_clock += S390_lowcore.last_update_clock - clock;

	cputime = S390_lowcore.user_timer >> 12;
	rcu_user_flag = cputime != 0;
	S390_lowcore.user_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_user_time(tsk, cputime);

	cputime =  S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, HARDIRQ_OFFSET, cputime);

	/* Only positive residue counts as stolen time. */
	cputime = S390_lowcore.steal_clock;
	if ((__s64) cputime > 0) {
		cputime >>= 12;
		S390_lowcore.steal_clock -= cputime << 12;
		account_steal_time(tsk, cputime);
	}

	run_local_timers();
	if (rcu_pending(smp_processor_id()))
		rcu_check_callbacks(smp_processor_id(), rcu_user_flag);
	scheduler_tick();
	run_posix_cpu_timers(tsk);
}
|
||||
|
||||
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 *
 * Task-switch accounting variant: charges accumulated user and system
 * time to @tsk without the tick-driven timer/RCU work.
 */
void account_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer;

	timer = S390_lowcore.last_update_timer;
	asm volatile ("  STPT %0"    /* Store current cpu timer value */
		      : "=m" (S390_lowcore.last_update_timer) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	cputime = S390_lowcore.user_timer >> 12;
	S390_lowcore.user_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_user_time(tsk, cputime);

	cputime =  S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, 0, cputime);
}
|
||||
|
||||
/*
 * Update process times based on virtual cpu times stored by entry.S
 * to the lowcore fields user_timer, system_timer & steal_clock.
 *
 * Charges accumulated system time only (e.g. on irq entry/exit).
 */
void account_system_vtime(struct task_struct *tsk)
{
	cputime_t cputime;
	__u64 timer;

	timer = S390_lowcore.last_update_timer;
	asm volatile ("  STPT %0"    /* Store current cpu timer value */
		      : "=m" (S390_lowcore.last_update_timer) );
	S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;

	cputime =  S390_lowcore.system_timer >> 12;
	S390_lowcore.system_timer -= cputime << 12;
	S390_lowcore.steal_clock -= cputime << 12;
	account_system_time(tsk, 0, cputime);
}
|
||||
|
||||
/*
 * Program the cpu timer to fire after @expires, accounting the elapsed
 * time since the last update as system time (CONFIG_VIRT_CPU_ACCOUNTING
 * variant).  Also records the expiry in this cpu's vtimer queue.
 */
static inline void set_vtimer(__u64 expires)
{
	__u64 timer;

	asm volatile ("  STPT %0\n"  /* Store current cpu timer value */
		      "  SPT %1"     /* Set new value immediately afterwards */
		      : "=m" (timer) : "m" (expires) );
	S390_lowcore.system_timer += S390_lowcore.last_update_timer - timer;
	S390_lowcore.last_update_timer = expires;

	/* store expire time for this CPU timer */
	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
|
||||
#else
|
||||
/*
 * Program the cpu timer to fire after @expires (variant without virtual
 * cpu accounting — no system time bookkeeping needed).
 */
static inline void set_vtimer(__u64 expires)
{
	S390_lowcore.last_update_timer = expires;
	asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));

	/* store expire time for this CPU timer */
	per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
|
||||
#endif
|
||||
|
||||
/*
 * Re-arm this cpu's timer with the remaining slice saved by
 * stop_cpu_timer(), unless an interrupt is already pending or no
 * virtual timers are queued.
 */
static void start_cpu_timer(void)
{
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

	/* CPU timer interrupt is pending, don't reprogram it */
	if (vt_list->idle & 1LL<<63)
		return;

	if (!list_empty(&vt_list->list))
		set_vtimer(vt_list->idle);
}
|
||||
|
||||
/*
 * Save the remaining cpu timer slice before this cpu goes idle and park
 * the timer on a practically-infinite value so it stays quiet.
 */
static void stop_cpu_timer(void)
{
	struct vtimer_queue *vt_list;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

	/* nothing to do */
	if (list_empty(&vt_list->list)) {
		vt_list->idle = VTIMER_MAX_SLICE;
		goto fire;
	}

	/* store the actual expire value */
	asm volatile ("STPT %0" : "=m" (vt_list->idle));

	/*
	 * If the CPU timer is negative we don't reprogram
	 * it because we will get instantly an interrupt.
	 */
	if (vt_list->idle & 1LL<<63)
		return;

	/* Account the already-elapsed part of the current slice. */
	vt_list->offset += vt_list->to_expire - vt_list->idle;

	/*
	 * We cannot halt the CPU timer, we just write a value that
	 * nearly never expires (only after 71 years) and re-write
	 * the stored expire value if we continue the timer
	 */
 fire:
	set_vtimer(VTIMER_MAX_SLICE);
}
|
||||
|
||||
/*
 * Sorted add to a list. List is linear searched until first bigger
 * element is found.  Keeps the queue ordered by ascending expiry time;
 * the caller must hold the queue lock.
 */
static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
	struct vtimer_list *event;

	list_for_each_entry(event, head, entry) {
		if (event->expires > timer->expires) {
			list_add_tail(&timer->entry, &event->entry);
			return;
		}
	}
	/* Largest expiry so far — append at the end. */
	list_add_tail(&timer->entry, head);
}
|
||||
|
||||
/*
 * Do the callback functions of expired vtimer events.
 * Called from within the interrupt handler.
 *
 * One-shot timers are removed after firing; interval timers are re-queued
 * in sorted order under the queue lock.
 */
static void do_callbacks(struct list_head *cb_list)
{
	struct vtimer_queue *vt_list;
	struct vtimer_list *event, *tmp;
	void (*fn)(unsigned long);
	unsigned long data;

	if (list_empty(cb_list))
		return;

	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());

	list_for_each_entry_safe(event, tmp, cb_list, entry) {
		fn = event->function;
		data = event->data;
		fn(data);

		if (!event->interval)
			/* delete one shot timer */
			list_del_init(&event->entry);
		else {
			/* move interval timer back to list */
			spin_lock(&vt_list->lock);
			list_del_init(&event->entry);
			list_add_sorted(event, &vt_list->list);
			spin_unlock(&vt_list->lock);
		}
	}
}
|
||||
|
||||
/*
|
||||
* Handler for the virtual CPU timer.
|
||||
*/
|
||||
static void do_cpu_timer_interrupt(__u16 error_code)
|
||||
{
|
||||
int cpu;
|
||||
__u64 next, delta;
|
||||
struct vtimer_queue *vt_list;
|
||||
struct vtimer_list *event, *tmp;
|
||||
struct list_head *ptr;
|
||||
/* the callback queue */
|
||||
struct list_head cb_list;
|
||||
|
||||
INIT_LIST_HEAD(&cb_list);
|
||||
cpu = smp_processor_id();
|
||||
vt_list = &per_cpu(virt_cpu_timer, cpu);
|
||||
|
||||
/* walk timer list, fire all expired events */
|
||||
spin_lock(&vt_list->lock);
|
||||
|
||||
if (vt_list->to_expire < VTIMER_MAX_SLICE)
|
||||
vt_list->offset += vt_list->to_expire;
|
||||
|
||||
list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
|
||||
if (event->expires > vt_list->offset)
|
||||
/* found first unexpired event, leave */
|
||||
break;
|
||||
|
||||
/* re-charge interval timer, we have to add the offset */
|
||||
if (event->interval)
|
||||
event->expires = event->interval + vt_list->offset;
|
||||
|
||||
/* move expired timer to the callback queue */
|
||||
list_move_tail(&event->entry, &cb_list);
|
||||
}
|
||||
spin_unlock(&vt_list->lock);
|
||||
do_callbacks(&cb_list);
|
||||
|
||||
/* next event is first in list */
|
||||
spin_lock(&vt_list->lock);
|
||||
if (!list_empty(&vt_list->list)) {
|
||||
ptr = vt_list->list.next;
|
||||
event = list_entry(ptr, struct vtimer_list, entry);
|
||||
next = event->expires - vt_list->offset;
|
||||
|
||||
/* add the expired time from this interrupt handler
|
||||
* and the callback functions
|
||||
*/
|
||||
asm volatile ("STPT %0" : "=m" (delta));
|
||||
delta = 0xffffffffffffffffLL - delta + 1;
|
||||
vt_list->offset += delta;
|
||||
next -= delta;
|
||||
} else {
|
||||
vt_list->offset = 0;
|
||||
next = VTIMER_MAX_SLICE;
|
||||
}
|
||||
spin_unlock(&vt_list->lock);
|
||||
set_vtimer(next);
|
||||
}
|
||||
|
||||
void init_virt_timer(struct vtimer_list *timer)
|
||||
{
|
||||
timer->function = NULL;
|
||||
INIT_LIST_HEAD(&timer->entry);
|
||||
spin_lock_init(&timer->lock);
|
||||
}
|
||||
EXPORT_SYMBOL(init_virt_timer);
|
||||
|
||||
static inline int vtimer_pending(struct vtimer_list *timer)
|
||||
{
|
||||
return (!list_empty(&timer->entry));
|
||||
}
|
||||
|
||||
/*
|
||||
* this function should only run on the specified CPU
|
||||
*/
|
||||
static void internal_add_vtimer(struct vtimer_list *timer)
|
||||
{
|
||||
unsigned long flags;
|
||||
__u64 done;
|
||||
struct vtimer_list *event;
|
||||
struct vtimer_queue *vt_list;
|
||||
|
||||
vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
|
||||
spin_lock_irqsave(&vt_list->lock, flags);
|
||||
|
||||
if (timer->cpu != smp_processor_id())
|
||||
printk("internal_add_vtimer: BUG, running on wrong CPU");
|
||||
|
||||
/* if list is empty we only have to set the timer */
|
||||
if (list_empty(&vt_list->list)) {
|
||||
/* reset the offset, this may happen if the last timer was
|
||||
* just deleted by mod_virt_timer and the interrupt
|
||||
* didn't happen until here
|
||||
*/
|
||||
vt_list->offset = 0;
|
||||
goto fire;
|
||||
}
|
||||
|
||||
/* save progress */
|
||||
asm volatile ("STPT %0" : "=m" (done));
|
||||
|
||||
/* calculate completed work */
|
||||
done = vt_list->to_expire - done + vt_list->offset;
|
||||
vt_list->offset = 0;
|
||||
|
||||
list_for_each_entry(event, &vt_list->list, entry)
|
||||
event->expires -= done;
|
||||
|
||||
fire:
|
||||
list_add_sorted(timer, &vt_list->list);
|
||||
|
||||
/* get first element, which is the next vtimer slice */
|
||||
event = list_entry(vt_list->list.next, struct vtimer_list, entry);
|
||||
|
||||
set_vtimer(event->expires);
|
||||
spin_unlock_irqrestore(&vt_list->lock, flags);
|
||||
/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
|
||||
put_cpu();
|
||||
}
|
||||
|
||||
static inline int prepare_vtimer(struct vtimer_list *timer)
|
||||
{
|
||||
if (!timer->function) {
|
||||
printk("add_virt_timer: uninitialized timer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
|
||||
printk("add_virt_timer: invalid timer expire value!\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (vtimer_pending(timer)) {
|
||||
printk("add_virt_timer: timer pending\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
timer->cpu = get_cpu();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* add_virt_timer - add an oneshot virtual CPU timer
|
||||
*/
|
||||
void add_virt_timer(void *new)
|
||||
{
|
||||
struct vtimer_list *timer;
|
||||
|
||||
timer = (struct vtimer_list *)new;
|
||||
|
||||
if (prepare_vtimer(timer) < 0)
|
||||
return;
|
||||
|
||||
timer->interval = 0;
|
||||
internal_add_vtimer(timer);
|
||||
}
|
||||
EXPORT_SYMBOL(add_virt_timer);
|
||||
|
||||
/*
|
||||
* add_virt_timer_int - add an interval virtual CPU timer
|
||||
*/
|
||||
void add_virt_timer_periodic(void *new)
|
||||
{
|
||||
struct vtimer_list *timer;
|
||||
|
||||
timer = (struct vtimer_list *)new;
|
||||
|
||||
if (prepare_vtimer(timer) < 0)
|
||||
return;
|
||||
|
||||
timer->interval = timer->expires;
|
||||
internal_add_vtimer(timer);
|
||||
}
|
||||
EXPORT_SYMBOL(add_virt_timer_periodic);
|
||||
|
||||
/*
|
||||
* If we change a pending timer the function must be called on the CPU
|
||||
* where the timer is running on, e.g. by smp_call_function_on()
|
||||
*
|
||||
* The original mod_timer adds the timer if it is not pending. For compatibility
|
||||
* we do the same. The timer will be added on the current CPU as a oneshot timer.
|
||||
*
|
||||
* returns whether it has modified a pending timer (1) or not (0)
|
||||
*/
|
||||
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
|
||||
{
|
||||
struct vtimer_queue *vt_list;
|
||||
unsigned long flags;
|
||||
int cpu;
|
||||
|
||||
if (!timer->function) {
|
||||
printk("mod_virt_timer: uninitialized timer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!expires || expires > VTIMER_MAX_SLICE) {
|
||||
printk("mod_virt_timer: invalid expire range\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/*
|
||||
* This is a common optimization triggered by the
|
||||
* networking code - if the timer is re-modified
|
||||
* to be the same thing then just return:
|
||||
*/
|
||||
if (timer->expires == expires && vtimer_pending(timer))
|
||||
return 1;
|
||||
|
||||
cpu = get_cpu();
|
||||
vt_list = &per_cpu(virt_cpu_timer, cpu);
|
||||
|
||||
/* disable interrupts before test if timer is pending */
|
||||
spin_lock_irqsave(&vt_list->lock, flags);
|
||||
|
||||
/* if timer isn't pending add it on the current CPU */
|
||||
if (!vtimer_pending(timer)) {
|
||||
spin_unlock_irqrestore(&vt_list->lock, flags);
|
||||
/* we do not activate an interval timer with mod_virt_timer */
|
||||
timer->interval = 0;
|
||||
timer->expires = expires;
|
||||
timer->cpu = cpu;
|
||||
internal_add_vtimer(timer);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* check if we run on the right CPU */
|
||||
if (timer->cpu != cpu) {
|
||||
printk("mod_virt_timer: running on wrong CPU, check your code\n");
|
||||
spin_unlock_irqrestore(&vt_list->lock, flags);
|
||||
put_cpu();
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
list_del_init(&timer->entry);
|
||||
timer->expires = expires;
|
||||
|
||||
/* also change the interval if we have an interval timer */
|
||||
if (timer->interval)
|
||||
timer->interval = expires;
|
||||
|
||||
/* the timer can't expire anymore so we can release the lock */
|
||||
spin_unlock_irqrestore(&vt_list->lock, flags);
|
||||
internal_add_vtimer(timer);
|
||||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL(mod_virt_timer);
|
||||
|
||||
/*
|
||||
* delete a virtual timer
|
||||
*
|
||||
* returns whether the deleted timer was pending (1) or not (0)
|
||||
*/
|
||||
int del_virt_timer(struct vtimer_list *timer)
|
||||
{
|
||||
unsigned long flags;
|
||||
struct vtimer_queue *vt_list;
|
||||
|
||||
/* check if timer is pending */
|
||||
if (!vtimer_pending(timer))
|
||||
return 0;
|
||||
|
||||
vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
|
||||
spin_lock_irqsave(&vt_list->lock, flags);
|
||||
|
||||
/* we don't interrupt a running timer, just let it expire! */
|
||||
list_del_init(&timer->entry);
|
||||
|
||||
/* last timer removed */
|
||||
if (list_empty(&vt_list->list)) {
|
||||
vt_list->to_expire = 0;
|
||||
vt_list->offset = 0;
|
||||
}
|
||||
|
||||
spin_unlock_irqrestore(&vt_list->lock, flags);
|
||||
return 1;
|
||||
}
|
||||
EXPORT_SYMBOL(del_virt_timer);
|
||||
|
||||
/*
|
||||
* Start the virtual CPU timer on the current CPU.
|
||||
*/
|
||||
void init_cpu_vtimer(void)
|
||||
{
|
||||
struct vtimer_queue *vt_list;
|
||||
|
||||
/* kick the virtual timer */
|
||||
S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
|
||||
S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
|
||||
asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
|
||||
asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
|
||||
|
||||
/* enable cpu timer interrupts */
|
||||
__ctl_set_bit(0,10);
|
||||
|
||||
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
|
||||
INIT_LIST_HEAD(&vt_list->list);
|
||||
spin_lock_init(&vt_list->lock);
|
||||
vt_list->to_expire = 0;
|
||||
vt_list->offset = 0;
|
||||
vt_list->idle = 0;
|
||||
|
||||
}
|
||||
|
||||
static int vtimer_idle_notify(struct notifier_block *self,
|
||||
unsigned long action, void *hcpu)
|
||||
{
|
||||
switch (action) {
|
||||
case CPU_IDLE:
|
||||
stop_cpu_timer();
|
||||
break;
|
||||
case CPU_NOT_IDLE:
|
||||
start_cpu_timer();
|
||||
break;
|
||||
}
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
/* Hooked into the idle notifier chain by vtime_init() so the CPU
 * timer is stopped/restarted across idle transitions. */
static struct notifier_block vtimer_idle_nb = {
	.notifier_call = vtimer_idle_notify,
};
|
||||
|
||||
/*
 * Boot-time initialization of the virtual CPU timer subsystem:
 * registers the 0x1005 external interrupt handler and the idle
 * notifier, then arms the timer on the boot CPU.  Both registrations
 * are fatal on failure since vtimer accounting cannot work without
 * them.
 */
void __init vtime_init(void)
{
	/* request the cpu timer external interrupt */
	if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
					      &ext_int_info_timer) != 0)
		panic("Couldn't request external interrupt 0x1005");

	if (register_idle_notifier(&vtimer_idle_nb))
		panic("Couldn't register idle notifier");

	/* Enable cpu timer interrupts on the boot cpu. */
	init_cpu_vtimer();
}
|
||||
|
||||
Reference in New Issue
Block a user