Diffstat (limited to 'arch/s390/kernel')
-rw-r--r--  arch/s390/kernel/Makefile              |    4
-rw-r--r--  arch/s390/kernel/base.S                |  150
-rw-r--r--  arch/s390/kernel/binfmt_elf32.c        |    2
-rw-r--r--  arch/s390/kernel/compat_exec_domain.c  |    5
-rw-r--r--  arch/s390/kernel/compat_linux.c        |   67
-rw-r--r--  arch/s390/kernel/compat_linux.h        |   31
-rw-r--r--  arch/s390/kernel/compat_signal.c       |    8
-rw-r--r--  arch/s390/kernel/compat_wrapper.S      |    6
-rw-r--r--  arch/s390/kernel/cpcmd.c               |   14
-rw-r--r--  arch/s390/kernel/crash.c               |    1
-rw-r--r--  arch/s390/kernel/debug.c               |   23
-rw-r--r--  arch/s390/kernel/early.c               |  306
-rw-r--r--  arch/s390/kernel/ebcdic.c              |    1
-rw-r--r--  arch/s390/kernel/head31.S              |  194
-rw-r--r--  arch/s390/kernel/head64.S              |  193
-rw-r--r--  arch/s390/kernel/ipl.c                 |  106
-rw-r--r--  arch/s390/kernel/irq.c                 |   15
-rw-r--r--  arch/s390/kernel/kprobes.c             |   32
-rw-r--r--  arch/s390/kernel/machine_kexec.c       |    1
-rw-r--r--  arch/s390/kernel/module.c              |    5
-rw-r--r--  arch/s390/kernel/process.c             |    4
-rw-r--r--  arch/s390/kernel/profile.c             |   20
-rw-r--r--  arch/s390/kernel/ptrace.c              |   46
-rw-r--r--  arch/s390/kernel/reset.S               |   90
-rw-r--r--  arch/s390/kernel/s390_ext.c            |    8
-rw-r--r--  arch/s390/kernel/setup.c               |  157
-rw-r--r--  arch/s390/kernel/signal.c              |    2
-rw-r--r--  arch/s390/kernel/smp.c                 |   47
-rw-r--r--  arch/s390/kernel/stacktrace.c          |   10
-rw-r--r--  arch/s390/kernel/syscalls.S            |    2
-rw-r--r--  arch/s390/kernel/time.c                | 1187
-rw-r--r--  arch/s390/kernel/traps.c               |   24
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S         |   16
-rw-r--r--  arch/s390/kernel/vtime.c               |   10
34 files changed, 1990 insertions(+), 797 deletions(-)
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index a81881c9b297..5492d25d7d69 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -4,9 +4,9 @@
EXTRA_AFLAGS := -traditional
-obj-y := bitmap.o traps.o time.o process.o reset.o \
+obj-y := bitmap.o traps.o time.o process.o base.o early.o \
setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
- semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o
+ semaphore.o s390_ext.o debug.o irq.o ipl.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
new file mode 100644
index 000000000000..dc7e5259770f
--- /dev/null
+++ b/arch/s390/kernel/base.S
@@ -0,0 +1,150 @@
+/*
+ * arch/s390/kernel/base.S
+ *
+ * Copyright IBM Corp. 2006,2007
+ * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Michael Holzheu <holzheu@de.ibm.com>
+ */
+
+#include <asm/ptrace.h>
+#include <asm/lowcore.h>
+
+#ifdef CONFIG_64BIT
+
+ .globl s390_base_mcck_handler
+s390_base_mcck_handler:
+ basr %r13,0
+0: lg %r15,__LC_PANIC_STACK # load panic stack
+ aghi %r15,-STACK_FRAME_OVERHEAD
+ larl %r1,s390_base_mcck_handler_fn
+ lg %r1,0(%r1)
+ ltgr %r1,%r1
+ jz 1f
+ basr %r14,%r1
+1: la %r1,4095
+ lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
+ lpswe __LC_MCK_OLD_PSW
+
+ .section .bss
+ .globl s390_base_mcck_handler_fn
+s390_base_mcck_handler_fn:
+ .quad 0
+ .previous
+
+ .globl s390_base_ext_handler
+s390_base_ext_handler:
+ stmg %r0,%r15,__LC_SAVE_AREA
+ basr %r13,0
+0: aghi %r15,-STACK_FRAME_OVERHEAD
+ larl %r1,s390_base_ext_handler_fn
+ lg %r1,0(%r1)
+ ltgr %r1,%r1
+ jz 1f
+ basr %r14,%r1
+1: lmg %r0,%r15,__LC_SAVE_AREA
+ ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
+ lpswe __LC_EXT_OLD_PSW
+
+ .section .bss
+ .globl s390_base_ext_handler_fn
+s390_base_ext_handler_fn:
+ .quad 0
+ .previous
+
+ .globl s390_base_pgm_handler
+s390_base_pgm_handler:
+ stmg %r0,%r15,__LC_SAVE_AREA
+ basr %r13,0
+0: aghi %r15,-STACK_FRAME_OVERHEAD
+ larl %r1,s390_base_pgm_handler_fn
+ lg %r1,0(%r1)
+ ltgr %r1,%r1
+ jz 1f
+ basr %r14,%r1
+ lmg %r0,%r15,__LC_SAVE_AREA
+ lpswe __LC_PGM_OLD_PSW
+1: lpswe disabled_wait_psw-0b(%r13)
+
+ .align 8
+disabled_wait_psw:
+ .quad 0x0002000180000000,0x0000000000000000 + s390_base_pgm_handler
+
+ .section .bss
+ .globl s390_base_pgm_handler_fn
+s390_base_pgm_handler_fn:
+ .quad 0
+ .previous
+
+#else /* CONFIG_64BIT */
+
+ .globl s390_base_mcck_handler
+s390_base_mcck_handler:
+ basr %r13,0
+0: l %r15,__LC_PANIC_STACK # load panic stack
+ ahi %r15,-STACK_FRAME_OVERHEAD
+ l %r1,2f-0b(%r13)
+ l %r1,0(%r1)
+ ltr %r1,%r1
+ jz 1f
+ basr %r14,%r1
+1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
+ lpsw __LC_MCK_OLD_PSW
+
+2: .long s390_base_mcck_handler_fn
+
+ .section .bss
+ .globl s390_base_mcck_handler_fn
+s390_base_mcck_handler_fn:
+ .long 0
+ .previous
+
+ .globl s390_base_ext_handler
+s390_base_ext_handler:
+ stm %r0,%r15,__LC_SAVE_AREA
+ basr %r13,0
+0: ahi %r15,-STACK_FRAME_OVERHEAD
+ l %r1,2f-0b(%r13)
+ l %r1,0(%r1)
+ ltr %r1,%r1
+ jz 1f
+ basr %r14,%r1
+1: lm %r0,%r15,__LC_SAVE_AREA
+ ni __LC_EXT_OLD_PSW+1,0xfd # clear wait state bit
+ lpsw __LC_EXT_OLD_PSW
+
+2: .long s390_base_ext_handler_fn
+
+ .section .bss
+ .globl s390_base_ext_handler_fn
+s390_base_ext_handler_fn:
+ .long 0
+ .previous
+
+ .globl s390_base_pgm_handler
+s390_base_pgm_handler:
+ stm %r0,%r15,__LC_SAVE_AREA
+ basr %r13,0
+0: ahi %r15,-STACK_FRAME_OVERHEAD
+ l %r1,2f-0b(%r13)
+ l %r1,0(%r1)
+ ltr %r1,%r1
+ jz 1f
+ basr %r14,%r1
+ lm %r0,%r15,__LC_SAVE_AREA
+ lpsw __LC_PGM_OLD_PSW
+
+1: lpsw disabled_wait_psw-0b(%r13)
+
+2: .long s390_base_pgm_handler_fn
+
+disabled_wait_psw:
+ .align 8
+ .long 0x000a0000,0x00000000 + s390_base_pgm_handler
+
+ .section .bss
+ .globl s390_base_pgm_handler_fn
+s390_base_pgm_handler_fn:
+ .long 0
+ .previous
+
+#endif /* CONFIG_64BIT */
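Each handler above follows one pattern: save the registers, call the C function installed in the matching *_handler_fn slot (if any), restore, and resume through the old PSW. As a minimal sketch of how a caller wires one up, mirroring what setup_lowcore_early() in early.c below does (fragment for illustration only):

	psw_t psw;

	/* Point the program-check new PSW at the assembly stub ... */
	psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
	psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
	S390_lowcore.program_new_psw = psw;
	/* ... and install the C callback it should dispatch to. */
	s390_base_pgm_handler_fn = early_pgm_check_handler;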
diff --git a/arch/s390/kernel/binfmt_elf32.c b/arch/s390/kernel/binfmt_elf32.c
index 5c46054195cb..f1e40ca00d8d 100644
--- a/arch/s390/kernel/binfmt_elf32.c
+++ b/arch/s390/kernel/binfmt_elf32.c
@@ -192,7 +192,7 @@ MODULE_AUTHOR("Gerhard Tonn <ton@de.ibm.com>");
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
-static __inline__ void
+static inline void
cputime_to_compat_timeval(const cputime_t cputime, struct compat_timeval *value)
{
value->tv_usec = cputime % 1000000;
diff --git a/arch/s390/kernel/compat_exec_domain.c b/arch/s390/kernel/compat_exec_domain.c
index 71d27c493568..914d49444f92 100644
--- a/arch/s390/kernel/compat_exec_domain.c
+++ b/arch/s390/kernel/compat_exec_domain.c
@@ -12,10 +12,9 @@
#include <linux/personality.h>
#include <linux/sched.h>
-struct exec_domain s390_exec_domain;
+static struct exec_domain s390_exec_domain;
-static int __init
-s390_init (void)
+static int __init s390_init (void)
{
s390_exec_domain.name = "Linux/s390";
s390_exec_domain.handler = NULL;
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 5b33f823863a..664c669b1856 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -69,6 +69,12 @@
#include "compat_linux.h"
+long psw_user32_bits = (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
+ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+ PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
+long psw32_user_bits = (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME |
+ PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
+ PSW32_MASK_PSTATE);
/* For this source file, we want overflow handling. */
@@ -392,51 +398,6 @@ int cp_compat_stat(struct kstat *stat, struct compat_stat __user *statbuf)
return err;
}
-struct sysinfo32 {
- s32 uptime;
- u32 loads[3];
- u32 totalram;
- u32 freeram;
- u32 sharedram;
- u32 bufferram;
- u32 totalswap;
- u32 freeswap;
- unsigned short procs;
- unsigned short pads;
- u32 totalhigh;
- u32 freehigh;
- unsigned int mem_unit;
- char _f[8];
-};
-
-asmlinkage long sys32_sysinfo(struct sysinfo32 __user *info)
-{
- struct sysinfo s;
- int ret, err;
- mm_segment_t old_fs = get_fs ();
-
- set_fs (KERNEL_DS);
- ret = sys_sysinfo((struct sysinfo __user *) &s);
- set_fs (old_fs);
- err = put_user (s.uptime, &info->uptime);
- err |= __put_user (s.loads[0], &info->loads[0]);
- err |= __put_user (s.loads[1], &info->loads[1]);
- err |= __put_user (s.loads[2], &info->loads[2]);
- err |= __put_user (s.totalram, &info->totalram);
- err |= __put_user (s.freeram, &info->freeram);
- err |= __put_user (s.sharedram, &info->sharedram);
- err |= __put_user (s.bufferram, &info->bufferram);
- err |= __put_user (s.totalswap, &info->totalswap);
- err |= __put_user (s.freeswap, &info->freeswap);
- err |= __put_user (s.procs, &info->procs);
- err |= __put_user (s.totalhigh, &info->totalhigh);
- err |= __put_user (s.freehigh, &info->freehigh);
- err |= __put_user (s.mem_unit, &info->mem_unit);
- if (err)
- return -EFAULT;
- return ret;
-}
-
asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
struct compat_timespec __user *interval)
{
@@ -445,7 +406,8 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
mm_segment_t old_fs = get_fs ();
set_fs (KERNEL_DS);
- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
+ ret = sys_sched_rr_get_interval(pid,
+ (struct timespec __force __user *) &t);
set_fs (old_fs);
if (put_compat_timespec(&t, interval))
return -EFAULT;
@@ -472,8 +434,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
}
set_fs (KERNEL_DS);
ret = sys_rt_sigprocmask(how,
- set ? (sigset_t __user *) &s : NULL,
- oset ? (sigset_t __user *) &s : NULL,
+ set ? (sigset_t __force __user *) &s : NULL,
+ oset ? (sigset_t __force __user *) &s : NULL,
sigsetsize);
set_fs (old_fs);
if (ret) return ret;
@@ -499,7 +461,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
mm_segment_t old_fs = get_fs();
set_fs (KERNEL_DS);
- ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize);
+ ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
set_fs (old_fs);
if (!ret) {
switch (_NSIG_WORDS) {
@@ -524,7 +486,7 @@ sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
if (copy_siginfo_from_user32(&info, uinfo))
return -EFAULT;
set_fs (KERNEL_DS);
- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force __user *) &info);
set_fs (old_fs);
return ret;
}
@@ -682,7 +644,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offse
set_fs(KERNEL_DS);
ret = sys_sendfile(out_fd, in_fd,
- offset ? (off_t __user *) &of : NULL, count);
+ offset ? (off_t __force __user *) &of : NULL, count);
set_fs(old_fs);
if (offset && put_user(of, offset))
@@ -703,7 +665,8 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd,
set_fs(KERNEL_DS);
ret = sys_sendfile64(out_fd, in_fd,
- offset ? (loff_t __user *) &lof : NULL, count);
+ offset ? (loff_t __force __user *) &lof : NULL,
+ count);
set_fs(old_fs);
if (offset && put_user(lof, offset))
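The recurring (type __force __user *) casts in this file mark places where a kernel buffer is intentionally passed through a syscall's user-pointer interface inside a set_fs(KERNEL_DS) section; __force suppresses the sparse address-space warning the cast would otherwise raise. A condensed sketch of the pattern (sigsetsize comes from the enclosing compat handler; fragment for illustration):

	long ret;
	sigset_t s;
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);	/* uaccess checks now accept kernel addresses */
	ret = sys_rt_sigpending((sigset_t __force __user *) &s, sigsetsize);
	set_fs(old_fs);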
diff --git a/arch/s390/kernel/compat_linux.h b/arch/s390/kernel/compat_linux.h
index 1a18e29668ef..e89f8c0c42a0 100644
--- a/arch/s390/kernel/compat_linux.h
+++ b/arch/s390/kernel/compat_linux.h
@@ -115,37 +115,6 @@ typedef struct
__u32 addr;
} _psw_t32 __attribute__ ((aligned(8)));
-#define PSW32_MASK_PER 0x40000000UL
-#define PSW32_MASK_DAT 0x04000000UL
-#define PSW32_MASK_IO 0x02000000UL
-#define PSW32_MASK_EXT 0x01000000UL
-#define PSW32_MASK_KEY 0x00F00000UL
-#define PSW32_MASK_MCHECK 0x00040000UL
-#define PSW32_MASK_WAIT 0x00020000UL
-#define PSW32_MASK_PSTATE 0x00010000UL
-#define PSW32_MASK_ASC 0x0000C000UL
-#define PSW32_MASK_CC 0x00003000UL
-#define PSW32_MASK_PM 0x00000f00UL
-
-#define PSW32_ADDR_AMODE31 0x80000000UL
-#define PSW32_ADDR_INSN 0x7FFFFFFFUL
-
-#define PSW32_BASE_BITS 0x00080000UL
-
-#define PSW32_ASC_PRIMARY 0x00000000UL
-#define PSW32_ASC_ACCREG 0x00004000UL
-#define PSW32_ASC_SECONDARY 0x00008000UL
-#define PSW32_ASC_HOME 0x0000C000UL
-
-#define PSW32_USER_BITS (PSW32_BASE_BITS | PSW32_MASK_DAT | PSW32_ASC_HOME | \
- PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK | \
- PSW32_MASK_PSTATE)
-
-#define PSW32_MASK_MERGE(CURRENT,NEW) \
- (((CURRENT) & ~(PSW32_MASK_CC|PSW32_MASK_PM)) | \
- ((NEW) & (PSW32_MASK_CC|PSW32_MASK_PM)))
-
-
typedef struct
{
_psw_t32 psw;
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 861888ab8c13..887a9881d0d0 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -275,8 +275,8 @@ sys32_sigaltstack(const stack_t32 __user *uss, stack_t32 __user *uoss,
}
set_fs (KERNEL_DS);
- ret = do_sigaltstack((stack_t __user *) (uss ? &kss : NULL),
- (stack_t __user *) (uoss ? &koss : NULL),
+ ret = do_sigaltstack((stack_t __force __user *) (uss ? &kss : NULL),
+ (stack_t __force __user *) (uoss ? &koss : NULL),
regs->gprs[15]);
set_fs (old_fs);
@@ -298,7 +298,7 @@ static int save_sigregs32(struct pt_regs *regs, _sigregs32 __user *sregs)
_s390_regs_common32 regs32;
int err, i;
- regs32.psw.mask = PSW32_MASK_MERGE(PSW32_USER_BITS,
+ regs32.psw.mask = PSW32_MASK_MERGE(psw32_user_bits,
(__u32)(regs->psw.mask >> 32));
regs32.psw.addr = PSW32_ADDR_AMODE31 | (__u32) regs->psw.addr;
for (i = 0; i < NUM_GPRS; i++)
@@ -401,7 +401,7 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
goto badframe;
set_fs (KERNEL_DS);
- do_sigaltstack((stack_t __user *)&st, NULL, regs->gprs[15]);
+ do_sigaltstack((stack_t __force __user *)&st, NULL, regs->gprs[15]);
set_fs (old_fs);
return regs->gprs[2];
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 71e54ef0931e..97901296894e 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -517,10 +517,10 @@ sys32_swapoff_wrapper:
llgtr %r2,%r2 # const char *
jg sys_swapoff # branch to system call
- .globl sys32_sysinfo_wrapper
-sys32_sysinfo_wrapper:
+ .globl compat_sys_sysinfo_wrapper
+compat_sys_sysinfo_wrapper:
llgtr %r2,%r2 # struct sysinfo_emu31 *
- jg sys32_sysinfo # branch to system call
+ jg compat_sys_sysinfo # branch to system call
.globl sys32_ipc_wrapper
sys32_ipc_wrapper:
diff --git a/arch/s390/kernel/cpcmd.c b/arch/s390/kernel/cpcmd.c
index a5972f1541fe..6c89f30c8e31 100644
--- a/arch/s390/kernel/cpcmd.c
+++ b/arch/s390/kernel/cpcmd.c
@@ -16,6 +16,7 @@
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>
#include <asm/system.h>
+#include <asm/io.h>
static DEFINE_SPINLOCK(cpcmd_lock);
static char cpcmd_buf[241];
@@ -88,13 +89,8 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
int len;
unsigned long flags;
- if ((rlen == 0) || (response == NULL)
- || !((unsigned long)response >> 31)) {
- spin_lock_irqsave(&cpcmd_lock, flags);
- len = __cpcmd(cmd, response, rlen, response_code);
- spin_unlock_irqrestore(&cpcmd_lock, flags);
- }
- else {
+ if ((virt_to_phys(response) != (unsigned long) response) ||
+ (((unsigned long)response + rlen) >> 31)) {
lowbuf = kmalloc(rlen, GFP_KERNEL | GFP_DMA);
if (!lowbuf) {
printk(KERN_WARNING
@@ -106,6 +102,10 @@ int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
spin_unlock_irqrestore(&cpcmd_lock, flags);
memcpy(response, lowbuf, rlen);
kfree(lowbuf);
+ } else {
+ spin_lock_irqsave(&cpcmd_lock, flags);
+ len = __cpcmd(cmd, response, rlen, response_code);
+ spin_unlock_irqrestore(&cpcmd_lock, flags);
}
return len;
}
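The rewritten condition sends the response buffer through a bounce copy whenever it is not identity-mapped or would end at or above 2GB, since the underlying diagnose works on 31-bit real addresses. A hedged restatement of the check (the helper name is illustrative, not part of the patch):

	static int response_needs_bounce(void *response, int rlen)
	{
		unsigned long addr = (unsigned long) response;

		/* Bounce if virtual != real (not identity-mapped) or if the
		 * buffer end crosses the 2GB line reachable in 31 bits. */
		return (virt_to_phys(response) != addr) || ((addr + rlen) >> 31);
	}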
diff --git a/arch/s390/kernel/crash.c b/arch/s390/kernel/crash.c
index 926cceeae0fa..8cc7c9fa64f5 100644
--- a/arch/s390/kernel/crash.c
+++ b/arch/s390/kernel/crash.c
@@ -9,6 +9,7 @@
#include <linux/threads.h>
#include <linux/kexec.h>
+#include <linux/reboot.h>
void machine_crash_shutdown(struct pt_regs *regs)
{
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index bb57bc0e3fc8..eca3fe595ff4 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -120,7 +120,7 @@ struct debug_view debug_hex_ascii_view = {
NULL
};
-struct debug_view debug_level_view = {
+static struct debug_view debug_level_view = {
"level",
&debug_prolog_level_fn,
NULL,
@@ -129,7 +129,7 @@ struct debug_view debug_level_view = {
NULL
};
-struct debug_view debug_pages_view = {
+static struct debug_view debug_pages_view = {
"pages",
&debug_prolog_pages_fn,
NULL,
@@ -138,7 +138,7 @@ struct debug_view debug_pages_view = {
NULL
};
-struct debug_view debug_flush_view = {
+static struct debug_view debug_flush_view = {
"flush",
NULL,
NULL,
@@ -156,18 +156,18 @@ struct debug_view debug_sprintf_view = {
NULL
};
-
+/* used by dump analysis tools to determine version of debug feature */
unsigned int debug_feature_version = __DEBUG_FEATURE_VERSION;
/* static globals */
static debug_info_t *debug_area_first = NULL;
static debug_info_t *debug_area_last = NULL;
-DECLARE_MUTEX(debug_lock);
+static DECLARE_MUTEX(debug_lock);
static int initialized;
-static struct file_operations debug_file_ops = {
+static const struct file_operations debug_file_ops = {
.owner = THIS_MODULE,
.read = debug_output,
.write = debug_input,
@@ -852,7 +852,6 @@ debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
static int debug_stoppable=1;
static int debug_active=1;
-#define CTL_S390DBF 5677
#define CTL_S390DBF_STOPPABLE 5678
#define CTL_S390DBF_ACTIVE 5679
@@ -905,7 +904,7 @@ static struct ctl_table s390dbf_dir_table[] = {
{ .ctl_name = 0 }
};
-struct ctl_table_header *s390dbf_sysctl_header;
+static struct ctl_table_header *s390dbf_sysctl_header;
void
debug_stop_all(void)
@@ -1054,7 +1053,7 @@ __init debug_init(void)
{
int rc = 0;
- s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table, 1);
+ s390dbf_sysctl_header = register_sysctl_table(s390dbf_dir_table);
down(&debug_lock);
debug_debugfs_root_entry = debugfs_create_dir(DEBUG_DIR_ROOT,NULL);
printk(KERN_INFO "debug: Initialization complete\n");
@@ -1300,8 +1299,7 @@ out:
* flushes debug areas
*/
-void
-debug_flush(debug_info_t* id, int area)
+static void debug_flush(debug_info_t* id, int area)
{
unsigned long flags;
int i,j;
@@ -1511,8 +1509,7 @@ out:
/*
* clean up module
*/
-void
-__exit debug_exit(void)
+static void __exit debug_exit(void)
{
debugfs_remove(debug_debugfs_root_entry);
unregister_sysctl_table(s390dbf_sysctl_header);
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
new file mode 100644
index 000000000000..e518dd53eff5
--- /dev/null
+++ b/arch/s390/kernel/early.c
@@ -0,0 +1,306 @@
+/*
+ * arch/s390/kernel/early.c
+ *
+ * Copyright IBM Corp. 2007
+ * Author(s): Hongjie Yang <hongjie@us.ibm.com>,
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/lockdep.h>
+#include <linux/module.h>
+#include <linux/pfn.h>
+#include <linux/uaccess.h>
+#include <asm/lowcore.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/cpcmd.h>
+#include <asm/sclp.h>
+
+/*
+ * Create a Kernel NSS if the SAVESYS= parameter is defined
+ */
+#define DEFSYS_CMD_SIZE 96
+#define SAVESYS_CMD_SIZE 32
+
+char kernel_nss_name[NSS_NAME_SIZE + 1];
+
+#ifdef CONFIG_SHARED_KERNEL
+static noinline __init void create_kernel_nss(void)
+{
+ unsigned int i, stext_pfn, eshared_pfn, end_pfn, min_size;
+#ifdef CONFIG_BLK_DEV_INITRD
+ unsigned int sinitrd_pfn, einitrd_pfn;
+#endif
+ int response;
+ char *savesys_ptr;
+ char upper_command_line[COMMAND_LINE_SIZE];
+ char defsys_cmd[DEFSYS_CMD_SIZE];
+ char savesys_cmd[SAVESYS_CMD_SIZE];
+
+ /* Do nothing if we are not running under VM */
+ if (!MACHINE_IS_VM)
+ return;
+
+ /* Convert COMMAND_LINE to upper case */
+ for (i = 0; i < strlen(COMMAND_LINE); i++)
+ upper_command_line[i] = toupper(COMMAND_LINE[i]);
+
+ savesys_ptr = strstr(upper_command_line, "SAVESYS=");
+
+ if (!savesys_ptr)
+ return;
+
+ savesys_ptr += 8; /* Point to the beginning of the NSS name */
+ for (i = 0; i < NSS_NAME_SIZE; i++) {
+ if (savesys_ptr[i] == ' ' || savesys_ptr[i] == '\0')
+ break;
+ kernel_nss_name[i] = savesys_ptr[i];
+ }
+
+ stext_pfn = PFN_DOWN(__pa(&_stext));
+ eshared_pfn = PFN_DOWN(__pa(&_eshared));
+ end_pfn = PFN_UP(__pa(&_end));
+ min_size = end_pfn << 2;
+
+ sprintf(defsys_cmd, "DEFSYS %s 00000-%.5X EW %.5X-%.5X SR %.5X-%.5X",
+ kernel_nss_name, stext_pfn - 1, stext_pfn, eshared_pfn - 1,
+ eshared_pfn, end_pfn);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (INITRD_START && INITRD_SIZE) {
+ sinitrd_pfn = PFN_DOWN(__pa(INITRD_START));
+ einitrd_pfn = PFN_UP(__pa(INITRD_START + INITRD_SIZE));
+ min_size = einitrd_pfn << 2;
+ sprintf(defsys_cmd, "%s EW %.5X-%.5X", defsys_cmd,
+ sinitrd_pfn, einitrd_pfn);
+ }
+#endif
+
+ sprintf(defsys_cmd, "%s EW MINSIZE=%.7iK", defsys_cmd, min_size);
+ sprintf(savesys_cmd, "SAVESYS %s \n IPL %s",
+ kernel_nss_name, kernel_nss_name);
+
+ __cpcmd(defsys_cmd, NULL, 0, &response);
+
+ if (response != 0)
+ return;
+
+ __cpcmd(savesys_cmd, NULL, 0, &response);
+
+ if (response != strlen(savesys_cmd))
+ return;
+
+ ipl_flags = IPL_NSS_VALID;
+}
+
+#else /* CONFIG_SHARED_KERNEL */
+
+static inline void create_kernel_nss(void) { }
+
+#endif /* CONFIG_SHARED_KERNEL */
+
+/*
+ * Clear bss memory
+ */
+static noinline __init void clear_bss_section(void)
+{
+ memset(__bss_start, 0, _end - __bss_start);
+}
+
+/*
+ * Initialize storage key for kernel pages
+ */
+static noinline __init void init_kernel_storage_key(void)
+{
+ unsigned long end_pfn, init_pfn;
+
+ end_pfn = PFN_UP(__pa(&_end));
+
+ for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
+ page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
+}
+
+static noinline __init void detect_machine_type(void)
+{
+ struct cpuinfo_S390 *cpuinfo = &S390_lowcore.cpu_data;
+
+ asm volatile("stidp %0" : "=m" (S390_lowcore.cpu_data.cpu_id));
+
+ /* Running under z/VM ? */
+ if (cpuinfo->cpu_id.version == 0xff)
+ machine_flags |= 1;
+
+ /* Running on a P/390 ? */
+ if (cpuinfo->cpu_id.machine == 0x7490)
+ machine_flags |= 4;
+}
+
+static noinline __init int memory_fast_detect(void)
+{
+
+ unsigned long val0 = 0;
+ unsigned long val1 = 0xc;
+ int ret = -ENOSYS;
+
+ if (ipl_flags & IPL_NSS_VALID)
+ return -ENOSYS;
+
+ asm volatile(
+ " diag %1,%2,0x260\n"
+ "0: lhi %0,0\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (ret), "+d" (val0), "+d" (val1) : : "cc");
+
+ if (ret || val0 != val1)
+ return -ENOSYS;
+
+ memory_chunk[0].size = val0;
+ return 0;
+}
+
+#define ADDR2G (1UL << 31)
+
+static noinline __init unsigned long sclp_memory_detect(void)
+{
+ struct sclp_readinfo_sccb *sccb;
+ unsigned long long memsize;
+
+ sccb = &s390_readinfo_sccb;
+
+ if (sccb->header.response_code != 0x10)
+ return 0;
+
+ if (sccb->rnsize)
+ memsize = sccb->rnsize << 20;
+ else
+ memsize = sccb->rnsize2 << 20;
+ if (sccb->rnmax)
+ memsize *= sccb->rnmax;
+ else
+ memsize *= sccb->rnmax2;
+#ifndef CONFIG_64BIT
+ /*
+ * Can't deal with more than 2G in 31 bit addressing mode, so
+ * limit the value in order to avoid strange side effects.
+ */
+ if (memsize > ADDR2G)
+ memsize = ADDR2G;
+#endif
+ return (unsigned long) memsize;
+}
+
+static inline __init unsigned long __tprot(unsigned long addr)
+{
+ int cc = -1;
+
+ asm volatile(
+ " tprot 0(%1),0\n"
+ "0: ipm %0\n"
+ " srl %0,28\n"
+ "1:\n"
+ EX_TABLE(0b,1b)
+ : "+d" (cc) : "a" (addr) : "cc");
+ return (unsigned long)cc;
+}
+
+/* Checking memory in 128KB increments. */
+#define CHUNK_INCR (1UL << 17)
+
+static noinline __init void find_memory_chunks(unsigned long memsize)
+{
+ unsigned long addr = 0, old_addr = 0;
+ unsigned long old_cc = CHUNK_READ_WRITE;
+ unsigned long cc;
+ int chunk = 0;
+
+ while (chunk < MEMORY_CHUNKS) {
+ cc = __tprot(addr);
+ while (cc == old_cc) {
+ addr += CHUNK_INCR;
+ cc = __tprot(addr);
+#ifndef CONFIG_64BIT
+ if (addr == ADDR2G)
+ break;
+#endif
+ }
+
+ if (old_addr != addr &&
+ (old_cc == CHUNK_READ_WRITE || old_cc == CHUNK_READ_ONLY)) {
+ memory_chunk[chunk].addr = old_addr;
+ memory_chunk[chunk].size = addr - old_addr;
+ memory_chunk[chunk].type = old_cc;
+ chunk++;
+ }
+
+ old_addr = addr;
+ old_cc = cc;
+
+#ifndef CONFIG_64BIT
+ if (addr == ADDR2G)
+ break;
+#endif
+ /*
+ * Finish memory detection at the first hole, unless
+ * - we reached the hsa -> skip it.
+ * - we know there must be more.
+ */
+ if (cc == -1UL && !memsize && old_addr != ADDR2G)
+ break;
+ if (memsize && addr >= memsize)
+ break;
+ }
+}
+
+static __init void early_pgm_check_handler(void)
+{
+ unsigned long addr;
+ const struct exception_table_entry *fixup;
+
+ addr = S390_lowcore.program_old_psw.addr;
+ fixup = search_exception_tables(addr & PSW_ADDR_INSN);
+ if (!fixup)
+ disabled_wait(0);
+ S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE;
+}
+
+static noinline __init void setup_lowcore_early(void)
+{
+ psw_t psw;
+
+ psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
+ psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_ext_handler;
+ S390_lowcore.external_new_psw = psw;
+ psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
+ S390_lowcore.program_new_psw = psw;
+ s390_base_pgm_handler_fn = early_pgm_check_handler;
+}
+
+/*
+ * Save ipl parameters, clear bss memory, initialize storage keys
+ * and create a kernel NSS at startup if the SAVESYS= parm is defined
+ */
+void __init startup_init(void)
+{
+ unsigned long memsize;
+
+ ipl_save_parameters();
+ clear_bss_section();
+ init_kernel_storage_key();
+ lockdep_init();
+ lockdep_off();
+ detect_machine_type();
+ create_kernel_nss();
+ sort_main_extable();
+ setup_lowcore_early();
+ sclp_readinfo_early();
+ memsize = sclp_memory_detect();
+ if (memory_fast_detect() < 0)
+ find_memory_chunks(memsize);
+ lockdep_on();
+}
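Worked example for sclp_memory_detect(): installed storage is the increment size (rnsize or rnsize2, shifted left by 20, i.e. read as megabytes) multiplied by the increment count (rnmax or rnmax2). With assumed values rnsize = 1 and rnmax = 0x800, memsize = (1 << 20) * 0x800 = 0x80000000 bytes, which is exactly the ADDR2G ceiling the 31-bit build then enforces.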
diff --git a/arch/s390/kernel/ebcdic.c b/arch/s390/kernel/ebcdic.c
index bb0f973137f0..cc0dc609d738 100644
--- a/arch/s390/kernel/ebcdic.c
+++ b/arch/s390/kernel/ebcdic.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <asm/types.h>
+#include <asm/ebcdic.h>
/*
* ASCII (IBM PC 437) -> EBCDIC 037
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index eca507050e47..453fd3b4edea 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -51,176 +51,15 @@ startup_continue:
st %r15,__LC_KERNEL_STACK # set end of kernel stack
ahi %r15,-96
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
-
- l %r14,.Lipl_save_parameters-.LPG1(%r13)
- basr %r14,%r14
#
-# clear bss memory
+# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
+# and create a kernel NSS if the SAVESYS= parm is defined
#
- l %r2,.Lbss_bgn-.LPG1(%r13) # start of bss
- l %r3,.Lbss_end-.LPG1(%r13) # end of bss
- sr %r3,%r2 # length of bss
- sr %r4,%r4
- sr %r5,%r5 # set src,length and pad to zero
- sr %r0,%r0
- mvcle %r2,%r4,0 # clear mem
- jo .-4 # branch back, if not finish
-
- l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
-.Lservicecall:
- stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
-
- stctl %r0, %r0,.Lcr-.LPG1(%r13) # get cr0
- la %r1,0x200 # set bit 22
- o %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
- st %r1,.Lcr-.LPG1(%r13)
- lctl %r0, %r0,.Lcr-.LPG1(%r13) # load modified cr0
-
- mvc __LC_EXT_NEW_PSW(8),.Lpcext-.LPG1(%r13) # set postcall psw
- la %r1, .Lsclph-.LPG1(%r13)
- a %r1,__LC_EXT_NEW_PSW+4 # set handler
- st %r1,__LC_EXT_NEW_PSW+4
-
- l %r4,.Lsccbaddr-.LPG1(%r13) # %r4 is our index for sccb stuff
- lr %r1,%r4 # our sccb
- .insn rre,0xb2200000,%r2,%r1 # service call
- ipm %r1
- srl %r1,28 # get cc code
- xr %r3, %r3
- chi %r1,3
- be .Lfchunk-.LPG1(%r13) # leave
- chi %r1,2
- be .Lservicecall-.LPG1(%r13)
- lpsw .Lwaitsclp-.LPG1(%r13)
-.Lsclph:
- lh %r1,.Lsccbr-.Lsccb(%r4)
- chi %r1,0x10 # 0x0010 is the sucess code
- je .Lprocsccb # let's process the sccb
- chi %r1,0x1f0
- bne .Lfchunk-.LPG1(%r13) # unhandled error code
- c %r2, .Lrcp-.LPG1(%r13) # Did we try Read SCP forced
- bne .Lfchunk-.LPG1(%r13) # if no, give up
- l %r2, .Lrcp2-.LPG1(%r13) # try with Read SCP
- b .Lservicecall-.LPG1(%r13)
-.Lprocsccb:
- lhi %r1,0
- icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
- jnz .Lscnd
- lhi %r1,0x800 # otherwise report 2GB
-.Lscnd:
- lhi %r3,0x800 # limit reported memory size to 2GB
- cr %r1,%r3
- jl .Lno2gb
- lr %r1,%r3
-.Lno2gb:
- xr %r3,%r3 # same logic
- ic %r3,.Lscpa1-.Lsccb(%r4)
- chi %r3,0x00
- jne .Lcompmem
- l %r3,.Lscpa2-.Lsccb(%r4)
-.Lcompmem:
- mr %r2,%r1 # mem in MB on 128-bit
- l %r1,.Lonemb-.LPG1(%r13)
- mr %r2,%r1 # mem size in bytes in %r3
- b .Lfchunk-.LPG1(%r13)
-
- .align 4
-.Lipl_save_parameters:
- .long ipl_save_parameters
-.Linittu:
- .long init_thread_union
-.Lpmask:
- .byte 0
- .align 8
-.Lpcext:.long 0x00080000,0x80000000
-.Lcr:
- .long 0x00 # place holder for cr0
- .align 8
-.Lwaitsclp:
- .long 0x010a0000,0x80000000 + .Lsclph
-.Lrcp:
- .int 0x00120001 # Read SCP forced code
-.Lrcp2:
- .int 0x00020001 # Read SCP code
-.Lonemb:
- .int 0x100000
-.Lfchunk:
+ l %r14,.Lstartup_init-.LPG1(%r13)
+ basr %r14,%r14
-#
-# find memory chunks.
-#
- lr %r9,%r3 # end of mem
- mvc __LC_PGM_NEW_PSW(8),.Lpcmem-.LPG1(%r13)
- la %r1,1 # test in increments of 128KB
- sll %r1,17
- l %r3,.Lmchunk-.LPG1(%r13) # get pointer to memory_chunk array
- slr %r4,%r4 # set start of chunk to zero
- slr %r5,%r5 # set end of chunk to zero
- slr %r6,%r6 # set access code to zero
- la %r10,MEMORY_CHUNKS # number of chunks
-.Lloop:
- tprot 0(%r5),0 # test protection of first byte
- ipm %r7
- srl %r7,28
- clr %r6,%r7 # compare cc with last access code
- be .Lsame-.LPG1(%r13)
- lhi %r8,0 # no program checks
- b .Lsavchk-.LPG1(%r13)
-.Lsame:
- ar %r5,%r1 # add 128KB to end of chunk
- bno .Lloop-.LPG1(%r13) # r1 < 0x80000000 -> loop
-.Lchkmem: # > 2GB or tprot got a program check
- lhi %r8,1 # set program check flag
-.Lsavchk:
- clr %r4,%r5 # chunk size > 0?
- be .Lchkloop-.LPG1(%r13)
- st %r4,0(%r3) # store start address of chunk
- lr %r0,%r5
- slr %r0,%r4
- st %r0,4(%r3) # store size of chunk
- st %r6,8(%r3) # store type of chunk
- la %r3,12(%r3)
- ahi %r10,-1 # update chunk number
-.Lchkloop:
- lr %r6,%r7 # set access code to last cc
- # we got an exception or we're starting a new
- # chunk , we must check if we should
- # still try to find valid memory (if we detected
- # the amount of available storage), and if we
- # have chunks left
- xr %r0,%r0
- clr %r0,%r9 # did we detect memory?
- je .Ldonemem # if not, leave
- chi %r10,0 # do we have chunks left?
- je .Ldonemem
- chi %r8,1 # program check ?
- je .Lpgmchk
- lr %r4,%r5 # potential new chunk
- alr %r5,%r1 # add 128KB to end of chunk
- j .Llpcnt
-.Lpgmchk:
- alr %r5,%r1 # add 128KB to end of chunk
- lr %r4,%r5 # potential new chunk
-.Llpcnt:
- clr %r5,%r9 # should we go on?
- jl .Lloop
-.Ldonemem:
l %r12,.Lmflags-.LPG1(%r13) # get address of machine_flags
#
-# find out if we are running under VM
-#
- stidp __LC_CPUID # store cpuid
- tm __LC_CPUID,0xff # running under VM ?
- bno .Lnovm-.LPG1(%r13)
- oi 3(%r12),1 # set VM flag
-.Lnovm:
- lh %r0,__LC_CPUID+4 # get cpu version
- chi %r0,0x7490 # running on a P/390 ?
- bne .Lnop390-.LPG1(%r13)
- oi 3(%r12),4 # set P/390 flag
-.Lnop390:
-
-#
# find out if we have an IEEE fpu
#
mvc __LC_PGM_NEW_PSW(8),.Lpcfpu-.LPG1(%r13)
@@ -295,7 +134,6 @@ startup_continue:
.long 0 # cr15: linkage stack operations
.Lduct: .long 0,0,0,0,0,0,0,0
.long 0,0,0,0,0,0,0,0
-.Lpcmem:.long 0x00080000,0x80000000 + .Lchkmem
.Lpcfpu:.long 0x00080000,0x80000000 + .Lchkfpu
.Lpccsp:.long 0x00080000,0x80000000 + .Lchkcsp
.Lpcmvpg:.long 0x00080000,0x80000000 + .Lchkmvpg
@@ -306,7 +144,9 @@ startup_continue:
.Lbss_bgn: .long __bss_start
.Lbss_end: .long _end
.Lparmaddr: .long PARMAREA
-.Lsccbaddr: .long .Lsccb
+.Linittu: .long init_thread_union
+.Lstartup_init:
+ .long startup_init
.globl ipl_schib
ipl_schib:
@@ -322,26 +162,6 @@ ipl_devno:
.word 0
.org 0x12000
-.globl s390_readinfo_sccb
-s390_readinfo_sccb:
-.Lsccb:
- .hword 0x1000 # length, one page
- .byte 0x00,0x00,0x00
- .byte 0x80 # variable response bit set
-.Lsccbr:
- .hword 0x00 # response code
-.Lscpincr1:
- .hword 0x00
-.Lscpa1:
- .byte 0x00
- .fill 89,1,0
-.Lscpa2:
- .int 0x00
-.Lscpincr2:
- .quad 0x00
- .fill 3984,1,0
- .org 0x13000
-
#ifdef CONFIG_SHARED_KERNEL
.org 0x100000
#endif
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 6ba3f4512dd1..b8fec4e5c5d4 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -58,183 +58,15 @@ startup_continue:
stg %r15,__LC_KERNEL_STACK # set end of kernel stack
aghi %r15,-160
xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
-
- brasl %r14,ipl_save_parameters
#
-# clear bss memory
+# Save ipl parameters, clear bss memory, initialize storage key for kernel pages,
+# and create a kernel NSS if the SAVESYS= parm is defined
#
- larl %r2,__bss_start # start of bss segment
- larl %r3,_end # end of bss segment
- sgr %r3,%r2 # length of bss
- sgr %r4,%r4 #
- sgr %r5,%r5 # set src,length and pad to zero
- mvcle %r2,%r4,0 # clear mem
- jo .-4 # branch back, if not finish
+ brasl %r14,startup_init
# set program check new psw mask
mvc __LC_PGM_NEW_PSW(8),.Lpcmsk-.LPG1(%r13)
- larl %r1,.Lslowmemdetect # set program check address
- stg %r1,__LC_PGM_NEW_PSW+8
- lghi %r1,0xc
- diag %r0,%r1,0x260 # get memory size of virtual machine
- cgr %r0,%r1 # different? -> old detection routine
- jne .Lslowmemdetect
- aghi %r1,1 # size is one more than end
- larl %r2,memory_chunk
- stg %r1,8(%r2) # store size of chunk
- j .Ldonemem
-
-.Lslowmemdetect:
- l %r2,.Lrcp-.LPG1(%r13) # Read SCP forced command word
-.Lservicecall:
- stosm .Lpmask-.LPG1(%r13),0x01 # authorize ext interrupts
-
- stctg %r0,%r0,.Lcr-.LPG1(%r13) # get cr0
- la %r1,0x200 # set bit 22
- og %r1,.Lcr-.LPG1(%r13) # or old cr0 with r1
- stg %r1,.Lcr-.LPG1(%r13)
- lctlg %r0,%r0,.Lcr-.LPG1(%r13) # load modified cr0
-
- mvc __LC_EXT_NEW_PSW(8),.Lpcmsk-.LPG1(%r13) # set postcall psw
- larl %r1,.Lsclph
- stg %r1,__LC_EXT_NEW_PSW+8 # set handler
-
- larl %r4,.Lsccb # %r4 is our index for sccb stuff
- lgr %r1,%r4 # our sccb
- .insn rre,0xb2200000,%r2,%r1 # service call
- ipm %r1
- srl %r1,28 # get cc code
- xr %r3,%r3
- chi %r1,3
- be .Lfchunk-.LPG1(%r13) # leave
- chi %r1,2
- be .Lservicecall-.LPG1(%r13)
- lpswe .Lwaitsclp-.LPG1(%r13)
-.Lsclph:
- lh %r1,.Lsccbr-.Lsccb(%r4)
- chi %r1,0x10 # 0x0010 is the sucess code
- je .Lprocsccb # let's process the sccb
- chi %r1,0x1f0
- bne .Lfchunk-.LPG1(%r13) # unhandled error code
- c %r2,.Lrcp-.LPG1(%r13) # Did we try Read SCP forced
- bne .Lfchunk-.LPG1(%r13) # if no, give up
- l %r2,.Lrcp2-.LPG1(%r13) # try with Read SCP
- b .Lservicecall-.LPG1(%r13)
-.Lprocsccb:
- lghi %r1,0
- icm %r1,3,.Lscpincr1-.Lsccb(%r4) # use this one if != 0
- jnz .Lscnd
- lg %r1,.Lscpincr2-.Lsccb(%r4) # otherwise use this one
-.Lscnd:
- xr %r3,%r3 # same logic
- ic %r3,.Lscpa1-.Lsccb(%r4)
- chi %r3,0x00
- jne .Lcompmem
- l %r3,.Lscpa2-.Lsccb(%r4)
-.Lcompmem:
- mlgr %r2,%r1 # mem in MB on 128-bit
- l %r1,.Lonemb-.LPG1(%r13)
- mlgr %r2,%r1 # mem size in bytes in %r3
- b .Lfchunk-.LPG1(%r13)
-
- .align 4
-.Lpmask:
- .byte 0
- .align 8
-.Lcr:
- .quad 0x00 # place holder for cr0
-.Lwaitsclp:
- .quad 0x0102000180000000,.Lsclph
-.Lrcp:
- .int 0x00120001 # Read SCP forced code
-.Lrcp2:
- .int 0x00020001 # Read SCP code
-.Lonemb:
- .int 0x100000
-
-.Lfchunk:
-
-#
-# find memory chunks.
-#
- lgr %r9,%r3 # end of mem
- larl %r1,.Lchkmem # set program check address
- stg %r1,__LC_PGM_NEW_PSW+8
- la %r1,1 # test in increments of 128KB
- sllg %r1,%r1,17
- larl %r3,memory_chunk
- slgr %r4,%r4 # set start of chunk to zero
- slgr %r5,%r5 # set end of chunk to zero
- slr %r6,%r6 # set access code to zero
- la %r10,MEMORY_CHUNKS # number of chunks
-.Lloop:
- tprot 0(%r5),0 # test protection of first byte
- ipm %r7
- srl %r7,28
- clr %r6,%r7 # compare cc with last access code
- je .Lsame
- lghi %r8,0 # no program checks
- j .Lsavchk
-.Lsame:
- algr %r5,%r1 # add 128KB to end of chunk
- # no need to check here,
- brc 12,.Lloop # this is the same chunk
-.Lchkmem: # > 16EB or tprot got a program check
- lghi %r8,1 # set program check flag
-.Lsavchk:
- clgr %r4,%r5 # chunk size > 0?
- je .Lchkloop
- stg %r4,0(%r3) # store start address of chunk
- lgr %r0,%r5
- slgr %r0,%r4
- stg %r0,8(%r3) # store size of chunk
- st %r6,20(%r3) # store type of chunk
- la %r3,24(%r3)
- ahi %r10,-1 # update chunk number
-.Lchkloop:
- lr %r6,%r7 # set access code to last cc
- # we got an exception or we're starting a new
- # chunk , we must check if we should
- # still try to find valid memory (if we detected
- # the amount of available storage), and if we
- # have chunks left
- lghi %r4,1
- sllg %r4,%r4,31
- clgr %r5,%r4
- je .Lhsaskip
- xr %r0, %r0
- clgr %r0, %r9 # did we detect memory?
- je .Ldonemem # if not, leave
- chi %r10, 0 # do we have chunks left?
- je .Ldonemem
-.Lhsaskip:
- chi %r8,1 # program check ?
- je .Lpgmchk
- lgr %r4,%r5 # potential new chunk
- algr %r5,%r1 # add 128KB to end of chunk
- j .Llpcnt
-.Lpgmchk:
- algr %r5,%r1 # add 128KB to end of chunk
- lgr %r4,%r5 # potential new chunk
-.Llpcnt:
- clgr %r5,%r9 # should we go on?
- jl .Lloop
-.Ldonemem:
-
larl %r12,machine_flags
#
-# find out if we are running under VM
-#
- stidp __LC_CPUID # store cpuid
- tm __LC_CPUID,0xff # running under VM ?
- bno 0f-.LPG1(%r13)
- oi 7(%r12),1 # set VM flag
-0: lh %r0,__LC_CPUID+4 # get cpu version
- chi %r0,0x7490 # running on a P/390 ?
- bne 1f-.LPG1(%r13)
- oi 7(%r12),4 # set P/390 flag
-1:
-
-#
# find out if we have the MVPG instruction
#
la %r1,0f-.LPG1(%r13) # set program check address
@@ -336,25 +168,6 @@ ipl_devno:
.word 0
.org 0x12000
-.globl s390_readinfo_sccb
-s390_readinfo_sccb:
-.Lsccb:
- .hword 0x1000 # length, one page
- .byte 0x00,0x00,0x00
- .byte 0x80 # variable response bit set
-.Lsccbr:
- .hword 0x00 # response code
-.Lscpincr1:
- .hword 0x00
-.Lscpa1:
- .byte 0x00
- .fill 89,1,0
-.Lscpa2:
- .int 0x00
-.Lscpincr2:
- .quad 0x00
- .fill 3984,1,0
- .org 0x13000
#ifdef CONFIG_SHARED_KERNEL
.org 0x100000
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 9e9972e8a52b..052259530651 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -20,26 +20,27 @@
#include <asm/cio.h>
#include <asm/ebcdic.h>
#include <asm/reset.h>
+#include <asm/sclp.h>
#define IPL_PARM_BLOCK_VERSION 0
-#define LOADPARM_LEN 8
-extern char s390_readinfo_sccb[];
-#define SCCB_VALID (*((__u16*)&s390_readinfo_sccb[6]) == 0x0010)
-#define SCCB_LOADPARM (&s390_readinfo_sccb[24])
-#define SCCB_FLAG (s390_readinfo_sccb[91])
+#define SCCB_VALID (s390_readinfo_sccb.header.response_code == 0x10)
+#define SCCB_LOADPARM (&s390_readinfo_sccb.loadparm)
+#define SCCB_FLAG (s390_readinfo_sccb.flags)
enum ipl_type {
IPL_TYPE_NONE = 1,
IPL_TYPE_UNKNOWN = 2,
IPL_TYPE_CCW = 4,
IPL_TYPE_FCP = 8,
+ IPL_TYPE_NSS = 16,
};
#define IPL_NONE_STR "none"
#define IPL_UNKNOWN_STR "unknown"
#define IPL_CCW_STR "ccw"
#define IPL_FCP_STR "fcp"
+#define IPL_NSS_STR "nss"
static char *ipl_type_str(enum ipl_type type)
{
@@ -50,6 +51,8 @@ static char *ipl_type_str(enum ipl_type type)
return IPL_CCW_STR;
case IPL_TYPE_FCP:
return IPL_FCP_STR;
+ case IPL_TYPE_NSS:
+ return IPL_NSS_STR;
case IPL_TYPE_UNKNOWN:
default:
return IPL_UNKNOWN_STR;
@@ -64,6 +67,7 @@ enum ipl_method {
IPL_METHOD_FCP_RO_DIAG,
IPL_METHOD_FCP_RW_DIAG,
IPL_METHOD_FCP_RO_VM,
+ IPL_METHOD_NSS,
};
enum shutdown_action {
@@ -114,11 +118,14 @@ enum diag308_rc {
static int diag308_set_works = 0;
static int reipl_capabilities = IPL_TYPE_UNKNOWN;
+
static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
static enum ipl_method reipl_method = IPL_METHOD_NONE;
static struct ipl_parameter_block *reipl_block_fcp;
static struct ipl_parameter_block *reipl_block_ccw;
+static char reipl_nss_name[NSS_NAME_SIZE + 1];
+
static int dump_capabilities = IPL_TYPE_NONE;
static enum ipl_type dump_type = IPL_TYPE_NONE;
static enum ipl_method dump_method = IPL_METHOD_NONE;
@@ -173,6 +180,24 @@ static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
sys_##_prefix##_##_name##_show, \
sys_##_prefix##_##_name##_store);
+#define DEFINE_IPL_ATTR_STR_RW(_prefix, _name, _fmt_out, _fmt_in, _value)\
+static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \
+ char *page) \
+{ \
+ return sprintf(page, _fmt_out, _value); \
+} \
+static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\
+ const char *buf, size_t len) \
+{ \
+ if (sscanf(buf, _fmt_in, _value) != 1) \
+ return -EINVAL; \
+ return len; \
+} \
+static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
+ __ATTR(_name,(S_IRUGO | S_IWUSR), \
+ sys_##_prefix##_##_name##_show, \
+ sys_##_prefix##_##_name##_store);
+
static void make_attrs_ro(struct attribute **attrs)
{
while (*attrs) {
@@ -189,6 +214,8 @@ static enum ipl_type ipl_get_type(void)
{
struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
+ if (ipl_flags & IPL_NSS_VALID)
+ return IPL_TYPE_NSS;
if (!(ipl_flags & IPL_DEVNO_VALID))
return IPL_TYPE_UNKNOWN;
if (!(ipl_flags & IPL_PARMBLOCK_VALID))
@@ -324,6 +351,20 @@ static struct attribute_group ipl_ccw_attr_group = {
.attrs = ipl_ccw_attrs,
};
+/* NSS ipl device attributes */
+
+DEFINE_IPL_ATTR_RO(ipl_nss, name, "%s\n", kernel_nss_name);
+
+static struct attribute *ipl_nss_attrs[] = {
+ &sys_ipl_type_attr.attr,
+ &sys_ipl_nss_name_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ipl_nss_attr_group = {
+ .attrs = ipl_nss_attrs,
+};
+
/* UNKNOWN ipl device attributes */
static struct attribute *ipl_unknown_attrs[] = {
@@ -432,6 +473,21 @@ static struct attribute_group reipl_ccw_attr_group = {
.attrs = reipl_ccw_attrs,
};
+
+/* NSS reipl device attributes */
+
+DEFINE_IPL_ATTR_STR_RW(reipl_nss, name, "%s\n", "%s\n", reipl_nss_name);
+
+static struct attribute *reipl_nss_attrs[] = {
+ &sys_reipl_nss_name_attr.attr,
+ NULL,
+};
+
+static struct attribute_group reipl_nss_attr_group = {
+ .name = IPL_NSS_STR,
+ .attrs = reipl_nss_attrs,
+};
+
/* reipl type */
static int reipl_set_type(enum ipl_type type)
@@ -454,6 +510,9 @@ static int reipl_set_type(enum ipl_type type)
else
reipl_method = IPL_METHOD_FCP_RO_DIAG;
break;
+ case IPL_TYPE_NSS:
+ reipl_method = IPL_METHOD_NSS;
+ break;
default:
reipl_method = IPL_METHOD_NONE;
}
@@ -475,6 +534,8 @@ static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf,
rc = reipl_set_type(IPL_TYPE_CCW);
else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
rc = reipl_set_type(IPL_TYPE_FCP);
+ else if (strncmp(buf, IPL_NSS_STR, strlen(IPL_NSS_STR)) == 0)
+ rc = reipl_set_type(IPL_TYPE_NSS);
return (rc != 0) ? rc : len;
}
@@ -647,6 +708,10 @@ void do_reipl(void)
case IPL_METHOD_FCP_RO_VM:
__cpcmd("IPL", NULL, 0, NULL);
break;
+ case IPL_METHOD_NSS:
+ sprintf(buf, "IPL %s", reipl_nss_name);
+ __cpcmd(buf, NULL, 0, NULL);
+ break;
case IPL_METHOD_NONE:
default:
if (MACHINE_IS_VM)
@@ -733,6 +798,10 @@ static int __init ipl_init(void)
case IPL_TYPE_FCP:
rc = ipl_register_fcp_files();
break;
+ case IPL_TYPE_NSS:
+ rc = sysfs_create_group(&ipl_subsys.kset.kobj,
+ &ipl_nss_attr_group);
+ break;
default:
rc = sysfs_create_group(&ipl_subsys.kset.kobj,
&ipl_unknown_attr_group);
@@ -755,6 +824,20 @@ static void __init reipl_probe(void)
free_page((unsigned long)buffer);
}
+static int __init reipl_nss_init(void)
+{
+ int rc;
+
+ if (!MACHINE_IS_VM)
+ return 0;
+ rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_nss_attr_group);
+ if (rc)
+ return rc;
+ strncpy(reipl_nss_name, kernel_nss_name, NSS_NAME_SIZE + 1);
+ reipl_capabilities |= IPL_TYPE_NSS;
+ return 0;
+}
+
static int __init reipl_ccw_init(void)
{
int rc;
@@ -837,6 +920,9 @@ static int __init reipl_init(void)
rc = reipl_fcp_init();
if (rc)
return rc;
+ rc = reipl_nss_init();
+ if (rc)
+ return rc;
rc = reipl_set_type(ipl_get_type());
if (rc)
return rc;
@@ -993,8 +1079,6 @@ static void do_reset_calls(void)
reset->fn();
}
-extern void reset_mcck_handler(void);
-extern void reset_pgm_handler(void);
extern __u32 dump_prefix_page;
void s390_reset_system(void)
@@ -1016,14 +1100,14 @@ void s390_reset_system(void)
__ctl_clear_bit(0,28);
/* Set new machine check handler */
- S390_lowcore.mcck_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK;
+ S390_lowcore.mcck_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
S390_lowcore.mcck_new_psw.addr =
- PSW_ADDR_AMODE | (unsigned long) &reset_mcck_handler;
+ PSW_ADDR_AMODE | (unsigned long) s390_base_mcck_handler;
/* Set new program check handler */
- S390_lowcore.program_new_psw.mask = PSW_KERNEL_BITS & ~PSW_MASK_MCHECK;
+ S390_lowcore.program_new_psw.mask = psw_kernel_bits & ~PSW_MASK_MCHECK;
S390_lowcore.program_new_psw.addr =
- PSW_ADDR_AMODE | (unsigned long) &reset_pgm_handler;
+ PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler;
do_reset_calls();
}
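For reference, the DEFINE_IPL_ATTR_STR_RW() macro added above expands, for the reipl_nss "name" attribute used later in this file, to roughly the following (hand expansion for illustration, not part of the patch):

	static ssize_t sys_reipl_nss_name_show(struct subsystem *subsys,
					       char *page)
	{
		return sprintf(page, "%s\n", reipl_nss_name);
	}

	static ssize_t sys_reipl_nss_name_store(struct subsystem *subsys,
						const char *buf, size_t len)
	{
		if (sscanf(buf, "%s\n", reipl_nss_name) != 1)
			return -EINVAL;
		return len;
	}

	static struct subsys_attribute sys_reipl_nss_name_attr =
		__ATTR(name, (S_IRUGO | S_IWUSR),
		       sys_reipl_nss_name_show, sys_reipl_nss_name_store);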
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1eef50918615..8f0cbca31203 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -1,9 +1,9 @@
/*
* arch/s390/kernel/irq.c
*
- * S390 version
- * Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 2004,2007
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Thomas Spatzier (tspat@de.ibm.com)
*
* This file contains interrupt related functions.
*/
@@ -14,6 +14,8 @@
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
+#include <linux/proc_fs.h>
+#include <linux/profile.h>
/*
* show_interrupts is needed by /proc/interrupts.
@@ -93,5 +95,12 @@ asmlinkage void do_softirq(void)
local_irq_restore(flags);
}
-
EXPORT_SYMBOL(do_softirq);
+
+void init_irq_proc(void)
+{
+ struct proc_dir_entry *root_irq_dir;
+
+ root_irq_dir = proc_mkdir("irq", NULL);
+ create_prof_cpu_mask(root_irq_dir);
+}
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 576368c4f605..a466bab6677e 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -155,15 +155,34 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
static int __kprobes swap_instruction(void *aref)
{
struct ins_replace_args *args = aref;
+ u32 *addr;
+ u32 instr;
int err = -EFAULT;
+ /*
+ * Text segment is read-only, hence we use stura to bypass dynamic
+ * address translation to exchange the instruction. Since stura
+ * always operates on four bytes, but we only want to exchange two
+ * bytes do some calculations to get things right. In addition we
+ * shall not cross any page boundaries (vmalloc area!) when writing
+ * the new instruction.
+ */
+ addr = (u32 *)ALIGN((unsigned long)args->ptr, 4);
+ if ((unsigned long)args->ptr & 2)
+ instr = ((*addr) & 0xffff0000) | args->new;
+ else
+ instr = ((*addr) & 0x0000ffff) | args->new << 16;
+
asm volatile(
- "0: mvc 0(2,%2),0(%3)\n"
- "1: la %0,0\n"
+ " lra %1,0(%1)\n"
+ "0: stura %2,%1\n"
+ "1: la %0,0\n"
"2:\n"
EX_TABLE(0b,2b)
- : "+d" (err), "=m" (*args->ptr)
- : "a" (args->ptr), "a" (&args->new), "m" (args->new));
+ : "+d" (err)
+ : "a" (addr), "d" (instr)
+ : "memory", "cc");
+
return err;
}
@@ -356,7 +375,7 @@ no_kprobe:
* - When the probed function returns, this probe
* causes the handlers to fire
*/
-void __kprobes kretprobe_trampoline_holder(void)
+void kretprobe_trampoline_holder(void)
{
asm volatile(".global kretprobe_trampoline\n"
"kretprobe_trampoline: bcr 0,0\n");
@@ -365,7 +384,8 @@ void __kprobes kretprobe_trampoline_holder(void)
/*
* Called when the probe at kretprobe trampoline is hit
*/
-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
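The swap_instruction() rework above hinges on one detail: stura stores a full 4-byte aligned word, while a breakpoint opcode is only two bytes, so the new opcode has to be spliced into the containing word first. A standalone sketch of just that merge (simplified; the lra/stura store itself is omitted):

	static u32 splice_opcode(u32 *word, unsigned long insn_addr, u16 opcode)
	{
		/* Big-endian word: an instruction at offset 2 occupies the
		 * low halfword, an aligned one the high halfword. */
		if (insn_addr & 2)
			return (*word & 0xffff0000) | opcode;
		return (*word & 0x0000ffff) | ((u32) opcode << 16);
	}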
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index f6d9bcc0f75b..52f57af252b4 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -11,6 +11,7 @@
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/delay.h>
+#include <linux/reboot.h>
#include <asm/cio.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index d989ed45a7aa..39d1dd752529 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -30,6 +30,7 @@
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
+#include <linux/moduleloader.h>
#if 0
#define DEBUGP printk
@@ -58,7 +59,7 @@ void module_free(struct module *mod, void *module_region)
table entries. */
}
-static inline void
+static void
check_rela(Elf_Rela *rela, struct module *me)
{
struct mod_arch_syminfo *info;
@@ -181,7 +182,7 @@ apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex,
return -ENOEXEC;
}
-static inline int
+static int
apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
struct module *me)
{
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 6603fbb41d07..5acfac654f9d 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -144,7 +144,7 @@ static void default_idle(void)
trace_hardirqs_on();
/* Wait for external, I/O or machine check interrupt. */
- __load_psw_mask(PSW_KERNEL_BITS | PSW_MASK_WAIT |
+ __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
PSW_MASK_IO | PSW_MASK_EXT);
}
@@ -190,7 +190,7 @@ int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
struct pt_regs regs;
memset(&regs, 0, sizeof(regs));
- regs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
+ regs.psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
regs.psw.addr = (unsigned long) kernel_thread_starter | PSW_ADDR_AMODE;
regs.gprs[9] = (unsigned long) fn;
regs.gprs[10] = (unsigned long) arg;
diff --git a/arch/s390/kernel/profile.c b/arch/s390/kernel/profile.c
deleted file mode 100644
index b81aa1f569ca..000000000000
--- a/arch/s390/kernel/profile.c
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * arch/s390/kernel/profile.c
- *
- * Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Author(s): Thomas Spatzier (tspat@de.ibm.com)
- *
- */
-#include <linux/proc_fs.h>
-#include <linux/profile.h>
-
-static struct proc_dir_entry * root_irq_dir;
-
-void init_irq_proc(void)
-{
- /* create /proc/irq */
- root_irq_dir = proc_mkdir("irq", NULL);
-
- /* create /proc/irq/prof_cpu_mask */
- create_prof_cpu_mask(root_irq_dir);
-}
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 8f36504075ed..2a8f0872ea8b 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -86,15 +86,13 @@ FixPerRegisters(struct task_struct *task)
per_info->control_regs.bits.storage_alt_space_ctl = 0;
}
-void
-set_single_step(struct task_struct *task)
+static void set_single_step(struct task_struct *task)
{
task->thread.per_info.single_step = 1;
FixPerRegisters(task);
}
-void
-clear_single_step(struct task_struct *task)
+static void clear_single_step(struct task_struct *task)
{
task->thread.per_info.single_step = 0;
FixPerRegisters(task);
@@ -232,9 +230,9 @@ poke_user(struct task_struct *child, addr_t addr, addr_t data)
*/
if (addr == (addr_t) &dummy->regs.psw.mask &&
#ifdef CONFIG_COMPAT
- data != PSW_MASK_MERGE(PSW_USER32_BITS, data) &&
+ data != PSW_MASK_MERGE(psw_user32_bits, data) &&
#endif
- data != PSW_MASK_MERGE(PSW_USER_BITS, data))
+ data != PSW_MASK_MERGE(psw_user_bits, data))
/* Invalid psw mask. */
return -EINVAL;
#ifndef CONFIG_64BIT
@@ -309,7 +307,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
if (copied != sizeof(tmp))
return -EIO;
- return put_user(tmp, (unsigned long __user *) data);
+ return put_user(tmp, (unsigned long __force __user *) data);
case PTRACE_PEEKUSR:
/* read the word at location addr in the USER area. */
@@ -331,7 +329,7 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
case PTRACE_PEEKUSR_AREA:
case PTRACE_POKEUSR_AREA:
- if (copy_from_user(&parea, (void __user *) addr,
+ if (copy_from_user(&parea, (void __force __user *) addr,
sizeof(parea)))
return -EFAULT;
addr = parea.kernel_addr;
@@ -341,10 +339,11 @@ do_ptrace_normal(struct task_struct *child, long request, long addr, long data)
if (request == PTRACE_PEEKUSR_AREA)
ret = peek_user(child, addr, data);
else {
- addr_t tmp;
- if (get_user (tmp, (addr_t __user *) data))
+ addr_t utmp;
+ if (get_user(utmp,
+ (addr_t __force __user *) data))
return -EFAULT;
- ret = poke_user(child, addr, tmp);
+ ret = poke_user(child, addr, utmp);
}
if (ret)
return ret;
@@ -394,7 +393,7 @@ peek_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
if (addr == (addr_t) &dummy32->regs.psw.mask) {
/* Fake a 31 bit psw mask. */
tmp = (__u32)(task_pt_regs(child)->psw.mask >> 32);
- tmp = PSW32_MASK_MERGE(PSW32_USER_BITS, tmp);
+ tmp = PSW32_MASK_MERGE(psw32_user_bits, tmp);
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
/* Fake a 31 bit psw address. */
tmp = (__u32) task_pt_regs(child)->psw.addr |
@@ -469,11 +468,11 @@ poke_user_emu31(struct task_struct *child, addr_t addr, addr_t data)
*/
if (addr == (addr_t) &dummy32->regs.psw.mask) {
/* Build a 64 bit psw mask from 31 bit mask. */
- if (tmp != PSW32_MASK_MERGE(PSW32_USER_BITS, tmp))
+ if (tmp != PSW32_MASK_MERGE(psw32_user_bits, tmp))
/* Invalid psw mask. */
return -EINVAL;
task_pt_regs(child)->psw.mask =
- PSW_MASK_MERGE(PSW_USER32_BITS, (__u64) tmp << 32);
+ PSW_MASK_MERGE(psw_user32_bits, (__u64) tmp << 32);
} else if (addr == (addr_t) &dummy32->regs.psw.addr) {
/* Build a 64 bit psw address from 31 bit address. */
task_pt_regs(child)->psw.addr =
@@ -550,7 +549,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
if (copied != sizeof(tmp))
return -EIO;
- return put_user(tmp, (unsigned int __user *) data);
+ return put_user(tmp, (unsigned int __force __user *) data);
case PTRACE_PEEKUSR:
/* read the word at location addr in the USER area. */
@@ -571,7 +570,7 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
case PTRACE_PEEKUSR_AREA:
case PTRACE_POKEUSR_AREA:
- if (copy_from_user(&parea, (void __user *) addr,
+ if (copy_from_user(&parea, (void __force __user *) addr,
sizeof(parea)))
return -EFAULT;
addr = parea.kernel_addr;
@@ -581,10 +580,11 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
if (request == PTRACE_PEEKUSR_AREA)
ret = peek_user_emu31(child, addr, data);
else {
- __u32 tmp;
- if (get_user (tmp, (__u32 __user *) data))
+ __u32 utmp;
+ if (get_user(utmp,
+ (__u32 __force __user *) data))
return -EFAULT;
- ret = poke_user_emu31(child, addr, tmp);
+ ret = poke_user_emu31(child, addr, utmp);
}
if (ret)
return ret;
@@ -595,17 +595,19 @@ do_ptrace_emu31(struct task_struct *child, long request, long addr, long data)
return 0;
case PTRACE_GETEVENTMSG:
return put_user((__u32) child->ptrace_message,
- (unsigned int __user *) data);
+ (unsigned int __force __user *) data);
case PTRACE_GETSIGINFO:
if (child->last_siginfo == NULL)
return -EINVAL;
- return copy_siginfo_to_user32((compat_siginfo_t __user *) data,
+ return copy_siginfo_to_user32((compat_siginfo_t
+ __force __user *) data,
child->last_siginfo);
case PTRACE_SETSIGINFO:
if (child->last_siginfo == NULL)
return -EINVAL;
return copy_siginfo_from_user32(child->last_siginfo,
- (compat_siginfo_t __user *) data);
+ (compat_siginfo_t
+ __force __user *) data);
}
return ptrace_request(child, request, addr, data);
}
diff --git a/arch/s390/kernel/reset.S b/arch/s390/kernel/reset.S
deleted file mode 100644
index 8a87355161fa..000000000000
--- a/arch/s390/kernel/reset.S
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * arch/s390/kernel/reset.S
- *
- * Copyright (C) IBM Corp. 2006
- * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
- * Michael Holzheu <holzheu@de.ibm.com>
- */
-
-#include <asm/ptrace.h>
-#include <asm/lowcore.h>
-
-#ifdef CONFIG_64BIT
-
- .globl reset_mcck_handler
-reset_mcck_handler:
- basr %r13,0
-0: lg %r15,__LC_PANIC_STACK # load panic stack
- aghi %r15,-STACK_FRAME_OVERHEAD
- lg %r1,s390_reset_mcck_handler-0b(%r13)
- ltgr %r1,%r1
- jz 1f
- basr %r14,%r1
-1: la %r1,4095
- lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)
- lpswe __LC_MCK_OLD_PSW
-
- .globl s390_reset_mcck_handler
-s390_reset_mcck_handler:
- .quad 0
-
- .globl reset_pgm_handler
-reset_pgm_handler:
- stmg %r0,%r15,__LC_SAVE_AREA
- basr %r13,0
-0: lg %r15,__LC_PANIC_STACK # load panic stack
- aghi %r15,-STACK_FRAME_OVERHEAD
- lg %r1,s390_reset_pgm_handler-0b(%r13)
- ltgr %r1,%r1
- jz 1f
- basr %r14,%r1
- lmg %r0,%r15,__LC_SAVE_AREA
- lpswe __LC_PGM_OLD_PSW
-1: lpswe disabled_wait_psw-0b(%r13)
- .globl s390_reset_pgm_handler
-s390_reset_pgm_handler:
- .quad 0
- .align 8
-disabled_wait_psw:
- .quad 0x0002000180000000,0x0000000000000000 + reset_pgm_handler
-
-#else /* CONFIG_64BIT */
-
- .globl reset_mcck_handler
-reset_mcck_handler:
- basr %r13,0
-0: l %r15,__LC_PANIC_STACK # load panic stack
- ahi %r15,-STACK_FRAME_OVERHEAD
- l %r1,s390_reset_mcck_handler-0b(%r13)
- ltr %r1,%r1
- jz 1f
- basr %r14,%r1
-1: lm %r0,%r15,__LC_GPREGS_SAVE_AREA
- lpsw __LC_MCK_OLD_PSW
-
- .globl s390_reset_mcck_handler
-s390_reset_mcck_handler:
- .long 0
-
- .globl reset_pgm_handler
-reset_pgm_handler:
- stm %r0,%r15,__LC_SAVE_AREA
- basr %r13,0
-0: l %r15,__LC_PANIC_STACK # load panic stack
- ahi %r15,-STACK_FRAME_OVERHEAD
- l %r1,s390_reset_pgm_handler-0b(%r13)
- ltr %r1,%r1
- jz 1f
- basr %r14,%r1
- lm %r0,%r15,__LC_SAVE_AREA
- lpsw __LC_PGM_OLD_PSW
-
-1: lpsw disabled_wait_psw-0b(%r13)
- .globl s390_reset_pgm_handler
-s390_reset_pgm_handler:
- .long 0
-disabled_wait_psw:
- .align 8
- .long 0x000a0000,0x00000000 + reset_pgm_handler
-
-#endif /* CONFIG_64BIT */
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index bc5beaa8f98e..acf93dba7727 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -125,14 +125,12 @@ void do_extint(struct pt_regs *regs, unsigned short code)
* Make sure that the i/o interrupt did not "overtake"
* the last HZ timer interrupt.
*/
- account_ticks();
+ account_ticks(S390_lowcore.int_clock);
kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
index = ext_hash(code);
for (p = ext_int_hash[index]; p; p = p->next) {
- if (likely(p->code == code)) {
- if (likely(p->handler))
- p->handler(code);
- }
+ if (likely(p->code == code))
+ p->handler(code);
}
irq_exit();
set_irq_regs(old_regs);
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 5d8ee3baac14..50c5210fbc64 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -38,6 +38,8 @@
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pfn.h>
+#include <linux/ctype.h>
+#include <linux/reboot.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -49,6 +51,14 @@
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
+#include <asm/ebcdic.h>
+#include <asm/compat.h>
+
+long psw_kernel_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY |
+ PSW_MASK_MCHECK | PSW_DEFAULT_KEY);
+long psw_user_bits = (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
+ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+ PSW_MASK_PSTATE | PSW_DEFAULT_KEY);
/*
* User copy operations.
@@ -117,9 +127,9 @@ void __devinit cpu_init (void)
*/
char vmhalt_cmd[128] = "";
char vmpoff_cmd[128] = "";
-char vmpanic_cmd[128] = "";
+static char vmpanic_cmd[128] = "";
-static inline void strncpy_skip_quote(char *dst, char *src, int n)
+static void strncpy_skip_quote(char *dst, char *src, int n)
{
int sx, dx;
@@ -275,10 +285,6 @@ static void __init conmode_default(void)
}
#ifdef CONFIG_SMP
-extern void machine_restart_smp(char *);
-extern void machine_halt_smp(void);
-extern void machine_power_off_smp(void);
-
void (*_machine_restart)(char *command) = machine_restart_smp;
void (*_machine_halt)(void) = machine_halt_smp;
void (*_machine_power_off)(void) = machine_power_off_smp;
@@ -386,6 +392,84 @@ static int __init early_parse_ipldelay(char *p)
}
early_param("ipldelay", early_parse_ipldelay);
+#ifdef CONFIG_S390_SWITCH_AMODE
+unsigned int switch_amode = 0;
+EXPORT_SYMBOL_GPL(switch_amode);
+
+static void set_amode_and_uaccess(unsigned long user_amode,
+ unsigned long user32_amode)
+{
+ psw_user_bits = PSW_BASE_BITS | PSW_MASK_DAT | user_amode |
+ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+ PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
+#ifdef CONFIG_COMPAT
+ psw_user32_bits = PSW_BASE32_BITS | PSW_MASK_DAT | user_amode |
+ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK |
+ PSW_MASK_PSTATE | PSW_DEFAULT_KEY;
+ psw32_user_bits = PSW32_BASE_BITS | PSW32_MASK_DAT | user32_amode |
+ PSW32_MASK_IO | PSW32_MASK_EXT | PSW32_MASK_MCHECK |
+ PSW32_MASK_PSTATE;
+#endif
+ psw_kernel_bits = PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME |
+ PSW_MASK_MCHECK | PSW_DEFAULT_KEY;
+
+ if (MACHINE_HAS_MVCOS) {
+ printk("mvcos available.\n");
+ memcpy(&uaccess, &uaccess_mvcos_switch, sizeof(uaccess));
+ } else {
+ printk("mvcos not available.\n");
+ memcpy(&uaccess, &uaccess_pt, sizeof(uaccess));
+ }
+}
+
+/*
+ * Switch kernel/user addressing modes?
+ */
+static int __init early_parse_switch_amode(char *p)
+{
+ switch_amode = 1;
+ return 0;
+}
+early_param("switch_amode", early_parse_switch_amode);
+
+#else /* CONFIG_S390_SWITCH_AMODE */
+static inline void set_amode_and_uaccess(unsigned long user_amode,
+ unsigned long user32_amode)
+{
+}
+#endif /* CONFIG_S390_SWITCH_AMODE */
+
+#ifdef CONFIG_S390_EXEC_PROTECT
+unsigned int s390_noexec = 0;
+EXPORT_SYMBOL_GPL(s390_noexec);
+
+/*
+ * Enable execute protection?
+ */
+static int __init early_parse_noexec(char *p)
+{
+ if (!strncmp(p, "off", 3))
+ return 0;
+ switch_amode = 1;
+ s390_noexec = 1;
+ return 0;
+}
+early_param("noexec", early_parse_noexec);
+#endif /* CONFIG_S390_EXEC_PROTECT */
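Both options are consumed by the early parameter parser, so they take effect before the addressing mode and uaccess methods are chosen. As a hypothetical usage example, an IPL command line of

    root=/dev/dasda1 noexec

enables execute protection and, because early_parse_noexec() also sets switch_amode, implicitly switches the kernel/user address spaces as well; "noexec=off" leaves both flags untouched.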
+
+static void setup_addressing_mode(void)
+{
+ if (s390_noexec) {
+ printk("S390 execute protection active, ");
+ set_amode_and_uaccess(PSW_ASC_SECONDARY, PSW32_ASC_SECONDARY);
+ return;
+ }
+ if (switch_amode) {
+ printk("S390 address spaces switched, ");
+ set_amode_and_uaccess(PSW_ASC_PRIMARY, PSW32_ASC_PRIMARY);
+ }
+}
+
static void __init
setup_lowcore(void)
{
@@ -402,19 +486,21 @@ setup_lowcore(void)
lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY;
lc->restart_psw.addr =
PSW_ADDR_AMODE | (unsigned long) restart_int_handler;
- lc->external_new_psw.mask = PSW_KERNEL_BITS;
+ if (switch_amode)
+ lc->restart_psw.mask |= PSW_ASC_HOME;
+ lc->external_new_psw.mask = psw_kernel_bits;
lc->external_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) ext_int_handler;
- lc->svc_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_IO | PSW_MASK_EXT;
+ lc->svc_new_psw.mask = psw_kernel_bits | PSW_MASK_IO | PSW_MASK_EXT;
lc->svc_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) system_call;
- lc->program_new_psw.mask = PSW_KERNEL_BITS;
+ lc->program_new_psw.mask = psw_kernel_bits;
lc->program_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long)pgm_check_handler;
lc->mcck_new_psw.mask =
- PSW_KERNEL_BITS & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
+ psw_kernel_bits & ~PSW_MASK_MCHECK & ~PSW_MASK_DAT;
lc->mcck_new_psw.addr =
PSW_ADDR_AMODE | (unsigned long) mcck_int_handler;
- lc->io_new_psw.mask = PSW_KERNEL_BITS;
+ lc->io_new_psw.mask = psw_kernel_bits;
lc->io_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) io_int_handler;
lc->ipl_device = S390_lowcore.ipl_device;
lc->jiffy_timer = -1LL;
@@ -439,7 +525,7 @@ setup_lowcore(void)
static void __init
setup_resources(void)
{
- struct resource *res;
+ struct resource *res, *sub_res;
int i;
code_resource.start = (unsigned long) &_text;
@@ -464,8 +550,38 @@ setup_resources(void)
res->start = memory_chunk[i].addr;
res->end = memory_chunk[i].addr + memory_chunk[i].size - 1;
request_resource(&iomem_resource, res);
- request_resource(res, &code_resource);
- request_resource(res, &data_resource);
+
+ if (code_resource.start >= res->start &&
+ code_resource.start <= res->end &&
+ code_resource.end > res->end) {
+ sub_res = alloc_bootmem_low(sizeof(struct resource));
+ memcpy(sub_res, &code_resource,
+ sizeof(struct resource));
+ sub_res->end = res->end;
+ code_resource.start = res->end + 1;
+ request_resource(res, sub_res);
+ }
+
+ if (code_resource.start >= res->start &&
+ code_resource.start <= res->end &&
+ code_resource.end <= res->end)
+ request_resource(res, &code_resource);
+
+ if (data_resource.start >= res->start &&
+ data_resource.start <= res->end &&
+ data_resource.end > res->end) {
+ sub_res = alloc_bootmem_low(sizeof(struct resource));
+ memcpy(sub_res, &data_resource,
+ sizeof(struct resource));
+ sub_res->end = res->end;
+ data_resource.start = res->end + 1;
+ request_resource(res, sub_res);
+ }
+
+ if (data_resource.start >= res->start &&
+ data_resource.start <= res->end &&
+ data_resource.end <= res->end)
+ request_resource(res, &data_resource);
}
}
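The interval splitting above is easier to follow with concrete numbers. Below is a minimal user-space sketch of the same clipping logic, with hypothetical chunk and resource ranges (plain structs and printf stand in for struct resource, request_resource and alloc_bootmem_low; this is not kernel code):

    #include <stdio.h>

    struct range { unsigned long start, end; };

    int main(void)
    {
    	struct range chunk = { 0, (256UL << 20) - 1 };	/* one memory chunk */
    	struct range code = { 1UL << 20, 260UL << 20 };	/* straddles its end */

    	if (code.start >= chunk.start && code.start <= chunk.end &&
    	    code.end > chunk.end) {
    		/* Clip a sub-resource to the chunk... */
    		struct range sub = { code.start, chunk.end };
    		/* ...and carry the remainder over to the next chunk. */
    		code.start = chunk.end + 1;
    		printf("sub-resource: %#lx-%#lx\n", sub.start, sub.end);
    	}
    	printf("carry-over starts at %#lx\n", code.start);
    	return 0;
    }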
@@ -495,16 +611,13 @@ static void __init setup_memory_end(void)
}
if (!memory_end)
memory_end = memory_size;
- if (real_size > memory_end)
- printk("More memory detected than supported. Unused: %luk\n",
- (real_size - memory_end) >> 10);
}
static void __init
setup_memory(void)
{
unsigned long bootmap_size;
- unsigned long start_pfn, end_pfn, init_pfn;
+ unsigned long start_pfn, end_pfn;
int i;
/*
@@ -514,10 +627,6 @@ setup_memory(void)
start_pfn = PFN_UP(__pa(&_end));
end_pfn = max_pfn = PFN_DOWN(memory_end);
- /* Initialize storage key for kernel pages */
- for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++)
- page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
-
#ifdef CONFIG_BLK_DEV_INITRD
/*
* Move the initrd in case the bitmap of the bootmem allocater
@@ -631,7 +740,7 @@ setup_arch(char **cmdline_p)
#endif /* CONFIG_64BIT */
/* Save unparsed command line copy for /proc/cmdline */
- strlcpy(saved_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
+ strlcpy(boot_command_line, COMMAND_LINE, COMMAND_LINE_SIZE);
*cmdline_p = COMMAND_LINE;
*(*cmdline_p + COMMAND_LINE_SIZE - 1) = '\0';
@@ -651,6 +760,7 @@ setup_arch(char **cmdline_p)
parse_early_param();
setup_memory_end();
+ setup_addressing_mode();
setup_memory();
setup_resources();
setup_lowcore();
@@ -694,6 +804,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
struct cpuinfo_S390 *cpuinfo;
unsigned long n = (unsigned long) v - 1;
+ s390_adjust_jiffies();
preempt_disable();
if (!n) {
seq_printf(m, "vendor_id : IBM/S390\n"
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 4c8a7954ef48..554f9cf7499c 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -119,7 +119,7 @@ static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
/* Copy a 'clean' PSW mask to the user to avoid leaking
information about whether PER is currently on. */
- user_sregs.regs.psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask);
+ user_sregs.regs.psw.mask = PSW_MASK_MERGE(psw_user_bits, regs->psw.mask);
user_sregs.regs.psw.addr = regs->psw.addr;
memcpy(&user_sregs.regs.gprs, &regs->gprs, sizeof(sregs->regs.gprs));
memcpy(&user_sregs.regs.acrs, current->thread.acrs,
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index c0cd255fddbd..83a4ea6e3d60 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -22,23 +22,23 @@
#include <linux/module.h>
#include <linux/init.h>
-
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/smp_lock.h>
-
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
-
+#include <linux/timex.h>
+#include <asm/setup.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
#include <asm/irq.h>
#include <asm/s390_ext.h>
#include <asm/cpcmd.h>
#include <asm/tlbflush.h>
+#include <asm/timer.h>
extern volatile int __cpu_logical_map[];
@@ -53,17 +53,11 @@ cpumask_t cpu_possible_map = CPU_MASK_NONE;
static struct task_struct *current_set[NR_CPUS];
-/*
- * Reboot, halt and power_off routines for SMP.
- */
-extern char vmhalt_cmd[];
-extern char vmpoff_cmd[];
-
static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);
/*
-5B * Structure and data for smp_call_function(). This is designed to minimise
+ * Structure and data for smp_call_function(). This is designed to minimise
* static memory requirements. It also looks cleaner.
*/
static DEFINE_SPINLOCK(call_lock);
@@ -110,7 +104,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
* remote CPUs are nearly ready to execute <<func>> or are or have executed.
*
* You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
+ * hardware interrupt handler.
*/
{
struct call_data_struct data;
@@ -119,8 +113,8 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
if (cpus <= 0)
return 0;
- /* Can deadlock when called with interrupts disabled */
- WARN_ON(irqs_disabled());
+ /* Can deadlock when interrupts are disabled or if in wrong context */
+ WARN_ON(irqs_disabled() || in_irq());
data.func = func;
data.info = info;
@@ -129,7 +123,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
if (wait)
atomic_set(&data.finished, 0);
- spin_lock(&call_lock);
+ spin_lock_bh(&call_lock);
call_data = &data;
/* Send a message to all other CPUs and wait for them to respond */
smp_ext_bitcall_others(ec_call_function);
@@ -141,7 +135,7 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
if (wait)
while (atomic_read(&data.finished) != cpus)
cpu_relax();
- spin_unlock(&call_lock);
+ spin_unlock_bh(&call_lock);
return 0;
}
@@ -165,6 +159,9 @@ int smp_call_function_on(void (*func) (void *info), void *info,
if (!cpu_online(cpu))
return -EINVAL;
+ /* Can deadlock when interrupts are disabled or if in wrong context */
+ WARN_ON(irqs_disabled() || in_irq());
+
/* disable preemption for local function call */
curr_cpu = get_cpu();
@@ -200,7 +197,7 @@ int smp_call_function_on(void (*func) (void *info), void *info,
}
EXPORT_SYMBOL(smp_call_function_on);
-static inline void do_send_stop(void)
+static void do_send_stop(void)
{
int cpu, rc;
@@ -214,7 +211,7 @@ static inline void do_send_stop(void)
}
}
-static inline void do_store_status(void)
+static void do_store_status(void)
{
int cpu, rc;
@@ -230,7 +227,7 @@ static inline void do_store_status(void)
}
}
-static inline void do_wait_for_stop(void)
+static void do_wait_for_stop(void)
{
int cpu;
@@ -250,7 +247,7 @@ static inline void do_wait_for_stop(void)
void smp_send_stop(void)
{
/* Disable all interrupts/machine checks */
- __load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
+ __load_psw_mask(psw_kernel_bits & ~PSW_MASK_MCHECK);
/* write magic number to zero page (absolute 0) */
lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
@@ -298,7 +295,7 @@ void machine_power_off_smp(void)
* cpus are handled.
*/
-void do_ext_call_interrupt(__u16 code)
+static void do_ext_call_interrupt(__u16 code)
{
unsigned long bits;
@@ -385,7 +382,7 @@ struct ec_creg_mask_parms {
/*
* callback for setting/clearing control bits
*/
-void smp_ctl_bit_callback(void *info) {
+static void smp_ctl_bit_callback(void *info) {
struct ec_creg_mask_parms *pp = info;
unsigned long cregs[16];
int i;
@@ -458,17 +455,15 @@ __init smp_count_cpus(void)
/*
* Activate a secondary processor.
*/
-extern void init_cpu_timer(void);
-extern void init_cpu_vtimer(void);
-
int __devinit start_secondary(void *cpuvoid)
{
/* Setup the cpu */
cpu_init();
preempt_disable();
- /* init per CPU timer */
+ /* Enable TOD clock interrupts on the secondary cpu. */
init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
+ /* Enable cpu timer interrupts on the secondary cpu. */
init_cpu_vtimer();
#endif
/* Enable pfault pseudo page faults on this cpu. */
@@ -542,7 +537,7 @@ smp_put_cpu(int cpu)
spin_unlock_irqrestore(&smp_reserve_lock, flags);
}
-static inline int
+static int
cpu_stopped(int cpu)
{
__u32 status;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 0d14a4789bf2..2e5c65a1863e 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -11,11 +11,11 @@
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
-static inline unsigned long save_context_stack(struct stack_trace *trace,
- unsigned int *skip,
- unsigned long sp,
- unsigned long low,
- unsigned long high)
+static unsigned long save_context_stack(struct stack_trace *trace,
+ unsigned int *skip,
+ unsigned long sp,
+ unsigned long low,
+ unsigned long high)
{
struct stack_frame *sf;
struct pt_regs *regs;
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index a4ceae3dbcf1..a52c44455bf0 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -124,7 +124,7 @@ NI_SYSCALL /* old "idle" system call */
NI_SYSCALL /* vm86old for i386 */
SYSCALL(sys_wait4,sys_wait4,compat_sys_wait4_wrapper)
SYSCALL(sys_swapoff,sys_swapoff,sys32_swapoff_wrapper) /* 115 */
-SYSCALL(sys_sysinfo,sys_sysinfo,sys32_sysinfo_wrapper)
+SYSCALL(sys_sysinfo,sys_sysinfo,compat_sys_sysinfo_wrapper)
SYSCALL(sys_ipc,sys_ipc,sys32_ipc_wrapper)
SYSCALL(sys_fsync,sys_fsync,sys32_fsync_wrapper)
SYSCALL(sys_sigreturn_glue,sys_sigreturn_glue,sys32_sigreturn_glue)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 6cceed4df73e..ee9fd7b85928 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -37,11 +37,15 @@
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/timer.h>
+#include <asm/etr.h>
/* change this if you have some constant time drift */
#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
#define CLK_TICKS_PER_JIFFY ((unsigned long) USECS_PER_JIFFY << 12)
+/* The value of the TOD clock for 1.1.1970. */
+#define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
+
/*
* Create a small time difference between the timer interrupts
* on the different cpus to avoid lock contention.
@@ -51,6 +55,7 @@
#define TICK_SIZE tick
static ext_int_info_t ext_int_info_cc;
+static ext_int_info_t ext_int_etr_cc;
static u64 init_timer_cc;
static u64 jiffies_timer_cc;
static u64 xtime_cc;
@@ -89,29 +94,21 @@ void tod_to_timeval(__u64 todval, struct timespec *xtime)
#define s390_do_profile() do { ; } while(0)
#endif /* CONFIG_PROFILING */
-
/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
+ * Advance the per cpu tick counter up to the time given with the
+ * "time" argument. The per cpu update consists of accounting
+ * the virtual cpu time, calling update_process_times and calling
+ * the profiling hook. If xtime is before time it is advanced as well.
*/
-void account_ticks(void)
+void account_ticks(u64 time)
{
- __u64 tmp;
__u32 ticks;
+ __u64 tmp;
/* Calculate how many ticks have passed. */
- if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer) {
- /*
- * We have to program the clock comparator even if
- * no tick has passed. That happens if e.g. an i/o
- * interrupt wakes up an idle processor that has
- * switched off its hz timer.
- */
- tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
- asm volatile ("SCKC %0" : : "m" (tmp));
+ if (time < S390_lowcore.jiffy_timer)
return;
- }
- tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
+ tmp = time - S390_lowcore.jiffy_timer;
if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */
ticks = __div(tmp, CLK_TICKS_PER_JIFFY) + 1;
S390_lowcore.jiffy_timer +=
@@ -124,10 +121,6 @@ void account_ticks(void)
S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY;
}
- /* set clock comparator for next tick */
- tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
- asm volatile ("SCKC %0" : : "m" (tmp));
-
#ifdef CONFIG_SMP
/*
* Do not rely on the boot cpu to do the calls to do_timer.
@@ -173,7 +166,7 @@ int sysctl_hz_timer = 1;
* Stop the HZ tick on the current CPU.
* Only cpu_idle may call this function.
*/
-static inline void stop_hz_timer(void)
+static void stop_hz_timer(void)
{
unsigned long flags;
unsigned long seq, next;
@@ -210,20 +203,21 @@ static inline void stop_hz_timer(void)
if (timer >= jiffies_timer_cc)
todval = timer;
}
- asm volatile ("SCKC %0" : : "m" (todval));
+ set_clock_comparator(todval);
}
/*
* Start the HZ tick on the current CPU.
* Only cpu_idle may call this function.
*/
-static inline void start_hz_timer(void)
+static void start_hz_timer(void)
{
BUG_ON(!in_interrupt());
if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
return;
- account_ticks();
+ account_ticks(get_clock());
+ set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
cpu_clear(smp_processor_id(), nohz_cpu_mask);
}
@@ -245,7 +239,7 @@ static struct notifier_block nohz_idle_nb = {
.notifier_call = nohz_idle_notify,
};
-void __init nohz_init(void)
+static void __init nohz_init(void)
{
if (register_idle_notifier(&nohz_idle_nb))
panic("Couldn't register idle notifier");
@@ -254,24 +248,57 @@ void __init nohz_init(void)
#endif
/*
- * Start the clock comparator on the current CPU.
+ * Set up per cpu jiffy timer and set the clock comparator.
+ */
+static void setup_jiffy_timer(void)
+{
+ /* Set up clock comparator to next jiffy. */
+ S390_lowcore.jiffy_timer =
+ jiffies_timer_cc + (jiffies_64 + 1) * CLK_TICKS_PER_JIFFY;
+ set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
+}
+
+/*
+ * Set up lowcore and control register of the current cpu to
+ * enable TOD clock and clock comparator interrupts.
*/
void init_cpu_timer(void)
{
- unsigned long cr0;
- __u64 timer;
+ setup_jiffy_timer();
- timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY;
- S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
- timer += CLK_TICKS_PER_JIFFY + CPU_DEVIATION;
- asm volatile ("SCKC %0" : : "m" (timer));
- /* allow clock comparator timer interrupt */
- __ctl_store(cr0, 0, 0);
- cr0 |= 0x800;
- __ctl_load(cr0, 0, 0);
+ /* Enable clock comparator timer interrupt. */
+	__ctl_set_bit(0, 11);
+
+ /* Always allow ETR external interrupts, even without an ETR. */
+ __ctl_set_bit(0, 4);
}
-extern void vtime_init(void);
+static void clock_comparator_interrupt(__u16 code)
+{
+ /* set clock comparator for next tick */
+ set_clock_comparator(S390_lowcore.jiffy_timer + CPU_DEVIATION);
+}
+
+static void etr_reset(void);
+static void etr_init(void);
+static void etr_ext_handler(__u16);
+
+/*
+ * Get the TOD clock running.
+ */
+static u64 __init reset_tod_clock(void)
+{
+ u64 time;
+
+ etr_reset();
+ if (store_clock(&time) == 0)
+ return time;
+ /* TOD clock not running. Set the clock to Unix Epoch. */
+ if (set_clock(TOD_UNIX_EPOCH) != 0 || store_clock(&time) != 0)
+ panic("TOD clock not operational.");
+
+ return TOD_UNIX_EPOCH;
+}
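The TOD_UNIX_EPOCH constant relies on the s390 TOD clock format, in which bit 51 increments once per microsecond, so shifting a TOD difference right by 12 yields microseconds; tod_to_timeval() depends on the same property. A small user-space sketch of the conversion, assuming that format:

    #include <stdio.h>

    #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL

    int main(void)
    {
    	/* One second past the epoch: 1000000 us in 1/4096 us units. */
    	unsigned long long tod = TOD_UNIX_EPOCH + (1000000ULL << 12);
    	unsigned long long us = (tod - TOD_UNIX_EPOCH) >> 12;

    	printf("%llu.%06llu seconds since 1970\n",
    	       us / 1000000, us % 1000000);	/* prints 1.000000 */
    	return 0;
    }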
static cycle_t read_tod_clock(void)
{
@@ -285,7 +312,7 @@ static struct clocksource clocksource_tod = {
.mask = -1ULL,
.mult = 1000,
.shift = 12,
- .is_continuous = 1,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
@@ -295,48 +322,31 @@ static struct clocksource clocksource_tod = {
*/
void __init time_init(void)
{
- __u64 set_time_cc;
- int cc;
-
- /* kick the TOD clock */
- asm volatile(
- " stck 0(%2)\n"
- " ipm %0\n"
- " srl %0,28"
- : "=d" (cc), "=m" (init_timer_cc)
- : "a" (&init_timer_cc) : "cc");
- switch (cc) {
- case 0: /* clock in set state: all is fine */
- break;
- case 1: /* clock in non-set state: FIXME */
- printk("time_init: TOD clock in non-set state\n");
- break;
- case 2: /* clock in error state: FIXME */
- printk("time_init: TOD clock in error state\n");
- break;
- case 3: /* clock in stopped or not-operational state: FIXME */
- printk("time_init: TOD clock stopped/non-operational\n");
- break;
- }
+ init_timer_cc = reset_tod_clock();
+ xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
jiffies_timer_cc = init_timer_cc - jiffies_64 * CLK_TICKS_PER_JIFFY;
/* set xtime */
- xtime_cc = init_timer_cc + CLK_TICKS_PER_JIFFY;
- set_time_cc = init_timer_cc - 0x8126d60e46000000LL +
- (0x3c26700LL*1000000*4096);
- tod_to_timeval(set_time_cc, &xtime);
+ tod_to_timeval(init_timer_cc - TOD_UNIX_EPOCH, &xtime);
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
/* request the clock comparator external interrupt */
- if (register_early_external_interrupt(0x1004, NULL,
+ if (register_early_external_interrupt(0x1004,
+ clock_comparator_interrupt,
&ext_int_info_cc) != 0)
panic("Couldn't request external interrupt 0x1004");
if (clocksource_register(&clocksource_tod) != 0)
panic("Could not register TOD clock source");
- init_cpu_timer();
+ /* request the etr external interrupt */
+ if (register_early_external_interrupt(0x1406, etr_ext_handler,
+ &ext_int_etr_cc) != 0)
+ panic("Couldn't request external interrupt 0x1406");
+
+ /* Enable TOD clock interrupts on the boot cpu. */
+ init_cpu_timer();
#ifdef CONFIG_NO_IDLE_HZ
nohz_init();
@@ -345,5 +355,1048 @@ void __init time_init(void)
#ifdef CONFIG_VIRT_TIMER
vtime_init();
#endif
+ etr_init();
+}
+
+/*
+ * External Time Reference (ETR) code.
+ */
+static int etr_port0_online;
+static int etr_port1_online;
+
+static int __init early_parse_etr(char *p)
+{
+ if (strncmp(p, "off", 3) == 0)
+ etr_port0_online = etr_port1_online = 0;
+ else if (strncmp(p, "port0", 5) == 0)
+ etr_port0_online = 1;
+ else if (strncmp(p, "port1", 5) == 0)
+ etr_port1_online = 1;
+ else if (strncmp(p, "on", 2) == 0)
+ etr_port0_online = etr_port1_online = 1;
+ return 0;
+}
+early_param("etr", early_parse_etr);
+
+enum etr_event {
+ ETR_EVENT_PORT0_CHANGE,
+ ETR_EVENT_PORT1_CHANGE,
+ ETR_EVENT_PORT_ALERT,
+ ETR_EVENT_SYNC_CHECK,
+ ETR_EVENT_SWITCH_LOCAL,
+ ETR_EVENT_UPDATE,
+};
+
+enum etr_flags {
+ ETR_FLAG_ENOSYS,
+ ETR_FLAG_EACCES,
+ ETR_FLAG_STEAI,
+};
+
+/*
+ * Valid bit combinations of the eacr register are (x = don't care):
+ * e0 e1 dp p0 p1 ea es sl
+ * 0 0 x 0 0 0 0 0 initial, disabled state
+ * 0 0 x 0 1 1 0 0 port 1 online
+ * 0 0 x 1 0 1 0 0 port 0 online
+ * 0 0 x 1 1 1 0 0 both ports online
+ * 0 1 x 0 1 1 0 0 port 1 online and usable, ETR or PPS mode
+ * 0 1 x 0 1 1 0 1 port 1 online, usable and ETR mode
+ * 0 1 x 0 1 1 1 0 port 1 online, usable, PPS mode, in-sync
+ * 0 1 x 0 1 1 1 1 port 1 online, usable, ETR mode, in-sync
+ * 0 1 x 1 1 1 0 0 both ports online, port 1 usable
+ * 0 1 x 1 1 1 1 0 both ports online, port 1 usable, PPS mode, in-sync
+ * 0 1 x 1 1 1 1 1 both ports online, port 1 usable, ETR mode, in-sync
+ * 1 0 x 1 0 1 0 0 port 0 online and usable, ETR or PPS mode
+ * 1 0 x 1 0 1 0 1 port 0 online, usable and ETR mode
+ * 1 0 x 1 0 1 1 0 port 0 online, usable, PPS mode, in-sync
+ * 1 0 x 1 0 1 1 1 port 0 online, usable, ETR mode, in-sync
+ * 1 0 x 1 1 1 0 0 both ports online, port 0 usable
+ * 1 0 x 1 1 1 1 0 both ports online, port 0 usable, PPS mode, in-sync
+ * 1 0 x 1 1 1 1 1 both ports online, port 0 usable, ETR mode, in-sync
+ * 1 1 x 1 1 1 1 0 both ports online & usable, PPS mode, in-sync
+ * 1 1 x 1 1 1 1 1 both ports online & usable, ETR, in-sync
+ */
+static struct etr_eacr etr_eacr;
+static u64 etr_tolec; /* time of last eacr update */
+static unsigned long etr_flags;
+static struct etr_aib etr_port0;
+static int etr_port0_uptodate;
+static struct etr_aib etr_port1;
+static int etr_port1_uptodate;
+static unsigned long etr_events;
+static struct timer_list etr_timer;
+static struct tasklet_struct etr_tasklet;
+static DEFINE_PER_CPU(atomic_t, etr_sync_word);
+
+static void etr_timeout(unsigned long dummy);
+static void etr_tasklet_fn(unsigned long dummy);
+
+/*
+ * The etr get_clock function. It writes the current clock value to
+ * the clock pointer and returns 0 if the clock is in sync with the
+ * external time source. If the clock mode is local it returns
+ * -ENOSYS, if no usable port is available it returns -EACCES, and
+ * if the clock is simply not yet in sync with the external
+ * reference it returns -EAGAIN. This function is what ETR is all
+ * about.
+ */
+int get_sync_clock(unsigned long long *clock)
+{
+ atomic_t *sw_ptr;
+ unsigned int sw0, sw1;
+
+ sw_ptr = &get_cpu_var(etr_sync_word);
+ sw0 = atomic_read(sw_ptr);
+ *clock = get_clock();
+ sw1 = atomic_read(sw_ptr);
+	put_cpu_var(etr_sync_word);
+ if (sw0 == sw1 && (sw0 & 0x80000000U))
+ /* Success: time is in sync. */
+ return 0;
+ if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
+ return -ENOSYS;
+ if (test_bit(ETR_FLAG_EACCES, &etr_flags))
+ return -EACCES;
+ return -EAGAIN;
+}
+EXPORT_SYMBOL(get_sync_clock);
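The double read of the sync word around get_clock() is a lock-free sequence check: the clock value is accepted only if the word did not change across the read and its in-sync bit 2^31 is set. A generic user-space sketch of the pattern, with C11 atomics standing in for the kernel's per-cpu atomic_t (hypothetical names):

    #include <stdatomic.h>
    #include <stdbool.h>

    static _Atomic unsigned int sync_word;

    /* Returns true and stores the clock only if the read raced with
     * neither a loss of sync nor a completed recovery. */
    static bool read_in_sync(unsigned long long *clock,
    			 unsigned long long (*read_clock)(void))
    {
    	unsigned int sw0 = atomic_load(&sync_word);
    	*clock = read_clock();
    	unsigned int sw1 = atomic_load(&sync_word);

    	return sw0 == sw1 && (sw0 & 0x80000000U);
    }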
+
+/*
+ * Make get_sync_clock return -EAGAIN.
+ */
+static void etr_disable_sync_clock(void *dummy)
+{
+ atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
+ /*
+ * Clear the in-sync bit 2^31. All get_sync_clock calls will
+ * fail until the sync bit is turned back on. In addition
+ * increase the "sequence" counter to avoid the race of an
+ * etr event and the complete recovery against get_sync_clock.
+ */
+ atomic_clear_mask(0x80000000, sw_ptr);
+ atomic_inc(sw_ptr);
+}
+
+/*
+ * Make get_sync_clock return 0 again.
+ * Needs to be called from a context disabled for preemption.
+ */
+static void etr_enable_sync_clock(void)
+{
+ atomic_t *sw_ptr = &__get_cpu_var(etr_sync_word);
+ atomic_set_mask(0x80000000, sw_ptr);
+}
+
+/*
+ * Reset ETR attachment.
+ */
+static void etr_reset(void)
+{
+ etr_eacr = (struct etr_eacr) {
+ .e0 = 0, .e1 = 0, ._pad0 = 4, .dp = 0,
+ .p0 = 0, .p1 = 0, ._pad1 = 0, .ea = 0,
+ .es = 0, .sl = 0 };
+ if (etr_setr(&etr_eacr) == 0)
+ etr_tolec = get_clock();
+ else {
+ set_bit(ETR_FLAG_ENOSYS, &etr_flags);
+ if (etr_port0_online || etr_port1_online) {
+ printk(KERN_WARNING "Running on non ETR capable "
+ "machine, only local mode available.\n");
+ etr_port0_online = etr_port1_online = 0;
+ }
+ }
+}
+
+static void etr_init(void)
+{
+ struct etr_aib aib;
+
+ if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
+ return;
+ /* Check if this machine has the steai instruction. */
+ if (etr_steai(&aib, ETR_STEAI_STEPPING_PORT) == 0)
+ set_bit(ETR_FLAG_STEAI, &etr_flags);
+ setup_timer(&etr_timer, etr_timeout, 0UL);
+ tasklet_init(&etr_tasklet, etr_tasklet_fn, 0);
+ if (!etr_port0_online && !etr_port1_online)
+ set_bit(ETR_FLAG_EACCES, &etr_flags);
+ if (etr_port0_online) {
+ set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
+ tasklet_hi_schedule(&etr_tasklet);
+ }
+ if (etr_port1_online) {
+ set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
+ tasklet_hi_schedule(&etr_tasklet);
+ }
+}
+
+/*
+ * Two sorts of ETR machine checks. The architecture reads:
+ * "When a machine-check interruption occurs and if a switch-to-local or
+ * ETR-sync-check interrupt request is pending but disabled, this pending
+ * disabled interruption request is indicated and is cleared".
+ * Which means that we can get etr_switch_to_local events from the machine
+ * check handler although the interruption condition is disabled. Lovely..
+ */
+
+/*
+ * Switch to local machine check. This is called when the last usable
+ * ETR port goes inactive. After switch to local the clock is not in sync.
+ */
+void etr_switch_to_local(void)
+{
+ if (!etr_eacr.sl)
+ return;
+ etr_disable_sync_clock(NULL);
+ set_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events);
+ tasklet_hi_schedule(&etr_tasklet);
+}
+
+/*
+ * ETR sync check machine check. This is called when the ETR OTE and the
+ * local clock OTE are farther apart than the ETR sync check tolerance.
+ * After an ETR sync check the clock is not in sync. The machine check
+ * is broadcast to all cpus at the same time.
+ */
+void etr_sync_check(void)
+{
+ if (!etr_eacr.es)
+ return;
+ etr_disable_sync_clock(NULL);
+ set_bit(ETR_EVENT_SYNC_CHECK, &etr_events);
+ tasklet_hi_schedule(&etr_tasklet);
+}
+
+/*
+ * ETR external interrupt. There are two causes:
+ * 1) port state change, check the usability of the port
+ * 2) port alert, one of the ETR-data-validity bits (v1-v2 bits of the
+ * sldr-status word) or ETR-data word 1 (edf1) or ETR-data word 3 (edf3)
+ * or ETR-data word 4 (edf4) has changed.
+ */
+static void etr_ext_handler(__u16 code)
+{
+ struct etr_interruption_parameter *intparm =
+ (struct etr_interruption_parameter *) &S390_lowcore.ext_params;
+
+ if (intparm->pc0)
+ /* ETR port 0 state change. */
+ set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
+ if (intparm->pc1)
+ /* ETR port 1 state change. */
+ set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
+ if (intparm->eai)
+ /*
+ * ETR port alert on either port 0, 1 or both.
+ * Both ports are not up-to-date now.
+ */
+ set_bit(ETR_EVENT_PORT_ALERT, &etr_events);
+ tasklet_hi_schedule(&etr_tasklet);
+}
+
+static void etr_timeout(unsigned long dummy)
+{
+ set_bit(ETR_EVENT_UPDATE, &etr_events);
+ tasklet_hi_schedule(&etr_tasklet);
+}
+
+/*
+ * Check if the etr mode is pps.
+ */
+static inline int etr_mode_is_pps(struct etr_eacr eacr)
+{
+ return eacr.es && !eacr.sl;
+}
+
+/*
+ * Check if the etr mode is etr.
+ */
+static inline int etr_mode_is_etr(struct etr_eacr eacr)
+{
+ return eacr.es && eacr.sl;
+}
+
+/*
+ * Check if the port can be used for TOD synchronization.
+ * For PPS mode the port has to receive OTEs. For ETR mode
+ * the port has to receive OTEs, the ETR stepping bit has to
+ * be zero and the validity bits for data frame 1, 2, and 3
+ * have to be 1.
+ */
+static int etr_port_valid(struct etr_aib *aib, int port)
+{
+ unsigned int psc;
+
+ /* Check that this port is receiving OTEs. */
+ if (aib->tsp == 0)
+ return 0;
+
+ psc = port ? aib->esw.psc1 : aib->esw.psc0;
+ if (psc == etr_lpsc_pps_mode)
+ return 1;
+ if (psc == etr_lpsc_operational_step)
+ return !aib->esw.y && aib->slsw.v1 &&
+ aib->slsw.v2 && aib->slsw.v3;
+ return 0;
+}
+
+/*
+ * Check if two ports are on the same network.
+ */
+static int etr_compare_network(struct etr_aib *aib1, struct etr_aib *aib2)
+{
+ // FIXME: any other fields we have to compare?
+ return aib1->edf1.net_id == aib2->edf1.net_id;
+}
+
+/*
+ * Wrapper for etr_steai that converts physical port states
+ * to logical port states to be consistent with the output
+ * of stetr (see etr_psc vs. etr_lpsc).
+ */
+static void etr_steai_cv(struct etr_aib *aib, unsigned int func)
+{
+ BUG_ON(etr_steai(aib, func) != 0);
+ /* Convert port state to logical port state. */
+ if (aib->esw.psc0 == 1)
+ aib->esw.psc0 = 2;
+ else if (aib->esw.psc0 == 0 && aib->esw.p == 0)
+ aib->esw.psc0 = 1;
+ if (aib->esw.psc1 == 1)
+ aib->esw.psc1 = 2;
+ else if (aib->esw.psc1 == 0 && aib->esw.p == 1)
+ aib->esw.psc1 = 1;
+}
+
+/*
+ * Check if the aib a2 is still connected to the same attachment as
+ * aib a1, the etv values differ by one and a2 is valid.
+ */
+static int etr_aib_follows(struct etr_aib *a1, struct etr_aib *a2, int p)
+{
+ int state_a1, state_a2;
+
+ /* Paranoia check: e0/e1 should better be the same. */
+ if (a1->esw.eacr.e0 != a2->esw.eacr.e0 ||
+ a1->esw.eacr.e1 != a2->esw.eacr.e1)
+ return 0;
+
+ /* Still connected to the same etr ? */
+ state_a1 = p ? a1->esw.psc1 : a1->esw.psc0;
+ state_a2 = p ? a2->esw.psc1 : a2->esw.psc0;
+ if (state_a1 == etr_lpsc_operational_step) {
+ if (state_a2 != etr_lpsc_operational_step ||
+ a1->edf1.net_id != a2->edf1.net_id ||
+ a1->edf1.etr_id != a2->edf1.etr_id ||
+ a1->edf1.etr_pn != a2->edf1.etr_pn)
+ return 0;
+ } else if (state_a2 != etr_lpsc_pps_mode)
+ return 0;
+
+ /* The ETV value of a2 needs to be ETV of a1 + 1. */
+ if (a1->edf2.etv + 1 != a2->edf2.etv)
+ return 0;
+
+ if (!etr_port_valid(a2, p))
+ return 0;
+
+ return 1;
+}
+
+/*
+ * The time is "clock". xtime is what we think the time is.
+ * Adjust the value by a multiple of jiffies and add the delta to ntp.
+ * "delay" is an approximation of how long the synchronization took. If
+ * the time correction is positive, then "delay" is subtracted from
+ * the time difference and only the remaining part is passed to ntp.
+ */
+static void etr_adjust_time(unsigned long long clock, unsigned long long delay)
+{
+ unsigned long long delta, ticks;
+ struct timex adjust;
+
+ /*
+ * We don't have to take the xtime lock because the cpu
+ * executing etr_adjust_time is running disabled in
+ * tasklet context and all other cpus are looping in
+ * etr_sync_cpu_start.
+ */
+ if (clock > xtime_cc) {
+ /* It is later than we thought. */
+ delta = ticks = clock - xtime_cc;
+ delta = ticks = (delta < delay) ? 0 : delta - delay;
+ delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
+ init_timer_cc = init_timer_cc + delta;
+ jiffies_timer_cc = jiffies_timer_cc + delta;
+ xtime_cc = xtime_cc + delta;
+ adjust.offset = ticks * (1000000 / HZ);
+ } else {
+ /* It is earlier than we thought. */
+ delta = ticks = xtime_cc - clock;
+ delta -= do_div(ticks, CLK_TICKS_PER_JIFFY);
+ init_timer_cc = init_timer_cc - delta;
+ jiffies_timer_cc = jiffies_timer_cc - delta;
+ xtime_cc = xtime_cc - delta;
+ adjust.offset = -ticks * (1000000 / HZ);
+ }
+ if (adjust.offset != 0) {
+ printk(KERN_NOTICE "etr: time adjusted by %li micro-seconds\n",
+ adjust.offset);
+ adjust.modes = ADJ_OFFSET_SINGLESHOT;
+ do_adjtimex(&adjust);
+ }
+}
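The do_div() rounding above splits the measured difference into whole jiffies: delta, a multiple of CLK_TICKS_PER_JIFFY, moves the clock offsets, and the same number of jiffies, expressed in microseconds, is handed to ntp as a singleshot offset. A worked user-space example, assuming HZ=100 and illustrative numbers:

    #include <stdio.h>

    #define HZ 100
    #define USECS_PER_JIFFY		(1000000ULL / HZ)
    #define CLK_TICKS_PER_JIFFY	(USECS_PER_JIFFY << 12)

    int main(void)
    {
    	unsigned long long diff = 5 * CLK_TICKS_PER_JIFFY / 2;	/* 2.5 jiffies */
    	unsigned long long ticks = diff / CLK_TICKS_PER_JIFFY;	/* 2 */
    	unsigned long long delta = diff - diff % CLK_TICKS_PER_JIFFY;

    	/* Two whole jiffies move the cc offsets, 20000 us go to ntp. */
    	printf("delta=%llu clock units, ntp offset=%llu us\n",
    	       delta, ticks * USECS_PER_JIFFY);
    	return 0;
    }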
+
+static void etr_sync_cpu_start(void *dummy)
+{
+ int *in_sync = dummy;
+
+ etr_enable_sync_clock();
+ /*
+	 * This looks like a busy wait loop but it isn't. This function
+	 * is called on all other cpus while the TOD clock is stopped.
+ * __udelay will stop the cpu on an enabled wait psw until the
+ * TOD is running again.
+ */
+ while (*in_sync == 0)
+ __udelay(1);
+ if (*in_sync != 1)
+ /* Didn't work. Clear per-cpu in sync bit again. */
+ etr_disable_sync_clock(NULL);
+ /*
+ * This round of TOD syncing is done. Set the clock comparator
+ * to the next tick and let the processor continue.
+ */
+ setup_jiffy_timer();
+}
+
+static void etr_sync_cpu_end(void *dummy)
+{
+}
+
+/*
+ * Sync the TOD clock using the port referred to by aib. This port
+ * has to be enabled and the other port has to be disabled. The
+ * last eacr update has to be more than 1.6 seconds in the past.
+ */
+static int etr_sync_clock(struct etr_aib *aib, int port)
+{
+ struct etr_aib *sync_port;
+ unsigned long long clock, delay;
+ int in_sync, follows;
+ int rc;
+
+ /* Check if the current aib is adjacent to the sync port aib. */
+ sync_port = (port == 0) ? &etr_port0 : &etr_port1;
+ follows = etr_aib_follows(sync_port, aib, port);
+ memcpy(sync_port, aib, sizeof(*aib));
+ if (!follows)
+ return -EAGAIN;
+
+ /*
+ * Catch all other cpus and make them wait until we have
+ * successfully synced the clock. smp_call_function will
+ * return after all other cpus are in etr_sync_cpu_start.
+ */
+ in_sync = 0;
+ preempt_disable();
+	smp_call_function(etr_sync_cpu_start, &in_sync, 0, 0);
+ local_irq_disable();
+ etr_enable_sync_clock();
+
+ /* Set clock to next OTE. */
+ __ctl_set_bit(14, 21);
+ __ctl_set_bit(0, 29);
+ clock = ((unsigned long long) (aib->edf2.etv + 1)) << 32;
+ if (set_clock(clock) == 0) {
+ __udelay(1); /* Wait for the clock to start. */
+ __ctl_clear_bit(0, 29);
+ __ctl_clear_bit(14, 21);
+ etr_stetr(aib);
+ /* Adjust Linux timing variables. */
+ delay = (unsigned long long)
+ (aib->edf2.etv - sync_port->edf2.etv) << 32;
+ etr_adjust_time(clock, delay);
+ setup_jiffy_timer();
+ /* Verify that the clock is properly set. */
+ if (!etr_aib_follows(sync_port, aib, port)) {
+ /* Didn't work. */
+ etr_disable_sync_clock(NULL);
+ in_sync = -EAGAIN;
+ rc = -EAGAIN;
+ } else {
+ in_sync = 1;
+ rc = 0;
+ }
+ } else {
+ /* Could not set the clock ?!? */
+ __ctl_clear_bit(0, 29);
+ __ctl_clear_bit(14, 21);
+ etr_disable_sync_clock(NULL);
+ in_sync = -EAGAIN;
+ rc = -EAGAIN;
+ }
+ local_irq_enable();
+	smp_call_function(etr_sync_cpu_end, NULL, 0, 0);
+ preempt_enable();
+ return rc;
+}
+
+/*
+ * Handle the immediate effects of the different events.
+ * The port change event is used for online/offline changes.
+ */
+static struct etr_eacr etr_handle_events(struct etr_eacr eacr)
+{
+ if (test_and_clear_bit(ETR_EVENT_SYNC_CHECK, &etr_events))
+ eacr.es = 0;
+ if (test_and_clear_bit(ETR_EVENT_SWITCH_LOCAL, &etr_events))
+ eacr.es = eacr.sl = 0;
+ if (test_and_clear_bit(ETR_EVENT_PORT_ALERT, &etr_events))
+ etr_port0_uptodate = etr_port1_uptodate = 0;
+
+ if (test_and_clear_bit(ETR_EVENT_PORT0_CHANGE, &etr_events)) {
+ if (eacr.e0)
+ /*
+ * Port change of an enabled port. We have to
+			 * assume that this can have caused a stepping
+ * port switch.
+ */
+ etr_tolec = get_clock();
+ eacr.p0 = etr_port0_online;
+ if (!eacr.p0)
+ eacr.e0 = 0;
+ etr_port0_uptodate = 0;
+ }
+ if (test_and_clear_bit(ETR_EVENT_PORT1_CHANGE, &etr_events)) {
+ if (eacr.e1)
+ /*
+ * Port change of an enabled port. We have to
+			 * assume that this can have caused a stepping
+ * port switch.
+ */
+ etr_tolec = get_clock();
+ eacr.p1 = etr_port1_online;
+ if (!eacr.p1)
+ eacr.e1 = 0;
+ etr_port1_uptodate = 0;
+ }
+ clear_bit(ETR_EVENT_UPDATE, &etr_events);
+ return eacr;
+}
+
+/*
+ * Set up a timer that expires 1.6 seconds after etr_tolec if
+ * one of the ports needs an update.
+ */
+static void etr_set_tolec_timeout(unsigned long long now)
+{
+ unsigned long micros;
+
+ if ((!etr_eacr.p0 || etr_port0_uptodate) &&
+ (!etr_eacr.p1 || etr_port1_uptodate))
+ return;
+ micros = (now > etr_tolec) ? ((now - etr_tolec) >> 12) : 0;
+ micros = (micros > 1600000) ? 0 : 1600000 - micros;
+ mod_timer(&etr_timer, jiffies + (micros * HZ) / 1000000 + 1);
+}
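Because TOD units are 1/4096 us, (now - etr_tolec) >> 12 is the age of the last eacr update in microseconds, and the timer is armed for the remainder of the 1.6 second window. A short user-space sketch of the arithmetic, assuming HZ=100 and illustrative values:

    #include <stdio.h>

    #define HZ 100

    int main(void)
    {
    	unsigned long long tolec = 0;
    	unsigned long long now = 600000ULL << 12;	/* 0.6s after tolec */
    	unsigned long micros = (now - tolec) >> 12;
    	unsigned long left = (micros > 1600000) ? 0 : 1600000 - micros;

    	/* Remaining 1.0s of the window, converted to jiffies. */
    	printf("retry in %lu us = %lu jiffies\n",
    	       left, (left * HZ) / 1000000 + 1);
    	return 0;
    }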
+
+/*
+ * Set up a timer that expires after 1/2 second.
+ */
+static void etr_set_sync_timeout(void)
+{
+ mod_timer(&etr_timer, jiffies + HZ/2);
+}
+
+/*
+ * Update the aib information for one or both ports.
+ */
+static struct etr_eacr etr_handle_update(struct etr_aib *aib,
+ struct etr_eacr eacr)
+{
+ /* With both ports disabled the aib information is useless. */
+ if (!eacr.e0 && !eacr.e1)
+ return eacr;
+
+ /* Update port0 or port1 with aib stored in etr_tasklet_fn. */
+ if (aib->esw.q == 0) {
+ /* Information for port 0 stored. */
+ if (eacr.p0 && !etr_port0_uptodate) {
+ etr_port0 = *aib;
+ if (etr_port0_online)
+ etr_port0_uptodate = 1;
+ }
+ } else {
+ /* Information for port 1 stored. */
+ if (eacr.p1 && !etr_port1_uptodate) {
+ etr_port1 = *aib;
+			if (etr_port1_online)
+ etr_port1_uptodate = 1;
+ }
+ }
+
+ /*
+ * Do not try to get the alternate port aib if the clock
+ * is not in sync yet.
+ */
+ if (!eacr.es)
+ return eacr;
+
+ /*
+ * If steai is available we can get the information about
+ * the other port immediately. If only stetr is available the
+ * data-port bit toggle has to be used.
+ */
+ if (test_bit(ETR_FLAG_STEAI, &etr_flags)) {
+ if (eacr.p0 && !etr_port0_uptodate) {
+ etr_steai_cv(&etr_port0, ETR_STEAI_PORT_0);
+ etr_port0_uptodate = 1;
+ }
+ if (eacr.p1 && !etr_port1_uptodate) {
+ etr_steai_cv(&etr_port1, ETR_STEAI_PORT_1);
+ etr_port1_uptodate = 1;
+ }
+ } else {
+		 * One port was updated above; if the other
+		 * port is not uptodate, toggle the dp bit.
+ * port is not uptodate toggle dp bit.
+ */
+ if ((eacr.p0 && !etr_port0_uptodate) ||
+ (eacr.p1 && !etr_port1_uptodate))
+ eacr.dp ^= 1;
+ else
+ eacr.dp = 0;
+ }
+ return eacr;
+}
+
+/*
+ * Write new etr control register if it differs from the current one.
+ * Update etr_tolec as well if the write may have caused a data port change.
+ */
+static void etr_update_eacr(struct etr_eacr eacr)
+{
+ int dp_changed;
+
+ if (memcmp(&etr_eacr, &eacr, sizeof(eacr)) == 0)
+ /* No change, return. */
+ return;
+ /*
+	 * The disable of an active port or the change of the data port
+ * bit can/will cause a change in the data port.
+ */
+ dp_changed = etr_eacr.e0 > eacr.e0 || etr_eacr.e1 > eacr.e1 ||
+ (etr_eacr.dp ^ eacr.dp) != 0;
+ etr_eacr = eacr;
+ etr_setr(&etr_eacr);
+ if (dp_changed)
+ etr_tolec = get_clock();
+}
+
+/*
+ * ETR tasklet. In this function you'll find the main logic. In
+ * particular this is the only function that calls etr_update_eacr(),
+ * it "controls" the etr control register.
+ */
+static void etr_tasklet_fn(unsigned long dummy)
+{
+ unsigned long long now;
+ struct etr_eacr eacr;
+ struct etr_aib aib;
+ int sync_port;
+
+ /* Create working copy of etr_eacr. */
+ eacr = etr_eacr;
+
+ /* Check for the different events and their immediate effects. */
+ eacr = etr_handle_events(eacr);
+
+ /* Check if ETR is supposed to be active. */
+ eacr.ea = eacr.p0 || eacr.p1;
+ if (!eacr.ea) {
+ /* Both ports offline. Reset everything. */
+ eacr.dp = eacr.es = eacr.sl = 0;
+ on_each_cpu(etr_disable_sync_clock, NULL, 0, 1);
+ del_timer_sync(&etr_timer);
+ etr_update_eacr(eacr);
+ set_bit(ETR_FLAG_EACCES, &etr_flags);
+ return;
+ }
+
+ /* Store aib to get the current ETR status word. */
+ BUG_ON(etr_stetr(&aib) != 0);
+ etr_port0.esw = etr_port1.esw = aib.esw; /* Copy status word. */
+ now = get_clock();
+
+ /*
+ * Update the port information if the last stepping port change
+ * or data port change is older than 1.6 seconds.
+ */
+ if (now >= etr_tolec + (1600000 << 12))
+ eacr = etr_handle_update(&aib, eacr);
+
+ /*
+	 * Select ports to enable. The preferred synchronization mode is PPS.
+	 * Whether a port can be enabled depends on a number of things:
+ * 1) The port needs to be online and uptodate. A port is not
+ * disabled just because it is not uptodate, but it is only
+ * enabled if it is uptodate.
+ * 2) The port needs to have the same mode (pps / etr).
+ * 3) The port needs to be usable -> etr_port_valid() == 1
+ * 4) To enable the second port the clock needs to be in sync.
+	 * 5) If both ports are usable and are ETR ports, the network id
+ * has to be the same.
+ * The eacr.sl bit is used to indicate etr mode vs. pps mode.
+ */
+ if (eacr.p0 && aib.esw.psc0 == etr_lpsc_pps_mode) {
+ eacr.sl = 0;
+ eacr.e0 = 1;
+ if (!etr_mode_is_pps(etr_eacr))
+ eacr.es = 0;
+ if (!eacr.es || !eacr.p1 || aib.esw.psc1 != etr_lpsc_pps_mode)
+ eacr.e1 = 0;
+ // FIXME: uptodate checks ?
+ else if (etr_port0_uptodate && etr_port1_uptodate)
+ eacr.e1 = 1;
+ sync_port = (etr_port0_uptodate &&
+ etr_port_valid(&etr_port0, 0)) ? 0 : -1;
+ clear_bit(ETR_FLAG_EACCES, &etr_flags);
+ } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_pps_mode) {
+ eacr.sl = 0;
+ eacr.e0 = 0;
+ eacr.e1 = 1;
+ if (!etr_mode_is_pps(etr_eacr))
+ eacr.es = 0;
+ sync_port = (etr_port1_uptodate &&
+ etr_port_valid(&etr_port1, 1)) ? 1 : -1;
+ clear_bit(ETR_FLAG_EACCES, &etr_flags);
+ } else if (eacr.p0 && aib.esw.psc0 == etr_lpsc_operational_step) {
+ eacr.sl = 1;
+ eacr.e0 = 1;
+ if (!etr_mode_is_etr(etr_eacr))
+ eacr.es = 0;
+ if (!eacr.es || !eacr.p1 ||
+ aib.esw.psc1 != etr_lpsc_operational_alt)
+ eacr.e1 = 0;
+ else if (etr_port0_uptodate && etr_port1_uptodate &&
+ etr_compare_network(&etr_port0, &etr_port1))
+ eacr.e1 = 1;
+ sync_port = (etr_port0_uptodate &&
+ etr_port_valid(&etr_port0, 0)) ? 0 : -1;
+ clear_bit(ETR_FLAG_EACCES, &etr_flags);
+ } else if (eacr.p1 && aib.esw.psc1 == etr_lpsc_operational_step) {
+ eacr.sl = 1;
+ eacr.e0 = 0;
+ eacr.e1 = 1;
+ if (!etr_mode_is_etr(etr_eacr))
+ eacr.es = 0;
+ sync_port = (etr_port1_uptodate &&
+ etr_port_valid(&etr_port1, 1)) ? 1 : -1;
+ clear_bit(ETR_FLAG_EACCES, &etr_flags);
+ } else {
+ /* Both ports not usable. */
+ eacr.es = eacr.sl = 0;
+ sync_port = -1;
+ set_bit(ETR_FLAG_EACCES, &etr_flags);
+ }
+
+ /*
+ * If the clock is in sync just update the eacr and return.
+ * If there is no valid sync port wait for a port update.
+ */
+ if (eacr.es || sync_port < 0) {
+ etr_update_eacr(eacr);
+ etr_set_tolec_timeout(now);
+ return;
+ }
+
+ /*
+ * Prepare control register for clock syncing
+	 * (reset data port bit, set sync check control).
+ */
+ eacr.dp = 0;
+ eacr.es = 1;
+
+ /*
+ * Update eacr and try to synchronize the clock. If the update
+ * of eacr caused a stepping port switch (or if we have to
+	 * assume that a stepping port switch has occurred) or the
+ * clock syncing failed, reset the sync check control bit
+	 * and set up a timer to try again after 0.5 seconds.
+ */
+ etr_update_eacr(eacr);
+ if (now < etr_tolec + (1600000 << 12) ||
+ etr_sync_clock(&aib, sync_port) != 0) {
+ /* Sync failed. Try again in 1/2 second. */
+ eacr.es = 0;
+ etr_update_eacr(eacr);
+ etr_set_sync_timeout();
+ } else
+ etr_set_tolec_timeout(now);
+}
+
+/*
+ * Sysfs interface functions
+ */
+static struct sysdev_class etr_sysclass = {
+ set_kset_name("etr")
+};
+
+static struct sys_device etr_port0_dev = {
+ .id = 0,
+ .cls = &etr_sysclass,
+};
+
+static struct sys_device etr_port1_dev = {
+ .id = 1,
+ .cls = &etr_sysclass,
+};
+
+/*
+ * ETR class attributes
+ */
+static ssize_t etr_stepping_port_show(struct sysdev_class *class, char *buf)
+{
+ return sprintf(buf, "%i\n", etr_port0.esw.p);
+}
+
+static SYSDEV_CLASS_ATTR(stepping_port, 0400, etr_stepping_port_show, NULL);
+
+static ssize_t etr_stepping_mode_show(struct sysdev_class *class, char *buf)
+{
+ char *mode_str;
+
+ if (etr_mode_is_pps(etr_eacr))
+ mode_str = "pps";
+ else if (etr_mode_is_etr(etr_eacr))
+ mode_str = "etr";
+ else
+ mode_str = "local";
+ return sprintf(buf, "%s\n", mode_str);
+}
+
+static SYSDEV_CLASS_ATTR(stepping_mode, 0400, etr_stepping_mode_show, NULL);
+
+/*
+ * ETR port attributes
+ */
+static inline struct etr_aib *etr_aib_from_dev(struct sys_device *dev)
+{
+ if (dev == &etr_port0_dev)
+ return etr_port0_online ? &etr_port0 : NULL;
+ else
+ return etr_port1_online ? &etr_port1 : NULL;
+}
+
+static ssize_t etr_online_show(struct sys_device *dev, char *buf)
+{
+ unsigned int online;
+
+ online = (dev == &etr_port0_dev) ? etr_port0_online : etr_port1_online;
+ return sprintf(buf, "%i\n", online);
+}
+
+static ssize_t etr_online_store(struct sys_device *dev,
+ const char *buf, size_t count)
+{
+ unsigned int value;
+
+ value = simple_strtoul(buf, NULL, 0);
+ if (value != 0 && value != 1)
+ return -EINVAL;
+ if (test_bit(ETR_FLAG_ENOSYS, &etr_flags))
+ return -ENOSYS;
+ if (dev == &etr_port0_dev) {
+ if (etr_port0_online == value)
+ return count; /* Nothing to do. */
+ etr_port0_online = value;
+ set_bit(ETR_EVENT_PORT0_CHANGE, &etr_events);
+ tasklet_hi_schedule(&etr_tasklet);
+ } else {
+ if (etr_port1_online == value)
+ return count; /* Nothing to do. */
+ etr_port1_online = value;
+ set_bit(ETR_EVENT_PORT1_CHANGE, &etr_events);
+ tasklet_hi_schedule(&etr_tasklet);
+ }
+ return count;
+}
+
+static SYSDEV_ATTR(online, 0600, etr_online_show, etr_online_store);
+
+static ssize_t etr_stepping_control_show(struct sys_device *dev, char *buf)
+{
+ return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
+ etr_eacr.e0 : etr_eacr.e1);
+}
+
+static SYSDEV_ATTR(stepping_control, 0400, etr_stepping_control_show, NULL);
+
+static ssize_t etr_mode_code_show(struct sys_device *dev, char *buf)
+{
+ if (!etr_port0_online && !etr_port1_online)
+ /* Status word is not uptodate if both ports are offline. */
+ return -ENODATA;
+ return sprintf(buf, "%i\n", (dev == &etr_port0_dev) ?
+ etr_port0.esw.psc0 : etr_port0.esw.psc1);
+}
+
+static SYSDEV_ATTR(state_code, 0400, etr_mode_code_show, NULL);
+
+static ssize_t etr_untuned_show(struct sys_device *dev, char *buf)
+{
+ struct etr_aib *aib = etr_aib_from_dev(dev);
+
+ if (!aib || !aib->slsw.v1)
+ return -ENODATA;
+ return sprintf(buf, "%i\n", aib->edf1.u);
+}
+
+static SYSDEV_ATTR(untuned, 0400, etr_untuned_show, NULL);
+
+static ssize_t etr_network_id_show(struct sys_device *dev, char *buf)
+{
+ struct etr_aib *aib = etr_aib_from_dev(dev);
+
+ if (!aib || !aib->slsw.v1)
+ return -ENODATA;
+ return sprintf(buf, "%i\n", aib->edf1.net_id);
+}
+
+static SYSDEV_ATTR(network, 0400, etr_network_id_show, NULL);
+
+static ssize_t etr_id_show(struct sys_device *dev, char *buf)
+{
+ struct etr_aib *aib = etr_aib_from_dev(dev);
+
+ if (!aib || !aib->slsw.v1)
+ return -ENODATA;
+ return sprintf(buf, "%i\n", aib->edf1.etr_id);
+}
+
+static SYSDEV_ATTR(id, 0400, etr_id_show, NULL);
+
+static ssize_t etr_port_number_show(struct sys_device *dev, char *buf)
+{
+ struct etr_aib *aib = etr_aib_from_dev(dev);
+
+ if (!aib || !aib->slsw.v1)
+ return -ENODATA;
+ return sprintf(buf, "%i\n", aib->edf1.etr_pn);
+}
+
+static SYSDEV_ATTR(port, 0400, etr_port_number_show, NULL);
+
+static ssize_t etr_coupled_show(struct sys_device *dev, char *buf)
+{
+ struct etr_aib *aib = etr_aib_from_dev(dev);
+
+ if (!aib || !aib->slsw.v3)
+ return -ENODATA;
+ return sprintf(buf, "%i\n", aib->edf3.c);
+}
+
+static SYSDEV_ATTR(coupled, 0400, etr_coupled_show, NULL);
+
+static ssize_t etr_local_time_show(struct sys_device *dev, char *buf)
+{
+ struct etr_aib *aib = etr_aib_from_dev(dev);
+
+ if (!aib || !aib->slsw.v3)
+ return -ENODATA;
+ return sprintf(buf, "%i\n", aib->edf3.blto);
+}
+
+static SYSDEV_ATTR(local_time, 0400, etr_local_time_show, NULL);
+
+static ssize_t etr_utc_offset_show(struct sys_device *dev, char *buf)
+{
+ struct etr_aib *aib = etr_aib_from_dev(dev);
+
+ if (!aib || !aib->slsw.v3)
+ return -ENODATA;
+ return sprintf(buf, "%i\n", aib->edf3.buo);
+}
+
+static SYSDEV_ATTR(utc_offset, 0400, etr_utc_offset_show, NULL);
+
+static struct sysdev_attribute *etr_port_attributes[] = {
+ &attr_online,
+ &attr_stepping_control,
+ &attr_state_code,
+ &attr_untuned,
+ &attr_network,
+ &attr_id,
+ &attr_port,
+ &attr_coupled,
+ &attr_local_time,
+ &attr_utc_offset,
+ NULL
+};
+
+static int __init etr_register_port(struct sys_device *dev)
+{
+ struct sysdev_attribute **attr;
+ int rc;
+
+ rc = sysdev_register(dev);
+ if (rc)
+ goto out;
+ for (attr = etr_port_attributes; *attr; attr++) {
+ rc = sysdev_create_file(dev, *attr);
+ if (rc)
+ goto out_unreg;
+ }
+ return 0;
+out_unreg:
+ for (; attr >= etr_port_attributes; attr--)
+ sysdev_remove_file(dev, *attr);
+ sysdev_unregister(dev);
+out:
+ return rc;
+}
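Note the error path above starts the unwind at the attribute whose creation just failed and walks back to the first one. A generic user-space sketch of the create-then-unwind idiom (hypothetical create/destroy callbacks; here the unwind covers only what was actually created):

    #include <stdio.h>

    static int create(int i)   { return (i == 3) ? -1 : 0; }	/* 4th fails */
    static void destroy(int i) { printf("destroy %d\n", i); }

    int main(void)
    {
    	int i, rc = 0;

    	for (i = 0; i < 5; i++) {
    		rc = create(i);
    		if (rc)
    			goto out_unwind;
    	}
    	return 0;
    out_unwind:
    	for (i--; i >= 0; i--)	/* tear down in reverse order */
    		destroy(i);
    	return rc;
    }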
+
+static void __init etr_unregister_port(struct sys_device *dev)
+{
+ struct sysdev_attribute **attr;
+
+ for (attr = etr_port_attributes; *attr; attr++)
+ sysdev_remove_file(dev, *attr);
+ sysdev_unregister(dev);
+}
+
+static int __init etr_init_sysfs(void)
+{
+ int rc;
+
+ rc = sysdev_class_register(&etr_sysclass);
+ if (rc)
+ goto out;
+ rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_port);
+ if (rc)
+ goto out_unreg_class;
+ rc = sysdev_class_create_file(&etr_sysclass, &attr_stepping_mode);
+ if (rc)
+ goto out_remove_stepping_port;
+ rc = etr_register_port(&etr_port0_dev);
+ if (rc)
+ goto out_remove_stepping_mode;
+ rc = etr_register_port(&etr_port1_dev);
+ if (rc)
+ goto out_remove_port0;
+ return 0;
+
+out_remove_port0:
+ etr_unregister_port(&etr_port0_dev);
+out_remove_stepping_mode:
+ sysdev_class_remove_file(&etr_sysclass, &attr_stepping_mode);
+out_remove_stepping_port:
+ sysdev_class_remove_file(&etr_sysclass, &attr_stepping_port);
+out_unreg_class:
+ sysdev_class_unregister(&etr_sysclass);
+out:
+ return rc;
}
+device_initcall(etr_init_sysfs);
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 3cbb0dcf1f1d..f0e5a320e2ec 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -283,7 +283,7 @@ char *task_show_regs(struct task_struct *task, char *buffer)
return buffer;
}
-DEFINE_SPINLOCK(die_lock);
+static DEFINE_SPINLOCK(die_lock);
void die(const char * str, struct pt_regs * regs, long err)
{
@@ -364,8 +364,7 @@ void __kprobes do_single_step(struct pt_regs *regs)
force_sig(SIGTRAP, current);
}
-asmlinkage void
-default_trap_handler(struct pt_regs * regs, long interruption_code)
+static void default_trap_handler(struct pt_regs * regs, long interruption_code)
{
if (regs->psw.mask & PSW_MASK_PSTATE) {
local_irq_enable();
@@ -376,7 +375,7 @@ default_trap_handler(struct pt_regs * regs, long interruption_code)
}
#define DO_ERROR_INFO(signr, str, name, sicode, siaddr) \
-asmlinkage void name(struct pt_regs * regs, long interruption_code) \
+static void name(struct pt_regs * regs, long interruption_code) \
{ \
siginfo_t info; \
info.si_signo = signr; \
@@ -442,7 +441,7 @@ do_fp_trap(struct pt_regs *regs, void __user *location,
"floating point exception", regs, &si);
}
-asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
+static void illegal_op(struct pt_regs * regs, long interruption_code)
{
siginfo_t info;
__u8 opcode[6];
@@ -491,8 +490,15 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
#endif
} else
signal = SIGILL;
- } else
- signal = SIGILL;
+ } else {
+ /*
+ * If we get an illegal op in kernel mode, send it through the
+		 * kprobes notifier. If kprobes doesn't pick it up, send SIGILL.
+ */
+ if (notify_die(DIE_BPT, "bpt", regs, interruption_code,
+ 3, SIGTRAP) != NOTIFY_STOP)
+ signal = SIGILL;
+ }
#ifdef CONFIG_MATHEMU
if (signal == SIGFPE)
@@ -585,7 +591,7 @@ DO_ERROR_INFO(SIGILL, "specification exception", specification_exception,
ILL_ILLOPN, get_check_address(regs));
#endif
-asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
+static void data_exception(struct pt_regs * regs, long interruption_code)
{
__u16 __user *location;
int signal = 0;
@@ -675,7 +681,7 @@ asmlinkage void data_exception(struct pt_regs * regs, long interruption_code)
}
}
-asmlinkage void space_switch_exception(struct pt_regs * regs, long int_code)
+static void space_switch_exception(struct pt_regs * regs, long int_code)
{
siginfo_t info;
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index fe0f2e97ba7b..c30716ae130c 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -31,18 +31,19 @@ SECTIONS
_etext = .; /* End of text section */
- . = ALIGN(16); /* Exception table */
- __start___ex_table = .;
- __ex_table : { *(__ex_table) }
- __stop___ex_table = .;
-
RODATA
#ifdef CONFIG_SHARED_KERNEL
. = ALIGN(1048576); /* VM shared segments are 1MB aligned */
+#endif
+ . = ALIGN(4096);
_eshared = .; /* End of shareable data */
-#endif
+
+ . = ALIGN(16); /* Exception table */
+ __start___ex_table = .;
+ __ex_table : { *(__ex_table) }
+ __stop___ex_table = .;
.data : { /* Data */
*(.data)
@@ -90,11 +91,14 @@ SECTIONS
.con_initcall.init : { *(.con_initcall.init) }
__con_initcall_end = .;
SECURITY_INIT
+
+#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(256);
__initramfs_start = .;
.init.ramfs : { *(.init.initramfs) }
. = ALIGN(2);
__initramfs_end = .;
+#endif
. = ALIGN(256);
__per_cpu_start = .;
.data.percpu : { *(.data.percpu) }
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 21baaf5496d6..9d5b02801b46 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -25,7 +25,7 @@
#include <asm/irq_regs.h>
static ext_int_info_t ext_int_info_timer;
-DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
+static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
@@ -524,16 +524,15 @@ EXPORT_SYMBOL(del_virt_timer);
void init_cpu_vtimer(void)
{
struct vtimer_queue *vt_list;
- unsigned long cr0;
/* kick the virtual timer */
S390_lowcore.exit_timer = VTIMER_MAX_SLICE;
S390_lowcore.last_update_timer = VTIMER_MAX_SLICE;
asm volatile ("SPT %0" : : "m" (S390_lowcore.last_update_timer));
asm volatile ("STCK %0" : "=m" (S390_lowcore.last_update_clock));
- __ctl_store(cr0, 0, 0);
- cr0 |= 0x400;
- __ctl_load(cr0, 0, 0);
+
+ /* enable cpu timer interrupts */
+	__ctl_set_bit(0, 10);
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
INIT_LIST_HEAD(&vt_list->list);
@@ -572,6 +571,7 @@ void __init vtime_init(void)
if (register_idle_notifier(&vtimer_idle_nb))
panic("Couldn't register idle notifier");
+ /* Enable cpu timer interrupts on the boot cpu. */
init_cpu_vtimer();
}