Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/Kconfig                  |  17
-rw-r--r--  arch/s390/appldata/appldata.h      |  16
-rw-r--r--  arch/s390/appldata/appldata_base.c |  81
-rw-r--r--  arch/s390/appldata/appldata_os.c   |   1
-rw-r--r--  arch/s390/defconfig                |   1
-rw-r--r--  arch/s390/hypfs/hypfs.h            |   2
-rw-r--r--  arch/s390/hypfs/hypfs_diag.c       |  16
-rw-r--r--  arch/s390/hypfs/hypfs_diag.h       |   2
-rw-r--r--  arch/s390/hypfs/inode.c            |  12
-rw-r--r--  arch/s390/kernel/Makefile          |   3
-rw-r--r--  arch/s390/kernel/entry.S           |  12
-rw-r--r--  arch/s390/kernel/entry64.S         |  16
-rw-r--r--  arch/s390/kernel/head.S            |  69
-rw-r--r--  arch/s390/kernel/head31.S          |  48
-rw-r--r--  arch/s390/kernel/head64.S          |  59
-rw-r--r--  arch/s390/kernel/ipl.c             | 942
-rw-r--r--  arch/s390/kernel/kprobes.c         | 657
-rw-r--r--  arch/s390/kernel/reipl.S           |  33
-rw-r--r--  arch/s390/kernel/reipl64.S         |  34
-rw-r--r--  arch/s390/kernel/reipl_diag.c      |  39
-rw-r--r--  arch/s390/kernel/s390_ksyms.c      |   6
-rw-r--r--  arch/s390/kernel/setup.c           | 272
-rw-r--r--  arch/s390/kernel/signal.c          |  40
-rw-r--r--  arch/s390/kernel/smp.c             |  10
-rw-r--r--  arch/s390/kernel/traps.c           |  31
-rw-r--r--  arch/s390/kernel/vmlinux.lds.S     |   3
-rw-r--r--  arch/s390/lib/Makefile             |   4
-rw-r--r--  arch/s390/lib/uaccess.S            | 211
-rw-r--r--  arch/s390/lib/uaccess64.S          | 207
-rw-r--r--  arch/s390/lib/uaccess_mvcos.c      | 156
-rw-r--r--  arch/s390/lib/uaccess_std.c        | 340
-rw-r--r--  arch/s390/mm/cmm.c                 |  30
-rw-r--r--  arch/s390/mm/fault.c               |  40
-rw-r--r--  arch/s390/mm/init.c                |  36
34 files changed, 2466 insertions(+), 980 deletions(-)
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 2f4f70c4dbb2..b216ca659cdf 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -460,8 +460,7 @@ config S390_HYPFS_FS
information in an s390 hypervisor environment.
config KEXEC
- bool "kexec system call (EXPERIMENTAL)"
- depends on EXPERIMENTAL
+ bool "kexec system call"
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
@@ -487,8 +486,22 @@ source "drivers/net/Kconfig"
source "fs/Kconfig"
+menu "Instrumentation Support"
+
source "arch/s390/oprofile/Kconfig"
+config KPROBES
+ bool "Kprobes (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && MODULES
+ help
+ Kprobes allows you to trap at almost any kernel address and
+ execute a callback function. register_kprobe() establishes
+ a probepoint and specifies the callback. Kprobes is useful
+ for kernel debugging, non-intrusive instrumentation and testing.
+ If in doubt, say "N".
+
+endmenu
+
source "arch/s390/Kconfig.debug"
source "security/Kconfig"
diff --git a/arch/s390/appldata/appldata.h b/arch/s390/appldata/appldata.h
index 71d65eb30650..0429481dea63 100644
--- a/arch/s390/appldata/appldata.h
+++ b/arch/s390/appldata/appldata.h
@@ -29,22 +29,6 @@
#define CTL_APPLDATA_NET_SUM 2125
#define CTL_APPLDATA_PROC 2126
-#ifndef CONFIG_64BIT
-
-#define APPLDATA_START_INTERVAL_REC 0x00 /* Function codes for */
-#define APPLDATA_STOP_REC 0x01 /* DIAG 0xDC */
-#define APPLDATA_GEN_EVENT_RECORD 0x02
-#define APPLDATA_START_CONFIG_REC 0x03
-
-#else
-
-#define APPLDATA_START_INTERVAL_REC 0x80
-#define APPLDATA_STOP_REC 0x81
-#define APPLDATA_GEN_EVENT_RECORD 0x82
-#define APPLDATA_START_CONFIG_REC 0x83
-
-#endif /* CONFIG_64BIT */
-
#define P_INFO(x...) printk(KERN_INFO MY_PRINT_NAME " info: " x)
#define P_ERROR(x...) printk(KERN_ERR MY_PRINT_NAME " error: " x)
#define P_WARNING(x...) printk(KERN_WARNING MY_PRINT_NAME " status: " x)
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index a0a94e0ef8d1..b69ed742f981 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -14,20 +14,20 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
-#include <asm/smp.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/page-flags.h>
#include <linux/swap.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>
-#include <asm/timer.h>
-//#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/workqueue.h>
+#include <asm/appldata.h>
+#include <asm/timer.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+#include <asm/smp.h>
#include "appldata.h"
@@ -39,34 +39,6 @@
#define TOD_MICRO 0x01000 /* nr. of TOD clock units
for 1 microsecond */
-
-/*
- * Parameter list for DIAGNOSE X'DC'
- */
-#ifndef CONFIG_64BIT
-struct appldata_parameter_list {
- u16 diag; /* The DIAGNOSE code X'00DC' */
- u8 function; /* The function code for the DIAGNOSE */
- u8 parlist_length; /* Length of the parameter list */
- u32 product_id_addr; /* Address of the 16-byte product ID */
- u16 reserved;
- u16 buffer_length; /* Length of the application data buffer */
- u32 buffer_addr; /* Address of the application data buffer */
-};
-#else
-struct appldata_parameter_list {
- u16 diag;
- u8 function;
- u8 parlist_length;
- u32 unused01;
- u16 reserved;
- u16 buffer_length;
- u32 unused02;
- u64 product_id_addr;
- u64 buffer_addr;
-};
-#endif /* CONFIG_64BIT */
-
/*
* /proc entries (sysctl)
*/
@@ -181,46 +153,17 @@ static void appldata_work_fn(void *data)
int appldata_diag(char record_nr, u16 function, unsigned long buffer,
u16 length, char *mod_lvl)
{
- unsigned long ry;
- struct appldata_product_id {
- char prod_nr[7]; /* product nr. */
- char prod_fn[2]; /* product function */
- char record_nr; /* record nr. */
- char version_nr[2]; /* version */
- char release_nr[2]; /* release */
- char mod_lvl[2]; /* modification lvl. */
- } appldata_product_id = {
- /* all strings are EBCDIC, record_nr is byte */
+ struct appldata_product_id id = {
.prod_nr = {0xD3, 0xC9, 0xD5, 0xE4,
- 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
- .prod_fn = {0xD5, 0xD3}, /* "NL" */
+ 0xE7, 0xD2, 0xD9}, /* "LINUXKR" */
+ .prod_fn = 0xD5D3, /* "NL" */
.record_nr = record_nr,
- .version_nr = {0xF2, 0xF6}, /* "26" */
- .release_nr = {0xF0, 0xF1}, /* "01" */
- .mod_lvl = {mod_lvl[0], mod_lvl[1]},
- };
- struct appldata_parameter_list appldata_parameter_list = {
- .diag = 0xDC,
- .function = function,
- .parlist_length =
- sizeof(appldata_parameter_list),
- .buffer_length = length,
- .product_id_addr =
- (unsigned long) &appldata_product_id,
- .buffer_addr = virt_to_phys((void *) buffer)
+ .version_nr = 0xF2F6, /* "26" */
+ .release_nr = 0xF0F1, /* "01" */
+ .mod_lvl = (mod_lvl[0]) << 8 | mod_lvl[1],
};
- if (!MACHINE_IS_VM)
- return -ENOSYS;
- ry = -1;
- asm volatile(
- "diag %1,%0,0xDC\n\t"
- : "=d" (ry)
- : "d" (&appldata_parameter_list),
- "m" (appldata_parameter_list),
- "m" (appldata_product_id)
- : "cc");
- return (int) ry;
+ return appldata_asm(&id, function, (void *) buffer, length);
}
/************************ timer, work, DIAG <END> ****************************/
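
[Editor's aside, not part of the patch] In the appldata_base.c hunk above, the per-bitness DIAG X'DC' parameter list and the open-coded inline asm are removed, and appldata_diag() now just fills in a product id and calls appldata_asm() from the new <asm/appldata.h> include. A rough sketch of the interface implied by that call site follows; the field widths and the appldata_asm() prototype are inferred from the new initializers (e.g. .prod_fn = 0xD5D3), not quoted from the header.

/* Sketch only: inferred from the use in appldata_diag() above;
 * the real <asm/appldata.h> may differ in detail. */
struct appldata_product_id {
	char prod_nr[7];	/* product number (EBCDIC) */
	u16  prod_fn;		/* product function */
	char record_nr;		/* record number */
	u16  version_nr;	/* version */
	u16  release_nr;	/* release */
	u16  mod_lvl;		/* modification level */
} __attribute__ ((packed));

/* Issues DIAG X'DC' for the given product id, function code and data
 * buffer, replacing the parameter list and inline asm removed above. */
int appldata_asm(struct appldata_product_id *id, unsigned short fn,
		 void *buffer, unsigned short length);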
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 161acc5c8a1b..76a15523ae9e 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -16,6 +16,7 @@
#include <linux/kernel_stat.h>
#include <linux/netdevice.h>
#include <linux/sched.h>
+#include <asm/appldata.h>
#include <asm/smp.h>
#include "appldata.h"
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index f1d4591eddbb..35da53986b1b 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -428,6 +428,7 @@ CONFIG_S390_TAPE_34XX=m
# CONFIG_VMLOGRDR is not set
# CONFIG_VMCP is not set
# CONFIG_MONREADER is not set
+CONFIG_MONWRITER=m
#
# Cryptographic devices
diff --git a/arch/s390/hypfs/hypfs.h b/arch/s390/hypfs/hypfs.h
index ea5567be00fc..f3dbd91965c6 100644
--- a/arch/s390/hypfs/hypfs.h
+++ b/arch/s390/hypfs/hypfs.h
@@ -1,5 +1,5 @@
/*
- * fs/hypfs/hypfs.h
+ * arch/s390/hypfs/hypfs.h
* Hypervisor filesystem for Linux on s390.
*
* Copyright (C) IBM Corp. 2006
diff --git a/arch/s390/hypfs/hypfs_diag.c b/arch/s390/hypfs/hypfs_diag.c
index 1785bce2b919..75144efbb92b 100644
--- a/arch/s390/hypfs/hypfs_diag.c
+++ b/arch/s390/hypfs/hypfs_diag.c
@@ -1,5 +1,5 @@
/*
- * fs/hypfs/hypfs_diag.c
+ * arch/s390/hypfs/hypfs_diag.c
* Hypervisor filesystem for Linux on s390. Diag 204 and 224
* implementation.
*
@@ -432,12 +432,14 @@ static int diag204_probe(void)
buf = diag204_get_buffer(INFO_EXT, &pages);
if (!IS_ERR(buf)) {
- if (diag204(SUBC_STIB7 | INFO_EXT, pages, buf) >= 0) {
+ if (diag204((unsigned long)SUBC_STIB7 |
+ (unsigned long)INFO_EXT, pages, buf) >= 0) {
diag204_store_sc = SUBC_STIB7;
diag204_info_type = INFO_EXT;
goto out;
}
- if (diag204(SUBC_STIB6 | INFO_EXT, pages, buf) >= 0) {
+ if (diag204((unsigned long)SUBC_STIB6 |
+ (unsigned long)INFO_EXT, pages, buf) >= 0) {
diag204_store_sc = SUBC_STIB7;
diag204_info_type = INFO_EXT;
goto out;
@@ -452,7 +454,8 @@ static int diag204_probe(void)
rc = PTR_ERR(buf);
goto fail_alloc;
}
- if (diag204(SUBC_STIB4 | INFO_SIMPLE, pages, buf) >= 0) {
+ if (diag204((unsigned long)SUBC_STIB4 |
+ (unsigned long)INFO_SIMPLE, pages, buf) >= 0) {
diag204_store_sc = SUBC_STIB4;
diag204_info_type = INFO_SIMPLE;
goto out;
@@ -476,7 +479,8 @@ static void *diag204_store(void)
buf = diag204_get_buffer(diag204_info_type, &pages);
if (IS_ERR(buf))
goto out;
- if (diag204(diag204_store_sc | diag204_info_type, pages, buf) < 0)
+ if (diag204((unsigned long)diag204_store_sc |
+ (unsigned long)diag204_info_type, pages, buf) < 0)
return ERR_PTR(-ENOSYS);
out:
return buf;
@@ -531,7 +535,7 @@ __init int hypfs_diag_init(void)
return rc;
}
-__exit void hypfs_diag_exit(void)
+void hypfs_diag_exit(void)
{
diag224_delete_name_table();
diag204_free_buffer();
diff --git a/arch/s390/hypfs/hypfs_diag.h b/arch/s390/hypfs/hypfs_diag.h
index 793dea6b9bb6..256b384aebe1 100644
--- a/arch/s390/hypfs/hypfs_diag.h
+++ b/arch/s390/hypfs/hypfs_diag.h
@@ -1,5 +1,5 @@
/*
- * fs/hypfs/hypfs_diag.h
+ * arch/s390/hypfs_diag.h
* Hypervisor filesystem for Linux on s390.
*
* Copyright (C) IBM Corp. 2006
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 18c091925ea5..bdade5f2e325 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -1,5 +1,5 @@
/*
- * fs/hypfs/inode.c
+ * arch/s390/hypfs/inode.c
* Hypervisor filesystem for Linux on s390.
*
* Copyright (C) IBM Corp. 2006
@@ -312,10 +312,12 @@ static void hypfs_kill_super(struct super_block *sb)
{
struct hypfs_sb_info *sb_info = sb->s_fs_info;
- hypfs_delete_tree(sb->s_root);
- hypfs_remove(sb_info->update_file);
- kfree(sb->s_fs_info);
- sb->s_fs_info = NULL;
+ if (sb->s_root) {
+ hypfs_delete_tree(sb->s_root);
+ hypfs_remove(sb_info->update_file);
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
+ }
kill_litter_super(sb);
}
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile
index 9a33ed6ca696..aa978978d3d1 100644
--- a/arch/s390/kernel/Makefile
+++ b/arch/s390/kernel/Makefile
@@ -6,7 +6,7 @@ EXTRA_AFLAGS := -traditional
obj-y := bitmap.o traps.o time.o process.o \
setup.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \
- semaphore.o s390_ext.o debug.o profile.o irq.o reipl_diag.o
+ semaphore.o s390_ext.o debug.o profile.o irq.o ipl.o
obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o)
obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o)
@@ -24,6 +24,7 @@ obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o \
obj-$(CONFIG_VIRT_TIMER) += vtime.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_KPROBES) += kprobes.o
# Kexec part
S390_KEXEC_OBJS := machine_kexec.o crash.o
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 5b5799ac8f83..0c712b78a7e8 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -505,6 +505,8 @@ pgm_no_vtime2:
mvc __THREAD_per+__PER_address(4,%r1),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
oi __TI_flags+3(%r9),_TIF_SINGLE_STEP # set TIF_SINGLE_STEP
+ tm SP_PSW+1(%r15),0x01 # kernel per event ?
+ bz BASED(kernel_per)
l %r3,__LC_PGM_ILC # load program interruption code
la %r8,0x7f
nr %r8,%r3 # clear per-event-bit and ilc
@@ -536,6 +538,16 @@ pgm_no_vtime3:
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
b BASED(sysc_do_svc)
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+ mvi SP_TRAP+1(%r15),0x28 # set trap indication to pgm check
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ l %r1,BASED(.Lhandle_per) # load adr. of per handler
+ la %r14,BASED(sysc_leave) # load adr. of system return
+ br %r1 # branch to do_single_step
+
/*
* IO interrupt handler routine
*/
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 56f5f613b868..29bbfbab7332 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -518,6 +518,8 @@ pgm_no_vtime2:
#endif
lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
lg %r1,__TI_task(%r9)
+ tm SP_PSW+1(%r15),0x01 # kernel per event ?
+ jz kernel_per
mvc __THREAD_per+__PER_atmid(2,%r1),__LC_PER_ATMID
mvc __THREAD_per+__PER_address(8,%r1),__LC_PER_ADDRESS
mvc __THREAD_per+__PER_access_id(1,%r1),__LC_PER_ACCESS_ID
@@ -553,6 +555,16 @@ pgm_no_vtime3:
stosm __SF_EMPTY(%r15),0x03 # reenable interrupts
j sysc_do_svc
+#
+# per was called from kernel, must be kprobes
+#
+kernel_per:
+ lhi %r0,__LC_PGM_OLD_PSW
+ sth %r0,SP_TRAP(%r15) # set trap indication to pgm check
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ larl %r14,sysc_leave # load adr. of system ret, no work
+ jg do_single_step # branch to do_single_step
+
/*
* IO interrupt handler routine
*/
@@ -815,7 +827,7 @@ restart_go:
*/
stack_overflow:
lg %r15,__LC_PANIC_STACK # change to panic stack
- aghi %r1,-SP_SIZE
+ aghi %r15,-SP_SIZE
mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
la %r1,__LC_SAVE_AREA
@@ -823,7 +835,7 @@ stack_overflow:
je 0f
chi %r12,__LC_PGM_OLD_PSW
je 0f
- la %r1,__LC_SAVE_AREA+16
+ la %r1,__LC_SAVE_AREA+32
0: mvc SP_R12(32,%r15),0(%r1) # move %r12-%r15 to stack
xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) # clear back chain
la %r2,SP_PTREGS(%r15) # load pt_regs
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index adad8863ee2f..0f1db268a8a9 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -272,7 +272,7 @@ iplstart:
# load parameter file from ipl device
#
.Lagain1:
- l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # ramdisk loc. is temp
+ l %r2,.Linitrd # ramdisk loc. is temp
bas %r14,.Lloader # load parameter file
ltr %r2,%r2 # got anything ?
bz .Lnopf
@@ -280,7 +280,7 @@ iplstart:
bnh .Lnotrunc
la %r2,895
.Lnotrunc:
- l %r4,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
+ l %r4,.Linitrd
clc 0(3,%r4),.L_hdr # if it is HDRx
bz .Lagain1 # skip dataset header
clc 0(3,%r4),.L_eof # if it is EOFx
@@ -323,14 +323,15 @@ iplstart:
# load ramdisk from ipl device
#
.Lagain2:
- l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # addr of ramdisk
+ l %r2,.Linitrd # addr of ramdisk
+ st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
bas %r14,.Lloader # load ramdisk
st %r2,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r12) # store size of ramdisk
ltr %r2,%r2
bnz .Lrdcont
st %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12) # no ramdisk found
.Lrdcont:
- l %r2,INITRD_START+ARCH_OFFSET-PARMAREA(%r12)
+ l %r2,.Linitrd
clc 0(3,%r2),.L_hdr # skip HDRx and EOFx
bz .Lagain2
@@ -379,6 +380,7 @@ iplstart:
l %r1,.Lstartup
br %r1
+.Linitrd:.long _end + 0x400000 # default address of initrd
.Lparm: .long PARMAREA
.Lstartup: .long startup
.Lcvtab:.long _ebcasc # ebcdic to ascii table
@@ -479,65 +481,6 @@ start:
.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
-.macro GET_IPL_DEVICE
-.Lget_ipl_device:
- l %r1,0xb8 # get sid
- sll %r1,15 # test if subchannel is enabled
- srl %r1,31
- ltr %r1,%r1
- bz 2f-.LPG1(%r13) # subchannel disabled
- l %r1,0xb8
- la %r5,.Lipl_schib-.LPG1(%r13)
- stsch 0(%r5) # get schib of subchannel
- bnz 2f-.LPG1(%r13) # schib not available
- tm 5(%r5),0x01 # devno valid?
- bno 2f-.LPG1(%r13)
- la %r6,ipl_parameter_flags-.LPG1(%r13)
- oi 3(%r6),0x01 # set flag
- la %r2,ipl_devno-.LPG1(%r13)
- mvc 0(2,%r2),6(%r5) # store devno
- tm 4(%r5),0x80 # qdio capable device?
- bno 2f-.LPG1(%r13)
- oi 3(%r6),0x02 # set flag
-
- # copy ipl parameters
-
- lhi %r0,4096
- l %r2,20(%r0) # get address of parameter list
- lhi %r3,IPL_PARMBLOCK_ORIGIN
- st %r3,20(%r0)
- lhi %r4,1
- cr %r2,%r3 # start parameters < destination ?
- jl 0f
- lhi %r1,1 # copy direction is upwards
- j 1f
-0: lhi %r1,-1 # copy direction is downwards
- ar %r2,%r0
- ar %r3,%r0
- ar %r2,%r1
- ar %r3,%r1
-1: mvc 0(1,%r3),0(%r2) # finally copy ipl parameters
- ar %r3,%r1
- ar %r2,%r1
- sr %r0,%r4
- jne 1b
- b 2f-.LPG1(%r13)
-
- .align 4
-.Lipl_schib:
- .rept 13
- .long 0
- .endr
-
- .globl ipl_parameter_flags
-ipl_parameter_flags:
- .long 0
- .globl ipl_devno
-ipl_devno:
- .word 0
-2:
-.endm
-
#ifdef CONFIG_64BIT
#include "head64.S"
#else
diff --git a/arch/s390/kernel/head31.S b/arch/s390/kernel/head31.S
index a4dc61f3285e..1fa9fa1ca740 100644
--- a/arch/s390/kernel/head31.S
+++ b/arch/s390/kernel/head31.S
@@ -26,8 +26,8 @@ startup:basr %r13,0 # get base
#
.org PARMAREA
.long 0,0 # IPL_DEVICE
- .long 0,RAMDISK_ORIGIN # INITRD_START
- .long 0,RAMDISK_SIZE # INITRD_SIZE
+ .long 0,0 # INITRD_START
+ .long 0,0 # INITRD_SIZE
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
@@ -37,12 +37,23 @@ startup:basr %r13,0 # get base
startup_continue:
basr %r13,0 # get base
-.LPG1: GET_IPL_DEVICE
+.LPG1: mvi __LC_AR_MODE_ID,0 # set ESA flag (mode 0)
lctl %c0,%c15,.Lctl-.LPG1(%r13) # load control registers
l %r12,.Lparmaddr-.LPG1(%r13) # pointer to parameter area
# move IPL device to lowcore
mvc __LC_IPLDEV(4),IPL_DEVICE-PARMAREA(%r12)
+#
+# Setup stack
+#
+ l %r15,.Linittu-.LPG1(%r13)
+ mvc __LC_CURRENT(4),__TI_task(%r15)
+ ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
+ st %r15,__LC_KERNEL_STACK # set end of kernel stack
+ ahi %r15,-96
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
+ l %r14,.Lipl_save_parameters-.LPG1(%r13)
+ basr %r14,%r14
#
# clear bss memory
#
@@ -114,6 +125,10 @@ startup_continue:
b .Lfchunk-.LPG1(%r13)
.align 4
+.Lipl_save_parameters:
+ .long ipl_save_parameters
+.Linittu:
+ .long init_thread_union
.Lpmask:
.byte 0
.align 8
@@ -273,7 +288,23 @@ startup_continue:
.Lbss_end: .long _end
.Lparmaddr: .long PARMAREA
.Lsccbaddr: .long .Lsccb
+
+ .globl ipl_schib
+ipl_schib:
+ .rept 13
+ .long 0
+ .endr
+
+ .globl ipl_flags
+ipl_flags:
+ .long 0
+ .globl ipl_devno
+ipl_devno:
+ .word 0
+
.org 0x12000
+.globl s390_readinfo_sccb
+s390_readinfo_sccb:
.Lsccb:
.hword 0x1000 # length, one page
.byte 0x00,0x00,0x00
@@ -302,16 +333,6 @@ startup_continue:
.globl _stext
_stext: basr %r13,0 # get base
.LPG3:
-#
-# Setup stack
-#
- l %r15,.Linittu-.LPG3(%r13)
- mvc __LC_CURRENT(4),__TI_task(%r15)
- ahi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union+THREAD_SIZE
- st %r15,__LC_KERNEL_STACK # set end of kernel stack
- ahi %r15,-96
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
-
# check control registers
stctl %c0,%c15,0(%r15)
oi 2(%r15),0x40 # enable sigp emergency signal
@@ -330,6 +351,5 @@ _stext: basr %r13,0 # get base
#
.align 8
.Ldw: .long 0x000a0000,0x00000000
-.Linittu:.long init_thread_union
.Lstart:.long start_kernel
.Laregs:.long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index 9d80c5b1ef95..a8bdd96494c7 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -26,8 +26,8 @@ startup:basr %r13,0 # get base
#
.org PARMAREA
.quad 0 # IPL_DEVICE
- .quad RAMDISK_ORIGIN # INITRD_START
- .quad RAMDISK_SIZE # INITRD_SIZE
+ .quad 0 # INITRD_START
+ .quad 0 # INITRD_SIZE
.org COMMAND_LINE
.byte "root=/dev/ram0 ro"
@@ -39,8 +39,8 @@ startup_continue:
basr %r13,0 # get base
.LPG1: sll %r13,1 # remove high order bit
srl %r13,1
- GET_IPL_DEVICE
lhi %r1,1 # mode 1 = esame
+ mvi __LC_AR_MODE_ID,1 # set esame flag
slr %r0,%r0 # set cpuid to zero
sigp %r1,%r0,0x12 # switch to esame mode
sam64 # switch to 64 bit mode
@@ -48,7 +48,18 @@ startup_continue:
lg %r12,.Lparmaddr-.LPG1(%r13)# pointer to parameter area
# move IPL device to lowcore
mvc __LC_IPLDEV(4),IPL_DEVICE+4-PARMAREA(%r12)
+#
+# Setup stack
+#
+ larl %r15,init_thread_union
+ lg %r14,__TI_task(%r15) # cache current in lowcore
+ stg %r14,__LC_CURRENT
+ aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
+ stg %r15,__LC_KERNEL_STACK # set end of kernel stack
+ aghi %r15,-160
+ xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
+ brasl %r14,ipl_save_parameters
#
# clear bss memory
#
@@ -239,6 +250,19 @@ startup_continue:
oi 7(%r12),0x80 # set IDTE flag
0:
+#
+# find out if we have the MVCOS instruction
+#
+ la %r1,0f-.LPG1(%r13) # set program check address
+ stg %r1,__LC_PGM_NEW_PSW+8
+ .short 0xc800 # mvcos 0(%r0),0(%r0),%r0
+ .short 0x0000
+ .short 0x0000
+0: tm 0x8f,0x13 # special-operation exception?
+ bno 1f-.LPG1(%r13) # if yes, MVCOS is present
+ oi 6(%r12),2 # set MVCOS flag
+1:
+
lpswe .Lentry-.LPG1(13) # jump to _stext in primary-space,
# virtual and never return ...
.align 16
@@ -268,7 +292,22 @@ startup_continue:
.Lparmaddr:
.quad PARMAREA
+ .globl ipl_schib
+ipl_schib:
+ .rept 13
+ .long 0
+ .endr
+
+ .globl ipl_flags
+ipl_flags:
+ .long 0
+ .globl ipl_devno
+ipl_devno:
+ .word 0
+
.org 0x12000
+.globl s390_readinfo_sccb
+s390_readinfo_sccb:
.Lsccb:
.hword 0x1000 # length, one page
.byte 0x00,0x00,0x00
@@ -297,24 +336,12 @@ startup_continue:
.globl _stext
_stext: basr %r13,0 # get base
.LPG3:
-#
-# Setup stack
-#
- larl %r15,init_thread_union
- lg %r14,__TI_task(%r15) # cache current in lowcore
- stg %r14,__LC_CURRENT
- aghi %r15,1<<(PAGE_SHIFT+THREAD_ORDER) # init_task_union + THREAD_SIZE
- stg %r15,__LC_KERNEL_STACK # set end of kernel stack
- aghi %r15,-160
- xc __SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15) # clear backchain
-
# check control registers
stctg %c0,%c15,0(%r15)
oi 6(%r15),0x40 # enable sigp emergency signal
oi 4(%r15),0x10 # switch on low address proctection
lctlg %c0,%c15,0(%r15)
-#
lam 0,15,.Laregs-.LPG3(%r13) # load access regs needed by uaccess
brasl %r14,start_kernel # go to C code
#
@@ -322,7 +349,7 @@ _stext: basr %r13,0 # get base
#
basr %r13,0
lpswe .Ldw-.(%r13) # load disabled wait psw
-#
+
.align 8
.Ldw: .quad 0x0002000180000000,0x0000000000000000
.Laregs: .long 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
new file mode 100644
index 000000000000..6555cc48e28f
--- /dev/null
+++ b/arch/s390/kernel/ipl.c
@@ -0,0 +1,942 @@
+/*
+ * arch/s390/kernel/ipl.c
+ * ipl/reipl/dump support for Linux on s390.
+ *
+ * Copyright (C) IBM Corp. 2005,2006
+ * Author(s): Michael Holzheu <holzheu@de.ibm.com>
+ * Heiko Carstens <heiko.carstens@de.ibm.com>
+ * Volker Sameske <sameske@de.ibm.com>
+ */
+
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/reboot.h>
+#include <asm/smp.h>
+#include <asm/setup.h>
+#include <asm/cpcmd.h>
+#include <asm/cio.h>
+
+#define IPL_PARM_BLOCK_VERSION 0
+
+enum ipl_type {
+ IPL_TYPE_NONE = 1,
+ IPL_TYPE_UNKNOWN = 2,
+ IPL_TYPE_CCW = 4,
+ IPL_TYPE_FCP = 8,
+};
+
+#define IPL_NONE_STR "none"
+#define IPL_UNKNOWN_STR "unknown"
+#define IPL_CCW_STR "ccw"
+#define IPL_FCP_STR "fcp"
+
+static char *ipl_type_str(enum ipl_type type)
+{
+ switch (type) {
+ case IPL_TYPE_NONE:
+ return IPL_NONE_STR;
+ case IPL_TYPE_CCW:
+ return IPL_CCW_STR;
+ case IPL_TYPE_FCP:
+ return IPL_FCP_STR;
+ case IPL_TYPE_UNKNOWN:
+ default:
+ return IPL_UNKNOWN_STR;
+ }
+}
+
+enum ipl_method {
+ IPL_METHOD_NONE,
+ IPL_METHOD_CCW_CIO,
+ IPL_METHOD_CCW_DIAG,
+ IPL_METHOD_CCW_VM,
+ IPL_METHOD_FCP_RO_DIAG,
+ IPL_METHOD_FCP_RW_DIAG,
+ IPL_METHOD_FCP_RO_VM,
+};
+
+enum shutdown_action {
+ SHUTDOWN_REIPL,
+ SHUTDOWN_DUMP,
+ SHUTDOWN_STOP,
+};
+
+#define SHUTDOWN_REIPL_STR "reipl"
+#define SHUTDOWN_DUMP_STR "dump"
+#define SHUTDOWN_STOP_STR "stop"
+
+static char *shutdown_action_str(enum shutdown_action action)
+{
+ switch (action) {
+ case SHUTDOWN_REIPL:
+ return SHUTDOWN_REIPL_STR;
+ case SHUTDOWN_DUMP:
+ return SHUTDOWN_DUMP_STR;
+ case SHUTDOWN_STOP:
+ return SHUTDOWN_STOP_STR;
+ default:
+ BUG();
+ }
+}
+
+enum diag308_subcode {
+ DIAG308_IPL = 3,
+ DIAG308_DUMP = 4,
+ DIAG308_SET = 5,
+ DIAG308_STORE = 6,
+};
+
+enum diag308_ipl_type {
+ DIAG308_IPL_TYPE_FCP = 0,
+ DIAG308_IPL_TYPE_CCW = 2,
+};
+
+enum diag308_opt {
+ DIAG308_IPL_OPT_IPL = 0x10,
+ DIAG308_IPL_OPT_DUMP = 0x20,
+};
+
+enum diag308_rc {
+ DIAG308_RC_OK = 1,
+};
+
+static int diag308_set_works = 0;
+
+static int reipl_capabilities = IPL_TYPE_UNKNOWN;
+static enum ipl_type reipl_type = IPL_TYPE_UNKNOWN;
+static enum ipl_method reipl_method = IPL_METHOD_NONE;
+static struct ipl_parameter_block *reipl_block_fcp;
+static struct ipl_parameter_block *reipl_block_ccw;
+
+static int dump_capabilities = IPL_TYPE_NONE;
+static enum ipl_type dump_type = IPL_TYPE_NONE;
+static enum ipl_method dump_method = IPL_METHOD_NONE;
+static struct ipl_parameter_block *dump_block_fcp;
+static struct ipl_parameter_block *dump_block_ccw;
+
+static enum shutdown_action on_panic_action = SHUTDOWN_STOP;
+
+static int diag308(unsigned long subcode, void *addr)
+{
+ register unsigned long _addr asm("0") = (unsigned long)addr;
+ register unsigned long _rc asm("1") = 0;
+
+ asm volatile (
+ " diag %0,%2,0x308\n"
+ "0: \n"
+ ".section __ex_table,\"a\"\n"
+#ifdef CONFIG_64BIT
+ " .align 8\n"
+ " .quad 0b, 0b\n"
+#else
+ " .align 4\n"
+ " .long 0b, 0b\n"
+#endif
+ ".previous\n"
+ : "+d" (_addr), "+d" (_rc)
+ : "d" (subcode) : "cc", "memory" );
+
+ return _rc;
+}
+
+/* SYSFS */
+
+#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
+static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \
+ char *page) \
+{ \
+ return sprintf(page, _format, _value); \
+} \
+static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
+ __ATTR(_name, S_IRUGO, sys_##_prefix##_##_name##_show, NULL);
+
+#define DEFINE_IPL_ATTR_RW(_prefix, _name, _fmt_out, _fmt_in, _value) \
+static ssize_t sys_##_prefix##_##_name##_show(struct subsystem *subsys, \
+ char *page) \
+{ \
+ return sprintf(page, _fmt_out, \
+ (unsigned long long) _value); \
+} \
+static ssize_t sys_##_prefix##_##_name##_store(struct subsystem *subsys,\
+ const char *buf, size_t len) \
+{ \
+ unsigned long long value; \
+ if (sscanf(buf, _fmt_in, &value) != 1) \
+ return -EINVAL; \
+ _value = value; \
+ return len; \
+} \
+static struct subsys_attribute sys_##_prefix##_##_name##_attr = \
+ __ATTR(_name,(S_IRUGO | S_IWUSR), \
+ sys_##_prefix##_##_name##_show, \
+ sys_##_prefix##_##_name##_store);
+
+static void make_attrs_ro(struct attribute **attrs)
+{
+ while (*attrs) {
+ (*attrs)->mode = S_IRUGO;
+ attrs++;
+ }
+}
+
+/*
+ * ipl section
+ */
+
+static enum ipl_type ipl_get_type(void)
+{
+ struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
+
+ if (!(ipl_flags & IPL_DEVNO_VALID))
+ return IPL_TYPE_UNKNOWN;
+ if (!(ipl_flags & IPL_PARMBLOCK_VALID))
+ return IPL_TYPE_CCW;
+ if (ipl->hdr.version > IPL_MAX_SUPPORTED_VERSION)
+ return IPL_TYPE_UNKNOWN;
+ if (ipl->hdr.pbt != DIAG308_IPL_TYPE_FCP)
+ return IPL_TYPE_UNKNOWN;
+ return IPL_TYPE_FCP;
+}
+
+static ssize_t ipl_type_show(struct subsystem *subsys, char *page)
+{
+ return sprintf(page, "%s\n", ipl_type_str(ipl_get_type()));
+}
+
+static struct subsys_attribute sys_ipl_type_attr = __ATTR_RO(ipl_type);
+
+static ssize_t sys_ipl_device_show(struct subsystem *subsys, char *page)
+{
+ struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
+
+ switch (ipl_get_type()) {
+ case IPL_TYPE_CCW:
+ return sprintf(page, "0.0.%04x\n", ipl_devno);
+ case IPL_TYPE_FCP:
+ return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
+ default:
+ return 0;
+ }
+}
+
+static struct subsys_attribute sys_ipl_device_attr =
+ __ATTR(device, S_IRUGO, sys_ipl_device_show, NULL);
+
+static ssize_t ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off,
+ size_t count)
+{
+ unsigned int size = IPL_PARMBLOCK_SIZE;
+
+ if (off > size)
+ return 0;
+ if (off + count > size)
+ count = size - off;
+ memcpy(buf, (void *)IPL_PARMBLOCK_START + off, count);
+ return count;
+}
+
+static struct bin_attribute ipl_parameter_attr = {
+ .attr = {
+ .name = "binary_parameter",
+ .mode = S_IRUGO,
+ .owner = THIS_MODULE,
+ },
+ .size = PAGE_SIZE,
+ .read = &ipl_parameter_read,
+};
+
+static ssize_t ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off,
+ size_t count)
+{
+ unsigned int size = IPL_PARMBLOCK_START->ipl_info.fcp.scp_data_len;
+ void *scp_data = &IPL_PARMBLOCK_START->ipl_info.fcp.scp_data;
+
+ if (off > size)
+ return 0;
+ if (off + count > size)
+ count = size - off;
+ memcpy(buf, scp_data + off, count);
+ return count;
+}
+
+static struct bin_attribute ipl_scp_data_attr = {
+ .attr = {
+ .name = "scp_data",
+ .mode = S_IRUGO,
+ .owner = THIS_MODULE,
+ },
+ .size = PAGE_SIZE,
+ .read = &ipl_scp_data_read,
+};
+
+/* FCP ipl device attributes */
+
+DEFINE_IPL_ATTR_RO(ipl_fcp, wwpn, "0x%016llx\n", (unsigned long long)
+ IPL_PARMBLOCK_START->ipl_info.fcp.wwpn);
+DEFINE_IPL_ATTR_RO(ipl_fcp, lun, "0x%016llx\n", (unsigned long long)
+ IPL_PARMBLOCK_START->ipl_info.fcp.lun);
+DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long)
+ IPL_PARMBLOCK_START->ipl_info.fcp.bootprog);
+DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long)
+ IPL_PARMBLOCK_START->ipl_info.fcp.br_lba);
+
+static struct attribute *ipl_fcp_attrs[] = {
+ &sys_ipl_type_attr.attr,
+ &sys_ipl_device_attr.attr,
+ &sys_ipl_fcp_wwpn_attr.attr,
+ &sys_ipl_fcp_lun_attr.attr,
+ &sys_ipl_fcp_bootprog_attr.attr,
+ &sys_ipl_fcp_br_lba_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ipl_fcp_attr_group = {
+ .attrs = ipl_fcp_attrs,
+};
+
+/* CCW ipl device attributes */
+
+static struct attribute *ipl_ccw_attrs[] = {
+ &sys_ipl_type_attr.attr,
+ &sys_ipl_device_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ipl_ccw_attr_group = {
+ .attrs = ipl_ccw_attrs,
+};
+
+/* UNKNOWN ipl device attributes */
+
+static struct attribute *ipl_unknown_attrs[] = {
+ &sys_ipl_type_attr.attr,
+ NULL,
+};
+
+static struct attribute_group ipl_unknown_attr_group = {
+ .attrs = ipl_unknown_attrs,
+};
+
+static decl_subsys(ipl, NULL, NULL);
+
+/*
+ * reipl section
+ */
+
+/* FCP reipl device attributes */
+
+DEFINE_IPL_ATTR_RW(reipl_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+ reipl_block_fcp->ipl_info.fcp.wwpn);
+DEFINE_IPL_ATTR_RW(reipl_fcp, lun, "0x%016llx\n", "%016llx\n",
+ reipl_block_fcp->ipl_info.fcp.lun);
+DEFINE_IPL_ATTR_RW(reipl_fcp, bootprog, "%lld\n", "%lld\n",
+ reipl_block_fcp->ipl_info.fcp.bootprog);
+DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n",
+ reipl_block_fcp->ipl_info.fcp.br_lba);
+DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
+ reipl_block_fcp->ipl_info.fcp.devno);
+
+static struct attribute *reipl_fcp_attrs[] = {
+ &sys_reipl_fcp_device_attr.attr,
+ &sys_reipl_fcp_wwpn_attr.attr,
+ &sys_reipl_fcp_lun_attr.attr,
+ &sys_reipl_fcp_bootprog_attr.attr,
+ &sys_reipl_fcp_br_lba_attr.attr,
+ NULL,
+};
+
+static struct attribute_group reipl_fcp_attr_group = {
+ .name = IPL_FCP_STR,
+ .attrs = reipl_fcp_attrs,
+};
+
+/* CCW reipl device attributes */
+
+DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+ reipl_block_ccw->ipl_info.ccw.devno);
+
+static struct attribute *reipl_ccw_attrs[] = {
+ &sys_reipl_ccw_device_attr.attr,
+ NULL,
+};
+
+static struct attribute_group reipl_ccw_attr_group = {
+ .name = IPL_CCW_STR,
+ .attrs = reipl_ccw_attrs,
+};
+
+/* reipl type */
+
+static int reipl_set_type(enum ipl_type type)
+{
+ if (!(reipl_capabilities & type))
+ return -EINVAL;
+
+ switch(type) {
+ case IPL_TYPE_CCW:
+ if (MACHINE_IS_VM)
+ reipl_method = IPL_METHOD_CCW_VM;
+ else
+ reipl_method = IPL_METHOD_CCW_CIO;
+ break;
+ case IPL_TYPE_FCP:
+ if (diag308_set_works)
+ reipl_method = IPL_METHOD_FCP_RW_DIAG;
+ else if (MACHINE_IS_VM)
+ reipl_method = IPL_METHOD_FCP_RO_VM;
+ else
+ reipl_method = IPL_METHOD_FCP_RO_DIAG;
+ break;
+ default:
+ reipl_method = IPL_METHOD_NONE;
+ }
+ reipl_type = type;
+ return 0;
+}
+
+static ssize_t reipl_type_show(struct subsystem *subsys, char *page)
+{
+ return sprintf(page, "%s\n", ipl_type_str(reipl_type));
+}
+
+static ssize_t reipl_type_store(struct subsystem *subsys, const char *buf,
+ size_t len)
+{
+ int rc = -EINVAL;
+
+ if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0)
+ rc = reipl_set_type(IPL_TYPE_CCW);
+ else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
+ rc = reipl_set_type(IPL_TYPE_FCP);
+ return (rc != 0) ? rc : len;
+}
+
+static struct subsys_attribute reipl_type_attr =
+ __ATTR(reipl_type, 0644, reipl_type_show, reipl_type_store);
+
+static decl_subsys(reipl, NULL, NULL);
+
+/*
+ * dump section
+ */
+
+/* FCP dump device attributes */
+
+DEFINE_IPL_ATTR_RW(dump_fcp, wwpn, "0x%016llx\n", "%016llx\n",
+ dump_block_fcp->ipl_info.fcp.wwpn);
+DEFINE_IPL_ATTR_RW(dump_fcp, lun, "0x%016llx\n", "%016llx\n",
+ dump_block_fcp->ipl_info.fcp.lun);
+DEFINE_IPL_ATTR_RW(dump_fcp, bootprog, "%lld\n", "%lld\n",
+ dump_block_fcp->ipl_info.fcp.bootprog);
+DEFINE_IPL_ATTR_RW(dump_fcp, br_lba, "%lld\n", "%lld\n",
+ dump_block_fcp->ipl_info.fcp.br_lba);
+DEFINE_IPL_ATTR_RW(dump_fcp, device, "0.0.%04llx\n", "0.0.%llx\n",
+ dump_block_fcp->ipl_info.fcp.devno);
+
+static struct attribute *dump_fcp_attrs[] = {
+ &sys_dump_fcp_device_attr.attr,
+ &sys_dump_fcp_wwpn_attr.attr,
+ &sys_dump_fcp_lun_attr.attr,
+ &sys_dump_fcp_bootprog_attr.attr,
+ &sys_dump_fcp_br_lba_attr.attr,
+ NULL,
+};
+
+static struct attribute_group dump_fcp_attr_group = {
+ .name = IPL_FCP_STR,
+ .attrs = dump_fcp_attrs,
+};
+
+/* CCW dump device attributes */
+
+DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
+ dump_block_ccw->ipl_info.ccw.devno);
+
+static struct attribute *dump_ccw_attrs[] = {
+ &sys_dump_ccw_device_attr.attr,
+ NULL,
+};
+
+static struct attribute_group dump_ccw_attr_group = {
+ .name = IPL_CCW_STR,
+ .attrs = dump_ccw_attrs,
+};
+
+/* dump type */
+
+static int dump_set_type(enum ipl_type type)
+{
+ if (!(dump_capabilities & type))
+ return -EINVAL;
+ switch(type) {
+ case IPL_TYPE_CCW:
+ if (MACHINE_IS_VM)
+ dump_method = IPL_METHOD_CCW_VM;
+ else
+ dump_method = IPL_METHOD_CCW_CIO;
+ break;
+ case IPL_TYPE_FCP:
+ dump_method = IPL_METHOD_FCP_RW_DIAG;
+ break;
+ default:
+ dump_method = IPL_METHOD_NONE;
+ }
+ dump_type = type;
+ return 0;
+}
+
+static ssize_t dump_type_show(struct subsystem *subsys, char *page)
+{
+ return sprintf(page, "%s\n", ipl_type_str(dump_type));
+}
+
+static ssize_t dump_type_store(struct subsystem *subsys, const char *buf,
+ size_t len)
+{
+ int rc = -EINVAL;
+
+ if (strncmp(buf, IPL_NONE_STR, strlen(IPL_NONE_STR)) == 0)
+ rc = dump_set_type(IPL_TYPE_NONE);
+ else if (strncmp(buf, IPL_CCW_STR, strlen(IPL_CCW_STR)) == 0)
+ rc = dump_set_type(IPL_TYPE_CCW);
+ else if (strncmp(buf, IPL_FCP_STR, strlen(IPL_FCP_STR)) == 0)
+ rc = dump_set_type(IPL_TYPE_FCP);
+ return (rc != 0) ? rc : len;
+}
+
+static struct subsys_attribute dump_type_attr =
+ __ATTR(dump_type, 0644, dump_type_show, dump_type_store);
+
+static decl_subsys(dump, NULL, NULL);
+
+#ifdef CONFIG_SMP
+static void dump_smp_stop_all(void)
+{
+ int cpu;
+ preempt_disable();
+ for_each_online_cpu(cpu) {
+ if (cpu == smp_processor_id())
+ continue;
+ while (signal_processor(cpu, sigp_stop) == sigp_busy)
+ udelay(10);
+ }
+ preempt_enable();
+}
+#else
+#define dump_smp_stop_all() do { } while (0)
+#endif
+
+/*
+ * Shutdown actions section
+ */
+
+static decl_subsys(shutdown_actions, NULL, NULL);
+
+/* on panic */
+
+static ssize_t on_panic_show(struct subsystem *subsys, char *page)
+{
+ return sprintf(page, "%s\n", shutdown_action_str(on_panic_action));
+}
+
+static ssize_t on_panic_store(struct subsystem *subsys, const char *buf,
+ size_t len)
+{
+ if (strncmp(buf, SHUTDOWN_REIPL_STR, strlen(SHUTDOWN_REIPL_STR)) == 0)
+ on_panic_action = SHUTDOWN_REIPL;
+ else if (strncmp(buf, SHUTDOWN_DUMP_STR,
+ strlen(SHUTDOWN_DUMP_STR)) == 0)
+ on_panic_action = SHUTDOWN_DUMP;
+ else if (strncmp(buf, SHUTDOWN_STOP_STR,
+ strlen(SHUTDOWN_STOP_STR)) == 0)
+ on_panic_action = SHUTDOWN_STOP;
+ else
+ return -EINVAL;
+
+ return len;
+}
+
+static struct subsys_attribute on_panic_attr =
+ __ATTR(on_panic, 0644, on_panic_show, on_panic_store);
+
+static void print_fcp_block(struct ipl_parameter_block *fcp_block)
+{
+ printk(KERN_EMERG "wwpn: %016llx\n",
+ (unsigned long long)fcp_block->ipl_info.fcp.wwpn);
+ printk(KERN_EMERG "lun: %016llx\n",
+ (unsigned long long)fcp_block->ipl_info.fcp.lun);
+ printk(KERN_EMERG "bootprog: %lld\n",
+ (unsigned long long)fcp_block->ipl_info.fcp.bootprog);
+ printk(KERN_EMERG "br_lba: %lld\n",
+ (unsigned long long)fcp_block->ipl_info.fcp.br_lba);
+ printk(KERN_EMERG "device: %llx\n",
+ (unsigned long long)fcp_block->ipl_info.fcp.devno);
+ printk(KERN_EMERG "opt: %x\n", fcp_block->ipl_info.fcp.opt);
+}
+
+void do_reipl(void)
+{
+ struct ccw_dev_id devid;
+ static char buf[100];
+
+ switch (reipl_type) {
+ case IPL_TYPE_CCW:
+ printk(KERN_EMERG "reboot on ccw device: 0.0.%04x\n",
+ reipl_block_ccw->ipl_info.ccw.devno);
+ break;
+ case IPL_TYPE_FCP:
+ printk(KERN_EMERG "reboot on fcp device:\n");
+ print_fcp_block(reipl_block_fcp);
+ break;
+ default:
+ break;
+ }
+
+ switch (reipl_method) {
+ case IPL_METHOD_CCW_CIO:
+ devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
+ devid.ssid = 0;
+ reipl_ccw_dev(&devid);
+ break;
+ case IPL_METHOD_CCW_VM:
+ sprintf(buf, "IPL %X", reipl_block_ccw->ipl_info.ccw.devno);
+ cpcmd(buf, NULL, 0, NULL);
+ break;
+ case IPL_METHOD_CCW_DIAG:
+ diag308(DIAG308_SET, reipl_block_ccw);
+ diag308(DIAG308_IPL, NULL);
+ break;
+ case IPL_METHOD_FCP_RW_DIAG:
+ diag308(DIAG308_SET, reipl_block_fcp);
+ diag308(DIAG308_IPL, NULL);
+ break;
+ case IPL_METHOD_FCP_RO_DIAG:
+ diag308(DIAG308_IPL, NULL);
+ break;
+ case IPL_METHOD_FCP_RO_VM:
+ cpcmd("IPL", NULL, 0, NULL);
+ break;
+ case IPL_METHOD_NONE:
+ default:
+ if (MACHINE_IS_VM)
+ cpcmd("IPL", NULL, 0, NULL);
+ diag308(DIAG308_IPL, NULL);
+ break;
+ }
+ panic("reipl failed!\n");
+}
+
+static void do_dump(void)
+{
+ struct ccw_dev_id devid;
+ static char buf[100];
+
+ switch (dump_type) {
+ case IPL_TYPE_CCW:
+ printk(KERN_EMERG "Automatic dump on ccw device: 0.0.%04x\n",
+ dump_block_ccw->ipl_info.ccw.devno);
+ break;
+ case IPL_TYPE_FCP:
+ printk(KERN_EMERG "Automatic dump on fcp device:\n");
+ print_fcp_block(dump_block_fcp);
+ break;
+ default:
+ return;
+ }
+
+ switch (dump_method) {
+ case IPL_METHOD_CCW_CIO:
+ dump_smp_stop_all();
+ devid.devno = dump_block_ccw->ipl_info.ccw.devno;
+ devid.ssid = 0;
+ reipl_ccw_dev(&devid);
+ break;
+ case IPL_METHOD_CCW_VM:
+ dump_smp_stop_all();
+ sprintf(buf, "STORE STATUS");
+ cpcmd(buf, NULL, 0, NULL);
+ sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
+ cpcmd(buf, NULL, 0, NULL);
+ break;
+ case IPL_METHOD_CCW_DIAG:
+ diag308(DIAG308_SET, dump_block_ccw);
+ diag308(DIAG308_DUMP, NULL);
+ break;
+ case IPL_METHOD_FCP_RW_DIAG:
+ diag308(DIAG308_SET, dump_block_fcp);
+ diag308(DIAG308_DUMP, NULL);
+ break;
+ case IPL_METHOD_NONE:
+ default:
+ return;
+ }
+ printk(KERN_EMERG "Dump failed!\n");
+}
+
+/* init functions */
+
+static int __init ipl_register_fcp_files(void)
+{
+ int rc;
+
+ rc = sysfs_create_group(&ipl_subsys.kset.kobj,
+ &ipl_fcp_attr_group);
+ if (rc)
+ goto out;
+ rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
+ &ipl_parameter_attr);
+ if (rc)
+ goto out_ipl_parm;
+ rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
+ &ipl_scp_data_attr);
+ if (!rc)
+ goto out;
+
+ sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr);
+
+out_ipl_parm:
+ sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
+out:
+ return rc;
+}
+
+static int __init ipl_init(void)
+{
+ int rc;
+
+ rc = firmware_register(&ipl_subsys);
+ if (rc)
+ return rc;
+ switch (ipl_get_type()) {
+ case IPL_TYPE_CCW:
+ rc = sysfs_create_group(&ipl_subsys.kset.kobj,
+ &ipl_ccw_attr_group);
+ break;
+ case IPL_TYPE_FCP:
+ rc = ipl_register_fcp_files();
+ break;
+ default:
+ rc = sysfs_create_group(&ipl_subsys.kset.kobj,
+ &ipl_unknown_attr_group);
+ break;
+ }
+ if (rc)
+ firmware_unregister(&ipl_subsys);
+ return rc;
+}
+
+static void __init reipl_probe(void)
+{
+ void *buffer;
+
+ buffer = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!buffer)
+ return;
+ if (diag308(DIAG308_STORE, buffer) == DIAG308_RC_OK)
+ diag308_set_works = 1;
+ free_page((unsigned long)buffer);
+}
+
+static int __init reipl_ccw_init(void)
+{
+ int rc;
+
+ reipl_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!reipl_block_ccw)
+ return -ENOMEM;
+ rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_ccw_attr_group);
+ if (rc) {
+ free_page((unsigned long)reipl_block_ccw);
+ return rc;
+ }
+ reipl_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
+ reipl_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
+ reipl_block_ccw->hdr.blk0_len = sizeof(reipl_block_ccw->ipl_info.ccw);
+ reipl_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+ if (ipl_get_type() == IPL_TYPE_CCW)
+ reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
+ reipl_capabilities |= IPL_TYPE_CCW;
+ return 0;
+}
+
+static int __init reipl_fcp_init(void)
+{
+ int rc;
+
+ if ((!diag308_set_works) && (ipl_get_type() != IPL_TYPE_FCP))
+ return 0;
+ if ((!diag308_set_works) && (ipl_get_type() == IPL_TYPE_FCP))
+ make_attrs_ro(reipl_fcp_attrs);
+
+ reipl_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!reipl_block_fcp)
+ return -ENOMEM;
+ rc = sysfs_create_group(&reipl_subsys.kset.kobj, &reipl_fcp_attr_group);
+ if (rc) {
+ free_page((unsigned long)reipl_block_fcp);
+ return rc;
+ }
+ if (ipl_get_type() == IPL_TYPE_FCP) {
+ memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE);
+ } else {
+ reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
+ reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
+ reipl_block_fcp->hdr.blk0_len =
+ sizeof(reipl_block_fcp->ipl_info.fcp);
+ reipl_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
+ reipl_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_IPL;
+ }
+ reipl_capabilities |= IPL_TYPE_FCP;
+ return 0;
+}
+
+static int __init reipl_init(void)
+{
+ int rc;
+
+ rc = firmware_register(&reipl_subsys);
+ if (rc)
+ return rc;
+ rc = subsys_create_file(&reipl_subsys, &reipl_type_attr);
+ if (rc) {
+ firmware_unregister(&reipl_subsys);
+ return rc;
+ }
+ rc = reipl_ccw_init();
+ if (rc)
+ return rc;
+ rc = reipl_fcp_init();
+ if (rc)
+ return rc;
+ rc = reipl_set_type(ipl_get_type());
+ if (rc)
+ return rc;
+ return 0;
+}
+
+static int __init dump_ccw_init(void)
+{
+ int rc;
+
+ dump_block_ccw = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!dump_block_ccw)
+ return -ENOMEM;
+ rc = sysfs_create_group(&dump_subsys.kset.kobj, &dump_ccw_attr_group);
+ if (rc) {
+ free_page((unsigned long)dump_block_ccw);
+ return rc;
+ }
+ dump_block_ccw->hdr.len = IPL_PARM_BLK_CCW_LEN;
+ dump_block_ccw->hdr.version = IPL_PARM_BLOCK_VERSION;
+ dump_block_ccw->hdr.blk0_len = sizeof(reipl_block_ccw->ipl_info.ccw);
+ dump_block_ccw->hdr.pbt = DIAG308_IPL_TYPE_CCW;
+ dump_capabilities |= IPL_TYPE_CCW;
+ return 0;
+}
+
+extern char s390_readinfo_sccb[];
+
+static int __init dump_fcp_init(void)
+{
+ int rc;
+
+ if(!(s390_readinfo_sccb[91] & 0x2))
+ return 0; /* LDIPL DUMP is not installed */
+ if (!diag308_set_works)
+ return 0;
+ dump_block_fcp = (void *) get_zeroed_page(GFP_KERNEL);
+ if (!dump_block_fcp)
+ return -ENOMEM;
+ rc = sysfs_create_group(&dump_subsys.kset.kobj, &dump_fcp_attr_group);
+ if (rc) {
+ free_page((unsigned long)dump_block_fcp);
+ return rc;
+ }
+ dump_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN;
+ dump_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION;
+ dump_block_fcp->hdr.blk0_len = sizeof(dump_block_fcp->ipl_info.fcp);
+ dump_block_fcp->hdr.pbt = DIAG308_IPL_TYPE_FCP;
+ dump_block_fcp->ipl_info.fcp.opt = DIAG308_IPL_OPT_DUMP;
+ dump_capabilities |= IPL_TYPE_FCP;
+ return 0;
+}
+
+#define SHUTDOWN_ON_PANIC_PRIO 0
+
+static int shutdown_on_panic_notify(struct notifier_block *self,
+ unsigned long event, void *data)
+{
+ if (on_panic_action == SHUTDOWN_DUMP)
+ do_dump();
+ else if (on_panic_action == SHUTDOWN_REIPL)
+ do_reipl();
+ return NOTIFY_OK;
+}
+
+static struct notifier_block shutdown_on_panic_nb = {
+ .notifier_call = shutdown_on_panic_notify,
+ .priority = SHUTDOWN_ON_PANIC_PRIO
+};
+
+static int __init dump_init(void)
+{
+ int rc;
+
+ rc = firmware_register(&dump_subsys);
+ if (rc)
+ return rc;
+ rc = subsys_create_file(&dump_subsys, &dump_type_attr);
+ if (rc) {
+ firmware_unregister(&dump_subsys);
+ return rc;
+ }
+ rc = dump_ccw_init();
+ if (rc)
+ return rc;
+ rc = dump_fcp_init();
+ if (rc)
+ return rc;
+ dump_set_type(IPL_TYPE_NONE);
+ return 0;
+}
+
+static int __init shutdown_actions_init(void)
+{
+ int rc;
+
+ rc = firmware_register(&shutdown_actions_subsys);
+ if (rc)
+ return rc;
+ rc = subsys_create_file(&shutdown_actions_subsys, &on_panic_attr);
+ if (rc) {
+ firmware_unregister(&shutdown_actions_subsys);
+ return rc;
+ }
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &shutdown_on_panic_nb);
+ return 0;
+}
+
+static int __init s390_ipl_init(void)
+{
+ int rc;
+
+ reipl_probe();
+ rc = ipl_init();
+ if (rc)
+ return rc;
+ rc = reipl_init();
+ if (rc)
+ return rc;
+ rc = dump_init();
+ if (rc)
+ return rc;
+ rc = shutdown_actions_init();
+ if (rc)
+ return rc;
+ return 0;
+}
+
+__initcall(s390_ipl_init);
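
[Editor's aside, not part of the patch] As a reading aid for the sysfs plumbing in the new ipl.c: expanding one use of the DEFINE_IPL_ATTR_RW() macro by hand shows what each attribute definition generates. The expansion below is derived mechanically from the macro as defined in this file; it is not additional code in the tree.

/* Hand expansion of
 *   DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
 *                      reipl_block_ccw->ipl_info.ccw.devno);
 * shown only to illustrate what the macro generates. */
static ssize_t sys_reipl_ccw_device_show(struct subsystem *subsys, char *page)
{
	return sprintf(page, "0.0.%04llx\n",
		       (unsigned long long) reipl_block_ccw->ipl_info.ccw.devno);
}

static ssize_t sys_reipl_ccw_device_store(struct subsystem *subsys,
					  const char *buf, size_t len)
{
	unsigned long long value;

	if (sscanf(buf, "0.0.%llx\n", &value) != 1)
		return -EINVAL;
	reipl_block_ccw->ipl_info.ccw.devno = value;
	return len;
}

static struct subsys_attribute sys_reipl_ccw_device_attr =
	__ATTR(device, (S_IRUGO | S_IWUSR),
	       sys_reipl_ccw_device_show, sys_reipl_ccw_device_store);

Since the reipl subsystem is registered with firmware_register() and the attribute group is named "ccw", the resulting file should appear as /sys/firmware/reipl/ccw/device, readable and writable in the same 0.0.xxxx bus-id format.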
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
new file mode 100644
index 000000000000..ca28fb0b3790
--- /dev/null
+++ b/arch/s390/kernel/kprobes.c
@@ -0,0 +1,657 @@
+/*
+ * Kernel Probes (KProbes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2006
+ *
+ * s390 port, used ppc64 as template. Mike Grundy <grundym@us.ibm.com>
+ */
+
+#include <linux/config.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+#include <linux/preempt.h>
+#include <linux/stop_machine.h>
+#include <asm/cacheflush.h>
+#include <asm/kdebug.h>
+#include <asm/sections.h>
+#include <asm/uaccess.h>
+#include <linux/module.h>
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ /* Make sure the probe isn't going on a difficult instruction */
+ if (is_prohibited_opcode((kprobe_opcode_t *) p->addr))
+ return -EINVAL;
+
+ if ((unsigned long)p->addr & 0x01) {
+ printk("Attempt to register kprobe at an unaligned address\n");
+ return -EINVAL;
+ }
+
+ /* Use the get_insn_slot() facility for correctness */
+ if (!(p->ainsn.insn = get_insn_slot()))
+ return -ENOMEM;
+
+ memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+
+ get_instruction_type(&p->ainsn);
+ p->opcode = *p->addr;
+ return 0;
+}
+
+int __kprobes is_prohibited_opcode(kprobe_opcode_t *instruction)
+{
+ switch (*(__u8 *) instruction) {
+ case 0x0c: /* bassm */
+ case 0x0b: /* bsm */
+ case 0x83: /* diag */
+ case 0x44: /* ex */
+ return -EINVAL;
+ }
+ switch (*(__u16 *) instruction) {
+ case 0x0101: /* pr */
+ case 0xb25a: /* bsa */
+ case 0xb240: /* bakr */
+ case 0xb258: /* bsg */
+ case 0xb218: /* pc */
+ case 0xb228: /* pt */
+ return -EINVAL;
+ }
+ return 0;
+}
+
+void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
+{
+ /* default fixup method */
+ ainsn->fixup = FIXUP_PSW_NORMAL;
+
+ /* save r1 operand */
+ ainsn->reg = (*ainsn->insn & 0xf0) >> 4;
+
+ /* save the instruction length (pop 5-5) in bytes */
+ switch (*(__u8 *) (ainsn->insn) >> 4) {
+ case 0:
+ ainsn->ilen = 2;
+ break;
+ case 1:
+ case 2:
+ ainsn->ilen = 4;
+ break;
+ case 3:
+ ainsn->ilen = 6;
+ break;
+ }
+
+ switch (*(__u8 *) ainsn->insn) {
+ case 0x05: /* balr */
+ case 0x0d: /* basr */
+ ainsn->fixup = FIXUP_RETURN_REGISTER;
+ /* if r2 = 0, no branch will be taken */
+ if ((*ainsn->insn & 0x0f) == 0)
+ ainsn->fixup |= FIXUP_BRANCH_NOT_TAKEN;
+ break;
+ case 0x06: /* bctr */
+ case 0x07: /* bcr */
+ ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+ break;
+ case 0x45: /* bal */
+ case 0x4d: /* bas */
+ ainsn->fixup = FIXUP_RETURN_REGISTER;
+ break;
+ case 0x47: /* bc */
+ case 0x46: /* bct */
+ case 0x86: /* bxh */
+ case 0x87: /* bxle */
+ ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+ break;
+ case 0x82: /* lpsw */
+ ainsn->fixup = FIXUP_NOT_REQUIRED;
+ break;
+ case 0xb2: /* lpswe */
+ if (*(((__u8 *) ainsn->insn) + 1) == 0xb2) {
+ ainsn->fixup = FIXUP_NOT_REQUIRED;
+ }
+ break;
+ case 0xa7: /* bras */
+ if ((*ainsn->insn & 0x0f) == 0x05) {
+ ainsn->fixup |= FIXUP_RETURN_REGISTER;
+ }
+ break;
+ case 0xc0:
+ if ((*ainsn->insn & 0x0f) == 0x00 /* larl */
+ || (*ainsn->insn & 0x0f) == 0x05) /* brasl */
+ ainsn->fixup |= FIXUP_RETURN_REGISTER;
+ break;
+ case 0xeb:
+ if (*(((__u8 *) ainsn->insn) + 5 ) == 0x44 || /* bxhg */
+ *(((__u8 *) ainsn->insn) + 5) == 0x45) {/* bxleg */
+ ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+ }
+ break;
+ case 0xe3: /* bctg */
+ if (*(((__u8 *) ainsn->insn) + 5) == 0x46) {
+ ainsn->fixup = FIXUP_BRANCH_NOT_TAKEN;
+ }
+ break;
+ }
+}
+
+static int __kprobes swap_instruction(void *aref)
+{
+ struct ins_replace_args *args = aref;
+ int err = -EFAULT;
+
+ asm volatile(
+ "0: mvc 0(2,%2),0(%3)\n"
+ "1: la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b,2b)
+ : "+d" (err), "=m" (*args->ptr)
+ : "a" (args->ptr), "a" (&args->new), "m" (args->new));
+ return err;
+}
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long status = kcb->kprobe_status;
+ struct ins_replace_args args;
+
+ args.ptr = p->addr;
+ args.old = p->opcode;
+ args.new = BREAKPOINT_INSTRUCTION;
+
+ kcb->kprobe_status = KPROBE_SWAP_INST;
+ stop_machine_run(swap_instruction, &args, NR_CPUS);
+ kcb->kprobe_status = status;
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long status = kcb->kprobe_status;
+ struct ins_replace_args args;
+
+ args.ptr = p->addr;
+ args.old = BREAKPOINT_INSTRUCTION;
+ args.new = p->opcode;
+
+ kcb->kprobe_status = KPROBE_SWAP_INST;
+ stop_machine_run(swap_instruction, &args, NR_CPUS);
+ kcb->kprobe_status = status;
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ mutex_lock(&kprobe_mutex);
+ free_insn_slot(p->ainsn.insn);
+ mutex_unlock(&kprobe_mutex);
+}
+
+static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
+{
+ per_cr_bits kprobe_per_regs[1];
+
+ memset(kprobe_per_regs, 0, sizeof(per_cr_bits));
+ regs->psw.addr = (unsigned long)p->ainsn.insn | PSW_ADDR_AMODE;
+
+ /* Set up the per control reg info, will pass to lctl */
+ kprobe_per_regs[0].em_instruction_fetch = 1;
+ kprobe_per_regs[0].starting_addr = (unsigned long)p->ainsn.insn;
+ kprobe_per_regs[0].ending_addr = (unsigned long)p->ainsn.insn + 1;
+
+ /* Set the PER control regs, turns on single step for this address */
+ __ctl_load(kprobe_per_regs, 9, 11);
+ regs->psw.mask |= PSW_MASK_PER;
+ regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.kprobe_saved_imask = kcb->kprobe_saved_imask;
+ memcpy(kcb->prev_kprobe.kprobe_saved_ctl, kcb->kprobe_saved_ctl,
+ sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_saved_imask = kcb->prev_kprobe.kprobe_saved_imask;
+ memcpy(kcb->kprobe_saved_ctl, kcb->prev_kprobe.kprobe_saved_ctl,
+ sizeof(kcb->kprobe_saved_ctl));
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ __get_cpu_var(current_kprobe) = p;
+ /* Save the interrupt and per flags */
+ kcb->kprobe_saved_imask = regs->psw.mask &
+ (PSW_MASK_PER | PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK);
+ /* Save the control regs that govern PER */
+ __ctl_store(kcb->kprobe_saved_ctl, 9, 11);
+}
+
+/* Called with kretprobe_lock held */
+void __kprobes arch_prepare_kretprobe(struct kretprobe *rp,
+ struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri;
+
+ if ((ri = get_free_rp_inst(rp)) != NULL) {
+ ri->rp = rp;
+ ri->task = current;
+ ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
+
+ /* Replace the return addr with trampoline addr */
+ regs->gprs[14] = (unsigned long)&kretprobe_trampoline;
+
+ add_rp_inst(ri);
+ } else {
+ rp->nmissed++;
+ }
+}
+
+static int __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ int ret = 0;
+ unsigned long *addr = (unsigned long *)
+ ((regs->psw.addr & PSW_ADDR_INSN) - 2);
+ struct kprobe_ctlblk *kcb;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ *p->ainsn.insn == BREAKPOINT_INSTRUCTION) {
+ regs->psw.mask &= ~PSW_MASK_PER;
+ regs->psw.mask |= kcb->kprobe_saved_imask;
+ goto no_kprobe;
+ }
+ /* We have reentered the kprobe_handler(), since
+ * another probe was hit while within the handler.
+ * We here save the original kprobes variables and
+ * just single step on the instruction of the new probe
+ * without calling any user handlers.
+ */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_REENTER;
+ return 1;
+ } else {
+ p = __get_cpu_var(current_kprobe);
+ if (p->break_handler && p->break_handler(p, regs)) {
+ goto ss_probe;
+ }
+ }
+ goto no_kprobe;
+ }
+
+ p = get_kprobe(addr);
+ if (!p) {
+ if (*addr != BREAKPOINT_INSTRUCTION) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ *
+ */
+ ret = 1;
+ }
+ /* Not one of ours: let kernel handle it */
+ goto no_kprobe;
+ }
+
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ set_current_kprobe(p, regs, kcb);
+ if (p->pre_handler && p->pre_handler(p, regs))
+ /* handler has already set things up, so skip ss setup */
+ return 1;
+
+ss_probe:
+ prepare_singlestep(p, regs);
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+}
+
+/*
+ * Function return probe trampoline:
+ * - init_kprobes() establishes a probepoint here
+ * - When the probed function returns, this probe
+ * causes the handlers to fire
+ */
+void __kprobes kretprobe_trampoline_holder(void)
+{
+ asm volatile(".global kretprobe_trampoline\n"
+ "kretprobe_trampoline: bcr 0,0\n");
+}
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head;
+ struct hlist_node *node, *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+
+ spin_lock_irqsave(&kretprobe_lock, flags);
+ head = kretprobe_inst_table_head(current);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because an multiple functions in the call path
+ * have a return probe installed on them, and/or more then one return
+ * return probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ if (ri->rp && ri->rp->handler)
+ ri->rp->handler(ri, regs);
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ recycle_rp_inst(ri);
+
+ if (orig_ret_address != trampoline_address) {
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+ }
+ BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address));
+ regs->psw.addr = orig_ret_address | PSW_ADDR_AMODE;
+
+ reset_current_kprobe();
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+ preempt_enable_no_resched();
+
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
+ return 1;
+}
+
+/*
+ * Called after single-stepping. p->addr is the address of the
+ * instruction whose first byte has been replaced by the "breakpoint"
+ * instruction. To avoid the SMP problems that can occur when we
+ * temporarily put back the original opcode to single-step, we
+ * single-stepped a copy of the instruction. The address of this
+ * copy is p->ainsn.insn.
+ */
+static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ regs->psw.addr &= PSW_ADDR_INSN;
+
+ if (p->ainsn.fixup & FIXUP_PSW_NORMAL)
+ regs->psw.addr = (unsigned long)p->addr +
+ ((unsigned long)regs->psw.addr -
+ (unsigned long)p->ainsn.insn);
+
+ if (p->ainsn.fixup & FIXUP_BRANCH_NOT_TAKEN)
+ if ((unsigned long)regs->psw.addr -
+ (unsigned long)p->ainsn.insn == p->ainsn.ilen)
+ regs->psw.addr = (unsigned long)p->addr + p->ainsn.ilen;
+
+ if (p->ainsn.fixup & FIXUP_RETURN_REGISTER)
+ regs->gprs[p->ainsn.reg] = ((unsigned long)p->addr +
+ (regs->gprs[p->ainsn.reg] -
+ (unsigned long)p->ainsn.insn))
+ | PSW_ADDR_AMODE;
+
+ regs->psw.addr |= PSW_ADDR_AMODE;
+ /* turn off PER mode */
+ regs->psw.mask &= ~PSW_MASK_PER;
+ /* Restore the original per control regs */
+ __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
+ regs->psw.mask |= kcb->kprobe_saved_imask;
+}
+
+static int __kprobes post_kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ resume_execution(cur, regs);
+
+	/* Restore the previously saved kprobe variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+ reset_current_kprobe();
+out:
+ preempt_enable_no_resched();
+
+ /*
+	 * If somebody else is single-stepping across a probe point, the psw
+	 * mask will have PER set; in that case continue the remaining
+	 * processing of do_single_step, as if this were not a probe hit.
+ */
+ if (regs->psw.mask & PSW_MASK_PER) {
+ return 0;
+ }
+
+ return 1;
+}
+
+static int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ const struct exception_table_entry *entry;
+
+ switch(kcb->kprobe_status) {
+ case KPROBE_SWAP_INST:
+ /* We are here because the instruction replacement failed */
+ return 0;
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+		 * We are here because the instruction being single-
+		 * stepped caused a page fault. We reset the current
+		 * kprobe, point the psw address back to the probe address
+		 * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->psw.addr = (unsigned long)cur->addr | PSW_ADDR_AMODE;
+ regs->psw.mask &= ~PSW_MASK_PER;
+ regs->psw.mask |= kcb->kprobe_saved_imask;
+ if (kcb->kprobe_status == KPROBE_REENTER)
+ restore_previous_kprobe(kcb);
+ else
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ break;
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ /*
+		 * We increment the nmissed count for accounting;
+		 * we could also use the npre/npostfault counts to account
+		 * for these specific fault cases.
+ */
+ kprobes_inc_nmissed_count(cur);
+
+ /*
+		 * We come here because instructions in the pre/post
+		 * handler caused the page fault. This can happen
+		 * if the handler tries to access user space via
+		 * copy_from_user(), get_user(), etc. Let the
+ * user-specified handler try to fix it first.
+ */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
+ return 1;
+
+ /*
+ * In case the user-specified fault handler returned
+ * zero, try to fix up.
+ */
+ entry = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN);
+ if (entry) {
+ regs->psw.addr = entry->fixup | PSW_ADDR_AMODE;
+ return 1;
+ }
+
+ /*
+		 * The exception table fixup could not handle it;
+		 * let do_page_fault() fix it.
+ */
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BPT:
+ if (kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_SSTEP:
+ if (post_kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_TRAP:
+ case DIE_PAGE_FAULT:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+ if (kprobe_running() &&
+ kprobe_fault_handler(args->regs, args->trapnr))
+ ret = NOTIFY_STOP;
+ preempt_enable();
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct jprobe *jp = container_of(p, struct jprobe, kp);
+ unsigned long addr;
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ memcpy(&kcb->jprobe_saved_regs, regs, sizeof(struct pt_regs));
+
+	/* set the psw address to the jprobe handler routine */
+ regs->psw.addr = (unsigned long)(jp->entry) | PSW_ADDR_AMODE;
+
+ /* r14 is the function return address */
+ kcb->jprobe_saved_r14 = (unsigned long)regs->gprs[14];
+ /* r15 is the stack pointer */
+ kcb->jprobe_saved_r15 = (unsigned long)regs->gprs[15];
+ addr = (unsigned long)kcb->jprobe_saved_r15;
+
+ memcpy(kcb->jprobes_stack, (kprobe_opcode_t *) addr,
+ MIN_STACK_SIZE(addr));
+ return 1;
+}
+
+void __kprobes jprobe_return(void)
+{
+ asm volatile(".word 0x0002");
+}
+
+void __kprobes jprobe_return_end(void)
+{
+ asm volatile("bcr 0,0");
+}
+
+int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long stack_addr = (unsigned long)(kcb->jprobe_saved_r15);
+
+ /* Put the regs back */
+ memcpy(regs, &kcb->jprobe_saved_regs, sizeof(struct pt_regs));
+ /* put the stack back */
+ memcpy((kprobe_opcode_t *) stack_addr, kcb->jprobes_stack,
+ MIN_STACK_SIZE(stack_addr));
+ preempt_enable_no_resched();
+ return 1;
+}
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *) & kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ return register_kprobe(&trampoline_p);
+}
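
The handlers above are only reached through probes registered with register_kprobe() and register_kretprobe(). Below is a minimal, illustrative module that exercises them; it is a sketch, not part of the patch: the probe target is passed in as a raw kernel text address at load time (e.g. looked up in System.map), and all names and the printk output are assumptions.

/*
 * Illustrative only: arm a kprobe and a kretprobe on a kernel text
 * address supplied as a module parameter.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kprobes.h>

static unsigned long target;		/* kernel text address to probe */
module_param(target, ulong, 0);

static int pre(struct kprobe *p, struct pt_regs *regs)
{
	/* runs from kprobe_handler() before the probed instruction */
	printk(KERN_INFO "kprobe hit at %p, psw.addr=%lx\n",
	       p->addr, regs->psw.addr);
	return 0;	/* 0: let the normal single-step setup proceed */
}

static void post(struct kprobe *p, struct pt_regs *regs, unsigned long flags)
{
	/* runs from post_kprobe_handler() after the single-step */
}

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* runs from trampoline_probe_handler() when the function returns */
	printk(KERN_INFO "probed function returned to %p\n", ri->ret_addr);
	return 0;
}

static struct kprobe kp = {
	.pre_handler	= pre,
	.post_handler	= post,
};

static struct kretprobe rp = {
	.handler	= ret_handler,
	.maxactive	= 16,
};

static int __init probe_example_init(void)
{
	int ret;

	if (!target)
		return -EINVAL;
	kp.addr = (kprobe_opcode_t *) target;
	rp.kp.addr = (kprobe_opcode_t *) target;
	ret = register_kprobe(&kp);
	if (ret)
		return ret;
	ret = register_kretprobe(&rp);
	if (ret)
		unregister_kprobe(&kp);
	return ret;
}

static void __exit probe_example_exit(void)
{
	unregister_kretprobe(&rp);
	unregister_kprobe(&kp);
}

module_init(probe_example_init);
module_exit(probe_example_exit);
MODULE_LICENSE("GPL");

Loading it with insmod and target=<address> arms both probes; unloading removes the breakpoints and restores the original instructions.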
diff --git a/arch/s390/kernel/reipl.S b/arch/s390/kernel/reipl.S
index 658e5ac484f9..4562cdbce8eb 100644
--- a/arch/s390/kernel/reipl.S
+++ b/arch/s390/kernel/reipl.S
@@ -8,13 +8,30 @@
#include <asm/lowcore.h>
- .globl do_reipl
-do_reipl: basr %r13,0
+ .globl do_reipl_asm
+do_reipl_asm: basr %r13,0
.Lpg0: lpsw .Lnewpsw-.Lpg0(%r13)
-.Lpg1: lctl %c6,%c6,.Lall-.Lpg0(%r13)
- stctl %c0,%c0,.Lctlsave-.Lpg0(%r13)
- ni .Lctlsave-.Lpg0(%r13),0xef
- lctl %c0,%c0,.Lctlsave-.Lpg0(%r13)
+
+ # switch off lowcore protection
+
+.Lpg1: stctl %c0,%c0,.Lctlsave1-.Lpg0(%r13)
+ stctl %c0,%c0,.Lctlsave2-.Lpg0(%r13)
+ ni .Lctlsave1-.Lpg0(%r13),0xef
+ lctl %c0,%c0,.Lctlsave1-.Lpg0(%r13)
+
+ # do store status of all registers
+
+ stm %r0,%r15,__LC_GPREGS_SAVE_AREA
+ stctl %c0,%c15,__LC_CREGS_SAVE_AREA
+ mvc __LC_CREGS_SAVE_AREA(4),.Lctlsave2-.Lpg0(%r13)
+ stam %a0,%a15,__LC_AREGS_SAVE_AREA
+ stpx __LC_PREFIX_SAVE_AREA
+ stckc .Lclkcmp-.Lpg0(%r13)
+ mvc __LC_CLOCK_COMP_SAVE_AREA(8),.Lclkcmp-.Lpg0(%r13)
+ stpt __LC_CPU_TIMER_SAVE_AREA
+ st %r13, __LC_PSW_SAVE_AREA+4
+
+ lctl %c6,%c6,.Lall-.Lpg0(%r13)
lr %r1,%r2
mvc __LC_PGM_NEW_PSW(8),.Lpcnew-.Lpg0(%r13)
stsch .Lschib-.Lpg0(%r13)
@@ -46,9 +63,11 @@ do_reipl: basr %r13,0
.Ldisab: st %r14,.Ldispsw+4-.Lpg0(%r13)
lpsw .Ldispsw-.Lpg0(%r13)
.align 8
+.Lclkcmp: .quad 0x0000000000000000
.Lall: .long 0xff000000
.Lnull: .long 0x00000000
-.Lctlsave: .long 0x00000000
+.Lctlsave1: .long 0x00000000
+.Lctlsave2: .long 0x00000000
.align 8
.Lnewpsw: .long 0x00080000,0x80000000+.Lpg1
.Lpcnew: .long 0x00080000,0x80000000+.Lecs
diff --git a/arch/s390/kernel/reipl64.S b/arch/s390/kernel/reipl64.S
index 4d090d60f3ef..95bd1e234f63 100644
--- a/arch/s390/kernel/reipl64.S
+++ b/arch/s390/kernel/reipl64.S
@@ -8,13 +8,30 @@
*/
#include <asm/lowcore.h>
- .globl do_reipl
-do_reipl: basr %r13,0
-.Lpg0: lpswe .Lnewpsw-.Lpg0(%r13)
+ .globl do_reipl_asm
+do_reipl_asm: basr %r13,0
+
+ # do store status of all registers
+
+.Lpg0: stg %r1,.Lregsave-.Lpg0(%r13)
+ lghi %r1,0x1000
+ stmg %r0,%r15,__LC_GPREGS_SAVE_AREA-0x1000(%r1)
+ lg %r0,.Lregsave-.Lpg0(%r13)
+ stg %r0,__LC_GPREGS_SAVE_AREA-0x1000+8(%r1)
+ stctg %c0,%c15,__LC_CREGS_SAVE_AREA-0x1000(%r1)
+ stam %a0,%a15,__LC_AREGS_SAVE_AREA-0x1000(%r1)
+ stpx __LC_PREFIX_SAVE_AREA-0x1000(%r1)
+ stfpc __LC_FP_CREG_SAVE_AREA-0x1000(%r1)
+ stckc .Lclkcmp-.Lpg0(%r13)
+ mvc __LC_CLOCK_COMP_SAVE_AREA-0x1000(8,%r1),.Lclkcmp-.Lpg0(%r13)
+ stpt __LC_CPU_TIMER_SAVE_AREA-0x1000(%r1)
+ stg %r13, __LC_PSW_SAVE_AREA-0x1000+8(%r1)
+
+ lpswe .Lnewpsw-.Lpg0(%r13)
.Lpg1: lctlg %c6,%c6,.Lall-.Lpg0(%r13)
- stctg %c0,%c0,.Lctlsave-.Lpg0(%r13)
- ni .Lctlsave+4-.Lpg0(%r13),0xef
- lctlg %c0,%c0,.Lctlsave-.Lpg0(%r13)
+ stctg %c0,%c0,.Lregsave-.Lpg0(%r13)
+ ni .Lregsave+4-.Lpg0(%r13),0xef
+ lctlg %c0,%c0,.Lregsave-.Lpg0(%r13)
lgr %r1,%r2
mvc __LC_PGM_NEW_PSW(16),.Lpcnew-.Lpg0(%r13)
stsch .Lschib-.Lpg0(%r13)
@@ -50,8 +67,9 @@ do_reipl: basr %r13,0
st %r14,.Ldispsw+12-.Lpg0(%r13)
lpswe .Ldispsw-.Lpg0(%r13)
.align 8
+.Lclkcmp: .quad 0x0000000000000000
.Lall: .quad 0x00000000ff000000
-.Lctlsave: .quad 0x0000000000000000
+.Lregsave: .quad 0x0000000000000000
.Lnull: .long 0x0000000000000000
.align 16
/*
@@ -92,5 +110,3 @@ do_reipl: basr %r13,0
.long 0x00000000,0x00000000
.long 0x00000000,0x00000000
-
-
diff --git a/arch/s390/kernel/reipl_diag.c b/arch/s390/kernel/reipl_diag.c
deleted file mode 100644
index 1f33951ba439..000000000000
--- a/arch/s390/kernel/reipl_diag.c
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * This file contains the implementation of the
- * Linux re-IPL support
- *
- * (C) Copyright IBM Corp. 2005
- *
- * Author(s): Volker Sameske (sameske@de.ibm.com)
- *
- */
-
-#include <linux/kernel.h>
-
-static unsigned int reipl_diag_rc1;
-static unsigned int reipl_diag_rc2;
-
-/*
- * re-IPL the system using the last used IPL parameters
- */
-void reipl_diag(void)
-{
- asm volatile (
- " la %%r4,0\n"
- " la %%r5,0\n"
- " diag %%r4,%2,0x308\n"
- "0:\n"
- " st %%r4,%0\n"
- " st %%r5,%1\n"
- ".section __ex_table,\"a\"\n"
-#ifdef CONFIG_64BIT
- " .align 8\n"
- " .quad 0b, 0b\n"
-#else
- " .align 4\n"
- " .long 0b, 0b\n"
-#endif
- ".previous\n"
- : "=m" (reipl_diag_rc1), "=m" (reipl_diag_rc2)
- : "d" (3) : "cc", "4", "5" );
-}
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index c73a45467fa4..9f19e833a562 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -25,12 +25,6 @@ EXPORT_SYMBOL(_oi_bitmap);
EXPORT_SYMBOL(_ni_bitmap);
EXPORT_SYMBOL(_zb_findmap);
EXPORT_SYMBOL(_sb_findmap);
-EXPORT_SYMBOL(__copy_from_user_asm);
-EXPORT_SYMBOL(__copy_to_user_asm);
-EXPORT_SYMBOL(__copy_in_user_asm);
-EXPORT_SYMBOL(__clear_user_asm);
-EXPORT_SYMBOL(__strncpy_from_user_asm);
-EXPORT_SYMBOL(__strnlen_user_asm);
EXPORT_SYMBOL(diag10);
/*
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index c902f059c7aa..e3d9325f6022 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -37,6 +37,7 @@
#include <linux/kernel_stat.h>
#include <linux/device.h>
#include <linux/notifier.h>
+#include <linux/pfn.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -50,6 +51,12 @@
#include <asm/sections.h>
/*
+ * User copy operations.
+ */
+struct uaccess_ops uaccess;
+EXPORT_SYMBOL_GPL(uaccess);
+
+/*
* Machine setup..
*/
unsigned int console_mode = 0;
@@ -284,16 +291,9 @@ void (*_machine_power_off)(void) = machine_power_off_smp;
/*
* Reboot, halt and power_off routines for non SMP.
*/
-extern void reipl(unsigned long devno);
-extern void reipl_diag(void);
static void do_machine_restart_nonsmp(char * __unused)
{
- reipl_diag();
-
- if (MACHINE_IS_VM)
- cpcmd ("IPL", NULL, 0, NULL);
- else
- reipl (0x10000 | S390_lowcore.ipl_device);
+ do_reipl();
}
static void do_machine_halt_nonsmp(void)
@@ -501,13 +501,47 @@ setup_memory(void)
* partially used pages are not usable - thus
* we are rounding upwards:
*/
- start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT;
- end_pfn = max_pfn = memory_end >> PAGE_SHIFT;
+ start_pfn = PFN_UP(__pa(&_end));
+ end_pfn = max_pfn = PFN_DOWN(memory_end);
/* Initialize storage key for kernel pages */
for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++)
page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
+#ifdef CONFIG_BLK_DEV_INITRD
+ /*
+	 * Move the initrd if the bitmap of the bootmem allocator
+	 * would otherwise overwrite it.
+ */
+
+ if (INITRD_START && INITRD_SIZE) {
+ unsigned long bmap_size;
+ unsigned long start;
+
+ bmap_size = bootmem_bootmap_pages(end_pfn - start_pfn + 1);
+ bmap_size = PFN_PHYS(bmap_size);
+
+ if (PFN_PHYS(start_pfn) + bmap_size > INITRD_START) {
+ start = PFN_PHYS(start_pfn) + bmap_size + PAGE_SIZE;
+
+ if (start + INITRD_SIZE > memory_end) {
+ printk("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%08lx)\n"
+ "disabling initrd\n",
+ start + INITRD_SIZE, memory_end);
+ INITRD_START = INITRD_SIZE = 0;
+ } else {
+ printk("Moving initrd (0x%08lx -> 0x%08lx, "
+ "size: %ld)\n",
+ INITRD_START, start, INITRD_SIZE);
+ memmove((void *) start, (void *) INITRD_START,
+ INITRD_SIZE);
+ INITRD_START = start;
+ }
+ }
+ }
+#endif
+
/*
* Initialize the boot-time allocator (with low memory only):
*/
@@ -559,7 +593,7 @@ setup_memory(void)
reserve_bootmem(start_pfn << PAGE_SHIFT, bootmap_size);
#ifdef CONFIG_BLK_DEV_INITRD
- if (INITRD_START) {
+ if (INITRD_START && INITRD_SIZE) {
if (INITRD_START + INITRD_SIZE <= memory_end) {
reserve_bootmem(INITRD_START, INITRD_SIZE);
initrd_start = INITRD_START;
@@ -613,6 +647,11 @@ setup_arch(char **cmdline_p)
memory_end = memory_size;
+ if (MACHINE_HAS_MVCOS)
+ memcpy(&uaccess, &uaccess_mvcos, sizeof(uaccess));
+ else
+ memcpy(&uaccess, &uaccess_std, sizeof(uaccess));
+
parse_early_param();
#ifndef CONFIG_64BIT
@@ -720,214 +759,3 @@ struct seq_operations cpuinfo_op = {
.show = show_cpuinfo,
};
-#define DEFINE_IPL_ATTR(_name, _format, _value) \
-static ssize_t ipl_##_name##_show(struct subsystem *subsys, \
- char *page) \
-{ \
- return sprintf(page, _format, _value); \
-} \
-static struct subsys_attribute ipl_##_name##_attr = \
- __ATTR(_name, S_IRUGO, ipl_##_name##_show, NULL);
-
-DEFINE_IPL_ATTR(wwpn, "0x%016llx\n", (unsigned long long)
- IPL_PARMBLOCK_START->fcp.wwpn);
-DEFINE_IPL_ATTR(lun, "0x%016llx\n", (unsigned long long)
- IPL_PARMBLOCK_START->fcp.lun);
-DEFINE_IPL_ATTR(bootprog, "%lld\n", (unsigned long long)
- IPL_PARMBLOCK_START->fcp.bootprog);
-DEFINE_IPL_ATTR(br_lba, "%lld\n", (unsigned long long)
- IPL_PARMBLOCK_START->fcp.br_lba);
-
-enum ipl_type_type {
- ipl_type_unknown,
- ipl_type_ccw,
- ipl_type_fcp,
-};
-
-static enum ipl_type_type
-get_ipl_type(void)
-{
- struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
-
- if (!IPL_DEVNO_VALID)
- return ipl_type_unknown;
- if (!IPL_PARMBLOCK_VALID)
- return ipl_type_ccw;
- if (ipl->hdr.header.version > IPL_MAX_SUPPORTED_VERSION)
- return ipl_type_unknown;
- if (ipl->fcp.pbt != IPL_TYPE_FCP)
- return ipl_type_unknown;
- return ipl_type_fcp;
-}
-
-static ssize_t
-ipl_type_show(struct subsystem *subsys, char *page)
-{
- switch (get_ipl_type()) {
- case ipl_type_ccw:
- return sprintf(page, "ccw\n");
- case ipl_type_fcp:
- return sprintf(page, "fcp\n");
- default:
- return sprintf(page, "unknown\n");
- }
-}
-
-static struct subsys_attribute ipl_type_attr = __ATTR_RO(ipl_type);
-
-static ssize_t
-ipl_device_show(struct subsystem *subsys, char *page)
-{
- struct ipl_parameter_block *ipl = IPL_PARMBLOCK_START;
-
- switch (get_ipl_type()) {
- case ipl_type_ccw:
- return sprintf(page, "0.0.%04x\n", ipl_devno);
- case ipl_type_fcp:
- return sprintf(page, "0.0.%04x\n", ipl->fcp.devno);
- default:
- return 0;
- }
-}
-
-static struct subsys_attribute ipl_device_attr =
- __ATTR(device, S_IRUGO, ipl_device_show, NULL);
-
-static struct attribute *ipl_fcp_attrs[] = {
- &ipl_type_attr.attr,
- &ipl_device_attr.attr,
- &ipl_wwpn_attr.attr,
- &ipl_lun_attr.attr,
- &ipl_bootprog_attr.attr,
- &ipl_br_lba_attr.attr,
- NULL,
-};
-
-static struct attribute_group ipl_fcp_attr_group = {
- .attrs = ipl_fcp_attrs,
-};
-
-static struct attribute *ipl_ccw_attrs[] = {
- &ipl_type_attr.attr,
- &ipl_device_attr.attr,
- NULL,
-};
-
-static struct attribute_group ipl_ccw_attr_group = {
- .attrs = ipl_ccw_attrs,
-};
-
-static struct attribute *ipl_unknown_attrs[] = {
- &ipl_type_attr.attr,
- NULL,
-};
-
-static struct attribute_group ipl_unknown_attr_group = {
- .attrs = ipl_unknown_attrs,
-};
-
-static ssize_t
-ipl_parameter_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
-{
- unsigned int size = IPL_PARMBLOCK_SIZE;
-
- if (off > size)
- return 0;
- if (off + count > size)
- count = size - off;
-
- memcpy(buf, (void *) IPL_PARMBLOCK_START + off, count);
- return count;
-}
-
-static struct bin_attribute ipl_parameter_attr = {
- .attr = {
- .name = "binary_parameter",
- .mode = S_IRUGO,
- .owner = THIS_MODULE,
- },
- .size = PAGE_SIZE,
- .read = &ipl_parameter_read,
-};
-
-static ssize_t
-ipl_scp_data_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
-{
- unsigned int size = IPL_PARMBLOCK_START->fcp.scp_data_len;
- void *scp_data = &IPL_PARMBLOCK_START->fcp.scp_data;
-
- if (off > size)
- return 0;
- if (off + count > size)
- count = size - off;
-
- memcpy(buf, scp_data + off, count);
- return count;
-}
-
-static struct bin_attribute ipl_scp_data_attr = {
- .attr = {
- .name = "scp_data",
- .mode = S_IRUGO,
- .owner = THIS_MODULE,
- },
- .size = PAGE_SIZE,
- .read = &ipl_scp_data_read,
-};
-
-static decl_subsys(ipl, NULL, NULL);
-
-static int ipl_register_fcp_files(void)
-{
- int rc;
-
- rc = sysfs_create_group(&ipl_subsys.kset.kobj,
- &ipl_fcp_attr_group);
- if (rc)
- goto out;
- rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
- &ipl_parameter_attr);
- if (rc)
- goto out_ipl_parm;
- rc = sysfs_create_bin_file(&ipl_subsys.kset.kobj,
- &ipl_scp_data_attr);
- if (!rc)
- goto out;
-
- sysfs_remove_bin_file(&ipl_subsys.kset.kobj, &ipl_parameter_attr);
-
-out_ipl_parm:
- sysfs_remove_group(&ipl_subsys.kset.kobj, &ipl_fcp_attr_group);
-out:
- return rc;
-}
-
-static int __init
-ipl_device_sysfs_register(void) {
- int rc;
-
- rc = firmware_register(&ipl_subsys);
- if (rc)
- goto out;
-
- switch (get_ipl_type()) {
- case ipl_type_ccw:
- rc = sysfs_create_group(&ipl_subsys.kset.kobj,
- &ipl_ccw_attr_group);
- break;
- case ipl_type_fcp:
- rc = ipl_register_fcp_files();
- break;
- default:
- rc = sysfs_create_group(&ipl_subsys.kset.kobj,
- &ipl_unknown_attr_group);
- break;
- }
-
- if (rc)
- firmware_unregister(&ipl_subsys);
-out:
- return rc;
-}
-
-__initcall(ipl_device_sysfs_register);
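
The setup_arch() hunk above fills the new global ops table from uaccess_mvcos on machines that have the MVCOS instruction and from uaccess_std everywhere else. The sketch below shows how generic code can dispatch through that table; the wrapper name and the small/large threshold are illustrative assumptions, and the real glue lives in include/asm-s390/uaccess.h, which is not part of this diff.

#include <asm/uaccess.h>	/* assumed to declare struct uaccess_ops and 'uaccess' */

/* Hypothetical wrapper: route a copy through the ops table chosen at boot. */
static inline size_t copy_to_user_via_ops(void __user *to, const void *from,
					  size_t n)
{
	if (n <= 256)		/* threshold is illustrative only */
		return uaccess.copy_to_user_small(n, to, from);
	return uaccess.copy_to_user(n, to, from);
}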
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index a887b686f279..642095ec7c07 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -114,29 +114,26 @@ sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
static int save_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
unsigned long old_mask = regs->psw.mask;
- int err;
-
+ _sigregs user_sregs;
+
save_access_regs(current->thread.acrs);
/* Copy a 'clean' PSW mask to the user to avoid leaking
information about whether PER is currently on. */
regs->psw.mask = PSW_MASK_MERGE(PSW_USER_BITS, regs->psw.mask);
- err = __copy_to_user(&sregs->regs.psw, &regs->psw,
- sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs));
+ memcpy(&user_sregs.regs.psw, &regs->psw, sizeof(sregs->regs.psw) +
+ sizeof(sregs->regs.gprs));
regs->psw.mask = old_mask;
- if (err != 0)
- return err;
- err = __copy_to_user(&sregs->regs.acrs, current->thread.acrs,
- sizeof(sregs->regs.acrs));
- if (err != 0)
- return err;
+ memcpy(&user_sregs.regs.acrs, current->thread.acrs,
+ sizeof(sregs->regs.acrs));
/*
* We have to store the fp registers to current->thread.fp_regs
* to merge them with the emulated registers.
*/
save_fp_regs(&current->thread.fp_regs);
- return __copy_to_user(&sregs->fpregs, &current->thread.fp_regs,
- sizeof(s390_fp_regs));
+ memcpy(&user_sregs.fpregs, &current->thread.fp_regs,
+ sizeof(s390_fp_regs));
+ return __copy_to_user(sregs, &user_sregs, sizeof(_sigregs));
}
/* Returns positive number on error */
@@ -144,27 +141,25 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs)
{
unsigned long old_mask = regs->psw.mask;
int err;
+ _sigregs user_sregs;
	/* Always make any pending restarted system call return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
- err = __copy_from_user(&regs->psw, &sregs->regs.psw,
- sizeof(sregs->regs.psw)+sizeof(sregs->regs.gprs));
+ err = __copy_from_user(&user_sregs, sregs, sizeof(_sigregs));
regs->psw.mask = PSW_MASK_MERGE(old_mask, regs->psw.mask);
regs->psw.addr |= PSW_ADDR_AMODE;
if (err)
return err;
- err = __copy_from_user(&current->thread.acrs, &sregs->regs.acrs,
- sizeof(sregs->regs.acrs));
- if (err)
- return err;
+ memcpy(&regs->psw, &user_sregs.regs.psw, sizeof(sregs->regs.psw) +
+ sizeof(sregs->regs.gprs));
+ memcpy(&current->thread.acrs, &user_sregs.regs.acrs,
+ sizeof(sregs->regs.acrs));
restore_access_regs(current->thread.acrs);
- err = __copy_from_user(&current->thread.fp_regs, &sregs->fpregs,
- sizeof(s390_fp_regs));
+ memcpy(&current->thread.fp_regs, &user_sregs.fpregs,
+ sizeof(s390_fp_regs));
current->thread.fp_regs.fpc &= FPC_VALID_MASK;
- if (err)
- return err;
restore_fp_regs(&current->thread.fp_regs);
regs->trap = -1; /* disable syscall checks */
@@ -457,6 +452,7 @@ void do_signal(struct pt_regs *regs)
case -ERESTART_RESTARTBLOCK:
regs->gprs[2] = -EINTR;
}
+ regs->trap = -1; /* Don't deal with this again. */
}
/* Get signal to deliver. When running under ptrace, at this point
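
The save_sigregs()/restore_sigregs() rework above stages the whole _sigregs image in a kernel-local buffer and crosses the user-space boundary exactly once. A generic sketch of that pattern follows; the structure and helper are purely hypothetical stand-ins.

#include <linux/errno.h>
#include <asm/uaccess.h>

/* Hypothetical stand-in for _sigregs: any composite user-visible frame. */
struct frame_image {
	unsigned long flags;
	unsigned long regs[16];
};

static int publish_frame(struct frame_image __user *uframe,
			 unsigned long flags, unsigned long *regs)
{
	struct frame_image img;
	int i;

	img.flags = flags;		/* assemble the image in kernel memory */
	for (i = 0; i < 16; i++)
		img.regs[i] = regs[i];
	/* a single fault-checked user access instead of one per member */
	return __copy_to_user(uframe, &img, sizeof(img)) ? -EFAULT : 0;
}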
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 8e03219eea76..b2e6f4c8d382 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -59,9 +59,6 @@ static struct task_struct *current_set[NR_CPUS];
extern char vmhalt_cmd[];
extern char vmpoff_cmd[];
-extern void reipl(unsigned long devno);
-extern void reipl_diag(void);
-
static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);
@@ -279,12 +276,7 @@ static void do_machine_restart(void * __unused)
* interrupted by an external interrupt and s390irq
* locks are always held disabled).
*/
- reipl_diag();
-
- if (MACHINE_IS_VM)
- cpcmd ("IPL", NULL, 0, NULL);
- else
- reipl (0x10000 | S390_lowcore.ipl_device);
+ do_reipl();
}
void machine_restart_smp(char * __unused)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index bde1d1d59858..c4982c963424 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
+#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -39,6 +40,7 @@
#include <asm/s390_ext.h>
#include <asm/lowcore.h>
#include <asm/debug.h>
+#include <asm/kdebug.h>
/* Called from entry.S only */
extern void handle_per_exception(struct pt_regs *regs);
@@ -74,6 +76,20 @@ static int kstack_depth_to_print = 12;
static int kstack_depth_to_print = 20;
#endif /* CONFIG_64BIT */
+ATOMIC_NOTIFIER_HEAD(s390die_chain);
+
+int register_die_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(register_die_notifier);
+
+int unregister_die_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&s390die_chain, nb);
+}
+EXPORT_SYMBOL(unregister_die_notifier);
+
/*
 * For show_trace we have three different stacks to consider:
* - the panic stack which is used if the kernel stack has overflown
@@ -305,8 +321,9 @@ report_user_fault(long interruption_code, struct pt_regs *regs)
#endif
}
-static void inline do_trap(long interruption_code, int signr, char *str,
- struct pt_regs *regs, siginfo_t *info)
+static void __kprobes inline do_trap(long interruption_code, int signr,
+ char *str, struct pt_regs *regs,
+ siginfo_t *info)
{
/*
* We got all needed information from the lowcore and can
@@ -315,6 +332,10 @@ static void inline do_trap(long interruption_code, int signr, char *str,
if (regs->psw.mask & PSW_MASK_PSTATE)
local_irq_enable();
+ if (notify_die(DIE_TRAP, str, regs, interruption_code,
+ interruption_code, signr) == NOTIFY_STOP)
+ return;
+
if (regs->psw.mask & PSW_MASK_PSTATE) {
struct task_struct *tsk = current;
@@ -336,8 +357,12 @@ static inline void __user *get_check_address(struct pt_regs *regs)
return (void __user *)((regs->psw.addr-S390_lowcore.pgm_ilc) & PSW_ADDR_INSN);
}
-void do_single_step(struct pt_regs *regs)
+void __kprobes do_single_step(struct pt_regs *regs)
{
+ if (notify_die(DIE_SSTEP, "sstep", regs, 0, 0,
+			SIGTRAP) == NOTIFY_STOP) {
+ return;
+ }
if ((current->ptrace & PT_PTRACED) != 0)
force_sig(SIGTRAP, current);
}
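
With the die notifier chain in place, code can watch traps, breakpoints and single-step events without hooking the low-level handlers; kprobes is the first consumer via kprobe_exceptions_notify(). A hedged example of another consumer is sketched below; the handler body and names are made up, and asm/kdebug.h is assumed to declare the DIE_* values and struct die_args that the patch uses.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/ptrace.h>
#include <asm/kdebug.h>

/* Hypothetical handler: log breakpoint events, leave everything else alone. */
static int my_die_event(struct notifier_block *nb, unsigned long val,
			void *data)
{
	struct die_args *args = data;

	if (val == DIE_BPT)
		printk(KERN_INFO "breakpoint at psw.addr=%lx\n",
		       args->regs->psw.addr);
	return NOTIFY_DONE;	/* never NOTIFY_STOP: let kprobes handle it */
}

static struct notifier_block my_die_nb = {
	.notifier_call = my_die_event,
};

static int __init die_watch_init(void)
{
	return register_die_notifier(&my_die_nb);
}

static void __exit die_watch_exit(void)
{
	unregister_die_notifier(&my_die_nb);
}

module_init(die_watch_init);
module_exit(die_watch_exit);
MODULE_LICENSE("GPL");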
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index ff5f7bb34f75..af9e69a03011 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -24,6 +24,7 @@ SECTIONS
*(.text)
SCHED_TEXT
LOCK_TEXT
+ KPROBES_TEXT
*(.fixup)
*(.gnu.warning)
} = 0x0700
@@ -117,7 +118,7 @@ SECTIONS
/* Sections to be discarded */
/DISCARD/ : {
- *(.exitcall.exit)
+ *(.exit.text) *(.exit.data) *(.exitcall.exit)
}
/* Stabs debugging sections. */
diff --git a/arch/s390/lib/Makefile b/arch/s390/lib/Makefile
index e05d087a6eae..c42ffedfdb49 100644
--- a/arch/s390/lib/Makefile
+++ b/arch/s390/lib/Makefile
@@ -4,6 +4,6 @@
EXTRA_AFLAGS := -traditional
-lib-y += delay.o string.o
-lib-y += $(if $(CONFIG_64BIT),uaccess64.o,uaccess.o)
+lib-y += delay.o string.o uaccess_std.o
+lib-$(CONFIG_64BIT) += uaccess_mvcos.o
lib-$(CONFIG_SMP) += spinlock.o
diff --git a/arch/s390/lib/uaccess.S b/arch/s390/lib/uaccess.S
deleted file mode 100644
index 837275284d9f..000000000000
--- a/arch/s390/lib/uaccess.S
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * arch/s390/lib/uaccess.S
- * __copy_{from|to}_user functions.
- *
- * s390
- * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * These functions have standard call interface
- */
-
-#include <linux/errno.h>
-#include <asm/lowcore.h>
-#include <asm/asm-offsets.h>
-
- .text
- .align 4
- .globl __copy_from_user_asm
- # %r2 = to, %r3 = n, %r4 = from
-__copy_from_user_asm:
- slr %r0,%r0
-0: mvcp 0(%r3,%r2),0(%r4),%r0
- jnz 1f
- slr %r2,%r2
- br %r14
-1: la %r2,256(%r2)
- la %r4,256(%r4)
- ahi %r3,-256
-2: mvcp 0(%r3,%r2),0(%r4),%r0
- jnz 1b
-3: slr %r2,%r2
- br %r14
-4: lhi %r0,-4096
- lr %r5,%r4
- slr %r5,%r0
- nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
- slr %r5,%r4 # %r5 = #bytes to next user page boundary
- clr %r3,%r5 # copy crosses next page boundary ?
- jnh 6f # no, the current page faulted
- # move with the reduced length which is < 256
-5: mvcp 0(%r5,%r2),0(%r4),%r0
- slr %r3,%r5
-6: lr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .long 0b,4b
- .long 2b,4b
- .long 5b,6b
- .previous
-
- .align 4
- .text
- .globl __copy_to_user_asm
- # %r2 = from, %r3 = n, %r4 = to
-__copy_to_user_asm:
- slr %r0,%r0
-0: mvcs 0(%r3,%r4),0(%r2),%r0
- jnz 1f
- slr %r2,%r2
- br %r14
-1: la %r2,256(%r2)
- la %r4,256(%r4)
- ahi %r3,-256
-2: mvcs 0(%r3,%r4),0(%r2),%r0
- jnz 1b
-3: slr %r2,%r2
- br %r14
-4: lhi %r0,-4096
- lr %r5,%r4
- slr %r5,%r0
- nr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
- slr %r5,%r4 # %r5 = #bytes to next user page boundary
- clr %r3,%r5 # copy crosses next page boundary ?
- jnh 6f # no, the current page faulted
- # move with the reduced length which is < 256
-5: mvcs 0(%r5,%r4),0(%r2),%r0
- slr %r3,%r5
-6: lr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .long 0b,4b
- .long 2b,4b
- .long 5b,6b
- .previous
-
- .align 4
- .text
- .globl __copy_in_user_asm
- # %r2 = from, %r3 = n, %r4 = to
-__copy_in_user_asm:
- ahi %r3,-1
- jo 6f
- sacf 256
- bras %r1,4f
-0: ahi %r3,257
-1: mvc 0(1,%r4),0(%r2)
- la %r2,1(%r2)
- la %r4,1(%r4)
- ahi %r3,-1
- jnz 1b
-2: lr %r2,%r3
- br %r14
-3: mvc 0(256,%r4),0(%r2)
- la %r2,256(%r2)
- la %r4,256(%r4)
-4: ahi %r3,-256
- jnm 3b
-5: ex %r3,4(%r1)
- sacf 0
-6: slr %r2,%r2
- br %r14
- .section __ex_table,"a"
- .long 1b,2b
- .long 3b,0b
- .long 5b,0b
- .previous
-
- .align 4
- .text
- .globl __clear_user_asm
- # %r2 = to, %r3 = n
-__clear_user_asm:
- bras %r5,0f
- .long empty_zero_page
-0: l %r5,0(%r5)
- slr %r0,%r0
-1: mvcs 0(%r3,%r2),0(%r5),%r0
- jnz 2f
- slr %r2,%r2
- br %r14
-2: la %r2,256(%r2)
- ahi %r3,-256
-3: mvcs 0(%r3,%r2),0(%r5),%r0
- jnz 2b
-4: slr %r2,%r2
- br %r14
-5: lhi %r0,-4096
- lr %r4,%r2
- slr %r4,%r0
- nr %r4,%r0 # %r4 = (%r2 + 4096) & -4096
- slr %r4,%r2 # %r4 = #bytes to next user page boundary
- clr %r3,%r4 # clear crosses next page boundary ?
- jnh 7f # no, the current page faulted
- # clear with the reduced length which is < 256
-6: mvcs 0(%r4,%r2),0(%r5),%r0
- slr %r3,%r4
-7: lr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .long 1b,5b
- .long 3b,5b
- .long 6b,7b
- .previous
-
- .align 4
- .text
- .globl __strncpy_from_user_asm
- # %r2 = count, %r3 = dst, %r4 = src
-__strncpy_from_user_asm:
- lhi %r0,0
- lr %r1,%r4
- la %r4,0(%r4) # clear high order bit from %r4
- la %r2,0(%r2,%r4) # %r2 points to first byte after string
- sacf 256
-0: srst %r2,%r1
- jo 0b
- sacf 0
- lr %r1,%r2
- jh 1f # \0 found in string ?
- ahi %r1,1 # include \0 in copy
-1: slr %r1,%r4 # %r1 = copy length (without \0)
- slr %r2,%r4 # %r2 = return length (including \0)
-2: mvcp 0(%r1,%r3),0(%r4),%r0
- jnz 3f
- br %r14
-3: la %r3,256(%r3)
- la %r4,256(%r4)
- ahi %r1,-256
- mvcp 0(%r1,%r3),0(%r4),%r0
- jnz 3b
- br %r14
-4: sacf 0
- lhi %r2,-EFAULT
- br %r14
- .section __ex_table,"a"
- .long 0b,4b
- .previous
-
- .align 4
- .text
- .globl __strnlen_user_asm
- # %r2 = count, %r3 = src
-__strnlen_user_asm:
- lhi %r0,0
- lr %r1,%r3
- la %r3,0(%r3) # clear high order bit from %r4
- la %r2,0(%r2,%r3) # %r2 points to first byte after string
- sacf 256
-0: srst %r2,%r1
- jo 0b
- sacf 0
- ahi %r2,1 # strnlen_user result includes the \0
- # or return count+1 if \0 not found
- slr %r2,%r3
- br %r14
-2: sacf 0
- slr %r2,%r2 # return 0 on exception
- br %r14
- .section __ex_table,"a"
- .long 0b,2b
- .previous
diff --git a/arch/s390/lib/uaccess64.S b/arch/s390/lib/uaccess64.S
deleted file mode 100644
index 1f755be22f92..000000000000
--- a/arch/s390/lib/uaccess64.S
+++ /dev/null
@@ -1,207 +0,0 @@
-/*
- * arch/s390x/lib/uaccess.S
- * __copy_{from|to}_user functions.
- *
- * s390
- * Copyright (C) 2000,2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
- * Authors(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
- *
- * These functions have standard call interface
- */
-
-#include <linux/errno.h>
-#include <asm/lowcore.h>
-#include <asm/asm-offsets.h>
-
- .text
- .align 4
- .globl __copy_from_user_asm
- # %r2 = to, %r3 = n, %r4 = from
-__copy_from_user_asm:
- slgr %r0,%r0
-0: mvcp 0(%r3,%r2),0(%r4),%r0
- jnz 1f
- slgr %r2,%r2
- br %r14
-1: la %r2,256(%r2)
- la %r4,256(%r4)
- aghi %r3,-256
-2: mvcp 0(%r3,%r2),0(%r4),%r0
- jnz 1b
-3: slgr %r2,%r2
- br %r14
-4: lghi %r0,-4096
- lgr %r5,%r4
- slgr %r5,%r0
- ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
- slgr %r5,%r4 # %r5 = #bytes to next user page boundary
- clgr %r3,%r5 # copy crosses next page boundary ?
- jnh 6f # no, the current page faulted
- # move with the reduced length which is < 256
-5: mvcp 0(%r5,%r2),0(%r4),%r0
- slgr %r3,%r5
-6: lgr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .quad 0b,4b
- .quad 2b,4b
- .quad 5b,6b
- .previous
-
- .align 4
- .text
- .globl __copy_to_user_asm
- # %r2 = from, %r3 = n, %r4 = to
-__copy_to_user_asm:
- slgr %r0,%r0
-0: mvcs 0(%r3,%r4),0(%r2),%r0
- jnz 1f
- slgr %r2,%r2
- br %r14
-1: la %r2,256(%r2)
- la %r4,256(%r4)
- aghi %r3,-256
-2: mvcs 0(%r3,%r4),0(%r2),%r0
- jnz 1b
-3: slgr %r2,%r2
- br %r14
-4: lghi %r0,-4096
- lgr %r5,%r4
- slgr %r5,%r0
- ngr %r5,%r0 # %r5 = (%r4 + 4096) & -4096
- slgr %r5,%r4 # %r5 = #bytes to next user page boundary
- clgr %r3,%r5 # copy crosses next page boundary ?
- jnh 6f # no, the current page faulted
- # move with the reduced length which is < 256
-5: mvcs 0(%r5,%r4),0(%r2),%r0
- slgr %r3,%r5
-6: lgr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .quad 0b,4b
- .quad 2b,4b
- .quad 5b,6b
- .previous
-
- .align 4
- .text
- .globl __copy_in_user_asm
- # %r2 = from, %r3 = n, %r4 = to
-__copy_in_user_asm:
- aghi %r3,-1
- jo 6f
- sacf 256
- bras %r1,4f
-0: aghi %r3,257
-1: mvc 0(1,%r4),0(%r2)
- la %r2,1(%r2)
- la %r4,1(%r4)
- aghi %r3,-1
- jnz 1b
-2: lgr %r2,%r3
- br %r14
-3: mvc 0(256,%r4),0(%r2)
- la %r2,256(%r2)
- la %r4,256(%r4)
-4: aghi %r3,-256
- jnm 3b
-5: ex %r3,4(%r1)
- sacf 0
-6: slgr %r2,%r2
- br 14
- .section __ex_table,"a"
- .quad 1b,2b
- .quad 3b,0b
- .quad 5b,0b
- .previous
-
- .align 4
- .text
- .globl __clear_user_asm
- # %r2 = to, %r3 = n
-__clear_user_asm:
- slgr %r0,%r0
- larl %r5,empty_zero_page
-1: mvcs 0(%r3,%r2),0(%r5),%r0
- jnz 2f
- slgr %r2,%r2
- br %r14
-2: la %r2,256(%r2)
- aghi %r3,-256
-3: mvcs 0(%r3,%r2),0(%r5),%r0
- jnz 2b
-4: slgr %r2,%r2
- br %r14
-5: lghi %r0,-4096
- lgr %r4,%r2
- slgr %r4,%r0
- ngr %r4,%r0 # %r4 = (%r2 + 4096) & -4096
- slgr %r4,%r2 # %r4 = #bytes to next user page boundary
- clgr %r3,%r4 # clear crosses next page boundary ?
- jnh 7f # no, the current page faulted
- # clear with the reduced length which is < 256
-6: mvcs 0(%r4,%r2),0(%r5),%r0
- slgr %r3,%r4
-7: lgr %r2,%r3
- br %r14
- .section __ex_table,"a"
- .quad 1b,5b
- .quad 3b,5b
- .quad 6b,7b
- .previous
-
- .align 4
- .text
- .globl __strncpy_from_user_asm
- # %r2 = count, %r3 = dst, %r4 = src
-__strncpy_from_user_asm:
- lghi %r0,0
- lgr %r1,%r4
- la %r2,0(%r2,%r4) # %r2 points to first byte after string
- sacf 256
-0: srst %r2,%r1
- jo 0b
- sacf 0
- lgr %r1,%r2
- jh 1f # \0 found in string ?
- aghi %r1,1 # include \0 in copy
-1: slgr %r1,%r4 # %r1 = copy length (without \0)
- slgr %r2,%r4 # %r2 = return length (including \0)
-2: mvcp 0(%r1,%r3),0(%r4),%r0
- jnz 3f
- br %r14
-3: la %r3,256(%r3)
- la %r4,256(%r4)
- aghi %r1,-256
- mvcp 0(%r1,%r3),0(%r4),%r0
- jnz 3b
- br %r14
-4: sacf 0
- lghi %r2,-EFAULT
- br %r14
- .section __ex_table,"a"
- .quad 0b,4b
- .previous
-
- .align 4
- .text
- .globl __strnlen_user_asm
- # %r2 = count, %r3 = src
-__strnlen_user_asm:
- lghi %r0,0
- lgr %r1,%r3
- la %r2,0(%r2,%r3) # %r2 points to first byte after string
- sacf 256
-0: srst %r2,%r1
- jo 0b
- sacf 0
- aghi %r2,1 # strnlen_user result includes the \0
- # or return count+1 if \0 not found
- slgr %r2,%r3
- br %r14
-2: sacf 0
- slgr %r2,%r2 # return 0 on exception
- br %r14
- .section __ex_table,"a"
- .quad 0b,2b
- .previous
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
new file mode 100644
index 000000000000..86c96d6c191a
--- /dev/null
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -0,0 +1,156 @@
+/*
+ * arch/s390/lib/uaccess_mvcos.c
+ *
+ * Optimized user space access functions based on mvcos.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/futex.h>
+
+#ifndef __s390x__
+#define AHI "ahi"
+#define ALR "alr"
+#define CLR "clr"
+#define LHI "lhi"
+#define SLR "slr"
+#else
+#define AHI "aghi"
+#define ALR "algr"
+#define CLR "clgr"
+#define LHI "lghi"
+#define SLR "slgr"
+#endif
+
+size_t copy_from_user_mvcos(size_t size, const void __user *ptr, void *x)
+{
+ register unsigned long reg0 asm("0") = 0x81UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n"
+ " jz 4f\n"
+ "1:"ALR" %0,%3\n"
+ " "SLR" %1,%3\n"
+ " "SLR" %2,%3\n"
+ " j 0b\n"
+ "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
+ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 5f\n"
+ "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n"
+ " "SLR" %0,%4\n"
+ " j 5f\n"
+ "4:"SLR" %0,%0\n"
+ "5: \n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+size_t copy_to_user_mvcos(size_t size, void __user *ptr, const void *x)
+{
+ register unsigned long reg0 asm("0") = 0x810000UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+ " jz 4f\n"
+ "1:"ALR" %0,%3\n"
+ " "SLR" %1,%3\n"
+ " "SLR" %2,%3\n"
+ " j 0b\n"
+ "2: la %4,4095(%1)\n"/* %4 = ptr + 4095 */
+ " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 5f\n"
+ "3: .insn ss,0xc80000000000,0(%4,%1),0(%2),0\n"
+ " "SLR" %0,%4\n"
+ " j 5f\n"
+ "4:"SLR" %0,%0\n"
+ "5: \n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+size_t copy_in_user_mvcos(size_t size, void __user *to, const void __user *from)
+{
+ register unsigned long reg0 asm("0") = 0x810081UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ /* FIXME: copy with reduced length. */
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%1),0(%2),0\n"
+ " jz 2f\n"
+ "1:"ALR" %0,%3\n"
+ " "SLR" %1,%3\n"
+ " "SLR" %2,%3\n"
+ " j 0b\n"
+ "2:"SLR" %0,%0\n"
+ "3: \n"
+ EX_TABLE(0b,3b)
+ : "+a" (size), "+a" (to), "+a" (from), "+a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+size_t clear_user_mvcos(size_t size, void __user *to)
+{
+ register unsigned long reg0 asm("0") = 0x810000UL;
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -4096UL;
+ asm volatile(
+ "0: .insn ss,0xc80000000000,0(%0,%1),0(%4),0\n"
+ " jz 4f\n"
+ "1:"ALR" %0,%2\n"
+ " "SLR" %1,%2\n"
+ " j 0b\n"
+		"2: la %3,4095(%1)\n"/* %3 = to + 4095 */
+		" nr %3,%2\n"	/* %3 = (to + 4095) & -4096 */
+ " "SLR" %3,%1\n"
+ " "CLR" %0,%3\n" /* copy crosses next page boundary? */
+ " jnh 5f\n"
+ "3: .insn ss,0xc80000000000,0(%3,%1),0(%4),0\n"
+ " "SLR" %0,%3\n"
+ " j 5f\n"
+ "4:"SLR" %0,%0\n"
+ "5: \n"
+ EX_TABLE(0b,2b) EX_TABLE(3b,5b)
+ : "+a" (size), "+a" (to), "+a" (tmp1), "=a" (tmp2)
+ : "a" (empty_zero_page), "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+extern size_t copy_from_user_std_small(size_t, const void __user *, void *);
+extern size_t copy_to_user_std_small(size_t, void __user *, const void *);
+extern size_t strnlen_user_std(size_t, const char __user *);
+extern size_t strncpy_from_user_std(size_t, const char __user *, char *);
+extern int futex_atomic_op(int, int __user *, int, int *);
+extern int futex_atomic_cmpxchg(int __user *, int, int);
+
+struct uaccess_ops uaccess_mvcos = {
+ .copy_from_user = copy_from_user_mvcos,
+ .copy_from_user_small = copy_from_user_std_small,
+ .copy_to_user = copy_to_user_mvcos,
+ .copy_to_user_small = copy_to_user_std_small,
+ .copy_in_user = copy_in_user_mvcos,
+ .clear_user = clear_user_mvcos,
+ .strnlen_user = strnlen_user_std,
+ .strncpy_from_user = strncpy_from_user_std,
+ .futex_atomic_op = futex_atomic_op,
+ .futex_atomic_cmpxchg = futex_atomic_cmpxchg,
+};
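
Like the std variants, the mvcos routines return the number of bytes that could not be copied; on a fault the assembly salvages whatever still fits before the next source page and reports the remainder as uncopied. A small example of how a caller consumes that convention (the helper name is hypothetical):

#include <linux/errno.h>
#include <asm/uaccess.h>

/* Hypothetical helper: fetch one word from user space. */
static int fetch_user_word(unsigned int __user *uptr, unsigned int *val)
{
	if (copy_from_user(val, uptr, sizeof(*val)))
		return -EFAULT;		/* nonzero: bytes were left uncopied */
	return 0;
}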
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
new file mode 100644
index 000000000000..9a4d4a29ea79
--- /dev/null
+++ b/arch/s390/lib/uaccess_std.c
@@ -0,0 +1,340 @@
+/*
+ * arch/s390/lib/uaccess_std.c
+ *
+ * Standard user space access functions based on mvcp/mvcs and doing
+ * interesting things in the secondary space mode.
+ *
+ * Copyright (C) IBM Corp. 2006
+ * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
+ * Gerald Schaefer (gerald.schaefer@de.ibm.com)
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <asm/uaccess.h>
+#include <asm/futex.h>
+
+#ifndef __s390x__
+#define AHI "ahi"
+#define ALR "alr"
+#define CLR "clr"
+#define LHI "lhi"
+#define SLR "slr"
+#else
+#define AHI "aghi"
+#define ALR "algr"
+#define CLR "clgr"
+#define LHI "lghi"
+#define SLR "slgr"
+#endif
+
+size_t copy_from_user_std(size_t size, const void __user *ptr, void *x)
+{
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -256UL;
+ asm volatile(
+ "0: mvcp 0(%0,%2),0(%1),%3\n"
+ " jz 5f\n"
+ "1:"ALR" %0,%3\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "2: mvcp 0(%0,%2),0(%1),%3\n"
+ " jnz 1b\n"
+ " j 5f\n"
+ "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "4: mvcp 0(%4,%2),0(%1),%3\n"
+ " "SLR" %0,%4\n"
+ " j 6f\n"
+ "5:"SLR" %0,%0\n"
+ "6: \n"
+ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t copy_from_user_std_small(size_t size, const void __user *ptr, void *x)
+{
+ unsigned long tmp1, tmp2;
+
+ tmp1 = 0UL;
+ asm volatile(
+ "0: mvcp 0(%0,%2),0(%1),%3\n"
+ " "SLR" %0,%0\n"
+ " j 3f\n"
+ "1: la %4,255(%1)\n" /* %4 = ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 3f\n"
+ "2: mvcp 0(%4,%2),0(%1),%3\n"
+ " "SLR" %0,%4\n"
+ "3:\n"
+ EX_TABLE(0b,1b) EX_TABLE(2b,3b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t copy_to_user_std(size_t size, void __user *ptr, const void *x)
+{
+ unsigned long tmp1, tmp2;
+
+ tmp1 = -256UL;
+ asm volatile(
+ "0: mvcs 0(%0,%1),0(%2),%3\n"
+ " jz 5f\n"
+ "1:"ALR" %0,%3\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "2: mvcs 0(%0,%1),0(%2),%3\n"
+ " jnz 1b\n"
+ " j 5f\n"
+ "3: la %4,255(%1)\n" /* %4 = ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 6f\n"
+ "4: mvcs 0(%4,%1),0(%2),%3\n"
+ " "SLR" %0,%4\n"
+ " j 6f\n"
+ "5:"SLR" %0,%0\n"
+ "6: \n"
+ EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t copy_to_user_std_small(size_t size, void __user *ptr, const void *x)
+{
+ unsigned long tmp1, tmp2;
+
+ tmp1 = 0UL;
+ asm volatile(
+ "0: mvcs 0(%0,%1),0(%2),%3\n"
+ " "SLR" %0,%0\n"
+ " j 3f\n"
+ "1: la %4,255(%1)\n" /* ptr + 255 */
+ " "LHI" %3,-4096\n"
+ " nr %4,%3\n" /* (ptr + 255) & -4096UL */
+ " "SLR" %4,%1\n"
+ " "CLR" %0,%4\n" /* copy crosses next page boundary? */
+ " jnh 3f\n"
+ "2: mvcs 0(%4,%1),0(%2),%3\n"
+ " "SLR" %0,%4\n"
+ "3:\n"
+ EX_TABLE(0b,1b) EX_TABLE(2b,3b)
+ : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t copy_in_user_std(size_t size, void __user *to, const void __user *from)
+{
+ unsigned long tmp1;
+
+ asm volatile(
+ " "AHI" %0,-1\n"
+ " jo 5f\n"
+ " sacf 256\n"
+ " bras %3,3f\n"
+ "0:"AHI" %0,257\n"
+ "1: mvc 0(1,%1),0(%2)\n"
+ " la %1,1(%1)\n"
+ " la %2,1(%2)\n"
+ " "AHI" %0,-1\n"
+ " jnz 1b\n"
+ " j 5f\n"
+ "2: mvc 0(256,%1),0(%2)\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "3:"AHI" %0,-256\n"
+ " jnm 2b\n"
+ "4: ex %0,1b-0b(%3)\n"
+ " sacf 0\n"
+ "5: "SLR" %0,%0\n"
+ "6:\n"
+ EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+ : "+a" (size), "+a" (to), "+a" (from), "=a" (tmp1)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t clear_user_std(size_t size, void __user *to)
+{
+ unsigned long tmp1, tmp2;
+
+ asm volatile(
+ " "AHI" %0,-1\n"
+ " jo 5f\n"
+ " sacf 256\n"
+ " bras %3,3f\n"
+ " xc 0(1,%1),0(%1)\n"
+ "0:"AHI" %0,257\n"
+ " la %2,255(%1)\n" /* %2 = ptr + 255 */
+ " srl %2,12\n"
+ " sll %2,12\n" /* %2 = (ptr + 255) & -4096 */
+ " "SLR" %2,%1\n"
+ " "CLR" %0,%2\n" /* clear crosses next page boundary? */
+ " jnh 5f\n"
+ " "AHI" %2,-1\n"
+ "1: ex %2,0(%3)\n"
+ " "AHI" %2,1\n"
+ " "SLR" %0,%2\n"
+ " j 5f\n"
+ "2: xc 0(256,%1),0(%1)\n"
+ " la %1,256(%1)\n"
+ "3:"AHI" %0,-256\n"
+ " jnm 2b\n"
+ "4: ex %0,0(%3)\n"
+ " sacf 0\n"
+ "5: "SLR" %0,%0\n"
+ "6:\n"
+ EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+ : "+a" (size), "+a" (to), "=a" (tmp1), "=a" (tmp2)
+ : : "cc", "memory");
+ return size;
+}
+
+size_t strnlen_user_std(size_t size, const char __user *src)
+{
+ register unsigned long reg0 asm("0") = 0UL;
+ unsigned long tmp1, tmp2;
+
+ asm volatile(
+ " la %2,0(%1)\n"
+ " la %3,0(%0,%1)\n"
+ " "SLR" %0,%0\n"
+ " sacf 256\n"
+ "0: srst %3,%2\n"
+ " jo 0b\n"
+		" la %0,1(%3)\n"	/* strnlen_user result includes \0 */
+ " "SLR" %0,%1\n"
+ "1: sacf 0\n"
+ EX_TABLE(0b,1b)
+ : "+a" (size), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+ : "d" (reg0) : "cc", "memory");
+ return size;
+}
+
+size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst)
+{
+ register unsigned long reg0 asm("0") = 0UL;
+ unsigned long tmp1, tmp2;
+
+ asm volatile(
+ " la %3,0(%1)\n"
+ " la %4,0(%0,%1)\n"
+ " sacf 256\n"
+ "0: srst %4,%3\n"
+ " jo 0b\n"
+ " sacf 0\n"
+ " la %0,0(%4)\n"
+ " jh 1f\n" /* found \0 in string ? */
+ " "AHI" %4,1\n" /* include \0 in copy */
+ "1:"SLR" %0,%1\n" /* %0 = return length (without \0) */
+ " "SLR" %4,%1\n" /* %4 = copy length (including \0) */
+ "2: mvcp 0(%4,%2),0(%1),%5\n"
+ " jz 9f\n"
+ "3:"AHI" %4,-256\n"
+ " la %1,256(%1)\n"
+ " la %2,256(%2)\n"
+ "4: mvcp 0(%4,%2),0(%1),%5\n"
+ " jnz 3b\n"
+ " j 9f\n"
+ "7: sacf 0\n"
+ "8:"LHI" %0,%6\n"
+ "9:\n"
+ EX_TABLE(0b,7b) EX_TABLE(2b,8b) EX_TABLE(4b,8b)
+ : "+a" (size), "+a" (src), "+d" (dst), "=a" (tmp1), "=a" (tmp2)
+ : "d" (reg0), "K" (-EFAULT) : "cc", "memory");
+ return size;
+}
+
+#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
+ asm volatile( \
+ " sacf 256\n" \
+ "0: l %1,0(%6)\n" \
+ "1:"insn \
+ "2: cs %1,%2,0(%6)\n" \
+ "3: jl 1b\n" \
+ " lhi %0,0\n" \
+ "4: sacf 0\n" \
+ EX_TABLE(0b,4b) EX_TABLE(2b,4b) EX_TABLE(3b,4b) \
+ : "=d" (ret), "=&d" (oldval), "=&d" (newval), \
+ "=m" (*uaddr) \
+ : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \
+ "m" (*uaddr) : "cc");
+
+int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
+{
+ int oldval = 0, newval, ret;
+
+ inc_preempt_count();
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("lr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("lr %2,%1\nar %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("lr %2,%1\nor %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("lr %2,%1\nnr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("lr %2,%1\nxr %2,%5\n",
+ ret, oldval, newval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+ dec_preempt_count();
+ *old = oldval;
+ return ret;
+}
+
+int futex_atomic_cmpxchg(int __user *uaddr, int oldval, int newval)
+{
+ int ret;
+
+ asm volatile(
+ " sacf 256\n"
+ " cs %1,%4,0(%5)\n"
+ "0: lr %0,%1\n"
+ "1: sacf 0\n"
+ EX_TABLE(0b,1b)
+ : "=d" (ret), "+d" (oldval), "=m" (*uaddr)
+ : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr)
+ : "cc", "memory" );
+ return ret;
+}
+
+struct uaccess_ops uaccess_std = {
+ .copy_from_user = copy_from_user_std,
+ .copy_from_user_small = copy_from_user_std_small,
+ .copy_to_user = copy_to_user_std,
+ .copy_to_user_small = copy_to_user_std_small,
+ .copy_in_user = copy_in_user_std,
+ .clear_user = clear_user_std,
+ .strnlen_user = strnlen_user_std,
+ .strncpy_from_user = strncpy_from_user_std,
+ .futex_atomic_op = futex_atomic_op,
+ .futex_atomic_cmpxchg = futex_atomic_cmpxchg,
+};
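
futex_atomic_op() above performs the read-modify-write directly in the user address space (sacf 256) and loops on compare-and-swap until no other CPU has changed the word in between. A minimal, hypothetical caller:

#include <linux/compiler.h>
#include <linux/futex.h>

/* Prototype as in the patch above. */
extern int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old);

/* Hypothetical wrapper: atomically add one to a futex word in user space. */
static int bump_futex_word(int __user *uaddr, int *old)
{
	/* FUTEX_OP_ADD: *uaddr += 1; the previous value comes back in *old */
	return futex_atomic_op(FUTEX_OP_ADD, uaddr, 1, old);
}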
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c
index ceea51cff03b..786a44dba5bf 100644
--- a/arch/s390/mm/cmm.c
+++ b/arch/s390/mm/cmm.c
@@ -53,22 +53,6 @@ static void cmm_timer_fn(unsigned long);
static void cmm_set_timer(void);
static long
-cmm_strtoul(const char *cp, char **endp)
-{
- unsigned int base = 10;
-
- if (*cp == '0') {
- base = 8;
- cp++;
- if ((*cp == 'x' || *cp == 'X') && isxdigit(cp[1])) {
- base = 16;
- cp++;
- }
- }
- return simple_strtoul(cp, endp, base);
-}
-
-static long
cmm_alloc_pages(long pages, long *counter, struct cmm_page_array **list)
{
struct cmm_page_array *pa;
@@ -276,7 +260,7 @@ cmm_pages_handler(ctl_table *ctl, int write, struct file *filp,
return -EFAULT;
buf[sizeof(buf) - 1] = '\0';
cmm_skip_blanks(buf, &p);
- pages = cmm_strtoul(p, &p);
+ pages = simple_strtoul(p, &p, 0);
if (ctl == &cmm_table[0])
cmm_set_pages(pages);
else
@@ -317,9 +301,9 @@ cmm_timeout_handler(ctl_table *ctl, int write, struct file *filp,
return -EFAULT;
buf[sizeof(buf) - 1] = '\0';
cmm_skip_blanks(buf, &p);
- pages = cmm_strtoul(p, &p);
+ pages = simple_strtoul(p, &p, 0);
cmm_skip_blanks(p, &p);
- seconds = cmm_strtoul(p, &p);
+ seconds = simple_strtoul(p, &p, 0);
cmm_set_timeout(pages, seconds);
} else {
len = sprintf(buf, "%ld %ld\n",
@@ -382,24 +366,24 @@ cmm_smsg_target(char *from, char *msg)
if (strncmp(msg, "SHRINK", 6) == 0) {
if (!cmm_skip_blanks(msg + 6, &msg))
return;
- pages = cmm_strtoul(msg, &msg);
+ pages = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_set_pages(pages);
} else if (strncmp(msg, "RELEASE", 7) == 0) {
if (!cmm_skip_blanks(msg + 7, &msg))
return;
- pages = cmm_strtoul(msg, &msg);
+ pages = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_add_timed_pages(pages);
} else if (strncmp(msg, "REUSE", 5) == 0) {
if (!cmm_skip_blanks(msg + 5, &msg))
return;
- pages = cmm_strtoul(msg, &msg);
+ pages = simple_strtoul(msg, &msg, 0);
if (!cmm_skip_blanks(msg, &msg))
return;
- seconds = cmm_strtoul(msg, &msg);
+ seconds = simple_strtoul(msg, &msg, 0);
cmm_skip_blanks(msg, &msg);
if (*msg == '\0')
cmm_set_timeout(pages, seconds);
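
Passing base 0 makes simple_strtoul() auto-detect the radix from a leading "0x" or "0", which is exactly what the removed cmm_strtoul() hand-coded. A small illustration (the helper and values are hypothetical):

#include <linux/kernel.h>

/* Hypothetical demonstration of the base-0 auto-detection. */
static void strtoul_examples(void)
{
	char *end;

	/* decimal, octal and hex spellings of the same number, 26 */
	printk(KERN_INFO "%lu %lu %lu\n",
	       simple_strtoul("26", &end, 0),
	       simple_strtoul("032", &end, 0),
	       simple_strtoul("0x1a", &end, 0));
}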
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 7cd82575813d..44f0cda7e72e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -25,10 +25,12 @@
#include <linux/console.h>
#include <linux/module.h>
#include <linux/hardirq.h>
+#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
+#include <asm/kdebug.h>
#ifndef CONFIG_64BIT
#define __FAIL_ADDR_MASK 0x7ffff000
@@ -48,6 +50,38 @@ extern int sysctl_userprocess_debug;
extern void die(const char *,struct pt_regs *,long);
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ return NOTIFY_DONE;
+}
+#endif
+
extern spinlock_t timerlist_lock;
/*
@@ -159,7 +193,7 @@ static void do_sigsegv(struct pt_regs *regs, unsigned long error_code,
* 11 Page translation -> Not present (nullification)
* 3b Region third trans. -> Not present (nullification)
*/
-static inline void
+static inline void __kprobes
do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
{
struct task_struct *tsk;
@@ -173,6 +207,10 @@ do_exception(struct pt_regs *regs, unsigned long error_code, int is_protection)
tsk = current;
mm = tsk->mm;
+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
+ SIGSEGV) == NOTIFY_STOP)
+ return;
+
/*
* Check for low-address protection. This needs to be treated
* as a special case because the translation exception code
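
do_exception() now consults a dedicated page-fault notifier chain before doing any real work, so kprobes can claim faults raised while single-stepping a copied instruction. The registration functions are not exported by this patch, so a consumer would have to be built in; a compact, hypothetical example:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <asm/kdebug.h>

/* Prototypes as defined in mm/fault.c above; where they end up being
 * declared (asm/kdebug.h) is an assumption. */
extern int register_page_fault_notifier(struct notifier_block *nb);
extern int unregister_page_fault_notifier(struct notifier_block *nb);

/* Hypothetical built-in consumer: count the faults seen by the chain. */
static int count_faults(struct notifier_block *nb, unsigned long val,
			void *data)
{
	static unsigned long faults;

	if (val == DIE_PAGE_FAULT)
		faults++;
	/* NOTIFY_STOP would swallow the fault (that is what kprobes does);
	 * NOTIFY_DONE lets do_exception() continue normally. */
	return NOTIFY_DONE;
}

static struct notifier_block fault_counter_nb = {
	.notifier_call = count_faults,
};

static int __init fault_counter_init(void)
{
	return register_page_fault_notifier(&fault_counter_nb);
}
__initcall(fault_counter_init);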
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 6e6b6de77770..cfd9b8f7a523 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -108,16 +108,23 @@ void __init paging_init(void)
unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
static const int ssm_mask = 0x04000000L;
unsigned long ro_start_pfn, ro_end_pfn;
+ unsigned long zones_size[MAX_NR_ZONES];
ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
+ memset(zones_size, 0, sizeof(zones_size));
+ zones_size[ZONE_DMA] = max_low_pfn;
+ free_area_init_node(0, &contig_page_data, zones_size,
+ __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+ zholes_size);
+
/* unmap whole virtual address space */
pg_dir = swapper_pg_dir;
- for (i=0;i<KERNEL_PGD_PTRS;i++)
- pmd_clear((pmd_t*)pg_dir++);
+ for (i = 0; i < PTRS_PER_PGD; i++)
+ pmd_clear((pmd_t *) pg_dir++);
/*
* map whole physical memory to virtual memory (identity mapping)
@@ -131,10 +138,7 @@ void __init paging_init(void)
*/
pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
- pg_dir->pgd0 = (_PAGE_TABLE | __pa(pg_table));
- pg_dir->pgd1 = (_PAGE_TABLE | (__pa(pg_table)+1024));
- pg_dir->pgd2 = (_PAGE_TABLE | (__pa(pg_table)+2048));
- pg_dir->pgd3 = (_PAGE_TABLE | (__pa(pg_table)+3072));
+ pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
pg_dir++;
for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
@@ -143,8 +147,8 @@ void __init paging_init(void)
else
pte = pfn_pte(pfn, PAGE_KERNEL);
if (pfn >= max_low_pfn)
- pte_clear(&init_mm, 0, &pte);
- set_pte(pg_table, pte);
+ pte_val(pte) = _PAGE_TYPE_EMPTY;
+ set_pte(pg_table, pte);
pfn++;
}
}
@@ -159,16 +163,6 @@ void __init paging_init(void)
: : "m" (pgdir_k), "m" (ssm_mask));
local_flush_tlb();
-
- {
- unsigned long zones_size[MAX_NR_ZONES];
-
- memset(zones_size, 0, sizeof(zones_size));
- zones_size[ZONE_DMA] = max_low_pfn;
- free_area_init_node(0, &contig_page_data, zones_size,
- __pa(PAGE_OFFSET) >> PAGE_SHIFT,
- zholes_size);
- }
return;
}
@@ -236,10 +230,8 @@ void __init paging_init(void)
pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
else
pte = pfn_pte(pfn, PAGE_KERNEL);
- if (pfn >= max_low_pfn) {
- pte_clear(&init_mm, 0, &pte);
- continue;
- }
+ if (pfn >= max_low_pfn)
+ pte_val(pte) = _PAGE_TYPE_EMPTY;
set_pte(pt_dir, pte);
pfn++;
}